VirtualBox

Changeset 43387 in vbox for trunk/src/VBox/VMM


Timestamp: Sep 21, 2012 9:40:25 AM
Author: vboxsync
svn:sync-xref-src-repo-rev: 80859
Message: VMM: HM cleanup.
Location: trunk/src/VBox/VMM
Files: 50 edited, 10 moved

Legend: unchanged lines are prefixed with a space, removed lines with '-',
added lines with '+'; each file's diff is headed by its old (---) and new
(+++) revision.
  • trunk/src/VBox/VMM/Makefile.kmk

    --- r42777
    +++ r43387
    @@ -120 +120 @@
             VMMR3/EM.cpp \
             VMMR3/EMRaw.cpp \
    -        VMMR3/EMHwaccm.cpp \
    +        VMMR3/EMHM.cpp \
             VMMR3/FTM.cpp \
             VMMR3/IEMR3.cpp \
    @@ -160 +160 @@
             VMMR3/VMMSwitcher.cpp \
             VMMR3/VMMTests.cpp \
    -        VMMR3/HWACCM.cpp \
    +        VMMR3/HM.cpp \
             VMMR3/CSAM.cpp \
             VMMR3/PATM.cpp \
    @@ -170 +170 @@
             VMMAll/CPUMStack.cpp \
             VMMAll/DBGFAll.cpp \
    -        VMMAll/HWACCMAll.cpp \
    +        VMMAll/HMAll.cpp \
             VMMAll/IOMAll.cpp \
             VMMAll/IOMAllMMIO.cpp \
    @@ -390 +390 @@
             VMMRC/VMMRC.cpp \
             VMMRC/VMMRCA.asm \
    -        VMMRC/HWACCMRCA.asm \
    +        VMMRC/HMRCA.asm \
             VMMRC/CSAMRC.cpp \
             VMMRC/PATMRC.cpp \
    @@ -488 +488 @@
             VMMR0/GMMR0.cpp \
             VMMR0/GVMMR0.cpp \
    -        VMMR0/HWACCMR0.cpp \
    -        VMMR0/HWACCMR0A.asm \
    +        VMMR0/HMR0.cpp \
    +        VMMR0/HMR0A.asm \
             VMMR0/HWSVMR0.cpp \
             VMMR0/HWVMXR0.cpp \
    @@ -508 +508 @@
             VMMAll/EMAllA.asm \
             VMMAll/FTMAll.cpp \
    -        VMMAll/HWACCMAll.cpp \
    +        VMMAll/HMAll.cpp \
             VMMAll/IOMAll.cpp \
             VMMAll/IOMAllMMIO.cpp \
  • trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp

    --- r43151
    +++ r43387
    @@ -35 +35 @@
     #include <VBox/dis.h>
     #include <VBox/log.h>
    -#include <VBox/vmm/hwaccm.h>
    +#include <VBox/vmm/hm.h>
     #include <VBox/vmm/tm.h>
     #include <iprt/assert.h>
    @@ -89 +89 @@
     {
         Assert(!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
    -    Assert(!HWACCMIsEnabled(pVCpu->CTX_SUFF(pVM)));
    +    Assert(!HMIsEnabled(pVCpu->CTX_SUFF(pVM)));
         Assert((uintptr_t)(pSReg - &pVCpu->cpum.s.Guest.es) < X86_SREG_COUNT);

    @@ -545 +545 @@
     #ifdef VBOX_WITH_IEM
     # ifdef VBOX_WITH_RAW_MODE_NOT_R0
    -    if (!HWACCMIsEnabled(pVCpu->CTX_SUFF(pVM)))
    +    if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
             VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
     # endif
    @@ -559 +559 @@
     #ifdef VBOX_WITH_IEM
     # ifdef VBOX_WITH_RAW_MODE_NOT_R0
    -    if (!HWACCMIsEnabled(pVCpu->CTX_SUFF(pVM)))
    +    if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
             VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
     # endif
    @@ -573 +573 @@
     #ifdef VBOX_WITH_IEM
     # ifdef VBOX_WITH_RAW_MODE_NOT_R0
    -    if (!HWACCMIsEnabled(pVCpu->CTX_SUFF(pVM)))
    +    if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
             VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
     # endif
    @@ -588 +588 @@
         if (   (   ldtr != 0
                 || pVCpu->cpum.s.Guest.ldtr.Sel != 0)
    -        && !HWACCMIsEnabled(pVCpu->CTX_SUFF(pVM)))
    +        && !HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
             VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
     # endif
    @@ -1236 +1236 @@
                 {
                     /// @todo PGMFlushTLB(pVCpu, cr3, true /*fGlobal*/);
    -                HWACCMFlushTLB(pVCpu);
    +                HMFlushTLB(pVCpu);

                     /* Notify PGM about NXE changes. */
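    A pattern note on the CPUMAllRegs.cpp hunks: every call site guards raw-mode-only
    work (queueing GDT/IDT/LDT/TSS resync force-flags, or the manual TLB flush on
    CR3/NXE changes) behind !HMIsEnabled(), since under hardware virtualization the
    CPU walks the real guest tables and the resync would be wasted. A minimal sketch
    of that guard, using only identifiers visible in the hunks above (the helper
    name is hypothetical, and this is not compilable outside the VMM tree):

        /* Hypothetical helper: queue a selector-table resync only when the VM
         * runs in raw mode; under HM the force-flag would never be consumed. */
        static void cpumQueueSyncIfRawMode(PVMCPU pVCpu, uint32_t fForceFlag)
        {
            if (!HMIsEnabled(pVCpu->CTX_SUFF(pVM)))   /* raw-mode only */
                VMCPU_FF_SET(pVCpu, fForceFlag);      /* e.g. VMCPU_FF_SELM_SYNC_GDT */
        }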
  • trunk/src/VBox/VMM/VMMAll/EMAll.cpp

    --- r42780
    +++ r43387
    @@ -34 +34 @@
     #include <VBox/vmm/vm.h>
     #include <VBox/vmm/vmm.h>
    -#include <VBox/vmm/hwaccm.h>
    +#include <VBox/vmm/hm.h>
     #include <VBox/vmm/tm.h>
     #include <VBox/vmm/pdmapi.h>
    @@ -378 +378 @@
                         if (rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT)
                         {
    -                        HWACCMInvalidatePage(pVCpu, uSrcAddr);
    +                        HMInvalidatePage(pVCpu, uSrcAddr);
                             if (((uSrcAddr + cbToRead - 1) >> PAGE_SHIFT) !=  (uSrcAddr >> PAGE_SHIFT))
    -                            HWACCMInvalidatePage(pVCpu, uSrcAddr + cbToRead - 1);
    +                            HMInvalidatePage(pVCpu, uSrcAddr + cbToRead - 1);
                         }
     #endif
    @@ -2926 +2926 @@


    -#ifdef IN_RC /** @todo test+enable for HWACCM as well. */
    +#ifdef IN_RC /** @todo test+enable for HM as well. */
     /**
      * [LOCK] XADD emulation.
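    The middle hunk is worth a second look: a linear read of cbToRead bytes can
    straddle a page boundary, so after a page-not-present error both the first and
    the last page touched are invalidated. The same arithmetic as a self-contained
    C program (PAGE_SHIFT of 12 assumed, as on x86):

        #include <stddef.h>
        #include <stdint.h>
        #include <stdio.h>

        #define PAGE_SHIFT 12

        /* Returns 1 if [uAddr, uAddr + cb - 1] touches more than one 4 KiB page. */
        static int SpansPageBoundary(uint64_t uAddr, size_t cb)
        {
            return ((uAddr + cb - 1) >> PAGE_SHIFT) != (uAddr >> PAGE_SHIFT);
        }

        int main(void)
        {
            printf("%d\n", SpansPageBoundary(0xFFC, 8)); /* 1: crosses into 0x1000 */
            printf("%d\n", SpansPageBoundary(0x010, 8)); /* 0: stays in page 0     */
            return 0;
        }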
  • trunk/src/VBox/VMM/VMMAll/HMAll.cpp

    --- r43373
    +++ r43387
    @@ -1 +1 @@
     /* $Id$ */
     /** @file
    - * HWACCM - All contexts.
    + * HM - All contexts.
      */

    @@ -20 +20 @@
     *   Header Files                                                               *
     *******************************************************************************/
    -#define LOG_GROUP LOG_GROUP_HWACCM
    -#include <VBox/vmm/hwaccm.h>
    +#define LOG_GROUP LOG_GROUP_HM
    +#include <VBox/vmm/hm.h>
     #include <VBox/vmm/pgm.h>
    -#include "HWACCMInternal.h"
    +#include "HMInternal.h"
     #include <VBox/vmm/vm.h>
    -#include <VBox/vmm/hwacc_vmx.h>
    -#include <VBox/vmm/hwacc_svm.h>
    +#include <VBox/vmm/hm_vmx.h>
    +#include <VBox/vmm/hm_svm.h>
     #include <VBox/err.h>
     #include <VBox/log.h>
    @@ -43 +43 @@
      * @param   GCVirt      Page to invalidate
      */
    -static void hwaccmQueueInvlPage(PVMCPU pVCpu, RTGCPTR GCVirt)
    +static void hmQueueInvlPage(PVMCPU pVCpu, RTGCPTR GCVirt)
     {
         /* Nothing to do if a TLB flush is already pending */
    @@ -53 +53 @@
     #else
         Be very careful when activating this code!
    -    if (iPage == RT_ELEMENTS(pVCpu->hwaccm.s.TlbShootdown.aPages))
    +    if (iPage == RT_ELEMENTS(pVCpu->hm.s.TlbShootdown.aPages))
             VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
         else
    @@ -67 +67 @@
      * @param   GCVirt      Page to invalidate
      */
    -VMMDECL(int) HWACCMInvalidatePage(PVMCPU pVCpu, RTGCPTR GCVirt)
    -{
    -    STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushPageManual);
    +VMMDECL(int) HMInvalidatePage(PVMCPU pVCpu, RTGCPTR GCVirt)
    +{
    +    STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushPageManual);
     #ifdef IN_RING0
         PVM pVM = pVCpu->CTX_SUFF(pVM);
    -    if (pVM->hwaccm.s.vmx.fSupported)
    +    if (pVM->hm.s.vmx.fSupported)
             return VMXR0InvalidatePage(pVM, pVCpu, GCVirt);

    -    Assert(pVM->hwaccm.s.svm.fSupported);
    +    Assert(pVM->hm.s.svm.fSupported);
         return SVMR0InvalidatePage(pVM, pVCpu, GCVirt);

     #else
    -    hwaccmQueueInvlPage(pVCpu, GCVirt);
    +    hmQueueInvlPage(pVCpu, GCVirt);
         return VINF_SUCCESS;
     #endif
    @@ -90 +90 @@
      * @param   pVCpu       Pointer to the VMCPU.
      */
    -VMMDECL(int) HWACCMFlushTLB(PVMCPU pVCpu)
    -{
    -    LogFlow(("HWACCMFlushTLB\n"));
    +VMMDECL(int) HMFlushTLB(PVMCPU pVCpu)
    +{
    +    LogFlow(("HMFlushTLB\n"));

         VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    -    STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushTLBManual);
    +    STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTLBManual);
         return VINF_SUCCESS;
     }
    @@ -105 +105 @@
      *
      */
    -static DECLCALLBACK(void) hwaccmFlushHandler(RTCPUID idCpu, void *pvUser1, void *pvUser2)
    +static DECLCALLBACK(void) hmFlushHandler(RTCPUID idCpu, void *pvUser1, void *pvUser2)
     {
         NOREF(idCpu); NOREF(pvUser1); NOREF(pvUser2);
    @@ -116 +116 @@
     static void hmR0PokeCpu(PVMCPU pVCpu, RTCPUID idHostCpu)
     {
    -    uint32_t cWorldSwitchExits = ASMAtomicUoReadU32(&pVCpu->hwaccm.s.cWorldSwitchExits);
    -
    -    STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatPoke, x);
    +    uint32_t cWorldSwitchExits = ASMAtomicUoReadU32(&pVCpu->hm.s.cWorldSwitchExits);
    +
    +    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatPoke, x);
         int rc = RTMpPokeCpu(idHostCpu);
    -    STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatPoke, x);
    +    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPoke, x);

         /* Not implemented on some platforms (Darwin, Linux kernel < 2.6.19); fall
    @@ -126 +126 @@
         if (rc == VERR_NOT_SUPPORTED)
         {
    -        STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatSpinPoke, z);
    +        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatSpinPoke, z);
             /* synchronous. */
    -        RTMpOnSpecific(idHostCpu, hwaccmFlushHandler, 0, 0);
    -        STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatSpinPoke, z);
    +        RTMpOnSpecific(idHostCpu, hmFlushHandler, 0, 0);
    +        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatSpinPoke, z);
         }
         else
         {
             if (rc == VINF_SUCCESS)
    -            STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatSpinPoke, z);
    +            STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatSpinPoke, z);
             else
    -            STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatSpinPokeFailed, z);
    +            STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatSpinPokeFailed, z);

     /** @todo If more than one CPU is going to be poked, we could optimize this
    @@ -144 +144 @@
      *        then. */
             /* Spin until the VCPU has switched back (poking is async). */
    -        while (   ASMAtomicUoReadBool(&pVCpu->hwaccm.s.fCheckedTLBFlush)
    -               && cWorldSwitchExits == ASMAtomicUoReadU32(&pVCpu->hwaccm.s.cWorldSwitchExits))
    +        while (   ASMAtomicUoReadBool(&pVCpu->hm.s.fCheckedTLBFlush)
    +               && cWorldSwitchExits == ASMAtomicUoReadU32(&pVCpu->hm.s.cWorldSwitchExits))
                 ASMNopPause();

             if (rc == VINF_SUCCESS)
    -            STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatSpinPoke, z);
    +            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatSpinPoke, z);
             else
    -            STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatSpinPokeFailed, z);
    +            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatSpinPokeFailed, z);
         }
     }
    @@ -167 +167 @@
     static void hmPokeCpuForTlbFlush(PVMCPU pVCpu, bool fAccountFlushStat)
     {
    -    if (ASMAtomicUoReadBool(&pVCpu->hwaccm.s.fCheckedTLBFlush))
    +    if (ASMAtomicUoReadBool(&pVCpu->hm.s.fCheckedTLBFlush))
         {
             if (fAccountFlushStat)
    -            STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTlbShootdownFlush);
    +            STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdownFlush);
             else
    -            STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTlbShootdown);
    +            STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
     #ifdef IN_RING0
    -        RTCPUID idHostCpu = pVCpu->hwaccm.s.idEnteredCpu;
    +        RTCPUID idHostCpu = pVCpu->hm.s.idEnteredCpu;
             if (idHostCpu != NIL_RTCPUID)
                 hmR0PokeCpu(pVCpu, idHostCpu);
    @@ -182 +182 @@
         }
         else
    -        STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushPageManual);
    +        STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushPageManual);
     }

    @@ -193 +193 @@
      * @param   GCVirt      Page to invalidate
      */
    -VMMDECL(int) HWACCMInvalidatePageOnAllVCpus(PVM pVM, RTGCPTR GCPtr)
    +VMMDECL(int) HMInvalidatePageOnAllVCpus(PVM pVM, RTGCPTR GCPtr)
     {
         VMCPUID idCurCpu = VMMGetCpuId(pVM);
    -    STAM_COUNTER_INC(&pVM->aCpus[idCurCpu].hwaccm.s.StatFlushPage);
    +    STAM_COUNTER_INC(&pVM->aCpus[idCurCpu].hm.s.StatFlushPage);

         for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    @@ -208 +208 @@

             if (pVCpu->idCpu == idCurCpu)
    -            HWACCMInvalidatePage(pVCpu, GCPtr);
    +            HMInvalidatePage(pVCpu, GCPtr);
             else
             {
    -            hwaccmQueueInvlPage(pVCpu, GCPtr);
    +            hmQueueInvlPage(pVCpu, GCPtr);
                 hmPokeCpuForTlbFlush(pVCpu, false /*fAccountFlushStat*/);
             }
    @@ -226 +226 @@
      * @param   pVM       Pointer to the VM.
      */
    -VMMDECL(int) HWACCMFlushTLBOnAllVCpus(PVM pVM)
    +VMMDECL(int) HMFlushTLBOnAllVCpus(PVM pVM)
     {
         if (pVM->cCpus == 1)
    -        return HWACCMFlushTLB(&pVM->aCpus[0]);
    +        return HMFlushTLB(&pVM->aCpus[0]);

         VMCPUID idThisCpu = VMMGetCpuId(pVM);

    -    STAM_COUNTER_INC(&pVM->aCpus[idThisCpu].hwaccm.s.StatFlushTLB);
    +    STAM_COUNTER_INC(&pVM->aCpus[idThisCpu].hm.s.StatFlushTLB);

         for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    @@ -260 +260 @@
      * @param   pVM         Pointer to the VM.
      */
    -VMMDECL(bool) HWACCMIsNestedPagingActive(PVM pVM)
    -{
    -    return HWACCMIsEnabled(pVM) && pVM->hwaccm.s.fNestedPaging;
    +VMMDECL(bool) HMIsNestedPagingActive(PVM pVM)
    +{
    +    return HMIsEnabled(pVM) && pVM->hm.s.fNestedPaging;
     }

    @@ -271 +271 @@
      * @param   pVM         Pointer to the VM.
      */
    -VMMDECL(PGMMODE) HWACCMGetShwPagingMode(PVM pVM)
    -{
    -    Assert(HWACCMIsNestedPagingActive(pVM));
    -    if (pVM->hwaccm.s.svm.fSupported)
    +VMMDECL(PGMMODE) HMGetShwPagingMode(PVM pVM)
    +{
    +    Assert(HMIsNestedPagingActive(pVM));
    +    if (pVM->hm.s.svm.fSupported)
             return PGMMODE_NESTED;

    -    Assert(pVM->hwaccm.s.vmx.fSupported);
    +    Assert(pVM->hm.s.vmx.fSupported);
         return PGMMODE_EPT;
     }
    @@ -290 +290 @@
      * @param   GCPhys      Page to invalidate
      */
    -VMMDECL(int) HWACCMInvalidatePhysPage(PVM pVM, RTGCPHYS GCPhys)
    -{
    -    if (!HWACCMIsNestedPagingActive(pVM))
    +VMMDECL(int) HMInvalidatePhysPage(PVM pVM, RTGCPHYS GCPhys)
    +{
    +    if (!HMIsNestedPagingActive(pVM))
             return VINF_SUCCESS;

     #ifdef IN_RING0
    -    if (pVM->hwaccm.s.vmx.fSupported)
    +    if (pVM->hm.s.vmx.fSupported)
         {
             VMCPUID idThisCpu = VMMGetCpuId(pVM);
    @@ -317 +317 @@
         /* AMD-V doesn't support invalidation with guest physical addresses; see
            comment in SVMR0InvalidatePhysPage. */
    -    Assert(pVM->hwaccm.s.svm.fSupported);
    +    Assert(pVM->hm.s.svm.fSupported);
     #else
         NOREF(GCPhys);
     #endif

    -    HWACCMFlushTLBOnAllVCpus(pVM);
    +    HMFlushTLBOnAllVCpus(pVM);
         return VINF_SUCCESS;
     }
    @@ -332 +332 @@
      * @param   pVM         Pointer to the VM.
      */
    -VMMDECL(bool) HWACCMHasPendingIrq(PVM pVM)
    +VMMDECL(bool) HMHasPendingIrq(PVM pVM)
     {
         PVMCPU pVCpu = VMMGetCpu(pVM);
    -    return !!pVCpu->hwaccm.s.Event.fPending;
    -}
    -
    +    return !!pVCpu->hm.s.Event.fPending;
    +}
    +
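    The interesting function here is hmR0PokeCpu: RTMpPokeCpu is asynchronous, so
    the caller snapshots the target VCPU's world-switch exit counter, pokes, and
    then spins until the VCPU either leaves guest context or the counter moves past
    the snapshot. A self-contained C11 model of that handshake (names are
    illustrative, not the VMM's; the real code uses the hm.s fields shown above):

        #include <stdatomic.h>

        typedef struct VCPUSTATE
        {
            atomic_bool fInGuest;           /* models hm.s.fCheckedTLBFlush  */
            atomic_uint cWorldSwitchExits;  /* models hm.s.cWorldSwitchExits */
        } VCPUSTATE;

        /* Ask a VCPU to exit guest mode and wait until it demonstrably has:
         * either it is no longer in guest context, or its exit counter moved. */
        static void PokeAndWait(VCPUSTATE *pVCpu, void (*pfnPoke)(void))
        {
            unsigned const cExitsBefore = atomic_load(&pVCpu->cWorldSwitchExits);
            pfnPoke();                      /* e.g. IPI the host CPU (async)   */
            while (   atomic_load(&pVCpu->fInGuest)
                   && cExitsBefore == atomic_load(&pVCpu->cWorldSwitchExits))
                ;                           /* spin; real code uses ASMNopPause */
        }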
  • trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp

    --- r42726
    +++ r43387
    @@ -35 +35 @@
     #include <VBox/vmm/vm.h>
     #include <VBox/vmm/vmm.h>
    -#include <VBox/vmm/hwaccm.h>
    +#include <VBox/vmm/hm.h>
     #include "IOMInline.h"

    @@ -2360 +2360 @@

         /* This currently only works in real mode, protected mode without paging or with nested paging. */
    -    if (    !HWACCMIsEnabled(pVM)       /* useless without VT-x/AMD-V */
    +    if (    !HMIsEnabled(pVM)       /* useless without VT-x/AMD-V */
             ||  (   CPUMIsGuestInPagedProtectedMode(pVCpu)
    -             && !HWACCMIsNestedPagingActive(pVM)))
    +             && !HMIsNestedPagingActive(pVM)))
             return VINF_SUCCESS;    /* ignore */

    @@ -2429 +2429 @@

         AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
    -    Assert(HWACCMIsEnabled(pVM));
    +    Assert(HMIsEnabled(pVM));

         PVMCPU pVCpu = VMMGetCpu(pVM);
    @@ -2481 +2481 @@

         /* This currently only works in real mode, protected mode without paging or with nested paging. */
    -    if (    !HWACCMIsEnabled(pVM)       /* useless without VT-x/AMD-V */
    +    if (    !HMIsEnabled(pVM)       /* useless without VT-x/AMD-V */
             ||  (   CPUMIsGuestInPagedProtectedMode(pVCpu)
    -             && !HWACCMIsNestedPagingActive(pVM)))
    +             && !HMIsNestedPagingActive(pVM)))
             return VINF_SUCCESS;    /* ignore */
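    The condition repeated in both hunks is a three-way gate: the MMIO page trick
    needs hardware virtualization at all, and it needs guest-physical mappings it
    controls, which exist only while the guest runs unpaged or while nested paging
    backs the guest's page tables. The predicate on its own (hypothetical helper
    name; the calls are the real ones from the hunks, not compilable standalone):

        /* True when remapping an MMIO guest-physical page can work. */
        static bool iomMmioRemapIsPossible(PVM pVM, PVMCPU pVCpu)
        {
            if (!HMIsEnabled(pVM))                   /* useless without VT-x/AMD-V */
                return false;
            if (   CPUMIsGuestInPagedProtectedMode(pVCpu)
                && !HMIsNestedPagingActive(pVM))     /* shadow paging: no control  */
                return false;
            return true;
        }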
  • trunk/src/VBox/VMM/VMMAll/MMAll.cpp

    --- r41965
    +++ r43387
    @@ -640 +640 @@
             TAG2STR(VMM);

    -        TAG2STR(HWACCM);
    +        TAG2STR(HM);

             #undef TAG2STR
  • trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp

    --- r41965
    +++ r43387
    @@ -27 +27 @@
     #include <VBox/vmm/vm.h>
     #include <VBox/err.h>
    -#include <VBox/vmm/hwaccm.h>
    +#include <VBox/vmm/hm.h>

     #include <VBox/log.h>
    @@ -254 +254 @@
         {
             /*
    -         * Leave HWACCM context while waiting if necessary.
    +         * Leave HM context while waiting if necessary.
              */
             int rc;
    @@ -267 +267 @@
                 PVM     pVM   = pCritSect->s.CTX_SUFF(pVM);
                 PVMCPU  pVCpu = VMMGetCpu(pVM);
    -            HWACCMR0Leave(pVM, pVCpu);
    +            HMR0Leave(pVM, pVCpu);
                 RTThreadPreemptRestore(NIL_RTTHREAD, ????);

    @@ -273 +273 @@

                 RTThreadPreemptDisable(NIL_RTTHREAD, ????);
    -            HWACCMR0Enter(pVM, pVCpu);
    +            HMR0Enter(pVM, pVCpu);
             }
             return rc;
  • trunk/src/VBox/VMM/VMMAll/PGMAll.cpp

    --- r41965
    +++ r43387
    @@ -34 +34 @@
     #endif
     #include <VBox/vmm/em.h>
    -#include <VBox/vmm/hwaccm.h>
    -#include <VBox/vmm/hwacc_vmx.h>
    +#include <VBox/vmm/hm.h>
    +#include <VBox/vmm/hm_vmx.h>
     #include "PGMInternal.h"
     #include <VBox/vmm/vm.h>
    @@ -958 +958 @@
                      * No need to monitor anything in this case.
                      */
    -                Assert(!HWACCMIsEnabled(pVM));
    +                Assert(!HMIsEnabled(pVM));

                     GCPdPt  = uGstPdpe & X86_PDPE_PG_MASK;
  • trunk/src/VBox/VMM/VMMAll/PGMAllBth.h

    --- r42188
    +++ r43387
    @@ -1017 +1017 @@
                    */
     #   if PGM_SHW_TYPE == PGM_TYPE_EPT
    -                HWACCMInvalidatePhysPage(pVM, (RTGCPHYS)pvFault);
    +                HMInvalidatePhysPage(pVM, (RTGCPHYS)pvFault);
     #   else
                     PGM_INVL_PG(pVCpu, pvFault);
  • trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp

    --- r41965
    +++ r43387
    @@ -437 +437 @@
             PGM_INVL_VCPU_TLBS(VMMGetCpu0(pVM));
     # else
    -    HWACCMFlushTLBOnAllVCpus(pVM);
    +    HMFlushTLBOnAllVCpus(pVM);
     # endif

  • trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp

    --- r43197
    +++ r43387
    @@ -32 +32 @@
     #include "PGMInline.h"
     #include <VBox/disopcode.h>
    -#include <VBox/vmm/hwacc_vmx.h>
    +#include <VBox/vmm/hm_vmx.h>

     #include <VBox/log.h>
    @@ -742 +742 @@
     #ifndef IN_RC
         /** @todo could make this general, faulting close to rsp should be a safe reuse heuristic. */
    -    if (   HWACCMHasPendingIrq(pVM)
    +    if (   HMHasPendingIrq(pVM)
             && (pRegFrame->rsp - pvFault) < 32)
         {
  • trunk/src/VBox/VMM/VMMAll/PGMAllShw.h

    --- r41802
    +++ r43387
    @@ -479 +479 @@
                 SHW_PTE_ATOMIC_SET2(pPT->a[iPTE], NewPte);
     # if PGM_SHW_TYPE == PGM_TYPE_EPT
    -            HWACCMInvalidatePhysPage(pVM, (RTGCPHYS)GCPtr);
    +            HMInvalidatePhysPage(pVM, (RTGCPHYS)GCPtr);
     # else
                 PGM_INVL_PG_ALL_VCPU(pVM, GCPtr);
  • trunk/src/VBox/VMM/VMMAll/SELMAll.cpp

    --- r42427
    +++ r43387
    @@ -25 +25 @@
     #include <VBox/vmm/mm.h>
     #include <VBox/vmm/pgm.h>
    -#include <VBox/vmm/hwaccm.h>
    +#include <VBox/vmm/hm.h>
     #include "SELMInternal.h"
     #include <VBox/vmm/vm.h>
    @@ -786 +786 @@

         /* Undo ring compression. */
    -    if ((SelCPL & X86_SEL_RPL) == 1 && !HWACCMIsEnabled(pVCpu->CTX_SUFF(pVM)))
    +    if ((SelCPL & X86_SEL_RPL) == 1 && !HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
             SelCPL &= ~X86_SEL_RPL;
         Assert(pSRegCS->Sel == SelCS);
    -    if ((SelCS  & X86_SEL_RPL) == 1 && !HWACCMIsEnabled(pVCpu->CTX_SUFF(pVM)))
    +    if ((SelCS  & X86_SEL_RPL) == 1 && !HMIsEnabled(pVCpu->CTX_SUFF(pVM)))
             SelCS  &= ~X86_SEL_RPL;
     #else
  • trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp

    --- r42464
    +++ r43387
    @@ -26 +26 @@
     #include <VBox/err.h>
     #include <VBox/log.h>
    -#include <VBox/vmm/hwaccm.h>
    +#include <VBox/vmm/hm.h>
     #include <iprt/assert.h>
     #include <iprt/asm-amd64-x86.h>
    @@ -407 +407 @@
             if (!(pVCpu->cpum.s.fUseFlags & CPUM_SYNC_FPU_STATE))
             {
    -            HWACCMR0SaveFPUState(pVM, pVCpu, pCtx);
    +            HMR0SaveFPUState(pVM, pVCpu, pCtx);
                 cpumR0RestoreHostFPUState(&pVCpu->cpum.s);
             }
    @@ -493 +493 @@
                 uint64_t dr6 = pCtx->dr[6];

    -            HWACCMR0SaveDebugState(pVM, pVCpu, pCtx);
    +            HMR0SaveDebugState(pVM, pVCpu, pCtx);
                 if (!fDR6) /* dr6 was already up-to-date */
                     pCtx->dr[6] = dr6;
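    The debug-state hunk carries a subtlety: HMR0SaveDebugState rewrites the whole
    debug register set in the context, so when DR6 was already current the caller
    snapshots it first and restores it afterwards. The idiom in isolation
    (illustrative fragment, same calls as the hunk):

        uint64_t const dr6 = pCtx->dr[6];     /* snapshot before the bulk save */
        HMR0SaveDebugState(pVM, pVCpu, pCtx); /* may clobber pCtx->dr[6]       */
        if (!fDR6)                            /* dr6 was already up-to-date    */
            pCtx->dr[6] = dr6;                /* undo the clobber              */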
  • trunk/src/VBox/VMM/VMMR0/HMR0.cpp

    --- r43373
    +++ r43387
    @@ -20 +20 @@
     *   Header Files                                                               *
     *******************************************************************************/
    -#define LOG_GROUP LOG_GROUP_HWACCM
    -#include <VBox/vmm/hwaccm.h>
    +#define LOG_GROUP LOG_GROUP_HM
    +#include <VBox/vmm/hm.h>
     #include <VBox/vmm/pgm.h>
    -#include "HWACCMInternal.h"
    +#include "HMInternal.h"
     #include <VBox/vmm/vm.h>
    -#include <VBox/vmm/hwacc_vmx.h>
    -#include <VBox/vmm/hwacc_svm.h>
    +#include <VBox/vmm/hm_vmx.h>
    +#include <VBox/vmm/hm_svm.h>
     #include <VBox/err.h>
     #include <VBox/log.h>
    @@ -178 +178 @@
          *          simpler and hopefully easier to understand. */
         bool                            fEnabled;
    -    /** Serialize initialization in HWACCMR0EnableAllCpus. */
    +    /** Serialize initialization in HMR0EnableAllCpus. */
         RTONCE                          EnableAllCpusOnce;
     } g_HvmR0;
    @@ -605 +605 @@
      * @returns VBox status code.
      */
    -VMMR0DECL(int) HWACCMR0Init(void)
    +VMMR0DECL(int) HMR0Init(void)
     {
         /*
    @@ -676 +676 @@
                 hmR0InitAmd(u32FeaturesEDX);
             else
    -            g_HvmR0.lLastError = VERR_HWACCM_UNKNOWN_CPU;
    +            g_HvmR0.lLastError = VERR_HM_UNKNOWN_CPU;
         }
         else
    -        g_HvmR0.lLastError = VERR_HWACCM_NO_CPUID;
    +        g_HvmR0.lLastError = VERR_HM_NO_CPUID;

         /*
    @@ -705 +705 @@
      * @returns VBox status code.
      */
    -VMMR0DECL(int) HWACCMR0Term(void)
    +VMMR0DECL(int) HMR0Term(void)
     {
         int rc;
    @@ -768 +768 @@

     /**
    - * Worker function used by hmR0PowerCallback  and HWACCMR0Init to initalize
    + * Worker function used by hmR0PowerCallback  and HMR0Init to initalize
      * VT-x on a CPU.
      *
    @@ -809 +809 @@

     /**
    - * Worker function used by hmR0PowerCallback  and HWACCMR0Init to initalize
    + * Worker function used by hmR0PowerCallback  and HMR0Init to initalize
      * VT-x / AMD-V on a CPU.
      *
    @@ -911 +911 @@

     /**
    - * RTOnce callback employed by HWACCMR0EnableAllCpus.
    + * RTOnce callback employed by HMR0EnableAllCpus.
      *
      * @returns VBox status code.
    @@ -934 +934 @@
          * The global init variable is set by the first VM.
          */
    -    g_HvmR0.fGlobalInit = pVM->hwaccm.s.fGlobalInit;
    +    g_HvmR0.fGlobalInit = pVM->hm.s.fGlobalInit;

         for (unsigned i = 0; i < RT_ELEMENTS(g_HvmR0.aCpuInfo); i++)
    @@ -953 +953 @@
             if (RT_SUCCESS(rc))
                 /* If the host provides a VT-x init API, then we'll rely on that for global init. */
    -            g_HvmR0.fGlobalInit = pVM->hwaccm.s.fGlobalInit = true;
    +            g_HvmR0.fGlobalInit = pVM->hm.s.fGlobalInit = true;
             else
                 AssertMsgFailed(("hmR0EnableAllCpuOnce/SUPR0EnableVTx: rc=%Rrc\n", rc));
    @@ -996 +996 @@

     /**
    - * Sets up HWACCM on all cpus.
    + * Sets up HM on all cpus.
      *
      * @returns VBox status code.
      * @param   pVM                 Pointer to the VM.
      */
    -VMMR0DECL(int) HWACCMR0EnableAllCpus(PVM pVM)
    -{
    -    /* Make sure we don't touch hwaccm after we've disabled hwaccm in
    +VMMR0DECL(int) HMR0EnableAllCpus(PVM pVM)
    +{
    +    /* Make sure we don't touch hm after we've disabled hm in
            preparation of a suspend. */
         if (ASMAtomicReadBool(&g_HvmR0.fSuspended))
    -        return VERR_HWACCM_SUSPEND_PENDING;
    +        return VERR_HM_SUSPEND_PENDING;

         return RTOnce(&g_HvmR0.EnableAllCpusOnce, hmR0EnableAllCpuOnce, pVM, NULL);
    @@ -1086 +1086 @@
         /*
          * We only care about uninitializing a CPU that is going offline. When a
    -     * CPU comes online, the initialization is done lazily in HWACCMR0Enter().
    +     * CPU comes online, the initialization is done lazily in HMR0Enter().
          */
         Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    @@ -1180 +1180 @@
      * @param   pVM         Pointer to the VM.
      */
    -VMMR0DECL(int) HWACCMR0InitVM(PVM pVM)
    +VMMR0DECL(int) HMR0InitVM(PVM pVM)
     {
         AssertReturn(pVM, VERR_INVALID_PARAMETER);

     #ifdef LOG_ENABLED
    -    SUPR0Printf("HWACCMR0InitVM: %p\n", pVM);
    +    SUPR0Printf("HMR0InitVM: %p\n", pVM);
     #endif

    -    /* Make sure we don't touch hwaccm after we've disabled hwaccm in preparation of a suspend. */
    +    /* Make sure we don't touch hm after we've disabled hm in preparation of a suspend. */
         if (ASMAtomicReadBool(&g_HvmR0.fSuspended))
    -        return VERR_HWACCM_SUSPEND_PENDING;
    +        return VERR_HM_SUSPEND_PENDING;

         /*
          * Copy globals to the VM structure.
          */
    -    pVM->hwaccm.s.vmx.fSupported            = g_HvmR0.vmx.fSupported;
    -    pVM->hwaccm.s.svm.fSupported            = g_HvmR0.svm.fSupported;
    -
    -    pVM->hwaccm.s.vmx.fUsePreemptTimer      = g_HvmR0.vmx.fUsePreemptTimer;
    -    pVM->hwaccm.s.vmx.cPreemptTimerShift    = g_HvmR0.vmx.cPreemptTimerShift;
    -    pVM->hwaccm.s.vmx.msr.feature_ctrl      = g_HvmR0.vmx.msr.feature_ctrl;
    -    pVM->hwaccm.s.vmx.hostCR4               = g_HvmR0.vmx.hostCR4;
    -    pVM->hwaccm.s.vmx.hostEFER              = g_HvmR0.vmx.hostEFER;
    -    pVM->hwaccm.s.vmx.msr.vmx_basic_info    = g_HvmR0.vmx.msr.vmx_basic_info;
    -    pVM->hwaccm.s.vmx.msr.vmx_pin_ctls      = g_HvmR0.vmx.msr.vmx_pin_ctls;
    -    pVM->hwaccm.s.vmx.msr.vmx_proc_ctls     = g_HvmR0.vmx.msr.vmx_proc_ctls;
    -    pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2    = g_HvmR0.vmx.msr.vmx_proc_ctls2;
    -    pVM->hwaccm.s.vmx.msr.vmx_exit          = g_HvmR0.vmx.msr.vmx_exit;
    -    pVM->hwaccm.s.vmx.msr.vmx_entry         = g_HvmR0.vmx.msr.vmx_entry;
    -    pVM->hwaccm.s.vmx.msr.vmx_misc          = g_HvmR0.vmx.msr.vmx_misc;
    -    pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed0    = g_HvmR0.vmx.msr.vmx_cr0_fixed0;
    -    pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed1    = g_HvmR0.vmx.msr.vmx_cr0_fixed1;
    -    pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0    = g_HvmR0.vmx.msr.vmx_cr4_fixed0;
    -    pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed1    = g_HvmR0.vmx.msr.vmx_cr4_fixed1;
    -    pVM->hwaccm.s.vmx.msr.vmx_vmcs_enum     = g_HvmR0.vmx.msr.vmx_vmcs_enum;
    -    pVM->hwaccm.s.vmx.msr.vmx_eptcaps       = g_HvmR0.vmx.msr.vmx_eptcaps;
    -    pVM->hwaccm.s.svm.msrHWCR               = g_HvmR0.svm.msrHWCR;
    -    pVM->hwaccm.s.svm.u32Rev                = g_HvmR0.svm.u32Rev;
    -    pVM->hwaccm.s.svm.u32Features           = g_HvmR0.svm.u32Features;
    -    pVM->hwaccm.s.cpuid.u32AMDFeatureECX    = g_HvmR0.cpuid.u32AMDFeatureECX;
    -    pVM->hwaccm.s.cpuid.u32AMDFeatureEDX    = g_HvmR0.cpuid.u32AMDFeatureEDX;
    -    pVM->hwaccm.s.lLastError                = g_HvmR0.lLastError;
    -
    -    pVM->hwaccm.s.uMaxASID                  = g_HvmR0.uMaxASID;
    -
    -
    -    if (!pVM->hwaccm.s.cMaxResumeLoops) /* allow ring-3 overrides */
    -    {
    -        pVM->hwaccm.s.cMaxResumeLoops       = 1024;
    +    pVM->hm.s.vmx.fSupported            = g_HvmR0.vmx.fSupported;
    +    pVM->hm.s.svm.fSupported            = g_HvmR0.svm.fSupported;
    +
    +    pVM->hm.s.vmx.fUsePreemptTimer      = g_HvmR0.vmx.fUsePreemptTimer;
    +    pVM->hm.s.vmx.cPreemptTimerShift    = g_HvmR0.vmx.cPreemptTimerShift;
    +    pVM->hm.s.vmx.msr.feature_ctrl      = g_HvmR0.vmx.msr.feature_ctrl;
    +    pVM->hm.s.vmx.hostCR4               = g_HvmR0.vmx.hostCR4;
    +    pVM->hm.s.vmx.hostEFER              = g_HvmR0.vmx.hostEFER;
    +    pVM->hm.s.vmx.msr.vmx_basic_info    = g_HvmR0.vmx.msr.vmx_basic_info;
    +    pVM->hm.s.vmx.msr.vmx_pin_ctls      = g_HvmR0.vmx.msr.vmx_pin_ctls;
    +    pVM->hm.s.vmx.msr.vmx_proc_ctls     = g_HvmR0.vmx.msr.vmx_proc_ctls;
    +    pVM->hm.s.vmx.msr.vmx_proc_ctls2    = g_HvmR0.vmx.msr.vmx_proc_ctls2;
    +    pVM->hm.s.vmx.msr.vmx_exit          = g_HvmR0.vmx.msr.vmx_exit;
    +    pVM->hm.s.vmx.msr.vmx_entry         = g_HvmR0.vmx.msr.vmx_entry;
    +    pVM->hm.s.vmx.msr.vmx_misc          = g_HvmR0.vmx.msr.vmx_misc;
    +    pVM->hm.s.vmx.msr.vmx_cr0_fixed0    = g_HvmR0.vmx.msr.vmx_cr0_fixed0;
    +    pVM->hm.s.vmx.msr.vmx_cr0_fixed1    = g_HvmR0.vmx.msr.vmx_cr0_fixed1;
    +    pVM->hm.s.vmx.msr.vmx_cr4_fixed0    = g_HvmR0.vmx.msr.vmx_cr4_fixed0;
    +    pVM->hm.s.vmx.msr.vmx_cr4_fixed1    = g_HvmR0.vmx.msr.vmx_cr4_fixed1;
    +    pVM->hm.s.vmx.msr.vmx_vmcs_enum     = g_HvmR0.vmx.msr.vmx_vmcs_enum;
    +    pVM->hm.s.vmx.msr.vmx_eptcaps       = g_HvmR0.vmx.msr.vmx_eptcaps;
    +    pVM->hm.s.svm.msrHWCR               = g_HvmR0.svm.msrHWCR;
    +    pVM->hm.s.svm.u32Rev                = g_HvmR0.svm.u32Rev;
    +    pVM->hm.s.svm.u32Features           = g_HvmR0.svm.u32Features;
    +    pVM->hm.s.cpuid.u32AMDFeatureECX    = g_HvmR0.cpuid.u32AMDFeatureECX;
    +    pVM->hm.s.cpuid.u32AMDFeatureEDX    = g_HvmR0.cpuid.u32AMDFeatureEDX;
    +    pVM->hm.s.lLastError                = g_HvmR0.lLastError;
    +
    +    pVM->hm.s.uMaxASID                  = g_HvmR0.uMaxASID;
    +
    +
    +    if (!pVM->hm.s.cMaxResumeLoops) /* allow ring-3 overrides */
    +    {
    +        pVM->hm.s.cMaxResumeLoops       = 1024;
     #ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
             if (RTThreadPreemptIsPendingTrusty())
    -            pVM->hwaccm.s.cMaxResumeLoops   = 8192;
    +            pVM->hm.s.cMaxResumeLoops   = 8192;
     #endif
         }
    @@ -1242 +1242 @@
             PVMCPU pVCpu = &pVM->aCpus[i];

    -        pVCpu->hwaccm.s.idEnteredCpu        = NIL_RTCPUID;
    +        pVCpu->hm.s.idEnteredCpu        = NIL_RTCPUID;

             /* Invalidate the last cpu we were running on. */
    -        pVCpu->hwaccm.s.idLastCpu           = NIL_RTCPUID;
    +        pVCpu->hm.s.idLastCpu           = NIL_RTCPUID;

             /* We'll aways increment this the first time (host uses ASID 0) */
    -        pVCpu->hwaccm.s.uCurrentASID        = 0;
    +        pVCpu->hm.s.uCurrentASID        = 0;
         }

    @@ -1260 +1260 @@
          */
         RTCCUINTREG     fFlags = ASMIntDisableFlags();
    -    PHMGLOBLCPUINFO pCpu   = HWACCMR0GetCurrentCpu();
    +    PHMGLOBLCPUINFO pCpu   = HMR0GetCurrentCpu();
         ASMAtomicWriteBool(&pCpu->fInUse, true);
         ASMSetFlags(fFlags);
    @@ -1277 +1277 @@
      * @param   pVM         Pointer to the VM.
      */
    -VMMR0DECL(int) HWACCMR0TermVM(PVM pVM)
    -{
    -    Log(("HWACCMR0TermVM: %p\n", pVM));
    +VMMR0DECL(int) HMR0TermVM(PVM pVM)
    +{
    +    Log(("HMR0TermVM: %p\n", pVM));
         AssertReturn(pVM, VERR_INVALID_PARAMETER);

    -    /* Make sure we don't touch hm after we've disabled hwaccm in preparation
    +    /* Make sure we don't touch hm after we've disabled hm in preparation
            of a suspend. */
         /** @todo r=bird: This cannot be right, the termination functions are
          *        just freeing memory and resetting pVM/pVCpu members...
          *  ==> memory leak. */
    -    AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HWACCM_SUSPEND_PENDING);
    +    AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HM_SUSPEND_PENDING);

         /*
    @@ -1296 +1296 @@
          */
         RTCCUINTREG     fFlags = ASMIntDisableFlags();
    -    PHMGLOBLCPUINFO pCpu   = HWACCMR0GetCurrentCpu();
    +    PHMGLOBLCPUINFO pCpu   = HMR0GetCurrentCpu();
         ASMAtomicWriteBool(&pCpu->fInUse, true);
         ASMSetFlags(fFlags);
    @@ -1315 +1315 @@
      * @param   pVM         Pointer to the VM.
      */
    -VMMR0DECL(int) HWACCMR0SetupVM(PVM pVM)
    -{
    -    Log(("HWACCMR0SetupVM: %p\n", pVM));
    +VMMR0DECL(int) HMR0SetupVM(PVM pVM)
    +{
    +    Log(("HMR0SetupVM: %p\n", pVM));
         AssertReturn(pVM, VERR_INVALID_PARAMETER);

    -    /* Make sure we don't touch hwaccm after we've disabled hwaccm in
    +    /* Make sure we don't touch hm after we've disabled hm in
            preparation of a suspend. */
    -    AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HWACCM_SUSPEND_PENDING);
    +    AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HM_SUSPEND_PENDING);


    @@ -1336 +1336 @@
         /* On first entry we'll sync everything. */
         for (VMCPUID i = 0; i < pVM->cCpus; i++)
    -        pVM->aCpus[i].hwaccm.s.fContextUseFlags = HWACCM_CHANGED_ALL;
    +        pVM->aCpus[i].hm.s.fContextUseFlags = HM_CHANGED_ALL;

         /* Enable VT-x or AMD-V if local init is required. */
    @@ -1372 +1372 @@
      * @remarks This is called with preemption disabled.
      */
    -VMMR0DECL(int) HWACCMR0Enter(PVM pVM, PVMCPU pVCpu)
    +VMMR0DECL(int) HMR0Enter(PVM pVM, PVMCPU pVCpu)
     {
         RTCPUID         idCpu = RTMpCpuId();
    @@ -1378 +1378 @@

         /* Make sure we can't enter a session after we've disabled HM in preparation of a suspend. */
    -    AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HWACCM_SUSPEND_PENDING);
    +    AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HM_SUSPEND_PENDING);
         ASMAtomicWriteBool(&pCpu->fInUse, true);

    -    AssertMsg(pVCpu->hwaccm.s.idEnteredCpu == NIL_RTCPUID, ("%d", (int)pVCpu->hwaccm.s.idEnteredCpu));
    -    pVCpu->hwaccm.s.idEnteredCpu = idCpu;
    +    AssertMsg(pVCpu->hm.s.idEnteredCpu == NIL_RTCPUID, ("%d", (int)pVCpu->hm.s.idEnteredCpu));
    +    pVCpu->hm.s.idEnteredCpu = idCpu;

         PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
    @@ -1393 +1393 @@

         /* Always reload the host context and the guest's CR0 register. (!!!!) */
    -    pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0 | HWACCM_CHANGED_HOST_CONTEXT;
    +    pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0 | HM_CHANGED_HOST_CONTEXT;

         /* Setup the register and mask according to the current execution mode. */
         if (pCtx->msrEFER & MSR_K6_EFER_LMA)
    -        pVM->hwaccm.s.u64RegisterMask = UINT64_C(0xFFFFFFFFFFFFFFFF);
    +        pVM->hm.s.u64RegisterMask = UINT64_C(0xFFFFFFFFFFFFFFFF);
         else
    -        pVM->hwaccm.s.u64RegisterMask = UINT64_C(0xFFFFFFFF);
    +        pVM->hm.s.u64RegisterMask = UINT64_C(0xFFFFFFFF);

         /* Enable VT-x or AMD-V if local init is required, or enable if it's a
    @@ -1432 +1432 @@
            and ring-3 calls. */
         if (RT_FAILURE(rc))
    -        pVCpu->hwaccm.s.idEnteredCpu = NIL_RTCPUID;
    +        pVCpu->hm.s.idEnteredCpu = NIL_RTCPUID;
         return rc;
     }
    @@ -1444 +1444 @@
      * @param   pVCpu      Pointer to the VMCPU.
      *
    - * @remarks Called with preemption disabled just like HWACCMR0Enter, our
    + * @remarks Called with preemption disabled just like HMR0Enter, our
      *          counterpart.
      */
    -VMMR0DECL(int) HWACCMR0Leave(PVM pVM, PVMCPU pVCpu)
    +VMMR0DECL(int) HMR0Leave(PVM pVM, PVMCPU pVCpu)
     {
         int             rc;
    @@ -1455 +1455 @@

         /** @todo r=bird: This can't be entirely right? */
    -    AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HWACCM_SUSPEND_PENDING);
    +    AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HM_SUSPEND_PENDING);

         /*
    @@ -1470 +1470 @@
             CPUMR0SaveGuestFPU(pVM, pVCpu, pCtx);

    -        pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
    +        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
             Assert(!CPUMIsGuestFPUStateActive(pVCpu));
         }
    @@ -1479 +1479 @@
            guests, so we must make sure the recompiler flushes its TLB the next
            time it executes code. */
    -    if (    pVM->hwaccm.s.fNestedPaging
    +    if (    pVM->hm.s.fNestedPaging
             &&  CPUMIsGuestInPagedProtectedModeEx(pCtx))
             CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);
    @@ -1485 +1485 @@
         /* Keep track of the CPU owning the VMCS for debugging scheduling weirdness
            and ring-3 calls. */
    -    AssertMsgStmt(   pVCpu->hwaccm.s.idEnteredCpu == idCpu
    +    AssertMsgStmt(   pVCpu->hm.s.idEnteredCpu == idCpu
                       || RT_FAILURE_NP(rc),
    -                  ("Owner is %u, I'm %u", pVCpu->hwaccm.s.idEnteredCpu, idCpu),
    +                  ("Owner is %u, I'm %u", pVCpu->hm.s.idEnteredCpu, idCpu),
                       rc = VERR_HM_WRONG_CPU_1);
    -    pVCpu->hwaccm.s.idEnteredCpu = NIL_RTCPUID;
    +    pVCpu->hm.s.idEnteredCpu = NIL_RTCPUID;

         /*
    @@ -1500 +1500 @@

             /* Reset these to force a TLB flush for the next entry. (-> EXPENSIVE) */
    -        pVCpu->hwaccm.s.idLastCpu    = NIL_RTCPUID;
    -        pVCpu->hwaccm.s.uCurrentASID = 0;
    +        pVCpu->hm.s.idLastCpu    = NIL_RTCPUID;
    +        pVCpu->hm.s.uCurrentASID = 0;
             VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
         }
    @@ -1518 +1518 @@
      *
      * @remarks Called with preemption disabled and after first having called
    - *          HWACCMR0Enter.
    - */
    -VMMR0DECL(int) HWACCMR0RunGuestCode(PVM pVM, PVMCPU pVCpu)
    + *          HMR0Enter.
    + */
    +VMMR0DECL(int) HMR0RunGuestCode(PVM pVM, PVMCPU pVCpu)
     {
     #ifdef VBOX_STRICT
    @@ -1526 +1526 @@
         Assert(!VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL));
         Assert(pCpu->fConfigured);
    -    AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HWACCM_SUSPEND_PENDING);
    +    AssertReturn(!ASMAtomicReadBool(&g_HvmR0.fSuspended), VERR_HM_SUSPEND_PENDING);
         Assert(ASMAtomicReadBool(&pCpu->fInUse) == true);
     #endif
    @@ -1552 +1552 @@
      * @param   pCtx        Pointer to the guest CPU context.
      */
    -VMMR0DECL(int)   HWACCMR0SaveFPUState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
    -{
    -    STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFpu64SwitchBack);
    -    if (pVM->hwaccm.s.vmx.fSupported)
    -        return VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hwaccm.s.pfnSaveGuestFPU64, 0, NULL);
    -    return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hwaccm.s.pfnSaveGuestFPU64, 0, NULL);
    +VMMR0DECL(int)   HMR0SaveFPUState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
    +{
    +    STAM_COUNTER_INC(&pVCpu->hm.s.StatFpu64SwitchBack);
    +    if (pVM->hm.s.vmx.fSupported)
    +        return VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hm.s.pfnSaveGuestFPU64, 0, NULL);
    +    return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hm.s.pfnSaveGuestFPU64, 0, NULL);
     }

    @@ -1569 +1569 @@
      * @param   pCtx        Pointer to the guest CPU context.
      */
    -VMMR0DECL(int)   HWACCMR0SaveDebugState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
    -{
    -    STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatDebug64SwitchBack);
    -    if (pVM->hwaccm.s.vmx.fSupported)
    -        return VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hwaccm.s.pfnSaveGuestDebug64, 0, NULL);
    -    return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hwaccm.s.pfnSaveGuestDebug64, 0, NULL);
    +VMMR0DECL(int)   HMR0SaveDebugState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
    +{
    +    STAM_COUNTER_INC(&pVCpu->hm.s.StatDebug64SwitchBack);
    +    if (pVM->hm.s.vmx.fSupported)
    +        return VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hm.s.pfnSaveGuestDebug64, 0, NULL);
    +    return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hm.s.pfnSaveGuestDebug64, 0, NULL);
     }

    @@ -1584 +1584 @@
      * @param   pVM         Pointer to the VM.
      */
    -VMMR0DECL(int)   HWACCMR0TestSwitcher3264(PVM pVM)
    +VMMR0DECL(int)   HMR0TestSwitcher3264(PVM pVM)
     {
         PVMCPU   pVCpu = &pVM->aCpus[0];
    @@ -1591 +1591 @@
         int      rc;

    -    STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatWorldSwitch3264, z);
    -    if (pVM->hwaccm.s.vmx.fSupported)
    -        rc = VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hwaccm.s.pfnTest64, 5, &aParam[0]);
    +    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatWorldSwitch3264, z);
    +    if (pVM->hm.s.vmx.fSupported)
    +        rc = VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hm.s.pfnTest64, 5, &aParam[0]);
         else
    -        rc = SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hwaccm.s.pfnTest64, 5, &aParam[0]);
    -    STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatWorldSwitch3264, z);
    +        rc = SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hm.s.pfnTest64, 5, &aParam[0]);
    +    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatWorldSwitch3264, z);

         return rc;
    @@ -1608 +1608 @@
      * @returns Suspend pending or not.
      */
    -VMMR0DECL(bool) HWACCMR0SuspendPending(void)
    +VMMR0DECL(bool) HMR0SuspendPending(void)
     {
         return ASMAtomicReadBool(&g_HvmR0.fSuspended);
    @@ -1620 +1620 @@
      * @returns The cpu structure pointer.
      */
    -VMMR0DECL(PHMGLOBLCPUINFO) HWACCMR0GetCurrentCpu(void)
    +VMMR0DECL(PHMGLOBLCPUINFO) HMR0GetCurrentCpu(void)
     {
         RTCPUID idCpu = RTMpCpuId();
    @@ -1635 +1635 @@
      * @param   idCpu       id of the VCPU.
      */
    -VMMR0DECL(PHMGLOBLCPUINFO) HWACCMR0GetCurrentCpuEx(RTCPUID idCpu)
    +VMMR0DECL(PHMGLOBLCPUINFO) HMR0GetCurrentCpuEx(RTCPUID idCpu)
     {
         Assert(idCpu < RT_ELEMENTS(g_HvmR0.aCpuInfo));
    @@ -1652 +1652 @@
      * @param   cbSize          Read size.
      */
    -VMMR0DECL(void) HWACCMR0SavePendingIOPortRead(PVMCPU pVCpu, RTGCPTR GCPtrRip, RTGCPTR GCPtrRipNext, unsigned uPort, unsigned uAndVal, unsigned cbSize)
    -{
    -    pVCpu->hwaccm.s.PendingIO.enmType         = HWACCMPENDINGIO_PORT_READ;
    -    pVCpu->hwaccm.s.PendingIO.GCPtrRip        = GCPtrRip;
    -    pVCpu->hwaccm.s.PendingIO.GCPtrRipNext    = GCPtrRipNext;
    -    pVCpu->hwaccm.s.PendingIO.s.Port.uPort    = uPort;
    -    pVCpu->hwaccm.s.PendingIO.s.Port.uAndVal  = uAndVal;
    -    pVCpu->hwaccm.s.PendingIO.s.Port.cbSize   = cbSize;
    +VMMR0DECL(void) HMR0SavePendingIOPortRead(PVMCPU pVCpu, RTGCPTR GCPtrRip, RTGCPTR GCPtrRipNext, unsigned uPort, unsigned uAndVal, unsigned cbSize)
    +{
    +    pVCpu->hm.s.PendingIO.enmType         = HMPENDINGIO_PORT_READ;
    +    pVCpu->hm.s.PendingIO.GCPtrRip        = GCPtrRip;
    +    pVCpu->hm.s.PendingIO.GCPtrRipNext    = GCPtrRipNext;
    +    pVCpu->hm.s.PendingIO.s.Port.uPort    = uPort;
    +    pVCpu->hm.s.PendingIO.s.Port.uAndVal  = uAndVal;
    +    pVCpu->hm.s.PendingIO.s.Port.cbSize   = cbSize;
         return;
     }
    @@ -1673 +1673 @@
      * @param   cbSize          Read size.
      */
    -VMMR0DECL(void) HWACCMR0SavePendingIOPortWrite(PVMCPU pVCpu, RTGCPTR GCPtrRip, RTGCPTR GCPtrRipNext, unsigned uPort, unsigned uAndVal, unsigned cbSize)
    -{
    -    pVCpu->hwaccm.s.PendingIO.enmType         = HWACCMPENDINGIO_PORT_WRITE;
    -    pVCpu->hwaccm.s.PendingIO.GCPtrRip        = GCPtrRip;
    -    pVCpu->hwaccm.s.PendingIO.GCPtrRipNext    = GCPtrRipNext;
    -    pVCpu->hwaccm.s.PendingIO.s.Port.uPort    = uPort;
    -    pVCpu->hwaccm.s.PendingIO.s.Port.uAndVal  = uAndVal;
    -    pVCpu->hwaccm.s.PendingIO.s.Port.cbSize   = cbSize;
    +VMMR0DECL(void) HMR0SavePendingIOPortWrite(PVMCPU pVCpu, RTGCPTR GCPtrRip, RTGCPTR GCPtrRipNext, unsigned uPort, unsigned uAndVal, unsigned cbSize)
    +{
    +    pVCpu->hm.s.PendingIO.enmType         = HMPENDINGIO_PORT_WRITE;
    +    pVCpu->hm.s.PendingIO.GCPtrRip        = GCPtrRip;
    +    pVCpu->hm.s.PendingIO.GCPtrRipNext    = GCPtrRipNext;
    +    pVCpu->hm.s.PendingIO.s.Port.uPort    = uPort;
    +    pVCpu->hm.s.PendingIO.s.Port.uAndVal  = uAndVal;
    +    pVCpu->hm.s.PendingIO.s.Port.cbSize   = cbSize;
         return;
     }
    @@ -1691 +1691 @@
      * @returns VBox status code.
      * @param   pVM             Pointer to the VM.
    + * @param   enmSwitcher     The switcher we're about to use.
      * @param   pfVTxDisabled   Where to store whether VT-x was disabled or not.
      */
    -VMMR0DECL(int) HWACCMR0EnterSwitcher(PVM pVM, bool *pfVTxDisabled)
    +VMMR0DECL(int) HMR0EnterSwitcher(PVM pVM, VMMSWITCHER enmSwitcher, bool *pfVTxDisabled)
     {
         Assert(!(ASMGetFlags() & X86_EFL_IF) || !RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    @@ -1699 +1700 @@
         *pfVTxDisabled = false;

    -    if (   !g_HvmR0.fEnabled
    -        || !g_HvmR0.vmx.fSupported /* no such issues with AMD-V */
    -        || !g_HvmR0.fGlobalInit    /* Local init implies the CPU is currently not in VMX root mode. */)
    -        return VINF_SUCCESS;    /* nothing to do */
    -
    -    switch (VMMGetSwitcher(pVM))
    +    /* No such issues with AMD-V */
    +    if (!g_HvmR0.vmx.fSupported)
    +        return VINF_SUCCESS;
    +
    +    /* Check if the swithcing we're up to is safe. */
    +    switch (enmSwitcher)
         {
             case VMMSWITCHER_32_TO_32:
    @@ -1720 +1721 @@
         }

    -    PHMGLOBLCPUINFO pCpu = HWACCMR0GetCurrentCpu();
    +    /* When using SUPR0EnableVTx we must let the host suspend and resume VT-x,
    +       regardless of whether we're currently using VT-x or not. */
    +    if (g_HvmR0.vmx.fUsingSUPR0EnableVTx)
    +    {
    +        *pfVTxDisabled = SUPR0SuspendVTxOnCpu();
    +        return VINF_SUCCESS;
    +    }
    +
    +    /** @todo Check if this code is presumtive wrt other VT-x users on the
    +    *        system... */
    +
    +    /* Nothing to do if we haven't enabled VT-x. */
    +    if (!g_HvmR0.fEnabled)
    +        return VINF_SUCCESS;
    +
    +    /* Local init implies the CPU is currently not in VMX root mode. */
    +    if (!g_HvmR0.fGlobalInit)
    +        return VINF_SUCCESS;
    +
    +    /* Ok, disable VT-x. */
    +    PHMGLOBLCPUINFO pCpu = HMR0GetCurrentCpu();
         AssertReturn(pCpu && pCpu->hMemObj != NIL_RTR0MEMOBJ, VERR_HM_IPE_2);

    @@ -1734 +1755 @@
      * switcher turned off paging.
      *
    - * @returns VBox status code.
      * @param   pVM             Pointer to the VM.
      * @param   fVTxDisabled    Whether VT-x was disabled or not.
      */
    -VMMR0DECL(int) HWACCMR0LeaveSwitcher(PVM pVM, bool fVTxDisabled)
    +VMMR0DECL(void) HMR0LeaveSwitcher(PVM pVM, bool fVTxDisabled)
     {
         Assert(!(ASMGetFlags() & X86_EFL_IF));

         if (!fVTxDisabled)
    -        return VINF_SUCCESS;    /* nothing to do */
    -
    -    Assert(g_HvmR0.fEnabled);
    +        return;         /* nothing to do */
    +
         Assert(g_HvmR0.vmx.fSupported);
    -    Assert(g_HvmR0.fGlobalInit);
    -
    -    PHMGLOBLCPUINFO pCpu = HWACCMR0GetCurrentCpu();
    -    AssertReturn(pCpu && pCpu->hMemObj != NIL_RTR0MEMOBJ, VERR_HM_IPE_2);
    -
    -    void           *pvCpuPage     = RTR0MemObjAddress(pCpu->hMemObj);
    -    RTHCPHYS        HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
    -    return VMXR0EnableCpu(pCpu, pVM, pvCpuPage, HCPhysCpuPage, false);
    +    if (g_HvmR0.vmx.fUsingSUPR0EnableVTx)
    +        SUPR0ResumeVTxOnCpu(fVTxDisabled);
    +    else
    +    {
    +        Assert(g_HvmR0.fEnabled);
    +        Assert(g_HvmR0.fGlobalInit);
    +
    +        PHMGLOBLCPUINFO pCpu = HMR0GetCurrentCpu();
    +        AssertReturnVoid(pCpu && pCpu->hMemObj != NIL_RTR0MEMOBJ);
    +
    +        void           *pvCpuPage     = RTR0MemObjAddress(pCpu->hMemObj);
    +        RTHCPHYS        HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
    +        VMXR0EnableCpu(pCpu, pVM, pvCpuPage, HCPhysCpuPage, false);
    +    }
     }

    @@ -1766 +1791 @@
      * @param   pszMsg   Message to prepend the log entry with.
      */
    -VMMR0DECL(void) HWACCMR0DumpDescriptor(PCX86DESCHC pDesc, RTSEL Sel, const char *pszMsg)
    +VMMR0DECL(void) HMR0DumpDescriptor(PCX86DESCHC pDesc, RTSEL Sel, const char *pszMsg)
     {
         /*
    @@ -1887 +1912 @@
      * @param   pCtx        Pointer to the CPU context.
      */
    -VMMR0DECL(void) HWACCMDumpRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
    +VMMR0DECL(void) HMDumpRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
     {
         NOREF(pVM);
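    Beyond the renames, HMR0EnterSwitcher is the one behavioural change in this
    file: it now receives the target switcher explicitly and, on hosts where VT-x
    is owned by the support driver, suspends VT-x through SUPR0SuspendVTxOnCpu
    instead of leaving VMX root mode by hand. The new decision ladder, condensed
    (a sketch of the hunk above, not the verbatim function; the final disable step
    is only hinted at because the changeset excerpt cuts off before it):

        if (!g_HvmR0.vmx.fSupported)              /* AMD-V needs no special care */
            return VINF_SUCCESS;
        /* ... reject switchers that are unsafe under VT-x ... */
        if (g_HvmR0.vmx.fUsingSUPR0EnableVTx)     /* host driver owns VT-x       */
        {
            *pfVTxDisabled = SUPR0SuspendVTxOnCpu();
            return VINF_SUCCESS;
        }
        if (!g_HvmR0.fEnabled || !g_HvmR0.fGlobalInit)
            return VINF_SUCCESS;                  /* VT-x not active on this CPU */
        /* ... otherwise disable VT-x on this CPU using the per-CPU state ... */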
  • trunk/src/VBox/VMM/VMMR0/HMR0A.asm

    r43373 r43387  
    2121%include "VBox/asmdefs.mac"
    2222%include "VBox/err.mac"
    23 %include "VBox/vmm/hwacc_vmx.mac"
     23%include "VBox/vmm/hm_vmx.mac"
    2424%include "VBox/vmm/cpum.mac"
    2525%include "iprt/x86.mac"
    26 %include "HWACCMInternal.mac"
     26%include "HMInternal.mac"
    2727
    2828%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
     
    5656   ; Load the NULL selector into DS, ES, FS and GS on 64-bit darwin so we don't
    5757   ; risk loading a stale LDT value or something invalid.
    58    %define HWACCM_64_BIT_USE_NULL_SEL
     58   %define HM_64_BIT_USE_NULL_SEL
    5959  %endif
    6060 %endif
     
    157157; trashes, rax, rdx & rcx
    158158%macro MYPUSHSEGS64 2
    159  %ifndef HWACCM_64_BIT_USE_NULL_SEL
     159 %ifndef HM_64_BIT_USE_NULL_SEL
    160160   mov     %2, es
    161161   push    %1
     
    169169   push    rdx
    170170   push    rax
    171  %ifndef HWACCM_64_BIT_USE_NULL_SEL
     171 %ifndef HM_64_BIT_USE_NULL_SEL
    172172   push    fs
    173173 %endif
     
    178178   push    rdx
    179179   push    rax
    180  %ifndef HWACCM_64_BIT_USE_NULL_SEL
     180 %ifndef HM_64_BIT_USE_NULL_SEL
    181181   push    gs
    182182 %endif
     
    186186%macro MYPOPSEGS64 2
    187187   ; Note: do not step through this code with a debugger!
    188  %ifndef HWACCM_64_BIT_USE_NULL_SEL
     188 %ifndef HM_64_BIT_USE_NULL_SEL
    189189   xor     eax, eax
    190190   mov     ds, ax
     
    194194 %endif
    195195
    196  %ifndef HWACCM_64_BIT_USE_NULL_SEL
     196 %ifndef HM_64_BIT_USE_NULL_SEL
    197197   pop     gs
    198198 %endif
     
    202202   wrmsr
    203203
    204  %ifndef HWACCM_64_BIT_USE_NULL_SEL
     204 %ifndef HM_64_BIT_USE_NULL_SEL
    205205   pop     fs
    206206 %endif
     
    211211   ; Now it's safe to step again
    212212
    213  %ifndef HWACCM_64_BIT_USE_NULL_SEL
     213 %ifndef HM_64_BIT_USE_NULL_SEL
    214214   pop     %1
    215215   mov     ds, %2
     
    971971; * @param  pIdtr        Where to store the 64-bit IDTR.
    972972; */
    973 ;DECLASM(void) hwaccmR0Get64bitGDTRandIDTR(PX86XDTR64 pGdtr, PX86XDTR64 pIdtr);
    974 ALIGNCODE(16)
    975 BEGINPROC hwaccmR0Get64bitGDTRandIDTR
     973;DECLASM(void) hmR0Get64bitGDTRandIDTR(PX86XDTR64 pGdtr, PX86XDTR64 pIdtr);
     974ALIGNCODE(16)
     975BEGINPROC hmR0Get64bitGDTRandIDTR
    976976    db      0xea                        ; jmp far .sixtyfourbit_mode
    977977    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
     
    991991    dd      .the_end, NAME(SUPR0AbsKernelCS)
    992992BITS 32
    993 ENDPROC   hwaccmR0Get64bitGDTRandIDTR
     993ENDPROC   hmR0Get64bitGDTRandIDTR
    994994
    995995
     
    998998; * @returns CR3
    999999; */
    1000 ;DECLASM(uint64_t) hwaccmR0Get64bitCR3(void);
    1001 ALIGNCODE(16)
    1002 BEGINPROC hwaccmR0Get64bitCR3
     1000;DECLASM(uint64_t) hmR0Get64bitCR3(void);
     1001ALIGNCODE(16)
     1002BEGINPROC hmR0Get64bitCR3
    10031003    db      0xea                        ; jmp far .sixtyfourbit_mode
    10041004    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
     
    10161016    dd      .the_end, NAME(SUPR0AbsKernelCS)
    10171017BITS 32
    1018 ENDPROC   hwaccmR0Get64bitCR3
     1018ENDPROC   hmR0Get64bitCR3
    10191019
    10201020%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
     
    10261026; load the guest ones when necessary.
    10271027;
    1028 ; @cproto       DECLASM(int) hwaccmR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu, PFNHWACCMVMXSTARTVM pfnStartVM);
     1028; @cproto       DECLASM(int) hmR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu, PFNHMVMXSTARTVM pfnStartVM);
    10291029;
    10301030; @returns      eax
     
    10371037; @param        pfnStartVM      msc:[rbp+38h]
    10381038;
    1039 ; @remarks      This is essentially the same code as hwaccmR0SVMRunWrapXMM, only the parameters differ a little bit.
     1039; @remarks      This is essentially the same code as hmR0SVMRunWrapXMM, only the parameters differ a little bit.
    10401040;
    10411041; ASSUMING 64-bit and windows for now.
    10421042ALIGNCODE(16)
    1043 BEGINPROC hwaccmR0VMXStartVMWrapXMM
     1043BEGINPROC hmR0VMXStartVMWrapXMM
    10441044        push    xBP
    10451045        mov     xBP, xSP
     
    11481148        leave
    11491149        ret
    1150 ENDPROC   hwaccmR0VMXStartVMWrapXMM
     1150ENDPROC   hmR0VMXStartVMWrapXMM
    11511151
    11521152;;
     
    11541154; load the guest ones when necessary.
    11551155;
    1156 ; @cproto       DECLASM(int) hwaccmR0SVMRunWrapXMM(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu, PFNHWACCMSVMVMRUN pfnVMRun);
     1156; @cproto       DECLASM(int) hmR0SVMRunWrapXMM(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu, PFNHMSVMVMRUN pfnVMRun);
    11571157;
    11581158; @returns      eax
     
    11651165; @param        pfnVMRun        msc:[rbp+38h]
    11661166;
    1167 ; @remarks      This is essentially the same code as hwaccmR0VMXStartVMWrapXMM, only the parameters differ a little bit.
     1167; @remarks      This is essentially the same code as hmR0VMXStartVMWrapXMM, only the parameters differ a little bit.
    11681168;
    11691169; ASSUMING 64-bit and windows for now.
    11701170ALIGNCODE(16)
    1171 BEGINPROC hwaccmR0SVMRunWrapXMM
     1171BEGINPROC hmR0SVMRunWrapXMM
    11721172        push    xBP
    11731173        mov     xBP, xSP
     
    12761276        leave
    12771277        ret
    1278 ENDPROC   hwaccmR0SVMRunWrapXMM
     1278ENDPROC   hmR0SVMRunWrapXMM
    12791279
    12801280%endif ; VBOX_WITH_KERNEL_USING_XMM
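Both wrappers exist because, with VBOX_WITH_KERNEL_USING_XMM, the low-level VMRUN/VMLAUNCH stubs may clobber XMM registers that the Windows x64 ABI treats as callee-saved (xmm6-xmm15). In C terms the wrapper amounts to the sketch below; MYXMMSTATE and the save/restore helpers are hypothetical stand-ins for the real assembly:

    typedef struct MYXMMSTATE { uint8_t ab[16 * 10]; } MYXMMSTATE; /* xmm6..xmm15 */
    extern void mySaveXmm(MYXMMSTATE *pSave);          /* hypothetical helper */
    extern void myRestoreXmm(const MYXMMSTATE *pSave); /* hypothetical helper */

    static int myRunWrapXMM(RTHCPHYS HCPhysVmcbHost, RTHCPHYS HCPhysVmcb,
                            PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu,
                            PFNHMSVMVMRUN pfnVMRun)
    {
        MYXMMSTATE HostXmm;
        mySaveXmm(&HostXmm);    /* park the callee-saved XMM registers */
        int rc = pfnVMRun(HCPhysVmcbHost, HCPhysVmcb, pCtx, pVM, pVCpu);
        myRestoreXmm(&HostXmm); /* put them back before returning */
        return rc;
    }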
     
    13001300%endif
    13011301
    1302 %include "HWACCMR0Mixed.mac"
     1302%include "HMR0Mixed.mac"
    13031303
    13041304
     
    15031503 %define MYPOPSEGS      MYPOPSEGS64
    15041504
    1505  %include "HWACCMR0Mixed.mac"
     1505 %include "HMR0Mixed.mac"
    15061506%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
  • trunk/src/VBox/VMM/VMMR0/HMR0Mixed.mac

    r43373 r43387  
    11; $Id$
    22;; @file
    3 ; HWACCMR0Mixed.mac - Stuff that darwin needs to build two versions of.
     3; HMR0Mixed.mac - Stuff that darwin needs to build two versions of.
    44;
    5 ; Included by HWACCMR0A.asm with RT_ARCH_AMD64 defined or undefined.
     5; Included by HMR0A.asm with RT_ARCH_AMD64 defined or undefined.
    66;
    77
  • trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp

    r43353 r43387  
    1919*   Header Files                                                               *
    2020*******************************************************************************/
    21 #define LOG_GROUP LOG_GROUP_HWACCM
    22 #include <VBox/vmm/hwaccm.h>
     21#define LOG_GROUP LOG_GROUP_HM
     22#include <VBox/vmm/hm.h>
    2323#include <VBox/vmm/pgm.h>
    2424#include <VBox/vmm/selm.h>
     
    2828#include <VBox/vmm/tm.h>
    2929#include <VBox/vmm/pdmapi.h>
    30 #include "HWACCMInternal.h"
     30#include "HMInternal.h"
    3131#include <VBox/vmm/vm.h>
    32 #include <VBox/vmm/hwacc_svm.h>
     32#include <VBox/vmm/hm_svm.h>
    3333#include <VBox/err.h>
    3434#include <VBox/log.h>
     
    9292         */
    9393        if (    pVM
    94             &&  pVM->hwaccm.s.svm.fIgnoreInUseError)
     94            &&  pVM->hm.s.svm.fIgnoreInUseError)
    9595        {
    9696            pCpu->fIgnoreAMDVInUseError = true;
     
    159159    int rc;
    160160
    161     pVM->hwaccm.s.svm.pMemObjIOBitmap = NIL_RTR0MEMOBJ;
     161    pVM->hm.s.svm.pMemObjIOBitmap = NIL_RTR0MEMOBJ;
    162162
    163163    /* Allocate 12 KB for the IO bitmap (doesn't seem to be a way to convince SVM not to use it) */
    164     rc = RTR0MemObjAllocCont(&pVM->hwaccm.s.svm.pMemObjIOBitmap, 3 << PAGE_SHIFT, false /* executable R0 mapping */);
     164    rc = RTR0MemObjAllocCont(&pVM->hm.s.svm.pMemObjIOBitmap, 3 << PAGE_SHIFT, false /* executable R0 mapping */);
    165165    if (RT_FAILURE(rc))
    166166        return rc;
    167167
    168     pVM->hwaccm.s.svm.pIOBitmap     = RTR0MemObjAddress(pVM->hwaccm.s.svm.pMemObjIOBitmap);
    169     pVM->hwaccm.s.svm.pIOBitmapPhys = RTR0MemObjGetPagePhysAddr(pVM->hwaccm.s.svm.pMemObjIOBitmap, 0);
     168    pVM->hm.s.svm.pIOBitmap     = RTR0MemObjAddress(pVM->hm.s.svm.pMemObjIOBitmap);
     169    pVM->hm.s.svm.pIOBitmapPhys = RTR0MemObjGetPagePhysAddr(pVM->hm.s.svm.pMemObjIOBitmap, 0);
    170170    /* Set all bits to intercept all IO accesses. */
    171     ASMMemFill32(pVM->hwaccm.s.svm.pIOBitmap, 3 << PAGE_SHIFT, 0xffffffff);
     171    ASMMemFill32(pVM->hm.s.svm.pIOBitmap, 3 << PAGE_SHIFT, 0xffffffff);
    172172
    173173    /*
     
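For reference, the 3 << PAGE_SHIFT above is the architectural IOPM size: one intercept bit per I/O port gives 64 Kbit = 8 KB for ports 0-0xFFFF, and SVM requires a third page so multi-byte accesses near port 0xFFFF can still be looked up, 12 KB in all. The allocation pattern, as a standalone sketch using only the RTR0MemObj calls already visible in this hunk:

    RTR0MEMOBJ hMemObj = NIL_RTR0MEMOBJ;
    int rc = RTR0MemObjAllocCont(&hMemObj, 3 << PAGE_SHIFT, false /* executable R0 mapping */);
    if (RT_SUCCESS(rc))
    {
        void    *pvIOBitmap     = RTR0MemObjAddress(hMemObj);
        RTHCPHYS HCPhysIOBitmap = RTR0MemObjGetPagePhysAddr(hMemObj, 0 /* iPage */);
        ASMMemFill32(pvIOBitmap, 3 << PAGE_SHIFT, UINT32_C(0xffffffff)); /* intercept everything */
        /* HCPhysIOBitmap is what ends up in ctrl.u64IOPMPhysAddr further down. */
    }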
    199199    {
    200200        Log(("SVMR0InitVM: AMD cpu with erratum 170 family %x model %x stepping %x\n", u32Family, u32Model, u32Stepping));
    201         pVM->hwaccm.s.svm.fAlwaysFlushTLB = true;
     201        pVM->hm.s.svm.fAlwaysFlushTLB = true;
    202202    }
    203203
     
    207207        PVMCPU pVCpu = &pVM->aCpus[i];
    208208
    209         pVCpu->hwaccm.s.svm.pMemObjVMCBHost  = NIL_RTR0MEMOBJ;
    210         pVCpu->hwaccm.s.svm.pMemObjVMCB      = NIL_RTR0MEMOBJ;
    211         pVCpu->hwaccm.s.svm.pMemObjMSRBitmap = NIL_RTR0MEMOBJ;
     209        pVCpu->hm.s.svm.pMemObjVMCBHost  = NIL_RTR0MEMOBJ;
     210        pVCpu->hm.s.svm.pMemObjVMCB      = NIL_RTR0MEMOBJ;
     211        pVCpu->hm.s.svm.pMemObjMSRBitmap = NIL_RTR0MEMOBJ;
    212212
    213213        /* Allocate one page for the host context */
    214         rc = RTR0MemObjAllocCont(&pVCpu->hwaccm.s.svm.pMemObjVMCBHost, 1 << PAGE_SHIFT, false /* executable R0 mapping */);
     214        rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.pMemObjVMCBHost, 1 << PAGE_SHIFT, false /* executable R0 mapping */);
    215215        if (RT_FAILURE(rc))
    216216            return rc;
    217217
    218         pVCpu->hwaccm.s.svm.pVMCBHost     = RTR0MemObjAddress(pVCpu->hwaccm.s.svm.pMemObjVMCBHost);
    219         pVCpu->hwaccm.s.svm.pVMCBHostPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hwaccm.s.svm.pMemObjVMCBHost, 0);
    220         Assert(pVCpu->hwaccm.s.svm.pVMCBHostPhys < _4G);
    221         ASMMemZeroPage(pVCpu->hwaccm.s.svm.pVMCBHost);
     218        pVCpu->hm.s.svm.pVMCBHost     = RTR0MemObjAddress(pVCpu->hm.s.svm.pMemObjVMCBHost);
     219        pVCpu->hm.s.svm.pVMCBHostPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.pMemObjVMCBHost, 0);
     220        Assert(pVCpu->hm.s.svm.pVMCBHostPhys < _4G);
     221        ASMMemZeroPage(pVCpu->hm.s.svm.pVMCBHost);
    222222
    223223        /* Allocate one page for the VM control block (VMCB). */
    224         rc = RTR0MemObjAllocCont(&pVCpu->hwaccm.s.svm.pMemObjVMCB, 1 << PAGE_SHIFT, false /* executable R0 mapping */);
     224        rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.pMemObjVMCB, 1 << PAGE_SHIFT, false /* executable R0 mapping */);
    225225        if (RT_FAILURE(rc))
    226226            return rc;
    227227
    228         pVCpu->hwaccm.s.svm.pVMCB     = RTR0MemObjAddress(pVCpu->hwaccm.s.svm.pMemObjVMCB);
    229         pVCpu->hwaccm.s.svm.pVMCBPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hwaccm.s.svm.pMemObjVMCB, 0);
    230         Assert(pVCpu->hwaccm.s.svm.pVMCBPhys < _4G);
    231         ASMMemZeroPage(pVCpu->hwaccm.s.svm.pVMCB);
     228        pVCpu->hm.s.svm.pVMCB     = RTR0MemObjAddress(pVCpu->hm.s.svm.pMemObjVMCB);
     229        pVCpu->hm.s.svm.pVMCBPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.pMemObjVMCB, 0);
     230        Assert(pVCpu->hm.s.svm.pVMCBPhys < _4G);
     231        ASMMemZeroPage(pVCpu->hm.s.svm.pVMCB);
    232232
    233233        /* Allocate 8 KB for the MSR bitmap (doesn't seem to be a way to convince SVM not to use it) */
    234         rc = RTR0MemObjAllocCont(&pVCpu->hwaccm.s.svm.pMemObjMSRBitmap, 2 << PAGE_SHIFT, false /* executable R0 mapping */);
     234        rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.pMemObjMSRBitmap, 2 << PAGE_SHIFT, false /* executable R0 mapping */);
    235235        if (RT_FAILURE(rc))
    236236            return rc;
    237237
    238         pVCpu->hwaccm.s.svm.pMSRBitmap     = RTR0MemObjAddress(pVCpu->hwaccm.s.svm.pMemObjMSRBitmap);
    239         pVCpu->hwaccm.s.svm.pMSRBitmapPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hwaccm.s.svm.pMemObjMSRBitmap, 0);
     238        pVCpu->hm.s.svm.pMSRBitmap     = RTR0MemObjAddress(pVCpu->hm.s.svm.pMemObjMSRBitmap);
     239        pVCpu->hm.s.svm.pMSRBitmapPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.pMemObjMSRBitmap, 0);
    240240        /* Set all bits to intercept all MSR accesses. */
    241         ASMMemFill32(pVCpu->hwaccm.s.svm.pMSRBitmap, 2 << PAGE_SHIFT, 0xffffffff);
     241        ASMMemFill32(pVCpu->hm.s.svm.pMSRBitmap, 2 << PAGE_SHIFT, 0xffffffff);
    242242    }
    243243
     
    258258        PVMCPU pVCpu = &pVM->aCpus[i];
    259259
    260         if (pVCpu->hwaccm.s.svm.pMemObjVMCBHost != NIL_RTR0MEMOBJ)
    261         {
    262             RTR0MemObjFree(pVCpu->hwaccm.s.svm.pMemObjVMCBHost, false);
    263             pVCpu->hwaccm.s.svm.pVMCBHost       = 0;
    264             pVCpu->hwaccm.s.svm.pVMCBHostPhys   = 0;
    265             pVCpu->hwaccm.s.svm.pMemObjVMCBHost = NIL_RTR0MEMOBJ;
    266         }
    267 
    268         if (pVCpu->hwaccm.s.svm.pMemObjVMCB != NIL_RTR0MEMOBJ)
    269         {
    270             RTR0MemObjFree(pVCpu->hwaccm.s.svm.pMemObjVMCB, false);
    271             pVCpu->hwaccm.s.svm.pVMCB       = 0;
    272             pVCpu->hwaccm.s.svm.pVMCBPhys   = 0;
    273             pVCpu->hwaccm.s.svm.pMemObjVMCB = NIL_RTR0MEMOBJ;
    274         }
    275         if (pVCpu->hwaccm.s.svm.pMemObjMSRBitmap != NIL_RTR0MEMOBJ)
    276         {
    277             RTR0MemObjFree(pVCpu->hwaccm.s.svm.pMemObjMSRBitmap, false);
    278             pVCpu->hwaccm.s.svm.pMSRBitmap       = 0;
    279             pVCpu->hwaccm.s.svm.pMSRBitmapPhys   = 0;
    280             pVCpu->hwaccm.s.svm.pMemObjMSRBitmap = NIL_RTR0MEMOBJ;
    281         }
    282     }
    283     if (pVM->hwaccm.s.svm.pMemObjIOBitmap != NIL_RTR0MEMOBJ)
    284     {
    285         RTR0MemObjFree(pVM->hwaccm.s.svm.pMemObjIOBitmap, false);
    286         pVM->hwaccm.s.svm.pIOBitmap       = 0;
    287         pVM->hwaccm.s.svm.pIOBitmapPhys   = 0;
    288         pVM->hwaccm.s.svm.pMemObjIOBitmap = NIL_RTR0MEMOBJ;
     260        if (pVCpu->hm.s.svm.pMemObjVMCBHost != NIL_RTR0MEMOBJ)
     261        {
     262            RTR0MemObjFree(pVCpu->hm.s.svm.pMemObjVMCBHost, false);
     263            pVCpu->hm.s.svm.pVMCBHost       = 0;
     264            pVCpu->hm.s.svm.pVMCBHostPhys   = 0;
     265            pVCpu->hm.s.svm.pMemObjVMCBHost = NIL_RTR0MEMOBJ;
     266        }
     267
     268        if (pVCpu->hm.s.svm.pMemObjVMCB != NIL_RTR0MEMOBJ)
     269        {
     270            RTR0MemObjFree(pVCpu->hm.s.svm.pMemObjVMCB, false);
     271            pVCpu->hm.s.svm.pVMCB       = 0;
     272            pVCpu->hm.s.svm.pVMCBPhys   = 0;
     273            pVCpu->hm.s.svm.pMemObjVMCB = NIL_RTR0MEMOBJ;
     274        }
     275        if (pVCpu->hm.s.svm.pMemObjMSRBitmap != NIL_RTR0MEMOBJ)
     276        {
     277            RTR0MemObjFree(pVCpu->hm.s.svm.pMemObjMSRBitmap, false);
     278            pVCpu->hm.s.svm.pMSRBitmap       = 0;
     279            pVCpu->hm.s.svm.pMSRBitmapPhys   = 0;
     280            pVCpu->hm.s.svm.pMemObjMSRBitmap = NIL_RTR0MEMOBJ;
     281        }
     282    }
     283    if (pVM->hm.s.svm.pMemObjIOBitmap != NIL_RTR0MEMOBJ)
     284    {
     285        RTR0MemObjFree(pVM->hm.s.svm.pMemObjIOBitmap, false);
     286        pVM->hm.s.svm.pIOBitmap       = 0;
     287        pVM->hm.s.svm.pIOBitmapPhys   = 0;
     288        pVM->hm.s.svm.pMemObjIOBitmap = NIL_RTR0MEMOBJ;
    289289    }
    290290    return VINF_SUCCESS;
     
    303303
    304304    AssertReturn(pVM, VERR_INVALID_PARAMETER);
    305     Assert(pVM->hwaccm.s.svm.fSupported);
     305    Assert(pVM->hm.s.svm.fSupported);
    306306
    307307    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    308308    {
    309309        PVMCPU    pVCpu = &pVM->aCpus[i];
    310         SVM_VMCB *pVMCB = (SVM_VMCB *)pVM->aCpus[i].hwaccm.s.svm.pVMCB;
     310        SVM_VMCB *pVMCB = (SVM_VMCB *)pVM->aCpus[i].hm.s.svm.pVMCB;
    311311
    312312        AssertMsgReturn(pVMCB, ("Invalid pVMCB\n"), VERR_HMSVM_INVALID_PVMCB);
     
    382382
    383383        /* Set IO and MSR bitmap addresses. */
    384         pVMCB->ctrl.u64IOPMPhysAddr  = pVM->hwaccm.s.svm.pIOBitmapPhys;
    385         pVMCB->ctrl.u64MSRPMPhysAddr = pVCpu->hwaccm.s.svm.pMSRBitmapPhys;
     384        pVMCB->ctrl.u64IOPMPhysAddr  = pVM->hm.s.svm.pIOBitmapPhys;
     385        pVMCB->ctrl.u64MSRPMPhysAddr = pVCpu->hm.s.svm.pMSRBitmapPhys;
    386386
    387387        /* No LBR virtualization. */
     
    399399
    400400        /* If nested paging is not in use, additional intercepts have to be set up. */
    401         if (!pVM->hwaccm.s.fNestedPaging)
     401        if (!pVM->hm.s.fNestedPaging)
    402402        {
    403403            /* CR3 reads/writes must be intercepted; our shadow values are different from guest's. */
     
    448448{
    449449    unsigned ulBit;
    450     uint8_t *pMSRBitmap = (uint8_t *)pVCpu->hwaccm.s.svm.pMSRBitmap;
     450    uint8_t *pMSRBitmap = (uint8_t *)pVCpu->hm.s.svm.pMSRBitmap;
    451451
    452452    if (ulMSR <= 0x00001FFF)
     
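The "ulMSR <= 0x00001FFF" test above selects the first of the three ranges the 8 KB SVM MSR permission map covers: 0x00000000-0x00001FFF at offset 0, 0xC0000000-0xC0001FFF at offset 0x800, and 0xC0010000-0xC0011FFF at offset 0x1000, with two bits per MSR (read intercept, then write intercept). A hedged sketch of the full lookup; the function name and the fIntercept parameter are illustrative:

    static void mySetMSRPermission(uint8_t *pbMsrBitmap, uint32_t uMsr, bool fIntercept)
    {
        unsigned offChunk;
        if (uMsr <= UINT32_C(0x00001fff))
            offChunk = 0;                                /* 0x00000000..0x00001fff */
        else if (uMsr - UINT32_C(0xc0000000) <= UINT32_C(0x1fff))
        {
            offChunk = 0x800;                            /* 0xc0000000..0xc0001fff */
            uMsr    -= UINT32_C(0xc0000000);
        }
        else if (uMsr - UINT32_C(0xc0010000) <= UINT32_C(0x1fff))
        {
            offChunk = 0x1000;                           /* 0xc0010000..0xc0011fff */
            uMsr    -= UINT32_C(0xc0010000);
        }
        else
            return;                                      /* outside the map: always intercepted */

        unsigned ulBit = uMsr * 2;                       /* even: read bit, then write bit */
        uint8_t  fMask = (uint8_t)(3 << (ulBit & 7));    /* both bits land in one byte */
        if (fIntercept)
            pbMsrBitmap[offChunk + ulBit / 8] |=  fMask;
        else
            pbMsrBitmap[offChunk + ulBit / 8] &= ~fMask;
    }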
    498498{
    499499#ifdef VBOX_WITH_STATISTICS
    500     STAM_COUNTER_INC(&pVCpu->hwaccm.s.paStatInjectedIrqsR0[pEvent->n.u8Vector & MASK_INJECT_IRQ_STAT]);
     500    STAM_COUNTER_INC(&pVCpu->hm.s.paStatInjectedIrqsR0[pEvent->n.u8Vector & MASK_INJECT_IRQ_STAT]);
    501501#endif
    502502
     
    539539     * Dispatch any pending interrupts (injected before, but a VM-exit occurred prematurely).
    540540     */
    541     if (pVCpu->hwaccm.s.Event.fPending)
     541    if (pVCpu->hm.s.Event.fPending)
    542542    {
    543543        SVM_EVENT Event;
    544544
    545         Log(("Reinjecting event %08x %08x at %RGv\n", pVCpu->hwaccm.s.Event.intInfo, pVCpu->hwaccm.s.Event.errCode,
     545        Log(("Reinjecting event %08x %08x at %RGv\n", pVCpu->hm.s.Event.intInfo, pVCpu->hm.s.Event.errCode,
    546546             (RTGCPTR)pCtx->rip));
    547         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatIntReinject);
    548         Event.au64[0] = pVCpu->hwaccm.s.Event.intInfo;
     547        STAM_COUNTER_INC(&pVCpu->hm.s.StatIntReinject);
     548        Event.au64[0] = pVCpu->hm.s.Event.intInfo;
    549549        hmR0SvmInjectEvent(pVCpu, pVMCB, pCtx, &Event);
    550550
    551         pVCpu->hwaccm.s.Event.fPending = false;
     551        pVCpu->hm.s.Event.fPending = false;
    552552        return VINF_SUCCESS;
    553553    }
     
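The reinjection block above and the interrupt dispatch further down both funnel into hmR0SvmInjectEvent with an SVM_EVENT (the hardware EVENTINJ format). Building one for a freshly dispatched external interrupt looks roughly like this, using the fields that appear in this diff; u8Interrupt stands for the vector obtained from PDM, and the u1Valid bit is an assumption:

    SVM_EVENT Event;
    Event.au64[0]    = 0;                        /* start from a clean slate */
    Event.n.u8Vector = u8Interrupt;              /* vector fetched from the PIC/APIC */
    Event.n.u1Valid  = 1;                        /* arm delivery on the next VMRUN */
    Event.n.u3Type   = SVM_EVENT_EXTERNAL_IRQ;   /* as opposed to NMI/exception/soft int */
    STAM_COUNTER_INC(&pVCpu->hm.s.StatIntInject);
    hmR0SvmInjectEvent(pVCpu, pVMCB, pCtx, &Event);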
    614614                    /* Can only happen in rare cases where a pending interrupt is cleared behind our back */
    615615                    Assert(!VMCPU_FF_ISPENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC|VMCPU_FF_INTERRUPT_PIC)));
    616                     STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatSwitchGuestIrq);
     616                    STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
    617617                    /* Just continue */
    618618                }
     
    681681            Event.n.u3Type = SVM_EVENT_EXTERNAL_IRQ;
    682682
    683         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatIntInject);
     683        STAM_COUNTER_INC(&pVCpu->hm.s.StatIntInject);
    684684        hmR0SvmInjectEvent(pVCpu, pVMCB, pCtx, &Event);
    685685    } /* if (interrupts can be dispatched) */
     
    724724
    725725    /* Setup AMD SVM. */
    726     Assert(pVM->hwaccm.s.svm.fSupported);
    727 
    728     pVMCB = (SVM_VMCB *)pVCpu->hwaccm.s.svm.pVMCB;
     726    Assert(pVM->hm.s.svm.fSupported);
     727
     728    pVMCB = (SVM_VMCB *)pVCpu->hm.s.svm.pVMCB;
    729729    AssertMsgReturn(pVMCB, ("Invalid pVMCB\n"), VERR_HMSVM_INVALID_PVMCB);
    730730
    731731    /* Guest CPU context: ES, CS, SS, DS, FS, GS. */
    732     if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_SEGMENT_REGS)
     732    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SEGMENT_REGS)
    733733    {
    734734        SVM_WRITE_SELREG(CS, cs);
     
    741741
    742742    /* Guest CPU context: LDTR. */
    743     if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_LDTR)
     743    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_LDTR)
    744744    {
    745745        SVM_WRITE_SELREG(LDTR, ldtr);
     
    747747
    748748    /* Guest CPU context: TR. */
    749     if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_TR)
     749    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_TR)
    750750    {
    751751        SVM_WRITE_SELREG(TR, tr);
     
    753753
    754754    /* Guest CPU context: GDTR. */
    755     if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_GDTR)
     755    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_GDTR)
    756756    {
    757757        pVMCB->guest.GDTR.u32Limit = pCtx->gdtr.cbGdt;
     
    760760
    761761    /* Guest CPU context: IDTR. */
    762     if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_IDTR)
     762    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_IDTR)
    763763    {
    764764        pVMCB->guest.IDTR.u32Limit = pCtx->idtr.cbIdt;
     
    774774
    775775    /* Control registers */
    776     if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR0)
     776    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0)
    777777    {
    778778        val = pCtx->cr0;
     
    790790
    791791                /* Also catch floating point exceptions as we need to report them to the guest in a different way. */
    792                 if (!pVCpu->hwaccm.s.fFPUOldStyleOverride)
     792                if (!pVCpu->hm.s.fFPUOldStyleOverride)
    793793                {
    794794                    pVMCB->ctrl.u32InterceptException |= RT_BIT(X86_XCPT_MF);
    795                     pVCpu->hwaccm.s.fFPUOldStyleOverride = true;
     795                    pVCpu->hm.s.fFPUOldStyleOverride = true;
    796796                }
    797797            }
     
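The #MF intercept toggled above deals with guests that clear CR0.NE and therefore expect FPU errors through the legacy FERR/IRQ13 path rather than as an exception; the intercept is armed once per VCPU and remembered in fFPUOldStyleOverride. Condensed from this hunk (the surrounding CR0.NE test is inferred from the #MF exit handler later in this file):

    if (!(pCtx->cr0 & X86_CR0_NE) && !pVCpu->hm.s.fFPUOldStyleOverride)
    {
        /* Trap #MF so the VMM can reflect it to the guest the old-style way. */
        pVMCB->ctrl.u32InterceptException |= RT_BIT(X86_XCPT_MF);
        pVCpu->hm.s.fFPUOldStyleOverride   = true;   /* arm only once */
    }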
    806806         * translation will remain active.
    807807         */
    808         if (!pVM->hwaccm.s.fNestedPaging)
     808        if (!pVM->hm.s.fNestedPaging)
    809809        {
    810810            val |= X86_CR0_PG;  /* Paging is always enabled; even when the guest is running in real mode or PE without paging. */
     
    816816    pVMCB->guest.u64CR2 = pCtx->cr2;
    817817
    818     if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR3)
     818    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR3)
    819819    {
    820820        /* Save our shadow CR3 register. */
    821         if (pVM->hwaccm.s.fNestedPaging)
     821        if (pVM->hm.s.fNestedPaging)
    822822        {
    823823            PGMMODE enmShwPagingMode;
     
    841841    }
    842842
    843     if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR4)
     843    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR4)
    844844    {
    845845        val = pCtx->cr4;
    846         if (!pVM->hwaccm.s.fNestedPaging)
    847         {
    848             switch (pVCpu->hwaccm.s.enmShadowMode)
     846        if (!pVM->hm.s.fNestedPaging)
     847        {
     848            switch (pVCpu->hm.s.enmShadowMode)
    849849            {
    850850                case PGMMODE_REAL:
     
    881881
    882882    /* Debug registers. */
    883     if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_DEBUG)
     883    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_DEBUG)
    884884    {
    885885        pCtx->dr[6] |= X86_DR6_INIT_VAL;                                          /* set all reserved bits to 1. */
     
    916916            && !DBGFIsStepping(pVCpu))
    917917        {
    918             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatDRxArmed);
     918            STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
    919919
    920920            /* Disable drx move intercepts. */
     
    948948        return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
    949949#elif HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    950         pVCpu->hwaccm.s.svm.pfnVMRun = SVMR0VMSwitcherRun64;
     950        pVCpu->hm.s.svm.pfnVMRun = SVMR0VMSwitcherRun64;
    951951#else
    952952# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    953         if (!pVM->hwaccm.s.fAllow64BitGuests)
     953        if (!pVM->hm.s.fAllow64BitGuests)
    954954            return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
    955955# endif
    956         pVCpu->hwaccm.s.svm.pfnVMRun = SVMR0VMRun64;
    957 #endif
    958         /* Unconditionally update these as wrmsr might have changed them. (HWACCM_CHANGED_GUEST_SEGMENT_REGS will not be set) */
     956        pVCpu->hm.s.svm.pfnVMRun = SVMR0VMRun64;
     957#endif
     958        /* Unconditionally update these as wrmsr might have changed them. (HM_CHANGED_GUEST_SEGMENT_REGS will not be set) */
    959959        pVMCB->guest.FS.u64Base    = pCtx->fs.u64Base;
    960960        pVMCB->guest.GS.u64Base    = pCtx->gs.u64Base;
     
    965965        pVMCB->guest.u64EFER &= ~MSR_K6_EFER_LME;
    966966
    967         pVCpu->hwaccm.s.svm.pfnVMRun = SVMR0VMRun;
     967        pVCpu->hm.s.svm.pfnVMRun = SVMR0VMRun;
    968968    }
    969969
     
    976976            pVMCB->ctrl.u32InterceptCtrl1 &= ~SVM_CTRL1_INTERCEPT_RDTSC;
    977977            pVMCB->ctrl.u32InterceptCtrl2 &= ~SVM_CTRL2_INTERCEPT_RDTSCP;
    978             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTSCOffset);
     978            STAM_COUNTER_INC(&pVCpu->hm.s.StatTSCOffset);
    979979        }
    980980        else
     
    986986            pVMCB->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_RDTSC;
    987987            pVMCB->ctrl.u32InterceptCtrl2 |= SVM_CTRL2_INTERCEPT_RDTSCP;
    988             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTSCInterceptOverFlow);
     988            STAM_COUNTER_INC(&pVCpu->hm.s.StatTSCInterceptOverFlow);
    989989        }
    990990    }
     
    993993        pVMCB->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_RDTSC;
    994994        pVMCB->ctrl.u32InterceptCtrl2 |= SVM_CTRL2_INTERCEPT_RDTSCP;
    995         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTSCIntercept);
     995        STAM_COUNTER_INC(&pVCpu->hm.s.StatTSCIntercept);
    996996    }
    997997
     
    10131013
    10141014    /* Done. */
    1015     pVCpu->hwaccm.s.fContextUseFlags &= ~HWACCM_CHANGED_ALL_GUEST;
     1015    pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_ALL_GUEST;
    10161016
    10171017    return VINF_SUCCESS;
     
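The final mask clear above is the consumer half of the dirty-state protocol used throughout this file: exit handlers mark what they touched with HM_CHANGED_* bits, and SVMR0LoadGuestState writes only those pieces into the VMCB before clearing the lot. Schematically (the u64CR0 field name is assumed by analogy with u64CR2/u64CR3 above):

    /* Producer side, e.g. after the guest wrote CR0 (see the mov crx handler): */
    pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;

    /* Consumer side, in SVMR0LoadGuestState: */
    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0)
        pVMCB->guest.u64CR0 = pCtx->cr0;                    /* sync just the dirty piece */
    pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_ALL_GUEST;  /* everything clean again */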
    10321032    AssertPtr(pVCpu);
    10331033
    1034     SVM_VMCB *pVMCB = (SVM_VMCB *)pVCpu->hwaccm.s.svm.pVMCB;
    1035     pCpu = HWACCMR0GetCurrentCpu();
     1034    SVM_VMCB *pVMCB = (SVM_VMCB *)pVCpu->hm.s.svm.pVMCB;
     1035    pCpu = HMR0GetCurrentCpu();
    10361036
    10371037    /*
     
    10421042     */
    10431043    bool fNewASID = false;
    1044     if (    pVCpu->hwaccm.s.idLastCpu   != pCpu->idCpu
    1045         ||  pVCpu->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
    1046     {
    1047         pVCpu->hwaccm.s.fForceTLBFlush = true;
     1044    if (    pVCpu->hm.s.idLastCpu   != pCpu->idCpu
     1045        ||  pVCpu->hm.s.cTLBFlushes != pCpu->cTLBFlushes)
     1046    {
     1047        pVCpu->hm.s.fForceTLBFlush = true;
    10481048        fNewASID = true;
    10491049    }
     
    10521052     * Set TLB flush state as checked until we return from the world switch.
    10531053     */
    1054     ASMAtomicWriteBool(&pVCpu->hwaccm.s.fCheckedTLBFlush, true);
     1054    ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true);
    10551055
    10561056    /*
     
    10581058     */
    10591059    if (VMCPU_FF_TESTANDCLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
    1060         pVCpu->hwaccm.s.fForceTLBFlush = true;
    1061 
    1062     pVCpu->hwaccm.s.idLastCpu = pCpu->idCpu;
     1060        pVCpu->hm.s.fForceTLBFlush = true;
     1061
     1062    pVCpu->hm.s.idLastCpu = pCpu->idCpu;
    10631063    pVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_NOTHING;
    10641064
    1065     if (RT_UNLIKELY(pVM->hwaccm.s.svm.fAlwaysFlushTLB))
     1065    if (RT_UNLIKELY(pVM->hm.s.svm.fAlwaysFlushTLB))
    10661066    {
    10671067        /*
     
    10691069         */
    10701070        pCpu->uCurrentASID               = 1;
    1071         pVCpu->hwaccm.s.uCurrentASID     = 1;
    1072         pVCpu->hwaccm.s.cTLBFlushes      = pCpu->cTLBFlushes;
     1071        pVCpu->hm.s.uCurrentASID     = 1;
     1072        pVCpu->hm.s.cTLBFlushes      = pCpu->cTLBFlushes;
    10731073        pVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
    10741074    }
    1075     else if (pVCpu->hwaccm.s.fForceTLBFlush)
     1075    else if (pVCpu->hm.s.fForceTLBFlush)
    10761076    {
    10771077        if (fNewASID)
     
    10791079            ++pCpu->uCurrentASID;
    10801080            bool fHitASIDLimit = false;
    1081             if (pCpu->uCurrentASID >= pVM->hwaccm.s.uMaxASID)
     1081            if (pCpu->uCurrentASID >= pVM->hm.s.uMaxASID)
    10821082            {
    10831083                pCpu->uCurrentASID        = 1;  /* start at 1; host uses 0 */
     
    10851085                fHitASIDLimit             = true;
    10861086
    1087                 if (pVM->hwaccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
     1087                if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
    10881088                {
    10891089                    pVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;
     
    11001100                && pCpu->fFlushASIDBeforeUse)
    11011101            {
    1102                 if (pVM->hwaccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
     1102                if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
    11031103                    pVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;
    11041104                else
     
    11091109            }
    11101110
    1111             pVCpu->hwaccm.s.uCurrentASID = pCpu->uCurrentASID;
    1112             pVCpu->hwaccm.s.cTLBFlushes  = pCpu->cTLBFlushes;
     1111            pVCpu->hm.s.uCurrentASID = pCpu->uCurrentASID;
     1112            pVCpu->hm.s.cTLBFlushes  = pCpu->cTLBFlushes;
    11131113        }
    11141114        else
    11151115        {
    1116             if (pVM->hwaccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
     1116            if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
    11171117                pVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;
    11181118            else
     
    11201120        }
    11211121
    1122         pVCpu->hwaccm.s.fForceTLBFlush = false;
     1122        pVCpu->hm.s.fForceTLBFlush = false;
    11231123    }
    11241124    else
    11251125    {
    11261126        /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
    1127          *        not be executed. See hwaccmQueueInvlPage() where it is commented
     1127         *        not be executed. See hmQueueInvlPage() where it is commented
    11281128         *        out. Support individual entry flushing someday. */
    11291129        if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
    11301130        {
    11311131            /* Deal with pending TLB shootdown actions which were queued when we were not executing code. */
    1132             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTlbShootdown);
    1133             for (unsigned i = 0; i < pVCpu->hwaccm.s.TlbShootdown.cPages; i++)
    1134                 SVMR0InvlpgA(pVCpu->hwaccm.s.TlbShootdown.aPages[i], pVMCB->ctrl.TLBCtrl.n.u32ASID);
    1135         }
    1136     }
    1137 
    1138     pVCpu->hwaccm.s.TlbShootdown.cPages = 0;
     1132            STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
     1133            for (unsigned i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
     1134                SVMR0InvlpgA(pVCpu->hm.s.TlbShootdown.aPages[i], pVMCB->ctrl.TLBCtrl.n.u32ASID);
     1135        }
     1136    }
     1137
     1138    pVCpu->hm.s.TlbShootdown.cPages = 0;
    11391139    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
    11401140
    11411141    /* Update VMCB with the ASID. */
    1142     pVMCB->ctrl.TLBCtrl.n.u32ASID = pVCpu->hwaccm.s.uCurrentASID;
    1143 
    1144     AssertMsg(pVCpu->hwaccm.s.cTLBFlushes == pCpu->cTLBFlushes,
    1145               ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hwaccm.s.cTLBFlushes, pCpu->cTLBFlushes));
    1146     AssertMsg(pCpu->uCurrentASID >= 1 && pCpu->uCurrentASID < pVM->hwaccm.s.uMaxASID,
     1142    pVMCB->ctrl.TLBCtrl.n.u32ASID = pVCpu->hm.s.uCurrentASID;
     1143
     1144    AssertMsg(pVCpu->hm.s.cTLBFlushes == pCpu->cTLBFlushes,
     1145              ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hm.s.cTLBFlushes, pCpu->cTLBFlushes));
     1146    AssertMsg(pCpu->uCurrentASID >= 1 && pCpu->uCurrentASID < pVM->hm.s.uMaxASID,
    11471147              ("cpu%d uCurrentASID = %x\n", pCpu->idCpu, pCpu->uCurrentASID));
    1148     AssertMsg(pVCpu->hwaccm.s.uCurrentASID >= 1 && pVCpu->hwaccm.s.uCurrentASID < pVM->hwaccm.s.uMaxASID,
    1149               ("cpu%d VM uCurrentASID = %x\n", pCpu->idCpu, pVCpu->hwaccm.s.uCurrentASID));
     1148    AssertMsg(pVCpu->hm.s.uCurrentASID >= 1 && pVCpu->hm.s.uCurrentASID < pVM->hm.s.uMaxASID,
     1149              ("cpu%d VM uCurrentASID = %x\n", pCpu->idCpu, pVCpu->hm.s.uCurrentASID));
    11501150
    11511151#ifdef VBOX_WITH_STATISTICS
    11521152    if (pVMCB->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_NOTHING)
    1153         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatNoFlushTLBWorldSwitch);
     1153        STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTLBWorldSwitch);
    11541154    else if (   pVMCB->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT
    11551155             || pVMCB->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT_RETAIN_GLOBALS)
    11561156    {
    1157         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushASID);
     1157        STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushASID);
    11581158    }
    11591159    else
    1160         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushTLBWorldSwitch);
     1160        STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTLBWorldSwitch);
    11611161#endif
    11621162}
     
    11731173VMMR0DECL(int) SVMR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
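Stripped of the statistics and assertions, the ASID policy hmR0SvmSetupTLB implements reduces to the sketch below: each host CPU hands out ASIDs from a private counter, a VCPU that migrated or missed a flush generation gets a fresh one, and the flush is scoped to the ASID when AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID is available:

    bool fNewASID = pVCpu->hm.s.idLastCpu   != pCpu->idCpu
                 || pVCpu->hm.s.cTLBFlushes != pCpu->cTLBFlushes;
    if (fNewASID)
    {
        if (++pCpu->uCurrentASID >= pVM->hm.s.uMaxASID)
        {
            pCpu->uCurrentASID = 1;        /* wrapped: ASID 0 belongs to the host */
            pCpu->cTLBFlushes++;           /* invalidates every VCPU's cached ASID */
        }
        if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
            pVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;
        else
            pVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
        pVCpu->hm.s.uCurrentASID = pCpu->uCurrentASID;
        pVCpu->hm.s.cTLBFlushes  = pCpu->cTLBFlushes;
    }
    pVMCB->ctrl.TLBCtrl.n.u32ASID = pVCpu->hm.s.uCurrentASID;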
    11741174{
    1175     STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatEntry, x);
    1176     STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hwaccm.s.StatExit1);
    1177     STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hwaccm.s.StatExit2);
     1175    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
     1176    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit1);
     1177    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit2);
    11781178
    11791179    VBOXSTRICTRC    rc = VINF_SUCCESS;
     
    11941194#endif
    11951195
    1196     pVMCB = (SVM_VMCB *)pVCpu->hwaccm.s.svm.pVMCB;
     1196    pVMCB = (SVM_VMCB *)pVCpu->hm.s.svm.pVMCB;
    11971197    AssertMsgReturn(pVMCB, ("Invalid pVMCB\n"), VERR_HMSVM_INVALID_PVMCB);
    11981198
     
    12011201     */
    12021202ResumeExecution:
    1203     if (!STAM_PROFILE_ADV_IS_RUNNING(&pVCpu->hwaccm.s.StatEntry))
    1204         STAM_PROFILE_ADV_STOP_START(&pVCpu->hwaccm.s.StatExit2, &pVCpu->hwaccm.s.StatEntry, x);
    1205     Assert(!HWACCMR0SuspendPending());
     1203    if (!STAM_PROFILE_ADV_IS_RUNNING(&pVCpu->hm.s.StatEntry))
     1204        STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit2, &pVCpu->hm.s.StatEntry, x);
     1205    Assert(!HMR0SuspendPending());
    12061206
    12071207    /*
    12081208     * Safety precaution; looping for too long here can have a very bad effect on the host.
    12091209     */
    1210     if (RT_UNLIKELY(++cResume > pVM->hwaccm.s.cMaxResumeLoops))
    1211     {
    1212         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitMaxResume);
     1210    if (RT_UNLIKELY(++cResume > pVM->hm.s.cMaxResumeLoops))
     1211    {
     1212        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMaxResume);
    12131213        rc = VINF_EM_RAW_INTERRUPT;
    12141214        goto end;
     
    12561256     * Check for pending actions that force us to go back to ring-3.
    12571257     */
    1258     if (    VM_FF_ISPENDING(pVM, VM_FF_HWACCM_TO_R3_MASK | VM_FF_REQUEST | VM_FF_PGM_POOL_FLUSH_PENDING | VM_FF_PDM_DMA)
     1258    if (    VM_FF_ISPENDING(pVM, VM_FF_HM_TO_R3_MASK | VM_FF_REQUEST | VM_FF_PGM_POOL_FLUSH_PENDING | VM_FF_PDM_DMA)
    12591259        ||  VMCPU_FF_ISPENDING(pVCpu,
    1260                                  VMCPU_FF_HWACCM_TO_R3_MASK
     1260                                 VMCPU_FF_HM_TO_R3_MASK
    12611261                               | VMCPU_FF_PGM_SYNC_CR3
    12621262                               | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
     
    12801280#endif
    12811281        {
    1282             if (    VM_FF_ISPENDING(pVM, VM_FF_HWACCM_TO_R3_MASK)
    1283                 ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HWACCM_TO_R3_MASK))
     1282            if (    VM_FF_ISPENDING(pVM, VM_FF_HM_TO_R3_MASK)
     1283                ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
    12841284            {
    1285                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatSwitchToR3);
     1285                STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchToR3);
    12861286                rc = RT_UNLIKELY(VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3;
    12871287                goto end;
     
    13261326    if (RTThreadPreemptIsPending(NIL_RTTHREAD))
    13271327    {
    1328         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitPreemptPending);
     1328        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPreemptPending);
    13291329        rc = VINF_EM_RAW_INTERRUPT;
    13301330        goto end;
     
    13481348    /** @todo query and update the TPR only when it could have been changed (mmio access)
    13491349     */
    1350     if (pVM->hwaccm.s.fHasIoApic)
     1350    if (pVM->hm.s.fHasIoApic)
    13511351    {
    13521352        /* TPR caching in CR8 */
     
    13551355        AssertRC(rc2);
    13561356
    1357         if (pVM->hwaccm.s.fTPRPatchingActive)
     1357        if (pVM->hm.s.fTPRPatchingActive)
    13581358        {
    13591359            /* Our patch code uses LSTAR for TPR caching. */
     
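The LSTAR remark above is the 32-bit TPR patching trick: patched guest code caches the TPR in the low byte of MSR_K8_LSTAR (unused by 32-bit guests), avoiding a VM-exit per TPR access. After the run the value is compared and propagated back, roughly as follows (the PDMApicSetTPR write-back call is an assumption about the exact API):

    if (fSyncTPR && pVM->hm.s.fTPRPatchingActive)
    {
        if ((pCtx->msrLSTAR & 0xff) != u8LastTPR)       /* did the guest move it? */
        {
            int rc2 = PDMApicSetTPR(pVCpu, pCtx->msrLSTAR & 0xff); /* assumed API */
            AssertRC(rc2);
        }
    }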
    13991399
    14001400    /* Enable nested paging if necessary (disabled each time after #VMEXIT). */
    1401     pVMCB->ctrl.NestedPaging.n.u1NestedPaging = pVM->hwaccm.s.fNestedPaging;
     1401    pVMCB->ctrl.NestedPaging.n.u1NestedPaging = pVM->hm.s.fNestedPaging;
    14021402
    14031403#ifdef LOG_ENABLED
    1404     pCpu = HWACCMR0GetCurrentCpu();
    1405     if (pVCpu->hwaccm.s.idLastCpu != pCpu->idCpu)
    1406         LogFlow(("Force TLB flush due to rescheduling to a different cpu (%d vs %d)\n", pVCpu->hwaccm.s.idLastCpu, pCpu->idCpu));
    1407     else if (pVCpu->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
    1408         LogFlow(("Force TLB flush due to changed TLB flush count (%x vs %x)\n", pVCpu->hwaccm.s.cTLBFlushes, pCpu->cTLBFlushes));
     1404    pCpu = HMR0GetCurrentCpu();
     1405    if (pVCpu->hm.s.idLastCpu != pCpu->idCpu)
     1406        LogFlow(("Force TLB flush due to rescheduling to a different cpu (%d vs %d)\n", pVCpu->hm.s.idLastCpu, pCpu->idCpu));
     1407    else if (pVCpu->hm.s.cTLBFlushes != pCpu->cTLBFlushes)
     1408        LogFlow(("Force TLB flush due to changed TLB flush count (%x vs %x)\n", pVCpu->hm.s.cTLBFlushes, pCpu->cTLBFlushes));
    14091409    else if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TLB_FLUSH))
    14101410        LogFlow(("Manual TLB flush\n"));
     
    14381438    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
    14391439#endif
    1440     STAM_PROFILE_ADV_STOP_START(&pVCpu->hwaccm.s.StatEntry, &pVCpu->hwaccm.s.StatInGC, x);
     1440    STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
    14411441
    14421442    /* Setup TLB control and ASID in the VMCB. */
     
    14441444
    14451445    /* In case we execute a goto ResumeExecution later on. */
    1446     pVCpu->hwaccm.s.fResumeVM      = true;
    1447     pVCpu->hwaccm.s.fForceTLBFlush = pVM->hwaccm.s.svm.fAlwaysFlushTLB;
    1448 
    1449     Assert(sizeof(pVCpu->hwaccm.s.svm.pVMCBPhys) == 8);
     1446    pVCpu->hm.s.fResumeVM      = true;
     1447    pVCpu->hm.s.fForceTLBFlush = pVM->hm.s.svm.fAlwaysFlushTLB;
     1448
     1449    Assert(sizeof(pVCpu->hm.s.svm.pVMCBPhys) == 8);
    14501450    Assert(pVMCB->ctrl.IntCtrl.n.u1VIrqMasking);
    1451     Assert(pVMCB->ctrl.u64IOPMPhysAddr  == pVM->hwaccm.s.svm.pIOBitmapPhys);
    1452     Assert(pVMCB->ctrl.u64MSRPMPhysAddr == pVCpu->hwaccm.s.svm.pMSRBitmapPhys);
     1451    Assert(pVMCB->ctrl.u64IOPMPhysAddr  == pVM->hm.s.svm.pIOBitmapPhys);
     1452    Assert(pVMCB->ctrl.u64MSRPMPhysAddr == pVCpu->hm.s.svm.pMSRBitmapPhys);
    14531453    Assert(pVMCB->ctrl.u64LBRVirt == 0);
    14541454
     
    14621462     * RDTSCPs (that don't cause exits) reads the guest MSR. See @bugref{3324}.
    14631463     */
    1464     u32HostExtFeatures = pVM->hwaccm.s.cpuid.u32AMDFeatureEDX;
     1464    u32HostExtFeatures = pVM->hm.s.cpuid.u32AMDFeatureEDX;
    14651465    if (    (u32HostExtFeatures & X86_CPUID_EXT_FEATURE_EDX_RDTSCP)
    14661466        && !(pVMCB->ctrl.u32InterceptCtrl2 & SVM_CTRL2_INTERCEPT_RDTSCP))
    14671467    {
    1468         pVCpu->hwaccm.s.u64HostTSCAux = ASMRdMsr(MSR_K8_TSC_AUX);
     1468        pVCpu->hm.s.u64HostTSCAux = ASMRdMsr(MSR_K8_TSC_AUX);
    14691469        uint64_t u64GuestTSCAux = 0;
    14701470        rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64GuestTSCAux);
     
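The TSC_AUX juggling above completes after the run: when RDTSCP executes unintercepted, the guest reads whatever is in MSR_K8_TSC_AUX, so the host value is stashed, the guest's loaded, and the host's restored once the world switch returns. In outline (the guest-load step is inferred; only the save and restore appear in this diff):

    pVCpu->hm.s.u64HostTSCAux = ASMRdMsr(MSR_K8_TSC_AUX);   /* stash the host value */
    uint64_t u64GuestTSCAux = 0;
    CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64GuestTSCAux);
    ASMWrMsr(MSR_K8_TSC_AUX, u64GuestTSCAux);               /* guest sees its own */
    /* ... VMRUN / world switch ... */
    ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hm.s.u64HostTSCAux);    /* put the host's back */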
    14741474
    14751475#ifdef VBOX_WITH_KERNEL_USING_XMM
    1476     hwaccmR0SVMRunWrapXMM(pVCpu->hwaccm.s.svm.pVMCBHostPhys, pVCpu->hwaccm.s.svm.pVMCBPhys, pCtx, pVM, pVCpu,
    1477                           pVCpu->hwaccm.s.svm.pfnVMRun);
     1476    hmR0SVMRunWrapXMM(pVCpu->hm.s.svm.pVMCBHostPhys, pVCpu->hm.s.svm.pVMCBPhys, pCtx, pVM, pVCpu,
 1477                      pVCpu->hm.s.svm.pfnVMRun);
    14781478#else
    1479     pVCpu->hwaccm.s.svm.pfnVMRun(pVCpu->hwaccm.s.svm.pVMCBHostPhys, pVCpu->hwaccm.s.svm.pVMCBPhys, pCtx, pVM, pVCpu);
    1480 #endif
    1481     ASMAtomicWriteBool(&pVCpu->hwaccm.s.fCheckedTLBFlush, false);
    1482     ASMAtomicIncU32(&pVCpu->hwaccm.s.cWorldSwitchExits);
     1479    pVCpu->hm.s.svm.pfnVMRun(pVCpu->hm.s.svm.pVMCBHostPhys, pVCpu->hm.s.svm.pVMCBPhys, pCtx, pVM, pVCpu);
     1480#endif
     1481    ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false);
     1482    ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits);
    14831483    /* Possibly the last TSC value seen by the guest (too high) (only when we're in TSC offset mode). */
    14841484    if (!(pVMCB->ctrl.u32InterceptCtrl1 & SVM_CTRL1_INTERCEPT_RDTSC))
     
    14861486        /* Restore host's TSC_AUX. */
    14871487        if (u32HostExtFeatures & X86_CPUID_EXT_FEATURE_EDX_RDTSCP)
    1488             ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hwaccm.s.u64HostTSCAux);
     1488            ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hm.s.u64HostTSCAux);
    14891489
    14901490        TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() +
     
    14931493    TMNotifyEndOfExecution(pVCpu);
    14941494    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
    1495     STAM_PROFILE_ADV_STOP_START(&pVCpu->hwaccm.s.StatInGC, &pVCpu->hwaccm.s.StatExit1, x);
     1495    STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x);
    14961496    ASMSetFlags(uOldEFlags);
    14971497#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
     
    15101510    if (RT_UNLIKELY(exitCode == (uint64_t)SVM_EXIT_INVALID))      /* Invalid guest state. */
    15111511    {
    1512         HWACCMDumpRegs(pVM, pVCpu, pCtx);
     1512        HMDumpRegs(pVM, pVCpu, pCtx);
    15131513#ifdef DEBUG
    15141514        Log(("ctrl.u16InterceptRdCRx            %x\n",      pVMCB->ctrl.u16InterceptRdCRx));
     
    17131713     * unless in the nested paging case where CR3 can be changed by the guest.
    17141714     */
    1715     if (   pVM->hwaccm.s.fNestedPaging
     1715    if (   pVM->hm.s.fNestedPaging
    17161716        && pCtx->cr3 != pVMCB->guest.u64CR3)
    17171717    {
     
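This hunk guards the CR3 write-back: with nested paging the guest owns CR3 outright, so after a #VMEXIT the VMM's cached copy has to follow whatever the VMCB now holds. The body, elided from this diff, is presumably along these lines (CPUMSetGuestCR3/PGMUpdateCR3 are assumptions about the exact sync calls):

    if (   pVM->hm.s.fNestedPaging
        && pCtx->cr3 != pVMCB->guest.u64CR3)
    {
        CPUMSetGuestCR3(pVCpu, pVMCB->guest.u64CR3); /* adopt the guest's new CR3 */
        PGMUpdateCR3(pVCpu, pVMCB->guest.u64CR3);    /* let PGM catch up */
    }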
    17401740
    17411741    /* Check if an injected event was interrupted prematurely. */
    1742     pVCpu->hwaccm.s.Event.intInfo = pVMCB->ctrl.ExitIntInfo.au64[0];
     1742    pVCpu->hm.s.Event.intInfo = pVMCB->ctrl.ExitIntInfo.au64[0];
    17431743    if (    pVMCB->ctrl.ExitIntInfo.n.u1Valid
    17441744            /* we don't care about 'int xx' as the instruction will be restarted. */
    17451745        &&  pVMCB->ctrl.ExitIntInfo.n.u3Type != SVM_EVENT_SOFTWARE_INT)
    17461746    {
    1747         Log(("Pending inject %RX64 at %RGv exit=%08x\n", pVCpu->hwaccm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitCode));
     1747        Log(("Pending inject %RX64 at %RGv exit=%08x\n", pVCpu->hm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitCode));
    17481748
    17491749#ifdef LOG_ENABLED
    17501750        SVM_EVENT Event;
    1751         Event.au64[0] = pVCpu->hwaccm.s.Event.intInfo;
     1751        Event.au64[0] = pVCpu->hm.s.Event.intInfo;
    17521752
    17531753        if (    exitCode == SVM_EXIT_EXCEPTION_E
     
    17581758#endif
    17591759
    1760         pVCpu->hwaccm.s.Event.fPending = true;
     1760        pVCpu->hm.s.Event.fPending = true;
    17611761        /* Error code present? (redundant) */
    17621762        if (pVMCB->ctrl.ExitIntInfo.n.u1ErrorCodeValid)
    1763             pVCpu->hwaccm.s.Event.errCode  = pVMCB->ctrl.ExitIntInfo.n.u32ErrorCode;
     1763            pVCpu->hm.s.Event.errCode  = pVMCB->ctrl.ExitIntInfo.n.u32ErrorCode;
    17641764        else
    1765             pVCpu->hwaccm.s.Event.errCode  = 0;
     1765            pVCpu->hm.s.Event.errCode  = 0;
    17661766    }
    17671767#ifdef VBOX_WITH_STATISTICS
    17681768    if (exitCode == SVM_EXIT_NPF)
    1769         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitReasonNPF);
     1769        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNPF);
    17701770    else
    1771         STAM_COUNTER_INC(&pVCpu->hwaccm.s.paStatExitReasonR0[exitCode & MASK_EXITREASON_STAT]);
     1771        STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[exitCode & MASK_EXITREASON_STAT]);
    17721772#endif
    17731773
     
    17751775    if (fSyncTPR)
    17761776    {
    1777         if (pVM->hwaccm.s.fTPRPatchingActive)
     1777        if (pVM->hm.s.fTPRPatchingActive)
    17781778        {
    17791779            if ((pCtx->msrLSTAR & 0xff) != u8LastTPR)
     
    18041804                            pVMCB->ctrl.ExitIntInfo.au64[0], UINT64_MAX);
    18051805#endif
    1806     STAM_PROFILE_ADV_STOP_START(&pVCpu->hwaccm.s.StatExit1, &pVCpu->hwaccm.s.StatExit2, x);
     1806    STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
    18071807
    18081808    /* Deal with the reason of the VM-exit. */
     
    18271827        case X86_XCPT_DB:
    18281828        {
    1829             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestDB);
     1829            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);
    18301830
    18311831            /* Note that we don't support guest and host-initiated debugging at the same time. */
     
    18611861            {
    18621862                Assert(CPUMIsGuestFPUStateActive(pVCpu));
    1863                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitShadowNM);
     1863                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowNM);
    18641864
    18651865                /* Continue execution. */
    1866                 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
     1866                pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
    18671867
    18681868                goto ResumeExecution;
     
    18701870
    18711871            Log(("Forward #NM fault to the guest\n"));
    1872             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestNM);
     1872            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM);
    18731873
    18741874            Event.au64[0]    = 0;
     
    18871887
    18881888#ifdef VBOX_ALWAYS_TRAP_PF
    1889             if (pVM->hwaccm.s.fNestedPaging)
     1889            if (pVM->hm.s.fNestedPaging)
    18901890            {
    18911891                /*
     
    18941894                Log(("Guest page fault at %04X:%RGv cr2=%RGv error code %x rsp=%RGv\n", pCtx->cs, (RTGCPTR)pCtx->rip,
    18951895                     uFaultAddress, errCode, (RTGCPTR)pCtx->rsp));
    1896                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestPF);
     1896                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
    18971897
    18981898                /* Now we must update CR2. */
     
    19101910            }
    19111911#endif
    1912             Assert(!pVM->hwaccm.s.fNestedPaging);
    1913 
    1914 #ifdef VBOX_HWACCM_WITH_GUEST_PATCHING
     1912            Assert(!pVM->hm.s.fNestedPaging);
     1913
     1914#ifdef VBOX_HM_WITH_GUEST_PATCHING
    19151915            /* Shortcut for APIC TPR reads and writes; 32 bits guests only */
    1916             if (    pVM->hwaccm.s.fTRPPatchingAllowed
     1916            if (    pVM->hm.s.fTRPPatchingAllowed
    19171917                &&  (uFaultAddress & 0xfff) == 0x080
    19181918                &&  !(errCode & X86_TRAP_PF_P)  /* not present */
    19191919                &&  CPUMGetGuestCPL(pVCpu) == 0
    19201920                &&  !CPUMIsGuestInLongModeEx(pCtx)
    1921                 &&  pVM->hwaccm.s.cPatches < RT_ELEMENTS(pVM->hwaccm.s.aPatches))
     1921                &&  pVM->hm.s.cPatches < RT_ELEMENTS(pVM->hm.s.aPatches))
    19221922            {
    19231923                RTGCPHYS GCPhysApicBase, GCPhys;
     
    19301930                {
    19311931                    /* Only attempt to patch the instruction once. */
    1932                     PHWACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
     1932                    PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
    19331933                    if (!pPatch)
    19341934                    {
    1935                         rc = VINF_EM_HWACCM_PATCH_TPR_INSTR;
     1935                        rc = VINF_EM_HM_PATCH_TPR_INSTR;
    19361936                        break;
    19371937                    }
     
    19531953                /* We've successfully synced our shadow pages, so let's just continue execution. */
    19541954                Log2(("Shadow page fault at %RGv cr2=%RGv error code %x\n", (RTGCPTR)pCtx->rip, uFaultAddress, errCode));
    1955                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitShadowPF);
     1955                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
    19561956
    19571957                TRPMResetTrap(pVCpu);
     
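The TRPMResetTrap above sits in the middle of the usual shadow-#PF triage, which in outline works like this: hand the fault to PGM, resume on VINF_SUCCESS, and reflect a genuine guest fault otherwise. A hedged sketch; the PGMTrap0eHandler call itself is not part of this hunk:

    rc = PGMTrap0eHandler(pVCpu, errCode, CPUMCTX2CORE(pCtx), (RTGCPTR)uFaultAddress);
    if (rc == VINF_SUCCESS)
    {
        TRPMResetTrap(pVCpu);              /* shadow tables synced; nothing guest-visible */
        goto ResumeExecution;
    }
    if (rc == VINF_EM_RAW_GUEST_TRAP)      /* a real guest fault */
    {
        errCode = TRPMGetErrorCode(pVCpu); /* PGM may have adjusted the error code */
        /* ... inject #PF into the guest via hmR0SvmInjectEvent and resume ... */
    }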
    19641964                 */
    19651965                Log2(("Forward page fault to the guest\n"));
    1966                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestPF);
     1966                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
    19671967                /* The error code might have been changed. */
    19681968                errCode = TRPMGetErrorCode(pVCpu);
     
    19941994        case X86_XCPT_MF: /* Floating point exception. */
    19951995        {
    1996             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestMF);
     1996            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);
    19971997            if (!(pCtx->cr0 & X86_CR0_NE))
    19981998            {
     
    20292029            {
    20302030                case X86_XCPT_GP:
    2031                     STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestGP);
     2031                    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP);
    20322032                    Event.n.u1ErrorCodeValid    = 1;
    20332033                    Event.n.u32ErrorCode        = pVMCB->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */
     
    20372037                    break;
    20382038                case X86_XCPT_DE:
    2039                     STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestDE);
     2039                    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE);
    20402040                    break;
    20412041                case X86_XCPT_UD:
    2042                     STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestUD);
     2042                    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);
    20432043                    break;
    20442044                case X86_XCPT_SS:
    2045                     STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestSS);
     2045                    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestSS);
    20462046                    Event.n.u1ErrorCodeValid    = 1;
    20472047                    Event.n.u32ErrorCode        = pVMCB->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */
    20482048                    break;
    20492049                case X86_XCPT_NP:
    2050                     STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestNP);
     2050                    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNP);
    20512051                    Event.n.u1ErrorCodeValid    = 1;
    20522052                    Event.n.u32ErrorCode        = pVMCB->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */
     
    20742074        PGMMODE     enmShwPagingMode;
    20752075
    2076         Assert(pVM->hwaccm.s.fNestedPaging);
     2076        Assert(pVM->hm.s.fNestedPaging);
    20772077        LogFlow(("Nested page fault at %RGv cr2=%RGp error code %x\n", (RTGCPTR)pCtx->rip, GCPhysFault, errCode));
    20782078
    2079 #ifdef VBOX_HWACCM_WITH_GUEST_PATCHING
     2079#ifdef VBOX_HM_WITH_GUEST_PATCHING
    20802080        /* Shortcut for APIC TPR reads and writes; 32 bits guests only */
    2081         if (    pVM->hwaccm.s.fTRPPatchingAllowed
     2081        if (    pVM->hm.s.fTRPPatchingAllowed
    20822082            &&  (GCPhysFault & PAGE_OFFSET_MASK) == 0x080
    20832083            &&  (   !(errCode & X86_TRAP_PF_P)  /* not present */
     
    20852085            &&  CPUMGetGuestCPL(pVCpu) == 0
    20862086            &&  !CPUMIsGuestInLongModeEx(pCtx)
    2087             &&  pVM->hwaccm.s.cPatches < RT_ELEMENTS(pVM->hwaccm.s.aPatches))
     2087            &&  pVM->hm.s.cPatches < RT_ELEMENTS(pVM->hm.s.aPatches))
    20882088        {
    20892089            RTGCPHYS GCPhysApicBase;
     
    20942094            {
    20952095                /* Only attempt to patch the instruction once. */
    2096                 PHWACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
     2096                PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
    20972097                if (!pPatch)
    20982098                {
    2099                     rc = VINF_EM_HWACCM_PATCH_TPR_INSTR;
     2099                    rc = VINF_EM_HM_PATCH_TPR_INSTR;
    21002100                    break;
    21012101                }
     
    21532153            /* We've successfully synced our shadow pages, so let's just continue execution. */
    21542154            Log2(("Shadow page fault at %RGv cr2=%RGp error code %x\n", (RTGCPTR)pCtx->rip, GCPhysFault, errCode));
    2155             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitShadowPF);
     2155            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
    21562156
    21572157            TRPMResetTrap(pVCpu);
     
    21862186    case SVM_EXIT_WBINVD:
    21872187    case SVM_EXIT_INVD:                 /* Guest software attempted to execute INVD. */
    2188         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitInvd);
     2188        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvd);
    21892189        /* Skip instruction and continue directly. */
    21902190        pCtx->rip += 2;     /* Note! hardcoded opcode size! */
     
    21952195    {
    21962196        Log2(("SVM: Cpuid at %RGv for %x\n", (RTGCPTR)pCtx->rip, pCtx->eax));
    2197         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitCpuid);
     2197        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCpuid);
    21982198        rc = EMInterpretCpuId(pVM, pVCpu, CPUMCTX2CORE(pCtx));
    21992199        if (rc == VINF_SUCCESS)
     
    22112211    {
    22122212        Log2(("SVM: Rdtsc\n"));
    2213         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitRdtsc);
     2213        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);
    22142214        rc = EMInterpretRdtsc(pVM, pVCpu, CPUMCTX2CORE(pCtx));
    22152215        if (rc == VINF_SUCCESS)
     
    22262226    {
    22272227        Log2(("SVM: Rdpmc %x\n", pCtx->ecx));
    2228         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitRdpmc);
     2228        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdpmc);
    22292229        rc = EMInterpretRdpmc(pVM, pVCpu, CPUMCTX2CORE(pCtx));
    22302230        if (rc == VINF_SUCCESS)
     
    22412241    {
    22422242        Log2(("SVM: Rdtscp\n"));
    2243         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitRdtscp);
     2243        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtscp);
    22442244        rc = EMInterpretRdtscp(pVM, pVCpu, pCtx);
    22452245        if (rc == VINF_SUCCESS)
     
    22572257    {
    22582258        Log2(("SVM: invlpg\n"));
    2259         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitInvlpg);
    2260 
    2261         Assert(!pVM->hwaccm.s.fNestedPaging);
     2259        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvlpg);
     2260
     2261        Assert(!pVM->hm.s.fNestedPaging);
    22622262
    22632263        /* Truly a pita. Why can't SVM give the same information as VT-x? */
     
    22652265        if (rc == VINF_SUCCESS)
    22662266        {
    2267             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushPageInvlpg);
     2267            STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushPageInvlpg);
    22682268            goto ResumeExecution;   /* eip already updated */
    22692269        }
     
    22772277    {
    22782278        Log2(("SVM: %RGv mov cr%d, \n", (RTGCPTR)pCtx->rip, exitCode - SVM_EXIT_WRITE_CR0));
    2279         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitCRxWrite[exitCode - SVM_EXIT_WRITE_CR0]);
     2279        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxWrite[exitCode - SVM_EXIT_WRITE_CR0]);
    22802280        rc = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0);
    22812281
     
    22832283        {
    22842284            case 0:
    2285                 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
     2285                pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
    22862286                break;
    22872287            case 2:
    22882288                break;
    22892289            case 3:
    2290                 Assert(!pVM->hwaccm.s.fNestedPaging);
    2291                 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR3;
     2290                Assert(!pVM->hm.s.fNestedPaging);
     2291                pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR3;
    22922292                break;
    22932293            case 4:
    2294                 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR4;
     2294                pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR4;
    22952295                break;
    22962296            case 8:
     
    23152315    {
    23162316        Log2(("SVM: %RGv mov x, cr%d\n", (RTGCPTR)pCtx->rip, exitCode - SVM_EXIT_READ_CR0));
    2317         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitCRxRead[exitCode - SVM_EXIT_READ_CR0]);
     2317        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxRead[exitCode - SVM_EXIT_READ_CR0]);
    23182318        rc = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0);
    23192319        if (rc == VINF_SUCCESS)
     
    23332333    {
    23342334        Log2(("SVM: %RGv mov dr%d, x\n", (RTGCPTR)pCtx->rip, exitCode - SVM_EXIT_WRITE_DR0));
    2335         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitDRxWrite);
     2335        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
    23362336
    23372337        if (   !DBGFIsStepping(pVCpu)
    23382338            && !CPUMIsHyperDebugStateActive(pVCpu))
    23392339        {
    2340             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatDRxContextSwitch);
     2340            STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch);
    23412341
     23422342            /* Disable DRx move intercepts. */
     
    23542354        {
    23552355            /* EIP has been updated already. */
    2356             pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_DEBUG;
     2356            pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
    23572357
    23582358            /* Only resume if successful. */
     
    23692369    {
    23702370        Log2(("SVM: %RGv mov x, dr%d\n", (RTGCPTR)pCtx->rip, exitCode - SVM_EXIT_READ_DR0));
    2371         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitDRxRead);
     2371        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
    23722372
    23732373        if (!DBGFIsStepping(pVCpu))
    23742374        {
    2375             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatDRxContextSwitch);
     2375            STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch);
    23762376
    23772377            /* Disable DRx move intercepts. */
     
    24152415        {
    24162416            /* ins/outs */
    2417             PDISCPUSTATE pDis = &pVCpu->hwaccm.s.DisState;
     2417            PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
    24182418
    24192419            /* Disassemble manually to deal with segment prefixes. */
     
    24242424                {
    24252425                    Log2(("IOMInterpretOUTSEx %RGv %x size=%d\n", (RTGCPTR)pCtx->rip, IoExitInfo.n.u16Port, uIOSize));
    2426                     STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitIOStringWrite);
     2426                    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringWrite);
    24272427                    rc = IOMInterpretOUTSEx(pVM, CPUMCTX2CORE(pCtx), IoExitInfo.n.u16Port, pDis->fPrefix,
    24282428                                            (DISCPUMODE)pDis->uAddrMode, uIOSize);
     
    24312431                {
    24322432                    Log2(("IOMInterpretINSEx  %RGv %x size=%d\n", (RTGCPTR)pCtx->rip, IoExitInfo.n.u16Port, uIOSize));
    2433                     STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitIOStringRead);
     2433                    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringRead);
    24342434                    rc = IOMInterpretINSEx(pVM, CPUMCTX2CORE(pCtx), IoExitInfo.n.u16Port, pDis->fPrefix,
    24352435                                           (DISCPUMODE)pDis->uAddrMode, uIOSize);
     
    24482448                Log2(("IOMIOPortWrite %RGv %x %x size=%d\n", (RTGCPTR)pCtx->rip, IoExitInfo.n.u16Port, pCtx->eax & uAndVal,
    24492449                      uIOSize));
    2450                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitIOWrite);
     2450                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOWrite);
    24512451                rc = IOMIOPortWrite(pVM, IoExitInfo.n.u16Port, pCtx->eax & uAndVal, uIOSize);
    24522452                if (rc == VINF_IOM_R3_IOPORT_WRITE)
    24532453                {
    2454                     HWACCMR0SavePendingIOPortWrite(pVCpu, pCtx->rip, pVMCB->ctrl.u64ExitInfo2, IoExitInfo.n.u16Port,
     2454                    HMR0SavePendingIOPortWrite(pVCpu, pCtx->rip, pVMCB->ctrl.u64ExitInfo2, IoExitInfo.n.u16Port,
     24552455                                               uAndVal, uIOSize);
    24562456                }
     
    24602460                uint32_t u32Val = 0;
    24612461
    2462                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitIORead);
     2462                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead);
    24632463                rc = IOMIOPortRead(pVM, IoExitInfo.n.u16Port, &u32Val, uIOSize);
    24642464                if (IOM_SUCCESS(rc))
     
    24712471                else if (rc == VINF_IOM_R3_IOPORT_READ)
    24722472                {
    2473                     HWACCMR0SavePendingIOPortRead(pVCpu, pCtx->rip, pVMCB->ctrl.u64ExitInfo2, IoExitInfo.n.u16Port,
     2473                    HMR0SavePendingIOPortRead(pVCpu, pCtx->rip, pVMCB->ctrl.u64ExitInfo2, IoExitInfo.n.u16Port,
     24742474                                              uAndVal, uIOSize);
    24752475                }
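
Both the OUT and IN paths above park the access when IOM defers to ring-3 (VINF_IOM_R3_IOPORT_WRITE/READ): enough state is recorded that ring-3 can complete the port access and execution can resume past the instruction. A compact sketch of the record being saved, using an illustrative struct rather than the real HM one (field set modelled on the call sites above):

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative pending-I/O record; not the actual VBox structure. */
    typedef struct PENDINGIOPORT
    {
        bool     fPending;
        bool     fWrite;       /* OUT vs IN */
        uint64_t uRip;         /* instruction start, for restart */
        uint64_t uRipNext;     /* next instruction (exit info 2) */
        uint16_t uPort;        /* I/O port number */
        uint32_t uAndMask;     /* operand-size mask, e.g. 0xff for a byte access */
        unsigned cbSize;       /* access width in bytes */
    } PENDINGIOPORT;

    static void savePendingIOPort(PENDINGIOPORT *p, bool fWrite, uint64_t uRip,
                                  uint64_t uRipNext, uint16_t uPort,
                                  uint32_t uAndMask, unsigned cbSize)
    {
        p->fWrite   = fWrite;
        p->uRip     = uRip;
        p->uRipNext = uRipNext;
        p->uPort    = uPort;
        p->uAndMask = uAndMask;
        p->cbSize   = cbSize;
        p->fPending = true;    /* ring-3 completes the access, then resumes the guest */
    }
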
     
    24932493                    static uint32_t const aIOSize[4] = { 1, 2, 0, 4 };
    24942494
    2495                     STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatDRxIOCheck);
     2495                    STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIOCheck);
    24962496                    for (unsigned i = 0; i < 4; i++)
    24972497                    {
     
    25662566    case SVM_EXIT_HLT:
    25672567        /* Check if external interrupts are pending; if so, don't switch back. */
    2568         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitHlt);
     2568        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
    25692569        pCtx->rip++;    /* skip hlt */
    25702570        if (EMShouldContinueAfterHalt(pVCpu, pCtx))
     
    25762576    case SVM_EXIT_MWAIT_UNCOND:
    25772577        Log2(("SVM: mwait\n"));
    2578         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitMwait);
     2578        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMwait);
    25792579        rc = EMInterpretMWait(pVM, pVCpu, CPUMCTX2CORE(pCtx));
    25802580        if (    rc == VINF_EM_HALT
     
    25982598        Log2(("SVM: monitor\n"));
    25992599
    2600         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitMonitor);
     2600        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMonitor);
    26012601        rc = EMInterpretMonitor(pVM, pVCpu, CPUMCTX2CORE(pCtx));
    26022602        if (rc == VINF_SUCCESS)
     
    26442644    {
    26452645        /* When an interrupt is pending, we'll let MSR_K8_LSTAR writes fault in our TPR patch code. */
    2646         if (    pVM->hwaccm.s.fTPRPatchingActive
     2646        if (    pVM->hm.s.fTPRPatchingActive
    26472647            &&  pCtx->ecx == MSR_K8_LSTAR
    26482648            &&  pVMCB->ctrl.u64ExitInfo1 == 1 /* wrmsr */)
     
     26682668         * so we play it safe by completely disassembling the instruction.
    26692669         */
    2670         STAM_COUNTER_INC((pVMCB->ctrl.u64ExitInfo1 == 0) ? &pVCpu->hwaccm.s.StatExitRdmsr : &pVCpu->hwaccm.s.StatExitWrmsr);
     2670        STAM_COUNTER_INC((pVMCB->ctrl.u64ExitInfo1 == 0) ? &pVCpu->hm.s.StatExitRdmsr : &pVCpu->hm.s.StatExitWrmsr);
    26712671        Log(("SVM: %s\n", (pVMCB->ctrl.u64ExitInfo1 == 0) ? "rdmsr" : "wrmsr"));
    26722672        rc = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0);
     
    26852685        Log(("SVM_EXIT_TASK_SWITCH: exit2=%RX64\n", pVMCB->ctrl.u64ExitInfo2));
    26862686        if (    !(pVMCB->ctrl.u64ExitInfo2 & (SVM_EXIT2_TASK_SWITCH_IRET | SVM_EXIT2_TASK_SWITCH_JMP))
    2687             &&  pVCpu->hwaccm.s.Event.fPending)
     2687            &&  pVCpu->hm.s.Event.fPending)
    26882688        {
    26892689            SVM_EVENT Event;
    2690             Event.au64[0] = pVCpu->hwaccm.s.Event.intInfo;
     2690            Event.au64[0] = pVCpu->hm.s.Event.intInfo;
    26912691
    26922692            /* Caused by an injected interrupt. */
    2693             pVCpu->hwaccm.s.Event.fPending = false;
     2693            pVCpu->hm.s.Event.fPending = false;
    26942694            switch (Event.n.u3Type)
    26952695            {
     
    27582758    if (exitCode == SVM_EXIT_INTR)
    27592759    {
    2760         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatPendingHostIrq);
     2760        STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq);
    27612761        /* On the next entry we'll only sync the host context. */
    2762         pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_HOST_CONTEXT;
     2762        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_HOST_CONTEXT;
    27632763    }
    27642764    else
     
    27672767        /** @todo we can do better than this */
    27682768        /* Not in the VINF_PGM_CHANGE_MODE though! */
    2769         pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL;
     2769        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL;
    27702770    }
    27712771
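
The two branches above show the dirty-flag scheme at work: HM_CHANGED_* bits in fContextUseFlags record which pieces of state must be re-synced into the VMCB/VMCS before the next entry, so a host-interrupt exit only pays for the host context while the fallback path resyncs everything. A self-contained sketch of the pattern; the flag values are illustrative, not the real HM_CHANGED_* constants:

    #include <stdint.h>

    #define CHANGED_HOST_CONTEXT  UINT32_C(0x00000001)   /* illustrative values */
    #define CHANGED_GUEST_CR0     UINT32_C(0x00000002)
    #define CHANGED_GUEST_DEBUG   UINT32_C(0x00000004)
    #define CHANGED_ALL           UINT32_C(0xffffffff)

    /* Before re-entry, sync only what the exit path marked dirty. */
    static void syncBeforeEntry(uint32_t *pfContextUseFlags)
    {
        if (*pfContextUseFlags & CHANGED_HOST_CONTEXT)
        {   /* reload host state into the VMCS/VMCB */   }
        if (*pfContextUseFlags & CHANGED_GUEST_CR0)
        {   /* re-export guest CR0 */                    }
        *pfContextUseFlags = 0;    /* everything is clean again */
    }
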
     
    27832783#endif
    27842784
    2785     STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2, x);
    2786     STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit1, x);
    2787     STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatEntry, x);
     2785    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
     2786    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
     2787    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
    27882788    return VBOXSTRICTRC_TODO(rc);
    27892789}
     
    28092809        uint8_t u8Tpr;
    28102810
    2811         PHWACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
     2811        PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
    28122812        if (!pPatch)
    28132813            break;
     
    28152815        switch (pPatch->enmType)
    28162816        {
    2817             case HWACCMTPRINSTR_READ:
     2817            case HMTPRINSTR_READ:
    28182818                /* TPR caching in CR8 */
    28192819                rc = PDMApicGetTPR(pVCpu, &u8Tpr, &fPending);
     
    28272827                break;
    28282828
    2829             case HWACCMTPRINSTR_WRITE_REG:
    2830             case HWACCMTPRINSTR_WRITE_IMM:
     2829            case HMTPRINSTR_WRITE_REG:
     2830            case HMTPRINSTR_WRITE_IMM:
    28312831                /* Fetch the new TPR value */
    2832                 if (pPatch->enmType == HWACCMTPRINSTR_WRITE_REG)
     2832                if (pPatch->enmType == HMTPRINSTR_WRITE_REG)
    28332833                {
    28342834                    uint32_t val;
     
    28652865VMMR0DECL(int) SVMR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBLCPUINFO pCpu)
    28662866{
    2867     Assert(pVM->hwaccm.s.svm.fSupported);
    2868 
    2869     LogFlow(("SVMR0Enter cpu%d last=%d asid=%d\n", pCpu->idCpu, pVCpu->hwaccm.s.idLastCpu, pVCpu->hwaccm.s.uCurrentASID));
    2870     pVCpu->hwaccm.s.fResumeVM = false;
     2867    Assert(pVM->hm.s.svm.fSupported);
     2868
     2869    LogFlow(("SVMR0Enter cpu%d last=%d asid=%d\n", pCpu->idCpu, pVCpu->hm.s.idLastCpu, pVCpu->hm.s.uCurrentASID));
     2870    pVCpu->hm.s.fResumeVM = false;
    28712871
    28722872    /* Force to reload LDTR, so we'll execute VMLoad to load additional guest state. */
    2873     pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_LDTR;
     2873    pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_LDTR;
    28742874
    28752875    return VINF_SUCCESS;
     
    28872887VMMR0DECL(int) SVMR0Leave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
    28882888{
    2889     SVM_VMCB *pVMCB = (SVM_VMCB *)pVCpu->hwaccm.s.svm.pVMCB;
    2890 
    2891     Assert(pVM->hwaccm.s.svm.fSupported);
     2889    SVM_VMCB *pVMCB = (SVM_VMCB *)pVCpu->hm.s.svm.pVMCB;
     2890
     2891    Assert(pVM->hm.s.svm.fSupported);
    28922892
    28932893#ifdef DEBUG
     
    29082908
    29092909        /* Resync the debug registers the next time. */
    2910         pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_DEBUG;
     2910        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
    29112911    }
    29122912    else
     
    29792979    if (CPUMGetGuestCodeBits(pVCpu) != 16)
    29802980    {
    2981         PDISSTATE pDis = &pVCpu->hwaccm.s.DisState;
     2981        PDISSTATE pDis = &pVCpu->hm.s.DisState;
    29822982        int rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, NULL);
    29832983        if (RT_SUCCESS(rc) && pDis->pCurInstr->uOpcode == OP_INVLPG)
     
    30033003VMMR0DECL(int) SVMR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt)
    30043004{
    3005     bool fFlushPending = pVM->hwaccm.s.svm.fAlwaysFlushTLB | VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TLB_FLUSH);
     3005    bool fFlushPending = pVM->hm.s.svm.fAlwaysFlushTLB | VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TLB_FLUSH);
    30063006
    30073007    /* Skip it if a TLB flush is already pending. */
     
    30123012        Log2(("SVMR0InvalidatePage %RGv\n", GCVirt));
    30133013        AssertReturn(pVM, VERR_INVALID_PARAMETER);
    3014         Assert(pVM->hwaccm.s.svm.fSupported);
    3015 
    3016         pVMCB = (SVM_VMCB *)pVCpu->hwaccm.s.svm.pVMCB;
     3014        Assert(pVM->hm.s.svm.fSupported);
     3015
     3016        pVMCB = (SVM_VMCB *)pVCpu->hm.s.svm.pVMCB;
    30173017        AssertMsgReturn(pVMCB, ("Invalid pVMCB\n"), VERR_HMSVM_INVALID_PVMCB);
    30183018
     
    30403040VMMR0DECL(int) SVMR0InvalidatePhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
    30413041{
    3042     Assert(pVM->hwaccm.s.fNestedPaging);
     3042    Assert(pVM->hm.s.fNestedPaging);
    30433043    /* invlpga only invalidates TLB entries for guest virtual addresses; we have no choice but to force a TLB flush here. */
    30443044    VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    3045     STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushTLBInvlpga);
     3045    STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTLBInvlpga);
    30463046    return VINF_SUCCESS;
    30473047}
     
    30693069    aParam[3] = (uint32_t)(pVMCBPhys >> 32);                /* Param 2: pVMCBPhys - Hi. */
    30703070
    3071     return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hwaccm.s.pfnSVMGCVMRun64, 4, &aParam[0]);
     3071    return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hm.s.pfnSVMGCVMRun64, 4, &aParam[0]);
    30723072}
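
aParam[2]/aParam[3] above split pVMCBPhys into two 32-bit halves because the 64-bit switcher is entered from a 32-bit context and takes dword parameters. The split as a tiny stand-alone helper (name is illustrative):

    #include <stdint.h>

    /* Split a 64-bit physical address into low/high dwords for a 32-bit
     * parameter array, mirroring the aParam[] packing above. */
    static void packU64(uint32_t *pu32Lo, uint32_t *pu32Hi, uint64_t u64)
    {
        *pu32Lo = (uint32_t)u64;           /* Param: Lo */
        *pu32Hi = (uint32_t)(u64 >> 32);   /* Param: Hi */
    }
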
    30733073
     
    31053105        CPUMPushHyper(pVCpu, paParam[i]);
    31063106
    3107     STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatWorldSwitch3264, z);
     3107    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatWorldSwitch3264, z);
    31083108    /* Call switcher. */
    3109     rc = pVM->hwaccm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum));
    3110     STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatWorldSwitch3264, z);
     3109    rc = pVM->hm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum));
     3110    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatWorldSwitch3264, z);
    31113111
    31123112    ASMSetFlags(uOldEFlags);
  • trunk/src/VBox/VMM/VMMR0/HWSVMR0.h

    r43307 r43387  
    2424#include <VBox/vmm/stam.h>
    2525#include <VBox/dis.h>
    26 #include <VBox/vmm/hwaccm.h>
     26#include <VBox/vmm/hm.h>
    2727#include <VBox/vmm/pgm.h>
    28 #include <VBox/vmm/hwacc_svm.h>
     28#include <VBox/vmm/hm_svm.h>
    2929
    3030RT_C_DECLS_BEGIN
  • trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp

    r43379 r43387  
    2020*   Header Files                                                               *
    2121*******************************************************************************/
    22 #define LOG_GROUP LOG_GROUP_HWACCM
     22#define LOG_GROUP LOG_GROUP_HM
    2323#include <iprt/asm-amd64-x86.h>
    24 #include <VBox/vmm/hwaccm.h>
     24#include <VBox/vmm/hm.h>
    2525#include <VBox/vmm/pgm.h>
    2626#include <VBox/vmm/dbgf.h>
     
    3232#endif
    3333#include <VBox/vmm/tm.h>
    34 #include "HWACCMInternal.h"
     34#include "HMInternal.h"
    3535#include <VBox/vmm/vm.h>
    3636#include <VBox/vmm/pdmapi.h>
     
    7070
    7171#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    72 /** See HWACCMR0A.asm. */
     72/** See HMR0A.asm. */
    7373extern "C" uint32_t g_fVMXIs64bitHost;
    7474#endif
     
    9090
    9191/**
    92  * Updates error from VMCS to HWACCMCPU's lasterror record.
     92 * Updates error from VMCS to HMCPU's lasterror record.
    9393 *
    9494 * @param    pVM            Pointer to the VM.
     
    103103
    104104        VMXReadVMCS(VMX_VMCS32_RO_VM_INSTR_ERROR, &instrError);
    105         pVCpu->hwaccm.s.vmx.lasterror.ulInstrError = instrError;
    106     }
    107     pVM->hwaccm.s.lLastError = rc;
     105        pVCpu->hm.s.vmx.lasterror.ulInstrError = instrError;
     106    }
     107    pVM->hm.s.lLastError = rc;
    108108}
    109109
     
    130130        {
    131131            /* Set revision dword at the beginning of the VMXON structure. */
    132             *(uint32_t *)pvCpuPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hwaccm.s.vmx.msr.vmx_basic_info);
     132            *(uint32_t *)pvCpuPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.msr.vmx_basic_info);
    133133        }
    134134
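
MSR_IA32_VMX_BASIC_INFO_VMCS_ID extracts the VMCS revision identifier that every VMXON region and VMCS must carry in its first dword; architecturally it is bits 30:0 of the IA32_VMX_BASIC MSR, with bit 31 of the written dword kept clear. A self-contained sketch of the stamping step (helper name is illustrative):

    #include <stdint.h>

    /* Stamp the 31-bit VMCS revision ID (IA32_VMX_BASIC bits 30:0) into
     * the first dword of a VMXON region or VMCS page. */
    static void vmxStampRevisionId(void *pvPage, uint64_t uVmxBasicMsr)
    {
        *(uint32_t *)pvPage = (uint32_t)(uVmxBasicMsr & UINT32_C(0x7fffffff));
    }
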
     
    165165     */
    166166    if (   pVM
    167         && pVM->hwaccm.s.vmx.fVPID
    168         && (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_ALL_CONTEXTS))
     167        && pVM->hm.s.vmx.fVPID
     168        && (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_ALL_CONTEXTS))
    169169    {
    170170        hmR0VmxFlushVPID(pVM, NULL /* pvCpu */, VMX_FLUSH_VPID_ALL_CONTEXTS, 0 /* GCPtr */);
     
    224224#endif
    225225
    226     pVM->hwaccm.s.vmx.pMemObjAPIC = NIL_RTR0MEMOBJ;
    227 
    228     if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
     226    pVM->hm.s.vmx.pMemObjAPIC = NIL_RTR0MEMOBJ;
     227
     228    if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
    229229    {
    230230        /* Allocate one page for the APIC physical page (serves for filtering accesses). */
    231         rc = RTR0MemObjAllocCont(&pVM->hwaccm.s.vmx.pMemObjAPIC, PAGE_SIZE, false /* executable R0 mapping */);
     231        rc = RTR0MemObjAllocCont(&pVM->hm.s.vmx.pMemObjAPIC, PAGE_SIZE, false /* executable R0 mapping */);
    232232        AssertRC(rc);
    233233        if (RT_FAILURE(rc))
    234234            return rc;
    235235
    236         pVM->hwaccm.s.vmx.pAPIC     = (uint8_t *)RTR0MemObjAddress(pVM->hwaccm.s.vmx.pMemObjAPIC);
    237         pVM->hwaccm.s.vmx.pAPICPhys = RTR0MemObjGetPagePhysAddr(pVM->hwaccm.s.vmx.pMemObjAPIC, 0);
    238         ASMMemZero32(pVM->hwaccm.s.vmx.pAPIC, PAGE_SIZE);
     236        pVM->hm.s.vmx.pAPIC     = (uint8_t *)RTR0MemObjAddress(pVM->hm.s.vmx.pMemObjAPIC);
     237        pVM->hm.s.vmx.pAPICPhys = RTR0MemObjGetPagePhysAddr(pVM->hm.s.vmx.pMemObjAPIC, 0);
     238        ASMMemZero32(pVM->hm.s.vmx.pAPIC, PAGE_SIZE);
    239239    }
    240240    else
    241241    {
    242         pVM->hwaccm.s.vmx.pMemObjAPIC = 0;
    243         pVM->hwaccm.s.vmx.pAPIC       = 0;
    244         pVM->hwaccm.s.vmx.pAPICPhys   = 0;
     242        pVM->hm.s.vmx.pMemObjAPIC = 0;
     243        pVM->hm.s.vmx.pAPIC       = 0;
     244        pVM->hm.s.vmx.pAPICPhys   = 0;
    245245    }
    246246
    247247#ifdef VBOX_WITH_CRASHDUMP_MAGIC
    248248    {
    249         rc = RTR0MemObjAllocCont(&pVM->hwaccm.s.vmx.pMemObjScratch, PAGE_SIZE, false /* executable R0 mapping */);
     249        rc = RTR0MemObjAllocCont(&pVM->hm.s.vmx.pMemObjScratch, PAGE_SIZE, false /* executable R0 mapping */);
    250250        AssertRC(rc);
    251251        if (RT_FAILURE(rc))
    252252            return rc;
    253253
    254         pVM->hwaccm.s.vmx.pScratch     = (uint8_t *)RTR0MemObjAddress(pVM->hwaccm.s.vmx.pMemObjScratch);
    255         pVM->hwaccm.s.vmx.pScratchPhys = RTR0MemObjGetPagePhysAddr(pVM->hwaccm.s.vmx.pMemObjScratch, 0);
    256 
    257         ASMMemZero32(pVM->hwaccm.s.vmx.pScratch, PAGE_SIZE);
    258         strcpy((char *)pVM->hwaccm.s.vmx.pScratch, "SCRATCH Magic");
    259         *(uint64_t *)(pVM->hwaccm.s.vmx.pScratch + 16) = UINT64_C(0xDEADBEEFDEADBEEF);
     254        pVM->hm.s.vmx.pScratch     = (uint8_t *)RTR0MemObjAddress(pVM->hm.s.vmx.pMemObjScratch);
     255        pVM->hm.s.vmx.pScratchPhys = RTR0MemObjGetPagePhysAddr(pVM->hm.s.vmx.pMemObjScratch, 0);
     256
     257        ASMMemZero32(pVM->hm.s.vmx.pScratch, PAGE_SIZE);
     258        strcpy((char *)pVM->hm.s.vmx.pScratch, "SCRATCH Magic");
     259        *(uint64_t *)(pVM->hm.s.vmx.pScratch + 16) = UINT64_C(0xDEADBEEFDEADBEEF);
    260260    }
    261261#endif
     
    266266        PVMCPU pVCpu = &pVM->aCpus[i];
    267267
    268         pVCpu->hwaccm.s.vmx.hMemObjVMCS = NIL_RTR0MEMOBJ;
     268        pVCpu->hm.s.vmx.hMemObjVMCS = NIL_RTR0MEMOBJ;
    269269
    270270        /* Allocate one page for the VM control structure (VMCS). */
    271         rc = RTR0MemObjAllocCont(&pVCpu->hwaccm.s.vmx.hMemObjVMCS, PAGE_SIZE, false /* executable R0 mapping */);
     271        rc = RTR0MemObjAllocCont(&pVCpu->hm.s.vmx.hMemObjVMCS, PAGE_SIZE, false /* executable R0 mapping */);
    272272        AssertRC(rc);
    273273        if (RT_FAILURE(rc))
    274274            return rc;
    275275
    276         pVCpu->hwaccm.s.vmx.pvVMCS     = RTR0MemObjAddress(pVCpu->hwaccm.s.vmx.hMemObjVMCS);
    277         pVCpu->hwaccm.s.vmx.HCPhysVMCS = RTR0MemObjGetPagePhysAddr(pVCpu->hwaccm.s.vmx.hMemObjVMCS, 0);
    278         ASMMemZeroPage(pVCpu->hwaccm.s.vmx.pvVMCS);
    279 
    280         pVCpu->hwaccm.s.vmx.cr0_mask = 0;
    281         pVCpu->hwaccm.s.vmx.cr4_mask = 0;
     276        pVCpu->hm.s.vmx.pvVMCS     = RTR0MemObjAddress(pVCpu->hm.s.vmx.hMemObjVMCS);
     277        pVCpu->hm.s.vmx.HCPhysVMCS = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.vmx.hMemObjVMCS, 0);
     278        ASMMemZeroPage(pVCpu->hm.s.vmx.pvVMCS);
     279
     280        pVCpu->hm.s.vmx.cr0_mask = 0;
     281        pVCpu->hm.s.vmx.cr4_mask = 0;
    282282
    283283        /* Allocate one page for the virtual APIC page for TPR caching. */
    284         rc = RTR0MemObjAllocCont(&pVCpu->hwaccm.s.vmx.hMemObjVAPIC, PAGE_SIZE, false /* executable R0 mapping */);
     284        rc = RTR0MemObjAllocCont(&pVCpu->hm.s.vmx.hMemObjVAPIC, PAGE_SIZE, false /* executable R0 mapping */);
    285285        AssertRC(rc);
    286286        if (RT_FAILURE(rc))
    287287            return rc;
    288288
    289         pVCpu->hwaccm.s.vmx.pbVAPIC     = (uint8_t *)RTR0MemObjAddress(pVCpu->hwaccm.s.vmx.hMemObjVAPIC);
    290         pVCpu->hwaccm.s.vmx.HCPhysVAPIC = RTR0MemObjGetPagePhysAddr(pVCpu->hwaccm.s.vmx.hMemObjVAPIC, 0);
    291         ASMMemZeroPage(pVCpu->hwaccm.s.vmx.pbVAPIC);
     289        pVCpu->hm.s.vmx.pbVAPIC     = (uint8_t *)RTR0MemObjAddress(pVCpu->hm.s.vmx.hMemObjVAPIC);
     290        pVCpu->hm.s.vmx.HCPhysVAPIC = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.vmx.hMemObjVAPIC, 0);
     291        ASMMemZeroPage(pVCpu->hm.s.vmx.pbVAPIC);
    292292
    293293        /* Allocate the MSR bitmap if this feature is supported. */
    294         if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
    295         {
    296             rc = RTR0MemObjAllocCont(&pVCpu->hwaccm.s.vmx.pMemObjMSRBitmap, PAGE_SIZE, false /* executable R0 mapping */);
     294        if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
     295        {
     296            rc = RTR0MemObjAllocCont(&pVCpu->hm.s.vmx.pMemObjMSRBitmap, PAGE_SIZE, false /* executable R0 mapping */);
    297297            AssertRC(rc);
    298298            if (RT_FAILURE(rc))
    299299                return rc;
    300300
    301             pVCpu->hwaccm.s.vmx.pMSRBitmap     = (uint8_t *)RTR0MemObjAddress(pVCpu->hwaccm.s.vmx.pMemObjMSRBitmap);
    302             pVCpu->hwaccm.s.vmx.pMSRBitmapPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hwaccm.s.vmx.pMemObjMSRBitmap, 0);
    303             memset(pVCpu->hwaccm.s.vmx.pMSRBitmap, 0xff, PAGE_SIZE);
     301            pVCpu->hm.s.vmx.pMSRBitmap     = (uint8_t *)RTR0MemObjAddress(pVCpu->hm.s.vmx.pMemObjMSRBitmap);
     302            pVCpu->hm.s.vmx.pMSRBitmapPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.vmx.pMemObjMSRBitmap, 0);
     303            memset(pVCpu->hm.s.vmx.pMSRBitmap, 0xff, PAGE_SIZE);
    304304        }
    305305
    306306#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    307307        /* Allocate one page for the guest MSR load area (for preloading guest MSRs during the world switch). */
    308         rc = RTR0MemObjAllocCont(&pVCpu->hwaccm.s.vmx.pMemObjGuestMSR, PAGE_SIZE, false /* executable R0 mapping */);
     308        rc = RTR0MemObjAllocCont(&pVCpu->hm.s.vmx.pMemObjGuestMSR, PAGE_SIZE, false /* executable R0 mapping */);
    309309        AssertRC(rc);
    310310        if (RT_FAILURE(rc))
    311311            return rc;
    312312
    313         pVCpu->hwaccm.s.vmx.pGuestMSR     = (uint8_t *)RTR0MemObjAddress(pVCpu->hwaccm.s.vmx.pMemObjGuestMSR);
    314         pVCpu->hwaccm.s.vmx.pGuestMSRPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hwaccm.s.vmx.pMemObjGuestMSR, 0);
    315         Assert(!(pVCpu->hwaccm.s.vmx.pGuestMSRPhys & 0xf));
    316         memset(pVCpu->hwaccm.s.vmx.pGuestMSR, 0, PAGE_SIZE);
     313        pVCpu->hm.s.vmx.pGuestMSR     = (uint8_t *)RTR0MemObjAddress(pVCpu->hm.s.vmx.pMemObjGuestMSR);
     314        pVCpu->hm.s.vmx.pGuestMSRPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.vmx.pMemObjGuestMSR, 0);
     315        Assert(!(pVCpu->hm.s.vmx.pGuestMSRPhys & 0xf));
     316        memset(pVCpu->hm.s.vmx.pGuestMSR, 0, PAGE_SIZE);
    317317
    318318        /* Allocate one page for the host MSR load area (for restoring host MSRs after the world switch back). */
    319         rc = RTR0MemObjAllocCont(&pVCpu->hwaccm.s.vmx.pMemObjHostMSR, PAGE_SIZE, false /* executable R0 mapping */);
     319        rc = RTR0MemObjAllocCont(&pVCpu->hm.s.vmx.pMemObjHostMSR, PAGE_SIZE, false /* executable R0 mapping */);
    320320        AssertRC(rc);
    321321        if (RT_FAILURE(rc))
    322322            return rc;
    323323
    324         pVCpu->hwaccm.s.vmx.pHostMSR     = (uint8_t *)RTR0MemObjAddress(pVCpu->hwaccm.s.vmx.pMemObjHostMSR);
    325         pVCpu->hwaccm.s.vmx.pHostMSRPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hwaccm.s.vmx.pMemObjHostMSR, 0);
    326         Assert(!(pVCpu->hwaccm.s.vmx.pHostMSRPhys & 0xf));
    327         memset(pVCpu->hwaccm.s.vmx.pHostMSR, 0, PAGE_SIZE);
     324        pVCpu->hm.s.vmx.pHostMSR     = (uint8_t *)RTR0MemObjAddress(pVCpu->hm.s.vmx.pMemObjHostMSR);
     325        pVCpu->hm.s.vmx.pHostMSRPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.vmx.pMemObjHostMSR, 0);
     326        Assert(!(pVCpu->hm.s.vmx.pHostMSRPhys & 0xf));
     327        memset(pVCpu->hm.s.vmx.pHostMSR, 0, PAGE_SIZE);
    328328#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
    329329
    330330        /* Current guest paging mode. */
    331         pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode = PGMMODE_REAL;
     331        pVCpu->hm.s.vmx.enmLastSeenGuestMode = PGMMODE_REAL;
    332332
    333333#ifdef LOG_ENABLED
    334         SUPR0Printf("VMXR0InitVM %x VMCS=%x (%x)\n", pVM, pVCpu->hwaccm.s.vmx.pvVMCS, (uint32_t)pVCpu->hwaccm.s.vmx.HCPhysVMCS);
     334        SUPR0Printf("VMXR0InitVM %x VMCS=%x (%x)\n", pVM, pVCpu->hm.s.vmx.pvVMCS, (uint32_t)pVCpu->hm.s.vmx.HCPhysVMCS);
    335335#endif
    336336    }
     
    352352        PVMCPU pVCpu = &pVM->aCpus[i];
    353353
    354         if (pVCpu->hwaccm.s.vmx.hMemObjVMCS != NIL_RTR0MEMOBJ)
    355         {
    356             RTR0MemObjFree(pVCpu->hwaccm.s.vmx.hMemObjVMCS, false);
    357             pVCpu->hwaccm.s.vmx.hMemObjVMCS = NIL_RTR0MEMOBJ;
    358             pVCpu->hwaccm.s.vmx.pvVMCS      = 0;
    359             pVCpu->hwaccm.s.vmx.HCPhysVMCS  = 0;
    360         }
    361         if (pVCpu->hwaccm.s.vmx.hMemObjVAPIC != NIL_RTR0MEMOBJ)
    362         {
    363             RTR0MemObjFree(pVCpu->hwaccm.s.vmx.hMemObjVAPIC, false);
    364             pVCpu->hwaccm.s.vmx.hMemObjVAPIC = NIL_RTR0MEMOBJ;
    365             pVCpu->hwaccm.s.vmx.pbVAPIC      = 0;
    366             pVCpu->hwaccm.s.vmx.HCPhysVAPIC  = 0;
    367         }
    368         if (pVCpu->hwaccm.s.vmx.pMemObjMSRBitmap != NIL_RTR0MEMOBJ)
    369         {
    370             RTR0MemObjFree(pVCpu->hwaccm.s.vmx.pMemObjMSRBitmap, false);
    371             pVCpu->hwaccm.s.vmx.pMemObjMSRBitmap = NIL_RTR0MEMOBJ;
    372             pVCpu->hwaccm.s.vmx.pMSRBitmap       = 0;
    373             pVCpu->hwaccm.s.vmx.pMSRBitmapPhys   = 0;
     354        if (pVCpu->hm.s.vmx.hMemObjVMCS != NIL_RTR0MEMOBJ)
     355        {
     356            RTR0MemObjFree(pVCpu->hm.s.vmx.hMemObjVMCS, false);
     357            pVCpu->hm.s.vmx.hMemObjVMCS = NIL_RTR0MEMOBJ;
     358            pVCpu->hm.s.vmx.pvVMCS      = 0;
     359            pVCpu->hm.s.vmx.HCPhysVMCS  = 0;
     360        }
     361        if (pVCpu->hm.s.vmx.hMemObjVAPIC != NIL_RTR0MEMOBJ)
     362        {
     363            RTR0MemObjFree(pVCpu->hm.s.vmx.hMemObjVAPIC, false);
     364            pVCpu->hm.s.vmx.hMemObjVAPIC = NIL_RTR0MEMOBJ;
     365            pVCpu->hm.s.vmx.pbVAPIC      = 0;
     366            pVCpu->hm.s.vmx.HCPhysVAPIC  = 0;
     367        }
     368        if (pVCpu->hm.s.vmx.pMemObjMSRBitmap != NIL_RTR0MEMOBJ)
     369        {
     370            RTR0MemObjFree(pVCpu->hm.s.vmx.pMemObjMSRBitmap, false);
     371            pVCpu->hm.s.vmx.pMemObjMSRBitmap = NIL_RTR0MEMOBJ;
     372            pVCpu->hm.s.vmx.pMSRBitmap       = 0;
     373            pVCpu->hm.s.vmx.pMSRBitmapPhys   = 0;
    374374        }
    375375#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    376         if (pVCpu->hwaccm.s.vmx.pMemObjHostMSR != NIL_RTR0MEMOBJ)
    377         {
    378             RTR0MemObjFree(pVCpu->hwaccm.s.vmx.pMemObjHostMSR, false);
    379             pVCpu->hwaccm.s.vmx.pMemObjHostMSR = NIL_RTR0MEMOBJ;
    380             pVCpu->hwaccm.s.vmx.pHostMSR       = 0;
    381             pVCpu->hwaccm.s.vmx.pHostMSRPhys   = 0;
    382         }
    383         if (pVCpu->hwaccm.s.vmx.pMemObjGuestMSR != NIL_RTR0MEMOBJ)
    384         {
    385             RTR0MemObjFree(pVCpu->hwaccm.s.vmx.pMemObjGuestMSR, false);
    386             pVCpu->hwaccm.s.vmx.pMemObjGuestMSR = NIL_RTR0MEMOBJ;
    387             pVCpu->hwaccm.s.vmx.pGuestMSR       = 0;
    388             pVCpu->hwaccm.s.vmx.pGuestMSRPhys   = 0;
     376        if (pVCpu->hm.s.vmx.pMemObjHostMSR != NIL_RTR0MEMOBJ)
     377        {
     378            RTR0MemObjFree(pVCpu->hm.s.vmx.pMemObjHostMSR, false);
     379            pVCpu->hm.s.vmx.pMemObjHostMSR = NIL_RTR0MEMOBJ;
     380            pVCpu->hm.s.vmx.pHostMSR       = 0;
     381            pVCpu->hm.s.vmx.pHostMSRPhys   = 0;
     382        }
     383        if (pVCpu->hm.s.vmx.pMemObjGuestMSR != NIL_RTR0MEMOBJ)
     384        {
     385            RTR0MemObjFree(pVCpu->hm.s.vmx.pMemObjGuestMSR, false);
     386            pVCpu->hm.s.vmx.pMemObjGuestMSR = NIL_RTR0MEMOBJ;
     387            pVCpu->hm.s.vmx.pGuestMSR       = 0;
     388            pVCpu->hm.s.vmx.pGuestMSRPhys   = 0;
    389389        }
    390390#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
    391391    }
    392     if (pVM->hwaccm.s.vmx.pMemObjAPIC != NIL_RTR0MEMOBJ)
    393     {
    394         RTR0MemObjFree(pVM->hwaccm.s.vmx.pMemObjAPIC, false);
    395         pVM->hwaccm.s.vmx.pMemObjAPIC = NIL_RTR0MEMOBJ;
    396         pVM->hwaccm.s.vmx.pAPIC       = 0;
    397         pVM->hwaccm.s.vmx.pAPICPhys   = 0;
     392    if (pVM->hm.s.vmx.pMemObjAPIC != NIL_RTR0MEMOBJ)
     393    {
     394        RTR0MemObjFree(pVM->hm.s.vmx.pMemObjAPIC, false);
     395        pVM->hm.s.vmx.pMemObjAPIC = NIL_RTR0MEMOBJ;
     396        pVM->hm.s.vmx.pAPIC       = 0;
     397        pVM->hm.s.vmx.pAPICPhys   = 0;
    398398    }
    399399#ifdef VBOX_WITH_CRASHDUMP_MAGIC
    400     if (pVM->hwaccm.s.vmx.pMemObjScratch != NIL_RTR0MEMOBJ)
    401     {
    402         ASMMemZero32(pVM->hwaccm.s.vmx.pScratch, PAGE_SIZE);
    403         RTR0MemObjFree(pVM->hwaccm.s.vmx.pMemObjScratch, false);
    404         pVM->hwaccm.s.vmx.pMemObjScratch = NIL_RTR0MEMOBJ;
    405         pVM->hwaccm.s.vmx.pScratch       = 0;
    406         pVM->hwaccm.s.vmx.pScratchPhys   = 0;
     400    if (pVM->hm.s.vmx.pMemObjScratch != NIL_RTR0MEMOBJ)
     401    {
     402        ASMMemZero32(pVM->hm.s.vmx.pScratch, PAGE_SIZE);
     403        RTR0MemObjFree(pVM->hm.s.vmx.pMemObjScratch, false);
     404        pVM->hm.s.vmx.pMemObjScratch = NIL_RTR0MEMOBJ;
     405        pVM->hm.s.vmx.pScratch       = 0;
     406        pVM->hm.s.vmx.pScratchPhys   = 0;
    407407    }
    408408#endif
     
    424424    AssertReturn(pVM, VERR_INVALID_PARAMETER);
    425425
    426     /* Initialize these always, see hwaccmR3InitFinalizeR0().*/
    427     pVM->hwaccm.s.vmx.enmFlushEPT  = VMX_FLUSH_EPT_NONE;
    428     pVM->hwaccm.s.vmx.enmFlushVPID = VMX_FLUSH_VPID_NONE;
     426    /* Initialize these always, see hmR3InitFinalizeR0().*/
     427    pVM->hm.s.vmx.enmFlushEPT  = VMX_FLUSH_EPT_NONE;
     428    pVM->hm.s.vmx.enmFlushVPID = VMX_FLUSH_VPID_NONE;
    429429
    430430    /* Determine optimal flush type for EPT. */
    431     if (pVM->hwaccm.s.fNestedPaging)
    432     {
    433         if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT)
    434         {
    435             if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_SINGLE_CONTEXT)
    436                 pVM->hwaccm.s.vmx.enmFlushEPT = VMX_FLUSH_EPT_SINGLE_CONTEXT;
    437             else if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_ALL_CONTEXTS)
    438                 pVM->hwaccm.s.vmx.enmFlushEPT = VMX_FLUSH_EPT_ALL_CONTEXTS;
     431    if (pVM->hm.s.fNestedPaging)
     432    {
     433        if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT)
     434        {
     435            if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_SINGLE_CONTEXT)
     436                pVM->hm.s.vmx.enmFlushEPT = VMX_FLUSH_EPT_SINGLE_CONTEXT;
     437            else if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_ALL_CONTEXTS)
     438                pVM->hm.s.vmx.enmFlushEPT = VMX_FLUSH_EPT_ALL_CONTEXTS;
    439439            else
    440440            {
     
     443443                 * We cannot ignore EPT at this point as we've already set up Unrestricted Guest execution.
    444444                 */
    445                 pVM->hwaccm.s.vmx.enmFlushEPT = VMX_FLUSH_EPT_NOT_SUPPORTED;
     445                pVM->hm.s.vmx.enmFlushEPT = VMX_FLUSH_EPT_NOT_SUPPORTED;
    446446                return VERR_VMX_GENERIC;
    447447            }
     
    452452             * Should never really happen. EPT is supported but INVEPT instruction is not supported.
    453453             */
    454             pVM->hwaccm.s.vmx.enmFlushEPT = VMX_FLUSH_EPT_NOT_SUPPORTED;
     454            pVM->hm.s.vmx.enmFlushEPT = VMX_FLUSH_EPT_NOT_SUPPORTED;
    455455            return VERR_VMX_GENERIC;
    456456        }
     
    458458
    459459    /* Determine optimal flush type for VPID. */
    460     if (pVM->hwaccm.s.vmx.fVPID)
    461     {
    462         if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID)
    463         {
    464             if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_SINGLE_CONTEXT)
    465                 pVM->hwaccm.s.vmx.enmFlushVPID = VMX_FLUSH_VPID_SINGLE_CONTEXT;
    466             else if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_ALL_CONTEXTS)
    467                 pVM->hwaccm.s.vmx.enmFlushVPID = VMX_FLUSH_VPID_ALL_CONTEXTS;
     460    if (pVM->hm.s.vmx.fVPID)
     461    {
     462        if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID)
     463        {
     464            if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_SINGLE_CONTEXT)
     465                pVM->hm.s.vmx.enmFlushVPID = VMX_FLUSH_VPID_SINGLE_CONTEXT;
     466            else if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_ALL_CONTEXTS)
     467                pVM->hm.s.vmx.enmFlushVPID = VMX_FLUSH_VPID_ALL_CONTEXTS;
    468468            else
    469469            {
     
     472472                 * We do not handle other flush-type combinations, so we ignore the VPID capabilities.
    473473                 */
    474                 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV_ADDR)
     474                if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV_ADDR)
    475475                    Log(("VMXR0SetupVM: Only VMX_FLUSH_VPID_INDIV_ADDR supported. Ignoring VPID.\n"));
    476                 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_SINGLE_CONTEXT_RETAIN_GLOBALS)
     476                if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_SINGLE_CONTEXT_RETAIN_GLOBALS)
    477477                    Log(("VMXR0SetupVM: Only VMX_FLUSH_VPID_SINGLE_CONTEXT_RETAIN_GLOBALS supported. Ignoring VPID.\n"));
    478                 pVM->hwaccm.s.vmx.enmFlushVPID = VMX_FLUSH_VPID_NOT_SUPPORTED;
    479                 pVM->hwaccm.s.vmx.fVPID = false;
     478                pVM->hm.s.vmx.enmFlushVPID = VMX_FLUSH_VPID_NOT_SUPPORTED;
     479                pVM->hm.s.vmx.fVPID = false;
    480480            }
    481481        }
     
    487487             */
    488488            Log(("VMXR0SetupVM: VPID supported without INVEPT support. Ignoring VPID.\n"));
    489             pVM->hwaccm.s.vmx.enmFlushVPID = VMX_FLUSH_VPID_NOT_SUPPORTED;
    490             pVM->hwaccm.s.vmx.fVPID = false;
     489            pVM->hm.s.vmx.enmFlushVPID = VMX_FLUSH_VPID_NOT_SUPPORTED;
     490            pVM->hm.s.vmx.fVPID = false;
    491491        }
    492492    }
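
Both capability walks above follow the same preference order: single-context invalidation if the CPU advertises it, otherwise all-contexts, otherwise the feature is declared unusable (fatal for EPT, merely disabling for VPID). A condensed sketch of the selection; the capability bits and enum here are illustrative, the real bits live in the IA32_VMX_EPT_VPID_CAP MSR:

    #include <stdint.h>

    #define CAP_INV_SUPPORTED      (1u << 0)   /* INVEPT/INVVPID available at all */
    #define CAP_INV_SINGLE_CONTEXT (1u << 1)
    #define CAP_INV_ALL_CONTEXTS   (1u << 2)
    typedef enum { FLUSH_NOT_SUPPORTED, FLUSH_SINGLE_CONTEXT, FLUSH_ALL_CONTEXTS } FLUSHTYPE;

    static FLUSHTYPE pickFlushType(uint32_t fCaps)
    {
        if (!(fCaps & CAP_INV_SUPPORTED))
            return FLUSH_NOT_SUPPORTED;        /* invalidation instruction missing */
        if (fCaps & CAP_INV_SINGLE_CONTEXT)
            return FLUSH_SINGLE_CONTEXT;       /* cheapest usable variant */
        if (fCaps & CAP_INV_ALL_CONTEXTS)
            return FLUSH_ALL_CONTEXTS;         /* heavier, but works */
        return FLUSH_NOT_SUPPORTED;            /* odd combination: give up */
    }
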
     
    496496        PVMCPU pVCpu = &pVM->aCpus[i];
    497497
    498         AssertPtr(pVCpu->hwaccm.s.vmx.pvVMCS);
     498        AssertPtr(pVCpu->hm.s.vmx.pvVMCS);
    499499
    500500        /* Set revision dword at the beginning of the VMCS structure. */
    501         *(uint32_t *)pVCpu->hwaccm.s.vmx.pvVMCS = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hwaccm.s.vmx.msr.vmx_basic_info);
     501        *(uint32_t *)pVCpu->hm.s.vmx.pvVMCS = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.msr.vmx_basic_info);
    502502
    503503        /*
    504504         * Clear and activate the VMCS.
    505505         */
    506         Log(("HCPhysVMCS  = %RHp\n", pVCpu->hwaccm.s.vmx.HCPhysVMCS));
    507         rc = VMXClearVMCS(pVCpu->hwaccm.s.vmx.HCPhysVMCS);
     506        Log(("HCPhysVMCS  = %RHp\n", pVCpu->hm.s.vmx.HCPhysVMCS));
     507        rc = VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVMCS);
    508508        if (RT_FAILURE(rc))
    509509            goto vmx_end;
    510510
    511         rc = VMXActivateVMCS(pVCpu->hwaccm.s.vmx.HCPhysVMCS);
     511        rc = VMXActivateVMCS(pVCpu->hm.s.vmx.HCPhysVMCS);
    512512        if (RT_FAILURE(rc))
    513513            goto vmx_end;
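
The clear-then-activate pair above is the required VMCS lifecycle: VMCLEAR puts the region into the clear state and VMPTRLD makes it current, so the VMXWriteVMCS calls that follow land in it; the final VMXClearVMCS further down flushes the cached state back to memory. As an outline only, using the wrapper names from this file with most error handling elided:

    rc = VMXClearVMCS(HCPhysVMCS);          /* VMCLEAR: enter the clear state */
    if (RT_SUCCESS(rc))
        rc = VMXActivateVMCS(HCPhysVMCS);   /* VMPTRLD: make it the current VMCS */
    /* ... VMXWriteVMCS(...) calls configure the current VMCS here ... */
    rc = VMXClearVMCS(HCPhysVMCS);          /* write cached VMCS data back to memory */
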
     
    517517         * Set required bits to one and zero according to the MSR capabilities.
    518518         */
    519         val = pVM->hwaccm.s.vmx.msr.vmx_pin_ctls.n.disallowed0;
     519        val = pVM->hm.s.vmx.msr.vmx_pin_ctls.n.disallowed0;
    520520        val |=    VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT      /* External interrupts */
    521521                | VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT;         /* Non-maskable interrupts */
     
    524524         * Enable the VMX preemption timer.
    525525         */
    526         if (pVM->hwaccm.s.vmx.fUsePreemptTimer)
     526        if (pVM->hm.s.vmx.fUsePreemptTimer)
    527527            val |= VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER;
    528         val &= pVM->hwaccm.s.vmx.msr.vmx_pin_ctls.n.allowed1;
     528        val &= pVM->hm.s.vmx.msr.vmx_pin_ctls.n.allowed1;
    529529
    530530        rc = VMXWriteVMCS(VMX_VMCS_CTRL_PIN_EXEC_CONTROLS, val);
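
This disallowed0/allowed1 dance recurs for every control field below: the capability MSR pair tells us which bits the CPU forces to one and which bits it permits at all, so the desired feature set is OR'ed with the former and AND'ed with the latter before being written. The pattern as one self-contained helper (name is illustrative):

    #include <stdint.h>

    /* Apply a VMX control-capability MSR pair to a desired feature mask:
     * fDisallowed0 = bits the CPU requires to be 1,
     * fAllowed1    = bits the CPU allows to be 1. */
    static uint32_t vmxApplyCtlCaps(uint32_t fDesired, uint32_t fDisallowed0, uint32_t fAllowed1)
    {
        return (fDesired | fDisallowed0) & fAllowed1;
    }

If a bit we rely on is cleared by the allowed1 mask, the feature simply is not available, which is why several paths below re-check the allowed1 bits explicitly before using the corresponding control.
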
     
    535535         * Set required bits to one and zero according to the MSR capabilities.
    536536         */
    537         val = pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.disallowed0;
     537        val = pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0;
     538538        /* Program which events cause VM-exits and which features we want to use. */
    539539        val |=   VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT
     
    547547
    548548        /* Without nested paging we should intercept invlpg and cr3 mov instructions. */
    549         if (!pVM->hwaccm.s.fNestedPaging)
     549        if (!pVM->hm.s.fNestedPaging)
    550550        {
    551551            val |=   VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT
     
    558558         * failure with an invalid control fields error. (combined with some other exit reasons)
    559559         */
    560         if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
     560        if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
    561561        {
     562562            /* CR8 reads from the APIC shadow page; writes cause an exit if they lower the TPR below the threshold */
    563563            val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW;
    564             Assert(pVM->hwaccm.s.vmx.pAPIC);
     564            Assert(pVM->hm.s.vmx.pAPIC);
    565565        }
    566566        else
     
    568568            val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT;
    569569
    570         if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
    571         {
    572             Assert(pVCpu->hwaccm.s.vmx.pMSRBitmapPhys);
     570        if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
     571        {
     572            Assert(pVCpu->hm.s.vmx.pMSRBitmapPhys);
    573573            val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS;
    574574        }
     
    579579        /* Mask away the bits that the CPU doesn't support */
    580580        /** @todo make sure they don't conflict with the above requirements. */
    581         val &= pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1;
    582         pVCpu->hwaccm.s.vmx.proc_ctls = val;
     581        val &= pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1;
     582        pVCpu->hm.s.vmx.proc_ctls = val;
    583583
    584584        rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, val);
    585585        AssertRC(rc);
    586586
    587         if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
     587        if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
    588588        {
    589589            /*
     
    591591             * Set required bits to one and zero according to the MSR capabilities.
    592592             */
    593             val  = pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.disallowed0;
     593            val  = pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.disallowed0;
    594594            val |= VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT;
    595595
    596             if (pVM->hwaccm.s.fNestedPaging)
     596            if (pVM->hm.s.fNestedPaging)
    597597                val |= VMX_VMCS_CTRL_PROC_EXEC2_EPT;
    598598
    599             if (pVM->hwaccm.s.vmx.fVPID)
     599            if (pVM->hm.s.vmx.fVPID)
    600600                val |= VMX_VMCS_CTRL_PROC_EXEC2_VPID;
    601601
    602             if (pVM->hwaccm.s.fHasIoApic)
     602            if (pVM->hm.s.fHasIoApic)
    603603                val |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC;
    604604
    605             if (pVM->hwaccm.s.vmx.fUnrestrictedGuest)
     605            if (pVM->hm.s.vmx.fUnrestrictedGuest)
    606606                val |= VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE;
    607607
    608             if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
     608            if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
    609609                val |= VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP;
    610610
    611611            /* Mask away the bits that the CPU doesn't support */
    612612            /** @todo make sure they don't conflict with the above requirements. */
    613             val &= pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1;
    614             pVCpu->hwaccm.s.vmx.proc_ctls2 = val;
     613            val &= pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1;
     614            pVCpu->hm.s.vmx.proc_ctls2 = val;
    615615            rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS2, val);
    616616            AssertRC(rc);
     
    656656         * Set the MSR bitmap address.
    657657         */
    658         if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
    659         {
    660             Assert(pVCpu->hwaccm.s.vmx.pMSRBitmapPhys);
    661 
    662             rc = VMXWriteVMCS64(VMX_VMCS_CTRL_MSR_BITMAP_FULL, pVCpu->hwaccm.s.vmx.pMSRBitmapPhys);
     658        if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
     659        {
     660            Assert(pVCpu->hm.s.vmx.pMSRBitmapPhys);
     661
     662            rc = VMXWriteVMCS64(VMX_VMCS_CTRL_MSR_BITMAP_FULL, pVCpu->hm.s.vmx.pMSRBitmapPhys);
    663663            AssertRC(rc);
    664664
     
    676676            hmR0VmxSetMSRPermission(pVCpu, MSR_K8_GS_BASE, true, true);
    677677            hmR0VmxSetMSRPermission(pVCpu, MSR_K8_FS_BASE, true, true);
    678             if (pVCpu->hwaccm.s.vmx.proc_ctls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
     678            if (pVCpu->hm.s.vmx.proc_ctls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
    679679                hmR0VmxSetMSRPermission(pVCpu, MSR_K8_TSC_AUX, true, true);
    680680        }
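
hmR0VmxSetMSRPermission works on the architectural 4K MSR bitmap: bytes 0x000-0x3FF carry read bits for MSRs 0 through 0x1FFF, bytes 0x400-0x7FF carry read bits for 0xC0000000 through 0xC0001FFF, and the write bitmaps mirror that layout at 0x800 and 0xC00; a clear bit means the access does not cause a VM-exit. A self-contained sketch of the addressing, with an illustrative helper name:

    #include <stdbool.h>
    #include <stdint.h>

    /* Set or clear one interception bit in a 4K VMX MSR bitmap.
     * Layout (Intel SDM): 0x000 read-low, 0x400 read-high,
     * 0x800 write-low, 0xC00 write-high; one bit per MSR. */
    static void msrBitmapSet(uint8_t *pbBitmap, uint32_t idMsr, bool fWrite, bool fIntercept)
    {
        uint32_t offRegion, iBit;
        if (idMsr <= UINT32_C(0x00001fff))
        {
            offRegion = fWrite ? 0x800 : 0x000;            /* low MSR range */
            iBit      = idMsr;
        }
        else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x1fff))
        {
            offRegion = fWrite ? 0xc00 : 0x400;            /* high MSR range */
            iBit      = idMsr - UINT32_C(0xc0000000);
        }
        else
            return;   /* outside both ranges: the access always exits */

        if (fIntercept)
            pbBitmap[offRegion + iBit / 8] |=  (uint8_t)(1u << (iBit % 8));
        else
            pbBitmap[offRegion + iBit / 8] &= ~(uint8_t)(1u << (iBit % 8));
    }
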
     
    684684         * Set the guest & host MSR load/store physical addresses.
    685685         */
    686         Assert(pVCpu->hwaccm.s.vmx.pGuestMSRPhys);
    687         rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMENTRY_MSR_LOAD_FULL, pVCpu->hwaccm.s.vmx.pGuestMSRPhys);
     686        Assert(pVCpu->hm.s.vmx.pGuestMSRPhys);
     687        rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMENTRY_MSR_LOAD_FULL, pVCpu->hm.s.vmx.pGuestMSRPhys);
    688688        AssertRC(rc);
    689         rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMEXIT_MSR_STORE_FULL, pVCpu->hwaccm.s.vmx.pGuestMSRPhys);
     689        rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMEXIT_MSR_STORE_FULL, pVCpu->hm.s.vmx.pGuestMSRPhys);
    690690        AssertRC(rc);
    691         Assert(pVCpu->hwaccm.s.vmx.pHostMSRPhys);
    692         rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMEXIT_MSR_LOAD_FULL,  pVCpu->hwaccm.s.vmx.pHostMSRPhys);
     691        Assert(pVCpu->hm.s.vmx.pHostMSRPhys);
     692        rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMEXIT_MSR_LOAD_FULL,  pVCpu->hm.s.vmx.pHostMSRPhys);
    693693        AssertRC(rc);
    694694#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
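
Each of the areas wired up here is an array of 16-byte entries in the architectural VM-entry/VM-exit MSR load/store format: a 32-bit MSR index, 32 reserved bits, and the 64-bit value. A sketch of appending one slot; the struct and helper are illustrative (the file's own type is VMXMSR):

    #include <stdint.h>

    /* One entry of a VMX MSR load/store area (architectural layout). */
    typedef struct MSRSLOT
    {
        uint32_t u32IndexMsr;   /* which MSR */
        uint32_t u32Reserved;   /* must be zero */
        uint64_t u64Value;      /* loaded on entry / stored on exit */
    } MSRSLOT;

    /* Append a slot; the caller also writes the new count into the
     * matching VMCS MSR-count field. */
    static unsigned msrAreaAppend(MSRSLOT *paSlots, unsigned cSlots,
                                  uint32_t idMsr, uint64_t uValue)
    {
        paSlots[cSlots].u32IndexMsr = idMsr;
        paSlots[cSlots].u32Reserved = 0;
        paSlots[cSlots].u64Value    = uValue;
        return cSlots + 1;
    }
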
     
    701701        AssertRC(rc);
    702702
    703         if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
    704         {
    705             Assert(pVM->hwaccm.s.vmx.pMemObjAPIC);
     703        if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
     704        {
     705            Assert(pVM->hm.s.vmx.pMemObjAPIC);
    706706            /* Optional */
    707707            rc  = VMXWriteVMCS(VMX_VMCS_CTRL_TPR_THRESHOLD, 0);
    708             rc |= VMXWriteVMCS64(VMX_VMCS_CTRL_VAPIC_PAGEADDR_FULL, pVCpu->hwaccm.s.vmx.HCPhysVAPIC);
    709 
    710             if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
    711                 rc |= VMXWriteVMCS64(VMX_VMCS_CTRL_APIC_ACCESSADDR_FULL, pVM->hwaccm.s.vmx.pAPICPhys);
     708            rc |= VMXWriteVMCS64(VMX_VMCS_CTRL_VAPIC_PAGEADDR_FULL, pVCpu->hm.s.vmx.HCPhysVAPIC);
     709
     710            if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
     711                rc |= VMXWriteVMCS64(VMX_VMCS_CTRL_APIC_ACCESSADDR_FULL, pVM->hm.s.vmx.pAPICPhys);
    712712
    713713            AssertRC(rc);
     
    722722         * VMCS data back to memory.
    723723         */
    724         rc = VMXClearVMCS(pVCpu->hwaccm.s.vmx.HCPhysVMCS);
     724        rc = VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVMCS);
    725725        AssertRC(rc);
    726726
     
    728728         * Configure the VMCS read cache.
    729729         */
    730         PVMCSCACHE pCache = &pVCpu->hwaccm.s.vmx.VMCSCache;
     730        PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
    731731
    732732        VMXSetupCachedReadVMCS(pCache, VMX_VMCS64_GUEST_RIP);
     
    769769        VMXSetupCachedReadVMCS(pCache, VMX_VMCS32_RO_IDT_ERRCODE);
    770770
    771         if (pVM->hwaccm.s.fNestedPaging)
     771        if (pVM->hm.s.fNestedPaging)
    772772        {
    773773            VMXSetupCachedReadVMCS(pCache, VMX_VMCS64_GUEST_CR3);
     
    782782     * Setup the right TLB function based on CPU capabilities.
    783783     */
    784     if (pVM->hwaccm.s.fNestedPaging && pVM->hwaccm.s.vmx.fVPID)
    785         pVM->hwaccm.s.vmx.pfnSetupTaggedTLB = hmR0VmxSetupTLBBoth;
    786     else if (pVM->hwaccm.s.fNestedPaging)
    787         pVM->hwaccm.s.vmx.pfnSetupTaggedTLB = hmR0VmxSetupTLBEPT;
    788     else if (pVM->hwaccm.s.vmx.fVPID)
    789         pVM->hwaccm.s.vmx.pfnSetupTaggedTLB = hmR0VmxSetupTLBVPID;
     784    if (pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVPID)
     785        pVM->hm.s.vmx.pfnSetupTaggedTLB = hmR0VmxSetupTLBBoth;
     786    else if (pVM->hm.s.fNestedPaging)
     787        pVM->hm.s.vmx.pfnSetupTaggedTLB = hmR0VmxSetupTLBEPT;
     788    else if (pVM->hm.s.vmx.fVPID)
     789        pVM->hm.s.vmx.pfnSetupTaggedTLB = hmR0VmxSetupTLBVPID;
    790790    else
    791         pVM->hwaccm.s.vmx.pfnSetupTaggedTLB = hmR0VmxSetupTLBDummy;
     791        pVM->hm.s.vmx.pfnSetupTaggedTLB = hmR0VmxSetupTLBDummy;
    792792
    793793vmx_end:
     
    808808{
    809809    unsigned ulBit;
    810     uint8_t *pMSRBitmap = (uint8_t *)pVCpu->hwaccm.s.vmx.pMSRBitmap;
     810    uint8_t *pMSRBitmap = (uint8_t *)pVCpu->hm.s.vmx.pMSRBitmap;
    811811
    812812    /*
     
    867867
    868868#ifdef VBOX_WITH_STATISTICS
    869     STAM_COUNTER_INC(&pVCpu->hwaccm.s.paStatInjectedIrqsR0[iGate & MASK_INJECT_IRQ_STAT]);
     869    STAM_COUNTER_INC(&pVCpu->hm.s.paStatInjectedIrqsR0[iGate & MASK_INJECT_IRQ_STAT]);
    870870#endif
    871871
     
    892892
    893893    if (    CPUMIsGuestInRealModeEx(pCtx)
    894         &&  pVM->hwaccm.s.vmx.pRealModeTSS)
     894        &&  pVM->hm.s.vmx.pRealModeTSS)
    895895    {
    896896        RTGCPHYS GCPhysHandler;
     
    967967        pCtx->eflags.u     &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
    968968
    969         pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_SEGMENT_REGS;
     969        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SEGMENT_REGS;
    970970        return VINF_SUCCESS;
    971971    }
     
    998998     * Dispatch any pending interrupts (injected before, but a VM exit occurred prematurely).
    999999     */
    1000     if (pVCpu->hwaccm.s.Event.fPending)
    1001     {
    1002         Log(("CPU%d: Reinjecting event %RX64 %08x at %RGv cr2=%RX64\n", pVCpu->idCpu, pVCpu->hwaccm.s.Event.intInfo,
    1003              pVCpu->hwaccm.s.Event.errCode, (RTGCPTR)pCtx->rip, pCtx->cr2));
    1004         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatIntReinject);
    1005         rc = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, pVCpu->hwaccm.s.Event.intInfo, 0, pVCpu->hwaccm.s.Event.errCode);
     1000    if (pVCpu->hm.s.Event.fPending)
     1001    {
     1002        Log(("CPU%d: Reinjecting event %RX64 %08x at %RGv cr2=%RX64\n", pVCpu->idCpu, pVCpu->hm.s.Event.intInfo,
     1003             pVCpu->hm.s.Event.errCode, (RTGCPTR)pCtx->rip, pCtx->cr2));
     1004        STAM_COUNTER_INC(&pVCpu->hm.s.StatIntReinject);
     1005        rc = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, pVCpu->hm.s.Event.intInfo, 0, pVCpu->hm.s.Event.errCode);
    10061006        AssertRC(rc);
    10071007
    1008         pVCpu->hwaccm.s.Event.fPending = false;
     1008        pVCpu->hm.s.Event.fPending = false;
    10091009        return VINF_SUCCESS;
    10101010    }
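
The reinjection block above closes a classic race: an event whose delivery was cut short by a premature VM-exit must not be dropped, so it is parked in Event.fPending/intInfo/errCode and replayed before anything else on the next entry. The latch in miniature, with an illustrative struct mirroring the fields used above:

    #include <stdbool.h>
    #include <stdint.h>

    typedef struct PENDINGEVENT
    {
        bool     fPending;
        uint64_t intInfo;    /* VMX interruption-information word */
        uint32_t errCode;    /* error code, when the event has one */
    } PENDINGEVENT;

    /* On a premature exit during delivery: park the event. */
    static void eventPark(PENDINGEVENT *p, uint64_t intInfo, uint32_t errCode)
    {
        p->intInfo  = intInfo;
        p->errCode  = errCode;
        p->fPending = true;
    }

    /* On the next entry: replay it first, exactly once. */
    static bool eventTakePending(PENDINGEVENT *p, uint64_t *pIntInfo, uint32_t *pErrCode)
    {
        if (!p->fPending)
            return false;
        *pIntInfo   = p->intInfo;
        *pErrCode   = p->errCode;
        p->fPending = false;
        return true;
    }
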
     
    10401040            if (!(pCtx->eflags.u32 & X86_EFL_IF))
    10411041            {
    1042                 if (!(pVCpu->hwaccm.s.vmx.proc_ctls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT))
     1042                if (!(pVCpu->hm.s.vmx.proc_ctls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT))
    10431043                {
    10441044                    LogFlow(("Enable irq window exit!\n"));
    1045                     pVCpu->hwaccm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT;
    1046                     rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hwaccm.s.vmx.proc_ctls);
     1045                    pVCpu->hm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT;
     1046                    rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.proc_ctls);
    10471047                    AssertRC(rc);
    10481048                }
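
When an interrupt is deliverable but the guest has IF clear, the code arms interrupt-window exiting rather than polling: the CPU exits the instant the guest can accept the interrupt. Reduced to its essence; the control-bit value is the architectural one from the VMX spec, while the helper itself is illustrative:

    #include <stdbool.h>
    #include <stdint.h>

    #define PROC_CTLS_IRQ_WINDOW_EXIT  (UINT32_C(1) << 2)  /* primary proc-based controls, bit 2 */

    /* Arm interrupt-window exiting while the guest cannot take the IRQ;
     * the caller writes the result back with VMXWriteVMCS. */
    static uint32_t armIrqWindow(uint32_t fProcCtls, bool fIrqPending, bool fGuestIF)
    {
        if (fIrqPending && !fGuestIF)
            fProcCtls |= PROC_CTLS_IRQ_WINDOW_EXIT;   /* exit as soon as IF opens */
        return fProcCtls;
    }
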
     
    10651065                    /* Can only happen in rare cases where a pending interrupt is cleared behind our back */
    10661066                    Assert(!VMCPU_FF_ISPENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC|VMCPU_FF_INTERRUPT_PIC)));
    1067                     STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatSwitchGuestIrq);
     1067                    STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
    10681068                    /* Just continue */
    10691069                }
     
    11421142            intInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
    11431143
    1144         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatIntInject);
     1144        STAM_COUNTER_INC(&pVCpu->hm.s.StatIntInject);
    11451145        rc = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, intInfo, 0, errCode);
    11461146        AssertRC(rc);
     
    11661166     * Host CPU Context.
    11671167     */
    1168     if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_HOST_CONTEXT)
     1168    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_CONTEXT)
    11691169    {
    11701170        RTIDTR      idtr;
     
    11851185        if (VMX_IS_64BIT_HOST_MODE())
    11861186        {
    1187             cr3 = hwaccmR0Get64bitCR3();
     1187            cr3 = hmR0Get64bitCR3();
    11881188            rc |= VMXWriteVMCS64(VMX_VMCS_HOST_CR3,     cr3);
    11891189        }
     
    12501250        {
    12511251            X86XDTR64 gdtr64, idtr64;
    1252             hwaccmR0Get64bitGDTRandIDTR(&gdtr64, &idtr64);
     1252            hmR0Get64bitGDTRandIDTR(&gdtr64, &idtr64);
    12531253            rc  = VMXWriteVMCS64(VMX_VMCS_HOST_GDTR_BASE, gdtr64.uAddr);
     12541254            rc |= VMXWriteVMCS64(VMX_VMCS_HOST_IDTR_BASE, idtr64.uAddr);
     
    13561356         * the world switch back to the host.
    13571357         */
    1358         PVMXMSR pMsr = (PVMXMSR)pVCpu->hwaccm.s.vmx.pHostMSR;
     1358        PVMXMSR pMsr = (PVMXMSR)pVCpu->hm.s.vmx.pHostMSR;
    13591359        unsigned idxMsr = 0;
    13601360
     
    14041404# endif
    14051405
    1406         if (pVCpu->hwaccm.s.vmx.proc_ctls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
     1406        if (pVCpu->hm.s.vmx.proc_ctls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
    14071407        {
    14081408            pMsr->u32IndexMSR = MSR_K8_TSC_AUX;
     
    14181418#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
    14191419
    1420         pVCpu->hwaccm.s.fContextUseFlags &= ~HWACCM_CHANGED_HOST_CONTEXT;
     1420        pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_HOST_CONTEXT;
    14211421    }
    14221422    return rc;
     
    15151515     */
    15161516    /** @todo NP state won't change so maybe we should build the initial trap mask up front? */
    1517     if (!pVM->hwaccm.s.fNestedPaging)
     1517    if (!pVM->hm.s.fNestedPaging)
    15181518        u32TrapMask |= RT_BIT(X86_XCPT_PF);
    15191519
     
    15311531    /** @todo Despite the claim to intercept everything, with NP we do not intercept #PF. Should we? */
    15321532    if (    CPUMIsGuestInRealModeEx(pCtx)
    1533         &&  pVM->hwaccm.s.vmx.pRealModeTSS)
     1533        &&  pVM->hm.s.vmx.pRealModeTSS)
    15341534    {
    15351535        u32TrapMask |=   RT_BIT(X86_XCPT_DE)
     
    15721572    X86EFLAGS   eflags;
    15731573
    1574     Assert(!(pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_ALL_GUEST));
     1574    Assert(!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_ALL_GUEST));
    15751575
    15761576    /*
     
    15921592     */
    15931593    if (    CPUMIsGuestInRealModeEx(pCtx)
    1594         &&  pVM->hwaccm.s.vmx.pRealModeTSS)
    1595     {
    1596         pVCpu->hwaccm.s.vmx.RealMode.eflags = eflags;
     1594        &&  pVM->hm.s.vmx.pRealModeTSS)
     1595    {
     1596        pVCpu->hm.s.vmx.RealMode.eflags = eflags;
    15971597
    15981598        eflags.Bits.u1VM   = 1;
     
    16231623     * Set required bits to one and zero according to the MSR capabilities.
    16241624     */
    1625     val  = pVM->hwaccm.s.vmx.msr.vmx_entry.n.disallowed0;
     1625    val  = pVM->hm.s.vmx.msr.vmx_entry.n.disallowed0;
    16261626
    16271627    /*
     
    16381638     * Mask away the bits that the CPU doesn't support.
    16391639     */
    1640     val &= pVM->hwaccm.s.vmx.msr.vmx_entry.n.allowed1;
     1640    val &= pVM->hm.s.vmx.msr.vmx_entry.n.allowed1;
    16411641    rc = VMXWriteVMCS(VMX_VMCS_CTRL_ENTRY_CONTROLS, val);
    16421642    AssertRC(rc);
     
    16461646     * Set required bits to one and zero according to the MSR capabilities.
    16471647     */
    1648     val  = pVM->hwaccm.s.vmx.msr.vmx_exit.n.disallowed0;
     1648    val  = pVM->hm.s.vmx.msr.vmx_exit.n.disallowed0;
    16491649
    16501650    /*
     
    16641664        Assert(!(val & VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64));
    16651665#endif
    1666     val &= pVM->hwaccm.s.vmx.msr.vmx_exit.n.allowed1;
     1666    val &= pVM->hm.s.vmx.msr.vmx_exit.n.allowed1;
    16671667
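Both the entry-controls and exit-controls computations above follow the same VMX pattern: the capability MSR supplies a (disallowed0, allowed1) pair, bits set in disallowed0 must be 1, and bits clear in allowed1 must be 0. A one-function sketch of the idiom, with illustrative names:

    #include <stdint.h>

    /* Clamp a requested VMX control word against the CPU's capability MSR. */
    static uint32_t sketchAdjustVmxCtls(uint32_t fDesired, uint32_t fDisallowed0, uint32_t fAllowed1)
    {
        return (fDesired | fDisallowed0) & fAllowed1;
    }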
    16681668    /*
     
    16751675     * Guest CPU context: ES, CS, SS, DS, FS, GS.
    16761676     */
    1677     if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_SEGMENT_REGS)
    1678     {
    1679         if (pVM->hwaccm.s.vmx.pRealModeTSS)
     1677    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SEGMENT_REGS)
     1678    {
     1679        if (pVM->hm.s.vmx.pRealModeTSS)
    16801680        {
    16811681            PGMMODE enmGuestMode = PGMGetGuestMode(pVCpu);
    1682             if (pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode != enmGuestMode)
     1682            if (pVCpu->hm.s.vmx.enmLastSeenGuestMode != enmGuestMode)
    16831683            {
    16841684                /*
    16851685                 * Correct weird requirements for switching to protected mode.
    16861686                 */
    1687                 if (    pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode == PGMMODE_REAL
     1687                if (    pVCpu->hm.s.vmx.enmLastSeenGuestMode == PGMMODE_REAL
    16881688                    &&  enmGuestMode >= PGMMODE_PROTECTED)
    16891689                {
     
    17081708                    pCtx->ss.Attr.n.u2Dpl  = 0;
    17091709                }
    1710                 pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode = enmGuestMode;
     1710                pVCpu->hm.s.vmx.enmLastSeenGuestMode = enmGuestMode;
    17111711            }
    17121712            else if (   CPUMIsGuestInRealModeEx(pCtx)
     
    17411741     * Guest CPU context: LDTR.
    17421742     */
    1743     if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_LDTR)
     1743    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_LDTR)
    17441744    {
    17451745        if (pCtx->ldtr.Sel == 0)
     
    17641764     * Guest CPU context: TR.
    17651765     */
    1766     if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_TR)
     1766    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_TR)
    17671767    {
    17681768        /*
     
    17711771         */
    17721772        if (    CPUMIsGuestInRealModeEx(pCtx)
    1773             &&  pVM->hwaccm.s.vmx.pRealModeTSS)
     1773            &&  pVM->hm.s.vmx.pRealModeTSS)
    17741774        {
    17751775            RTGCPHYS GCPhys;
    17761776
    17771777            /* We convert it here every time as PCI regions could be reconfigured. */
    1778             rc = PDMVMMDevHeapR3ToGCPhys(pVM, pVM->hwaccm.s.vmx.pRealModeTSS, &GCPhys);
     1778            rc = PDMVMMDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
    17791779            AssertRC(rc);
    17801780
    17811781            rc =  VMXWriteVMCS(VMX_VMCS16_GUEST_FIELD_TR,         0);
    1782             rc |= VMXWriteVMCS(VMX_VMCS32_GUEST_TR_LIMIT,         HWACCM_VTX_TSS_SIZE);
     1782            rc |= VMXWriteVMCS(VMX_VMCS32_GUEST_TR_LIMIT,         HM_VTX_TSS_SIZE);
    17831783            rc |= VMXWriteVMCS64(VMX_VMCS64_GUEST_TR_BASE,          GCPhys /* phys = virt in this mode */);
    17841784
     
    18171817     * Guest CPU context: GDTR.
    18181818     */
    1819     if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_GDTR)
     1819    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_GDTR)
    18201820    {
    18211821        rc  = VMXWriteVMCS(VMX_VMCS32_GUEST_GDTR_LIMIT,       pCtx->gdtr.cbGdt);
     
    18271827     * Guest CPU context: IDTR.
    18281828     */
    1829     if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_IDTR)
     1829    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_IDTR)
    18301830    {
    18311831        rc  = VMXWriteVMCS(VMX_VMCS32_GUEST_IDTR_LIMIT,       pCtx->idtr.cbIdt);
     
    18371837     * Sysenter MSRs.
    18381838     */
    1839     if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_MSR)
     1839    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_MSR)
    18401840    {
    18411841        rc  = VMXWriteVMCS(VMX_VMCS32_GUEST_SYSENTER_CS,    pCtx->SysEnter.cs);
     
    18481848     * Guest CPU context: Control registers.
    18491849     */
    1850     if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR0)
     1850    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0)
    18511851    {
    18521852        val = pCtx->cr0;
     
    18671867        }
    18681868        /* Protected mode & paging are always enabled; we use them for emulating real and protected mode without paging too. */
    1869         if (!pVM->hwaccm.s.vmx.fUnrestrictedGuest)
     1869        if (!pVM->hm.s.vmx.fUnrestrictedGuest)
    18701870            val |= X86_CR0_PE | X86_CR0_PG;
    18711871
    1872         if (pVM->hwaccm.s.fNestedPaging)
     1872        if (pVM->hm.s.fNestedPaging)
    18731873        {
    18741874            if (CPUMIsGuestInPagedProtectedModeEx(pCtx))
    18751875            {
    18761876                /* Disable CR3 read/write monitoring as we don't need it for EPT. */
    1877                 pVCpu->hwaccm.s.vmx.proc_ctls &=  ~(  VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT
     1877                pVCpu->hm.s.vmx.proc_ctls &=  ~(  VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT
    18781878                                                    | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT);
    18791879            }
     
    18811881            {
    18821882                /* Reenable CR3 read/write monitoring as our identity mapped page table is active. */
    1883                 pVCpu->hwaccm.s.vmx.proc_ctls |=   VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT
     1883                pVCpu->hm.s.vmx.proc_ctls |=   VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT
    18841884                                                 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT;
    18851885            }
    1886             rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hwaccm.s.vmx.proc_ctls);
     1886            rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.proc_ctls);
    18871887            AssertRC(rc);
    18881888        }
     
    19151915            val |= X86_CR0_TS | X86_CR0_ET | X86_CR0_MP;
    19161916
    1917         pVCpu->hwaccm.s.vmx.cr0_mask = val;
     1917        pVCpu->hm.s.vmx.cr0_mask = val;
    19181918
    19191919        rc |= VMXWriteVMCS(VMX_VMCS_CTRL_CR0_MASK, val);
     
    19221922    }
    19231923
    1924     if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR4)
     1924    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR4)
    19251925    {
    19261926        rc  = VMXWriteVMCS(VMX_VMCS_CTRL_CR4_READ_SHADOW,   pCtx->cr4);
    19271927        Log2(("Guest CR4-shadow %08x\n", pCtx->cr4));
    19281928        /* Set the required bits in cr4 too (currently X86_CR4_VMXE). */
    1929         val = pCtx->cr4 | (uint32_t)pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0;
    1930 
    1931         if (!pVM->hwaccm.s.fNestedPaging)
    1932         {
    1933             switch (pVCpu->hwaccm.s.enmShadowMode)
     1929        val = pCtx->cr4 | (uint32_t)pVM->hm.s.vmx.msr.vmx_cr4_fixed0;
     1930
     1931        if (!pVM->hm.s.fNestedPaging)
     1932        {
     1933            switch (pVCpu->hm.s.enmShadowMode)
    19341934            {
    19351935                case PGMMODE_REAL:          /* Real mode                 -> emulated using v86 mode */
     
    19591959        }
    19601960        else if (   !CPUMIsGuestInPagedProtectedModeEx(pCtx)
    1961                  && !pVM->hwaccm.s.vmx.fUnrestrictedGuest)
     1961                 && !pVM->hm.s.vmx.fUnrestrictedGuest)
    19621962        {
    19631963            /* We use 4 MB pages in our identity mapping page table for real and protected mode without paging. */
     
    19711971         */
    19721972        if (    CPUMIsGuestInRealModeEx(pCtx)
    1973             &&  pVM->hwaccm.s.vmx.pRealModeTSS)
     1973            &&  pVM->hm.s.vmx.pRealModeTSS)
    19741974        {
    19751975            val &= ~X86_CR4_VME;
     
    19881988              | X86_CR4_PSE
    19891989              | X86_CR4_VMXE;
    1990         pVCpu->hwaccm.s.vmx.cr4_mask = val;
     1990        pVCpu->hm.s.vmx.cr4_mask = val;
    19911991
    19921992        rc |= VMXWriteVMCS(VMX_VMCS_CTRL_CR4_MASK, val);
     
    19971997#if 0
    19981998    /* Enable single stepping if requested and CPU supports it. */
    1999     if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG)
     1999    if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG)
    20002000        if (DBGFIsStepping(pVCpu))
    20012001        {
    2002             pVCpu->hwaccm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG;
    2003             rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hwaccm.s.vmx.proc_ctls);
     2002            pVCpu->hm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG;
     2003            rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.proc_ctls);
    20042004            AssertRC(rc);
    20052005        }
    20062006#endif
    20072007
    2008     if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR3)
    2009     {
    2010         if (pVM->hwaccm.s.fNestedPaging)
     2008    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR3)
     2009    {
     2010        if (pVM->hm.s.fNestedPaging)
    20112011        {
    20122012            Assert(PGMGetHyperCR3(pVCpu));
    2013             pVCpu->hwaccm.s.vmx.GCPhysEPTP = PGMGetHyperCR3(pVCpu);
    2014 
    2015             Assert(!(pVCpu->hwaccm.s.vmx.GCPhysEPTP & 0xfff));
     2013            pVCpu->hm.s.vmx.GCPhysEPTP = PGMGetHyperCR3(pVCpu);
     2014
     2015            Assert(!(pVCpu->hm.s.vmx.GCPhysEPTP & 0xfff));
    20162016            /** @todo Check the IA32_VMX_EPT_VPID_CAP MSR for other supported memory types. */
    2017             pVCpu->hwaccm.s.vmx.GCPhysEPTP |=   VMX_EPT_MEMTYPE_WB
     2017            pVCpu->hm.s.vmx.GCPhysEPTP |=   VMX_EPT_MEMTYPE_WB
    20182018                                             | (VMX_EPT_PAGE_WALK_LENGTH_DEFAULT << VMX_EPT_PAGE_WALK_LENGTH_SHIFT);
    20192019
    2020             rc = VMXWriteVMCS64(VMX_VMCS_CTRL_EPTP_FULL, pVCpu->hwaccm.s.vmx.GCPhysEPTP);
     2020            rc = VMXWriteVMCS64(VMX_VMCS_CTRL_EPTP_FULL, pVCpu->hm.s.vmx.GCPhysEPTP);
    20212021            AssertRC(rc);
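GCPhysEPTP as composed above is the EPT pointer: a 4KB-aligned PML4 physical address with attribute bits in the low 12 bits. A sketch of the composition, assuming the SDM encoding (memory type in bits 2:0, page-walk length minus one in bits 5:3); constants are written out as assumptions:

    #include <stdint.h>

    #define SKETCH_EPT_MEMTYPE_WB      UINT64_C(6)         /* write-back */
    #define SKETCH_EPT_WALK_4_LEVELS   (UINT64_C(3) << 3)  /* (levels - 1) in bits 5:3 */

    static uint64_t sketchMakeEptp(uint64_t GCPhysPml4)
    {
        /* Mirrors the alignment Assert above: bits 0-11 must start out clear. */
        return (GCPhysPml4 & ~UINT64_C(0xfff)) | SKETCH_EPT_MEMTYPE_WB | SKETCH_EPT_WALK_4_LEVELS;
    }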
    20222022
    20232023            if (    !CPUMIsGuestInPagedProtectedModeEx(pCtx)
    2024                 &&  !pVM->hwaccm.s.vmx.fUnrestrictedGuest)
     2024                &&  !pVM->hm.s.vmx.fUnrestrictedGuest)
    20252025            {
    20262026                RTGCPHYS GCPhys;
    20272027
    20282028                /* We convert it here every time as PCI regions could be reconfigured. */
    2029                 rc = PDMVMMDevHeapR3ToGCPhys(pVM, pVM->hwaccm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
    2030                 AssertMsgRC(rc, ("pNonPagingModeEPTPageTable = %RGv\n", pVM->hwaccm.s.vmx.pNonPagingModeEPTPageTable));
     2029                rc = PDMVMMDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
     2030                AssertMsgRC(rc, ("pNonPagingModeEPTPageTable = %RGv\n", pVM->hm.s.vmx.pNonPagingModeEPTPageTable));
    20312031
    20322032                /*
     
    20582058     * Guest CPU context: Debug registers.
    20592059     */
    2060     if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_DEBUG)
     2060    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_DEBUG)
    20612061    {
    20622062        pCtx->dr[6] |= X86_DR6_INIT_VAL;                                          /* set all reserved bits to 1. */
     
    20942094            &&  !DBGFIsStepping(pVCpu))
    20952095        {
    2096             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatDRxArmed);
     2096            STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
    20972097
    20982098            /* Disable DRx move intercepts. */
    2099             pVCpu->hwaccm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT;
    2100             rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hwaccm.s.vmx.proc_ctls);
     2099            pVCpu->hm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT;
     2100            rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.proc_ctls);
    21012101            AssertRC(rc);
    21022102
     
    21232123        return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
    21242124#elif HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    2125         pVCpu->hwaccm.s.vmx.pfnStartVM  = VMXR0SwitcherStartVM64;
     2125        pVCpu->hm.s.vmx.pfnStartVM  = VMXR0SwitcherStartVM64;
    21262126#else
    21272127# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    2128         if (!pVM->hwaccm.s.fAllow64BitGuests)
     2128        if (!pVM->hm.s.fAllow64BitGuests)
    21292129            return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
    21302130# endif
    2131         pVCpu->hwaccm.s.vmx.pfnStartVM  = VMXR0StartVM64;
    2132 #endif
    2133         if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_MSR)
     2131        pVCpu->hm.s.vmx.pfnStartVM  = VMXR0StartVM64;
     2132#endif
     2133        if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_MSR)
    21342134        {
    21352135            /* Update these as wrmsr might have changed them. */
     
    21422142    else
    21432143    {
    2144         pVCpu->hwaccm.s.vmx.pfnStartVM  = VMXR0StartVM32;
     2144        pVCpu->hm.s.vmx.pfnStartVM  = VMXR0StartVM32;
    21452145    }
    21462146
     
    21522152     * during VM-entry and restored into the VM-exit store area during VM-exit.
    21532153     */
    2154     PVMXMSR pMsr = (PVMXMSR)pVCpu->hwaccm.s.vmx.pGuestMSR;
     2154    PVMXMSR pMsr = (PVMXMSR)pVCpu->hm.s.vmx.pGuestMSR;
    21552155    unsigned idxMsr = 0;
    21562156
     
    21962196    }
    21972197
    2198     if (   pVCpu->hwaccm.s.vmx.proc_ctls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP
     2198    if (   pVCpu->hm.s.vmx.proc_ctls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP
    21992199        && (u32GstExtFeatures & X86_CPUID_EXT_FEATURE_EDX_RDTSCP))
    22002200    {
     
    22062206    }
    22072207
    2208     pVCpu->hwaccm.s.vmx.cCachedMSRs = idxMsr;
     2208    pVCpu->hm.s.vmx.cCachedMSRs = idxMsr;
    22092209
    22102210    rc = VMXWriteVMCS(VMX_VMCS_CTRL_ENTRY_MSR_LOAD_COUNT, idxMsr);
     
    22162216
    22172217    bool fOffsettedTsc;
    2218     if (pVM->hwaccm.s.vmx.fUsePreemptTimer)
    2219     {
    2220         uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVCpu, &fOffsettedTsc, &pVCpu->hwaccm.s.vmx.u64TSCOffset);
     2218    if (pVM->hm.s.vmx.fUsePreemptTimer)
     2219    {
     2220        uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVCpu, &fOffsettedTsc, &pVCpu->hm.s.vmx.u64TSCOffset);
    22212221
    22222222        /* Make sure the returned values have sane upper and lower boundaries. */
     
    22262226        cTicksToDeadline = RT_MAX(cTicksToDeadline, u64CpuHz / 2048); /* 1/2048th of a second */
    22272227
    2228         cTicksToDeadline >>= pVM->hwaccm.s.vmx.cPreemptTimerShift;
     2228        cTicksToDeadline >>= pVM->hm.s.vmx.cPreemptTimerShift;
    22292229        uint32_t cPreemptionTickCount = (uint32_t)RT_MIN(cTicksToDeadline, UINT32_MAX - 16);
    22302230        rc = VMXWriteVMCS(VMX_VMCS32_GUEST_PREEMPTION_TIMER_VALUE, cPreemptionTickCount);
     
    22322232    }
    22332233    else
    2234         fOffsettedTsc = TMCpuTickCanUseRealTSC(pVCpu, &pVCpu->hwaccm.s.vmx.u64TSCOffset);
     2234        fOffsettedTsc = TMCpuTickCanUseRealTSC(pVCpu, &pVCpu->hm.s.vmx.u64TSCOffset);
    22352235
    22362236    if (fOffsettedTsc)
    22372237    {
    22382238        uint64_t u64CurTSC = ASMReadTSC();
    2239         if (u64CurTSC + pVCpu->hwaccm.s.vmx.u64TSCOffset >= TMCpuTickGetLastSeen(pVCpu))
     2239        if (u64CurTSC + pVCpu->hm.s.vmx.u64TSCOffset >= TMCpuTickGetLastSeen(pVCpu))
    22402240        {
    22412241            /* Note: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT takes precedence over TSC_OFFSET, applies to RDTSCP too. */
    2242             rc = VMXWriteVMCS64(VMX_VMCS_CTRL_TSC_OFFSET_FULL, pVCpu->hwaccm.s.vmx.u64TSCOffset);
     2242            rc = VMXWriteVMCS64(VMX_VMCS_CTRL_TSC_OFFSET_FULL, pVCpu->hm.s.vmx.u64TSCOffset);
    22432243            AssertRC(rc);
    22442244
    2245             pVCpu->hwaccm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT;
    2246             rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hwaccm.s.vmx.proc_ctls);
     2245            pVCpu->hm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT;
     2246            rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.proc_ctls);
    22472247            AssertRC(rc);
    2248             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTSCOffset);
     2248            STAM_COUNTER_INC(&pVCpu->hm.s.StatTSCOffset);
    22492249        }
    22502250        else
     
    22522252            /* Fall back to rdtsc, rdtscp emulation as we would otherwise pass decreasing tsc values to the guest. */
    22532253            LogFlow(("TSC %RX64 offset %RX64 time=%RX64 last=%RX64 (diff=%RX64, virt_tsc=%RX64)\n", u64CurTSC,
    2254                      pVCpu->hwaccm.s.vmx.u64TSCOffset, u64CurTSC + pVCpu->hwaccm.s.vmx.u64TSCOffset,
    2255                      TMCpuTickGetLastSeen(pVCpu), TMCpuTickGetLastSeen(pVCpu) - u64CurTSC - pVCpu->hwaccm.s.vmx.u64TSCOffset,
     2254                     pVCpu->hm.s.vmx.u64TSCOffset, u64CurTSC + pVCpu->hm.s.vmx.u64TSCOffset,
     2255                     TMCpuTickGetLastSeen(pVCpu), TMCpuTickGetLastSeen(pVCpu) - u64CurTSC - pVCpu->hm.s.vmx.u64TSCOffset,
    22562256                     TMCpuTickGet(pVCpu)));
    2257             pVCpu->hwaccm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT;
    2258             rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hwaccm.s.vmx.proc_ctls);
     2257            pVCpu->hm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT;
     2258            rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.proc_ctls);
    22592259            AssertRC(rc);
    2260             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTSCInterceptOverFlow);
     2260            STAM_COUNTER_INC(&pVCpu->hm.s.StatTSCInterceptOverFlow);
    22612261        }
    22622262    }
    22632263    else
    22642264    {
    2265         pVCpu->hwaccm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT;
    2266         rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hwaccm.s.vmx.proc_ctls);
     2265        pVCpu->hm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT;
     2266        rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.proc_ctls);
    22672267        AssertRC(rc);
    2268         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTSCIntercept);
     2268        STAM_COUNTER_INC(&pVCpu->hm.s.StatTSCIntercept);
    22692269    }
    22702270
    22712271    /* Done with the major changes */
    2272     pVCpu->hwaccm.s.fContextUseFlags &= ~HWACCM_CHANGED_ALL_GUEST;
     2272    pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_ALL_GUEST;
    22732273
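The TSC block above reduces to one monotonicity test: offsetting is only safe while the guest cannot observe the TSC moving backwards; otherwise RDTSC(P) must exit and be emulated. A sketch of that test, with illustrative names:

    #include <stdint.h>
    #include <stdbool.h>

    /* True if the guest can run with TSC_OFFSET applied and RDTSC exits disabled. */
    static bool sketchCanUseTscOffset(uint64_t u64CurTsc, uint64_t u64Offset, uint64_t u64LastSeenByGuest)
    {
        return u64CurTsc + u64Offset >= u64LastSeenByGuest;
    }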
    22742274    /* Minimal guest state update (ESP, EIP, EFLAGS mostly) */
     
    23182318    VMXReadCachedVMCS(VMX_VMCS_CTRL_CR0_READ_SHADOW,     &valShadow);
    23192319    VMXReadCachedVMCS(VMX_VMCS64_GUEST_CR0,              &val);
    2320     val = (valShadow & pVCpu->hwaccm.s.vmx.cr0_mask) | (val & ~pVCpu->hwaccm.s.vmx.cr0_mask);
     2320    val = (valShadow & pVCpu->hm.s.vmx.cr0_mask) | (val & ~pVCpu->hm.s.vmx.cr0_mask);
    23212321    CPUMSetGuestCR0(pVCpu, val);
    23222322
    23232323    VMXReadCachedVMCS(VMX_VMCS_CTRL_CR4_READ_SHADOW,     &valShadow);
    23242324    VMXReadCachedVMCS(VMX_VMCS64_GUEST_CR4,              &val);
    2325     val = (valShadow & pVCpu->hwaccm.s.vmx.cr4_mask) | (val & ~pVCpu->hwaccm.s.vmx.cr4_mask);
     2325    val = (valShadow & pVCpu->hm.s.vmx.cr4_mask) | (val & ~pVCpu->hm.s.vmx.cr4_mask);
    23262326    CPUMSetGuestCR4(pVCpu, val);
    23272327
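The two "val = (valShadow & mask) | (val & ~mask)" lines above implement the CR0/CR4 guest/host mask semantics: host-owned bits (set in the mask) are reported from the read shadow, guest-owned bits come from the live VMCS value. As a standalone sketch:

    #include <stdint.h>

    /* Reconstruct the guest-visible CR0/CR4 value from shadow + VMCS value + owner mask. */
    static uint64_t sketchGuestVisibleCrX(uint64_t uReadShadow, uint64_t uVmcsVal, uint64_t fOwnerMask)
    {
        return (uReadShadow & fOwnerMask) | (uVmcsVal & ~fOwnerMask);
    }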
     
    23302330     * the nested paging case where CR3 & CR4 can be changed by the guest.
    23312331     */
    2332     if (   pVM->hwaccm.s.fNestedPaging
     2332    if (   pVM->hm.s.fNestedPaging
    23332333        && CPUMIsGuestInPagedProtectedModeEx(pCtx)) /** @todo check if we will always catch mode switches and such... */
    23342334    {
    2335         PVMCSCACHE pCache = &pVCpu->hwaccm.s.vmx.VMCSCache;
     2335        PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
    23362336
    23372337        /* Can be updated behind our back in the nested paging case. */
     
    23842384    /* Real mode emulation using v86 mode. */
    23852385    if (    CPUMIsGuestInRealModeEx(pCtx)
    2386         &&  pVM->hwaccm.s.vmx.pRealModeTSS)
     2386        &&  pVM->hm.s.vmx.pRealModeTSS)
    23872387    {
    23882388        /* Hide our emulation flags */
     
    23902390
    23912391        /* Restore original IOPL setting as we always use 0. */
    2392         pCtx->eflags.Bits.u2IOPL = pVCpu->hwaccm.s.vmx.RealMode.eflags.Bits.u2IOPL;
     2392        pCtx->eflags.Bits.u2IOPL = pVCpu->hm.s.vmx.RealMode.eflags.Bits.u2IOPL;
    23932393
    23942394        /* Force a TR resync every time in case we switch modes. */
    2395         pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_TR;
     2395        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_TR;
    23962396    }
    23972397    else
     
    24052405     * Save the possibly changed MSRs that we automatically restore and save during a world switch.
    24062406     */
    2407     for (unsigned i = 0; i < pVCpu->hwaccm.s.vmx.cCachedMSRs; i++)
    2408     {
    2409         PVMXMSR pMsr = (PVMXMSR)pVCpu->hwaccm.s.vmx.pGuestMSR;
     2407    for (unsigned i = 0; i < pVCpu->hm.s.vmx.cCachedMSRs; i++)
     2408    {
     2409        PVMXMSR pMsr = (PVMXMSR)pVCpu->hm.s.vmx.pGuestMSR;
    24102410        pMsr += i;
    24112411
     
    24582458    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH);
    24592459    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
    2460     pVCpu->hwaccm.s.TlbShootdown.cPages = 0;
     2460    pVCpu->hm.s.TlbShootdown.cPages = 0;
    24612461    return;
    24622462}
     
    24732473    PHMGLOBLCPUINFO pCpu;
    24742474
    2475     Assert(pVM->hwaccm.s.fNestedPaging && pVM->hwaccm.s.vmx.fVPID);
    2476 
    2477     pCpu = HWACCMR0GetCurrentCpu();
     2475    Assert(pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVPID);
     2476
     2477    pCpu = HMR0GetCurrentCpu();
    24782478
    24792479    /*
     
    24842484     */
    24852485    bool fNewASID = false;
    2486     if (   pVCpu->hwaccm.s.idLastCpu != pCpu->idCpu
    2487         || pVCpu->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
    2488     {
    2489         pVCpu->hwaccm.s.fForceTLBFlush = true;
     2486    if (   pVCpu->hm.s.idLastCpu != pCpu->idCpu
     2487        || pVCpu->hm.s.cTLBFlushes != pCpu->cTLBFlushes)
     2488    {
     2489        pVCpu->hm.s.fForceTLBFlush = true;
    24902490        fNewASID = true;
    24912491    }
     
    24952495     */
    24962496    if (VMCPU_FF_TESTANDCLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
    2497         pVCpu->hwaccm.s.fForceTLBFlush = true;
    2498 
    2499     pVCpu->hwaccm.s.idLastCpu = pCpu->idCpu;
    2500 
    2501     if (pVCpu->hwaccm.s.fForceTLBFlush)
     2497        pVCpu->hm.s.fForceTLBFlush = true;
     2498
     2499    pVCpu->hm.s.idLastCpu = pCpu->idCpu;
     2500
     2501    if (pVCpu->hm.s.fForceTLBFlush)
    25022502    {
    25032503        if (fNewASID)
    25042504        {
    25052505            ++pCpu->uCurrentASID;
    2506             if (pCpu->uCurrentASID >= pVM->hwaccm.s.uMaxASID)
     2506            if (pCpu->uCurrentASID >= pVM->hm.s.uMaxASID)
    25072507            {
    25082508                pCpu->uCurrentASID = 1;       /* start at 1; host uses 0 */
     
    25112511            }
    25122512
    2513             pVCpu->hwaccm.s.uCurrentASID = pCpu->uCurrentASID;
     2513            pVCpu->hm.s.uCurrentASID = pCpu->uCurrentASID;
    25142514            if (pCpu->fFlushASIDBeforeUse)
    25152515            {
    2516                 hmR0VmxFlushVPID(pVM, pVCpu, pVM->hwaccm.s.vmx.enmFlushVPID, 0 /* GCPtr */);
     2516                hmR0VmxFlushVPID(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVPID, 0 /* GCPtr */);
    25172517#ifdef VBOX_WITH_STATISTICS
    2518                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushASID);
     2518                STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushASID);
    25192519#endif
    25202520            }
     
    25222522        else
    25232523        {
    2524             if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_SINGLE_CONTEXT)
     2524            if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_SINGLE_CONTEXT)
    25252525                hmR0VmxFlushVPID(pVM, pVCpu, VMX_FLUSH_VPID_SINGLE_CONTEXT, 0 /* GCPtr */);
    25262526            else
    2527                 hmR0VmxFlushEPT(pVM, pVCpu, pVM->hwaccm.s.vmx.enmFlushEPT);
     2527                hmR0VmxFlushEPT(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEPT);
    25282528
    25292529#ifdef VBOX_WITH_STATISTICS
     
    25322532             * as ASID flushes too, better than including them under StatFlushTLBWorldSwitch.
    25332533             */
    2534             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushASID);
    2535 #endif
    2536         }
    2537 
    2538         pVCpu->hwaccm.s.cTLBFlushes    = pCpu->cTLBFlushes;
    2539         pVCpu->hwaccm.s.fForceTLBFlush = false;
     2534            STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushASID);
     2535#endif
     2536        }
     2537
     2538        pVCpu->hm.s.cTLBFlushes    = pCpu->cTLBFlushes;
     2539        pVCpu->hm.s.fForceTLBFlush = false;
    25402540    }
    25412541    else
    25422542    {
    2543         AssertMsg(pVCpu->hwaccm.s.uCurrentASID && pCpu->uCurrentASID,
    2544                   ("hwaccm->uCurrentASID=%lu hwaccm->cTLBFlushes=%lu cpu->uCurrentASID=%lu cpu->cTLBFlushes=%lu\n",
    2545                    pVCpu->hwaccm.s.uCurrentASID, pVCpu->hwaccm.s.cTLBFlushes,
     2543        AssertMsg(pVCpu->hm.s.uCurrentASID && pCpu->uCurrentASID,
     2544                  ("hm->uCurrentASID=%lu hm->cTLBFlushes=%lu cpu->uCurrentASID=%lu cpu->cTLBFlushes=%lu\n",
     2545                   pVCpu->hm.s.uCurrentASID, pVCpu->hm.s.cTLBFlushes,
    25462546                   pCpu->uCurrentASID, pCpu->cTLBFlushes));
    25472547
    25482548        /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
    2549          *        not be executed. See hwaccmQueueInvlPage() where it is commented
     2549         *        not be executed. See hmQueueInvlPage() where it is commented
    25502550         *        out. Support individual entry flushing someday. */
    25512551        if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
    25522552        {
    2553             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTlbShootdown);
     2553            STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
    25542554
    25552555            /*
     
    25572557             * as supported by the CPU.
    25582558             */
    2559             if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV_ADDR)
     2559            if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV_ADDR)
    25602560            {
    2561                 for (unsigned i = 0; i < pVCpu->hwaccm.s.TlbShootdown.cPages; i++)
    2562                     hmR0VmxFlushVPID(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, pVCpu->hwaccm.s.TlbShootdown.aPages[i]);
     2561                for (unsigned i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
     2562                    hmR0VmxFlushVPID(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]);
    25632563            }
    25642564            else
    2565                 hmR0VmxFlushEPT(pVM, pVCpu, pVM->hwaccm.s.vmx.enmFlushEPT);
     2565                hmR0VmxFlushEPT(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEPT);
    25662566        }
    25672567        else
    25682568        {
    25692569#ifdef VBOX_WITH_STATISTICS
    2570             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatNoFlushTLBWorldSwitch);
    2571 #endif
    2572         }
    2573     }
    2574     pVCpu->hwaccm.s.TlbShootdown.cPages = 0;
     2570            STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTLBWorldSwitch);
     2571#endif
     2572        }
     2573    }
     2574    pVCpu->hm.s.TlbShootdown.cPages = 0;
    25752575    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
    25762576
    2577     AssertMsg(pVCpu->hwaccm.s.cTLBFlushes == pCpu->cTLBFlushes,
    2578               ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hwaccm.s.cTLBFlushes, pCpu->cTLBFlushes));
    2579     AssertMsg(pCpu->uCurrentASID >= 1 && pCpu->uCurrentASID < pVM->hwaccm.s.uMaxASID,
     2577    AssertMsg(pVCpu->hm.s.cTLBFlushes == pCpu->cTLBFlushes,
     2578              ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hm.s.cTLBFlushes, pCpu->cTLBFlushes));
     2579    AssertMsg(pCpu->uCurrentASID >= 1 && pCpu->uCurrentASID < pVM->hm.s.uMaxASID,
    25802580              ("cpu%d uCurrentASID = %x\n", pCpu->idCpu, pCpu->uCurrentASID));
    2581     AssertMsg(pVCpu->hwaccm.s.uCurrentASID >= 1 && pVCpu->hwaccm.s.uCurrentASID < pVM->hwaccm.s.uMaxASID,
    2582               ("cpu%d VM uCurrentASID = %x\n", pCpu->idCpu, pVCpu->hwaccm.s.uCurrentASID));
     2581    AssertMsg(pVCpu->hm.s.uCurrentASID >= 1 && pVCpu->hm.s.uCurrentASID < pVM->hm.s.uMaxASID,
     2582              ("cpu%d VM uCurrentASID = %x\n", pCpu->idCpu, pVCpu->hm.s.uCurrentASID));
    25832583
    25842584    /* Update VMCS with the VPID. */
    2585     int rc  = VMXWriteVMCS(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hwaccm.s.uCurrentASID);
     2585    int rc  = VMXWriteVMCS(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentASID);
    25862586    AssertRC(rc);
    25872587}
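The ASID bookkeeping in hmR0VmxSetupTLBBoth (and its EPT-only and VPID-only siblings below) follows one scheme: each host CPU keeps a running uCurrentASID plus a flush generation cTLBFlushes, and a VCPU that migrated CPUs or sees a generation bump takes a fresh ASID. A condensed model, with struct and function names that are illustrative rather than VBox's:

    #include <stdint.h>
    #include <stdbool.h>

    typedef struct { uint32_t idCpu, cTLBFlushes, uCurrentASID; } SKETCHHOSTCPU;
    typedef struct { uint32_t idLastCpu, cTLBFlushes, uCurrentASID; bool fForceTLBFlush; } SKETCHVCPU;

    static void sketchAssignAsid(SKETCHVCPU *pVCpu, SKETCHHOSTCPU *pCpu, uint32_t uMaxASID)
    {
        /* Rescheduled to another CPU, or that CPU flushed since we last ran here. */
        if (pVCpu->idLastCpu != pCpu->idCpu || pVCpu->cTLBFlushes != pCpu->cTLBFlushes)
            pVCpu->fForceTLBFlush = true;
        pVCpu->idLastCpu = pCpu->idCpu;

        if (pVCpu->fForceTLBFlush)
        {
            if (++pCpu->uCurrentASID >= uMaxASID)
            {
                pCpu->uCurrentASID = 1;   /* ASID 0 belongs to the host */
                pCpu->cTLBFlushes++;      /* new generation: every VCPU must reflush */
            }
            pVCpu->uCurrentASID   = pCpu->uCurrentASID;
            pVCpu->cTLBFlushes    = pCpu->cTLBFlushes;
            pVCpu->fForceTLBFlush = false;
        }
    }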
     
    25992599    PHMGLOBLCPUINFO pCpu;
    26002600
    2601     Assert(pVM->hwaccm.s.fNestedPaging);
    2602     Assert(!pVM->hwaccm.s.vmx.fVPID);
    2603 
    2604     pCpu = HWACCMR0GetCurrentCpu();
     2601    Assert(pVM->hm.s.fNestedPaging);
     2602    Assert(!pVM->hm.s.vmx.fVPID);
     2603
     2604    pCpu = HMR0GetCurrentCpu();
    26052605
    26062606    /*
     
    26092609     * A change in the TLB flush count implies the host Cpu is online after a suspend/resume.
    26102610     */
    2611     if (   pVCpu->hwaccm.s.idLastCpu != pCpu->idCpu
    2612         || pVCpu->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
    2613     {
    2614         pVCpu->hwaccm.s.fForceTLBFlush = true;
     2611    if (   pVCpu->hm.s.idLastCpu != pCpu->idCpu
     2612        || pVCpu->hm.s.cTLBFlushes != pCpu->cTLBFlushes)
     2613    {
     2614        pVCpu->hm.s.fForceTLBFlush = true;
    26152615    }
    26162616
     
    26192619     */
    26202620    if (VMCPU_FF_TESTANDCLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
    2621         pVCpu->hwaccm.s.fForceTLBFlush = true;
    2622 
    2623     pVCpu->hwaccm.s.idLastCpu   = pCpu->idCpu;
    2624     pVCpu->hwaccm.s.cTLBFlushes = pCpu->cTLBFlushes;
    2625 
    2626     if (pVCpu->hwaccm.s.fForceTLBFlush)
    2627         hmR0VmxFlushEPT(pVM, pVCpu, pVM->hwaccm.s.vmx.enmFlushEPT);
     2621        pVCpu->hm.s.fForceTLBFlush = true;
     2622
     2623    pVCpu->hm.s.idLastCpu   = pCpu->idCpu;
     2624    pVCpu->hm.s.cTLBFlushes = pCpu->cTLBFlushes;
     2625
     2626    if (pVCpu->hm.s.fForceTLBFlush)
     2627        hmR0VmxFlushEPT(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEPT);
    26282628    else
    26292629    {
    26302630        /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
    2631          *        not be executed. See hwaccmQueueInvlPage() where it is commented
     2631         *        not be executed. See hmQueueInvlPage() where it is commented
    26322632         *        out. Support individual entry flushing someday. */
    26332633        if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
     
    26362636             * We cannot flush individual entries without VPID support. Flush using EPT.
    26372637             */
    2638             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTlbShootdown);
    2639             hmR0VmxFlushEPT(pVM, pVCpu, pVM->hwaccm.s.vmx.enmFlushEPT);
    2640         }
    2641     }
    2642     pVCpu->hwaccm.s.TlbShootdown.cPages= 0;
     2638            STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
     2639            hmR0VmxFlushEPT(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEPT);
     2640        }
     2641    }
     2642    pVCpu->hm.s.TlbShootdown.cPages = 0;
    26432643    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
    26442644
    26452645#ifdef VBOX_WITH_STATISTICS
    2646     if (pVCpu->hwaccm.s.fForceTLBFlush)
    2647         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushTLBWorldSwitch);
     2646    if (pVCpu->hm.s.fForceTLBFlush)
     2647        STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTLBWorldSwitch);
    26482648    else
    2649         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatNoFlushTLBWorldSwitch);
     2649        STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTLBWorldSwitch);
    26502650#endif
    26512651}
     
    26632663    PHMGLOBLCPUINFO pCpu;
    26642664
    2665     Assert(pVM->hwaccm.s.vmx.fVPID);
    2666     Assert(!pVM->hwaccm.s.fNestedPaging);
    2667 
    2668     pCpu = HWACCMR0GetCurrentCpu();
     2665    Assert(pVM->hm.s.vmx.fVPID);
     2666    Assert(!pVM->hm.s.fNestedPaging);
     2667
     2668    pCpu = HMR0GetCurrentCpu();
    26692669
    26702670    /*
     
    26742674     * or the host Cpu is online after a suspend/resume, so we cannot reuse the current ASID anymore.
    26752675     */
    2676     if (   pVCpu->hwaccm.s.idLastCpu != pCpu->idCpu
    2677         || pVCpu->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
     2676    if (   pVCpu->hm.s.idLastCpu != pCpu->idCpu
     2677        || pVCpu->hm.s.cTLBFlushes != pCpu->cTLBFlushes)
    26782678    {
    26792679        /* Force a TLB flush on VM entry. */
    2680         pVCpu->hwaccm.s.fForceTLBFlush = true;
     2680        pVCpu->hm.s.fForceTLBFlush = true;
    26812681    }
    26822682
     
    26852685     */
    26862686    if (VMCPU_FF_TESTANDCLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
    2687         pVCpu->hwaccm.s.fForceTLBFlush = true;
    2688 
    2689     pVCpu->hwaccm.s.idLastCpu = pCpu->idCpu;
    2690 
    2691     if (pVCpu->hwaccm.s.fForceTLBFlush)
     2687        pVCpu->hm.s.fForceTLBFlush = true;
     2688
     2689    pVCpu->hm.s.idLastCpu = pCpu->idCpu;
     2690
     2691    if (pVCpu->hm.s.fForceTLBFlush)
    26922692    {
    26932693        ++pCpu->uCurrentASID;
    2694         if (pCpu->uCurrentASID >= pVM->hwaccm.s.uMaxASID)
     2694        if (pCpu->uCurrentASID >= pVM->hm.s.uMaxASID)
    26952695        {
    26962696            pCpu->uCurrentASID               = 1;       /* start at 1; host uses 0 */
     
    26992699        }
    27002700        else
    2701             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushASID);
    2702 
    2703         pVCpu->hwaccm.s.fForceTLBFlush = false;
    2704         pVCpu->hwaccm.s.cTLBFlushes    = pCpu->cTLBFlushes;
    2705         pVCpu->hwaccm.s.uCurrentASID   = pCpu->uCurrentASID;
     2701            STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushASID);
     2702
     2703        pVCpu->hm.s.fForceTLBFlush = false;
     2704        pVCpu->hm.s.cTLBFlushes    = pCpu->cTLBFlushes;
     2705        pVCpu->hm.s.uCurrentASID   = pCpu->uCurrentASID;
    27062706        if (pCpu->fFlushASIDBeforeUse)
    2707             hmR0VmxFlushVPID(pVM, pVCpu, pVM->hwaccm.s.vmx.enmFlushVPID, 0 /* GCPtr */);
     2707            hmR0VmxFlushVPID(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVPID, 0 /* GCPtr */);
    27082708    }
    27092709    else
    27102710    {
    2711         AssertMsg(pVCpu->hwaccm.s.uCurrentASID && pCpu->uCurrentASID,
    2712                   ("hwaccm->uCurrentASID=%lu hwaccm->cTLBFlushes=%lu cpu->uCurrentASID=%lu cpu->cTLBFlushes=%lu\n",
    2713                    pVCpu->hwaccm.s.uCurrentASID, pVCpu->hwaccm.s.cTLBFlushes,
     2711        AssertMsg(pVCpu->hm.s.uCurrentASID && pCpu->uCurrentASID,
     2712                  ("hm->uCurrentASID=%lu hm->cTLBFlushes=%lu cpu->uCurrentASID=%lu cpu->cTLBFlushes=%lu\n",
     2713                   pVCpu->hm.s.uCurrentASID, pVCpu->hm.s.cTLBFlushes,
    27142714                   pCpu->uCurrentASID, pCpu->cTLBFlushes));
    27152715
    27162716        /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
    2717          *        not be executed. See hwaccmQueueInvlPage() where it is commented
     2717         *        not be executed. See hmQueueInvlPage() where it is commented
    27182718         *        out. Support individual entry flushing someday. */
    27192719        if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
     
    27232723             * as supported by the CPU.
    27242724             */
    2725             if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV_ADDR)
     2725            if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV_ADDR)
    27262726            {
    2727                 for (unsigned i = 0; i < pVCpu->hwaccm.s.TlbShootdown.cPages; i++)
    2728                     hmR0VmxFlushVPID(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, pVCpu->hwaccm.s.TlbShootdown.aPages[i]);
     2727                for (unsigned i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
     2728                    hmR0VmxFlushVPID(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]);
    27292729            }
    27302730            else
    2731                 hmR0VmxFlushVPID(pVM, pVCpu, pVM->hwaccm.s.vmx.enmFlushVPID, 0 /* GCPtr */);
    2732         }
    2733     }
    2734     pVCpu->hwaccm.s.TlbShootdown.cPages = 0;
     2731                hmR0VmxFlushVPID(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVPID, 0 /* GCPtr */);
     2732        }
     2733    }
     2734    pVCpu->hm.s.TlbShootdown.cPages = 0;
    27352735    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
    27362736
    2737     AssertMsg(pVCpu->hwaccm.s.cTLBFlushes == pCpu->cTLBFlushes,
    2738               ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hwaccm.s.cTLBFlushes, pCpu->cTLBFlushes));
    2739     AssertMsg(pCpu->uCurrentASID >= 1 && pCpu->uCurrentASID < pVM->hwaccm.s.uMaxASID,
     2737    AssertMsg(pVCpu->hm.s.cTLBFlushes == pCpu->cTLBFlushes,
     2738              ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hm.s.cTLBFlushes, pCpu->cTLBFlushes));
     2739    AssertMsg(pCpu->uCurrentASID >= 1 && pCpu->uCurrentASID < pVM->hm.s.uMaxASID,
    27402740              ("cpu%d uCurrentASID = %x\n", pCpu->idCpu, pCpu->uCurrentASID));
    2741     AssertMsg(pVCpu->hwaccm.s.uCurrentASID >= 1 && pVCpu->hwaccm.s.uCurrentASID < pVM->hwaccm.s.uMaxASID,
    2742               ("cpu%d VM uCurrentASID = %x\n", pCpu->idCpu, pVCpu->hwaccm.s.uCurrentASID));
    2743 
    2744     int rc  = VMXWriteVMCS(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hwaccm.s.uCurrentASID);
     2741    AssertMsg(pVCpu->hm.s.uCurrentASID >= 1 && pVCpu->hm.s.uCurrentASID < pVM->hm.s.uMaxASID,
     2742              ("cpu%d VM uCurrentASID = %x\n", pCpu->idCpu, pVCpu->hm.s.uCurrentASID));
     2743
     2744    int rc  = VMXWriteVMCS(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentASID);
    27452745    AssertRC(rc);
    27462746
    27472747# ifdef VBOX_WITH_STATISTICS
    2748     if (pVCpu->hwaccm.s.fForceTLBFlush)
    2749         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushTLBWorldSwitch);
     2748    if (pVCpu->hm.s.fForceTLBFlush)
     2749        STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTLBWorldSwitch);
    27502750    else
    2751         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatNoFlushTLBWorldSwitch);
     2751        STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTLBWorldSwitch);
    27522752# endif
    27532753}
     
    27642764VMMR0DECL(int) VMXR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
    27652765{
    2766     STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatEntry, x);
    2767     STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hwaccm.s.StatExit1);
    2768     STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hwaccm.s.StatExit2);
     2766    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
     2767    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit1);
     2768    STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit2);
    27692769
    27702770    VBOXSTRICTRC rc = VINF_SUCCESS;
     
    27892789#endif
    27902790
    2791     Assert(!(pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
    2792            || (pVCpu->hwaccm.s.vmx.pbVAPIC && pVM->hwaccm.s.vmx.pAPIC));
     2791    Assert(!(pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
     2792           || (pVCpu->hm.s.vmx.pbVAPIC && pVM->hm.s.vmx.pAPIC));
    27932793
    27942794    /*
     
    27962796     */
    27972797    if (    CPUMIsGuestInLongModeEx(pCtx)
    2798         || (   ((   pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
    2799                  || pVM->hwaccm.s.fTRPPatchingAllowed)
    2800             &&  pVM->hwaccm.s.fHasIoApic)
     2798        || (   ((   pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
     2799                 || pVM->hm.s.fTRPPatchingAllowed)
     2800            &&  pVM->hm.s.fHasIoApic)
    28012801       )
    28022802    {
     
    28072807
    28082808    /* This is not ideal, but if we don't clear the event injection in the VMCS right here,
    2809      * we may end up injecting some stale event into a VM, including injecting an event that 
     2809     * we may end up injecting some stale event into a VM, including injecting an event that
    28102810     * originated before a VM reset *after* the VM has been reset. See @bugref{6220}.
    28112811     */
     
    28212821
    28222822        /* allowed zero */
    2823         if ((val2 & pVM->hwaccm.s.vmx.msr.vmx_pin_ctls.n.disallowed0) != pVM->hwaccm.s.vmx.msr.vmx_pin_ctls.n.disallowed0)
     2823        if ((val2 & pVM->hm.s.vmx.msr.vmx_pin_ctls.n.disallowed0) != pVM->hm.s.vmx.msr.vmx_pin_ctls.n.disallowed0)
    28242824            Log(("Invalid VMX_VMCS_CTRL_PIN_EXEC_CONTROLS: zero\n"));
    28252825
    28262826        /* allowed one */
    2827         if ((val2 & ~pVM->hwaccm.s.vmx.msr.vmx_pin_ctls.n.allowed1) != 0)
     2827        if ((val2 & ~pVM->hm.s.vmx.msr.vmx_pin_ctls.n.allowed1) != 0)
    28282828            Log(("Invalid VMX_VMCS_CTRL_PIN_EXEC_CONTROLS: one\n"));
    28292829
     
    28352835         * Must be set according to the MSR, but can be cleared if nested paging is used.
    28362836         */
    2837         if (pVM->hwaccm.s.fNestedPaging)
     2837        if (pVM->hm.s.fNestedPaging)
    28382838        {
    28392839            val2 |=   VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT
     
    28432843
    28442844        /* allowed zero */
    2845         if ((val2 & pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.disallowed0) != pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.disallowed0)
     2845        if ((val2 & pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0) != pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0)
    28462846            Log(("Invalid VMX_VMCS_CTRL_PROC_EXEC_CONTROLS: zero\n"));
    28472847
    28482848        /* allowed one */
    2849         if ((val2 & ~pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1) != 0)
     2849        if ((val2 & ~pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1) != 0)
    28502850            Log(("Invalid VMX_VMCS_CTRL_PROC_EXEC_CONTROLS: one\n"));
    28512851
     
    28552855
    28562856        /* allowed zero */
    2857         if ((val2 & pVM->hwaccm.s.vmx.msr.vmx_entry.n.disallowed0) != pVM->hwaccm.s.vmx.msr.vmx_entry.n.disallowed0)
     2857        if ((val2 & pVM->hm.s.vmx.msr.vmx_entry.n.disallowed0) != pVM->hm.s.vmx.msr.vmx_entry.n.disallowed0)
    28582858            Log(("Invalid VMX_VMCS_CTRL_ENTRY_CONTROLS: zero\n"));
    28592859
    28602860        /* allowed one */
    2861         if ((val2 & ~pVM->hwaccm.s.vmx.msr.vmx_entry.n.allowed1) != 0)
     2861        if ((val2 & ~pVM->hm.s.vmx.msr.vmx_entry.n.allowed1) != 0)
    28622862            Log(("Invalid VMX_VMCS_CTRL_ENTRY_CONTROLS: one\n"));
    28632863
     
    28672867
    28682868        /* allowed zero */
    2869         if ((val2 & pVM->hwaccm.s.vmx.msr.vmx_exit.n.disallowed0) != pVM->hwaccm.s.vmx.msr.vmx_exit.n.disallowed0)
     2869        if ((val2 & pVM->hm.s.vmx.msr.vmx_exit.n.disallowed0) != pVM->hm.s.vmx.msr.vmx_exit.n.disallowed0)
    28702870            Log(("Invalid VMX_VMCS_CTRL_EXIT_CONTROLS: zero\n"));
    28712871
    28722872        /* allowed one */
    2873         if ((val2 & ~pVM->hwaccm.s.vmx.msr.vmx_exit.n.allowed1) != 0)
     2873        if ((val2 & ~pVM->hm.s.vmx.msr.vmx_exit.n.allowed1) != 0)
    28742874            Log(("Invalid VMX_VMCS_CTRL_EXIT_CONTROLS: one\n"));
    28752875    }
     
    28782878
    28792879#ifdef VBOX_WITH_CRASHDUMP_MAGIC
    2880     pVCpu->hwaccm.s.vmx.VMCSCache.u64TimeEntry = RTTimeNanoTS();
     2880    pVCpu->hm.s.vmx.VMCSCache.u64TimeEntry = RTTimeNanoTS();
    28812881#endif
    28822882
     
    28852885     */
    28862886ResumeExecution:
    2887     if (!STAM_REL_PROFILE_ADV_IS_RUNNING(&pVCpu->hwaccm.s.StatEntry))
    2888         STAM_REL_PROFILE_ADV_STOP_START(&pVCpu->hwaccm.s.StatExit2, &pVCpu->hwaccm.s.StatEntry, x);
    2889     AssertMsg(pVCpu->hwaccm.s.idEnteredCpu == RTMpCpuId(),
     2887    if (!STAM_REL_PROFILE_ADV_IS_RUNNING(&pVCpu->hm.s.StatEntry))
     2888        STAM_REL_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit2, &pVCpu->hm.s.StatEntry, x);
     2889    AssertMsg(pVCpu->hm.s.idEnteredCpu == RTMpCpuId(),
    28902890              ("Expected %d, I'm %d; cResume=%d exitReason=%RGv exitQualification=%RGv\n",
    2891                (int)pVCpu->hwaccm.s.idEnteredCpu, (int)RTMpCpuId(), cResume, exitReason, exitQualification));
    2892     Assert(!HWACCMR0SuspendPending());
     2891               (int)pVCpu->hm.s.idEnteredCpu, (int)RTMpCpuId(), cResume, exitReason, exitQualification));
     2892    Assert(!HMR0SuspendPending());
    28932893    /* Not allowed to switch modes without reloading the host state (32->64 switcher)!! */
    28942894    Assert(fWasInLongMode == CPUMIsGuestInLongModeEx(pCtx));
     
    28972897     * Safety precaution; looping for too long here can have a very bad effect on the host.
    28982898     */
    2899     if (RT_UNLIKELY(++cResume > pVM->hwaccm.s.cMaxResumeLoops))
    2900     {
    2901         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitMaxResume);
     2899    if (RT_UNLIKELY(++cResume > pVM->hm.s.cMaxResumeLoops))
     2900    {
     2901        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMaxResume);
    29022902        rc = VINF_EM_RAW_INTERRUPT;
    29032903        goto end;
     
    29472947     * Check for pending actions that force us to go back to ring-3.
    29482948     */
    2949     if (    VM_FF_ISPENDING(pVM, VM_FF_HWACCM_TO_R3_MASK | VM_FF_REQUEST | VM_FF_PGM_POOL_FLUSH_PENDING | VM_FF_PDM_DMA)
    2950         ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HWACCM_TO_R3_MASK | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_REQUEST))
     2949    if (    VM_FF_ISPENDING(pVM, VM_FF_HM_TO_R3_MASK | VM_FF_REQUEST | VM_FF_PGM_POOL_FLUSH_PENDING | VM_FF_PDM_DMA)
     2950        ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_REQUEST))
    29512951    {
    29522952        /* Check if a sync operation is pending. */
     
    29672967#endif
    29682968        {
    2969             if (    VM_FF_ISPENDING(pVM, VM_FF_HWACCM_TO_R3_MASK)
    2970                 ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HWACCM_TO_R3_MASK))
     2969            if (    VM_FF_ISPENDING(pVM, VM_FF_HM_TO_R3_MASK)
     2970                ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
    29712971            {
    2972                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatSwitchToR3);
     2972                STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchToR3);
    29732973                rc = RT_UNLIKELY(VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3;
    29742974                goto end;
     
    30133013    if (RTThreadPreemptIsPending(NIL_RTTHREAD))
    30143014    {
    3015         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitPreemptPending);
     3015        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPreemptPending);
    30163016        rc = VINF_EM_RAW_INTERRUPT;
    30173017        goto end;
     
    30453045        AssertRC(rc2);
    30463046        /* The TPR can be found at offset 0x80 in the APIC mmio page. */
    3047         pVCpu->hwaccm.s.vmx.pbVAPIC[0x80] = u8LastTPR;
     3047        pVCpu->hm.s.vmx.pbVAPIC[0x80] = u8LastTPR;
    30483048
    30493049        /*
     
    30593059        AssertRC(VBOXSTRICTRC_VAL(rc));
    30603060
    3061         if (pVM->hwaccm.s.fTPRPatchingActive)
     3061        if (pVM->hm.s.fTPRPatchingActive)
    30623062        {
    30633063            Assert(!CPUMIsGuestInLongModeEx(pCtx));
     
    30833083
    30843084#ifdef LOG_ENABLED
    3085     if (    pVM->hwaccm.s.fNestedPaging
    3086         ||  pVM->hwaccm.s.vmx.fVPID)
    3087     {
    3088         PHMGLOBLCPUINFO pCpu = HWACCMR0GetCurrentCpu();
    3089         if (pVCpu->hwaccm.s.idLastCpu != pCpu->idCpu)
    3090         {
    3091             LogFlow(("Force TLB flush due to rescheduling to a different cpu (%d vs %d)\n", pVCpu->hwaccm.s.idLastCpu,
     3085    if (    pVM->hm.s.fNestedPaging
     3086        ||  pVM->hm.s.vmx.fVPID)
     3087    {
     3088        PHMGLOBLCPUINFO pCpu = HMR0GetCurrentCpu();
     3089        if (pVCpu->hm.s.idLastCpu != pCpu->idCpu)
     3090        {
     3091            LogFlow(("Force TLB flush due to rescheduling to a different cpu (%d vs %d)\n", pVCpu->hm.s.idLastCpu,
    30923092                     pCpu->idCpu));
    30933093        }
    3094         else if (pVCpu->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
    3095         {
    3096             LogFlow(("Force TLB flush due to changed TLB flush count (%x vs %x)\n", pVCpu->hwaccm.s.cTLBFlushes,
     3094        else if (pVCpu->hm.s.cTLBFlushes != pCpu->cTLBFlushes)
     3095        {
     3096            LogFlow(("Force TLB flush due to changed TLB flush count (%x vs %x)\n", pVCpu->hm.s.cTLBFlushes,
    30973097                     pCpu->cTLBFlushes));
    30983098        }
     
    31193119     * Save the host state first.
    31203120     */
    3121     if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_HOST_CONTEXT)
     3121    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_CONTEXT)
    31223122    {
    31233123        rc  = VMXR0SaveHostState(pVM, pVCpu);
     
    31323132     * Load the guest state.
    31333133     */
    3134     if (!pVCpu->hwaccm.s.fContextUseFlags)
     3134    if (!pVCpu->hm.s.fContextUseFlags)
    31353135    {
    31363136        VMXR0LoadMinimalGuestState(pVM, pVCpu, pCtx);
    3137         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatLoadMinimal);
     3137        STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadMinimal);
    31383138    }
    31393139    else
     
    31453145            goto end;
    31463146        }
    3147         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatLoadFull);
     3147        STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadFull);
    31483148    }
    31493149
     
    31633163
    31643164    /* Set TLB flush state as checked until we return from the world switch. */
    3165     ASMAtomicWriteBool(&pVCpu->hwaccm.s.fCheckedTLBFlush, true);
     3165    ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true);
    31663166    /* Deal with tagged TLB setup and invalidation. */
    3167     pVM->hwaccm.s.vmx.pfnSetupTaggedTLB(pVM, pVCpu);
     3167    pVM->hm.s.vmx.pfnSetupTaggedTLB(pVM, pVCpu);
    31683168
    31693169    /*
     
    31803180
    31813181    /* All done! Let's start VM execution. */
    3182     STAM_PROFILE_ADV_STOP_START(&pVCpu->hwaccm.s.StatEntry, &pVCpu->hwaccm.s.StatInGC, x);
     3182    STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
    31833183    Assert(idCpuCheck == RTMpCpuId());
    31843184
    31853185#ifdef VBOX_WITH_CRASHDUMP_MAGIC
    3186     pVCpu->hwaccm.s.vmx.VMCSCache.cResume = cResume;
    3187     pVCpu->hwaccm.s.vmx.VMCSCache.u64TimeSwitch = RTTimeNanoTS();
     3186    pVCpu->hm.s.vmx.VMCSCache.cResume = cResume;
     3187    pVCpu->hm.s.vmx.VMCSCache.u64TimeSwitch = RTTimeNanoTS();
    31883188#endif
    31893189
     
    31913191     * Save the current TPR value in the LSTAR MSR so our patches can access it.
    31923192     */
    3193     if (pVM->hwaccm.s.fTPRPatchingActive)
    3194     {
    3195         Assert(pVM->hwaccm.s.fTPRPatchingActive);
     3193    if (pVM->hm.s.fTPRPatchingActive)
     3194    {
     3195        Assert(pVM->hm.s.fTPRPatchingActive);
    31963196        u64OldLSTAR = ASMRdMsr(MSR_K8_LSTAR);
    31973197        ASMWrMsr(MSR_K8_LSTAR, u8LastTPR);
     
    32053205     * RDTSCPs (that don't cause exits) reads the guest MSR. See @bugref{3324}.
    32063206     */
    3207     if (    (pVCpu->hwaccm.s.vmx.proc_ctls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
    3208         && !(pVCpu->hwaccm.s.vmx.proc_ctls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT))
    3209     {
    3210         pVCpu->hwaccm.s.u64HostTSCAux = ASMRdMsr(MSR_K8_TSC_AUX);
     3207    if (    (pVCpu->hm.s.vmx.proc_ctls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
     3208        && !(pVCpu->hm.s.vmx.proc_ctls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT))
     3209    {
     3210        pVCpu->hm.s.u64HostTSCAux = ASMRdMsr(MSR_K8_TSC_AUX);
    32113211        uint64_t u64GuestTSCAux = 0;
    32123212        rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64GuestTSCAux);
     
    32173217
    32183218#ifdef VBOX_WITH_KERNEL_USING_XMM
    3219     rc = hwaccmR0VMXStartVMWrapXMM(pVCpu->hwaccm.s.fResumeVM, pCtx, &pVCpu->hwaccm.s.vmx.VMCSCache, pVM, pVCpu, pVCpu->hwaccm.s.vmx.pfnStartVM);
     3219    rc = hmR0VMXStartVMWrapXMM(pVCpu->hm.s.fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu, pVCpu->hm.s.vmx.pfnStartVM);
    32203220#else
    3221     rc = pVCpu->hwaccm.s.vmx.pfnStartVM(pVCpu->hwaccm.s.fResumeVM, pCtx, &pVCpu->hwaccm.s.vmx.VMCSCache, pVM, pVCpu);
    3222 #endif
    3223     ASMAtomicWriteBool(&pVCpu->hwaccm.s.fCheckedTLBFlush, false);
    3224     ASMAtomicIncU32(&pVCpu->hwaccm.s.cWorldSwitchExits);
     3221    rc = pVCpu->hm.s.vmx.pfnStartVM(pVCpu->hm.s.fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu);
     3222#endif
     3223    ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false);
     3224    ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits);
    32253225
    32263226    /* Possibly the last TSC value seen by the guest (too high) (only when we're in TSC offset mode). */
    3227     if (!(pVCpu->hwaccm.s.vmx.proc_ctls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT))
     3227    if (!(pVCpu->hm.s.vmx.proc_ctls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT))
    32283228    {
    32293229#ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    32303230        /* Restore host's TSC_AUX. */
    3231         if (pVCpu->hwaccm.s.vmx.proc_ctls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
    3232             ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hwaccm.s.u64HostTSCAux);
     3231        if (pVCpu->hm.s.vmx.proc_ctls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
     3232            ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hm.s.u64HostTSCAux);
    32333233#endif
    32343234
    32353235        TMCpuTickSetLastSeen(pVCpu,
    3236                              ASMReadTSC() + pVCpu->hwaccm.s.vmx.u64TSCOffset - 0x400 /* guestimate of world switch overhead in clock ticks */);
     3236                             ASMReadTSC() + pVCpu->hm.s.vmx.u64TSCOffset - 0x400 /* guestimate of world switch overhead in clock ticks */);
    32373237    }
    32383238
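
When RDTSC exiting is off, the guest reads host-TSC-plus-offset directly, so the code above records an upper bound on the TSC value the guest may have seen, padded down by 0x400 ticks as a rough world-switch cost. A tiny sketch of that arithmetic (the demo* names and the stubbed TSC reader are invented):

    #include <stdint.h>

    /* Stub standing in for a RDTSC intrinsic. */
    static uint64_t demoReadTSC(void) { return 1000000000u; }

    /* Upper bound on the TSC the guest may have observed: host TSC plus the
     * VMCS TSC offset, minus the 0x400-tick world-switch guesstimate above. */
    static uint64_t demoLastSeenGuestTsc(uint64_t u64TSCOffset)
    {
        return demoReadTSC() + u64TSCOffset - 0x400;
    }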
     
    32443244     * Restore the host LSTAR MSR if the guest could have changed it.
    32453245     */
    3246     if (pVM->hwaccm.s.fTPRPatchingActive)
    3247     {
    3248         Assert(pVM->hwaccm.s.fTPRPatchingActive);
    3249         pVCpu->hwaccm.s.vmx.pbVAPIC[0x80] = pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
     3246    if (pVM->hm.s.fTPRPatchingActive)
     3247    {
     3248        Assert(pVM->hm.s.fTPRPatchingActive);
     3249        pVCpu->hm.s.vmx.pbVAPIC[0x80] = pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
    32503250        ASMWrMsr(MSR_K8_LSTAR, u64OldLSTAR);
    32513251    }
    32523252
    3253     STAM_PROFILE_ADV_STOP_START(&pVCpu->hwaccm.s.StatInGC, &pVCpu->hwaccm.s.StatExit1, x);
     3253    STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x);
    32543254    ASMSetFlags(uOldEFlags);
    32553255#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
     
    32573257#endif
    32583258
    3259     AssertMsg(!pVCpu->hwaccm.s.vmx.VMCSCache.Write.cValidEntries, ("pVCpu->hwaccm.s.vmx.VMCSCache.Write.cValidEntries=%d\n",
    3260                                                                    pVCpu->hwaccm.s.vmx.VMCSCache.Write.cValidEntries));
     3259    AssertMsg(!pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries, ("pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries=%d\n",
     3260                                                                   pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries));
    32613261
    32623262    /* In case we execute a goto ResumeExecution later on. */
    3263     pVCpu->hwaccm.s.fResumeVM  = true;
    3264     pVCpu->hwaccm.s.fForceTLBFlush = false;
     3263    pVCpu->hm.s.fResumeVM  = true;
     3264    pVCpu->hm.s.fForceTLBFlush = false;
    32653265
    32663266    /*
     
    32813281    /* Investigate why there was a VM-exit. */
    32823282    rc2  = VMXReadCachedVMCS(VMX_VMCS32_RO_EXIT_REASON, &exitReason);
    3283     STAM_COUNTER_INC(&pVCpu->hwaccm.s.paStatExitReasonR0[exitReason & MASK_EXITREASON_STAT]);
     3283    STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[exitReason & MASK_EXITREASON_STAT]);
    32843284
    32853285    exitReason &= 0xffff;   /* bit 0-15 contain the exit code. */
     
    33113311    rc2 = VMXReadCachedVMCS(VMX_VMCS32_RO_IDT_INFO,            &val);
    33123312    AssertRC(rc2);
    3313     pVCpu->hwaccm.s.Event.intInfo = VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(val);
    3314     if (    VMX_EXIT_INTERRUPTION_INFO_VALID(pVCpu->hwaccm.s.Event.intInfo)
     3313    pVCpu->hm.s.Event.intInfo = VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(val);
     3314    if (    VMX_EXIT_INTERRUPTION_INFO_VALID(pVCpu->hm.s.Event.intInfo)
    33153315        /* Ignore 'int xx' as they'll be restarted anyway. */
    3316         &&  VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->hwaccm.s.Event.intInfo) != VMX_EXIT_INTERRUPTION_INFO_TYPE_SW
     3316        &&  VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->hm.s.Event.intInfo) != VMX_EXIT_INTERRUPTION_INFO_TYPE_SW
    33173317        /* Ignore software exceptions (such as int3) as they'll reoccur when we restart the instruction anyway. */
    3318         &&  VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->hwaccm.s.Event.intInfo) != VMX_EXIT_INTERRUPTION_INFO_TYPE_SWEXCPT)
    3319     {
    3320         Assert(!pVCpu->hwaccm.s.Event.fPending);
    3321         pVCpu->hwaccm.s.Event.fPending = true;
     3318        &&  VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->hm.s.Event.intInfo) != VMX_EXIT_INTERRUPTION_INFO_TYPE_SWEXCPT)
     3319    {
     3320        Assert(!pVCpu->hm.s.Event.fPending);
     3321        pVCpu->hm.s.Event.fPending = true;
    33223322        /* Error code present? */
    3323         if (VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(pVCpu->hwaccm.s.Event.intInfo))
     3323        if (VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(pVCpu->hm.s.Event.intInfo))
    33243324        {
    33253325            rc2 = VMXReadCachedVMCS(VMX_VMCS32_RO_IDT_ERRCODE, &val);
    33263326            AssertRC(rc2);
    3327             pVCpu->hwaccm.s.Event.errCode  = val;
     3327            pVCpu->hm.s.Event.errCode  = val;
    33283328            Log(("Pending inject %RX64 at %RGv exit=%08x intInfo=%08x exitQualification=%RGv pending error=%RX64\n",
    3329                  pVCpu->hwaccm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitReason, intInfo, exitQualification, val));
     3329                 pVCpu->hm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitReason, intInfo, exitQualification, val));
    33303330        }
    33313331        else
    33323332        {
    3333             Log(("Pending inject %RX64 at %RGv exit=%08x intInfo=%08x exitQualification=%RGv\n", pVCpu->hwaccm.s.Event.intInfo,
     3333            Log(("Pending inject %RX64 at %RGv exit=%08x intInfo=%08x exitQualification=%RGv\n", pVCpu->hm.s.Event.intInfo,
    33343334                 (RTGCPTR)pCtx->rip, exitReason, intInfo, exitQualification));
    3335             pVCpu->hwaccm.s.Event.errCode  = 0;
     3335            pVCpu->hm.s.Event.errCode  = 0;
    33363336        }
    33373337    }
    33383338#ifdef VBOX_STRICT
    3339     else if (   VMX_EXIT_INTERRUPTION_INFO_VALID(pVCpu->hwaccm.s.Event.intInfo)
     3339    else if (   VMX_EXIT_INTERRUPTION_INFO_VALID(pVCpu->hm.s.Event.intInfo)
    33403340                /* Ignore software exceptions (such as int3) as they're reoccur when we restart the instruction anyway. */
    3341              && VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->hwaccm.s.Event.intInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_SWEXCPT)
     3341             && VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->hm.s.Event.intInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_SWEXCPT)
    33423342    {
    33433343        Log(("Ignore pending inject %RX64 at %RGv exit=%08x intInfo=%08x exitQualification=%RGv\n",
    3344              pVCpu->hwaccm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitReason, intInfo, exitQualification));
     3344             pVCpu->hm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitReason, intInfo, exitQualification));
    33453345    }
    33463346
    33473347    if (exitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE)
    3348         HWACCMDumpRegs(pVM, pVCpu, pCtx);
     3348        HMDumpRegs(pVM, pVCpu, pCtx);
    33493349#endif
    33503350
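
The hunk above captures an event whose delivery was interrupted by the VM-exit (the IDT-vectoring info) and queues it for re-injection on the next entry, skipping software interrupts and software exceptions since those re-execute on restart anyway. A sketch of that filter using the architectural bit layout of the interruption-info field (vector in bits 0-7, type in bits 8-10, error-code-valid in bit 11, valid in bit 31); the demo* names are invented:

    #include <stdbool.h>
    #include <stdint.h>

    #define DEMO_INTINFO_VECTOR(a)        ((a) & 0xffu)
    #define DEMO_INTINFO_TYPE(a)          (((a) >> 8) & 7u)
    #define DEMO_INTINFO_ERRCODE_VALID(a) (((a) >> 11) & 1u)
    #define DEMO_INTINFO_VALID(a)         (((a) >> 31) & 1u)

    #define DEMO_TYPE_SW_INT   4u   /* software interrupt (int xx) */
    #define DEMO_TYPE_SW_XCPT  6u   /* software exception (int3, into) */

    /* Returns true if the interrupted event must be remembered for re-injection. */
    static bool demoMustReinject(uint32_t intInfo)
    {
        return DEMO_INTINFO_VALID(intInfo)
            && DEMO_INTINFO_TYPE(intInfo) != DEMO_TYPE_SW_INT
            && DEMO_INTINFO_TYPE(intInfo) != DEMO_TYPE_SW_XCPT;
    }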
     
    33593359     */
    33603360    if (    fSetupTPRCaching
    3361         &&  u8LastTPR != pVCpu->hwaccm.s.vmx.pbVAPIC[0x80])
    3362     {
    3363         rc2 = PDMApicSetTPR(pVCpu, pVCpu->hwaccm.s.vmx.pbVAPIC[0x80]);
     3361        &&  u8LastTPR != pVCpu->hm.s.vmx.pbVAPIC[0x80])
     3362    {
     3363        rc2 = PDMApicSetTPR(pVCpu, pVCpu->hm.s.vmx.pbVAPIC[0x80]);
    33643364        AssertRC(rc2);
    33653365    }
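
Here the TPR byte cached at offset 0x80 of the virtual APIC page is compared against the value seen before entry and pushed back through PDMApicSetTPR when the guest changed it while running. A minimal model of that sync-back, with a stub setter and invented demo* names:

    #include <stdint.h>

    static void demoApicSetTPR(uint8_t u8Tpr) { (void)u8Tpr; /* stub for the APIC setter */ }

    static void demoSyncTprAfterExit(const uint8_t *pbVApic, uint8_t u8LastTPR)
    {
        uint8_t u8GuestTpr = pbVApic[0x80];  /* the TPR lives at offset 0x80 in the APIC page */
        if (u8GuestTpr != u8LastTPR)         /* the guest changed it while it ran */
            demoApicSetTPR(u8GuestTpr);
    }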
     
    33693369                      exitReason, (uint64_t)exitQualification, pCtx->cs.Sel, pCtx->rip, (uint64_t)intInfo);
    33703370#endif
    3371     STAM_PROFILE_ADV_STOP_START(&pVCpu->hwaccm.s.StatExit1, &pVCpu->hwaccm.s.StatExit2, x);
     3371    STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
    33723372
    33733373    /* Some cases don't need a complete resync of the guest CPU state; handle them here. */
     
    33923392            break;
    33933393        }
    3394         STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatExit2Sub3, y3);
     3394        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExit2Sub3, y3);
    33953395        switch (VMX_EXIT_INTERRUPTION_INFO_TYPE(intInfo))
    33963396        {
     
    34233423                    Assert(CPUMIsGuestFPUStateActive(pVCpu));
    34243424
    3425                     STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitShadowNM);
     3425                    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowNM);
    34263426
    34273427                    /* Continue execution. */
    3428                     pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
    3429 
    3430                     STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2Sub3, y3);
     3428                    pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
     3429
     3430                    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3);
    34313431                    goto ResumeExecution;
    34323432                }
    34333433
    34343434                Log(("Forward #NM fault to the guest\n"));
    3435                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestNM);
     3435                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM);
    34363436                rc2 = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo),
    34373437                                         cbInstr, 0);
    34383438                AssertRC(rc2);
    3439                 STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2Sub3, y3);
     3439                STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3);
    34403440                goto ResumeExecution;
    34413441            }
     
    34443444            {
    34453445#ifdef VBOX_ALWAYS_TRAP_PF
    3446                 if (pVM->hwaccm.s.fNestedPaging)
     3446                if (pVM->hm.s.fNestedPaging)
    34473447                {
    34483448                    /*
     
    34543454                    Assert(CPUMIsGuestInPagedProtectedModeEx(pCtx));
    34553455
    3456                     STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestPF);
     3456                    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
    34573457
    34583458                    /* Now we must update CR2. */
     
    34623462                    AssertRC(rc2);
    34633463
    3464                     STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2Sub3, y3);
     3464                    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3);
    34653465                    goto ResumeExecution;
    34663466                }
    34673467#else
    3468                 Assert(!pVM->hwaccm.s.fNestedPaging);
    3469 #endif
    3470 
    3471 #ifdef VBOX_HWACCM_WITH_GUEST_PATCHING
     3468                Assert(!pVM->hm.s.fNestedPaging);
     3469#endif
     3470
     3471#ifdef VBOX_HM_WITH_GUEST_PATCHING
    34723472                /* Shortcut for APIC TPR reads and writes; 32 bits guests only */
    3473                 if (    pVM->hwaccm.s.fTRPPatchingAllowed
    3474                     &&  pVM->hwaccm.s.pGuestPatchMem
     3473                if (    pVM->hm.s.fTRPPatchingAllowed
     3474                    &&  pVM->hm.s.pGuestPatchMem
    34753475                    &&  (exitQualification & 0xfff) == 0x080
    34763476                    &&  !(errCode & X86_TRAP_PF_P)  /* not present */
    34773477                    &&  CPUMGetGuestCPL(pVCpu) == 0
    34783478                    &&  !CPUMIsGuestInLongModeEx(pCtx)
    3479                     &&  pVM->hwaccm.s.cPatches < RT_ELEMENTS(pVM->hwaccm.s.aPatches))
     3479                    &&  pVM->hm.s.cPatches < RT_ELEMENTS(pVM->hm.s.aPatches))
    34803480                {
    34813481                    RTGCPHYS GCPhysApicBase, GCPhys;
     
    34883488                    {
    34893489                        /* Only attempt to patch the instruction once. */
    3490                         PHWACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
     3490                        PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
    34913491                        if (!pPatch)
    34923492                        {
    3493                             rc = VINF_EM_HWACCM_PATCH_TPR_INSTR;
     3493                            rc = VINF_EM_HM_PATCH_TPR_INSTR;
    34943494                            break;
    34953495                        }
     
    35083508                    &&  !(errCode & X86_TRAP_PF_P)  /* not present */
    35093509                    &&  fSetupTPRCaching
    3510                     &&  (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))
     3510                    &&  (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))
    35113511                {
    35123512                    RTGCPHYS GCPhysApicBase, GCPhys;
     
    35193519                    {
    35203520                        Log(("Enable VT-x virtual APIC access filtering\n"));
    3521                         rc2 = IOMMMIOMapMMIOHCPage(pVM, GCPhysApicBase, pVM->hwaccm.s.vmx.pAPICPhys, X86_PTE_RW | X86_PTE_P);
     3521                        rc2 = IOMMMIOMapMMIOHCPage(pVM, GCPhysApicBase, pVM->hm.s.vmx.pAPICPhys, X86_PTE_RW | X86_PTE_P);
    35223522                        AssertRC(rc2);
    35233523                    }
     
    35313531                {   /* We've successfully synced our shadow pages, so let's just continue execution. */
    35323532                    Log2(("Shadow page fault at %RGv cr2=%RGv error code %x\n", (RTGCPTR)pCtx->rip, exitQualification ,errCode));
    3533                     STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitShadowPF);
     3533                    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
    35343534
    35353535                    TRPMResetTrap(pVCpu);
    3536                     STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2Sub3, y3);
     3536                    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3);
    35373537                    goto ResumeExecution;
    35383538                }
     
    35443544                    Log2(("Forward page fault to the guest\n"));
    35453545
    3546                     STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestPF);
     3546                    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
    35473547                    /* The error code might have been changed. */
    35483548                    errCode = TRPMGetErrorCode(pVCpu);
     
    35563556                    AssertRC(rc2);
    35573557
    3558                     STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2Sub3, y3);
     3558                    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3);
    35593559                    goto ResumeExecution;
    35603560                }
     
    35643564#endif
    35653565                /* Need to go back to the recompiler to emulate the instruction. */
    3566                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitShadowPFEM);
     3566                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPFEM);
    35673567                TRPMResetTrap(pVCpu);
    35683568                break;
     
    35713571            case X86_XCPT_MF: /* Floating point exception. */
    35723572            {
    3573                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestMF);
     3573                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);
    35743574                if (!(pCtx->cr0 & X86_CR0_NE))
    35753575                {
     
    35843584                AssertRC(rc2);
    35853585
    3586                 STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2Sub3, y3);
     3586                STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3);
    35873587                goto ResumeExecution;
    35883588            }
     
    36023602                 * 63:15    Reserved (0)
    36033603                 */
    3604                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestDB);
     3604                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);
    36053605
    36063606                /* Note that we don't support guest and host-initiated debugging at the same time. */
     
    36363636                    AssertRC(rc2);
    36373637
    3638                     STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2Sub3, y3);
     3638                    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3);
    36393639                    goto ResumeExecution;
    36403640                }
     
    36463646            case X86_XCPT_BP:   /* Breakpoint. */
    36473647            {
    3648                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestBP);
     3648                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP);
    36493649                rc = DBGFRZTrap03Handler(pVM, pVCpu, CPUMCTX2CORE(pCtx));
    36503650                if (rc == VINF_EM_RAW_GUEST_TRAP)
     
    36543654                                             cbInstr, errCode);
    36553655                    AssertRC(rc2);
    3656                     STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2Sub3, y3);
     3656                    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3);
    36573657                    goto ResumeExecution;
    36583658                }
    36593659                if (rc == VINF_SUCCESS)
    36603660                {
    3661                     STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2Sub3, y3);
     3661                    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3);
    36623662                    goto ResumeExecution;
    36633663                }
     
    36693669            {
    36703670                uint32_t     cbOp;
    3671                 PDISCPUSTATE pDis = &pVCpu->hwaccm.s.DisState;
    3672 
    3673                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestGP);
     3671                PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
     3672
     3673                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP);
    36743674#ifdef VBOX_STRICT
    36753675                if (    !CPUMIsGuestInRealModeEx(pCtx)
    3676                     ||  !pVM->hwaccm.s.vmx.pRealModeTSS)
     3676                    ||  !pVM->hm.s.vmx.pRealModeTSS)
    36773677                {
    36783678                    Log(("Trap %x at %04X:%RGv errorCode=%RGv\n", vector, pCtx->cs.Sel, (RTGCPTR)pCtx->rip, errCode));
     
    36803680                                             cbInstr, errCode);
    36813681                    AssertRC(rc2);
    3682                     STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2Sub3, y3);
     3682                    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3);
    36833683                    goto ResumeExecution;
    36843684                }
     
    36993699                        case OP_CLI:
    37003700                            pCtx->eflags.Bits.u1IF = 0;
    3701                             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitCli);
     3701                            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCli);
    37023702                            break;
    37033703
     
    37093709                                               VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
    37103710                            AssertRC(rc2);
    3711                             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitSti);
     3711                            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitSti);
    37123712                            break;
    37133713
     
    37163716                            rc = VINF_EM_HALT;
    37173717                            pCtx->rip += pDis->cbInstr;
    3718                             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitHlt);
     3718                            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
    37193719                            break;
    37203720
     
    37583758                            pCtx->esp &= uMask;
    37593759
    3760                             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitPopf);
     3760                            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPopf);
    37613761                            break;
    37623762                        }
     
    38013801                            pCtx->esp -= cbParm;
    38023802                            pCtx->esp &= uMask;
    3803                             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitPushf);
     3803                            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPushf);
    38043804                            break;
    38053805                        }
     
    38393839                            LogFlow(("iret to %04x:%x\n", pCtx->cs.Sel, pCtx->ip));
    38403840                            fUpdateRIP = false;
    3841                             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitIret);
     3841                            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIret);
    38423842                            break;
    38433843                        }
     
    38553855                            AssertRC(VBOXSTRICTRC_VAL(rc));
    38563856                            fUpdateRIP = false;
    3857                             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitInt);
     3857                            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
    38583858                            break;
    38593859                        }
     
    38733873                                AssertRC(VBOXSTRICTRC_VAL(rc));
    38743874                                fUpdateRIP = false;
    3875                                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitInt);
     3875                                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
    38763876                            }
    38773877                            break;
     
    38903890                            AssertRC(VBOXSTRICTRC_VAL(rc));
    38913891                            fUpdateRIP = false;
    3892                             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitInt);
     3892                            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
    38933893                            break;
    38943894                        }
     
    39093909                         * whole context to be done with it.
    39103910                         */
    3911                         pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL;
     3911                        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL;
    39123912
    39133913                        /* Only resume if successful. */
    3914                         STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2Sub3, y3);
     3914                        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3);
    39153915                        goto ResumeExecution;
    39163916                    }
     
    39333933                switch (vector)
    39343934                {
    3935                     case X86_XCPT_DE: STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestDE); break;
    3936                     case X86_XCPT_UD: STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestUD); break;
    3937                     case X86_XCPT_SS: STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestSS); break;
    3938                     case X86_XCPT_NP: STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestNP); break;
    3939                     case X86_XCPT_XF: STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestXF); break;
     3935                    case X86_XCPT_DE: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE); break;
     3936                    case X86_XCPT_UD: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD); break;
     3937                    case X86_XCPT_SS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestSS); break;
     3938                    case X86_XCPT_NP: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNP); break;
     3939                    case X86_XCPT_XF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXF); break;
    39403940                }
    39413941
     
    39453945                AssertRC(rc2);
    39463946
    3947                 STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2Sub3, y3);
     3947                STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3);
    39483948                goto ResumeExecution;
    39493949            }
    39503950#endif
    39513951            default:
    3952                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestXcpUnk);
     3952                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXcpUnk);
    39533953                if (    CPUMIsGuestInRealModeEx(pCtx)
    3954                     &&  pVM->hwaccm.s.vmx.pRealModeTSS)
     3954                    &&  pVM->hm.s.vmx.pRealModeTSS)
    39553955                {
    39563956                    Log(("Real Mode Trap %x at %04x:%04X error code %x\n", vector, pCtx->cs.Sel, pCtx->eip, errCode));
     
    39663966                    }
    39673967
    3968                     STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2Sub3, y3);
     3968                    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3);
    39693969                    goto ResumeExecution;
    39703970                }
     
    39823982        }
    39833983
    3984         STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2Sub3, y3);
     3984        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3);
    39853985        break;
    39863986    }
     
    39943994        RTGCPHYS GCPhys;
    39953995
    3996         Assert(pVM->hwaccm.s.fNestedPaging);
     3996        Assert(pVM->hm.s.fNestedPaging);
    39973997
    39983998        rc2 = VMXReadVMCS64(VMX_VMCS_EXIT_PHYS_ADDR_FULL, &GCPhys);
     
    40174017                &&  GCPhys > 0x1000000          /* to skip VGA frame buffer accesses */
    40184018                &&  fSetupTPRCaching
    4019                 &&  (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))
     4019                &&  (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))
    40204020            {
    40214021                RTGCPHYS GCPhysApicBase;
     
    40254025                {
    40264026                    Log(("Enable VT-x virtual APIC access filtering\n"));
    4027                     rc2 = IOMMMIOMapMMIOHCPage(pVM, GCPhysApicBase, pVM->hwaccm.s.vmx.pAPICPhys, X86_PTE_RW | X86_PTE_P);
     4027                    rc2 = IOMMMIOMapMMIOHCPage(pVM, GCPhysApicBase, pVM->hm.s.vmx.pAPICPhys, X86_PTE_RW | X86_PTE_P);
    40284028                    AssertRC(rc2);
    40294029                }
     
    40494049            /* We've successfully synced our shadow pages, so let's just continue execution. */
    40504050            Log2(("Shadow page fault at %RGv cr2=%RGp error code %x\n", (RTGCPTR)pCtx->rip, exitQualification , errCode));
    4051             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitReasonNPF);
     4051            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNPF);
    40524052
    40534053            TRPMResetTrap(pVCpu);
     
    40684068        RTGCPHYS GCPhys;
    40694069
    4070         Assert(pVM->hwaccm.s.fNestedPaging);
     4070        Assert(pVM->hm.s.fNestedPaging);
    40714071
    40724072        rc2 = VMXReadVMCS64(VMX_VMCS_EXIT_PHYS_ADDR_FULL, &GCPhys);
     
    40784078            &&  GCPhys > 0x1000000              /* to skip VGA frame buffer accesses */
    40794079            &&  fSetupTPRCaching
    4080             &&  (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))
     4080            &&  (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))
    40814081        {
    40824082            RTGCPHYS GCPhysApicBase;
     
    40864086            {
    40874087                Log(("Enable VT-x virtual APIC access filtering\n"));
    4088                 rc2 = IOMMMIOMapMMIOHCPage(pVM, GCPhysApicBase, pVM->hwaccm.s.vmx.pAPICPhys, X86_PTE_RW | X86_PTE_P);
     4088                rc2 = IOMMMIOMapMMIOHCPage(pVM, GCPhysApicBase, pVM->hm.s.vmx.pAPICPhys, X86_PTE_RW | X86_PTE_P);
    40894089                AssertRC(rc2);
    40904090            }
     
    41164116        LogFlow(("VMX_EXIT_IRQ_WINDOW %RGv pending=%d IF=%d\n", (RTGCPTR)pCtx->rip,
    41174117                 VMCPU_FF_ISPENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC|VMCPU_FF_INTERRUPT_PIC)), pCtx->eflags.Bits.u1IF));
    4118         pVCpu->hwaccm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT;
    4119         rc2 = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hwaccm.s.vmx.proc_ctls);
     4118        pVCpu->hm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT;
     4119        rc2 = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.proc_ctls);
    41204120        AssertRC(rc2);
    4121         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitIrqWindow);
     4121        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIrqWindow);
    41224122        goto ResumeExecution;   /* we check for pending guest interrupts there */
    41234123
    41244124    case VMX_EXIT_WBINVD:               /* 54 Guest software attempted to execute WBINVD. (conditional) */
    41254125    case VMX_EXIT_INVD:                 /* 13 Guest software attempted to execute INVD. (unconditional) */
    4126         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitInvd);
     4126        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvd);
    41274127        /* Skip instruction and continue directly. */
    41284128        pCtx->rip += cbInstr;
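
The VMX_EXIT_IRQ_WINDOW case above disarms interrupt-window exiting (the control was armed earlier to force a VM-exit as soon as the guest could accept an interrupt) and resumes so the pending interrupt gets injected. A sketch of that control-bit toggle, assuming the architectural bit position (bit 2 of the primary processor-based controls) and a stubbed VMCS write:

    #include <stdint.h>

    #define DEMO_PROC_CTLS_INT_WINDOW_EXIT  (1u << 2)  /* architectural bit position */

    static void demoWriteProcCtls(uint32_t uProcCtls) { (void)uProcCtls; /* stub VMWRITE */ }

    /* The window is open now; stop exiting on it and write the controls back. */
    static uint32_t demoDisarmIrqWindow(uint32_t uProcCtls)
    {
        uProcCtls &= ~DEMO_PROC_CTLS_INT_WINDOW_EXIT;
        demoWriteProcCtls(uProcCtls);
        return uProcCtls;
    }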
     
    41334133    {
    41344134        Log2(("VMX: Cpuid %x\n", pCtx->eax));
    4135         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitCpuid);
     4135        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCpuid);
    41364136        rc = EMInterpretCpuId(pVM, pVCpu, CPUMCTX2CORE(pCtx));
    41374137        if (rc == VINF_SUCCESS)
     
    41504150    {
    41514151        Log2(("VMX: Rdpmc %x\n", pCtx->ecx));
    4152         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitRdpmc);
     4152        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdpmc);
    41534153        rc = EMInterpretRdpmc(pVM, pVCpu, CPUMCTX2CORE(pCtx));
    41544154        if (rc == VINF_SUCCESS)
     
    41664166    {
    41674167        Log2(("VMX: Rdtsc\n"));
    4168         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitRdtsc);
     4168        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);
    41694169        rc = EMInterpretRdtsc(pVM, pVCpu, CPUMCTX2CORE(pCtx));
    41704170        if (rc == VINF_SUCCESS)
     
    41824182    {
    41834183        Log2(("VMX: Rdtscp\n"));
    4184         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitRdtscp);
     4184        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtscp);
    41854185        rc = EMInterpretRdtscp(pVM, pVCpu, pCtx);
    41864186        if (rc == VINF_SUCCESS)
     
    41984198    {
    41994199        Log2(("VMX: invlpg\n"));
    4200         Assert(!pVM->hwaccm.s.fNestedPaging);
    4201 
    4202         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitInvlpg);
     4200        Assert(!pVM->hm.s.fNestedPaging);
     4201
     4202        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvlpg);
    42034203        rc = EMInterpretInvlpg(pVM, pVCpu, CPUMCTX2CORE(pCtx), exitQualification);
    42044204        if (rc == VINF_SUCCESS)
     
    42164216        Log2(("VMX: monitor\n"));
    42174217
    4218         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitMonitor);
     4218        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMonitor);
    42194219        rc = EMInterpretMonitor(pVM, pVCpu, CPUMCTX2CORE(pCtx));
    42204220        if (rc == VINF_SUCCESS)
     
    42304230    case VMX_EXIT_WRMSR:                /* 32 WRMSR. Guest software attempted to execute WRMSR. */
    42314231        /* When an interrupt is pending, we'll let MSR_K8_LSTAR writes fault in our TPR patch code. */
    4232         if (    pVM->hwaccm.s.fTPRPatchingActive
     4232        if (    pVM->hm.s.fTPRPatchingActive
    42334233            &&  pCtx->ecx == MSR_K8_LSTAR)
    42344234        {
     
    42494249            goto ResumeExecution;
    42504250        }
    4251         pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_MSR;
     4251        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_MSR;
    42524252        /* no break */
    42534253    case VMX_EXIT_RDMSR:                /* 31 RDMSR. Guest software attempted to execute RDMSR. */
    42544254    {
    4255         STAM_COUNTER_INC((exitReason == VMX_EXIT_RDMSR) ? &pVCpu->hwaccm.s.StatExitRdmsr : &pVCpu->hwaccm.s.StatExitWrmsr);
     4255        STAM_COUNTER_INC((exitReason == VMX_EXIT_RDMSR) ? &pVCpu->hm.s.StatExitRdmsr : &pVCpu->hm.s.StatExitWrmsr);
    42564256
    42574257        /*
     
    42744274    case VMX_EXIT_CRX_MOVE:             /* 28 Control-register accesses. */
    42754275    {
    4276         STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatExit2Sub2, y2);
     4276        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExit2Sub2, y2);
    42774277
    42784278        switch (VMX_EXIT_QUALIFICATION_CRX_ACCESS(exitQualification))
     
    42814281            {
    42824282                Log2(("VMX: %RGv mov cr%d, x\n", (RTGCPTR)pCtx->rip, VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification)));
    4283                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitCRxWrite[VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification)]);
     4283                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxWrite[VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification)]);
    42844284                rc = EMInterpretCRxWrite(pVM, pVCpu, CPUMCTX2CORE(pCtx),
    42854285                                         VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification),
     
    42884288                {
    42894289                    case 0:
    4290                         pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0 | HWACCM_CHANGED_GUEST_CR3;
     4290                        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0 | HM_CHANGED_GUEST_CR3;
    42914291                        break;
    42924292                    case 2:
    42934293                        break;
    42944294                    case 3:
    4295                         Assert(!pVM->hwaccm.s.fNestedPaging || !CPUMIsGuestInPagedProtectedModeEx(pCtx));
    4296                         pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR3;
     4295                        Assert(!pVM->hm.s.fNestedPaging || !CPUMIsGuestInPagedProtectedModeEx(pCtx));
     4296                        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR3;
    42974297                        break;
    42984298                    case 4:
    4299                         pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR4;
     4299                        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR4;
    43004300                        break;
    43014301                    case 8:
    43024302                        /* CR8 contains the APIC TPR */
    4303                         Assert(!(pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1
     4303                        Assert(!(pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1
    43044304                                 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW));
    43054305                        break;
     
    43154315            {
    43164316                Log2(("VMX: mov x, crx\n"));
    4317                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitCRxRead[VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification)]);
    4318 
    4319                 Assert(   !pVM->hwaccm.s.fNestedPaging
     4317                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxRead[VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification)]);
     4318
     4319                Assert(   !pVM->hm.s.fNestedPaging
    43204320                       || !CPUMIsGuestInPagedProtectedModeEx(pCtx)
    43214321                       || VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification) != DISCREG_CR3);
     
    43234323                /* CR8 reads only cause an exit when the TPR shadow feature isn't present. */
    43244324                Assert(   VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification) != 8
    4325                        || !(pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW));
     4325                       || !(pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW));
    43264326
    43274327                rc = EMInterpretCRxRead(pVM, pVCpu, CPUMCTX2CORE(pCtx),
     
    43344334            {
    43354335                Log2(("VMX: clts\n"));
    4336                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitCLTS);
     4336                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCLTS);
    43374337                rc = EMInterpretCLTS(pVM, pVCpu);
    4338                 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
     4338                pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
    43394339                break;
    43404340            }
     
    43434343            {
    43444344                Log2(("VMX: lmsw %x\n", VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(exitQualification)));
    4345                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitLMSW);
     4345                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitLMSW);
    43464346                rc = EMInterpretLMSW(pVM, pVCpu, CPUMCTX2CORE(pCtx), VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(exitQualification));
    4347                 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
     4347                pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
    43484348                break;
    43494349            }
     
    43574357        {
    43584358            /* Only resume if successful. */
    4359             STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2Sub2, y2);
     4359            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub2, y2);
    43604360            goto ResumeExecution;
    43614361        }
    43624362        Assert(rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3);
    4363         STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2Sub2, y2);
     4363        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub2, y2);
    43644364        break;
    43654365    }
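
The CR-access case above dispatches on fields of the exit qualification; the layout is architectural (bits 0-3 the CR number, bits 4-5 the access type: 0 mov-to-CR, 1 mov-from-CR, 2 CLTS, 3 LMSW, bits 8-11 the general register). A standalone decoder sketch:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t exitQual = 0x0303;  /* example shape: mov cr3, rbx */
        unsigned iCr      = (unsigned)(exitQual & 0xf);
        unsigned iAccess  = (unsigned)((exitQual >> 4) & 3);  /* 0=mov-to, 1=mov-from, 2=clts, 3=lmsw */
        unsigned iGpr     = (unsigned)((exitQual >> 8) & 0xf);
        printf("cr%u access-type=%u gpr=%u\n", iCr, iAccess, iGpr);
        return 0;
    }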
     
    43714371        {
    43724372            /* Disable DRx move intercepts. */
    4373             pVCpu->hwaccm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT;
    4374             rc2 = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hwaccm.s.vmx.proc_ctls);
     4373            pVCpu->hm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT;
     4374            rc2 = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.proc_ctls);
    43754375            AssertRC(rc2);
    43764376
     
    43904390
    43914391#ifdef VBOX_WITH_STATISTICS
    4392             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatDRxContextSwitch);
     4392            STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch);
    43934393            if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(exitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)
    4394                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitDRxWrite);
     4394                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
    43954395            else
    4396                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitDRxRead);
     4396                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
    43974397#endif
    43984398
     
    44064406            Log2(("VMX: mov DRx%d, genreg%d\n", VMX_EXIT_QUALIFICATION_DRX_REGISTER(exitQualification),
    44074407                  VMX_EXIT_QUALIFICATION_DRX_GENREG(exitQualification)));
    4408             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitDRxWrite);
     4408            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
    44094409            rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pCtx),
    44104410                                     VMX_EXIT_QUALIFICATION_DRX_REGISTER(exitQualification),
    44114411                                     VMX_EXIT_QUALIFICATION_DRX_GENREG(exitQualification));
    4412             pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_DEBUG;
     4412            pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
    44134413            Log2(("DR7=%08x\n", pCtx->dr[7]));
    44144414        }
     
    44164416        {
    44174417            Log2(("VMX: mov x, DRx\n"));
    4418             STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitDRxRead);
     4418            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
    44194419            rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pCtx),
    44204420                                    VMX_EXIT_QUALIFICATION_DRX_GENREG(exitQualification),
     
    44374437    case VMX_EXIT_PORT_IO:              /* 30 I/O instruction. */
    44384438    {
    4439         STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatExit2Sub1, y1);
     4439        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExit2Sub1, y1);
    44404440        uint32_t uPort;
    44414441        uint32_t uIOWidth = VMX_EXIT_QUALIFICATION_IO_WIDTH(exitQualification);
     
    44514451        {
    44524452            rc = fIOWrite ? VINF_IOM_R3_IOPORT_WRITE : VINF_IOM_R3_IOPORT_READ;
    4453             STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2Sub1, y1);
     4453            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub1, y1);
    44544454            break;
    44554455        }
     
    44594459        {
    44604460            /* ins/outs */
    4461             PDISCPUSTATE pDis = &pVCpu->hwaccm.s.DisState;
     4461            PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
    44624462
    44634463            /* Disassemble manually to deal with segment prefixes. */
     
    44704470                {
    44714471                    Log2(("IOMInterpretOUTSEx %RGv %x size=%d\n", (RTGCPTR)pCtx->rip, uPort, cbSize));
    4472                     STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitIOStringWrite);
     4472                    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringWrite);
    44734473                    rc = IOMInterpretOUTSEx(pVM, CPUMCTX2CORE(pCtx), uPort, pDis->fPrefix, (DISCPUMODE)pDis->uAddrMode, cbSize);
    44744474                }
     
    44764476                {
    44774477                    Log2(("IOMInterpretINSEx  %RGv %x size=%d\n", (RTGCPTR)pCtx->rip, uPort, cbSize));
    4478                     STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitIOStringRead);
     4478                    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringRead);
    44794479                    rc = IOMInterpretINSEx(pVM, CPUMCTX2CORE(pCtx), uPort, pDis->fPrefix, (DISCPUMODE)pDis->uAddrMode, cbSize);
    44804480                }
     
    44924492            if (fIOWrite)
    44934493            {
    4494                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitIOWrite);
     4494                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOWrite);
    44954495                rc = IOMIOPortWrite(pVM, uPort, pCtx->eax & uAndVal, cbSize);
    44964496                if (rc == VINF_IOM_R3_IOPORT_WRITE)
    4497                     HWACCMR0SavePendingIOPortWrite(pVCpu, pCtx->rip, pCtx->rip + cbInstr, uPort, uAndVal, cbSize);
     4497                    HMR0SavePendingIOPortWrite(pVCpu, pCtx->rip, pCtx->rip + cbInstr, uPort, uAndVal, cbSize);
    44984498            }
    44994499            else
     
    45014501                uint32_t u32Val = 0;
    45024502
    4503                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitIORead);
     4503                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead);
    45044504                rc = IOMIOPortRead(pVM, uPort, &u32Val, cbSize);
    45054505                if (IOM_SUCCESS(rc))
     
    45104510                else
    45114511                if (rc == VINF_IOM_R3_IOPORT_READ)
    4512                     HWACCMR0SavePendingIOPortRead(pVCpu, pCtx->rip, pCtx->rip + cbInstr, uPort, uAndVal, cbSize);
     4512                    HMR0SavePendingIOPortRead(pVCpu, pCtx->rip, pCtx->rip + cbInstr, uPort, uAndVal, cbSize);
    45134513            }
    45144514        }
     
    45274527                if (pCtx->dr[7] & X86_DR7_ENABLED_MASK)
    45284528                {
    4529                     STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatDRxIOCheck);
     4529                    STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIOCheck);
    45304530                    for (unsigned i = 0; i < 4; i++)
    45314531                    {
     
    45754575                            AssertRC(rc2);
    45764576
    4577                             STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2Sub1, y1);
     4577                            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub1, y1);
    45784578                            goto ResumeExecution;
    45794579                        }
    45804580                    }
    45814581                }
    4582                 STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2Sub1, y1);
     4582                STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub1, y1);
    45834583                goto ResumeExecution;
    45844584            }
    4585             STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2Sub1, y1);
     4585            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub1, y1);
    45864586            break;
    45874587        }
     
    46004600        }
    46014601#endif
    4602         STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2Sub1, y1);
     4602        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub1, y1);
    46034603        break;
    46044604    }
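
The I/O case above pulls the port, access width, direction and string-ness out of the exit qualification; the field layout is architectural (bits 0-2 width minus one, bit 3 direction, bit 4 string instruction, bits 16-31 port). A standalone decoder sketch:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t exitQual = (0x60ull << 16) | (1u << 3);       /* "in al, 60h" shape */
        unsigned cbSize   = (unsigned)(exitQual & 7) + 1;      /* access width in bytes */
        int      fIn      = (int)((exitQual >> 3) & 1);        /* 1 = IN, 0 = OUT */
        int      fString  = (int)((exitQual >> 4) & 1);        /* ins/outs */
        unsigned uPort    = (unsigned)((exitQual >> 16) & 0xffff);
        printf("port %#x width %u %s%s\n", uPort, cbSize, fIn ? "in" : "out",
               fString ? " (string)" : "");
        return 0;
    }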
     
    46864686        Log(("VMX_EXIT_TASK_SWITCH: exit=%RX64\n", exitQualification));
    46874687        if (    (VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE(exitQualification) == VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE_IDT)
    4688             &&  pVCpu->hwaccm.s.Event.fPending)
     4688            &&  pVCpu->hm.s.Event.fPending)
    46894689        {
    46904690            /* Caused by an injected interrupt. */
    4691             pVCpu->hwaccm.s.Event.fPending = false;
    4692 
    4693             Log(("VMX_EXIT_TASK_SWITCH: reassert trap %d\n", VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVCpu->hwaccm.s.Event.intInfo)));
    4694             Assert(!VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(pVCpu->hwaccm.s.Event.intInfo));
    4695             rc2 = TRPMAssertTrap(pVCpu, VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVCpu->hwaccm.s.Event.intInfo), TRPM_HARDWARE_INT);
     4691            pVCpu->hm.s.Event.fPending = false;
     4692
     4693            Log(("VMX_EXIT_TASK_SWITCH: reassert trap %d\n", VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVCpu->hm.s.Event.intInfo)));
     4694            Assert(!VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(pVCpu->hm.s.Event.intInfo));
     4695            rc2 = TRPMAssertTrap(pVCpu, VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVCpu->hm.s.Event.intInfo), TRPM_HARDWARE_INT);
    46964696            AssertRC(rc2);
    46974697        }
     
    47024702    case VMX_EXIT_HLT:                  /* 12 Guest software attempted to execute HLT. */
    47034703        /* Check if external interrupts are pending; if so, don't switch back. */
    4704         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitHlt);
     4704        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
    47054705        pCtx->rip++;    /* skip hlt */
    47064706        if (EMShouldContinueAfterHalt(pVCpu, pCtx))
     
    47124712    case VMX_EXIT_MWAIT:                /* 36 Guest software executed MWAIT. */
    47134713        Log2(("VMX: mwait\n"));
    4714         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitMwait);
     4714        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMwait);
    47154715        rc = EMInterpretMWait(pVM, pVCpu, CPUMCTX2CORE(pCtx));
    47164716        if (    rc == VINF_EM_HALT
     
    47374737    case VMX_EXIT_MTF:                  /* 37 Exit due to Monitor Trap Flag. */
    47384738        LogFlow(("VMX_EXIT_MTF at %RGv\n", (RTGCPTR)pCtx->rip));
    4739         pVCpu->hwaccm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG;
    4740         rc2 = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hwaccm.s.vmx.proc_ctls);
     4739        pVCpu->hm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG;
     4740        rc2 = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.proc_ctls);
    47414741        AssertRC(rc2);
    4742         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitMTF);
     4742        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMTF);
    47434743#if 0
    47444744        DBGFDoneStepping(pVCpu);
     
    48724872        &&  !VMX_EXIT_INTERRUPTION_INFO_VALID(intInfo))
    48734873    {
    4874         STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatPendingHostIrq);
     4874        STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq);
    48754875        /* On the next entry we'll only sync the host context. */
    4876         pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_HOST_CONTEXT;
     4876        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_HOST_CONTEXT;
    48774877    }
    48784878    else
     
    48814881        /** @todo we can do better than this */
    48824882        /* Not in the VINF_PGM_CHANGE_MODE though! */
    4883         pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL;
     4883        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL;
    48844884    }
    48854885
     
    48904890    {
    48914891        /* Try to extract more information about what might have gone wrong here. */
    4892         VMXGetActivateVMCS(&pVCpu->hwaccm.s.vmx.lasterror.u64VMCSPhys);
    4893         pVCpu->hwaccm.s.vmx.lasterror.ulVMCSRevision = *(uint32_t *)pVCpu->hwaccm.s.vmx.pvVMCS;
    4894         pVCpu->hwaccm.s.vmx.lasterror.idEnteredCpu   = pVCpu->hwaccm.s.idEnteredCpu;
    4895         pVCpu->hwaccm.s.vmx.lasterror.idCurrentCpu   = RTMpCpuId();
     4892        VMXGetActivateVMCS(&pVCpu->hm.s.vmx.lasterror.u64VMCSPhys);
     4893        pVCpu->hm.s.vmx.lasterror.ulVMCSRevision = *(uint32_t *)pVCpu->hm.s.vmx.pvVMCS;
     4894        pVCpu->hm.s.vmx.lasterror.idEnteredCpu   = pVCpu->hm.s.idEnteredCpu;
     4895        pVCpu->hm.s.vmx.lasterror.idCurrentCpu   = RTMpCpuId();
    48964896    }
    48974897
     
    49054905#endif
    49064906
    4907     STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit2, x);
    4908     STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit1, x);
    4909     STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatEntry, x);
     4907    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
     4908    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
     4909    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
    49104910    Log2(("X"));
    49114911    return VBOXSTRICTRC_TODO(rc);
     
    49234923VMMR0DECL(int) VMXR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBLCPUINFO pCpu)
    49244924{
    4925     Assert(pVM->hwaccm.s.vmx.fSupported);
     4925    Assert(pVM->hm.s.vmx.fSupported);
    49264926    NOREF(pCpu);
    49274927
     
    49344934
    49354935    /* Activate the VMCS. */
    4936     int rc = VMXActivateVMCS(pVCpu->hwaccm.s.vmx.HCPhysVMCS);
     4936    int rc = VMXActivateVMCS(pVCpu->hm.s.vmx.HCPhysVMCS);
    49374937    if (RT_FAILURE(rc))
    49384938        return rc;
    49394939
    4940     pVCpu->hwaccm.s.fResumeVM = false;
     4940    pVCpu->hm.s.fResumeVM = false;
    49414941    return VINF_SUCCESS;
    49424942}
     
    49534953VMMR0DECL(int) VMXR0Leave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
    49544954{
    4955     Assert(pVM->hwaccm.s.vmx.fSupported);
     4955    Assert(pVM->hm.s.vmx.fSupported);
    49564956
    49574957#ifdef DEBUG
     
    49594959    {
    49604960        CPUMR0LoadHostDebugState(pVM, pVCpu);
    4961         Assert(pVCpu->hwaccm.s.vmx.proc_ctls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT);
     4961        Assert(pVCpu->hm.s.vmx.proc_ctls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT);
    49624962    }
    49634963    else
     
    49724972
    49734973        /* Enable DRx move intercepts again. */
    4974         pVCpu->hwaccm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT;
    4975         int rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hwaccm.s.vmx.proc_ctls);
     4974        pVCpu->hm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT;
     4975        int rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.proc_ctls);
    49764976        AssertRC(rc);
    49774977
    49784978        /* Resync the debug registers the next time. */
    4979         pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_DEBUG;
     4979        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
    49804980    }
    49814981    else
    4982         Assert(pVCpu->hwaccm.s.vmx.proc_ctls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT);
     4982        Assert(pVCpu->hm.s.vmx.proc_ctls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT);
    49834983
    49844984    /*
     
    49864986     * VMCS data back to memory.
    49874987     */
    4988     int rc = VMXClearVMCS(pVCpu->hwaccm.s.vmx.HCPhysVMCS);
     4988    int rc = VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVMCS);
    49894989    AssertRC(rc);
    49904990
     
    50065006
    50075007    LogFlow(("hmR0VmxFlushEPT %d\n", enmFlush));
    5008     Assert(pVM->hwaccm.s.fNestedPaging);
    5009     descriptor[0] = pVCpu->hwaccm.s.vmx.GCPhysEPTP;
     5008    Assert(pVM->hm.s.fNestedPaging);
     5009    descriptor[0] = pVCpu->hm.s.vmx.GCPhysEPTP;
    50105010    descriptor[1] = 0; /* MBZ. Intel spec. 33.3 VMX Instructions */
    50115011    int rc = VMXR0InvEPT(enmFlush, &descriptor[0]);
    5012     AssertMsg(rc == VINF_SUCCESS, ("VMXR0InvEPT %x %RGv failed with %d\n", enmFlush, pVCpu->hwaccm.s.vmx.GCPhysEPTP, rc));
     5012    AssertMsg(rc == VINF_SUCCESS, ("VMXR0InvEPT %x %RGv failed with %d\n", enmFlush, pVCpu->hm.s.vmx.GCPhysEPTP, rc));
    50135013}
    50145014
     
    50295029    uint64_t descriptor[2];
    50305030
    5031     Assert(pVM->hwaccm.s.vmx.fVPID);
     5031    Assert(pVM->hm.s.vmx.fVPID);
    50325032    if (enmFlush == VMX_FLUSH_VPID_ALL_CONTEXTS)
    50335033    {
     
    50385038    {
    50395039        AssertPtr(pVCpu);
    5040         AssertMsg(pVCpu->hwaccm.s.uCurrentASID != 0, ("VMXR0InvVPID invalid ASID %lu\n", pVCpu->hwaccm.s.uCurrentASID));
    5041         AssertMsg(pVCpu->hwaccm.s.uCurrentASID <= UINT16_MAX, ("VMXR0InvVPID invalid ASID %lu\n", pVCpu->hwaccm.s.uCurrentASID));
    5042         descriptor[0] = pVCpu->hwaccm.s.uCurrentASID;
     5040        AssertMsg(pVCpu->hm.s.uCurrentASID != 0, ("VMXR0InvVPID invalid ASID %lu\n", pVCpu->hm.s.uCurrentASID));
     5041        AssertMsg(pVCpu->hm.s.uCurrentASID <= UINT16_MAX, ("VMXR0InvVPID invalid ASID %lu\n", pVCpu->hm.s.uCurrentASID));
     5042        descriptor[0] = pVCpu->hm.s.uCurrentASID;
    50435043        descriptor[1] = GCPtr;
    50445044    }
    50455045    int rc = VMXR0InvVPID(enmFlush, &descriptor[0]); NOREF(rc);
    50465046    AssertMsg(rc == VINF_SUCCESS,
    5047               ("VMXR0InvVPID %x %x %RGv failed with %d\n", enmFlush, pVCpu ? pVCpu->hwaccm.s.uCurrentASID : 0, GCPtr, rc));
     5047              ("VMXR0InvVPID %x %x %RGv failed with %d\n", enmFlush, pVCpu ? pVCpu->hm.s.uCurrentASID : 0, GCPtr, rc));
    50485048}
    50495049
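
hmR0VmxFlushEPT and hmR0VmxFlushVPID above each build a 16-byte descriptor: INVEPT takes the EPTP plus a must-be-zero quadword, while INVVPID takes the VPID in the low 16 bits of the first quadword (upper bits MBZ) plus a linear address for individual-address flushes. A sketch of those layouts, matching the descriptor[2] arrays in the hunks (the demo* names are invented):

    #include <stdint.h>

    typedef struct DEMOINVEPTDESC
    {
        uint64_t u64Eptp;        /* EPT pointer to flush */
        uint64_t u64Rsvd;        /* must be zero */
    } DEMOINVEPTDESC;

    typedef struct DEMOINVVPIDDESC
    {
        uint64_t u64Vpid;        /* VPID in bits 0-15, bits 16-63 must be zero */
        uint64_t u64LinearAddr;  /* linear address, used for individual-address flushes */
    } DEMOINVVPIDDESC;

    /* Builds the descriptor an individual-address INVVPID would consume. */
    static DEMOINVVPIDDESC demoMakeInvVpidDesc(uint16_t uVpid, uint64_t GCPtr)
    {
        DEMOINVVPIDDESC Desc;
        Desc.u64Vpid       = uVpid;
        Desc.u64LinearAddr = GCPtr;
        return Desc;
    }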
     
    50735073         * function maybe called in a loop with individual addresses.
    50745074         */
    5075         if (pVM->hwaccm.s.vmx.fVPID)
     5075        if (pVM->hm.s.vmx.fVPID)
    50765076        {
    50775077            /* If we can flush just this page do it, otherwise flush as little as possible. */
    5078             if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV_ADDR)
     5078            if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV_ADDR)
    50795079                hmR0VmxFlushVPID(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, GCVirt);
    50805080            else
    50815081                VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    50825082        }
    5083         else if (pVM->hwaccm.s.fNestedPaging)
     5083        else if (pVM->hm.s.fNestedPaging)
    50845084            VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    50855085    }
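The invalidation path above always prefers the cheapest flush the hardware offers: an individual-address INVVPID when the capability bit is set, otherwise a deferred full TLB flush via the force-flag. A condensed sketch of that policy, with the capability checks reduced to booleans (helper and type names hypothetical):

    #include <stdbool.h>

    typedef enum { FLUSH_ONE_ADDR, FLUSH_ALL_DEFERRED, FLUSH_NONE } FLUSHACTIONSKETCH;

    /* Pick the cheapest TLB invalidation for a single guest-virtual page. */
    static FLUSHACTIONSKETCH sketchPickFlush(bool fVpid, bool fIndivAddrInvVpid,
                                             bool fNestedPaging)
    {
        if (fVpid)
            return fIndivAddrInvVpid ? FLUSH_ONE_ADDR      /* hmR0VmxFlushVPID above */
                                     : FLUSH_ALL_DEFERRED; /* VMCPU_FF_TLB_FLUSH     */
        if (fNestedPaging)
            return FLUSH_ALL_DEFERRED;                     /* VMCPU_FF_TLB_FLUSH     */
        return FLUSH_NONE;
    }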
     
    51475147                Log(("Current stack %08x\n", &rc2));
    51485148
    5149                 pVCpu->hwaccm.s.vmx.lasterror.ulInstrError = instrError;
    5150                 pVCpu->hwaccm.s.vmx.lasterror.ulExitReason = exitReason;
     5149                pVCpu->hm.s.vmx.lasterror.ulInstrError = instrError;
     5150                pVCpu->hm.s.vmx.lasterror.ulExitReason = exitReason;
    51515151
    51525152#ifdef VBOX_STRICT
     
    51835183                {
    51845184                    pDesc  = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK));
    5185                     HWACCMR0DumpDescriptor(pDesc, val, "CS: ");
     5185                    HMR0DumpDescriptor(pDesc, val, "CS: ");
    51865186                }
    51875187
     
    51915191                {
    51925192                    pDesc  = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK));
    5193                     HWACCMR0DumpDescriptor(pDesc, val, "DS: ");
     5193                    HMR0DumpDescriptor(pDesc, val, "DS: ");
    51945194                }
    51955195
     
    51995199                {
    52005200                    pDesc  = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK));
    5201                     HWACCMR0DumpDescriptor(pDesc, val, "ES: ");
     5201                    HMR0DumpDescriptor(pDesc, val, "ES: ");
    52025202                }
    52035203
     
    52075207                {
    52085208                    pDesc  = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK));
    5209                     HWACCMR0DumpDescriptor(pDesc, val, "FS: ");
     5209                    HMR0DumpDescriptor(pDesc, val, "FS: ");
    52105210                }
    52115211
     
    52155215                {
    52165216                    pDesc  = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK));
    5217                     HWACCMR0DumpDescriptor(pDesc, val, "GS: ");
     5217                    HMR0DumpDescriptor(pDesc, val, "GS: ");
    52185218                }
    52195219
     
    52235223                {
    52245224                    pDesc  = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK));
    5225                     HWACCMR0DumpDescriptor(pDesc, val, "SS: ");
     5225                    HMR0DumpDescriptor(pDesc, val, "SS: ");
    52265226                }
    52275227
     
    52315231                {
    52325232                    pDesc  = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK));
    5233                     HWACCMR0DumpDescriptor(pDesc, val, "TR: ");
     5233                    HMR0DumpDescriptor(pDesc, val, "TR: ");
    52345234                }
    52355235
     
    52925292    int             rc;
    52935293
    5294     pCpu = HWACCMR0GetCurrentCpu();
     5294    pCpu = HMR0GetCurrentCpu();
    52955295    HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
    52965296
     
    52985298    pCache->uPos = 1;
    52995299    pCache->interPD = PGMGetInterPaeCR3(pVM);
    5300     pCache->pSwitcher = (uint64_t)pVM->hwaccm.s.pfnHost32ToGuest64R0;
     5300    pCache->pSwitcher = (uint64_t)pVM->hm.s.pfnHost32ToGuest64R0;
    53015301#endif
    53025302
     
    53135313    aParam[0] = (uint32_t)(HCPhysCpuPage);                                  /* Param 1: VMXON physical address - Lo. */
    53145314    aParam[1] = (uint32_t)(HCPhysCpuPage >> 32);                            /* Param 1: VMXON physical address - Hi. */
    5315     aParam[2] = (uint32_t)(pVCpu->hwaccm.s.vmx.HCPhysVMCS);                 /* Param 2: VMCS physical address - Lo. */
    5316     aParam[3] = (uint32_t)(pVCpu->hwaccm.s.vmx.HCPhysVMCS >> 32);           /* Param 2: VMCS physical address - Hi. */
    5317     aParam[4] = VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hwaccm.s.vmx.VMCSCache);
     5315    aParam[2] = (uint32_t)(pVCpu->hm.s.vmx.HCPhysVMCS);                 /* Param 2: VMCS physical address - Lo. */
     5316    aParam[3] = (uint32_t)(pVCpu->hm.s.vmx.HCPhysVMCS >> 32);           /* Param 2: VMCS physical address - Hi. */
     5317    aParam[4] = VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache);
    53185318    aParam[5] = 0;
    53195319
    53205320#ifdef VBOX_WITH_CRASHDUMP_MAGIC
    5321     pCtx->dr[4] = pVM->hwaccm.s.vmx.pScratchPhys + 16 + 8;
    5322     *(uint32_t *)(pVM->hwaccm.s.vmx.pScratch + 16 + 8) = 1;
    5323 #endif
    5324     rc = VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hwaccm.s.pfnVMXGCStartVM64, 6, &aParam[0]);
     5321    pCtx->dr[4] = pVM->hm.s.vmx.pScratchPhys + 16 + 8;
     5322    *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 1;
     5323#endif
     5324    rc = VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hm.s.pfnVMXGCStartVM64, 6, &aParam[0]);
    53255325
    53265326#ifdef VBOX_WITH_CRASHDUMP_MAGIC
    5327     Assert(*(uint32_t *)(pVM->hwaccm.s.vmx.pScratch + 16 + 8) == 5);
     5327    Assert(*(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) == 5);
    53285328    Assert(pCtx->dr[4] == 10);
    5329     *(uint32_t *)(pVM->hwaccm.s.vmx.pScratch + 16 + 8) = 0xff;
     5329    *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 0xff;
    53305330#endif
    53315331
    53325332#ifdef DEBUG
    53335333    AssertMsg(pCache->TestIn.HCPhysCpuPage== HCPhysCpuPage, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysCpuPage, HCPhysCpuPage));
    5334     AssertMsg(pCache->TestIn.HCPhysVMCS   == pVCpu->hwaccm.s.vmx.HCPhysVMCS, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVMCS,
    5335                                                                               pVCpu->hwaccm.s.vmx.HCPhysVMCS));
     5334    AssertMsg(pCache->TestIn.HCPhysVMCS   == pVCpu->hm.s.vmx.HCPhysVMCS, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVMCS,
     5335                                                                              pVCpu->hm.s.vmx.HCPhysVMCS));
    53365336    AssertMsg(pCache->TestIn.HCPhysVMCS   == pCache->TestOut.HCPhysVMCS, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVMCS,
    53375337                                                                          pCache->TestOut.HCPhysVMCS));
    53385338    AssertMsg(pCache->TestIn.pCache       == pCache->TestOut.pCache, ("%RGv vs %RGv\n", pCache->TestIn.pCache,
    53395339                                                                      pCache->TestOut.pCache));
    5340     AssertMsg(pCache->TestIn.pCache       == VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hwaccm.s.vmx.VMCSCache),
    5341               ("%RGv vs %RGv\n", pCache->TestIn.pCache, VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hwaccm.s.vmx.VMCSCache)));
     5340    AssertMsg(pCache->TestIn.pCache       == VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache),
     5341              ("%RGv vs %RGv\n", pCache->TestIn.pCache, VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache)));
    53425342    AssertMsg(pCache->TestIn.pCtx         == pCache->TestOut.pCtx, ("%RGv vs %RGv\n", pCache->TestIn.pCtx,
    53435343                                                                    pCache->TestOut.pCtx));
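On 32-bit hosts the switcher parameters are plain 32-bit values, so each 64-bit physical address (VMXON page, VMCS) is passed as a lo/hi dword pair, exactly as aParam[0..3] are filled above. A trivial sketch of the split (function name hypothetical):

    #include <stdint.h>

    /* Split a 64-bit host-physical address into the two 32-bit parameters
     * the 32->64 switcher expects: low dword first, then high dword. */
    static void sketchSplitPhysAddr(uint64_t HCPhys, uint32_t *puLo, uint32_t *puHi)
    {
        *puLo = (uint32_t)HCPhys;
        *puHi = (uint32_t)(HCPhys >> 32);
    }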
     
    54665466    RTHCUINTREG     uOldEFlags;
    54675467
    5468     AssertReturn(pVM->hwaccm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER);
     5468    AssertReturn(pVM->hm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER);
    54695469    Assert(pfnHandler);
    5470     Assert(pVCpu->hwaccm.s.vmx.VMCSCache.Write.cValidEntries <= RT_ELEMENTS(pVCpu->hwaccm.s.vmx.VMCSCache.Write.aField));
    5471     Assert(pVCpu->hwaccm.s.vmx.VMCSCache.Read.cValidEntries <= RT_ELEMENTS(pVCpu->hwaccm.s.vmx.VMCSCache.Read.aField));
     5470    Assert(pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Write.aField));
     5471    Assert(pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Read.aField));
    54725472
    54735473#ifdef VBOX_STRICT
    5474     for (unsigned i=0;i<pVCpu->hwaccm.s.vmx.VMCSCache.Write.cValidEntries;i++)
    5475         Assert(hmR0VmxIsValidWriteField(pVCpu->hwaccm.s.vmx.VMCSCache.Write.aField[i]));
    5476 
    5477     for (unsigned i=0;i<pVCpu->hwaccm.s.vmx.VMCSCache.Read.cValidEntries;i++)
    5478         Assert(hmR0VmxIsValidReadField(pVCpu->hwaccm.s.vmx.VMCSCache.Read.aField[i]));
     5474    for (unsigned i=0;i<pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries;i++)
     5475        Assert(hmR0VmxIsValidWriteField(pVCpu->hm.s.vmx.VMCSCache.Write.aField[i]));
     5476
     5477    for (unsigned i=0;i<pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries;i++)
     5478        Assert(hmR0VmxIsValidReadField(pVCpu->hm.s.vmx.VMCSCache.Read.aField[i]));
    54795479#endif
    54805480
     
    54875487#endif
    54885488
    5489     pCpu = HWACCMR0GetCurrentCpu();
     5489    pCpu = HMR0GetCurrentCpu();
    54905490    HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
    54915491
    54925492    /* Clear VMCS. Marking it inactive, clearing implementation-specific data and writing VMCS data back to memory. */
    5493     VMXClearVMCS(pVCpu->hwaccm.s.vmx.HCPhysVMCS);
     5493    VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVMCS);
    54945494
    54955495    /* Leave VMX Root Mode. */
     
    55035503        CPUMPushHyper(pVCpu, paParam[i]);
    55045504
    5505     STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatWorldSwitch3264, z);
     5505    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatWorldSwitch3264, z);
    55065506
    55075507    /* Call switcher. */
    5508     rc = pVM->hwaccm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum));
    5509     STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatWorldSwitch3264, z);
     5508    rc = pVM->hm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum));
     5509    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatWorldSwitch3264, z);
    55105510
    55115511    /* Make sure the VMX instructions don't cause #UD faults. */
     
    55215521    }
    55225522
    5523     rc2 = VMXActivateVMCS(pVCpu->hwaccm.s.vmx.HCPhysVMCS);
     5523    rc2 = VMXActivateVMCS(pVCpu->hm.s.vmx.HCPhysVMCS);
    55245524    AssertRC(rc2);
    55255525    Assert(!(ASMGetFlags() & X86_EFL_IF));
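The sequence above brackets the world switch: the current VMCS is cleared (its state written back to memory) before VMX root mode is left, and re-activated once the switcher returns. A compressed sketch of that bracket, assuming the simplified signatures below and eliding the root-mode and EFLAGS handling:

    #include <stdint.h>

    int VMXClearVMCS(uint64_t HCPhysVmcs);     /* as called above */
    int VMXActivateVMCS(uint64_t HCPhysVmcs);  /* as called above */

    /* Clear the VMCS, run the 64-bit worker, then make the VMCS current again. */
    static int sketchSwitcherBracket(uint64_t HCPhysVmcs, int (*pfnWorker)(void))
    {
        int rc = VMXClearVMCS(HCPhysVmcs);
        if (rc != 0)
            return rc;
        rc = pfnWorker();                        /* pfnHost32ToGuest64R0 path */
        int rc2 = VMXActivateVMCS(HCPhysVmcs);
        return rc != 0 ? rc : rc2;
    }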
     
    56095609VMMR0DECL(int) VMXWriteCachedVMCSEx(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
    56105610{
    5611     PVMCSCACHE pCache = &pVCpu->hwaccm.s.vmx.VMCSCache;
     5611    PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
    56125612
    56135613    AssertMsgReturn(pCache->Write.cValidEntries < VMCSCACHE_MAX_ENTRY - 1,
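VMXWriteCachedVMCSEx queues the write in the per-VCPU VMCS cache instead of issuing a VMWRITE directly; the guard above rejects a full cache. Assuming that model, a simplified standalone sketch of the append (types and limit are stand-ins):

    #include <stdint.h>

    #define SKETCH_CACHE_MAX 128   /* stand-in for VMCSCACHE_MAX_ENTRY */

    typedef struct SKETCHWRITECACHE
    {
        uint32_t cValidEntries;
        struct { uint32_t idxField; uint64_t u64Val; } aEntries[SKETCH_CACHE_MAX];
    } SKETCHWRITECACHE;

    /* Defer one VMCS write; the cached entries are flushed in one batch later. */
    static int sketchWriteCached(SKETCHWRITECACHE *pCache, uint32_t idxField, uint64_t u64Val)
    {
        if (pCache->cValidEntries >= SKETCH_CACHE_MAX - 1)
            return -1;  /* mirrors the AssertMsgReturn guard above */
        pCache->aEntries[pCache->cValidEntries].idxField = idxField;
        pCache->aEntries[pCache->cValidEntries].u64Val   = u64Val;
        pCache->cValidEntries++;
        return 0;
    }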
  • trunk/src/VBox/VMM/VMMR0/HWVMXR0.h

    r43307 r43387  
    2424#include <VBox/vmm/stam.h>
    2525#include <VBox/dis.h>
    26 #include <VBox/vmm/hwaccm.h>
     26#include <VBox/vmm/hm.h>
    2727#include <VBox/vmm/pgm.h>
    28 #include <VBox/vmm/hwacc_vmx.h>
     28#include <VBox/vmm/hm_vmx.h>
    2929
    3030RT_C_DECLS_BEGIN
     
    220220        else                                                                                    \
    221221        if (    CPUMIsGuestInRealModeEx(pCtx)                                                   \
    222             &&  !pVM->hwaccm.s.vmx.fUnrestrictedGuest)                                          \
     222            &&  !pVM->hm.s.vmx.fUnrestrictedGuest)                                          \
    223223        {                                                                                       \
    224224            /* Must override this or else VT-x will fail with invalid guest state errors. */    \
     
    291291{
    292292    Assert(idxCache <= VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX);
    293     *pVal = pVCpu->hwaccm.s.vmx.VMCSCache.Read.aFieldVal[idxCache];
     293    *pVal = pVCpu->hm.s.vmx.VMCSCache.Read.aFieldVal[idxCache];
    294294    return VINF_SUCCESS;
    295295}
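The inline above answers VMCS reads for the nested-paging exit path straight from the per-VCPU read cache, with no VMREAD. A standalone sketch of the accessor under that assumption (types and bound are stand-ins):

    #include <stdint.h>

    #define SKETCH_MAX_CACHE_IDX 7   /* stand-in for VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX */

    typedef struct { uint64_t aFieldVal[SKETCH_MAX_CACHE_IDX + 1]; } SKETCHREADCACHE;

    /* Return a cached VMCS field value; the cache is assumed to have been
     * filled on VM-exit, so no VMREAD is needed here. */
    static int sketchReadCached(const SKETCHREADCACHE *pCache, unsigned idxCache,
                                uint64_t *pVal)
    {
        if (idxCache > SKETCH_MAX_CACHE_IDX)
            return -1;   /* the real code asserts this instead */
        *pVal = pCache->aFieldVal[idxCache];
        return 0;        /* VINF_SUCCESS in the original */
    }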
  • trunk/src/VBox/VMM/VMMR0/PDMR0Device.cpp

    r42222 r43387  
    2828#include <VBox/vmm/vmm.h>
    2929#include <VBox/vmm/patm.h>
    30 #include <VBox/vmm/hwaccm.h>
     30#include <VBox/vmm/hm.h>
    3131
    3232#include <VBox/log.h>
     
    374374    PDMDEV_ASSERT_DEVINS(pDevIns);
    375375    LogFlow(("pdmR0DevHlp_GetVM: caller='%p'/%d\n", pDevIns, pDevIns->iInstance));
    376     return HWACCMCanEmulateIoBlock(VMMGetCpu(pDevIns->Internal.s.pVMR0));
     376    return HMCanEmulateIoBlock(VMMGetCpu(pDevIns->Internal.s.pVMR0));
    377377}
    378378
  • trunk/src/VBox/VMM/VMMR0/VMMR0.cpp

    r43379 r43387  
    3737#include <VBox/vmm/gmm.h>
    3838#include <VBox/intnet.h>
    39 #include <VBox/vmm/hwaccm.h>
     39#include <VBox/vmm/hm.h>
    4040#include <VBox/param.h>
    4141#include <VBox/err.h>
     
    117117
    118118    /*
    119      * Initialize the VMM, GVMM, GMM, HWACCM, PGM (Darwin) and INTNET.
     119     * Initialize the VMM, GVMM, GMM, HM, PGM (Darwin) and INTNET.
    120120     */
    121121    int rc = vmmInitFormatTypes();
     
    128128            if (RT_SUCCESS(rc))
    129129            {
    130                 rc = HWACCMR0Init();
     130                rc = HMR0Init();
    131131                if (RT_SUCCESS(rc))
    132132                {
     
    188188                    else
    189189                        LogRel(("ModuleInit: PGMRegisterStringFormatTypes -> %Rrc\n", rc));
    190                     HWACCMR0Term();
     190                    HMR0Term();
    191191                }
    192192                else
    193                     LogRel(("ModuleInit: HWACCMR0Init -> %Rrc\n", rc));
     193                    LogRel(("ModuleInit: HMR0Init -> %Rrc\n", rc));
    194194                GMMR0Term();
    195195            }
     
    231231
    232232    /*
    233      * PGM (Darwin), HWACCM and PciRaw global cleanup.
     233     * PGM (Darwin), HM and PciRaw global cleanup.
    234234     */
    235235#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
     
    240240#endif
    241241    PGMDeregisterStringFormatTypes();
    242     HWACCMR0Term();
     242    HMR0Term();
    243243#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
    244244    vmmR0TripleFaultHackTerm();
     
    342342    {
    343343        /*
    344          * Init HWACCM, CPUM and PGM (Darwin only).
    345          */
    346         rc = HWACCMR0InitVM(pVM);
     344         * Init HM, CPUM and PGM (Darwin only).
     345         */
     346        rc = HMR0InitVM(pVM);
    347347        if (RT_SUCCESS(rc))
    348348        {
     
    370370            PciRawR0TermVM(pVM);
    371371#endif
    372             HWACCMR0TermVM(pVM);
     372            HMR0TermVM(pVM);
    373373        }
    374374    }
     
    410410        PGMR0DynMapTermVM(pVM);
    411411#endif
    412         HWACCMR0TermVM(pVM);
     412        HMR0TermVM(pVM);
    413413    }
    414414
     
    603603            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
    604604            break;
    605         case VINF_EM_HWACCM_PATCH_TPR_INSTR:
     605        case VINF_EM_HM_PATCH_TPR_INSTR:
    606606            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
    607607            break;
     
    662662            /* Some safety precautions first. */
    663663#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    664             if (RT_LIKELY(   !pVM->vmm.s.fSwitcherDisabled /* hwaccm */
     664            if (RT_LIKELY(   !pVM->vmm.s.fSwitcherDisabled /* hm */
    665665                          && pVM->cCpus == 1               /* !smp */
    666666                          && PGMGetHyperCR3(pVCpu)))
     
    683683                /* We might need to disable VT-x if the active switcher turns off paging. */
    684684                bool fVTxDisabled;
    685                 int rc = HWACCMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
     685                int rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
    686686                if (RT_SUCCESS(rc))
    687687                {
     
    705705
    706706                    /* Re-enable VT-x if previously turned off. */
    707                     HWACCMR0LeaveSwitcher(pVM, fVTxDisabled);
     707                    HMR0LeaveSwitcher(pVM, fVTxDisabled);
    708708
    709709                    if (    rc == VINF_EM_RAW_INTERRUPT
     
    770770#endif
    771771            int rc;
    772             if (!HWACCMR0SuspendPending())
     772            if (!HMR0SuspendPending())
    773773            {
    774                 rc = HWACCMR0Enter(pVM, pVCpu);
     774                rc = HMR0Enter(pVM, pVCpu);
    775775                if (RT_SUCCESS(rc))
    776776                {
    777                     rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, HWACCMR0RunGuestCode, pVM, pVCpu); /* this may resume code. */
    778                     int rc2 = HWACCMR0Leave(pVM, pVCpu);
     777                    rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pVM, pVCpu); /* this may resume code. */
     778                    int rc2 = HMR0Leave(pVM, pVCpu);
    779779                    AssertRC(rc2);
    780780                }
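The ring-0 run path above is a strict bracket: HMR0Enter, then the guest-code loop through the long-jump buffer, then HMR0Leave even when the run failed, with the leave status asserted rather than propagated. A reduced sketch of that control flow (hypothetical signatures, setjmp/longjmp machinery elided):

    /* Enter/run/leave bracket; rc2 from the leave path must not mask rc. */
    typedef int (*SKETCHHMFN)(void *pVM, void *pVCpu);

    static int sketchHmRunBracket(void *pVM, void *pVCpu, SKETCHHMFN pfnEnter,
                                  SKETCHHMFN pfnRun, SKETCHHMFN pfnLeave)
    {
        int rc = pfnEnter(pVM, pVCpu);
        if (rc == 0)                            /* VINF_SUCCESS-like */
        {
            rc = pfnRun(pVM, pVCpu);            /* may resume via the jump buffer */
            int rc2 = pfnLeave(pVM, pVCpu);     /* always leave */
            (void)rc2;                          /* asserted, not returned, above  */
        }
        return rc;
    }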
     
    962962
    963963        /*
    964          * Attempt to enable hwacc mode and check the current setting.
     964         * Attempt to enable hm mode and check the current setting.
    965965         */
    966966        case VMMR0_DO_HWACC_ENABLE:
    967             return HWACCMR0EnableAllCpus(pVM);
     967            return HMR0EnableAllCpus(pVM);
    968968
    969969        /*
     
    971971         */
    972972        case VMMR0_DO_HWACC_SETUP_VM:
    973             return HWACCMR0SetupVM(pVM);
     973            return HMR0SetupVM(pVM);
    974974
    975975        /*
     
    981981            bool fVTxDisabled;
    982982
    983             /* Safety precaution as HWACCM can disable the switcher. */
     983            /* Safety precaution as HM can disable the switcher. */
    984984            Assert(!pVM->vmm.s.fSwitcherDisabled);
    985985            if (RT_UNLIKELY(pVM->vmm.s.fSwitcherDisabled))
     
    999999
    10001000            /* We might need to disable VT-x if the active switcher turns off paging. */
    1001             rc = HWACCMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
     1001            rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
    10021002            if (RT_FAILURE(rc))
    10031003                return rc;
     
    10061006
    10071007            /* Re-enable VT-x if previously turned off. */
    1008             HWACCMR0LeaveSwitcher(pVM, fVTxDisabled);
     1008            HMR0LeaveSwitcher(pVM, fVTxDisabled);
    10091009
    10101010            /** @todo dispatch interrupts? */
     
    12841284            if (idCpu == NIL_VMCPUID)
    12851285                return VERR_INVALID_CPU_ID;
    1286             return HWACCMR0TestSwitcher3264(pVM);
     1286            return HMR0TestSwitcher3264(pVM);
    12871287#endif
    12881288        default:
  • trunk/src/VBox/VMM/VMMR3/CPUM.cpp

    r42894 r43387  
    4444#include <VBox/vmm/dbgf.h>
    4545#include <VBox/vmm/patm.h>
    46 #include <VBox/vmm/hwaccm.h>
     46#include <VBox/vmm/hm.h>
    4747#include <VBox/vmm/ssm.h>
    4848#include "CPUMInternal.h"
     
    19431943     * intercept CPUID instructions for user mode applications.
    19441944     */
    1945     if (!HWACCMIsEnabled(pVM))
     1945    if (!HMIsEnabled(pVM))
    19461946    {
    19471947        /* CPUID(0) */
     
    25102510            {
    25112511                PVMCPU      pVCpu  = &pVM->aCpus[iCpu];
    2512                 bool const  fValid = HWACCMIsEnabled(pVM)
     2512                bool const  fValid = HMIsEnabled(pVM)
    25132513                                  || (   uVersion > CPUM_SAVED_STATE_VERSION_VER3_2
    25142514                                      && !(pVCpu->cpum.s.fChanged & CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID));
     
    39433943            pState->pvPageGC = GCPtr & PAGE_BASE_GC_MASK;
    39443944            if (    MMHyperIsInsideArea(pState->pVM, pState->pvPageGC)
    3945                 &&  !HWACCMIsEnabled(pState->pVM))
     3945                &&  !HMIsEnabled(pState->pVM))
    39463946            {
    39473947                pState->pvPageR3 = MMHyperRCToR3(pState->pVM, (RTRCPTR)pState->pvPageGC);
  • trunk/src/VBox/VMM/VMMR3/DBGF.cpp

    r42833 r43387  
    7777#endif
    7878#include <VBox/vmm/em.h>
    79 #include <VBox/vmm/hwaccm.h>
     79#include <VBox/vmm/hm.h>
    8080#include "DBGFInternal.h"
    8181#include <VBox/vmm/vm.h>
     
    253253    int cWait = 10;
    254254# else
    255     int cWait = HWACCMIsEnabled(pVM)
     255    int cWait = HMIsEnabled(pVM)
    256256             && (   enmEvent == DBGFEVENT_ASSERTION_HYPER
    257257                 || enmEvent == DBGFEVENT_FATAL_ERROR)
  • trunk/src/VBox/VMM/VMMR3/DBGFMem.cpp

    r42165 r43387  
    2424#include <VBox/vmm/pgm.h>
    2525#include <VBox/vmm/selm.h>
    26 #include <VBox/vmm/hwaccm.h>
     26#include <VBox/vmm/hm.h>
    2727#include "DBGFInternal.h"
    2828#include <VBox/vmm/vm.h>
     
    421421    else
    422422    {
    423         if (HWACCMIsEnabled(pVM))
     423        if (HMIsEnabled(pVM))
    424424            rc = VERR_INVALID_STATE;
    425425        else
  • trunk/src/VBox/VMM/VMMR3/EM.cpp

    r42698 r43387  
    2626 *
    2727 * The interpreted execution is only used to avoid switching between
    28  * raw-mode/hwaccm and the recompiler when fielding virtualization traps/faults.
     28 * raw-mode/hm and the recompiler when fielding virtualization traps/faults.
    2929 * The interpretation is thus implemented as part of EM.
    3030 *
     
    5656#include <VBox/vmm/pdmcritsect.h>
    5757#include <VBox/vmm/pdmqueue.h>
    58 #include <VBox/vmm/hwaccm.h>
     58#include <VBox/vmm/hm.h>
    5959#include <VBox/vmm/patm.h>
    6060#ifdef IEM_VERIFICATION_MODE
     
    8080*******************************************************************************/
    8181#if 0 /* Disabled till after 2.1.0 when we've time to test it. */
    82 #define EM_NOTIFY_HWACCM
     82#define EM_NOTIFY_HM
    8383#endif
    8484
     
    632632
    633633    /*
    634      * Force rescheduling if in RAW, HWACCM or REM.
     634     * Force rescheduling if in RAW, HM or REM.
    635635     */
    636636    return    pVCpu->em.s.enmState == EMSTATE_RAW
     
    11871187
    11881188    X86EFLAGS EFlags = pCtx->eflags;
    1189     if (HWACCMIsEnabled(pVM))
     1189    if (HMIsEnabled(pVM))
    11901190    {
    11911191        /*
     
    11961196         */
    11971197        if (   EMIsHwVirtExecutionEnabled(pVM)
    1198             && HWACCMR3CanExecuteGuest(pVM, pCtx))
     1198            && HMR3CanExecuteGuest(pVM, pCtx))
    11991199            return EMSTATE_HWACC;
    12001200
     
    16891689            &&  !TRPMHasTrap(pVCpu) /* an interrupt could already be scheduled for dispatching in the recompiler. */
    16901690            &&  PATMAreInterruptsEnabled(pVM)
    1691             &&  !HWACCMR3IsEventPending(pVCpu))
     1691            &&  !HMR3IsEventPending(pVCpu))
    16921692        {
    16931693            Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
  • trunk/src/VBox/VMM/VMMR3/EMHM.cpp

    r43373 r43387  
    3838#include <VBox/vmm/pdmcritsect.h>
    3939#include <VBox/vmm/pdmqueue.h>
    40 #include <VBox/vmm/hwaccm.h>
     40#include <VBox/vmm/hm.h>
    4141#include "EMInternal.h"
    4242#include "internal/em.h"
     
    5555*******************************************************************************/
    5656#if 0 /* Disabled till after 2.1.0 when we've time to test it. */
    57 #define EM_NOTIFY_HWACCM
     57#define EM_NOTIFY_HM
    5858#endif
    5959
     
    6666static int emR3HwaccmForcedActions(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
    6767
    68 #define EMHANDLERC_WITH_HWACCM
     68#define EMHANDLERC_WITH_HM
    6969#include "EMHandleRCTmpl.h"
    7070
     
    141141        rc = emR3HwAccStep(pVM, pVCpu);
    142142        if (    rc != VINF_SUCCESS
    143             ||  !HWACCMR3CanExecuteGuest(pVM, pVCpu->em.s.pCtx))
     143            ||  !HMR3CanExecuteGuest(pVM, pVCpu->em.s.pCtx))
    144144            break;
    145145    }
     
    216216            if (RT_SUCCESS(rc))
    217217            {
    218 #ifdef EM_NOTIFY_HWACCM
     218#ifdef EM_NOTIFY_HM
    219219                if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HWACC)
    220                     HWACCMR3NotifyEmulated(pVCpu);
     220                    HMR3NotifyEmulated(pVCpu);
    221221#endif
    222222                STAM_PROFILE_STOP(&pVCpu->em.s.StatMiscEmu, a);
     
    246246    STAM_PROFILE_STOP(&pVCpu->em.s.StatREMEmu, a);
    247247
    248 #ifdef EM_NOTIFY_HWACCM
     248#ifdef EM_NOTIFY_HM
    249249    if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HWACC)
    250         HWACCMR3NotifyEmulated(pVCpu);
     250        HMR3NotifyEmulated(pVCpu);
    251251#endif
    252252    return rc;
     
    288288
    289289    /* Try to restart the io instruction that was refused in ring-0. */
    290     VBOXSTRICTRC rcStrict = HWACCMR3RestartPendingIOInstr(pVM, pVCpu, pCtx);
     290    VBOXSTRICTRC rcStrict = HMR3RestartPendingIOInstr(pVM, pVCpu, pCtx);
    291291    if (IOM_SUCCESS(rcStrict))
    292292    {
     
    474474    STAM_COUNTER_INC(&pVCpu->em.s.StatHwAccExecuteEntry);
    475475
    476 #ifdef EM_NOTIFY_HWACCM
    477     HWACCMR3NotifyScheduled(pVCpu);
     476#ifdef EM_NOTIFY_HM
     477    HMR3NotifyScheduled(pVCpu);
    478478#endif
    479479
     
    486486
    487487        /* Check if a forced reschedule is pending. */
    488         if (HWACCMR3IsRescheduleRequired(pVM, pCtx))
     488        if (HMR3IsRescheduleRequired(pVM, pCtx))
    489489        {
    490490            rc = VINF_EM_RESCHEDULE;
     
    495495         * Process high priority pre-execution raw-mode FFs.
    496496         */
    497         VMCPU_FF_CLEAR(pVCpu, (VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_TSS)); /* not relevant in HWACCM mode; shouldn't be set really. */
     497        VMCPU_FF_CLEAR(pVCpu, (VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_TSS)); /* not relevant in HM mode; shouldn't be set really. */
    498498        if (    VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
    499499            ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
  • trunk/src/VBox/VMM/VMMR3/HM.cpp

    r43373 r43387  
    11/* $Id$ */
    22/** @file
    3  * HWACCM - Intel/AMD VM Hardware Support Manager.
     3 * HM - Intel/AMD VM Hardware Support Manager.
    44 */
    55
     
    1919*   Header Files                                                               *
    2020*******************************************************************************/
    21 #define LOG_GROUP LOG_GROUP_HWACCM
     21#define LOG_GROUP LOG_GROUP_HM
    2222#include <VBox/vmm/cpum.h>
    2323#include <VBox/vmm/stam.h>
     
    3535# include <VBox/vmm/rem.h>
    3636#endif
    37 #include <VBox/vmm/hwacc_vmx.h>
    38 #include <VBox/vmm/hwacc_svm.h>
    39 #include "HWACCMInternal.h"
     37#include <VBox/vmm/hm_vmx.h>
     38#include <VBox/vmm/hm_svm.h>
     39#include "HMInternal.h"
    4040#include <VBox/vmm/vm.h>
    4141#include <VBox/err.h>
     
    271271*   Internal Functions                                                         *
    272272*******************************************************************************/
    273 static DECLCALLBACK(int) hwaccmR3Save(PVM pVM, PSSMHANDLE pSSM);
    274 static DECLCALLBACK(int) hwaccmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
    275 static int hwaccmR3InitCPU(PVM pVM);
    276 static int hwaccmR3InitFinalizeR0(PVM pVM);
    277 static int hwaccmR3TermCPU(PVM pVM);
    278 
    279 
    280 /**
    281  * Initializes the HWACCM.
     273static DECLCALLBACK(int) hmR3Save(PVM pVM, PSSMHANDLE pSSM);
     274static DECLCALLBACK(int) hmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
     275static int hmR3InitCPU(PVM pVM);
     276static int hmR3InitFinalizeR0(PVM pVM);
     277static int hmR3TermCPU(PVM pVM);
     278
     279
     280/**
     281 * Initializes the HM.
    282282 *
    283283 * @returns VBox status code.
    284284 * @param   pVM         Pointer to the VM.
    285285 */
    286 VMMR3DECL(int) HWACCMR3Init(PVM pVM)
    287 {
    288     LogFlow(("HWACCMR3Init\n"));
     286VMMR3DECL(int) HMR3Init(PVM pVM)
     287{
     288    LogFlow(("HMR3Init\n"));
    289289
    290290    /*
    291291     * Assert alignment and sizes.
    292292     */
    293     AssertCompileMemberAlignment(VM, hwaccm.s, 32);
    294     AssertCompile(sizeof(pVM->hwaccm.s) <= sizeof(pVM->hwaccm.padding));
     293    AssertCompileMemberAlignment(VM, hm.s, 32);
     294    AssertCompile(sizeof(pVM->hm.s) <= sizeof(pVM->hm.padding));
    295295
    296296    /* Some structure checks. */
     
    315315     * Register the saved state data unit.
    316316     */
    317     int rc = SSMR3RegisterInternal(pVM, "HWACCM", 0, HWACCM_SSM_VERSION, sizeof(HWACCM),
     317    int rc = SSMR3RegisterInternal(pVM, "HM", 0, HM_SSM_VERSION, sizeof(HM),
    318318                                   NULL, NULL, NULL,
    319                                    NULL, hwaccmR3Save, NULL,
    320                                    NULL, hwaccmR3Load, NULL);
     319                                   NULL, hmR3Save, NULL,
     320                                   NULL, hmR3Load, NULL);
    321321    if (RT_FAILURE(rc))
    322322        return rc;
    323323
    324324    /* Misc initialisation. */
    325     pVM->hwaccm.s.vmx.fSupported = false;
    326     pVM->hwaccm.s.svm.fSupported = false;
    327     pVM->hwaccm.s.vmx.fEnabled   = false;
    328     pVM->hwaccm.s.svm.fEnabled   = false;
    329 
    330     pVM->hwaccm.s.fNestedPaging  = false;
    331     pVM->hwaccm.s.fLargePages    = false;
     325    pVM->hm.s.vmx.fSupported = false;
     326    pVM->hm.s.svm.fSupported = false;
     327    pVM->hm.s.vmx.fEnabled   = false;
     328    pVM->hm.s.svm.fEnabled   = false;
     329
     330    pVM->hm.s.fNestedPaging  = false;
     331    pVM->hm.s.fLargePages    = false;
    332332
    333333    /* Disabled by default. */
    334     pVM->fHWACCMEnabled = false;
     334    pVM->fHMEnabled = false;
    335335
    336336    /*
     
    340340    PCFGMNODE pHWVirtExt = CFGMR3GetChild(pRoot, "HWVirtExt/");
    341341    /* Nested paging: disabled by default. */
    342     rc = CFGMR3QueryBoolDef(pHWVirtExt, "EnableNestedPaging", &pVM->hwaccm.s.fAllowNestedPaging, false);
     342    rc = CFGMR3QueryBoolDef(pHWVirtExt, "EnableNestedPaging", &pVM->hm.s.fAllowNestedPaging, false);
    343343    AssertRC(rc);
    344344
    345345    /* Large pages: disabled by default. */
    346     rc = CFGMR3QueryBoolDef(pHWVirtExt, "EnableLargePages", &pVM->hwaccm.s.fLargePages, false);
     346    rc = CFGMR3QueryBoolDef(pHWVirtExt, "EnableLargePages", &pVM->hm.s.fLargePages, false);
    347347    AssertRC(rc);
    348348
    349349    /* VT-x VPID: disabled by default. */
    350     rc = CFGMR3QueryBoolDef(pHWVirtExt, "EnableVPID", &pVM->hwaccm.s.vmx.fAllowVPID, false);
     350    rc = CFGMR3QueryBoolDef(pHWVirtExt, "EnableVPID", &pVM->hm.s.vmx.fAllowVPID, false);
    351351    AssertRC(rc);
    352352
    353     /* HWACCM support must be explicitely enabled in the configuration file. */
    354     rc = CFGMR3QueryBoolDef(pHWVirtExt, "Enabled", &pVM->hwaccm.s.fAllowed, false);
     353    /* HM support must be explicitly enabled in the configuration file. */
     354    rc = CFGMR3QueryBoolDef(pHWVirtExt, "Enabled", &pVM->hm.s.fAllowed, false);
    355355    AssertRC(rc);
    356356
    357357    /* TPR patching for 32 bits (Windows) guests with IO-APIC: disabled by default. */
    358     rc = CFGMR3QueryBoolDef(pHWVirtExt, "TPRPatchingEnabled", &pVM->hwaccm.s.fTRPPatchingAllowed, false);
     358    rc = CFGMR3QueryBoolDef(pHWVirtExt, "TPRPatchingEnabled", &pVM->hm.s.fTRPPatchingAllowed, false);
    359359    AssertRC(rc);
    360360
    361361#ifdef RT_OS_DARWIN
    362     if (VMMIsHwVirtExtForced(pVM) != pVM->hwaccm.s.fAllowed)
     362    if (VMMIsHwVirtExtForced(pVM) != pVM->hm.s.fAllowed)
    363363#else
    364     if (VMMIsHwVirtExtForced(pVM) && !pVM->hwaccm.s.fAllowed)
     364    if (VMMIsHwVirtExtForced(pVM) && !pVM->hm.s.fAllowed)
    365365#endif
    366366    {
    367367        AssertLogRelMsgFailed(("VMMIsHwVirtExtForced=%RTbool fAllowed=%RTbool\n",
    368                                VMMIsHwVirtExtForced(pVM), pVM->hwaccm.s.fAllowed));
    369         return VERR_HWACCM_CONFIG_MISMATCH;
     368                               VMMIsHwVirtExtForced(pVM), pVM->hm.s.fAllowed));
     369        return VERR_HM_CONFIG_MISMATCH;
    370370    }
    371371
    372372    if (VMMIsHwVirtExtForced(pVM))
    373         pVM->fHWACCMEnabled = true;
     373        pVM->fHMEnabled = true;
    374374
    375375#if HC_ARCH_BITS == 32
     
    378378     * (To use the default, don't set 64bitEnabled in CFGM.)
    379379     */
    380     rc = CFGMR3QueryBoolDef(pHWVirtExt, "64bitEnabled", &pVM->hwaccm.s.fAllow64BitGuests, false);
     380    rc = CFGMR3QueryBoolDef(pHWVirtExt, "64bitEnabled", &pVM->hm.s.fAllow64BitGuests, false);
    381381    AssertLogRelRCReturn(rc, rc);
    382     if (pVM->hwaccm.s.fAllow64BitGuests)
     382    if (pVM->hm.s.fAllow64BitGuests)
    383383    {
    384384# ifdef RT_OS_DARWIN
    385385        if (!VMMIsHwVirtExtForced(pVM))
    386386# else
    387         if (!pVM->hwaccm.s.fAllowed)
     387        if (!pVM->hm.s.fAllowed)
    388388# endif
    389389            return VM_SET_ERROR(pVM, VERR_INVALID_PARAMETER, "64-bit guest support was requested without also enabling HWVirtEx (VT-x/AMD-V).");
     
    394394     * via VBoxInternal/HWVirtExt/64bitEnabled=0. (ConsoleImpl2.cpp doesn't set this to false for 64-bit.)*
    395395     */
    396     rc = CFGMR3QueryBoolDef(pHWVirtExt, "64bitEnabled", &pVM->hwaccm.s.fAllow64BitGuests, true);
     396    rc = CFGMR3QueryBoolDef(pHWVirtExt, "64bitEnabled", &pVM->hm.s.fAllow64BitGuests, true);
    397397    AssertLogRelRCReturn(rc, rc);
    398398#endif
     
    405405     *  Default false for Mac OS X and Windows due to the higher risk of conflicts with other hypervisors.
    406406     */
    407     rc = CFGMR3QueryBoolDef(pHWVirtExt, "Exclusive", &pVM->hwaccm.s.fGlobalInit,
     407    rc = CFGMR3QueryBoolDef(pHWVirtExt, "Exclusive", &pVM->hm.s.fGlobalInit,
    408408#if defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS)
    409409                            false
     
    414414
    415415    /* Max number of resume loops. */
    416     rc = CFGMR3QueryU32Def(pHWVirtExt, "MaxResumeLoops", &pVM->hwaccm.s.cMaxResumeLoops, 0 /* set by R0 later */);
     416    rc = CFGMR3QueryU32Def(pHWVirtExt, "MaxResumeLoops", &pVM->hm.s.cMaxResumeLoops, 0 /* set by R0 later */);
    417417    AssertRC(rc);
    418418
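All of these keys sit under the HWVirtExt/ node of the VM's CFGM tree; as the 64bitEnabled comment above notes, they can be overridden from the host side through extradata. For illustration only (the VM name is a placeholder), such an override would look like:

    VBoxManage setextradata "My VM" "VBoxInternal/HWVirtExt/64bitEnabled" 0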
     
    422422
    423423/**
    424  * Initializes the per-VCPU HWACCM.
     424 * Initializes the per-VCPU HM.
    425425 *
    426426 * @returns VBox status code.
    427427 * @param   pVM         Pointer to the VM.
    428428 */
    429 static int hwaccmR3InitCPU(PVM pVM)
    430 {
    431     LogFlow(("HWACCMR3InitCPU\n"));
     429static int hmR3InitCPU(PVM pVM)
     430{
     431    LogFlow(("HMR3InitCPU\n"));
    432432
    433433    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     
    435435        PVMCPU pVCpu = &pVM->aCpus[i];
    436436
    437         pVCpu->hwaccm.s.fActive = false;
     437        pVCpu->hm.s.fActive = false;
    438438    }
    439439
    440440#ifdef VBOX_WITH_STATISTICS
    441     STAM_REG(pVM, &pVM->hwaccm.s.StatTPRPatchSuccess,   STAMTYPE_COUNTER, "/HWACCM/TPR/Patch/Success",  STAMUNIT_OCCURENCES,    "Number of times an instruction was successfully patched.");
    442     STAM_REG(pVM, &pVM->hwaccm.s.StatTPRPatchFailure,   STAMTYPE_COUNTER, "/HWACCM/TPR/Patch/Failed",   STAMUNIT_OCCURENCES,    "Number of unsuccessful patch attempts.");
    443     STAM_REG(pVM, &pVM->hwaccm.s.StatTPRReplaceSuccess, STAMTYPE_COUNTER, "/HWACCM/TPR/Replace/Success",STAMUNIT_OCCURENCES,    "Number of times an instruction was successfully patched.");
    444     STAM_REG(pVM, &pVM->hwaccm.s.StatTPRReplaceFailure, STAMTYPE_COUNTER, "/HWACCM/TPR/Replace/Failed", STAMUNIT_OCCURENCES,    "Number of unsuccessful patch attempts.");
     441    STAM_REG(pVM, &pVM->hm.s.StatTPRPatchSuccess,   STAMTYPE_COUNTER, "/HM/TPR/Patch/Success",  STAMUNIT_OCCURENCES,    "Number of times an instruction was successfully patched.");
     442    STAM_REG(pVM, &pVM->hm.s.StatTPRPatchFailure,   STAMTYPE_COUNTER, "/HM/TPR/Patch/Failed",   STAMUNIT_OCCURENCES,    "Number of unsuccessful patch attempts.");
     443    STAM_REG(pVM, &pVM->hm.s.StatTPRReplaceSuccess, STAMTYPE_COUNTER, "/HM/TPR/Replace/Success",STAMUNIT_OCCURENCES,    "Number of times an instruction was successfully patched.");
     444    STAM_REG(pVM, &pVM->hm.s.StatTPRReplaceFailure, STAMTYPE_COUNTER, "/HM/TPR/Replace/Failed", STAMUNIT_OCCURENCES,    "Number of unsuccessful patch attempts.");
    445445
    446446    /*
     
    452452        int    rc;
    453453
    454         rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatPoke, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of RTMpPokeCpu",
    455                              "/PROF/HWACCM/CPU%d/Poke", i);
     454        rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatPoke, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of RTMpPokeCpu",
     455                             "/PROF/HM/CPU%d/Poke", i);
    456456        AssertRC(rc);
    457         rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatSpinPoke, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of poke wait",
    458                              "/PROF/HWACCM/CPU%d/PokeWait", i);
     457        rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatSpinPoke, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of poke wait",
     458                             "/PROF/HM/CPU%d/PokeWait", i);
    459459        AssertRC(rc);
    460         rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatSpinPokeFailed, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of poke wait when RTMpPokeCpu fails",
    461                              "/PROF/HWACCM/CPU%d/PokeWaitFailed", i);
     460        rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatSpinPokeFailed, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of poke wait when RTMpPokeCpu fails",
     461                             "/PROF/HM/CPU%d/PokeWaitFailed", i);
    462462        AssertRC(rc);
    463         rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatEntry, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of VMXR0RunGuestCode entry",
    464                              "/PROF/HWACCM/CPU%d/SwitchToGC", i);
     463        rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatEntry, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of VMXR0RunGuestCode entry",
     464                             "/PROF/HM/CPU%d/SwitchToGC", i);
    465465        AssertRC(rc);
    466         rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExit1, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of VMXR0RunGuestCode exit part 1",
    467                              "/PROF/HWACCM/CPU%d/SwitchFromGC_1", i);
     466        rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExit1, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of VMXR0RunGuestCode exit part 1",
     467                             "/PROF/HM/CPU%d/SwitchFromGC_1", i);
    468468        AssertRC(rc);
    469         rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExit2, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of VMXR0RunGuestCode exit part 2",
    470                              "/PROF/HWACCM/CPU%d/SwitchFromGC_2", i);
     469        rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExit2, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of VMXR0RunGuestCode exit part 2",
     470                             "/PROF/HM/CPU%d/SwitchFromGC_2", i);
    471471        AssertRC(rc);
    472472# if 1 /* temporary for tracking down darwin holdup. */
    473         rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExit2Sub1, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Temporary - I/O",
    474                              "/PROF/HWACCM/CPU%d/SwitchFromGC_2/Sub1", i);
     473        rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExit2Sub1, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Temporary - I/O",
     474                             "/PROF/HM/CPU%d/SwitchFromGC_2/Sub1", i);
    475475        AssertRC(rc);
    476         rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExit2Sub2, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Temporary - CRx RWs",
    477                              "/PROF/HWACCM/CPU%d/SwitchFromGC_2/Sub2", i);
     476        rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExit2Sub2, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Temporary - CRx RWs",
     477                             "/PROF/HM/CPU%d/SwitchFromGC_2/Sub2", i);
    478478        AssertRC(rc);
    479         rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExit2Sub3, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Temporary - Exceptions",
    480                              "/PROF/HWACCM/CPU%d/SwitchFromGC_2/Sub3", i);
     479        rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExit2Sub3, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Temporary - Exceptions",
     480                             "/PROF/HM/CPU%d/SwitchFromGC_2/Sub3", i);
    481481        AssertRC(rc);
    482482# endif
    483         rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatInGC, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of vmlaunch",
    484                              "/PROF/HWACCM/CPU%d/InGC", i);
     483        rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatInGC, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of vmlaunch",
     484                             "/PROF/HM/CPU%d/InGC", i);
    485485        AssertRC(rc);
    486486
    487487# if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    488         rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatWorldSwitch3264, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of the 32/64 switcher",
    489                              "/PROF/HWACCM/CPU%d/Switcher3264", i);
     488        rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatWorldSwitch3264, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of the 32/64 switcher",
     489                             "/PROF/HM/CPU%d/Switcher3264", i);
    490490        AssertRC(rc);
    491491# endif
    492492
    493 # define HWACCM_REG_COUNTER(a, b) \
     493# define HM_REG_COUNTER(a, b) \
    494494        rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Profiling of vmlaunch", b, i); \
    495495        AssertRC(rc);
    496496
    497         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitShadowNM,           "/HWACCM/CPU%d/Exit/Trap/Shw/#NM");
    498         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestNM,            "/HWACCM/CPU%d/Exit/Trap/Gst/#NM");
    499         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitShadowPF,           "/HWACCM/CPU%d/Exit/Trap/Shw/#PF");
    500         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitShadowPFEM,         "/HWACCM/CPU%d/Exit/Trap/Shw/#PF-EM");
    501         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestPF,            "/HWACCM/CPU%d/Exit/Trap/Gst/#PF");
    502         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestUD,            "/HWACCM/CPU%d/Exit/Trap/Gst/#UD");
    503         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestSS,            "/HWACCM/CPU%d/Exit/Trap/Gst/#SS");
    504         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestNP,            "/HWACCM/CPU%d/Exit/Trap/Gst/#NP");
    505         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestGP,            "/HWACCM/CPU%d/Exit/Trap/Gst/#GP");
    506         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestMF,            "/HWACCM/CPU%d/Exit/Trap/Gst/#MF");
    507         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestDE,            "/HWACCM/CPU%d/Exit/Trap/Gst/#DE");
    508         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestDB,            "/HWACCM/CPU%d/Exit/Trap/Gst/#DB");
    509         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestBP,            "/HWACCM/CPU%d/Exit/Trap/Gst/#BP");
    510         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestXF,            "/HWACCM/CPU%d/Exit/Trap/Gst/#XF");
    511         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestXcpUnk,        "/HWACCM/CPU%d/Exit/Trap/Gst/Other");
    512         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitInvlpg,             "/HWACCM/CPU%d/Exit/Instr/Invlpg");
    513         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitInvd,               "/HWACCM/CPU%d/Exit/Instr/Invd");
    514         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitCpuid,              "/HWACCM/CPU%d/Exit/Instr/Cpuid");
    515         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitRdtsc,              "/HWACCM/CPU%d/Exit/Instr/Rdtsc");
    516         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitRdtscp,             "/HWACCM/CPU%d/Exit/Instr/Rdtscp");
    517         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitRdpmc,              "/HWACCM/CPU%d/Exit/Instr/Rdpmc");
    518         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitRdmsr,              "/HWACCM/CPU%d/Exit/Instr/Rdmsr");
    519         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitWrmsr,              "/HWACCM/CPU%d/Exit/Instr/Wrmsr");
    520         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitMwait,              "/HWACCM/CPU%d/Exit/Instr/Mwait");
    521         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitMonitor,            "/HWACCM/CPU%d/Exit/Instr/Monitor");
    522         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitDRxWrite,           "/HWACCM/CPU%d/Exit/Instr/DR/Write");
    523         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitDRxRead,            "/HWACCM/CPU%d/Exit/Instr/DR/Read");
    524         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitCLTS,               "/HWACCM/CPU%d/Exit/Instr/CLTS");
    525         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitLMSW,               "/HWACCM/CPU%d/Exit/Instr/LMSW");
    526         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitCli,                "/HWACCM/CPU%d/Exit/Instr/Cli");
    527         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitSti,                "/HWACCM/CPU%d/Exit/Instr/Sti");
    528         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitPushf,              "/HWACCM/CPU%d/Exit/Instr/Pushf");
    529         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitPopf,               "/HWACCM/CPU%d/Exit/Instr/Popf");
    530         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIret,               "/HWACCM/CPU%d/Exit/Instr/Iret");
    531         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitInt,                "/HWACCM/CPU%d/Exit/Instr/Int");
    532         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitHlt,                "/HWACCM/CPU%d/Exit/Instr/Hlt");
    533         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIOWrite,            "/HWACCM/CPU%d/Exit/IO/Write");
    534         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIORead,             "/HWACCM/CPU%d/Exit/IO/Read");
    535         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIOStringWrite,      "/HWACCM/CPU%d/Exit/IO/WriteString");
    536         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIOStringRead,       "/HWACCM/CPU%d/Exit/IO/ReadString");
    537         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIrqWindow,          "/HWACCM/CPU%d/Exit/IrqWindow");
    538         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitMaxResume,          "/HWACCM/CPU%d/Exit/MaxResume");
    539         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitPreemptPending,     "/HWACCM/CPU%d/Exit/PreemptPending");
    540         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitMTF,                "/HWACCM/CPU%d/Exit/MonitorTrapFlag");
    541 
    542         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatSwitchGuestIrq,         "/HWACCM/CPU%d/Switch/IrqPending");
    543         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatSwitchToR3,             "/HWACCM/CPU%d/Switch/ToR3");
    544 
    545         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatIntInject,              "/HWACCM/CPU%d/Irq/Inject");
    546         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatIntReinject,            "/HWACCM/CPU%d/Irq/Reinject");
    547         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatPendingHostIrq,         "/HWACCM/CPU%d/Irq/PendingOnHost");
    548 
    549         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushPage,              "/HWACCM/CPU%d/Flush/Page");
    550         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushPageManual,        "/HWACCM/CPU%d/Flush/Page/Virt");
    551         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushPhysPageManual,    "/HWACCM/CPU%d/Flush/Page/Phys");
    552         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushTLB,               "/HWACCM/CPU%d/Flush/TLB");
    553         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushTLBManual,         "/HWACCM/CPU%d/Flush/TLB/Manual");
    554         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushTLBCRxChange,      "/HWACCM/CPU%d/Flush/TLB/CRx");
    555         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushPageInvlpg,        "/HWACCM/CPU%d/Flush/Page/Invlpg");
    556         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushTLBWorldSwitch,    "/HWACCM/CPU%d/Flush/TLB/Switch");
    557         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatNoFlushTLBWorldSwitch,  "/HWACCM/CPU%d/Flush/TLB/Skipped");
    558         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushASID,              "/HWACCM/CPU%d/Flush/TLB/ASID");
    559         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushTLBInvlpga,        "/HWACCM/CPU%d/Flush/TLB/PhysInvl");
    560         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTlbShootdown,           "/HWACCM/CPU%d/Flush/Shootdown/Page");
    561         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTlbShootdownFlush,      "/HWACCM/CPU%d/Flush/Shootdown/TLB");
    562 
    563         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTSCOffset,              "/HWACCM/CPU%d/TSC/Offset");
    564         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTSCIntercept,           "/HWACCM/CPU%d/TSC/Intercept");
    565         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTSCInterceptOverFlow,   "/HWACCM/CPU%d/TSC/InterceptOverflow");
    566 
    567         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatDRxArmed,               "/HWACCM/CPU%d/Debug/Armed");
    568         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatDRxContextSwitch,       "/HWACCM/CPU%d/Debug/ContextSwitch");
    569         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatDRxIOCheck,             "/HWACCM/CPU%d/Debug/IOCheck");
    570 
    571         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatLoadMinimal,            "/HWACCM/CPU%d/Load/Minimal");
    572         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatLoadFull,               "/HWACCM/CPU%d/Load/Full");
     497        HM_REG_COUNTER(&pVCpu->hm.s.StatExitShadowNM,           "/HM/CPU%d/Exit/Trap/Shw/#NM");
     498        HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestNM,            "/HM/CPU%d/Exit/Trap/Gst/#NM");
     499        HM_REG_COUNTER(&pVCpu->hm.s.StatExitShadowPF,           "/HM/CPU%d/Exit/Trap/Shw/#PF");
     500        HM_REG_COUNTER(&pVCpu->hm.s.StatExitShadowPFEM,         "/HM/CPU%d/Exit/Trap/Shw/#PF-EM");
     501        HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestPF,            "/HM/CPU%d/Exit/Trap/Gst/#PF");
     502        HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestUD,            "/HM/CPU%d/Exit/Trap/Gst/#UD");
     503        HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestSS,            "/HM/CPU%d/Exit/Trap/Gst/#SS");
     504        HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestNP,            "/HM/CPU%d/Exit/Trap/Gst/#NP");
     505        HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestGP,            "/HM/CPU%d/Exit/Trap/Gst/#GP");
     506        HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestMF,            "/HM/CPU%d/Exit/Trap/Gst/#MF");
     507        HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestDE,            "/HM/CPU%d/Exit/Trap/Gst/#DE");
     508        HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestDB,            "/HM/CPU%d/Exit/Trap/Gst/#DB");
     509        HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestBP,            "/HM/CPU%d/Exit/Trap/Gst/#BP");
     510        HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestXF,            "/HM/CPU%d/Exit/Trap/Gst/#XF");
     511        HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestXcpUnk,        "/HM/CPU%d/Exit/Trap/Gst/Other");
     512        HM_REG_COUNTER(&pVCpu->hm.s.StatExitInvlpg,             "/HM/CPU%d/Exit/Instr/Invlpg");
     513        HM_REG_COUNTER(&pVCpu->hm.s.StatExitInvd,               "/HM/CPU%d/Exit/Instr/Invd");
     514        HM_REG_COUNTER(&pVCpu->hm.s.StatExitCpuid,              "/HM/CPU%d/Exit/Instr/Cpuid");
     515        HM_REG_COUNTER(&pVCpu->hm.s.StatExitRdtsc,              "/HM/CPU%d/Exit/Instr/Rdtsc");
     516        HM_REG_COUNTER(&pVCpu->hm.s.StatExitRdtscp,             "/HM/CPU%d/Exit/Instr/Rdtscp");
     517        HM_REG_COUNTER(&pVCpu->hm.s.StatExitRdpmc,              "/HM/CPU%d/Exit/Instr/Rdpmc");
     518        HM_REG_COUNTER(&pVCpu->hm.s.StatExitRdmsr,              "/HM/CPU%d/Exit/Instr/Rdmsr");
     519        HM_REG_COUNTER(&pVCpu->hm.s.StatExitWrmsr,              "/HM/CPU%d/Exit/Instr/Wrmsr");
     520        HM_REG_COUNTER(&pVCpu->hm.s.StatExitMwait,              "/HM/CPU%d/Exit/Instr/Mwait");
     521        HM_REG_COUNTER(&pVCpu->hm.s.StatExitMonitor,            "/HM/CPU%d/Exit/Instr/Monitor");
     522        HM_REG_COUNTER(&pVCpu->hm.s.StatExitDRxWrite,           "/HM/CPU%d/Exit/Instr/DR/Write");
     523        HM_REG_COUNTER(&pVCpu->hm.s.StatExitDRxRead,            "/HM/CPU%d/Exit/Instr/DR/Read");
     524        HM_REG_COUNTER(&pVCpu->hm.s.StatExitCLTS,               "/HM/CPU%d/Exit/Instr/CLTS");
     525        HM_REG_COUNTER(&pVCpu->hm.s.StatExitLMSW,               "/HM/CPU%d/Exit/Instr/LMSW");
     526        HM_REG_COUNTER(&pVCpu->hm.s.StatExitCli,                "/HM/CPU%d/Exit/Instr/Cli");
     527        HM_REG_COUNTER(&pVCpu->hm.s.StatExitSti,                "/HM/CPU%d/Exit/Instr/Sti");
     528        HM_REG_COUNTER(&pVCpu->hm.s.StatExitPushf,              "/HM/CPU%d/Exit/Instr/Pushf");
     529        HM_REG_COUNTER(&pVCpu->hm.s.StatExitPopf,               "/HM/CPU%d/Exit/Instr/Popf");
     530        HM_REG_COUNTER(&pVCpu->hm.s.StatExitIret,               "/HM/CPU%d/Exit/Instr/Iret");
     531        HM_REG_COUNTER(&pVCpu->hm.s.StatExitInt,                "/HM/CPU%d/Exit/Instr/Int");
     532        HM_REG_COUNTER(&pVCpu->hm.s.StatExitHlt,                "/HM/CPU%d/Exit/Instr/Hlt");
     533        HM_REG_COUNTER(&pVCpu->hm.s.StatExitIOWrite,            "/HM/CPU%d/Exit/IO/Write");
     534        HM_REG_COUNTER(&pVCpu->hm.s.StatExitIORead,             "/HM/CPU%d/Exit/IO/Read");
     535        HM_REG_COUNTER(&pVCpu->hm.s.StatExitIOStringWrite,      "/HM/CPU%d/Exit/IO/WriteString");
     536        HM_REG_COUNTER(&pVCpu->hm.s.StatExitIOStringRead,       "/HM/CPU%d/Exit/IO/ReadString");
     537        HM_REG_COUNTER(&pVCpu->hm.s.StatExitIrqWindow,          "/HM/CPU%d/Exit/IrqWindow");
     538        HM_REG_COUNTER(&pVCpu->hm.s.StatExitMaxResume,          "/HM/CPU%d/Exit/MaxResume");
     539        HM_REG_COUNTER(&pVCpu->hm.s.StatExitPreemptPending,     "/HM/CPU%d/Exit/PreemptPending");
     540        HM_REG_COUNTER(&pVCpu->hm.s.StatExitMTF,                "/HM/CPU%d/Exit/MonitorTrapFlag");
     541
     542        HM_REG_COUNTER(&pVCpu->hm.s.StatSwitchGuestIrq,         "/HM/CPU%d/Switch/IrqPending");
     543        HM_REG_COUNTER(&pVCpu->hm.s.StatSwitchToR3,             "/HM/CPU%d/Switch/ToR3");
     544
     545        HM_REG_COUNTER(&pVCpu->hm.s.StatIntInject,              "/HM/CPU%d/Irq/Inject");
     546        HM_REG_COUNTER(&pVCpu->hm.s.StatIntReinject,            "/HM/CPU%d/Irq/Reinject");
     547        HM_REG_COUNTER(&pVCpu->hm.s.StatPendingHostIrq,         "/HM/CPU%d/Irq/PendingOnHost");
     548
     549        HM_REG_COUNTER(&pVCpu->hm.s.StatFlushPage,              "/HM/CPU%d/Flush/Page");
     550        HM_REG_COUNTER(&pVCpu->hm.s.StatFlushPageManual,        "/HM/CPU%d/Flush/Page/Virt");
     551        HM_REG_COUNTER(&pVCpu->hm.s.StatFlushPhysPageManual,    "/HM/CPU%d/Flush/Page/Phys");
     552        HM_REG_COUNTER(&pVCpu->hm.s.StatFlushTLB,               "/HM/CPU%d/Flush/TLB");
     553        HM_REG_COUNTER(&pVCpu->hm.s.StatFlushTLBManual,         "/HM/CPU%d/Flush/TLB/Manual");
     554        HM_REG_COUNTER(&pVCpu->hm.s.StatFlushTLBCRxChange,      "/HM/CPU%d/Flush/TLB/CRx");
     555        HM_REG_COUNTER(&pVCpu->hm.s.StatFlushPageInvlpg,        "/HM/CPU%d/Flush/Page/Invlpg");
     556        HM_REG_COUNTER(&pVCpu->hm.s.StatFlushTLBWorldSwitch,    "/HM/CPU%d/Flush/TLB/Switch");
     557        HM_REG_COUNTER(&pVCpu->hm.s.StatNoFlushTLBWorldSwitch,  "/HM/CPU%d/Flush/TLB/Skipped");
     558        HM_REG_COUNTER(&pVCpu->hm.s.StatFlushASID,              "/HM/CPU%d/Flush/TLB/ASID");
     559        HM_REG_COUNTER(&pVCpu->hm.s.StatFlushTLBInvlpga,        "/HM/CPU%d/Flush/TLB/PhysInvl");
     560        HM_REG_COUNTER(&pVCpu->hm.s.StatTlbShootdown,           "/HM/CPU%d/Flush/Shootdown/Page");
     561        HM_REG_COUNTER(&pVCpu->hm.s.StatTlbShootdownFlush,      "/HM/CPU%d/Flush/Shootdown/TLB");
     562
     563        HM_REG_COUNTER(&pVCpu->hm.s.StatTSCOffset,              "/HM/CPU%d/TSC/Offset");
     564        HM_REG_COUNTER(&pVCpu->hm.s.StatTSCIntercept,           "/HM/CPU%d/TSC/Intercept");
     565        HM_REG_COUNTER(&pVCpu->hm.s.StatTSCInterceptOverFlow,   "/HM/CPU%d/TSC/InterceptOverflow");
     566
     567        HM_REG_COUNTER(&pVCpu->hm.s.StatDRxArmed,               "/HM/CPU%d/Debug/Armed");
     568        HM_REG_COUNTER(&pVCpu->hm.s.StatDRxContextSwitch,       "/HM/CPU%d/Debug/ContextSwitch");
     569        HM_REG_COUNTER(&pVCpu->hm.s.StatDRxIOCheck,             "/HM/CPU%d/Debug/IOCheck");
     570
     571        HM_REG_COUNTER(&pVCpu->hm.s.StatLoadMinimal,            "/HM/CPU%d/Load/Minimal");
     572        HM_REG_COUNTER(&pVCpu->hm.s.StatLoadFull,               "/HM/CPU%d/Load/Full");
    573573
    574574#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    575         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFpu64SwitchBack,        "/HWACCM/CPU%d/Switch64/Fpu");
    576         HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatDebug64SwitchBack,      "/HWACCM/CPU%d/Switch64/Debug");
     575        HM_REG_COUNTER(&pVCpu->hm.s.StatFpu64SwitchBack,        "/HM/CPU%d/Switch64/Fpu");
     576        HM_REG_COUNTER(&pVCpu->hm.s.StatDebug64SwitchBack,      "/HM/CPU%d/Switch64/Debug");
    577577#endif
    578578
    579         for (unsigned j = 0; j < RT_ELEMENTS(pVCpu->hwaccm.s.StatExitCRxWrite); j++)
     579        for (unsigned j = 0; j < RT_ELEMENTS(pVCpu->hm.s.StatExitCRxWrite); j++)
    580580        {
    581             rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExitCRxWrite[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "Profiling of CRx writes",
    582                                 "/HWACCM/CPU%d/Exit/Instr/CR/Write/%x", i, j);
     581            rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExitCRxWrite[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "Profiling of CRx writes",
     582                                "/HM/CPU%d/Exit/Instr/CR/Write/%x", i, j);
    583583            AssertRC(rc);
    584             rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExitCRxRead[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "Profiling of CRx reads",
    585                                 "/HWACCM/CPU%d/Exit/Instr/CR/Read/%x", i, j);
     584            rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExitCRxRead[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "Profiling of CRx reads",
     585                                "/HM/CPU%d/Exit/Instr/CR/Read/%x", i, j);
    586586            AssertRC(rc);
    587587        }
    588588
    589 #undef HWACCM_REG_COUNTER
    590 
    591         pVCpu->hwaccm.s.paStatExitReason = NULL;
    592 
    593         rc = MMHyperAlloc(pVM, MAX_EXITREASON_STAT*sizeof(*pVCpu->hwaccm.s.paStatExitReason), 0, MM_TAG_HWACCM, (void **)&pVCpu->hwaccm.s.paStatExitReason);
     589#undef HM_REG_COUNTER
     590
     591        pVCpu->hm.s.paStatExitReason = NULL;
     592
     593        rc = MMHyperAlloc(pVM, MAX_EXITREASON_STAT*sizeof(*pVCpu->hm.s.paStatExitReason), 0, MM_TAG_HM, (void **)&pVCpu->hm.s.paStatExitReason);
    594594        AssertRC(rc);
    595595        if (RT_SUCCESS(rc))
     
    600600                if (papszDesc[j])
    601601                {
    602                     rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.paStatExitReason[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
    603                                         papszDesc[j], "/HWACCM/CPU%d/Exit/Reason/%02x", i, j);
     602                    rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.paStatExitReason[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
     603                                        papszDesc[j], "/HM/CPU%d/Exit/Reason/%02x", i, j);
    604604                    AssertRC(rc);
    605605                }
    606606            }
    607             rc = STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.StatExitReasonNPF, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "Nested page fault", "/HWACCM/CPU%d/Exit/Reason/#NPF", i);
     607            rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExitReasonNPF, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "Nested page fault", "/HM/CPU%d/Exit/Reason/#NPF", i);
    608608            AssertRC(rc);
    609609        }
    610         pVCpu->hwaccm.s.paStatExitReasonR0 = MMHyperR3ToR0(pVM, pVCpu->hwaccm.s.paStatExitReason);
     610        pVCpu->hm.s.paStatExitReasonR0 = MMHyperR3ToR0(pVM, pVCpu->hm.s.paStatExitReason);
    611611# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    612         Assert(pVCpu->hwaccm.s.paStatExitReasonR0 != NIL_RTR0PTR || !VMMIsHwVirtExtForced(pVM));
     612        Assert(pVCpu->hm.s.paStatExitReasonR0 != NIL_RTR0PTR || !VMMIsHwVirtExtForced(pVM));
    613613# else
    614         Assert(pVCpu->hwaccm.s.paStatExitReasonR0 != NIL_RTR0PTR);
     614        Assert(pVCpu->hm.s.paStatExitReasonR0 != NIL_RTR0PTR);
    615615# endif
    616616
    617         rc = MMHyperAlloc(pVM, sizeof(STAMCOUNTER) * 256, 8, MM_TAG_HWACCM, (void **)&pVCpu->hwaccm.s.paStatInjectedIrqs);
     617        rc = MMHyperAlloc(pVM, sizeof(STAMCOUNTER) * 256, 8, MM_TAG_HM, (void **)&pVCpu->hm.s.paStatInjectedIrqs);
    618618        AssertRCReturn(rc, rc);
    619         pVCpu->hwaccm.s.paStatInjectedIrqsR0 = MMHyperR3ToR0(pVM, pVCpu->hwaccm.s.paStatInjectedIrqs);
     619        pVCpu->hm.s.paStatInjectedIrqsR0 = MMHyperR3ToR0(pVM, pVCpu->hm.s.paStatInjectedIrqs);
    620620# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    621         Assert(pVCpu->hwaccm.s.paStatInjectedIrqsR0 != NIL_RTR0PTR || !VMMIsHwVirtExtForced(pVM));
     621        Assert(pVCpu->hm.s.paStatInjectedIrqsR0 != NIL_RTR0PTR || !VMMIsHwVirtExtForced(pVM));
    622622# else
    623         Assert(pVCpu->hwaccm.s.paStatInjectedIrqsR0 != NIL_RTR0PTR);
     623        Assert(pVCpu->hm.s.paStatInjectedIrqsR0 != NIL_RTR0PTR);
    624624# endif
    625625        for (unsigned j = 0; j < 255; j++)
    626             STAMR3RegisterF(pVM, &pVCpu->hwaccm.s.paStatInjectedIrqs[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "Forwarded interrupts.",
    627                             (j < 0x20) ? "/HWACCM/CPU%d/Interrupt/Trap/%02X" : "/HWACCM/CPU%d/Interrupt/IRQ/%02X", i, j);
     626            STAMR3RegisterF(pVM, &pVCpu->hm.s.paStatInjectedIrqs[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "Forwarded interrupts.",
     627                            (j < 0x20) ? "/HM/CPU%d/Interrupt/Trap/%02X" : "/HM/CPU%d/Interrupt/IRQ/%02X", i, j);
    628628
    629629    }
     
    636636        PVMCPU pVCpu = &pVM->aCpus[i];
    637637
    638         PVMCSCACHE pCache = &pVCpu->hwaccm.s.vmx.VMCSCache;
     638        PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
    639639        strcpy((char *)pCache->aMagic, "VMCSCACHE Magic");
    640640        pCache->uMagic = UINT64_C(0xDEADBEEFDEADBEEF);
     
    652652 * @param   enmWhat             The phase that completed.
    653653 */
    654 VMMR3_INT_DECL(int) HWACCMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
     654VMMR3_INT_DECL(int) HMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
    655655{
    656656    switch (enmWhat)
    657657    {
    658658        case VMINITCOMPLETED_RING3:
    659             return hwaccmR3InitCPU(pVM);
     659            return hmR3InitCPU(pVM);
    660660        case VMINITCOMPLETED_RING0:
    661             return hwaccmR3InitFinalizeR0(pVM);
     661            return hmR3InitFinalizeR0(pVM);
    662662        default:
    663663            return VINF_SUCCESS;
     
    671671 * @param   pVM         Pointer to the VM.
    672672 */
    673 static void hwaccmR3DisableRawMode(PVM pVM)
     673static void hmR3DisableRawMode(PVM pVM)
    674674{
    675675    /* Disable PATM & CSAM. */
     
    706706 * @param   pVM         Pointer to the VM.
    707707 */
    708 static int hwaccmR3InitFinalizeR0(PVM pVM)
     708static int hmR3InitFinalizeR0(PVM pVM)
    709709{
    710710    int rc;
     
    714714     * is already using AMD-V.
    715715     */
    716     if (    !pVM->hwaccm.s.vmx.fSupported
    717         &&  !pVM->hwaccm.s.svm.fSupported
    718         &&  pVM->hwaccm.s.lLastError == VERR_SVM_IN_USE /* implies functional AMD-V */
     716    if (    !pVM->hm.s.vmx.fSupported
     717        &&  !pVM->hm.s.svm.fSupported
     718        &&  pVM->hm.s.lLastError == VERR_SVM_IN_USE /* implies functional AMD-V */
    719719        &&  RTEnvExist("VBOX_HWVIRTEX_IGNORE_SVM_IN_USE"))
    720720    {
    721         LogRel(("HWACCM: VBOX_HWVIRTEX_IGNORE_SVM_IN_USE active!\n"));
    722         pVM->hwaccm.s.svm.fSupported        = true;
    723         pVM->hwaccm.s.svm.fIgnoreInUseError = true;
     721        LogRel(("HM: VBOX_HWVIRTEX_IGNORE_SVM_IN_USE active!\n"));
     722        pVM->hm.s.svm.fSupported        = true;
     723        pVM->hm.s.svm.fIgnoreInUseError = true;
    724724    }
    725725    else
    726     if (    !pVM->hwaccm.s.vmx.fSupported
    727         &&  !pVM->hwaccm.s.svm.fSupported)
    728     {
    729         LogRel(("HWACCM: No VT-x or AMD-V CPU extension found. Reason %Rrc\n", pVM->hwaccm.s.lLastError));
    730         LogRel(("HWACCM: VMX MSR_IA32_FEATURE_CONTROL=%RX64\n", pVM->hwaccm.s.vmx.msr.feature_ctrl));
     726    if (    !pVM->hm.s.vmx.fSupported
     727        &&  !pVM->hm.s.svm.fSupported)
     728    {
     729        LogRel(("HM: No VT-x or AMD-V CPU extension found. Reason %Rrc\n", pVM->hm.s.lLastError));
     730        LogRel(("HM: VMX MSR_IA32_FEATURE_CONTROL=%RX64\n", pVM->hm.s.vmx.msr.feature_ctrl));
    731731
    732732        if (VMMIsHwVirtExtForced(pVM))
    733733        {
    734             switch (pVM->hwaccm.s.lLastError)
     734            switch (pVM->hm.s.lLastError)
    735735            {
    736736            case VERR_VMX_NO_VMX:
     
    745745                return VM_SET_ERROR(pVM, VERR_SVM_DISABLED, "AMD-V is disabled in the BIOS.");
    746746            default:
    747                 return pVM->hwaccm.s.lLastError;
     747                return pVM->hm.s.lLastError;
    748748            }
    749749        }
     
    751751    }
    752752
    753     if (pVM->hwaccm.s.vmx.fSupported)
     753    if (pVM->hm.s.vmx.fSupported)
    754754    {
    755755        rc = SUPR3QueryVTxSupported();
     
    757757        {
    758758#ifdef RT_OS_LINUX
    759             LogRel(("HWACCM: The host kernel does not support VT-x -- Linux 2.6.13 or newer required!\n"));
     759            LogRel(("HM: The host kernel does not support VT-x -- Linux 2.6.13 or newer required!\n"));
    760760#else
    761             LogRel(("HWACCM: The host kernel does not support VT-x!\n"));
     761            LogRel(("HM: The host kernel does not support VT-x!\n"));
    762762#endif
    763763            if (   pVM->cCpus > 1
     
    770770    }
    771771
    772     if (!pVM->hwaccm.s.fAllowed)
     772    if (!pVM->hm.s.fAllowed)
    773773        return VINF_SUCCESS;    /* nothing to do */
    774774
     
    777777    if (RT_FAILURE(rc))
    778778    {
    779         LogRel(("HWACCMR3InitFinalize: SUPR3CallVMMR0Ex VMMR0_DO_HWACC_ENABLE failed with %Rrc\n", rc));
     779        LogRel(("HMR3InitFinalize: SUPR3CallVMMR0Ex VMMR0_DO_HWACC_ENABLE failed with %Rrc\n", rc));
    780780        return rc;
    781781    }
    782     Assert(!pVM->fHWACCMEnabled || VMMIsHwVirtExtForced(pVM));
    783 
    784     pVM->hwaccm.s.fHasIoApic = PDMHasIoApic(pVM);
     782    Assert(!pVM->fHMEnabled || VMMIsHwVirtExtForced(pVM));
     783
     784    pVM->hm.s.fHasIoApic = PDMHasIoApic(pVM);
    785785    /* No TPR patching is required when the IO-APIC is not enabled for this VM. (Main should have taken care of this already) */
    786     if (!pVM->hwaccm.s.fHasIoApic)
    787     {
    788         Assert(!pVM->hwaccm.s.fTRPPatchingAllowed); /* paranoia */
    789         pVM->hwaccm.s.fTRPPatchingAllowed = false;
     786    if (!pVM->hm.s.fHasIoApic)
     787    {
     788        Assert(!pVM->hm.s.fTRPPatchingAllowed); /* paranoia */
     789        pVM->hm.s.fTRPPatchingAllowed = false;
    790790    }
    791791
    792792    bool fOldBuffered = RTLogRelSetBuffering(true /*fBuffered*/);
    793     if (pVM->hwaccm.s.vmx.fSupported)
    794     {
    795         Log(("pVM->hwaccm.s.vmx.fSupported = %d\n", pVM->hwaccm.s.vmx.fSupported));
    796 
    797         if (    pVM->hwaccm.s.fInitialized == false
    798             &&  pVM->hwaccm.s.vmx.msr.feature_ctrl != 0)
     793    if (pVM->hm.s.vmx.fSupported)
     794    {
     795        Log(("pVM->hm.s.vmx.fSupported = %d\n", pVM->hm.s.vmx.fSupported));
     796
     797        if (    pVM->hm.s.fInitialized == false
     798            &&  pVM->hm.s.vmx.msr.feature_ctrl != 0)
    799799        {
    800800            uint64_t val;
    801801            RTGCPHYS GCPhys = 0;
    802802
    803             LogRel(("HWACCM: Host CR4=%08X\n", pVM->hwaccm.s.vmx.hostCR4));
    804             LogRel(("HWACCM: MSR_IA32_FEATURE_CONTROL      = %RX64\n", pVM->hwaccm.s.vmx.msr.feature_ctrl));
    805             LogRel(("HWACCM: MSR_IA32_VMX_BASIC_INFO       = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_basic_info));
    806             LogRel(("HWACCM: VMCS id                       = %x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hwaccm.s.vmx.msr.vmx_basic_info)));
    807             LogRel(("HWACCM: VMCS size                     = %x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_SIZE(pVM->hwaccm.s.vmx.msr.vmx_basic_info)));
    808             LogRel(("HWACCM: VMCS physical address limit   = %s\n", MSR_IA32_VMX_BASIC_INFO_VMCS_PHYS_WIDTH(pVM->hwaccm.s.vmx.msr.vmx_basic_info) ? "< 4 GB" : "None"));
    809             LogRel(("HWACCM: VMCS memory type              = %x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_MEM_TYPE(pVM->hwaccm.s.vmx.msr.vmx_basic_info)));
    810             LogRel(("HWACCM: Dual monitor treatment        = %d\n", MSR_IA32_VMX_BASIC_INFO_VMCS_DUAL_MON(pVM->hwaccm.s.vmx.msr.vmx_basic_info)));
    811 
    812             LogRel(("HWACCM: MSR_IA32_VMX_PINBASED_CTLS    = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_pin_ctls.u));
    813             val = pVM->hwaccm.s.vmx.msr.vmx_pin_ctls.n.allowed1;
     803            LogRel(("HM: Host CR4=%08X\n", pVM->hm.s.vmx.hostCR4));
     804            LogRel(("HM: MSR_IA32_FEATURE_CONTROL      = %RX64\n", pVM->hm.s.vmx.msr.feature_ctrl));
     805            LogRel(("HM: MSR_IA32_VMX_BASIC_INFO       = %RX64\n", pVM->hm.s.vmx.msr.vmx_basic_info));
     806            LogRel(("HM: VMCS id                       = %x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.msr.vmx_basic_info)));
     807            LogRel(("HM: VMCS size                     = %x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_SIZE(pVM->hm.s.vmx.msr.vmx_basic_info)));
     808            LogRel(("HM: VMCS physical address limit   = %s\n", MSR_IA32_VMX_BASIC_INFO_VMCS_PHYS_WIDTH(pVM->hm.s.vmx.msr.vmx_basic_info) ? "< 4 GB" : "None"));
     809            LogRel(("HM: VMCS memory type              = %x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_MEM_TYPE(pVM->hm.s.vmx.msr.vmx_basic_info)));
     810            LogRel(("HM: Dual monitor treatment        = %d\n", MSR_IA32_VMX_BASIC_INFO_VMCS_DUAL_MON(pVM->hm.s.vmx.msr.vmx_basic_info)));
     811
     812            LogRel(("HM: MSR_IA32_VMX_PINBASED_CTLS    = %RX64\n", pVM->hm.s.vmx.msr.vmx_pin_ctls.u));
     813            val = pVM->hm.s.vmx.msr.vmx_pin_ctls.n.allowed1;
    814814            if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT)
    815                 LogRel(("HWACCM:    VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT\n"));
     815                LogRel(("HM:    VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT\n"));
    816816            if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT)
    817                 LogRel(("HWACCM:    VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT\n"));
     817                LogRel(("HM:    VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT\n"));
    818818            if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI)
    819                 LogRel(("HWACCM:    VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI\n"));
     819                LogRel(("HM:    VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI\n"));
    820820            if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER)
    821                 LogRel(("HWACCM:    VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER\n"));
    822             val = pVM->hwaccm.s.vmx.msr.vmx_pin_ctls.n.disallowed0;
     821                LogRel(("HM:    VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER\n"));
     822            val = pVM->hm.s.vmx.msr.vmx_pin_ctls.n.disallowed0;
    823823            if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT)
    824                 LogRel(("HWACCM:    VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT *must* be set\n"));
     824                LogRel(("HM:    VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT *must* be set\n"));
    825825            if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT)
    826                 LogRel(("HWACCM:    VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT *must* be set\n"));
     826                LogRel(("HM:    VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT *must* be set\n"));
    827827            if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI)
    828                 LogRel(("HWACCM:    VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI *must* be set\n"));
     828                LogRel(("HM:    VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI *must* be set\n"));
    829829            if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER)
    830                 LogRel(("HWACCM:    VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER *must* be set\n"));
    831 
    832             LogRel(("HWACCM: MSR_IA32_VMX_PROCBASED_CTLS   = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.u));
    833             val = pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1;
     830                LogRel(("HM:    VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER *must* be set\n"));
     831
     832            LogRel(("HM: MSR_IA32_VMX_PROCBASED_CTLS   = %RX64\n", pVM->hm.s.vmx.msr.vmx_proc_ctls.u));
     833            val = pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1;
    834834            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT)
    835                 LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT\n"));
     835                LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT\n"));
    836836            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET)
    837                 LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET\n"));
     837                LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET\n"));
    838838            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT)
    839                 LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT\n"));
     839                LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT\n"));
    840840            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT)
    841                 LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT\n"));
     841                LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT\n"));
    842842            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT)
    843                 LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT\n"));
     843                LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT\n"));
    844844            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT)
    845                 LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT\n"));
     845                LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT\n"));
    846846            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT)
    847                 LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT\n"));
     847                LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT\n"));
    848848            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT)
    849                 LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT\n"));
     849                LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT\n"));
    850850            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT)
    851                 LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT\n"));
     851                LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT\n"));
    852852            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT)
    853                 LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT\n"));
     853                LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT\n"));
    854854            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT)
    855                 LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT\n"));
     855                LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT\n"));
    856856            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
    857                 LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW\n"));
     857                LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW\n"));
    858858            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_NMI_WINDOW_EXIT)
    859                 LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_NMI_WINDOW_EXIT\n"));
     859                LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_NMI_WINDOW_EXIT\n"));
    860860            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT)
    861                 LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT\n"));
     861                LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT\n"));
    862862            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT)
    863                 LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT\n"));
     863                LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT\n"));
    864864            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS)
    865                 LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS\n"));
     865                LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS\n"));
    866866            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG)
    867                 LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG\n"));
     867                LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG\n"));
    868868            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
    869                 LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS\n"));
     869                LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS\n"));
    870870            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT)
    871                 LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT\n"));
     871                LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT\n"));
    872872            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT)
    873                 LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT\n"));
     873                LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT\n"));
    874874            if (val & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
    875                 LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL\n"));
    876 
    877             val = pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.disallowed0;
     875                LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL\n"));
     876
     877            val = pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0;
    878878            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT)
    879                 LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT *must* be set\n"));
     879                LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT *must* be set\n"));
    880880            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET)
    881                 LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET *must* be set\n"));
     881                LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET *must* be set\n"));
    882882            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT)
    883                 LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT *must* be set\n"));
     883                LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT *must* be set\n"));
    884884            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT)
    885                 LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT *must* be set\n"));
     885                LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT *must* be set\n"));
    886886            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT)
    887                 LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT *must* be set\n"));
     887                LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT *must* be set\n"));
    888888            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT)
    889                 LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT *must* be set\n"));
     889                LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT *must* be set\n"));
    890890            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT)
    891                 LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT *must* be set\n"));
     891                LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT *must* be set\n"));
    892892            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT)
    893                 LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT *must* be set\n"));
     893                LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT *must* be set\n"));
    894894            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT)
    895                 LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT *must* be set\n"));
     895                LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT *must* be set\n"));
    896896            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT)
    897                 LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT *must* be set\n"));
     897                LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT *must* be set\n"));
    898898            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT)
    899                 LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT *must* be set\n"));
     899                LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT *must* be set\n"));
    900900            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
    901                 LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW *must* be set\n"));
     901                LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW *must* be set\n"));
    902902            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_NMI_WINDOW_EXIT)
    903                 LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_NMI_WINDOW_EXIT *must* be set\n"));
     903                LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_NMI_WINDOW_EXIT *must* be set\n"));
    904904            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT)
    905                 LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT *must* be set\n"));
     905                LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT *must* be set\n"));
    906906            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT)
    907                 LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT *must* be set\n"));
     907                LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT *must* be set\n"));
    908908            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS)
    909                 LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS *must* be set\n"));
     909                LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS *must* be set\n"));
    910910            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG)
    911                 LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG *must* be set\n"));
     911                LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG *must* be set\n"));
    912912            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
    913                 LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS *must* be set\n"));
     913                LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS *must* be set\n"));
    914914            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT)
    915                 LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT *must* be set\n"));
     915                LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT *must* be set\n"));
    916916            if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT)
    917                 LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT *must* be set\n"));
     917                LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT *must* be set\n"));
    918918            if (val & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
    919                 LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL *must* be set\n"));
    920 
    921             if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
     919                LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL *must* be set\n"));
     920
     921            if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
    922922            {
    923                 LogRel(("HWACCM: MSR_IA32_VMX_PROCBASED_CTLS2  = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.u));
    924                 val = pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1;
     923                LogRel(("HM: MSR_IA32_VMX_PROCBASED_CTLS2  = %RX64\n", pVM->hm.s.vmx.msr.vmx_proc_ctls2.u));
     924                val = pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1;
    925925                if (val & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
    926                     LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC\n"));
     926                    LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC\n"));
    927927                if (val & VMX_VMCS_CTRL_PROC_EXEC2_EPT)
    928                     LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC2_EPT\n"));
     928                    LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC2_EPT\n"));
    929929                if (val & VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_INSTR_EXIT)
    930                     LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_INSTR_EXIT\n"));
     930                    LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_INSTR_EXIT\n"));
    931931                if (val & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
    932                     LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP\n"));
     932                    LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP\n"));
    933933                if (val & VMX_VMCS_CTRL_PROC_EXEC2_X2APIC)
    934                     LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC2_X2APIC\n"));
     934                    LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC2_X2APIC\n"));
    935935                if (val & VMX_VMCS_CTRL_PROC_EXEC2_VPID)
    936                     LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC2_VPID\n"));
     936                    LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC2_VPID\n"));
    937937                if (val & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT)
    938                     LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT\n"));
     938                    LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT\n"));
    939939                if (val & VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE)
    940                     LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE\n"));
     940                    LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE\n"));
    941941                if (val & VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT)
    942                     LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT\n"));
    943 
    944                 val = pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.disallowed0;
     942                    LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT\n"));
     943
     944                val = pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.disallowed0;
    945945                if (val & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
    946                     LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC *must* be set\n"));
     946                    LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC *must* be set\n"));
    947947                if (val & VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_INSTR_EXIT)
    948                     LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_INSTR_EXIT *must* be set\n"));
     948                    LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_INSTR_EXIT *must* be set\n"));
    949949                if (val & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
    950                     LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP *must* be set\n"));
     950                    LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP *must* be set\n"));
    951951                if (val & VMX_VMCS_CTRL_PROC_EXEC2_X2APIC)
    952                     LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC2_X2APIC *must* be set\n"));
     952                    LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC2_X2APIC *must* be set\n"));
    953953                if (val & VMX_VMCS_CTRL_PROC_EXEC2_EPT)
    954                     LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC2_EPT *must* be set\n"));
     954                    LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC2_EPT *must* be set\n"));
    955955                if (val & VMX_VMCS_CTRL_PROC_EXEC2_VPID)
    956                     LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC2_VPID *must* be set\n"));
     956                    LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC2_VPID *must* be set\n"));
    957957                if (val & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT)
    958                     LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT *must* be set\n"));
     958                    LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT *must* be set\n"));
    959959                if (val & VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE)
    960                     LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE *must* be set\n"));
     960                    LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE *must* be set\n"));
    961961                if (val & VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT)
    962                     LogRel(("HWACCM:    VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT *must* be set\n"));
     962                    LogRel(("HM:    VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT *must* be set\n"));
    963963            }
    964964
    965             LogRel(("HWACCM: MSR_IA32_VMX_ENTRY_CTLS       = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_entry.u));
    966             val = pVM->hwaccm.s.vmx.msr.vmx_entry.n.allowed1;
     965            LogRel(("HM: MSR_IA32_VMX_ENTRY_CTLS       = %RX64\n", pVM->hm.s.vmx.msr.vmx_entry.u));
     966            val = pVM->hm.s.vmx.msr.vmx_entry.n.allowed1;
    967967            if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG)
    968                 LogRel(("HWACCM:    VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG\n"));
     968                LogRel(("HM:    VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG\n"));
    969969            if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE)
    970                 LogRel(("HWACCM:    VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE\n"));
     970                LogRel(("HM:    VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE\n"));
    971971            if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM)
    972                 LogRel(("HWACCM:    VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM\n"));
     972                LogRel(("HM:    VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM\n"));
    973973            if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON)
    974                 LogRel(("HWACCM:    VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON\n"));
     974                LogRel(("HM:    VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON\n"));
    975975            if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR)
    976                 LogRel(("HWACCM:    VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR\n"));
     976                LogRel(("HM:    VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR\n"));
    977977            if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR)
    978                 LogRel(("HWACCM:    VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR\n"));
     978                LogRel(("HM:    VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR\n"));
    979979            if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR)
    980                 LogRel(("HWACCM:    VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR\n"));
    981             val = pVM->hwaccm.s.vmx.msr.vmx_entry.n.disallowed0;
     980                LogRel(("HM:    VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR\n"));
     981            val = pVM->hm.s.vmx.msr.vmx_entry.n.disallowed0;
    982982            if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG)
    983                 LogRel(("HWACCM:    VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG *must* be set\n"));
     983                LogRel(("HM:    VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG *must* be set\n"));
    984984            if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE)
    985                 LogRel(("HWACCM:    VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE *must* be set\n"));
     985                LogRel(("HM:    VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE *must* be set\n"));
    986986            if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM)
    987                 LogRel(("HWACCM:    VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM *must* be set\n"));
     987                LogRel(("HM:    VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM *must* be set\n"));
    988988            if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON)
    989                 LogRel(("HWACCM:    VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON *must* be set\n"));
     989                LogRel(("HM:    VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON *must* be set\n"));
    990990            if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR)
    991                 LogRel(("HWACCM:    VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR *must* be set\n"));
     991                LogRel(("HM:    VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR *must* be set\n"));
    992992            if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR)
    993                 LogRel(("HWACCM:    VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR *must* be set\n"));
     993                LogRel(("HM:    VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR *must* be set\n"));
    994994            if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR)
    995                 LogRel(("HWACCM:    VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR *must* be set\n"));
    996 
    997             LogRel(("HWACCM: MSR_IA32_VMX_EXIT_CTLS        = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_exit.u));
    998             val = pVM->hwaccm.s.vmx.msr.vmx_exit.n.allowed1;
     995                LogRel(("HM:    VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR *must* be set\n"));
     996
     997            LogRel(("HM: MSR_IA32_VMX_EXIT_CTLS        = %RX64\n", pVM->hm.s.vmx.msr.vmx_exit.u));
     998            val = pVM->hm.s.vmx.msr.vmx_exit.n.allowed1;
    999999            if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG)
    1000                 LogRel(("HWACCM:    VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG\n"));
     1000                LogRel(("HM:    VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG\n"));
    10011001            if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64)
    1002                 LogRel(("HWACCM:    VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64\n"));
     1002                LogRel(("HM:    VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64\n"));
    10031003            if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXTERNAL_IRQ)
    1004                 LogRel(("HWACCM:    VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXTERNAL_IRQ\n"));
     1004                LogRel(("HM:    VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXTERNAL_IRQ\n"));
    10051005            if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR)
    1006                 LogRel(("HWACCM:    VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR\n"));
     1006                LogRel(("HM:    VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR\n"));
    10071007            if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR)
    1008                 LogRel(("HWACCM:    VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR\n"));
     1008                LogRel(("HM:    VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR\n"));
    10091009            if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR)
    1010                 LogRel(("HWACCM:    VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR\n"));
     1010                LogRel(("HM:    VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR\n"));
    10111011            if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR)
    1012                 LogRel(("HWACCM:    VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR\n"));
     1012                LogRel(("HM:    VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR\n"));
    10131013            if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER)
    1014                 LogRel(("HWACCM:    VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER\n"));
    1015             val = pVM->hwaccm.s.vmx.msr.vmx_exit.n.disallowed0;
     1014                LogRel(("HM:    VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER\n"));
     1015            val = pVM->hm.s.vmx.msr.vmx_exit.n.disallowed0;
    10161016            if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG)
    1017                 LogRel(("HWACCM:    VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG *must* be set\n"));
     1017                LogRel(("HM:    VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG *must* be set\n"));
    10181018            if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64)
    1019                 LogRel(("HWACCM:    VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64 *must* be set\n"));
     1019                LogRel(("HM:    VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64 *must* be set\n"));
    10201020            if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXTERNAL_IRQ)
    1021                 LogRel(("HWACCM:    VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXTERNAL_IRQ *must* be set\n"));
     1021                LogRel(("HM:    VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXTERNAL_IRQ *must* be set\n"));
    10221022            if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR)
    1023                 LogRel(("HWACCM:    VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR *must* be set\n"));
     1023                LogRel(("HM:    VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR *must* be set\n"));
    10241024            if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR)
    1025                 LogRel(("HWACCM:    VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR *must* be set\n"));
     1025                LogRel(("HM:    VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR *must* be set\n"));
    10261026            if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR)
    1027                 LogRel(("HWACCM:    VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR *must* be set\n"));
     1027                LogRel(("HM:    VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR *must* be set\n"));
    10281028            if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR)
    1029                 LogRel(("HWACCM:    VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR *must* be set\n"));
     1029                LogRel(("HM:    VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR *must* be set\n"));
    10301030            if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER)
    1031                 LogRel(("HWACCM:    VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER *must* be set\n"));
    1032 
    1033             if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps)
     1031                LogRel(("HM:    VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER *must* be set\n"));
     1032
     1033            if (pVM->hm.s.vmx.msr.vmx_eptcaps)
    10341034            {
    1035                 LogRel(("HWACCM: MSR_IA32_VMX_EPT_VPID_CAPS    = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_eptcaps));
    1036 
    1037                 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_RWX_X_ONLY)
    1038                     LogRel(("HWACCM:    MSR_IA32_VMX_EPT_CAPS_RWX_X_ONLY\n"));
    1039                 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_RWX_W_ONLY)
    1040                     LogRel(("HWACCM:    MSR_IA32_VMX_EPT_CAPS_RWX_W_ONLY\n"));
    1041                 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_RWX_WX_ONLY)
    1042                     LogRel(("HWACCM:    MSR_IA32_VMX_EPT_CAPS_RWX_WX_ONLY\n"));
    1043                 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_21_BITS)
    1044                     LogRel(("HWACCM:    MSR_IA32_VMX_EPT_CAPS_GAW_21_BITS\n"));
    1045                 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_30_BITS)
    1046                     LogRel(("HWACCM:    MSR_IA32_VMX_EPT_CAPS_GAW_30_BITS\n"));
    1047                 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_39_BITS)
    1048                     LogRel(("HWACCM:    MSR_IA32_VMX_EPT_CAPS_GAW_39_BITS\n"));
    1049                 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_48_BITS)
    1050                     LogRel(("HWACCM:    MSR_IA32_VMX_EPT_CAPS_GAW_48_BITS\n"));
    1051                 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_57_BITS)
    1052                     LogRel(("HWACCM:    MSR_IA32_VMX_EPT_CAPS_GAW_57_BITS\n"));
    1053                 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_UC)
    1054                     LogRel(("HWACCM:    MSR_IA32_VMX_EPT_CAPS_EMT_UC\n"));
    1055                 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_WC)
    1056                     LogRel(("HWACCM:    MSR_IA32_VMX_EPT_CAPS_EMT_WC\n"));
    1057                 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_WT)
    1058                     LogRel(("HWACCM:    MSR_IA32_VMX_EPT_CAPS_EMT_WT\n"));
    1059                 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_WP)
    1060                     LogRel(("HWACCM:    MSR_IA32_VMX_EPT_CAPS_EMT_WP\n"));
    1061                 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_WB)
    1062                     LogRel(("HWACCM:    MSR_IA32_VMX_EPT_CAPS_EMT_WB\n"));
    1063                 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_SP_21_BITS)
    1064                     LogRel(("HWACCM:    MSR_IA32_VMX_EPT_CAPS_SP_21_BITS\n"));
    1065                 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_SP_30_BITS)
    1066                     LogRel(("HWACCM:    MSR_IA32_VMX_EPT_CAPS_SP_30_BITS\n"));
    1067                 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_SP_39_BITS)
    1068                     LogRel(("HWACCM:    MSR_IA32_VMX_EPT_CAPS_SP_39_BITS\n"));
    1069                 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_SP_48_BITS)
    1070                     LogRel(("HWACCM:    MSR_IA32_VMX_EPT_CAPS_SP_48_BITS\n"));
    1071                 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT)
    1072                     LogRel(("HWACCM:    MSR_IA32_VMX_EPT_CAPS_INVEPT\n"));
    1073                 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_SINGLE_CONTEXT)
    1074                     LogRel(("HWACCM:    MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_SINGLE_CONTEXT\n"));
    1075                 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_ALL_CONTEXTS)
    1076                     LogRel(("HWACCM:    MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_ALL_CONTEXTS\n"));
    1077                 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID)
    1078                     LogRel(("HWACCM:    MSR_IA32_VMX_EPT_CAPS_INVVPID\n"));
    1079                 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV_ADDR)
    1080                     LogRel(("HWACCM:    MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV_ADDR\n"));
    1081                 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_SINGLE_CONTEXT)
    1082                     LogRel(("HWACCM:    MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_SINGLE_CONTEXT\n"));
    1083                 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_ALL_CONTEXTS)
    1084                     LogRel(("HWACCM:    MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_ALL_CONTEXTS\n"));
    1085                 if (pVM->hwaccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_SINGLE_CONTEXT_RETAIN_GLOBALS)
    1086                     LogRel(("HWACCM:    MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_SINGLE_CONTEXT_RETAIN_GLOBALS\n"));
     1035                LogRel(("HM: MSR_IA32_VMX_EPT_VPID_CAPS    = %RX64\n", pVM->hm.s.vmx.msr.vmx_eptcaps));
     1036
     1037                if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_RWX_X_ONLY)
     1038                    LogRel(("HM:    MSR_IA32_VMX_EPT_CAPS_RWX_X_ONLY\n"));
     1039                if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_RWX_W_ONLY)
     1040                    LogRel(("HM:    MSR_IA32_VMX_EPT_CAPS_RWX_W_ONLY\n"));
     1041                if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_RWX_WX_ONLY)
     1042                    LogRel(("HM:    MSR_IA32_VMX_EPT_CAPS_RWX_WX_ONLY\n"));
     1043                if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_21_BITS)
     1044                    LogRel(("HM:    MSR_IA32_VMX_EPT_CAPS_GAW_21_BITS\n"));
     1045                if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_30_BITS)
     1046                    LogRel(("HM:    MSR_IA32_VMX_EPT_CAPS_GAW_30_BITS\n"));
     1047                if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_39_BITS)
     1048                    LogRel(("HM:    MSR_IA32_VMX_EPT_CAPS_GAW_39_BITS\n"));
     1049                if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_48_BITS)
     1050                    LogRel(("HM:    MSR_IA32_VMX_EPT_CAPS_GAW_48_BITS\n"));
     1051                if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_57_BITS)
     1052                    LogRel(("HM:    MSR_IA32_VMX_EPT_CAPS_GAW_57_BITS\n"));
     1053                if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_UC)
     1054                    LogRel(("HM:    MSR_IA32_VMX_EPT_CAPS_EMT_UC\n"));
     1055                if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_WC)
     1056                    LogRel(("HM:    MSR_IA32_VMX_EPT_CAPS_EMT_WC\n"));
     1057                if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_WT)
     1058                    LogRel(("HM:    MSR_IA32_VMX_EPT_CAPS_EMT_WT\n"));
     1059                if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_WP)
     1060                    LogRel(("HM:    MSR_IA32_VMX_EPT_CAPS_EMT_WP\n"));
     1061                if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_WB)
     1062                    LogRel(("HM:    MSR_IA32_VMX_EPT_CAPS_EMT_WB\n"));
     1063                if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_SP_21_BITS)
     1064                    LogRel(("HM:    MSR_IA32_VMX_EPT_CAPS_SP_21_BITS\n"));
     1065                if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_SP_30_BITS)
     1066                    LogRel(("HM:    MSR_IA32_VMX_EPT_CAPS_SP_30_BITS\n"));
     1067                if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_SP_39_BITS)
     1068                    LogRel(("HM:    MSR_IA32_VMX_EPT_CAPS_SP_39_BITS\n"));
     1069                if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_SP_48_BITS)
     1070                    LogRel(("HM:    MSR_IA32_VMX_EPT_CAPS_SP_48_BITS\n"));
     1071                if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT)
     1072                    LogRel(("HM:    MSR_IA32_VMX_EPT_CAPS_INVEPT\n"));
     1073                if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_SINGLE_CONTEXT)
     1074                    LogRel(("HM:    MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_SINGLE_CONTEXT\n"));
     1075                if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_ALL_CONTEXTS)
     1076                    LogRel(("HM:    MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_ALL_CONTEXTS\n"));
     1077                if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID)
     1078                    LogRel(("HM:    MSR_IA32_VMX_EPT_CAPS_INVVPID\n"));
     1079                if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV_ADDR)
     1080                    LogRel(("HM:    MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV_ADDR\n"));
     1081                if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_SINGLE_CONTEXT)
     1082                    LogRel(("HM:    MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_SINGLE_CONTEXT\n"));
     1083                if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_ALL_CONTEXTS)
     1084                    LogRel(("HM:    MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_ALL_CONTEXTS\n"));
     1085                if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_SINGLE_CONTEXT_RETAIN_GLOBALS)
     1086                    LogRel(("HM:    MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_SINGLE_CONTEXT_RETAIN_GLOBALS\n"));
    10871087            }
    10881088
    1089             LogRel(("HWACCM: MSR_IA32_VMX_MISC             = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_misc));
    1090             if (MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(pVM->hwaccm.s.vmx.msr.vmx_misc) == pVM->hwaccm.s.vmx.cPreemptTimerShift)
    1091                 LogRel(("HWACCM:    MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT %x\n", MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(pVM->hwaccm.s.vmx.msr.vmx_misc)));
     1089            LogRel(("HM: MSR_IA32_VMX_MISC             = %RX64\n", pVM->hm.s.vmx.msr.vmx_misc));
     1090            if (MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(pVM->hm.s.vmx.msr.vmx_misc) == pVM->hm.s.vmx.cPreemptTimerShift)
     1091                LogRel(("HM:    MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT %x\n", MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(pVM->hm.s.vmx.msr.vmx_misc)));
    10921092            else
    10931093            {
    1094                 LogRel(("HWACCM:    MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT %x - erratum detected, using %x instead\n",
    1095                         MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(pVM->hwaccm.s.vmx.msr.vmx_misc), pVM->hwaccm.s.vmx.cPreemptTimerShift));
     1094                LogRel(("HM:    MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT %x - erratum detected, using %x instead\n",
     1095                        MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(pVM->hm.s.vmx.msr.vmx_misc), pVM->hm.s.vmx.cPreemptTimerShift));
    10961096            }
    1097             LogRel(("HWACCM:    MSR_IA32_VMX_MISC_ACTIVITY_STATES %x\n", MSR_IA32_VMX_MISC_ACTIVITY_STATES(pVM->hwaccm.s.vmx.msr.vmx_misc)));
    1098             LogRel(("HWACCM:    MSR_IA32_VMX_MISC_CR3_TARGET      %x\n", MSR_IA32_VMX_MISC_CR3_TARGET(pVM->hwaccm.s.vmx.msr.vmx_misc)));
    1099             LogRel(("HWACCM:    MSR_IA32_VMX_MISC_MAX_MSR         %x\n", MSR_IA32_VMX_MISC_MAX_MSR(pVM->hwaccm.s.vmx.msr.vmx_misc)));
    1100             LogRel(("HWACCM:    MSR_IA32_VMX_MISC_MSEG_ID         %x\n", MSR_IA32_VMX_MISC_MSEG_ID(pVM->hwaccm.s.vmx.msr.vmx_misc)));
    1101 
    1102             LogRel(("HWACCM: MSR_IA32_VMX_CR0_FIXED0       = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed0));
    1103             LogRel(("HWACCM: MSR_IA32_VMX_CR0_FIXED1       = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed1));
    1104             LogRel(("HWACCM: MSR_IA32_VMX_CR4_FIXED0       = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0));
    1105             LogRel(("HWACCM: MSR_IA32_VMX_CR4_FIXED1       = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed1));
    1106             LogRel(("HWACCM: MSR_IA32_VMX_VMCS_ENUM        = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_vmcs_enum));
    1107 
    1108             LogRel(("HWACCM: TPR shadow physaddr           = %RHp\n", pVM->hwaccm.s.vmx.pAPICPhys));
     1097            LogRel(("HM:    MSR_IA32_VMX_MISC_ACTIVITY_STATES %x\n", MSR_IA32_VMX_MISC_ACTIVITY_STATES(pVM->hm.s.vmx.msr.vmx_misc)));
     1098            LogRel(("HM:    MSR_IA32_VMX_MISC_CR3_TARGET      %x\n", MSR_IA32_VMX_MISC_CR3_TARGET(pVM->hm.s.vmx.msr.vmx_misc)));
     1099            LogRel(("HM:    MSR_IA32_VMX_MISC_MAX_MSR         %x\n", MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.vmx_misc)));
     1100            LogRel(("HM:    MSR_IA32_VMX_MISC_MSEG_ID         %x\n", MSR_IA32_VMX_MISC_MSEG_ID(pVM->hm.s.vmx.msr.vmx_misc)));
     1101
     1102            LogRel(("HM: MSR_IA32_VMX_CR0_FIXED0       = %RX64\n", pVM->hm.s.vmx.msr.vmx_cr0_fixed0));
     1103            LogRel(("HM: MSR_IA32_VMX_CR0_FIXED1       = %RX64\n", pVM->hm.s.vmx.msr.vmx_cr0_fixed1));
     1104            LogRel(("HM: MSR_IA32_VMX_CR4_FIXED0       = %RX64\n", pVM->hm.s.vmx.msr.vmx_cr4_fixed0));
     1105            LogRel(("HM: MSR_IA32_VMX_CR4_FIXED1       = %RX64\n", pVM->hm.s.vmx.msr.vmx_cr4_fixed1));
     1106            LogRel(("HM: MSR_IA32_VMX_VMCS_ENUM        = %RX64\n", pVM->hm.s.vmx.msr.vmx_vmcs_enum));
     1107
     1108            LogRel(("HM: TPR shadow physaddr           = %RHp\n", pVM->hm.s.vmx.pAPICPhys));
    11091109
    11101110            /* Paranoia */
    1111             AssertRelease(MSR_IA32_VMX_MISC_MAX_MSR(pVM->hwaccm.s.vmx.msr.vmx_misc) >= 512);
     1111            AssertRelease(MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.vmx_misc) >= 512);
    11121112
    11131113            for (VMCPUID i = 0; i < pVM->cCpus; i++)
    11141114            {
    1115                 LogRel(("HWACCM: VCPU%d: MSR bitmap physaddr    = %RHp\n", i, pVM->aCpus[i].hwaccm.s.vmx.pMSRBitmapPhys));
    1116                 LogRel(("HWACCM: VCPU%d: VMCS physaddr          = %RHp\n", i, pVM->aCpus[i].hwaccm.s.vmx.HCPhysVMCS));
     1115                LogRel(("HM: VCPU%d: MSR bitmap physaddr    = %RHp\n", i, pVM->aCpus[i].hm.s.vmx.pMSRBitmapPhys));
     1116                LogRel(("HM: VCPU%d: VMCS physaddr          = %RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysVMCS));
    11171117            }
    11181118
    1119             if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_EPT)
    1120                 pVM->hwaccm.s.fNestedPaging = pVM->hwaccm.s.fAllowNestedPaging;
    1121 
    1122             if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VPID)
    1123                 pVM->hwaccm.s.vmx.fVPID = pVM->hwaccm.s.vmx.fAllowVPID;
     1119            if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_EPT)
     1120                pVM->hm.s.fNestedPaging = pVM->hm.s.fAllowNestedPaging;
     1121
     1122            if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VPID)
     1123                pVM->hm.s.vmx.fVPID = pVM->hm.s.vmx.fAllowVPID;
    11241124
    11251125            /*
     
    11281128             * in Nehalems and secondary VM exec. controls should be supported in all of them, but nonetheless it's Intel...
    11291129             */
    1130             if (!(pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
     1130            if (!(pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
    11311131                && CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP))
    11321132            {
     
    11351135
    11361136            /* Unrestricted guest execution relies on EPT. */
    1137             if (    pVM->hwaccm.s.fNestedPaging
    1138                 &&  (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE))
     1137            if (    pVM->hm.s.fNestedPaging
     1138                &&  (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE))
    11391139            {
    1140                 pVM->hwaccm.s.vmx.fUnrestrictedGuest = true;
     1140                pVM->hm.s.vmx.fUnrestrictedGuest = true;
    11411141            }
    11421142
    11431143            /* Only try once. */
    1144             pVM->hwaccm.s.fInitialized = true;
    1145 
    1146             if (!pVM->hwaccm.s.vmx.fUnrestrictedGuest)
     1144            pVM->hm.s.fInitialized = true;
     1145
     1146            if (!pVM->hm.s.vmx.fUnrestrictedGuest)
    11471147            {
    11481148                /* Allocate three pages for the TSS we need for real mode emulation. (2 pages for the IO bitmap) */
    1149                 rc = PDMR3VMMDevHeapAlloc(pVM, HWACCM_VTX_TOTAL_DEVHEAP_MEM, (RTR3PTR *)&pVM->hwaccm.s.vmx.pRealModeTSS);
     1149                rc = PDMR3VMMDevHeapAlloc(pVM, HM_VTX_TOTAL_DEVHEAP_MEM, (RTR3PTR *)&pVM->hm.s.vmx.pRealModeTSS);
    11501150                if (RT_SUCCESS(rc))
    11511151                {
    11521152                    /* The I/O bitmap starts right after the virtual interrupt redirection bitmap. */
    1153                     ASMMemZero32(pVM->hwaccm.s.vmx.pRealModeTSS, sizeof(*pVM->hwaccm.s.vmx.pRealModeTSS));
    1154                     pVM->hwaccm.s.vmx.pRealModeTSS->offIoBitmap = sizeof(*pVM->hwaccm.s.vmx.pRealModeTSS);
     1153                    ASMMemZero32(pVM->hm.s.vmx.pRealModeTSS, sizeof(*pVM->hm.s.vmx.pRealModeTSS));
     1154                    pVM->hm.s.vmx.pRealModeTSS->offIoBitmap = sizeof(*pVM->hm.s.vmx.pRealModeTSS);
    11551155                    /* Bit set to 0 means redirection enabled. */
    1156                     memset(pVM->hwaccm.s.vmx.pRealModeTSS->IntRedirBitmap, 0x0, sizeof(pVM->hwaccm.s.vmx.pRealModeTSS->IntRedirBitmap));
     1156                    memset(pVM->hm.s.vmx.pRealModeTSS->IntRedirBitmap, 0x0, sizeof(pVM->hm.s.vmx.pRealModeTSS->IntRedirBitmap));
    11571157                    /* Allow all port IO, so the VT-x IO intercepts do their job. */
    1158                     memset(pVM->hwaccm.s.vmx.pRealModeTSS + 1, 0, PAGE_SIZE*2);
    1159                     *((unsigned char *)pVM->hwaccm.s.vmx.pRealModeTSS + HWACCM_VTX_TSS_SIZE - 2) = 0xff;
     1158                    memset(pVM->hm.s.vmx.pRealModeTSS + 1, 0, PAGE_SIZE*2);
     1159                    *((unsigned char *)pVM->hm.s.vmx.pRealModeTSS + HM_VTX_TSS_SIZE - 2) = 0xff;
    11601160
    11611161                    /*
     
    11631163                     * real and protected mode without paging with EPT.
    11641164                     */
    1165                     pVM->hwaccm.s.vmx.pNonPagingModeEPTPageTable = (PX86PD)((char *)pVM->hwaccm.s.vmx.pRealModeTSS + PAGE_SIZE * 3);
     1165                    pVM->hm.s.vmx.pNonPagingModeEPTPageTable = (PX86PD)((char *)pVM->hm.s.vmx.pRealModeTSS + PAGE_SIZE * 3);
    11661166                    for (unsigned i = 0; i < X86_PG_ENTRIES; i++)
    11671167                    {
    1168                         pVM->hwaccm.s.vmx.pNonPagingModeEPTPageTable->a[i].u  = _4M * i;
    1169                         pVM->hwaccm.s.vmx.pNonPagingModeEPTPageTable->a[i].u |= X86_PDE4M_P | X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_A | X86_PDE4M_D | X86_PDE4M_PS | X86_PDE4M_G;
     1168                        pVM->hm.s.vmx.pNonPagingModeEPTPageTable->a[i].u  = _4M * i;
     1169                        pVM->hm.s.vmx.pNonPagingModeEPTPageTable->a[i].u |= X86_PDE4M_P | X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_A | X86_PDE4M_D | X86_PDE4M_PS | X86_PDE4M_G;
    11701170                    }
    11711171
    11721172                    /* We convert it here every time as PCI regions could be reconfigured. */
    1173                     rc = PDMVMMDevHeapR3ToGCPhys(pVM, pVM->hwaccm.s.vmx.pRealModeTSS, &GCPhys);
     1173                    rc = PDMVMMDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
    11741174                    AssertRC(rc);
    1175                     LogRel(("HWACCM: Real Mode TSS guest physaddr  = %RGp\n", GCPhys));
    1176 
    1177                     rc = PDMVMMDevHeapR3ToGCPhys(pVM, pVM->hwaccm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
     1175                    LogRel(("HM: Real Mode TSS guest physaddr  = %RGp\n", GCPhys));
     1176
     1177                    rc = PDMVMMDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
    11781178                    AssertRC(rc);
    1179                     LogRel(("HWACCM: Non-Paging Mode EPT CR3       = %RGp\n", GCPhys));
     1179                    LogRel(("HM: Non-Paging Mode EPT CR3       = %RGp\n", GCPhys));
    11801180                }
    11811181                else
    11821182                {
    1183                     LogRel(("HWACCM: No real mode VT-x support (PDMR3VMMDevHeapAlloc returned %Rrc)\n", rc));
    1184                     pVM->hwaccm.s.vmx.pRealModeTSS = NULL;
    1185                     pVM->hwaccm.s.vmx.pNonPagingModeEPTPageTable = NULL;
     1183                    LogRel(("HM: No real mode VT-x support (PDMR3VMMDevHeapAlloc returned %Rrc)\n", rc));
     1184                    pVM->hm.s.vmx.pRealModeTSS = NULL;
     1185                    pVM->hm.s.vmx.pNonPagingModeEPTPageTable = NULL;
    11861186                }
    11871187            }
     
    11911191            if (rc == VINF_SUCCESS)
    11921192            {
    1193                 pVM->fHWACCMEnabled = true;
    1194                 pVM->hwaccm.s.vmx.fEnabled = true;
    1195                 hwaccmR3DisableRawMode(pVM);
     1193                pVM->fHMEnabled = true;
     1194                pVM->hm.s.vmx.fEnabled = true;
     1195                hmR3DisableRawMode(pVM);
    11961196
    11971197                CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SEP);
    11981198#ifdef VBOX_ENABLE_64_BITS_GUESTS
    1199                 if (pVM->hwaccm.s.fAllow64BitGuests)
     1199                if (pVM->hm.s.fAllow64BitGuests)
    12001200                {
    12011201                    CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
     
    12091209                /* Todo: this needs to be fixed properly!! */
    12101210                if (    CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE)
    1211                     &&  (pVM->hwaccm.s.vmx.hostEFER & MSR_K6_EFER_NXE))
     1211                    &&  (pVM->hm.s.vmx.hostEFER & MSR_K6_EFER_NXE))
    12121212                    CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
    12131213
    1214                 LogRel((pVM->hwaccm.s.fAllow64BitGuests
    1215                         ? "HWACCM: 32-bit and 64-bit guests supported.\n"
    1216                         : "HWACCM: 32-bit guests supported.\n"));
     1214                LogRel((pVM->hm.s.fAllow64BitGuests
     1215                        ? "HM: 32-bit and 64-bit guests supported.\n"
     1216                        : "HM: 32-bit guests supported.\n"));
    12171217#else
    1218                 LogRel(("HWACCM: 32-bit guests supported.\n"));
     1218                LogRel(("HM: 32-bit guests supported.\n"));
    12191219#endif
    1220                 LogRel(("HWACCM: VMX enabled!\n"));
    1221                 if (pVM->hwaccm.s.fNestedPaging)
     1220                LogRel(("HM: VMX enabled!\n"));
     1221                if (pVM->hm.s.fNestedPaging)
    12221222                {
    1223                     LogRel(("HWACCM: Enabled nested paging\n"));
    1224                     LogRel(("HWACCM: EPT root page                 = %RHp\n", PGMGetHyperCR3(VMMGetCpu(pVM))));
    1225                     if (pVM->hwaccm.s.vmx.enmFlushEPT == VMX_FLUSH_EPT_SINGLE_CONTEXT)
    1226                         LogRel(("HWACCM: enmFlushEPT                   = VMX_FLUSH_EPT_SINGLE_CONTEXT\n"));
    1227                     else if (pVM->hwaccm.s.vmx.enmFlushEPT == VMX_FLUSH_EPT_ALL_CONTEXTS)
    1228                         LogRel(("HWACCM: enmFlushEPT                   = VMX_FLUSH_EPT_ALL_CONTEXTS\n"));
    1229                     else if (pVM->hwaccm.s.vmx.enmFlushEPT == VMX_FLUSH_EPT_NOT_SUPPORTED)
    1230                         LogRel(("HWACCM: enmFlushEPT                   = VMX_FLUSH_EPT_NOT_SUPPORTED\n"));
     1223                    LogRel(("HM: Enabled nested paging\n"));
     1224                    LogRel(("HM: EPT root page                 = %RHp\n", PGMGetHyperCR3(VMMGetCpu(pVM))));
     1225                    if (pVM->hm.s.vmx.enmFlushEPT == VMX_FLUSH_EPT_SINGLE_CONTEXT)
     1226                        LogRel(("HM: enmFlushEPT                   = VMX_FLUSH_EPT_SINGLE_CONTEXT\n"));
     1227                    else if (pVM->hm.s.vmx.enmFlushEPT == VMX_FLUSH_EPT_ALL_CONTEXTS)
     1228                        LogRel(("HM: enmFlushEPT                   = VMX_FLUSH_EPT_ALL_CONTEXTS\n"));
     1229                    else if (pVM->hm.s.vmx.enmFlushEPT == VMX_FLUSH_EPT_NOT_SUPPORTED)
     1230                        LogRel(("HM: enmFlushEPT                   = VMX_FLUSH_EPT_NOT_SUPPORTED\n"));
    12311231                    else
    1232                         LogRel(("HWACCM: enmFlushEPT                   = %d\n", pVM->hwaccm.s.vmx.enmFlushEPT));
    1233 
    1234                     if (pVM->hwaccm.s.vmx.fUnrestrictedGuest)
    1235                         LogRel(("HWACCM: Unrestricted guest execution enabled!\n"));
     1232                        LogRel(("HM: enmFlushEPT                   = %d\n", pVM->hm.s.vmx.enmFlushEPT));
     1233
     1234                    if (pVM->hm.s.vmx.fUnrestrictedGuest)
     1235                        LogRel(("HM: Unrestricted guest execution enabled!\n"));
    12361236
    12371237#if HC_ARCH_BITS == 64
    1238                     if (pVM->hwaccm.s.fLargePages)
     1238                    if (pVM->hm.s.fLargePages)
    12391239                    {
    12401240                        /* Use large (2 MB) pages for our EPT PDEs where possible. */
    12411241                        PGMSetLargePageUsage(pVM, true);
    1242                         LogRel(("HWACCM: Large page support enabled!\n"));
     1242                        LogRel(("HM: Large page support enabled!\n"));
    12431243                    }
    12441244#endif
    12451245                }
    12461246                else
    1247                     Assert(!pVM->hwaccm.s.vmx.fUnrestrictedGuest);
    1248 
    1249                 if (pVM->hwaccm.s.vmx.fVPID)
     1247                    Assert(!pVM->hm.s.vmx.fUnrestrictedGuest);
     1248
     1249                if (pVM->hm.s.vmx.fVPID)
    12501250                {
    1251                     LogRel(("HWACCM: Enabled VPID\n"));
    1252                     if (pVM->hwaccm.s.vmx.enmFlushVPID == VMX_FLUSH_VPID_INDIV_ADDR)
    1253                         LogRel(("HWACCM: enmFlushVPID                  = VMX_FLUSH_VPID_INDIV_ADDR\n"));
    1254                     else if (pVM->hwaccm.s.vmx.enmFlushVPID == VMX_FLUSH_VPID_SINGLE_CONTEXT)
    1255                         LogRel(("HWACCM: enmFlushVPID                  = VMX_FLUSH_VPID_SINGLE_CONTEXT\n"));
    1256                     else if (pVM->hwaccm.s.vmx.enmFlushVPID == VMX_FLUSH_VPID_ALL_CONTEXTS)
    1257                         LogRel(("HWACCM: enmFlushVPID                  = VMX_FLUSH_VPID_ALL_CONTEXTS\n"));
    1258                     else if (pVM->hwaccm.s.vmx.enmFlushVPID == VMX_FLUSH_VPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
    1259                         LogRel(("HWACCM: enmFlushVPID                  = VMX_FLUSH_VPID_SINGLE_CONTEXT_RETAIN_GLOBALS\n"));
     1251                    LogRel(("HM: Enabled VPID\n"));
     1252                    if (pVM->hm.s.vmx.enmFlushVPID == VMX_FLUSH_VPID_INDIV_ADDR)
     1253                        LogRel(("HM: enmFlushVPID                  = VMX_FLUSH_VPID_INDIV_ADDR\n"));
     1254                    else if (pVM->hm.s.vmx.enmFlushVPID == VMX_FLUSH_VPID_SINGLE_CONTEXT)
     1255                        LogRel(("HM: enmFlushVPID                  = VMX_FLUSH_VPID_SINGLE_CONTEXT\n"));
     1256                    else if (pVM->hm.s.vmx.enmFlushVPID == VMX_FLUSH_VPID_ALL_CONTEXTS)
     1257                        LogRel(("HM: enmFlushVPID                  = VMX_FLUSH_VPID_ALL_CONTEXTS\n"));
     1258                    else if (pVM->hm.s.vmx.enmFlushVPID == VMX_FLUSH_VPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
     1259                        LogRel(("HM: enmFlushVPID                  = VMX_FLUSH_VPID_SINGLE_CONTEXT_RETAIN_GLOBALS\n"));
    12601260                    else
    1261                         LogRel(("HWACCM: enmFlushVPID                  = %d\n", pVM->hwaccm.s.vmx.enmFlushVPID));
     1261                        LogRel(("HM: enmFlushVPID                  = %d\n", pVM->hm.s.vmx.enmFlushVPID));
    12621262                }
    1263                 else if (pVM->hwaccm.s.vmx.enmFlushVPID == VMX_FLUSH_VPID_NOT_SUPPORTED)
    1264                     LogRel(("HWACCM: Ignoring VPID capabilities of CPU.\n"));
     1263                else if (pVM->hm.s.vmx.enmFlushVPID == VMX_FLUSH_VPID_NOT_SUPPORTED)
     1264                    LogRel(("HM: Ignoring VPID capabilities of CPU.\n"));
    12651265
    12661266                /* TPR patching status logging. */
    1267                 if (pVM->hwaccm.s.fTRPPatchingAllowed)
     1267                if (pVM->hm.s.fTRPPatchingAllowed)
    12681268                {
    1269                     if (    (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
    1270                         &&  (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))
     1269                    if (    (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
     1270                        &&  (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))
    12711271                    {
    1272                         pVM->hwaccm.s.fTRPPatchingAllowed = false;  /* not necessary as we have a hardware solution. */
    1273                         LogRel(("HWACCM: TPR Patching not required (VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC).\n"));
     1272                        pVM->hm.s.fTRPPatchingAllowed = false;  /* not necessary as we have a hardware solution. */
     1273                        LogRel(("HM: TPR Patching not required (VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC).\n"));
    12741274                    }
    12751275                    else
     
    12821282                            ||  !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE))
    12831283                        {
    1284                             pVM->hwaccm.s.fTRPPatchingAllowed = false;
    1285                             LogRel(("HWACCM: TPR patching disabled (long mode not supported).\n"));
     1284                            pVM->hm.s.fTRPPatchingAllowed = false;
     1285                            LogRel(("HM: TPR patching disabled (long mode not supported).\n"));
    12861286                        }
    12871287                    }
    12881288                }
    1289                 LogRel(("HWACCM: TPR Patching %s.\n", (pVM->hwaccm.s.fTRPPatchingAllowed) ? "enabled" : "disabled"));
     1289                LogRel(("HM: TPR Patching %s.\n", (pVM->hm.s.fTRPPatchingAllowed) ? "enabled" : "disabled"));
    12901290
    12911291                /*
    12921292                 * Check for preemption timer config override and log the state of it.
    12931293                 */
    1294                 if (pVM->hwaccm.s.vmx.fUsePreemptTimer)
     1294                if (pVM->hm.s.vmx.fUsePreemptTimer)
    12951295                {
    1296                     PCFGMNODE pCfgHwAccM = CFGMR3GetChild(CFGMR3GetRoot(pVM), "HWACCM");
    1297                     int rc2 = CFGMR3QueryBoolDef(pCfgHwAccM, "UsePreemptTimer", &pVM->hwaccm.s.vmx.fUsePreemptTimer, true);
     1296                    PCFGMNODE pCfgHwAccM = CFGMR3GetChild(CFGMR3GetRoot(pVM), "HM");
     1297                    int rc2 = CFGMR3QueryBoolDef(pCfgHwAccM, "UsePreemptTimer", &pVM->hm.s.vmx.fUsePreemptTimer, true);
    12981298                    AssertLogRelRC(rc2);
    12991299                }
    1300                 if (pVM->hwaccm.s.vmx.fUsePreemptTimer)
    1301                     LogRel(("HWACCM: Using the VMX-preemption timer (cPreemptTimerShift=%u)\n", pVM->hwaccm.s.vmx.cPreemptTimerShift));
     1300                if (pVM->hm.s.vmx.fUsePreemptTimer)
     1301                    LogRel(("HM: Using the VMX-preemption timer (cPreemptTimerShift=%u)\n", pVM->hm.s.vmx.cPreemptTimerShift));
    13021302            }
    13031303            else
    13041304            {
    1305                 LogRel(("HWACCM: VMX setup failed with rc=%Rrc!\n", rc));
    1306                 LogRel(("HWACCM: Last instruction error %x\n", pVM->aCpus[0].hwaccm.s.vmx.lasterror.ulInstrError));
    1307                 pVM->fHWACCMEnabled = false;
     1305                LogRel(("HM: VMX setup failed with rc=%Rrc!\n", rc));
     1306                LogRel(("HM: Last instruction error %x\n", pVM->aCpus[0].hm.s.vmx.lasterror.ulInstrError));
     1307                pVM->fHMEnabled = false;
    13081308            }
    13091309        }
    13101310    }
    13111311    else
    1312     if (pVM->hwaccm.s.svm.fSupported)
    1313     {
    1314         Log(("pVM->hwaccm.s.svm.fSupported = %d\n", pVM->hwaccm.s.svm.fSupported));
    1315 
    1316         if (pVM->hwaccm.s.fInitialized == false)
     1312    if (pVM->hm.s.svm.fSupported)
     1313    {
     1314        Log(("pVM->hm.s.svm.fSupported = %d\n", pVM->hm.s.svm.fSupported));
     1315
     1316        if (pVM->hm.s.fInitialized == false)
    13171317        {
    13181318            /* Erratum 170 which requires a forced TLB flush for each world switch:
     
    13431343                &&  !((u32Model == 0x6f || u32Model == 0x6c || u32Model == 0x7c) &&  u32Stepping >= 2))
    13441344            {
    1345                 LogRel(("HWACMM: AMD cpu with erratum 170 family %x model %x stepping %x\n", u32Family, u32Model, u32Stepping));
     1345                LogRel(("HM: AMD cpu with erratum 170 family %x model %x stepping %x\n", u32Family, u32Model, u32Stepping));
    13461346            }
    13471347
    1348             LogRel(("HWACMM: cpuid 0x80000001.u32AMDFeatureECX = %RX32\n", pVM->hwaccm.s.cpuid.u32AMDFeatureECX));
    1349             LogRel(("HWACMM: cpuid 0x80000001.u32AMDFeatureEDX = %RX32\n", pVM->hwaccm.s.cpuid.u32AMDFeatureEDX));
    1350             LogRel(("HWACCM: AMD HWCR MSR                      = %RX64\n", pVM->hwaccm.s.svm.msrHWCR));
    1351             LogRel(("HWACCM: AMD-V revision                    = %X\n", pVM->hwaccm.s.svm.u32Rev));
    1352             LogRel(("HWACCM: AMD-V max ASID                    = %d\n", pVM->hwaccm.s.uMaxASID));
    1353             LogRel(("HWACCM: AMD-V features                    = %X\n", pVM->hwaccm.s.svm.u32Features));
     1348            LogRel(("HM: cpuid 0x80000001.u32AMDFeatureECX = %RX32\n", pVM->hm.s.cpuid.u32AMDFeatureECX));
     1349            LogRel(("HM: cpuid 0x80000001.u32AMDFeatureEDX = %RX32\n", pVM->hm.s.cpuid.u32AMDFeatureEDX));
     1350            LogRel(("HM: AMD HWCR MSR                      = %RX64\n", pVM->hm.s.svm.msrHWCR));
     1351            LogRel(("HM: AMD-V revision                    = %X\n", pVM->hm.s.svm.u32Rev));
     1352            LogRel(("HM: AMD-V max ASID                    = %d\n", pVM->hm.s.uMaxASID));
     1353            LogRel(("HM: AMD-V features                    = %X\n", pVM->hm.s.svm.u32Features));
    13541354            static const struct { uint32_t fFlag; const char *pszName; } s_aSvmFeatures[] =
    13551355            {
     
    13681368#undef FLAG_NAME
    13691369            };
    1370             uint32_t fSvmFeatures = pVM->hwaccm.s.svm.u32Features;
     1370            uint32_t fSvmFeatures = pVM->hm.s.svm.u32Features;
    13711371            for (unsigned i = 0; i < RT_ELEMENTS(s_aSvmFeatures); i++)
    13721372                if (fSvmFeatures & s_aSvmFeatures[i].fFlag)
    13731373                {
    1374                     LogRel(("HWACCM:    %s\n", s_aSvmFeatures[i].pszName));
     1374                    LogRel(("HM:    %s\n", s_aSvmFeatures[i].pszName));
    13751375                    fSvmFeatures &= ~s_aSvmFeatures[i].fFlag;
    13761376                }
     
    13781378                for (unsigned iBit = 0; iBit < 32; iBit++)
    13791379                    if (RT_BIT_32(iBit) & fSvmFeatures)
    1380                         LogRel(("HWACCM:    Reserved bit %u\n", iBit));
     1380                        LogRel(("HM:    Reserved bit %u\n", iBit));
    13811381
    13821382            /* Only try once. */
    1383             pVM->hwaccm.s.fInitialized = true;
    1384 
    1385             if (pVM->hwaccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_NESTED_PAGING)
    1386                 pVM->hwaccm.s.fNestedPaging = pVM->hwaccm.s.fAllowNestedPaging;
     1383            pVM->hm.s.fInitialized = true;
     1384
     1385            if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_NESTED_PAGING)
     1386                pVM->hm.s.fNestedPaging = pVM->hm.s.fAllowNestedPaging;
    13871387
    13881388            rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_HWACC_SETUP_VM, 0, NULL);
     
    13901390            if (rc == VINF_SUCCESS)
    13911391            {
    1392                 pVM->fHWACCMEnabled = true;
    1393                 pVM->hwaccm.s.svm.fEnabled = true;
    1394 
    1395                 if (pVM->hwaccm.s.fNestedPaging)
     1392                pVM->fHMEnabled = true;
     1393                pVM->hm.s.svm.fEnabled = true;
     1394
     1395                if (pVM->hm.s.fNestedPaging)
    13961396                {
    1397                     LogRel(("HWACCM:    Enabled nested paging\n"));
     1397                    LogRel(("HM:    Enabled nested paging\n"));
    13981398#if HC_ARCH_BITS == 64
    1399                     if (pVM->hwaccm.s.fLargePages)
     1399                    if (pVM->hm.s.fLargePages)
    14001400                    {
    14011401                        /* Use large (2 MB) pages for our nested paging PDEs where possible. */
    14021402                        PGMSetLargePageUsage(pVM, true);
    1403                         LogRel(("HWACCM:    Large page support enabled!\n"));
     1403                        LogRel(("HM:    Large page support enabled!\n"));
    14041404                    }
    14051405#endif
    14061406                }
    14071407
    1408                 hwaccmR3DisableRawMode(pVM);
     1408                hmR3DisableRawMode(pVM);
    14091409                CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SEP);
    14101410                CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SYSCALL);
    14111411#ifdef VBOX_ENABLE_64_BITS_GUESTS
    1412                 if (pVM->hwaccm.s.fAllow64BitGuests)
     1412                if (pVM->hm.s.fAllow64BitGuests)
    14131413                {
    14141414                    CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE);
     
    14231423#endif
    14241424
    1425                 LogRel((pVM->hwaccm.s.fAllow64BitGuests
    1426                         ? "HWACCM:    32-bit and 64-bit guest supported.\n"
    1427                         : "HWACCM:    32-bit guest supported.\n"));
    1428 
    1429                 LogRel(("HWACCM:    TPR Patching %s.\n", (pVM->hwaccm.s.fTRPPatchingAllowed) ? "enabled" : "disabled"));
     1425                LogRel((pVM->hm.s.fAllow64BitGuests
     1426                        ? "HM:    32-bit and 64-bit guest supported.\n"
     1427                        : "HM:    32-bit guest supported.\n"));
     1428
     1429                LogRel(("HM:    TPR Patching %s.\n", (pVM->hm.s.fTRPPatchingAllowed) ? "enabled" : "disabled"));
    14301430            }
    14311431            else
    14321432            {
    1433                 pVM->fHWACCMEnabled = false;
     1433                pVM->fHMEnabled = false;
    14341434            }
    14351435        }
    14361436    }
    1437     if (pVM->fHWACCMEnabled)
    1438         LogRel(("HWACCM:    VT-x/AMD-V init method: %s\n", (pVM->hwaccm.s.fGlobalInit) ? "GLOBAL" : "LOCAL"));
     1437    if (pVM->fHMEnabled)
     1438        LogRel(("HM:    VT-x/AMD-V init method: %s\n", (pVM->hm.s.fGlobalInit) ? "GLOBAL" : "LOCAL"));
    14391439    RTLogRelSetBuffering(fOldBuffered);
    14401440    return VINF_SUCCESS;
     
    14491449 * @param   pVM     The VM.
    14501450 */
    1451 VMMR3DECL(void) HWACCMR3Relocate(PVM pVM)
    1452 {
    1453     Log(("HWACCMR3Relocate to %RGv\n", MMHyperGetArea(pVM, 0)));
     1451VMMR3DECL(void) HMR3Relocate(PVM pVM)
     1452{
     1453    Log(("HMR3Relocate to %RGv\n", MMHyperGetArea(pVM, 0)));
    14541454
    14551455    /* Fetch the current paging mode during the relocate callback during state loading. */
     
    14601460            PVMCPU pVCpu = &pVM->aCpus[i];
    14611461
    1462             pVCpu->hwaccm.s.enmShadowMode            = PGMGetShadowMode(pVCpu);
    1463             Assert(pVCpu->hwaccm.s.vmx.enmCurrGuestMode == PGMGetGuestMode(pVCpu));
    1464             pVCpu->hwaccm.s.vmx.enmCurrGuestMode     = PGMGetGuestMode(pVCpu);
     1462            pVCpu->hm.s.enmShadowMode            = PGMGetShadowMode(pVCpu);
     1463            Assert(pVCpu->hm.s.vmx.enmCurrGuestMode == PGMGetGuestMode(pVCpu));
     1464            pVCpu->hm.s.vmx.enmCurrGuestMode     = PGMGetGuestMode(pVCpu);
    14651465        }
    14661466    }
    14671467#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    1468     if (pVM->fHWACCMEnabled)
     1468    if (pVM->fHMEnabled)
    14691469    {
    14701470        int rc;
     
    14721472        {
    14731473            case PGMMODE_32_BIT:
    1474                 pVM->hwaccm.s.pfnHost32ToGuest64R0 = VMMR3GetHostToGuestSwitcher(pVM, VMMSWITCHER_32_TO_AMD64);
     1474                pVM->hm.s.pfnHost32ToGuest64R0 = VMMR3GetHostToGuestSwitcher(pVM, VMMSWITCHER_32_TO_AMD64);
    14751475                break;
    14761476
    14771477            case PGMMODE_PAE:
    14781478            case PGMMODE_PAE_NX:
    1479                 pVM->hwaccm.s.pfnHost32ToGuest64R0 = VMMR3GetHostToGuestSwitcher(pVM, VMMSWITCHER_PAE_TO_AMD64);
     1479                pVM->hm.s.pfnHost32ToGuest64R0 = VMMR3GetHostToGuestSwitcher(pVM, VMMSWITCHER_PAE_TO_AMD64);
    14801480                break;
    14811481
     
    14841484                break;
    14851485        }
    1486         rc = PDMR3LdrGetSymbolRC(pVM, NULL,       "VMXGCStartVM64", &pVM->hwaccm.s.pfnVMXGCStartVM64);
     1486        rc = PDMR3LdrGetSymbolRC(pVM, NULL,       "VMXGCStartVM64", &pVM->hm.s.pfnVMXGCStartVM64);
    14871487        AssertReleaseMsgRC(rc, ("VMXGCStartVM64 -> rc=%Rrc\n", rc));
    14881488
    1489         rc = PDMR3LdrGetSymbolRC(pVM, NULL,       "SVMGCVMRun64",   &pVM->hwaccm.s.pfnSVMGCVMRun64);
     1489        rc = PDMR3LdrGetSymbolRC(pVM, NULL,       "SVMGCVMRun64",   &pVM->hm.s.pfnSVMGCVMRun64);
    14901490        AssertReleaseMsgRC(rc, ("SVMGCVMRun64 -> rc=%Rrc\n", rc));
    14911491
    1492         rc = PDMR3LdrGetSymbolRC(pVM, NULL,       "HWACCMSaveGuestFPU64",   &pVM->hwaccm.s.pfnSaveGuestFPU64);
    1493         AssertReleaseMsgRC(rc, ("HWACCMSetupFPU64 -> rc=%Rrc\n", rc));
    1494 
    1495         rc = PDMR3LdrGetSymbolRC(pVM, NULL,       "HWACCMSaveGuestDebug64",   &pVM->hwaccm.s.pfnSaveGuestDebug64);
    1496         AssertReleaseMsgRC(rc, ("HWACCMSetupDebug64 -> rc=%Rrc\n", rc));
     1492        rc = PDMR3LdrGetSymbolRC(pVM, NULL,       "HMSaveGuestFPU64",   &pVM->hm.s.pfnSaveGuestFPU64);
     1493        AssertReleaseMsgRC(rc, ("HMSetupFPU64 -> rc=%Rrc\n", rc));
     1494
     1495        rc = PDMR3LdrGetSymbolRC(pVM, NULL,       "HMSaveGuestDebug64",   &pVM->hm.s.pfnSaveGuestDebug64);
     1496        AssertReleaseMsgRC(rc, ("HMSetupDebug64 -> rc=%Rrc\n", rc));
    14971497
    14981498# ifdef DEBUG
    1499         rc = PDMR3LdrGetSymbolRC(pVM, NULL,       "HWACCMTestSwitcher64",   &pVM->hwaccm.s.pfnTest64);
    1500         AssertReleaseMsgRC(rc, ("HWACCMTestSwitcher64 -> rc=%Rrc\n", rc));
     1499        rc = PDMR3LdrGetSymbolRC(pVM, NULL,       "HMTestSwitcher64",   &pVM->hm.s.pfnTest64);
     1500        AssertReleaseMsgRC(rc, ("HMTestSwitcher64 -> rc=%Rrc\n", rc));
    15011501# endif
    15021502    }
     
    15121512 * @param   pVM         Pointer to the VM.
    15131513 */
    1514 VMMR3DECL(bool) HWACCMR3IsAllowed(PVM pVM)
    1515 {
    1516     return pVM->hwaccm.s.fAllowed;
     1514VMMR3DECL(bool) HMR3IsAllowed(PVM pVM)
     1515{
     1516    return pVM->hm.s.fAllowed;
    15171517}
    15181518
     
    15291529 * @param   enmGuestMode   New guest paging mode.
    15301530 */
    1531 VMMR3DECL(void) HWACCMR3PagingModeChanged(PVM pVM, PVMCPU pVCpu, PGMMODE enmShadowMode, PGMMODE enmGuestMode)
     1531VMMR3DECL(void) HMR3PagingModeChanged(PVM pVM, PVMCPU pVCpu, PGMMODE enmShadowMode, PGMMODE enmGuestMode)
    15321532{
    15331533    /* Ignore page mode changes during state loading. */
     
    15351535        return;
    15361536
    1537     pVCpu->hwaccm.s.enmShadowMode = enmShadowMode;
    1538 
    1539     if (   pVM->hwaccm.s.vmx.fEnabled
    1540         && pVM->fHWACCMEnabled)
    1541     {
    1542         if (    pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode == PGMMODE_REAL
     1537    pVCpu->hm.s.enmShadowMode = enmShadowMode;
     1538
     1539    if (   pVM->hm.s.vmx.fEnabled
     1540        && pVM->fHMEnabled)
     1541    {
     1542        if (    pVCpu->hm.s.vmx.enmLastSeenGuestMode == PGMMODE_REAL
    15431543            &&  enmGuestMode >= PGMMODE_PROTECTED)
    15441544        {
     
    15531553    }
    15541554
    1555     if (pVCpu->hwaccm.s.vmx.enmCurrGuestMode != enmGuestMode)
     1555    if (pVCpu->hm.s.vmx.enmCurrGuestMode != enmGuestMode)
    15561556    {
    15571557        /* Keep track of paging mode changes. */
    1558         pVCpu->hwaccm.s.vmx.enmPrevGuestMode = pVCpu->hwaccm.s.vmx.enmCurrGuestMode;
    1559         pVCpu->hwaccm.s.vmx.enmCurrGuestMode = enmGuestMode;
     1558        pVCpu->hm.s.vmx.enmPrevGuestMode = pVCpu->hm.s.vmx.enmCurrGuestMode;
     1559        pVCpu->hm.s.vmx.enmCurrGuestMode = enmGuestMode;
    15601560
    15611561        /* Did we miss a change, because all code was executed in the recompiler? */
    1562         if (pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode == enmGuestMode)
     1562        if (pVCpu->hm.s.vmx.enmLastSeenGuestMode == enmGuestMode)
    15631563        {
    1564             Log(("HWACCMR3PagingModeChanged missed %s->%s transition (prev %s)\n", PGMGetModeName(pVCpu->hwaccm.s.vmx.enmPrevGuestMode), PGMGetModeName(pVCpu->hwaccm.s.vmx.enmCurrGuestMode), PGMGetModeName(pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode)));
    1565             pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode = pVCpu->hwaccm.s.vmx.enmPrevGuestMode;
     1564            Log(("HMR3PagingModeChanged missed %s->%s transition (prev %s)\n", PGMGetModeName(pVCpu->hm.s.vmx.enmPrevGuestMode), PGMGetModeName(pVCpu->hm.s.vmx.enmCurrGuestMode), PGMGetModeName(pVCpu->hm.s.vmx.enmLastSeenGuestMode)));
     1565            pVCpu->hm.s.vmx.enmLastSeenGuestMode = pVCpu->hm.s.vmx.enmPrevGuestMode;
    15661566        }
    15671567    }
    15681568
    15691569    /* Reset the contents of the read cache. */
    1570     PVMCSCACHE pCache = &pVCpu->hwaccm.s.vmx.VMCSCache;
     1570    PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
    15711571    for (unsigned j = 0; j < pCache->Read.cValidEntries; j++)
    15721572        pCache->Read.aFieldVal[j] = 0;
     
    15751575
    15761576/**
    1577  * Terminates the HWACCM.
     1577 * Terminates the HM.
    15781578 *
    15791579 * Termination means cleaning up and freeing all resources,
     
    15831583 * @param   pVM         Pointer to the VM.
    15841584 */
    1585 VMMR3DECL(int) HWACCMR3Term(PVM pVM)
    1586 {
    1587     if (pVM->hwaccm.s.vmx.pRealModeTSS)
    1588     {
    1589         PDMR3VMMDevHeapFree(pVM, pVM->hwaccm.s.vmx.pRealModeTSS);
    1590         pVM->hwaccm.s.vmx.pRealModeTSS       = 0;
    1591     }
    1592     hwaccmR3TermCPU(pVM);
     1585VMMR3DECL(int) HMR3Term(PVM pVM)
     1586{
     1587    if (pVM->hm.s.vmx.pRealModeTSS)
     1588    {
     1589        PDMR3VMMDevHeapFree(pVM, pVM->hm.s.vmx.pRealModeTSS);
     1590        pVM->hm.s.vmx.pRealModeTSS       = 0;
     1591    }
     1592    hmR3TermCPU(pVM);
    15931593    return 0;
    15941594}
     
    15961596
    15971597/**
    1598  * Terminates the per-VCPU HWACCM.
     1598 * Terminates the per-VCPU HM.
    15991599 *
    16001600 * @returns VBox status code.
    16011601 * @param   pVM         Pointer to the VM.
    16021602 */
    1603 static int hwaccmR3TermCPU(PVM pVM)
     1603static int hmR3TermCPU(PVM pVM)
    16041604{
    16051605    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     
    16081608
    16091609#ifdef VBOX_WITH_STATISTICS
    1610         if (pVCpu->hwaccm.s.paStatExitReason)
     1610        if (pVCpu->hm.s.paStatExitReason)
    16111611        {
    1612             MMHyperFree(pVM, pVCpu->hwaccm.s.paStatExitReason);
    1613             pVCpu->hwaccm.s.paStatExitReason   = NULL;
    1614             pVCpu->hwaccm.s.paStatExitReasonR0 = NIL_RTR0PTR;
     1612            MMHyperFree(pVM, pVCpu->hm.s.paStatExitReason);
     1613            pVCpu->hm.s.paStatExitReason   = NULL;
     1614            pVCpu->hm.s.paStatExitReasonR0 = NIL_RTR0PTR;
    16151615        }
    1616         if (pVCpu->hwaccm.s.paStatInjectedIrqs)
     1616        if (pVCpu->hm.s.paStatInjectedIrqs)
    16171617        {
    1618             MMHyperFree(pVM, pVCpu->hwaccm.s.paStatInjectedIrqs);
    1619             pVCpu->hwaccm.s.paStatInjectedIrqs   = NULL;
    1620             pVCpu->hwaccm.s.paStatInjectedIrqsR0 = NIL_RTR0PTR;
     1618            MMHyperFree(pVM, pVCpu->hm.s.paStatInjectedIrqs);
     1619            pVCpu->hm.s.paStatInjectedIrqs   = NULL;
     1620            pVCpu->hm.s.paStatInjectedIrqsR0 = NIL_RTR0PTR;
    16211621        }
    16221622#endif
    16231623
    16241624#ifdef VBOX_WITH_CRASHDUMP_MAGIC
    1625         memset(pVCpu->hwaccm.s.vmx.VMCSCache.aMagic, 0, sizeof(pVCpu->hwaccm.s.vmx.VMCSCache.aMagic));
    1626         pVCpu->hwaccm.s.vmx.VMCSCache.uMagic = 0;
    1627         pVCpu->hwaccm.s.vmx.VMCSCache.uPos = 0xffffffff;
     1625        memset(pVCpu->hm.s.vmx.VMCSCache.aMagic, 0, sizeof(pVCpu->hm.s.vmx.VMCSCache.aMagic));
     1626        pVCpu->hm.s.vmx.VMCSCache.uMagic = 0;
     1627        pVCpu->hm.s.vmx.VMCSCache.uPos = 0xffffffff;
    16281628#endif
    16291629    }
     
    16351635 * Resets a virtual CPU.
    16361636 *
    1637  * Used by HWACCMR3Reset and CPU hot plugging.
     1637 * Used by HMR3Reset and CPU hot plugging.
    16381638 *
    16391639 * @param   pVCpu   The CPU to reset.
    16401640 */
    1641 VMMR3DECL(void) HWACCMR3ResetCpu(PVMCPU pVCpu)
     1641VMMR3DECL(void) HMR3ResetCpu(PVMCPU pVCpu)
    16421642{
    16431643    /* On first entry we'll sync everything. */
    1644     pVCpu->hwaccm.s.fContextUseFlags = HWACCM_CHANGED_ALL;
    1645 
    1646     pVCpu->hwaccm.s.vmx.cr0_mask = 0;
    1647     pVCpu->hwaccm.s.vmx.cr4_mask = 0;
    1648 
    1649     pVCpu->hwaccm.s.fActive        = false;
    1650     pVCpu->hwaccm.s.Event.fPending = false;
     1644    pVCpu->hm.s.fContextUseFlags = HM_CHANGED_ALL;
     1645
     1646    pVCpu->hm.s.vmx.cr0_mask = 0;
     1647    pVCpu->hm.s.vmx.cr4_mask = 0;
     1648
     1649    pVCpu->hm.s.fActive        = false;
     1650    pVCpu->hm.s.Event.fPending = false;
    16511651
    16521652    /* Reset state information for real-mode emulation in VT-x. */
    1653     pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode = PGMMODE_REAL;
    1654     pVCpu->hwaccm.s.vmx.enmPrevGuestMode     = PGMMODE_REAL;
    1655     pVCpu->hwaccm.s.vmx.enmCurrGuestMode     = PGMMODE_REAL;
     1653    pVCpu->hm.s.vmx.enmLastSeenGuestMode = PGMMODE_REAL;
     1654    pVCpu->hm.s.vmx.enmPrevGuestMode     = PGMMODE_REAL;
     1655    pVCpu->hm.s.vmx.enmCurrGuestMode     = PGMMODE_REAL;
    16561656
    16571657    /* Reset the contents of the read cache. */
    1658     PVMCSCACHE pCache = &pVCpu->hwaccm.s.vmx.VMCSCache;
     1658    PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
    16591659    for (unsigned j = 0; j < pCache->Read.cValidEntries; j++)
    16601660        pCache->Read.aFieldVal[j] = 0;
     
    16711671 * The VM is being reset.
    16721672 *
    1673  * For the HWACCM component this means that any GDT/LDT/TSS monitors
     1673 * For the HM component this means that any GDT/LDT/TSS monitors
    16741674 * needs to be removed.
    16751675 *
    16761676 * @param   pVM     Pointer to the VM.
    16771677 */
    1678 VMMR3DECL(void) HWACCMR3Reset(PVM pVM)
    1679 {
    1680     LogFlow(("HWACCMR3Reset:\n"));
    1681 
    1682     if (pVM->fHWACCMEnabled)
    1683         hwaccmR3DisableRawMode(pVM);
     1678VMMR3DECL(void) HMR3Reset(PVM pVM)
     1679{
     1680    LogFlow(("HMR3Reset:\n"));
     1681
     1682    if (pVM->fHMEnabled)
     1683        hmR3DisableRawMode(pVM);
    16841684
    16851685    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     
    16871687        PVMCPU pVCpu = &pVM->aCpus[i];
    16881688
    1689         HWACCMR3ResetCpu(pVCpu);
     1689        HMR3ResetCpu(pVCpu);
    16901690    }
    16911691
    16921692    /* Clear all patch information. */
    1693     pVM->hwaccm.s.pGuestPatchMem         = 0;
    1694     pVM->hwaccm.s.pFreeGuestPatchMem     = 0;
    1695     pVM->hwaccm.s.cbGuestPatchMem        = 0;
    1696     pVM->hwaccm.s.cPatches           = 0;
    1697     pVM->hwaccm.s.PatchTree          = 0;
    1698     pVM->hwaccm.s.fTPRPatchingActive = false;
    1699     ASMMemZero32(pVM->hwaccm.s.aPatches, sizeof(pVM->hwaccm.s.aPatches));
     1693    pVM->hm.s.pGuestPatchMem         = 0;
     1694    pVM->hm.s.pFreeGuestPatchMem     = 0;
     1695    pVM->hm.s.cbGuestPatchMem        = 0;
     1696    pVM->hm.s.cPatches           = 0;
     1697    pVM->hm.s.PatchTree          = 0;
     1698    pVM->hm.s.fTPRPatchingActive = false;
     1699    ASMMemZero32(pVM->hm.s.aPatches, sizeof(pVM->hm.s.aPatches));
    17001700}
    17011701
     
    17091709 * @param   pvUser  Unused.
    17101710 */
    1711 DECLCALLBACK(VBOXSTRICTRC) hwaccmR3RemovePatches(PVM pVM, PVMCPU pVCpu, void *pvUser)
     1711DECLCALLBACK(VBOXSTRICTRC) hmR3RemovePatches(PVM pVM, PVMCPU pVCpu, void *pvUser)
    17121712{
    17131713    VMCPUID idCpu = (VMCPUID)(uintptr_t)pvUser;
     
    17171717        return VINF_SUCCESS;
    17181718
    1719     Log(("hwaccmR3RemovePatches\n"));
    1720     for (unsigned i = 0; i < pVM->hwaccm.s.cPatches; i++)
     1719    Log(("hmR3RemovePatches\n"));
     1720    for (unsigned i = 0; i < pVM->hm.s.cPatches; i++)
    17211721    {
    17221722        uint8_t         abInstr[15];
    1723         PHWACCMTPRPATCH pPatch = &pVM->hwaccm.s.aPatches[i];
     1723        PHMTPRPATCH pPatch = &pVM->hm.s.aPatches[i];
    17241724        RTGCPTR         pInstrGC = (RTGCPTR)pPatch->Core.Key;
    17251725        int             rc;
     
    17581758#endif
    17591759    }
    1760     pVM->hwaccm.s.cPatches           = 0;
    1761     pVM->hwaccm.s.PatchTree          = 0;
    1762     pVM->hwaccm.s.pFreeGuestPatchMem = pVM->hwaccm.s.pGuestPatchMem;
    1763     pVM->hwaccm.s.fTPRPatchingActive = false;
     1760    pVM->hm.s.cPatches           = 0;
     1761    pVM->hm.s.PatchTree          = 0;
     1762    pVM->hm.s.pFreeGuestPatchMem = pVM->hm.s.pGuestPatchMem;
     1763    pVM->hm.s.fTPRPatchingActive = false;
    17641764    return VINF_SUCCESS;
    17651765}
     
    17711771 * @returns VBox status code.
    17721772 * @param   pVM         Pointer to the VM.
    1773  * @param   idCpu       VCPU to execute hwaccmR3RemovePatches on.
     1773 * @param   idCpu       VCPU to execute hmR3RemovePatches on.
    17741774 * @param   pPatchMem   Patch memory range.
    17751775 * @param   cbPatchMem  Size of the memory range.
    17761776 */
    1777 static int hwaccmR3EnablePatching(PVM pVM, VMCPUID idCpu, RTRCPTR pPatchMem, unsigned cbPatchMem)
    1778 {
    1779     int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE, hwaccmR3RemovePatches, (void *)(uintptr_t)idCpu);
     1777static int hmR3EnablePatching(PVM pVM, VMCPUID idCpu, RTRCPTR pPatchMem, unsigned cbPatchMem)
     1778{
     1779    int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE, hmR3RemovePatches, (void *)(uintptr_t)idCpu);
    17801780    AssertRC(rc);
    17811781
    1782     pVM->hwaccm.s.pGuestPatchMem      = pPatchMem;
    1783     pVM->hwaccm.s.pFreeGuestPatchMem  = pPatchMem;
    1784     pVM->hwaccm.s.cbGuestPatchMem     = cbPatchMem;
     1782    pVM->hm.s.pGuestPatchMem      = pPatchMem;
     1783    pVM->hm.s.pFreeGuestPatchMem  = pPatchMem;
     1784    pVM->hm.s.cbGuestPatchMem     = cbPatchMem;
    17851785    return VINF_SUCCESS;
    17861786}
     
    17951795 * @param   cbPatchMem  Size of the memory range.
    17961796 */
    1797 VMMR3DECL(int)  HWACMMR3EnablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
     1797VMMR3DECL(int)  HMR3EnablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
    17981798{
    17991799    VM_ASSERT_EMT(pVM);
    1800     Log(("HWACMMR3EnablePatching %RGv size %x\n", pPatchMem, cbPatchMem));
     1800    Log(("HMR3EnablePatching %RGv size %x\n", pPatchMem, cbPatchMem));
    18011801    if (pVM->cCpus > 1)
    18021802    {
    18031803        /* We own the IOM lock here and could cause a deadlock by waiting for a VCPU that is blocking on the IOM lock. */
    18041804        int rc = VMR3ReqCallNoWait(pVM, VMCPUID_ANY_QUEUE,
    1805                                    (PFNRT)hwaccmR3EnablePatching, 4, pVM, VMMGetCpuId(pVM), (RTRCPTR)pPatchMem, cbPatchMem);
     1805                                   (PFNRT)hmR3EnablePatching, 4, pVM, VMMGetCpuId(pVM), (RTRCPTR)pPatchMem, cbPatchMem);
    18061806        AssertRC(rc);
    18071807        return rc;
    18081808    }
    1809     return hwaccmR3EnablePatching(pVM, VMMGetCpuId(pVM), (RTRCPTR)pPatchMem, cbPatchMem);
     1809    return hmR3EnablePatching(pVM, VMMGetCpuId(pVM), (RTRCPTR)pPatchMem, cbPatchMem);
    18101810}
    18111811
     
    18191819 * @param   cbPatchMem  Size of the memory range.
    18201820 */
    1821 VMMR3DECL(int)  HWACMMR3DisablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
    1822 {
    1823     Log(("HWACMMR3DisablePatching %RGv size %x\n", pPatchMem, cbPatchMem));
    1824 
    1825     Assert(pVM->hwaccm.s.pGuestPatchMem == pPatchMem);
    1826     Assert(pVM->hwaccm.s.cbGuestPatchMem == cbPatchMem);
     1821VMMR3DECL(int)  HMR3DisablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
     1822{
     1823    Log(("HMR3DisablePatching %RGv size %x\n", pPatchMem, cbPatchMem));
     1824
     1825    Assert(pVM->hm.s.pGuestPatchMem == pPatchMem);
     1826    Assert(pVM->hm.s.cbGuestPatchMem == cbPatchMem);
    18271827
    18281828    /* @todo Potential deadlock when other VCPUs are waiting on the IOM lock (we own it)!! */
    1829     int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE, hwaccmR3RemovePatches, (void *)(uintptr_t)VMMGetCpuId(pVM));
     1829    int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE, hmR3RemovePatches, (void *)(uintptr_t)VMMGetCpuId(pVM));
    18301830    AssertRC(rc);
    18311831
    1832     pVM->hwaccm.s.pGuestPatchMem      = 0;
    1833     pVM->hwaccm.s.pFreeGuestPatchMem  = 0;
    1834     pVM->hwaccm.s.cbGuestPatchMem     = 0;
    1835     pVM->hwaccm.s.fTPRPatchingActive = false;
     1832    pVM->hm.s.pGuestPatchMem      = 0;
     1833    pVM->hm.s.pFreeGuestPatchMem  = 0;
     1834    pVM->hm.s.cbGuestPatchMem     = 0;
     1835    pVM->hm.s.fTPRPatchingActive = false;
    18361836    return VINF_SUCCESS;
    18371837}
     
    18471847 *
    18481848 */
    1849 DECLCALLBACK(VBOXSTRICTRC) hwaccmR3ReplaceTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser)
     1849DECLCALLBACK(VBOXSTRICTRC) hmR3ReplaceTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser)
    18501850{
    18511851    /*
     
    18631863     */
    18641864    PCPUMCTX        pCtx   = CPUMQueryGuestCtxPtr(pVCpu);
    1865     PHWACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
     1865    PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
    18661866    if (pPatch)
    18671867    {
    1868         Log(("hwaccmR3ReplaceTprInstr: already patched %RGv\n", pCtx->rip));
     1868        Log(("hmR3ReplaceTprInstr: already patched %RGv\n", pCtx->rip));
    18691869        return VINF_SUCCESS;
    18701870    }
    1871     uint32_t const  idx = pVM->hwaccm.s.cPatches;
    1872     if (idx >= RT_ELEMENTS(pVM->hwaccm.s.aPatches))
    1873     {
    1874         Log(("hwaccmR3ReplaceTprInstr: no available patch slots (%RGv)\n", pCtx->rip));
     1871    uint32_t const  idx = pVM->hm.s.cPatches;
     1872    if (idx >= RT_ELEMENTS(pVM->hm.s.aPatches))
     1873    {
     1874        Log(("hmR3ReplaceTprInstr: no available patch slots (%RGv)\n", pCtx->rip));
    18751875        return VINF_SUCCESS;
    18761876    }
    1877     pPatch = &pVM->hwaccm.s.aPatches[idx];
    1878 
    1879     Log(("hwaccmR3ReplaceTprInstr: rip=%RGv idxPatch=%u\n", pCtx->rip, idx));
     1877    pPatch = &pVM->hm.s.aPatches[idx];
     1878
     1879    Log(("hmR3ReplaceTprInstr: rip=%RGv idxPatch=%u\n", pCtx->rip, idx));
    18801880
    18811881    /*
    18821882     * Disassemble the instruction and get cracking.
    18831883     */
    1884     DBGFR3DisasInstrCurrentLog(pVCpu, "hwaccmR3ReplaceTprInstr");
    1885     PDISCPUSTATE    pDis = &pVCpu->hwaccm.s.DisState;
     1884    DBGFR3DisasInstrCurrentLog(pVCpu, "hmR3ReplaceTprInstr");
     1885    PDISCPUSTATE    pDis = &pVCpu->hm.s.DisState;
    18861886    uint32_t        cbOp;
    18871887    int rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
     
    19031903            if (pDis->Param2.fUse == DISUSE_REG_GEN32)
    19041904            {
    1905                 pPatch->enmType     = HWACCMTPRINSTR_WRITE_REG;
     1905                pPatch->enmType     = HMTPRINSTR_WRITE_REG;
    19061906                pPatch->uSrcOperand = pDis->Param2.Base.idxGenReg;
    1907                 Log(("hwaccmR3ReplaceTprInstr: HWACCMTPRINSTR_WRITE_REG %u\n", pDis->Param2.Base.idxGenReg));
     1907                Log(("hmR3ReplaceTprInstr: HMTPRINSTR_WRITE_REG %u\n", pDis->Param2.Base.idxGenReg));
    19081908            }
    19091909            else
    19101910            {
    19111911                Assert(pDis->Param2.fUse == DISUSE_IMMEDIATE32);
    1912                 pPatch->enmType     = HWACCMTPRINSTR_WRITE_IMM;
     1912                pPatch->enmType     = HMTPRINSTR_WRITE_IMM;
    19131913                pPatch->uSrcOperand = pDis->Param2.uValue;
    1914                 Log(("hwaccmR3ReplaceTprInstr: HWACCMTPRINSTR_WRITE_IMM %#llx\n", pDis->Param2.uValue));
     1914                Log(("hmR3ReplaceTprInstr: HMTPRINSTR_WRITE_IMM %#llx\n", pDis->Param2.uValue));
    19151915            }
    19161916            rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, s_abVMMCall, sizeof(s_abVMMCall));
     
    19471947                &&  pDis->Param2.fUse == DISUSE_IMMEDIATE8
    19481948                &&  pDis->Param2.uValue == 4
    1949                 &&  cbOpMmio + cbOp < sizeof(pVM->hwaccm.s.aPatches[idx].aOpcode))
     1949                &&  cbOpMmio + cbOp < sizeof(pVM->hm.s.aPatches[idx].aOpcode))
    19501950            {
    19511951                uint8_t abInstr[15];
     
    19721972
    19731973                Log(("Acceptable read/shr candidate!\n"));
    1974                 pPatch->enmType = HWACCMTPRINSTR_READ_SHR4;
     1974                pPatch->enmType = HMTPRINSTR_READ_SHR4;
    19751975            }
    19761976            else
    19771977            {
    1978                 pPatch->enmType     = HWACCMTPRINSTR_READ;
     1978                pPatch->enmType     = HMTPRINSTR_READ;
    19791979                pPatch->uDstOperand = idxMmioReg;
    19801980
     
    19841984                memcpy(pPatch->aNewOpcode, s_abVMMCall, sizeof(s_abVMMCall));
    19851985                pPatch->cbNewOp = sizeof(s_abVMMCall);
    1986                 Log(("hwaccmR3ReplaceTprInstr: HWACCMTPRINSTR_READ %u\n", pPatch->uDstOperand));
     1986                Log(("hmR3ReplaceTprInstr: HMTPRINSTR_READ %u\n", pPatch->uDstOperand));
    19871987            }
    19881988        }
    19891989
    19901990        pPatch->Core.Key = pCtx->eip;
    1991         rc = RTAvloU32Insert(&pVM->hwaccm.s.PatchTree, &pPatch->Core);
     1991        rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
    19921992        AssertRC(rc);
    19931993
    1994         pVM->hwaccm.s.cPatches++;
    1995         STAM_COUNTER_INC(&pVM->hwaccm.s.StatTPRReplaceSuccess);
     1994        pVM->hm.s.cPatches++;
     1995        STAM_COUNTER_INC(&pVM->hm.s.StatTPRReplaceSuccess);
    19961996        return VINF_SUCCESS;
    19971997    }
     
    20002000     * Save invalid patch, so we will not try again.
    20012001     */
    2002     Log(("hwaccmR3ReplaceTprInstr: Failed to patch instr!\n"));
     2002    Log(("hmR3ReplaceTprInstr: Failed to patch instr!\n"));
    20032003    pPatch->Core.Key = pCtx->eip;
    2004     pPatch->enmType  = HWACCMTPRINSTR_INVALID;
    2005     rc = RTAvloU32Insert(&pVM->hwaccm.s.PatchTree, &pPatch->Core);
     2004    pPatch->enmType  = HMTPRINSTR_INVALID;
     2005    rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
    20062006    AssertRC(rc);
    2007     pVM->hwaccm.s.cPatches++;
    2008     STAM_COUNTER_INC(&pVM->hwaccm.s.StatTPRReplaceFailure);
     2007    pVM->hm.s.cPatches++;
     2008    STAM_COUNTER_INC(&pVM->hm.s.StatTPRReplaceFailure);
    20092009    return VINF_SUCCESS;
    20102010}
     
    20202020 *
    20212021 */
    2022 DECLCALLBACK(VBOXSTRICTRC) hwaccmR3PatchTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser)
     2022DECLCALLBACK(VBOXSTRICTRC) hmR3PatchTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser)
    20232023{
    20242024    /*
     
    20362036     */
    20372037    PCPUMCTX        pCtx   = CPUMQueryGuestCtxPtr(pVCpu);
    2038     PHWACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
     2038    PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
    20392039    if (pPatch)
    20402040    {
    2041         Log(("hwaccmR3PatchTprInstr: already patched %RGv\n", pCtx->rip));
     2041        Log(("hmR3PatchTprInstr: already patched %RGv\n", pCtx->rip));
    20422042        return VINF_SUCCESS;
    20432043    }
    2044     uint32_t const  idx = pVM->hwaccm.s.cPatches;
    2045     if (idx >= RT_ELEMENTS(pVM->hwaccm.s.aPatches))
    2046     {
    2047         Log(("hwaccmR3PatchTprInstr: no available patch slots (%RGv)\n", pCtx->rip));
     2044    uint32_t const  idx = pVM->hm.s.cPatches;
     2045    if (idx >= RT_ELEMENTS(pVM->hm.s.aPatches))
     2046    {
     2047        Log(("hmR3PatchTprInstr: no available patch slots (%RGv)\n", pCtx->rip));
    20482048        return VINF_SUCCESS;
    20492049    }
    2050     pPatch = &pVM->hwaccm.s.aPatches[idx];
    2051 
    2052     Log(("hwaccmR3PatchTprInstr: rip=%RGv idxPatch=%u\n", pCtx->rip, idx));
    2053     DBGFR3DisasInstrCurrentLog(pVCpu, "hwaccmR3PatchTprInstr");
     2050    pPatch = &pVM->hm.s.aPatches[idx];
     2051
     2052    Log(("hmR3PatchTprInstr: rip=%RGv idxPatch=%u\n", pCtx->rip, idx));
     2053    DBGFR3DisasInstrCurrentLog(pVCpu, "hmR3PatchTprInstr");
    20542054
    20552055    /*
    20562056     * Disassemble the instruction and get cracking.
    20572057     */
    2058     PDISCPUSTATE    pDis   = &pVCpu->hwaccm.s.DisState;
     2058    PDISCPUSTATE    pDis   = &pVCpu->hm.s.DisState;
    20592059    uint32_t        cbOp;
    20602060    int rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
     
    20712071
    20722072        pPatch->cbOp    = cbOp;
    2073         pPatch->enmType = HWACCMTPRINSTR_JUMP_REPLACEMENT;
     2073        pPatch->enmType = HMTPRINSTR_JUMP_REPLACEMENT;
    20742074
    20752075        if (pDis->Param1.fUse == DISUSE_DISPLACEMENT32)
     
    21772177        }
    21782178        aPatch[off++] = 0xE9;    /* jmp return_address */
    2179         *(RTRCUINTPTR *)&aPatch[off] = ((RTRCUINTPTR)pCtx->eip + cbOp) - ((RTRCUINTPTR)pVM->hwaccm.s.pFreeGuestPatchMem + off + 4);
     2179        *(RTRCUINTPTR *)&aPatch[off] = ((RTRCUINTPTR)pCtx->eip + cbOp) - ((RTRCUINTPTR)pVM->hm.s.pFreeGuestPatchMem + off + 4);
    21802180        off += sizeof(RTRCUINTPTR);
    21812181
    2182         if (pVM->hwaccm.s.pFreeGuestPatchMem + off <= pVM->hwaccm.s.pGuestPatchMem + pVM->hwaccm.s.cbGuestPatchMem)
     2182        if (pVM->hm.s.pFreeGuestPatchMem + off <= pVM->hm.s.pGuestPatchMem + pVM->hm.s.cbGuestPatchMem)
    21832183        {
    21842184            /* Write new code to the patch buffer. */
    2185             rc = PGMPhysSimpleWriteGCPtr(pVCpu, pVM->hwaccm.s.pFreeGuestPatchMem, aPatch, off);
     2185            rc = PGMPhysSimpleWriteGCPtr(pVCpu, pVM->hm.s.pFreeGuestPatchMem, aPatch, off);
    21862186            AssertRC(rc);
    21872187
    21882188#ifdef LOG_ENABLED
    21892189            uint32_t cbCurInstr;
    2190             for (RTGCPTR GCPtrInstr = pVM->hwaccm.s.pFreeGuestPatchMem;
    2191                  GCPtrInstr < pVM->hwaccm.s.pFreeGuestPatchMem + off;
     2190            for (RTGCPTR GCPtrInstr = pVM->hm.s.pFreeGuestPatchMem;
     2191                 GCPtrInstr < pVM->hm.s.pFreeGuestPatchMem + off;
    21922192                 GCPtrInstr += RT_MAX(cbCurInstr, 1))
    21932193            {
     
    22032203
    22042204            pPatch->aNewOpcode[0] = 0xE9;
    2205             *(RTRCUINTPTR *)&pPatch->aNewOpcode[1] = ((RTRCUINTPTR)pVM->hwaccm.s.pFreeGuestPatchMem) - ((RTRCUINTPTR)pCtx->eip + 5);
     2205            *(RTRCUINTPTR *)&pPatch->aNewOpcode[1] = ((RTRCUINTPTR)pVM->hm.s.pFreeGuestPatchMem) - ((RTRCUINTPTR)pCtx->eip + 5);
    22062206
    22072207            /* Overwrite the TPR instruction with a jump. */
     
    22112211            DBGFR3DisasInstrCurrentLog(pVCpu, "Jump");
    22122212
    2213             pVM->hwaccm.s.pFreeGuestPatchMem += off;
     2213            pVM->hm.s.pFreeGuestPatchMem += off;
    22142214            pPatch->cbNewOp = 5;
    22152215
    22162216            pPatch->Core.Key = pCtx->eip;
    2217             rc = RTAvloU32Insert(&pVM->hwaccm.s.PatchTree, &pPatch->Core);
     2217            rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
    22182218            AssertRC(rc);
    22192219
    2220             pVM->hwaccm.s.cPatches++;
    2221             pVM->hwaccm.s.fTPRPatchingActive = true;
    2222             STAM_COUNTER_INC(&pVM->hwaccm.s.StatTPRPatchSuccess);
     2220            pVM->hm.s.cPatches++;
     2221            pVM->hm.s.fTPRPatchingActive = true;
     2222            STAM_COUNTER_INC(&pVM->hm.s.StatTPRPatchSuccess);
    22232223            return VINF_SUCCESS;
    22242224        }
     
    22272227    }
    22282228    else
    2229         Log(("hwaccmR3PatchTprInstr: Failed to patch instr!\n"));
     2229        Log(("hmR3PatchTprInstr: Failed to patch instr!\n"));
    22302230
    22312231
     
    22332233     * Save invalid patch, so we will not try again.
    22342234     */
    2235     pPatch = &pVM->hwaccm.s.aPatches[idx];
     2235    pPatch = &pVM->hm.s.aPatches[idx];
    22362236    pPatch->Core.Key = pCtx->eip;
    2237     pPatch->enmType  = HWACCMTPRINSTR_INVALID;
    2238     rc = RTAvloU32Insert(&pVM->hwaccm.s.PatchTree, &pPatch->Core);
     2237    pPatch->enmType  = HMTPRINSTR_INVALID;
     2238    rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
    22392239    AssertRC(rc);
    2240     pVM->hwaccm.s.cPatches++;
    2241     STAM_COUNTER_INC(&pVM->hwaccm.s.StatTPRPatchFailure);
     2240    pVM->hm.s.cPatches++;
     2241    STAM_COUNTER_INC(&pVM->hm.s.StatTPRPatchFailure);
    22422242    return VINF_SUCCESS;
    22432243}
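
The jump-replacement path above hinges on two rel32 displacements: one from the original guest code into the patch buffer, and one from the end of the generated patch back to the instruction following the patched one. On x86, a 5-byte E9 jmp encodes the target minus the address of the *next* instruction, which is why both computations add 5 (or, equivalently, 4 past the opcode byte). A self-contained sketch of that arithmetic with made-up addresses:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t uPatchMem = 0xa0010000; /* hypothetical free guest patch memory */
        uint32_t uGuestEip = 0x00401000; /* hypothetical RIP of the TPR instruction */
        uint32_t cbOp      = 6;          /* bytes the original instruction occupies */
        uint32_t offJmp    = 32;         /* offset of the return jmp inside the patch */

        /* Guest code -> patch buffer (what goes into aNewOpcode[1..4] above). */
        int32_t offEnter  = (int32_t)(uPatchMem - (uGuestEip + 5));
        /* End of patch -> first instruction after the patched site. */
        int32_t offReturn = (int32_t)((uGuestEip + cbOp) - (uPatchMem + offJmp + 5));

        printf("enter: E9 %08x  return: E9 %08x\n",
               (uint32_t)offEnter, (uint32_t)offReturn);
        return 0;
    }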
     
    22522252 * @param   pCtx        Pointer to the guest CPU context.
    22532253 */
    2254 VMMR3DECL(int) HWACCMR3PatchTprInstr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
     2254VMMR3DECL(int) HMR3PatchTprInstr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
    22552255{
    22562256    NOREF(pCtx);
    22572257    int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE,
    2258                                 pVM->hwaccm.s.pGuestPatchMem ? hwaccmR3PatchTprInstr : hwaccmR3ReplaceTprInstr,
     2258                                pVM->hm.s.pGuestPatchMem ? hmR3PatchTprInstr : hmR3ReplaceTprInstr,
    22592259                                (void *)(uintptr_t)pVCpu->idCpu);
    22602260    AssertRC(rc);
     
    22702270 * @param   pCtx        Partial VM execution context.
    22712271 */
    2272 VMMR3DECL(int) HWACCMR3EmulateIoBlock(PVM pVM, PCPUMCTX pCtx)
     2272VMMR3DECL(int) HMR3EmulateIoBlock(PVM pVM, PCPUMCTX pCtx)
    22732273{
    22742274    PVMCPU pVCpu = VMMGetCpu(pVM);
    22752275
    2276     Assert(pVM->fHWACCMEnabled);
    2277     Log(("HWACCMR3EmulateIoBlock\n"));
     2276    Assert(pVM->fHMEnabled);
     2277    Log(("HMR3EmulateIoBlock\n"));
    22782278
    22792279    /* This is primarily intended to speed up Grub, so we don't care about paged protected mode. */
    2280     if (HWACCMCanEmulateIoBlockEx(pCtx))
    2281     {
    2282         Log(("HWACCMR3EmulateIoBlock -> enabled\n"));
    2283         pVCpu->hwaccm.s.EmulateIoBlock.fEnabled         = true;
    2284         pVCpu->hwaccm.s.EmulateIoBlock.GCPtrFunctionEip = pCtx->rip;
    2285         pVCpu->hwaccm.s.EmulateIoBlock.cr0              = pCtx->cr0;
     2280    if (HMCanEmulateIoBlockEx(pCtx))
     2281    {
     2282        Log(("HMR3EmulateIoBlock -> enabled\n"));
     2283        pVCpu->hm.s.EmulateIoBlock.fEnabled         = true;
     2284        pVCpu->hm.s.EmulateIoBlock.GCPtrFunctionEip = pCtx->rip;
     2285        pVCpu->hm.s.EmulateIoBlock.cr0              = pCtx->cr0;
    22862286        return VINF_EM_RESCHEDULE_REM;
    22872287    }
     
    22972297 * @param   pCtx        Partial VM execution context.
    22982298 */
    2299 VMMR3DECL(bool) HWACCMR3CanExecuteGuest(PVM pVM, PCPUMCTX pCtx)
     2299VMMR3DECL(bool) HMR3CanExecuteGuest(PVM pVM, PCPUMCTX pCtx)
    23002300{
    23012301    PVMCPU pVCpu = VMMGetCpu(pVM);
    23022302
    2303     Assert(pVM->fHWACCMEnabled);
     2303    Assert(pVM->fHMEnabled);
    23042304
    23052305    /* If we're still executing the IO code, then return false. */
    2306     if (    RT_UNLIKELY(pVCpu->hwaccm.s.EmulateIoBlock.fEnabled)
    2307         &&  pCtx->rip <  pVCpu->hwaccm.s.EmulateIoBlock.GCPtrFunctionEip + 0x200
    2308         &&  pCtx->rip >  pVCpu->hwaccm.s.EmulateIoBlock.GCPtrFunctionEip - 0x200
    2309         &&  pCtx->cr0 == pVCpu->hwaccm.s.EmulateIoBlock.cr0)
     2306    if (    RT_UNLIKELY(pVCpu->hm.s.EmulateIoBlock.fEnabled)
     2307        &&  pCtx->rip <  pVCpu->hm.s.EmulateIoBlock.GCPtrFunctionEip + 0x200
     2308        &&  pCtx->rip >  pVCpu->hm.s.EmulateIoBlock.GCPtrFunctionEip - 0x200
     2309        &&  pCtx->cr0 == pVCpu->hm.s.EmulateIoBlock.cr0)
    23102310        return false;
    23112311
    2312     pVCpu->hwaccm.s.EmulateIoBlock.fEnabled = false;
     2312    pVCpu->hm.s.EmulateIoBlock.fEnabled = false;
    23132313
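
The test above keeps the VCPU in the recompiler only while execution stays within a small window (0x200 bytes either side) of the RIP that started the I/O block, and only while CR0 is unchanged; leaving the window or switching CR0 clears the flag and re-enables hardware execution. A compact sketch of the predicate, with an illustrative struct standing in for pVCpu->hm.s.EmulateIoBlock:

    #include <stdbool.h>
    #include <stdint.h>

    typedef struct IOBLOCKSKETCH      /* stand-in for the real EmulateIoBlock state */
    {
        bool     fEnabled;
        uint64_t GCPtrFunctionEip;    /* RIP recorded when the block was entered */
        uint64_t cr0;                 /* CR0 recorded at the same time */
    } IOBLOCKSKETCH;

    static bool stillInsideIoBlock(IOBLOCKSKETCH const *p, uint64_t rip, uint64_t cr0)
    {
        return p->fEnabled
            && rip < p->GCPtrFunctionEip + 0x200  /* within 512 bytes ahead...  */
            && rip > p->GCPtrFunctionEip - 0x200  /* ...or behind the entry RIP */
            && cr0 == p->cr0;                     /* and the mode hasn't changed */
    }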
    23142314    /* AMD-V supports real & protected mode with or without paging. */
    2315     if (pVM->hwaccm.s.svm.fEnabled)
    2316     {
    2317         pVCpu->hwaccm.s.fActive = true;
     2315    if (pVM->hm.s.svm.fEnabled)
     2316    {
     2317        pVCpu->hm.s.fActive = true;
    23182318        return true;
    23192319    }
    23202320
    2321     pVCpu->hwaccm.s.fActive = false;
     2321    pVCpu->hm.s.fActive = false;
    23222322
    23232323    /* Note! The context supplied by REM is partial. If we add more checks here, be sure to verify that REM provides this info! */
    2324     Assert((pVM->hwaccm.s.vmx.fUnrestrictedGuest && !pVM->hwaccm.s.vmx.pRealModeTSS) || (!pVM->hwaccm.s.vmx.fUnrestrictedGuest && pVM->hwaccm.s.vmx.pRealModeTSS));
    2325 
    2326     bool fSupportsRealMode = pVM->hwaccm.s.vmx.fUnrestrictedGuest || PDMVMMDevHeapIsEnabled(pVM);
    2327     if (!pVM->hwaccm.s.vmx.fUnrestrictedGuest)
     2324    Assert((pVM->hm.s.vmx.fUnrestrictedGuest && !pVM->hm.s.vmx.pRealModeTSS) || (!pVM->hm.s.vmx.fUnrestrictedGuest && pVM->hm.s.vmx.pRealModeTSS));
     2325
     2326    bool fSupportsRealMode = pVM->hm.s.vmx.fUnrestrictedGuest || PDMVMMDevHeapIsEnabled(pVM);
     2327    if (!pVM->hm.s.vmx.fUnrestrictedGuest)
    23282328    {
    23292329        /*
     
    23622362                   mode. VT-x can't handle the CPU state right after a switch
    23632363                   from real to protected mode. (all sorts of RPL & DPL assumptions) */
    2364                 if (    pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode == PGMMODE_REAL
     2364                if (    pVCpu->hm.s.vmx.enmLastSeenGuestMode == PGMMODE_REAL
    23652365                    &&  enmGuestMode >= PGMMODE_PROTECTED)
    23662366                {
     
    23872387        {
    23882388            if (    !CPUMIsGuestInLongModeEx(pCtx)
    2389                 &&  !pVM->hwaccm.s.vmx.fUnrestrictedGuest)
     2389                &&  !pVM->hm.s.vmx.fUnrestrictedGuest)
    23902390            {
    23912391                /** @todo   This should (probably) be set on every excursion to the REM,
     
    23932393                 *          back to REM for real mode execution. (The XP hack below doesn't
    23942394                 *          work reliably without this.)
    2395                  *  Update: Implemented in EM.cpp, see #ifdef EM_NOTIFY_HWACCM.  */
    2396                 pVM->aCpus[0].hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL_GUEST;
    2397 
    2398                 if (    !pVM->hwaccm.s.fNestedPaging        /* requires a fake PD for real *and* protected mode without paging - stored in the VMM device heap*/
     2395                 *  Update: Implemented in EM.cpp, see #ifdef EM_NOTIFY_HM.  */
     2396                pVM->aCpus[0].hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST;
     2397
     2398                if (    !pVM->hm.s.fNestedPaging        /* requires a fake PD for real *and* protected mode without paging - stored in the VMM device heap*/
    23992399                    ||  CPUMIsGuestInRealModeEx(pCtx))      /* requires a fake TSS for real mode - stored in the VMM device heap */
    24002400                    return false;
     
    24312431    }
    24322432
    2433     if (pVM->hwaccm.s.vmx.fEnabled)
     2433    if (pVM->hm.s.vmx.fEnabled)
    24342434    {
    24352435        uint32_t mask;
    24362436
    24372437        /* if bit N is set in cr0_fixed0, then it must be set in the guest's cr0. */
    2438         mask = (uint32_t)pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed0;
    2439         /* Note: We ignore the NE bit here on purpose; see vmmr0\hwaccmr0.cpp for details. */
     2438        mask = (uint32_t)pVM->hm.s.vmx.msr.vmx_cr0_fixed0;
     2439        /* Note: We ignore the NE bit here on purpose; see vmmr0\hmr0.cpp for details. */
    24402440        mask &= ~X86_CR0_NE;
    24412441
     
    24542454
    24552455        /* if bit N is cleared in cr0_fixed1, then it must be zero in the guest's cr0. */
    2456         mask = (uint32_t)~pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed1;
     2456        mask = (uint32_t)~pVM->hm.s.vmx.msr.vmx_cr0_fixed1;
    24572457        if ((pCtx->cr0 & mask) != 0)
    24582458            return false;
    24592459
    24602460        /* if bit N is set in cr4_fixed0, then it must be set in the guest's cr4. */
    2461         mask  = (uint32_t)pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0;
     2461        mask  = (uint32_t)pVM->hm.s.vmx.msr.vmx_cr4_fixed0;
    24622462        mask &= ~X86_CR4_VMXE;
    24632463        if ((pCtx->cr4 & mask) != mask)
     
    24652465
    24662466        /* if bit N is cleared in cr4_fixed1, then it must be zero in the guest's cr4. */
    2467         mask = (uint32_t)~pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed1;
     2467        mask = (uint32_t)~pVM->hm.s.vmx.msr.vmx_cr4_fixed1;
    24682468        if ((pCtx->cr4 & mask) != 0)
    24692469            return false;
    24702470
    2471         pVCpu->hwaccm.s.fActive = true;
     2471        pVCpu->hm.s.fActive = true;
    24722472        return true;
    24732473    }
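
The four mask tests above implement the VT-x fixed-bit rules: every bit set in the CR0/CR4 FIXED0 MSR must be set in the guest register, and every bit clear in the FIXED1 MSR must be clear, with X86_CR0_NE and X86_CR4_VMXE masked out as the code notes. A hedged sketch of one such check (a helper name and placeholder usage of my own, not the real HM API):

    #include <stdbool.h>
    #include <stdint.h>

    static bool crWithinVmxLimits(uint32_t cr, uint32_t msrFixed0,
                                  uint32_t msrFixed1, uint32_t fIgnore)
    {
        uint32_t mustBeSet   = msrFixed0 & ~fIgnore;  /* e.g. ignore X86_CR0_NE */
        uint32_t mustBeClear = ~msrFixed1;            /* bits the CPU forbids   */
        return (cr & mustBeSet) == mustBeSet
            && (cr & mustBeClear) == 0;
    }

    /* Usage mirroring the checks above:
     *   crWithinVmxLimits(cr0, vmx_cr0_fixed0, vmx_cr0_fixed1, X86_CR0_NE)
     *   crWithinVmxLimits(cr4, vmx_cr4_fixed0, vmx_cr4_fixed1, X86_CR4_VMXE) */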
     
    24842484 * @param   pCtx        VM execution context.
    24852485 */
    2486 VMMR3DECL(bool) HWACCMR3IsRescheduleRequired(PVM pVM, PCPUMCTX pCtx)
     2486VMMR3DECL(bool) HMR3IsRescheduleRequired(PVM pVM, PCPUMCTX pCtx)
    24872487{
    24882488    /*
     
    24902490     * when the unrestricted guest execution feature is missing (VT-x only).
    24912491     */
    2492     if (    pVM->hwaccm.s.vmx.fEnabled
    2493         &&  !pVM->hwaccm.s.vmx.fUnrestrictedGuest
     2492    if (    pVM->hm.s.vmx.fEnabled
     2493        &&  !pVM->hm.s.vmx.fUnrestrictedGuest
    24942494        &&  !CPUMIsGuestInPagedProtectedModeEx(pCtx)
    24952495        &&  !PDMVMMDevHeapIsEnabled(pVM)
    2496         &&  (pVM->hwaccm.s.fNestedPaging || CPUMIsGuestInRealModeEx(pCtx)))
     2496        &&  (pVM->hm.s.fNestedPaging || CPUMIsGuestInRealModeEx(pCtx)))
    24972497        return true;
    24982498
     
    25072507 * @param   pVCpu       Pointer to the current VMCPU.
    25082508 */
    2509 VMMR3DECL(void) HWACCMR3NotifyScheduled(PVMCPU pVCpu)
    2510 {
    2511     pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL_GUEST;
     2509VMMR3DECL(void) HMR3NotifyScheduled(PVMCPU pVCpu)
     2510{
     2511    pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST;
    25122512}
    25132513
     
    25182518 * @param   pVCpu       Pointer to the VMCPU.
    25192519 */
    2520 VMMR3DECL(void) HWACCMR3NotifyEmulated(PVMCPU pVCpu)
    2521 {
    2522     pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL_GUEST;
     2520VMMR3DECL(void) HMR3NotifyEmulated(PVMCPU pVCpu)
     2521{
     2522    pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST;
    25232523}
    25242524
     
    25302530 * @param   pVCpu        Pointer to the VMCPU.
    25312531 */
    2532 VMMR3DECL(bool) HWACCMR3IsActive(PVMCPU pVCpu)
    2533 {
    2534     return pVCpu->hwaccm.s.fActive;
     2532VMMR3DECL(bool) HMR3IsActive(PVMCPU pVCpu)
     2533{
     2534    return pVCpu->hm.s.fActive;
    25352535}
    25362536
     
    25422542 * @param   pVM         Pointer to the VM.
    25432543 */
    2544 VMMR3DECL(bool) HWACCMR3IsNestedPagingActive(PVM pVM)
    2545 {
    2546     return pVM->hwaccm.s.fNestedPaging;
     2544VMMR3DECL(bool) HMR3IsNestedPagingActive(PVM pVM)
     2545{
     2546    return pVM->hm.s.fNestedPaging;
    25472547}
    25482548
     
    25542554 * @param   pVM         Pointer to the VM.
    25552555 */
    2556 VMMR3DECL(bool) HWACCMR3IsVPIDActive(PVM pVM)
    2557 {
    2558     return pVM->hwaccm.s.vmx.fVPID;
     2556VMMR3DECL(bool) HMR3IsVPIDActive(PVM pVM)
     2557{
     2558    return pVM->hm.s.vmx.fVPID;
    25592559}
    25602560
     
    25662566 * @param   pVM         Pointer to the VM.
    25672567 */
    2568 VMMR3DECL(bool) HWACCMR3IsEventPending(PVMCPU pVCpu)
    2569 {
    2570     return HWACCMIsEnabled(pVCpu->pVMR3) && pVCpu->hwaccm.s.Event.fPending;
     2568VMMR3DECL(bool) HMR3IsEventPending(PVMCPU pVCpu)
     2569{
     2570    return HMIsEnabled(pVCpu->pVMR3) && pVCpu->hm.s.Event.fPending;
    25712571}
    25722572
     
    25782578 * @param   pVM         Pointer to the VM.
    25792579 */
    2580 VMMR3DECL(bool) HWACCMR3IsVmxPreemptionTimerUsed(PVM pVM)
    2581 {
    2582     return HWACCMIsEnabled(pVM)
    2583         && pVM->hwaccm.s.vmx.fEnabled
    2584         && pVM->hwaccm.s.vmx.fUsePreemptTimer;
     2580VMMR3DECL(bool) HMR3IsVmxPreemptionTimerUsed(PVM pVM)
     2581{
     2582    return HMIsEnabled(pVM)
     2583        && pVM->hm.s.vmx.fEnabled
     2584        && pVM->hm.s.vmx.fUsePreemptTimer;
    25852585}
    25862586
     
    26002600 * @param   pCtx        Pointer to the guest CPU context.
    26012601 */
    2602 VMMR3DECL(VBOXSTRICTRC) HWACCMR3RestartPendingIOInstr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
    2603 {
    2604     HWACCMPENDINGIO enmType = pVCpu->hwaccm.s.PendingIO.enmType;
    2605 
    2606     pVCpu->hwaccm.s.PendingIO.enmType = HWACCMPENDINGIO_INVALID;
    2607 
    2608     if (    pVCpu->hwaccm.s.PendingIO.GCPtrRip != pCtx->rip
    2609         ||  enmType  == HWACCMPENDINGIO_INVALID)
     2602VMMR3DECL(VBOXSTRICTRC) HMR3RestartPendingIOInstr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
     2603{
     2604    HMPENDINGIO enmType = pVCpu->hm.s.PendingIO.enmType;
     2605
     2606    pVCpu->hm.s.PendingIO.enmType = HMPENDINGIO_INVALID;
     2607
     2608    if (    pVCpu->hm.s.PendingIO.GCPtrRip != pCtx->rip
     2609        ||  enmType  == HMPENDINGIO_INVALID)
    26102610        return VERR_NOT_FOUND;
    26112611
     
    26132613    switch (enmType)
    26142614    {
    2615         case HWACCMPENDINGIO_PORT_READ:
     2615        case HMPENDINGIO_PORT_READ:
    26162616        {
    2617             uint32_t uAndVal = pVCpu->hwaccm.s.PendingIO.s.Port.uAndVal;
     2617            uint32_t uAndVal = pVCpu->hm.s.PendingIO.s.Port.uAndVal;
    26182618            uint32_t u32Val  = 0;
    26192619
    2620             rcStrict = IOMIOPortRead(pVM, pVCpu->hwaccm.s.PendingIO.s.Port.uPort,
     2620            rcStrict = IOMIOPortRead(pVM, pVCpu->hm.s.PendingIO.s.Port.uPort,
    26212621                                     &u32Val,
    2622                                      pVCpu->hwaccm.s.PendingIO.s.Port.cbSize);
     2622                                     pVCpu->hm.s.PendingIO.s.Port.cbSize);
    26232623            if (IOM_SUCCESS(rcStrict))
    26242624            {
    26252625                /* Write back to the EAX register. */
    26262626                pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Val & uAndVal);
    2627                 pCtx->rip = pVCpu->hwaccm.s.PendingIO.GCPtrRipNext;
     2627                pCtx->rip = pVCpu->hm.s.PendingIO.GCPtrRipNext;
    26282628            }
    26292629            break;
    26302630        }
    26312631
    2632         case HWACCMPENDINGIO_PORT_WRITE:
    2633             rcStrict = IOMIOPortWrite(pVM, pVCpu->hwaccm.s.PendingIO.s.Port.uPort,
    2634                                       pCtx->eax & pVCpu->hwaccm.s.PendingIO.s.Port.uAndVal,
    2635                                       pVCpu->hwaccm.s.PendingIO.s.Port.cbSize);
     2632        case HMPENDINGIO_PORT_WRITE:
     2633            rcStrict = IOMIOPortWrite(pVM, pVCpu->hm.s.PendingIO.s.Port.uPort,
     2634                                      pCtx->eax & pVCpu->hm.s.PendingIO.s.Port.uAndVal,
     2635                                      pVCpu->hm.s.PendingIO.s.Port.cbSize);
    26362636            if (IOM_SUCCESS(rcStrict))
    2637                 pCtx->rip = pVCpu->hwaccm.s.PendingIO.GCPtrRipNext;
     2637                pCtx->rip = pVCpu->hm.s.PendingIO.GCPtrRipNext;
    26382638            break;
    26392639
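
For a replayed IN, the freshly read value is merged into EAX under the operand-size mask before RIP is advanced past the original instruction, so a failed replay leaves guest state untouched. A tiny worked example of the merge, with invented values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t eax     = 0x12345678; /* guest EAX before the IN          */
        uint32_t uAndVal = 0x000000ff; /* mask for a one-byte port read    */
        uint32_t u32Val  = 0x000000ab; /* what IOMIOPortRead handed back   */

        eax = (eax & ~uAndVal) | (u32Val & uAndVal);
        printf("eax = %#010x\n", eax); /* prints 0x123456ab                */
        return 0;
    }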
     
    26522652 * @param   pVM         Pointer to the VM.
    26532653 */
    2654 VMMR3DECL(int)  HWACCMR3InjectNMI(PVM pVM)
     2654VMMR3DECL(int)  HMR3InjectNMI(PVM pVM)
    26552655{
    26562656    VMCPU_FF_SET(&pVM->aCpus[0], VMCPU_FF_INTERRUPT_NMI);
     
    26662666 * @param   iStatusCode VBox status code.
    26672667 */
    2668 VMMR3DECL(void) HWACCMR3CheckError(PVM pVM, int iStatusCode)
     2668VMMR3DECL(void) HMR3CheckError(PVM pVM, int iStatusCode)
    26692669{
    26702670    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     
    26762676
    26772677            case VERR_VMX_INVALID_VMCS_PTR:
    2678                 LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Current pointer %RGp vs %RGp\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.u64VMCSPhys, pVM->aCpus[i].hwaccm.s.vmx.HCPhysVMCS));
    2679                 LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Current VMCS version %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulVMCSRevision));
    2680                 LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Entered Cpu %d\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.idEnteredCpu));
    2681                 LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Current Cpu %d\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.idCurrentCpu));
     2678                LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Current pointer %RGp vs %RGp\n", i, pVM->aCpus[i].hm.s.vmx.lasterror.u64VMCSPhys, pVM->aCpus[i].hm.s.vmx.HCPhysVMCS));
     2679                LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Current VMCS version %x\n", i, pVM->aCpus[i].hm.s.vmx.lasterror.ulVMCSRevision));
     2680                LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Entered Cpu %d\n", i, pVM->aCpus[i].hm.s.vmx.lasterror.idEnteredCpu));
     2681                LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Current Cpu %d\n", i, pVM->aCpus[i].hm.s.vmx.lasterror.idCurrentCpu));
    26822682                break;
    26832683
    26842684            case VERR_VMX_UNABLE_TO_START_VM:
    2685                 LogRel(("VERR_VMX_UNABLE_TO_START_VM: CPU%d instruction error %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulInstrError));
    2686                 LogRel(("VERR_VMX_UNABLE_TO_START_VM: CPU%d exit reason       %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulExitReason));
    2687                 if (pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulInstrError == VMX_ERROR_VMENTRY_INVALID_CONTROL_FIELDS)
     2685                LogRel(("VERR_VMX_UNABLE_TO_START_VM: CPU%d instruction error %x\n", i, pVM->aCpus[i].hm.s.vmx.lasterror.ulInstrError));
     2686                LogRel(("VERR_VMX_UNABLE_TO_START_VM: CPU%d exit reason       %x\n", i, pVM->aCpus[i].hm.s.vmx.lasterror.ulExitReason));
     2687                if (pVM->aCpus[i].hm.s.vmx.lasterror.ulInstrError == VMX_ERROR_VMENTRY_INVALID_CONTROL_FIELDS)
    26882688                {
    2689                     LogRel(("VERR_VMX_UNABLE_TO_START_VM: Cpu%d MSRBitmapPhys %RHp\n", i, pVM->aCpus[i].hwaccm.s.vmx.pMSRBitmapPhys));
     2689                    LogRel(("VERR_VMX_UNABLE_TO_START_VM: Cpu%d MSRBitmapPhys %RHp\n", i, pVM->aCpus[i].hm.s.vmx.pMSRBitmapPhys));
    26902690#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    2691                     LogRel(("VERR_VMX_UNABLE_TO_START_VM: Cpu%d GuestMSRPhys  %RHp\n", i, pVM->aCpus[i].hwaccm.s.vmx.pGuestMSRPhys));
    2692                     LogRel(("VERR_VMX_UNABLE_TO_START_VM: Cpu%d HostMsrPhys   %RHp\n", i, pVM->aCpus[i].hwaccm.s.vmx.pHostMSRPhys));
    2693                     LogRel(("VERR_VMX_UNABLE_TO_START_VM: Cpu%d Cached MSRs   %x\n",   i, pVM->aCpus[i].hwaccm.s.vmx.cCachedMSRs));
     2691                    LogRel(("VERR_VMX_UNABLE_TO_START_VM: Cpu%d GuestMSRPhys  %RHp\n", i, pVM->aCpus[i].hm.s.vmx.pGuestMSRPhys));
     2692                    LogRel(("VERR_VMX_UNABLE_TO_START_VM: Cpu%d HostMsrPhys   %RHp\n", i, pVM->aCpus[i].hm.s.vmx.pHostMSRPhys));
     2693                    LogRel(("VERR_VMX_UNABLE_TO_START_VM: Cpu%d Cached MSRs   %x\n",   i, pVM->aCpus[i].hm.s.vmx.cCachedMSRs));
    26942694#endif
    26952695                }
     
    27002700
    27012701            case VERR_VMX_UNABLE_TO_RESUME_VM:
    2702                 LogRel(("VERR_VMX_UNABLE_TO_RESUME_VM: CPU%d instruction error %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulInstrError));
    2703                 LogRel(("VERR_VMX_UNABLE_TO_RESUME_VM: CPU%d exit reason       %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulExitReason));
     2702                LogRel(("VERR_VMX_UNABLE_TO_RESUME_VM: CPU%d instruction error %x\n", i, pVM->aCpus[i].hm.s.vmx.lasterror.ulInstrError));
     2703                LogRel(("VERR_VMX_UNABLE_TO_RESUME_VM: CPU%d exit reason       %x\n", i, pVM->aCpus[i].hm.s.vmx.lasterror.ulExitReason));
    27042704                break;
    27052705
     
    27112711    if (iStatusCode == VERR_VMX_UNABLE_TO_START_VM)
    27122712    {
    2713         LogRel(("VERR_VMX_UNABLE_TO_START_VM: VM-entry allowed    %x\n", pVM->hwaccm.s.vmx.msr.vmx_entry.n.allowed1));
    2714         LogRel(("VERR_VMX_UNABLE_TO_START_VM: VM-entry disallowed %x\n", pVM->hwaccm.s.vmx.msr.vmx_entry.n.disallowed0));
     2713        LogRel(("VERR_VMX_UNABLE_TO_START_VM: VM-entry allowed    %x\n", pVM->hm.s.vmx.msr.vmx_entry.n.allowed1));
     2714        LogRel(("VERR_VMX_UNABLE_TO_START_VM: VM-entry disallowed %x\n", pVM->hm.s.vmx.msr.vmx_entry.n.disallowed0));
    27152715    }
    27162716}
     
    27242724 * @param   pSSM            SSM operation handle.
    27252725 */
    2726 static DECLCALLBACK(int) hwaccmR3Save(PVM pVM, PSSMHANDLE pSSM)
     2726static DECLCALLBACK(int) hmR3Save(PVM pVM, PSSMHANDLE pSSM)
    27272727{
    27282728    int rc;
    27292729
    2730     Log(("hwaccmR3Save:\n"));
     2730    Log(("hmR3Save:\n"));
    27312731
    27322732    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     
    27352735         * Save the basic bits - fortunately all the other things can be resynced on load.
    27362736         */
    2737         rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hwaccm.s.Event.fPending);
     2737        rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hm.s.Event.fPending);
    27382738        AssertRCReturn(rc, rc);
    2739         rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hwaccm.s.Event.errCode);
     2739        rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hm.s.Event.errCode);
    27402740        AssertRCReturn(rc, rc);
    2741         rc = SSMR3PutU64(pSSM, pVM->aCpus[i].hwaccm.s.Event.intInfo);
     2741        rc = SSMR3PutU64(pSSM, pVM->aCpus[i].hm.s.Event.intInfo);
    27422742        AssertRCReturn(rc, rc);
    27432743
    2744         rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hwaccm.s.vmx.enmLastSeenGuestMode);
     2744        rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hm.s.vmx.enmLastSeenGuestMode);
    27452745        AssertRCReturn(rc, rc);
    2746         rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hwaccm.s.vmx.enmCurrGuestMode);
     2746        rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hm.s.vmx.enmCurrGuestMode);
    27472747        AssertRCReturn(rc, rc);
    2748         rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hwaccm.s.vmx.enmPrevGuestMode);
     2748        rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hm.s.vmx.enmPrevGuestMode);
    27492749        AssertRCReturn(rc, rc);
    27502750    }
    2751 #ifdef VBOX_HWACCM_WITH_GUEST_PATCHING
    2752     rc = SSMR3PutGCPtr(pSSM, pVM->hwaccm.s.pGuestPatchMem);
     2751#ifdef VBOX_HM_WITH_GUEST_PATCHING
     2752    rc = SSMR3PutGCPtr(pSSM, pVM->hm.s.pGuestPatchMem);
    27532753    AssertRCReturn(rc, rc);
    2754     rc = SSMR3PutGCPtr(pSSM, pVM->hwaccm.s.pFreeGuestPatchMem);
     2754    rc = SSMR3PutGCPtr(pSSM, pVM->hm.s.pFreeGuestPatchMem);
    27552755    AssertRCReturn(rc, rc);
    2756     rc = SSMR3PutU32(pSSM, pVM->hwaccm.s.cbGuestPatchMem);
     2756    rc = SSMR3PutU32(pSSM, pVM->hm.s.cbGuestPatchMem);
    27572757    AssertRCReturn(rc, rc);
    27582758
    27592759    /* Store all the guest patch records too. */
    2760     rc = SSMR3PutU32(pSSM, pVM->hwaccm.s.cPatches);
     2760    rc = SSMR3PutU32(pSSM, pVM->hm.s.cPatches);
    27612761    AssertRCReturn(rc, rc);
    27622762
    2763     for (unsigned i = 0; i < pVM->hwaccm.s.cPatches; i++)
    2764     {
    2765         PHWACCMTPRPATCH pPatch = &pVM->hwaccm.s.aPatches[i];
     2763    for (unsigned i = 0; i < pVM->hm.s.cPatches; i++)
     2764    {
     2765        PHMTPRPATCH pPatch = &pVM->hm.s.aPatches[i];
    27662766
    27672767        rc = SSMR3PutU32(pSSM, pPatch->Core.Key);
     
    27802780        AssertRCReturn(rc, rc);
    27812781
    2782         AssertCompileSize(HWACCMTPRINSTR, 4);
     2782        AssertCompileSize(HMTPRINSTR, 4);
    27832783        rc = SSMR3PutU32(pSSM, (uint32_t)pPatch->enmType);
    27842784        AssertRCReturn(rc, rc);
     
    28102810 * @param   uPass           The data pass.
    28112811 */
    2812 static DECLCALLBACK(int) hwaccmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
     2812static DECLCALLBACK(int) hmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
    28132813{
    28142814    int rc;
    28152815
    2816     Log(("hwaccmR3Load:\n"));
     2816    Log(("hmR3Load:\n"));
    28172817    Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
    28182818
     
    28202820     * Validate version.
    28212821     */
    2822     if (   uVersion != HWACCM_SSM_VERSION
    2823         && uVersion != HWACCM_SSM_VERSION_NO_PATCHING
    2824         && uVersion != HWACCM_SSM_VERSION_2_0_X)
    2825     {
    2826         AssertMsgFailed(("hwaccmR3Load: Invalid version uVersion=%d!\n", uVersion));
     2822    if (   uVersion != HM_SSM_VERSION
     2823        && uVersion != HM_SSM_VERSION_NO_PATCHING
     2824        && uVersion != HM_SSM_VERSION_2_0_X)
     2825    {
     2826        AssertMsgFailed(("hmR3Load: Invalid version uVersion=%d!\n", uVersion));
    28272827        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    28282828    }
    28292829    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    28302830    {
    2831         rc = SSMR3GetU32(pSSM, &pVM->aCpus[i].hwaccm.s.Event.fPending);
     2831        rc = SSMR3GetU32(pSSM, &pVM->aCpus[i].hm.s.Event.fPending);
    28322832        AssertRCReturn(rc, rc);
    2833         rc = SSMR3GetU32(pSSM, &pVM->aCpus[i].hwaccm.s.Event.errCode);
     2833        rc = SSMR3GetU32(pSSM, &pVM->aCpus[i].hm.s.Event.errCode);
    28342834        AssertRCReturn(rc, rc);
    2835         rc = SSMR3GetU64(pSSM, &pVM->aCpus[i].hwaccm.s.Event.intInfo);
     2835        rc = SSMR3GetU64(pSSM, &pVM->aCpus[i].hm.s.Event.intInfo);
    28362836        AssertRCReturn(rc, rc);
    28372837
    2838         if (uVersion >= HWACCM_SSM_VERSION_NO_PATCHING)
     2838        if (uVersion >= HM_SSM_VERSION_NO_PATCHING)
    28392839        {
    28402840            uint32_t val;
     
    28422842            rc = SSMR3GetU32(pSSM, &val);
    28432843            AssertRCReturn(rc, rc);
    2844             pVM->aCpus[i].hwaccm.s.vmx.enmLastSeenGuestMode = (PGMMODE)val;
     2844            pVM->aCpus[i].hm.s.vmx.enmLastSeenGuestMode = (PGMMODE)val;
    28452845
    28462846            rc = SSMR3GetU32(pSSM, &val);
    28472847            AssertRCReturn(rc, rc);
    2848             pVM->aCpus[i].hwaccm.s.vmx.enmCurrGuestMode = (PGMMODE)val;
     2848            pVM->aCpus[i].hm.s.vmx.enmCurrGuestMode = (PGMMODE)val;
    28492849
    28502850            rc = SSMR3GetU32(pSSM, &val);
    28512851            AssertRCReturn(rc, rc);
    2852             pVM->aCpus[i].hwaccm.s.vmx.enmPrevGuestMode = (PGMMODE)val;
     2852            pVM->aCpus[i].hm.s.vmx.enmPrevGuestMode = (PGMMODE)val;
    28532853        }
    28542854    }
    2855 #ifdef VBOX_HWACCM_WITH_GUEST_PATCHING
    2856     if (uVersion > HWACCM_SSM_VERSION_NO_PATCHING)
    2857     {
    2858         rc = SSMR3GetGCPtr(pSSM, &pVM->hwaccm.s.pGuestPatchMem);
     2855#ifdef VBOX_HM_WITH_GUEST_PATCHING
     2856    if (uVersion > HM_SSM_VERSION_NO_PATCHING)
     2857    {
     2858        rc = SSMR3GetGCPtr(pSSM, &pVM->hm.s.pGuestPatchMem);
    28592859        AssertRCReturn(rc, rc);
    2860         rc = SSMR3GetGCPtr(pSSM, &pVM->hwaccm.s.pFreeGuestPatchMem);
     2860        rc = SSMR3GetGCPtr(pSSM, &pVM->hm.s.pFreeGuestPatchMem);
    28612861        AssertRCReturn(rc, rc);
    2862         rc = SSMR3GetU32(pSSM, &pVM->hwaccm.s.cbGuestPatchMem);
     2862        rc = SSMR3GetU32(pSSM, &pVM->hm.s.cbGuestPatchMem);
    28632863        AssertRCReturn(rc, rc);
    28642864
    28652865        /* Fetch all TPR patch records. */
    2866         rc = SSMR3GetU32(pSSM, &pVM->hwaccm.s.cPatches);
     2866        rc = SSMR3GetU32(pSSM, &pVM->hm.s.cPatches);
    28672867        AssertRCReturn(rc, rc);
    28682868
    2869         for (unsigned i = 0; i < pVM->hwaccm.s.cPatches; i++)
     2869        for (unsigned i = 0; i < pVM->hm.s.cPatches; i++)
    28702870        {
    2871             PHWACCMTPRPATCH pPatch = &pVM->hwaccm.s.aPatches[i];
     2871            PHMTPRPATCH pPatch = &pVM->hm.s.aPatches[i];
    28722872
    28732873            rc = SSMR3GetU32(pSSM, &pPatch->Core.Key);
     
    28892889            AssertRCReturn(rc, rc);
    28902890
    2891             if (pPatch->enmType == HWACCMTPRINSTR_JUMP_REPLACEMENT)
    2892                 pVM->hwaccm.s.fTPRPatchingActive = true;
    2893 
    2894             Assert(pPatch->enmType == HWACCMTPRINSTR_JUMP_REPLACEMENT || pVM->hwaccm.s.fTPRPatchingActive == false);
     2891            if (pPatch->enmType == HMTPRINSTR_JUMP_REPLACEMENT)
     2892                pVM->hm.s.fTPRPatchingActive = true;
     2893
     2894            Assert(pPatch->enmType == HMTPRINSTR_JUMP_REPLACEMENT || pVM->hm.s.fTPRPatchingActive == false);
    28952895
    28962896            rc = SSMR3GetU32(pSSM, &pPatch->uSrcOperand);
     
    29062906            AssertRCReturn(rc, rc);
    29072907
    2908             Log(("hwaccmR3Load: patch %d\n", i));
     2908            Log(("hmR3Load: patch %d\n", i));
    29092909            Log(("Key       = %x\n", pPatch->Core.Key));
    29102910            Log(("cbOp      = %d\n", pPatch->cbOp));
     
    29152915            Log(("cFaults   = %d\n", pPatch->cFaults));
    29162916            Log(("target    = %x\n", pPatch->pJumpTarget));
    2917             rc = RTAvloU32Insert(&pVM->hwaccm.s.PatchTree, &pPatch->Core);
     2917            rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core);
    29182918            AssertRC(rc);
    29192919        }
     
    29212921#endif
    29222922
    2923     /* Recheck all VCPUs if we can go straight into hwaccm execution mode. */
    2924     if (HWACCMIsEnabled(pVM))
     2923    /* Recheck all VCPUs if we can go straight into hm execution mode. */
     2924    if (HMIsEnabled(pVM))
    29252925    {
    29262926        for (VMCPUID i = 0; i < pVM->cCpus; i++)
     
    29282928            PVMCPU pVCpu = &pVM->aCpus[i];
    29292929
    2930             HWACCMR3CanExecuteGuest(pVM, CPUMQueryGuestCtxPtr(pVCpu));
     2930            HMR3CanExecuteGuest(pVM, CPUMQueryGuestCtxPtr(pVCpu));
    29312931        }
    29322932    }
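
The load path above follows the usual SSM pattern: refuse versions it does not know, read the fields every version carries, then gate each later addition (guest-mode tracking, then TPR patch records) on the version that introduced it; the AssertCompileSize(HMTPRINSTR, 4) keeps the on-disk enum size from drifting. A minimal sketch of that gating, with stand-in version constants (the real values live elsewhere in the HM headers):

    #include <stdint.h>

    #define HM_SSM_VERSION_SKETCH              5  /* stand-ins; illustrative */
    #define HM_SSM_VERSION_NO_PATCHING_SKETCH  4  /* values only             */
    #define HM_SSM_VERSION_2_0_X_SKETCH        3

    static int hmLoadSketch(uint32_t uVersion)
    {
        if (   uVersion != HM_SSM_VERSION_SKETCH
            && uVersion != HM_SSM_VERSION_NO_PATCHING_SKETCH
            && uVersion != HM_SSM_VERSION_2_0_X_SKETCH)
            return -1;                  /* unsupported data unit version */

        /* ...per-VCPU event state: present in every version...         */
        if (uVersion >= HM_SSM_VERSION_NO_PATCHING_SKETCH)
        {   /* ...guest-mode tracking, added after 2.0.x...             */ }
        if (uVersion >  HM_SSM_VERSION_NO_PATCHING_SKETCH)
        {   /* ...TPR patch records, the newest addition...             */ }
        return 0;
    }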
  • trunk/src/VBox/VMM/VMMR3/IOM.cpp

    r41965 r43387  
    5353 *
    5454 *
    55  * @section sec_iom_hwaccm     Hardware Assisted Virtualization Mode
     55 * @section sec_iom_hm     Hardware Assisted Virtualization Mode
    5656 *
    5757 * When running in hardware assisted virtualization mode we'll be doing much the
  • trunk/src/VBox/VMM/VMMR3/PDMLdr.cpp

    r41965 r43387  
    3232#include <VBox/param.h>
    3333#include <VBox/err.h>
    34 #include <VBox/vmm/hwaccm.h>
     34#include <VBox/vmm/hm.h>
    3535#include <VBox/VBoxTpG.h>
    3636
  • trunk/src/VBox/VMM/VMMR3/PGM.cpp

    r42612 r43387  
    622622#include <VBox/vmm/selm.h>
    623623#include <VBox/vmm/ssm.h>
    624 #include <VBox/vmm/hwaccm.h>
     624#include <VBox/vmm/hm.h>
    625625#include "PGMInternal.h"
    626626#include <VBox/vmm/vm.h>
     
    22392239    switch (enmWhat)
    22402240    {
    2241         case VMINITCOMPLETED_HWACCM:
     2241        case VMINITCOMPLETED_HM:
    22422242#ifdef VBOX_WITH_PCI_PASSTHROUGH
    22432243            if (pVM->pgm.s.fPciPassthrough)
    22442244            {
    22452245                AssertLogRelReturn(pVM->pgm.s.fRamPreAlloc, VERR_PCI_PASSTHROUGH_NO_RAM_PREALLOC);
    2246                 AssertLogRelReturn(HWACCMIsEnabled(pVM), VERR_PCI_PASSTHROUGH_NO_HWACCM);
    2247                 AssertLogRelReturn(HWACCMIsNestedPagingActive(pVM), VERR_PCI_PASSTHROUGH_NO_NESTED_PAGING);
     2246                AssertLogRelReturn(HMIsEnabled(pVM), VERR_PCI_PASSTHROUGH_NO_HM);
     2247                AssertLogRelReturn(HMIsNestedPagingActive(pVM), VERR_PCI_PASSTHROUGH_NO_NESTED_PAGING);
    22482248
    22492249                /*
     
    25782578            VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    25792579            pgmR3RefreshShadowModeAfterA20Change(pVCpu);
    2580             HWACCMFlushTLB(pVCpu);
     2580            HMFlushTLB(pVCpu);
    25812581#endif
    25822582        }
     
    31733173        case PGMMODE_PROTECTED:
    31743174            if (    enmShadowMode != PGMMODE_INVALID
    3175                 && !HWACCMIsEnabled(pVM) /* always switch in hwaccm mode! */)
     3175                && !HMIsEnabled(pVM) /* always switch in hm mode! */)
    31763176                break; /* (no change) */
    31773177
     
    33283328    }
    33293329    /* Override the shadow mode if nested paging is active. */
    3330     pVM->pgm.s.fNestedPaging = HWACCMIsNestedPagingActive(pVM);
     3330    pVM->pgm.s.fNestedPaging = HMIsNestedPagingActive(pVM);
    33313331    if (pVM->pgm.s.fNestedPaging)
    3332         enmShadowMode = HWACCMGetShwPagingMode(pVM);
     3332        enmShadowMode = HMGetShwPagingMode(pVM);
    33333333
    33343334    *penmSwitcher = enmSwitcher;
     
    36293629    }
    36303630
    3631     /* Notify HWACCM as well. */
    3632     HWACCMR3PagingModeChanged(pVM, pVCpu, pVCpu->pgm.s.enmShadowMode, pVCpu->pgm.s.enmGuestMode);
     3631    /* Notify HM as well. */
     3632    HMR3PagingModeChanged(pVM, pVCpu, pVCpu->pgm.s.enmShadowMode, pVCpu->pgm.s.enmGuestMode);
    36333633    return rc;
    36343634}
  • trunk/src/VBox/VMM/VMMR3/PGMBth.h

    r41801 r43387  
    132132    PVM pVM = pVCpu->pVMR3;
    133133
    134     Assert(HWACCMIsNestedPagingActive(pVM) == pVM->pgm.s.fNestedPaging);
     134    Assert(HMIsNestedPagingActive(pVM) == pVM->pgm.s.fNestedPaging);
    135135    Assert(!pVM->pgm.s.fNestedPaging);
    136136
  • trunk/src/VBox/VMM/VMMR3/PGMHandler.cpp

    r41982 r43387  
    5252#include <VBox/param.h>
    5353#include <VBox/err.h>
    54 #include <VBox/vmm/hwaccm.h>
     54#include <VBox/vmm/hm.h>
    5555
    5656
     
    240240
    241241    /* Not supported/relevant for VT-x and AMD-V. */
    242     if (HWACCMIsEnabled(pVM))
     242    if (HMIsEnabled(pVM))
    243243        return VERR_NOT_IMPLEMENTED;
    244244
     
    293293
    294294    /* Not supported/relevant for VT-x and AMD-V. */
    295     if (HWACCMIsEnabled(pVM))
     295    if (HMIsEnabled(pVM))
    296296        return VERR_NOT_IMPLEMENTED;
    297297
     
    581581            "Hypervisor Virtual handlers:\n"
    582582            "%*s %*s %*s %*s Type       Description\n",
    583             - (int)sizeof(RTGCPTR) * 2,     "From", 
    584             - (int)sizeof(RTGCPTR) * 2 - 3, "- To (excl)", 
    585             - (int)sizeof(RTHCPTR) * 2 - 1, "HandlerHC", 
     583            - (int)sizeof(RTGCPTR) * 2,     "From",
     584            - (int)sizeof(RTGCPTR) * 2 - 3, "- To (excl)",
     585            - (int)sizeof(RTHCPTR) * 2 - 1, "HandlerHC",
    586586            - (int)sizeof(RTRCPTR) * 2 - 1, "HandlerGC");
    587587        RTAvlroGCPtrDoWithAll(&pVM->pgm.s.pTreesR3->HyperVirtHandlers, true, pgmR3InfoHandlersVirtualOne, &Args);
  • trunk/src/VBox/VMM/VMMR3/PGMMap.cpp

    r41965 r43387  
    512512    if (!pgmMapAreMappingsEnabled(pVM))
    513513    {
    514         Assert(HWACCMIsEnabled(pVM));
     514        Assert(HMIsEnabled(pVM));
    515515        return VINF_SUCCESS;
    516516    }
     
    674674
    675675/**
    676  * Interface for disabling the guest mappings when switching to HWACCM mode
     676 * Interface for disabling the guest mappings when switching to HM mode
    677677 * during VM creation and VM reset.
    678678 *
  • trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp

    r43047 r43387  
    37953795        VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    37963796        pgmR3RefreshShadowModeAfterA20Change(pVCpu);
    3797         HWACCMFlushTLB(pVCpu);
     3797        HMFlushTLB(pVCpu);
    37983798#endif
    37993799        STAM_REL_COUNTER_INC(&pVCpu->pgm.s.cA20Changes);
     
    43264326 *
    43274327 * @remarks The VINF_EM_NO_MEMORY status is for the benefit of the FF processing
    4328  *          in EM.cpp and shouldn't be propagated outside TRPM, HWACCM, EM and
     4328 *          in EM.cpp and shouldn't be propagated outside TRPM, HM, EM and
    43294329 *          pgmPhysEnsureHandyPage. There is one exception to this in the \#PF
    43304330 *          handler.
  • trunk/src/VBox/VMM/VMMR3/PGMPool.cpp

    r43163 r43387  
    459459    /** @todo change the pool to handle ROOT page allocations specially when
    460460     *        required. */
    461     bool fCanUseHighMemory = HWACCMIsNestedPagingActive(pVM)
    462                           && HWACCMGetShwPagingMode(pVM) == PGMMODE_EPT;
     461    bool fCanUseHighMemory = HMIsNestedPagingActive(pVM)
     462                          && HMGetShwPagingMode(pVM) == PGMMODE_EPT;
    463463
    464464    pgmLock(pVM);
  • trunk/src/VBox/VMM/VMMR3/PGMShw.h

    r41801 r43387  
    188188    PVM          pVM       = pVCpu->pVMR3;
    189189
    190     Assert(HWACCMIsNestedPagingActive(pVM) == pVM->pgm.s.fNestedPaging);
     190    Assert(HMIsNestedPagingActive(pVM) == pVM->pgm.s.fNestedPaging);
    191191    Assert(pVM->pgm.s.fNestedPaging);
    192192    Assert(!pVCpu->pgm.s.pShwPageCR3R3);
  • trunk/src/VBox/VMM/VMMR3/TRPM.cpp

    r43079 r43387  
    9393# include <VBox/vmm/rem.h>
    9494#endif
    95 #include <VBox/vmm/hwaccm.h>
     95#include <VBox/vmm/hm.h>
    9696
    9797#include <VBox/err.h>
     
    15231523        {
    15241524# ifndef IEM_VERIFICATION_MODE
    1525             if (HWACCMIsEnabled(pVM))
     1525            if (HMIsEnabled(pVM))
    15261526# endif
    15271527            {
     
    15291529                AssertRC(rc);
    15301530                STAM_COUNTER_INC(&pVM->trpm.s.paStatForwardedIRQR3[u8Interrupt]);
    1531                 return HWACCMR3IsActive(pVCpu) ? VINF_EM_RESCHEDULE_HWACC : VINF_EM_RESCHEDULE_REM;
     1531                return HMR3IsActive(pVCpu) ? VINF_EM_RESCHEDULE_HWACC : VINF_EM_RESCHEDULE_REM;
    15321532            }
    15331533            /* If the guest gate is not patched, then we will check (again) if we can patch it. */
     
    15641564        {
    15651565            AssertRC(rc);
    1566             return HWACCMR3IsActive(pVCpu) ? VINF_EM_RESCHEDULE_HWACC : VINF_EM_RESCHEDULE_REM; /* (Heed the halted state if this is changed!) */
     1566            return HMR3IsActive(pVCpu) ? VINF_EM_RESCHEDULE_HWACC : VINF_EM_RESCHEDULE_REM; /* (Heed the halted state if this is changed!) */
    15671567        }
    15681568#else
    1569         if (HWACCMR3IsActive(pVCpu))
     1569        if (HMR3IsActive(pVCpu))
    15701570        {
    15711571            uint8_t u8Interrupt;
  • trunk/src/VBox/VMM/VMMR3/VM.cpp

    r41965 r43387  
    6666#include <VBox/vmm/ssm.h>
    6767#include <VBox/vmm/ftm.h>
    68 #include <VBox/vmm/hwaccm.h>
     68#include <VBox/vmm/hm.h>
    6969#include "VMInternal.h"
    7070#include <VBox/vmm/vm.h>
     
    302302
    303303#ifndef RT_OS_DARWIN
    304                 case VERR_HWACCM_CONFIG_MISMATCH:
     304                case VERR_HM_CONFIG_MISMATCH:
    305305                    pszError = N_("VT-x/AMD-V is either not available on your host or disabled. "
    306306                                  "This hardware extension is required by the VM configuration");
     
    355355                    break;
    356356
    357                 case VERR_PCI_PASSTHROUGH_NO_HWACCM:
     357                case VERR_PCI_PASSTHROUGH_NO_HM:
    358358                    pszError = N_("PCI passthrough requires VT-x/AMD-V");
    359359                    break;
     
    656656            rc = CFGMR3QueryBoolDef(pRoot, "HwVirtExtForced", &pVM->fHwVirtExtForced, false);
    657657            if (RT_SUCCESS(rc) && pVM->fHwVirtExtForced)
    658                 pVM->fHWACCMEnabled = true;
     658                pVM->fHMEnabled = true;
    659659
    660660            /*
     
    913913        if (RT_SUCCESS(rc))
    914914        {
    915             rc = HWACCMR3Init(pVM);
     915            rc = HMR3Init(pVM);
    916916            if (RT_SUCCESS(rc))
    917917            {
     
    10291029                    AssertRC(rc2);
    10301030                }
    1031                 int rc2 = HWACCMR3Term(pVM);
     1031                int rc2 = HMR3Term(pVM);
    10321032                AssertRC(rc2);
    10331033            }
     
    10711071        rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_RING0);
    10721072    if (RT_SUCCESS(rc))
    1073         rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_HWACCM);
    1074 
    1075     /** @todo Move this to the VMINITCOMPLETED_HWACCM notification handler. */
     1073        rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_HM);
     1074
     1075    /** @todo Move this to the VMINITCOMPLETED_HM notification handler. */
    10761076    if (RT_SUCCESS(rc))
    1077         CPUMR3SetHWVirtEx(pVM, HWACCMIsEnabled(pVM));
     1077        CPUMR3SetHWVirtEx(pVM, HMIsEnabled(pVM));
    10781078
    10791079    LogFlow(("vmR3InitRing0: returns %Rrc\n", rc));
     
    11251125    int rc = VMMR3InitCompleted(pVM, enmWhat);
    11261126    if (RT_SUCCESS(rc))
    1127         rc = HWACCMR3InitCompleted(pVM, enmWhat);
     1127        rc = HMR3InitCompleted(pVM, enmWhat);
    11281128    if (RT_SUCCESS(rc))
    11291129        rc = PGMR3InitCompleted(pVM, enmWhat);
     
    11881188    PGMR3Relocate(pVM, 0);              /* Repeat after PDM relocation. */
    11891189    CPUMR3Relocate(pVM);
    1190     HWACCMR3Relocate(pVM);
     1190    HMR3Relocate(pVM);
    11911191    SELMR3Relocate(pVM);
    11921192    VMMR3Relocate(pVM, offDelta);
     
    24542454        AssertRC(rc);
    24552455#endif
    2456         rc = HWACCMR3Term(pVM);
     2456        rc = HMR3Term(pVM);
    24572457        AssertRC(rc);
    24582458        rc = PGMR3Term(pVM);
     
    28632863        TMR3Reset(pVM);
    28642864        EMR3Reset(pVM);
    2865         HWACCMR3Reset(pVM);                 /* This must come *after* PATM, CSAM, CPUM, SELM and TRPM. */
     2865        HMR3Reset(pVM);                 /* This must come *after* PATM, CSAM, CPUM, SELM and TRPM. */
    28662866
    28672867#ifdef LOG_ENABLED
     
    44444444    CPUMR3ResetCpu(pVCpu);
    44454445    EMR3ResetCpu(pVCpu);
    4446     HWACCMR3ResetCpu(pVCpu);
     4446    HMR3ResetCpu(pVCpu);
    44474447    return VINF_EM_WAIT_SIPI;
    44484448}
  • trunk/src/VBox/VMM/VMMR3/VMM.cpp

    r42975 r43387  
    105105#include <VBox/param.h>
    106106#include <VBox/version.h>
    107 #include <VBox/vmm/hwaccm.h>
     107#include <VBox/vmm/hm.h>
    108108#include <iprt/assert.h>
    109109#include <iprt/alloc.h>
     
    210210    AssertRCReturn(rc, rc);
    211211
    212     /* GC switchers are enabled by default. Turned off by HWACCM. */
     212    /* GC switchers are enabled by default. Turned off by HM. */
    213213    pVM->vmm.s.fSwitcherDisabled = false;
    214214
     
    448448    STAM_REG(pVM, &pVM->vmm.s.StatRZRetPGMFlushPending,     STAMTYPE_COUNTER, "/VMM/RZRet/PGMFlushPending",     STAMUNIT_OCCURENCES, "Number of VINF_PGM_POOL_FLUSH_PENDING returns.");
    449449    STAM_REG(pVM, &pVM->vmm.s.StatRZRetPendingRequest,      STAMTYPE_COUNTER, "/VMM/RZRet/PendingRequest",      STAMUNIT_OCCURENCES, "Number of VINF_EM_PENDING_REQUEST returns.");
    450     STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchTPR,            STAMTYPE_COUNTER, "/VMM/RZRet/PatchTPR",            STAMUNIT_OCCURENCES, "Number of VINF_EM_HWACCM_PATCH_TPR_INSTR returns.");
     450    STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchTPR,            STAMTYPE_COUNTER, "/VMM/RZRet/PatchTPR",            STAMUNIT_OCCURENCES, "Number of VINF_EM_HM_PATCH_TPR_INSTR returns.");
    451451    STAM_REG(pVM, &pVM->vmm.s.StatRZRetCallRing3,           STAMTYPE_COUNTER, "/VMM/RZCallR3/Misc",             STAMUNIT_OCCURENCES, "Number of Other ring-3 calls.");
    452452    STAM_REG(pVM, &pVM->vmm.s.StatRZCallPDMLock,            STAMTYPE_COUNTER, "/VMM/RZCallR3/PDMLock",          STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_PDM_LOCK calls.");
     
    681681             */
    682682            if (   pVM->vmm.s.fUsePeriodicPreemptionTimers
    683                 && HWACCMR3IsVmxPreemptionTimerUsed(pVM))
     683                && HMR3IsVmxPreemptionTimerUsed(pVM))
    684684                pVM->vmm.s.fUsePeriodicPreemptionTimers = false;
    685685            LogRel(("VMM: fUsePeriodicPreemptionTimers=%RTbool\n", pVM->vmm.s.fUsePeriodicPreemptionTimers));
     
    937937VMMR3DECL(const char *) VMMR3GetRZAssertMsg1(PVM pVM)
    938938{
    939     if (HWACCMIsEnabled(pVM))
     939    if (HMIsEnabled(pVM))
    940940        return pVM->vmm.s.szRing0AssertMsg1;
    941941
     
    957957VMMR3DECL(const char *) VMMR3GetRZAssertMsg2(PVM pVM)
    958958{
    959     if (HWACCMIsEnabled(pVM))
     959    if (HMIsEnabled(pVM))
    960960        return pVM->vmm.s.szRing0AssertMsg2;
    961961
     
    14031403{
    14041404    VM_ASSERT_EMT(pVM);
    1405     if (HWACCMIsEnabled(pVM))
    1406         return HWACMMR3EnablePatching(pVM, pPatchMem, cbPatchMem);
     1405    if (HMIsEnabled(pVM))
     1406        return HMR3EnablePatching(pVM, pPatchMem, cbPatchMem);
    14071407
    14081408    return VERR_NOT_SUPPORTED;
     
    14191419VMMR3DECL(int) VMMR3DeregisterPatchMemory(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)
    14201420{
    1421     if (HWACCMIsEnabled(pVM))
    1422         return HWACMMR3DisablePatching(pVM, pPatchMem, cbPatchMem);
     1421    if (HMIsEnabled(pVM))
     1422        return HMR3DisablePatching(pVM, pPatchMem, cbPatchMem);
    14231423
    14241424    return VINF_SUCCESS;
     
    23392339        PRINT_GROUP(VMCPU_FF_,NORMAL_PRIORITY,_MASK);
    23402340        PRINT_GROUP(VMCPU_FF_,RESUME_GUEST,_MASK);
    2341         PRINT_GROUP(VMCPU_FF_,HWACCM_TO_R3,_MASK);
     2341        PRINT_GROUP(VMCPU_FF_,HM_TO_R3,_MASK);
    23422342        PRINT_GROUP(VMCPU_FF_,ALL_REM,_MASK);
    23432343        if (c)
  • trunk/src/VBox/VMM/VMMR3/VMMGuruMeditation.cpp

    r41965 r43387  
    3434#include <VBox/param.h>
    3535#include <VBox/version.h>
    36 #include <VBox/vmm/hwaccm.h>
     36#include <VBox/vmm/hm.h>
    3737#include <iprt/assert.h>
    3838#include <iprt/time.h>
     
    301301            RTGCUINTPTR     uCR2       = 0xdeadface;
    302302            int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrorCode, &uCR2);
    303             if (!HWACCMIsEnabled(pVM))
     303            if (!HMIsEnabled(pVM))
    304304            {
    305305                if (RT_SUCCESS(rc2))
     
    320320             * Dump the relevant hypervisor registers and stack.
    321321             */
    322             if (HWACCMIsEnabled(pVM))
     322            if (HMIsEnabled(pVM))
    323323            {
    324324                if (   rcErr == VERR_VMM_RING0_ASSERTION /* fInRing3Call has already been cleared here. */
     
    592592                                pVCpu->vmm.s.pbEMTStackRC, pVCpu->vmm.s.pbEMTStackBottomRC,
    593593                                VMM_STACK_SIZE, pVCpu->vmm.s.pbEMTStackR3);
    594             } /* !HWACCMIsEnabled */
     594            } /* !HMIsEnabled */
    595595            break;
    596596        }
  • trunk/src/VBox/VMM/VMMR3/VMMTests.cpp

    r41985 r43387  
    3434#include <VBox/err.h>
    3535#include <VBox/param.h>
    36 #include <VBox/vmm/hwaccm.h>
     36#include <VBox/vmm/hm.h>
    3737
    3838#include <iprt/assert.h>
     
    477477    PVMCPU   pVCpu = &pVM->aCpus[0];
    478478
    479     if (!HWACCMR3IsAllowed(pVM))
     479    if (!HMR3IsAllowed(pVM))
    480480    {
    481481        RTPrintf("VMM: Hardware accelerated test not available!\n");
     
    542542            CPUMSetHyperState(pVCpu, pVM->vmm.s.pfnCallTrampolineRC, pVCpu->vmm.s.pbEMTStackBottomRC, 0, 0);
    543543            CPUMPushHyper(pVCpu, 0);
    544             CPUMPushHyper(pVCpu, VMMGC_DO_TESTCASE_HWACCM_NOP);
     544            CPUMPushHyper(pVCpu, VMMGC_DO_TESTCASE_HM_NOP);
    545545            CPUMPushHyper(pVCpu, pVM->pVMRC);
    546546            CPUMPushHyper(pVCpu, 3 * sizeof(RTRCPTR));    /* stack frame size */
  • trunk/src/VBox/VMM/VMMRC/HMRCA.asm

    r43373 r43387  
    2323%include "VBox/asmdefs.mac"
    2424%include "VBox/err.mac"
    25 %include "VBox/vmm/hwacc_vmx.mac"
     25%include "VBox/vmm/hm_vmx.mac"
    2626%include "VBox/vmm/cpum.mac"
    2727%include "iprt/x86.mac"
    28 %include "HWACCMInternal.mac"
     28%include "HMInternal.mac"
    2929
    3030%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
     
    535535; * @param   pCtx       Guest context [rsi]
    536536; */
    537 BEGINPROC HWACCMSaveGuestFPU64
     537BEGINPROC HMSaveGuestFPU64
    538538    mov     rax, cr0
    539539    mov     rcx, rax                    ; save old CR0
     
    547547    mov     eax, VINF_SUCCESS
    548548    ret
    549 ENDPROC HWACCMSaveGuestFPU64
     549ENDPROC HMSaveGuestFPU64
    550550
    551551;/**
     
    555555; * @param   pCtx       Guest context [rsi]
    556556; */
    557 BEGINPROC HWACCMSaveGuestDebug64
     557BEGINPROC HMSaveGuestDebug64
    558558    mov rax, dr0
    559559    mov qword [rsi + CPUMCTX.dr + 0*8], rax
     
    568568    mov eax, VINF_SUCCESS
    569569    ret
    570 ENDPROC HWACCMSaveGuestDebug64
     570ENDPROC HMSaveGuestDebug64
    571571
    572572;/**
     
    581581; * @param   pCtx       Guest context [rsi]
    582582; */
    583 BEGINPROC HWACCMTestSwitcher64
     583BEGINPROC HMTestSwitcher64
    584584    mov eax, [rsp+8]
    585585    ret
    586 ENDPROC HWACCMTestSwitcher64
     586ENDPROC HMTestSwitcher64
  • trunk/src/VBox/VMM/VMMRC/VMMRC.cpp

    r41976 r43387  
    120120         * Testcase executes a privileged instruction to force a world switch. (in both SVM & VMX)
    121121         */
    122         case VMMGC_DO_TESTCASE_HWACCM_NOP:
     122        case VMMGC_DO_TESTCASE_HM_NOP:
    123123            ASMRdMsr_Low(MSR_IA32_SYSENTER_CS);
    124124            return 0;
  • trunk/src/VBox/VMM/VMMRZ/PGMRZDynMap.cpp

    r41836 r43387  
    18421842 * Starts or migrates the autoset of a virtual CPU.
    18431843 *
    1844  * This is used by HWACCMR0Enter.  When we've longjumped out of the HWACCM
     1844 * This is used by HMR0Enter.  When we've longjumped out of the HM
    18451845 * execution loop with the set open, we'll migrate it when re-entering.  While
    18461846 * under normal circumstances, we'll start it so VMXR0LoadGuestState can access
  • trunk/src/VBox/VMM/include/EMHandleRCTmpl.h

    r42779 r43387  
    2020
    2121/**
    22  * Process a subset of the raw-mode and hwaccm return codes.
     22 * Process a subset of the raw-mode and hm return codes.
    2323 *
    2424 * Since we have to share this with raw-mode single stepping, this inline
     
    3535#ifdef EMHANDLERC_WITH_PATM
    3636int emR3RawHandleRC(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rc)
    37 #elif defined(EMHANDLERC_WITH_HWACCM)
     37#elif defined(EMHANDLERC_WITH_HM)
    3838int emR3HwaccmHandleRC(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rc)
    3939#endif
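
EMHandleRCTmpl.h is a textual template: the same switch body is compiled once with EMHANDLERC_WITH_PATM to produce the raw-mode handler and once with EMHANDLERC_WITH_HM to produce the hardware-assisted one, so the return codes common to both are handled in a single place. The idiom, reduced to a toy with hypothetical names:

    /* handler_tmpl.h -- included twice by the consumer shown below */
    #ifdef WITH_RAW
    int handleRcRaw(int rc)
    #elif defined(WITH_HM)
    int handleRcHm(int rc)
    #endif
    {
        switch (rc)
        {
    #ifdef WITH_HM
            case 42:            /* a return code only HM produces */
                rc = 0;
                break;
    #endif
            default:            /* codes shared by both handlers */
                break;
        }
        return rc;
    }

    /* The consumer compiles it twice:
     *   #define WITH_RAW
     *   #include "handler_tmpl.h"
     *   #undef  WITH_RAW
     *   #define WITH_HM
     *   #include "handler_tmpl.h"
     */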
     
    219219            break;
    220220
    221 #ifdef EMHANDLERC_WITH_HWACCM
     221#ifdef EMHANDLERC_WITH_HM
    222222        /*
    223223         * (MM)IO intensive code block detected; fall back to the recompiler for better performance
    224224         */
    225225        case VINF_EM_RAW_EMULATE_IO_BLOCK:
    226             rc = HWACCMR3EmulateIoBlock(pVM, pCtx);
    227             break;
    228 
    229         case VINF_EM_HWACCM_PATCH_TPR_INSTR:
    230             rc = HWACCMR3PatchTprInstr(pVM, pVCpu, pCtx);
     226            rc = HMR3EmulateIoBlock(pVM, pCtx);
     227            break;
     228
     229        case VINF_EM_HM_PATCH_TPR_INSTR:
     230            rc = HMR3PatchTprInstr(pVM, pVCpu, pCtx);
    231231            break;
    232232#endif
     
    334334            break;
    335335
    336 #ifdef EMHANDLERC_WITH_HWACCM
     336#ifdef EMHANDLERC_WITH_HM
    337337        /*
    338338         * Up a level, after HwAccM has done some release logging.
     
    347347        case VERR_VMX_UNABLE_TO_START_VM:
    348348        case VERR_VMX_UNABLE_TO_RESUME_VM:
    349             HWACCMR3CheckError(pVM, rc);
     349            HMR3CheckError(pVM, rc);
    350350            break;
    351351
  • trunk/src/VBox/VMM/include/HMInternal.h

    r43373 r43387  
    1616 */
    1717
    18 #ifndef ___HWACCMInternal_h
    19 #define ___HWACCMInternal_h
     18#ifndef ___HMInternal_h
     19#define ___HMInternal_h
    2020
    2121#include <VBox/cdefs.h>
     
    2424#include <VBox/vmm/stam.h>
    2525#include <VBox/dis.h>
    26 #include <VBox/vmm/hwaccm.h>
    27 #include <VBox/vmm/hwacc_vmx.h>
     26#include <VBox/vmm/hm.h>
     27#include <VBox/vmm/hm_vmx.h>
    2828#include <VBox/vmm/pgm.h>
    2929#include <VBox/vmm/cpum.h>
     
    3939
    4040#define VMX_USE_CACHED_VMCS_ACCESSES
    41 #define HWACCM_VMX_EMULATE_REALMODE
     41#define HM_VMX_EMULATE_REALMODE
    4242
    4343/* The MSR auto load/store does not work for KERNEL_GS_BASE MSR, thus we
     
    5252
    5353
    54 /** @defgroup grp_hwaccm_int       Internal
    55  * @ingroup grp_hwaccm
     54/** @defgroup grp_hm_int       Internal
     55 * @ingroup grp_hm
    5656 * @internal
    5757 * @{
     
    6969 * @{
    7070 */
    71 #define HWACCM_CHANGED_GUEST_FPU                RT_BIT(0)
    72 #define HWACCM_CHANGED_GUEST_CR0                RT_BIT(1)
    73 #define HWACCM_CHANGED_GUEST_CR3                RT_BIT(2)
    74 #define HWACCM_CHANGED_GUEST_CR4                RT_BIT(3)
    75 #define HWACCM_CHANGED_GUEST_GDTR               RT_BIT(4)
    76 #define HWACCM_CHANGED_GUEST_IDTR               RT_BIT(5)
    77 #define HWACCM_CHANGED_GUEST_LDTR               RT_BIT(6)
    78 #define HWACCM_CHANGED_GUEST_TR                 RT_BIT(7)
    79 #define HWACCM_CHANGED_GUEST_MSR                RT_BIT(8)
    80 #define HWACCM_CHANGED_GUEST_SEGMENT_REGS       RT_BIT(9)
    81 #define HWACCM_CHANGED_GUEST_DEBUG              RT_BIT(10)
    82 #define HWACCM_CHANGED_HOST_CONTEXT             RT_BIT(11)
    83 
    84 #define HWACCM_CHANGED_ALL                  (   HWACCM_CHANGED_GUEST_SEGMENT_REGS \
    85                                             |   HWACCM_CHANGED_GUEST_CR0          \
    86                                             |   HWACCM_CHANGED_GUEST_CR3          \
    87                                             |   HWACCM_CHANGED_GUEST_CR4          \
    88                                             |   HWACCM_CHANGED_GUEST_GDTR         \
    89                                             |   HWACCM_CHANGED_GUEST_IDTR         \
    90                                             |   HWACCM_CHANGED_GUEST_LDTR         \
    91                                             |   HWACCM_CHANGED_GUEST_TR           \
    92                                             |   HWACCM_CHANGED_GUEST_MSR          \
    93                                             |   HWACCM_CHANGED_GUEST_FPU          \
    94                                             |   HWACCM_CHANGED_GUEST_DEBUG        \
    95                                             |   HWACCM_CHANGED_HOST_CONTEXT)
    96 
    97 #define HWACCM_CHANGED_ALL_GUEST            (   HWACCM_CHANGED_GUEST_SEGMENT_REGS \
    98                                             |   HWACCM_CHANGED_GUEST_CR0          \
    99                                             |   HWACCM_CHANGED_GUEST_CR3          \
    100                                             |   HWACCM_CHANGED_GUEST_CR4          \
    101                                             |   HWACCM_CHANGED_GUEST_GDTR         \
    102                                             |   HWACCM_CHANGED_GUEST_IDTR         \
    103                                             |   HWACCM_CHANGED_GUEST_LDTR         \
    104                                             |   HWACCM_CHANGED_GUEST_TR           \
    105                                             |   HWACCM_CHANGED_GUEST_MSR          \
    106                                             |   HWACCM_CHANGED_GUEST_DEBUG        \
    107                                             |   HWACCM_CHANGED_GUEST_FPU)
     71#define HM_CHANGED_GUEST_FPU                RT_BIT(0)
     72#define HM_CHANGED_GUEST_CR0                RT_BIT(1)
     73#define HM_CHANGED_GUEST_CR3                RT_BIT(2)
     74#define HM_CHANGED_GUEST_CR4                RT_BIT(3)
     75#define HM_CHANGED_GUEST_GDTR               RT_BIT(4)
     76#define HM_CHANGED_GUEST_IDTR               RT_BIT(5)
     77#define HM_CHANGED_GUEST_LDTR               RT_BIT(6)
     78#define HM_CHANGED_GUEST_TR                 RT_BIT(7)
     79#define HM_CHANGED_GUEST_MSR                RT_BIT(8)
     80#define HM_CHANGED_GUEST_SEGMENT_REGS       RT_BIT(9)
     81#define HM_CHANGED_GUEST_DEBUG              RT_BIT(10)
     82#define HM_CHANGED_HOST_CONTEXT             RT_BIT(11)
     83
     84#define HM_CHANGED_ALL                  (   HM_CHANGED_GUEST_SEGMENT_REGS \
     85                                            |   HM_CHANGED_GUEST_CR0          \
     86                                            |   HM_CHANGED_GUEST_CR3          \
     87                                            |   HM_CHANGED_GUEST_CR4          \
     88                                            |   HM_CHANGED_GUEST_GDTR         \
     89                                            |   HM_CHANGED_GUEST_IDTR         \
     90                                            |   HM_CHANGED_GUEST_LDTR         \
     91                                            |   HM_CHANGED_GUEST_TR           \
     92                                            |   HM_CHANGED_GUEST_MSR          \
     93                                            |   HM_CHANGED_GUEST_FPU          \
     94                                            |   HM_CHANGED_GUEST_DEBUG        \
     95                                            |   HM_CHANGED_HOST_CONTEXT)
     96
     97#define HM_CHANGED_ALL_GUEST            (   HM_CHANGED_GUEST_SEGMENT_REGS \
     98                                            |   HM_CHANGED_GUEST_CR0          \
     99                                            |   HM_CHANGED_GUEST_CR3          \
     100                                            |   HM_CHANGED_GUEST_CR4          \
     101                                            |   HM_CHANGED_GUEST_GDTR         \
     102                                            |   HM_CHANGED_GUEST_IDTR         \
     103                                            |   HM_CHANGED_GUEST_LDTR         \
     104                                            |   HM_CHANGED_GUEST_TR           \
     105                                            |   HM_CHANGED_GUEST_MSR          \
     106                                            |   HM_CHANGED_GUEST_DEBUG        \
     107                                            |   HM_CHANGED_GUEST_FPU)
    108108
    109109/** @} */
    110110
    111111/** Maximum number of page flushes we are willing to remember before considering a full TLB flush. */
    112 #define HWACCM_MAX_TLB_SHOOTDOWN_PAGES      8
     112#define HM_MAX_TLB_SHOOTDOWN_PAGES      8
    113113
    114114/** Size for the EPT identity page table (1024 4 MB pages to cover the entire address space). */
    115 #define HWACCM_EPT_IDENTITY_PG_TABLE_SIZE   PAGE_SIZE
     115#define HM_EPT_IDENTITY_PG_TABLE_SIZE   PAGE_SIZE
    116116/** Size of the TSS structure + 2 pages for the IO bitmap + end byte. */
    117 #define HWACCM_VTX_TSS_SIZE                 (sizeof(VBOXTSS) + 2*PAGE_SIZE + 1)
     117#define HM_VTX_TSS_SIZE                 (sizeof(VBOXTSS) + 2*PAGE_SIZE + 1)
    118118/** Total guest mapped memory needed. */
    119 #define HWACCM_VTX_TOTAL_DEVHEAP_MEM        (HWACCM_EPT_IDENTITY_PG_TABLE_SIZE + HWACCM_VTX_TSS_SIZE)
     119#define HM_VTX_TOTAL_DEVHEAP_MEM        (HM_EPT_IDENTITY_PG_TABLE_SIZE + HM_VTX_TSS_SIZE)
    120120
    121121/** Enable for TPR guest patching. */
    122 #define VBOX_HWACCM_WITH_GUEST_PATCHING
    123 
    124 /** HWACCM SSM version
    125  */
    126 #ifdef VBOX_HWACCM_WITH_GUEST_PATCHING
    127 # define HWACCM_SSM_VERSION                 5
    128 # define HWACCM_SSM_VERSION_NO_PATCHING     4
     122#define VBOX_HM_WITH_GUEST_PATCHING
     123
     124/** HM SSM version
     125 */
     126#ifdef VBOX_HM_WITH_GUEST_PATCHING
     127# define HM_SSM_VERSION                 5
     128# define HM_SSM_VERSION_NO_PATCHING     4
    129129#else
    130 # define HWACCM_SSM_VERSION                 4
    131 # define HWACCM_SSM_VERSION_NO_PATCHING     4
    132 #endif
    133 #define HWACCM_SSM_VERSION_2_0_X            3
     130# define HM_SSM_VERSION                 4
     131# define HM_SSM_VERSION_NO_PATCHING     4
     132#endif
     133#define HM_SSM_VERSION_2_0_X            3
    134134
    135135/**
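
The HM_CHANGED_* bits above form a dirty mask (fContextUseFlags in HMCPU, further down in this header) recording which pieces of guest state were touched since the last world switch, so only those need re-syncing into the VMCS/VMCB; note that HM_CHANGED_ALL is HM_CHANGED_ALL_GUEST plus the host-context bit. A toy illustration of the set-then-sync use of such a mask (illustrative only; the real syncing is done by the ring-0 VMX/SVM code):

    #include <stdint.h>

    #define TOY_CHANGED_GUEST_CR0   (1u << 1)
    #define TOY_CHANGED_GUEST_CR3   (1u << 2)

    static uint32_t g_fContextUseFlags;

    static void toyGuestWroteCr3(void)
    {
        g_fContextUseFlags |= TOY_CHANGED_GUEST_CR3;    /* mark CR3 dirty */
    }

    static void toySyncStateBeforeEntry(void)
    {
        if (g_fContextUseFlags & TOY_CHANGED_GUEST_CR3)
        {
            /* ...would write the guest CR3 into the VMCS/VMCB here... */
            g_fContextUseFlags &= ~TOY_CHANGED_GUEST_CR3;
        }
        /* ...same pattern for CR0, segment registers, and so on... */
    }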
     
    160160typedef enum
    161161{
    162     HWACCMPENDINGIO_INVALID = 0,
    163     HWACCMPENDINGIO_PORT_READ,
    164     HWACCMPENDINGIO_PORT_WRITE,
    165     HWACCMPENDINGIO_STRING_READ,
    166     HWACCMPENDINGIO_STRING_WRITE,
     162    HMPENDINGIO_INVALID = 0,
     163    HMPENDINGIO_PORT_READ,
     164    HMPENDINGIO_PORT_WRITE,
     165    HMPENDINGIO_STRING_READ,
     166    HMPENDINGIO_STRING_WRITE,
    167167    /** The usual 32-bit paranoia. */
    168     HWACCMPENDINGIO_32BIT_HACK   = 0x7fffffff
    169 } HWACCMPENDINGIO;
     168    HMPENDINGIO_32BIT_HACK   = 0x7fffffff
     169} HMPENDINGIO;
    170170
    171171
    172172typedef enum
    173173{
    174     HWACCMTPRINSTR_INVALID,
    175     HWACCMTPRINSTR_READ,
    176     HWACCMTPRINSTR_READ_SHR4,
    177     HWACCMTPRINSTR_WRITE_REG,
    178     HWACCMTPRINSTR_WRITE_IMM,
    179     HWACCMTPRINSTR_JUMP_REPLACEMENT,
     174    HMTPRINSTR_INVALID,
     175    HMTPRINSTR_READ,
     176    HMTPRINSTR_READ_SHR4,
     177    HMTPRINSTR_WRITE_REG,
     178    HMTPRINSTR_WRITE_IMM,
     179    HMTPRINSTR_JUMP_REPLACEMENT,
    180180    /** The usual 32-bit paranoia. */
    181     HWACCMTPRINSTR_32BIT_HACK   = 0x7fffffff
    182 } HWACCMTPRINSTR;
     181    HMTPRINSTR_32BIT_HACK   = 0x7fffffff
     182} HMTPRINSTR;
    183183
    184184typedef struct
     
    195195    uint32_t                cbNewOp;
    196196    /** Instruction type. */
    197     HWACCMTPRINSTR          enmType;
     197    HMTPRINSTR          enmType;
    198198    /** Source operand. */
    199199    uint32_t                uSrcOperand;
     
    204204    /** Patch address of the jump replacement. */
    205205    RTGCPTR32               pJumpTarget;
    206 } HWACCMTPRPATCH;
    207 /** Pointer to HWACCMTPRPATCH. */
    208 typedef HWACCMTPRPATCH *PHWACCMTPRPATCH;
     206} HMTPRPATCH;
     207/** Pointer to HMTPRPATCH. */
     208typedef HMTPRPATCH *PHMTPRPATCH;
    209209
    210210/**
     
    215215 * @returns Return code indicating the action to take.
    216216 */
    217 typedef DECLCALLBACK (int) FNHWACCMSWITCHERHC(PVM pVM, uint32_t uOffsetVMCPU);
     217typedef DECLCALLBACK (int) FNHMSWITCHERHC(PVM pVM, uint32_t uOffsetVMCPU);
    218218/** Pointer to switcher function. */
    219 typedef FNHWACCMSWITCHERHC *PFNHWACCMSWITCHERHC;
     219typedef FNHMSWITCHERHC *PFNHMSWITCHERHC;
    220220
    221221/**
    222  * HWACCM VM Instance data.
    223  * Changes to this must checked against the padding of the hwaccm union in VM!
    224  */
    225 typedef struct HWACCM
     222 * HM VM Instance data.
     223 * Changes to this must be checked against the padding of the hm union in VM!
     224 */
     225typedef struct HM
    226226{
    227227    /** Set when we've initialized VMX or SVM. */
     
    276276#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    277277    /** 32 to 64 bits switcher entrypoint. */
    278     R0PTRTYPE(PFNHWACCMSWITCHERHC) pfnHost32ToGuest64R0;
     278    R0PTRTYPE(PFNHMSWITCHERHC) pfnHost32ToGuest64R0;
    279279
    280280    /* AMD-V 64 bits vmrun handler */
     
    300300    struct
    301301    {
    302         /** Set by the ring-0 side of HWACCM to indicate VMX is supported by the
     302        /** Set by the ring-0 side of HM to indicate VMX is supported by the
    303303         *  CPU. */
    304304        bool                        fSupported;
     
    400400    struct
    401401    {
    402         /** Set by the ring-0 side of HWACCM to indicate SVM is supported by the
     402        /** Set by the ring-0 side of HM to indicate SVM is supported by the
    403403         *  CPU. */
    404404        bool                        fSupported;
     
    432432    AVLOU32TREE                     PatchTree;
    433433    uint32_t                        cPatches;
    434     HWACCMTPRPATCH                  aPatches[64];
     434    HMTPRPATCH                  aPatches[64];
    435435
    436436    struct
     
    443443    int32_t                 lLastError;
    444444
    445     /** HWACCMR0Init was run */
    446     bool                    fHWACCMR0Init;
     445    /** HMR0Init was run */
     446    bool                    fHMR0Init;
    447447    bool                    u8Alignment1[7];
    448448
     
    451451    STAMCOUNTER             StatTPRReplaceSuccess;
    452452    STAMCOUNTER             StatTPRReplaceFailure;
    453 } HWACCM;
    454 /** Pointer to HWACCM VM instance data. */
    455 typedef HWACCM *PHWACCM;
     453} HM;
     454/** Pointer to HM VM instance data. */
     455typedef HM *PHM;
    456456
    457457/* Maximum number of cached entries. */
     
    518518
    519519/** VMX StartVM function. */
    520 typedef DECLCALLBACK(int) FNHWACCMVMXSTARTVM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
     520typedef DECLCALLBACK(int) FNHMVMXSTARTVM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
    521521/** Pointer to a VMX StartVM function. */
    522 typedef R0PTRTYPE(FNHWACCMVMXSTARTVM *) PFNHWACCMVMXSTARTVM;
     522typedef R0PTRTYPE(FNHMVMXSTARTVM *) PFNHMVMXSTARTVM;
    523523
    524524/** SVM VMRun function. */
    525 typedef DECLCALLBACK(int) FNHWACCMSVMVMRUN(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu);
     525typedef DECLCALLBACK(int) FNHMSVMVMRUN(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu);
    526526/** Pointer to a SVM VMRun function. */
    527 typedef R0PTRTYPE(FNHWACCMSVMVMRUN *) PFNHWACCMSVMVMRUN;
     527typedef R0PTRTYPE(FNHMSVMVMRUN *) PFNHMSVMVMRUN;
    528528
    529529/**
    530  * HWACCM VMCPU Instance data.
    531  */
    532 typedef struct HWACCMCPU
     530 * HM VMCPU Instance data.
     531 */
     532typedef struct HMCPU
    533533{
    534534    /** Old style FPU reporting trap mask override performed (optimization) */
     
    551551    volatile uint32_t           cWorldSwitchExits;
    552552
    553     /** HWACCM_CHANGED_* flags. */
     553    /** HM_CHANGED_* flags. */
    554554    uint32_t                    fContextUseFlags;
    555555
     
    578578
    579579        /** Ring 0 handlers for VT-x. */
    580         PFNHWACCMVMXSTARTVM         pfnStartVM;
     580        PFNHMVMXSTARTVM         pfnStartVM;
    581581
    582582#if HC_ARCH_BITS == 32
     
    658658        /** The last seen guest paging mode (by VT-x). */
    659659        PGMMODE                     enmLastSeenGuestMode;
    660         /** Current guest paging mode (as seen by HWACCMR3PagingModeChanged). */
     660        /** Current guest paging mode (as seen by HMR3PagingModeChanged). */
    661661        PGMMODE                     enmCurrGuestMode;
    662         /** Previous guest paging mode (as seen by HWACCMR3PagingModeChanged). */
     662        /** Previous guest paging mode (as seen by HMR3PagingModeChanged). */
    663663        PGMMODE                     enmPrevGuestMode;
    664664    } vmx;
     
    681681
    682682        /** Ring 0 handlers for VT-x. */
    683         PFNHWACCMSVMVMRUN           pfnVMRun;
     683        PFNHMSVMVMRUN           pfnVMRun;
    684684
    685685        /** R0 memory object for the MSR bitmap (8kb). */
     
    714714    {
    715715        /* Pending IO operation type. */
    716         HWACCMPENDINGIO         enmType;
     716        HMPENDINGIO         enmType;
    717717        uint32_t                uPadding;
    718718        RTGCPTR                 GCPtrRip;
     
    734734
    735735    /** The CPU ID of the CPU currently owning the VMCS. Set in
    736      * HWACCMR0Enter and cleared in HWACCMR0Leave. */
     736     * HMR0Enter and cleared in HMR0Leave. */
    737737    RTCPUID                 idEnteredCpu;
    738738
     
    740740    struct
    741741    {
    742         RTGCPTR             aPages[HWACCM_MAX_TLB_SHOOTDOWN_PAGES];
     742        RTGCPTR             aPages[HM_MAX_TLB_SHOOTDOWN_PAGES];
    743743        unsigned            cPages;
    744744    } TlbShootdown;
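
The TlbShootdown queue pairs with HM_MAX_TLB_SHOOTDOWN_PAGES earlier in this header: individual page invalidations are remembered up to that cap, after which one full TLB flush is cheaper than replaying them all. A sketch of that accumulate-or-flush policy, hypothetical names throughout:

    #define TOY_MAX_SHOOTDOWN_PAGES 8

    typedef unsigned long TOYGCPTR;         /* toy guest-context pointer */

    static TOYGCPTR g_aPages[TOY_MAX_SHOOTDOWN_PAGES];
    static unsigned g_cPages;
    static int      g_fFlushWholeTlb;

    static void toyQueueInvalidatePage(TOYGCPTR GCPtrPage)
    {
        if (g_cPages < TOY_MAX_SHOOTDOWN_PAGES)
            g_aPages[g_cPages++] = GCPtrPage;   /* cheap: remember the page */
        else
            g_fFlushWholeTlb = 1;               /* cap hit: flush everything */
    }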
     
    858858    R0PTRTYPE(PSTAMCOUNTER) paStatInjectedIrqsR0;
    859859#endif
    860 } HWACCMCPU;
    861 /** Pointer to HWACCM VM instance data. */
    862 typedef HWACCMCPU *PHWACCMCPU;
     860} HMCPU;
     861/** Pointer to HM VM instance data. */
     862typedef HMCPU *PHMCPU;
    863863
    864864
    865865#ifdef IN_RING0
    866866
    867 VMMR0DECL(PHMGLOBLCPUINFO) HWACCMR0GetCurrentCpu(void);
    868 VMMR0DECL(PHMGLOBLCPUINFO) HWACCMR0GetCurrentCpuEx(RTCPUID idCpu);
     867VMMR0DECL(PHMGLOBLCPUINFO) HMR0GetCurrentCpu(void);
     868VMMR0DECL(PHMGLOBLCPUINFO) HMR0GetCurrentCpuEx(RTCPUID idCpu);
    869869
    870870
    871871#ifdef VBOX_STRICT
    872 VMMR0DECL(void) HWACCMDumpRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
    873 VMMR0DECL(void) HWACCMR0DumpDescriptor(PCX86DESCHC pDesc, RTSEL Sel, const char *pszMsg);
     872VMMR0DECL(void) HMDumpRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
     873VMMR0DECL(void) HMR0DumpDescriptor(PCX86DESCHC pDesc, RTSEL Sel, const char *pszMsg);
    874874#else
    875 # define HWACCMDumpRegs(a, b ,c)            do { } while (0)
    876 # define HWACCMR0DumpDescriptor(a, b, c)    do { } while (0)
     875# define HMDumpRegs(a, b ,c)            do { } while (0)
     876# define HMR0DumpDescriptor(a, b, c)    do { } while (0)
    877877#endif
    878878
    879879# ifdef VBOX_WITH_KERNEL_USING_XMM
    880 DECLASM(int)   hwaccmR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu, PFNHWACCMVMXSTARTVM pfnStartVM);
    881 DECLASM(int)   hwaccmR0SVMRunWrapXMM(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu, PFNHWACCMSVMVMRUN pfnVMRun);
     880DECLASM(int)   hmR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu, PFNHMVMXSTARTVM pfnStartVM);
     881DECLASM(int)   hmR0SVMRunWrapXMM(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu, PFNHMSVMVMRUN pfnVMRun);
    882882# endif
    883883
     
    888888 * @param  pIdtr        Where to store the 64-bit IDTR.
    889889 */
    890 DECLASM(void) hwaccmR0Get64bitGDTRandIDTR(PX86XDTR64 pGdtr, PX86XDTR64 pIdtr);
     890DECLASM(void) hmR0Get64bitGDTRandIDTR(PX86XDTR64 pGdtr, PX86XDTR64 pIdtr);
    891891
    892892/**
     
    894894 * @returns CR3
    895895 */
    896 DECLASM(uint64_t) hwaccmR0Get64bitCR3(void);
     896DECLASM(uint64_t) hmR0Get64bitCR3(void);
    897897# endif
    898898
  • trunk/src/VBox/VMM/include/HMInternal.mac

    r43373 r43387  
    11;$Id$
    22;; @file
    3 ; HWACCM - Internal header file.
     3; HM - Internal header file.
    44;
    55;
  • trunk/src/VBox/VMM/include/PGMInline.h

    r43303 r43387  
    3232#include <VBox/log.h>
    3333#include <VBox/vmm/gmm.h>
    34 #include <VBox/vmm/hwaccm.h>
     34#include <VBox/vmm/hm.h>
    3535#include <iprt/asm.h>
    3636#include <iprt/assert.h>
  • trunk/src/VBox/VMM/include/PGMInternal.h

    r43302 r43387  
    3333#include <VBox/log.h>
    3434#include <VBox/vmm/gmm.h>
    35 #include <VBox/vmm/hwaccm.h>
    36 #include <VBox/vmm/hwacc_vmx.h>
     35#include <VBox/vmm/hm.h>
     36#include <VBox/vmm/hm_vmx.h>
    3737#include "internal/pgm.h"
    3838#include <iprt/asm.h>
     
    348348# define PGM_INVL_PG(pVCpu, GCVirt)             ASMInvalidatePage((void *)(uintptr_t)(GCVirt))
    349349#elif defined(IN_RING0)
    350 # define PGM_INVL_PG(pVCpu, GCVirt)             HWACCMInvalidatePage(pVCpu, (RTGCPTR)(GCVirt))
     350# define PGM_INVL_PG(pVCpu, GCVirt)             HMInvalidatePage(pVCpu, (RTGCPTR)(GCVirt))
    351351#else
    352 # define PGM_INVL_PG(pVCpu, GCVirt)             HWACCMInvalidatePage(pVCpu, (RTGCPTR)(GCVirt))
     352# define PGM_INVL_PG(pVCpu, GCVirt)             HMInvalidatePage(pVCpu, (RTGCPTR)(GCVirt))
    353353#endif
    354354
     
    362362# define PGM_INVL_PG_ALL_VCPU(pVM, GCVirt)      ASMInvalidatePage((void *)(uintptr_t)(GCVirt))
    363363#elif defined(IN_RING0)
    364 # define PGM_INVL_PG_ALL_VCPU(pVM, GCVirt)      HWACCMInvalidatePageOnAllVCpus(pVM, (RTGCPTR)(GCVirt))
     364# define PGM_INVL_PG_ALL_VCPU(pVM, GCVirt)      HMInvalidatePageOnAllVCpus(pVM, (RTGCPTR)(GCVirt))
    365365#else
    366 # define PGM_INVL_PG_ALL_VCPU(pVM, GCVirt)      HWACCMInvalidatePageOnAllVCpus(pVM, (RTGCPTR)(GCVirt))
     366# define PGM_INVL_PG_ALL_VCPU(pVM, GCVirt)      HMInvalidatePageOnAllVCpus(pVM, (RTGCPTR)(GCVirt))
    367367#endif
    368368
     
    376376# define PGM_INVL_BIG_PG(pVCpu, GCVirt)         ASMReloadCR3()
    377377#elif defined(IN_RING0)
    378 # define PGM_INVL_BIG_PG(pVCpu, GCVirt)         HWACCMFlushTLB(pVCpu)
     378# define PGM_INVL_BIG_PG(pVCpu, GCVirt)         HMFlushTLB(pVCpu)
    379379#else
    380 # define PGM_INVL_BIG_PG(pVCpu, GCVirt)         HWACCMFlushTLB(pVCpu)
     380# define PGM_INVL_BIG_PG(pVCpu, GCVirt)         HMFlushTLB(pVCpu)
    381381#endif
    382382
     
    389389# define PGM_INVL_VCPU_TLBS(pVCpu)             ASMReloadCR3()
    390390#elif defined(IN_RING0)
    391 # define PGM_INVL_VCPU_TLBS(pVCpu)             HWACCMFlushTLB(pVCpu)
     391# define PGM_INVL_VCPU_TLBS(pVCpu)             HMFlushTLB(pVCpu)
    392392#else
    393 # define PGM_INVL_VCPU_TLBS(pVCpu)             HWACCMFlushTLB(pVCpu)
     393# define PGM_INVL_VCPU_TLBS(pVCpu)             HMFlushTLB(pVCpu)
    394394#endif
    395395
     
    402402# define PGM_INVL_ALL_VCPU_TLBS(pVM)            ASMReloadCR3()
    403403#elif defined(IN_RING0)
    404 # define PGM_INVL_ALL_VCPU_TLBS(pVM)            HWACCMFlushTLBOnAllVCpus(pVM)
     404# define PGM_INVL_ALL_VCPU_TLBS(pVM)            HMFlushTLBOnAllVCpus(pVM)
    405405#else
    406 # define PGM_INVL_ALL_VCPU_TLBS(pVM)            HWACCMFlushTLBOnAllVCpus(pVM)
     406# define PGM_INVL_ALL_VCPU_TLBS(pVM)            HMFlushTLBOnAllVCpus(pVM)
    407407#endif
    408408
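
These PGM_INVL_* macros choose the invalidation primitive by compilation context: in the raw-mode context (IN_RC) the hypervisor runs on the guest's own paging, so a direct invlpg or CR3 reload suffices, while in ring-0 and ring-3 the request must go through HM so it reaches the right VCPU's hardware TLB state. A reduced model of that selection (hypothetical function names; the real primitives are ASMInvalidatePage and HMInvalidatePage):

    static void toyAsmInvlpg(unsigned long GCVirt)        { (void)GCVirt; }
    static void toyHmInvalidatePage(unsigned long GCVirt) { (void)GCVirt; }

    #if defined(IN_RC)
    # define TOY_INVL_PG(GCVirt)    toyAsmInvlpg(GCVirt)         /* direct */
    #else
    # define TOY_INVL_PG(GCVirt)    toyHmInvalidatePage(GCVirt)  /* via HM */
    #endif

    /* A single call site compiles to the right primitive per context: */
    static void toyUnmapPage(unsigned long GCVirt)
    {
        /* ...page table entry cleared here... */
        TOY_INVL_PG(GCVirt);
    }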
     
    30243024    bool                            fLessThan52PhysicalAddressBits;
    30253025    /** Set when nested paging is active.
    3026      * This is meant to save calls to HWACCMIsNestedPagingActive and let the
     3026     * This is meant to save calls to HMIsNestedPagingActive and let the
    30273027     * compilers optimize the code better.  Whether we use nested paging or
    30283028     * not is something we find out during VMM initialization and we won't
  • trunk/src/VBox/VMM/include/VMMInternal.h

    r41976 r43387  
    507507    VMMGC_DO_TESTCASE_INTERRUPT_MASKING,
    508508    /** Switching testing and profiling stub. */
    509     VMMGC_DO_TESTCASE_HWACCM_NOP,
     509    VMMGC_DO_TESTCASE_HM_NOP,
    510510
    511511    /** The usual 32-bit hack. */
  • trunk/src/VBox/VMM/testcase/Makefile.kmk

    r41976 r43387  
    3636 PROGRAMS  += tstGlobalConfig tstInstrEmul
    3737 ifdef VBOX_WITH_RAW_MODE
    38   PROGRAMS  += tstVMM tstVMM-HwAccm
     38  PROGRAMS  += tstVMM tstVMM-HM
    3939  ifneq ($(KBUILD_TARGET),win)
    4040   PROGRAMS += tstVMMFork
     
    256256 tstVMM_LIBS            = $(LIB_VMM) $(LIB_REM) $(LIB_RUNTIME)
    257257
    258  tstVMM-HwAccm_TEMPLATE = VBOXR3EXE
    259  tstVMM-HwAccm_SOURCES  = tstVMM-HwAccm.cpp
    260  tstVMM-HwAccm_LIBS     = $(LIB_VMM) $(LIB_REM) $(LIB_RUNTIME)
     258 tstVMM-HM_TEMPLATE    = VBOXR3EXE
     259 tstVMM-HM_SOURCES      = tstVMM-HM.cpp
     260 tstVMM-HM_LIBS         = $(LIB_VMM) $(LIB_REM) $(LIB_RUNTIME)
    261261
    262262 tstVMMFork_TEMPLATE    = VBOXR3EXE
     
    396396                $(VBOX_PATH_VMM_SRC)/include/CPUMInternal.mac \
    397397                $(VBOX_PATH_VMM_SRC)/include/TRPMInternal.mac \
    398                 $(VBOX_PATH_VMM_SRC)/include/HWACCMInternal.mac \
     398                $(VBOX_PATH_VMM_SRC)/include/HMInternal.mac \
    399399                $(VBOX_PATH_VMM_SRC)/include/VMMInternal.mac \
    400400                $(VBOX_PATH_VMM_SRC)/testcase/Makefile.kmk \
     
    423423                $(DEPTH)/include/iprt/x86.mac \
    424424                $(VBOX_PATH_VMM_SRC)/include/CPUMInternal.mac \
    425                 $(VBOX_PATH_VMM_SRC)/include/HWACCMInternal.mac \
     425                $(VBOX_PATH_VMM_SRC)/include/HMInternal.mac \
    426426                $(VBOX_PATH_VMM_SRC)/include/VMMInternal.mac \
    427427                $(VBOX_PATH_VMM_SRC)/include/VMMSwitcher.mac \
  • trunk/src/VBox/VMM/testcase/tstAsmStructs.cpp

    r41965 r43387  
    2323#include <VBox/vmm/trpm.h>
    2424#include "TRPMInternal.h"
    25 #include "HWACCMInternal.h"
     25#include "HMInternal.h"
    2626#include "VMMSwitcher.h"
    2727#include "VMMInternal.h"
  • trunk/src/VBox/VMM/testcase/tstAsmStructsAsm.asm

    r35346 r43387  
    2424
    2525%include "CPUMInternal.mac"
    26 %include "HWACCMInternal.mac"
     26%include "HMInternal.mac"
    2727%include "TRPMInternal.mac"
    2828%include "VMMInternal.mac"
  • trunk/src/VBox/VMM/testcase/tstVMStruct.h

    r42407 r43387  
    13291329    GEN_CHECK_OFF(VM, fPATMEnabled);
    13301330    GEN_CHECK_OFF(VM, fCSAMEnabled);
    1331     GEN_CHECK_OFF(VM, fHWACCMEnabled);
     1331    GEN_CHECK_OFF(VM, fHMEnabled);
    13321332    GEN_CHECK_OFF(VM, fHwVirtExtForced);
    13331333    GEN_CHECK_OFF(VM, fFaultTolerantMaster);
     
    13561356    GEN_CHECK_OFF(VM, vmm);
    13571357    GEN_CHECK_OFF(VM, pgm);
    1358     GEN_CHECK_OFF(VM, hwaccm);
     1358    GEN_CHECK_OFF(VM, hm);
    13591359    GEN_CHECK_OFF(VM, trpm);
    13601360    GEN_CHECK_OFF(VM, selm);
     
    13901390    GEN_CHECK_OFF(VMCPU, aStatAdHoc);
    13911391    GEN_CHECK_OFF(VMCPU, cpum);
    1392     GEN_CHECK_OFF(VMCPU, hwaccm);
     1392    GEN_CHECK_OFF(VMCPU, hm);
    13931393    GEN_CHECK_OFF(VMCPU, em);
    13941394    GEN_CHECK_OFF(VMCPU, iem);
  • trunk/src/VBox/VMM/testcase/tstVMStructDTrace.cpp

    r41268 r43387  
    4242#include "IOMInternal.h"
    4343#include "REMInternal.h"
    44 #include "HWACCMInternal.h"
     44#include "HMInternal.h"
    4545#include "PATMInternal.h"
    4646#include "VMMInternal.h"
  • trunk/src/VBox/VMM/testcase/tstVMStructRC.cpp

    r41965 r43387  
    7070#include "IOMInternal.h"
    7171#include "REMInternal.h"
    72 #include "HWACCMInternal.h"
     72#include "HMInternal.h"
    7373#include "PATMInternal.h"
    7474#include "VMMInternal.h"
  • trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp

    r41965 r43387  
    4141#include "REMInternal.h"
    4242#include "SSMInternal.h"
    43 #include "HWACCMInternal.h"
     43#include "HMInternal.h"
    4444#include "PATMInternal.h"
    4545#include "VMMInternal.h"
     
    207207    PRINT_OFFSET(VM, pgm.s.CritSectX);
    208208    CHECK_PADDING_VM(64, pgm);
    209     PRINT_OFFSET(VM, hwaccm);
    210     CHECK_PADDING_VM(64, hwaccm);
     209    PRINT_OFFSET(VM, hm);
     210    CHECK_PADDING_VM(64, hm);
    211211    CHECK_PADDING_VM(64, trpm);
    212212    CHECK_PADDING_VM(64, selm);
     
    227227    PRINT_OFFSET(VMCPU, cpum);
    228228    CHECK_PADDING_VMCPU(64, cpum);
    229     CHECK_PADDING_VMCPU(64, hwaccm);
     229    CHECK_PADDING_VMCPU(64, hm);
    230230    CHECK_PADDING_VMCPU(64, em);
    231231    CHECK_PADDING_VMCPU(64, iem);
     
    394394    CHECK_MEMBER_ALIGNMENT(MMHYPERHEAP, Lock, sizeof(uintptr_t));
    395395
    396     /* hwaccm - 32-bit gcc won't align uint64_t naturally, so check. */
    397     CHECK_MEMBER_ALIGNMENT(HWACCM, u64RegisterMask, 8);
    398     CHECK_MEMBER_ALIGNMENT(HWACCM, vmx.hostCR4, 8);
    399     CHECK_MEMBER_ALIGNMENT(HWACCM, vmx.msr.feature_ctrl, 8);
    400     CHECK_MEMBER_ALIGNMENT(HWACCM, StatTPRPatchSuccess, 8);
    401     CHECK_MEMBER_ALIGNMENT(HWACCMCPU, StatEntry, 8);
    402     CHECK_MEMBER_ALIGNMENT(HWACCMCPU, vmx.HCPhysVMCS, sizeof(RTHCPHYS));
    403     CHECK_MEMBER_ALIGNMENT(HWACCMCPU, vmx.proc_ctls, 8);
    404     CHECK_MEMBER_ALIGNMENT(HWACCMCPU, Event.intInfo, 8);
     396    /* hm - 32-bit gcc won't align uint64_t naturally, so check. */
     397    CHECK_MEMBER_ALIGNMENT(HM, u64RegisterMask, 8);
     398    CHECK_MEMBER_ALIGNMENT(HM, vmx.hostCR4, 8);
     399    CHECK_MEMBER_ALIGNMENT(HM, vmx.msr.feature_ctrl, 8);
     400    CHECK_MEMBER_ALIGNMENT(HM, StatTPRPatchSuccess, 8);
     401    CHECK_MEMBER_ALIGNMENT(HMCPU, StatEntry, 8);
     402    CHECK_MEMBER_ALIGNMENT(HMCPU, vmx.HCPhysVMCS, sizeof(RTHCPHYS));
     403    CHECK_MEMBER_ALIGNMENT(HMCPU, vmx.proc_ctls, 8);
     404    CHECK_MEMBER_ALIGNMENT(HMCPU, Event.intInfo, 8);
    405405
    406406    /* Make sure the set is large enough and has the correct size. */