VirtualBox

Changeset 26150 in vbox


Timestamp:
Feb 2, 2010 3:52:54 PM
Author:
vboxsync
svn:sync-xref-src-repo-rev:
57163
Message:

PGM: Split out the inlined code from PGMInternal.h and into PGMInline.h so we can drop all the &pVM->pgm.s and &pVCpu->pgm.s stuff.
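
For context, a minimal sketch of the cleanup this split enables (illustrative signatures and names, not the actual VBox declarations): once the inline helpers live in PGMInline.h, they can be reworked to take the VM handle directly instead of the embedded PGM instance data.

    /* Before: inline helpers took the instance data, so every caller
     * had to write &pVM->pgm.s (sketch only): */
    DECLINLINE(PPGMPAGE) pgmPhysGetPage(PPGM pPGM, RTGCPHYS GCPhys);
    PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);

    /* After the split, the helpers can take the VM handle itself: */
    DECLINLINE(PPGMPAGE) pgmPhysGetPage(PVM pVM, RTGCPHYS GCPhys);
    PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);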

Location:
trunk/src/VBox/VMM
Files:
16 edited
1 copied

  • trunk/src/VBox/VMM/PGM.cpp

    r26107 r26150  
    590590#include "PGMInternal.h"
    591591#include <VBox/vm.h>
     592#include "PGMInline.h"
    592593
    593594#include <VBox/dbg.h>
  • trunk/src/VBox/VMM/PGMDbg.cpp

    r24061 r26150  
    2828#include "PGMInternal.h"
    2929#include <VBox/vm.h>
     30#include "PGMInline.h"
    3031#include <iprt/assert.h>
    3132#include <iprt/asm.h>
     
    3536#include <VBox/err.h>
    3637
     38
     39/*******************************************************************************
     40*   Defined Constants And Macros                                               *
     41*******************************************************************************/
    3742/** The max needle size that we will bother searching for.
    3843 * This must not be more than half a page! */
  • trunk/src/VBox/VMM/PGMHandler.cpp

    r20808 r26150  
    4141#include "PGMInternal.h"
    4242#include <VBox/vm.h>
     43#include "PGMInline.h"
    4344#include <VBox/dbg.h>
    4445
  • trunk/src/VBox/VMM/PGMInline.h

    r26139 r26150  
    11/* $Id$ */
    22/** @file
    3  * PGM - Internal header file.
     3 * PGM - Inlined functions.
    44 */
    55
     
    2020 */
    2121
    22 #ifndef ___PGMInternal_h
    23 #define ___PGMInternal_h
     22#ifndef ___PGMInline_h
     23#define ___PGMInline_h
    2424
    2525#include <VBox/cdefs.h>
     
    4545
    4646
    47 /** @defgroup grp_pgm_int   Internals
    48  * @ingroup grp_pgm
     47/** @addtogroup grp_pgm_int   Internals
    4948 * @internal
    5049 * @{
    5150 */
    52 
    53 
    54 /** @name PGM Compile Time Config
    55  * @{
    56  */
    57 
    58 /**
    59  * Indicates that there are no guest mappings to care about.
    60  * Currently only raw-mode related code uses mappings, i.e. RC and R3 code.
    61  */
    62 #if defined(IN_RING0) || !defined(VBOX_WITH_RAW_MODE)
    63 # define PGM_WITHOUT_MAPPINGS
    64 #endif
    65 
    66 /**
    67  * Solves page-is-out-of-sync issues inside Guest Context (in PGMGC.cpp).
    68  * Comment it out if it breaks something.
    69  */
    70 #define PGM_OUT_OF_SYNC_IN_GC
    71 
    72 /**
    73  * Check and skip global PDEs for non-global flushes
    74  */
    75 #define PGM_SKIP_GLOBAL_PAGEDIRS_ON_NONGLOBAL_FLUSH
    76 
    77 /**
    78  * Optimization for PAE page tables that are modified often
    79  */
    80 //#if 0 /* disabled again while debugging */
    81 #ifndef IN_RC
    82 # define PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
    83 #endif
    84 //#endif
    85 
    86 /**
    87  * Sync N pages instead of a whole page table
    88  */
    89 #define PGM_SYNC_N_PAGES
    90 
    91 /**
    92  * Number of pages to sync during a page fault
    93  *
    94  * When PGMPOOL_WITH_GCPHYS_TRACKING is enabled, using high values here
    95  * causes a lot of unnecessary extents and also is slower than taking more \#PFs.
    96  *
    97  * Note that \#PFs are much more expensive in the VT-x/AMD-V case due to
    98  * world switch overhead, so let's sync more.
    99  */
    100 # ifdef IN_RING0
    101 /* Chose 32 based on the compile test in #4219; 64 shows worse stats.
    102  * 32 again shows better results than 16; slightly more overhead in the \#PF handler,
    103  * but ~5% fewer faults.
    104  */
    105 # define PGM_SYNC_NR_PAGES               32
    106 #else
    107 # define PGM_SYNC_NR_PAGES               8
    108 #endif
    109 
    110 /**
    111  * Number of PGMPhysRead/Write cache entries (must be <= sizeof(uint64_t))
    112  */
    113 #define PGM_MAX_PHYSCACHE_ENTRIES       64
    114 #define PGM_MAX_PHYSCACHE_ENTRIES_MASK  (PGM_MAX_PHYSCACHE_ENTRIES-1)
    115 
    116 
    117 /** @def PGMPOOL_CFG_MAX_GROW
    118  * The maximum number of pages to add to the pool in one go.
    119  */
    120 #define PGMPOOL_CFG_MAX_GROW            (_256K >> PAGE_SHIFT)
    121 
    122 /** @def VBOX_STRICT_PGM_HANDLER_VIRTUAL
    123  * Enables some extra assertions for virtual handlers (mainly phys2virt related).
    124  */
    125 #ifdef VBOX_STRICT
    126 # define VBOX_STRICT_PGM_HANDLER_VIRTUAL
    127 #endif
    128 
    129 /** @def VBOX_WITH_NEW_LAZY_PAGE_ALLOC
    130  * Enables the experimental lazy page allocation code. */
    131 /*#define VBOX_WITH_NEW_LAZY_PAGE_ALLOC */
    132 
    133 /** @def VBOX_WITH_REAL_WRITE_MONITORED_PAGES
    134  * Enables real write monitoring of pages, i.e. mapping them read-only and
    135  * only making them writable when getting a write access #PF. */
    136 #define VBOX_WITH_REAL_WRITE_MONITORED_PAGES
    137 
    138 /** @} */
    139 
    140 
    141 /** @name PDPT and PML4 flags.
    142  * These are placed in the three bits available for system programs in
    143  * the PDPT and PML4 entries.
    144  * @{ */
    145 /** The entry is a permanent one and it must always be present.
    146  * Never free such an entry. */
    147 #define PGM_PLXFLAGS_PERMANENT          RT_BIT_64(10)
    148 /** Mapping (hypervisor allocated pagetable). */
    149 #define PGM_PLXFLAGS_MAPPING            RT_BIT_64(11)
    150 /** @} */
    151 
    152 /** @name Page directory flags.
    153  * These are placed in the three bits available for system programs in
    154  * the page directory entries.
    155  * @{ */
    156 /** Mapping (hypervisor allocated pagetable). */
    157 #define PGM_PDFLAGS_MAPPING             RT_BIT_64(10)
    158 /** Made read-only to facilitate dirty bit tracking. */
    159 #define PGM_PDFLAGS_TRACK_DIRTY         RT_BIT_64(11)
    160 /** @} */
    161 
    162 /** @name Page flags.
    163  * These are placed in the three bits available for system programs in
    164  * the page entries.
    165  * @{ */
    166 /** Made read-only to facilitate dirty bit tracking. */
    167 #define PGM_PTFLAGS_TRACK_DIRTY         RT_BIT_64(9)
    168 
    169 #ifndef PGM_PTFLAGS_CSAM_VALIDATED
    170 /** Scanned and approved by CSAM (tm).
    171  * NOTE: Must be identical to the one defined in CSAMInternal.h!!
    172  * @todo Move PGM_PTFLAGS_* and PGM_PDFLAGS_* to VBox/pgm.h. */
    173 #define PGM_PTFLAGS_CSAM_VALIDATED      RT_BIT_64(11)
    174 #endif
    175 
    176 /** @} */
    177 
    178 /** @name Defines used to indicate the shadow and guest paging in the templates.
    179  * @{ */
    180 #define PGM_TYPE_REAL                   1
    181 #define PGM_TYPE_PROT                   2
    182 #define PGM_TYPE_32BIT                  3
    183 #define PGM_TYPE_PAE                    4
    184 #define PGM_TYPE_AMD64                  5
    185 #define PGM_TYPE_NESTED                 6
    186 #define PGM_TYPE_EPT                    7
    187 #define PGM_TYPE_MAX                    PGM_TYPE_EPT
    188 /** @} */
    189 
    190 /** Macro for checking if the guest is using paging.
    191  * @param uGstType     PGM_TYPE_*
    192  * @param uShwType     PGM_TYPE_*
    193  * @remark  ASSUMES certain order of the PGM_TYPE_* values.
    194  */
    195 #define PGM_WITH_PAGING(uGstType, uShwType)  \
    196     (   (uGstType) >= PGM_TYPE_32BIT \
    197      && (uShwType) != PGM_TYPE_NESTED \
    198      && (uShwType) != PGM_TYPE_EPT)
    199 
    200 /** Macro for checking if the guest supports the NX bit.
    201  * @param uGstType     PGM_TYPE_*
    202  * @param uShwType     PGM_TYPE_*
    203  * @remark  ASSUMES certain order of the PGM_TYPE_* values.
    204  */
    205 #define PGM_WITH_NX(uGstType, uShwType)  \
    206     (   (uGstType) >= PGM_TYPE_PAE \
    207      && (uShwType) != PGM_TYPE_NESTED \
    208      && (uShwType) != PGM_TYPE_EPT)
    209 
    210 
    211 /** @def PGM_HCPHYS_2_PTR
    212  * Maps a HC physical page pool address to a virtual address.
    213  *
    214  * @returns VBox status code.
    215  * @param   pVM     The VM handle.
    216  * @param   HCPhys  The HC physical address to map to a virtual one.
    217  * @param   ppv     Where to store the virtual address. No need to cast this.
    218  *
    219  * @remark  In GC this uses PGMGCDynMapHCPage(), so it will consume part of the
    220  *          small page window employed by that function. Be careful.
    221  * @remark  There is no need to assert on the result.
    222  */
    223 #ifdef IN_RC
    224 # define PGM_HCPHYS_2_PTR(pVM, HCPhys, ppv) \
    225      PGMDynMapHCPage(pVM, HCPhys, (void **)(ppv))
    226 #elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    227 # define PGM_HCPHYS_2_PTR(pVM, HCPhys, ppv) \
    228      pgmR0DynMapHCPageInlined(&(pVM)->pgm.s, HCPhys, (void **)(ppv))
    229 #else
    230 # define PGM_HCPHYS_2_PTR(pVM, HCPhys, ppv) \
    231      MMPagePhys2PageEx(pVM, HCPhys, (void **)(ppv))
    232 #endif
    233 
    234 /** @def PGM_HCPHYS_2_PTR_BY_PGM
    235  * Maps a HC physical page pool address to a virtual address.
    236  *
    237  * @returns VBox status code.
    238  * @param   pPGM    The PGM instance data.
    239  * @param   HCPhys  The HC physical address to map to a virtual one.
    240  * @param   ppv     Where to store the virtual address. No need to cast this.
    241  *
    242  * @remark  In GC this uses PGMGCDynMapHCPage(), so it will consume part of the
    243  *          small page window employed by that function. Be careful.
    244  * @remark  There is no need to assert on the result.
    245  */
    246 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    247 # define PGM_HCPHYS_2_PTR_BY_PGM(pPGM, HCPhys, ppv) \
    248      pgmR0DynMapHCPageInlined(pPGM, HCPhys, (void **)(ppv))
    249 #else
    250 # define PGM_HCPHYS_2_PTR_BY_PGM(pPGM, HCPhys, ppv) \
    251      PGM_HCPHYS_2_PTR(PGM2VM(pPGM), HCPhys, (void **)(ppv))
    252 #endif
    253 
    254 /** @def PGM_GCPHYS_2_PTR
    255  * Maps a GC physical page address to a virtual address.
    256  *
    257  * @returns VBox status code.
    258  * @param   pVM     The VM handle.
    259  * @param   GCPhys  The GC physical address to map to a virtual one.
    260  * @param   ppv     Where to store the virtual address. No need to cast this.
    261  *
    262  * @remark  In GC this uses PGMGCDynMapGCPage(), so it will consume part of the
    263  *          small page window employed by that function. Be careful.
    264  * @remark  There is no need to assert on the result.
    265  */
    266 #ifdef IN_RC
    267 # define PGM_GCPHYS_2_PTR(pVM, GCPhys, ppv) \
    268      PGMDynMapGCPage(pVM, GCPhys, (void **)(ppv))
    269 #elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    270 # define PGM_GCPHYS_2_PTR(pVM, GCPhys, ppv) \
    271      pgmR0DynMapGCPageInlined(&(pVM)->pgm.s, GCPhys, (void **)(ppv))
    272 #else
    273 # define PGM_GCPHYS_2_PTR(pVM, GCPhys, ppv) \
    274      PGMPhysGCPhys2R3Ptr(pVM, GCPhys, 1 /* one page only */, (PRTR3PTR)(ppv)) /** @todo this isn't asserting, use PGMRamGCPhys2HCPtr! */
    275 #endif
    276 
    277 /** @def PGM_GCPHYS_2_PTR_BY_PGMCPU
    278  * Maps a GC physical page address to a virtual address.
    279  *
    280  * @returns VBox status code.
    281  * @param   pPGM    Pointer to the PGM instance data.
    282  * @param   GCPhys  The GC physical address to map to a virtual one.
    283  * @param   ppv     Where to store the virtual address. No need to cast this.
    284  *
    285  * @remark  In GC this uses PGMGCDynMapGCPage(), so it will consume part of the
    286  *          small page window employed by that function. Be careful.
    287  * @remark  There is no need to assert on the result.
    288  */
    289 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    290 # define PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, GCPhys, ppv) \
    291      pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), GCPhys, (void **)(ppv))
    292 #else
    293 # define PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, GCPhys, ppv) \
    294      PGM_GCPHYS_2_PTR(PGMCPU2VM(pPGM), GCPhys, ppv)
    295 #endif
    296 
    297 /** @def PGM_GCPHYS_2_PTR_EX
    298  * Maps an unaligned GC physical page address to a virtual address.
    299  *
    300  * @returns VBox status code.
    301  * @param   pVM     The VM handle.
    302  * @param   GCPhys  The GC physical address to map to a virtual one.
    303  * @param   ppv     Where to store the virtual address. No need to cast this.
    304  *
    305  * @remark  In GC this uses PGMGCDynMapGCPage(), so it will consume part of the
    306  *          small page window employed by that function. Be careful.
    307  * @remark  There is no need to assert on the result.
    308  */
    309 #if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    310 # define PGM_GCPHYS_2_PTR_EX(pVM, GCPhys, ppv) \
    311      PGMDynMapGCPageOff(pVM, GCPhys, (void **)(ppv))
    312 #else
    313 # define PGM_GCPHYS_2_PTR_EX(pVM, GCPhys, ppv) \
    314      PGMPhysGCPhys2R3Ptr(pVM, GCPhys, 1 /* one page only */, (PRTR3PTR)(ppv)) /** @todo this isn't asserting, use PGMRamGCPhys2HCPtr! */
    315 #endif
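
Illustrative usage of this macro family (not from the changeset): all variants are status-code based, so callers test the returned rc rather than the pointer:

    PX86PT pPT;
    int rc = PGM_GCPHYS_2_PTR(pVM, GCPhys, &pPT);
    if (RT_SUCCESS(rc))
    {
        /* In RC/R0 the pointer is only valid until the dynamic mapping
         * window recycles the entry, so use it promptly. */
    }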
    316 
    317 /** @def PGM_INVL_PG
    318  * Invalidates a page.
    319  *
    320  * @param   pVCpu       The VMCPU handle.
    321  * @param   GCVirt      The virtual address of the page to invalidate.
    322  */
    323 #ifdef IN_RC
    324 # define PGM_INVL_PG(pVCpu, GCVirt)             ASMInvalidatePage((void *)(GCVirt))
    325 #elif defined(IN_RING0)
    326 # define PGM_INVL_PG(pVCpu, GCVirt)             HWACCMInvalidatePage(pVCpu, (RTGCPTR)(GCVirt))
    327 #else
    328 # define PGM_INVL_PG(pVCpu, GCVirt)             HWACCMInvalidatePage(pVCpu, (RTGCPTR)(GCVirt))
    329 #endif
    330 
    331 /** @def PGM_INVL_PG_ALL_VCPU
    332  * Invalidates a page on all VCPUs
    333  *
    334  * @param   pVM         The VM handle.
    335  * @param   GCVirt      The virtual address of the page to invalidate.
    336  */
    337 #ifdef IN_RC
    338 # define PGM_INVL_PG_ALL_VCPU(pVM, GCVirt)      ASMInvalidatePage((void *)(GCVirt))
    339 #elif defined(IN_RING0)
    340 # define PGM_INVL_PG_ALL_VCPU(pVM, GCVirt)      HWACCMInvalidatePageOnAllVCpus(pVM, (RTGCPTR)(GCVirt))
    341 #else
    342 # define PGM_INVL_PG_ALL_VCPU(pVM, GCVirt)      HWACCMInvalidatePageOnAllVCpus(pVM, (RTGCPTR)(GCVirt))
    343 #endif
    344 
    345 /** @def PGM_INVL_BIG_PG
    346  * Invalidates a 4MB page directory entry.
    347  *
    348  * @param   pVCpu       The VMCPU handle.
    349  * @param   GCVirt      The virtual address within the page directory to invalidate.
    350  */
    351 #ifdef IN_RC
    352 # define PGM_INVL_BIG_PG(pVCpu, GCVirt)         ASMReloadCR3()
    353 #elif defined(IN_RING0)
    354 # define PGM_INVL_BIG_PG(pVCpu, GCVirt)         HWACCMFlushTLB(pVCpu)
    355 #else
    356 # define PGM_INVL_BIG_PG(pVCpu, GCVirt)         HWACCMFlushTLB(pVCpu)
    357 #endif
    358 
    359 /** @def PGM_INVL_VCPU_TLBS()
    360  * Invalidates the TLBs of the specified VCPU
    361  *
    362  * @param   pVCpu       The VMCPU handle.
    363  */
    364 #ifdef IN_RC
    365 # define PGM_INVL_VCPU_TLBS(pVCpu)             ASMReloadCR3()
    366 #elif defined(IN_RING0)
    367 # define PGM_INVL_VCPU_TLBS(pVCpu)             HWACCMFlushTLB(pVCpu)
    368 #else
    369 # define PGM_INVL_VCPU_TLBS(pVCpu)             HWACCMFlushTLB(pVCpu)
    370 #endif
    371 
    372 /** @def PGM_INVL_ALL_VCPU_TLBS()
    373  * Invalidates the TLBs of all VCPUs
    374  *
    375  * @param   pVM         The VM handle.
    376  */
    377 #ifdef IN_RC
    378 # define PGM_INVL_ALL_VCPU_TLBS(pVM)            ASMReloadCR3()
    379 #elif defined(IN_RING0)
    380 # define PGM_INVL_ALL_VCPU_TLBS(pVM)            HWACCMFlushTLBOnAllVCpus(pVM)
    381 #else
    382 # define PGM_INVL_ALL_VCPU_TLBS(pVM)            HWACCMFlushTLBOnAllVCpus(pVM)
    383 #endif
    384 
    385 /** Size of the GCPtrConflict array in PGMMAPPING.
    386  * @remarks Must be a power of two. */
    387 #define PGMMAPPING_CONFLICT_MAX         8
    388 
    389 /**
    390  * Structure for tracking GC Mappings.
    391  *
    392  * This structure is used in linked lists in both GC and HC.
    393  */
    394 typedef struct PGMMAPPING
    395 {
    396     /** Pointer to next entry. */
    397     R3PTRTYPE(struct PGMMAPPING *)      pNextR3;
    398     /** Pointer to next entry. */
    399     R0PTRTYPE(struct PGMMAPPING *)      pNextR0;
    400     /** Pointer to next entry. */
    401     RCPTRTYPE(struct PGMMAPPING *)      pNextRC;
    402     /** Indicate whether this entry is finalized. */
    403     bool                                fFinalized;
    404     /** Start Virtual address. */
    405     RTGCPTR                             GCPtr;
    406     /** Last Virtual address (inclusive). */
    407     RTGCPTR                             GCPtrLast;
    408     /** Range size (bytes). */
    409     RTGCPTR                             cb;
    410     /** Pointer to relocation callback function. */
    411     R3PTRTYPE(PFNPGMRELOCATE)           pfnRelocate;
    412     /** User argument to the callback. */
    413     R3PTRTYPE(void *)                   pvUser;
    414     /** Mapping description / name. For easing debugging. */
    415     R3PTRTYPE(const char *)             pszDesc;
    416     /** Last 8 addresses that caused conflicts. */
    417     RTGCPTR                             aGCPtrConflicts[PGMMAPPING_CONFLICT_MAX];
    418     /** Number of conflicts for this hypervisor mapping. */
    419     uint32_t                            cConflicts;
    420     /** Number of page tables. */
    421     uint32_t                            cPTs;
    422 
    423     /** Array of page table mapping data. Each entry
    424      * describes one page table. The array can be longer
    425      * than the declared length.
    426      */
    427     struct
    428     {
    429         /** The HC physical address of the page table. */
    430         RTHCPHYS                        HCPhysPT;
    431         /** The HC physical address of the first PAE page table. */
    432         RTHCPHYS                        HCPhysPaePT0;
    433         /** The HC physical address of the second PAE page table. */
    434         RTHCPHYS                        HCPhysPaePT1;
    435         /** The HC virtual address of the 32-bit page table. */
    436         R3PTRTYPE(PX86PT)               pPTR3;
    437         /** The HC virtual address of the two PAE page tables. (i.e. 1024 entries instead of 512) */
    438         R3PTRTYPE(PX86PTPAE)            paPaePTsR3;
    439         /** The RC virtual address of the 32-bit page table. */
    440         RCPTRTYPE(PX86PT)               pPTRC;
    441         /** The RC virtual address of the two PAE page tables. */
    442         RCPTRTYPE(PX86PTPAE)            paPaePTsRC;
    443         /** The R0 virtual address of the 32-bit page table. */
    444         R0PTRTYPE(PX86PT)               pPTR0;
    445         /** The R0 virtual address of the two PAE page tables. */
    446         R0PTRTYPE(PX86PTPAE)            paPaePTsR0;
    447     } aPTs[1];
    448 } PGMMAPPING;
    449 /** Pointer to structure for tracking GC Mappings. */
    450 typedef struct PGMMAPPING *PPGMMAPPING;
    451 
    452 
    453 /**
    454  * Physical page access handler structure.
    455  *
    456  * This is used to keep track of physical address ranges
    457  * which are being monitored in some kind of way.
    458  */
    459 typedef struct PGMPHYSHANDLER
    460 {
    461     AVLROGCPHYSNODECORE                 Core;
    462     /** Access type. */
    463     PGMPHYSHANDLERTYPE                  enmType;
    464     /** Number of pages to update. */
    465     uint32_t                            cPages;
    466     /** Pointer to R3 callback function. */
    467     R3PTRTYPE(PFNPGMR3PHYSHANDLER)      pfnHandlerR3;
    468     /** User argument for R3 handlers. */
    469     R3PTRTYPE(void *)                   pvUserR3;
    470     /** Pointer to R0 callback function. */
    471     R0PTRTYPE(PFNPGMR0PHYSHANDLER)      pfnHandlerR0;
    472     /** User argument for R0 handlers. */
    473     R0PTRTYPE(void *)                   pvUserR0;
    474     /** Pointer to RC callback function. */
    475     RCPTRTYPE(PFNPGMRCPHYSHANDLER)      pfnHandlerRC;
    476     /** User argument for RC handlers. */
    477     RCPTRTYPE(void *)                   pvUserRC;
    478     /** Description / Name. For easing debugging. */
    479     R3PTRTYPE(const char *)             pszDesc;
    480 #ifdef VBOX_WITH_STATISTICS
    481     /** Profiling of this handler. */
    482     STAMPROFILE                         Stat;
    483 #endif
    484 } PGMPHYSHANDLER;
    485 /** Pointer to a physical page access handler structure. */
    486 typedef PGMPHYSHANDLER *PPGMPHYSHANDLER;
    487 
    488 
    489 /**
    490  * Cache node for the physical addresses covered by a virtual handler.
    491  */
    492 typedef struct PGMPHYS2VIRTHANDLER
    493 {
    494     /** Core node for the tree based on physical ranges. */
    495     AVLROGCPHYSNODECORE                 Core;
    496     /** Offset from this struct to the PGMVIRTHANDLER structure. */
    497     int32_t                             offVirtHandler;
    498     /** Offset of the next alias relative to this one.
    499      * Bit 0 is used for indicating whether we're in the tree.
    500      * Bit 1 is used for indicating that we're the head node.
    501      */
    502     int32_t                             offNextAlias;
    503 } PGMPHYS2VIRTHANDLER;
    504 /** Pointer to a phys to virtual handler structure. */
    505 typedef PGMPHYS2VIRTHANDLER *PPGMPHYS2VIRTHANDLER;
    506 
    507 /** The bit in PGMPHYS2VIRTHANDLER::offNextAlias used to indicate that the
    508  * node is in the tree. */
    509 #define PGMPHYS2VIRTHANDLER_IN_TREE     RT_BIT(0)
    510 /** The bit in PGMPHYS2VIRTHANDLER::offNextAlias used to indicate that the
    511  * node is the head of an alias chain.
    512  * The PGMPHYS2VIRTHANDLER_IN_TREE is always set if this bit is set. */
    513 #define PGMPHYS2VIRTHANDLER_IS_HEAD     RT_BIT(1)
    514 /** The mask to apply to PGMPHYS2VIRTHANDLER::offNextAlias to get the offset. */
    515 #define PGMPHYS2VIRTHANDLER_OFF_MASK    (~(int32_t)3)
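
A hedged sketch (not from the changeset) of decoding the packed offNextAlias field, stripping the two flag bits to recover the byte offset to the next alias:

    bool fInTree = RT_BOOL(pCur->offNextAlias & PGMPHYS2VIRTHANDLER_IN_TREE);
    bool fIsHead = RT_BOOL(pCur->offNextAlias & PGMPHYS2VIRTHANDLER_IS_HEAD);
    PPGMPHYS2VIRTHANDLER pNext = (PPGMPHYS2VIRTHANDLER)
        ((intptr_t)pCur + (pCur->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));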
    516 
    517 
    518 /**
    519  * Virtual page access handler structure.
    520  *
    521  * This is used to keep track of virtual address ranges
    522  * which are being monitored in some kind of way.
    523  */
    524 typedef struct PGMVIRTHANDLER
    525 {
    526     /** Core node for the tree based on virtual ranges. */
    527     AVLROGCPTRNODECORE                  Core;
    528     /** Size of the range (in bytes). */
    529     RTGCPTR                             cb;
    530     /** Number of cache pages. */
    531     uint32_t                            cPages;
    532     /** Access type. */
    533     PGMVIRTHANDLERTYPE                  enmType;
    534     /** Pointer to the RC callback function. */
    535     RCPTRTYPE(PFNPGMRCVIRTHANDLER)      pfnHandlerRC;
    536 #if HC_ARCH_BITS == 64
    537     RTRCPTR                             padding;
    538 #endif
    539     /** Pointer to the R3 callback function for invalidation. */
    540     R3PTRTYPE(PFNPGMR3VIRTINVALIDATE)   pfnInvalidateR3;
    541     /** Pointer to the R3 callback function. */
    542     R3PTRTYPE(PFNPGMR3VIRTHANDLER)      pfnHandlerR3;
    543     /** Description / Name. For easing debugging. */
    544     R3PTRTYPE(const char *)             pszDesc;
    545 #ifdef VBOX_WITH_STATISTICS
    546     /** Profiling of this handler. */
    547     STAMPROFILE                         Stat;
    548 #endif
    549     /** Array of cached physical addresses for the monitored range. */
    550     PGMPHYS2VIRTHANDLER                 aPhysToVirt[HC_ARCH_BITS == 32 ? 1 : 2];
    551 } PGMVIRTHANDLER;
    552 /** Pointer to a virtual page access handler structure. */
    553 typedef PGMVIRTHANDLER *PPGMVIRTHANDLER;
    554 
    555 
    556 /**
    557  * Page type.
    558  *
    559  * @remarks This enum has to fit in a 3-bit field (see PGMPAGE::u3Type).
    560  * @remarks This is used in the saved state, so changes to it require bumping
    561  *          the saved state version.
    562  * @todo    So, convert to \#defines!
    563  */
    564 typedef enum PGMPAGETYPE
    565 {
    566     /** The usual invalid zero entry. */
    567     PGMPAGETYPE_INVALID = 0,
    568     /** RAM page. (RWX) */
    569     PGMPAGETYPE_RAM,
    570     /** MMIO2 page. (RWX) */
    571     PGMPAGETYPE_MMIO2,
    572     /** MMIO2 page aliased over an MMIO page. (RWX)
    573      * See PGMHandlerPhysicalPageAlias(). */
    574     PGMPAGETYPE_MMIO2_ALIAS_MMIO,
    575     /** Shadowed ROM. (RWX) */
    576     PGMPAGETYPE_ROM_SHADOW,
    577     /** ROM page. (R-X) */
    578     PGMPAGETYPE_ROM,
    579     /** MMIO page. (---) */
    580     PGMPAGETYPE_MMIO,
    581     /** End of valid entries. */
    582     PGMPAGETYPE_END
    583 } PGMPAGETYPE;
    584 AssertCompile(PGMPAGETYPE_END <= 7);
    585 
    586 /** @name Page type predicates.
    587  * @{ */
    588 #define PGMPAGETYPE_IS_READABLE(type)   ( (type) <= PGMPAGETYPE_ROM )
    589 #define PGMPAGETYPE_IS_WRITEABLE(type)  ( (type) <= PGMPAGETYPE_ROM_SHADOW )
    590 #define PGMPAGETYPE_IS_RWX(type)        ( (type) <= PGMPAGETYPE_ROM_SHADOW )
    591 #define PGMPAGETYPE_IS_ROX(type)        ( (type) == PGMPAGETYPE_ROM )
    592 #define PGMPAGETYPE_IS_NP(type)         ( (type) == PGMPAGETYPE_MMIO )
    593 /** @} */
    594 
    595 
    596 /**
    597  * A Physical Guest Page tracking structure.
    598  *
    599  * The format of this structure is complicated because we have to fit a lot
    600  * of information into as few bits as possible. The format is also subject
    601  * to change (there is one coming up soon), which means that for now we'll be
    602  * using PGM_PAGE_GET_*, PGM_PAGE_IS_* and PGM_PAGE_SET_* macros for *all*
    603  * accesses to the structure.
    604  */
    605 typedef struct PGMPAGE
    606 {
    607     /** The physical address and the Page ID. */
    608     RTHCPHYS    HCPhysAndPageID;
    609     /** Combination of:
    610      *  - [0-7]: u2HandlerPhysStateY - the physical handler state
    611      *    (PGM_PAGE_HNDL_PHYS_STATE_*).
    612      *  - [8-9]: u2HandlerVirtStateY - the virtual handler state
    613      *    (PGM_PAGE_HNDL_VIRT_STATE_*).
    614      *  - [15]:  fWrittenToY - flag indicating that a write monitored page was
    615      *    written to when set.
    616      *  - [10-14]: 5 unused bits.
    617      * @remarks Warning! All accesses to the bits are hardcoded.
    618      *
    619      * @todo    Change this to a union with both bitfields, u8 and u accessors.
    620      *          That'll help deal with some of the hardcoded accesses.
    621      *
    622      * @todo    Include uStateY and uTypeY as well so it becomes 32-bit.  This
    623      *          will make it possible to turn some of the 16-bit accesses into
    624      *          32-bit ones, which may be efficient (stalls).
    625      */
    626     RTUINT16U   u16MiscY;
    627     /** The page state.
    628      * Only 2 bits are really needed for this. */
    629     uint8_t     uStateY;
    630     /** The page type (PGMPAGETYPE).
    631      * Only 3 bits are really needed for this. */
    632     uint8_t     uTypeY;
    633     /** Usage tracking (page pool). */
    634     uint16_t    u16TrackingY;
    635     /** The number of read locks on this page. */
    636     uint8_t     cReadLocksY;
    637     /** The number of write locks on this page. */
    638     uint8_t     cWriteLocksY;
    639 } PGMPAGE;
    640 AssertCompileSize(PGMPAGE, 16);
    641 /** Pointer to a physical guest page. */
    642 typedef PGMPAGE *PPGMPAGE;
    643 /** Pointer to a const physical guest page. */
    644 typedef const PGMPAGE *PCPGMPAGE;
    645 /** Pointer to a physical guest page pointer. */
    646 typedef PPGMPAGE *PPPGMPAGE;
    647 
    648 
    649 /**
    650  * Clears the page structure.
    651  * @param   pPage       Pointer to the physical guest page tracking structure.
    652  */
    653 #define PGM_PAGE_CLEAR(pPage) \
    654     do { \
    655         (pPage)->HCPhysAndPageID     = 0; \
    656         (pPage)->uStateY             = 0; \
    657         (pPage)->uTypeY              = 0; \
    658         (pPage)->u16MiscY.u          = 0; \
    659         (pPage)->u16TrackingY        = 0; \
    660         (pPage)->cReadLocksY         = 0; \
    661         (pPage)->cWriteLocksY        = 0; \
    662     } while (0)
    663 
    664 /**
    665  * Initializes the page structure.
    666  * @param   pPage       Pointer to the physical guest page tracking structure.
    667  */
    668 #define PGM_PAGE_INIT(pPage, _HCPhys, _idPage, _uType, _uState) \
    669     do { \
    670         RTHCPHYS SetHCPhysTmp = (_HCPhys); \
    671         AssertFatal(!(SetHCPhysTmp & ~UINT64_C(0x0000fffffffff000))); \
    672         (pPage)->HCPhysAndPageID     = (SetHCPhysTmp << (28-12)) | ((_idPage) & UINT32_C(0x0fffffff)); \
    673         (pPage)->uStateY             = (_uState); \
    674         (pPage)->uTypeY              = (_uType); \
    675         (pPage)->u16MiscY.u          = 0; \
    676         (pPage)->u16TrackingY        = 0; \
    677         (pPage)->cReadLocksY         = 0; \
    678         (pPage)->cWriteLocksY        = 0; \
    679     } while (0)
    680 
    681 /**
    682  * Initializes the page structure of a ZERO page.
    683  * @param   pPage       Pointer to the physical guest page tracking structure.
    684  * @param   pVM         The VM handle (for getting the zero page address).
    685  * @param   uType       The page type (PGMPAGETYPE).
    686  */
    687 #define PGM_PAGE_INIT_ZERO(pPage, pVM, uType)  \
    688     PGM_PAGE_INIT((pPage), (pVM)->pgm.s.HCPhysZeroPg, NIL_GMM_PAGEID, (uType), PGM_PAGE_STATE_ZERO)
    689 
    690 
    691 /** @name The Page state, PGMPAGE::uStateY.
    692  * @{ */
    693 /** The zero page.
    694  * This is a per-VM page that's never ever mapped writable. */
    695 #define PGM_PAGE_STATE_ZERO             0
    696 /** An allocated page.
    697  * This is a per-VM page allocated from the page pool (or wherever
    698  * we get MMIO2 pages from if the type is MMIO2).
    699  */
    700 #define PGM_PAGE_STATE_ALLOCATED        1
    701 /** An allocated page that's being monitored for writes.
    702  * The shadow page table mappings are read-only. When a write occurs, the
    703  * fWrittenTo member is set, the page is remapped read-write and the state
    704  * moved back to allocated. */
    705 #define PGM_PAGE_STATE_WRITE_MONITORED  2
    706 /** The page is shared, aka. copy-on-write.
    707  * This is a page that's shared with other VMs. */
    708 #define PGM_PAGE_STATE_SHARED           3
    709 /** @} */
    710 
    711 
    712 /**
    713  * Gets the page state.
    714  * @returns page state (PGM_PAGE_STATE_*).
    715  * @param   pPage       Pointer to the physical guest page tracking structure.
    716  */
    717 #define PGM_PAGE_GET_STATE(pPage)           ( (pPage)->uStateY )
    718 
    719 /**
    720  * Sets the page state.
    721  * @param   pPage       Pointer to the physical guest page tracking structure.
    722  * @param   _uState     The new page state.
    723  */
    724 #define PGM_PAGE_SET_STATE(pPage, _uState)  do { (pPage)->uStateY = (_uState); } while (0)
    725 
    726 
    727 /**
    728  * Gets the host physical address of the guest page.
    729  * @returns host physical address (RTHCPHYS).
    730  * @param   pPage       Pointer to the physical guest page tracking structure.
    731  */
    732 #define PGM_PAGE_GET_HCPHYS(pPage)          ( ((pPage)->HCPhysAndPageID >> 28) << 12 )
    733 
    734 /**
    735  * Sets the host physical address of the guest page.
    736  * @param   pPage       Pointer to the physical guest page tracking structure.
    737  * @param   _HCPhys     The new host physical address.
    738  */
    739 #define PGM_PAGE_SET_HCPHYS(pPage, _HCPhys) \
    740     do { \
    741         RTHCPHYS SetHCPhysTmp = (_HCPhys); \
    742         AssertFatal(!(SetHCPhysTmp & ~UINT64_C(0x0000fffffffff000))); \
    743         (pPage)->HCPhysAndPageID = ((pPage)->HCPhysAndPageID & UINT32_C(0x0fffffff)) \
    744                                  | (SetHCPhysTmp << (28-12)); \
    745     } while (0)
    746 
    747 /**
    748  * Get the Page ID.
    749  * @returns The Page ID; NIL_GMM_PAGEID if it's a ZERO page.
    750  * @param   pPage       Pointer to the physical guest page tracking structure.
    751  */
    752 #define PGM_PAGE_GET_PAGEID(pPage)          (  (uint32_t)((pPage)->HCPhysAndPageID & UINT32_C(0x0fffffff)) )
    753 
    754 /**
    755  * Sets the Page ID.
    756  * @param   pPage       Pointer to the physical guest page tracking structure.
    757  */
    758 #define PGM_PAGE_SET_PAGEID(pPage, _idPage) \
    759     do { \
    760         (pPage)->HCPhysAndPageID = (((pPage)->HCPhysAndPageID) & UINT64_C(0xfffffffff0000000)) \
    761                                  | ((_idPage) & UINT32_C(0x0fffffff)); \
    762     } while (0)
    763 
    764 /**
    765  * Get the Chunk ID.
    766  * @returns The Chunk ID; NIL_GMM_CHUNKID if it's a ZERO page.
    767  * @param   pPage       Pointer to the physical guest page tracking structure.
    768  */
    769 #define PGM_PAGE_GET_CHUNKID(pPage)         ( PGM_PAGE_GET_PAGEID(pPage) >> GMM_CHUNKID_SHIFT )
    770 
    771 /**
    772  * Get the index of the page within the allocation chunk.
    773  * @returns The page index.
    774  * @param   pPage       Pointer to the physical guest page tracking structure.
    775  */
    776 #define PGM_PAGE_GET_PAGE_IN_CHUNK(pPage)   ( (uint32_t)((pPage)->HCPhysAndPageID & GMM_PAGEID_IDX_MASK) )
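
A standalone worked example of this packing (hypothetical values, not VBox code): HCPhys is page aligned, so its low 12 bits are zero; shifting it left by 16 (i.e. 28-12) parks the page frame number at bit 28 and leaves the low 28 bits for the page ID.

    #include <stdint.h>
    #include <assert.h>

    int main(void)
    {
        uint64_t HCPhys = UINT64_C(0x000000012345f000);   /* page aligned */
        uint32_t idPage = UINT32_C(0x0abcdef);            /* 28-bit page ID */

        /* The packing done by PGM_PAGE_INIT / PGM_PAGE_SET_HCPHYS: */
        uint64_t HCPhysAndPageID = (HCPhys << (28 - 12))
                                 | (idPage & UINT32_C(0x0fffffff));

        /* PGM_PAGE_GET_HCPHYS and PGM_PAGE_GET_PAGEID recover both parts: */
        assert(((HCPhysAndPageID >> 28) << 12) == HCPhys);
        assert((uint32_t)(HCPhysAndPageID & UINT32_C(0x0fffffff)) == idPage);
        return 0;
    }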
    777 
    778 /**
    779  * Gets the page type.
    780  * @returns The page type.
    781  * @param   pPage       Pointer to the physical guest page tracking structure.
    782  */
    783 #define PGM_PAGE_GET_TYPE(pPage)            (pPage)->uTypeY
    784 
    785 /**
    786  * Sets the page type.
    787  * @param   pPage       Pointer to the physical guest page tracking structure.
    788  * @param   _enmType    The new page type (PGMPAGETYPE).
    789  */
    790 #define PGM_PAGE_SET_TYPE(pPage, _enmType)  do { (pPage)->uTypeY = (_enmType); } while (0)
    791 
    792 /**
    793  * Checks if the page is marked for MMIO.
    794  * @returns true/false.
    795  * @param   pPage       Pointer to the physical guest page tracking structure.
    796  */
    797 #define PGM_PAGE_IS_MMIO(pPage)             ( (pPage)->uTypeY == PGMPAGETYPE_MMIO )
    798 
    799 /**
    800  * Checks if the page is backed by the ZERO page.
    801  * @returns true/false.
    802  * @param   pPage       Pointer to the physical guest page tracking structure.
    803  */
    804 #define PGM_PAGE_IS_ZERO(pPage)             ( (pPage)->uStateY == PGM_PAGE_STATE_ZERO )
    805 
    806 /**
    807  * Checks if the page is backed by a SHARED page.
    808  * @returns true/false.
    809  * @param   pPage       Pointer to the physical guest page tracking structure.
    810  */
    811 #define PGM_PAGE_IS_SHARED(pPage)           ( (pPage)->uStateY == PGM_PAGE_STATE_SHARED )
    812 
    813 
    814 /**
    815  * Marks the page as written to (for GMM change monitoring).
    816  * @param   pPage       Pointer to the physical guest page tracking structure.
    817  */
    818 #define PGM_PAGE_SET_WRITTEN_TO(pPage)      do { (pPage)->u16MiscY.au8[1] |= UINT8_C(0x80); } while (0)
    819 
    820 /**
    821  * Clears the written-to indicator.
    822  * @param   pPage       Pointer to the physical guest page tracking structure.
    823  */
    824 #define PGM_PAGE_CLEAR_WRITTEN_TO(pPage)    do { (pPage)->u16MiscY.au8[1] &= UINT8_C(0x7f); } while (0)
    825 
    826 /**
    827  * Checks if the page was marked as written-to.
    828  * @returns true/false.
    829  * @param   pPage       Pointer to the physical guest page tracking structure.
    830  */
    831 #define PGM_PAGE_IS_WRITTEN_TO(pPage)       ( !!((pPage)->u16MiscY.au8[1] & UINT8_C(0x80)) )
    832 
    833 
    834 /** Enables optimized access handler tests.
    835  * These optimizations make ASSUMPTIONS about the state values and the u16MiscY
    836  * layout.  When enabled, the compiler should normally generate more compact
    837  * code.
    838  */
    839 #define PGM_PAGE_WITH_OPTIMIZED_HANDLER_ACCESS 1
    840 
    841 /** @name Physical Access Handler State values (PGMPAGE::u2HandlerPhysStateY).
    842  *
    843  * @remarks The values are assigned in order of priority, so we can calculate
    844  *          the correct state for a page with different handlers installed.
    845  * @{ */
    846 /** No handler installed. */
    847 #define PGM_PAGE_HNDL_PHYS_STATE_NONE       0
    848 /** Monitoring is temporarily disabled. */
    849 #define PGM_PAGE_HNDL_PHYS_STATE_DISABLED   1
    850 /** Write access is monitored. */
    851 #define PGM_PAGE_HNDL_PHYS_STATE_WRITE      2
    852 /** All access is monitored. */
    853 #define PGM_PAGE_HNDL_PHYS_STATE_ALL        3
    854 /** @} */
    855 
    856 /**
    857  * Gets the physical access handler state of a page.
    858  * @returns PGM_PAGE_HNDL_PHYS_STATE_* value.
    859  * @param   pPage       Pointer to the physical guest page tracking structure.
    860  */
    861 #define PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)  \
    862     ( (pPage)->u16MiscY.au8[0] )
    863 
    864 /**
    865  * Sets the physical access handler state of a page.
    866  * @param   pPage       Pointer to the physical guest page tracking structure.
    867  * @param   _uState     The new state value.
    868  */
    869 #define PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, _uState) \
    870     do { (pPage)->u16MiscY.au8[0] = (_uState); } while (0)
    871 
    872 /**
    873  * Checks if the page has any physical access handlers, including temporarily disabled ones.
    874  * @returns true/false
    875  * @param   pPage       Pointer to the physical guest page tracking structure.
    876  */
    877 #define PGM_PAGE_HAS_ANY_PHYSICAL_HANDLERS(pPage) \
    878     ( PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE )
    879 
    880 /**
    881  * Checks if the page has any active physical access handlers.
    882  * @returns true/false
    883  * @param   pPage       Pointer to the physical guest page tracking structure.
    884  */
    885 #define PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage) \
    886     ( PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) >= PGM_PAGE_HNDL_PHYS_STATE_WRITE )
    887 
    888 
    889 /** @name Virtual Access Handler State values (PGMPAGE::u2HandlerVirtStateY).
    890  *
    891  * @remarks The values are assigned in order of priority, so we can calculate
    892  *          the correct state for a page with different handlers installed.
    893  * @{ */
    894 /** No handler installed. */
    895 #define PGM_PAGE_HNDL_VIRT_STATE_NONE       0
    896 /* 1 is reserved so the lineup is identical with the physical ones. */
    897 /** Write access is monitored. */
    898 #define PGM_PAGE_HNDL_VIRT_STATE_WRITE      2
    899 /** All access is monitored. */
    900 #define PGM_PAGE_HNDL_VIRT_STATE_ALL        3
    901 /** @} */
    902 
    903 /**
    904  * Gets the virtual access handler state of a page.
    905  * @returns PGM_PAGE_HNDL_VIRT_STATE_* value.
    906  * @param   pPage       Pointer to the physical guest page tracking structure.
    907  */
    908 #define PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) ( (pPage)->u16MiscY.au8[1] & UINT8_C(0x03) )
    909 
    910 /**
    911  * Sets the virtual access handler state of a page.
    912  * @param   pPage       Pointer to the physical guest page tracking structure.
    913  * @param   _uState     The new state value.
    914  */
    915 #define PGM_PAGE_SET_HNDL_VIRT_STATE(pPage, _uState) \
    916     do { \
    917         (pPage)->u16MiscY.au8[1] = ((pPage)->u16MiscY.au8[1] & UINT8_C(0xfc)) \
    918                                  | ((_uState)                & UINT8_C(0x03)); \
    919     } while (0)
    920 
    921 /**
    922  * Checks if the page has any virtual access handlers.
    923  * @returns true/false
    924  * @param   pPage       Pointer to the physical guest page tracking structure.
    925  */
    926 #define PGM_PAGE_HAS_ANY_VIRTUAL_HANDLERS(pPage) \
    927     ( PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) != PGM_PAGE_HNDL_VIRT_STATE_NONE )
    928 
    929 /**
    930  * Same as PGM_PAGE_HAS_ANY_VIRTUAL_HANDLERS - can't disable pages in
    931  * virtual handlers.
    932  * @returns true/false
    933  * @param   pPage       Pointer to the physical guest page tracking structure.
    934  */
    935 #define PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage) \
    936     PGM_PAGE_HAS_ANY_VIRTUAL_HANDLERS(pPage)
    937 
    938 
    939 /**
    940  * Checks if the page has any access handlers, including temporarily disabled ones.
    941  * @returns true/false
    942  * @param   pPage       Pointer to the physical guest page tracking structure.
    943  */
    944 #ifdef PGM_PAGE_WITH_OPTIMIZED_HANDLER_ACCESS
    945 # define PGM_PAGE_HAS_ANY_HANDLERS(pPage) \
    946     ( ((pPage)->u16MiscY.u & UINT16_C(0x0303)) != 0 )
    947 #else
    948 # define PGM_PAGE_HAS_ANY_HANDLERS(pPage) \
    949     (   PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE \
    950      || PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) != PGM_PAGE_HNDL_VIRT_STATE_NONE )
    951 #endif
    952 
    953 /**
    954  * Checks if the page has any active access handlers.
    955  * @returns true/false
    956  * @param   pPage       Pointer to the physical guest page tracking structure.
    957  */
    958 #ifdef PGM_PAGE_WITH_OPTIMIZED_HANDLER_ACCESS
    959 # define PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) \
    960     ( ((pPage)->u16MiscY.u & UINT16_C(0x0202)) != 0 )
    961 #else
    962 # define PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) \
    963     (   PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) >= PGM_PAGE_HNDL_PHYS_STATE_WRITE \
    964      || PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) >= PGM_PAGE_HNDL_VIRT_STATE_WRITE )
    965 #endif
    966 
    967 /**
    968  * Checks if the page has any active access handlers catching all accesses.
    969  * @returns true/false
    970  * @param   pPage       Pointer to the physical guest page tracking structure.
    971  */
    972 #ifdef PGM_PAGE_WITH_OPTIMIZED_HANDLER_ACCESS
    973 # define PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage) \
    974     (   ( ((pPage)->u16MiscY.au8[0] | (pPage)->u16MiscY.au8[1]) & UINT8_C(0x3) ) \
    975      == PGM_PAGE_HNDL_PHYS_STATE_ALL )
    976 #else
    977 # define PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage) \
    978     (   PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_ALL \
    979      || PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) == PGM_PAGE_HNDL_VIRT_STATE_ALL )
    980 #endif
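
A small standalone sketch of why the optimized masks work (assumes the little-endian au8[0]/au8[1] layout described above): the states are ordered NONE=0, DISABLED=1, WRITE=2, ALL=3, so bit 1 of each 2-bit field is set exactly for the active states:

    #include <stdint.h>
    #include <assert.h>

    int main(void)
    {
        /* Physical handler DISABLED (1), no virtual handler (0). */
        uint8_t  au8[2] = { 1, 0 };
        uint16_t u      = (uint16_t)(au8[0] | (au8[1] << 8));

        assert((u & 0x0303) != 0);  /* PGM_PAGE_HAS_ANY_HANDLERS:    yes */
        assert((u & 0x0202) == 0);  /* PGM_PAGE_HAS_ACTIVE_HANDLERS: no  */
        return 0;
    }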
    981 
    982 
    983 /** @def PGM_PAGE_GET_TRACKING
    984  * Gets the packed shadow page pool tracking data associated with a guest page.
    985  * @returns uint16_t containing the data.
    986  * @param   pPage       Pointer to the physical guest page tracking structure.
    987  */
    988 #define PGM_PAGE_GET_TRACKING(pPage)        ( (pPage)->u16TrackingY )
    989 
    990 /** @def PGM_PAGE_SET_TRACKING
    991  * Sets the packed shadow page pool tracking data associated with a guest page.
    992  * @param   pPage               Pointer to the physical guest page tracking structure.
    993  * @param   u16TrackingData     The tracking data to store.
    994  */
    995 #define PGM_PAGE_SET_TRACKING(pPage, u16TrackingData) \
    996     do { (pPage)->u16TrackingY = (u16TrackingData); } while (0)
    997 
    998 /** @def PGM_PAGE_GET_TD_CREFS
    999  * Gets the @a cRefs tracking data member.
    1000  * @returns cRefs.
    1001  * @param   pPage               Pointer to the physical guest page tracking structure.
    1002  */
    1003 #define PGM_PAGE_GET_TD_CREFS(pPage) \
    1004     ((PGM_PAGE_GET_TRACKING(pPage) >> PGMPOOL_TD_CREFS_SHIFT) & PGMPOOL_TD_CREFS_MASK)
    1005 
    1006 /** @def PGM_PAGE_GET_TD_IDX
    1007  * Gets the @a idx tracking data member.
    1008  * @returns idx.
    1009  * @param   pPage               Pointer to the physical guest page tracking structure.
    1010  */
    1011 #define PGM_PAGE_GET_TD_IDX(pPage) \
    1012     ((PGM_PAGE_GET_TRACKING(pPage) >> PGMPOOL_TD_IDX_SHIFT)   & PGMPOOL_TD_IDX_MASK)
    1013 
    1014 
    1015 /** Max number of locks on a page. */
    1016 #define PGM_PAGE_MAX_LOCKS                  UINT8_C(254)
    1017 
    1018 /** Get the read lock count.
    1019  * @returns count.
    1020  * @param   pPage               Pointer to the physical guest page tracking structure.
    1021  */
    1022 #define PGM_PAGE_GET_READ_LOCKS(pPage)      ( (pPage)->cReadLocksY )
    1023 
    1024 /** Get the write lock count.
    1025  * @returns count.
    1026  * @param   pPage               Pointer to the physical guest page tracking structure.
    1027  */
    1028 #define PGM_PAGE_GET_WRITE_LOCKS(pPage)     ( (pPage)->cWriteLocksY )
    1029 
    1030 /** Decrement the read lock counter.
    1031  * @param   pPage               Pointer to the physical guest page tracking structure.
    1032  */
    1033 #define PGM_PAGE_DEC_READ_LOCKS(pPage)      do { --(pPage)->cReadLocksY; } while (0)
    1034 
    1035 /** Decrement the write lock counter.
    1036  * @param   pPage               Pointer to the physical guest page tracking structure.
    1037  */
    1038 #define PGM_PAGE_DEC_WRITE_LOCKS(pPage)     do { --(pPage)->cWriteLocksY; } while (0)
    1039 
    1040 /** Increment the read lock counter.
    1041  * @param   pPage               Pointer to the physical guest page tracking structure.
    1042  */
    1043 #define PGM_PAGE_INC_READ_LOCKS(pPage)      do { ++(pPage)->cReadLocksY; } while (0)
    1044 
    1045 /** Increment the write lock counter.
    1046  * @param   pPage               Pointer to the physical guest page tracking structure.
    1047  */
    1048 #define PGM_PAGE_INC_WRITE_LOCKS(pPage)     do { ++(pPage)->cWriteLocksY; } while (0)
    1049 
    1050 
    1051 #if 0
    1052 /** Enables sanity checking of write monitoring using CRC-32.  */
    1053 # define PGMLIVESAVERAMPAGE_WITH_CRC32
    1054 #endif
    1055 
    1056 /**
    1057  * Per page live save tracking data.
    1058  */
    1059 typedef struct PGMLIVESAVERAMPAGE
    1060 {
    1061     /** Number of times it has been dirtied. */
    1062     uint32_t    cDirtied : 24;
    1063     /** Whether it is currently dirty. */
    1064     uint32_t    fDirty : 1;
    1065     /** Ignore the page.
    1066  *  This is used for pages that have been MMIO, MMIO2 or ROM pages once.  We will
    1067  *  deal with these after pausing the VM and DevPCI has said its bit about
    1068      *  remappings. */
    1069     uint32_t    fIgnore : 1;
    1070     /** Was a ZERO page last time around. */
    1071     uint32_t    fZero : 1;
    1072     /** Was a SHARED page last time around. */
    1073     uint32_t    fShared : 1;
    1074     /** Whether the page is/was write monitored in a previous pass. */
    1075     uint32_t    fWriteMonitored : 1;
    1076     /** Whether the page is/was write monitored earlier in this pass. */
    1077     uint32_t    fWriteMonitoredJustNow : 1;
    1078     /** Bits reserved for future use.  */
    1079     uint32_t    u2Reserved : 2;
    1080 #ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
    1081     /** CRC-32 for the page. This is for internal consistency checks.  */
    1082     uint32_t    u32Crc;
    1083 #endif
    1084 } PGMLIVESAVERAMPAGE;
    1085 #ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
    1086 AssertCompileSize(PGMLIVESAVERAMPAGE, 8);
    1087 #else
    1088 AssertCompileSize(PGMLIVESAVERAMPAGE, 4);
    1089 #endif
    1090 /** Pointer to the per page live save tracking data. */
    1091 typedef PGMLIVESAVERAMPAGE *PPGMLIVESAVERAMPAGE;
    1092 
    1093 /** The max value of PGMLIVESAVERAMPAGE::cDirtied. */
    1094 #define PGMLIVSAVEPAGE_MAX_DIRTIED 0x00fffff0
    1095 
    1096 
    1097 /**
    1098  * Ram range for GC Phys to HC Phys conversion.
    1099  *
    1100  * Can be used for HC Virt to GC Phys and HC Virt to HC Phys
    1101  * conversions too, but we'll let MM handle that for now.
    1102  *
    1103  * This structure is used by linked lists in both GC and HC.
    1104  */
    1105 typedef struct PGMRAMRANGE
    1106 {
    1107     /** Start of the range. Page aligned. */
    1108     RTGCPHYS                            GCPhys;
    1109     /** Size of the range. (Page aligned of course). */
    1110     RTGCPHYS                            cb;
    1111     /** Pointer to the next RAM range - for R3. */
    1112     R3PTRTYPE(struct PGMRAMRANGE *)     pNextR3;
    1113     /** Pointer to the next RAM range - for R0. */
    1114     R0PTRTYPE(struct PGMRAMRANGE *)     pNextR0;
    1115     /** Pointer to the next RAM range - for RC. */
    1116     RCPTRTYPE(struct PGMRAMRANGE *)     pNextRC;
    1117     /** PGM_RAM_RANGE_FLAGS_* flags. */
    1118     uint32_t                            fFlags;
    1119     /** Last address in the range (inclusive). Page aligned (-1). */
    1120     RTGCPHYS                            GCPhysLast;
    1121     /** Start of the HC mapping of the range. This is only used for MMIO2. */
    1122     R3PTRTYPE(void *)                   pvR3;
    1123     /** Live save per page tracking data. */
    1124     R3PTRTYPE(PPGMLIVESAVERAMPAGE)         paLSPages;
    1125     /** The range description. */
    1126     R3PTRTYPE(const char *)             pszDesc;
    1127     /** Pointer to self - R0 pointer. */
    1128     R0PTRTYPE(struct PGMRAMRANGE *)     pSelfR0;
    1129     /** Pointer to self - RC pointer. */
    1130     RCPTRTYPE(struct PGMRAMRANGE *)     pSelfRC;
    1131     /** Padding to make aPage aligned on sizeof(PGMPAGE). */
    1132     uint32_t                            au32Alignment2[HC_ARCH_BITS == 32 ? 1 : 3];
    1133     /** Array of physical guest page tracking structures. */
    1134     PGMPAGE                             aPages[1];
    1135 } PGMRAMRANGE;
    1136 /** Pointer to Ram range for GC Phys to HC Phys conversion. */
    1137 typedef PGMRAMRANGE *PPGMRAMRANGE;
    1138 
    1139 /** @name PGMRAMRANGE::fFlags
    1140  * @{ */
    1141 /** The RAM range is floating around as an independent guest mapping. */
    1142 #define PGM_RAM_RANGE_FLAGS_FLOATING        RT_BIT(20)
    1143 /** Ad hoc RAM range for a ROM mapping. */
    1144 #define PGM_RAM_RANGE_FLAGS_AD_HOC_ROM      RT_BIT(21)
    1145 /** Ad hoc RAM range for an MMIO mapping. */
    1146 #define PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO     RT_BIT(22)
    1147 /** Ad hoc RAM range for an MMIO2 mapping. */
    1148 #define PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO2    RT_BIT(23)
    1149 /** @} */
    1150 
    1151 /** Tests if a RAM range is an ad hoc one or not.
    1152  * @returns true/false.
    1153  * @param   pRam    The RAM range.
    1154  */
    1155 #define PGM_RAM_RANGE_IS_AD_HOC(pRam) \
    1156     (!!( (pRam)->fFlags & (PGM_RAM_RANGE_FLAGS_AD_HOC_ROM | PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO | PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO2) ) )
    1157 
    1158 
    1159 /**
    1160  * Per page tracking structure for ROM image.
    1161  *
    1162  * A ROM image may have a shadow page, in which case we may have two pages
    1163  * backing it.  This structure contains the PGMPAGE for both, while the
    1164  * PGMRAMRANGE has a copy of the active one.  It is important that these
    1165  * aren't out of sync in any regard other than page pool tracking data.
    1166  */
    1167 typedef struct PGMROMPAGE
    1168 {
    1169     /** The page structure for the virgin ROM page. */
    1170     PGMPAGE     Virgin;
    1171     /** The page structure for the shadow RAM page. */
    1172     PGMPAGE     Shadow;
    1173     /** The current protection setting. */
    1174     PGMROMPROT  enmProt;
    1175     /** Live save status information. Makes use of unused alignment space. */
    1176     struct
    1177     {
    1178         /** The previous protection value. */
    1179         uint8_t u8Prot;
    1180         /** Written to flag set by the handler. */
    1181         bool    fWrittenTo;
    1182         /** Whether the shadow page is dirty or not. */
    1183         bool    fDirty;
    1184         /** Whether it was dirtied recently. */
    1185         bool    fDirtiedRecently;
    1186     } LiveSave;
    1187 } PGMROMPAGE;
    1188 AssertCompileSizeAlignment(PGMROMPAGE, 8);
    1189 /** Pointer to a ROM page tracking structure. */
    1190 typedef PGMROMPAGE *PPGMROMPAGE;
    1191 
    1192 
    1193 /**
    1194  * A registered ROM image.
    1195  *
    1196  * This is needed to keep track of ROM images since they generally intrude
    1197  * into a PGMRAMRANGE.  It also keeps track of additional info like the
    1198  * two page sets (read-only virgin and read-write shadow) and the current
    1199  * state of each page.
    1200  *
    1201  * Because access handlers cannot easily be executed in a different
    1202  * context, the ROM ranges need to be accessible in all contexts.
    1203  */
    1204 typedef struct PGMROMRANGE
    1205 {
    1206     /** Pointer to the next range - R3. */
    1207     R3PTRTYPE(struct PGMROMRANGE *)     pNextR3;
    1208     /** Pointer to the next range - R0. */
    1209     R0PTRTYPE(struct PGMROMRANGE *)     pNextR0;
    1210     /** Pointer to the next range - RC. */
    1211     RCPTRTYPE(struct PGMROMRANGE *)     pNextRC;
    1212     /** Pointer alignment */
    1213     RTRCPTR                             RCPtrAlignment;
    1214     /** Address of the range. */
    1215     RTGCPHYS                            GCPhys;
    1216     /** Address of the last byte in the range. */
    1217     RTGCPHYS                            GCPhysLast;
    1218     /** Size of the range. */
    1219     RTGCPHYS                            cb;
    1220     /** The flags (PGMPHYS_ROM_FLAGS_*). */
    1221     uint32_t                            fFlags;
    1222     /** The saved state range ID. */
    1223     uint8_t                             idSavedState;
    1224     /** Alignment padding. */
    1225     uint8_t                             au8Alignment[3];
    1226     /** Alignment padding ensuring that aPages is sizeof(PGMROMPAGE) aligned. */
    1227     uint32_t                            au32Alignemnt[HC_ARCH_BITS == 32 ? 6 : 2];
    1228     /** Pointer to the original bits when PGMPHYS_ROM_FLAGS_PERMANENT_BINARY was specified.
    1229      * This is used for strictness checks. */
    1230     R3PTRTYPE(const void *)             pvOriginal;
    1231     /** The ROM description. */
    1232     R3PTRTYPE(const char *)             pszDesc;
    1233     /** The per page tracking structures. */
    1234     PGMROMPAGE                          aPages[1];
    1235 } PGMROMRANGE;
    1236 /** Pointer to a ROM range. */
    1237 typedef PGMROMRANGE *PPGMROMRANGE;
    1238 
    1239 
    1240 /**
    1241  * Live save per page data for an MMIO2 page.
    1242  *
    1243  * Not using PGMLIVESAVERAMPAGE here because we cannot use normal write monitoring
    1244  * of MMIO2 pages.  The current approach is using some optimistic SHA-1 +
    1245  * CRC-32 for detecting changes as well as special handling of zero pages.  This
    1246  * is a TEMPORARY measure which isn't perfect, but hopefully it is good enough
    1247  * for speeding things up.  (We're using SHA-1 and not SHA-256 or SHA-512
    1248  * because of speed (2.5x and 6x slower).)
    1249  *
    1250  * @todo Implement dirty MMIO2 page reporting that can be enabled during live
    1251  *       save but normally is disabled.  Since we can write monitor guest
    1252  *       accesses on our own, we only need this for host accesses.  Shouldn't be
    1253  *       too difficult for DevVGA, VMMDev might be doable, the planned
    1254  *       networking fun will be fun since it involves ring-0.
    1255  */
    1256 typedef struct PGMLIVESAVEMMIO2PAGE
    1257 {
    1258     /** Set if the page is considered dirty. */
    1259     bool        fDirty;
    1260     /** The number of scans this page has remained unchanged for.
    1261      * Only updated for dirty pages. */
    1262     uint8_t     cUnchangedScans;
    1263     /** Whether this page was zero at the last scan. */
    1264     bool        fZero;
    1265     /** Alignment padding. */
    1266     bool        fReserved;
    1267     /** CRC-32 for the first half of the page.
    1268      * This is used together with u32CrcH2 to quickly detect changes in the page
    1269      * during the non-final passes.  */
    1270     uint32_t    u32CrcH1;
    1271     /** CRC-32 for the second half of the page. */
    1272     uint32_t    u32CrcH2;
    1273     /** SHA-1 for the saved page.
    1274      * This is used in the final pass to skip pages without changes. */
    1275     uint8_t     abSha1Saved[RTSHA1_HASH_SIZE];
    1276 } PGMLIVESAVEMMIO2PAGE;
    1277 /** Pointer to a live save status data for an MMIO2 page. */
    1278 typedef PGMLIVESAVEMMIO2PAGE *PPGMLIVESAVEMMIO2PAGE;
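
/* Editor's note: a minimal sketch (not part of the original file) of the scan
 * logic described above.  Assumes RTCrc32 (iprt/crc32.h), RTSha1 (iprt/sha.h)
 * and memcmp (iprt/string.h) are available; the function name is made up. */
DECLINLINE(bool) pgmExampleMmio2PageChanged(PPGMLIVESAVEMMIO2PAGE pLSPage, uint8_t const *pbPage, bool fFinalPass)
{
    if (!fFinalPass)
        /* Non-final passes: cheap CRC-32 over each half of the page. */
        return RTCrc32(pbPage, PAGE_SIZE / 2) != pLSPage->u32CrcH1
            || RTCrc32(pbPage + PAGE_SIZE / 2, PAGE_SIZE / 2) != pLSPage->u32CrcH2;

    /* Final pass: SHA-1 of the whole page vs. the digest of the last saved copy. */
    uint8_t abSha1[RTSHA1_HASH_SIZE];
    RTSha1(pbPage, PAGE_SIZE, abSha1);
    return memcmp(abSha1, pLSPage->abSha1Saved, sizeof(abSha1)) != 0;
}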
    1279 
    1280 /**
    1281  * A registered MMIO2 (= Device RAM) range.
    1282  *
    1283  * There are a few reasons why we need to keep track of these
    1284  * registrations.  One of them is the deregistration & cleanup stuff,
    1285  * while another is that the PGMRAMRANGE associated with such a region may
    1286  * have to be removed from the RAM range list.
    1287  *
    1288  * Overlapping with a RAM range has to be 100% or none at all.  The pages
    1289  * in the existing RAM range must not be ROM nor MMIO.  A guru meditation
    1290  * will be raised if a partial overlap or an overlap of ROM pages is
    1291  * encountered.  On an overlap we will free all the existing RAM pages and
    1292  * put in the pages of this range instead (see the sketch below the structure).
    1293  */
    1294 typedef struct PGMMMIO2RANGE
    1295 {
    1296     /** The owner of the range. (a device) */
    1297     PPDMDEVINSR3                        pDevInsR3;
    1298     /** Pointer to the ring-3 mapping of the allocation. */
    1299     RTR3PTR                             pvR3;
    1300     /** Pointer to the next range - R3. */
    1301     R3PTRTYPE(struct PGMMMIO2RANGE *)   pNextR3;
    1302     /** Whether it's mapped or not. */
    1303     bool                                fMapped;
    1304     /** Whether it's overlapping or not. */
    1305     bool                                fOverlapping;
    1306     /** The PCI region number.
    1307      * @remarks This ASSUMES that a single device will never really need to
    1308      *          register multiple MMIO2 ranges with matching PCI region numbers. */
    1309     uint8_t                             iRegion;
    1310     /** The saved state range ID. */
    1311     uint8_t                             idSavedState;
    1312     /** Alignment padding for putting the RAM range on a PGMPAGE alignment boundary. */
    1313     uint8_t                             abAlignment[HC_ARCH_BITS == 32 ? 12 : 12];
    1314     /** Live save per page tracking data. */
    1315     R3PTRTYPE(PPGMLIVESAVEMMIO2PAGE)    paLSPages;
    1316     /** The associated RAM range. */
    1317     PGMRAMRANGE                         RamRange;
    1318 } PGMMMIO2RANGE;
    1319 /** Pointer to a MMIO2 range. */
    1320 typedef PGMMMIO2RANGE *PPGMMMIO2RANGE;
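
/* Editor's note: a minimal sketch (not part of the original file) of the
 * 100%-or-none overlap rule stated above, written as a plain predicate.  The
 * function name is made up. */
DECLINLINE(bool) pgmExampleMmio2OverlapOk(RTGCPHYS GCPhys, RTGCPHYS GCPhysLast, PPGMRAMRANGE pRam)
{
    /* Either entirely outside the existing RAM range ... */
    if (GCPhysLast < pRam->GCPhys || GCPhys > pRam->GCPhysLast)
        return true;
    /* ... or entirely inside it; anything partial means a guru meditation.
     * (The real code additionally rejects ROM and MMIO pages in the overlap.) */
    return GCPhys >= pRam->GCPhys && GCPhysLast <= pRam->GCPhysLast;
}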
    1321 
    1322 
    1323 
    1324 
    1325 /**
    1326  * PGMPhysRead/Write cache entry
    1327  */
    1328 typedef struct PGMPHYSCACHEENTRY
    1329 {
    1330     /** R3 pointer to physical page. */
    1331     R3PTRTYPE(uint8_t *)                pbR3;
    1332     /** GC Physical address for cache entry */
    1333     RTGCPHYS                            GCPhys;
    1334 #if HC_ARCH_BITS == 64 && GC_ARCH_BITS == 32
    1335     RTGCPHYS                            u32Padding0; /**< alignment padding. */
    1336 #endif
    1337 } PGMPHYSCACHEENTRY;
    1338 
    1339 /**
    1340  * PGMPhysRead/Write cache to reduce REM memory access overhead
    1341  */
    1342 typedef struct PGMPHYSCACHE
    1343 {
    1344     /** Bitmap of valid cache entries */
    1345     uint64_t                            aEntries;
    1346     /** Cache entries */
    1347     PGMPHYSCACHEENTRY                   Entry[PGM_MAX_PHYSCACHE_ENTRIES];
    1348 } PGMPHYSCACHE;
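
/* Editor's note: a minimal sketch (not part of the original file) of a probe
 * against this cache in ring-3.  The index derivation here is an assumption
 * for illustration; the real hashing lives with the cache users. */
DECLINLINE(uint8_t *) pgmExamplePhysCacheLookup(PGMPHYSCACHE *pCache, RTGCPHYS GCPhys)
{
    uint64_t const iEntry = (GCPhys >> PAGE_SHIFT) % PGM_MAX_PHYSCACHE_ENTRIES;
    if (pCache->aEntries & RT_BIT_64(iEntry)) /* is the entry marked valid? */
    {
        PGMPHYSCACHEENTRY *pEntry = &pCache->Entry[iEntry];
        if (pEntry->GCPhys == (GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK))
            return pEntry->pbR3 + (GCPhys & PAGE_OFFSET_MASK);
    }
    return NULL; /* cache miss */
}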
    1349 
    1350 
    1351 /** Pointer to an allocation chunk ring-3 mapping. */
    1352 typedef struct PGMCHUNKR3MAP *PPGMCHUNKR3MAP;
    1353 /** Pointer to an allocation chunk ring-3 mapping pointer. */
    1354 typedef PPGMCHUNKR3MAP *PPPGMCHUNKR3MAP;
    1355 
    1356 /**
    1357  * Ring-3 tracking structure for an allocation chunk ring-3 mapping.
    1358  *
    1359  * The primary tree (Core) uses the chunk id as key.
    1360  * The secondary tree (AgeCore) is used for ageing and uses the ageing sequence number as key.
    1361  */
    1362 typedef struct PGMCHUNKR3MAP
    1363 {
    1364     /** The key is the chunk id. */
    1365     AVLU32NODECORE                      Core;
    1366     /** The key is the ageing sequence number. */
    1367     AVLLU32NODECORE                     AgeCore;
    1368     /** The current age value. */
    1369     uint32_t                            iAge;
    1370     /** The current reference count. */
    1371     uint32_t volatile                   cRefs;
    1372     /** The current permanent reference count. */
    1373     uint32_t volatile                   cPermRefs;
    1374     /** The mapping address. */
    1375     void                               *pv;
    1376 } PGMCHUNKR3MAP;
    1377 
    1378 /**
    1379  * Allocation chunk ring-3 mapping TLB entry.
    1380  */
    1381 typedef struct PGMCHUNKR3MAPTLBE
    1382 {
    1383     /** The chunk id. */
    1384     uint32_t volatile                   idChunk;
    1385 #if HC_ARCH_BITS == 64
    1386     uint32_t                            u32Padding; /**< alignment padding. */
    1387 #endif
    1388     /** The chunk map. */
    1389 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    1390     R3PTRTYPE(PPGMCHUNKR3MAP) volatile  pChunk;
    1391 #else
    1392     R3R0PTRTYPE(PPGMCHUNKR3MAP) volatile  pChunk;
    1393 #endif
    1394 } PGMCHUNKR3MAPTLBE;
    1395 /** Pointer to an allocation chunk ring-3 mapping TLB entry. */
    1396 typedef PGMCHUNKR3MAPTLBE *PPGMCHUNKR3MAPTLBE;
    1397 
    1398 /** The number of TLB entries in PGMCHUNKR3MAPTLB.
    1399  * @remark Must be a power of two. */
    1400 #define PGM_CHUNKR3MAPTLB_ENTRIES   64
    1401 
    1402 /**
    1403  * Allocation chunk ring-3 mapping TLB.
    1404  *
    1405  * @remarks We use a TLB to speed up lookups by avoiding walking the AVL.
    1406  *          At first glance this might look kinda odd since AVL trees are
    1407  *          supposed to give the most optimal lookup times of all trees
    1408  *          due to their balancing. However, take a tree with 1023 nodes
    1409  *          in it, that's 10 levels, meaning that most searches have to go
    1410  *          down 9 levels before they find what they want. This isn't fast
    1411  *          compared to a TLB hit. There are also the factors of cache misses
    1412  *          and, of course, the poor branch prediction behaviour of trees.
    1413  *          This is why we use TLBs in front of most of the trees.
    1414  *
    1415  * @todo    Generalize this TLB + AVL stuff, shouldn't be all that
    1416  *          difficult when we switch to the new inlined AVL trees (from kStuff).
    1417  */
    1418 typedef struct PGMCHUNKR3MAPTLB
    1419 {
    1420     /** The TLB entries. */
    1421     PGMCHUNKR3MAPTLBE   aEntries[PGM_CHUNKR3MAPTLB_ENTRIES];
    1422 } PGMCHUNKR3MAPTLB;
    1423 
    1424 /**
    1425  * Calculates the index of a guest page in the Ring-3 Chunk TLB.
    1426  * @returns Chunk TLB index.
    1427  * @param   idChunk         The Chunk ID.
    1428  */
    1429 #define PGM_CHUNKR3MAPTLB_IDX(idChunk)     ( (idChunk) & (PGM_CHUNKR3MAPTLB_ENTRIES - 1) )
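
/* Editor's note: a minimal sketch (not part of the original file) of the
 * TLB-in-front-of-the-AVL-tree pattern motivated by the remark above.  The
 * function name is made up, and the real code must also deal with locking,
 * ageing and reference counting. */
DECLINLINE(PPGMCHUNKR3MAP) pgmExampleChunkLookup(PGMCHUNKR3MAPTLB *pTlb, PAVLU32TREE pTree, uint32_t idChunk)
{
    /* Hot path: one load and compare instead of ~10 tree levels. */
    PPGMCHUNKR3MAPTLBE pTlbe = &pTlb->aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
    if (pTlbe->idChunk == idChunk)
        return pTlbe->pChunk;
    /* Miss: fall back to the O(log n) tree walk and refill the TLB entry. */
    PPGMCHUNKR3MAP pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(pTree, idChunk);
    if (pMap)
    {
        pTlbe->pChunk  = pMap;
        pTlbe->idChunk = idChunk;
    }
    return pMap;
}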
    1430 
    1431 
    1432 /**
    1433  * Ring-3 guest page mapping TLB entry.
    1434  * @remarks Used in ring-0 as well at the moment.
    1435  */
    1436 typedef struct PGMPAGER3MAPTLBE
    1437 {
    1438     /** Address of the page. */
    1439     RTGCPHYS volatile                   GCPhys;
    1440     /** The guest page. */
    1441 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    1442     R3PTRTYPE(PPGMPAGE) volatile        pPage;
    1443 #else
    1444     R3R0PTRTYPE(PPGMPAGE) volatile      pPage;
    1445 #endif
    1446     /** Pointer to the page mapping tracking structure, PGMCHUNKR3MAP. */
    1447 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    1448     R3PTRTYPE(PPGMCHUNKR3MAP) volatile  pMap;
    1449 #else
    1450     R3R0PTRTYPE(PPGMCHUNKR3MAP) volatile pMap;
    1451 #endif
    1452     /** The address. */
    1453 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    1454     R3PTRTYPE(void *) volatile          pv;
    1455 #else
    1456     R3R0PTRTYPE(void *) volatile        pv;
    1457 #endif
    1458 #if HC_ARCH_BITS == 32
    1459     uint32_t                            u32Padding; /**< alignment padding. */
    1460 #endif
    1461 } PGMPAGER3MAPTLBE;
    1462 /** Pointer to an entry in the HC physical TLB. */
    1463 typedef PGMPAGER3MAPTLBE *PPGMPAGER3MAPTLBE;
    1464 
    1465 
    1466 /** The number of entries in the ring-3 guest page mapping TLB.
    1467  * @remarks The value must be a power of two. */
    1468 #define PGM_PAGER3MAPTLB_ENTRIES 256
    1469 
    1470 /**
    1471  * Ring-3 guest page mapping TLB.
    1472  * @remarks Used in ring-0 as well at the moment.
    1473  */
    1474 typedef struct PGMPAGER3MAPTLB
    1475 {
    1476     /** The TLB entries. */
    1477     PGMPAGER3MAPTLBE            aEntries[PGM_PAGER3MAPTLB_ENTRIES];
    1478 } PGMPAGER3MAPTLB;
    1479 /** Pointer to the ring-3 guest page mapping TLB. */
    1480 typedef PGMPAGER3MAPTLB *PPGMPAGER3MAPTLB;
    1481 
    1482 /**
    1483  * Calculates the index of the TLB entry for the specified guest page.
    1484  * @returns Physical TLB index.
    1485  * @param   GCPhys      The guest physical address.
    1486  */
    1487 #define PGM_PAGER3MAPTLB_IDX(GCPhys)    ( ((GCPhys) >> PAGE_SHIFT) & (PGM_PAGER3MAPTLB_ENTRIES - 1) )
    1488 
    1489 
    1490 /**
    1491  * Mapping cache usage set entry.
    1492  *
    1493  * @remarks 16-bit ints were chosen as the set is not expected to be used beyond
    1494  *          the dynamic ring-0 and (to some extent) raw-mode context mapping
    1495  *          cache. If it's extended to include ring-3, well, then something will
    1496  *          have to be changed here...
    1497  */
    1498 typedef struct PGMMAPSETENTRY
    1499 {
    1500     /** The mapping cache index. */
    1501     uint16_t                    iPage;
    1502     /** The number of references.
    1503      * The max is UINT16_MAX - 1. */
    1504     uint16_t                    cRefs;
    1505 #if HC_ARCH_BITS == 64
    1506     uint32_t                    alignment;
    1507 #endif
    1508     /** Pointer to the page. */
    1509     RTR0PTR                     pvPage;
    1510     /** The physical address for this entry. */
    1511     RTHCPHYS                    HCPhys;
    1512 } PGMMAPSETENTRY;
    1513 /** Pointer to a mapping cache usage set entry. */
    1514 typedef PGMMAPSETENTRY *PPGMMAPSETENTRY;
    1515 
    1516 /**
    1517  * Mapping cache usage set.
    1518  *
    1519  * This is used in ring-0 and the raw-mode context to track dynamic mappings
    1520  * done during exits / traps.
    1521  */
    1522 typedef struct PGMMAPSET
    1523 {
    1524     /** The number of occupied entries.
    1525      * This is PGMMAPSET_CLOSED if the set is closed and we're not supposed to do
    1526      * dynamic mappings. */
    1527     uint32_t                    cEntries;
    1528     /** The start of the current subset.
    1529      * This is UINT32_MAX if no subset is currently open. */
    1530     uint32_t                    iSubset;
    1531     /** The index of the current CPU, only valid if the set is open. */
    1532     int32_t                     iCpu;
    1533     uint32_t                    alignment;
    1534     /** The entries. */
    1535     PGMMAPSETENTRY              aEntries[64];
    1536     /** HCPhys -> iEntry fast lookup table.
    1537      * Use PGMMAPSET_HASH for hashing.
    1538      * The entries may or may not be valid, check against cEntries. */
    1539     uint8_t                     aiHashTable[128];
    1540 } PGMMAPSET;
    1541 AssertCompileSizeAlignment(PGMMAPSET, 8);
    1542 /** Pointer to the mapping cache set. */
    1543 typedef PGMMAPSET *PPGMMAPSET;
    1544 
    1545 /** PGMMAPSET::cEntries value for a closed set. */
    1546 #define PGMMAPSET_CLOSED            UINT32_C(0xdeadc0fe)
    1547 
    1548 /** Hash function for aiHashTable. */
    1549 #define PGMMAPSET_HASH(HCPhys)      (((HCPhys) >> PAGE_SHIFT) & 127)
    1550 
    1551 /** The max fill size (strict builds). */
    1552 #define PGMMAPSET_MAX_FILL          (64U * 80U / 100U)
    1553 
    1554 
    1555 /** @name Context neutral page mapper TLB.
    1556  *
    1557  * Hoping to avoid some code and bug duplication, parts of the GCxxx->CCPtr
    1558  * code is written in a kind of context neutral way. Time will show whether
    1559  * this actually makes sense or not...
    1560  *
    1561  * @todo this needs to be reconsidered and dropped/redone since the ring-0
    1562  *       context ends up using a global mapping cache on some platforms
    1563  *       (darwin).
    1564  *
    1565  * @{ */
    1566 /** @typedef PPGMPAGEMAPTLB
    1567  * The page mapper TLB pointer type for the current context. */
    1568 /** @typedef PPGMPAGEMAPTLBE
    1569  * The page mapper TLB entry pointer type for the current context. */
    1570 /** @typedef PPPGMPAGEMAPTLBE
    1571  * The page mapper TLB entry pointer pointer type for the current context. */
    1572 /** @def PGM_PAGEMAPTLB_ENTRIES
    1573  * The number of TLB entries in the page mapper TLB for the current context. */
    1574 /** @def PGM_PAGEMAPTLB_IDX
    1575  * Calculate the TLB index for a guest physical address.
    1576  * @returns The TLB index.
    1577  * @param   GCPhys      The guest physical address. */
    1578 /** @typedef PPGMPAGEMAP
    1579  * Pointer to a page mapper unit for current context. */
    1580 /** @typedef PPPGMPAGEMAP
    1581  * Pointer to a page mapper unit pointer for current context. */
    1582 #ifdef IN_RC
    1583 // typedef PPGMPAGEGCMAPTLB               PPGMPAGEMAPTLB;
    1584 // typedef PPGMPAGEGCMAPTLBE              PPGMPAGEMAPTLBE;
    1585 // typedef PPGMPAGEGCMAPTLBE             *PPPGMPAGEMAPTLBE;
    1586 # define PGM_PAGEMAPTLB_ENTRIES         PGM_PAGEGCMAPTLB_ENTRIES
    1587 # define PGM_PAGEMAPTLB_IDX(GCPhys)     PGM_PAGEGCMAPTLB_IDX(GCPhys)
    1588  typedef void *                         PPGMPAGEMAP;
    1589  typedef void **                        PPPGMPAGEMAP;
    1590 //#elif IN_RING0
    1591 // typedef PPGMPAGER0MAPTLB               PPGMPAGEMAPTLB;
    1592 // typedef PPGMPAGER0MAPTLBE              PPGMPAGEMAPTLBE;
    1593 // typedef PPGMPAGER0MAPTLBE             *PPPGMPAGEMAPTLBE;
    1594 //# define PGM_PAGEMAPTLB_ENTRIES         PGM_PAGER0MAPTLB_ENTRIES
    1595 //# define PGM_PAGEMAPTLB_IDX(GCPhys)     PGM_PAGER0MAPTLB_IDX(GCPhys)
    1596 // typedef PPGMCHUNKR0MAP                 PPGMPAGEMAP;
    1597 // typedef PPPGMCHUNKR0MAP                PPPGMPAGEMAP;
    1598 #else
    1599  typedef PPGMPAGER3MAPTLB               PPGMPAGEMAPTLB;
    1600  typedef PPGMPAGER3MAPTLBE              PPGMPAGEMAPTLBE;
    1601  typedef PPGMPAGER3MAPTLBE             *PPPGMPAGEMAPTLBE;
    1602 # define PGM_PAGEMAPTLB_ENTRIES         PGM_PAGER3MAPTLB_ENTRIES
    1603 # define PGM_PAGEMAPTLB_IDX(GCPhys)     PGM_PAGER3MAPTLB_IDX(GCPhys)
    1604  typedef PPGMCHUNKR3MAP                 PPGMPAGEMAP;
    1605  typedef PPPGMCHUNKR3MAP                PPPGMPAGEMAP;
    1606 #endif
    1607 /** @} */
    1608 
    1609 
    1610 /** @name PGM Pool Indexes.
    1611  * Aka. the unique shadow page identifier.
    1612  * @{ */
    1613 /** NIL page pool IDX. */
    1614 #define NIL_PGMPOOL_IDX                 0
    1615 /** The first special index. */
    1616 #define PGMPOOL_IDX_FIRST_SPECIAL       1
    1617 /** Page directory (32-bit root). */
    1618 #define PGMPOOL_IDX_PD                  1
    1619 /** Page Directory Pointer Table (PAE root). */
    1620 #define PGMPOOL_IDX_PDPT                2
    1621 /** AMD64 CR3 level index.*/
    1622 #define PGMPOOL_IDX_AMD64_CR3           3
    1623 /** Nested paging root.*/
    1624 #define PGMPOOL_IDX_NESTED_ROOT         4
    1625 /** The first normal index. */
    1626 #define PGMPOOL_IDX_FIRST               5
    1627 /** The last valid index. (inclusive, 14 bits) */
    1628 #define PGMPOOL_IDX_LAST                0x3fff
    1629 /** @} */
    1630 
    1631 /** The NIL index for the parent chain. */
    1632 #define NIL_PGMPOOL_USER_INDEX          ((uint16_t)0xffff)
    1633 #define NIL_PGMPOOL_PRESENT_INDEX       ((uint16_t)0xffff)
    1634 
    1635 /**
    1636  * Node in the chain linking a shadowed page to its parent (user).
    1637  */
    1638 #pragma pack(1)
    1639 typedef struct PGMPOOLUSER
    1640 {
    1641     /** The index to the next item in the chain. NIL_PGMPOOL_USER_INDEX is no next. */
    1642     uint16_t            iNext;
    1643     /** The user page index. */
    1644     uint16_t            iUser;
    1645     /** Index into the user table. */
    1646     uint32_t            iUserTable;
    1647 } PGMPOOLUSER, *PPGMPOOLUSER;
    1648 typedef const PGMPOOLUSER *PCPGMPOOLUSER;
    1649 #pragma pack()
    1650 
    1651 
    1652 /** The NIL index for the phys ext chain. */
    1653 #define NIL_PGMPOOL_PHYSEXT_INDEX       ((uint16_t)0xffff)
    1654 
    1655 /**
    1656  * Node in the chain of physical cross reference extents.
    1657  * @todo Calling this an 'extent' is not quite right, find a better name.
    1658  */
    1659 #pragma pack(1)
    1660 typedef struct PGMPOOLPHYSEXT
    1661 {
    1662     /** The index to the next item in the chain. NIL_PGMPOOL_PHYSEXT_INDEX is no next. */
    1663     uint16_t            iNext;
    1664     /** The user page indices. */
    1665     uint16_t            aidx[3];
    1666 } PGMPOOLPHYSEXT, *PPGMPOOLPHYSEXT;
    1667 typedef const PGMPOOLPHYSEXT *PCPGMPOOLPHYSEXT;
    1668 #pragma pack()
    1669 
    1670 
    1671 /**
    1672  * The kind of page that's being shadowed.
    1673  */
    1674 typedef enum PGMPOOLKIND
    1675 {
    1676     /** The virtual invalid 0 entry. */
    1677     PGMPOOLKIND_INVALID = 0,
    1678     /** The entry is free (=unused). */
    1679     PGMPOOLKIND_FREE,
    1680 
    1681     /** Shw: 32-bit page table;     Gst: no paging  */
    1682     PGMPOOLKIND_32BIT_PT_FOR_PHYS,
    1683     /** Shw: 32-bit page table;     Gst: 32-bit page table.  */
    1684     PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT,
    1685     /** Shw: 32-bit page table;     Gst: 4MB page.  */
    1686     PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB,
    1687     /** Shw: PAE page table;        Gst: no paging  */
    1688     PGMPOOLKIND_PAE_PT_FOR_PHYS,
    1689     /** Shw: PAE page table;        Gst: 32-bit page table. */
    1690     PGMPOOLKIND_PAE_PT_FOR_32BIT_PT,
    1691     /** Shw: PAE page table;        Gst: Half of a 4MB page.  */
    1692     PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB,
    1693     /** Shw: PAE page table;        Gst: PAE page table. */
    1694     PGMPOOLKIND_PAE_PT_FOR_PAE_PT,
    1695     /** Shw: PAE page table;        Gst: 2MB page.  */
    1696     PGMPOOLKIND_PAE_PT_FOR_PAE_2MB,
    1697 
    1698     /** Shw: 32-bit page directory. Gst: 32-bit page directory. */
    1699     PGMPOOLKIND_32BIT_PD,
    1700     /** Shw: 32-bit page directory. Gst: no paging. */
    1701     PGMPOOLKIND_32BIT_PD_PHYS,
    1702     /** Shw: PAE page directory 0;  Gst: 32-bit page directory. */
    1703     PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD,
    1704     /** Shw: PAE page directory 1;  Gst: 32-bit page directory. */
    1705     PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD,
    1706     /** Shw: PAE page directory 2;  Gst: 32-bit page directory. */
    1707     PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD,
    1708     /** Shw: PAE page directory 3;  Gst: 32-bit page directory. */
    1709     PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD,
    1710     /** Shw: PAE page directory;    Gst: PAE page directory. */
    1711     PGMPOOLKIND_PAE_PD_FOR_PAE_PD,
    1712     /** Shw: PAE page directory;    Gst: no paging. */
    1713     PGMPOOLKIND_PAE_PD_PHYS,
    1714 
    1715     /** Shw: PAE page directory pointer table (legacy, 4 entries);  Gst 32 bits paging. */
    1716     PGMPOOLKIND_PAE_PDPT_FOR_32BIT,
    1717     /** Shw: PAE page directory pointer table (legacy, 4 entries);  Gst PAE PDPT. */
    1718     PGMPOOLKIND_PAE_PDPT,
    1719     /** Shw: PAE page directory pointer table (legacy, 4 entries);  Gst: no paging. */
    1720     PGMPOOLKIND_PAE_PDPT_PHYS,
    1721 
    1722     /** Shw: 64-bit page directory pointer table;   Gst: 64-bit page directory pointer table. */
    1723     PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT,
    1724     /** Shw: 64-bit page directory pointer table;   Gst: no paging  */
    1725     PGMPOOLKIND_64BIT_PDPT_FOR_PHYS,
    1726     /** Shw: 64-bit page directory table;           Gst: 64-bit page directory table. */
    1727     PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD,
    1728     /** Shw: 64-bit page directory table;           Gst: no paging  */
    1729     PGMPOOLKIND_64BIT_PD_FOR_PHYS, /* 22 */
    1730 
    1731     /** Shw: 64-bit PML4;                           Gst: 64-bit PML4. */
    1732     PGMPOOLKIND_64BIT_PML4,
    1733 
    1734     /** Shw: EPT page directory pointer table;      Gst: no paging  */
    1735     PGMPOOLKIND_EPT_PDPT_FOR_PHYS,
    1736     /** Shw: EPT page directory table;              Gst: no paging  */
    1737     PGMPOOLKIND_EPT_PD_FOR_PHYS,
    1738     /** Shw: EPT page table;                        Gst: no paging  */
    1739     PGMPOOLKIND_EPT_PT_FOR_PHYS,
    1740 
    1741     /** Shw: Root Nested paging table. */
    1742     PGMPOOLKIND_ROOT_NESTED,
    1743 
    1744     /** The last valid entry. */
    1745     PGMPOOLKIND_LAST = PGMPOOLKIND_ROOT_NESTED
    1746 } PGMPOOLKIND;
    1747 
    1748 /**
    1749  * The access attributes of the page; only applies to big pages.
    1750  */
    1751 typedef enum
    1752 {
    1753     PGMPOOLACCESS_DONTCARE = 0,
    1754     PGMPOOLACCESS_USER_RW,
    1755     PGMPOOLACCESS_USER_R,
    1756     PGMPOOLACCESS_USER_RW_NX,
    1757     PGMPOOLACCESS_USER_R_NX,
    1758     PGMPOOLACCESS_SUPERVISOR_RW,
    1759     PGMPOOLACCESS_SUPERVISOR_R,
    1760     PGMPOOLACCESS_SUPERVISOR_RW_NX,
    1761     PGMPOOLACCESS_SUPERVISOR_R_NX
    1762 } PGMPOOLACCESS;
    1763 
    1764 /**
    1765  * The tracking data for a page in the pool.
    1766  */
    1767 typedef struct PGMPOOLPAGE
    1768 {
    1769     /** AVL node core with the (HC) physical address of this page. */
    1770     AVLOHCPHYSNODECORE  Core;
    1771     /** Pointer to the R3 mapping of the page. */
    1772 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    1773     R3PTRTYPE(void *)   pvPageR3;
    1774 #else
    1775     R3R0PTRTYPE(void *) pvPageR3;
    1776 #endif
    1777     /** The guest physical address. */
    1778 #if HC_ARCH_BITS == 32 && GC_ARCH_BITS == 64
    1779     uint32_t            Alignment0;
    1780 #endif
    1781     RTGCPHYS            GCPhys;
    1782 
    1783     /** Access handler statistics to determine whether the guest is (re)initializing a page table. */
    1784     RTGCPTR             pvLastAccessHandlerRip;
    1785     RTGCPTR             pvLastAccessHandlerFault;
    1786     uint64_t            cLastAccessHandlerCount;
    1787 
    1788     /** The kind of page we're shadowing. (This is really a PGMPOOLKIND enum.) */
    1789     uint8_t             enmKind;
    1790     /** The subkind of page we're shadowing. (This is really a PGMPOOLACCESS enum.) */
    1791     uint8_t             enmAccess;
    1792     /** The index of this page. */
    1793     uint16_t            idx;
    1794     /** The next entry in the list this page currently resides in.
    1795      * It's either in the free list or in the GCPhys hash. */
    1796     uint16_t            iNext;
    1797     /** Head of the user chain. NIL_PGMPOOL_USER_INDEX if not currently in use. */
    1798     uint16_t            iUserHead;
    1799     /** The number of present entries. */
    1800     uint16_t            cPresent;
    1801     /** The first entry in the table which is present. */
    1802     uint16_t            iFirstPresent;
    1803     /** The number of modifications to the monitored page. */
    1804     uint16_t            cModifications;
    1805     /** The next modified page. NIL_PGMPOOL_IDX if tail. */
    1806     uint16_t            iModifiedNext;
    1807     /** The previous modified page. NIL_PGMPOOL_IDX if head. */
    1808     uint16_t            iModifiedPrev;
    1809     /** The next page sharing access handler. NIL_PGMPOOL_IDX if tail. */
    1810     uint16_t            iMonitoredNext;
    1811     /** The previous page sharing access handler. NIL_PGMPOOL_IDX if head. */
    1812     uint16_t            iMonitoredPrev;
    1813     /** The next page in the age list. */
    1814     uint16_t            iAgeNext;
    1815     /** The previous page in the age list. */
    1816     uint16_t            iAgePrev;
    1817     /** Used to indicate that the page is zeroed. */
    1818     bool                fZeroed;
    1819     /** Used to indicate that a PT has non-global entries. */
    1820     bool                fSeenNonGlobal;
    1821     /** Used to indicate that we're monitoring writes to the guest page. */
    1822     bool                fMonitored;
    1823     /** Used to indicate that the page is in the cache (e.g. in the GCPhys hash).
    1824      * (All pages are in the age list.) */
    1825     bool                fCached;
    1826     /** This is used by the R3 access handlers when invoked by an async thread.
    1827      * It's a hack required because of REMR3NotifyHandlerPhysicalDeregister. */
    1828     bool volatile       fReusedFlushPending;
    1829     /** Used to mark the page as dirty (write monitoring is temporarily off). */
    1830     bool                fDirty;
    1831 
    1832     /** Used to indicate that this page can't be flushed (important for CR3 root pages and shadow PAE PD pages). */
    1833     uint32_t            cLocked;
    1834     uint32_t            idxDirty;
    1835     RTGCPTR             pvDirtyFault;
    1836 } PGMPOOLPAGE, *PPGMPOOLPAGE, **PPPGMPOOLPAGE;
    1837 /** Pointer to a const pool page. */
    1838 typedef PGMPOOLPAGE const *PCPGMPOOLPAGE;
    1839 
    1840 
    1841 /** The hash table size. */
    1842 # define PGMPOOL_HASH_SIZE      0x40
    1843 /** The hash function. */
    1844 # define PGMPOOL_HASH(GCPhys)   ( ((GCPhys) >> PAGE_SHIFT) & (PGMPOOL_HASH_SIZE - 1) )
    1845 
    1846 
    1847 /**
    1848  * The shadow page pool instance data.
    1849  *
    1850  * It's all one big allocation made at init time, except for the
    1851  * pages, that is. The user nodes follow immediately after the
    1852  * page structures.
    1853  */
    1854 typedef struct PGMPOOL
    1855 {
    1856     /** The VM handle - R3 Ptr. */
    1857     PVMR3                       pVMR3;
    1858     /** The VM handle - R0 Ptr. */
    1859     PVMR0                       pVMR0;
    1860     /** The VM handle - RC Ptr. */
    1861     PVMRC                       pVMRC;
    1862     /** The max pool size. This includes the special IDs.  */
    1863     uint16_t                    cMaxPages;
    1864     /** The current pool size. */
    1865     uint16_t                    cCurPages;
    1866     /** The head of the free page list. */
    1867     uint16_t                    iFreeHead;
    1868     /** Padding. */
    1869     uint16_t                    u16Padding;
    1870     /** Head of the chain of free user nodes. */
    1871     uint16_t                    iUserFreeHead;
    1872     /** The number of user nodes we've allocated. */
    1873     uint16_t                    cMaxUsers;
    1874     /** The number of present page table entries in the entire pool. */
    1875     uint32_t                    cPresent;
    1876     /** Pointer to the array of user nodes - RC pointer. */
    1877     RCPTRTYPE(PPGMPOOLUSER)     paUsersRC;
    1878     /** Pointer to the array of user nodes - R3 pointer. */
    1879     R3PTRTYPE(PPGMPOOLUSER)     paUsersR3;
    1880     /** Pointer to the array of user nodes - R0 pointer. */
    1881     R0PTRTYPE(PPGMPOOLUSER)     paUsersR0;
    1882     /** Head of the chain of free phys ext nodes. */
    1883     uint16_t                    iPhysExtFreeHead;
    1884     /** The number of phys ext nodes we've allocated. */
    1885     uint16_t                    cMaxPhysExts;
    1886     /** Pointer to the array of physical xref extent nodes - RC pointer. */
    1887     RCPTRTYPE(PPGMPOOLPHYSEXT)  paPhysExtsRC;
    1888     /** Pointer to the array of physical xref extent nodes - R3 pointer. */
    1889     R3PTRTYPE(PPGMPOOLPHYSEXT)  paPhysExtsR3;
    1890     /** Pointer to the array of physical xref extent nodes - R0 pointer. */
    1891     R0PTRTYPE(PPGMPOOLPHYSEXT)  paPhysExtsR0;
    1892     /** Hash table for GCPhys addresses. */
    1893     uint16_t                    aiHash[PGMPOOL_HASH_SIZE];
    1894     /** The head of the age list. */
    1895     uint16_t                    iAgeHead;
    1896     /** The tail of the age list. */
    1897     uint16_t                    iAgeTail;
    1898     /** Set if the cache is enabled. */
    1899     bool                        fCacheEnabled;
    1900     /** Alignment padding. */
    1901     bool                        afPadding1[3];
    1902     /** Head of the list of modified pages. */
    1903     uint16_t                    iModifiedHead;
    1904     /** The current number of modified pages. */
    1905     uint16_t                    cModifiedPages;
    1906     /** Access handler, RC. */
    1907     RCPTRTYPE(PFNPGMRCPHYSHANDLER)  pfnAccessHandlerRC;
    1908     /** Access handler, R0. */
    1909     R0PTRTYPE(PFNPGMR0PHYSHANDLER)  pfnAccessHandlerR0;
    1910     /** Access handler, R3. */
    1911     R3PTRTYPE(PFNPGMR3PHYSHANDLER)  pfnAccessHandlerR3;
    1912     /** The access handler description (R3 ptr). */
    1913     R3PTRTYPE(const char *)         pszAccessHandler;
    1914 # if HC_ARCH_BITS == 32
    1915     /** Alignment padding. */
    1916     uint32_t                    u32Padding2;
    1917 # endif
    1918     /** Next available slot. */
    1919     uint32_t                    idxFreeDirtyPage;
    1920     /** Number of active dirty pages. */
    1921     uint32_t                    cDirtyPages;
    1922     /** Array of current dirty pgm pool page indices. */
    1923     uint16_t                    aIdxDirtyPages[16];
    1924     uint64_t                    aDirtyPages[16][512];
    1925     /** The number of pages currently in use. */
    1926     uint16_t                    cUsedPages;
    1927 #ifdef VBOX_WITH_STATISTICS
    1928     /** The high water mark for cUsedPages. */
    1929     uint16_t                    cUsedPagesHigh;
    1930     uint32_t                    Alignment1;         /**< Align the next member on a 64-bit boundary. */
    1931     /** Profiling pgmPoolAlloc(). */
    1932     STAMPROFILEADV              StatAlloc;
    1933     /** Profiling pgmR3PoolClearDoIt(). */
    1934     STAMPROFILE                 StatClearAll;
    1935     /** Profiling pgmR3PoolReset(). */
    1936     STAMPROFILE                 StatR3Reset;
    1937     /** Profiling pgmPoolFlushPage(). */
    1938     STAMPROFILE                 StatFlushPage;
    1939     /** Profiling pgmPoolFree(). */
    1940     STAMPROFILE                 StatFree;
    1941     /** Counting explicit flushes by PGMPoolFlushPage(). */
    1942     STAMCOUNTER                 StatForceFlushPage;
    1943     /** Counting explicit flushes of dirty pages by PGMPoolFlushPage(). */
    1944     STAMCOUNTER                 StatForceFlushDirtyPage;
    1945     /** Counting flushes for reused pages. */
    1946     STAMCOUNTER                 StatForceFlushReused;
    1947     /** Profiling time spent zeroing pages. */
    1948     STAMPROFILE                 StatZeroPage;
    1949     /** Profiling of pgmPoolTrackDeref. */
    1950     STAMPROFILE                 StatTrackDeref;
    1951     /** Profiling pgmTrackFlushGCPhysPT. */
    1952     STAMPROFILE                 StatTrackFlushGCPhysPT;
    1953     /** Profiling pgmTrackFlushGCPhysPTs. */
    1954     STAMPROFILE                 StatTrackFlushGCPhysPTs;
    1955     /** Profiling pgmTrackFlushGCPhysPTsSlow. */
    1956     STAMPROFILE                 StatTrackFlushGCPhysPTsSlow;
    1957     /** Number of times we've been out of user records. */
    1958     STAMCOUNTER                 StatTrackFreeUpOneUser;
    1959     /** Nr of flushed entries. */
    1960     STAMCOUNTER                 StatTrackFlushEntry;
    1961     /** Nr of updated entries. */
    1962     STAMCOUNTER                 StatTrackFlushEntryKeep;
    1963     /** Profiling deref activity related tracking GC physical pages. */
    1964     STAMPROFILE                 StatTrackDerefGCPhys;
    1965     /** Number of linear searches for a HCPhys in the ram ranges. */
    1966     STAMCOUNTER                 StatTrackLinearRamSearches;
    1967     /** The number of failing pgmPoolTrackPhysExtAlloc calls. */
    1968     STAMCOUNTER                 StamTrackPhysExtAllocFailures;
    1969     /** Profiling the RC/R0 access handler. */
    1970     STAMPROFILE                 StatMonitorRZ;
    1971     /** Times we've failed interpreting the instruction. */
    1972     STAMCOUNTER                 StatMonitorRZEmulateInstr;
    1973     /** Profiling the pgmPoolFlushPage calls made from the RC/R0 access handler. */
    1974     STAMPROFILE                 StatMonitorRZFlushPage;
    1975     /** Times we've detected a page table reinit. */
    1976     STAMCOUNTER                 StatMonitorRZFlushReinit;
    1977     /** Counting flushes for pages that are modified too often. */
    1978     STAMCOUNTER                 StatMonitorRZFlushModOverflow;
    1979     /** Times we've detected fork(). */
    1980     STAMCOUNTER                 StatMonitorRZFork;
    1981     /** Profiling the RC/R0 access we've handled (except REP STOSD). */
    1982     STAMPROFILE                 StatMonitorRZHandled;
    1983     /** Times we've failed interpreting a patch code instruction. */
    1984     STAMCOUNTER                 StatMonitorRZIntrFailPatch1;
    1985     /** Times we've failed interpreting a patch code instruction during flushing. */
    1986     STAMCOUNTER                 StatMonitorRZIntrFailPatch2;
    1987     /** The number of times we've seen rep prefixes we can't handle. */
    1988     STAMCOUNTER                 StatMonitorRZRepPrefix;
    1989     /** Profiling the REP STOSD cases we've handled. */
    1990     STAMPROFILE                 StatMonitorRZRepStosd;
    1991     /** Nr of handled PT faults. */
    1992     STAMCOUNTER                 StatMonitorRZFaultPT;
    1993     /** Nr of handled PD faults. */
    1994     STAMCOUNTER                 StatMonitorRZFaultPD;
    1995     /** Nr of handled PDPT faults. */
    1996     STAMCOUNTER                 StatMonitorRZFaultPDPT;
    1997     /** Nr of handled PML4 faults. */
    1998     STAMCOUNTER                 StatMonitorRZFaultPML4;
    1999 
    2000     /** Profiling the R3 access handler. */
    2001     STAMPROFILE                 StatMonitorR3;
    2002     /** Times we've failed interpreting the instruction. */
    2003     STAMCOUNTER                 StatMonitorR3EmulateInstr;
    2004     /** Profiling the pgmPoolFlushPage calls made from the R3 access handler. */
    2005     STAMPROFILE                 StatMonitorR3FlushPage;
    2006     /** Times we've detected a page table reinit. */
    2007     STAMCOUNTER                 StatMonitorR3FlushReinit;
    2008     /** Counting flushes for pages that are modified too often. */
    2009     STAMCOUNTER                 StatMonitorR3FlushModOverflow;
    2010     /** Times we've detected fork(). */
    2011     STAMCOUNTER                 StatMonitorR3Fork;
    2012     /** Profiling the R3 access we've handled (except REP STOSD). */
    2013     STAMPROFILE                 StatMonitorR3Handled;
    2014     /** The number of times we've seen rep prefixes we can't handle. */
    2015     STAMCOUNTER                 StatMonitorR3RepPrefix;
    2016     /** Profiling the REP STOSD cases we've handled. */
    2017     STAMPROFILE                 StatMonitorR3RepStosd;
    2018     /** Nr of handled PT faults. */
    2019     STAMCOUNTER                 StatMonitorR3FaultPT;
    2020     /** Nr of handled PD faults. */
    2021     STAMCOUNTER                 StatMonitorR3FaultPD;
    2022     /** Nr of handled PDPT faults. */
    2023     STAMCOUNTER                 StatMonitorR3FaultPDPT;
    2024     /** Nr of handled PML4 faults. */
    2025     STAMCOUNTER                 StatMonitorR3FaultPML4;
    2026     /** The number of times we're called in an async thread and need to flush. */
    2027     STAMCOUNTER                 StatMonitorR3Async;
    2028     /** Times we've called pgmPoolResetDirtyPages (and there were dirty pages). */
    2029     STAMCOUNTER                 StatResetDirtyPages;
    2030     /** Times we've called pgmPoolAddDirtyPage. */
    2031     STAMCOUNTER                 StatDirtyPage;
    2032     /** Times we've had to flush duplicates for dirty page management. */
    2033     STAMCOUNTER                 StatDirtyPageDupFlush;
    2034     /** Times we've had to flush because of overflow. */
    2035     STAMCOUNTER                 StatDirtyPageOverFlowFlush;
    2036 
    2037     /** The high water mark for cModifiedPages. */
    2038     uint16_t                    cModifiedPagesHigh;
    2039     uint16_t                    Alignment2[3];      /**< Align the next member on a 64-bit boundary. */
    2040 
    2041     /** The number of cache hits. */
    2042     STAMCOUNTER                 StatCacheHits;
    2043     /** The number of cache misses. */
    2044     STAMCOUNTER                 StatCacheMisses;
    2045     /** The number of times we've got a conflict of 'kind' in the cache. */
    2046     STAMCOUNTER                 StatCacheKindMismatches;
    2047     /** Number of times we've been out of pages. */
    2048     STAMCOUNTER                 StatCacheFreeUpOne;
    2049     /** The number of cacheable allocations. */
    2050     STAMCOUNTER                 StatCacheCacheable;
    2051     /** The number of uncacheable allocations. */
    2052     STAMCOUNTER                 StatCacheUncacheable;
    2053 #else
    2054     uint32_t                    Alignment3;         /**< Align the next member on a 64-bit boundary. */
    2055 #endif
    2056     /** The AVL tree for looking up a page by its HC physical address. */
    2057     AVLOHCPHYSTREE              HCPhysTree;
    2058     uint32_t                    Alignment4;         /**< Align the next member on a 64-bit boundary. */
    2059     /** Array of pages. (cMaxPages in length)
    2060      * The Id is the index into this array.
    2061      */
    2062     PGMPOOLPAGE                 aPages[PGMPOOL_IDX_FIRST];
    2063 } PGMPOOL, *PPGMPOOL, **PPPGMPOOL;
    2064 AssertCompileMemberAlignment(PGMPOOL, iModifiedHead, 8);
    2065 AssertCompileMemberAlignment(PGMPOOL, aDirtyPages, 8);
    2066 AssertCompileMemberAlignment(PGMPOOL, cUsedPages, 8);
    2067 #ifdef VBOX_WITH_STATISTICS
    2068 AssertCompileMemberAlignment(PGMPOOL, StatAlloc, 8);
    2069 #endif
    2070 AssertCompileMemberAlignment(PGMPOOL, aPages, 8);
    2071 
    2072 
    2073 /** @def PGMPOOL_PAGE_2_PTR
    2074  * Maps a pool page into the current context.
    2075  *
    2076  * @returns Address of the page mapping.
    2077  * @param   pVM     The VM handle.
    2078  * @param   pPage   The pool page.
    2079  *
    2080  * @remark  In RC this uses PGMGCDynMapHCPage(), so it will consume one of the
    2081  *          small page window slots employed by that function. Be careful.
    2082  * @remark  There is no need to assert on the result.
    2083  */
    2084 #if defined(IN_RC)
    2085 # define PGMPOOL_PAGE_2_PTR(pVM, pPage)  pgmPoolMapPageInlined(&(pVM)->pgm.s, (pPage))
    2086 #elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    2087 # define PGMPOOL_PAGE_2_PTR(pVM, pPage)  pgmPoolMapPageInlined(&(pVM)->pgm.s, (pPage))
    2088 #elif defined(VBOX_STRICT)
    2089 # define PGMPOOL_PAGE_2_PTR(pVM, pPage)  pgmPoolMapPageStrict(pPage)
    2090 DECLINLINE(void *) pgmPoolMapPageStrict(PPGMPOOLPAGE pPage)
    2091 {
    2092     Assert(pPage && pPage->pvPageR3);
    2093     return pPage->pvPageR3;
    2094 }
    2095 #else
    2096 # define PGMPOOL_PAGE_2_PTR(pVM, pPage)  ((pPage)->pvPageR3)
    2097 #endif
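
/* Editor's note: a minimal usage sketch (not part of the original file) for
 * the macro above.  The function name and the choice of a 32-bit shadow page
 * table (PX86PT from VBox/x86.h) are made up for illustration. */
DECLINLINE(void) pgmExampleClearShwPte(PVM pVM, PPGMPOOLPAGE pPage, unsigned iPte)
{
    /* Map the pool page in whatever context we're compiled for and clear one PTE. */
    PX86PT pShwPT = (PX86PT)PGMPOOL_PAGE_2_PTR(pVM, pPage);
    pShwPT->a[iPte].u = 0;
}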
    2098 
    2099 /** @def PGMPOOL_PAGE_2_PTR_BY_PGM
    2100  * Maps a pool page into the current context.
    2101  *
    2102  * @returns Address of the page mapping.
    2103  * @param   pPGM    Pointer to the PGM instance data.
    2104  * @param   pPage   The pool page.
    2105  *
    2106  * @remark  In RC this uses PGMGCDynMapHCPage(), so it will consume one of the
    2107  *          small page window slots employed by that function. Be careful.
    2108  * @remark  There is no need to assert on the result.
    2109  */
    2110 #if defined(IN_RC)
    2111 # define PGMPOOL_PAGE_2_PTR_BY_PGM(pPGM, pPage)  pgmPoolMapPageInlined(pPGM, (pPage))
    2112 #elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    2113 # define PGMPOOL_PAGE_2_PTR_BY_PGM(pPGM, pPage)  pgmPoolMapPageInlined(pPGM, (pPage))
    2114 #else
    2115 # define PGMPOOL_PAGE_2_PTR_BY_PGM(pPGM, pPage)  PGMPOOL_PAGE_2_PTR(PGM2VM(pPGM), pPage)
    2116 #endif
    2117 
    2118 /** @def PGMPOOL_PAGE_2_PTR_BY_PGMCPU
    2119  * Maps a pool page into the current context.
    2120  *
    2121  * @returns Address of the page mapping.
    2122  * @param   pPGM    Pointer to the PGMCPU instance data.
    2123  * @param   pPage   The pool page.
    2124  *
    2125  * @remark  In RC this uses PGMGCDynMapHCPage(), so it will consume one of the
    2126  *          small page window slots employed by that function. Be careful.
    2127  * @remark  There is no need to assert on the result.
    2128  */
    2129 #if defined(IN_RC)
    2130 # define PGMPOOL_PAGE_2_PTR_BY_PGMCPU(pPGM, pPage)  pgmPoolMapPageInlined(PGMCPU2PGM(pPGM), (pPage))
    2131 #elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    2132 # define PGMPOOL_PAGE_2_PTR_BY_PGMCPU(pPGM, pPage)  pgmPoolMapPageInlined(PGMCPU2PGM(pPGM), (pPage))
    2133 #else
    2134 # define PGMPOOL_PAGE_2_PTR_BY_PGMCPU(pPGM, pPage)  PGMPOOL_PAGE_2_PTR(PGMCPU2VM(pPGM), pPage)
    2135 #endif
    2136 
    2137 
    2138 /** @name Per guest page tracking data.
    2139  * This is currently stored as a 16-bit word in the PGMPAGE structure; the idea
    2140  * is to use more bits for it and split it up later on. But for now we'll play
    2141  * it safe and change as little as possible.
    2142  *
    2143  * The 16-bit word has two parts:
    2144  *
    2145  * The lower 14 bits form the @a idx field. It is either the index of a page in
    2146  * the shadow page pool, or an index into the extent list.
    2147  *
    2148  * The 2 topmost bits make up the @a cRefs field, which counts the number of
    2149  * shadow page pool references to the page. If cRefs equals
    2150  * PGMPOOL_TD_CREFS_PHYSEXT, then the @a idx field is an index into the extent
    2151  * (misnomer) table and not the shadow page pool.
    2152  *
    2153  * See PGM_PAGE_GET_TRACKING and PGM_PAGE_SET_TRACKING for how to get and set
    2154  * the 16-bit word.
    2155  *
    2156  * @{ */
    2157 /** The shift count for getting to the cRefs part. */
    2158 #define PGMPOOL_TD_CREFS_SHIFT          14
    2159 /** The mask applied after shifting the tracking data down by
    2160  * PGMPOOL_TD_CREFS_SHIFT. */
    2161 #define PGMPOOL_TD_CREFS_MASK           0x3
    2162 /** The cRefs value used to indicate that the idx is the head of a
    2163  * physical cross reference list. */
    2164 #define PGMPOOL_TD_CREFS_PHYSEXT        PGMPOOL_TD_CREFS_MASK
    2165 /** The shift used to get idx. */
    2166 #define PGMPOOL_TD_IDX_SHIFT            0
    2167 /** The mask applied to the idx after shifting down by PGMPOOL_TD_IDX_SHIFT. */
    2168 #define PGMPOOL_TD_IDX_MASK             0x3fff
    2169 /** The idx value for when we're out of PGMPOOLPHYSEXT entries and/or there are
    2170  * simply too many mappings of this page. */
    2171 #define PGMPOOL_TD_IDX_OVERFLOWED       PGMPOOL_TD_IDX_MASK
    2172 
    2173 /** @def PGMPOOL_TD_MAKE
    2174  * Makes a 16-bit tracking data word.
    2175  *
    2176  * @returns tracking data.
    2177  * @param   cRefs       The @a cRefs field. Must be within bounds!
    2178  * @param   idx         The @a idx field. Must also be within bounds! */
    2179 #define PGMPOOL_TD_MAKE(cRefs, idx)     ( ((cRefs) << PGMPOOL_TD_CREFS_SHIFT) | (idx) )
    2180 
    2181 /** @def PGMPOOL_TD_GET_CREFS
    2182  * Get the @a cRefs field from a tracking data word.
    2183  *
    2184  * @returns The @a cRefs field
    2185  * @param   u16         The tracking data word. */
    2186 #define PGMPOOL_TD_GET_CREFS(u16)       ( ((u16) >> PGMPOOL_TD_CREFS_SHIFT) & PGMPOOL_TD_CREFS_MASK )
    2187 
    2188 /** @def PGMPOOL_TD_GET_IDX
    2189  * Get the @a idx field from a tracking data word.
    2190  *
    2191  * @returns The @a idx field
    2192  * @param   u16         The tracking data word. */
    2193 #define PGMPOOL_TD_GET_IDX(u16)         ( ((u16) >> PGMPOOL_TD_IDX_SHIFT)   & PGMPOOL_TD_IDX_MASK   )
    2194 /** @} */
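
/* Editor's note: a minimal worked example (not part of the original file) of
 * a round trip through the tracking-data macros above. */
DECLINLINE(void) pgmExampleTrackingRoundTrip(void)
{
    uint16_t const u16 = PGMPOOL_TD_MAKE(1, 0x123);   /* cRefs=1, idx=0x123 -> 0x4123 */
    Assert(PGMPOOL_TD_GET_CREFS(u16) == 1);
    Assert(PGMPOOL_TD_GET_IDX(u16)   == 0x123);
    /* cRefs == PGMPOOL_TD_CREFS_PHYSEXT marks idx as a phys-ext list head. */
    Assert(PGMPOOL_TD_GET_CREFS(PGMPOOL_TD_MAKE(PGMPOOL_TD_CREFS_PHYSEXT, 5)) == PGMPOOL_TD_CREFS_PHYSEXT);
}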
    2195 
    2196 
    2197 /**
    2198  * The trees use self-relative offsets as pointers.
    2199  * So all their data, including the root pointers, must be in the heap for HC and GC
    2200  * to have the same layout.
    2201  */
    2202 typedef struct PGMTREES
    2203 {
    2204     /** Physical access handlers (AVL range+offsetptr tree). */
    2205     AVLROGCPHYSTREE                 PhysHandlers;
    2206     /** Virtual access handlers (AVL range + GC ptr tree). */
    2207     AVLROGCPTRTREE                  VirtHandlers;
    2208     /** Virtual access handlers (Phys range AVL range + offsetptr tree). */
    2209     AVLROGCPHYSTREE                 PhysToVirtHandlers;
    2210     /** Virtual access handlers for the hypervisor (AVL range + GC ptr tree). */
    2211     AVLROGCPTRTREE                  HyperVirtHandlers;
    2212 } PGMTREES;
    2213 /** Pointer to PGM trees. */
    2214 typedef PGMTREES *PPGMTREES;
    2215 
    2216 
    2217 /** @name Paging mode macros
    2218  * @{ */
    2219 #ifdef IN_RC
    2220 # define PGM_CTX(a,b)                   a##RC##b
    2221 # define PGM_CTX_STR(a,b)               a "GC" b
    2222 # define PGM_CTX_DECL(type)             VMMRCDECL(type)
    2223 #else
    2224 # ifdef IN_RING3
    2225 #  define PGM_CTX(a,b)                   a##R3##b
    2226 #  define PGM_CTX_STR(a,b)               a "R3" b
    2227 #  define PGM_CTX_DECL(type)             DECLCALLBACK(type)
    2228 # else
    2229 #  define PGM_CTX(a,b)                   a##R0##b
    2230 #  define PGM_CTX_STR(a,b)               a "R0" b
    2231 #  define PGM_CTX_DECL(type)             VMMDECL(type)
    2232 # endif
    2233 #endif
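
/* Editor's note: examples (not part of the original file) of how the context
 * macros above expand:
 *   ring-3:  PGM_CTX(pgm,GstGetPage)  -> pgmR3GstGetPage
 *   ring-0:  PGM_CTX(pgm,GstGetPage)  -> pgmR0GstGetPage
 *   RC:      PGM_CTX(pgm,GstGetPage)  -> pgmRCGstGetPage
 *            PGM_CTX_STR("pgm","Gst") -> "pgm" "GC" "Gst"  (note: the string
 *            variant uses "GC", not "RC", in the raw-mode context). */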
    2234 
    2235 #define PGM_GST_NAME_REAL(name)         PGM_CTX(pgm,GstReal##name)
    2236 #define PGM_GST_NAME_RC_REAL_STR(name)  "pgmRCGstReal" #name
    2237 #define PGM_GST_NAME_R0_REAL_STR(name)  "pgmR0GstReal" #name
    2238 #define PGM_GST_NAME_PROT(name)         PGM_CTX(pgm,GstProt##name)
    2239 #define PGM_GST_NAME_RC_PROT_STR(name)  "pgmRCGstProt" #name
    2240 #define PGM_GST_NAME_R0_PROT_STR(name)  "pgmR0GstProt" #name
    2241 #define PGM_GST_NAME_32BIT(name)        PGM_CTX(pgm,Gst32Bit##name)
    2242 #define PGM_GST_NAME_RC_32BIT_STR(name) "pgmRCGst32Bit" #name
    2243 #define PGM_GST_NAME_R0_32BIT_STR(name) "pgmR0Gst32Bit" #name
    2244 #define PGM_GST_NAME_PAE(name)          PGM_CTX(pgm,GstPAE##name)
    2245 #define PGM_GST_NAME_RC_PAE_STR(name)   "pgmRCGstPAE" #name
    2246 #define PGM_GST_NAME_R0_PAE_STR(name)   "pgmR0GstPAE" #name
    2247 #define PGM_GST_NAME_AMD64(name)        PGM_CTX(pgm,GstAMD64##name)
    2248 #define PGM_GST_NAME_RC_AMD64_STR(name) "pgmRCGstAMD64" #name
    2249 #define PGM_GST_NAME_R0_AMD64_STR(name) "pgmR0GstAMD64" #name
    2250 #define PGM_GST_PFN(name, pVCpu)        ((pVCpu)->pgm.s.PGM_CTX(pfn,Gst##name))
    2251 #define PGM_GST_DECL(type, name)        PGM_CTX_DECL(type) PGM_GST_NAME(name)
    2252 
    2253 #define PGM_SHW_NAME_32BIT(name)        PGM_CTX(pgm,Shw32Bit##name)
    2254 #define PGM_SHW_NAME_RC_32BIT_STR(name) "pgmRCShw32Bit" #name
    2255 #define PGM_SHW_NAME_R0_32BIT_STR(name) "pgmR0Shw32Bit" #name
    2256 #define PGM_SHW_NAME_PAE(name)          PGM_CTX(pgm,ShwPAE##name)
    2257 #define PGM_SHW_NAME_RC_PAE_STR(name)   "pgmRCShwPAE" #name
    2258 #define PGM_SHW_NAME_R0_PAE_STR(name)   "pgmR0ShwPAE" #name
    2259 #define PGM_SHW_NAME_AMD64(name)        PGM_CTX(pgm,ShwAMD64##name)
    2260 #define PGM_SHW_NAME_RC_AMD64_STR(name) "pgmRCShwAMD64" #name
    2261 #define PGM_SHW_NAME_R0_AMD64_STR(name) "pgmR0ShwAMD64" #name
    2262 #define PGM_SHW_NAME_NESTED(name)        PGM_CTX(pgm,ShwNested##name)
    2263 #define PGM_SHW_NAME_RC_NESTED_STR(name) "pgmRCShwNested" #name
    2264 #define PGM_SHW_NAME_R0_NESTED_STR(name) "pgmR0ShwNested" #name
    2265 #define PGM_SHW_NAME_EPT(name)          PGM_CTX(pgm,ShwEPT##name)
    2266 #define PGM_SHW_NAME_RC_EPT_STR(name)   "pgmRCShwEPT" #name
    2267 #define PGM_SHW_NAME_R0_EPT_STR(name)   "pgmR0ShwEPT" #name
    2268 #define PGM_SHW_DECL(type, name)        PGM_CTX_DECL(type) PGM_SHW_NAME(name)
    2269 #define PGM_SHW_PFN(name, pVCpu)        ((pVCpu)->pgm.s.PGM_CTX(pfn,Shw##name))
    2270 
    2271 /*                   Shw_Gst */
    2272 #define PGM_BTH_NAME_32BIT_REAL(name)   PGM_CTX(pgm,Bth32BitReal##name)
    2273 #define PGM_BTH_NAME_32BIT_PROT(name)   PGM_CTX(pgm,Bth32BitProt##name)
    2274 #define PGM_BTH_NAME_32BIT_32BIT(name)  PGM_CTX(pgm,Bth32Bit32Bit##name)
    2275 #define PGM_BTH_NAME_PAE_REAL(name)     PGM_CTX(pgm,BthPAEReal##name)
    2276 #define PGM_BTH_NAME_PAE_PROT(name)     PGM_CTX(pgm,BthPAEProt##name)
    2277 #define PGM_BTH_NAME_PAE_32BIT(name)    PGM_CTX(pgm,BthPAE32Bit##name)
    2278 #define PGM_BTH_NAME_PAE_PAE(name)      PGM_CTX(pgm,BthPAEPAE##name)
    2279 #define PGM_BTH_NAME_AMD64_PROT(name)   PGM_CTX(pgm,BthAMD64Prot##name)
    2280 #define PGM_BTH_NAME_AMD64_AMD64(name)  PGM_CTX(pgm,BthAMD64AMD64##name)
    2281 #define PGM_BTH_NAME_NESTED_REAL(name)  PGM_CTX(pgm,BthNestedReal##name)
    2282 #define PGM_BTH_NAME_NESTED_PROT(name)  PGM_CTX(pgm,BthNestedProt##name)
    2283 #define PGM_BTH_NAME_NESTED_32BIT(name) PGM_CTX(pgm,BthNested32Bit##name)
    2284 #define PGM_BTH_NAME_NESTED_PAE(name)   PGM_CTX(pgm,BthNestedPAE##name)
    2285 #define PGM_BTH_NAME_NESTED_AMD64(name) PGM_CTX(pgm,BthNestedAMD64##name)
    2286 #define PGM_BTH_NAME_EPT_REAL(name)     PGM_CTX(pgm,BthEPTReal##name)
    2287 #define PGM_BTH_NAME_EPT_PROT(name)     PGM_CTX(pgm,BthEPTProt##name)
    2288 #define PGM_BTH_NAME_EPT_32BIT(name)    PGM_CTX(pgm,BthEPT32Bit##name)
    2289 #define PGM_BTH_NAME_EPT_PAE(name)      PGM_CTX(pgm,BthEPTPAE##name)
    2290 #define PGM_BTH_NAME_EPT_AMD64(name)    PGM_CTX(pgm,BthEPTAMD64##name)
    2291 
    2292 #define PGM_BTH_NAME_RC_32BIT_REAL_STR(name)    "pgmRCBth32BitReal" #name
    2293 #define PGM_BTH_NAME_RC_32BIT_PROT_STR(name)    "pgmRCBth32BitProt" #name
    2294 #define PGM_BTH_NAME_RC_32BIT_32BIT_STR(name)   "pgmRCBth32Bit32Bit" #name
    2295 #define PGM_BTH_NAME_RC_PAE_REAL_STR(name)      "pgmRCBthPAEReal" #name
    2296 #define PGM_BTH_NAME_RC_PAE_PROT_STR(name)      "pgmRCBthPAEProt" #name
    2297 #define PGM_BTH_NAME_RC_PAE_32BIT_STR(name)     "pgmRCBthPAE32Bit" #name
    2298 #define PGM_BTH_NAME_RC_PAE_PAE_STR(name)       "pgmRCBthPAEPAE" #name
    2299 #define PGM_BTH_NAME_RC_AMD64_AMD64_STR(name)   "pgmRCBthAMD64AMD64" #name
    2300 #define PGM_BTH_NAME_RC_NESTED_REAL_STR(name)   "pgmRCBthNestedReal" #name
    2301 #define PGM_BTH_NAME_RC_NESTED_PROT_STR(name)   "pgmRCBthNestedProt" #name
    2302 #define PGM_BTH_NAME_RC_NESTED_32BIT_STR(name)  "pgmRCBthNested32Bit" #name
    2303 #define PGM_BTH_NAME_RC_NESTED_PAE_STR(name)    "pgmRCBthNestedPAE" #name
    2304 #define PGM_BTH_NAME_RC_NESTED_AMD64_STR(name)  "pgmRCBthNestedAMD64" #name
    2305 #define PGM_BTH_NAME_RC_EPT_REAL_STR(name)      "pgmRCBthEPTReal" #name
    2306 #define PGM_BTH_NAME_RC_EPT_PROT_STR(name)      "pgmRCBthEPTProt" #name
    2307 #define PGM_BTH_NAME_RC_EPT_32BIT_STR(name)     "pgmRCBthEPT32Bit" #name
    2308 #define PGM_BTH_NAME_RC_EPT_PAE_STR(name)       "pgmRCBthEPTPAE" #name
    2309 #define PGM_BTH_NAME_RC_EPT_AMD64_STR(name)     "pgmRCBthEPTAMD64" #name
    2310 #define PGM_BTH_NAME_R0_32BIT_REAL_STR(name)    "pgmR0Bth32BitReal" #name
    2311 #define PGM_BTH_NAME_R0_32BIT_PROT_STR(name)    "pgmR0Bth32BitProt" #name
    2312 #define PGM_BTH_NAME_R0_32BIT_32BIT_STR(name)   "pgmR0Bth32Bit32Bit" #name
    2313 #define PGM_BTH_NAME_R0_PAE_REAL_STR(name)      "pgmR0BthPAEReal" #name
    2314 #define PGM_BTH_NAME_R0_PAE_PROT_STR(name)      "pgmR0BthPAEProt" #name
    2315 #define PGM_BTH_NAME_R0_PAE_32BIT_STR(name)     "pgmR0BthPAE32Bit" #name
    2316 #define PGM_BTH_NAME_R0_PAE_PAE_STR(name)       "pgmR0BthPAEPAE" #name
    2317 #define PGM_BTH_NAME_R0_AMD64_PROT_STR(name)    "pgmR0BthAMD64Prot" #name
    2318 #define PGM_BTH_NAME_R0_AMD64_AMD64_STR(name)   "pgmR0BthAMD64AMD64" #name
    2319 #define PGM_BTH_NAME_R0_NESTED_REAL_STR(name)   "pgmR0BthNestedReal" #name
    2320 #define PGM_BTH_NAME_R0_NESTED_PROT_STR(name)   "pgmR0BthNestedProt" #name
    2321 #define PGM_BTH_NAME_R0_NESTED_32BIT_STR(name)  "pgmR0BthNested32Bit" #name
    2322 #define PGM_BTH_NAME_R0_NESTED_PAE_STR(name)    "pgmR0BthNestedPAE" #name
    2323 #define PGM_BTH_NAME_R0_NESTED_AMD64_STR(name)  "pgmR0BthNestedAMD64" #name
    2324 #define PGM_BTH_NAME_R0_EPT_REAL_STR(name)      "pgmR0BthEPTReal" #name
    2325 #define PGM_BTH_NAME_R0_EPT_PROT_STR(name)      "pgmR0BthEPTProt" #name
    2326 #define PGM_BTH_NAME_R0_EPT_32BIT_STR(name)     "pgmR0BthEPT32Bit" #name
    2327 #define PGM_BTH_NAME_R0_EPT_PAE_STR(name)       "pgmR0BthEPTPAE" #name
    2328 #define PGM_BTH_NAME_R0_EPT_AMD64_STR(name)     "pgmR0BthEPTAMD64" #name
    2329 
    2330 #define PGM_BTH_DECL(type, name)        PGM_CTX_DECL(type) PGM_BTH_NAME(name)
    2331 #define PGM_BTH_PFN(name, pVCpu)        ((pVCpu)->pgm.s.PGM_CTX(pfn,Bth##name))
    2332 /** @} */
    2333 
    2334 /**
    2335  * Data for each paging mode.
    2336  */
    2337 typedef struct PGMMODEDATA
    2338 {
    2339     /** The guest mode type. */
    2340     uint32_t                        uGstType;
    2341     /** The shadow mode type. */
    2342     uint32_t                        uShwType;
    2343 
    2344     /** @name Function pointers for Shadow paging.
    2345      * @{
    2346      */
    2347     DECLR3CALLBACKMEMBER(int,       pfnR3ShwRelocate,(PVMCPU pVCpu, RTGCPTR offDelta));
    2348     DECLR3CALLBACKMEMBER(int,       pfnR3ShwExit,(PVMCPU pVCpu));
    2349     DECLR3CALLBACKMEMBER(int,       pfnR3ShwGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
    2350     DECLR3CALLBACKMEMBER(int,       pfnR3ShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
    2351 
    2352     DECLRCCALLBACKMEMBER(int,       pfnRCShwGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
    2353     DECLRCCALLBACKMEMBER(int,       pfnRCShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
    2354 
    2355     DECLR0CALLBACKMEMBER(int,       pfnR0ShwGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
    2356     DECLR0CALLBACKMEMBER(int,       pfnR0ShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
    2357     /** @} */
    2358 
    2359     /** @name Function pointers for Guest paging.
    2360      * @{
    2361      */
    2362     DECLR3CALLBACKMEMBER(int,       pfnR3GstRelocate,(PVMCPU pVCpu, RTGCPTR offDelta));
    2363     DECLR3CALLBACKMEMBER(int,       pfnR3GstExit,(PVMCPU pVCpu));
    2364     DECLR3CALLBACKMEMBER(int,       pfnR3GstGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
    2365     DECLR3CALLBACKMEMBER(int,       pfnR3GstModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
    2366     DECLR3CALLBACKMEMBER(int,       pfnR3GstGetPDE,(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDEPAE pPde));
    2367     DECLRCCALLBACKMEMBER(int,       pfnRCGstGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
    2368     DECLRCCALLBACKMEMBER(int,       pfnRCGstModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
    2369     DECLRCCALLBACKMEMBER(int,       pfnRCGstGetPDE,(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDEPAE pPde));
    2370     DECLR0CALLBACKMEMBER(int,       pfnR0GstGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
    2371     DECLR0CALLBACKMEMBER(int,       pfnR0GstModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
    2372     DECLR0CALLBACKMEMBER(int,       pfnR0GstGetPDE,(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDEPAE pPde));
    2373     /** @} */
    2374 
    2375     /** @name Function pointers for Both Shadow and Guest paging.
    2376      * @{
    2377      */
    2378     DECLR3CALLBACKMEMBER(int,       pfnR3BthRelocate,(PVMCPU pVCpu, RTGCPTR offDelta));
    2379     /*                           no pfnR3BthTrap0eHandler */
    2380     DECLR3CALLBACKMEMBER(int,       pfnR3BthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
    2381     DECLR3CALLBACKMEMBER(int,       pfnR3BthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
    2382     DECLR3CALLBACKMEMBER(int,       pfnR3BthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
    2383     DECLR3CALLBACKMEMBER(int,       pfnR3BthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
    2384     DECLR3CALLBACKMEMBER(int,       pfnR3BthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
    2385 #ifdef VBOX_STRICT
    2386     DECLR3CALLBACKMEMBER(unsigned,  pfnR3BthAssertCR3,(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
    2387 #endif
    2388     DECLR3CALLBACKMEMBER(int,       pfnR3BthMapCR3,(PVMCPU pVCpu, RTGCPHYS GCPhysCR3));
    2389     DECLR3CALLBACKMEMBER(int,       pfnR3BthUnmapCR3,(PVMCPU pVCpu));
    2390 
    2391     DECLRCCALLBACKMEMBER(int,       pfnRCBthTrap0eHandler,(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
    2392     DECLRCCALLBACKMEMBER(int,       pfnRCBthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
    2393     DECLRCCALLBACKMEMBER(int,       pfnRCBthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
    2394     DECLRCCALLBACKMEMBER(int,       pfnRCBthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
    2395     DECLRCCALLBACKMEMBER(int,       pfnRCBthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
    2396     DECLRCCALLBACKMEMBER(int,       pfnRCBthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
    2397 #ifdef VBOX_STRICT
    2398     DECLRCCALLBACKMEMBER(unsigned,  pfnRCBthAssertCR3,(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
    2399 #endif
    2400     DECLRCCALLBACKMEMBER(int,       pfnRCBthMapCR3,(PVMCPU pVCpu, RTGCPHYS GCPhysCR3));
    2401     DECLRCCALLBACKMEMBER(int,       pfnRCBthUnmapCR3,(PVMCPU pVCpu));
    2402 
    2403     DECLR0CALLBACKMEMBER(int,       pfnR0BthTrap0eHandler,(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
    2404     DECLR0CALLBACKMEMBER(int,       pfnR0BthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
    2405     DECLR0CALLBACKMEMBER(int,       pfnR0BthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
    2406     DECLR0CALLBACKMEMBER(int,       pfnR0BthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
    2407     DECLR0CALLBACKMEMBER(int,       pfnR0BthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
    2408     DECLR0CALLBACKMEMBER(int,       pfnR0BthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
    2409 #ifdef VBOX_STRICT
    2410     DECLR0CALLBACKMEMBER(unsigned,  pfnR0BthAssertCR3,(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
    2411 #endif
    2412     DECLR0CALLBACKMEMBER(int,       pfnR0BthMapCR3,(PVMCPU pVCpu, RTGCPHYS GCPhysCR3));
    2413     DECLR0CALLBACKMEMBER(int,       pfnR0BthUnmapCR3,(PVMCPU pVCpu));
    2414     /** @} */
    2415 } PGMMODEDATA, *PPGMMODEDATA;
    2416 
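A PGMMODEDATA entry acts as a template from which the per-VCPU function pointer tables (see PGMCPU further down) are filled whenever the shadow/guest paging mode combination changes. A hedged sketch of that copy step follows; the helper name and the way iModeData is obtained are illustrative only, not taken from this changeset:

    /* Editor's sketch: apply one mode-data entry to a VCPU on a mode switch.
     * In the real code the index is derived from the uShwType/uGstType pair. */
    static void pgmModeDataApplySketch(PVM pVM, PVMCPU pVCpu, unsigned iModeData)
    {
        PPGMMODEDATA pModeData = &pVM->pgm.s.paModeData[iModeData];

        /* Copied member by member: shadow, guest and both-mode pointers. */
        pVCpu->pgm.s.pfnR3ShwGetPage       = pModeData->pfnR3ShwGetPage;
        pVCpu->pgm.s.pfnR0BthTrap0eHandler = pModeData->pfnR0BthTrap0eHandler;
        pVCpu->pgm.s.pfnRCBthSyncCR3       = pModeData->pfnRCBthSyncCR3;
        /* ... and so on for the remaining Shw/Gst/Bth members. */
    }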
    2417 
    2418 
    2419 /**
    2420  * Converts a PGM pointer into a VM pointer.
    2421  * @returns Pointer to the VM structure the PGM is part of.
    2422  * @param   pPGM   Pointer to PGM instance data.
    2423  */
    2424 #define PGM2VM(pPGM)  ( (PVM)((char*)pPGM - pPGM->offVM) )
    2425 
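The offset indirection exists because the VM structure is mapped at different addresses in ring-3, ring-0 and raw-mode context; a self-relative offset stays valid in all three where a stored pointer would not. Illustrative usage, assuming PGM is embedded as pVM->pgm.s as elsewhere in this changeset:

    PVM pVM = PGM2VM(pPGM);                            /* walk back by offVM   */
    Assert((uintptr_t)&pVM->pgm.s == (uintptr_t)pPGM); /* sanity: round trip   */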
    2426 /**
    2427  * PGM Data (part of VM)
    2428  */
    2429 typedef struct PGM
    2430 {
    2431     /** Offset to the VM structure. */
    2432     RTINT                           offVM;
    2433     /** Offset of the PGMCPU structure relative to VMCPU. */
    2434     RTINT                           offVCpuPGM;
    2435 
    2436     /** @cfgm{RamPreAlloc, boolean, false}
    2437      * Indicates whether the base RAM should all be allocated before starting
    2438      * the VM (default), or if it should be allocated when first written to.
    2439      */
    2440     bool                            fRamPreAlloc;
    2441     /** Indicates whether write monitoring is currently in use.
    2442      * This is used to prevent conflicts between live saving and page sharing
    2443      * detection. */
    2444     bool                            fPhysWriteMonitoringEngaged;
    2445     /** Alignment padding. */
    2446     bool                            afAlignment0[2];
    2447 
    2448     /*
    2449      * This will be redefined at least two more times before we're done, I'm sure.
    2450      * The current code is only to get on with the coding.
    2451      *   - 2004-06-10: initial version, bird.
    2452      *   - 2004-07-02: 1st time, bird.
    2453      *   - 2004-10-18: 2nd time, bird.
    2454      *   - 2005-07-xx: 3rd time, bird.
    2455      */
    2456 
    2457     /** The host paging mode. (This is what SUPLib reports.) */
    2458     SUPPAGINGMODE                   enmHostMode;
    2459 
    2460     /** Pointer to the page table entries for the dynamic page mapping area - GCPtr. */
    2461     RCPTRTYPE(PX86PTE)              paDynPageMap32BitPTEsGC;
    2462     /** Pointer to the page table entries for the dynamic page mapping area - GCPtr. */
    2463     RCPTRTYPE(PX86PTEPAE)           paDynPageMapPaePTEsGC;
    2464 
    2465     /** 4 MB page mask; 32 or 36 bits depending on PSE-36 (identical for all VCPUs) */
    2466     RTGCPHYS                        GCPhys4MBPSEMask;
    2467 
    2468     /** Pointer to the list of RAM ranges (Phys GC -> Phys HC conversion) - for R3.
    2469      * This is sorted by physical address and contains no overlapping ranges. */
    2470     R3PTRTYPE(PPGMRAMRANGE)         pRamRangesR3;
    2471     /** R0 pointer corresponding to PGM::pRamRangesR3. */
    2472     R0PTRTYPE(PPGMRAMRANGE)         pRamRangesR0;
    2473     /** RC pointer corresponding to PGM::pRamRangesR3. */
    2474     RCPTRTYPE(PPGMRAMRANGE)         pRamRangesRC;
    2475     /** Generation ID for the RAM ranges. This member is incremented every time a RAM
    2476      * range is linked or unlinked. */
    2477     uint32_t volatile               idRamRangesGen;
    2478 
    2479     /** Pointer to the list of ROM ranges - for R3.
    2480      * This is sorted by physical address and contains no overlapping ranges. */
    2481     R3PTRTYPE(PPGMROMRANGE)         pRomRangesR3;
    2482     /** R0 pointer corresponding to PGM::pRomRangesR3. */
    2483     R0PTRTYPE(PPGMROMRANGE)         pRomRangesR0;
    2484     /** RC pointer corresponding to PGM::pRomRangesR3. */
    2485     RCPTRTYPE(PPGMROMRANGE)         pRomRangesRC;
    2486 #if HC_ARCH_BITS == 64
    2487     /** Alignment padding. */
    2488     RTRCPTR                         GCPtrPadding2;
    2489 #endif
    2490 
    2491     /** Pointer to the list of MMIO2 ranges - for R3.
    2492      * Registration order. */
    2493     R3PTRTYPE(PPGMMMIO2RANGE)       pMmio2RangesR3;
    2494 
    2495     /** PGM offset based trees - R3 Ptr. */
    2496     R3PTRTYPE(PPGMTREES)            pTreesR3;
    2497     /** PGM offset based trees - R0 Ptr. */
    2498     R0PTRTYPE(PPGMTREES)            pTreesR0;
    2499     /** PGM offset based trees - RC Ptr. */
    2500     RCPTRTYPE(PPGMTREES)            pTreesRC;
    2501 
    2502     /** Linked list of GC mappings - for RC.
    2503      * The list is sorted ascending on address.
    2504      */
    2505     RCPTRTYPE(PPGMMAPPING)          pMappingsRC;
    2506     /** Linked list of GC mappings - for HC.
    2507      * The list is sorted ascending on address.
    2508      */
    2509     R3PTRTYPE(PPGMMAPPING)          pMappingsR3;
    2510     /** Linked list of GC mappings - for R0.
    2511      * The list is sorted ascending on address.
    2512      */
    2513     R0PTRTYPE(PPGMMAPPING)          pMappingsR0;
    2514 
    2515     /** Pointer to the 5 page CR3 content mapping.
    2516      * The first page is always the CR3 (in some form) while the 4 other pages
    2517      * are used for the PDs in PAE mode. */
    2518     RTGCPTR                         GCPtrCR3Mapping;
    2519 #if HC_ARCH_BITS == 64 && GC_ARCH_BITS == 32
    2520     uint32_t                        u32Alignment1;
    2521 #endif
    2522 
    2523     /** Indicates that PGMR3FinalizeMappings has been called and that further
    2524      * PGMR3MapIntermediate calls will be rejected. */
    2525     bool                            fFinalizedMappings;
    2526     /** If set, no conflict checks are required. */
    2527     bool                            fMappingsFixed;
    2528     /** If set, the mappings were restored as fixed but we were unable to re-fix
    2529      *  them at the old location because of room or address incompatibilities. */
    2530     bool                            fMappingsFixedRestored;
    2531     /** If set, then no mappings are put into the shadow page table.
    2532      * Use pgmMapAreMappingsEnabled() instead of direct access. */
    2533     bool                            fMappingsDisabled;
    2534     /** Size of fixed mapping.
    2535      * This is valid if either fMappingsFixed or fMappingsFixedRestored is set. */
    2536     uint32_t                        cbMappingFixed;
    2537     /** Base address (GC) of fixed mapping.
    2538      * This is valid if either fMappingsFixed or fMappingsFixedRestored is set. */
    2539     RTGCPTR                         GCPtrMappingFixed;
    2540     /** The address of the previous RAM range mapping. */
    2541     RTGCPTR                         GCPtrPrevRamRangeMapping;
    2542 
    2543     /** @name Intermediate Context
    2544      * @{ */
    2545     /** Pointer to the intermediate page directory - Normal. */
    2546     R3PTRTYPE(PX86PD)               pInterPD;
    2547     /** Pointer to the intermediate page tables - Normal.
    2548      * There are two page tables, one for the identity mapping and one for
    2549      * the host context mapping (of the core code). */
    2550     R3PTRTYPE(PX86PT)               apInterPTs[2];
    2551     /** Pointer to the intermediate page tables - PAE. */
    2552     R3PTRTYPE(PX86PTPAE)            apInterPaePTs[2];
    2553     /** Pointer to the intermediate page directories - PAE. */
    2554     R3PTRTYPE(PX86PDPAE)            apInterPaePDs[4];
    2555     /** Pointer to the intermediate page directory pointer table - PAE. */
    2556     R3PTRTYPE(PX86PDPT)             pInterPaePDPT;
    2557     /** Pointer to the intermediate page-map level 4 - AMD64. */
    2558     R3PTRTYPE(PX86PML4)             pInterPaePML4;
    2559     /** Pointer to the intermediate page directory pointer table - AMD64. */
    2560     R3PTRTYPE(PX86PDPT)             pInterPaePDPT64;
    2561     /** The Physical Address (HC) of the intermediate Page Directory - Normal. */
    2562     RTHCPHYS                        HCPhysInterPD;
    2563     /** The Physical Address (HC) of the intermediate Page Directory Pointer Table - PAE. */
    2564     RTHCPHYS                        HCPhysInterPaePDPT;
    2565     /** The Physical Address (HC) of the intermediate Page Map Level 4 table - AMD64. */
    2566     RTHCPHYS                        HCPhysInterPaePML4;
    2567     /** @} */
    2568 
    2569     /** Base address of the dynamic page mapping area.
    2570      * The array is MM_HYPER_DYNAMIC_SIZE bytes big.
    2571      */
    2572     RCPTRTYPE(uint8_t *)            pbDynPageMapBaseGC;
    2573     /** The index of the last entry used in the dynamic page mapping area. */
    2574     RTUINT                          iDynPageMapLast;
    2575     /** Cache containing the last entries in the dynamic page mapping area.
    2576      * The cache covers half of the mapping area. */
    2577     RTHCPHYS                        aHCPhysDynPageMapCache[MM_HYPER_DYNAMIC_SIZE >> (PAGE_SHIFT + 1)];
    2578     /** Keep a lock counter for the full (!) mapping area. */
    2579     uint32_t                        aLockedDynPageMapCache[MM_HYPER_DYNAMIC_SIZE >> (PAGE_SHIFT)];
    2580 
    2581     /** The address of the ring-0 mapping cache if we're making use of it.  */
    2582     RTR0PTR                         pvR0DynMapUsed;
    2583 #if HC_ARCH_BITS == 32
    2584     /** Alignment padding that makes the next member start on an 8 byte boundary. */
    2585     uint32_t                        u32Alignment2;
    2586 #endif
    2587 
    2588     /** PGM critical section.
    2589      * This protects the physical & virtual access handlers, ram ranges,
    2590      * and the page flag updating (some of it anyway).
    2591      */
    2592     PDMCRITSECT                     CritSect;
    2593 
    2594     /** Pointer to SHW+GST mode data (function pointers).
    2595      * The index into this table is made up from the shadow (uShwType) and guest (uGstType) mode types. */
    2596     R3PTRTYPE(PPGMMODEDATA)         paModeData;
    2597 
    2598     /** Shadow Page Pool - R3 Ptr. */
    2599     R3PTRTYPE(PPGMPOOL)             pPoolR3;
    2600     /** Shadow Page Pool - R0 Ptr. */
    2601     R0PTRTYPE(PPGMPOOL)             pPoolR0;
    2602     /** Shadow Page Pool - RC Ptr. */
    2603     RCPTRTYPE(PPGMPOOL)             pPoolRC;
    2604 
    2605     /** We're not in a state which permits writes to guest memory.
    2606      * (Only used in strict builds.) */
    2607     bool                            fNoMorePhysWrites;
    2608     /** Alignment padding that makes the next member start on an 8 byte boundary. */
    2609     bool                            afAlignment3[HC_ARCH_BITS == 32 ? 7: 3];
    2610 
    2611     /**
    2612      * Data associated with managing the ring-3 mappings of the allocation chunks.
    2613      */
    2614     struct
    2615     {
    2616         /** The chunk tree, ordered by chunk id. */
    2617 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    2618         R3PTRTYPE(PAVLU32NODECORE)  pTree;
    2619 #else
    2620         R3R0PTRTYPE(PAVLU32NODECORE) pTree;
    2621 #endif
    2622         /** The chunk age tree, ordered by ageing sequence number. */
    2623         R3PTRTYPE(PAVLLU32NODECORE) pAgeTree;
    2624         /** The chunk mapping TLB. */
    2625         PGMCHUNKR3MAPTLB            Tlb;
    2626         /** The number of mapped chunks. */
    2627         uint32_t                    c;
    2628         /** The maximum number of mapped chunks.
    2629          * @cfgm    PGM/MaxRing3Chunks */
    2630         uint32_t                    cMax;
    2631         /** The current time. */
    2632         uint32_t                    iNow;
    2633         /** Number of pgmR3PhysChunkFindUnmapCandidate calls left to the next ageing. */
    2634         uint32_t                    AgeingCountdown;
    2635     }                               ChunkR3Map;
    2636 
    2637     /**
    2638      * The page mapping TLB for ring-3 and (for the time being) ring-0.
    2639      */
    2640     PGMPAGER3MAPTLB                 PhysTlbHC;
    2641 
    2642     /** @name   The zero page.
    2643      * @{ */
    2644     /** The host physical address of the zero page. */
    2645     RTHCPHYS                        HCPhysZeroPg;
    2646     /** The ring-3 mapping of the zero page. */
    2647     RTR3PTR                         pvZeroPgR3;
    2648     /** The ring-0 mapping of the zero page. */
    2649     RTR0PTR                         pvZeroPgR0;
    2650     /** The GC mapping of the zero page. */
    2651     RTGCPTR                         pvZeroPgRC;
    2652 #if GC_ARCH_BITS != 32
    2653     uint32_t                        u32ZeroAlignment; /**< Alignment padding. */
    2654 #endif
    2655     /** @}*/
    2656 
    2657     /** The number of handy pages. */
    2658     uint32_t                        cHandyPages;
    2659     /**
    2660      * Array of handy pages.
    2661      *
    2662      * This array is used in a two way communication between pgmPhysAllocPage
    2663      * and GMMR0AllocateHandyPages, with PGMR3PhysAllocateHandyPages serving as
    2664      * an intermediary.
    2665      *
    2666      * The size of this array is important, see pgmPhysEnsureHandyPage for details.
    2667      * (The current size of 32 pages means 128 KB of handy memory; see the refill sketch following this structure.)
    2668      */
    2669     GMMPAGEDESC                     aHandyPages[PGM_HANDY_PAGES];
    2670 
    2671     /**
    2672      * Live save data.
    2673      */
    2674     struct
    2675     {
    2676         /** Per type statistics. */
    2677         struct
    2678         {
    2679             /** The number of ready pages.  */
    2680             uint32_t                cReadyPages;
    2681             /** The number of dirty pages. */
    2682             uint32_t                cDirtyPages;
    2683             /** The number of ready zero pages.  */
    2684             uint32_t                cZeroPages;
    2685             /** The number of write monitored pages. */
    2686             uint32_t                cMonitoredPages;
    2687         }                           Rom,
    2688                                     Mmio2,
    2689                                     Ram;
    2690         /** The number of ignored pages in the RAM ranges (i.e. MMIO, MMIO2 and ROM). */
    2691         uint32_t                    cIgnoredPages;
    2692         /** Indicates that a live save operation is active.  */
    2693         bool                        fActive;
    2694         /** Padding. */
    2695         bool                        afReserved[2];
    2696         /** The next history index. */
    2697         uint8_t                     iDirtyPagesHistory;
    2698         /** History of the total amount of dirty pages. */
    2699         uint32_t                    acDirtyPagesHistory[64];
    2700         /** Short term dirty page average. */
    2701         uint32_t                    cDirtyPagesShort;
    2702         /** Long term dirty page average. */
    2703         uint32_t                    cDirtyPagesLong;
    2704         /** The number of saved pages.  This is used to get some kind of estimate of the
    2705          * link speed so we can decide when we're done.  It is reset after the first
    2706          * 7 passes so the speed estimate doesn't get inflated by the initial set of
    2707          * zero pages.   */
    2708         uint64_t                    cSavedPages;
    2709         /** The nanosecond timestamp when cSavedPages was 0. */
    2710         uint64_t                    uSaveStartNS;
    2711         /** Pages per second (for statistics). */
    2712         uint32_t                    cPagesPerSecond;
    2713         uint32_t                    cAlignment;
    2714     } LiveSave;
    2715 
    2716     /** @name   Error injection.
    2717      * @{ */
    2718     /** Inject handy page allocation errors pretending we're completely out of
    2719      * memory. */
    2720     bool volatile                   fErrInjHandyPages;
    2721     /** Padding. */
    2722     bool                            afReserved[3];
    2723     /** @} */
    2724 
    2725     /** @name Release Statistics
    2726      * @{ */
    2727     uint32_t                        cAllPages;          /**< The total number of pages. (Should be Private + Shared + Zero + Pure MMIO.) */
    2728     uint32_t                        cPrivatePages;      /**< The number of private pages. */
    2729     uint32_t                        cSharedPages;       /**< The number of shared pages. */
    2730     uint32_t                        cZeroPages;         /**< The number of zero backed pages. */
    2731     uint32_t                        cPureMmioPages;     /**< The number of pure MMIO pages. */
    2732     uint32_t                        cMonitoredPages;    /**< The number of write monitored pages. */
    2733     uint32_t                        cWrittenToPages;    /**< The number of previously write monitored pages. */
    2734     uint32_t                        cWriteLockedPages;  /**< The number of write locked pages. */
    2735     uint32_t                        cReadLockedPages;   /**< The number of read locked pages. */
    2736 
    2737     /** The number of times we were forced to change the hypervisor region location. */
    2738     STAMCOUNTER                     cRelocations;
    2739     /** @} */
    2740 
    2741 #ifdef VBOX_WITH_STATISTICS /** @todo move this chunk to the heap.  */
    2742     /* R3 only: */
    2743     STAMCOUNTER StatR3DetectedConflicts;            /**< R3: Number of times PGMR3MapHasConflicts() detected a conflict. */
    2744     STAMPROFILE StatR3ResolveConflict;              /**< R3: pgmR3SyncPTResolveConflict() profiling (includes the entire relocation). */
    2745 
    2746     STAMCOUNTER StatRZChunkR3MapTlbHits;            /**< RC/R0: Ring-3/0 chunk mapper TLB hits. */
    2747     STAMCOUNTER StatRZChunkR3MapTlbMisses;          /**< RC/R0: Ring-3/0 chunk mapper TLB misses. */
    2748     STAMCOUNTER StatRZPageMapTlbHits;               /**< RC/R0: Ring-3/0 page mapper TLB hits. */
    2749     STAMCOUNTER StatRZPageMapTlbMisses;             /**< RC/R0: Ring-3/0 page mapper TLB misses. */
    2750     STAMCOUNTER StatPageMapTlbFlushes;              /**< ALL: Ring-3/0 page mapper TLB flushes. */
    2751     STAMCOUNTER StatPageMapTlbFlushEntry;           /**< ALL: Ring-3/0 page mapper TLB single entry flushes. */
    2752     STAMCOUNTER StatR3ChunkR3MapTlbHits;            /**< R3: Ring-3/0 chunk mapper TLB hits. */
    2753     STAMCOUNTER StatR3ChunkR3MapTlbMisses;          /**< R3: Ring-3/0 chunk mapper TLB misses. */
    2754     STAMCOUNTER StatR3PageMapTlbHits;               /**< R3: Ring-3/0 page mapper TLB hits. */
    2755     STAMCOUNTER StatR3PageMapTlbMisses;             /**< R3: Ring-3/0 page mapper TLB misses. */
    2756     STAMPROFILE StatRZSyncCR3HandlerVirtualReset;   /**< RC/R0: Profiling of the virtual handler resets. */
    2757     STAMPROFILE StatRZSyncCR3HandlerVirtualUpdate;  /**< RC/R0: Profiling of the virtual handler updates. */
    2758     STAMPROFILE StatR3SyncCR3HandlerVirtualReset;   /**< R3: Profiling of the virtual handler resets. */
    2759     STAMPROFILE StatR3SyncCR3HandlerVirtualUpdate;  /**< R3: Profiling of the virtual handler updates. */
    2760     STAMCOUNTER StatR3PhysHandlerReset;             /**< R3: The number of times PGMHandlerPhysicalReset is called. */
    2761     STAMCOUNTER StatRZPhysHandlerReset;             /**< RC/R0: The number of times PGMHandlerPhysicalReset is called. */
    2762     STAMPROFILE StatRZVirtHandlerSearchByPhys;      /**< RC/R0: Profiling of pgmHandlerVirtualFindByPhysAddr. */
    2763     STAMPROFILE StatR3VirtHandlerSearchByPhys;      /**< R3: Profiling of pgmHandlerVirtualFindByPhysAddr. */
    2764     STAMCOUNTER StatRZPageReplaceShared;            /**< RC/R0: Times a shared page has been replaced by a private one. */
    2765     STAMCOUNTER StatRZPageReplaceZero;              /**< RC/R0: Times the zero page has been replaced by a private one. */
    2766 /// @todo    STAMCOUNTER StatRZPageHandyAllocs;              /**< RC/R0: The number of times we've executed GMMR3AllocateHandyPages. */
    2767     STAMCOUNTER StatR3PageReplaceShared;            /**< R3: Times a shared page has been replaced by a private one. */
    2768     STAMCOUNTER StatR3PageReplaceZero;              /**< R3: Times the zero page has been replaced by a private one. */
    2769 /// @todo    STAMCOUNTER StatR3PageHandyAllocs;              /**< R3: The number of times we've executed GMMR3AllocateHandyPages. */
    2770 
    2771     /* RC only: */
    2772     STAMCOUNTER StatRCDynMapCacheMisses;            /**< RC: The number of dynamic page mapping cache misses */
    2773     STAMCOUNTER StatRCDynMapCacheHits;              /**< RC: The number of dynamic page mapping cache hits */
    2774     STAMCOUNTER StatRCInvlPgConflict;               /**< RC: Number of times PGMInvalidatePage() detected a mapping conflict. */
    2775     STAMCOUNTER StatRCInvlPgSyncMonCR3;             /**< RC: Number of times PGMInvalidatePage() ran into PGM_SYNC_MONITOR_CR3. */
    2776 
    2777     STAMCOUNTER StatRZPhysRead;
    2778     STAMCOUNTER StatRZPhysReadBytes;
    2779     STAMCOUNTER StatRZPhysWrite;
    2780     STAMCOUNTER StatRZPhysWriteBytes;
    2781     STAMCOUNTER StatR3PhysRead;
    2782     STAMCOUNTER StatR3PhysReadBytes;
    2783     STAMCOUNTER StatR3PhysWrite;
    2784     STAMCOUNTER StatR3PhysWriteBytes;
    2785     STAMCOUNTER StatRCPhysRead;
    2786     STAMCOUNTER StatRCPhysReadBytes;
    2787     STAMCOUNTER StatRCPhysWrite;
    2788     STAMCOUNTER StatRCPhysWriteBytes;
    2789 
    2790     STAMCOUNTER StatRZPhysSimpleRead;
    2791     STAMCOUNTER StatRZPhysSimpleReadBytes;
    2792     STAMCOUNTER StatRZPhysSimpleWrite;
    2793     STAMCOUNTER StatRZPhysSimpleWriteBytes;
    2794     STAMCOUNTER StatR3PhysSimpleRead;
    2795     STAMCOUNTER StatR3PhysSimpleReadBytes;
    2796     STAMCOUNTER StatR3PhysSimpleWrite;
    2797     STAMCOUNTER StatR3PhysSimpleWriteBytes;
    2798     STAMCOUNTER StatRCPhysSimpleRead;
    2799     STAMCOUNTER StatRCPhysSimpleReadBytes;
    2800     STAMCOUNTER StatRCPhysSimpleWrite;
    2801     STAMCOUNTER StatRCPhysSimpleWriteBytes;
    2802 
    2803     STAMCOUNTER StatTrackVirgin;                    /**< The number of first time shadowings. */
    2804     STAMCOUNTER StatTrackAliased;                   /**< The number of times switching to cRef2, i.e. the page is being shadowed by two PTs. */
    2805     STAMCOUNTER StatTrackAliasedMany;               /**< The number of times we're tracking using cRef2. */
    2806     STAMCOUNTER StatTrackAliasedLots;               /**< The number of times we're hitting pages which have overflowed cRef2. */
    2807     STAMCOUNTER StatTrackOverflows;                 /**< The number of times the extent list grows too long. */
    2808     STAMPROFILE StatTrackDeref;                     /**< Profiling of SyncPageWorkerTrackDeref (expensive). */
    2809 #endif
    2810 } PGM;
    2811 #ifndef IN_TSTVMSTRUCTGC /* HACK */
    2812 AssertCompileMemberAlignment(PGM, paDynPageMap32BitPTEsGC, 8);
    2813 AssertCompileMemberAlignment(PGM, GCPtrMappingFixed, sizeof(RTGCPTR));
    2814 AssertCompileMemberAlignment(PGM, HCPhysInterPD, 8);
    2815 AssertCompileMemberAlignment(PGM, aHCPhysDynPageMapCache, 8);
    2816 AssertCompileMemberAlignment(PGM, CritSect, 8);
    2817 AssertCompileMemberAlignment(PGM, ChunkR3Map, 8);
    2818 AssertCompileMemberAlignment(PGM, PhysTlbHC, 8);
    2819 AssertCompileMemberAlignment(PGM, HCPhysZeroPg, 8);
    2820 AssertCompileMemberAlignment(PGM, aHandyPages, 8);
    2821 AssertCompileMemberAlignment(PGM, cRelocations, 8);
    2822 #endif /* !IN_TSTVMSTRUCTGC */
    2823 /** Pointer to the PGM instance data. */
    2824 typedef PGM *PPGM;
    2825 
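The handy-page array documented above (cHandyPages/aHandyPages) is kept topped up ahead of time because the GMM allocation round trip is only possible from ring-3. A hedged sketch of the low-water check; the threshold and the non-ring-3 status code are stand-ins, while PGMR3PhysAllocateHandyPages is the real intermediary named in the member comments:

    /* Editor's sketch: make sure pgmPhysAllocPage() will find a page. */
    static int pgmPhysEnsureHandyPageSketch(PVM pVM)
    {
        if (RT_LIKELY(pVM->pgm.s.cHandyPages > 8 /* hypothetical low-water mark */))
            return VINF_SUCCESS;
    #ifdef IN_RING3
        return PGMR3PhysAllocateHandyPages(pVM);   /* refills aHandyPages via GMM */
    #else
        return VERR_GENERAL_FAILURE;               /* stand-in: the real code defers to ring-3 */
    #endif
    }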
    2826 
    2827 /**
    2828  * Converts a PGMCPU pointer into a VM pointer.
    2829  * @returns Pointer to the VM structure the PGM is part of.
    2830  * @returns Pointer to the VM structure the PGMCPU is part of.
    2831  */
    2832 #define PGMCPU2VM(pPGM)  ( (PVM)((char*)pPGM - pPGM->offVM) )
    2833 
    2834 /**
    2835  * Converts a PGMCPU pointer into a PGM pointer.
    2836  * @returns Pointer to the PGM instance data the PGMCPU is part of.
    2837  * @param   pPGMCpu   Pointer to PGMCPU instance data.
    2838  */
    2839 #define PGMCPU2PGM(pPGMCpu)  ( (PPGM)((char*)pPGMCpu - pPGMCpu->offPGM) )
    2840 
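Like PGM2VM above, both macros walk back from the per-CPU data using stored offsets, so the same source works unchanged in ring-3, ring-0 and raw-mode context. Illustrative usage (variable names are hypothetical):

    PPGM pPGM = PGMCPU2PGM(pPGMCpu);   /* per-VCPU data -> shared PGM data */
    PVM  pVM  = PGMCPU2VM(pPGMCpu);    /* per-VCPU data -> owning VM       */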
    2841 /**
    2842  * PGMCPU Data (part of VMCPU).
    2843  */
    2844 typedef struct PGMCPU
    2845 {
    2846     /** Offset to the VM structure. */
    2847     RTINT                           offVM;
    2848     /** Offset to the VMCPU structure. */
    2849     RTINT                           offVCpu;
    2850     /** Offset of the PGM structure relative to VMCPU. */
    2851     RTINT                           offPGM;
    2852     RTINT                           uPadding0;      /**< structure size alignment. */
    2853 
    2854 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    2855     /** Automatically tracked physical memory mapping set.
    2856      * Ring-0 and strict raw-mode builds. */
    2857     PGMMAPSET                       AutoSet;
    2858 #endif
    2859 
    2860     /** A20 gate mask.
    2861      * Our current approach to A20 emulation is to let REM do it and not bother
    2862      * anywhere else. The interesting guests will be operating with it enabled anyway.
    2863      * But should the need arise, we'll subject physical addresses to this mask. */
    2864     RTGCPHYS                        GCPhysA20Mask;
    2865     /** A20 gate state - boolean! */
    2866     bool                            fA20Enabled;
    2867 
    2868     /** What needs syncing (PGM_SYNC_*).
    2869      * This is used to queue operations for PGMSyncCR3, PGMInvalidatePage,
    2870      * PGMFlushTLB, and PGMR3Load. */
    2871     RTUINT                          fSyncFlags;
    2872 
    2873     /** The shadow paging mode. */
    2874     PGMMODE                         enmShadowMode;
    2875     /** The guest paging mode. */
    2876     PGMMODE                         enmGuestMode;
    2877 
    2878     /** The physical address currently held in the guest CR3 register. */
    2879     RTGCPHYS                        GCPhysCR3;
    2880 
    2881     /** @name 32-bit Guest Paging.
    2882      * @{ */
    2883     /** The guest's page directory, R3 pointer. */
    2884     R3PTRTYPE(PX86PD)               pGst32BitPdR3;
    2885 #ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    2886     /** The guest's page directory, R0 pointer. */
    2887     R0PTRTYPE(PX86PD)               pGst32BitPdR0;
    2888 #endif
    2889     /** The guest's page directory, static RC mapping. */
    2890     RCPTRTYPE(PX86PD)               pGst32BitPdRC;
    2891     /** @} */
    2892 
    2893     /** @name PAE Guest Paging.
    2894      * @{ */
    2895     /** The guest's page directory pointer table, static RC mapping. */
    2896     RCPTRTYPE(PX86PDPT)             pGstPaePdptRC;
    2897     /** The guest's page directory pointer table, R3 pointer. */
    2898     R3PTRTYPE(PX86PDPT)             pGstPaePdptR3;
    2899 #ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    2900     /** The guest's page directory pointer table, R0 pointer. */
    2901     R0PTRTYPE(PX86PDPT)             pGstPaePdptR0;
    2902 #endif
    2903 
    2904     /** The guest's page directories, R3 pointers.
    2905      * These are individual pointers and don't have to be adjacent.
    2906      * These don't have to be up-to-date - use pgmGstGetPaePD() to access them. */
    2907     R3PTRTYPE(PX86PDPAE)            apGstPaePDsR3[4];
    2908     /** The guest's page directories, R0 pointers.
    2909      * Same restrictions as apGstPaePDsR3. */
    2910 #ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    2911     R0PTRTYPE(PX86PDPAE)            apGstPaePDsR0[4];
    2912 #endif
    2913     /** The guest's page directories, static GC mapping.
    2914      * Unlike the R3/R0 array the first entry can be accessed as a 2048 entry PD.
    2915      * These don't have to be up-to-date - use pgmGstGetPaePD() to access them. */
    2916     RCPTRTYPE(PX86PDPAE)            apGstPaePDsRC[4];
    2917     /** The physical addresses of the guest page directories (PAE) pointed to by apGstPaePDsR3/R0/RC. */
    2918     RTGCPHYS                        aGCPhysGstPaePDs[4];
    2919     /** The physical addresses of the monitored guest page directories (PAE). */
    2920     RTGCPHYS                        aGCPhysGstPaePDsMonitored[4];
    2921     /** @} */
    2922 
    2923     /** @name AMD64 Guest Paging.
    2924      * @{ */
    2925     /** The guest's page directory pointer table, R3 pointer. */
    2926     R3PTRTYPE(PX86PML4)             pGstAmd64Pml4R3;
    2927 #ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    2928     /** The guest's page directory pointer table, R0 pointer. */
    2929     R0PTRTYPE(PX86PML4)             pGstAmd64Pml4R0;
    2930 #else
    2931     RTR0PTR                         alignment6b; /**< alignment equalizer. */
    2932 #endif
    2933     /** @} */
    2934 
    2935     /** Pointer to the page of the current active CR3 - R3 Ptr. */
    2936     R3PTRTYPE(PPGMPOOLPAGE)         pShwPageCR3R3;
    2937     /** Pointer to the page of the current active CR3 - R0 Ptr. */
    2938     R0PTRTYPE(PPGMPOOLPAGE)         pShwPageCR3R0;
    2939     /** Pointer to the page of the current active CR3 - RC Ptr. */
    2940     RCPTRTYPE(PPGMPOOLPAGE)         pShwPageCR3RC;
    2941     /** The shadow page pool index of the user table as specified during allocation; useful for freeing root pages. */
    2942     uint32_t                        iShwUser;
    2943     /** The index into the user table (shadowed) as specified during allocation; useful for freeing root pages. */
    2944     uint32_t                        iShwUserTable;
    2945 # if HC_ARCH_BITS == 64
    2946     RTRCPTR                         alignment6; /**< structure size alignment. */
    2947 # endif
    2948     /** @} */
    2949 
    2950     /** @name Function pointers for Shadow paging.
    2951      * @{
    2952      */
    2953     DECLR3CALLBACKMEMBER(int,       pfnR3ShwRelocate,(PVMCPU pVCpu, RTGCPTR offDelta));
    2954     DECLR3CALLBACKMEMBER(int,       pfnR3ShwExit,(PVMCPU pVCpu));
    2955     DECLR3CALLBACKMEMBER(int,       pfnR3ShwGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
    2956     DECLR3CALLBACKMEMBER(int,       pfnR3ShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
    2957 
    2958     DECLRCCALLBACKMEMBER(int,       pfnRCShwGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
    2959     DECLRCCALLBACKMEMBER(int,       pfnRCShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
    2960 
    2961     DECLR0CALLBACKMEMBER(int,       pfnR0ShwGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
    2962     DECLR0CALLBACKMEMBER(int,       pfnR0ShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
    2963 
    2964     /** @} */
    2965 
    2966     /** @name Function pointers for Guest paging.
    2967      * @{
    2968      */
    2969     DECLR3CALLBACKMEMBER(int,       pfnR3GstRelocate,(PVMCPU pVCpu, RTGCPTR offDelta));
    2970     DECLR3CALLBACKMEMBER(int,       pfnR3GstExit,(PVMCPU pVCpu));
    2971     DECLR3CALLBACKMEMBER(int,       pfnR3GstGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
    2972     DECLR3CALLBACKMEMBER(int,       pfnR3GstModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
    2973     DECLR3CALLBACKMEMBER(int,       pfnR3GstGetPDE,(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDEPAE pPde));
    2974     DECLRCCALLBACKMEMBER(int,       pfnRCGstGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
    2975     DECLRCCALLBACKMEMBER(int,       pfnRCGstModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
    2976     DECLRCCALLBACKMEMBER(int,       pfnRCGstGetPDE,(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDEPAE pPde));
    2977 #if HC_ARCH_BITS == 64
    2978     RTRCPTR                         alignment3; /**< structure size alignment. */
    2979 #endif
    2980 
    2981     DECLR0CALLBACKMEMBER(int,       pfnR0GstGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
    2982     DECLR0CALLBACKMEMBER(int,       pfnR0GstModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
    2983     DECLR0CALLBACKMEMBER(int,       pfnR0GstGetPDE,(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDEPAE pPde));
    2984     /** @} */
    2985 
    2986     /** @name Function pointers for Both Shadow and Guest paging.
    2987      * @{
    2988      */
    2989     DECLR3CALLBACKMEMBER(int,       pfnR3BthRelocate,(PVMCPU pVCpu, RTGCPTR offDelta));
    2990     /*                           no pfnR3BthTrap0eHandler */
    2991     DECLR3CALLBACKMEMBER(int,       pfnR3BthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
    2992     DECLR3CALLBACKMEMBER(int,       pfnR3BthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
    2993     DECLR3CALLBACKMEMBER(int,       pfnR3BthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
    2994     DECLR3CALLBACKMEMBER(int,       pfnR3BthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
    2995     DECLR3CALLBACKMEMBER(int,       pfnR3BthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
    2996     DECLR3CALLBACKMEMBER(unsigned,  pfnR3BthAssertCR3,(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
    2997     DECLR3CALLBACKMEMBER(int,       pfnR3BthMapCR3,(PVMCPU pVCpu, RTGCPHYS GCPhysCR3));
    2998     DECLR3CALLBACKMEMBER(int,       pfnR3BthUnmapCR3,(PVMCPU pVCpu));
    2999 
    3000     DECLR0CALLBACKMEMBER(int,       pfnR0BthTrap0eHandler,(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
    3001     DECLR0CALLBACKMEMBER(int,       pfnR0BthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
    3002     DECLR0CALLBACKMEMBER(int,       pfnR0BthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
    3003     DECLR0CALLBACKMEMBER(int,       pfnR0BthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
    3004     DECLR0CALLBACKMEMBER(int,       pfnR0BthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
    3005     DECLR0CALLBACKMEMBER(int,       pfnR0BthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
    3006     DECLR0CALLBACKMEMBER(unsigned,  pfnR0BthAssertCR3,(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
    3007     DECLR0CALLBACKMEMBER(int,       pfnR0BthMapCR3,(PVMCPU pVCpu, RTGCPHYS GCPhysCR3));
    3008     DECLR0CALLBACKMEMBER(int,       pfnR0BthUnmapCR3,(PVMCPU pVCpu));
    3009 
    3010     DECLRCCALLBACKMEMBER(int,       pfnRCBthTrap0eHandler,(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
    3011     DECLRCCALLBACKMEMBER(int,       pfnRCBthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
    3012     DECLRCCALLBACKMEMBER(int,       pfnRCBthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
    3013     DECLRCCALLBACKMEMBER(int,       pfnRCBthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
    3014     DECLRCCALLBACKMEMBER(int,       pfnRCBthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
    3015     DECLRCCALLBACKMEMBER(int,       pfnRCBthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
    3016     DECLRCCALLBACKMEMBER(unsigned,  pfnRCBthAssertCR3,(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
    3017     DECLRCCALLBACKMEMBER(int,       pfnRCBthMapCR3,(PVMCPU pVCpu, RTGCPHYS GCPhysCR3));
    3018     DECLRCCALLBACKMEMBER(int,       pfnRCBthUnmapCR3,(PVMCPU pVCpu));
    3019     RTRCPTR                         alignment2; /**< structure size alignment. */
    3020     /** @} */
    3021 
    3022     /** For saving stack space, the disassembler state is allocated here instead of
    3023      * on the stack.
    3024      * @note The DISCPUSTATE structure is not R3/R0/RZ clean!  */
    3025     union
    3026     {
    3027         /** The disassembler scratch space. */
    3028         DISCPUSTATE                 DisState;
    3029         /** Padding. */
    3030         uint8_t                     abDisStatePadding[DISCPUSTATE_PADDING_SIZE];
    3031     };
    3032 
    3033     /** The number of PGM pool access handler calls. */
    3034     uint64_t                        cPoolAccessHandler;
    3035 
    3036     /** @name Release Statistics
    3037      * @{ */
    3038     /** The number of times the guest has switched mode since last reset or statistics reset. */
    3039     STAMCOUNTER                     cGuestModeChanges;
    3040     /** @} */
    3041 
    3042 #ifdef VBOX_WITH_STATISTICS /** @todo move this chunk to the heap.  */
    3043     /** @name Statistics
    3044      * @{ */
    3045     /** RC: Which statistic this \#PF should be attributed to. */
    3046     RCPTRTYPE(PSTAMPROFILE)         pStatTrap0eAttributionRC;
    3047     RTRCPTR                         padding0;
    3048     /** R0: Which statistic this \#PF should be attributed to. */
    3049     R0PTRTYPE(PSTAMPROFILE)         pStatTrap0eAttributionR0;
    3050     RTR0PTR                         padding1;
    3051 
    3052     /* Common */
    3053     STAMCOUNTER StatSyncPtPD[X86_PG_ENTRIES];       /**< SyncPT - PD distribution. */
    3054     STAMCOUNTER StatSyncPagePD[X86_PG_ENTRIES];     /**< SyncPage - PD distribution. */
    3055 
    3056     /* R0 only: */
    3057     STAMCOUNTER StatR0DynMapMigrateInvlPg;          /**< R0: invlpg in PGMDynMapMigrateAutoSet. */
    3058     STAMPROFILE StatR0DynMapGCPageInl;              /**< R0: Calls to pgmR0DynMapGCPageInlined. */
    3059     STAMCOUNTER StatR0DynMapGCPageInlHits;          /**< R0: Hash table lookup hits. */
    3060     STAMCOUNTER StatR0DynMapGCPageInlMisses;        /**< R0: Misses that fall back to code common with PGMDynMapHCPage. */
    3061     STAMCOUNTER StatR0DynMapGCPageInlRamHits;       /**< R0: 1st ram range hits. */
    3062     STAMCOUNTER StatR0DynMapGCPageInlRamMisses;     /**< R0: 1st ram range misses, takes slow path. */
    3063     STAMPROFILE StatR0DynMapHCPageInl;              /**< R0: Calls to pgmR0DynMapHCPageInlined. */
    3064     STAMCOUNTER StatR0DynMapHCPageInlHits;          /**< R0: Hash table lookup hits. */
    3065     STAMCOUNTER StatR0DynMapHCPageInlMisses;        /**< R0: Misses that fall back to code common with PGMDynMapHCPage. */
    3066     STAMPROFILE StatR0DynMapHCPage;                 /**< R0: Calls to PGMDynMapHCPage. */
    3067     STAMCOUNTER StatR0DynMapSetOptimize;            /**< R0: Calls to pgmDynMapOptimizeAutoSet. */
    3068     STAMCOUNTER StatR0DynMapSetSearchFlushes;       /**< R0: Set searches resorting to subset flushes. */
    3069     STAMCOUNTER StatR0DynMapSetSearchHits;          /**< R0: Set search hits. */
    3070     STAMCOUNTER StatR0DynMapSetSearchMisses;        /**< R0: Set search misses. */
    3071     STAMCOUNTER StatR0DynMapPage;                   /**< R0: Calls to pgmR0DynMapPage. */
    3072     STAMCOUNTER StatR0DynMapPageHits0;              /**< R0: Hits at iPage+0. */
    3073     STAMCOUNTER StatR0DynMapPageHits1;              /**< R0: Hits at iPage+1. */
    3074     STAMCOUNTER StatR0DynMapPageHits2;              /**< R0: Hits at iPage+2. */
    3075     STAMCOUNTER StatR0DynMapPageInvlPg;             /**< R0: invlpg. */
    3076     STAMCOUNTER StatR0DynMapPageSlow;               /**< R0: Calls to pgmR0DynMapPageSlow. */
    3077     STAMCOUNTER StatR0DynMapPageSlowLoopHits;       /**< R0: Hits in the pgmR0DynMapPageSlow search loop. */
    3078     STAMCOUNTER StatR0DynMapPageSlowLoopMisses;     /**< R0: Misses in the pgmR0DynMapPageSlow search loop. */
    3079     //STAMCOUNTER StatR0DynMapPageSlowLostHits;       /**< R0: Lost hits. */
    3080     STAMCOUNTER StatR0DynMapSubsets;                /**< R0: Times PGMDynMapPushAutoSubset was called. */
    3081     STAMCOUNTER StatR0DynMapPopFlushes;             /**< R0: Times PGMDynMapPopAutoSubset flushes the subset. */
    3082     STAMCOUNTER aStatR0DynMapSetSize[11];           /**< R0: Set size distribution. */
    3083 
    3084     /* RZ only: */
    3085     STAMPROFILE StatRZTrap0e;                       /**< RC/R0: PGMTrap0eHandler() profiling. */
    3086     STAMPROFILE StatRZTrap0eTimeCheckPageFault;
    3087     STAMPROFILE StatRZTrap0eTimeSyncPT;
    3088     STAMPROFILE StatRZTrap0eTimeMapping;
    3089     STAMPROFILE StatRZTrap0eTimeOutOfSync;
    3090     STAMPROFILE StatRZTrap0eTimeHandlers;
    3091     STAMPROFILE StatRZTrap0eTime2CSAM;              /**< RC/R0: Profiling of the Trap0eHandler body when the cause is CSAM. */
    3092     STAMPROFILE StatRZTrap0eTime2DirtyAndAccessed;  /**< RC/R0: Profiling of the Trap0eHandler body when the cause is dirty and/or accessed bit emulation. */
    3093     STAMPROFILE StatRZTrap0eTime2GuestTrap;         /**< RC/R0: Profiling of the Trap0eHandler body when the cause is a guest trap. */
    3094     STAMPROFILE StatRZTrap0eTime2HndPhys;           /**< RC/R0: Profiling of the Trap0eHandler body when the cause is a physical handler. */
    3095     STAMPROFILE StatRZTrap0eTime2HndVirt;           /**< RC/R0: Profiling of the Trap0eHandler body when the cause is a virtual handler. */
    3096     STAMPROFILE StatRZTrap0eTime2HndUnhandled;      /**< RC/R0: Profiling of the Trap0eHandler body when the cause is access outside the monitored areas of a monitored page. */
    3097     STAMPROFILE StatRZTrap0eTime2Misc;              /**< RC/R0: Profiling of the Trap0eHandler body when the cause is not known. */
    3098     STAMPROFILE StatRZTrap0eTime2OutOfSync;         /**< RC/R0: Profiling of the Trap0eHandler body when the cause is an out-of-sync page. */
    3099     STAMPROFILE StatRZTrap0eTime2OutOfSyncHndPhys;  /**< RC/R0: Profiling of the Trap0eHandler body when the cause is an out-of-sync physical handler page. */
    3100     STAMPROFILE StatRZTrap0eTime2OutOfSyncHndVirt;  /**< RC/R0: Profiling of the Trap0eHandler body when the cause is an out-of-sync virtual handler page. */
    3101     STAMPROFILE StatRZTrap0eTime2OutOfSyncHndObs;   /**< RC/R0: Profiling of the Trap0eHandler body when the cause is an obsolete handler page. */
    3102     STAMPROFILE StatRZTrap0eTime2SyncPT;            /**< RC/R0: Profiling of the Trap0eHandler body when the cause is lazy syncing of a PT. */
    3103     STAMCOUNTER StatRZTrap0eConflicts;              /**< RC/R0: The number of times \#PF was caused by an undetected conflict. */
    3104     STAMCOUNTER StatRZTrap0eHandlersMapping;        /**< RC/R0: Number of traps due to access handlers in mappings. */
    3105     STAMCOUNTER StatRZTrap0eHandlersOutOfSync;      /**< RC/R0: Number of out-of-sync handled pages. */
    3106     STAMCOUNTER StatRZTrap0eHandlersPhysical;       /**< RC/R0: Number of traps due to physical access handlers. */
    3107     STAMCOUNTER StatRZTrap0eHandlersVirtual;        /**< RC/R0: Number of traps due to virtual access handlers. */
    3108     STAMCOUNTER StatRZTrap0eHandlersVirtualByPhys;  /**< RC/R0: Number of traps due to virtual access handlers found by physical address. */
    3109     STAMCOUNTER StatRZTrap0eHandlersVirtualUnmarked;/**< RC/R0: Number of traps due to virtual access handlers found by virtual address (without proper physical flags). */
    3110     STAMCOUNTER StatRZTrap0eHandlersUnhandled;      /**< RC/R0: Number of traps due to access outside range of monitored page(s). */
    3111     STAMCOUNTER StatRZTrap0eHandlersInvalid;        /**< RC/R0: Number of traps due to access to invalid physical memory. */
    3112     STAMCOUNTER StatRZTrap0eUSNotPresentRead;       /**< RC/R0: \#PF err kind */
    3113     STAMCOUNTER StatRZTrap0eUSNotPresentWrite;      /**< RC/R0: \#PF err kind */
    3114     STAMCOUNTER StatRZTrap0eUSWrite;                /**< RC/R0: \#PF err kind */
    3115     STAMCOUNTER StatRZTrap0eUSReserved;             /**< RC/R0: \#PF err kind */
    3116     STAMCOUNTER StatRZTrap0eUSNXE;                  /**< RC/R0: \#PF err kind */
    3117     STAMCOUNTER StatRZTrap0eUSRead;                 /**< RC/R0: \#PF err kind */
    3118     STAMCOUNTER StatRZTrap0eSVNotPresentRead;       /**< RC/R0: \#PF err kind */
    3119     STAMCOUNTER StatRZTrap0eSVNotPresentWrite;      /**< RC/R0: \#PF err kind */
    3120     STAMCOUNTER StatRZTrap0eSVWrite;                /**< RC/R0: \#PF err kind */
    3121     STAMCOUNTER StatRZTrap0eSVReserved;             /**< RC/R0: \#PF err kind */
    3122     STAMCOUNTER StatRZTrap0eSNXE;                   /**< RC/R0: \#PF err kind */
    3123     STAMCOUNTER StatRZTrap0eGuestPF;                /**< RC/R0: Real guest \#PFs. */
    3124     STAMCOUNTER StatRZTrap0eGuestPFUnh;             /**< RC/R0: Real guest \#PF ending up at the end of the \#PF code. */
    3125     STAMCOUNTER StatRZTrap0eGuestPFMapping;         /**< RC/R0: Real guest \#PF to HMA or other mapping. */
    3126     STAMCOUNTER StatRZTrap0eWPEmulInRZ;             /**< RC/R0: WP=0 virtualization trap, handled. */
    3127     STAMCOUNTER StatRZTrap0eWPEmulToR3;             /**< RC/R0: WP=0 virtualization trap, chickened out. */
    3128     STAMCOUNTER StatRZTrap0ePD[X86_PG_ENTRIES];     /**< RC/R0: PD distribution of the \#PFs. */
    3129     STAMCOUNTER StatRZGuestCR3WriteHandled;         /**< RC/R0: The number of times WriteHandlerCR3() was successfully called. */
    3130     STAMCOUNTER StatRZGuestCR3WriteUnhandled;       /**< RC/R0: The number of times WriteHandlerCR3() was called and we had to fall back to the recompiler. */
    3131     STAMCOUNTER StatRZGuestCR3WriteConflict;        /**< RC/R0: The number of times WriteHandlerCR3() was called and a conflict was detected. */
    3132     STAMCOUNTER StatRZGuestROMWriteHandled;         /**< RC/R0: The number of times pgmPhysRomWriteHandler() was successfully called. */
    3133     STAMCOUNTER StatRZGuestROMWriteUnhandled;       /**< RC/R0: The number of times pgmPhysRomWriteHandler() was called and we had to fall back to the recompiler */
    3134 
    3135     /* HC - R3 and (maybe) R0: */
    3136 
    3137     /* RZ & R3: */
    3138     STAMPROFILE StatRZSyncCR3;                      /**< RC/R0: PGMSyncCR3() profiling. */
    3139     STAMPROFILE StatRZSyncCR3Handlers;              /**< RC/R0: Profiling of the PGMSyncCR3() update handler section. */
    3140     STAMCOUNTER StatRZSyncCR3Global;                /**< RC/R0: The number of global CR3 syncs. */
    3141     STAMCOUNTER StatRZSyncCR3NotGlobal;             /**< RC/R0: The number of non-global CR3 syncs. */
    3142     STAMCOUNTER StatRZSyncCR3DstCacheHit;           /**< RC/R0: The number of times we got some kind of cache hit on a page table. */
    3143     STAMCOUNTER StatRZSyncCR3DstFreed;              /**< RC/R0: The number of times we've had to free a shadow entry. */
    3144     STAMCOUNTER StatRZSyncCR3DstFreedSrcNP;         /**< RC/R0: The number of times we've had to free a shadow entry for which the source entry was not present. */
    3145     STAMCOUNTER StatRZSyncCR3DstNotPresent;         /**< RC/R0: The number of times we've encountered a not present shadow entry for a present guest entry. */
    3146     STAMCOUNTER StatRZSyncCR3DstSkippedGlobalPD;    /**< RC/R0: The number of times a global page directory wasn't flushed. */
    3147     STAMCOUNTER StatRZSyncCR3DstSkippedGlobalPT;    /**< RC/R0: The number of times a page table with only global entries wasn't flushed. */
    3148     STAMPROFILE StatRZSyncPT;                       /**< RC/R0: PGMSyncPT() profiling. */
    3149     STAMCOUNTER StatRZSyncPTFailed;                 /**< RC/R0: The number of times PGMSyncPT() failed. */
    3150     STAMCOUNTER StatRZSyncPT4K;                     /**< RC/R0: Number of 4KB syncs. */
    3151     STAMCOUNTER StatRZSyncPT4M;                     /**< RC/R0: Number of 4MB syncs. */
    3152     STAMCOUNTER StatRZSyncPagePDNAs;                /**< RC/R0: The number of times we've marked a PD not present from SyncPage to virtualize the accessed bit. */
    3153     STAMCOUNTER StatRZSyncPagePDOutOfSync;          /**< RC/R0: The number of times we've encountered an out-of-sync PD in SyncPage. */
    3154     STAMCOUNTER StatRZAccessedPage;                 /**< RC/R0: The number of pages marked not present for accessed bit emulation. */
    3155     STAMPROFILE StatRZDirtyBitTracking;             /**< RC/R0: Profiling the dirty bit tracking in CheckPageFault().. */
    3156     STAMCOUNTER StatRZDirtyPage;                    /**< RC/R0: The number of pages marked read-only for dirty bit tracking. */
    3157     STAMCOUNTER StatRZDirtyPageBig;                 /**< RC/R0: The number of pages marked read-only for dirty bit tracking. */
    3158     STAMCOUNTER StatRZDirtyPageSkipped;             /**< RC/R0: The number of pages already dirty or readonly. */
    3159     STAMCOUNTER StatRZDirtyPageTrap;                /**< RC/R0: The number of traps generated for dirty bit tracking. */
    3160     STAMCOUNTER StatRZDirtyPageStale;               /**< RC/R0: The number of traps generated for dirty bit tracking. (stale tlb entries) */
    3161     STAMCOUNTER StatRZDirtyTrackRealPF;             /**< RC/R0: The number of real pages faults during dirty bit tracking. */
    3162     STAMCOUNTER StatRZDirtiedPage;                  /**< RC/R0: The number of pages marked dirty because of write accesses. */
    3163     STAMCOUNTER StatRZPageAlreadyDirty;             /**< RC/R0: The number of pages already marked dirty because of write accesses. */
    3164     STAMPROFILE StatRZInvalidatePage;               /**< RC/R0: PGMInvalidatePage() profiling. */
    3165     STAMCOUNTER StatRZInvalidatePage4KBPages;       /**< RC/R0: The number of times PGMInvalidatePage() was called for a 4KB page. */
    3166     STAMCOUNTER StatRZInvalidatePage4MBPages;       /**< RC/R0: The number of times PGMInvalidatePage() was called for a 4MB page. */
    3167     STAMCOUNTER StatRZInvalidatePage4MBPagesSkip;   /**< RC/R0: The number of times PGMInvalidatePage() skipped a 4MB page. */
    3168     STAMCOUNTER StatRZInvalidatePagePDMappings;     /**< RC/R0: The number of times PGMInvalidatePage() was called for a page directory containing mappings (no conflict). */
    3169     STAMCOUNTER StatRZInvalidatePagePDNAs;          /**< RC/R0: The number of times PGMInvalidatePage() was called for a not accessed page directory. */
    3170     STAMCOUNTER StatRZInvalidatePagePDNPs;          /**< RC/R0: The number of times PGMInvalidatePage() was called for a not present page directory. */
    3171     STAMCOUNTER StatRZInvalidatePagePDOutOfSync;    /**< RC/R0: The number of times PGMInvalidatePage() was called for an out of sync page directory. */
    3172     STAMCOUNTER StatRZInvalidatePageSkipped;        /**< RC/R0: The number of times PGMInvalidatePage() was skipped due to a not present shw or pending SyncCR3. */
    3173     STAMCOUNTER StatRZPageOutOfSyncUser;            /**< RC/R0: The number of times an out-of-sync user page was detected in \#PF or VerifyAccessSyncPage. */
    3174     STAMCOUNTER StatRZPageOutOfSyncSupervisor;      /**< RC/R0: The number of times an out-of-sync supervisor page was detected in \#PF or VerifyAccessSyncPage. */
    3175     STAMCOUNTER StatRZPageOutOfSyncUserWrite;       /**< RC/R0: The number of times an out-of-sync user page was detected in \#PF. */
    3176     STAMCOUNTER StatRZPageOutOfSyncSupervisorWrite; /**< RC/R0: The number of times an out-of-sync supervisor page was detected in \#PF. */
    3177     STAMPROFILE StatRZPrefetch;                     /**< RC/R0: PGMPrefetchPage. */
    3178     STAMPROFILE StatRZFlushTLB;                     /**< RC/R0: Profiling of the PGMFlushTLB() body. */
    3179     STAMCOUNTER StatRZFlushTLBNewCR3;               /**< RC/R0: The number of times PGMFlushTLB was called with a new CR3, non-global. (switch) */
    3180     STAMCOUNTER StatRZFlushTLBNewCR3Global;         /**< RC/R0: The number of times PGMFlushTLB was called with a new CR3, global. (switch) */
    3181     STAMCOUNTER StatRZFlushTLBSameCR3;              /**< RC/R0: The number of times PGMFlushTLB was called with the same CR3, non-global. (flush) */
    3182     STAMCOUNTER StatRZFlushTLBSameCR3Global;        /**< RC/R0: The number of times PGMFlushTLB was called with the same CR3, global. (flush) */
    3183     STAMPROFILE StatRZGstModifyPage;                /**< RC/R0: Profiling of the PGMGstModifyPage() body */
    3184 
    3185     STAMPROFILE StatR3SyncCR3;                      /**< R3: PGMSyncCR3() profiling. */
    3186     STAMPROFILE StatR3SyncCR3Handlers;              /**< R3: Profiling of the PGMSyncCR3() update handler section. */
    3187     STAMCOUNTER StatR3SyncCR3Global;                /**< R3: The number of global CR3 syncs. */
    3188     STAMCOUNTER StatR3SyncCR3NotGlobal;             /**< R3: The number of non-global CR3 syncs. */
    3189     STAMCOUNTER StatR3SyncCR3DstFreed;              /**< R3: The number of times we've had to free a shadow entry. */
    3190     STAMCOUNTER StatR3SyncCR3DstFreedSrcNP;         /**< R3: The number of times we've had to free a shadow entry for which the source entry was not present. */
    3191     STAMCOUNTER StatR3SyncCR3DstNotPresent;         /**< R3: The number of times we've encountered a not present shadow entry for a present guest entry. */
    3192     STAMCOUNTER StatR3SyncCR3DstSkippedGlobalPD;    /**< R3: The number of times a global page directory wasn't flushed. */
    3193     STAMCOUNTER StatR3SyncCR3DstSkippedGlobalPT;    /**< R3: The number of times a page table with only global entries wasn't flushed. */
    3194     STAMCOUNTER StatR3SyncCR3DstCacheHit;           /**< R3: The number of times we got some kind of cache hit on a page table. */
    3195     STAMPROFILE StatR3SyncPT;                       /**< R3: PGMSyncPT() profiling. */
    3196     STAMCOUNTER StatR3SyncPTFailed;                 /**< R3: The number of times PGMSyncPT() failed. */
    3197     STAMCOUNTER StatR3SyncPT4K;                     /**< R3: Number of 4KB syncs. */
    3198     STAMCOUNTER StatR3SyncPT4M;                     /**< R3: Number of 4MB syncs. */
    3199     STAMCOUNTER StatR3SyncPagePDNAs;                /**< R3: The number of time we've marked a PD not present from SyncPage to virtualize the accessed bit. */
    3200     STAMCOUNTER StatR3SyncPagePDOutOfSync;          /**< R3: The number of time we've encountered an out-of-sync PD in SyncPage. */
    3201     STAMCOUNTER StatR3AccessedPage;                 /**< R3: The number of pages marked not present for accessed bit emulation. */
    3202     STAMPROFILE StatR3DirtyBitTracking;             /**< R3: Profiling the dirty bit tracking in CheckPageFault(). */
    3203     STAMCOUNTER StatR3DirtyPage;                    /**< R3: The number of pages marked read-only for dirty bit tracking. */
     3204     STAMCOUNTER StatR3DirtyPageBig;                 /**< R3: The number of big pages marked read-only for dirty bit tracking. */
     3205     STAMCOUNTER StatR3DirtyPageSkipped;             /**< R3: The number of pages already dirty or read-only. */
     3206     STAMCOUNTER StatR3DirtyPageTrap;                /**< R3: The number of traps generated for dirty bit tracking. */
     3207     STAMCOUNTER StatR3DirtyTrackRealPF;             /**< R3: The number of real page faults during dirty bit tracking. */
    3208     STAMCOUNTER StatR3DirtiedPage;                  /**< R3: The number of pages marked dirty because of write accesses. */
    3209     STAMCOUNTER StatR3PageAlreadyDirty;             /**< R3: The number of pages already marked dirty because of write accesses. */
    3210     STAMPROFILE StatR3InvalidatePage;               /**< R3: PGMInvalidatePage() profiling. */
    3211     STAMCOUNTER StatR3InvalidatePage4KBPages;       /**< R3: The number of times PGMInvalidatePage() was called for a 4KB page. */
    3212     STAMCOUNTER StatR3InvalidatePage4MBPages;       /**< R3: The number of times PGMInvalidatePage() was called for a 4MB page. */
    3213     STAMCOUNTER StatR3InvalidatePage4MBPagesSkip;   /**< R3: The number of times PGMInvalidatePage() skipped a 4MB page. */
    3214     STAMCOUNTER StatR3InvalidatePagePDNAs;          /**< R3: The number of times PGMInvalidatePage() was called for a not accessed page directory. */
    3215     STAMCOUNTER StatR3InvalidatePagePDNPs;          /**< R3: The number of times PGMInvalidatePage() was called for a not present page directory. */
    3216     STAMCOUNTER StatR3InvalidatePagePDMappings;     /**< R3: The number of times PGMInvalidatePage() was called for a page directory containing mappings (no conflict). */
    3217     STAMCOUNTER StatR3InvalidatePagePDOutOfSync;    /**< R3: The number of times PGMInvalidatePage() was called for an out of sync page directory. */
     3218     STAMCOUNTER StatR3InvalidatePageSkipped;        /**< R3: The number of times PGMInvalidatePage() was skipped due to a not present shw or a pending SyncCR3. */
     3219     STAMCOUNTER StatR3PageOutOfSyncUser;            /**< R3: The number of times an out-of-sync user page was detected in \#PF or VerifyAccessSyncPage. */
     3220     STAMCOUNTER StatR3PageOutOfSyncSupervisor;      /**< R3: The number of times an out-of-sync supervisor page was detected in \#PF or VerifyAccessSyncPage. */
     3221     STAMCOUNTER StatR3PageOutOfSyncUserWrite;       /**< R3: The number of times an out-of-sync user page was detected in \#PF. */
     3222     STAMCOUNTER StatR3PageOutOfSyncSupervisorWrite; /**< R3: The number of times an out-of-sync supervisor page was detected in \#PF. */
    3223     STAMPROFILE StatR3Prefetch;                     /**< R3: PGMPrefetchPage. */
    3224     STAMPROFILE StatR3FlushTLB;                     /**< R3: Profiling of the PGMFlushTLB() body. */
    3225     STAMCOUNTER StatR3FlushTLBNewCR3;               /**< R3: The number of times PGMFlushTLB was called with a new CR3, non-global. (switch) */
    3226     STAMCOUNTER StatR3FlushTLBNewCR3Global;         /**< R3: The number of times PGMFlushTLB was called with a new CR3, global. (switch) */
    3227     STAMCOUNTER StatR3FlushTLBSameCR3;              /**< R3: The number of times PGMFlushTLB was called with the same CR3, non-global. (flush) */
    3228     STAMCOUNTER StatR3FlushTLBSameCR3Global;        /**< R3: The number of times PGMFlushTLB was called with the same CR3, global. (flush) */
    3229     STAMPROFILE StatR3GstModifyPage;                /**< R3: Profiling of the PGMGstModifyPage() body */
    3230     /** @} */
    3231 #endif /* VBOX_WITH_STATISTICS */
    3232 } PGMCPU;
    3233 /** Pointer to the per-cpu PGM data. */
    3234 typedef PGMCPU *PPGMCPU;
    3235 
    3236 
    3237 /** @name PGM::fSyncFlags Flags
    3238  * @{
    3239  */
    3240 /** Updates the virtual access handler state bit in PGMPAGE. */
    3241 #define PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL        RT_BIT(0)
    3242 /** Always sync CR3. */
    3243 #define PGM_SYNC_ALWAYS                         RT_BIT(1)
    3244 /** Check monitoring on next CR3 (re)load and invalidate page.
    3245  * @todo This is obsolete now. Remove after 2.2.0 is branched off. */
    3246 #define PGM_SYNC_MONITOR_CR3                    RT_BIT(2)
    3247 /** Check guest mapping in SyncCR3. */
    3248 #define PGM_SYNC_MAP_CR3                        RT_BIT(3)
     3249 /** Clear the page pool (a lightweight flush). */
    3250 #define PGM_SYNC_CLEAR_PGM_POOL_BIT             8
    3251 #define PGM_SYNC_CLEAR_PGM_POOL                 RT_BIT(PGM_SYNC_CLEAR_PGM_POOL_BIT)
    3252 /** @} */
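
For readers skimming the definitions above: these are simple bit flags combined into the per-CPU fSyncFlags word. A minimal, self-contained sketch of how such RT_BIT()-style flags are set, tested, and cleared; the MY_* macros and main() are illustrative stand-ins, not VBox code:

    #include <stdint.h>
    #include <stdio.h>

    #define MY_BIT(bit)             (1U << (bit))   /* stand-in for RT_BIT() */
    #define MY_SYNC_ALWAYS          MY_BIT(1)       /* mirrors PGM_SYNC_ALWAYS */
    #define MY_SYNC_CLEAR_POOL_BIT  8
    #define MY_SYNC_CLEAR_POOL      MY_BIT(MY_SYNC_CLEAR_POOL_BIT)

    int main(void)
    {
        uint32_t fSyncFlags = MY_SYNC_ALWAYS;

        fSyncFlags |= MY_SYNC_CLEAR_POOL;           /* request a lightweight pool clear */
        if (fSyncFlags & MY_SYNC_CLEAR_POOL)
            printf("pool clear pending (bit %d)\n", MY_SYNC_CLEAR_POOL_BIT);
        fSyncFlags &= ~MY_SYNC_CLEAR_POOL;          /* request handled, clear the bit */
        return fSyncFlags == MY_SYNC_ALWAYS ? 0 : 1;
    }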
    3253 
    3254 
    3255 RT_C_DECLS_BEGIN
    3256 
    3257 int             pgmLock(PVM pVM);
    3258 void            pgmUnlock(PVM pVM);
    3259 
    3260 int             pgmR3MappingsFixInternal(PVM pVM, RTGCPTR GCPtrBase, uint32_t cb);
    3261 int             pgmR3SyncPTResolveConflict(PVM pVM, PPGMMAPPING pMapping, PX86PD pPDSrc, RTGCPTR GCPtrOldMapping);
    3262 int             pgmR3SyncPTResolveConflictPAE(PVM pVM, PPGMMAPPING pMapping, RTGCPTR GCPtrOldMapping);
    3263 PPGMMAPPING     pgmGetMapping(PVM pVM, RTGCPTR GCPtr);
    3264 int             pgmMapResolveConflicts(PVM pVM);
    3265 DECLCALLBACK(void) pgmR3MapInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
    3266 
    3267 void            pgmR3HandlerPhysicalUpdateAll(PVM pVM);
    3268 bool            pgmHandlerPhysicalIsAll(PVM pVM, RTGCPHYS GCPhys);
    3269 void            pgmHandlerPhysicalResetAliasedPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhysPage);
    3270 int             pgmHandlerVirtualFindByPhysAddr(PVM pVM, RTGCPHYS GCPhys, PPGMVIRTHANDLER *ppVirt, unsigned *piPage);
    3271 DECLCALLBACK(int) pgmHandlerVirtualResetOne(PAVLROGCPTRNODECORE pNode, void *pvUser);
    3272 #if defined(VBOX_STRICT) || defined(LOG_ENABLED)
    3273 void            pgmHandlerVirtualDumpPhysPages(PVM pVM);
    3274 #else
    3275 # define pgmHandlerVirtualDumpPhysPages(a) do { } while (0)
    3276 #endif
    3277 DECLCALLBACK(void) pgmR3InfoHandlers(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
    3278 int             pgmR3InitSavedState(PVM pVM, uint64_t cbRam);
    3279 
    3280 int             pgmPhysAllocPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys);
    3281 int             pgmPhysPageLoadIntoTlb(PPGM pPGM, RTGCPHYS GCPhys);
    3282 int             pgmPhysPageLoadIntoTlbWithPage(PPGM pPGM, PPGMPAGE pPage, RTGCPHYS GCPhys);
    3283 void            pgmPhysPageMakeWriteMonitoredWritable(PVM pVM, PPGMPAGE pPage);
    3284 int             pgmPhysPageMakeWritable(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys);
    3285 int             pgmPhysPageMakeWritableUnlocked(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys);
    3286 int             pgmPhysPageMakeWritableAndMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv);
    3287 int             pgmPhysPageMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv);
    3288 int             pgmPhysPageMapReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const **ppv);
    3289 int             pgmPhysPageMapByPageID(PVM pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv);
    3290 int             pgmPhysGCPhys2CCPtrInternal(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv);
    3291 int             pgmPhysGCPhys2CCPtrInternalReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv);
    3292 VMMDECL(int)    pgmPhysRomWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser);
    3293 #ifdef IN_RING3
    3294 void            pgmR3PhysRelinkRamRanges(PVM pVM);
    3295 int             pgmR3PhysRamPreAllocate(PVM pVM);
    3296 int             pgmR3PhysRamReset(PVM pVM);
    3297 int             pgmR3PhysRomReset(PVM pVM);
    3298 int             pgmR3PhysChunkMap(PVM pVM, uint32_t idChunk, PPPGMCHUNKR3MAP ppChunk);
    3299 
    3300 int             pgmR3PoolInit(PVM pVM);
    3301 void            pgmR3PoolRelocate(PVM pVM);
    3302 void            pgmR3PoolResetUnpluggedCpu(PVM pVM, PVMCPU pVCpu);
    3303 void            pgmR3PoolReset(PVM pVM);
    3304 void            pgmR3PoolClearAll(PVM pVM);
    3305 
    3306 #endif /* IN_RING3 */
    3307 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    3308 int             pgmR0DynMapHCPageCommon(PVM pVM, PPGMMAPSET pSet, RTHCPHYS HCPhys, void **ppv);
    3309 #endif
    3310 int             pgmPoolAllocEx(PVM pVM, RTGCPHYS GCPhys, PGMPOOLKIND enmKind, PGMPOOLACCESS enmAccess, uint16_t iUser, uint32_t iUserTable, PPPGMPOOLPAGE ppPage, bool fLockPage = false);
    3311 
    3312 DECLINLINE(int) pgmPoolAlloc(PVM pVM, RTGCPHYS GCPhys, PGMPOOLKIND enmKind, uint16_t iUser, uint32_t iUserTable, PPPGMPOOLPAGE ppPage, bool fLockPage = false)
    3313 {
    3314     return pgmPoolAllocEx(pVM, GCPhys, enmKind, PGMPOOLACCESS_DONTCARE, iUser, iUserTable, ppPage, fLockPage);
    3315 }
    3316 
    3317 void            pgmPoolFree(PVM pVM, RTHCPHYS HCPhys, uint16_t iUser, uint32_t iUserTable);
    3318 void            pgmPoolFreeByPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage, uint16_t iUser, uint32_t iUserTable);
     3319 int             pgmPoolFlushPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage, bool fFlush = true /* DO NOT USE false UNLESS YOU KNOW WHAT YOU'RE DOING!! */);
    3320 void            pgmPoolFlushPageByGCPhys(PVM pVM, RTGCPHYS GCPhys);
    3321 PPGMPOOLPAGE    pgmPoolGetPage(PPGMPOOL pPool, RTHCPHYS HCPhys);
    3322 int             pgmPoolSyncCR3(PVMCPU pVCpu);
    3323 bool            pgmPoolIsDirtyPage(PVM pVM, RTGCPHYS GCPhys);
    3324 int             pgmPoolTrackUpdateGCPhys(PVM pVM, PPGMPAGE pPhysPage, bool fFlushPTEs, bool *pfFlushTLBs);
    3325 void            pgmPoolInvalidateDirtyPage(PVM pVM, RTGCPHYS GCPhysPT);
    3326 DECLINLINE(int) pgmPoolTrackFlushGCPhys(PVM pVM, PPGMPAGE pPhysPage, bool *pfFlushTLBs)
    3327 {
    3328     return pgmPoolTrackUpdateGCPhys(pVM, pPhysPage, true /* flush PTEs */, pfFlushTLBs);
    3329 }
    3330 
    3331 uint16_t        pgmPoolTrackPhysExtAddref(PVM pVM, uint16_t u16, uint16_t iShwPT);
    3332 void            pgmPoolTrackPhysExtDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPoolPage, PPGMPAGE pPhysPage);
    3333 void            pgmPoolTracDerefGCPhysHint(PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTHCPHYS HCPhys, RTGCPHYS GCPhysHint);
    3334 void            pgmPoolMonitorChainChanging(PVMCPU pVCpu, PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTGCPHYS GCPhysFault, CTXTYPE(RTGCPTR, RTHCPTR, RTGCPTR) pvAddress, unsigned cbWrite);
    3335 int             pgmPoolMonitorChainFlush(PPGMPOOL pPool, PPGMPOOLPAGE pPage);
    3336 void            pgmPoolMonitorModifiedInsert(PPGMPOOL pPool, PPGMPOOLPAGE pPage);
    3337 
    3338 void            pgmPoolAddDirtyPage(PVM pVM, PPGMPOOL pPool, PPGMPOOLPAGE pPage);
    3339 void            pgmPoolResetDirtyPages(PVM pVM);
    3340 
    3341 int             pgmR3ExitShadowModeBeforePoolFlush(PVM pVM, PVMCPU pVCpu);
    3342 int             pgmR3ReEnterShadowModeAfterPoolFlush(PVM pVM, PVMCPU pVCpu);
    3343 
    3344 void            pgmMapSetShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE);
    3345 void            pgmMapClearShadowPDEs(PVM pVM, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iOldPDE, bool fDeactivateCR3);
    3346 int             pgmMapActivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3);
    3347 int             pgmMapDeactivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3);
    3348 
    3349 int             pgmShwSyncPaePDPtr(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDPE pGstPdpe, PX86PDPAE *ppPD);
    3350 #ifndef IN_RC
    3351 int             pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E pGstPml4e, PX86PDPE pGstPdpe, PX86PDPAE *ppPD);
    3352 #endif
    3353 int             pgmShwGetEPTPDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD);
    3354 
    3355 PX86PD          pgmGstLazyMap32BitPD(PPGMCPU pPGM);
    3356 PX86PDPT        pgmGstLazyMapPaePDPT(PPGMCPU pPGM);
    3357 PX86PDPAE       pgmGstLazyMapPaePD(PPGMCPU pPGM, uint32_t iPdpt);
    3358 PX86PML4        pgmGstLazyMapPml4(PPGMCPU pPGM);
    3359 
    3360 RT_C_DECLS_END
    336151
    336252/** @todo Split out all the inline stuff into a separate file.  Then we can
     
    49501640#endif
    49511641
    4952 
  • trunk/src/VBox/VMM/PGMInternal.h

    r26107 r26150  
    33603360RT_C_DECLS_END
    33613361
    3362 /** @todo Split out all the inline stuff into a separate file.  Then we can
    3363  *        include it later when VM and VMCPU are defined and so avoid all that
    3364  *        &pVM->pgm.s and &pVCpu->pgm.s stuff.  It also chops ~1600 lines off
    3365  *        this file and will make it somewhat easier to navigate... */
    3366 
    3367 /**
    3368  * Gets the PGMRAMRANGE structure for a guest page.
    3369  *
    3370  * @returns Pointer to the RAM range on success.
    3371  * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
    3372  *
    3373  * @param   pPGM        PGM handle.
    3374  * @param   GCPhys      The GC physical address.
    3375  */
    3376 DECLINLINE(PPGMRAMRANGE) pgmPhysGetRange(PPGM pPGM, RTGCPHYS GCPhys)
    3377 {
    3378     /*
    3379      * Optimize for the first range.
    3380      */
    3381     PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
    3382     RTGCPHYS off = GCPhys - pRam->GCPhys;
    3383     if (RT_UNLIKELY(off >= pRam->cb))
    3384     {
    3385         do
    3386         {
    3387             pRam = pRam->CTX_SUFF(pNext);
    3388             if (RT_UNLIKELY(!pRam))
    3389                 break;
    3390             off = GCPhys - pRam->GCPhys;
    3391         } while (off >= pRam->cb);
    3392     }
    3393     return pRam;
    3394 }
    3395 
    3396 
    3397 /**
    3398  * Gets the PGMPAGE structure for a guest page.
    3399  *
    3400  * @returns Pointer to the page on success.
    3401  * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
    3402  *
    3403  * @param   pPGM        PGM handle.
    3404  * @param   GCPhys      The GC physical address.
    3405  */
    3406 DECLINLINE(PPGMPAGE) pgmPhysGetPage(PPGM pPGM, RTGCPHYS GCPhys)
    3407 {
    3408     /*
    3409      * Optimize for the first range.
    3410      */
    3411     PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
    3412     RTGCPHYS off = GCPhys - pRam->GCPhys;
    3413     if (RT_UNLIKELY(off >= pRam->cb))
    3414     {
    3415         do
    3416         {
    3417             pRam = pRam->CTX_SUFF(pNext);
    3418             if (RT_UNLIKELY(!pRam))
    3419                 return NULL;
    3420             off = GCPhys - pRam->GCPhys;
    3421         } while (off >= pRam->cb);
    3422     }
    3423     return &pRam->aPages[off >> PAGE_SHIFT];
    3424 }
    3425 
    3426 
    3427 /**
    3428  * Gets the PGMPAGE structure for a guest page.
    3429  *
    3430  * Old Phys code: Will make sure the page is present.
    3431  *
    3432  * @returns VBox status code.
    3433  * @retval  VINF_SUCCESS and a valid *ppPage on success.
    3434  * @retval  VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
    3435  *
    3436  * @param   pPGM        PGM handle.
    3437  * @param   GCPhys      The GC physical address.
    3438  * @param   ppPage      Where to store the page pointer on success.
    3439  */
    3440 DECLINLINE(int) pgmPhysGetPageEx(PPGM pPGM, RTGCPHYS GCPhys, PPPGMPAGE ppPage)
    3441 {
    3442     /*
    3443      * Optimize for the first range.
    3444      */
    3445     PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
    3446     RTGCPHYS off = GCPhys - pRam->GCPhys;
    3447     if (RT_UNLIKELY(off >= pRam->cb))
    3448     {
    3449         do
    3450         {
    3451             pRam = pRam->CTX_SUFF(pNext);
    3452             if (RT_UNLIKELY(!pRam))
    3453             {
    3454                 *ppPage = NULL; /* avoid incorrect and very annoying GCC warnings */
    3455                 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
    3456             }
    3457             off = GCPhys - pRam->GCPhys;
    3458         } while (off >= pRam->cb);
    3459     }
    3460     *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
    3461     return VINF_SUCCESS;
    3462 }
    3463 
    3464 
    3465 
    3466 
    3467 /**
    3468  * Gets the PGMPAGE structure for a guest page.
    3469  *
    3470  * Old Phys code: Will make sure the page is present.
    3471  *
    3472  * @returns VBox status code.
    3473  * @retval  VINF_SUCCESS and a valid *ppPage on success.
    3474  * @retval  VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
    3475  *
    3476  * @param   pPGM        PGM handle.
    3477  * @param   GCPhys      The GC physical address.
    3478  * @param   ppPage      Where to store the page pointer on success.
    3479  * @param   ppRamHint   Where to read and store the ram list hint.
    3480  *                      The caller initializes this to NULL before the call.
    3481  */
    3482 DECLINLINE(int) pgmPhysGetPageWithHintEx(PPGM pPGM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRamHint)
    3483 {
    3484     RTGCPHYS off;
    3485     PPGMRAMRANGE pRam = *ppRamHint;
    3486     if (    !pRam
    3487         ||  RT_UNLIKELY((off = GCPhys - pRam->GCPhys) >= pRam->cb))
    3488     {
    3489         pRam = pPGM->CTX_SUFF(pRamRanges);
    3490         off = GCPhys - pRam->GCPhys;
    3491         if (RT_UNLIKELY(off >= pRam->cb))
    3492         {
    3493             do
    3494             {
    3495                 pRam = pRam->CTX_SUFF(pNext);
    3496                 if (RT_UNLIKELY(!pRam))
    3497                 {
    3498                     *ppPage = NULL; /* Kill the incorrect and extremely annoying GCC warnings. */
    3499                     return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
    3500                 }
    3501                 off = GCPhys - pRam->GCPhys;
    3502             } while (off >= pRam->cb);
    3503         }
    3504         *ppRamHint = pRam;
    3505     }
    3506     *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
    3507     return VINF_SUCCESS;
    3508 }
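
Both getters above share one pattern: try the first (or hinted) RAM range, then walk the sorted, singly linked range list; the unsigned subtraction lets a single off >= cb compare also reject addresses below the range base. A self-contained model of the hinted walk, where RANGE and lookupWithHint are hypothetical stand-ins for PGMRAMRANGE and pgmPhysGetPageWithHintEx:

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    typedef struct RANGE
    {
        uint64_t      GCPhys;   /* base address of the range */
        uint64_t      cb;       /* size of the range in bytes */
        struct RANGE *pNext;    /* next range, sorted by address */
    } RANGE;

    /* Returns the containing range and updates *ppHint so that the next lookup
     * in the same neighbourhood is O(1); returns NULL for addresses in a hole. */
    static RANGE *lookupWithHint(RANGE *pHead, uint64_t GCPhys, RANGE **ppHint)
    {
        RANGE *pRam = *ppHint;
        if (!pRam || GCPhys - pRam->GCPhys >= pRam->cb) /* unsigned wrap rejects low addresses too */
        {
            for (pRam = pHead; pRam; pRam = pRam->pNext)
                if (GCPhys - pRam->GCPhys < pRam->cb)
                    break;
            if (!pRam)
                return NULL;
            *ppHint = pRam;
        }
        return pRam;
    }

    int main(void)
    {
        RANGE High = { 0x100000000ull, 0x40000000, NULL };  /* 4G..5G */
        RANGE Low  = { 0,              0xA0000000, &High }; /* 0..2.5G */
        RANGE *pHint = NULL;
        printf("%p %p\n", (void *)lookupWithHint(&Low, 0x1000, &pHint),
               (void *)lookupWithHint(&Low, 0x2000, &pHint)); /* second call hits the hint */
        return 0;
    }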
    3509 
    3510 
    3511 /**
    3512  * Gets the PGMPAGE structure for a guest page together with the PGMRAMRANGE.
    3513  *
    3514  * @returns Pointer to the page on success.
    3515  * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
    3516  *
    3517  * @param   pPGM        PGM handle.
    3518  * @param   GCPhys      The GC physical address.
    3519  * @param   ppRam       Where to store the pointer to the PGMRAMRANGE.
    3520  */
    3521 DECLINLINE(PPGMPAGE) pgmPhysGetPageAndRange(PPGM pPGM, RTGCPHYS GCPhys, PPGMRAMRANGE *ppRam)
    3522 {
    3523     /*
    3524      * Optimize for the first range.
    3525      */
    3526     PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
    3527     RTGCPHYS off = GCPhys - pRam->GCPhys;
    3528     if (RT_UNLIKELY(off >= pRam->cb))
    3529     {
    3530         do
    3531         {
    3532             pRam = pRam->CTX_SUFF(pNext);
    3533             if (RT_UNLIKELY(!pRam))
    3534                 return NULL;
    3535             off = GCPhys - pRam->GCPhys;
    3536         } while (off >= pRam->cb);
    3537     }
    3538     *ppRam = pRam;
    3539     return &pRam->aPages[off >> PAGE_SHIFT];
    3540 }
    3541 
    3542 
    3543 /**
    3544  * Gets the PGMPAGE structure for a guest page together with the PGMRAMRANGE.
    3545  *
    3546  * @returns Pointer to the page on success.
    3547  * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
    3548  *
    3549  * @param   pPGM        PGM handle.
    3550  * @param   GCPhys      The GC physical address.
    3551  * @param   ppPage      Where to store the pointer to the PGMPAGE structure.
    3552  * @param   ppRam       Where to store the pointer to the PGMRAMRANGE structure.
    3553  */
    3554 DECLINLINE(int) pgmPhysGetPageAndRangeEx(PPGM pPGM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam)
    3555 {
    3556     /*
    3557      * Optimize for the first range.
    3558      */
    3559     PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
    3560     RTGCPHYS off = GCPhys - pRam->GCPhys;
    3561     if (RT_UNLIKELY(off >= pRam->cb))
    3562     {
    3563         do
    3564         {
    3565             pRam = pRam->CTX_SUFF(pNext);
    3566             if (RT_UNLIKELY(!pRam))
    3567             {
    3568                 *ppRam = NULL;  /* Shut up silly GCC warnings. */
    3569                 *ppPage = NULL; /* ditto */
    3570                 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
    3571             }
    3572             off = GCPhys - pRam->GCPhys;
    3573         } while (off >= pRam->cb);
    3574     }
    3575     *ppRam = pRam;
    3576     *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
    3577     return VINF_SUCCESS;
    3578 }
    3579 
    3580 
    3581 /**
    3582  * Convert GC Phys to HC Phys.
    3583  *
     3584  * @returns VBox status code.
    3585  * @param   pPGM        PGM handle.
    3586  * @param   GCPhys      The GC physical address.
    3587  * @param   pHCPhys     Where to store the corresponding HC physical address.
    3588  *
    3589  * @deprecated  Doesn't deal with zero, shared or write monitored pages.
    3590  *              Avoid when writing new code!
    3591  */
    3592 DECLINLINE(int) pgmRamGCPhys2HCPhys(PPGM pPGM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
    3593 {
    3594     PPGMPAGE pPage;
    3595     int rc = pgmPhysGetPageEx(pPGM, GCPhys, &pPage);
    3596     if (RT_FAILURE(rc))
    3597         return rc;
    3598     *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
    3599     return VINF_SUCCESS;
    3600 }
    3601 
    3602 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    3603 
    3604 /**
    3605  * Inlined version of the ring-0 version of PGMDynMapHCPage that
    3606  * optimizes access to pages already in the set.
    3607  *
    3608  * @returns VINF_SUCCESS. Will bail out to ring-3 on failure.
     3609  * @param   pPGM        Pointer to the PGM instance data.
    3610  * @param   HCPhys      The physical address of the page.
    3611  * @param   ppv         Where to store the mapping address.
    3612  */
    3613 DECLINLINE(int) pgmR0DynMapHCPageInlined(PPGM pPGM, RTHCPHYS HCPhys, void **ppv)
    3614 {
    3615     PVM         pVM     = PGM2VM(pPGM);
    3616     PPGMCPU     pPGMCPU = (PPGMCPU)((uint8_t *)VMMGetCpu(pVM) + pPGM->offVCpuPGM); /* very pretty ;-) */
    3617     PPGMMAPSET  pSet    = &pPGMCPU->AutoSet;
    3618 
    3619     STAM_PROFILE_START(&pPGMCPU->StatR0DynMapHCPageInl, a);
    3620     Assert(!(HCPhys & PAGE_OFFSET_MASK));
    3621     Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));
    3622 
    3623     unsigned    iHash   = PGMMAPSET_HASH(HCPhys);
    3624     unsigned    iEntry  = pSet->aiHashTable[iHash];
    3625     if (    iEntry < pSet->cEntries
    3626         &&  pSet->aEntries[iEntry].HCPhys == HCPhys)
    3627     {
    3628         *ppv = pSet->aEntries[iEntry].pvPage;
    3629         STAM_COUNTER_INC(&pPGMCPU->StatR0DynMapHCPageInlHits);
    3630     }
    3631     else
    3632     {
    3633         STAM_COUNTER_INC(&pPGMCPU->StatR0DynMapHCPageInlMisses);
    3634         pgmR0DynMapHCPageCommon(pVM, pSet, HCPhys, ppv);
    3635     }
    3636 
    3637     STAM_PROFILE_STOP(&pPGMCPU->StatR0DynMapHCPageInl, a);
    3638     return VINF_SUCCESS;
    3639 }
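
The fast path above is an open hash over the per-CPU mapping set: aiHashTable maps a hash of HCPhys to a candidate entry index, which must still be range-checked against cEntries and tag-compared before it can be trusted. A self-contained model of that probe; the names, set size, and hash are hypothetical (the real PGMMAPSET_HASH and set layout differ):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define SET_SIZE  64
    #define HASH(h)   ((unsigned)((h) >> 12) % SET_SIZE)    /* stand-in for PGMMAPSET_HASH */

    typedef struct { uint64_t HCPhys; void *pvPage; } ENTRY;
    typedef struct
    {
        unsigned cEntries;
        unsigned aiHash[SET_SIZE];      /* hash -> candidate entry index */
        ENTRY    aEntries[SET_SIZE];
    } MAPSET;

    /* Fast-path probe mirroring the hit check above. */
    static void *probe(MAPSET *pSet, uint64_t HCPhys)
    {
        unsigned iEntry = pSet->aiHash[HASH(HCPhys)];
        if (iEntry < pSet->cEntries && pSet->aEntries[iEntry].HCPhys == HCPhys)
            return pSet->aEntries[iEntry].pvPage;   /* hit */
        return NULL;                                /* miss: caller takes the slow path */
    }

    int main(void)
    {
        static MAPSET Set;
        memset(Set.aiHash, 0xff, sizeof(Set.aiHash)); /* out-of-range index == empty slot */
        Set.aEntries[0].HCPhys = 0x123000;
        Set.aEntries[0].pvPage = &Set;
        Set.cEntries = 1;
        Set.aiHash[HASH(0x123000)] = 0;
        printf("hit=%p miss=%p\n", probe(&Set, 0x123000), probe(&Set, 0x456000));
        return 0;
    }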
    3640 
    3641 
    3642 /**
    3643  * Inlined version of the ring-0 version of PGMDynMapGCPage that optimizes
    3644  * access to pages already in the set.
    3645  *
    3646  * @returns See PGMDynMapGCPage.
     3647  * @param   pPGM        Pointer to the PGM instance data.
    3648  * @param   GCPhys      The guest physical address of the page.
    3649  * @param   ppv         Where to store the mapping address.
    3650  */
    3651 DECLINLINE(int) pgmR0DynMapGCPageInlined(PPGM pPGM, RTGCPHYS GCPhys, void **ppv)
    3652 {
    3653     PVM     pVM     = PGM2VM(pPGM);
    3654     PPGMCPU pPGMCPU = (PPGMCPU)((uint8_t *)VMMGetCpu(pVM) + pPGM->offVCpuPGM); /* very pretty ;-) */
    3655 
    3656     STAM_PROFILE_START(&pPGMCPU->StatR0DynMapGCPageInl, a);
    3657     AssertMsg(!(GCPhys & PAGE_OFFSET_MASK), ("%RGp\n", GCPhys));
    3658 
    3659     /*
    3660      * Get the ram range.
    3661      */
    3662     PPGMRAMRANGE    pRam = pPGM->CTX_SUFF(pRamRanges);
    3663     RTGCPHYS        off = GCPhys - pRam->GCPhys;
    3664     if (RT_UNLIKELY(off >= pRam->cb
    3665         /** @todo   || page state stuff */))
    3666     {
    3667         /* This case is not counted into StatR0DynMapGCPageInl. */
    3668         STAM_COUNTER_INC(&pPGMCPU->StatR0DynMapGCPageInlRamMisses);
    3669         return PGMDynMapGCPage(pVM, GCPhys, ppv);
    3670     }
    3671 
    3672     RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[off >> PAGE_SHIFT]);
    3673     STAM_COUNTER_INC(&pPGMCPU->StatR0DynMapGCPageInlRamHits);
    3674 
    3675     /*
     3676      * pgmR0DynMapHCPageInlined without the stats.
    3677      */
    3678     PPGMMAPSET pSet = &pPGMCPU->AutoSet;
    3679     Assert(!(HCPhys & PAGE_OFFSET_MASK));
    3680     Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));
    3681 
    3682     unsigned    iHash   = PGMMAPSET_HASH(HCPhys);
    3683     unsigned    iEntry  = pSet->aiHashTable[iHash];
    3684     if (    iEntry < pSet->cEntries
    3685         &&  pSet->aEntries[iEntry].HCPhys == HCPhys)
    3686     {
    3687         *ppv = pSet->aEntries[iEntry].pvPage;
    3688         STAM_COUNTER_INC(&pPGMCPU->StatR0DynMapGCPageInlHits);
    3689     }
    3690     else
    3691     {
    3692         STAM_COUNTER_INC(&pPGMCPU->StatR0DynMapGCPageInlMisses);
    3693         pgmR0DynMapHCPageCommon(pVM, pSet, HCPhys, ppv);
    3694     }
    3695 
    3696     STAM_PROFILE_STOP(&pPGMCPU->StatR0DynMapGCPageInl, a);
    3697     return VINF_SUCCESS;
    3698 }
    3699 
    3700 
    3701 /**
    3702  * Inlined version of the ring-0 version of PGMDynMapGCPageOff that optimizes
    3703  * access to pages already in the set.
    3704  *
     3705  * @returns See PGMDynMapGCPageOff.
     3706  * @param   pPGM        Pointer to the PGM instance data.
     3707  * @param   GCPhys      The guest physical address of the page.
    3708  * @param   ppv         Where to store the mapping address.
    3709  */
    3710 DECLINLINE(int) pgmR0DynMapGCPageOffInlined(PPGM pPGM, RTGCPHYS GCPhys, void **ppv)
    3711 {
    3712     PVM     pVM     = PGM2VM(pPGM);
    3713     PPGMCPU pPGMCPU = (PPGMCPU)((uint8_t *)VMMGetCpu(pVM) + pPGM->offVCpuPGM); /* very pretty ;-) */
    3714 
    3715     STAM_PROFILE_START(&pPGMCPU->StatR0DynMapGCPageInl, a);
    3716 
    3717     /*
    3718      * Get the ram range.
    3719      */
    3720     PPGMRAMRANGE    pRam = pPGM->CTX_SUFF(pRamRanges);
    3721     RTGCPHYS        off = GCPhys - pRam->GCPhys;
    3722     if (RT_UNLIKELY(off >= pRam->cb
    3723         /** @todo   || page state stuff */))
    3724     {
    3725         /* This case is not counted into StatR0DynMapGCPageInl. */
    3726         STAM_COUNTER_INC(&pPGMCPU->StatR0DynMapGCPageInlRamMisses);
    3727         return PGMDynMapGCPageOff(pVM, GCPhys, ppv);
    3728     }
    3729 
    3730     RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[off >> PAGE_SHIFT]);
    3731     STAM_COUNTER_INC(&pPGMCPU->StatR0DynMapGCPageInlRamHits);
    3732 
    3733     /*
     3734      * pgmR0DynMapHCPageInlined without the stats.
    3735      */
    3736     PPGMMAPSET pSet = &pPGMCPU->AutoSet;
    3737     Assert(!(HCPhys & PAGE_OFFSET_MASK));
    3738     Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));
    3739 
    3740     unsigned    iHash   = PGMMAPSET_HASH(HCPhys);
    3741     unsigned    iEntry  = pSet->aiHashTable[iHash];
    3742     if (    iEntry < pSet->cEntries
    3743         &&  pSet->aEntries[iEntry].HCPhys == HCPhys)
    3744     {
    3745         *ppv = (void *)((uintptr_t)pSet->aEntries[iEntry].pvPage | (PAGE_OFFSET_MASK & (uintptr_t)GCPhys));
    3746         STAM_COUNTER_INC(&pPGMCPU->StatR0DynMapGCPageInlHits);
    3747     }
    3748     else
    3749     {
    3750         STAM_COUNTER_INC(&pPGMCPU->StatR0DynMapGCPageInlMisses);
    3751         pgmR0DynMapHCPageCommon(pVM, pSet, HCPhys, ppv);
    3752         *ppv = (void *)((uintptr_t)*ppv | (PAGE_OFFSET_MASK & (uintptr_t)GCPhys));
    3753     }
    3754 
    3755     STAM_PROFILE_STOP(&pPGMCPU->StatR0DynMapGCPageInl, a);
    3756     return VINF_SUCCESS;
    3757 }
    3758 
    3759 #endif /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
    3760 #if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    3761 
    3762 /**
    3763  * Maps the page into current context (RC and maybe R0).
    3764  *
     3765  * @returns Pointer to the mapping.
     3766  * @param   pPGM        Pointer to the PGM instance data.
    3767  * @param   pPage       The page.
    3768  */
    3769 DECLINLINE(void *) pgmPoolMapPageInlined(PPGM pPGM, PPGMPOOLPAGE pPage)
    3770 {
    3771     if (pPage->idx >= PGMPOOL_IDX_FIRST)
    3772     {
    3773         Assert(pPage->idx < pPGM->CTX_SUFF(pPool)->cCurPages);
    3774         void *pv;
    3775 # ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    3776         pgmR0DynMapHCPageInlined(pPGM, pPage->Core.Key, &pv);
    3777 # else
    3778         PGMDynMapHCPage(PGM2VM(pPGM), pPage->Core.Key, &pv);
    3779 # endif
    3780         return pv;
    3781     }
    3782     AssertFatalMsgFailed(("pgmPoolMapPageInlined invalid page index %x\n", pPage->idx));
    3783 }
    3784 
    3785 /**
    3786  * Temporarily maps one host page specified by HC physical address, returning
    3787  * pointer within the page.
    3788  *
     3789  * Be WARNED that the dynamic page mapping area is small (8 pages); the space is
     3790  * therefore reused after 8 mappings (or perhaps a few more if you score with the cache).
    3791  *
    3792  * @returns The address corresponding to HCPhys.
     3793  * @param   pPGM        Pointer to the PGM instance data.
    3794  * @param   HCPhys      HC Physical address of the page.
    3795  */
    3796 DECLINLINE(void *) pgmDynMapHCPageOff(PPGM pPGM, RTHCPHYS HCPhys)
    3797 {
    3798     void *pv;
    3799 # ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    3800     pgmR0DynMapHCPageInlined(pPGM, HCPhys & ~(RTHCPHYS)PAGE_OFFSET_MASK, &pv);
    3801 # else
    3802     PGMDynMapHCPage(PGM2VM(pPGM), HCPhys & ~(RTHCPHYS)PAGE_OFFSET_MASK, &pv);
    3803 # endif
    3804     pv = (void *)((uintptr_t)pv | ((uintptr_t)HCPhys & PAGE_OFFSET_MASK));
    3805     return pv;
    3806 }
    3807 
    3808 #endif /*  VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 || IN_RC */
    3809 #ifndef IN_RC
    3810 
    3811 /**
    3812  * Queries the Physical TLB entry for a physical guest page,
    3813  * attempting to load the TLB entry if necessary.
    3814  *
    3815  * @returns VBox status code.
    3816  * @retval  VINF_SUCCESS on success
    3817  * @retval  VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
    3818  *
    3819  * @param   pPGM        The PGM instance handle.
    3820  * @param   GCPhys      The address of the guest page.
    3821  * @param   ppTlbe      Where to store the pointer to the TLB entry.
    3822  */
    3823 DECLINLINE(int) pgmPhysPageQueryTlbe(PPGM pPGM, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe)
    3824 {
    3825     int rc;
    3826     PPGMPAGEMAPTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
    3827     if (pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK))
    3828     {
    3829         STAM_COUNTER_INC(&pPGM->CTX_MID_Z(Stat,PageMapTlbHits));
    3830         rc = VINF_SUCCESS;
    3831     }
    3832     else
    3833         rc = pgmPhysPageLoadIntoTlb(pPGM, GCPhys);
    3834     *ppTlbe = pTlbe;
    3835     return rc;
    3836 }
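
The check above is a direct-mapped software TLB: low bits of the page frame number select the entry, and the stored GCPhys serves as the tag (the real code masks with X86_PTE_PAE_PG_MASK; the sketch below uses a plain page mask and hypothetical sizes):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT      12
    #define TLB_ENTRIES     32                      /* must be a power of two */
    #define TLB_IDX(GCPhys) (((GCPhys) >> PAGE_SHIFT) & (TLB_ENTRIES - 1))

    typedef struct { uint64_t GCPhys; void *pv; } TLBE;

    int main(void)
    {
        static TLBE aTlb[TLB_ENTRIES];              /* zero tags: only page 0 appears cached */
        uint64_t    GCPhys = 0x7f8000;
        TLBE       *pTlbe  = &aTlb[TLB_IDX(GCPhys)];
        int fHit = pTlbe->GCPhys == (GCPhys & ~(uint64_t)((1 << PAGE_SHIFT) - 1));
        printf("index=%u hit=%d\n", (unsigned)TLB_IDX(GCPhys), fHit); /* miss: load the entry */
        return 0;
    }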
    3837 
    3838 
    3839 /**
    3840  * Queries the Physical TLB entry for a physical guest page,
    3841  * attempting to load the TLB entry if necessary.
    3842  *
    3843  * @returns VBox status code.
    3844  * @retval  VINF_SUCCESS on success
    3845  * @retval  VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
    3846  *
    3847  * @param   pPGM        The PGM instance handle.
    3848  * @param   pPage       Pointer to the PGMPAGE structure corresponding to
    3849  *                      GCPhys.
    3850  * @param   GCPhys      The address of the guest page.
    3851  * @param   ppTlbe      Where to store the pointer to the TLB entry.
    3852  */
    3853 DECLINLINE(int) pgmPhysPageQueryTlbeWithPage(PPGM pPGM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe)
    3854 {
    3855     int rc;
    3856     PPGMPAGEMAPTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
    3857     if (pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK))
    3858     {
    3859         STAM_COUNTER_INC(&pPGM->CTX_MID_Z(Stat,PageMapTlbHits));
    3860         rc = VINF_SUCCESS;
    3861     }
    3862     else
    3863         rc = pgmPhysPageLoadIntoTlbWithPage(pPGM, pPage, GCPhys);
    3864     *ppTlbe = pTlbe;
    3865     return rc;
    3866 }
    3867 
    3868 #endif /* !IN_RC */
    3869 
    3870 /**
     3871  * Calculates the guest physical address of the large (4 MB) page in 32-bit paging mode.
    3872  * Takes PSE-36 into account.
    3873  *
    3874  * @returns guest physical address
    3875  * @param   pPGM        Pointer to the PGM instance data.
    3876  * @param   Pde         Guest Pde
    3877  */
    3878 DECLINLINE(RTGCPHYS) pgmGstGet4MBPhysPage(PPGM pPGM, X86PDE Pde)
    3879 {
    3880     RTGCPHYS GCPhys = Pde.u & X86_PDE4M_PG_MASK;
    3881     GCPhys |= (RTGCPHYS)Pde.b.u8PageNoHigh << 32;
    3882 
    3883     return GCPhys & pPGM->GCPhys4MBPSEMask;
    3884 }
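
The composition above pulls the 4 MB frame from two PDE fields: bits 31:22 supply physical bits 31:22 (X86_PDE4M_PG_MASK), while u8PageNoHigh (PDE bits 20:13) supplies physical bits 39:32 on PSE-36 capable CPUs; GCPhys4MBPSEMask then clips the result to the CPU's reported physical width. A worked, self-contained example with the mask values hard-coded for illustration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t Pde    = 0x83C00000u | (0x3u << 13) | 0x83;    /* P + R/W + PS, high bits = 3 */
        uint64_t GCPhys = (uint64_t)(Pde & 0xFFC00000u)         /* X86_PDE4M_PG_MASK: bits 31:22 */
                        | (uint64_t)((Pde >> 13) & 0xFF) << 32; /* u8PageNoHigh:      bits 39:32 */
        /* A guest reporting 36 physical address bits would further mask with
         * GCPhys4MBPSEMask, i.e. ((1ull << 36) - 1) & ~(uint64_t)0x3FFFFF.    */
        printf("GCPhys = %#llx\n", (unsigned long long)GCPhys); /* 0x383c00000 */
        return 0;
    }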
    3885 
    3886 
    3887 /**
    3888  * Gets the page directory entry for the specified address (32-bit paging).
    3889  *
    3890  * @returns The page directory entry in question.
    3891  * @param   pPGM        Pointer to the PGM instance data.
    3892  * @param   GCPtr       The address.
    3893  */
    3894 DECLINLINE(X86PDE) pgmGstGet32bitPDE(PPGMCPU pPGM, RTGCPTR GCPtr)
    3895 {
    3896 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    3897     PCX86PD pGuestPD = NULL;
    3898     int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pPGM->GCPhysCR3, (void **)&pGuestPD);
    3899     if (RT_FAILURE(rc))
    3900     {
    3901         X86PDE ZeroPde = {0};
    3902         AssertMsgFailedReturn(("%Rrc\n", rc), ZeroPde);
    3903     }
    3904 #else
    3905     PX86PD pGuestPD = pPGM->CTX_SUFF(pGst32BitPd);
    3906 # ifdef IN_RING3
    3907     if (!pGuestPD)
    3908         pGuestPD = pgmGstLazyMap32BitPD(pPGM);
    3909 # endif
    3910 #endif
    3911     return pGuestPD->a[GCPtr >> X86_PD_SHIFT];
    3912 }
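
The GCPtr >> X86_PD_SHIFT indexing above (the shift is 22) selects one of the 1024 PDEs, each covering 4 MB of linear address space. A quick self-contained illustration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t GCPtr = 0xC0100000u;                   /* a typical kernel-space address */
        unsigned iPD   = GCPtr >> 22;                   /* X86_PD_SHIFT */
        printf("PDE index %u covers %#x..%#x\n",        /* 768: 0xc0000000..0xc03fffff */
               iPD, iPD << 22, ((iPD + 1) << 22) - 1);
        return 0;
    }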
    3913 
    3914 
    3915 /**
    3916  * Gets the address of a specific page directory entry (32-bit paging).
    3917  *
     3918  * @returns Pointer to the page directory entry in question.
    3919  * @param   pPGM        Pointer to the PGM instance data.
    3920  * @param   GCPtr       The address.
    3921  */
    3922 DECLINLINE(PX86PDE) pgmGstGet32bitPDEPtr(PPGMCPU pPGM, RTGCPTR GCPtr)
    3923 {
    3924 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    3925     PX86PD  pGuestPD = NULL;
    3926     int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pPGM->GCPhysCR3, (void **)&pGuestPD);
    3927     AssertRCReturn(rc, NULL);
    3928 #else
    3929     PX86PD pGuestPD = pPGM->CTX_SUFF(pGst32BitPd);
    3930 # ifdef IN_RING3
    3931     if (!pGuestPD)
    3932         pGuestPD = pgmGstLazyMap32BitPD(pPGM);
    3933 # endif
    3934 #endif
    3935     return &pGuestPD->a[GCPtr >> X86_PD_SHIFT];
    3936 }
    3937 
    3938 
    3939 /**
     3940  * Gets the address of the guest page directory (32-bit paging).
     3941  *
     3942  * @returns Pointer to the page directory in question.
    3943  * @param   pPGM        Pointer to the PGM instance data.
    3944  */
    3945 DECLINLINE(PX86PD) pgmGstGet32bitPDPtr(PPGMCPU pPGM)
    3946 {
    3947 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    3948     PX86PD  pGuestPD = NULL;
    3949     int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pPGM->GCPhysCR3, (void **)&pGuestPD);
    3950     AssertRCReturn(rc, NULL);
    3951 #else
    3952     PX86PD pGuestPD = pPGM->CTX_SUFF(pGst32BitPd);
    3953 # ifdef IN_RING3
    3954     if (!pGuestPD)
    3955         pGuestPD = pgmGstLazyMap32BitPD(pPGM);
    3956 # endif
    3957 #endif
    3958     return pGuestPD;
    3959 }
    3960 
    3961 
    3962 /**
    3963  * Gets the guest page directory pointer table.
    3964  *
     3965  * @returns Pointer to the page directory pointer table in question.
     3966  * @returns NULL if the page directory pointer table is not present or on an invalid page.
    3967  * @param   pPGM        Pointer to the PGM instance data.
    3968  */
    3969 DECLINLINE(PX86PDPT) pgmGstGetPaePDPTPtr(PPGMCPU pPGM)
    3970 {
    3971 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    3972     PX86PDPT pGuestPDPT = NULL;
    3973     int rc = pgmR0DynMapGCPageOffInlined(PGMCPU2PGM(pPGM), pPGM->GCPhysCR3, (void **)&pGuestPDPT);
    3974     AssertRCReturn(rc, NULL);
    3975 #else
    3976     PX86PDPT pGuestPDPT = pPGM->CTX_SUFF(pGstPaePdpt);
    3977 # ifdef IN_RING3
    3978     if (!pGuestPDPT)
    3979         pGuestPDPT = pgmGstLazyMapPaePDPT(pPGM);
    3980 # endif
    3981 #endif
    3982     return pGuestPDPT;
    3983 }
    3984 
    3985 
    3986 /**
    3987  * Gets the guest page directory pointer table entry for the specified address.
    3988  *
     3989  * @returns Pointer to the page directory pointer table entry in question.
     3990  * @returns NULL if the page directory pointer table is not present or on an invalid page.
    3991  * @param   pPGM        Pointer to the PGM instance data.
    3992  * @param   GCPtr       The address.
    3993  */
    3994 DECLINLINE(PX86PDPE) pgmGstGetPaePDPEPtr(PPGMCPU pPGM, RTGCPTR GCPtr)
    3995 {
    3996     AssertGCPtr32(GCPtr);
    3997 
    3998 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    3999     PX86PDPT pGuestPDPT = 0;
    4000     int rc = pgmR0DynMapGCPageOffInlined(PGMCPU2PGM(pPGM), pPGM->GCPhysCR3, (void **)&pGuestPDPT);
    4001     AssertRCReturn(rc, 0);
    4002 #else
    4003     PX86PDPT pGuestPDPT = pPGM->CTX_SUFF(pGstPaePdpt);
    4004 # ifdef IN_RING3
    4005     if (!pGuestPDPT)
    4006         pGuestPDPT = pgmGstLazyMapPaePDPT(pPGM);
    4007 # endif
    4008 #endif
    4009     return &pGuestPDPT->a[(GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE];
    4010 }
    4011 
    4012 
    4013 /**
    4014  * Gets the page directory for the specified address.
    4015  *
    4016  * @returns Pointer to the page directory in question.
    4017  * @returns NULL if the page directory is not present or on an invalid page.
    4018  * @param   pPGM        Pointer to the PGM instance data.
    4019  * @param   GCPtr       The address.
    4020  */
    4021 DECLINLINE(PX86PDPAE) pgmGstGetPaePD(PPGMCPU pPGM, RTGCPTR GCPtr)
    4022 {
    4023     AssertGCPtr32(GCPtr);
    4024 
    4025     PX86PDPT        pGuestPDPT = pgmGstGetPaePDPTPtr(pPGM);
    4026     AssertReturn(pGuestPDPT, NULL);
    4027     const unsigned  iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
    4028     if (pGuestPDPT->a[iPdpt].n.u1Present)
    4029     {
    4030 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    4031         PX86PDPAE   pGuestPD = NULL;
    4032         int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK, (void **)&pGuestPD);
    4033         AssertRCReturn(rc, NULL);
    4034 #else
    4035         PX86PDPAE   pGuestPD = pPGM->CTX_SUFF(apGstPaePDs)[iPdpt];
    4036         if (    !pGuestPD
    4037             ||  (pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) != pPGM->aGCPhysGstPaePDs[iPdpt])
    4038             pGuestPD = pgmGstLazyMapPaePD(pPGM, iPdpt);
    4039 #endif
    4040         return pGuestPD;
    4041         /* returning NULL is ok if we assume it's just an invalid page of some kind emulated as all 0s. (not quite true) */
    4042     }
    4043     return NULL;
    4044 }
    4045 
    4046 
    4047 /**
    4048  * Gets the page directory entry for the specified address.
    4049  *
    4050  * @returns Pointer to the page directory entry in question.
    4051  * @returns NULL if the page directory is not present or on an invalid page.
    4052  * @param   pPGM        Pointer to the PGM instance data.
    4053  * @param   GCPtr       The address.
    4054  */
    4055 DECLINLINE(PX86PDEPAE) pgmGstGetPaePDEPtr(PPGMCPU pPGM, RTGCPTR GCPtr)
    4056 {
    4057     AssertGCPtr32(GCPtr);
    4058 
    4059     PX86PDPT        pGuestPDPT = pgmGstGetPaePDPTPtr(pPGM);
    4060     AssertReturn(pGuestPDPT, NULL);
    4061     const unsigned  iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
    4062     if (pGuestPDPT->a[iPdpt].n.u1Present)
    4063     {
    4064         const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
    4065 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    4066         PX86PDPAE   pGuestPD = NULL;
    4067         int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK, (void **)&pGuestPD);
    4068         AssertRCReturn(rc, NULL);
    4069 #else
    4070         PX86PDPAE   pGuestPD = pPGM->CTX_SUFF(apGstPaePDs)[iPdpt];
    4071         if (    !pGuestPD
    4072             ||  (pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) != pPGM->aGCPhysGstPaePDs[iPdpt])
    4073             pGuestPD = pgmGstLazyMapPaePD(pPGM, iPdpt);
    4074 #endif
    4075         return &pGuestPD->a[iPD];
     4076         /* returning NULL is ok if we assume it's just an invalid page or something which we'll emulate as all 0s. (not quite true) */
    4077     }
    4078     return NULL;
    4079 }
    4080 
    4081 
    4082 /**
    4083  * Gets the page directory entry for the specified address.
    4084  *
    4085  * @returns The page directory entry in question.
    4086  * @returns A non-present entry if the page directory is not present or on an invalid page.
    4087  * @param   pPGM        Pointer to the PGM instance data.
    4088  * @param   GCPtr       The address.
    4089  */
    4090 DECLINLINE(X86PDEPAE) pgmGstGetPaePDE(PPGMCPU pPGM, RTGCPTR GCPtr)
    4091 {
    4092     AssertGCPtr32(GCPtr);
    4093     X86PDEPAE   ZeroPde = {0};
    4094     PX86PDPT    pGuestPDPT = pgmGstGetPaePDPTPtr(pPGM);
    4095     if (RT_LIKELY(pGuestPDPT))
    4096     {
    4097         const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
    4098         if (pGuestPDPT->a[iPdpt].n.u1Present)
    4099         {
    4100             const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
    4101 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    4102             PX86PDPAE   pGuestPD = NULL;
    4103             int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK, (void **)&pGuestPD);
    4104             AssertRCReturn(rc, ZeroPde);
    4105 #else
    4106             PX86PDPAE   pGuestPD = pPGM->CTX_SUFF(apGstPaePDs)[iPdpt];
    4107             if (    !pGuestPD
    4108                 ||  (pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) != pPGM->aGCPhysGstPaePDs[iPdpt])
    4109                 pGuestPD = pgmGstLazyMapPaePD(pPGM, iPdpt);
    4110 #endif
    4111             return pGuestPD->a[iPD];
    4112         }
    4113     }
    4114     return ZeroPde;
    4115 }
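
In these PAE getters the linear address splits into a 2-bit PDPT index (X86_PDPT_SHIFT is 30, masked with X86_PDPT_MASK_PAE = 3) and a 9-bit PD index (X86_PD_PAE_SHIFT is 21, masked with X86_PD_PAE_MASK = 0x1ff). A self-contained breakdown:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t GCPtr = 0xBFE01000u;
        unsigned iPdpt = (GCPtr >> 30) & 0x3;     /* X86_PDPT_SHIFT, X86_PDPT_MASK_PAE */
        unsigned iPD   = (GCPtr >> 21) & 0x1FF;   /* X86_PD_PAE_SHIFT, X86_PD_PAE_MASK */
        printf("iPdpt=%u iPD=%u\n", iPdpt, iPD);  /* 2 and 511: the last 2 MB PD entry of PDPT #2 */
        return 0;
    }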
    4116 
    4117 
    4118 /**
    4119  * Gets the page directory pointer table entry for the specified address
     4120  * and returns the index into the page directory.
    4121  *
    4122  * @returns Pointer to the page directory in question.
    4123  * @returns NULL if the page directory is not present or on an invalid page.
    4124  * @param   pPGM        Pointer to the PGM instance data.
    4125  * @param   GCPtr       The address.
     4126  * @param   piPD        Receives the index into the returned page directory.
    4127  * @param   pPdpe       Receives the page directory pointer entry. Optional.
    4128  */
    4129 DECLINLINE(PX86PDPAE) pgmGstGetPaePDPtr(PPGMCPU pPGM, RTGCPTR GCPtr, unsigned *piPD, PX86PDPE pPdpe)
    4130 {
    4131     AssertGCPtr32(GCPtr);
    4132 
    4133     PX86PDPT        pGuestPDPT = pgmGstGetPaePDPTPtr(pPGM);
    4134     AssertReturn(pGuestPDPT, NULL);
    4135     const unsigned  iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
    4136     if (pPdpe)
    4137         *pPdpe = pGuestPDPT->a[iPdpt];
    4138     if (pGuestPDPT->a[iPdpt].n.u1Present)
    4139     {
    4140         const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
    4141 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    4142         PX86PDPAE   pGuestPD = NULL;
    4143         int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK, (void **)&pGuestPD);
    4144         AssertRCReturn(rc, NULL);
    4145 #else
    4146         PX86PDPAE   pGuestPD = pPGM->CTX_SUFF(apGstPaePDs)[iPdpt];
    4147         if (    !pGuestPD
    4148             ||  (pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) != pPGM->aGCPhysGstPaePDs[iPdpt])
    4149             pGuestPD = pgmGstLazyMapPaePD(pPGM, iPdpt);
    4150 #endif
    4151         *piPD = iPD;
    4152         return pGuestPD;
     4153         /* returning NULL is ok if we assume it's just an invalid page of some kind emulated as all 0s. */
    4154     }
    4155     return NULL;
    4156 }
    4157 
    4158 #ifndef IN_RC
    4159 
    4160 /**
    4161  * Gets the page map level-4 pointer for the guest.
    4162  *
    4163  * @returns Pointer to the PML4 page.
    4164  * @param   pPGM        Pointer to the PGM instance data.
    4165  */
    4166 DECLINLINE(PX86PML4) pgmGstGetLongModePML4Ptr(PPGMCPU pPGM)
    4167 {
    4168 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    4169     PX86PML4 pGuestPml4;
    4170     int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pPGM->GCPhysCR3, (void **)&pGuestPml4);
    4171     AssertRCReturn(rc, NULL);
    4172 #else
    4173     PX86PML4 pGuestPml4 = pPGM->CTX_SUFF(pGstAmd64Pml4);
    4174 # ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R3
    4175     if (!pGuestPml4)
    4176         pGuestPml4 = pgmGstLazyMapPml4(pPGM);
    4177 # endif
    4178     Assert(pGuestPml4);
    4179 #endif
    4180     return pGuestPml4;
    4181 }
    4182 
    4183 
    4184 /**
    4185  * Gets the pointer to a page map level-4 entry.
    4186  *
    4187  * @returns Pointer to the PML4 entry.
    4188  * @param   pPGM        Pointer to the PGM instance data.
    4189  * @param   iPml4       The index.
    4190  */
    4191 DECLINLINE(PX86PML4E) pgmGstGetLongModePML4EPtr(PPGMCPU pPGM, unsigned int iPml4)
    4192 {
    4193 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    4194     PX86PML4 pGuestPml4;
    4195     int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pPGM->GCPhysCR3, (void **)&pGuestPml4);
    4196     AssertRCReturn(rc, NULL);
    4197 #else
    4198     PX86PML4 pGuestPml4 = pPGM->CTX_SUFF(pGstAmd64Pml4);
    4199 # ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R3
    4200     if (!pGuestPml4)
    4201         pGuestPml4 = pgmGstLazyMapPml4(pPGM);
    4202 # endif
    4203     Assert(pGuestPml4);
    4204 #endif
    4205     return &pGuestPml4->a[iPml4];
    4206 }
    4207 
    4208 
    4209 /**
    4210  * Gets a page map level-4 entry.
    4211  *
    4212  * @returns The PML4 entry.
    4213  * @param   pPGM        Pointer to the PGM instance data.
    4214  * @param   iPml4       The index.
    4215  */
    4216 DECLINLINE(X86PML4E) pgmGstGetLongModePML4E(PPGMCPU pPGM, unsigned int iPml4)
    4217 {
    4218 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    4219     PX86PML4 pGuestPml4;
    4220     int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pPGM->GCPhysCR3, (void **)&pGuestPml4);
    4221     if (RT_FAILURE(rc))
    4222     {
    4223         X86PML4E ZeroPml4e = {0};
    4224         AssertMsgFailedReturn(("%Rrc\n", rc), ZeroPml4e);
    4225     }
    4226 #else
    4227     PX86PML4 pGuestPml4 = pPGM->CTX_SUFF(pGstAmd64Pml4);
    4228 # ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R3
    4229     if (!pGuestPml4)
    4230         pGuestPml4 = pgmGstLazyMapPml4(pPGM);
    4231 # endif
    4232     Assert(pGuestPml4);
    4233 #endif
    4234     return pGuestPml4->a[iPml4];
    4235 }
    4236 
    4237 
    4238 /**
    4239  * Gets the page directory pointer entry for the specified address.
    4240  *
    4241  * @returns Pointer to the page directory pointer entry in question.
    4242  * @returns NULL if the page directory is not present or on an invalid page.
    4243  * @param   pPGM        Pointer to the PGM instance data.
    4244  * @param   GCPtr       The address.
    4245  * @param   ppPml4e     Page Map Level-4 Entry (out)
    4246  */
    4247 DECLINLINE(PX86PDPE) pgmGstGetLongModePDPTPtr(PPGMCPU pPGM, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e)
    4248 {
    4249     PX86PML4        pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
    4250     const unsigned  iPml4  = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    4251     PCX86PML4E      pPml4e = *ppPml4e = &pGuestPml4->a[iPml4];
    4252     if (pPml4e->n.u1Present)
    4253     {
    4254         PX86PDPT pPdpt;
    4255         int rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pPml4e->u & X86_PML4E_PG_MASK, &pPdpt);
    4256         AssertRCReturn(rc, NULL);
    4257 
    4258         const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
    4259         return &pPdpt->a[iPdpt];
    4260     }
    4261     return NULL;
    4262 }
    4263 
    4264 
    4265 /**
    4266  * Gets the page directory entry for the specified address.
    4267  *
    4268  * @returns The page directory entry in question.
    4269  * @returns A non-present entry if the page directory is not present or on an invalid page.
    4270  * @param   pPGM        Pointer to the PGM instance data.
    4271  * @param   GCPtr       The address.
    4272  * @param   ppPml4e     Page Map Level-4 Entry (out)
    4273  * @param   pPdpe       Page directory pointer table entry (out)
    4274  */
    4275 DECLINLINE(X86PDEPAE) pgmGstGetLongModePDEEx(PPGMCPU pPGM, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPE pPdpe)
    4276 {
    4277     X86PDEPAE       ZeroPde = {0};
    4278     PX86PML4        pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
    4279     const unsigned  iPml4  = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    4280     PCX86PML4E      pPml4e = *ppPml4e = &pGuestPml4->a[iPml4];
    4281     if (pPml4e->n.u1Present)
    4282     {
    4283         PCX86PDPT   pPdptTemp;
    4284         int rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pPml4e->u & X86_PML4E_PG_MASK, &pPdptTemp);
    4285         AssertRCReturn(rc, ZeroPde);
    4286 
    4287         const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
    4288         *pPdpe = pPdptTemp->a[iPdpt];
    4289         if (pPdptTemp->a[iPdpt].n.u1Present)
    4290         {
    4291             PCX86PDPAE pPD;
    4292             rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pPdptTemp->a[iPdpt].u & X86_PDPE_PG_MASK, &pPD);
    4293             AssertRCReturn(rc, ZeroPde);
    4294 
    4295             const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
    4296             return pPD->a[iPD];
    4297         }
    4298     }
    4299 
    4300     return ZeroPde;
    4301 }
    4302 
    4303 
    4304 /**
    4305  * Gets the page directory entry for the specified address.
    4306  *
    4307  * @returns The page directory entry in question.
    4308  * @returns A non-present entry if the page directory is not present or on an invalid page.
    4309  * @param   pPGM        Pointer to the PGM instance data.
    4310  * @param   GCPtr       The address.
    4311  */
    4312 DECLINLINE(X86PDEPAE) pgmGstGetLongModePDE(PPGMCPU pPGM, RTGCPTR64 GCPtr)
    4313 {
    4314     X86PDEPAE       ZeroPde = {0};
    4315     PCX86PML4       pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
    4316     const unsigned  iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    4317     if (pGuestPml4->a[iPml4].n.u1Present)
    4318     {
    4319         PCX86PDPT   pPdptTemp;
    4320         int rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pGuestPml4->a[iPml4].u & X86_PML4E_PG_MASK, &pPdptTemp);
    4321         AssertRCReturn(rc, ZeroPde);
    4322 
    4323         const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
    4324         if (pPdptTemp->a[iPdpt].n.u1Present)
    4325         {
    4326             PCX86PDPAE pPD;
    4327             rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pPdptTemp->a[iPdpt].u & X86_PDPE_PG_MASK, &pPD);
    4328             AssertRCReturn(rc, ZeroPde);
    4329 
    4330             const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
    4331             return pPD->a[iPD];
    4332         }
    4333     }
    4334     return ZeroPde;
    4335 }
    4336 
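For orientation, a minimal caller sketch (hypothetical helper name, not part of this changeset; assumes the surrounding VMM headers and a valid PPGMCPU) built only on the accessor above:

/* Hypothetical sketch: true if the guest maps GCPtr with a present,
 * writable long-mode PDE. Uses only pgmGstGetLongModePDE() above. */
DECLINLINE(bool) pgmSketchGstPdePresentAndWritable(PPGMCPU pPGM, RTGCPTR64 GCPtr)
{
    X86PDEPAE Pde = pgmGstGetLongModePDE(pPGM, GCPtr);
    return Pde.n.u1Present && Pde.n.u1Write;
}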
    4337 
    4338 /**
    4339  * Gets the page directory entry for the specified address.
    4340  *
    4341  * @returns Pointer to the page directory entry in question.
    4342  * @returns NULL if the page directory is not present or on an invalid page.
    4343  * @param   pPGM        Pointer to the PGM instance data.
    4344  * @param   GCPtr       The address.
    4345  */
    4346 DECLINLINE(PX86PDEPAE) pgmGstGetLongModePDEPtr(PPGMCPU pPGM, RTGCPTR64 GCPtr)
    4347 {
    4348     PCX86PML4       pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
    4349     const unsigned  iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    4350     if (pGuestPml4->a[iPml4].n.u1Present)
    4351     {
    4352         PCX86PDPT   pPdptTemp;
    4353         int rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pGuestPml4->a[iPml4].u & X86_PML4E_PG_MASK, &pPdptTemp);
    4354         AssertRCReturn(rc, NULL);
    4355 
    4356         const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
    4357         if (pPdptTemp->a[iPdpt].n.u1Present)
    4358         {
    4359             PX86PDPAE pPD;
    4360             rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pPdptTemp->a[iPdpt].u & X86_PDPE_PG_MASK, &pPD);
    4361             AssertRCReturn(rc, NULL);
    4362 
    4363             const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
    4364             return &pPD->a[iPD];
    4365         }
    4366     }
    4367     return NULL;
    4368 }
    4369 
    4370 
    4371 /**
    4372  * Gets the GUEST page directory pointer for the specified address.
    4373  *
    4374  * @returns The page directory in question.
    4375  * @returns NULL if the page directory is not present or on an invalid page.
    4376  * @param   pPGM        Pointer to the PGM instance data.
    4377  * @param   GCPtr       The address.
    4378  * @param   ppPml4e     Page Map Level-4 Entry (out)
    4379  * @param   pPdpe       Page directory pointer table entry (out)
    4380  * @param   piPD        Receives the index into the returned page directory
    4381  */
    4382 DECLINLINE(PX86PDPAE) pgmGstGetLongModePDPtr(PPGMCPU pPGM, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPE pPdpe, unsigned *piPD)
    4383 {
    4384     PX86PML4        pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
    4385     const unsigned  iPml4  = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    4386     PCX86PML4E      pPml4e = *ppPml4e = &pGuestPml4->a[iPml4];
    4387     if (pPml4e->n.u1Present)
    4388     {
    4389         PCX86PDPT   pPdptTemp;
    4390         int rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pPml4e->u & X86_PML4E_PG_MASK, &pPdptTemp);
    4391         AssertRCReturn(rc, NULL);
    4392 
    4393         const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
    4394         *pPdpe = pPdptTemp->a[iPdpt];
    4395         if (pPdptTemp->a[iPdpt].n.u1Present)
    4396         {
    4397             PX86PDPAE pPD;
    4398             rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pPdptTemp->a[iPdpt].u & X86_PDPE_PG_MASK, &pPD);
    4399             AssertRCReturn(rc, NULL);
    4400 
    4401             *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
    4402             return pPD;
    4403         }
    4404     }
    4405     return NULL;
    4406 }
    4407 
    4408 #endif /* !IN_RC */
    4409 
    4410 /**
    4411  * Gets the shadow page directory, 32-bit.
    4412  *
    4413  * @returns Pointer to the shadow 32-bit PD.
    4414  * @param   pPGM        Pointer to the PGM instance data.
    4415  */
    4416 DECLINLINE(PX86PD) pgmShwGet32BitPDPtr(PPGMCPU pPGM)
    4417 {
    4418     return (PX86PD)PGMPOOL_PAGE_2_PTR_BY_PGMCPU(pPGM, pPGM->CTX_SUFF(pShwPageCR3));
    4419 }
    4420 
    4421 
    4422 /**
    4423  * Gets the shadow page directory entry for the specified address, 32-bit.
    4424  *
    4425  * @returns Shadow 32-bit PDE.
    4426  * @param   pPGM        Pointer to the PGM instance data.
    4427  * @param   GCPtr       The address.
    4428  */
    4429 DECLINLINE(X86PDE) pgmShwGet32BitPDE(PPGMCPU pPGM, RTGCPTR GCPtr)
    4430 {
    4431     const unsigned iPd = (GCPtr >> X86_PD_SHIFT) & X86_PD_MASK;
    4432 
    4433     PX86PD pShwPde = pgmShwGet32BitPDPtr(pPGM);
    4434     if (!pShwPde)
    4435     {
    4436         X86PDE ZeroPde = {0};
    4437         return ZeroPde;
    4438     }
    4439     return pShwPde->a[iPd];
    4440 }
    4441 
    4442 
    4443 /**
    4444  * Gets the pointer to the shadow page directory entry for the specified
    4445  * address, 32-bit.
    4446  *
    4447  * @returns Pointer to the shadow 32-bit PDE.
    4448  * @param   pPGM        Pointer to the PGM instance data.
    4449  * @param   GCPtr       The address.
    4450  */
    4451 DECLINLINE(PX86PDE) pgmShwGet32BitPDEPtr(PPGMCPU pPGM, RTGCPTR GCPtr)
    4452 {
    4453     const unsigned iPd = (GCPtr >> X86_PD_SHIFT) & X86_PD_MASK;
    4454 
    4455     PX86PD pPde = pgmShwGet32BitPDPtr(pPGM);
    4456     AssertReturn(pPde, NULL);
    4457     return &pPde->a[iPd];
    4458 }
    4459 
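As a hedged illustration (hypothetical helper, not part of the changeset), the 32-bit shadow accessors compose like this:

/* Hypothetical sketch: fetch a 32-bit shadow PDE and report whether it
 * is present. pgmShwGet32BitPDE() already returns a zero PDE when the
 * shadow PD cannot be mapped, so no NULL check is needed here. */
DECLINLINE(bool) pgmSketchShw32BitPdePresent(PPGMCPU pPGM, RTGCPTR GCPtr)
{
    X86PDE Pde = pgmShwGet32BitPDE(pPGM, GCPtr);
    return RT_BOOL(Pde.n.u1Present);
}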
    4460 
    4461 /**
    4462  * Gets the shadow page directory pointer table, PAE.
    4463  *
    4464  * @returns Pointer to the shadow PAE PDPT.
    4465  * @param   pPGM        Pointer to the PGM instance data.
    4466  */
    4467 DECLINLINE(PX86PDPT) pgmShwGetPaePDPTPtr(PPGMCPU pPGM)
    4468 {
    4469     return (PX86PDPT)PGMPOOL_PAGE_2_PTR_BY_PGMCPU(pPGM, pPGM->CTX_SUFF(pShwPageCR3));
    4470 }
    4471 
    4472 
    4473 /**
    4474  * Gets the shadow page directory for the specified address, PAE.
    4475  *
    4476  * @returns Pointer to the shadow PD.
    4477  * @param   pPGM        Pointer to the PGM instance data.
    4478  * @param   GCPtr       The address.
    4479  */
    4480 DECLINLINE(PX86PDPAE) pgmShwGetPaePDPtr(PPGMCPU pPGM, RTGCPTR GCPtr)
    4481 {
    4482     const unsigned  iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
    4483     PX86PDPT        pPdpt = pgmShwGetPaePDPTPtr(pPGM);
    4484 
    4485     if (!pPdpt->a[iPdpt].n.u1Present)
    4486         return NULL;
    4487 
    4488     /* Fetch the pgm pool shadow descriptor. */
    4489     PPGMPOOLPAGE pShwPde = pgmPoolGetPage(PGMCPU2PGM(pPGM)->CTX_SUFF(pPool), pPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
    4490     AssertReturn(pShwPde, NULL);
    4491 
    4492     return (PX86PDPAE)PGMPOOL_PAGE_2_PTR_BY_PGMCPU(pPGM, pShwPde);
    4493 }
    4494 
    4495 
    4496 /**
    4497  * Gets the shadow page directory for the specified address, PAE, using the caller-supplied PDPT.
    4498  *
    4499  * @returns Pointer to the shadow PD.
    4500  * @param   pPGM        Pointer to the PGM instance data.
          * @param   pPdpt       Pointer to the shadow PDPT to walk.
    4501  * @param   GCPtr       The address.
    4502  */
    4503 DECLINLINE(PX86PDPAE) pgmShwGetPaePDPtr(PPGMCPU pPGM, PX86PDPT pPdpt, RTGCPTR GCPtr)
    4504 {
    4505     const unsigned  iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
    4506 
    4507     if (!pPdpt->a[iPdpt].n.u1Present)
    4508         return NULL;
    4509 
    4510     /* Fetch the pgm pool shadow descriptor. */
    4511     PPGMPOOLPAGE    pShwPde = pgmPoolGetPage(PGMCPU2PGM(pPGM)->CTX_SUFF(pPool), pPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
    4512     AssertReturn(pShwPde, NULL);
    4513 
    4514     return (PX86PDPAE)PGMPOOL_PAGE_2_PTR_BY_PGMCPU(pPGM, pShwPde);
    4515 }
    4516 
    4517 
    4518 /**
    4519  * Gets the shadow page directory entry, PAE.
    4520  *
    4521  * @returns PDE.
    4522  * @param   pPGM        Pointer to the PGM instance data.
    4523  * @param   GCPtr       The address.
    4524  */
    4525 DECLINLINE(X86PDEPAE) pgmShwGetPaePDE(PPGMCPU pPGM, RTGCPTR GCPtr)
    4526 {
    4527     const unsigned  iPd   = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
    4528 
    4529     PX86PDPAE pShwPde = pgmShwGetPaePDPtr(pPGM, GCPtr);
    4530     if (!pShwPde)
    4531     {
    4532         X86PDEPAE ZeroPde = {0};
    4533         return ZeroPde;
    4534     }
    4535     return pShwPde->a[iPd];
    4536 }
    4537 
    4538 
    4539 /**
    4540  * Gets the pointer to the shadow page directory entry for an address, PAE.
    4541  *
    4542  * @returns Pointer to the PDE.
    4543  * @param   pPGM        Pointer to the PGM instance data.
    4544  * @param   GCPtr       The address.
    4545  */
    4546 DECLINLINE(PX86PDEPAE) pgmShwGetPaePDEPtr(PPGMCPU pPGM, RTGCPTR GCPtr)
    4547 {
    4548     const unsigned  iPd   = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
    4549 
    4550     PX86PDPAE pPde = pgmShwGetPaePDPtr(pPGM, GCPtr);
    4551     AssertReturn(pPde, NULL);
    4552     return &pPde->a[iPd];
    4553 }
    4554 
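The PAE accessors follow the same pattern; a minimal sketch (hypothetical helper name):

/* Hypothetical sketch, mirroring the 32-bit variant above for PAE:
 * pgmShwGetPaePDE() returns a zero PDE when the PD is absent. */
DECLINLINE(bool) pgmSketchShwPaePdePresent(PPGMCPU pPGM, RTGCPTR GCPtr)
{
    X86PDEPAE Pde = pgmShwGetPaePDE(pPGM, GCPtr);
    return RT_BOOL(Pde.n.u1Present);
}
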
    4555 #ifndef IN_RC
    4556 
    4557 /**
    4558  * Gets the shadow page map level-4 pointer.
    4559  *
    4560  * @returns Pointer to the shadow PML4.
    4561  * @param   pPGM        Pointer to the PGM instance data.
    4562  */
    4563 DECLINLINE(PX86PML4) pgmShwGetLongModePML4Ptr(PPGMCPU pPGM)
    4564 {
    4565     return (PX86PML4)PGMPOOL_PAGE_2_PTR_BY_PGMCPU(pPGM, pPGM->CTX_SUFF(pShwPageCR3));
    4566 }
    4567 
    4568 
    4569 /**
    4570  * Gets the shadow page map level-4 entry for the specified address.
    4571  *
    4572  * @returns The entry.
    4573  * @param   pPGM        Pointer to the PGM instance data.
    4574  * @param   GCPtr       The address.
    4575  */
    4576 DECLINLINE(X86PML4E) pgmShwGetLongModePML4E(PPGMCPU pPGM, RTGCPTR GCPtr)
    4577 {
    4578     const unsigned  iPml4 = ((RTGCUINTPTR64)GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    4579     PX86PML4        pShwPml4 = pgmShwGetLongModePML4Ptr(pPGM);
    4580 
    4581     if (!pShwPml4)
    4582     {
    4583         X86PML4E ZeroPml4e = {0};
    4584         return ZeroPml4e;
    4585     }
    4586     return pShwPml4->a[iPml4];
    4587 }
    4588 
    4589 
    4590 /**
    4591  * Gets the pointer to the specified shadow page map level-4 entry.
    4592  *
    4593  * @returns Pointer to the entry, NULL if the shadow PML4 is not available.
    4594  * @param   pPGM        Pointer to the PGM instance data.
    4595  * @param   iPml4       The PML4 index.
    4596  */
    4597 DECLINLINE(PX86PML4E) pgmShwGetLongModePML4EPtr(PPGMCPU pPGM, unsigned int iPml4)
    4598 {
    4599     PX86PML4 pShwPml4 = pgmShwGetLongModePML4Ptr(pPGM);
    4600     if (!pShwPml4)
    4601         return NULL;
    4602     return &pShwPml4->a[iPml4];
    4603 }
    4604 
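A similar hedged sketch for the long-mode level-4 accessors (hypothetical helper; valid only where !IN_RC, like the functions above):

/* Hypothetical sketch: check the shadow PML4E covering GCPtr. */
DECLINLINE(bool) pgmSketchShwPml4ePresent(PPGMCPU pPGM, RTGCPTR GCPtr)
{
    X86PML4E Pml4e = pgmShwGetLongModePML4E(pPGM, GCPtr);
    return RT_BOOL(Pml4e.n.u1Present);
}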
    4605 
    4606 /**
    4607  * Gets the GUEST page directory pointer for the specified address.
    4608  *
    4609  * @returns The page directory in question.
    4610  * @returns NULL if the page directory is not present or on an invalid page.
    4611  * @param   pPGM        Pointer to the PGM instance data.
    4612  * @param   GCPtr       The address.
    4613  * @param   piPD        Receives the index into the returned page directory
    4614  */
    4615 DECLINLINE(PX86PDPAE) pgmGstGetLongModePDPtr(PPGMCPU pPGM, RTGCPTR64 GCPtr, unsigned *piPD)
    4616 {
    4617     PCX86PML4       pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
    4618     const unsigned  iPml4  = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    4619     if (pGuestPml4->a[iPml4].n.u1Present)
    4620     {
    4621         PCX86PDPT   pPdptTemp;
    4622         int rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pGuestPml4->a[iPml4].u & X86_PML4E_PG_MASK, &pPdptTemp);
    4623         AssertRCReturn(rc, NULL);
    4624 
    4625         const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
    4626         if (pPdptTemp->a[iPdpt].n.u1Present)
    4627         {
    4628             PX86PDPAE pPD;
    4629             rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pPdptTemp->a[iPdpt].u & X86_PDPE_PG_MASK, &pPD);
    4630             AssertRCReturn(rc, NULL);
    4631 
    4632             *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
    4633             return pPD;
    4634         }
    4635     }
    4636     return NULL;
    4637 }
    4638 
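To show how the piPD out-parameter is meant to be consumed, a minimal sketch (hypothetical helper, assumptions as above):

/* Hypothetical sketch: resolve the guest PDE via the PD pointer and
 * index returned by pgmGstGetLongModePDPtr() above. */
DECLINLINE(X86PDEPAE) pgmSketchGstPdeViaPDPtr(PPGMCPU pPGM, RTGCPTR64 GCPtr)
{
    X86PDEPAE ZeroPde = {0};
    unsigned  iPD     = 0;
    PX86PDPAE pPD     = pgmGstGetLongModePDPtr(pPGM, GCPtr, &iPD);
    return pPD ? pPD->a[iPD] : ZeroPde;
}
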
    4639 #endif /* !IN_RC */
    4640 
    4641 /**
    4642  * Gets the page state for a physical handler.
    4643  *
    4644  * @returns The physical handler page state.
    4645  * @param   pCur    The physical handler in question.
    4646  */
    4647 DECLINLINE(unsigned) pgmHandlerPhysicalCalcState(PPGMPHYSHANDLER pCur)
    4648 {
    4649     switch (pCur->enmType)
    4650     {
    4651         case PGMPHYSHANDLERTYPE_PHYSICAL_WRITE:
    4652             return PGM_PAGE_HNDL_PHYS_STATE_WRITE;
    4653 
    4654         case PGMPHYSHANDLERTYPE_MMIO:
    4655         case PGMPHYSHANDLERTYPE_PHYSICAL_ALL:
    4656             return PGM_PAGE_HNDL_PHYS_STATE_ALL;
    4657 
    4658         default:
    4659             AssertFatalMsgFailed(("Invalid type %d\n", pCur->enmType));
    4660     }
    4661 }
    4662 
    4663 
    4664 /**
    4665  * Gets the page state for a virtual handler.
    4666  *
    4667  * @returns The virtual handler page state.
    4668  * @param   pCur    The virtual handler in question.
    4669  * @remarks This should never be used on a hypervisor access handler.
    4670  */
    4671 DECLINLINE(unsigned) pgmHandlerVirtualCalcState(PPGMVIRTHANDLER pCur)
    4672 {
    4673     switch (pCur->enmType)
    4674     {
    4675         case PGMVIRTHANDLERTYPE_WRITE:
    4676             return PGM_PAGE_HNDL_VIRT_STATE_WRITE;
    4677         case PGMVIRTHANDLERTYPE_ALL:
    4678             return PGM_PAGE_HNDL_VIRT_STATE_ALL;
    4679         default:
    4680             AssertFatalMsgFailed(("Invalid type %d\n", pCur->enmType));
    4681     }
    4682 }
    4683 
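For context, a hedged sketch (hypothetical helper) of how these state calculators feed the page-level state macros, mirroring the use of PGM_PAGE_SET_HNDL_VIRT_STATE in pgmHandlerVirtualClearPage() below:

/* Hypothetical sketch: stamp a page with the state computed for its
 * virtual handler. */
DECLINLINE(void) pgmSketchApplyVirtHandlerState(PPGMVIRTHANDLER pCur, PPGMPAGE pPage)
{
    PGM_PAGE_SET_HNDL_VIRT_STATE(pPage, pgmHandlerVirtualCalcState(pCur));
}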
    4684 
    4685 /**
    4686  * Clears one physical page of a virtual handler.
    4687  *
    4688  * @param   pPGM    Pointer to the PGM instance.
    4689  * @param   pCur    Virtual handler structure
    4690  * @param   iPage   Physical page index
    4691  *
    4692  * @remark  Only used when PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL is being set, so no
    4693  *          need to care about other handlers in the same page.
    4694  */
    4695 DECLINLINE(void) pgmHandlerVirtualClearPage(PPGM pPGM, PPGMVIRTHANDLER pCur, unsigned iPage)
    4696 {
    4697     const PPGMPHYS2VIRTHANDLER pPhys2Virt = &pCur->aPhysToVirt[iPage];
    4698 
    4699     /*
    4700      * Remove the node from the tree (it's supposed to be in the tree if we get here!).
    4701      */
    4702 #ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
    4703     AssertReleaseMsg(pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_IN_TREE,
    4704                      ("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
    4705                       pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias));
    4706 #endif
    4707     if (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_IS_HEAD)
    4708     {
    4709         /* We're the head of the alias chain. */
    4710         PPGMPHYS2VIRTHANDLER pRemove = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysRemove(&pPGM->CTX_SUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key); NOREF(pRemove);
    4711 #ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
    4712         AssertReleaseMsg(pRemove != NULL,
    4713                          ("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
    4714                           pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias));
    4715         AssertReleaseMsg(pRemove == pPhys2Virt,
    4716                          ("wanted: pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n"
    4717                           "   got:    pRemove=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
    4718                           pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias,
    4719                           pRemove, pRemove->Core.Key, pRemove->Core.KeyLast, pRemove->offVirtHandler, pRemove->offNextAlias));
    4720 #endif
    4721         if (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK)
    4722         {
    4723             /* Insert the next list in the alias chain into the tree. */
    4724             PPGMPHYS2VIRTHANDLER pNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPhys2Virt + (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
    4725 #ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
    4726             AssertReleaseMsg(pNext->offNextAlias & PGMPHYS2VIRTHANDLER_IN_TREE,
    4727                              ("pNext=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
    4728                              pNext, pNext->Core.Key, pNext->Core.KeyLast, pNext->offVirtHandler, pNext->offNextAlias));
    4729 #endif
    4730             pNext->offNextAlias |= PGMPHYS2VIRTHANDLER_IS_HEAD;
    4731             bool fRc = RTAvlroGCPhysInsert(&pPGM->CTX_SUFF(pTrees)->PhysToVirtHandlers, &pNext->Core);
    4732             AssertRelease(fRc);
    4733         }
    4734     }
    4735     else
    4736     {
    4737         /* Locate the previous node in the alias chain. */
    4738         PPGMPHYS2VIRTHANDLER pPrev = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGet(&pPGM->CTX_SUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key);
    4739 #ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
    4740         AssertReleaseMsg(pPrev != pPhys2Virt,
    4741                          ("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} pPrev=%p\n",
    4742                           pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias, pPrev));
    4743 #endif
    4744         for (;;)
    4745         {
    4746             PPGMPHYS2VIRTHANDLER pNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPrev + (pPrev->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
    4747             if (pNext == pPhys2Virt)
    4748             {
    4749                 /* unlink. */
    4750                 LogFlow(("pgmHandlerVirtualClearPage: removed %p:{.offNextAlias=%#RX32} from alias chain. prev %p:{.offNextAlias=%#RX32} [%RGp-%RGp]\n",
    4751                          pPhys2Virt, pPhys2Virt->offNextAlias, pPrev, pPrev->offNextAlias, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast));
    4752                 if (!(pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK))
    4753                     pPrev->offNextAlias &= ~PGMPHYS2VIRTHANDLER_OFF_MASK;
    4754                 else
    4755                 {
    4756                     PPGMPHYS2VIRTHANDLER pNewNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPhys2Virt + (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
    4757                     pPrev->offNextAlias = ((intptr_t)pNewNext - (intptr_t)pPrev)
    4758                                         | (pPrev->offNextAlias & ~PGMPHYS2VIRTHANDLER_OFF_MASK);
    4759                 }
    4760                 break;
    4761             }
    4762 
    4763             /* next */
    4764             if (pNext == pPrev)
    4765             {
    4766 #ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
    4767                 AssertReleaseMsg(pNext != pPrev,
    4768                                  ("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} pPrev=%p\n",
    4769                                   pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias, pPrev));
    4770 #endif
    4771                 break;
    4772             }
    4773             pPrev = pNext;
    4774         }
    4775     }
    4776     Log2(("PHYS2VIRT: Removing %RGp-%RGp %#RX32 %s\n",
    4777           pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias, R3STRING(pCur->pszDesc)));
    4778     pPhys2Virt->offNextAlias = 0;
    4779     pPhys2Virt->Core.KeyLast = NIL_RTGCPHYS; /* require reinsert */
    4780 
    4781     /*
    4782      * Clear the ram flags for this page.
    4783      */
    4784     PPGMPAGE pPage = pgmPhysGetPage(pPGM, pPhys2Virt->Core.Key);
    4785     AssertReturnVoid(pPage);
    4786     PGM_PAGE_SET_HNDL_VIRT_STATE(pPage, PGM_PAGE_HNDL_VIRT_STATE_NONE);
    4787 }
    4788 
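The alias-chain arithmetic above can be hard to follow; a hedged sketch (hypothetical helper) of the offNextAlias encoding it relies on:

/* Hypothetical sketch: offNextAlias keeps flag bits (IN_TREE, IS_HEAD)
 * outside PGMPHYS2VIRTHANDLER_OFF_MASK and a byte offset to the next
 * alias node inside it; a zero offset terminates the chain. */
DECLINLINE(PPGMPHYS2VIRTHANDLER) pgmSketchNextAlias(PPGMPHYS2VIRTHANDLER pNode)
{
    uint32_t offNext = pNode->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK;
    return offNext ? (PPGMPHYS2VIRTHANDLER)((uintptr_t)pNode + offNext) : NULL;
}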
    4789 
    4790 /**
    4791  * Internal worker for getting an 'in-use' shadow page by its pool page index.
    4792  *
    4793  * @returns Pointer to the shadow page structure.
    4794  * @param   pPool       The pool.
    4795  * @param   idx         The pool page index.
    4796  */
    4797 DECLINLINE(PPGMPOOLPAGE) pgmPoolGetPageByIdx(PPGMPOOL pPool, unsigned idx)
    4798 {
    4799     AssertFatalMsg(idx >= PGMPOOL_IDX_FIRST && idx < pPool->cCurPages, ("idx=%d\n", idx));
    4800     return &pPool->aPages[idx];
    4801 }
    4802 
    4803 
    4804 /**
    4805  * Clear references to guest physical memory.
    4806  *
    4807  * @param   pPool       The pool.
    4808  * @param   pPoolPage   The pool page.
    4809  * @param   pPhysPage   The physical guest page tracking structure.
    4810  */
    4811 DECLINLINE(void) pgmTrackDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPoolPage, PPGMPAGE pPhysPage)
    4812 {
    4813     /*
    4814      * Just deal with the simple case here.
    4815      */
    4816 # ifdef LOG_ENABLED
    4817     const unsigned uOrg = PGM_PAGE_GET_TRACKING(pPhysPage);
    4818 # endif
    4819     const unsigned cRefs = PGM_PAGE_GET_TD_CREFS(pPhysPage);
    4820     if (cRefs == 1)
    4821     {
    4822         Assert(pPoolPage->idx == PGM_PAGE_GET_TD_IDX(pPhysPage));
    4823         PGM_PAGE_SET_TRACKING(pPhysPage, 0);
    4824     }
    4825     else
    4826         pgmPoolTrackPhysExtDerefGCPhys(pPool, pPoolPage, pPhysPage);
    4827     Log2(("pgmTrackDerefGCPhys: %x -> %x pPhysPage=%R[pgmpage]\n", uOrg, PGM_PAGE_GET_TRACKING(pPhysPage), pPhysPage ));
    4828 }
    4829 
    4830 
    4831 /**
    4832  * Moves the page to the head of the age list.
    4833  *
    4834  * This is done when the cached page is used in one way or another.
    4835  *
    4836  * @param   pPool       The pool.
    4837  * @param   pPage       The cached page.
    4838  */
    4839 DECLINLINE(void) pgmPoolCacheUsed(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
    4840 {
    4841     PVM pVM = pPool->CTX_SUFF(pVM);
    4842     pgmLock(pVM);
    4843 
    4844     /*
    4845      * Move to the head of the age list.
    4846      */
    4847     if (pPage->iAgePrev != NIL_PGMPOOL_IDX)
    4848     {
    4849         /* unlink */
    4850         pPool->aPages[pPage->iAgePrev].iAgeNext = pPage->iAgeNext;
    4851         if (pPage->iAgeNext != NIL_PGMPOOL_IDX)
    4852             pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->iAgePrev;
    4853         else
    4854             pPool->iAgeTail = pPage->iAgePrev;
    4855 
    4856         /* insert at head */
    4857         pPage->iAgePrev = NIL_PGMPOOL_IDX;
    4858         pPage->iAgeNext = pPool->iAgeHead;
    4859         Assert(pPage->iAgeNext != NIL_PGMPOOL_IDX); /* we would've already been head then */
    4860         pPool->iAgeHead = pPage->idx;
    4861         pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->idx;
    4862     }
    4863     pgmUnlock(pVM);
    4864 }
    4865 
    4866 /**
    4867  * Locks a page to prevent flushing (important for CR3 root pages or shadow PAE PD pages).
    4868  *
    4869  * @param   pPool       The pool.
    4870  * @param   pPage       The pool page.
    4871  */
    4872 DECLINLINE(void) pgmPoolLockPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
    4873 {
    4874     Assert(PGMIsLockOwner(pPool->CTX_SUFF(pVM)));
    4875     ASMAtomicIncU32(&pPage->cLocked);
    4876 }
    4877 
    4878 
    4879 /**
    4880  * Unlocks a page to allow flushing again.
    4881  *
    4882  * @param   pPool       The pool.
    4883  * @param   pPage       The pool page.
    4884  */
    4885 DECLINLINE(void) pgmPoolUnlockPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
    4886 {
    4887     Assert(PGMIsLockOwner(pPool->CTX_SUFF(pVM)));
    4888     Assert(pPage->cLocked);
    4889     ASMAtomicDecU32(&pPage->cLocked);
    4890 }
    4891 
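A short usage sketch (hypothetical; assumes the caller can take the PGM lock) tying the age-list and lock helpers together:

/* Hypothetical sketch: touch a cached pool page, then pin it so the
 * pool cannot flush it while it is being worked on. Both lock helpers
 * assert PGMIsLockOwner(), hence the pgmLock/pgmUnlock bracket. */
DECLINLINE(void) pgmSketchUsePinnedPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
{
    PVM pVM = pPool->CTX_SUFF(pVM);
    pgmPoolCacheUsed(pPool, pPage);     /* move to head of the age list */
    pgmLock(pVM);
    pgmPoolLockPage(pPool, pPage);      /* cLocked > 0 => not flushable */
    /* ... access the shadow page here ... */
    pgmPoolUnlockPage(pPool, pPage);
    pgmUnlock(pVM);
}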
    4892 
    4893 /**
    4894  * Checks if the page is locked (e.g. the active CR3 or one of the four PDs of a PAE PDPT).
    4895  *
    4896  * @returns true if locked, false if not.
    4897  * @param   pPGM        Pointer to the PGM instance data.
          * @param   pPage       The pool page.
    4898  */
    4899 DECLINLINE(bool) pgmPoolIsPageLocked(PPGM pPGM, PPGMPOOLPAGE pPage)
    4900 {
    4901     if (pPage->cLocked)
    4902     {
    4903         LogFlow(("pgmPoolIsPageLocked found locked root page, kind=%d\n", pPage->enmKind));
    4904         if (pPage->cModifications)
    4905             pPage->cModifications = 1; /* reset counter (can't use 0, or else it will be reinserted in the modified list) */
    4906         return true;
    4907     }
    4908     return false;
    4909 }
    4910 
    4911 
    4912 /**
    4913  * Tells if mappings are to be put into the shadow page table or not.
    4914  *
    4915  * @returns true / false.
    4916  * @param   pPGM        Pointer to the PGM instance data.
    4917  */
    4918 DECL_FORCE_INLINE(bool) pgmMapAreMappingsEnabled(PPGM pPGM)
    4919 {
    4920 #ifdef PGM_WITHOUT_MAPPINGS
    4921     /* There are no mappings in VT-x and AMD-V mode. */
    4922     Assert(pPGM->fMappingsDisabled);
    4923     return false;
    4924 #else
    4925     return !pPGM->fMappingsDisabled;
    4926 #endif
    4927 }
    4928 
    4929 
    4930 /**
    4931  * Checks if the mappings are floating and enabled.
    4932  *
    4933  * @returns true / false.
    4934  * @param   pPGM        Pointer to the PGM instance data.
    4935  */
    4936 DECL_FORCE_INLINE(bool) pgmMapAreMappingsFloating(PPGM pPGM)
    4937 {
    4938 #ifdef PGM_WITHOUT_MAPPINGS
    4939     /* There are no mappings in VT-x and AMD-V mode. */
    4940     Assert(pPGM->fMappingsDisabled);
    4941     return false;
    4942 #else
    4943     return !pPGM->fMappingsDisabled
    4944         && !pPGM->fMappingsFixed;
    4945 #endif
    4946 }
    4947 
    49483362/** @} */
    49493363
  • trunk/src/VBox/VMM/PGMMap.cpp

    r25935 r26150  
    2929#include "PGMInternal.h"
    3030#include <VBox/vm.h>
     31#include "PGMInline.h"
    3132
    3233#include <VBox/log.h>
  • trunk/src/VBox/VMM/PGMPhys.cpp

    r26107 r26150  
    3333#include "PGMInternal.h"
    3434#include <VBox/vm.h>
     35#include "PGMInline.h"
    3536#include <VBox/sup.h>
    3637#include <VBox/param.h>
  • trunk/src/VBox/VMM/PGMPool.cpp

    r26066 r26150  
    105105#include "PGMInternal.h"
    106106#include <VBox/vm.h>
     107#include "PGMInline.h"
    107108
    108109#include <VBox/log.h>
  • trunk/src/VBox/VMM/PGMSavedState.cpp

    r25935 r26150  
    2828#include <VBox/stam.h>
    2929#include <VBox/ssm.h>
    30 #include <VBox/pdm.h>
     30#include <VBox/pdmdrv.h>
     31#include <VBox/pdmdev.h>
    3132#include "PGMInternal.h"
    3233#include <VBox/vm.h>
     34#include "PGMInline.h"
    3335
    3436#include <VBox/param.h>
  • trunk/src/VBox/VMM/VMMAll/PGMAll.cpp

    r25958 r26150  
    3838#include <VBox/hwaccm.h>
    3939#include <VBox/hwacc_vmx.h>
    40 #include "PGMInternal.h"
     40#include "../PGMInternal.h"
    4141#include <VBox/vm.h>
     42#include "../PGMInline.h"
    4243#include <iprt/assert.h>
    4344#include <iprt/asm.h>
     
    773774
    774775    /* Ignore all irrelevant error codes. */
    775     if (    rc == VERR_PAGE_NOT_PRESENT                 
    776         ||  rc == VERR_PAGE_TABLE_NOT_PRESENT           
    777         ||  rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT   
    778         ||  rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)     
     776    if (    rc == VERR_PAGE_NOT_PRESENT
     777        ||  rc == VERR_PAGE_TABLE_NOT_PRESENT
     778        ||  rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT
     779        ||  rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)
    779780        rc = VINF_SUCCESS;
    780781
  • trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp

    r25647 r26150  
    3434#include <VBox/dbgf.h>
    3535#include <VBox/rem.h>
    36 #include "PGMInternal.h"
     36#include "../PGMInternal.h"
    3737#include <VBox/vm.h>
     38#include "../PGMInline.h"
    3839
    3940#include <VBox/log.h>
  • trunk/src/VBox/VMM/VMMAll/PGMAllMap.cpp

    r25935 r26150  
    2525#define LOG_GROUP LOG_GROUP_PGM
    2626#include <VBox/pgm.h>
    27 #include "PGMInternal.h"
     27#include "../PGMInternal.h"
    2828#include <VBox/vm.h>
     29#include "../PGMInline.h"
     30#include <VBox/err.h>
     31#include <iprt/asm.h>
    2932#include <iprt/assert.h>
    30 #include <iprt/asm.h>
    31 #include <VBox/err.h>
    3233
    3334
  • trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp

    r25586 r26150  
    3030#include <VBox/em.h>
    3131#include <VBox/rem.h>
    32 #include "PGMInternal.h"
     32#include "../PGMInternal.h"
    3333#include <VBox/vm.h>
     34#include "../PGMInline.h"
    3435#include <VBox/param.h>
    3536#include <VBox/err.h>
  • trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp

    r26066 r26150  
    3232# include <VBox/patm.h>
    3333#endif
    34 #include "PGMInternal.h"
     34#include "../PGMInternal.h"
    3535#include <VBox/vm.h>
     36#include "../PGMInline.h"
    3637#include <VBox/disopcode.h>
    3738#include <VBox/hwacc_vmx.h>
  • trunk/src/VBox/VMM/VMMGC/PGMGC.cpp

    r13235 r26150  
    3333#include <VBox/trpm.h>
    3434#include <VBox/rem.h>
    35 #include "PGMInternal.h"
     35#include "../PGMInternal.h"
    3636#include <VBox/vm.h>
     37#include "../PGMInline.h"
    3738
    3839#include <iprt/asm.h>
  • trunk/src/VBox/VMM/VMMR0/PGMR0.cpp

    r24763 r26150  
    2525#define LOG_GROUP LOG_GROUP_PGM
    2626#include <VBox/pgm.h>
    27 #include "PGMInternal.h"
     27#include "../PGMInternal.h"
    2828#include <VBox/vm.h>
     29#include "../PGMInline.h"
    2930#include <VBox/log.h>
    3031#include <VBox/err.h>
  • trunk/src/VBox/VMM/VMMR0/PGMR0DynMap.cpp

    r25528 r26150  
    2727#include "../PGMInternal.h"
    2828#include <VBox/vm.h>
     29#include "../PGMInline.h"
    2930#include <VBox/sup.h>
    3031#include <VBox/err.h>