VirtualBox

Changeset 31402 in vbox for trunk/src/VBox/VMM/VMMRZ


Ignore:
Timestamp:
Aug 5, 2010 12:28:18 PM (14 years ago)
Author:
vboxsync
Message:

PGM: Replaced the hazardous raw-mode context dynamic mapping code with the PGMR0DynMap code used by darwin/x86. This is a risky change but it should pay off once stable by providing 100% certainty that dynamically mapped pages aren't reused behind our back (this has been observed in seemingly benign code paths recently).

File:
1 moved

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMRZ/PGMRZDynMap.cpp

    r31270 r31402  
    11/* $Id$ */
    22/** @file
    3  * PGM - Page Manager and Monitor, ring-0 dynamic mapping cache.
     3 * PGM - Page Manager and Monitor, dynamic mapping cache.
    44 */
    55
    66/*
    7  * Copyright (C) 2008 Oracle Corporation
     7 * Copyright (C) 2008-2010 Oracle Corporation
    88 *
    99 * This file is part of VirtualBox Open Source Edition (OSE), as
     
    1616 */
    1717
     18
    1819/*******************************************************************************
    1920*   Internal Functions                                                         *
    2021*******************************************************************************/
    21 #define LOG_GROUP LOG_GROUP_PGM
     22#define LOG_GROUP LOG_GROUP_PGM_DYNMAP
    2223#include <VBox/pgm.h>
    2324#include "../PGMInternal.h"
    2425#include <VBox/vm.h>
    2526#include "../PGMInline.h"
     27#include <VBox/err.h>
     28#include <VBox/param.h>
    2629#include <VBox/sup.h>
    27 #include <VBox/err.h>
    2830#include <iprt/asm.h>
    2931#include <iprt/asm-amd64-x86.h>
    30 #include <iprt/alloc.h>
    3132#include <iprt/assert.h>
    32 #include <iprt/cpuset.h>
    33 #include <iprt/memobj.h>
    34 #include <iprt/mp.h>
    35 #include <iprt/semaphore.h>
    36 #include <iprt/spinlock.h>
     33#ifndef IN_RC
     34# include <iprt/cpuset.h>
     35# include <iprt/mem.h>
     36# include <iprt/memobj.h>
     37# include <iprt/mp.h>
     38# include <iprt/semaphore.h>
     39# include <iprt/spinlock.h>
     40#endif
    3741#include <iprt/string.h>
    3842
     
    4145*   Defined Constants And Macros                                               *
    4246*******************************************************************************/
     47#ifdef IN_RING0
    4348/** The max size of the mapping cache (in pages). */
    44 #define PGMR0DYNMAP_MAX_PAGES               ((16*_1M) >> PAGE_SHIFT)
     49# define PGMR0DYNMAP_MAX_PAGES              ((16*_1M) >> PAGE_SHIFT)
    4550/** The small segment size that is adopted on out-of-memory conditions with a
    4651 * single big segment. */
    47 #define PGMR0DYNMAP_SMALL_SEG_PAGES         128
     52# define PGMR0DYNMAP_SMALL_SEG_PAGES        128
    4853/** The number of pages we reserve per CPU. */
    49 #define PGMR0DYNMAP_PAGES_PER_CPU           256
     54# define PGMR0DYNMAP_PAGES_PER_CPU          256
    5055/** The minimum number of pages we reserve per CPU.
    5156 * This must be equal or larger than the autoset size.  */
    52 #define PGMR0DYNMAP_PAGES_PER_CPU_MIN       64
     57# define PGMR0DYNMAP_PAGES_PER_CPU_MIN      64
     58/** Calcs the overload threshold (safety margin).  Current set at 50%. */
     59# define PGMR0DYNMAP_CALC_OVERLOAD(cPages)  ((cPages) / 2)
    5360/** The number of guard pages.
    5461 * @remarks Never do tuning of the hashing or whatnot with a strict build!  */
    55 #if defined(VBOX_STRICT)
    56 # define PGMR0DYNMAP_GUARD_PAGES            1
    57 #else
    58 # define PGMR0DYNMAP_GUARD_PAGES            0
    59 #endif
     62# if defined(VBOX_STRICT)
     63#  define PGMR0DYNMAP_GUARD_PAGES           1
     64# else
     65#  define PGMR0DYNMAP_GUARD_PAGES           0
     66# endif
     67#endif /* IN_RING0 */
    6068/** The dummy physical address of guard pages. */
    6169#define PGMR0DYNMAP_GUARD_PAGE_HCPHYS       UINT32_C(0x7777feed)
     
    6674 * The alternative is to replace the entire PTE with an bad not-present
    6775 * PTE. Either way, XNU will screw us. :-/   */
    68 #define PGMR0DYNMAP_GUARD_NP
     76# define PGMR0DYNMAP_GUARD_NP
    6977#endif
    7078/** The dummy PTE value for a page. */
     
    7280/** The dummy PTE value for a page. */
    7381#define PGMR0DYNMAP_GUARD_PAGE_PAE_PTE      UINT64_MAX /*X86_PTE_PAE_PG_MASK*/
    74 /** Calcs the overload threshold. Current set at 50%. */
    75 #define PGMR0DYNMAP_CALC_OVERLOAD(cPages)   ((cPages) / 2)
    76 
    77 #if 0
    78 /* Assertions causes panics if preemption is disabled, this can be used to work around that. */
    79 //#define RTSpinlockAcquire(a,b) do {} while (0)
    80 //#define RTSpinlockRelease(a,b) do {} while (0)
    81 #endif
     82
     83#ifdef IN_RING0 /* Note! Assertions causes panics if preemption is disabled,
     84                 *       disable this to work around that. */
     85/**
     86 * Acquire the spinlock.
     87 * This will declare a temporary variable and expands to two statements!
     88 */
     89# define PGMRZDYNMAP_SPINLOCK_ACQUIRE(pThis) \
     90    RTSPINLOCKTMP   MySpinlockTmp = RTSPINLOCKTMP_INITIALIZER; \
     91    RTSpinlockAcquire((pThis)->hSpinlock, &MySpinlockTmp)
     92/**
     93 * Releases the spinlock.
     94 */
     95# define PGMRZDYNMAP_SPINLOCK_RELEASE(pThis) \
     96    RTSpinlockRelease((pThis)->hSpinlock, &MySpinlockTmp)
     97
     98/**
     99 * Re-acquires the spinlock.
     100 */
     101# define PGMRZDYNMAP_SPINLOCK_REACQUIRE(pThis) \
     102    RTSpinlockAcquire((pThis)->hSpinlock, &MySpinlockTmp)
     103#else
     104# define PGMRZDYNMAP_SPINLOCK_ACQUIRE(pThis)   do { } while (0)
     105# define PGMRZDYNMAP_SPINLOCK_RELEASE(pThis)   do { } while (0)
     106# define PGMRZDYNMAP_SPINLOCK_REACQUIRE(pThis) do { } while (0)
     107#endif
     108
    82109
    83110/** Converts a PGMCPUM::AutoSet pointer into a PVMCPU. */
    84 #define PGMR0DYNMAP_2_VMCPU(pSet)           (RT_FROM_MEMBER(pSet, VMCPU, pgm.s.AutoSet))
     111#define PGMRZDYNMAP_SET_2_VMCPU(pSet)       (RT_FROM_MEMBER(pSet, VMCPU, pgm.s.AutoSet))
    85112
    86113/** Converts a PGMCPUM::AutoSet pointer into a PVM. */
    87 #define PGMR0DYNMAP_2_VM(pSet)              (PGMR0DYNMAP_2_VMCPU(pSet)->CTX_SUFF(pVM))
     114#define PGMRZDYNMAP_SET_2_VM(pSet)          (PGMRZDYNMAP_SET_2_VMCPU(pSet)->CTX_SUFF(pVM))
     115
     116/** Converts a PGMCPUM::AutoSet pointer into a PVM. */
     117#ifdef IN_RC
     118# define PGMRZDYNMAP_SET_2_DYNMAP(pSet)     (PGMRZDYNMAP_SET_2_VM(pSet)->pgm.s.pRCDynMap)
     119#else
     120# define PGMRZDYNMAP_SET_2_DYNMAP(pSet)     (g_pPGMR0DynMap)
     121#endif
     122
     123/**
     124 * Gets the set index of the current CPU.
     125 *
     126 * This always returns 0 when in raw-mode context because there is only ever
     127 * one EMT in that context (at least presently).
     128 */
     129#ifdef IN_RC
     130# define PGMRZDYNMAP_CUR_CPU()              (0)
     131#else
     132# define PGMRZDYNMAP_CUR_CPU()              RTMpCpuIdToSetIndex(RTMpCpuId())
     133#endif
     134
     135/** PGMRZDYNMAP::u32Magic. (Jens Christian Bugge Wesseltoft) */
     136#define PGMRZDYNMAP_MAGIC                   UINT32_C(0x19640201)
     137
     138
     139/** Zaps an set entry. */
     140#define PGMRZDYNMAP_ZAP_ENTRY(pEntry) \
     141    do \
     142    { \
     143        (pEntry)->iPage        = UINT16_MAX; \
     144        (pEntry)->cRefs        = 0; \
     145        (pEntry)->cInlinedRefs = 0; \
     146        (pEntry)->cUnrefs      = 0; \
     147    } while (0)
    88148
    89149
     
    91151*   Structures and Typedefs                                                    *
    92152*******************************************************************************/
     153#ifdef IN_RING0
    93154/**
    94155 * Ring-0 dynamic mapping cache segment.
     
    125186 * Ring-0 dynamic mapping cache entry.
    126187 *
    127  * This structure tracks
     188 * @sa PGMRZDYNMAPENTRY, PGMRCDYNMAPENTRY.
    128189 */
    129190typedef struct PGMR0DYNMAPENTRY
     
    147208        void                   *pv;
    148209    } uPte;
     210# ifndef IN_RC
    149211    /** CPUs that haven't invalidated this entry after it's last update. */
    150212    RTCPUSET                    PendingSet;
     213# endif
    151214} PGMR0DYNMAPENTRY;
    152 /** Pointer to a ring-0 dynamic mapping cache entry. */
     215/** Pointer a mapping cache entry for the ring-0.
     216 * @sa PPGMRZDYNMAPENTRY, PPGMRCDYNMAPENTRY,  */
    153217typedef PGMR0DYNMAPENTRY *PPGMR0DYNMAPENTRY;
    154218
    155219
    156220/**
    157  * Ring-0 dynamic mapping cache.
    158  *
    159  * This is initialized during VMMR0 module init but no segments are allocated at
    160  * that time.  Segments will be added when the first VM is started and removed
    161  * again when the last VM shuts down, thus avoid consuming memory while dormant.
    162  * At module termination, the remaining bits will be freed up.
     221 * Dynamic mapping cache for ring-0.
     222 *
     223 * This is initialized during VMMR0 module init but no segments are allocated
     224 * at that time.  Segments will be added when the first VM is started and
     225 * removed again when the last VM shuts down, thus avoid consuming memory while
     226 * dormant. At module termination, the remaining bits will be freed up.
     227 *
     228 * @sa PPGMRZDYNMAP, PGMRCDYNMAP.
    163229 */
    164230typedef struct PGMR0DYNMAP
    165231{
    166     /** The usual magic number / eye catcher (PGMR0DYNMAP_MAGIC). */
     232    /** The usual magic number / eye catcher (PGMRZDYNMAP_MAGIC). */
    167233    uint32_t                    u32Magic;
     234# ifndef IN_RC
    168235    /** Spinlock serializing the normal operation of the cache. */
    169236    RTSPINLOCK                  hSpinlock;
     237# endif
    170238    /** Array for tracking and managing the pages.  */
    171239    PPGMR0DYNMAPENTRY           paPages;
     
    180248     * This is maintained to get trigger adding of more mapping space. */
    181249    uint32_t                    cMaxLoad;
     250# ifndef IN_RC
    182251    /** Initialization / termination lock. */
    183252    RTSEMFASTMUTEX              hInitLock;
     253# endif
    184254    /** The number of guard pages. */
    185255    uint32_t                    cGuardPages;
    186256    /** The number of users (protected by hInitLock). */
    187257    uint32_t                    cUsers;
     258# ifndef IN_RC
    188259    /** Array containing a copy of the original page tables.
    189260     * The entries are either X86PTE or X86PTEPAE according to fLegacyMode. */
     
    193264    /** The paging mode. */
    194265    SUPPAGINGMODE               enmPgMode;
     266# endif
    195267} PGMR0DYNMAP;
    196 /** Pointer to the ring-0 dynamic mapping cache */
    197 typedef PGMR0DYNMAP *PPGMR0DYNMAP;
    198 
    199 /** PGMR0DYNMAP::u32Magic. (Jens Christian Bugge Wesseltoft) */
    200 #define PGMR0DYNMAP_MAGIC       0x19640201
    201268
    202269
     
    228295/** Pointer to paging level data. */
    229296typedef PGMR0DYNMAPPGLVL *PPGMR0DYNMAPPGLVL;
     297#endif
     298
     299/** Mapping cache entry for the current context.
     300 * @sa PGMR0DYNMAPENTRY, PGMRCDYNMAPENTRY  */
     301typedef CTX_MID(PGM,DYNMAPENTRY) PGMRZDYNMAPENTRY;
     302/** Pointer a mapping cache entry for the current context.
     303 * @sa PGMR0DYNMAPENTRY, PGMRCDYNMAPENTRY  */
     304typedef PGMRZDYNMAPENTRY *PPGMRZDYNMAPENTRY;
     305
     306/** Pointer the mapping cache instance for the current context.
     307 * @sa PGMR0DYNMAP, PGMRCDYNMAP  */
     308typedef CTX_MID(PGM,DYNMAP) *PPGMRZDYNMAP;
     309
    230310
    231311
     
    233313*   Global Variables                                                           *
    234314*******************************************************************************/
     315#ifdef IN_RING0
    235316/** Pointer to the ring-0 dynamic mapping cache. */
    236 static PPGMR0DYNMAP g_pPGMR0DynMap;
     317static PGMR0DYNMAP *g_pPGMR0DynMap;
     318#endif
    237319/** For overflow testing. */
    238320static bool         g_fPGMR0DynMapTestRunning = false;
     
    242324*   Internal Functions                                                         *
    243325*******************************************************************************/
    244 static void pgmR0DynMapReleasePage(PPGMR0DYNMAP pThis, uint32_t iPage, uint32_t cRefs);
    245 static int  pgmR0DynMapSetup(PPGMR0DYNMAP pThis);
    246 static int  pgmR0DynMapExpand(PPGMR0DYNMAP pThis);
    247 static void pgmR0DynMapTearDown(PPGMR0DYNMAP pThis);
     326static void pgmRZDynMapReleasePage(PPGMRZDYNMAP pThis, uint32_t iPage, uint32_t cRefs);
     327#ifdef IN_RING0
     328static int  pgmR0DynMapSetup(PPGMRZDYNMAP pThis);
     329static int  pgmR0DynMapExpand(PPGMRZDYNMAP pThis);
     330static void pgmR0DynMapTearDown(PPGMRZDYNMAP pThis);
     331#endif
    248332#if 0 /*def DEBUG*/
    249333static int  pgmR0DynMapTest(PVM pVM);
     
    252336
    253337/**
     338 * Initializes the auto mapping sets for a VM.
     339 *
     340 * @returns VINF_SUCCESS on success, VERR_INTERNAL_ERROR on failure.
     341 * @param   pVM         The VM in question.
     342 */
     343static int pgmRZDynMapInitAutoSetsForVM(PVM pVM)
     344{
     345    VMCPUID idCpu = pVM->cCpus;
     346    AssertReturn(idCpu > 0 && idCpu <= VMM_MAX_CPU_COUNT, VERR_INTERNAL_ERROR);
     347    while (idCpu-- > 0)
     348    {
     349        PPGMMAPSET pSet = &pVM->aCpus[idCpu].pgm.s.AutoSet;
     350        uint32_t j = RT_ELEMENTS(pSet->aEntries);
     351        while (j-- > 0)
     352        {
     353            pSet->aEntries[j].pvPage        = NULL;
     354            pSet->aEntries[j].HCPhys        = NIL_RTHCPHYS;
     355            PGMRZDYNMAP_ZAP_ENTRY(&pSet->aEntries[j]);
     356        }
     357        pSet->cEntries = PGMMAPSET_CLOSED;
     358        pSet->iSubset = UINT32_MAX;
     359        pSet->iCpu = -1;
     360        memset(&pSet->aiHashTable[0], 0xff, sizeof(pSet->aiHashTable));
     361    }
     362
     363    return VINF_SUCCESS;
     364}
     365
     366
     367#ifdef IN_RING0
     368
     369/**
    254370 * Initializes the ring-0 dynamic mapping cache.
    255371 *
     
    263379     * Create and initialize the cache instance.
    264380     */
    265     PPGMR0DYNMAP pThis = (PPGMR0DYNMAP)RTMemAllocZ(sizeof(*pThis));
     381    PPGMRZDYNMAP pThis = (PPGMRZDYNMAP)RTMemAllocZ(sizeof(*pThis));
    266382    AssertLogRelReturn(pThis, VERR_NO_MEMORY);
    267383    int             rc = VINF_SUCCESS;
     
    295411            if (RT_SUCCESS(rc))
    296412            {
    297                 pThis->u32Magic = PGMR0DYNMAP_MAGIC;
     413                pThis->u32Magic = PGMRZDYNMAP_MAGIC;
    298414                g_pPGMR0DynMap = pThis;
    299415                return VINF_SUCCESS;
     
    322438     * is just a mirror image of PGMR0DynMapInit.
    323439     */
    324     PPGMR0DYNMAP pThis = g_pPGMR0DynMap;
     440    PPGMRZDYNMAP pThis = g_pPGMR0DynMap;
    325441    if (pThis)
    326442    {
     
    359475     * Initialize the auto sets.
    360476     */
    361     VMCPUID idCpu = pVM->cCpus;
    362     AssertReturn(idCpu > 0 && idCpu <= VMM_MAX_CPU_COUNT, VERR_INTERNAL_ERROR);
    363     while (idCpu-- > 0)
    364     {
    365         PPGMMAPSET pSet = &pVM->aCpus[idCpu].pgm.s.AutoSet;
    366         uint32_t j = RT_ELEMENTS(pSet->aEntries);
    367         while (j-- > 0)
    368         {
    369             pSet->aEntries[j].iPage  = UINT16_MAX;
    370             pSet->aEntries[j].cRefs  = 0;
    371             pSet->aEntries[j].pvPage = NULL;
    372             pSet->aEntries[j].HCPhys = NIL_RTHCPHYS;
    373         }
    374         pSet->cEntries = PGMMAPSET_CLOSED;
    375         pSet->iSubset = UINT32_MAX;
    376         pSet->iCpu = -1;
    377         memset(&pSet->aiHashTable[0], 0xff, sizeof(pSet->aiHashTable));
    378     }
     477    int rc = pgmRZDynMapInitAutoSetsForVM(pVM);
     478    if (RT_FAILURE(rc))
     479        return rc;
    379480
    380481    /*
     
    387488     * Reference and if necessary setup or expand the cache.
    388489     */
    389     PPGMR0DYNMAP pThis = g_pPGMR0DynMap;
     490    PPGMRZDYNMAP pThis = g_pPGMR0DynMap;
    390491    AssertPtrReturn(pThis, VERR_INTERNAL_ERROR);
    391     int rc = RTSemFastMutexRequest(pThis->hInitLock);
     492    rc = RTSemFastMutexRequest(pThis->hInitLock);
    392493    AssertLogRelRCReturn(rc, rc);
    393494
     
    430531        return;
    431532
    432     PPGMR0DYNMAP pThis = g_pPGMR0DynMap;
     533    PPGMRZDYNMAP pThis = g_pPGMR0DynMap;
    433534    AssertPtrReturnVoid(pThis);
    434535
     
    463564                    LogRel(("PGMR0DynMapTermVM: %d dangling refs to %#x\n", cRefs, iPage));
    464565                    if (iPage < pThis->cPages && cRefs > 0)
    465                         pgmR0DynMapReleasePage(pThis, iPage, cRefs);
     566                        pgmRZDynMapReleasePage(pThis, iPage, cRefs);
    466567                    else
    467568                        AssertLogRelMsgFailed(("cRefs=%d iPage=%#x cPages=%u\n", cRefs, iPage, pThis->cPages));
    468569
    469                     pSet->aEntries[j].iPage  = UINT16_MAX;
    470                     pSet->aEntries[j].cRefs  = 0;
    471                     pSet->aEntries[j].pvPage = NULL;
    472                     pSet->aEntries[j].HCPhys = NIL_RTHCPHYS;
     570                    PGMRZDYNMAP_ZAP_ENTRY(&pSet->aEntries[j]);
    473571                }
    474572                pSet->cEntries = PGMMAPSET_CLOSED;
     
    512610{
    513611    Assert(!pvUser2);
    514     PPGMR0DYNMAP        pThis   = (PPGMR0DYNMAP)pvUser1;
     612    PPGMRZDYNMAP        pThis   = (PPGMRZDYNMAP)pvUser1;
    515613    Assert(pThis == g_pPGMR0DynMap);
    516     PPGMR0DYNMAPENTRY   paPages = pThis->paPages;
     614    PPGMRZDYNMAPENTRY   paPages = pThis->paPages;
    517615    uint32_t            iPage   = pThis->cPages;
    518616    while (iPage-- > 0)
     
    527625 * @param   pThis       The dynamic mapping cache instance.
    528626 */
    529 static int pgmR0DynMapTlbShootDown(PPGMR0DYNMAP pThis)
     627static int pgmR0DynMapTlbShootDown(PPGMRZDYNMAP pThis)
    530628{
    531629    int rc = RTMpOnAll(pgmR0DynMapShootDownTlbs, pThis, NULL);
     
    548646 * @param   pcMinPages  The minimal size in pages.
    549647 */
    550 static uint32_t pgmR0DynMapCalcNewSize(PPGMR0DYNMAP pThis, uint32_t *pcMinPages)
     648static uint32_t pgmR0DynMapCalcNewSize(PPGMRZDYNMAP pThis, uint32_t *pcMinPages)
    551649{
    552650    Assert(pThis->cPages <= PGMR0DYNMAP_MAX_PAGES);
     
    594692 * @param   pPgLvl      The paging level data.
    595693 */
    596 void pgmR0DynMapPagingArrayInit(PPGMR0DYNMAP pThis, PPGMR0DYNMAPPGLVL pPgLvl)
     694void pgmR0DynMapPagingArrayInit(PPGMRZDYNMAP pThis, PPGMR0DYNMAPPGLVL pPgLvl)
    597695{
    598696    RTCCUINTREG     cr4 = ASMGetCR4();
     
    704802 * @param   ppvPTE      Where to store the PTE address.
    705803 */
    706 static int pgmR0DynMapPagingArrayMapPte(PPGMR0DYNMAP pThis, PPGMR0DYNMAPPGLVL pPgLvl, void *pvPage,
     804static int pgmR0DynMapPagingArrayMapPte(PPGMRZDYNMAP pThis, PPGMR0DYNMAPPGLVL pPgLvl, void *pvPage,
    707805                                        PPGMR0DYNMAPSEG pSeg, uint32_t cMaxPTs, void **ppvPTE)
    708806{
     
    791889 * @param   pPage       The page.
    792890 */
    793 DECLINLINE(void) pgmR0DynMapSetupGuardPage(PPGMR0DYNMAP pThis, PPGMR0DYNMAPENTRY pPage)
     891DECLINLINE(void) pgmR0DynMapSetupGuardPage(PPGMRZDYNMAP pThis, PPGMRZDYNMAPENTRY pPage)
    794892{
    795893    memset(pPage->pvPage, 0xfd, PAGE_SIZE);
     
    815913 * @param   cPages      The size of the new segment, give as a page count.
    816914 */
    817 static int pgmR0DynMapAddSeg(PPGMR0DYNMAP pThis, uint32_t cPages)
     915static int pgmR0DynMapAddSeg(PPGMRZDYNMAP pThis, uint32_t cPages)
    818916{
    819917    int rc2;
     
    838936    }
    839937
    840     RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    841     RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
     938    PGMRZDYNMAP_SPINLOCK_ACQUIRE(pThis);
    842939
    843940    memcpy(pvPages, pThis->paPages, sizeof(pThis->paPages[0]) * pThis->cPages);
    844941    void *pvToFree = pThis->paPages;
    845     pThis->paPages = (PPGMR0DYNMAPENTRY)pvPages;
    846 
    847     RTSpinlockRelease(pThis->hSpinlock, &Tmp);
     942    pThis->paPages = (PPGMRZDYNMAPENTRY)pvPages;
     943
     944    PGMRZDYNMAP_SPINLOCK_RELEASE(pThis);
    848945    RTMemFree(pvToFree);
    849946
     
    882979            pThis->paPages[iPage].cRefs  = 0;
    883980            pThis->paPages[iPage].uPte.pPae = 0;
     981#ifndef IN_RC
    884982            RTCpuSetFill(&pThis->paPages[iPage].PendingSet);
     983#endif
    885984
    886985            /* Map its page table, retry until we've got a clean run (paranoia). */
     
    9831082 * @param   pThis       The dynamic mapping cache instance.
    9841083 */
    985 static int pgmR0DynMapSetup(PPGMR0DYNMAP pThis)
     1084static int pgmR0DynMapSetup(PPGMRZDYNMAP pThis)
    9861085{
    9871086    /*
     
    10261125 * @param   pThis       The dynamic mapping cache instance.
    10271126 */
    1028 static int pgmR0DynMapExpand(PPGMR0DYNMAP pThis)
     1127static int pgmR0DynMapExpand(PPGMRZDYNMAP pThis)
    10291128{
    10301129    /*
     
    10691168 * @param   pThis       The dynamic mapping cache instance.
    10701169 */
    1071 static void pgmR0DynMapTearDown(PPGMR0DYNMAP pThis)
     1170static void pgmR0DynMapTearDown(PPGMRZDYNMAP pThis)
    10721171{
    10731172    /*
    10741173     * Restore the original page table entries
    10751174     */
    1076     PPGMR0DYNMAPENTRY   paPages = pThis->paPages;
     1175    PPGMRZDYNMAPENTRY   paPages = pThis->paPages;
    10771176    uint32_t            iPage   = pThis->cPages;
    10781177    if (pThis->fLegacyMode)
     
    11451244}
    11461245
     1246#endif /* IN_RING0 */
     1247#ifdef IN_RC
     1248
     1249/**
     1250 * Initializes the dynamic mapping cache in raw-mode context.
     1251 *
     1252 * @returns VBox status code.
     1253 * @param   pVM                 The VM handle.
     1254 */
     1255VMMRCDECL(int) PGMRCDynMapInit(PVM pVM)
     1256{
     1257    /*
     1258     * Allocate and initialize the instance data and page array.
     1259     */
     1260    PPGMRZDYNMAP    pThis;
     1261    size_t const    cPages = MM_HYPER_DYNAMIC_SIZE / PAGE_SIZE;
     1262    size_t const    cb     = RT_ALIGN_Z(sizeof(*pThis), 32)
     1263                           + sizeof(PGMRZDYNMAPENTRY) * cPages;
     1264    int rc = MMHyperAlloc(pVM, cb, 32, MM_TAG_PGM, (void **)&pThis);
     1265    if (RT_FAILURE(rc))
     1266        return rc;
     1267
     1268    pThis->u32Magic     = PGMRZDYNMAP_MAGIC;
     1269    pThis->paPages      = RT_ALIGN_PT(pThis + 1, 32, PPGMRZDYNMAPENTRY);
     1270    pThis->cPages       = cPages;
     1271    pThis->fLegacyMode  = PGMGetHostMode(pVM) == PGMMODE_32_BIT;
     1272    pThis->cLoad        = 0;
     1273    pThis->cMaxLoad     = 0;
     1274    pThis->cGuardPages  = 0;
     1275    pThis->cUsers       = 1;
     1276
     1277    for (size_t iPage = 0; iPage < cPages; iPage++)
     1278    {
     1279        pThis->paPages[iPage].HCPhys = NIL_RTHCPHYS;
     1280        pThis->paPages[iPage].pvPage = pVM->pgm.s.pbDynPageMapBaseGC + iPage * PAGE_SIZE;
     1281        pThis->paPages[iPage].cRefs  = 0;
     1282        if (pThis->fLegacyMode)
     1283            pThis->paPages[iPage].uPte.pLegacy = &pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage];
     1284        else
     1285            pThis->paPages[iPage].uPte.pPae    = &pVM->pgm.s.paDynPageMapPaePTEsGC[iPage];
     1286    }
     1287
     1288    pVM->pgm.s.pRCDynMap = pThis;
     1289
     1290    /*
     1291     * Initialize the autosets the VM.
     1292     */
     1293    rc = pgmRZDynMapInitAutoSetsForVM(pVM);
     1294    if (RT_FAILURE(rc))
     1295        return rc;
     1296
     1297    return VINF_SUCCESS;
     1298}
     1299
     1300#endif /* IN_RC */
    11471301
    11481302/**
     
    11531307 * @param   cRefs       The number of references to release.
    11541308 */
    1155 DECLINLINE(void) pgmR0DynMapReleasePageLocked(PPGMR0DYNMAP pThis, uint32_t iPage, int32_t cRefs)
     1309DECLINLINE(void) pgmRZDynMapReleasePageLocked(PPGMRZDYNMAP pThis, uint32_t iPage, int32_t cRefs)
    11561310{
    11571311    cRefs = ASMAtomicSubS32(&pThis->paPages[iPage].cRefs, cRefs) - cRefs;
     
    11691323 * @param   cRefs       The number of references to release.
    11701324 */
    1171 static void pgmR0DynMapReleasePage(PPGMR0DYNMAP pThis, uint32_t iPage, uint32_t cRefs)
    1172 {
    1173     RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    1174     RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
    1175     pgmR0DynMapReleasePageLocked(pThis, iPage, cRefs);
    1176     RTSpinlockRelease(pThis->hSpinlock, &Tmp);
     1325static void pgmRZDynMapReleasePage(PPGMRZDYNMAP pThis, uint32_t iPage, uint32_t cRefs)
     1326{
     1327    PGMRZDYNMAP_SPINLOCK_ACQUIRE(pThis);
     1328    pgmRZDynMapReleasePageLocked(pThis, iPage, cRefs);
     1329    PGMRZDYNMAP_SPINLOCK_RELEASE(pThis);
    11771330}
    11781331
     
    11861339 * @param   iPage       The page index pgmR0DynMapPage hashed HCPhys to.
    11871340 * @param   pVCpu       The current CPU, for statistics.
    1188  */
    1189 static uint32_t pgmR0DynMapPageSlow(PPGMR0DYNMAP pThis, RTHCPHYS HCPhys, uint32_t iPage, PVMCPU pVCpu)
    1190 {
    1191     STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapPageSlow);
     1341 * @param   pfNew       Set to @c true if a new entry was made and @c false if
     1342 *                      an old entry was found and reused.
     1343 */
     1344static uint32_t pgmR0DynMapPageSlow(PPGMRZDYNMAP pThis, RTHCPHYS HCPhys, uint32_t iPage, PVMCPU pVCpu, bool *pfNew)
     1345{
     1346    STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapPageSlow);
    11921347
    11931348    /*
     
    11991354#endif
    12001355    uint32_t const      cPages  = pThis->cPages;
    1201     PPGMR0DYNMAPENTRY   paPages = pThis->paPages;
     1356    PPGMRZDYNMAPENTRY   paPages = pThis->paPages;
    12021357    uint32_t            iFreePage;
    12031358    if (!paPages[iPage].cRefs)
     
    12171372            if (paPages[iFreePage].HCPhys == HCPhys)
    12181373            {
    1219                 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapPageSlowLoopHits);
     1374                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapPageSlowLoopHits);
     1375                *pfNew = false;
    12201376                return iFreePage;
    12211377            }
     
    12281384                return UINT32_MAX;
    12291385        }
    1230         STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapPageSlowLoopMisses);
     1386        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapPageSlowLoopMisses);
    12311387#ifdef VBOX_WITH_STATISTICS
    12321388        fLooped = true;
     
    12401396        for (uint32_t iPage2 = (iPage + 3) % cPages; iPage2 != iPage; iPage2 = (iPage2 + 1) % cPages)
    12411397            if (paPages[iPage2].HCPhys == HCPhys)
    1242                 STAM_COUNTER_INC(&pVCpu->pgm.s.StatR0DynMapPageSlowLostHits);
     1398                STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZDynMapPageSlowLostHits);
    12431399#endif
    12441400
     
    12461402     * Setup the new entry.
    12471403     */
     1404    *pfNew = true;
    12481405    /*Log6(("pgmR0DynMapPageSlow: old - %RHp %#x %#llx\n", paPages[iFreePage].HCPhys, paPages[iFreePage].cRefs, paPages[iFreePage].uPte.pPae->u));*/
    12491406    paPages[iFreePage].HCPhys = HCPhys;
     1407#ifndef IN_RC
    12501408    RTCpuSetFill(&paPages[iFreePage].PendingSet);
     1409#endif
    12511410    if (pThis->fLegacyMode)
    12521411    {
     
    12861445 * @param   ppvPage     Where to the page address.
    12871446 */
    1288 DECLINLINE(uint32_t) pgmR0DynMapPage(PPGMR0DYNMAP pThis, RTHCPHYS HCPhys, int32_t iRealCpu, PVMCPU pVCpu, void **ppvPage)
    1289 {
    1290     RTSPINLOCKTMP       Tmp     = RTSPINLOCKTMP_INITIALIZER;
    1291     RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
     1447DECLINLINE(uint32_t) pgmR0DynMapPage(PPGMRZDYNMAP pThis, RTHCPHYS HCPhys, int32_t iRealCpu, PVMCPU pVCpu, void **ppvPage)
     1448{
     1449    PGMRZDYNMAP_SPINLOCK_ACQUIRE(pThis);
    12921450    AssertMsg(!(HCPhys & PAGE_OFFSET_MASK), ("HCPhys=%RHp\n", HCPhys));
    1293     STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapPage);
     1451    STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapPage);
    12941452
    12951453    /*
     
    13011459     * to pgmR0DynMapPageSlow().
    13021460     */
     1461    bool                fNew    = false;
    13031462    uint32_t const      cPages  = pThis->cPages;
    13041463    uint32_t            iPage   = (HCPhys >> PAGE_SHIFT) % cPages;
    1305     PPGMR0DYNMAPENTRY   paPages = pThis->paPages;
     1464    PPGMRZDYNMAPENTRY   paPages = pThis->paPages;
    13061465    if (RT_LIKELY(paPages[iPage].HCPhys == HCPhys))
    1307         STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapPageHits0);
     1466        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapPageHits0);
    13081467    else
    13091468    {
     
    13121471        {
    13131472            iPage = iPage2;
    1314             STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapPageHits1);
     1473            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapPageHits1);
    13151474        }
    13161475        else
     
    13201479            {
    13211480                iPage = iPage2;
    1322                 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapPageHits2);
     1481                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapPageHits2);
    13231482            }
    13241483            else
    13251484            {
    1326                 iPage = pgmR0DynMapPageSlow(pThis, HCPhys, iPage, pVCpu);
     1485                iPage = pgmR0DynMapPageSlow(pThis, HCPhys, iPage, pVCpu, &fNew);
    13271486                if (RT_UNLIKELY(iPage == UINT32_MAX))
    13281487                {
    1329                     RTSpinlockRelease(pThis->hSpinlock, &Tmp);
     1488                    PGMRZDYNMAP_SPINLOCK_RELEASE(pThis);
    13301489                    *ppvPage = NULL;
    13311490                    return iPage;
     
    13491508    {
    13501509        ASMAtomicDecS32(&paPages[iPage].cRefs);
    1351         RTSpinlockRelease(pThis->hSpinlock, &Tmp);
     1510        PGMRZDYNMAP_SPINLOCK_RELEASE(pThis);
    13521511        *ppvPage = NULL;
    13531512        AssertLogRelMsgFailedReturn(("cRefs=%d iPage=%p HCPhys=%RHp\n", cRefs, iPage, HCPhys), UINT32_MAX);
     
    13551514    void *pvPage = paPages[iPage].pvPage;
    13561515
     1516#ifndef IN_RC
    13571517    /*
    13581518     * Invalidate the entry?
     
    13611521    if (RT_UNLIKELY(fInvalidateIt))
    13621522        RTCpuSetDelByIndex(&paPages[iPage].PendingSet, iRealCpu);
    1363 
    1364     RTSpinlockRelease(pThis->hSpinlock, &Tmp);
     1523#endif
     1524
     1525    PGMRZDYNMAP_SPINLOCK_RELEASE(pThis);
    13651526
    13661527    /*
    13671528     * Do the actual invalidation outside the spinlock.
    13681529     */
     1530#ifdef IN_RC
     1531    if (RT_UNLIKELY(fNew))
     1532#else
    13691533    if (RT_UNLIKELY(fInvalidateIt))
    1370     {
    1371         STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapPageInvlPg);
     1534#endif
     1535    {
     1536        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapPageInvlPg);
    13721537        ASMInvalidatePage(pvPage);
    13731538    }
     
    13831548 * @returns VBox status code.
    13841549 */
    1385 VMMR0DECL(int) PGMR0DynMapAssertIntegrity(void)
     1550static int pgmRZDynMapAssertIntegrity(PPGMRZDYNMAP pThis)
    13861551{
    13871552    /*
    13881553     * Basic pool stuff that doesn't require any lock, just assumes we're a user.
    13891554     */
    1390     PPGMR0DYNMAP        pThis       = g_pPGMR0DynMap;
    13911555    if (!pThis)
    13921556        return VINF_SUCCESS;
    13931557    AssertPtrReturn(pThis, VERR_INVALID_POINTER);
    1394     AssertReturn(pThis->u32Magic == PGMR0DYNMAP_MAGIC, VERR_INVALID_MAGIC);
     1558    AssertReturn(pThis->u32Magic == PGMRZDYNMAP_MAGIC, VERR_INVALID_MAGIC);
    13951559    if (!pThis->cUsers)
    13961560        return VERR_INVALID_PARAMETER;
     
    13981562
    13991563    int                 rc          = VINF_SUCCESS;
    1400     RTSPINLOCKTMP       Tmp         = RTSPINLOCKTMP_INITIALIZER;
    1401     RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
     1564    PGMRZDYNMAP_SPINLOCK_ACQUIRE(pThis);
    14021565
    14031566#define CHECK_RET(expr, a) \
     
    14051568        if (RT_UNLIKELY(!(expr))) \
    14061569        { \
    1407             RTSpinlockRelease(pThis->hSpinlock, &Tmp); \
     1570            PGMRZDYNMAP_SPINLOCK_RELEASE(pThis); \
    14081571            RTAssertMsg1Weak(#expr, __LINE__, __FILE__, __PRETTY_FUNCTION__); \
    14091572            RTAssertMsg2Weak a; \
     
    14171580    uint32_t            cGuard      = 0;
    14181581    uint32_t            cLoad       = 0;
    1419     PPGMR0DYNMAPENTRY   paPages     = pThis->paPages;
     1582    PPGMRZDYNMAPENTRY   paPages     = pThis->paPages;
    14201583    uint32_t            iPage       = pThis->cPages;
    14211584    if (pThis->fLegacyMode)
    14221585    {
     1586#ifdef IN_RING0
    14231587        PCX86PGUINT     paSavedPTEs = (PCX86PGUINT)pThis->pvSavedPTEs; NOREF(paSavedPTEs);
     1588#endif
    14241589        while (iPage-- > 0)
    14251590        {
     
    14401605            {
    14411606                CHECK_RET(!(paPages[iPage].HCPhys & PAGE_OFFSET_MASK), ("#%u: %RHp\n", iPage, paPages[iPage].HCPhys));
    1442                 X86PGUINT uPte = (paSavedPTEs[iPage] & (X86_PTE_G | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT))
    1443                                | X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D
     1607                X86PGUINT uPte = X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D
     1608#ifdef IN_RING0
     1609                               | (paSavedPTEs[iPage] & (X86_PTE_G | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT))
     1610#endif
    14441611                               | (paPages[iPage].HCPhys & X86_PTE_PAE_PG_MASK);
    14451612                CHECK_RET(paPages[iPage].uPte.pLegacy->u == uPte,
     
    14481615                    cLoad++;
    14491616            }
     1617#ifdef IN_RING0
    14501618            else
    14511619                CHECK_RET(paPages[iPage].uPte.pLegacy->u == paSavedPTEs[iPage],
    14521620                          ("#%u: %#x %#x", iPage, paPages[iPage].uPte.pLegacy->u, paSavedPTEs[iPage]));
     1621#endif
    14531622        }
    14541623    }
    14551624    else
    14561625    {
     1626#ifdef IN_RING0
    14571627        PCX86PGPAEUINT  paSavedPTEs = (PCX86PGPAEUINT)pThis->pvSavedPTEs; NOREF(paSavedPTEs);
     1628#endif
    14581629        while (iPage-- > 0)
    14591630        {
     
    14741645            {
    14751646                CHECK_RET(!(paPages[iPage].HCPhys & PAGE_OFFSET_MASK), ("#%u: %RHp\n", iPage, paPages[iPage].HCPhys));
    1476                 X86PGPAEUINT uPte = (paSavedPTEs[iPage] & (X86_PTE_G | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT))
    1477                                   | X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D
     1647                X86PGPAEUINT uPte = X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D
     1648#ifdef IN_RING0
     1649                                  | (paSavedPTEs[iPage] & (X86_PTE_G | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT))
     1650#endif
    14781651                                  | (paPages[iPage].HCPhys & X86_PTE_PAE_PG_MASK);
    14791652                CHECK_RET(paPages[iPage].uPte.pPae->u == uPte,
     
    14821655                    cLoad++;
    14831656            }
     1657#ifdef IN_RING0
    14841658            else
    14851659                CHECK_RET(paPages[iPage].uPte.pPae->u == paSavedPTEs[iPage],
    14861660                          ("#%u: %#llx %#llx", iPage, paPages[iPage].uPte.pPae->u, paSavedPTEs[iPage]));
     1661#endif
    14871662        }
    14881663    }
     
    14921667
    14931668#undef CHECK_RET
    1494     RTSpinlockRelease(pThis->hSpinlock, &Tmp);
     1669    PGMRZDYNMAP_SPINLOCK_RELEASE(pThis);
    14951670    return VINF_SUCCESS;
    14961671}
     1672
     1673#ifdef IN_RING0
     1674/**
     1675 * Assert the integrity of the pool.
     1676 *
     1677 * @returns VBox status code.
     1678 */
     1679VMMR0DECL(int) PGMR0DynMapAssertIntegrity(void)
     1680{
     1681    return pgmRZDynMapAssertIntegrity(g_pPGMR0DynMap);
     1682}
     1683#endif /* IN_RING0 */
     1684
     1685#ifdef IN_RC
     1686/**
     1687 * Assert the integrity of the pool.
     1688 *
     1689 * @returns VBox status code.
     1690 */
     1691VMMRCDECL(int) PGMRCDynMapAssertIntegrity(PVM pVM)
     1692{
     1693    return pgmRZDynMapAssertIntegrity((PPGMRZDYNMAP)pVM->pgm.s.pRCDynMap);
     1694}
     1695#endif /* IN_RC */
     1696
     1697
     1698/**
     1699 * As a final resort for a (somewhat) full auto set or full cache, try merge
     1700 * duplicate entries and flush the ones we can.
     1701 *
     1702 * @param   pSet        The set.
     1703 */
     1704static void pgmDynMapOptimizeAutoSet(PPGMMAPSET pSet)
     1705{
     1706    LogFlow(("pgmDynMapOptimizeAutoSet\n"));
     1707
     1708    for (uint32_t i = 0 ; i < pSet->cEntries; i++)
     1709    {
     1710        /*
     1711         * Try merge entries.
     1712         */
     1713        uint16_t const  iPage = pSet->aEntries[i].iPage;
     1714        uint32_t        j     = i + 1;
     1715        while (   j < pSet->cEntries
     1716               && (   pSet->iSubset == UINT32_MAX
     1717                   || pSet->iSubset < pSet->cEntries) )
     1718        {
     1719            if (pSet->aEntries[j].iPage != iPage)
     1720                j++;
     1721            else
     1722            {
     1723                uint32_t const  cHardRefs    = (uint32_t)pSet->aEntries[i].cRefs
     1724                                             + (uint32_t)pSet->aEntries[j].cRefs;
     1725                uint32_t        cInlinedRefs = (uint32_t)pSet->aEntries[i].cInlinedRefs
     1726                                             + (uint32_t)pSet->aEntries[j].cInlinedRefs;
     1727                uint32_t        cUnrefs      = (uint32_t)pSet->aEntries[i].cUnrefs
     1728                                             + (uint32_t)pSet->aEntries[j].cUnrefs;
     1729                uint32_t        cSub         = RT_MIN(cUnrefs, cInlinedRefs);
     1730                cInlinedRefs -= cSub;
     1731                cUnrefs      -= cSub;
     1732
     1733                if (    cHardRefs    < UINT16_MAX
     1734                    &&  cInlinedRefs < UINT16_MAX
     1735                    &&  cUnrefs      < UINT16_MAX)
     1736                {
     1737                    /* merge j into i removing j. */
     1738                    Log2(("pgmDynMapOptimizeAutoSet: Merging #%u into #%u\n", j, i));
     1739                    pSet->aEntries[i].cRefs        = cHardRefs;
     1740                    pSet->aEntries[i].cInlinedRefs = cInlinedRefs;
     1741                    pSet->aEntries[i].cUnrefs      = cUnrefs;
     1742                    pSet->cEntries--;
     1743                    if (j < pSet->cEntries)
     1744                    {
     1745                        pSet->aEntries[j] = pSet->aEntries[pSet->cEntries];
     1746                        PGMRZDYNMAP_ZAP_ENTRY(&pSet->aEntries[pSet->cEntries]);
     1747                    }
     1748                    else
     1749                        PGMRZDYNMAP_ZAP_ENTRY(&pSet->aEntries[j]);
     1750                }
     1751#if 0 /* too complicated, skip it. */
     1752                else
     1753                {
     1754                    /* migrate the max number of refs from j into i and quit the inner loop. */
     1755                    uint32_t cMigrate = UINT16_MAX - 1 - pSet->aEntries[i].cRefs;
     1756                    Assert(pSet->aEntries[j].cRefs > cMigrate);
     1757                    pSet->aEntries[j].cRefs -= cMigrate;
     1758                    pSet->aEntries[i].cRefs = UINT16_MAX - 1;
     1759                    break;
     1760                }
     1761#endif
     1762            }
     1763        }
     1764
     1765        /*
     1766         * Try make use of the unused hinting (cUnrefs) to evict entries
     1767         * from both the set as well as the mapping cache.
     1768         */
     1769
     1770        uint32_t const cTotalRefs = (uint32_t)pSet->aEntries[i].cRefs + pSet->aEntries[i].cInlinedRefs;
     1771        Log2(("pgmDynMapOptimizeAutoSet: #%u/%u/%u pvPage=%p iPage=%u cRefs=%u cInlinedRefs=%u cUnrefs=%u cTotalRefs=%u\n",
     1772              i,
     1773              pSet->iSubset,
     1774              pSet->cEntries,
     1775              pSet->aEntries[i].pvPage,
     1776              pSet->aEntries[i].iPage,
     1777              pSet->aEntries[i].cRefs,
     1778              pSet->aEntries[i].cInlinedRefs,
     1779              pSet->aEntries[i].cUnrefs,
     1780              cTotalRefs));
     1781        Assert(cTotalRefs >= pSet->aEntries[i].cUnrefs);
     1782
     1783        if (    cTotalRefs == pSet->aEntries[i].cUnrefs
     1784            &&  (   pSet->iSubset == UINT32_MAX
     1785                 || pSet->iSubset < pSet->cEntries)
     1786           )
     1787        {
     1788            Log2(("pgmDynMapOptimizeAutoSet: Releasing iPage=%d/%p\n", pSet->aEntries[i].iPage, pSet->aEntries[i].pvPage));
     1789            //LogFlow(("pgmDynMapOptimizeAutoSet: Releasing iPage=%d/%p\n", pSet->aEntries[i].iPage, pSet->aEntries[i].pvPage));
     1790            pgmRZDynMapReleasePage(PGMRZDYNMAP_SET_2_DYNMAP(pSet),
     1791                                   pSet->aEntries[i].iPage,
     1792                                   pSet->aEntries[i].cRefs);
     1793            pSet->cEntries--;
     1794            if (i < pSet->cEntries)
     1795            {
     1796                pSet->aEntries[i] = pSet->aEntries[pSet->cEntries];
     1797                PGMRZDYNMAP_ZAP_ENTRY(&pSet->aEntries[pSet->cEntries]);
     1798            }
     1799
     1800            i--;
     1801        }
     1802    }
     1803}
     1804
     1805
    14971806
    14981807
     
    15051814 * @param   pVCpu       The shared data for the current virtual CPU.
    15061815 */
    1507 VMMDECL(void) PGMDynMapStartAutoSet(PVMCPU pVCpu)
    1508 {
     1816VMMDECL(void) PGMRZDynMapStartAutoSet(PVMCPU pVCpu)
     1817{
     1818    LogFlow(("PGMRZDynMapStartAutoSet:\n"));
    15091819    Assert(pVCpu->pgm.s.AutoSet.cEntries == PGMMAPSET_CLOSED);
    15101820    Assert(pVCpu->pgm.s.AutoSet.iSubset == UINT32_MAX);
    15111821    pVCpu->pgm.s.AutoSet.cEntries = 0;
    1512     pVCpu->pgm.s.AutoSet.iCpu = RTMpCpuIdToSetIndex(RTMpCpuId());
    1513 }
    1514 
    1515 
     1822    pVCpu->pgm.s.AutoSet.iCpu = PGMRZDYNMAP_CUR_CPU();
     1823}
     1824
     1825
     1826#ifdef IN_RING0
    15161827/**
    15171828 * Starts or migrates the autoset of a virtual CPU.
     
    15261837 * @thread  EMT
    15271838 */
    1528 VMMDECL(bool) PGMDynMapStartOrMigrateAutoSet(PVMCPU pVCpu)
     1839VMMR0DECL(bool) PGMR0DynMapStartOrMigrateAutoSet(PVMCPU pVCpu)
    15291840{
    15301841    bool fStartIt = pVCpu->pgm.s.AutoSet.cEntries == PGMMAPSET_CLOSED;
    15311842    if (fStartIt)
    1532         PGMDynMapStartAutoSet(pVCpu);
     1843        PGMRZDynMapStartAutoSet(pVCpu);
    15331844    else
    1534         PGMDynMapMigrateAutoSet(pVCpu);
     1845        PGMR0DynMapMigrateAutoSet(pVCpu);
    15351846    return fStartIt;
    15361847}
     1848#endif /* IN_RING0 */
    15371849
    15381850
     
    15511863        &&  RT_LIKELY(cEntries <= RT_ELEMENTS(pSet->aEntries)))
    15521864    {
    1553         PPGMR0DYNMAP    pThis   = g_pPGMR0DynMap;
    1554         RTSPINLOCKTMP   Tmp     = RTSPINLOCKTMP_INITIALIZER;
    1555         RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
     1865        PPGMRZDYNMAP    pThis   = PGMRZDYNMAP_SET_2_DYNMAP(pSet);
     1866        PGMRZDYNMAP_SPINLOCK_ACQUIRE(pThis);
    15561867
    15571868        uint32_t i = cEntries;
     
    15621873            int32_t  cRefs = pSet->aEntries[i].cRefs;
    15631874            Assert(cRefs > 0);
    1564             pgmR0DynMapReleasePageLocked(pThis, iPage, cRefs);
    1565 
    1566             pSet->aEntries[i].iPage = UINT16_MAX;
    1567             pSet->aEntries[i].cRefs = 0;
     1875            pgmRZDynMapReleasePageLocked(pThis, iPage, cRefs);
     1876
     1877            PGMRZDYNMAP_ZAP_ENTRY(&pSet->aEntries[i]);
    15681878        }
    15691879
    15701880        Assert(pThis->cLoad <= pThis->cPages - pThis->cGuardPages);
    1571         RTSpinlockRelease(pThis->hSpinlock, &Tmp);
     1881        PGMRZDYNMAP_SPINLOCK_RELEASE(pThis);
    15721882    }
    15731883}
     
    15801890 * @param   pVCpu       The shared data for the current virtual CPU.
    15811891 */
    1582 VMMDECL(void) PGMDynMapReleaseAutoSet(PVMCPU pVCpu)
     1892VMMDECL(void) PGMRZDynMapReleaseAutoSet(PVMCPU pVCpu)
    15831893{
    15841894    PPGMMAPSET  pSet = &pVCpu->pgm.s.AutoSet;
     
    15931903    pSet->iCpu = -1;
    15941904
    1595     STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatR0DynMapSetSize[(cEntries * 10 / RT_ELEMENTS(pSet->aEntries)) % 11]);
     1905#ifdef IN_RC
     1906    if (RT_ELEMENTS(pSet->aEntries) > MM_HYPER_DYNAMIC_SIZE / PAGE_SIZE)
     1907        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatRZDynMapSetFilledPct[(cEntries * 10 / (MM_HYPER_DYNAMIC_SIZE / PAGE_SIZE)) % 11]);
     1908    else
     1909#endif
     1910        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatRZDynMapSetFilledPct[(cEntries * 10 / RT_ELEMENTS(pSet->aEntries)) % 11]);
    15961911    AssertMsg(cEntries < PGMMAPSET_MAX_FILL, ("%u\n", cEntries));
    15971912    if (cEntries > RT_ELEMENTS(pSet->aEntries) * 50 / 100)
    1598         Log(("PGMDynMapReleaseAutoSet: cEntries=%d\n", pSet->cEntries));
     1913        Log(("PGMRZDynMapReleaseAutoSet: cEntries=%d\n", cEntries));
     1914    else
     1915        LogFlow(("PGMRZDynMapReleaseAutoSet: cEntries=%d\n", cEntries));
    15991916
    16001917    pgmDynMapFlushAutoSetWorker(pSet, cEntries);
     
    16071924 * @param   pVCpu       The shared data for the current virtual CPU.
    16081925 */
    1609 VMMDECL(void) PGMDynMapFlushAutoSet(PVMCPU pVCpu)
     1926VMMDECL(void) PGMRZDynMapFlushAutoSet(PVMCPU pVCpu)
    16101927{
    16111928    PPGMMAPSET  pSet = &pVCpu->pgm.s.AutoSet;
    1612     AssertMsg(pSet->iCpu == RTMpCpuIdToSetIndex(RTMpCpuId()), ("%d %d(%d) efl=%#x\n", pSet->iCpu, RTMpCpuIdToSetIndex(RTMpCpuId()), RTMpCpuId(), ASMGetFlags()));
     1929    AssertMsg(pSet->iCpu == PGMRZDYNMAP_CUR_CPU(), ("%d %d efl=%#x\n", pSet->iCpu, PGMRZDYNMAP_CUR_CPU(), ASMGetFlags()));
    16131930
    16141931    /*
     
    16171934    uint32_t cEntries = pSet->cEntries;
    16181935    AssertReturnVoid(cEntries != PGMMAPSET_CLOSED);
    1619     STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatR0DynMapSetSize[(cEntries * 10 / RT_ELEMENTS(pSet->aEntries)) % 11]);
     1936#ifdef IN_RC
     1937    if (RT_ELEMENTS(pSet->aEntries) > MM_HYPER_DYNAMIC_SIZE / PAGE_SIZE)
     1938        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatRZDynMapSetFilledPct[(cEntries * 10 / (MM_HYPER_DYNAMIC_SIZE / PAGE_SIZE)) % 11]);
     1939    else
     1940#endif
     1941        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatRZDynMapSetFilledPct[(cEntries * 10 / RT_ELEMENTS(pSet->aEntries)) % 11]);
    16201942    if (cEntries >= RT_ELEMENTS(pSet->aEntries) * 45 / 100)
    16211943    {
     
    16261948
    16271949        pgmDynMapFlushAutoSetWorker(pSet, cEntries);
    1628         AssertMsg(pSet->iCpu == RTMpCpuIdToSetIndex(RTMpCpuId()), ("%d %d(%d) efl=%#x\n", pSet->iCpu, RTMpCpuIdToSetIndex(RTMpCpuId()), RTMpCpuId(), ASMGetFlags()));
    1629     }
    1630 }
    1631 
    1632 
     1950        AssertMsg(pSet->iCpu == PGMRZDYNMAP_CUR_CPU(), ("%d %d efl=%#x\n", pSet->iCpu, PGMRZDYNMAP_CUR_CPU(), ASMGetFlags()));
     1951    }
     1952}
     1953
     1954
     1955#ifndef IN_RC
    16331956/**
    16341957 * Migrates the automatic mapping set of the current vCPU if it's active and
     
    16441967 * @thread  EMT
    16451968 */
    1646 VMMDECL(void) PGMDynMapMigrateAutoSet(PVMCPU pVCpu)
    1647 {
     1969VMMR0DECL(void) PGMR0DynMapMigrateAutoSet(PVMCPU pVCpu)
     1970{
     1971    LogFlow(("PGMR0DynMapMigrateAutoSet\n"));
    16481972    PPGMMAPSET      pSet     = &pVCpu->pgm.s.AutoSet;
    1649     int32_t         iRealCpu = RTMpCpuIdToSetIndex(RTMpCpuId());
     1973    int32_t         iRealCpu = PGMRZDYNMAP_CUR_CPU();
    16501974    if (pSet->iCpu != iRealCpu)
    16511975    {
     
    16561980            if (i != 0 && RT_LIKELY(i <= RT_ELEMENTS(pSet->aEntries)))
    16571981            {
    1658                 PPGMR0DYNMAP    pThis  = g_pPGMR0DynMap;
    1659                 RTSPINLOCKTMP   Tmp    = RTSPINLOCKTMP_INITIALIZER;
    1660                 RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
     1982                PPGMRZDYNMAP    pThis  = PGMRZDYNMAP_SET_2_DYNMAP(pSet);
     1983                PGMRZDYNMAP_SPINLOCK_ACQUIRE(pThis);
    16611984
    16621985                while (i-- > 0)
     
    16681991                    {
    16691992                        RTCpuSetDelByIndex(&pThis->paPages[iPage].PendingSet, iRealCpu);
    1670                         RTSpinlockRelease(pThis->hSpinlock, &Tmp);
     1993                        PGMRZDYNMAP_SPINLOCK_RELEASE(pThis);
    16711994
    16721995                        ASMInvalidatePage(pThis->paPages[iPage].pvPage);
    1673                         STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapMigrateInvlPg);
    1674 
    1675                         RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
     1996                        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapMigrateInvlPg);
     1997
     1998                        PGMRZDYNMAP_SPINLOCK_REACQUIRE(pThis);
    16761999                    }
    16772000                }
    16782001
    1679                 RTSpinlockRelease(pThis->hSpinlock, &Tmp);
     2002                PGMRZDYNMAP_SPINLOCK_RELEASE(pThis);
    16802003            }
    16812004        }
     
    16832006    }
    16842007}
     2008#endif /* !IN_RC */
    16852009
    16862010
     
    17062030        pSet->cEntries = iSubset;
    17072031
    1708         PPGMR0DYNMAP    pThis = g_pPGMR0DynMap;
    1709         RTSPINLOCKTMP   Tmp   = RTSPINLOCKTMP_INITIALIZER;
    1710         RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
     2032        PPGMRZDYNMAP    pThis = PGMRZDYNMAP_SET_2_DYNMAP(pSet);
     2033        PGMRZDYNMAP_SPINLOCK_ACQUIRE(pThis);
    17112034
    17122035        while (i-- > iSubset)
     
    17162039            int32_t  cRefs = pSet->aEntries[i].cRefs;
    17172040            Assert(cRefs > 0);
    1718             pgmR0DynMapReleasePageLocked(pThis, iPage, cRefs);
    1719 
    1720             pSet->aEntries[i].iPage = UINT16_MAX;
    1721             pSet->aEntries[i].cRefs = 0;
    1722         }
    1723 
    1724         RTSpinlockRelease(pThis->hSpinlock, &Tmp);
     2041            pgmRZDynMapReleasePageLocked(pThis, iPage, cRefs);
     2042
     2043            PGMRZDYNMAP_ZAP_ENTRY(&pSet->aEntries[i]);
     2044        }
     2045
     2046        PGMRZDYNMAP_SPINLOCK_RELEASE(pThis);
    17252047    }
    17262048}
     
    17382060 *
    17392061 * @returns The index of the previous subset. Pass this to
    1740  *        PGMDynMapPopAutoSubset when poping it.
     2062 *          PGMDynMapPopAutoSubset when popping it.
    17412063 * @param   pVCpu           Pointer to the virtual cpu data.
    17422064 */
    1743 VMMDECL(uint32_t) PGMDynMapPushAutoSubset(PVMCPU pVCpu)
     2065VMMDECL(uint32_t) PGMRZDynMapPushAutoSubset(PVMCPU pVCpu)
    17442066{
    17452067    PPGMMAPSET      pSet = &pVCpu->pgm.s.AutoSet;
    17462068    AssertReturn(pSet->cEntries != PGMMAPSET_CLOSED, UINT32_MAX);
    17472069    uint32_t        iPrevSubset = pSet->iSubset;
    1748     LogFlow(("PGMDynMapPushAutoSubset: pVCpu=%p iPrevSubset=%u\n", pVCpu, iPrevSubset));
     2070    LogFlow(("PGMRZDynMapPushAutoSubset: pVCpu=%p iPrevSubset=%u\n", pVCpu, iPrevSubset));
     2071
     2072#ifdef IN_RC
     2073    /* kludge */
     2074    if (pSet->cEntries > MM_HYPER_DYNAMIC_SIZE / PAGE_SIZE / 2)
     2075    {
     2076        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapSetOptimize);
     2077        pgmDynMapOptimizeAutoSet(pSet);
     2078    }
     2079#endif
    17492080
    17502081    pSet->iSubset = pSet->cEntries;
    1751     STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapSubsets);
     2082    STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapSubsets);
     2083
    17522084    return iPrevSubset;
    17532085}
     
    17602092 * @param   iPrevSubset     What PGMDynMapPushAutoSubset returned.
    17612093 */
    1762 VMMDECL(void) PGMDynMapPopAutoSubset(PVMCPU pVCpu, uint32_t iPrevSubset)
     2094VMMDECL(void) PGMRZDynMapPopAutoSubset(PVMCPU pVCpu, uint32_t iPrevSubset)
    17632095{
    17642096    PPGMMAPSET      pSet = &pVCpu->pgm.s.AutoSet;
    17652097    uint32_t        cEntries = pSet->cEntries;
    1766     LogFlow(("PGMDynMapPopAutoSubset: pVCpu=%p iPrevSubset=%u iSubset=%u cEntries=%u\n", pVCpu, iPrevSubset, pSet->iSubset, cEntries));
     2098    LogFlow(("PGMRZDynMapPopAutoSubset: pVCpu=%p iPrevSubset=%u iSubset=%u cEntries=%u\n", pVCpu, iPrevSubset, pSet->iSubset, cEntries));
    17672099    AssertReturnVoid(cEntries != PGMMAPSET_CLOSED);
    17682100    AssertReturnVoid(pSet->iSubset >= iPrevSubset || iPrevSubset == UINT32_MAX);
    1769     STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatR0DynMapSetSize[(cEntries * 10 / RT_ELEMENTS(pSet->aEntries)) % 11]);
     2101#ifdef IN_RC
     2102    if (RT_ELEMENTS(pSet->aEntries) > MM_HYPER_DYNAMIC_SIZE / PAGE_SIZE)
     2103        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatRZDynMapSetFilledPct[(cEntries * 10 / (MM_HYPER_DYNAMIC_SIZE / PAGE_SIZE)) % 11]);
     2104    else
     2105#endif
     2106        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatRZDynMapSetFilledPct[(cEntries * 10 / RT_ELEMENTS(pSet->aEntries)) % 11]);
    17702107    if (    cEntries >= RT_ELEMENTS(pSet->aEntries) * 40 / 100
    17712108        &&  cEntries != pSet->iSubset)
     
    17792116
    17802117/**
    1781  * As a final resort for a full auto set, try merge duplicate entries.
    1782  *
    1783  * @param   pSet        The set.
    1784  */
    1785 static void pgmDynMapOptimizeAutoSet(PPGMMAPSET pSet)
    1786 {
    1787     for (uint32_t i = 0 ; i < pSet->cEntries; i++)
    1788     {
    1789         uint16_t const  iPage = pSet->aEntries[i].iPage;
    1790         uint32_t        j     = i + 1;
    1791         while (j < pSet->cEntries)
    1792         {
    1793             if (pSet->aEntries[j].iPage != iPage)
    1794                 j++;
    1795             else if ((uint32_t)pSet->aEntries[i].cRefs + (uint32_t)pSet->aEntries[j].cRefs < UINT16_MAX)
    1796             {
    1797                 /* merge j into i removing j. */
    1798                 pSet->aEntries[i].cRefs += pSet->aEntries[j].cRefs;
    1799                 pSet->cEntries--;
    1800                 if (j < pSet->cEntries)
     2118 * Indicates that the given page is unused and its mapping can be re-used.
     2119 *
     2120 * @param   pVCpu           The current CPU.
     2121 * @param   pvHint          The page that is now unused.  This does not have to
     2122 *                          point at the start of the page.  NULL is ignored.
     2123 */
     2124#ifdef LOG_ENABLED
     2125void pgmRZDynMapUnusedHint(PVMCPU pVCpu, void *pvHint, RT_SRC_POS_DECL)
     2126#else
     2127void pgmRZDynMapUnusedHint(PVMCPU pVCpu, void *pvHint)
     2128#endif
     2129{
     2130    /*
     2131     * Ignore NULL pointers and mask off the page offset bits.
     2132     */
     2133    if (pvHint == NULL)
     2134        return;
     2135    pvHint = (void *)((uintptr_t)pvHint & ~(uintptr_t)PAGE_OFFSET_MASK);
     2136
     2137    PPGMMAPSET  pSet    = &pVCpu->pgm.s.AutoSet;
     2138    uint32_t    iEntry  = pSet->cEntries;
     2139    AssertReturnVoid(iEntry > 0);
     2140
     2141    /*
     2142     * Find the entry in the usual unrolled fashion.
     2143     */
     2144#define IS_MATCHING_ENTRY(pSet, iEntry, pvHint) \
     2145        (   (pSet)->aEntries[(iEntry)].pvPage == (pvHint) \
     2146         &&   (uint32_t)(pSet)->aEntries[(iEntry)].cRefs + (pSet)->aEntries[(iEntry)].cInlinedRefs \
     2147            > (pSet)->aEntries[(iEntry)].cUnrefs )
     2148    if (     iEntry >= 1 && IS_MATCHING_ENTRY(pSet, iEntry - 1, pvHint))
     2149        iEntry = iEntry - 1;
     2150    else if (iEntry >= 2 && IS_MATCHING_ENTRY(pSet, iEntry - 2, pvHint))
     2151        iEntry = iEntry - 2;
     2152    else if (iEntry >= 3 && IS_MATCHING_ENTRY(pSet, iEntry - 3, pvHint))
     2153        iEntry = iEntry - 3;
     2154    else if (iEntry >= 4 && IS_MATCHING_ENTRY(pSet, iEntry - 4, pvHint))
     2155        iEntry = iEntry - 4;
     2156    else if (iEntry >= 5 && IS_MATCHING_ENTRY(pSet, iEntry - 5, pvHint))
     2157        iEntry = iEntry - 5;
     2158    else if (iEntry >= 6 && IS_MATCHING_ENTRY(pSet, iEntry - 6, pvHint))
     2159        iEntry = iEntry - 6;
     2160    else if (iEntry >= 7 && IS_MATCHING_ENTRY(pSet, iEntry - 7, pvHint))
     2161        iEntry = iEntry - 7;
     2162    else
     2163    {
     2164        /*
     2165         * Loop till we find it.
     2166         */
     2167        bool fFound = false;
     2168        if (iEntry > 7)
     2169        {
     2170            iEntry -= 7;
     2171            while (iEntry-- > 0)
     2172                if (IS_MATCHING_ENTRY(pSet, iEntry, pvHint))
    18012173                {
    1802                     pSet->aEntries[j] = pSet->aEntries[pSet->cEntries];
    1803                     pSet->aEntries[pSet->cEntries].iPage = UINT16_MAX;
    1804                     pSet->aEntries[pSet->cEntries].cRefs = 0;
     2174                    fFound = true;
     2175                    break;
    18052176                }
    1806                 else
    1807                 {
    1808                     pSet->aEntries[j].iPage = UINT16_MAX;
    1809                     pSet->aEntries[j].cRefs = 0;
    1810                 }
    1811             }
    1812             else
    1813             {
    1814                 /* migrate the max number of refs from j into i and quit the inner loop. */
    1815                 uint32_t cMigrate = UINT16_MAX - 1 - pSet->aEntries[i].cRefs;
    1816                 Assert(pSet->aEntries[j].cRefs > cMigrate);
    1817                 pSet->aEntries[j].cRefs -= cMigrate;
    1818                 pSet->aEntries[i].cRefs = UINT16_MAX - 1;
    1819                 break;
    1820             }
    1821         }
    1822     }
    1823 }
    1824 
    1825 
    1826 /**
    1827  * Common worker code for PGMDynMapHCPhys, pgmR0DynMapHCPageInlined and
    1828  * pgmR0DynMapGCPageInlined.
     2177        }
     2178        AssertMsgReturnVoid(fFound,
     2179                            ("pvHint=%p cEntries=%#x iSubset=%#x\n"
     2180                             "aEntries[0] = {%#x, %#x, %#x, %#x, %p}\n"
     2181                             "aEntries[1] = {%#x, %#x, %#x, %#x, %p}\n"
     2182                             "aEntries[2] = {%#x, %#x, %#x, %#x, %p}\n"
     2183                             "aEntries[3] = {%#x, %#x, %#x, %#x, %p}\n"
     2184                             "aEntries[4] = {%#x, %#x, %#x, %#x, %p}\n"
     2185                             "aEntries[5] = {%#x, %#x, %#x, %#x, %p}\n"
     2186                             ,
     2187                             pvHint, pSet->cEntries, pSet->iSubset,
     2188                             pSet->aEntries[0].iPage, pSet->aEntries[0].cRefs, pSet->aEntries[0].cInlinedRefs, pSet->aEntries[0].cUnrefs, pSet->aEntries[0].pvPage,
     2189                             pSet->aEntries[1].iPage, pSet->aEntries[1].cRefs, pSet->aEntries[1].cInlinedRefs, pSet->aEntries[1].cUnrefs, pSet->aEntries[1].pvPage,
     2190                             pSet->aEntries[2].iPage, pSet->aEntries[2].cRefs, pSet->aEntries[2].cInlinedRefs, pSet->aEntries[2].cUnrefs, pSet->aEntries[2].pvPage,
     2191                             pSet->aEntries[3].iPage, pSet->aEntries[3].cRefs, pSet->aEntries[3].cInlinedRefs, pSet->aEntries[3].cUnrefs, pSet->aEntries[3].pvPage,
     2192                             pSet->aEntries[4].iPage, pSet->aEntries[4].cRefs, pSet->aEntries[4].cInlinedRefs, pSet->aEntries[4].cUnrefs, pSet->aEntries[4].pvPage,
     2193                             pSet->aEntries[5].iPage, pSet->aEntries[5].cRefs, pSet->aEntries[5].cInlinedRefs, pSet->aEntries[5].cUnrefs, pSet->aEntries[5].pvPage));
     2194    }
     2195#undef IS_MATCHING_ENTRY
     2196
     2197    /*
     2198     * Update it.
     2199     */
     2200    uint32_t const  cTotalRefs = (uint32_t)pSet->aEntries[iEntry].cRefs + pSet->aEntries[iEntry].cInlinedRefs;
     2201    uint32_t const  cUnrefs    = pSet->aEntries[iEntry].cUnrefs;
     2202    LogFlow(("pgmRZDynMapUnusedHint: pvHint=%p #%u cRefs=%d cInlinedRefs=%d cUnrefs=%d (+1) cTotalRefs=%d %s(%d) %s\n",
     2203             pvHint, iEntry, pSet->aEntries[iEntry].cRefs, pSet->aEntries[iEntry].cInlinedRefs, cUnrefs, cTotalRefs, pszFile, iLine, pszFunction));
     2204    AssertReturnVoid(cTotalRefs > cUnrefs);
     2205
     2206    if (RT_LIKELY(cUnrefs < UINT16_MAX - 1))
     2207        pSet->aEntries[iEntry].cUnrefs++;
     2208    else if (pSet->aEntries[iEntry].cInlinedRefs)
     2209    {
     2210        uint32_t cSub = RT_MIN(pSet->aEntries[iEntry].cInlinedRefs, pSet->aEntries[iEntry].cUnrefs);
     2211        pSet->aEntries[iEntry].cInlinedRefs -= cSub;
     2212        pSet->aEntries[iEntry].cUnrefs      -= cSub;
     2213        pSet->aEntries[iEntry].cUnrefs++;
     2214    }
     2215    else
     2216        Log(("pgmRZDynMapUnusedHint: pvHint=%p ignored because of overflow! %s(%d) %s\n", pvHint, pszFile, iLine, pszFunction));
     2217}
     2218
     2219
     2220/**
     2221 * Common worker code for pgmRZDynMapHCPageInlined, pgmRZDynMapHCPageV2Inlined
     2222 * and pgmR0DynMapGCPageOffInlined.
    18292223 *
    18302224 * @returns VINF_SUCCESS, bails out to ring-3 on failure.
     
    18352229 * @remarks This is a very hot path.
    18362230 */
    1837 int pgmR0DynMapHCPageCommon(PPGMMAPSET pSet, RTHCPHYS HCPhys, void **ppv)
    1838 {
    1839     LogFlow(("pgmR0DynMapHCPageCommon: pSet=%p HCPhys=%RHp ppv=%p\n", pSet, HCPhys, ppv));
    1840     AssertMsg(pSet->iCpu == RTMpCpuIdToSetIndex(RTMpCpuId()), ("%d %d(%d) efl=%#x\n", pSet->iCpu, RTMpCpuIdToSetIndex(RTMpCpuId()), RTMpCpuId(), ASMGetFlags()));
    1841     PVMCPU pVCpu = PGMR0DYNMAP_2_VMCPU(pSet);
     2231int pgmRZDynMapHCPageCommon(PPGMMAPSET pSet, RTHCPHYS HCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
     2232{
     2233    AssertMsg(pSet->iCpu == PGMRZDYNMAP_CUR_CPU(), ("%d %d efl=%#x\n", pSet->iCpu, PGMRZDYNMAP_CUR_CPU(), ASMGetFlags()));
     2234    PVMCPU pVCpu = PGMRZDYNMAP_SET_2_VMCPU(pSet);
     2235    STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPage, a);
    18422236
    18432237    /*
    18442238     * Map it.
    18452239     */
    1846     void *pvPage;
    1847     uint32_t const  iPage = pgmR0DynMapPage(g_pPGMR0DynMap, HCPhys, pSet->iCpu, pVCpu, &pvPage);
     2240    void           *pvPage;
     2241    PPGMRZDYNMAP    pThis = PGMRZDYNMAP_SET_2_DYNMAP(pSet);
     2242    uint32_t        iPage = pgmR0DynMapPage(pThis, HCPhys, pSet->iCpu, pVCpu, &pvPage);
    18482243    if (RT_UNLIKELY(iPage == UINT32_MAX))
    18492244    {
    1850         RTAssertMsg2Weak("PGMDynMapHCPage: cLoad=%u/%u cPages=%u cGuardPages=%u\n",
    1851                          g_pPGMR0DynMap->cLoad, g_pPGMR0DynMap->cMaxLoad, g_pPGMR0DynMap->cPages, g_pPGMR0DynMap->cGuardPages);
    1852         if (!g_fPGMR0DynMapTestRunning)
    1853             VMMRZCallRing3NoCpu(PGMR0DYNMAP_2_VM(pSet), VMMCALLRING3_VM_R0_ASSERTION, 0);
    1854         *ppv = NULL;
    1855         return VERR_PGM_DYNMAP_FAILED;
     2245        /*
     2246         * We're out of mapping space, optimize our set to try remedy the
     2247         * situation.  (Only works if there are unreference hints.)
     2248         */
     2249        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapSetOptimize);
     2250        pgmDynMapOptimizeAutoSet(pSet);
     2251
     2252        iPage = pgmR0DynMapPage(pThis, HCPhys, pSet->iCpu, pVCpu, &pvPage);
     2253        if (RT_UNLIKELY(iPage == UINT32_MAX))
     2254        {
     2255            RTAssertMsg2Weak("pgmRZDynMapHCPageCommon: cLoad=%u/%u cPages=%u cGuardPages=%u\n",
     2256                             pThis->cLoad, pThis->cMaxLoad, pThis->cPages, pThis->cGuardPages);
     2257            if (!g_fPGMR0DynMapTestRunning)
     2258                VMMRZCallRing3NoCpu(PGMRZDYNMAP_SET_2_VM(pSet), VMMCALLRING3_VM_R0_ASSERTION, 0);
     2259            *ppv = NULL;
     2260            STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPage, a);
     2261            return VERR_PGM_DYNMAP_FAILED;
     2262        }
    18562263    }
    18572264
     
    18692276    {
    18702277        unsigned iEntry = pSet->cEntries++;
    1871         pSet->aEntries[iEntry].cRefs  = 1;
    1872         pSet->aEntries[iEntry].iPage  = iPage;
    1873         pSet->aEntries[iEntry].pvPage = pvPage;
    1874         pSet->aEntries[iEntry].HCPhys = HCPhys;
     2278        pSet->aEntries[iEntry].cRefs        = 1;
     2279        pSet->aEntries[iEntry].cUnrefs      = 0;
     2280        pSet->aEntries[iEntry].cInlinedRefs = 0;
     2281        pSet->aEntries[iEntry].iPage        = iPage;
     2282        pSet->aEntries[iEntry].pvPage       = pvPage;
     2283        pSet->aEntries[iEntry].HCPhys       = HCPhys;
    18752284        pSet->aiHashTable[PGMMAPSET_HASH(HCPhys)] = iEntry;
     2285        LogFlow(("pgmRZDynMapHCPageCommon: pSet=%p HCPhys=%RHp #%u/%u/%p cRefs=%u/0/0 iPage=%#x  [a] %s(%d) %s\n",
     2286                 pSet, HCPhys, iEntry, iEntry + 1, pvPage, 1, iPage, pszFile, iLine, pszFunction));
    18762287    }
    18772288    /* Any of the last 5 pages? */
    18782289    else if (   pSet->aEntries[i - 0].iPage == iPage
    18792290             && pSet->aEntries[i - 0].cRefs < UINT16_MAX - 1)
     2291    {
    18802292        pSet->aEntries[i - 0].cRefs++;
     2293        LogFlow(("pgmRZDynMapHCPageCommon: pSet=%p HCPhys=%RHp #%u/%u/%p cRefs=%u/%u/%u iPage=%#x [0] %s(%d) %s\n", pSet, HCPhys, i - 0, pSet->cEntries, pvPage, pSet->aEntries[i - 0].cRefs, pSet->aEntries[i - 0].cInlinedRefs, pSet->aEntries[i - 0].cUnrefs, iPage, pszFile, iLine, pszFunction));
     2294    }
    18812295    else if (   pSet->aEntries[i - 1].iPage == iPage
    18822296             && pSet->aEntries[i - 1].cRefs < UINT16_MAX - 1)
     2297    {
    18832298        pSet->aEntries[i - 1].cRefs++;
     2299        LogFlow(("pgmRZDynMapHCPageCommon: pSet=%p HCPhys=%RHp #%u/%u/%p cRefs=%u/%u/%u iPage=%#x [1] %s(%d) %s\n", pSet, HCPhys, i - 1, pSet->cEntries, pvPage, pSet->aEntries[i - 1].cRefs, pSet->aEntries[i - 1].cInlinedRefs, pSet->aEntries[i - 1].cUnrefs, iPage, pszFile, iLine, pszFunction));
     2300    }
    18842301    else if (   pSet->aEntries[i - 2].iPage == iPage
    18852302             && pSet->aEntries[i - 2].cRefs < UINT16_MAX - 1)
     2303    {
    18862304        pSet->aEntries[i - 2].cRefs++;
     2305        LogFlow(("pgmRZDynMapHCPageCommon: pSet=%p HCPhys=%RHp #%u/%u/%p cRefs=%u/%u/%u iPage=%#x [2] %s(%d) %s\n", pSet, HCPhys, i - 2, pSet->cEntries, pvPage, pSet->aEntries[i - 2].cRefs, pSet->aEntries[i - 2].cInlinedRefs, pSet->aEntries[i - 2].cUnrefs, iPage, pszFile, iLine, pszFunction));
     2306    }
    18872307    else if (   pSet->aEntries[i - 3].iPage == iPage
    18882308             && pSet->aEntries[i - 3].cRefs < UINT16_MAX - 1)
     2309    {
    18892310        pSet->aEntries[i - 3].cRefs++;
     2311        LogFlow(("pgmRZDynMapHCPageCommon: pSet=%p HCPhys=%RHp #%u/%u/%p cRefs=%u/%u/%u iPage=%#x [4] %s(%d) %s\n", pSet, HCPhys, i - 3, pSet->cEntries, pvPage, pSet->aEntries[i - 3].cRefs, pSet->aEntries[i - 3].cInlinedRefs, pSet->aEntries[i - 3].cUnrefs, iPage, pszFile, iLine, pszFunction));
     2312    }
    18902313    else if (   pSet->aEntries[i - 4].iPage == iPage
    18912314             && pSet->aEntries[i - 4].cRefs < UINT16_MAX - 1)
     2315    {
    18922316        pSet->aEntries[i - 4].cRefs++;
     2317        LogFlow(("pgmRZDynMapHCPageCommon: pSet=%p HCPhys=%RHp #%u/%u/%p cRefs=%u/%u/%u iPage=%#x [4] %s(%d) %s\n", pSet, HCPhys, i - 4, pSet->cEntries, pvPage, pSet->aEntries[i - 4].cRefs, pSet->aEntries[i - 4].cInlinedRefs, pSet->aEntries[i - 4].cUnrefs, iPage, pszFile, iLine, pszFunction));
     2318    }
    18932319    /* Don't bother searching unless we're above a 60% load. */
    18942320    else if (RT_LIKELY(i <= (int32_t)RT_ELEMENTS(pSet->aEntries) * 60 / 100))
    18952321    {
    18962322        unsigned iEntry = pSet->cEntries++;
    1897         pSet->aEntries[iEntry].cRefs  = 1;
    1898         pSet->aEntries[iEntry].iPage  = iPage;
    1899         pSet->aEntries[iEntry].pvPage = pvPage;
    1900         pSet->aEntries[iEntry].HCPhys = HCPhys;
     2323        pSet->aEntries[iEntry].cRefs        = 1;
     2324        pSet->aEntries[iEntry].cUnrefs      = 0;
     2325        pSet->aEntries[iEntry].cInlinedRefs = 0;
     2326        pSet->aEntries[iEntry].iPage        = iPage;
     2327        pSet->aEntries[iEntry].pvPage       = pvPage;
     2328        pSet->aEntries[iEntry].HCPhys       = HCPhys;
    19012329        pSet->aiHashTable[PGMMAPSET_HASH(HCPhys)] = iEntry;
     2330        LogFlow(("pgmRZDynMapHCPageCommon: pSet=%p HCPhys=%RHp #%u/%u/%p cRefs=1/0/0 iPage=%#x [b] %s(%d) %s\n", pSet, HCPhys, iEntry, pSet->cEntries, pvPage, iPage, pszFile, iLine, pszFunction));
    19022331    }
    19032332    else
     
    19112340            {
    19122341                pSet->aEntries[i].cRefs++;
    1913                 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapSetSearchHits);
     2342                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapSetSearchHits);
     2343                LogFlow(("pgmRZDynMapHCPageCommon: pSet=%p HCPhys=%RHp #%u/%u/%p cRefs=%u/%u/%u iPage=%#x [c] %s(%d) %s\n", pSet, HCPhys, i, pSet->cEntries, pvPage, pSet->aEntries[i].cRefs, pSet->aEntries[i].cInlinedRefs, pSet->aEntries[i].cUnrefs, iPage, pszFile, iLine, pszFunction));
    19142344                break;
    19152345            }
    19162346        if (i < 0)
    19172347        {
    1918             STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapSetSearchMisses);
     2348            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapSetSearchMisses);
    19192349            if (pSet->iSubset < pSet->cEntries)
    19202350            {
    1921                 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapSetSearchFlushes);
    1922                 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatR0DynMapSetSize[(pSet->cEntries * 10 / RT_ELEMENTS(pSet->aEntries)) % 11]);
     2351                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapSetSearchFlushes);
     2352                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatRZDynMapSetFilledPct[(pSet->cEntries * 10 / RT_ELEMENTS(pSet->aEntries)) % 11]);
    19232353                AssertMsg(pSet->cEntries < PGMMAPSET_MAX_FILL, ("%u\n", pSet->cEntries));
    19242354                pgmDynMapFlushSubset(pSet);
     
    19272357            if (RT_UNLIKELY(pSet->cEntries >= RT_ELEMENTS(pSet->aEntries)))
    19282358            {
    1929                 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapSetOptimize);
     2359                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapSetOptimize);
    19302360                pgmDynMapOptimizeAutoSet(pSet);
    19312361            }
     
    19342364            {
    19352365                unsigned iEntry = pSet->cEntries++;
    1936                 pSet->aEntries[iEntry].cRefs  = 1;
    1937                 pSet->aEntries[iEntry].iPage  = iPage;
    1938                 pSet->aEntries[iEntry].pvPage = pvPage;
    1939                 pSet->aEntries[iEntry].HCPhys = HCPhys;
     2366                pSet->aEntries[iEntry].cRefs        = 1;
     2367                pSet->aEntries[iEntry].cUnrefs      = 0;
     2368                pSet->aEntries[iEntry].cInlinedRefs = 0;
     2369                pSet->aEntries[iEntry].iPage        = iPage;
     2370                pSet->aEntries[iEntry].pvPage       = pvPage;
     2371                pSet->aEntries[iEntry].HCPhys       = HCPhys;
    19402372                pSet->aiHashTable[PGMMAPSET_HASH(HCPhys)] = iEntry;
     2373                LogFlow(("pgmRZDynMapHCPageCommon: pSet=%p HCPhys=%RHp #%u/%u/%p cRefs=1/0/0 iPage=%#x [d] %s(%d) %s\n", pSet, HCPhys, iEntry, pSet->cEntries, pvPage, iPage, pszFile, iLine, pszFunction));
    19412374            }
    19422375            else
    19432376            {
    19442377                /* We're screwed. */
    1945                 pgmR0DynMapReleasePage(g_pPGMR0DynMap, iPage, 1);
    1946 
    1947                 RTAssertMsg2Weak("PGMDynMapHCPage: set is full!\n");
     2378                pgmRZDynMapReleasePage(pThis, iPage, 1);
     2379
     2380                RTAssertMsg2Weak("pgmRZDynMapHCPageCommon: set is full!\n");
    19482381                if (!g_fPGMR0DynMapTestRunning)
    1949                     VMMRZCallRing3NoCpu(PGMR0DYNMAP_2_VM(pSet), VMMCALLRING3_VM_R0_ASSERTION, 0);
     2382                    VMMRZCallRing3NoCpu(PGMRZDYNMAP_SET_2_VM(pSet), VMMCALLRING3_VM_R0_ASSERTION, 0);
    19502383                *ppv = NULL;
     2384                STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPage, a);
    19512385                return VERR_PGM_DYNMAP_FULL_SET;
    19522386            }
     
    19552389
    19562390    *ppv = pvPage;
     2391    STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPage, a);
    19572392    return VINF_SUCCESS;
    19582393}
    1959 
    1960 
    1961 #if 0 /* Not used in R0, should internalized the other PGMDynMapHC/GCPage too. */
    1962 /* documented elsewhere - a bit of a mess. */
    1963 VMMDECL(int) PGMDynMapHCPage(PVM pVM, RTHCPHYS HCPhys, void **ppv)
    1964 {
    1965 #ifdef VBOX_WITH_STATISTICS
    1966     PVMCPU pVCpu = VMMGetCpu(pVM);
    1967 #endif
    1968     /*
    1969      * Validate state.
    1970      */
    1971     STAM_PROFILE_START(&pVCpu->pgm.s.StatR0DynMapHCPage, a);
    1972     AssertPtr(ppv);
    1973     AssertMsg(pVM->pgm.s.pvR0DynMapUsed == g_pPGMR0DynMap,
    1974               ("%p != %p\n", pVM->pgm.s.pvR0DynMapUsed, g_pPGMR0DynMap));
    1975     AssertMsg(!(HCPhys & PAGE_OFFSET_MASK), ("HCPhys=%RHp\n", HCPhys));
    1976     PVMCPU          pVCpu   = VMMGetCpu(pVM);
    1977     AssertPtr(pVCpu);
    1978     PPGMMAPSET      pSet    = &pVCpu->pgm.s.AutoSet;
    1979     AssertMsg(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries),
    1980               ("%#x (%u)\n", pSet->cEntries, pSet->cEntries));
    1981 
    1982     /*
    1983      * Call common code.
    1984      */
    1985     int rc = pgmR0DynMapHCPageCommon(pSet, HCPhys, ppv);
    1986 
    1987     STAM_PROFILE_STOP(&pVCpu->pgm.s.StatR0DynMapHCPage, a);
    1988     return rc;
    1989 }
    1990 #endif
    19912394
    19922395
     
    20252428{
    20262429    LogRel(("pgmR0DynMapTest: ****** START ******\n"));
    2027     PPGMR0DYNMAP    pThis = g_pPGMR0DynMap;
    20282430    PPGMMAPSET      pSet  = &pVM->aCpus[0].pgm.s.AutoSet;
     2431    PPGMRZDYNMAP    pThis = PGMRZDYNMAP_SET_2_DYNMAP(pSet);
    20292432    uint32_t        i;
    20302433
     
    20472450    LogRel(("Test #1\n"));
    20482451    ASMIntDisable();
    2049     PGMDynMapStartAutoSet(&pVM->aCpus[0]);
     2452    PGMRZDynMapStartAutoSet(&pVM->aCpus[0]);
    20502453
    20512454    uint64_t cr3 = ASMGetCR3() & ~(uint64_t)PAGE_OFFSET_MASK;
    20522455    void    *pv  = (void *)(intptr_t)-1;
    20532456    void    *pv2 = (void *)(intptr_t)-2;
    2054     rc           = PGMDynMapHCPage(pVM, cr3, &pv);
    2055     int      rc2 = PGMDynMapHCPage(pVM, cr3, &pv2);
     2457    rc           = pgmRZDynMapHCPageCommon(pVM, cr3, &pv  RTLOG_COMMA_SRC_POS);
     2458    int      rc2 = pgmRZDynMapHCPageCommon(pVM, cr3, &pv2 RTLOG_COMMA_SRC_POS);
    20562459    ASMIntEnable();
    20572460    if (    RT_SUCCESS(rc2)
     
    20682471        LogRel(("Test #2\n"));
    20692472        ASMIntDisable();
    2070         PGMDynMapMigrateAutoSet(&pVM->aCpus[0]);
     2473        PGMR0DynMapMigrateAutoSet(&pVM->aCpus[0]);
    20712474        for (i = 0 ; i < UINT16_MAX*2 - 1 && RT_SUCCESS(rc) && pv2 == pv; i++)
    20722475        {
    20732476            pv2 = (void *)(intptr_t)-4;
    2074             rc = PGMDynMapHCPage(pVM, cr3, &pv2);
     2477            rc = pgmRZDynMapHCPageCommon(pVM, cr3, &pv2 RTLOG_COMMA_SRC_POS);
    20752478        }
    20762479        ASMIntEnable();
     
    21062509            LogRel(("Test #3\n"));
    21072510            ASMIntDisable();
    2108             PGMDynMapMigrateAutoSet(&pVM->aCpus[0]);
     2511            PGMR0DynMapMigrateAutoSet(&pVM->aCpus[0]);
    21092512            pv2 = NULL;
    21102513            for (i = 0 ; i < RT_ELEMENTS(pSet->aEntries) - 5 && RT_SUCCESS(rc) && pv2 != pv; i++)
    21112514            {
    21122515                pv2 = (void *)(intptr_t)(-5 - i);
    2113                 rc = PGMDynMapHCPage(pVM, cr3 + PAGE_SIZE * (i + 5), &pv2);
     2516                rc = pgmRZDynMapHCPageCommon(pVM, cr3 + PAGE_SIZE * (i + 5), &pv2 RTLOG_COMMA_SRC_POS);
    21142517            }
    21152518            ASMIntEnable();
     
    21342537                LogRel(("Test #4\n"));
    21352538                ASMIntDisable();
    2136                 PGMDynMapMigrateAutoSet(&pVM->aCpus[0]);
     2539                PGMR0DynMapMigrateAutoSet(&pVM->aCpus[0]);
    21372540                for (i = 0 ; i < RT_ELEMENTS(pSet->aEntries) + 2; i++)
    21382541                {
    2139                     rc = PGMDynMapHCPage(pVM, cr3 - PAGE_SIZE * (i + 5), &pv2);
     2542                    rc = pgmRZDynMapHCPageCommon(pVM, cr3 - PAGE_SIZE * (i + 5), &pv2 RTLOG_COMMA_SRC_POS);
    21402543                    if (RT_SUCCESS(rc))
    21412544                        rc = PGMR0DynMapAssertIntegrity();
     
    21492552                    LogRel(("Test #5\n"));
    21502553                    ASMIntDisable();
    2151                     PGMDynMapMigrateAutoSet(&pVM->aCpus[0]);
    2152                     PGMDynMapReleaseAutoSet(&pVM->aCpus[0]);
    2153                     PGMDynMapStartAutoSet(&pVM->aCpus[0]);
     2554                    PGMR0DynMapMigrateAutoSet(&pVM->aCpus[0]);
     2555                    PGMRZDynMapReleaseAutoSet(&pVM->aCpus[0]);
     2556                    PGMRZDynMapStartAutoSet(&pVM->aCpus[0]);
    21542557                    ASMIntEnable();
    21552558
     
    21792582        LogRel(("Test #5\n"));
    21802583        ASMIntDisable();
    2181         PGMDynMapMigrateAutoSet(&pVM->aCpus[0]);
     2584        PGMR0DynMapMigrateAutoSet(&pVM->aCpus[0]);
    21822585        RTHCPHYS  HCPhysPT = RTR0MemObjGetPagePhysAddr(pThis->pSegHead->ahMemObjPTs[0], 0);
    2183         rc  = PGMDynMapHCPage(pVM, HCPhysPT, &pv);
     2586        rc  = pgmRZDynMapHCPageCommon(pVM, HCPhysPT, &pv RTLOG_COMMA_SRC_POS);
    21842587        if (RT_SUCCESS(rc))
    21852588        {
     
    22162619    LogRel(("Cleanup.\n"));
    22172620    ASMIntDisable();
    2218     PGMDynMapMigrateAutoSet(&pVM->aCpus[0]);
    2219     PGMDynMapFlushAutoSet(&pVM->aCpus[0]);
    2220     PGMDynMapReleaseAutoSet(&pVM->aCpus[0]);
     2621    PGMR0DynMapMigrateAutoSet(&pVM->aCpus[0]);
     2622    PGMRZDynMapFlushAutoSet(&pVM->aCpus[0]);
     2623    PGMRZDynMapReleaseAutoSet(&pVM->aCpus[0]);
    22212624    ASMIntEnable();
    22222625
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette