Changeset 31402 in vbox for trunk/src/VBox/VMM/VMMRZ
Timestamp:
    Aug 5, 2010, 12:28:18 PM
File:
    1 moved
Legend:
    Unmodified lines carry no prefix; added lines are prefixed with "+", removed lines with "-"; "..." marks elided unchanged lines.
trunk/src/VBox/VMM/VMMRZ/PGMRZDynMap.cpp
--- r31270
+++ r31402

  /* $Id$ */
  /** @file
-  * PGM - Page Manager and Monitor, ring-0 dynamic mapping cache.
+  * PGM - Page Manager and Monitor, dynamic mapping cache.
   */

  /*
-  * Copyright (C) 2008 Oracle Corporation
+  * Copyright (C) 2008-2010 Oracle Corporation
   *
   * This file is part of VirtualBox Open Source Edition (OSE), as
...
   */

+
  /*******************************************************************************
  *   Internal Functions                                                         *
  *******************************************************************************/
- #define LOG_GROUP LOG_GROUP_PGM
+ #define LOG_GROUP LOG_GROUP_PGM_DYNMAP
  #include <VBox/pgm.h>
  #include "../PGMInternal.h"
  #include <VBox/vm.h>
  #include "../PGMInline.h"
+ #include <VBox/err.h>
+ #include <VBox/param.h>
  #include <VBox/sup.h>
- #include <VBox/err.h>
  #include <iprt/asm.h>
  #include <iprt/asm-amd64-x86.h>
- #include <iprt/alloc.h>
  #include <iprt/assert.h>
- #include <iprt/cpuset.h>
- #include <iprt/memobj.h>
- #include <iprt/mp.h>
- #include <iprt/semaphore.h>
- #include <iprt/spinlock.h>
+ #ifndef IN_RC
+ # include <iprt/cpuset.h>
+ # include <iprt/mem.h>
+ # include <iprt/memobj.h>
+ # include <iprt/mp.h>
+ # include <iprt/semaphore.h>
+ # include <iprt/spinlock.h>
+ #endif
  #include <iprt/string.h>

...
  *   Defined Constants And Macros                                               *
  *******************************************************************************/
+ #ifdef IN_RING0
  /** The max size of the mapping cache (in pages). */
- #define PGMR0DYNMAP_MAX_PAGES           ((16*_1M) >> PAGE_SHIFT)
+ # define PGMR0DYNMAP_MAX_PAGES          ((16*_1M) >> PAGE_SHIFT)
  /** The small segment size that is adopted on out-of-memory conditions with a
   * single big segment. */
- #define PGMR0DYNMAP_SMALL_SEG_PAGES     128
+ # define PGMR0DYNMAP_SMALL_SEG_PAGES    128
  /** The number of pages we reserve per CPU. */
- #define PGMR0DYNMAP_PAGES_PER_CPU       256
+ # define PGMR0DYNMAP_PAGES_PER_CPU      256
  /** The minimum number of pages we reserve per CPU.
   * This must be equal or larger than the autoset size. */
- #define PGMR0DYNMAP_PAGES_PER_CPU_MIN   64
+ # define PGMR0DYNMAP_PAGES_PER_CPU_MIN  64
+ /** Calcs the overload threshold (safety margin). Current set at 50%. */
+ # define PGMR0DYNMAP_CALC_OVERLOAD(cPages)  ((cPages) / 2)
  /** The number of guard pages.
   * @remarks Never do tuning of the hashing or whatnot with a strict build! */
- #if defined(VBOX_STRICT)
- # define PGMR0DYNMAP_GUARD_PAGES        1
- #else
- # define PGMR0DYNMAP_GUARD_PAGES        0
- #endif
+ # if defined(VBOX_STRICT)
+ #  define PGMR0DYNMAP_GUARD_PAGES       1
+ # else
+ #  define PGMR0DYNMAP_GUARD_PAGES       0
+ # endif
+ #endif /* IN_RING0 */
  /** The dummy physical address of guard pages. */
  #define PGMR0DYNMAP_GUARD_PAGE_HCPHYS   UINT32_C(0x7777feed)
...
   * The alternative is to replace the entire PTE with an bad not-present
   * PTE. Either way, XNU will screw us. :-/   */
  # define PGMR0DYNMAP_GUARD_NP
  #endif
  /** The dummy PTE value for a page. */
...
  /** The dummy PTE value for a page. */
  #define PGMR0DYNMAP_GUARD_PAGE_PAE_PTE  UINT64_MAX /*X86_PTE_PAE_PG_MASK*/
- /** Calcs the overload threshold. Current set at 50%. */
- #define PGMR0DYNMAP_CALC_OVERLOAD(cPages)   ((cPages) / 2)
-
- #if 0
- /* Assertions causes panics if preemption is disabled, this can be used to work around that.
-  */
- //#define RTSpinlockAcquire(a,b) do {} while (0)
- //#define RTSpinlockRelease(a,b) do {} while (0)
- #endif
+ #ifdef IN_RING0 /* Note! Assertions causes panics if preemption is disabled,
+                  *       disable this to work around that. */
+ /**
+  * Acquire the spinlock.
+  * This will declare a temporary variable and expands to two statements!
+  */
+ # define PGMRZDYNMAP_SPINLOCK_ACQUIRE(pThis) \
+     RTSPINLOCKTMP   MySpinlockTmp = RTSPINLOCKTMP_INITIALIZER; \
+     RTSpinlockAcquire((pThis)->hSpinlock, &MySpinlockTmp)
+ /**
+  * Releases the spinlock.
+  */
+ # define PGMRZDYNMAP_SPINLOCK_RELEASE(pThis) \
+     RTSpinlockRelease((pThis)->hSpinlock, &MySpinlockTmp)
+
+ /**
+  * Re-acquires the spinlock.
+  */
+ # define PGMRZDYNMAP_SPINLOCK_REACQUIRE(pThis) \
+     RTSpinlockAcquire((pThis)->hSpinlock, &MySpinlockTmp)
+ #else
+ # define PGMRZDYNMAP_SPINLOCK_ACQUIRE(pThis)   do { } while (0)
+ # define PGMRZDYNMAP_SPINLOCK_RELEASE(pThis)   do { } while (0)
+ # define PGMRZDYNMAP_SPINLOCK_REACQUIRE(pThis) do { } while (0)
+ #endif

  /** Converts a PGMCPUM::AutoSet pointer into a PVMCPU. */
- #define PGMR0DYNMAP_2_VMCPU(pSet)       (RT_FROM_MEMBER(pSet, VMCPU, pgm.s.AutoSet))
+ #define PGMRZDYNMAP_SET_2_VMCPU(pSet)   (RT_FROM_MEMBER(pSet, VMCPU, pgm.s.AutoSet))

  /** Converts a PGMCPUM::AutoSet pointer into a PVM. */
- #define PGMR0DYNMAP_2_VM(pSet)          (PGMR0DYNMAP_2_VMCPU(pSet)->CTX_SUFF(pVM))
+ #define PGMRZDYNMAP_SET_2_VM(pSet)      (PGMRZDYNMAP_SET_2_VMCPU(pSet)->CTX_SUFF(pVM))
+
+ /** Converts a PGMCPUM::AutoSet pointer into a PVM. */
+ #ifdef IN_RC
+ # define PGMRZDYNMAP_SET_2_DYNMAP(pSet) (PGMRZDYNMAP_SET_2_VM(pSet)->pgm.s.pRCDynMap)
+ #else
+ # define PGMRZDYNMAP_SET_2_DYNMAP(pSet) (g_pPGMR0DynMap)
+ #endif
+
+ /**
+  * Gets the set index of the current CPU.
+  *
+  * This always returns 0 when in raw-mode context because there is only ever
+  * one EMT in that context (at least presently).
+  */
+ #ifdef IN_RC
+ # define PGMRZDYNMAP_CUR_CPU()      (0)
+ #else
+ # define PGMRZDYNMAP_CUR_CPU()      RTMpCpuIdToSetIndex(RTMpCpuId())
+ #endif
+
+ /** PGMRZDYNMAP::u32Magic. (Jens Christian Bugge Wesseltoft) */
+ #define PGMRZDYNMAP_MAGIC           UINT32_C(0x19640201)
+
+
+ /** Zaps an set entry. */
+ #define PGMRZDYNMAP_ZAP_ENTRY(pEntry) \
+     do \
+     { \
+         (pEntry)->iPage        = UINT16_MAX; \
+         (pEntry)->cRefs        = 0; \
+         (pEntry)->cInlinedRefs = 0; \
+         (pEntry)->cUnrefs      = 0; \
+     } while (0)

...
  *   Structures and Typedefs                                                    *
  *******************************************************************************/
+ #ifdef IN_RING0
  /**
   * Ring-0 dynamic mapping cache segment.
...
   * Ring-0 dynamic mapping cache entry.
   *
-  * This structure tracks
+  * @sa PGMRZDYNMAPENTRY, PGMRCDYNMAPENTRY.
   */
  typedef struct PGMR0DYNMAPENTRY
  {
...
          void                       *pv;
      } uPte;
+ # ifndef IN_RC
      /** CPUs that haven't invalidated this entry after it's last update. */
      RTCPUSET                    PendingSet;
+ # endif
  } PGMR0DYNMAPENTRY;
- /** Pointer to a ring-0 dynamic mapping cache entry. */
+ /** Pointer a mapping cache entry for the ring-0.
+  * @sa PPGMRZDYNMAPENTRY, PPGMRCDYNMAPENTRY, */
  typedef PGMR0DYNMAPENTRY *PPGMR0DYNMAPENTRY;
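(The following usage sketch is editorial and not part of the changeset; the function name is hypothetical. It illustrates why PGMRZDYNMAP_SPINLOCK_ACQUIRE documents itself as declaring a variable and expanding to two statements: the RTSPINLOCKTMP it declares must still be in scope when the RELEASE and REACQUIRE macros reference it, so all three have to be used within the same block.)

    static void pgmRZDynMapSketchLocked(PPGMRZDYNMAP pThis)
    {
        PGMRZDYNMAP_SPINLOCK_ACQUIRE(pThis);    /* declares MySpinlockTmp and acquires the lock */
        /* ... inspect or update pThis->paPages, pThis->cLoad, etc. under the lock ... */
        PGMRZDYNMAP_SPINLOCK_RELEASE(pThis);    /* reuses the MySpinlockTmp declared above */
    }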
  /**
-  * Ring-0 dynamic mapping cache.
-  *
-  * This is initialized during VMMR0 module init but no segments are allocated at
-  * that time. Segments will be added when the first VM is started and removed
-  * again when the last VM shuts down, thus avoid consuming memory while dormant.
-  * At module termination, the remaining bits will be freed up.
+  * Dynamic mapping cache for ring-0.
+  *
+  * This is initialized during VMMR0 module init but no segments are allocated
+  * at that time.  Segments will be added when the first VM is started and
+  * removed again when the last VM shuts down, thus avoid consuming memory while
+  * dormant.  At module termination, the remaining bits will be freed up.
+  *
+  * @sa PPGMRZDYNMAP, PGMRCDYNMAP.
   */
  typedef struct PGMR0DYNMAP
  {
-     /** The usual magic number / eye catcher (PGMR0DYNMAP_MAGIC). */
+     /** The usual magic number / eye catcher (PGMRZDYNMAP_MAGIC). */
      uint32_t                    u32Magic;
+ # ifndef IN_RC
      /** Spinlock serializing the normal operation of the cache. */
      RTSPINLOCK                  hSpinlock;
+ # endif
      /** Array for tracking and managing the pages. */
      PPGMR0DYNMAPENTRY           paPages;
...
       * This is maintained to get trigger adding of more mapping space. */
      uint32_t                    cMaxLoad;
+ # ifndef IN_RC
      /** Initialization / termination lock. */
      RTSEMFASTMUTEX              hInitLock;
+ # endif
      /** The number of guard pages. */
      uint32_t                    cGuardPages;
      /** The number of users (protected by hInitLock). */
      uint32_t                    cUsers;
+ # ifndef IN_RC
      /** Array containing a copy of the original page tables.
       * The entries are either X86PTE or X86PTEPAE according to fLegacyMode. */
...
      /** The paging mode. */
      SUPPAGINGMODE               enmPgMode;
+ # endif
  } PGMR0DYNMAP;
- /** Pointer to the ring-0 dynamic mapping cache */
- typedef PGMR0DYNMAP *PPGMR0DYNMAP;
-
- /** PGMR0DYNMAP::u32Magic. (Jens Christian Bugge Wesseltoft) */
- #define PGMR0DYNMAP_MAGIC 0x19640201

...
  /** Pointer to paging level data. */
  typedef PGMR0DYNMAPPGLVL *PPGMR0DYNMAPPGLVL;
+ #endif
+
+ /** Mapping cache entry for the current context.
+  * @sa PGMR0DYNMAPENTRY, PGMRCDYNMAPENTRY */
+ typedef CTX_MID(PGM,DYNMAPENTRY) PGMRZDYNMAPENTRY;
+ /** Pointer a mapping cache entry for the current context.
+  * @sa PGMR0DYNMAPENTRY, PGMRCDYNMAPENTRY */
+ typedef PGMRZDYNMAPENTRY *PPGMRZDYNMAPENTRY;
+
+ /** Pointer the mapping cache instance for the current context.
+  * @sa PGMR0DYNMAP, PGMRCDYNMAP */
+ typedef CTX_MID(PGM,DYNMAP) *PPGMRZDYNMAP;

...
  *   Global Variables                                                           *
  *******************************************************************************/
+ #ifdef IN_RING0
  /** Pointer to the ring-0 dynamic mapping cache. */
- static PPGMR0DYNMAP g_pPGMR0DynMap;
+ static PGMR0DYNMAP *g_pPGMR0DynMap;
+ #endif
  /** For overflow testing. */
  static bool g_fPGMR0DynMapTestRunning = false;

...
  *   Internal Functions                                                         *
  *******************************************************************************/
- static void pgmR0DynMapReleasePage(PPGMR0DYNMAP pThis, uint32_t iPage, uint32_t cRefs);
- static int  pgmR0DynMapSetup(PPGMR0DYNMAP pThis);
- static int  pgmR0DynMapExpand(PPGMR0DYNMAP pThis);
- static void pgmR0DynMapTearDown(PPGMR0DYNMAP pThis);
+ static void pgmRZDynMapReleasePage(PPGMRZDYNMAP pThis, uint32_t iPage, uint32_t cRefs);
+ #ifdef IN_RING0
+ static int  pgmR0DynMapSetup(PPGMRZDYNMAP pThis);
+ static int  pgmR0DynMapExpand(PPGMRZDYNMAP pThis);
+ static void pgmR0DynMapTearDown(PPGMRZDYNMAP pThis);
+ #endif
  #if 0 /*def DEBUG*/
  static int  pgmR0DynMapTest(PVM pVM);
...

+ /**
+  * Initializes the auto mapping sets for a VM.
+  *
+  * @returns VINF_SUCCESS on success, VERR_INTERNAL_ERROR on failure.
+  * @param   pVM     The VM in question.
+  */
+ static int pgmRZDynMapInitAutoSetsForVM(PVM pVM)
+ {
+     VMCPUID idCpu = pVM->cCpus;
+     AssertReturn(idCpu > 0 && idCpu <= VMM_MAX_CPU_COUNT, VERR_INTERNAL_ERROR);
+     while (idCpu-- > 0)
+     {
+         PPGMMAPSET pSet = &pVM->aCpus[idCpu].pgm.s.AutoSet;
+         uint32_t j = RT_ELEMENTS(pSet->aEntries);
+         while (j-- > 0)
+         {
+             pSet->aEntries[j].pvPage = NULL;
+             pSet->aEntries[j].HCPhys = NIL_RTHCPHYS;
+             PGMRZDYNMAP_ZAP_ENTRY(&pSet->aEntries[j]);
+         }
+         pSet->cEntries = PGMMAPSET_CLOSED;
+         pSet->iSubset  = UINT32_MAX;
+         pSet->iCpu     = -1;
+         memset(&pSet->aiHashTable[0], 0xff, sizeof(pSet->aiHashTable));
+     }
+
+     return VINF_SUCCESS;
+ }
+
+
+ #ifdef IN_RING0
+
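(An editorial sketch, not part of the changeset, of the lifecycle pgmRZDynMapInitAutoSetsForVM prepares for; the function name is hypothetical. The init code leaves every set with cEntries = PGMMAPSET_CLOSED, which is exactly what the start/release pair elsewhere in this changeset toggles:)

    static void pgmRZDynMapSketchAutoSetUse(PVMCPU pVCpu)
    {
        PGMRZDynMapStartAutoSet(pVCpu);     /* cEntries: PGMMAPSET_CLOSED -> 0 */
        /* ... mapping requests record their pages as entries in the set ... */
        PGMRZDynMapReleaseAutoSet(pVCpu);   /* flushes all entries and closes the set again */
    }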
  /**
   * Initializes the ring-0 dynamic mapping cache.
...
   * Create and initialize the cache instance.
   */
- PPGMR0DYNMAP pThis = (PPGMR0DYNMAP)RTMemAllocZ(sizeof(*pThis));
+ PPGMRZDYNMAP pThis = (PPGMRZDYNMAP)RTMemAllocZ(sizeof(*pThis));
  AssertLogRelReturn(pThis, VERR_NO_MEMORY);
  int rc = VINF_SUCCESS;
...
  if (RT_SUCCESS(rc))
  {
-     pThis->u32Magic = PGMR0DYNMAP_MAGIC;
+     pThis->u32Magic = PGMRZDYNMAP_MAGIC;
      g_pPGMR0DynMap = pThis;
      return VINF_SUCCESS;
...
   * is just a mirror image of PGMR0DynMapInit.
   */
- PPGMR0DYNMAP pThis = g_pPGMR0DynMap;
+ PPGMRZDYNMAP pThis = g_pPGMR0DynMap;
  if (pThis)
  {
...
   * Initialize the auto sets.
   */
- VMCPUID idCpu = pVM->cCpus;
- AssertReturn(idCpu > 0 && idCpu <= VMM_MAX_CPU_COUNT, VERR_INTERNAL_ERROR);
- while (idCpu-- > 0)
- {
-     PPGMMAPSET pSet = &pVM->aCpus[idCpu].pgm.s.AutoSet;
-     uint32_t j = RT_ELEMENTS(pSet->aEntries);
-     while (j-- > 0)
-     {
-         pSet->aEntries[j].iPage  = UINT16_MAX;
-         pSet->aEntries[j].cRefs  = 0;
-         pSet->aEntries[j].pvPage = NULL;
-         pSet->aEntries[j].HCPhys = NIL_RTHCPHYS;
-     }
-     pSet->cEntries = PGMMAPSET_CLOSED;
-     pSet->iSubset  = UINT32_MAX;
-     pSet->iCpu     = -1;
-     memset(&pSet->aiHashTable[0], 0xff, sizeof(pSet->aiHashTable));
- }
+ int rc = pgmRZDynMapInitAutoSetsForVM(pVM);
+ if (RT_FAILURE(rc))
+     return rc;

...
   * Reference and if necessary setup or expand the cache.
   */
- PPGMR0DYNMAP pThis = g_pPGMR0DynMap;
+ PPGMRZDYNMAP pThis = g_pPGMR0DynMap;
  AssertPtrReturn(pThis, VERR_INTERNAL_ERROR);
- int rc = RTSemFastMutexRequest(pThis->hInitLock);
+ rc = RTSemFastMutexRequest(pThis->hInitLock);
  AssertLogRelRCReturn(rc, rc);
...
      return;

- PPGMR0DYNMAP pThis = g_pPGMR0DynMap;
+ PPGMRZDYNMAP pThis = g_pPGMR0DynMap;
  AssertPtrReturnVoid(pThis);
...
  LogRel(("PGMR0DynMapTermVM: %d dangling refs to %#x\n", cRefs, iPage));
  if (iPage < pThis->cPages && cRefs > 0)
-     pgmR0DynMapReleasePage(pThis, iPage, cRefs);
+     pgmRZDynMapReleasePage(pThis, iPage, cRefs);
  else
      AssertLogRelMsgFailed(("cRefs=%d iPage=%#x cPages=%u\n", cRefs, iPage, pThis->cPages));

- pSet->aEntries[j].iPage  = UINT16_MAX;
- pSet->aEntries[j].cRefs  = 0;
- pSet->aEntries[j].pvPage = NULL;
- pSet->aEntries[j].HCPhys = NIL_RTHCPHYS;
+ PGMRZDYNMAP_ZAP_ENTRY(&pSet->aEntries[j]);
  }
  pSet->cEntries = PGMMAPSET_CLOSED;
...
  {
      Assert(!pvUser2);
-     PPGMR0DYNMAP pThis = (PPGMR0DYNMAP)pvUser1;
+     PPGMRZDYNMAP pThis = (PPGMRZDYNMAP)pvUser1;
      Assert(pThis == g_pPGMR0DynMap);
-     PPGMR0DYNMAPENTRY paPages = pThis->paPages;
+     PPGMRZDYNMAPENTRY paPages = pThis->paPages;
      uint32_t iPage = pThis->cPages;
      while (iPage-- > 0)
...
  * @param   pThis   The dynamic mapping cache instance.
  */
- static int pgmR0DynMapTlbShootDown(PPGMR0DYNMAP pThis)
+ static int pgmR0DynMapTlbShootDown(PPGMRZDYNMAP pThis)
  {
      int rc = RTMpOnAll(pgmR0DynMapShootDownTlbs, pThis, NULL);
...
  * @param   pcMinPages  The minimal size in pages.
  */
- static uint32_t pgmR0DynMapCalcNewSize(PPGMR0DYNMAP pThis, uint32_t *pcMinPages)
+ static uint32_t pgmR0DynMapCalcNewSize(PPGMRZDYNMAP pThis, uint32_t *pcMinPages)
  {
      Assert(pThis->cPages <= PGMR0DYNMAP_MAX_PAGES);
...
  * @param   pPgLvl      The paging level data.
  */
- void pgmR0DynMapPagingArrayInit(PPGMR0DYNMAP pThis, PPGMR0DYNMAPPGLVL pPgLvl)
+ void pgmR0DynMapPagingArrayInit(PPGMRZDYNMAP pThis, PPGMR0DYNMAPPGLVL pPgLvl)
  {
      RTCCUINTREG     cr4 = ASMGetCR4();
...
  * @param   ppvPTE      Where to store the PTE address.
  */
- static int pgmR0DynMapPagingArrayMapPte(PPGMR0DYNMAP pThis, PPGMR0DYNMAPPGLVL pPgLvl, void *pvPage,
+ static int pgmR0DynMapPagingArrayMapPte(PPGMRZDYNMAP pThis, PPGMR0DYNMAPPGLVL pPgLvl, void *pvPage,
                                          PPGMR0DYNMAPSEG pSeg, uint32_t cMaxPTs, void **ppvPTE)
  {
...
  * @param   pPage       The page.
  */
- DECLINLINE(void) pgmR0DynMapSetupGuardPage(PPGMR0DYNMAP pThis, PPGMR0DYNMAPENTRY pPage)
+ DECLINLINE(void) pgmR0DynMapSetupGuardPage(PPGMRZDYNMAP pThis, PPGMRZDYNMAPENTRY pPage)
  {
      memset(pPage->pvPage, 0xfd, PAGE_SIZE);
...
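(Editorial sketch, not from the changeset, tying pgmR0DynMapCalcNewSize above to the PGMR0DYNMAP_CALC_OVERLOAD macro from the constants section; the helper name is hypothetical. The threshold is 50%, so the cache counts as overloaded once the recorded peak load passes half the page count:)

    static bool pgmR0DynMapSketchIsOverloaded(PPGMRZDYNMAP pThis)
    {
        /* PGMR0DYNMAP_CALC_OVERLOAD(cPages) expands to (cPages) / 2. */
        return pThis->cMaxLoad > PGMR0DYNMAP_CALC_OVERLOAD(pThis->cPages);
    }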
816 914 */ 817 static int pgmR0DynMapAddSeg(PPGMR 0DYNMAP pThis, uint32_t cPages)915 static int pgmR0DynMapAddSeg(PPGMRZDYNMAP pThis, uint32_t cPages) 818 916 { 819 917 int rc2; … … 838 936 } 839 937 840 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER; 841 RTSpinlockAcquire(pThis->hSpinlock, &Tmp); 938 PGMRZDYNMAP_SPINLOCK_ACQUIRE(pThis); 842 939 843 940 memcpy(pvPages, pThis->paPages, sizeof(pThis->paPages[0]) * pThis->cPages); 844 941 void *pvToFree = pThis->paPages; 845 pThis->paPages = (PPGMR 0DYNMAPENTRY)pvPages;846 847 RTSpinlockRelease(pThis->hSpinlock, &Tmp);942 pThis->paPages = (PPGMRZDYNMAPENTRY)pvPages; 943 944 PGMRZDYNMAP_SPINLOCK_RELEASE(pThis); 848 945 RTMemFree(pvToFree); 849 946 … … 882 979 pThis->paPages[iPage].cRefs = 0; 883 980 pThis->paPages[iPage].uPte.pPae = 0; 981 #ifndef IN_RC 884 982 RTCpuSetFill(&pThis->paPages[iPage].PendingSet); 983 #endif 885 984 886 985 /* Map its page table, retry until we've got a clean run (paranoia). */ … … 983 1082 * @param pThis The dynamic mapping cache instance. 984 1083 */ 985 static int pgmR0DynMapSetup(PPGMR 0DYNMAP pThis)1084 static int pgmR0DynMapSetup(PPGMRZDYNMAP pThis) 986 1085 { 987 1086 /* … … 1026 1125 * @param pThis The dynamic mapping cache instance. 1027 1126 */ 1028 static int pgmR0DynMapExpand(PPGMR 0DYNMAP pThis)1127 static int pgmR0DynMapExpand(PPGMRZDYNMAP pThis) 1029 1128 { 1030 1129 /* … … 1069 1168 * @param pThis The dynamic mapping cache instance. 1070 1169 */ 1071 static void pgmR0DynMapTearDown(PPGMR 0DYNMAP pThis)1170 static void pgmR0DynMapTearDown(PPGMRZDYNMAP pThis) 1072 1171 { 1073 1172 /* 1074 1173 * Restore the original page table entries 1075 1174 */ 1076 PPGMR 0DYNMAPENTRY paPages = pThis->paPages;1175 PPGMRZDYNMAPENTRY paPages = pThis->paPages; 1077 1176 uint32_t iPage = pThis->cPages; 1078 1177 if (pThis->fLegacyMode) … … 1145 1244 } 1146 1245 1246 #endif /* IN_RING0 */ 1247 #ifdef IN_RC 1248 1249 /** 1250 * Initializes the dynamic mapping cache in raw-mode context. 1251 * 1252 * @returns VBox status code. 1253 * @param pVM The VM handle. 1254 */ 1255 VMMRCDECL(int) PGMRCDynMapInit(PVM pVM) 1256 { 1257 /* 1258 * Allocate and initialize the instance data and page array. 1259 */ 1260 PPGMRZDYNMAP pThis; 1261 size_t const cPages = MM_HYPER_DYNAMIC_SIZE / PAGE_SIZE; 1262 size_t const cb = RT_ALIGN_Z(sizeof(*pThis), 32) 1263 + sizeof(PGMRZDYNMAPENTRY) * cPages; 1264 int rc = MMHyperAlloc(pVM, cb, 32, MM_TAG_PGM, (void **)&pThis); 1265 if (RT_FAILURE(rc)) 1266 return rc; 1267 1268 pThis->u32Magic = PGMRZDYNMAP_MAGIC; 1269 pThis->paPages = RT_ALIGN_PT(pThis + 1, 32, PPGMRZDYNMAPENTRY); 1270 pThis->cPages = cPages; 1271 pThis->fLegacyMode = PGMGetHostMode(pVM) == PGMMODE_32_BIT; 1272 pThis->cLoad = 0; 1273 pThis->cMaxLoad = 0; 1274 pThis->cGuardPages = 0; 1275 pThis->cUsers = 1; 1276 1277 for (size_t iPage = 0; iPage < cPages; iPage++) 1278 { 1279 pThis->paPages[iPage].HCPhys = NIL_RTHCPHYS; 1280 pThis->paPages[iPage].pvPage = pVM->pgm.s.pbDynPageMapBaseGC + iPage * PAGE_SIZE; 1281 pThis->paPages[iPage].cRefs = 0; 1282 if (pThis->fLegacyMode) 1283 pThis->paPages[iPage].uPte.pLegacy = &pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage]; 1284 else 1285 pThis->paPages[iPage].uPte.pPae = &pVM->pgm.s.paDynPageMapPaePTEsGC[iPage]; 1286 } 1287 1288 pVM->pgm.s.pRCDynMap = pThis; 1289 1290 /* 1291 * Initialize the autosets the VM. 
1292 */ 1293 rc = pgmRZDynMapInitAutoSetsForVM(pVM); 1294 if (RT_FAILURE(rc)) 1295 return rc; 1296 1297 return VINF_SUCCESS; 1298 } 1299 1300 #endif /* IN_RC */ 1147 1301 1148 1302 /** … … 1153 1307 * @param cRefs The number of references to release. 1154 1308 */ 1155 DECLINLINE(void) pgmR 0DynMapReleasePageLocked(PPGMR0DYNMAP pThis, uint32_t iPage, int32_t cRefs)1309 DECLINLINE(void) pgmRZDynMapReleasePageLocked(PPGMRZDYNMAP pThis, uint32_t iPage, int32_t cRefs) 1156 1310 { 1157 1311 cRefs = ASMAtomicSubS32(&pThis->paPages[iPage].cRefs, cRefs) - cRefs; … … 1169 1323 * @param cRefs The number of references to release. 1170 1324 */ 1171 static void pgmR0DynMapReleasePage(PPGMR0DYNMAP pThis, uint32_t iPage, uint32_t cRefs) 1172 { 1173 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER; 1174 RTSpinlockAcquire(pThis->hSpinlock, &Tmp); 1175 pgmR0DynMapReleasePageLocked(pThis, iPage, cRefs); 1176 RTSpinlockRelease(pThis->hSpinlock, &Tmp); 1325 static void pgmRZDynMapReleasePage(PPGMRZDYNMAP pThis, uint32_t iPage, uint32_t cRefs) 1326 { 1327 PGMRZDYNMAP_SPINLOCK_ACQUIRE(pThis); 1328 pgmRZDynMapReleasePageLocked(pThis, iPage, cRefs); 1329 PGMRZDYNMAP_SPINLOCK_RELEASE(pThis); 1177 1330 } 1178 1331 … … 1186 1339 * @param iPage The page index pgmR0DynMapPage hashed HCPhys to. 1187 1340 * @param pVCpu The current CPU, for statistics. 1188 */ 1189 static uint32_t pgmR0DynMapPageSlow(PPGMR0DYNMAP pThis, RTHCPHYS HCPhys, uint32_t iPage, PVMCPU pVCpu) 1190 { 1191 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapPageSlow); 1341 * @param pfNew Set to @c true if a new entry was made and @c false if 1342 * an old entry was found and reused. 1343 */ 1344 static uint32_t pgmR0DynMapPageSlow(PPGMRZDYNMAP pThis, RTHCPHYS HCPhys, uint32_t iPage, PVMCPU pVCpu, bool *pfNew) 1345 { 1346 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapPageSlow); 1192 1347 1193 1348 /* … … 1199 1354 #endif 1200 1355 uint32_t const cPages = pThis->cPages; 1201 PPGMR 0DYNMAPENTRY paPages = pThis->paPages;1356 PPGMRZDYNMAPENTRY paPages = pThis->paPages; 1202 1357 uint32_t iFreePage; 1203 1358 if (!paPages[iPage].cRefs) … … 1217 1372 if (paPages[iFreePage].HCPhys == HCPhys) 1218 1373 { 1219 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapPageSlowLoopHits); 1374 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapPageSlowLoopHits); 1375 *pfNew = false; 1220 1376 return iFreePage; 1221 1377 } … … 1228 1384 return UINT32_MAX; 1229 1385 } 1230 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR 0DynMapPageSlowLoopMisses);1386 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapPageSlowLoopMisses); 1231 1387 #ifdef VBOX_WITH_STATISTICS 1232 1388 fLooped = true; … … 1240 1396 for (uint32_t iPage2 = (iPage + 3) % cPages; iPage2 != iPage; iPage2 = (iPage2 + 1) % cPages) 1241 1397 if (paPages[iPage2].HCPhys == HCPhys) 1242 STAM_COUNTER_INC(&pVCpu->pgm.s.StatR 0DynMapPageSlowLostHits);1398 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZDynMapPageSlowLostHits); 1243 1399 #endif 1244 1400 … … 1246 1402 * Setup the new entry. 1247 1403 */ 1404 *pfNew = true; 1248 1405 /*Log6(("pgmR0DynMapPageSlow: old - %RHp %#x %#llx\n", paPages[iFreePage].HCPhys, paPages[iFreePage].cRefs, paPages[iFreePage].uPte.pPae->u));*/ 1249 1406 paPages[iFreePage].HCPhys = HCPhys; 1407 #ifndef IN_RC 1250 1408 RTCpuSetFill(&paPages[iFreePage].PendingSet); 1409 #endif 1251 1410 if (pThis->fLegacyMode) 1252 1411 { … … 1286 1445 * @param ppvPage Where to the page address. 
1287 1446 */ 1288 DECLINLINE(uint32_t) pgmR0DynMapPage(PPGMR0DYNMAP pThis, RTHCPHYS HCPhys, int32_t iRealCpu, PVMCPU pVCpu, void **ppvPage) 1289 { 1290 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER; 1291 RTSpinlockAcquire(pThis->hSpinlock, &Tmp); 1447 DECLINLINE(uint32_t) pgmR0DynMapPage(PPGMRZDYNMAP pThis, RTHCPHYS HCPhys, int32_t iRealCpu, PVMCPU pVCpu, void **ppvPage) 1448 { 1449 PGMRZDYNMAP_SPINLOCK_ACQUIRE(pThis); 1292 1450 AssertMsg(!(HCPhys & PAGE_OFFSET_MASK), ("HCPhys=%RHp\n", HCPhys)); 1293 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR 0DynMapPage);1451 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapPage); 1294 1452 1295 1453 /* … … 1301 1459 * to pgmR0DynMapPageSlow(). 1302 1460 */ 1461 bool fNew = false; 1303 1462 uint32_t const cPages = pThis->cPages; 1304 1463 uint32_t iPage = (HCPhys >> PAGE_SHIFT) % cPages; 1305 PPGMR 0DYNMAPENTRY paPages = pThis->paPages;1464 PPGMRZDYNMAPENTRY paPages = pThis->paPages; 1306 1465 if (RT_LIKELY(paPages[iPage].HCPhys == HCPhys)) 1307 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR 0DynMapPageHits0);1466 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapPageHits0); 1308 1467 else 1309 1468 { … … 1312 1471 { 1313 1472 iPage = iPage2; 1314 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR 0DynMapPageHits1);1473 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapPageHits1); 1315 1474 } 1316 1475 else … … 1320 1479 { 1321 1480 iPage = iPage2; 1322 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR 0DynMapPageHits2);1481 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapPageHits2); 1323 1482 } 1324 1483 else 1325 1484 { 1326 iPage = pgmR0DynMapPageSlow(pThis, HCPhys, iPage, pVCpu );1485 iPage = pgmR0DynMapPageSlow(pThis, HCPhys, iPage, pVCpu, &fNew); 1327 1486 if (RT_UNLIKELY(iPage == UINT32_MAX)) 1328 1487 { 1329 RTSpinlockRelease(pThis->hSpinlock, &Tmp);1488 PGMRZDYNMAP_SPINLOCK_RELEASE(pThis); 1330 1489 *ppvPage = NULL; 1331 1490 return iPage; … … 1349 1508 { 1350 1509 ASMAtomicDecS32(&paPages[iPage].cRefs); 1351 RTSpinlockRelease(pThis->hSpinlock, &Tmp);1510 PGMRZDYNMAP_SPINLOCK_RELEASE(pThis); 1352 1511 *ppvPage = NULL; 1353 1512 AssertLogRelMsgFailedReturn(("cRefs=%d iPage=%p HCPhys=%RHp\n", cRefs, iPage, HCPhys), UINT32_MAX); … … 1355 1514 void *pvPage = paPages[iPage].pvPage; 1356 1515 1516 #ifndef IN_RC 1357 1517 /* 1358 1518 * Invalidate the entry? … … 1361 1521 if (RT_UNLIKELY(fInvalidateIt)) 1362 1522 RTCpuSetDelByIndex(&paPages[iPage].PendingSet, iRealCpu); 1363 1364 RTSpinlockRelease(pThis->hSpinlock, &Tmp); 1523 #endif 1524 1525 PGMRZDYNMAP_SPINLOCK_RELEASE(pThis); 1365 1526 1366 1527 /* 1367 1528 * Do the actual invalidation outside the spinlock. 1368 1529 */ 1530 #ifdef IN_RC 1531 if (RT_UNLIKELY(fNew)) 1532 #else 1369 1533 if (RT_UNLIKELY(fInvalidateIt)) 1370 { 1371 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapPageInvlPg); 1534 #endif 1535 { 1536 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapPageInvlPg); 1372 1537 ASMInvalidatePage(pvPage); 1373 1538 } … … 1383 1548 * @returns VBox status code. 1384 1549 */ 1385 VMMR0DECL(int) PGMR0DynMapAssertIntegrity(void)1550 static int pgmRZDynMapAssertIntegrity(PPGMRZDYNMAP pThis) 1386 1551 { 1387 1552 /* 1388 1553 * Basic pool stuff that doesn't require any lock, just assumes we're a user. 
  * @returns VBox status code.
  */
- VMMR0DECL(int) PGMR0DynMapAssertIntegrity(void)
+ static int pgmRZDynMapAssertIntegrity(PPGMRZDYNMAP pThis)
  {
      /*
       * Basic pool stuff that doesn't require any lock, just assumes we're a user.
       */
-     PPGMR0DYNMAP pThis = g_pPGMR0DynMap;
      if (!pThis)
          return VINF_SUCCESS;
      AssertPtrReturn(pThis, VERR_INVALID_POINTER);
-     AssertReturn(pThis->u32Magic == PGMR0DYNMAP_MAGIC, VERR_INVALID_MAGIC);
+     AssertReturn(pThis->u32Magic == PGMRZDYNMAP_MAGIC, VERR_INVALID_MAGIC);
      if (!pThis->cUsers)
          return VERR_INVALID_PARAMETER;
...
      int rc = VINF_SUCCESS;
-     RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
-     RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
+     PGMRZDYNMAP_SPINLOCK_ACQUIRE(pThis);

  #define CHECK_RET(expr, a) \
...
      if (RT_UNLIKELY(!(expr))) \
      { \
-         RTSpinlockRelease(pThis->hSpinlock, &Tmp); \
+         PGMRZDYNMAP_SPINLOCK_RELEASE(pThis); \
          RTAssertMsg1Weak(#expr, __LINE__, __FILE__, __PRETTY_FUNCTION__); \
          RTAssertMsg2Weak a; \
...
      uint32_t cGuard = 0;
      uint32_t cLoad  = 0;
-     PPGMR0DYNMAPENTRY paPages = pThis->paPages;
+     PPGMRZDYNMAPENTRY paPages = pThis->paPages;
      uint32_t iPage = pThis->cPages;
      if (pThis->fLegacyMode)
      {
+ #ifdef IN_RING0
          PCX86PGUINT paSavedPTEs = (PCX86PGUINT)pThis->pvSavedPTEs; NOREF(paSavedPTEs);
+ #endif
          while (iPage-- > 0)
          {
...
          {
              CHECK_RET(!(paPages[iPage].HCPhys & PAGE_OFFSET_MASK), ("#%u: %RHp\n", iPage, paPages[iPage].HCPhys));
-             X86PGUINT uPte = (paSavedPTEs[iPage] & (X86_PTE_G | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT))
-                            | X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D
+             X86PGUINT uPte = X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D
+ #ifdef IN_RING0
+                            | (paSavedPTEs[iPage] & (X86_PTE_G | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT))
+ #endif
                             | (paPages[iPage].HCPhys & X86_PTE_PAE_PG_MASK);
              CHECK_RET(paPages[iPage].uPte.pLegacy->u == uPte,
...
              cLoad++;
          }
+ #ifdef IN_RING0
          else
              CHECK_RET(paPages[iPage].uPte.pLegacy->u == paSavedPTEs[iPage],
                        ("#%u: %#x %#x", iPage, paPages[iPage].uPte.pLegacy->u, paSavedPTEs[iPage]));
+ #endif
          }
      }
      else
      {
+ #ifdef IN_RING0
          PCX86PGPAEUINT paSavedPTEs = (PCX86PGPAEUINT)pThis->pvSavedPTEs; NOREF(paSavedPTEs);
+ #endif
          while (iPage-- > 0)
          {
...
          {
              CHECK_RET(!(paPages[iPage].HCPhys & PAGE_OFFSET_MASK), ("#%u: %RHp\n", iPage, paPages[iPage].HCPhys));
-             X86PGPAEUINT uPte = (paSavedPTEs[iPage] & (X86_PTE_G | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT))
-                               | X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D
+             X86PGPAEUINT uPte = X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D
+ #ifdef IN_RING0
+                               | (paSavedPTEs[iPage] & (X86_PTE_G | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT))
+ #endif
                                | (paPages[iPage].HCPhys & X86_PTE_PAE_PG_MASK);
              CHECK_RET(paPages[iPage].uPte.pPae->u == uPte,
...
              cLoad++;
          }
+ #ifdef IN_RING0
          else
              CHECK_RET(paPages[iPage].uPte.pPae->u == paSavedPTEs[iPage],
                        ("#%u: %#llx %#llx", iPage, paPages[iPage].uPte.pPae->u, paSavedPTEs[iPage]));
+ #endif
          }
      }
...

  #undef CHECK_RET
-     RTSpinlockRelease(pThis->hSpinlock, &Tmp);
+     PGMRZDYNMAP_SPINLOCK_RELEASE(pThis);
      return VINF_SUCCESS;
  }
1678 */ 1679 VMMR0DECL(int) PGMR0DynMapAssertIntegrity(void) 1680 { 1681 return pgmRZDynMapAssertIntegrity(g_pPGMR0DynMap); 1682 } 1683 #endif /* IN_RING0 */ 1684 1685 #ifdef IN_RC 1686 /** 1687 * Assert the the integrity of the pool. 1688 * 1689 * @returns VBox status code. 1690 */ 1691 VMMRCDECL(int) PGMRCDynMapAssertIntegrity(PVM pVM) 1692 { 1693 return pgmRZDynMapAssertIntegrity((PPGMRZDYNMAP)pVM->pgm.s.pRCDynMap); 1694 } 1695 #endif /* IN_RC */ 1696 1697 1698 /** 1699 * As a final resort for a (somewhat) full auto set or full cache, try merge 1700 * duplicate entries and flush the ones we can. 1701 * 1702 * @param pSet The set. 1703 */ 1704 static void pgmDynMapOptimizeAutoSet(PPGMMAPSET pSet) 1705 { 1706 LogFlow(("pgmDynMapOptimizeAutoSet\n")); 1707 1708 for (uint32_t i = 0 ; i < pSet->cEntries; i++) 1709 { 1710 /* 1711 * Try merge entries. 1712 */ 1713 uint16_t const iPage = pSet->aEntries[i].iPage; 1714 uint32_t j = i + 1; 1715 while ( j < pSet->cEntries 1716 && ( pSet->iSubset == UINT32_MAX 1717 || pSet->iSubset < pSet->cEntries) ) 1718 { 1719 if (pSet->aEntries[j].iPage != iPage) 1720 j++; 1721 else 1722 { 1723 uint32_t const cHardRefs = (uint32_t)pSet->aEntries[i].cRefs 1724 + (uint32_t)pSet->aEntries[j].cRefs; 1725 uint32_t cInlinedRefs = (uint32_t)pSet->aEntries[i].cInlinedRefs 1726 + (uint32_t)pSet->aEntries[j].cInlinedRefs; 1727 uint32_t cUnrefs = (uint32_t)pSet->aEntries[i].cUnrefs 1728 + (uint32_t)pSet->aEntries[j].cUnrefs; 1729 uint32_t cSub = RT_MIN(cUnrefs, cInlinedRefs); 1730 cInlinedRefs -= cSub; 1731 cUnrefs -= cSub; 1732 1733 if ( cHardRefs < UINT16_MAX 1734 && cInlinedRefs < UINT16_MAX 1735 && cUnrefs < UINT16_MAX) 1736 { 1737 /* merge j into i removing j. */ 1738 Log2(("pgmDynMapOptimizeAutoSet: Merging #%u into #%u\n", j, i)); 1739 pSet->aEntries[i].cRefs = cHardRefs; 1740 pSet->aEntries[i].cInlinedRefs = cInlinedRefs; 1741 pSet->aEntries[i].cUnrefs = cUnrefs; 1742 pSet->cEntries--; 1743 if (j < pSet->cEntries) 1744 { 1745 pSet->aEntries[j] = pSet->aEntries[pSet->cEntries]; 1746 PGMRZDYNMAP_ZAP_ENTRY(&pSet->aEntries[pSet->cEntries]); 1747 } 1748 else 1749 PGMRZDYNMAP_ZAP_ENTRY(&pSet->aEntries[j]); 1750 } 1751 #if 0 /* too complicated, skip it. */ 1752 else 1753 { 1754 /* migrate the max number of refs from j into i and quit the inner loop. */ 1755 uint32_t cMigrate = UINT16_MAX - 1 - pSet->aEntries[i].cRefs; 1756 Assert(pSet->aEntries[j].cRefs > cMigrate); 1757 pSet->aEntries[j].cRefs -= cMigrate; 1758 pSet->aEntries[i].cRefs = UINT16_MAX - 1; 1759 break; 1760 } 1761 #endif 1762 } 1763 } 1764 1765 /* 1766 * Try make use of the unused hinting (cUnrefs) to evict entries 1767 * from both the set as well as the mapping cache. 
1768 */ 1769 1770 uint32_t const cTotalRefs = (uint32_t)pSet->aEntries[i].cRefs + pSet->aEntries[i].cInlinedRefs; 1771 Log2(("pgmDynMapOptimizeAutoSet: #%u/%u/%u pvPage=%p iPage=%u cRefs=%u cInlinedRefs=%u cUnrefs=%u cTotalRefs=%u\n", 1772 i, 1773 pSet->iSubset, 1774 pSet->cEntries, 1775 pSet->aEntries[i].pvPage, 1776 pSet->aEntries[i].iPage, 1777 pSet->aEntries[i].cRefs, 1778 pSet->aEntries[i].cInlinedRefs, 1779 pSet->aEntries[i].cUnrefs, 1780 cTotalRefs)); 1781 Assert(cTotalRefs >= pSet->aEntries[i].cUnrefs); 1782 1783 if ( cTotalRefs == pSet->aEntries[i].cUnrefs 1784 && ( pSet->iSubset == UINT32_MAX 1785 || pSet->iSubset < pSet->cEntries) 1786 ) 1787 { 1788 Log2(("pgmDynMapOptimizeAutoSet: Releasing iPage=%d/%p\n", pSet->aEntries[i].iPage, pSet->aEntries[i].pvPage)); 1789 //LogFlow(("pgmDynMapOptimizeAutoSet: Releasing iPage=%d/%p\n", pSet->aEntries[i].iPage, pSet->aEntries[i].pvPage)); 1790 pgmRZDynMapReleasePage(PGMRZDYNMAP_SET_2_DYNMAP(pSet), 1791 pSet->aEntries[i].iPage, 1792 pSet->aEntries[i].cRefs); 1793 pSet->cEntries--; 1794 if (i < pSet->cEntries) 1795 { 1796 pSet->aEntries[i] = pSet->aEntries[pSet->cEntries]; 1797 PGMRZDYNMAP_ZAP_ENTRY(&pSet->aEntries[pSet->cEntries]); 1798 } 1799 1800 i--; 1801 } 1802 } 1803 } 1804 1805 1497 1806 1498 1807 … … 1505 1814 * @param pVCpu The shared data for the current virtual CPU. 1506 1815 */ 1507 VMMDECL(void) PGMDynMapStartAutoSet(PVMCPU pVCpu) 1508 { 1816 VMMDECL(void) PGMRZDynMapStartAutoSet(PVMCPU pVCpu) 1817 { 1818 LogFlow(("PGMRZDynMapStartAutoSet:\n")); 1509 1819 Assert(pVCpu->pgm.s.AutoSet.cEntries == PGMMAPSET_CLOSED); 1510 1820 Assert(pVCpu->pgm.s.AutoSet.iSubset == UINT32_MAX); 1511 1821 pVCpu->pgm.s.AutoSet.cEntries = 0; 1512 pVCpu->pgm.s.AutoSet.iCpu = RTMpCpuIdToSetIndex(RTMpCpuId()); 1513 } 1514 1515 1822 pVCpu->pgm.s.AutoSet.iCpu = PGMRZDYNMAP_CUR_CPU(); 1823 } 1824 1825 1826 #ifdef IN_RING0 1516 1827 /** 1517 1828 * Starts or migrates the autoset of a virtual CPU. … … 1526 1837 * @thread EMT 1527 1838 */ 1528 VMM DECL(bool) PGMDynMapStartOrMigrateAutoSet(PVMCPU pVCpu)1839 VMMR0DECL(bool) PGMR0DynMapStartOrMigrateAutoSet(PVMCPU pVCpu) 1529 1840 { 1530 1841 bool fStartIt = pVCpu->pgm.s.AutoSet.cEntries == PGMMAPSET_CLOSED; 1531 1842 if (fStartIt) 1532 PGM DynMapStartAutoSet(pVCpu);1843 PGMRZDynMapStartAutoSet(pVCpu); 1533 1844 else 1534 PGM DynMapMigrateAutoSet(pVCpu);1845 PGMR0DynMapMigrateAutoSet(pVCpu); 1535 1846 return fStartIt; 1536 1847 } 1848 #endif /* IN_RING0 */ 1537 1849 1538 1850 … … 1551 1863 && RT_LIKELY(cEntries <= RT_ELEMENTS(pSet->aEntries))) 1552 1864 { 1553 PPGMR0DYNMAP pThis = g_pPGMR0DynMap; 1554 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER; 1555 RTSpinlockAcquire(pThis->hSpinlock, &Tmp); 1865 PPGMRZDYNMAP pThis = PGMRZDYNMAP_SET_2_DYNMAP(pSet); 1866 PGMRZDYNMAP_SPINLOCK_ACQUIRE(pThis); 1556 1867 1557 1868 uint32_t i = cEntries; … … 1562 1873 int32_t cRefs = pSet->aEntries[i].cRefs; 1563 1874 Assert(cRefs > 0); 1564 pgmR0DynMapReleasePageLocked(pThis, iPage, cRefs); 1565 1566 pSet->aEntries[i].iPage = UINT16_MAX; 1567 pSet->aEntries[i].cRefs = 0; 1875 pgmRZDynMapReleasePageLocked(pThis, iPage, cRefs); 1876 1877 PGMRZDYNMAP_ZAP_ENTRY(&pSet->aEntries[i]); 1568 1878 } 1569 1879 1570 1880 Assert(pThis->cLoad <= pThis->cPages - pThis->cGuardPages); 1571 RTSpinlockRelease(pThis->hSpinlock, &Tmp);1881 PGMRZDYNMAP_SPINLOCK_RELEASE(pThis); 1572 1882 } 1573 1883 } … … 1580 1890 * @param pVCpu The shared data for the current virtual CPU. 
1581 1891 */ 1582 VMMDECL(void) PGM DynMapReleaseAutoSet(PVMCPU pVCpu)1892 VMMDECL(void) PGMRZDynMapReleaseAutoSet(PVMCPU pVCpu) 1583 1893 { 1584 1894 PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet; … … 1593 1903 pSet->iCpu = -1; 1594 1904 1595 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatR0DynMapSetSize[(cEntries * 10 / RT_ELEMENTS(pSet->aEntries)) % 11]); 1905 #ifdef IN_RC 1906 if (RT_ELEMENTS(pSet->aEntries) > MM_HYPER_DYNAMIC_SIZE / PAGE_SIZE) 1907 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatRZDynMapSetFilledPct[(cEntries * 10 / (MM_HYPER_DYNAMIC_SIZE / PAGE_SIZE)) % 11]); 1908 else 1909 #endif 1910 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatRZDynMapSetFilledPct[(cEntries * 10 / RT_ELEMENTS(pSet->aEntries)) % 11]); 1596 1911 AssertMsg(cEntries < PGMMAPSET_MAX_FILL, ("%u\n", cEntries)); 1597 1912 if (cEntries > RT_ELEMENTS(pSet->aEntries) * 50 / 100) 1598 Log(("PGMDynMapReleaseAutoSet: cEntries=%d\n", pSet->cEntries)); 1913 Log(("PGMRZDynMapReleaseAutoSet: cEntries=%d\n", cEntries)); 1914 else 1915 LogFlow(("PGMRZDynMapReleaseAutoSet: cEntries=%d\n", cEntries)); 1599 1916 1600 1917 pgmDynMapFlushAutoSetWorker(pSet, cEntries); … … 1607 1924 * @param pVCpu The shared data for the current virtual CPU. 1608 1925 */ 1609 VMMDECL(void) PGM DynMapFlushAutoSet(PVMCPU pVCpu)1926 VMMDECL(void) PGMRZDynMapFlushAutoSet(PVMCPU pVCpu) 1610 1927 { 1611 1928 PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet; 1612 AssertMsg(pSet->iCpu == RTMpCpuIdToSetIndex(RTMpCpuId()), ("%d %d(%d) efl=%#x\n", pSet->iCpu, RTMpCpuIdToSetIndex(RTMpCpuId()), RTMpCpuId(), ASMGetFlags()));1929 AssertMsg(pSet->iCpu == PGMRZDYNMAP_CUR_CPU(), ("%d %d efl=%#x\n", pSet->iCpu, PGMRZDYNMAP_CUR_CPU(), ASMGetFlags())); 1613 1930 1614 1931 /* … … 1617 1934 uint32_t cEntries = pSet->cEntries; 1618 1935 AssertReturnVoid(cEntries != PGMMAPSET_CLOSED); 1619 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatR0DynMapSetSize[(cEntries * 10 / RT_ELEMENTS(pSet->aEntries)) % 11]); 1936 #ifdef IN_RC 1937 if (RT_ELEMENTS(pSet->aEntries) > MM_HYPER_DYNAMIC_SIZE / PAGE_SIZE) 1938 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatRZDynMapSetFilledPct[(cEntries * 10 / (MM_HYPER_DYNAMIC_SIZE / PAGE_SIZE)) % 11]); 1939 else 1940 #endif 1941 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatRZDynMapSetFilledPct[(cEntries * 10 / RT_ELEMENTS(pSet->aEntries)) % 11]); 1620 1942 if (cEntries >= RT_ELEMENTS(pSet->aEntries) * 45 / 100) 1621 1943 { … … 1626 1948 1627 1949 pgmDynMapFlushAutoSetWorker(pSet, cEntries); 1628 AssertMsg(pSet->iCpu == RTMpCpuIdToSetIndex(RTMpCpuId()), ("%d %d(%d) efl=%#x\n", pSet->iCpu, RTMpCpuIdToSetIndex(RTMpCpuId()), RTMpCpuId(), ASMGetFlags())); 1629 } 1630 } 1631 1632 1950 AssertMsg(pSet->iCpu == PGMRZDYNMAP_CUR_CPU(), ("%d %d efl=%#x\n", pSet->iCpu, PGMRZDYNMAP_CUR_CPU(), ASMGetFlags())); 1951 } 1952 } 1953 1954 1955 #ifndef IN_RC 1633 1956 /** 1634 1957 * Migrates the automatic mapping set of the current vCPU if it's active and … … 1644 1967 * @thread EMT 1645 1968 */ 1646 VMMDECL(void) PGMDynMapMigrateAutoSet(PVMCPU pVCpu) 1647 { 1969 VMMR0DECL(void) PGMR0DynMapMigrateAutoSet(PVMCPU pVCpu) 1970 { 1971 LogFlow(("PGMR0DynMapMigrateAutoSet\n")); 1648 1972 PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet; 1649 int32_t iRealCpu = RTMpCpuIdToSetIndex(RTMpCpuId());1973 int32_t iRealCpu = PGMRZDYNMAP_CUR_CPU(); 1650 1974 if (pSet->iCpu != iRealCpu) 1651 1975 { … … 1656 1980 if (i != 0 && RT_LIKELY(i <= RT_ELEMENTS(pSet->aEntries))) 1657 1981 { 1658 PPGMR0DYNMAP pThis = g_pPGMR0DynMap; 1659 RTSPINLOCKTMP Tmp = 
-             RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
-             RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
+             PPGMRZDYNMAP pThis = PGMRZDYNMAP_SET_2_DYNMAP(pSet);
+             PGMRZDYNMAP_SPINLOCK_ACQUIRE(pThis);

              while (i-- > 0)
...
                  {
                      RTCpuSetDelByIndex(&pThis->paPages[iPage].PendingSet, iRealCpu);
-                     RTSpinlockRelease(pThis->hSpinlock, &Tmp);
+                     PGMRZDYNMAP_SPINLOCK_RELEASE(pThis);

                      ASMInvalidatePage(pThis->paPages[iPage].pvPage);
-                     STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapMigrateInvlPg);
-
-                     RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
+                     STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapMigrateInvlPg);
+
+                     PGMRZDYNMAP_SPINLOCK_REACQUIRE(pThis);
                  }
              }

-             RTSpinlockRelease(pThis->hSpinlock, &Tmp);
+             PGMRZDYNMAP_SPINLOCK_RELEASE(pThis);
          }
      }
  }
+ #endif /* !IN_RC */

...
      pSet->cEntries = iSubset;

-     PPGMR0DYNMAP pThis = g_pPGMR0DynMap;
-     RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
-     RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
+     PPGMRZDYNMAP pThis = PGMRZDYNMAP_SET_2_DYNMAP(pSet);
+     PGMRZDYNMAP_SPINLOCK_ACQUIRE(pThis);

      while (i-- > iSubset)
...
          int32_t cRefs = pSet->aEntries[i].cRefs;
          Assert(cRefs > 0);
-         pgmR0DynMapReleasePageLocked(pThis, iPage, cRefs);
-
-         pSet->aEntries[i].iPage = UINT16_MAX;
-         pSet->aEntries[i].cRefs = 0;
-     }
-
-     RTSpinlockRelease(pThis->hSpinlock, &Tmp);
+         pgmRZDynMapReleasePageLocked(pThis, iPage, cRefs);
+
+         PGMRZDYNMAP_ZAP_ENTRY(&pSet->aEntries[i]);
+     }
+
+     PGMRZDYNMAP_SPINLOCK_RELEASE(pThis);
  }
...
  *
  * @returns The index of the previous subset. Pass this to
- *          PGMDynMapPopAutoSubset when poping it.
+ *          PGMDynMapPopAutoSubset when popping it.
  * @param   pVCpu       Pointer to the virtual cpu data.
  */
- VMMDECL(uint32_t) PGMDynMapPushAutoSubset(PVMCPU pVCpu)
+ VMMDECL(uint32_t) PGMRZDynMapPushAutoSubset(PVMCPU pVCpu)
  {
      PPGMMAPSET  pSet = &pVCpu->pgm.s.AutoSet;
      AssertReturn(pSet->cEntries != PGMMAPSET_CLOSED, UINT32_MAX);
      uint32_t    iPrevSubset = pSet->iSubset;
-     LogFlow(("PGMDynMapPushAutoSubset: pVCpu=%p iPrevSubset=%u\n", pVCpu, iPrevSubset));
+     LogFlow(("PGMRZDynMapPushAutoSubset: pVCpu=%p iPrevSubset=%u\n", pVCpu, iPrevSubset));
+
+ #ifdef IN_RC
+     /* kludge */
+     if (pSet->cEntries > MM_HYPER_DYNAMIC_SIZE / PAGE_SIZE / 2)
+     {
+         STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapSetOptimize);
+         pgmDynMapOptimizeAutoSet(pSet);
+     }
+ #endif

      pSet->iSubset = pSet->cEntries;
-     STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapSubsets);
+     STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapSubsets);
+
      return iPrevSubset;
  }
...
  * @param   iPrevSubset     What PGMDynMapPushAutoSubset returned.
1761 2093 */ 1762 VMMDECL(void) PGM DynMapPopAutoSubset(PVMCPU pVCpu, uint32_t iPrevSubset)2094 VMMDECL(void) PGMRZDynMapPopAutoSubset(PVMCPU pVCpu, uint32_t iPrevSubset) 1763 2095 { 1764 2096 PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet; 1765 2097 uint32_t cEntries = pSet->cEntries; 1766 LogFlow(("PGM DynMapPopAutoSubset: pVCpu=%p iPrevSubset=%u iSubset=%u cEntries=%u\n", pVCpu, iPrevSubset, pSet->iSubset, cEntries));2098 LogFlow(("PGMRZDynMapPopAutoSubset: pVCpu=%p iPrevSubset=%u iSubset=%u cEntries=%u\n", pVCpu, iPrevSubset, pSet->iSubset, cEntries)); 1767 2099 AssertReturnVoid(cEntries != PGMMAPSET_CLOSED); 1768 2100 AssertReturnVoid(pSet->iSubset >= iPrevSubset || iPrevSubset == UINT32_MAX); 1769 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatR0DynMapSetSize[(cEntries * 10 / RT_ELEMENTS(pSet->aEntries)) % 11]); 2101 #ifdef IN_RC 2102 if (RT_ELEMENTS(pSet->aEntries) > MM_HYPER_DYNAMIC_SIZE / PAGE_SIZE) 2103 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatRZDynMapSetFilledPct[(cEntries * 10 / (MM_HYPER_DYNAMIC_SIZE / PAGE_SIZE)) % 11]); 2104 else 2105 #endif 2106 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatRZDynMapSetFilledPct[(cEntries * 10 / RT_ELEMENTS(pSet->aEntries)) % 11]); 1770 2107 if ( cEntries >= RT_ELEMENTS(pSet->aEntries) * 40 / 100 1771 2108 && cEntries != pSet->iSubset) … … 1779 2116 1780 2117 /** 1781 * As a final resort for a full auto set, try merge duplicate entries. 1782 * 1783 * @param pSet The set. 1784 */ 1785 static void pgmDynMapOptimizeAutoSet(PPGMMAPSET pSet) 1786 { 1787 for (uint32_t i = 0 ; i < pSet->cEntries; i++) 1788 { 1789 uint16_t const iPage = pSet->aEntries[i].iPage; 1790 uint32_t j = i + 1; 1791 while (j < pSet->cEntries) 1792 { 1793 if (pSet->aEntries[j].iPage != iPage) 1794 j++; 1795 else if ((uint32_t)pSet->aEntries[i].cRefs + (uint32_t)pSet->aEntries[j].cRefs < UINT16_MAX) 1796 { 1797 /* merge j into i removing j. */ 1798 pSet->aEntries[i].cRefs += pSet->aEntries[j].cRefs; 1799 pSet->cEntries--; 1800 if (j < pSet->cEntries) 2118 * Indicates that the given page is unused and its mapping can be re-used. 2119 * 2120 * @param pVCpu The current CPU. 2121 * @param pvHint The page that is now unused. This does not have to 2122 * point at the start of the page. NULL is ignored. 2123 */ 2124 #ifdef LOG_ENABLED 2125 void pgmRZDynMapUnusedHint(PVMCPU pVCpu, void *pvHint, RT_SRC_POS_DECL) 2126 #else 2127 void pgmRZDynMapUnusedHint(PVMCPU pVCpu, void *pvHint) 2128 #endif 2129 { 2130 /* 2131 * Ignore NULL pointers and mask off the page offset bits. 2132 */ 2133 if (pvHint == NULL) 2134 return; 2135 pvHint = (void *)((uintptr_t)pvHint & ~(uintptr_t)PAGE_OFFSET_MASK); 2136 2137 PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet; 2138 uint32_t iEntry = pSet->cEntries; 2139 AssertReturnVoid(iEntry > 0); 2140 2141 /* 2142 * Find the entry in the usual unrolled fashion. 
2143 */ 2144 #define IS_MATCHING_ENTRY(pSet, iEntry, pvHint) \ 2145 ( (pSet)->aEntries[(iEntry)].pvPage == (pvHint) \ 2146 && (uint32_t)(pSet)->aEntries[(iEntry)].cRefs + (pSet)->aEntries[(iEntry)].cInlinedRefs \ 2147 > (pSet)->aEntries[(iEntry)].cUnrefs ) 2148 if ( iEntry >= 1 && IS_MATCHING_ENTRY(pSet, iEntry - 1, pvHint)) 2149 iEntry = iEntry - 1; 2150 else if (iEntry >= 2 && IS_MATCHING_ENTRY(pSet, iEntry - 2, pvHint)) 2151 iEntry = iEntry - 2; 2152 else if (iEntry >= 3 && IS_MATCHING_ENTRY(pSet, iEntry - 3, pvHint)) 2153 iEntry = iEntry - 3; 2154 else if (iEntry >= 4 && IS_MATCHING_ENTRY(pSet, iEntry - 4, pvHint)) 2155 iEntry = iEntry - 4; 2156 else if (iEntry >= 5 && IS_MATCHING_ENTRY(pSet, iEntry - 5, pvHint)) 2157 iEntry = iEntry - 5; 2158 else if (iEntry >= 6 && IS_MATCHING_ENTRY(pSet, iEntry - 6, pvHint)) 2159 iEntry = iEntry - 6; 2160 else if (iEntry >= 7 && IS_MATCHING_ENTRY(pSet, iEntry - 7, pvHint)) 2161 iEntry = iEntry - 7; 2162 else 2163 { 2164 /* 2165 * Loop till we find it. 2166 */ 2167 bool fFound = false; 2168 if (iEntry > 7) 2169 { 2170 iEntry -= 7; 2171 while (iEntry-- > 0) 2172 if (IS_MATCHING_ENTRY(pSet, iEntry, pvHint)) 1801 2173 { 1802 pSet->aEntries[j] = pSet->aEntries[pSet->cEntries]; 1803 pSet->aEntries[pSet->cEntries].iPage = UINT16_MAX; 1804 pSet->aEntries[pSet->cEntries].cRefs = 0; 2174 fFound = true; 2175 break; 1805 2176 } 1806 else 1807 { 1808 pSet->aEntries[j].iPage = UINT16_MAX; 1809 pSet->aEntries[j].cRefs = 0; 1810 } 1811 } 1812 else 1813 { 1814 /* migrate the max number of refs from j into i and quit the inner loop. */ 1815 uint32_t cMigrate = UINT16_MAX - 1 - pSet->aEntries[i].cRefs; 1816 Assert(pSet->aEntries[j].cRefs > cMigrate); 1817 pSet->aEntries[j].cRefs -= cMigrate; 1818 pSet->aEntries[i].cRefs = UINT16_MAX - 1; 1819 break; 1820 } 1821 } 1822 } 1823 } 1824 1825 1826 /** 1827 * Common worker code for PGMDynMapHCPhys, pgmR0DynMapHCPageInlined and 1828 * pgmR0DynMapGCPageInlined. 2177 } 2178 AssertMsgReturnVoid(fFound, 2179 ("pvHint=%p cEntries=%#x iSubset=%#x\n" 2180 "aEntries[0] = {%#x, %#x, %#x, %#x, %p}\n" 2181 "aEntries[1] = {%#x, %#x, %#x, %#x, %p}\n" 2182 "aEntries[2] = {%#x, %#x, %#x, %#x, %p}\n" 2183 "aEntries[3] = {%#x, %#x, %#x, %#x, %p}\n" 2184 "aEntries[4] = {%#x, %#x, %#x, %#x, %p}\n" 2185 "aEntries[5] = {%#x, %#x, %#x, %#x, %p}\n" 2186 , 2187 pvHint, pSet->cEntries, pSet->iSubset, 2188 pSet->aEntries[0].iPage, pSet->aEntries[0].cRefs, pSet->aEntries[0].cInlinedRefs, pSet->aEntries[0].cUnrefs, pSet->aEntries[0].pvPage, 2189 pSet->aEntries[1].iPage, pSet->aEntries[1].cRefs, pSet->aEntries[1].cInlinedRefs, pSet->aEntries[1].cUnrefs, pSet->aEntries[1].pvPage, 2190 pSet->aEntries[2].iPage, pSet->aEntries[2].cRefs, pSet->aEntries[2].cInlinedRefs, pSet->aEntries[2].cUnrefs, pSet->aEntries[2].pvPage, 2191 pSet->aEntries[3].iPage, pSet->aEntries[3].cRefs, pSet->aEntries[3].cInlinedRefs, pSet->aEntries[3].cUnrefs, pSet->aEntries[3].pvPage, 2192 pSet->aEntries[4].iPage, pSet->aEntries[4].cRefs, pSet->aEntries[4].cInlinedRefs, pSet->aEntries[4].cUnrefs, pSet->aEntries[4].pvPage, 2193 pSet->aEntries[5].iPage, pSet->aEntries[5].cRefs, pSet->aEntries[5].cInlinedRefs, pSet->aEntries[5].cUnrefs, pSet->aEntries[5].pvPage)); 2194 } 2195 #undef IS_MATCHING_ENTRY 2196 2197 /* 2198 * Update it. 
2199 */ 2200 uint32_t const cTotalRefs = (uint32_t)pSet->aEntries[iEntry].cRefs + pSet->aEntries[iEntry].cInlinedRefs; 2201 uint32_t const cUnrefs = pSet->aEntries[iEntry].cUnrefs; 2202 LogFlow(("pgmRZDynMapUnusedHint: pvHint=%p #%u cRefs=%d cInlinedRefs=%d cUnrefs=%d (+1) cTotalRefs=%d %s(%d) %s\n", 2203 pvHint, iEntry, pSet->aEntries[iEntry].cRefs, pSet->aEntries[iEntry].cInlinedRefs, cUnrefs, cTotalRefs, pszFile, iLine, pszFunction)); 2204 AssertReturnVoid(cTotalRefs > cUnrefs); 2205 2206 if (RT_LIKELY(cUnrefs < UINT16_MAX - 1)) 2207 pSet->aEntries[iEntry].cUnrefs++; 2208 else if (pSet->aEntries[iEntry].cInlinedRefs) 2209 { 2210 uint32_t cSub = RT_MIN(pSet->aEntries[iEntry].cInlinedRefs, pSet->aEntries[iEntry].cUnrefs); 2211 pSet->aEntries[iEntry].cInlinedRefs -= cSub; 2212 pSet->aEntries[iEntry].cUnrefs -= cSub; 2213 pSet->aEntries[iEntry].cUnrefs++; 2214 } 2215 else 2216 Log(("pgmRZDynMapUnusedHint: pvHint=%p ignored because of overflow! %s(%d) %s\n", pvHint, pszFile, iLine, pszFunction)); 2217 } 2218 2219 2220 /** 2221 * Common worker code for pgmRZDynMapHCPageInlined, pgmRZDynMapHCPageV2Inlined 2222 * and pgmR0DynMapGCPageOffInlined. 1829 2223 * 1830 2224 * @returns VINF_SUCCESS, bails out to ring-3 on failure. … … 1835 2229 * @remarks This is a very hot path. 1836 2230 */ 1837 int pgmR 0DynMapHCPageCommon(PPGMMAPSET pSet, RTHCPHYS HCPhys, void **ppv)1838 { 1839 LogFlow(("pgmR0DynMapHCPageCommon: pSet=%p HCPhys=%RHp ppv=%p\n", pSet, HCPhys, ppv));1840 AssertMsg(pSet->iCpu == RTMpCpuIdToSetIndex(RTMpCpuId()), ("%d %d(%d) efl=%#x\n", pSet->iCpu, RTMpCpuIdToSetIndex(RTMpCpuId()), RTMpCpuId(), ASMGetFlags()));1841 PVMCPU pVCpu = PGMR0DYNMAP_2_VMCPU(pSet);2231 int pgmRZDynMapHCPageCommon(PPGMMAPSET pSet, RTHCPHYS HCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL) 2232 { 2233 AssertMsg(pSet->iCpu == PGMRZDYNMAP_CUR_CPU(), ("%d %d efl=%#x\n", pSet->iCpu, PGMRZDYNMAP_CUR_CPU(), ASMGetFlags())); 2234 PVMCPU pVCpu = PGMRZDYNMAP_SET_2_VMCPU(pSet); 2235 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPage, a); 1842 2236 1843 2237 /* 1844 2238 * Map it. 1845 2239 */ 1846 void *pvPage; 1847 uint32_t const iPage = pgmR0DynMapPage(g_pPGMR0DynMap, HCPhys, pSet->iCpu, pVCpu, &pvPage); 2240 void *pvPage; 2241 PPGMRZDYNMAP pThis = PGMRZDYNMAP_SET_2_DYNMAP(pSet); 2242 uint32_t iPage = pgmR0DynMapPage(pThis, HCPhys, pSet->iCpu, pVCpu, &pvPage); 1848 2243 if (RT_UNLIKELY(iPage == UINT32_MAX)) 1849 2244 { 1850 RTAssertMsg2Weak("PGMDynMapHCPage: cLoad=%u/%u cPages=%u cGuardPages=%u\n", 1851 g_pPGMR0DynMap->cLoad, g_pPGMR0DynMap->cMaxLoad, g_pPGMR0DynMap->cPages, g_pPGMR0DynMap->cGuardPages); 1852 if (!g_fPGMR0DynMapTestRunning) 1853 VMMRZCallRing3NoCpu(PGMR0DYNMAP_2_VM(pSet), VMMCALLRING3_VM_R0_ASSERTION, 0); 1854 *ppv = NULL; 1855 return VERR_PGM_DYNMAP_FAILED; 2245 /* 2246 * We're out of mapping space, optimize our set to try remedy the 2247 * situation. (Only works if there are unreference hints.) 
2248 */ 2249 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapSetOptimize); 2250 pgmDynMapOptimizeAutoSet(pSet); 2251 2252 iPage = pgmR0DynMapPage(pThis, HCPhys, pSet->iCpu, pVCpu, &pvPage); 2253 if (RT_UNLIKELY(iPage == UINT32_MAX)) 2254 { 2255 RTAssertMsg2Weak("pgmRZDynMapHCPageCommon: cLoad=%u/%u cPages=%u cGuardPages=%u\n", 2256 pThis->cLoad, pThis->cMaxLoad, pThis->cPages, pThis->cGuardPages); 2257 if (!g_fPGMR0DynMapTestRunning) 2258 VMMRZCallRing3NoCpu(PGMRZDYNMAP_SET_2_VM(pSet), VMMCALLRING3_VM_R0_ASSERTION, 0); 2259 *ppv = NULL; 2260 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPage, a); 2261 return VERR_PGM_DYNMAP_FAILED; 2262 } 1856 2263 } 1857 2264 … … 1869 2276 { 1870 2277 unsigned iEntry = pSet->cEntries++; 1871 pSet->aEntries[iEntry].cRefs = 1; 1872 pSet->aEntries[iEntry].iPage = iPage; 1873 pSet->aEntries[iEntry].pvPage = pvPage; 1874 pSet->aEntries[iEntry].HCPhys = HCPhys; 2278 pSet->aEntries[iEntry].cRefs = 1; 2279 pSet->aEntries[iEntry].cUnrefs = 0; 2280 pSet->aEntries[iEntry].cInlinedRefs = 0; 2281 pSet->aEntries[iEntry].iPage = iPage; 2282 pSet->aEntries[iEntry].pvPage = pvPage; 2283 pSet->aEntries[iEntry].HCPhys = HCPhys; 1875 2284 pSet->aiHashTable[PGMMAPSET_HASH(HCPhys)] = iEntry; 2285 LogFlow(("pgmRZDynMapHCPageCommon: pSet=%p HCPhys=%RHp #%u/%u/%p cRefs=%u/0/0 iPage=%#x [a] %s(%d) %s\n", 2286 pSet, HCPhys, iEntry, iEntry + 1, pvPage, 1, iPage, pszFile, iLine, pszFunction)); 1876 2287 } 1877 2288 /* Any of the last 5 pages? */ 1878 2289 else if ( pSet->aEntries[i - 0].iPage == iPage 1879 2290 && pSet->aEntries[i - 0].cRefs < UINT16_MAX - 1) 2291 { 1880 2292 pSet->aEntries[i - 0].cRefs++; 2293 LogFlow(("pgmRZDynMapHCPageCommon: pSet=%p HCPhys=%RHp #%u/%u/%p cRefs=%u/%u/%u iPage=%#x [0] %s(%d) %s\n", pSet, HCPhys, i - 0, pSet->cEntries, pvPage, pSet->aEntries[i - 0].cRefs, pSet->aEntries[i - 0].cInlinedRefs, pSet->aEntries[i - 0].cUnrefs, iPage, pszFile, iLine, pszFunction)); 2294 } 1881 2295 else if ( pSet->aEntries[i - 1].iPage == iPage 1882 2296 && pSet->aEntries[i - 1].cRefs < UINT16_MAX - 1) 2297 { 1883 2298 pSet->aEntries[i - 1].cRefs++; 2299 LogFlow(("pgmRZDynMapHCPageCommon: pSet=%p HCPhys=%RHp #%u/%u/%p cRefs=%u/%u/%u iPage=%#x [1] %s(%d) %s\n", pSet, HCPhys, i - 1, pSet->cEntries, pvPage, pSet->aEntries[i - 1].cRefs, pSet->aEntries[i - 1].cInlinedRefs, pSet->aEntries[i - 1].cUnrefs, iPage, pszFile, iLine, pszFunction)); 2300 } 1884 2301 else if ( pSet->aEntries[i - 2].iPage == iPage 1885 2302 && pSet->aEntries[i - 2].cRefs < UINT16_MAX - 1) 2303 { 1886 2304 pSet->aEntries[i - 2].cRefs++; 2305 LogFlow(("pgmRZDynMapHCPageCommon: pSet=%p HCPhys=%RHp #%u/%u/%p cRefs=%u/%u/%u iPage=%#x [2] %s(%d) %s\n", pSet, HCPhys, i - 2, pSet->cEntries, pvPage, pSet->aEntries[i - 2].cRefs, pSet->aEntries[i - 2].cInlinedRefs, pSet->aEntries[i - 2].cUnrefs, iPage, pszFile, iLine, pszFunction)); 2306 } 1887 2307 else if ( pSet->aEntries[i - 3].iPage == iPage 1888 2308 && pSet->aEntries[i - 3].cRefs < UINT16_MAX - 1) 2309 { 1889 2310 pSet->aEntries[i - 3].cRefs++; 2311 LogFlow(("pgmRZDynMapHCPageCommon: pSet=%p HCPhys=%RHp #%u/%u/%p cRefs=%u/%u/%u iPage=%#x [4] %s(%d) %s\n", pSet, HCPhys, i - 3, pSet->cEntries, pvPage, pSet->aEntries[i - 3].cRefs, pSet->aEntries[i - 3].cInlinedRefs, pSet->aEntries[i - 3].cUnrefs, iPage, pszFile, iLine, pszFunction)); 2312 } 1890 2313 else if ( pSet->aEntries[i - 4].iPage == iPage 1891 2314 && pSet->aEntries[i - 4].cRefs < UINT16_MAX - 1) 2315 { 1892 2316 pSet->aEntries[i - 4].cRefs++; 2317 
LogFlow(("pgmRZDynMapHCPageCommon: pSet=%p HCPhys=%RHp #%u/%u/%p cRefs=%u/%u/%u iPage=%#x [4] %s(%d) %s\n", pSet, HCPhys, i - 4, pSet->cEntries, pvPage, pSet->aEntries[i - 4].cRefs, pSet->aEntries[i - 4].cInlinedRefs, pSet->aEntries[i - 4].cUnrefs, iPage, pszFile, iLine, pszFunction)); 2318 } 1893 2319 /* Don't bother searching unless we're above a 60% load. */ 1894 2320 else if (RT_LIKELY(i <= (int32_t)RT_ELEMENTS(pSet->aEntries) * 60 / 100)) 1895 2321 { 1896 2322 unsigned iEntry = pSet->cEntries++; 1897 pSet->aEntries[iEntry].cRefs = 1; 1898 pSet->aEntries[iEntry].iPage = iPage; 1899 pSet->aEntries[iEntry].pvPage = pvPage; 1900 pSet->aEntries[iEntry].HCPhys = HCPhys; 2323 pSet->aEntries[iEntry].cRefs = 1; 2324 pSet->aEntries[iEntry].cUnrefs = 0; 2325 pSet->aEntries[iEntry].cInlinedRefs = 0; 2326 pSet->aEntries[iEntry].iPage = iPage; 2327 pSet->aEntries[iEntry].pvPage = pvPage; 2328 pSet->aEntries[iEntry].HCPhys = HCPhys; 1901 2329 pSet->aiHashTable[PGMMAPSET_HASH(HCPhys)] = iEntry; 2330 LogFlow(("pgmRZDynMapHCPageCommon: pSet=%p HCPhys=%RHp #%u/%u/%p cRefs=1/0/0 iPage=%#x [b] %s(%d) %s\n", pSet, HCPhys, iEntry, pSet->cEntries, pvPage, iPage, pszFile, iLine, pszFunction)); 1902 2331 } 1903 2332 else … … 1911 2340 { 1912 2341 pSet->aEntries[i].cRefs++; 1913 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapSetSearchHits); 2342 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapSetSearchHits); 2343 LogFlow(("pgmRZDynMapHCPageCommon: pSet=%p HCPhys=%RHp #%u/%u/%p cRefs=%u/%u/%u iPage=%#x [c] %s(%d) %s\n", pSet, HCPhys, i, pSet->cEntries, pvPage, pSet->aEntries[i].cRefs, pSet->aEntries[i].cInlinedRefs, pSet->aEntries[i].cUnrefs, iPage, pszFile, iLine, pszFunction)); 1914 2344 break; 1915 2345 } 1916 2346 if (i < 0) 1917 2347 { 1918 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR 0DynMapSetSearchMisses);2348 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapSetSearchMisses); 1919 2349 if (pSet->iSubset < pSet->cEntries) 1920 2350 { 1921 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR 0DynMapSetSearchFlushes);1922 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatR 0DynMapSetSize[(pSet->cEntries * 10 / RT_ELEMENTS(pSet->aEntries)) % 11]);2351 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapSetSearchFlushes); 2352 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->aStatRZDynMapSetFilledPct[(pSet->cEntries * 10 / RT_ELEMENTS(pSet->aEntries)) % 11]); 1923 2353 AssertMsg(pSet->cEntries < PGMMAPSET_MAX_FILL, ("%u\n", pSet->cEntries)); 1924 2354 pgmDynMapFlushSubset(pSet); … … 1927 2357 if (RT_UNLIKELY(pSet->cEntries >= RT_ELEMENTS(pSet->aEntries))) 1928 2358 { 1929 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR 0DynMapSetOptimize);2359 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapSetOptimize); 1930 2360 pgmDynMapOptimizeAutoSet(pSet); 1931 2361 } … … 1934 2364 { 1935 2365 unsigned iEntry = pSet->cEntries++; 1936 pSet->aEntries[iEntry].cRefs = 1; 1937 pSet->aEntries[iEntry].iPage = iPage; 1938 pSet->aEntries[iEntry].pvPage = pvPage; 1939 pSet->aEntries[iEntry].HCPhys = HCPhys; 2366 pSet->aEntries[iEntry].cRefs = 1; 2367 pSet->aEntries[iEntry].cUnrefs = 0; 2368 pSet->aEntries[iEntry].cInlinedRefs = 0; 2369 pSet->aEntries[iEntry].iPage = iPage; 2370 pSet->aEntries[iEntry].pvPage = pvPage; 2371 pSet->aEntries[iEntry].HCPhys = HCPhys; 1940 2372 pSet->aiHashTable[PGMMAPSET_HASH(HCPhys)] = iEntry; 2373 LogFlow(("pgmRZDynMapHCPageCommon: pSet=%p HCPhys=%RHp #%u/%u/%p cRefs=1/0/0 iPage=%#x [d] %s(%d) %s\n", pSet, HCPhys, 
… …
2025 2428 {
2026 2429 LogRel(("pgmR0DynMapTest: ****** START ******\n"));
2027 PPGMR0DYNMAP pThis = g_pPGMR0DynMap;
2028 2430 PPGMMAPSET pSet = &pVM->aCpus[0].pgm.s.AutoSet;
2431 PPGMRZDYNMAP pThis = PGMRZDYNMAP_SET_2_DYNMAP(pSet);
2029 2432 uint32_t i;
… …
2047 2450 LogRel(("Test #1\n"));
2048 2451 ASMIntDisable();
2049 PGMDynMapStartAutoSet(&pVM->aCpus[0]);
2452 PGMRZDynMapStartAutoSet(&pVM->aCpus[0]);
2050 2453 
2051 2454 uint64_t cr3 = ASMGetCR3() & ~(uint64_t)PAGE_OFFSET_MASK;
2052 2455 void *pv = (void *)(intptr_t)-1;
2053 2456 void *pv2 = (void *)(intptr_t)-2;
2054 rc = PGMDynMapHCPage(pVM, cr3, &pv);
2055 int rc2 = PGMDynMapHCPage(pVM, cr3, &pv2);
2457 rc = pgmRZDynMapHCPageCommon(pVM, cr3, &pv RTLOG_COMMA_SRC_POS);
2458 int rc2 = pgmRZDynMapHCPageCommon(pVM, cr3, &pv2 RTLOG_COMMA_SRC_POS);
2056 2459 ASMIntEnable();
2057 2460 if ( RT_SUCCESS(rc2)
… …
2068 2471 LogRel(("Test #2\n"));
2069 2472 ASMIntDisable();
2070 PGMDynMapMigrateAutoSet(&pVM->aCpus[0]);
2473 PGMR0DynMapMigrateAutoSet(&pVM->aCpus[0]);
2071 2474 for (i = 0 ; i < UINT16_MAX*2 - 1 && RT_SUCCESS(rc) && pv2 == pv; i++)
2072 2475 {
2073 2476 pv2 = (void *)(intptr_t)-4;
2074 rc = PGMDynMapHCPage(pVM, cr3, &pv2);
2477 rc = pgmRZDynMapHCPageCommon(pVM, cr3, &pv2 RTLOG_COMMA_SRC_POS);
2075 2478 }
2076 2479 ASMIntEnable();
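Tests #1 through #3 pivot on the cache's central invariant: mapping the same host-physical address repeatedly must return the same ring-0 pointer, and distinct pages must not alias. The checks boil down to the following user-mode sketch; mapPage() is a toy stand-in for pgmRZDynMapHCPageCommon(), not the real API:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Toy stand-in: hands out one fixed slot per distinct "physical"
     * address so the invariant can be exercised outside the kernel. */
    static uint64_t g_aPhys[16];
    static char     g_aSlots[16][4096];

    static void *mapPage(uint64_t HCPhys)
    {
        for (unsigned i = 0; i < 16; i++)
        {
            if (g_aPhys[i] == HCPhys)
                return g_aSlots[i];     /* already mapped: same address */
            if (g_aPhys[i] == 0)
            {
                g_aPhys[i] = HCPhys;    /* fresh slot for a new page    */
                return g_aSlots[i];
            }
        }
        return NULL;                    /* "set is full"                */
    }

    int main(void)
    {
        uint64_t HCPhysCr3 = 0x1000;
        void *pv  = mapPage(HCPhysCr3);
        void *pv2 = mapPage(HCPhysCr3);
        assert(pv == pv2);              /* Tests #1/#2: stable mapping  */

        void *pv3 = mapPage(HCPhysCr3 + 4096);
        assert(pv3 != pv);              /* Test #3: no aliasing         */
        return 0;
    }

What the sketch cannot reproduce is the environment the real tests insist on: each mapping runs between ASMIntDisable()/ASMIntEnable(), and PGMR0DynMapMigrateAutoSet() is called between runs, since the auto set belongs to a virtual CPU and must be carried along when execution moves to another host CPU.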
… …
2106 2509 LogRel(("Test #3\n"));
2107 2510 ASMIntDisable();
2108 PGMDynMapMigrateAutoSet(&pVM->aCpus[0]);
2511 PGMR0DynMapMigrateAutoSet(&pVM->aCpus[0]);
2109 2512 pv2 = NULL;
2110 2513 for (i = 0 ; i < RT_ELEMENTS(pSet->aEntries) - 5 && RT_SUCCESS(rc) && pv2 != pv; i++)
2111 2514 {
2112 2515 pv2 = (void *)(intptr_t)(-5 - i);
2113 rc = PGMDynMapHCPage(pVM, cr3 + PAGE_SIZE * (i + 5), &pv2);
2516 rc = pgmRZDynMapHCPageCommon(pVM, cr3 + PAGE_SIZE * (i + 5), &pv2 RTLOG_COMMA_SRC_POS);
2114 2517 }
2115 2518 ASMIntEnable();
… …
2134 2537 LogRel(("Test #4\n"));
2135 2538 ASMIntDisable();
2136 PGMDynMapMigrateAutoSet(&pVM->aCpus[0]);
2539 PGMR0DynMapMigrateAutoSet(&pVM->aCpus[0]);
2137 2540 for (i = 0 ; i < RT_ELEMENTS(pSet->aEntries) + 2; i++)
2138 2541 {
2139 rc = PGMDynMapHCPage(pVM, cr3 - PAGE_SIZE * (i + 5), &pv2);
2542 rc = pgmRZDynMapHCPageCommon(pVM, cr3 - PAGE_SIZE * (i + 5), &pv2 RTLOG_COMMA_SRC_POS);
2140 2543 if (RT_SUCCESS(rc))
2141 2544 rc = PGMR0DynMapAssertIntegrity();
… …
2149 2552 LogRel(("Test #5\n"));
2150 2553 ASMIntDisable();
2151 PGMDynMapMigrateAutoSet(&pVM->aCpus[0]);
2152 PGMDynMapReleaseAutoSet(&pVM->aCpus[0]);
2153 PGMDynMapStartAutoSet(&pVM->aCpus[0]);
2554 PGMR0DynMapMigrateAutoSet(&pVM->aCpus[0]);
2555 PGMRZDynMapReleaseAutoSet(&pVM->aCpus[0]);
2556 PGMRZDynMapStartAutoSet(&pVM->aCpus[0]);
2154 2557 ASMIntEnable();
… …
2179 2582 LogRel(("Test #5\n"));
2180 2583 ASMIntDisable();
2181 PGMDynMapMigrateAutoSet(&pVM->aCpus[0]);
2584 PGMR0DynMapMigrateAutoSet(&pVM->aCpus[0]);
2182 2585 RTHCPHYS HCPhysPT = RTR0MemObjGetPagePhysAddr(pThis->pSegHead->ahMemObjPTs[0], 0);
2183 rc = PGMDynMapHCPage(pVM, HCPhysPT, &pv);
2586 rc = pgmRZDynMapHCPageCommon(pVM, HCPhysPT, &pv RTLOG_COMMA_SRC_POS);
2184 2587 if (RT_SUCCESS(rc))
2185 2588 {
… …
2216 2619 LogRel(("Cleanup.\n"));
2217 2620 ASMIntDisable();
2218 PGMDynMapMigrateAutoSet(&pVM->aCpus[0]);
2219 PGMDynMapFlushAutoSet(&pVM->aCpus[0]);
2220 PGMDynMapReleaseAutoSet(&pVM->aCpus[0]);
2621 PGMR0DynMapMigrateAutoSet(&pVM->aCpus[0]);
2622 PGMRZDynMapFlushAutoSet(&pVM->aCpus[0]);
2623 PGMRZDynMapReleaseAutoSet(&pVM->aCpus[0]);
2221 2624 ASMIntEnable();
2222 2625 
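A smaller thread running through the changeset is profiling hygiene: STAM_PROFILE_STOP is now issued before every return from pgmRZDynMapHCPageCommon, the VERR_PGM_DYNMAP_FAILED and VERR_PGM_DYNMAP_FULL_SET paths included, so failed mappings are accounted for alongside successful ones. A schematic of that stop-on-every-exit discipline, with placeholder hooks rather than the real STAM macros:

    #include <stdint.h>

    /* Placeholder hooks; the real code uses STAM_PROFILE_START/STOP. */
    static void profileStart(uint64_t *pu) { *pu = 0; /* e.g. sample a TSC */ }
    static void profileStop(uint64_t *pu)  { (void)pu; /* accumulate delta */ }

    static int doMapWork(int fFailHard, int fSetFull)
    {
        uint64_t u;
        profileStart(&u);

        if (fFailHard)
        {
            profileStop(&u);        /* the VERR_PGM_DYNMAP_FAILED path   */
            return -1;
        }
        if (fSetFull)
        {
            profileStop(&u);        /* the VERR_PGM_DYNMAP_FULL_SET path */
            return -2;
        }

        profileStop(&u);            /* the VINF_SUCCESS path             */
        return 0;
    }

In C there is no RAII to guarantee the pairing, so the diff repeats the stop call on each exit path rather than funneling everything through a single exit label.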