VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/PGMR0DynMap.cpp@15347

Last change on this file since 15347 was 15347, checked in by vboxsync on 2008-12-12

PGMR0DynMap: Fixed testcase (missing migration).

1/* $Id: PGMR0DynMap.cpp 15347 2008-12-12 01:59:23Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, ring-0 dynamic mapping cache.
4 */
5
6/*
7 * Copyright (C) 2008 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#define LOG_GROUP LOG_GROUP_PGM
26#include <VBox/pgm.h>
27#include "../PGMInternal.h"
28#include <VBox/vm.h>
29#include <VBox/sup.h>
30#include <VBox/err.h>
31#include <iprt/asm.h>
32#include <iprt/alloc.h>
33#include <iprt/assert.h>
34#include <iprt/cpuset.h>
35#include <iprt/memobj.h>
36#include <iprt/mp.h>
37#include <iprt/semaphore.h>
38#include <iprt/spinlock.h>
39#include <iprt/string.h>
40
41
42/*******************************************************************************
43* Defined Constants And Macros *
44*******************************************************************************/
45/** The max size of the mapping cache (in pages). */
46#define PGMR0DYNMAP_MAX_PAGES ((16*_1M) >> PAGE_SHIFT)
47/** The segment size (in pages) to fall back on when out-of-memory conditions
48 * prevent the allocation of a single big segment. */
49#define PGMR0DYNMAP_SMALL_SEG_PAGES 128
50/** The number of pages we reserve per CPU. */
51#define PGMR0DYNMAP_PAGES_PER_CPU 256
52/** The minimum number of pages we reserve per CPU.
53 * This must be equal to or larger than the autoset size. */
54#define PGMR0DYNMAP_PAGES_PER_CPU_MIN 32
55/** The number of guard pages.
56 * @remarks Never do tuning of the hashing or whatnot with a strict build! */
57#if defined(VBOX_STRICT)
58# define PGMR0DYNMAP_GUARD_PAGES 1
59#else
60# define PGMR0DYNMAP_GUARD_PAGES 0
61#endif
62/** The dummy physical address of guard pages. */
63#define PGMR0DYNMAP_GUARD_PAGE_HCPHYS UINT32_C(0x7777feed)
64/** The dummy reference count of guard pages. (Must be non-zero.) */
65#define PGMR0DYNMAP_GUARD_PAGE_REF_COUNT INT32_C(0x7777feed)
66#if 0
67/** Define this to just clear the present bit on guard pages.
68 * The alternative is to replace the entire PTE with an bad not-present
69 * PTE. Either way, XNU will screw us. :-/ */
70#define PGMR0DYNMAP_GUARD_NP
71#endif
72/** The dummy legacy PTE value for a guard page. */
73#define PGMR0DYNMAP_GUARD_PAGE_LEGACY_PTE X86_PTE_PG_MASK
74/** The dummy PAE PTE value for a guard page. */
75#define PGMR0DYNMAP_GUARD_PAGE_PAE_PTE UINT64_MAX /*X86_PTE_PAE_PG_MASK*/
76/** Calculates the overload threshold. Currently set at 50%. */
77#define PGMR0DYNMAP_CALC_OVERLOAD(cPages) ((cPages) / 2)
78
79#if 0
80/* Assertions cause panics if preemption is disabled; this can be used to work around that. */
81//#define RTSpinlockAcquire(a,b) do {} while (0)
82//#define RTSpinlockRelease(a,b) do {} while (0)
83#endif
84
85
86/*******************************************************************************
87* Structures and Typedefs *
88*******************************************************************************/
89/**
90 * Ring-0 dynamic mapping cache segment.
91 *
92 * The dynamic mapping cache can be extended with additional segments if the
93 * load is found to be too high. This is done the next time a VM is created, under
94 * the protection of the init mutex. The arrays are reallocated and the new
95 * segment is added to the end of them. Nothing is rehashed of course, as the
96 * indexes / addresses must remain unchanged.
97 *
98 * This structure is only modified while owning the init mutex or during module
99 * init / term.
100 */
101typedef struct PGMR0DYNMAPSEG
102{
103 /** Pointer to the next segment. */
104 struct PGMR0DYNMAPSEG *pNext;
105 /** The memory object for the virtual address range that we're abusing. */
106 RTR0MEMOBJ hMemObj;
107 /** The start page in the cache. (I.e. index into the arrays.) */
108 uint16_t iPage;
109 /** The number of pages this segment contributes. */
110 uint16_t cPages;
111 /** The number of page tables. */
112 uint16_t cPTs;
113 /** The memory objects for the page tables. */
114 RTR0MEMOBJ ahMemObjPTs[1];
115} PGMR0DYNMAPSEG;
116/** Pointer to a ring-0 dynamic mapping cache segment. */
117typedef PGMR0DYNMAPSEG *PPGMR0DYNMAPSEG;
118
119
120/**
121 * Ring-0 dynamic mapping cache entry.
122 *
123 * This structure tracks a single page in the mapping cache.
124 */
125typedef struct PGMR0DYNMAPENTRY
126{
127 /** The physical address of the currently mapped page.
128 * This is duplicated for three reasons: cache locality, cache policy of the PT
129 * mappings and sanity checks. */
130 RTHCPHYS HCPhys;
131 /** Pointer to the page. */
132 void *pvPage;
133 /** The number of references. */
134 int32_t volatile cRefs;
135 /** PTE pointer union. */
136 union PGMR0DYNMAPENTRY_PPTE
137 {
138 /** PTE pointer, 32-bit legacy version. */
139 PX86PTE pLegacy;
140 /** PTE pointer, PAE version. */
141 PX86PTEPAE pPae;
142 /** PTE pointer, the void version. */
143 void *pv;
144 } uPte;
145 /** CPUs that haven't invalidated this entry after its last update. */
146 RTCPUSET PendingSet;
147} PGMR0DYNMAPENTRY;
148/** Pointer to a ring-0 dynamic mapping cache entry. */
149typedef PGMR0DYNMAPENTRY *PPGMR0DYNMAPENTRY;
150
151
152/**
153 * Ring-0 dynamic mapping cache.
154 *
155 * This is initialized during VMMR0 module init but no segments are allocated at
156 * that time. Segments will be added when the first VM is started and removed
157 * again when the last VM shuts down, thus avoiding consuming memory while dormant.
158 * At module termination, the remaining bits will be freed up.
159 */
160typedef struct PGMR0DYNMAP
161{
162 /** The usual magic number / eye catcher (PGMR0DYNMAP_MAGIC). */
163 uint32_t u32Magic;
164 /** Spinlock serializing the normal operation of the cache. */
165 RTSPINLOCK hSpinlock;
166 /** Array for tracking and managing the pages. */
167 PPGMR0DYNMAPENTRY paPages;
168 /** The cache size given as a number of pages. */
169 uint32_t cPages;
170 /** Whether it's 32-bit legacy or PAE/AMD64 paging mode. */
171 bool fLegacyMode;
172 /** The current load.
173 * This does not include guard pages. */
174 uint32_t cLoad;
175 /** The max load ever.
176 * This is maintained to trigger the addition of more mapping space. */
177 uint32_t cMaxLoad;
178 /** Initialization / termination lock. */
179 RTSEMFASTMUTEX hInitLock;
180 /** The number of guard pages. */
181 uint32_t cGuardPages;
182 /** The number of users (protected by hInitLock). */
183 uint32_t cUsers;
184 /** Array containing a copy of the original page tables.
185 * The entries are either X86PTE or X86PTEPAE according to fLegacyMode. */
186 void *pvSavedPTEs;
187 /** List of segments. */
188 PPGMR0DYNMAPSEG pSegHead;
189 /** The paging mode. */
190 SUPPAGINGMODE enmPgMode;
191} PGMR0DYNMAP;
192/** Pointer to the ring-0 dynamic mapping cache */
193typedef PGMR0DYNMAP *PPGMR0DYNMAP;
194
195/** PGMR0DYNMAP::u32Magic. (Jens Christian Bugge Wesseltoft) */
196#define PGMR0DYNMAP_MAGIC 0x19640201
197
198
199/**
200 * Paging level data.
201 */
202typedef struct PGMR0DYNMAPPGLVL
203{
204 uint32_t cLevels; /**< The number of levels. */
205 struct
206 {
207 RTHCPHYS HCPhys; /**< The address of the page for the current level,
208 * i.e. what hMemObj/hMapObj is currently mapping. */
209 RTHCPHYS fPhysMask; /**< Mask for extracting HCPhys from uEntry. */
210 RTR0MEMOBJ hMemObj; /**< Memory object for HCPhys, PAGE_SIZE. */
211 RTR0MEMOBJ hMapObj; /**< Mapping object for hMemObj. */
212 uint32_t fPtrShift; /**< The pointer shift count. */
213 uint64_t fPtrMask; /**< The mask to apply to the shifted pointer to get the table index. */
214 uint64_t fAndMask; /**< And mask to check entry flags. */
215 uint64_t fResMask; /**< The result from applying fAndMask. */
216 union
217 {
218 void *pv; /**< hMapObj address. */
219 PX86PGUINT paLegacy; /**< Legacy table view. */
220 PX86PGPAEUINT paPae; /**< PAE/AMD64 table view. */
221 } u;
222 } a[4];
223} PGMR0DYNMAPPGLVL;
224/** Pointer to paging level data. */
225typedef PGMR0DYNMAPPGLVL *PPGMR0DYNMAPPGLVL;
226
227
228/*******************************************************************************
229* Global Variables *
230*******************************************************************************/
231/** Pointer to the ring-0 dynamic mapping cache. */
232static PPGMR0DYNMAP g_pPGMR0DynMap;
233
234
235/*******************************************************************************
236* Internal Functions *
237*******************************************************************************/
238static void pgmR0DynMapReleasePage(PPGMR0DYNMAP pThis, uint32_t iPage, uint32_t cRefs);
239static int pgmR0DynMapSetup(PPGMR0DYNMAP pThis);
240static int pgmR0DynMapExpand(PPGMR0DYNMAP pThis);
241static void pgmR0DynMapTearDown(PPGMR0DYNMAP pThis);
242#ifdef DEBUG
243static int pgmR0DynMapTest(PVM pVM);
244#endif
245
246
247/**
248 * Initializes the ring-0 dynamic mapping cache.
249 *
250 * @returns VBox status code.
251 */
252VMMR0DECL(int) PGMR0DynMapInit(void)
253{
254 Assert(!g_pPGMR0DynMap);
255
256 /*
257 * Create and initialize the cache instance.
258 */
259 PPGMR0DYNMAP pThis = (PPGMR0DYNMAP)RTMemAllocZ(sizeof(*pThis));
260 AssertLogRelReturn(pThis, VERR_NO_MEMORY);
261 int rc = VINF_SUCCESS;
262 pThis->enmPgMode = SUPR0GetPagingMode();
263 switch (pThis->enmPgMode)
264 {
265 case SUPPAGINGMODE_32_BIT:
266 case SUPPAGINGMODE_32_BIT_GLOBAL:
267 pThis->fLegacyMode = true; /* 32-bit legacy paging uses 4-byte PTEs. */
268 break;
269 case SUPPAGINGMODE_PAE:
270 case SUPPAGINGMODE_PAE_GLOBAL:
271 case SUPPAGINGMODE_PAE_NX:
272 case SUPPAGINGMODE_PAE_GLOBAL_NX:
273 case SUPPAGINGMODE_AMD64:
274 case SUPPAGINGMODE_AMD64_GLOBAL:
275 case SUPPAGINGMODE_AMD64_NX:
276 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
277 pThis->fLegacyMode = false;
278 break;
279 default:
280 rc = VERR_INTERNAL_ERROR;
281 break;
282 }
283 if (RT_SUCCESS(rc))
284 {
285 rc = RTSemFastMutexCreate(&pThis->hInitLock);
286 if (RT_SUCCESS(rc))
287 {
288 rc = RTSpinlockCreate(&pThis->hSpinlock);
289 if (RT_SUCCESS(rc))
290 {
291 pThis->u32Magic = PGMR0DYNMAP_MAGIC;
292 g_pPGMR0DynMap = pThis;
293 return VINF_SUCCESS;
294 }
295 RTSemFastMutexDestroy(pThis->hInitLock);
296 }
297 }
298 RTMemFree(pThis);
299 return rc;
300}
301
302
303/**
304 * Terminates the ring-0 dynamic mapping cache.
305 */
306VMMR0DECL(void) PGMR0DynMapTerm(void)
307{
308 /*
309 * Destroy the cache.
310 *
311 * There are not supposed to be any races here; the loader should
312 * make sure of that. So, don't bother locking anything.
313 *
314 * The VM objects should all be destroyed by now, so there are no
315 * dangling users or anything like that to clean up. This routine
316 * is just a mirror image of PGMR0DynMapInit.
317 */
318 PPGMR0DYNMAP pThis = g_pPGMR0DynMap;
319 if (pThis)
320 {
321 AssertPtr(pThis);
322 g_pPGMR0DynMap = NULL;
323
324 /* This should *never* happen, but in case it does try not to leak memory. */
325 AssertLogRelMsg(!pThis->cUsers && !pThis->paPages && !pThis->pvSavedPTEs && !pThis->cPages,
326 ("cUsers=%d paPages=%p pvSavedPTEs=%p cPages=%#x\n",
327 pThis->cUsers, pThis->paPages, pThis->pvSavedPTEs, pThis->cPages));
328 if (pThis->paPages)
329 pgmR0DynMapTearDown(pThis);
330
331 /* Free the associated resources. */
332 RTSemFastMutexDestroy(pThis->hInitLock);
333 pThis->hInitLock = NIL_RTSEMFASTMUTEX;
334 RTSpinlockDestroy(pThis->hSpinlock);
335 pThis->hSpinlock = NIL_RTSPINLOCK;
336 pThis->u32Magic = UINT32_MAX;
337 RTMemFree(pThis);
338 }
339}
340
341
342/**
343 * Initializes the dynamic mapping cache for a new VM.
344 *
345 * @returns VBox status code.
346 * @param pVM Pointer to the shared VM structure.
347 */
348VMMR0DECL(int) PGMR0DynMapInitVM(PVM pVM)
349{
350 AssertMsgReturn(!pVM->pgm.s.pvR0DynMapUsed, ("%p (pThis=%p)\n", pVM->pgm.s.pvR0DynMapUsed, g_pPGMR0DynMap), VERR_WRONG_ORDER);
351
352 /*
353 * Initialize the auto sets.
354 */
355 VMCPUID idCpu = pVM->cCPUs;
356 AssertReturn(idCpu > 0 && idCpu <= VMCPU_MAX_CPU_COUNT, VERR_INTERNAL_ERROR);
357 while (idCpu-- > 0)
358 {
359 PPGMMAPSET pSet = &pVM->aCpus[idCpu].pgm.s.AutoSet;
360 uint32_t j = RT_ELEMENTS(pSet->aEntries);
361 while (j-- > 0)
362 {
363 pSet->aEntries[j].iPage = UINT16_MAX;
364 pSet->aEntries[j].cRefs = 0;
365 pSet->aEntries[j].pvPage = NULL;
366 pSet->aEntries[j].HCPhys = NIL_RTHCPHYS;
367 }
368 pSet->cEntries = PGMMAPSET_CLOSED;
369 pSet->iCpu = -1;
370 memset(&pSet->aiHashTable[0], 0xff, sizeof(pSet->aiHashTable));
371 }
372
373 /*
374 * Do we need the cache? Skip the last bit if we don't.
375 */
376 if (!VMMIsHwVirtExtForced(pVM))
377 return VINF_SUCCESS;
378
379 /*
380 * Reference and if necessary setup or expand the cache.
381 */
382 PPGMR0DYNMAP pThis = g_pPGMR0DynMap;
383 AssertPtrReturn(pThis, VERR_INTERNAL_ERROR);
384 int rc = RTSemFastMutexRequest(pThis->hInitLock);
385 AssertLogRelRCReturn(rc, rc);
386
387 pThis->cUsers++;
388 if (pThis->cUsers == 1)
389 {
390 rc = pgmR0DynMapSetup(pThis);
391#ifdef DEBUG
392 if (RT_SUCCESS(rc))
393 {
394 rc = pgmR0DynMapTest(pVM);
395 if (RT_FAILURE(rc))
396 pgmR0DynMapTearDown(pThis);
397 }
398#endif
399 }
400 else if (pThis->cMaxLoad > PGMR0DYNMAP_CALC_OVERLOAD(pThis->cPages - pThis->cGuardPages))
401 rc = pgmR0DynMapExpand(pThis);
402 if (RT_SUCCESS(rc))
403 pVM->pgm.s.pvR0DynMapUsed = pThis;
404 else
405 pThis->cUsers--;
406
407 RTSemFastMutexRelease(pThis->hInitLock);
408 return rc;
409}
410
411
412/**
413 * Terminates the dynamic mapping cache usage for a VM.
414 *
415 * @param pVM Pointer to the shared VM structure.
416 */
417VMMR0DECL(void) PGMR0DynMapTermVM(PVM pVM)
418{
419 /*
420 * Return immediately if we're not using the cache.
421 */
422 if (!pVM->pgm.s.pvR0DynMapUsed)
423 return;
424
425 PPGMR0DYNMAP pThis = g_pPGMR0DynMap;
426 AssertPtrReturnVoid(pThis);
427
428 int rc = RTSemFastMutexRequest(pThis->hInitLock);
429 AssertLogRelRCReturnVoid(rc);
430
431 if (pVM->pgm.s.pvR0DynMapUsed == pThis)
432 {
433 pVM->pgm.s.pvR0DynMapUsed = NULL;
434
435#ifdef VBOX_STRICT
436 PGMR0DynMapAssertIntegrity();
437#endif
438
439 /*
440 * Clean up and check the auto sets.
441 */
442 VMCPUID idCpu = pVM->cCPUs;
443 while (idCpu-- > 0)
444 {
445 PPGMMAPSET pSet = &pVM->aCpus[idCpu].pgm.s.AutoSet;
446 uint32_t j = pSet->cEntries;
447 if (j <= RT_ELEMENTS(pSet->aEntries))
448 {
449 /*
450 * The set is open, close it.
451 */
452 while (j-- > 0)
453 {
454 int32_t cRefs = pSet->aEntries[j].cRefs;
455 uint32_t iPage = pSet->aEntries[j].iPage;
456 LogRel(("PGMR0DynMapTermVM: %d dangling refs to %#x\n", cRefs, iPage));
457 if (iPage < pThis->cPages && cRefs > 0)
458 pgmR0DynMapReleasePage(pThis, iPage, cRefs);
459 else
460 AssertLogRelMsgFailed(("cRefs=%d iPage=%#x cPages=%u\n", cRefs, iPage, pThis->cPages));
461
462 pSet->aEntries[j].iPage = UINT16_MAX;
463 pSet->aEntries[j].cRefs = 0;
464 pSet->aEntries[j].pvPage = NULL;
465 pSet->aEntries[j].HCPhys = NIL_RTHCPHYS;
466 }
467 pSet->cEntries = PGMMAPSET_CLOSED;
468 pSet->iCpu = -1;
469 }
470 else
471 AssertMsg(j == PGMMAPSET_CLOSED, ("cEntries=%#x\n", j));
472
473 j = RT_ELEMENTS(pSet->aEntries);
474 while (j-- > 0)
475 {
476 Assert(pSet->aEntries[j].iPage == UINT16_MAX);
477 Assert(!pSet->aEntries[j].cRefs);
478 }
479 }
480
481 /*
482 * Release our reference to the mapping cache.
483 */
484 Assert(pThis->cUsers > 0);
485 pThis->cUsers--;
486 if (!pThis->cUsers)
487 pgmR0DynMapTearDown(pThis);
488 }
489 else
490 AssertLogRelMsgFailed(("pvR0DynMapUsed=%p pThis=%p\n", pVM->pgm.s.pvR0DynMapUsed, pThis));
491
492 RTSemFastMutexRelease(pThis->hInitLock);
493}
494
495
496/**
497 * Shoots down the TLBs for all the cache pages, pgmR0DynMapTearDown helper.
498 *
499 * @param idCpu The current CPU.
500 * @param pvUser1 The dynamic mapping cache instance.
501 * @param pvUser2 Unused, NULL.
502 */
503static DECLCALLBACK(void) pgmR0DynMapShootDownTlbs(RTCPUID idCpu, void *pvUser1, void *pvUser2)
504{
505 Assert(!pvUser2);
506 PPGMR0DYNMAP pThis = (PPGMR0DYNMAP)pvUser1;
507 Assert(pThis == g_pPGMR0DynMap);
508 PPGMR0DYNMAPENTRY paPages = pThis->paPages;
509 uint32_t iPage = pThis->cPages;
510 while (iPage-- > 0)
511 ASMInvalidatePage(paPages[iPage].pvPage);
512}
513
514
515/**
516 * Shoot down the TLBs for every single cache entry on all CPUs.
517 *
518 * @returns IPRT status code (RTMpOnAll).
519 * @param pThis The dynamic mapping cache instance.
520 */
521static int pgmR0DynMapTlbShootDown(PPGMR0DYNMAP pThis)
522{
523 int rc = RTMpOnAll(pgmR0DynMapShootDownTlbs, pThis, NULL);
524 AssertRC(rc);
525 if (RT_FAILURE(rc))
526 {
527 uint32_t iPage = pThis->cPages;
528 while (iPage-- > 0)
529 ASMInvalidatePage(pThis->paPages[iPage].pvPage);
530 }
531 return rc;
532}
533
534
535/**
536 * Calculate the new cache size based on cMaxLoad statistics.
537 *
538 * @returns Number of pages.
539 * @param pThis The dynamic mapping cache instance.
540 * @param pcMinPages The minimal size in pages.
541 */
542static uint32_t pgmR0DynMapCalcNewSize(PPGMR0DYNMAP pThis, uint32_t *pcMinPages)
543{
544 Assert(pThis->cPages <= PGMR0DYNMAP_MAX_PAGES);
545
546 /* cCpus * PGMR0DYNMAP_PAGES_PER_CPU(_MIN). */
547 RTCPUID cCpus = RTMpGetCount();
548 AssertReturn(cCpus > 0 && cCpus <= RTCPUSET_MAX_CPUS, 0);
549 uint32_t cPages = cCpus * PGMR0DYNMAP_PAGES_PER_CPU;
550 uint32_t cMinPages = cCpus * PGMR0DYNMAP_PAGES_PER_CPU_MIN;
551
552 /* adjust against cMaxLoad. */
553 AssertMsg(pThis->cMaxLoad <= PGMR0DYNMAP_MAX_PAGES, ("%#x\n", pThis->cMaxLoad));
554 if (pThis->cMaxLoad > PGMR0DYNMAP_MAX_PAGES)
555 pThis->cMaxLoad = 0;
556
557 while (pThis->cMaxLoad > PGMR0DYNMAP_CALC_OVERLOAD(cPages))
558 cPages += PGMR0DYNMAP_PAGES_PER_CPU;
559
560 if (pThis->cMaxLoad > cMinPages)
561 cMinPages = pThis->cMaxLoad;
562
563 /* adjust against max and current size. */
564 if (cPages < pThis->cPages)
565 cPages = pThis->cPages;
566 cPages *= PGMR0DYNMAP_GUARD_PAGES + 1;
567 if (cPages > PGMR0DYNMAP_MAX_PAGES)
568 cPages = PGMR0DYNMAP_MAX_PAGES;
569
570 if (cMinPages < pThis->cPages)
571 cMinPages = pThis->cPages;
572 cMinPages *= PGMR0DYNMAP_GUARD_PAGES + 1;
573 if (cMinPages > PGMR0DYNMAP_MAX_PAGES)
574 cMinPages = PGMR0DYNMAP_MAX_PAGES;
575
576 Assert(cMinPages);
577 *pcMinPages = cMinPages;
578 return cPages;
579}
580
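
To make the sizing policy above concrete, here is a minimal standalone sketch of the same arithmetic for an assumed 4-CPU host whose previous peak load was 600 pages. The constants mirror the #defines at the top of this file (strict build, so one guard page per data page); the adjustment against the current cache size is omitted for brevity.

#include <stdint.h>
#include <stdio.h>

#define MY_MAX_PAGES          ((16*1024*1024) >> 12)  /* mirrors PGMR0DYNMAP_MAX_PAGES (4096) */
#define MY_PAGES_PER_CPU      256                     /* mirrors PGMR0DYNMAP_PAGES_PER_CPU */
#define MY_PAGES_PER_CPU_MIN  32                      /* mirrors PGMR0DYNMAP_PAGES_PER_CPU_MIN */
#define MY_GUARD_PAGES        1                       /* mirrors PGMR0DYNMAP_GUARD_PAGES (strict) */
#define MY_CALC_OVERLOAD(c)   ((c) / 2)               /* mirrors PGMR0DYNMAP_CALC_OVERLOAD */

int main(void)
{
    uint32_t cCpus     = 4;     /* assumed host CPU count */
    uint32_t cMaxLoad  = 600;   /* assumed peak load recorded by a previous run */

    uint32_t cPages    = cCpus * MY_PAGES_PER_CPU;      /* 1024 */
    uint32_t cMinPages = cCpus * MY_PAGES_PER_CPU_MIN;  /*  128 */

    /* Grow until the recorded peak is below the 50% overload threshold. */
    while (cMaxLoad > MY_CALC_OVERLOAD(cPages))
        cPages += MY_PAGES_PER_CPU;                     /* 1024 -> 1280 (600 <= 640 stops it) */
    if (cMaxLoad > cMinPages)
        cMinPages = cMaxLoad;                           /* 600 */

    /* Make room for the interleaved guard pages and clamp to the maximum. */
    cPages    *= MY_GUARD_PAGES + 1;                    /* 2560 */
    cMinPages *= MY_GUARD_PAGES + 1;                    /* 1200 */
    if (cPages > MY_MAX_PAGES)    cPages    = MY_MAX_PAGES;
    if (cMinPages > MY_MAX_PAGES) cMinPages = MY_MAX_PAGES;

    printf("target=%u pages, minimum=%u pages\n", (unsigned)cPages, (unsigned)cMinPages);
    return 0;
}
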
581
582/**
583 * Initializes the paging level data.
584 *
585 * @param pThis The dynamic mapping cache instance.
586 * @param pPgLvl The paging level data.
587 */
588void pgmR0DynMapPagingArrayInit(PPGMR0DYNMAP pThis, PPGMR0DYNMAPPGLVL pPgLvl)
589{
590 RTCCUINTREG cr4 = ASMGetCR4();
591 switch (pThis->enmPgMode)
592 {
593 case SUPPAGINGMODE_32_BIT:
594 case SUPPAGINGMODE_32_BIT_GLOBAL:
595 pPgLvl->cLevels = 2;
596 pPgLvl->a[0].fPhysMask = X86_CR3_PAGE_MASK;
597 pPgLvl->a[0].fAndMask = X86_PDE_P | X86_PDE_RW | (cr4 & X86_CR4_PSE ? X86_PDE_PS : 0);
598 pPgLvl->a[0].fResMask = X86_PDE_P | X86_PDE_RW;
599 pPgLvl->a[0].fPtrMask = X86_PD_MASK;
600 pPgLvl->a[0].fPtrShift = X86_PD_SHIFT;
601
602 pPgLvl->a[1].fPhysMask = X86_PDE_PG_MASK;
603 pPgLvl->a[1].fAndMask = X86_PTE_P | X86_PTE_RW;
604 pPgLvl->a[1].fResMask = X86_PTE_P | X86_PTE_RW;
605 pPgLvl->a[1].fPtrMask = X86_PT_MASK;
606 pPgLvl->a[1].fPtrShift = X86_PT_SHIFT;
607 break;
608
609 case SUPPAGINGMODE_PAE:
610 case SUPPAGINGMODE_PAE_GLOBAL:
611 case SUPPAGINGMODE_PAE_NX:
612 case SUPPAGINGMODE_PAE_GLOBAL_NX:
613 pPgLvl->cLevels = 3;
614 pPgLvl->a[0].fPhysMask = X86_CR3_PAE_PAGE_MASK;
615 pPgLvl->a[0].fPtrMask = X86_PDPT_MASK_PAE;
616 pPgLvl->a[0].fPtrShift = X86_PDPT_SHIFT;
617 pPgLvl->a[0].fAndMask = X86_PDPE_P;
618 pPgLvl->a[0].fResMask = X86_PDPE_P;
619
620 pPgLvl->a[1].fPhysMask = X86_PDPE_PG_MASK;
621 pPgLvl->a[1].fPtrMask = X86_PD_PAE_MASK;
622 pPgLvl->a[1].fPtrShift = X86_PD_PAE_SHIFT;
623 pPgLvl->a[1].fAndMask = X86_PDE_P | X86_PDE_RW | (cr4 & X86_CR4_PSE ? X86_PDE_PS : 0);
624 pPgLvl->a[1].fResMask = X86_PDE_P | X86_PDE_RW;
625
626 pPgLvl->a[2].fPhysMask = X86_PDE_PAE_PG_MASK;
627 pPgLvl->a[2].fPtrMask = X86_PT_PAE_MASK;
628 pPgLvl->a[2].fPtrShift = X86_PT_PAE_SHIFT;
629 pPgLvl->a[2].fAndMask = X86_PTE_P | X86_PTE_RW;
630 pPgLvl->a[2].fResMask = X86_PTE_P | X86_PTE_RW;
631 break;
632
633 case SUPPAGINGMODE_AMD64:
634 case SUPPAGINGMODE_AMD64_GLOBAL:
635 case SUPPAGINGMODE_AMD64_NX:
636 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
637 pPgLvl->cLevels = 4;
638 pPgLvl->a[0].fPhysMask = X86_CR3_AMD64_PAGE_MASK;
639 pPgLvl->a[0].fPtrShift = X86_PML4_SHIFT;
640 pPgLvl->a[0].fPtrMask = X86_PML4_MASK;
641 pPgLvl->a[0].fAndMask = X86_PML4E_P | X86_PML4E_RW;
642 pPgLvl->a[0].fResMask = X86_PML4E_P | X86_PML4E_RW;
643
644 pPgLvl->a[1].fPhysMask = X86_PML4E_PG_MASK;
645 pPgLvl->a[1].fPtrShift = X86_PDPT_SHIFT;
646 pPgLvl->a[1].fPtrMask = X86_PDPT_MASK_AMD64;
647 pPgLvl->a[1].fAndMask = X86_PDPE_P | X86_PDPE_RW /** @todo check for X86_PDPT_PS support. */;
648 pPgLvl->a[1].fResMask = X86_PDPE_P | X86_PDPE_RW;
649
650 pPgLvl->a[2].fPhysMask = X86_PDPE_PG_MASK;
651 pPgLvl->a[2].fPtrShift = X86_PD_PAE_SHIFT;
652 pPgLvl->a[2].fPtrMask = X86_PD_PAE_MASK;
653 pPgLvl->a[2].fAndMask = X86_PDE_P | X86_PDE_RW | (cr4 & X86_CR4_PSE ? X86_PDE_PS : 0);
654 pPgLvl->a[2].fResMask = X86_PDE_P | X86_PDE_RW;
655
656 pPgLvl->a[3].fPhysMask = X86_PDE_PAE_PG_MASK;
657 pPgLvl->a[3].fPtrShift = X86_PT_PAE_SHIFT;
658 pPgLvl->a[3].fPtrMask = X86_PT_PAE_MASK;
659 pPgLvl->a[3].fAndMask = X86_PTE_P | X86_PTE_RW;
660 pPgLvl->a[3].fResMask = X86_PTE_P | X86_PTE_RW;
661 break;
662
663 default:
664 AssertFailed();
665 pPgLvl->cLevels = 0;
666 break;
667 }
668
669 for (uint32_t i = 0; i < 4; i++) /* ASSUMING array size. */
670 {
671 pPgLvl->a[i].HCPhys = NIL_RTHCPHYS;
672 pPgLvl->a[i].hMapObj = NIL_RTR0MEMOBJ;
673 pPgLvl->a[i].hMemObj = NIL_RTR0MEMOBJ;
674 pPgLvl->a[i].u.pv = NULL;
675 }
676}
677
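
The fPtrShift / fPtrMask pairs initialized above are consumed by pgmR0DynMapPagingArrayMapPte below as ((uintptr_t)pvPage >> fPtrShift) & fPtrMask. As a minimal standalone sketch, this decomposes an assumed kernel-space address into the per-level table indexes for the 4-level AMD64 layout; the shift values and the 9-bit mask are the architectural ones.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Standard 4-level x86-64 decomposition: 9 index bits per level, 512-entry tables. */
    static const struct { const char *pszName; unsigned cShift; } s_aLevels[] =
    {
        { "PML4", 39 }, { "PDPT", 30 }, { "PD", 21 }, { "PT", 12 },
    };
    uint64_t uPtr = UINT64_C(0xffffff8012345000);   /* assumed example mapping address */

    for (unsigned i = 0; i < 4; i++)
    {
        /* Equivalent of (pvPage >> fPtrShift) & fPtrMask for this level. */
        unsigned iEntry = (unsigned)((uPtr >> s_aLevels[i].cShift) & 0x1ff);
        printf("%-4s index: %#x\n", s_aLevels[i].pszName, iEntry);
    }
    return 0;
}
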
678
679/**
680 * Maps a PTE.
681 *
682 * This will update the segment structure when new PTs are mapped.
683 *
684 * It also assumes that we (for paranoid reasons) wish to establish a mapping
685 * chain from CR3 down to the PT that corresponds entirely to the processor
686 * we're currently running on, and goes about this by running with interrupts
687 * disabled and restarting from CR3 whenever a level has to be remapped.
688 *
689 * @returns VBox status code, VINF_TRY_AGAIN if we changed any mappings and had
690 * to re-enable interrupts.
691 * @param pThis The dynamic mapping cache instance.
692 * @param pPgLvl The paging level structure.
693 * @param pvPage The page.
694 * @param pSeg The segment.
695 * @param cMaxPTs The max number of PTs expected in the segment.
696 * @param ppvPTE Where to store the PTE address.
697 */
698static int pgmR0DynMapPagingArrayMapPte(PPGMR0DYNMAP pThis, PPGMR0DYNMAPPGLVL pPgLvl, void *pvPage,
699 PPGMR0DYNMAPSEG pSeg, uint32_t cMaxPTs, void **ppvPTE)
700{
701 Assert(!(ASMGetFlags() & X86_EFL_IF));
702 void *pvEntry = NULL;
703 X86PGPAEUINT uEntry = ASMGetCR3();
704 for (uint32_t i = 0; i < pPgLvl->cLevels; i++)
705 {
706 RTHCPHYS HCPhys = uEntry & pPgLvl->a[i].fPhysMask;
707 if (pPgLvl->a[i].HCPhys != HCPhys)
708 {
709 /*
710 * Need to remap this level.
711 * The final level, the PT, will not be freed since that is what it's all about.
712 */
713 ASMIntEnable();
714 if (i + 1 == pPgLvl->cLevels)
715 AssertReturn(pSeg->cPTs < cMaxPTs, VERR_INTERNAL_ERROR);
716 else
717 {
718 int rc2 = RTR0MemObjFree(pPgLvl->a[i].hMemObj, true /* fFreeMappings */); AssertRC(rc2);
719 pPgLvl->a[i].hMemObj = pPgLvl->a[i].hMapObj = NIL_RTR0MEMOBJ;
720 }
721
722 int rc = RTR0MemObjEnterPhys(&pPgLvl->a[i].hMemObj, HCPhys, PAGE_SIZE);
723 if (RT_SUCCESS(rc))
724 {
725 rc = RTR0MemObjMapKernel(&pPgLvl->a[i].hMapObj, pPgLvl->a[i].hMemObj,
726 (void *)-1 /* pvFixed */, 0 /* cbAlignment */,
727 RTMEM_PROT_WRITE | RTMEM_PROT_READ);
728 if (RT_SUCCESS(rc))
729 {
730 pPgLvl->a[i].u.pv = RTR0MemObjAddress(pPgLvl->a[i].hMapObj);
731 AssertMsg(((uintptr_t)pPgLvl->a[i].u.pv & ~(uintptr_t)PAGE_OFFSET_MASK), ("%p\n", pPgLvl->a[i].u.pv));
732 pPgLvl->a[i].HCPhys = HCPhys;
733 if (i + 1 == pPgLvl->cLevels)
734 pSeg->ahMemObjPTs[pSeg->cPTs++] = pPgLvl->a[i].hMemObj;
735 ASMIntDisable();
736 return VINF_TRY_AGAIN;
737 }
738
739 pPgLvl->a[i].hMapObj = NIL_RTR0MEMOBJ;
740 }
741 else
742 pPgLvl->a[i].hMemObj = NIL_RTR0MEMOBJ;
743 pPgLvl->a[i].HCPhys = NIL_RTHCPHYS;
744 return rc;
745 }
746
747 /*
748 * The next level.
749 */
750 uint32_t iEntry = ((uint64_t)(uintptr_t)pvPage >> pPgLvl->a[i].fPtrShift) & pPgLvl->a[i].fPtrMask;
751 if (pThis->fLegacyMode)
752 {
753 pvEntry = &pPgLvl->a[i].u.paLegacy[iEntry];
754 uEntry = pPgLvl->a[i].u.paLegacy[iEntry];
755 }
756 else
757 {
758 pvEntry = &pPgLvl->a[i].u.paPae[iEntry];
759 uEntry = pPgLvl->a[i].u.paPae[iEntry];
760 }
761
762 if ((uEntry & pPgLvl->a[i].fAndMask) != pPgLvl->a[i].fResMask)
763 {
764 LogRel(("PGMR0DynMap: internal error - iPgLvl=%u cLevels=%u uEntry=%#llx fAnd=%#llx fRes=%#llx got=%#llx\n"
765 "PGMR0DynMap: pv=%p pvPage=%p iEntry=%#x fLegacyMode=%RTbool\n",
766 i, pPgLvl->cLevels, uEntry, pPgLvl->a[i].fAndMask, pPgLvl->a[i].fResMask, uEntry & pPgLvl->a[i].fAndMask,
767 pPgLvl->a[i].u.pv, pvPage, iEntry, pThis->fLegacyMode));
768 return VERR_INTERNAL_ERROR;
769 }
770 /*Log(("#%d: iEntry=%4d uEntry=%#llx pvEntry=%p HCPhys=%RHp \n", i, iEntry, uEntry, pvEntry, pPgLvl->a[i].HCPhys));*/
771 }
772
773 /* made it thru without needing to remap anything. */
774 *ppvPTE = pvEntry;
775 return VINF_SUCCESS;
776}
777
778
779/**
780 * Sets up a guard page.
781 *
782 * @param pThis The dynamic mapping cache instance.
783 * @param pPage The page.
784 */
785DECLINLINE(void) pgmR0DynMapSetupGuardPage(PPGMR0DYNMAP pThis, PPGMR0DYNMAPENTRY pPage)
786{
787 memset(pPage->pvPage, 0xfd, PAGE_SIZE);
788 pPage->cRefs = PGMR0DYNMAP_GUARD_PAGE_REF_COUNT;
789 pPage->HCPhys = PGMR0DYNMAP_GUARD_PAGE_HCPHYS;
790#ifdef PGMR0DYNMAP_GUARD_NP
791 ASMAtomicBitClear(pPage->uPte.pv, X86_PTE_BIT_P);
792#else
793 if (pThis->fLegacyMode)
794 ASMAtomicWriteU32(&pPage->uPte.pLegacy->u, PGMR0DYNMAP_GUARD_PAGE_LEGACY_PTE);
795 else
796 ASMAtomicWriteU64(&pPage->uPte.pPae->u, PGMR0DYNMAP_GUARD_PAGE_PAE_PTE);
797#endif
798 pThis->cGuardPages++;
799}
800
801
802/**
803 * Adds a new segment of the specified size.
804 *
805 * @returns VBox status code.
806 * @param pThis The dynamic mapping cache instance.
807 * @param cPages The size of the new segment, given as a page count.
808 */
809static int pgmR0DynMapAddSeg(PPGMR0DYNMAP pThis, uint32_t cPages)
810{
811 int rc2;
812 AssertReturn(ASMGetFlags() & X86_EFL_IF, VERR_PREEMPT_DISABLED);
813
814 /*
815 * Do the array reallocations first.
816 * (The pages array has to be replaced behind the spinlock of course.)
817 */
818 void *pvSavedPTEs = RTMemRealloc(pThis->pvSavedPTEs, (pThis->fLegacyMode ? sizeof(X86PGUINT) : sizeof(X86PGPAEUINT)) * (pThis->cPages + cPages));
819 if (!pvSavedPTEs)
820 return VERR_NO_MEMORY;
821 pThis->pvSavedPTEs = pvSavedPTEs;
822
823 void *pvPages = RTMemAllocZ(sizeof(pThis->paPages[0]) * (pThis->cPages + cPages));
824 if (!pvPages)
825 {
826 pvSavedPTEs = RTMemRealloc(pThis->pvSavedPTEs, (pThis->fLegacyMode ? sizeof(X86PGUINT) : sizeof(X86PGPAEUINT)) * pThis->cPages);
827 if (pvSavedPTEs)
828 pThis->pvSavedPTEs = pvSavedPTEs;
829 return VERR_NO_MEMORY;
830 }
831
832 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
833 RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
834
835 memcpy(pvPages, pThis->paPages, sizeof(pThis->paPages[0]) * pThis->cPages);
836 void *pvToFree = pThis->paPages;
837 pThis->paPages = (PPGMR0DYNMAPENTRY)pvPages;
838
839 RTSpinlockRelease(pThis->hSpinlock, &Tmp);
840 RTMemFree(pvToFree);
841
842 /*
843 * Allocate the segment structure and pages of memory, then touch all the pages (paranoia).
844 */
845 uint32_t cMaxPTs = cPages / (pThis->fLegacyMode ? X86_PG_ENTRIES : X86_PG_PAE_ENTRIES) + 2;
846 PPGMR0DYNMAPSEG pSeg = (PPGMR0DYNMAPSEG)RTMemAllocZ(RT_UOFFSETOF(PGMR0DYNMAPSEG, ahMemObjPTs[cMaxPTs]));
847 if (!pSeg)
848 return VERR_NO_MEMORY;
849 pSeg->pNext = NULL;
850 pSeg->cPages = cPages;
851 pSeg->iPage = pThis->cPages;
852 pSeg->cPTs = 0;
853 int rc = RTR0MemObjAllocPage(&pSeg->hMemObj, cPages << PAGE_SHIFT, false);
854 if (RT_SUCCESS(rc))
855 {
856 uint8_t *pbPage = (uint8_t *)RTR0MemObjAddress(pSeg->hMemObj);
857 AssertMsg(VALID_PTR(pbPage) && !((uintptr_t)pbPage & PAGE_OFFSET_MASK), ("%p\n", pbPage));
858 memset(pbPage, 0xfe, cPages << PAGE_SHIFT);
859
860 /*
861 * Walk thru the pages and set them up with a mapping of their PTE and everything.
862 */
863 ASMIntDisable();
864 PGMR0DYNMAPPGLVL PgLvl;
865 pgmR0DynMapPagingArrayInit(pThis, &PgLvl);
866 uint32_t const iEndPage = pSeg->iPage + cPages;
867 for (uint32_t iPage = pSeg->iPage;
868 iPage < iEndPage;
869 iPage++, pbPage += PAGE_SIZE)
870 {
871 /* Initialize the page data. */
872 pThis->paPages[iPage].HCPhys = NIL_RTHCPHYS;
873 pThis->paPages[iPage].pvPage = pbPage;
874 pThis->paPages[iPage].cRefs = 0;
875 pThis->paPages[iPage].uPte.pPae = 0;
876 RTCpuSetFill(&pThis->paPages[iPage].PendingSet);
877
878 /* Map its page table, retry until we've got a clean run (paranoia). */
879 do
880 rc = pgmR0DynMapPagingArrayMapPte(pThis, &PgLvl, pbPage, pSeg, cMaxPTs,
881 &pThis->paPages[iPage].uPte.pv);
882 while (rc == VINF_TRY_AGAIN);
883 if (RT_FAILURE(rc))
884 break;
885
886 /* Save the PTE. */
887 if (pThis->fLegacyMode)
888 ((PX86PGUINT)pThis->pvSavedPTEs)[iPage] = pThis->paPages[iPage].uPte.pLegacy->u;
889 else
890 ((PX86PGPAEUINT)pThis->pvSavedPTEs)[iPage] = pThis->paPages[iPage].uPte.pPae->u;
891
892#ifdef VBOX_STRICT
893 /* Check that we've got the right entry. */
894 RTHCPHYS HCPhysPage = RTR0MemObjGetPagePhysAddr(pSeg->hMemObj, iPage - pSeg->iPage);
895 RTHCPHYS HCPhysPte = pThis->fLegacyMode
896 ? pThis->paPages[iPage].uPte.pLegacy->u & X86_PTE_PG_MASK
897 : pThis->paPages[iPage].uPte.pPae->u & X86_PTE_PAE_PG_MASK;
898 if (HCPhysPage != HCPhysPte)
899 {
900 LogRel(("pgmR0DynMapAddSeg: internal error - page #%u HCPhysPage=%RHp HCPhysPte=%RHp pbPage=%p pvPte=%p\n",
901 iPage - pSeg->iPage, HCPhysPage, HCPhysPte, pbPage, pThis->paPages[iPage].uPte.pv));
902 rc = VERR_INTERNAL_ERROR;
903 break;
904 }
905#endif
906 } /* for each page */
907 ASMIntEnable();
908
909 /* cleanup non-PT mappings */
910 for (uint32_t i = 0; i < PgLvl.cLevels - 1; i++)
911 RTR0MemObjFree(PgLvl.a[i].hMemObj, true /* fFreeMappings */);
912
913 if (RT_SUCCESS(rc))
914 {
915#if PGMR0DYNMAP_GUARD_PAGES > 0
916 /*
917 * Setup guard pages.
918 * (Note: TLBs will be shot down later on.)
919 */
920 uint32_t iPage = pSeg->iPage;
921 while (iPage < iEndPage)
922 {
923 for (uint32_t iGPg = 0; iGPg < PGMR0DYNMAP_GUARD_PAGES && iPage < iEndPage; iGPg++, iPage++)
924 pgmR0DynMapSetupGuardPage(pThis, &pThis->paPages[iPage]);
925 iPage++; /* the guarded page */
926 }
927
928 /* Make sure the very last page is a guard page too. */
929 iPage = iEndPage - 1;
930 if (pThis->paPages[iPage].cRefs != PGMR0DYNMAP_GUARD_PAGE_REF_COUNT)
931 pgmR0DynMapSetupGuardPage(pThis, &pThis->paPages[iPage]);
932#endif /* PGMR0DYNMAP_GUARD_PAGES > 0 */
933
934 /*
935 * Commit it by adding the segment to the list and updating the page count.
936 */
937 pSeg->pNext = pThis->pSegHead;
938 pThis->pSegHead = pSeg;
939 pThis->cPages += cPages;
940 return VINF_SUCCESS;
941 }
942
943 /*
944 * Bail out.
945 */
946 while (pSeg->cPTs-- > 0)
947 {
948 rc2 = RTR0MemObjFree(pSeg->ahMemObjPTs[pSeg->cPTs], true /* fFreeMappings */);
949 AssertRC(rc2);
950 pSeg->ahMemObjPTs[pSeg->cPTs] = NIL_RTR0MEMOBJ;
951 }
952
953 rc2 = RTR0MemObjFree(pSeg->hMemObj, true /* fFreeMappings */);
954 AssertRC(rc2);
955 pSeg->hMemObj = NIL_RTR0MEMOBJ;
956 }
957 RTMemFree(pSeg);
958
959 /* Don't bother resizing the arrays, but free them if we're the only user. */
960 if (!pThis->cPages)
961 {
962 RTMemFree(pThis->paPages);
963 pThis->paPages = NULL;
964 RTMemFree(pThis->pvSavedPTEs);
965 pThis->pvSavedPTEs = NULL;
966 }
967 return rc;
968}
969
970
971/**
972 * Called by PGMR0DynMapInitVM under the init lock.
973 *
974 * @returns VBox status code.
975 * @param pThis The dynamic mapping cache instance.
976 */
977static int pgmR0DynMapSetup(PPGMR0DYNMAP pThis)
978{
979 /*
980 * Calc the size and add a segment of that size.
981 */
982 uint32_t cMinPages;
983 uint32_t cPages = pgmR0DynMapCalcNewSize(pThis, &cMinPages);
984 AssertReturn(cPages, VERR_INTERNAL_ERROR);
985 int rc = pgmR0DynMapAddSeg(pThis, cPages);
986 if (rc == VERR_NO_MEMORY)
987 {
988 /*
989 * Try adding smaller segments.
990 */
991 do
992 rc = pgmR0DynMapAddSeg(pThis, PGMR0DYNMAP_SMALL_SEG_PAGES);
993 while (RT_SUCCESS(rc) && pThis->cPages < cPages);
994 if (rc == VERR_NO_MEMORY && pThis->cPages >= cMinPages)
995 rc = VINF_SUCCESS;
996 if (rc == VERR_NO_MEMORY)
997 {
998 if (pThis->cPages)
999 pgmR0DynMapTearDown(pThis);
1000 rc = VERR_PGM_DYNMAP_SETUP_ERROR;
1001 }
1002 }
1003 Assert(ASMGetFlags() & X86_EFL_IF);
1004
1005#if PGMR0DYNMAP_GUARD_PAGES > 0
1006 /* paranoia */
1007 if (RT_SUCCESS(rc))
1008 pgmR0DynMapTlbShootDown(pThis);
1009#endif
1010 return rc;
1011}
1012
1013
1014/**
1015 * Called by PGMR0DynMapInitVM under the init lock.
1016 *
1017 * @returns VBox status code.
1018 * @param pThis The dynamic mapping cache instance.
1019 */
1020static int pgmR0DynMapExpand(PPGMR0DYNMAP pThis)
1021{
1022 /*
1023 * Calc the new target size and add a segment of the appropriate size.
1024 */
1025 uint32_t cMinPages;
1026 uint32_t cPages = pgmR0DynMapCalcNewSize(pThis, &cMinPages);
1027 AssertReturn(cPages, VERR_INTERNAL_ERROR);
1028 if (pThis->cPages >= cPages)
1029 return VINF_SUCCESS;
1030
1031 uint32_t cAdd = cPages - pThis->cPages;
1032 int rc = pgmR0DynMapAddSeg(pThis, cAdd);
1033 if (rc == VERR_NO_MEMORY)
1034 {
1035 /*
1036 * Try adding smaller segments.
1037 */
1038 do
1039 rc = pgmR0DynMapAddSeg(pThis, PGMR0DYNMAP_SMALL_SEG_PAGES);
1040 while (RT_SUCCESS(rc) && pThis->cPages < cPages);
1041 if (rc == VERR_NO_MEMORY && pThis->cPages >= cMinPages)
1042 rc = VINF_SUCCESS;
1043 if (rc == VERR_NO_MEMORY)
1044 rc = VERR_PGM_DYNMAP_EXPAND_ERROR;
1045 }
1046 Assert(ASMGetFlags() & X86_EFL_IF);
1047
1048#if PGMR0DYNMAP_GUARD_PAGES > 0
1049 /* paranoia */
1050 if (RT_SUCCESS(rc))
1051 pgmR0DynMapTlbShootDown(pThis);
1052#endif
1053 return rc;
1054}
1055
1056
1057/**
1058 * Called by PGMR0DynMapTermVM under the init lock.
1059 *
1061 * @param pThis The dynamic mapping cache instance.
1062 */
1063static void pgmR0DynMapTearDown(PPGMR0DYNMAP pThis)
1064{
1065 /*
1066 * Restore the original page table entries
1067 */
1068 PPGMR0DYNMAPENTRY paPages = pThis->paPages;
1069 uint32_t iPage = pThis->cPages;
1070 if (pThis->fLegacyMode)
1071 {
1072 X86PGUINT const *paSavedPTEs = (X86PGUINT const *)pThis->pvSavedPTEs;
1073 while (iPage-- > 0)
1074 {
1075 X86PGUINT uOld = paPages[iPage].uPte.pLegacy->u;
1076 X86PGUINT uOld2 = uOld; NOREF(uOld2);
1077 X86PGUINT uNew = paSavedPTEs[iPage];
1078 while (!ASMAtomicCmpXchgExU32(&paPages[iPage].uPte.pLegacy->u, uNew, uOld, &uOld))
1079 AssertMsgFailed(("uOld=%#x uOld2=%#x uNew=%#x\n", uOld, uOld2, uNew));
1080 Assert(paPages[iPage].uPte.pLegacy->u == paSavedPTEs[iPage]);
1081 }
1082 }
1083 else
1084 {
1085 X86PGPAEUINT const *paSavedPTEs = (X86PGPAEUINT const *)pThis->pvSavedPTEs;
1086 while (iPage-- > 0)
1087 {
1088 X86PGPAEUINT uOld = paPages[iPage].uPte.pPae->u;
1089 X86PGPAEUINT uOld2 = uOld; NOREF(uOld2);
1090 X86PGPAEUINT uNew = paSavedPTEs[iPage];
1091 while (!ASMAtomicCmpXchgExU64(&paPages[iPage].uPte.pPae->u, uNew, uOld, &uOld))
1092 AssertMsgFailed(("uOld=%#llx uOld2=%#llx uNew=%#llx\n", uOld, uOld2, uNew));
1093 Assert(paPages[iPage].uPte.pPae->u == paSavedPTEs[iPage]);
1094 }
1095 }
1096
1097 /*
1098 * Shoot down the TLBs on all CPUs before freeing them.
1099 */
1100 pgmR0DynMapTlbShootDown(pThis);
1101
1102 /*
1103 * Free the segments.
1104 */
1105 while (pThis->pSegHead)
1106 {
1107 int rc;
1108 PPGMR0DYNMAPSEG pSeg = pThis->pSegHead;
1109 pThis->pSegHead = pSeg->pNext;
1110
1111 uint32_t iPT = pSeg->cPTs;
1112 while (iPT-- > 0)
1113 {
1114 rc = RTR0MemObjFree(pSeg->ahMemObjPTs[iPT], true /* fFreeMappings */); AssertRC(rc);
1115 pSeg->ahMemObjPTs[iPT] = NIL_RTR0MEMOBJ;
1116 }
1117 rc = RTR0MemObjFree(pSeg->hMemObj, true /* fFreeMappings */); AssertRC(rc);
1118 pSeg->hMemObj = NIL_RTR0MEMOBJ;
1119 pSeg->pNext = NULL;
1120 pSeg->iPage = UINT16_MAX;
1121 pSeg->cPages = 0;
1122 pSeg->cPTs = 0;
1123 RTMemFree(pSeg);
1124 }
1125
1126 /*
1127 * Free the arrays and restore the initial state.
1128 * The cMaxLoad value is left behind for the next setup.
1129 */
1130 RTMemFree(pThis->paPages);
1131 pThis->paPages = NULL;
1132 RTMemFree(pThis->pvSavedPTEs);
1133 pThis->pvSavedPTEs = NULL;
1134 pThis->cPages = 0;
1135 pThis->cLoad = 0;
1136 pThis->cGuardPages = 0;
1137}
1138
1139
1140/**
1141 * Release references to a page, caller owns the spin lock.
1142 *
1143 * @param pThis The dynamic mapping cache instance.
1144 * @param iPage The page.
1145 * @param cRefs The number of references to release.
1146 */
1147DECLINLINE(void) pgmR0DynMapReleasePageLocked(PPGMR0DYNMAP pThis, uint32_t iPage, int32_t cRefs)
1148{
1149 cRefs = ASMAtomicSubS32(&pThis->paPages[iPage].cRefs, cRefs) - cRefs;
1150 AssertMsg(cRefs >= 0, ("%d\n", cRefs));
1151 if (!cRefs)
1152 pThis->cLoad--;
1153}
1154
1155
1156/**
1157 * Release references to a page, caller does not own the spin lock.
1158 *
1159 * @param pThis The dynamic mapping cache instance.
1160 * @param iPage The page.
1161 * @param cRefs The number of references to release.
1162 */
1163static void pgmR0DynMapReleasePage(PPGMR0DYNMAP pThis, uint32_t iPage, uint32_t cRefs)
1164{
1165 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1166 RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
1167 pgmR0DynMapReleasePageLocked(pThis, iPage, cRefs);
1168 RTSpinlockRelease(pThis->hSpinlock, &Tmp);
1169}
1170
1171
1172/**
1173 * pgmR0DynMapPage worker that deals with the tedious bits.
1174 *
1175 * @returns The page index on success, UINT32_MAX on failure.
1176 * @param pThis The dynamic mapping cache instance.
1177 * @param HCPhys The address of the page to be mapped.
1178 * @param iPage The page index pgmR0DynMapPage hashed HCPhys to.
1179 * @param pVM The shared VM structure, for statistics only.
1180 */
1181static uint32_t pgmR0DynMapPageSlow(PPGMR0DYNMAP pThis, RTHCPHYS HCPhys, uint32_t iPage, PVM pVM)
1182{
1183 STAM_COUNTER_INC(&pVM->pgm.s.StatR0DynMapPageSlow);
1184
1185 /*
1186 * Check if any of the first 3 pages are unreferenced since the caller
1187 * already has made sure they aren't matching.
1188 */
1189#ifdef VBOX_WITH_STATISTICS
1190 bool fLooped = false;
1191#endif
1192 uint32_t const cPages = pThis->cPages;
1193 PPGMR0DYNMAPENTRY paPages = pThis->paPages;
1194 uint32_t iFreePage;
1195 if (!paPages[iPage].cRefs)
1196 iFreePage = iPage;
1197 else if (!paPages[(iPage + 1) % cPages].cRefs)
1198 iFreePage = (iPage + 1) % cPages;
1199 else if (!paPages[(iPage + 2) % cPages].cRefs)
1200 iFreePage = (iPage + 2) % cPages;
1201 else
1202 {
1203 /*
1204 * Search for an unused or matching entry.
1205 */
1206 iFreePage = (iPage + 3) % cPages;
1207 for (;;)
1208 {
1209 if (paPages[iFreePage].HCPhys == HCPhys)
1210 {
1211 STAM_COUNTER_INC(&pVM->pgm.s.StatR0DynMapPageSlowLoopHits);
1212 return iFreePage;
1213 }
1214 if (!paPages[iFreePage].cRefs)
1215 break;
1216
1217 /* advance */
1218 iFreePage = (iFreePage + 1) % cPages;
1219 if (RT_UNLIKELY(iFreePage == iPage))
1220 return UINT32_MAX;
1221 }
1222 STAM_COUNTER_INC(&pVM->pgm.s.StatR0DynMapPageSlowLoopMisses);
1223#ifdef VBOX_WITH_STATISTICS
1224 fLooped = true;
1225#endif
1226 }
1227 Assert(iFreePage < cPages);
1228
1229#if 0 //def VBOX_WITH_STATISTICS
1230 /* Check for lost hits. */
1231 if (!fLooped)
1232 for (uint32_t iPage2 = (iPage + 3) % cPages; iPage2 != iPage; iPage2 = (iPage2 + 1) % cPages)
1233 if (paPages[iPage2].HCPhys == HCPhys)
1234 STAM_COUNTER_INC(&pVM->pgm.s.StatR0DynMapPageSlowLostHits);
1235#endif
1236
1237 /*
1238 * Setup the new entry.
1239 */
1240 /*Log6(("pgmR0DynMapPageSlow: old - %RHp %#x %#llx\n", paPages[iFreePage].HCPhys, paPages[iFreePage].cRefs, paPages[iFreePage].uPte.pPae->u));*/
1241 paPages[iFreePage].HCPhys = HCPhys;
1242 RTCpuSetFill(&paPages[iFreePage].PendingSet);
1243 if (pThis->fLegacyMode)
1244 {
1245 X86PGUINT uOld = paPages[iFreePage].uPte.pLegacy->u;
1246 X86PGUINT uOld2 = uOld; NOREF(uOld2);
1247 X86PGUINT uNew = (uOld & (X86_PTE_G | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT))
1248 | X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D
1249 | (HCPhys & X86_PTE_PG_MASK);
1250 while (!ASMAtomicCmpXchgExU32(&paPages[iFreePage].uPte.pLegacy->u, uNew, uOld, &uOld))
1251 AssertMsgFailed(("uOld=%#x uOld2=%#x uNew=%#x\n", uOld, uOld2, uNew));
1252 Assert(paPages[iFreePage].uPte.pLegacy->u == uNew);
1253 }
1254 else
1255 {
1256 X86PGPAEUINT uOld = paPages[iFreePage].uPte.pPae->u;
1257 X86PGPAEUINT uOld2 = uOld; NOREF(uOld2);
1258 X86PGPAEUINT uNew = (uOld & (X86_PTE_G | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT))
1259 | X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D
1260 | (HCPhys & X86_PTE_PAE_PG_MASK);
1261 while (!ASMAtomicCmpXchgExU64(&paPages[iFreePage].uPte.pPae->u, uNew, uOld, &uOld))
1262 AssertMsgFailed(("uOld=%#llx uOld2=%#llx uNew=%#llx\n", uOld, uOld2, uNew));
1263 Assert(paPages[iFreePage].uPte.pPae->u == uNew);
1264 /*Log6(("pgmR0DynMapPageSlow: #%x - %RHp %p %#llx\n", iFreePage, HCPhys, paPages[iFreePage].pvPage, uNew));*/
1265 }
1266 return iFreePage;
1267}
1268
1269
1270/**
1271 * Maps a page into the pool.
1272 *
1273 * @returns Page index on success, UINT32_MAX on failure.
1274 * @param pThis The dynamic mapping cache instance.
1275 * @param HCPhys The address of the page to be mapped.
1276 * @param iRealCpu The real cpu set index. (optimization)
1277 * @param pVM The shared VM structure, for statistics only.
1278 * @param ppvPage Where to store the page address.
1279 */
1280DECLINLINE(uint32_t) pgmR0DynMapPage(PPGMR0DYNMAP pThis, RTHCPHYS HCPhys, int32_t iRealCpu, PVM pVM, void **ppvPage)
1281{
1282 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1283 RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
1284 AssertMsg(!(HCPhys & PAGE_OFFSET_MASK), ("HCPhys=%RHp\n", HCPhys));
1285 STAM_COUNTER_INC(&pVM->pgm.s.StatR0DynMapPage);
1286
1287 /*
1288 * Find an entry, if possible a matching one. The HCPhys address is hashed
1289 * down to a page index; collisions are handled by linear searching.
1290 * Optimized for a hit in the first 3 pages.
1291 *
1292 * Do the cheap hits here and defer the tedious searching and inserting
1293 * to a helper function.
1294 */
1295 uint32_t const cPages = pThis->cPages;
1296 uint32_t iPage = (HCPhys >> PAGE_SHIFT) % cPages;
1297 PPGMR0DYNMAPENTRY paPages = pThis->paPages;
1298 if (RT_LIKELY(paPages[iPage].HCPhys == HCPhys))
1299 STAM_COUNTER_INC(&pVM->pgm.s.StatR0DynMapPageHits0);
1300 else
1301 {
1302 uint32_t iPage2 = (iPage + 1) % cPages;
1303 if (RT_LIKELY(paPages[iPage2].HCPhys == HCPhys))
1304 {
1305 iPage = iPage2;
1306 STAM_COUNTER_INC(&pVM->pgm.s.StatR0DynMapPageHits1);
1307 }
1308 else
1309 {
1310 iPage2 = (iPage + 2) % cPages;
1311 if (paPages[iPage2].HCPhys == HCPhys)
1312 {
1313 iPage = iPage2;
1314 STAM_COUNTER_INC(&pVM->pgm.s.StatR0DynMapPageHits2);
1315 }
1316 else
1317 {
1318 iPage = pgmR0DynMapPageSlow(pThis, HCPhys, iPage, pVM);
1319 if (RT_UNLIKELY(iPage == UINT32_MAX))
1320 {
1321 RTSpinlockRelease(pThis->hSpinlock, &Tmp);
1322 return iPage;
1323 }
1324 }
1325 }
1326 }
1327
1328 /*
1329 * Reference it, update statistics and get the return address.
1330 */
1331 int32_t cRefs = ASMAtomicIncS32(&paPages[iPage].cRefs);
1332 if (cRefs == 1)
1333 {
1334 pThis->cLoad++;
1335 if (pThis->cLoad > pThis->cMaxLoad)
1336 pThis->cMaxLoad = pThis->cLoad;
1337 AssertMsg(pThis->cLoad <= pThis->cPages - pThis->cGuardPages, ("%d/%d\n", pThis->cLoad, pThis->cPages - pThis->cGuardPages));
1338 }
1339 else if (RT_UNLIKELY(cRefs <= 0))
1340 {
1341 ASMAtomicDecS32(&paPages[iPage].cRefs);
1342 RTSpinlockRelease(pThis->hSpinlock, &Tmp);
1343 AssertLogRelMsgFailedReturn(("cRefs=%d iPage=%p HCPhys=%RHp\n", cRefs, iPage, HCPhys), UINT32_MAX);
1344 }
1345 void *pvPage = paPages[iPage].pvPage;
1346
1347 /*
1348 * Invalidate the entry?
1349 */
1350 bool fInvalidateIt = RTCpuSetIsMemberByIndex(&paPages[iPage].PendingSet, iRealCpu);
1351 if (RT_UNLIKELY(fInvalidateIt))
1352 RTCpuSetDelByIndex(&paPages[iPage].PendingSet, iRealCpu);
1353
1354 RTSpinlockRelease(pThis->hSpinlock, &Tmp);
1355
1356 /*
1357 * Do the actual invalidation outside the spinlock.
1358 */
1359 if (RT_UNLIKELY(fInvalidateIt))
1360 {
1361 STAM_COUNTER_INC(&pVM->pgm.s.StatR0DynMapPageInvlPg);
1362 ASMInvalidatePage(pvPage);
1363 }
1364
1365 *ppvPage = pvPage;
1366 return iPage;
1367}
1368
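
The lookup above boils down to hashing the page frame number modulo the cache size and probing linearly on collisions. Here is a minimal standalone sketch of that scheme with an assumed 8-slot cache; the real cache also tracks reference counts, PTEs and pending invalidations, all omitted here.

#include <stdint.h>
#include <stdio.h>

#define CACHE_PAGES 8   /* tiny cache for illustration; the real one holds thousands of pages */

static uint64_t g_aHCPhys[CACHE_PAGES];   /* 0 = unused slot */

/* Returns the slot for HCPhys, reusing a matching slot or claiming a free one; -1 if full. */
static int LookupOrInsert(uint64_t HCPhys)
{
    unsigned iPage = (unsigned)((HCPhys >> 12) % CACHE_PAGES);  /* hash: PFN modulo cache size */
    for (unsigned cProbes = 0; cProbes < CACHE_PAGES; cProbes++)
    {
        unsigned i = (iPage + cProbes) % CACHE_PAGES;           /* linear probing on collision */
        if (g_aHCPhys[i] == HCPhys)
            return (int)i;                                      /* hit */
        if (!g_aHCPhys[i])
        {
            g_aHCPhys[i] = HCPhys;                              /* claim the free slot */
            return (int)i;
        }
    }
    return -1;                                                  /* cache full */
}

int main(void)
{
    printf("%d\n", LookupOrInsert(UINT64_C(0x0000000123456000)));  /* claims slot 6 */
    printf("%d\n", LookupOrInsert(UINT64_C(0x0000000123456000)));  /* same page -> same slot */
    printf("%d\n", LookupOrInsert(UINT64_C(0x000000012345e000)));  /* collides, probes to 7 */
    return 0;
}
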
1369
1370/**
1371 * Assert the integrity of the pool.
1372 *
1373 * @returns VBox status code.
1374 */
1375VMMR0DECL(int) PGMR0DynMapAssertIntegrity(void)
1376{
1377 /*
1378 * Basic pool stuff that doesn't require any lock, just assumes we're a user.
1379 */
1380 PPGMR0DYNMAP pThis = g_pPGMR0DynMap;
1381 if (!pThis)
1382 return VINF_SUCCESS;
1383 AssertPtrReturn(pThis, VERR_INVALID_POINTER);
1384 AssertReturn(pThis->u32Magic == PGMR0DYNMAP_MAGIC, VERR_INVALID_MAGIC);
1385 if (!pThis->cUsers)
1386 return VERR_INVALID_PARAMETER;
1387
1388
1389 int rc = VINF_SUCCESS;
1390 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1391 RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
1392
1393#define CHECK_RET(expr, a) \
1394 do { \
1395 if (RT_UNLIKELY(!(expr))) \
1396 { \
1397 RTSpinlockRelease(pThis->hSpinlock, &Tmp); \
1398 AssertMsg1(#expr, __LINE__, __FILE__, __PRETTY_FUNCTION__); \
1399 AssertMsg2 a; \
1400 return VERR_INTERNAL_ERROR; \
1401 } \
1402 } while (0)
1403
1404 /*
1405 * Check that the PTEs are correct.
1406 */
1407 uint32_t cGuard = 0;
1408 uint32_t cLoad = 0;
1409 PPGMR0DYNMAPENTRY paPages = pThis->paPages;
1410 uint32_t iPage = pThis->cPages;
1411 if (pThis->fLegacyMode)
1412 {
1413 PCX86PGUINT paSavedPTEs = (PCX86PGUINT)pThis->pvSavedPTEs; NOREF(paSavedPTEs);
1414 while (iPage-- > 0)
1415 {
1416 CHECK_RET(!((uintptr_t)paPages[iPage].pvPage & PAGE_OFFSET_MASK), ("#%u: %p\n", iPage, paPages[iPage].pvPage));
1417 if ( paPages[iPage].cRefs == PGMR0DYNMAP_GUARD_PAGE_REF_COUNT
1418 && paPages[iPage].HCPhys == PGMR0DYNMAP_GUARD_PAGE_HCPHYS)
1419 {
1420#ifdef PGMR0DYNMAP_GUARD_NP
1421 CHECK_RET(paPages[iPage].uPte.pLegacy->u == (paSavedPTEs[iPage] & ~(X86PGUINT)X86_PTE_P),
1422 ("#%u: %#x %#x", iPage, paPages[iPage].uPte.pLegacy->u, paSavedPTEs[iPage]));
1423#else
1424 CHECK_RET(paPages[iPage].uPte.pLegacy->u == PGMR0DYNMAP_GUARD_PAGE_LEGACY_PTE,
1425 ("#%u: %#x", iPage, paPages[iPage].uPte.pLegacy->u));
1426#endif
1427 cGuard++;
1428 }
1429 else if (paPages[iPage].HCPhys != NIL_RTHCPHYS)
1430 {
1431 CHECK_RET(!(paPages[iPage].HCPhys & PAGE_OFFSET_MASK), ("#%u: %RHp\n", iPage, paPages[iPage].HCPhys));
1432 X86PGUINT uPte = (paSavedPTEs[iPage] & (X86_PTE_G | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT))
1433 | X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D
1434 | (paPages[iPage].HCPhys & X86_PTE_PAE_PG_MASK);
1435 CHECK_RET(paPages[iPage].uPte.pLegacy->u == uPte,
1436 ("#%u: %#x %#x", iPage, paPages[iPage].uPte.pLegacy->u, uPte));
1437 if (paPages[iPage].cRefs)
1438 cLoad++;
1439 }
1440 else
1441 CHECK_RET(paPages[iPage].uPte.pLegacy->u == paSavedPTEs[iPage],
1442 ("#%u: %#x %#x", iPage, paPages[iPage].uPte.pLegacy->u, paSavedPTEs[iPage]));
1443 }
1444 }
1445 else
1446 {
1447 PCX86PGPAEUINT paSavedPTEs = (PCX86PGPAEUINT)pThis->pvSavedPTEs; NOREF(paSavedPTEs);
1448 while (iPage-- > 0)
1449 {
1450 CHECK_RET(!((uintptr_t)paPages[iPage].pvPage & PAGE_OFFSET_MASK), ("#%u: %p\n", iPage, paPages[iPage].pvPage));
1451 if ( paPages[iPage].cRefs == PGMR0DYNMAP_GUARD_PAGE_REF_COUNT
1452 && paPages[iPage].HCPhys == PGMR0DYNMAP_GUARD_PAGE_HCPHYS)
1453 {
1454#ifdef PGMR0DYNMAP_GUARD_NP
1455 CHECK_RET(paPages[iPage].uPte.pPae->u == (paSavedPTEs[iPage] & ~(X86PGPAEUINT)X86_PTE_P),
1456 ("#%u: %#llx %#llx", iPage, paPages[iPage].uPte.pPae->u, paSavedPTEs[iPage]));
1457#else
1458 CHECK_RET(paPages[iPage].uPte.pPae->u == PGMR0DYNMAP_GUARD_PAGE_PAE_PTE,
1459 ("#%u: %#llx", iPage, paPages[iPage].uPte.pPae->u));
1460#endif
1461 cGuard++;
1462 }
1463 else if (paPages[iPage].HCPhys != NIL_RTHCPHYS)
1464 {
1465 CHECK_RET(!(paPages[iPage].HCPhys & PAGE_OFFSET_MASK), ("#%u: %RHp\n", iPage, paPages[iPage].HCPhys));
1466 X86PGPAEUINT uPte = (paSavedPTEs[iPage] & (X86_PTE_G | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT))
1467 | X86_PTE_P | X86_PTE_RW | X86_PTE_A | X86_PTE_D
1468 | (paPages[iPage].HCPhys & X86_PTE_PAE_PG_MASK);
1469 CHECK_RET(paPages[iPage].uPte.pPae->u == uPte,
1470 ("#%u: %#llx %#llx", iPage, paPages[iPage].uPte.pPae->u, uPte));
1471 if (paPages[iPage].cRefs)
1472 cLoad++;
1473 }
1474 else
1475 CHECK_RET(paPages[iPage].uPte.pPae->u == paSavedPTEs[iPage],
1476 ("#%u: %#llx %#llx", iPage, paPages[iPage].uPte.pPae->u, paSavedPTEs[iPage]));
1477 }
1478 }
1479
1480 CHECK_RET(cLoad == pThis->cLoad, ("%u %u\n", cLoad, pThis->cLoad));
1481 CHECK_RET(cGuard == pThis->cGuardPages, ("%u %u\n", cGuard, pThis->cGuardPages));
1482
1483#undef CHECK_RET
1484 RTSpinlockRelease(pThis->hSpinlock, &Tmp);
1485 return VINF_SUCCESS;
1486}
1487
1488
1489/**
1490 * Signals the start of a new set of mappings.
1491 *
1492 * Mostly for strictness. PGMDynMapHCPage won't work unless this
1493 * API is called.
1494 *
1495 * @param pVCpu The shared data for the current virtual CPU.
1496 */
1497VMMDECL(void) PGMDynMapStartAutoSet(PVMCPU pVCpu)
1498{
1499 Assert(pVCpu->pgm.s.AutoSet.cEntries == PGMMAPSET_CLOSED);
1500 pVCpu->pgm.s.AutoSet.cEntries = 0;
1501 pVCpu->pgm.s.AutoSet.iCpu = RTMpCpuIdToSetIndex(RTMpCpuId());
1502}
1503
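
A schematic example of the calling pattern this API expects, assuming the VMM headers already included at the top of this file. The helper name and the origin of pVM / pVCpu / HCPhys are assumptions for illustration, and error handling is trimmed.

/* Schematic ring-0 usage: open the auto set, map, use, then release everything. */
static int WorkOnGuestPage(PVM pVM, PVMCPU pVCpu, RTHCPHYS HCPhys)
{
    PGMDynMapStartAutoSet(pVCpu);           /* open the set; mappings are now tracked */

    void *pvPage;
    int rc = PGMDynMapHCPage(pVM, HCPhys, &pvPage);
    if (RT_SUCCESS(rc))
    {
        /* ... access the page through pvPage while the set is open ... */
    }

    PGMDynMapReleaseAutoSet(pVCpu);         /* close the set and drop all the references */
    return rc;
}
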
1504
1505/**
1506 * Worker that performs the actual flushing of the set.
1507 *
1508 * @param pSet The set to flush.
1509 * @param cEntries The number of entries.
1510 */
1511DECLINLINE(void) pgmDynMapFlushAutoSetWorker(PPGMMAPSET pSet, uint32_t cEntries)
1512{
1513 /*
1514 * Release any pages it's referencing.
1515 */
1516 if ( cEntries != 0
1517 && RT_LIKELY(cEntries <= RT_ELEMENTS(pSet->aEntries)))
1518 {
1519 PPGMR0DYNMAP pThis = g_pPGMR0DynMap;
1520 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1521 RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
1522
1523 uint32_t i = cEntries;
1524 while (i-- > 0)
1525 {
1526 uint32_t iPage = pSet->aEntries[i].iPage;
1527 Assert(iPage < pThis->cPages);
1528 int32_t cRefs = pSet->aEntries[i].cRefs;
1529 Assert(cRefs > 0);
1530 pgmR0DynMapReleasePageLocked(pThis, iPage, cRefs);
1531
1532 pSet->aEntries[i].iPage = UINT16_MAX;
1533 pSet->aEntries[i].cRefs = 0;
1534 }
1535
1536 Assert(pThis->cLoad <= pThis->cPages - pThis->cGuardPages);
1537 RTSpinlockRelease(pThis->hSpinlock, &Tmp);
1538 }
1539}
1540
1541
1542/**
1543 * Releases the dynamic memory mappings made by PGMDynMapHCPage and associated APIs
1544 * since the PGMDynMapStartAutoSet call.
1545 *
1546 * @param pVCpu The shared data for the current virtual CPU.
1547 */
1548VMMDECL(void) PGMDynMapReleaseAutoSet(PVMCPU pVCpu)
1549{
1550 PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet;
1551
1552 /*
1553 * Close and flush the set.
1554 */
1555 uint32_t cEntries = pSet->cEntries;
1556 AssertReturnVoid(cEntries != PGMMAPSET_CLOSED);
1557 AssertMsg(cEntries <= RT_ELEMENTS(pSet->aEntries), ("%#x (%u)\n", cEntries, cEntries));
1558 pSet->cEntries = PGMMAPSET_CLOSED;
1559 pSet->iCpu = -1;
1560
1561 pgmDynMapFlushAutoSetWorker(pSet, cEntries);
1562}
1563
1564
1565/**
1566 * Flushes the set if it's above a certain threshold.
1567 *
1568 * @param pVCpu The shared data for the current virtual CPU.
1569 */
1570VMMDECL(void) PGMDynMapFlushAutoSet(PVMCPU pVCpu)
1571{
1572 PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet;
1573
1574 /*
1575 * Only flush it if it's at least 50% full.
1576 */
1577 uint32_t cEntries = pSet->cEntries;
1578 AssertReturnVoid(cEntries != PGMMAPSET_CLOSED);
1579 if (cEntries >= RT_ELEMENTS(pSet->aEntries) / 2)
1580 {
1581 AssertMsg(cEntries <= RT_ELEMENTS(pSet->aEntries), ("%#x (%u)\n", cEntries, cEntries));
1582 pSet->cEntries = 0;
1583
1584 pgmDynMapFlushAutoSetWorker(pSet, cEntries);
1585 }
1586 Assert(pSet->iCpu == RTMpCpuIdToSetIndex(RTMpCpuId()));
1587}
1588
1589
1590
1591/**
1592 * Migrates the automatic mapping set of the current vCPU if it's active and
1593 * necessary.
1594 *
1595 * This is called when re-entering the hardware assisted execution mode after a
1596 * nip down to ring-3. We run the risk that the CPU might have changed, so we
1597 * make sure all the cache entries currently in the auto set will be valid on
1598 * the new CPU. If the CPU didn't change, nothing needs doing as all the
1599 * entries will already have been invalidated on it.
1600 *
1601 * @param pVCpu The shared data for the current virtual CPU.
1602 * @thread EMT
1603 */
1604VMMDECL(void) PGMDynMapMigrateAutoSet(PVMCPU pVCpu)
1605{
1606 PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet;
1607 uint32_t i = pSet->cEntries;
1608 if (i != PGMMAPSET_CLOSED)
1609 {
1610 AssertMsg(i <= RT_ELEMENTS(pSet->aEntries), ("%#x (%u)\n", i, i));
1611 if (i != 0 && RT_LIKELY(i <= RT_ELEMENTS(pSet->aEntries)))
1612 {
1613 PPGMR0DYNMAP pThis = g_pPGMR0DynMap;
1614 int32_t iRealCpu = RTMpCpuIdToSetIndex(RTMpCpuId());
1615 if (pSet->iCpu != iRealCpu)
1616 {
1617 while (i-- > 0)
1618 {
1619 Assert(pSet->aEntries[i].cRefs > 0);
1620 uint32_t iPage = pSet->aEntries[i].iPage;
1621 Assert(iPage < pThis->cPages);
1622 if (RTCpuSetIsMemberByIndex(&pThis->paPages[iPage].PendingSet, iRealCpu))
1623 {
1624 RTCpuSetDelByIndex(&pThis->paPages[iPage].PendingSet, iRealCpu);
1625 ASMInvalidatePage(pThis->paPages[iPage].pvPage);
1626 STAM_COUNTER_INC(&pVCpu->pVMR0->pgm.s.StatR0DynMapMigrateInvlPg);
1627 }
1628 }
1629
1630 pSet->iCpu = iRealCpu;
1631 }
1632 }
1633 }
1634}
1635
1636
1637/**
1638 * As a final resort for a full auto set, try merge duplicate entries.
1639 *
1640 * @param pSet The set.
1641 */
1642static void pgmDynMapOptimizeAutoSet(PPGMMAPSET pSet)
1643{
1644 for (uint32_t i = 0 ; i < pSet->cEntries; i++)
1645 {
1646 uint16_t const iPage = pSet->aEntries[i].iPage;
1647 uint32_t j = i + 1;
1648 while (j < pSet->cEntries)
1649 {
1650 if (pSet->aEntries[j].iPage != iPage)
1651 j++;
1652 else if ((uint32_t)pSet->aEntries[i].cRefs + (uint32_t)pSet->aEntries[j].cRefs < UINT16_MAX)
1653 {
1654 /* merge j into i removing j. */
1655 pSet->aEntries[i].cRefs += pSet->aEntries[j].cRefs;
1656 pSet->cEntries--;
1657 if (j < pSet->cEntries)
1658 {
1659 pSet->aEntries[j] = pSet->aEntries[pSet->cEntries];
1660 pSet->aEntries[pSet->cEntries].iPage = UINT16_MAX;
1661 pSet->aEntries[pSet->cEntries].cRefs = 0;
1662 }
1663 else
1664 {
1665 pSet->aEntries[j].iPage = UINT16_MAX;
1666 pSet->aEntries[j].cRefs = 0;
1667 }
1668 }
1669 else
1670 {
1671 /* migrate the max number of refs from j into i and quit the inner loop. */
1672 uint32_t cMigrate = UINT16_MAX - 1 - pSet->aEntries[i].cRefs;
1673 Assert(pSet->aEntries[j].cRefs > cMigrate);
1674 pSet->aEntries[j].cRefs -= cMigrate;
1675 pSet->aEntries[i].cRefs = UINT16_MAX - 1;
1676 break;
1677 }
1678 }
1679 }
1680}
1681
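
A minimal standalone sketch of the merge step above, using a simplified entry type and omitting the UINT16_MAX saturation handling of the original:

#include <stdint.h>
#include <stdio.h>

typedef struct { uint16_t iPage; uint16_t cRefs; } MYSETENTRY;

/* Fold duplicate iPage entries together, returning the new entry count. */
static unsigned OptimizeSet(MYSETENTRY *paEntries, unsigned cEntries)
{
    for (unsigned i = 0; i < cEntries; i++)
        for (unsigned j = i + 1; j < cEntries; )
        {
            if (paEntries[j].iPage != paEntries[i].iPage)
                j++;
            else
            {
                paEntries[i].cRefs += paEntries[j].cRefs;   /* merge j into i... */
                paEntries[j] = paEntries[--cEntries];       /* ...and close the gap */
            }
        }
    return cEntries;
}

int main(void)
{
    MYSETENTRY aEntries[] = { { 5, 3 }, { 7, 1 }, { 5, 2 } };
    unsigned cEntries = OptimizeSet(aEntries, 3);
    for (unsigned i = 0; i < cEntries; i++)
        printf("iPage=%u cRefs=%u\n", (unsigned)aEntries[i].iPage, (unsigned)aEntries[i].cRefs);  /* 5/5 then 7/1 */
    return 0;
}
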
1682
1683/**
1684 * Common worker code for PGMDynMapHCPage, pgmR0DynMapHCPageInlined and
1685 * pgmR0DynMapGCPageInlined.
1686 *
1687 * @returns VBox status code.
1688 * @param pVM The shared VM structure (for statistics).
1689 * @param pSet The set.
1690 * @param HCPhys The physical address of the page.
1691 * @param ppv Where to store the address of the mapping on success.
1692 *
1693 * @remarks This is a very hot path.
1694 */
1695int pgmR0DynMapHCPageCommon(PVM pVM, PPGMMAPSET pSet, RTHCPHYS HCPhys, void **ppv)
1696{
1697 Assert(pSet->iCpu == RTMpCpuIdToSetIndex(RTMpCpuId()));
1698
1699 /*
1700 * Map it.
1701 */
1702 void *pvPage;
1703 uint32_t const iPage = pgmR0DynMapPage(g_pPGMR0DynMap, HCPhys, pSet->iCpu, pVM, &pvPage);
1704 if (RT_UNLIKELY(iPage == UINT32_MAX))
1705 {
1706 static uint32_t s_cBitched = 0;
1707 if (++s_cBitched < 10)
1708 LogRel(("PGMDynMapHCPage: cLoad=%u/%u cPages=%u cGuardPages=%u\n",
1709 g_pPGMR0DynMap->cLoad, g_pPGMR0DynMap->cMaxLoad, g_pPGMR0DynMap->cPages, g_pPGMR0DynMap->cGuardPages));
1710 *ppv = NULL;
1711 return VERR_PGM_DYNMAP_FAILED;
1712 }
1713
1714 /*
1715 * Add the page to the auto reference set.
1716 *
1717 * The typical usage pattern means that the same pages will be mapped
1718 * several times in the same set. We can catch most of these
1719 * remappings by looking at the last few entries in the set. (The search
1720 * and set optimization paths will then hardly ever be taken.)
1721 */
1722 AssertCompile(RT_ELEMENTS(pSet->aEntries) >= 8);
1723 int32_t i = pSet->cEntries;
1724 if (i-- < 5)
1725 {
1726 unsigned iEntry = pSet->cEntries++;
1727 pSet->aEntries[iEntry].cRefs = 1;
1728 pSet->aEntries[iEntry].iPage = iPage;
1729 pSet->aEntries[iEntry].pvPage = pvPage;
1730 pSet->aEntries[iEntry].HCPhys = HCPhys;
1731 pSet->aiHashTable[PGMMAPSET_HASH(HCPhys)] = iEntry;
1732 }
1733 /* Any of the last 5 pages? */
1734 else if ( pSet->aEntries[i - 0].iPage == iPage
1735 && pSet->aEntries[i - 0].cRefs < UINT16_MAX - 1)
1736 pSet->aEntries[i - 0].cRefs++;
1737 else if ( pSet->aEntries[i - 1].iPage == iPage
1738 && pSet->aEntries[i - 1].cRefs < UINT16_MAX - 1)
1739 pSet->aEntries[i - 1].cRefs++;
1740 else if ( pSet->aEntries[i - 2].iPage == iPage
1741 && pSet->aEntries[i - 2].cRefs < UINT16_MAX - 1)
1742 pSet->aEntries[i - 2].cRefs++;
1743 else if ( pSet->aEntries[i - 3].iPage == iPage
1744 && pSet->aEntries[i - 3].cRefs < UINT16_MAX - 1)
1745 pSet->aEntries[i - 3].cRefs++;
1746 else if ( pSet->aEntries[i - 4].iPage == iPage
1747 && pSet->aEntries[i - 4].cRefs < UINT16_MAX - 1)
1748 pSet->aEntries[i - 4].cRefs++;
1749 /* Don't bother searching unless we're above a 75% load. */
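 /* (With a hypothetical 32-entry set that 75% mark is 24 entries; below it we
 simply append a new entry instead of scanning for an existing one.) */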
1750 else if (RT_LIKELY(i <= (int32_t)RT_ELEMENTS(pSet->aEntries) / 4 * 3))
1751 {
1752 unsigned iEntry = pSet->cEntries++;
1753 pSet->aEntries[iEntry].cRefs = 1;
1754 pSet->aEntries[iEntry].iPage = iPage;
1755 pSet->aEntries[iEntry].pvPage = pvPage;
1756 pSet->aEntries[iEntry].HCPhys = HCPhys;
1757 pSet->aiHashTable[PGMMAPSET_HASH(HCPhys)] = iEntry;
1758 }
1759 else
1760 {
1761 /* Search the rest of the set. */
1762 Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));
1763 i -= 4;
1764 while (i-- > 0)
1765 if ( pSet->aEntries[i].iPage == iPage
1766 && pSet->aEntries[i].cRefs < UINT16_MAX - 1)
1767 {
1768 pSet->aEntries[i].cRefs++;
1769 STAM_COUNTER_INC(&pVM->pgm.s.StatR0DynMapSetSearchHits);
1770 break;
1771 }
1772 if (i < 0)
1773 {
1774 STAM_COUNTER_INC(&pVM->pgm.s.StatR0DynMapSetSearchMisses);
1775 if (RT_UNLIKELY(pSet->cEntries >= RT_ELEMENTS(pSet->aEntries)))
1776 {
1777 STAM_COUNTER_INC(&pVM->pgm.s.StatR0DynMapSetOptimize);
1778 pgmDynMapOptimizeAutoSet(pSet);
1779 }
1780 if (RT_LIKELY(pSet->cEntries < RT_ELEMENTS(pSet->aEntries)))
1781 {
1782 unsigned iEntry = pSet->cEntries++;
1783 pSet->aEntries[iEntry].cRefs = 1;
1784 pSet->aEntries[iEntry].iPage = iPage;
1785 pSet->aEntries[iEntry].pvPage = pvPage;
1786 pSet->aEntries[iEntry].HCPhys = HCPhys;
1787 pSet->aiHashTable[PGMMAPSET_HASH(HCPhys)] = iEntry;
1788 }
1789 else
1790 {
1791 /* We're screwed. */
1792 pgmR0DynMapReleasePage(g_pPGMR0DynMap, iPage, 1);
1793
1794 static uint32_t s_cBitched = 0;
1795 if (++s_cBitched < 10)
1796 LogRel(("PGMDynMapHCPage: set is full!\n"));
1797 *ppv = NULL;
1798 return VERR_PGM_DYNMAP_FULL_SET;
1799 }
1800 }
1801 }
1802
1803 *ppv = pvPage;
1804 return VINF_SUCCESS;
1805}
1806
1807
1808/* Documented elsewhere; see pgmR0DynMapHCPageCommon above for the details. */
1809VMMDECL(int) PGMDynMapHCPage(PVM pVM, RTHCPHYS HCPhys, void **ppv)
1810{
1811 /*
1812 * Validate state.
1813 */
1814 STAM_PROFILE_START(&pVM->pgm.s.StatR0DynMapHCPage, a);
1815 AssertPtr(ppv);
1816 AssertMsgReturn(pVM->pgm.s.pvR0DynMapUsed == g_pPGMR0DynMap,
1817 ("%p != %p\n", pVM->pgm.s.pvR0DynMapUsed, g_pPGMR0DynMap),
1818 VERR_ACCESS_DENIED);
1819 AssertMsg(!(HCPhys & PAGE_OFFSET_MASK), ("HCPhys=%RHp\n", HCPhys));
1820 PVMCPU pVCpu = VMMGetCpu(pVM);
1821 PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet;
1822 AssertPtrReturn(pVCpu, VERR_INTERNAL_ERROR);
1823 AssertMsgReturn(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries),
1824 ("%#x (%u)\n", pSet->cEntries, pSet->cEntries), VERR_WRONG_ORDER);
1825
1826 /*
1827 * Call common code.
1828 */
1829 int rc = pgmR0DynMapHCPageCommon(pVM, pSet, HCPhys, ppv);
1830
1831 STAM_PROFILE_STOP(&pVM->pgm.s.StatR0DynMapHCPage, a);
1832 return rc;
1833}
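

/*
 * Illustrative sketch only (disabled, not part of the build): how a ring-0
 * caller brackets its PGMDynMapHCPage calls with the auto set APIs, mirroring
 * what the test code below does.  pVM, pVCpu and HCPhys are placeholders.
 */
#if 0
static int pgmR0DynMapAutoSetSketch(PVM pVM, PVMCPU pVCpu, RTHCPHYS HCPhys)
{
    PGMDynMapStartAutoSet(pVCpu);       /* open the per-EMT auto reference set */

    void *pv = NULL;
    int rc = PGMDynMapHCPage(pVM, HCPhys, &pv);
    if (RT_SUCCESS(rc))
    {
        /* ... use the mapping at pv while the set is still open ... */
    }

    PGMDynMapReleaseAutoSet(pVCpu);     /* release all references taken since the start */
    return rc;
}
#endif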
1834
1835
1836#ifdef DEBUG
1837/** For pgmR0DynMapTest3PerCpu. */
1838typedef struct PGMR0DYNMAPTEST
1839{
1840 uint32_t u32Expect;
1841 uint32_t *pu32;
1842 uint32_t volatile cFailures;
1843} PGMR0DYNMAPTEST;
1844typedef PGMR0DYNMAPTEST *PPGMR0DYNMAPTEST;
1845
1846/**
1847 * Checks that the content of the page is the same on all CPUs, i.e. that there
1848 * are no CPU specific PTs or similar nasty stuff involved.
1849 *
1850 * @param idCpu The current CPU.
1851 * @param pvUser1 Pointer to a PGMR0DYNMAPTEST structure.
1852 * @param pvUser2 Unused, ignored.
1853 */
1854static DECLCALLBACK(void) pgmR0DynMapTest3PerCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2)
1855{
1856 PPGMR0DYNMAPTEST pTest = (PPGMR0DYNMAPTEST)pvUser1;
1857 ASMInvalidatePage(pTest->pu32);
1858 if (*pTest->pu32 != pTest->u32Expect)
1859 ASMAtomicIncU32(&pTest->cFailures);
1860 NOREF(pvUser2); NOREF(idCpu);
1861}
1862
1863
1864/**
1865 * Performs some basic tests in debug builds.
1866 */
1867static int pgmR0DynMapTest(PVM pVM)
1868{
1869 LogRel(("pgmR0DynMapTest: ****** START ******\n"));
1870 PPGMR0DYNMAP pThis = g_pPGMR0DynMap;
1871 PPGMMAPSET pSet = &pVM->aCpus[0].pgm.s.AutoSet;
1872 uint32_t i;
1873
1874 /*
1875 * Assert internal integrity first.
1876 */
1877 LogRel(("Test #0\n"));
1878 int rc = PGMR0DynMapAssertIntegrity();
1879 if (RT_FAILURE(rc))
1880 return rc;
1881
1882 void *pvR0DynMapUsedSaved = pVM->pgm.s.pvR0DynMapUsed;
1883 pVM->pgm.s.pvR0DynMapUsed = pThis;
1884
1885 /*
1886 * Simple test, map CR3 twice and check that we're getting the
1887 * same mapping address back.
1888 */
1889 LogRel(("Test #1\n"));
1890 ASMIntDisable();
1891 PGMDynMapStartAutoSet(&pVM->aCpus[0]);
1892
1893 uint64_t cr3 = ASMGetCR3() & ~(uint64_t)PAGE_OFFSET_MASK;
1894 void *pv = (void *)(intptr_t)-1;
1895 void *pv2 = (void *)(intptr_t)-2;
1896 rc = PGMDynMapHCPage(pVM, cr3, &pv);
1897 int rc2 = PGMDynMapHCPage(pVM, cr3, &pv2);
1898 ASMIntEnable();
1899 if ( RT_SUCCESS(rc2)
1900 && RT_SUCCESS(rc)
1901 && pv == pv2)
1902 {
1903 LogRel(("Load=%u/%u/%u Set=%u/%u\n", pThis->cLoad, pThis->cMaxLoad, pThis->cPages - pThis->cGuardPages, pSet->cEntries, RT_ELEMENTS(pSet->aEntries)));
1904 rc = PGMR0DynMapAssertIntegrity();
1905
1906 /*
1907 * Check that the simple set overflow code works by filling it
1908 * with more CR3 mappings.
1909 */
1910 LogRel(("Test #2\n"));
1911 ASMIntDisable();
1912 PGMDynMapMigrateAutoSet(&pVM->aCpus[0]);
1913 for (i = 0 ; i < UINT16_MAX*2 - 1 && RT_SUCCESS(rc) && pv2 == pv; i++)
1914 {
1915 pv2 = (void *)(intptr_t)-4;
1916 rc = PGMDynMapHCPage(pVM, cr3, &pv2);
1917 }
1918 ASMIntEnable();
1919 if (RT_FAILURE(rc) || pv != pv2)
1920 {
1921 LogRel(("failed(%d): rc=%Rrc; pv=%p pv2=%p i=%d\n", __LINE__, rc, pv, pv2, i));
1922 if (RT_SUCCESS(rc)) rc = VERR_INTERNAL_ERROR;
1923 }
1924 else if (pSet->cEntries != 5)
1925 {
1926 LogRel(("failed(%d): cEntries=%d expected %d\n", __LINE__, pSet->cEntries, 5));
1927 rc = VERR_INTERNAL_ERROR;
1928 }
1929 else if ( pSet->aEntries[4].cRefs != UINT16_MAX - 1
1930 || pSet->aEntries[3].cRefs != UINT16_MAX - 1
1931 || pSet->aEntries[2].cRefs != 1
1932 || pSet->aEntries[1].cRefs != 1
1933 || pSet->aEntries[0].cRefs != 1)
1934 {
1935 LogRel(("failed(%d): bad set dist: ", __LINE__));
1936 for (i = 0; i < pSet->cEntries; i++)
1937 LogRel(("[%d]=%d, ", i, pSet->aEntries[i].cRefs));
1938 LogRel(("\n"));
1939 rc = VERR_INTERNAL_ERROR;
1940 }
1941 if (RT_SUCCESS(rc))
1942 rc = PGMR0DynMapAssertIntegrity();
1943 if (RT_SUCCESS(rc))
1944 {
1945 /*
1946 * Trigger a set optimization run (exactly).
1947 */
1948 LogRel(("Test #3\n"));
1949 ASMIntDisable();
1950 PGMDynMapMigrateAutoSet(&pVM->aCpus[0]);
1951 pv2 = NULL;
1952 for (i = 0 ; i < RT_ELEMENTS(pSet->aEntries) - 5 && RT_SUCCESS(rc) && pv2 != pv; i++)
1953 {
1954 pv2 = (void *)(intptr_t)(-5 - i);
1955 rc = PGMDynMapHCPage(pVM, cr3 + PAGE_SIZE * (i + 5), &pv2);
1956 }
1957 ASMIntEnable();
1958 if (RT_FAILURE(rc) || pv == pv2)
1959 {
1960 LogRel(("failed(%d): rc=%Rrc; pv=%p pv2=%p i=%d\n", __LINE__, rc, pv, pv2, i));
1961 if (RT_SUCCESS(rc)) rc = VERR_INTERNAL_ERROR;
1962 }
1963 else if (pSet->cEntries != RT_ELEMENTS(pSet->aEntries))
1964 {
1965 LogRel(("failed(%d): cEntries=%d expected %d\n", __LINE__, pSet->cEntries, RT_ELEMENTS(pSet->aEntries)));
1966 rc = VERR_INTERNAL_ERROR;
1967 }
1968 LogRel(("Load=%u/%u/%u Set=%u/%u\n", pThis->cLoad, pThis->cMaxLoad, pThis->cPages - pThis->cGuardPages, pSet->cEntries, RT_ELEMENTS(pSet->aEntries)));
1969 if (RT_SUCCESS(rc))
1970 rc = PGMR0DynMapAssertIntegrity();
1971 if (RT_SUCCESS(rc))
1972 {
1973 /*
1974 * Trigger an overflow error.
1975 */
1976 LogRel(("Test #4\n"));
1977 ASMIntDisable();
1978 PGMDynMapMigrateAutoSet(&pVM->aCpus[0]);
1979 for (i = 0 ; i < RT_ELEMENTS(pSet->aEntries) + 2; i++)
1980 {
1981 rc = PGMDynMapHCPage(pVM, cr3 - PAGE_SIZE * (i + 5), &pv2);
1982 if (RT_SUCCESS(rc))
1983 rc = PGMR0DynMapAssertIntegrity();
1984 if (RT_FAILURE(rc))
1985 break;
1986 }
1987 ASMIntEnable();
1988 if (rc == VERR_PGM_DYNMAP_FULL_SET)
1989 {
1990 /* flush the set. */
1991 LogRel(("Test #5\n"));
1992 ASMIntDisable();
1993 PGMDynMapMigrateAutoSet(&pVM->aCpus[0]);
1994 PGMDynMapReleaseAutoSet(&pVM->aCpus[0]);
1995 PGMDynMapStartAutoSet(&pVM->aCpus[0]);
1996 ASMIntEnable();
1997
1998 rc = PGMR0DynMapAssertIntegrity();
1999 }
2000 else
2001 {
2002 LogRel(("failed(%d): rc=%Rrc, wanted %d ; pv2=%p Set=%u/%u; i=%d\n", __LINE__,
2003 rc, VERR_PGM_DYNMAP_FULL_SET, pv2, pSet->cEntries, RT_ELEMENTS(pSet->aEntries), i));
2004 if (RT_SUCCESS(rc)) rc = VERR_INTERNAL_ERROR;
2005 }
2006 }
2007 }
2008 }
2009 else
2010 {
2011 LogRel(("failed(%d): rc=%Rrc rc2=%Rrc; pv=%p pv2=%p\n", __LINE__, rc, rc2, pv, pv2));
2012 if (RT_SUCCESS(rc))
2013 rc = rc2;
2014 }
2015
2016 /*
2017 * Check that everyone sees the same stuff.
2018 */
2019 if (RT_SUCCESS(rc))
2020 {
2021 LogRel(("Test #5\n"));
2022 ASMIntDisable();
2023 PGMDynMapMigrateAutoSet(&pVM->aCpus[0]);
2024 RTHCPHYS HCPhysPT = RTR0MemObjGetPagePhysAddr(pThis->pSegHead->ahMemObjPTs[0], 0);
2025 rc = PGMDynMapHCPage(pVM, HCPhysPT, &pv);
2026 if (RT_SUCCESS(rc))
2027 {
2028 PGMR0DYNMAPTEST Test;
2029 uint32_t *pu32Real = &pThis->paPages[pThis->pSegHead->iPage].uPte.pLegacy->u;
2030 Test.pu32 = (uint32_t *)((uintptr_t)pv | ((uintptr_t)pu32Real & PAGE_OFFSET_MASK));
2031 Test.u32Expect = *pu32Real;
2032 ASMAtomicWriteU32(&Test.cFailures, 0);
2033 ASMIntEnable();
2034
2035 rc = RTMpOnAll(pgmR0DynMapTest3PerCpu, &Test, NULL);
2036 if (RT_FAILURE(rc))
2037 LogRel(("failed(%d): RTMpOnAll rc=%Rrc\n", __LINE__, rc));
2038 else if (Test.cFailures)
2039 {
2040 LogRel(("failed(%d): cFailures=%d pu32Real=%p pu32=%p u32Expect=%#x *pu32=%#x\n", __LINE__,
2041 Test.cFailures, pu32Real, Test.pu32, Test.u32Expect, *Test.pu32));
2042 rc = VERR_INTERNAL_ERROR;
2043 }
2044 else
2045 LogRel(("pu32Real=%p pu32=%p u32Expect=%#x *pu32=%#x\n",
2046 pu32Real, Test.pu32, Test.u32Expect, *Test.pu32));
2047 }
2048 else
2049 {
2050 ASMIntEnable();
2051 LogRel(("failed(%d): rc=%Rrc\n", __LINE__, rc));
2052 }
2053 }
2054
2055 /*
2056 * Clean up.
2057 */
2058 LogRel(("Cleanup.\n"));
2059 ASMIntDisable();
2060 PGMDynMapMigrateAutoSet(&pVM->aCpus[0]);
2061 PGMDynMapFlushAutoSet(&pVM->aCpus[0]);
2062 PGMDynMapReleaseAutoSet(&pVM->aCpus[0]);
2063 ASMIntEnable();
2064
2065 if (RT_SUCCESS(rc))
2066 rc = PGMR0DynMapAssertIntegrity();
2067 else
2068 PGMR0DynMapAssertIntegrity();
2069
2070 LogRel(("Result: rc=%Rrc Load=%u/%u/%u Set=%#x/%u\n", rc,
2071 pThis->cLoad, pThis->cMaxLoad, pThis->cPages - pThis->cGuardPages, pSet->cEntries, RT_ELEMENTS(pSet->aEntries)));
2072 pVM->pgm.s.pvR0DynMapUsed = pvR0DynMapUsedSaved;
2073 LogRel(("pgmR0DynMapTest: ****** END ******\n"));
2074 return rc;
2075}
2076#endif /* DEBUG */
2077