VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/PGMR0DynMap.cpp@14376

Last change on this file since 14376 was 14376, checked in by vboxsync, 16 years ago

#1865: ring-0 mapping cache, code in progress.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 25.9 KB
/* $Id: PGMR0DynMap.cpp 14376 2008-11-19 19:17:18Z vboxsync $ */
/** @file
 * PGM - Page Manager and Monitor, ring-0 dynamic mapping cache.
 */

/*
 * Copyright (C) 2008 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#include <VBox/pgm.h>
#include "../PGMInternal.h"
#include <VBox/vm.h>
#include <VBox/sup.h>
#include <VBox/err.h>
#include <iprt/asm.h>
#include <iprt/alloc.h>
#include <iprt/assert.h>
#include <iprt/cpuset.h>
#include <iprt/spinlock.h>
#include <iprt/semaphore.h>


/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
/**
 * Ring-0 dynamic mapping cache segment.
 *
 * The dynamic mapping cache can be extended with additional segments if the
 * load is found to be too high. This is done the next time a VM is created,
 * under the protection of the init mutex. The arrays are reallocated and the
 * new segment is added to the end of them. Nothing is rehashed of course, as
 * the indexes / addresses must remain unchanged.
 *
 * This structure is only modified while owning the init mutex or during module
 * init / term.
 */
typedef struct PGMR0DYNMAPSEG
{
    /** Pointer to the next segment. */
    struct PGMR0DYNMAPSEG  *pNext;
    /** The memory object for the virtual address range that we're abusing. */
    RTR0MEMOBJ              hMemObj;
    /** The memory object for the page tables. */
    RTR0MEMOBJ              hMemObjPT;
    /** The start page in the cache. (I.e. index into the arrays.) */
    uint32_t                iPage;
    /** The number of pages this segment contributes. */
    uint32_t                cPages;
} PGMR0DYNMAPSEG;
/** Pointer to a ring-0 dynamic mapping cache segment. */
typedef PGMR0DYNMAPSEG *PPGMR0DYNMAPSEG;


/**
 * Ring-0 dynamic mapping cache entry.
 *
 * This structure tracks a single slot in the mapping cache: the physical page
 * currently mapped into it and the bookkeeping that goes with it.
 */
typedef struct PGMR0DYNMAPENTRY
{
    /** The physical address of the currently mapped page.
     * This is duplicated for three reasons: cache locality, cache policy of the
     * PT mappings and sanity checks. */
    RTHCPHYS                HCPhys;
    /** Pointer to the page. */
    void                   *pvPage;
    /** The number of references. */
    int32_t volatile        cRefs;
    /** PTE pointer union. */
    union PGMR0DYNMAPENTRY_PPTE
    {
        /** PTE pointer, 32-bit legacy version. */
        PX86PTE             pLegacy;
        /** PTE pointer, PAE version. */
        PX86PTEPAE          pPae;
    } uPte;
    /** CPUs that haven't invalidated this entry after its last update. */
    RTCPUSET                PendingSet;
} PGMR0DYNMAPENTRY;
/** Pointer to a ring-0 dynamic mapping cache entry. */
typedef PGMR0DYNMAPENTRY *PPGMR0DYNMAPENTRY;


/**
 * Ring-0 dynamic mapping cache.
 *
 * This is initialized during VMMR0 module init but no segments are allocated
 * at that time. Segments will be added when the first VM is started and
 * removed again when the last VM shuts down, thus avoiding memory consumption
 * while dormant. At module termination, the remaining bits will be freed up.
 */
typedef struct PGMR0DYNMAP
{
    /** The usual magic number / eye catcher (PGMR0DYNMAP_MAGIC). */
    uint32_t                u32Magic;
    /** Spinlock serializing the normal operation of the cache. */
    RTSPINLOCK              hSpinlock;
    /** Array for tracking and managing the pages. */
    PPGMR0DYNMAPENTRY       paPages;
    /** The cache size given as a number of pages. */
    uint32_t                cPages;
    /** Whether it's 32-bit legacy or PAE/AMD64 paging mode. */
    bool                    fLegacyMode;
    /** The current load. */
    uint32_t                cLoad;
    /** The max load.
     * This is maintained in order to trigger adding more mapping space. */
    uint32_t                cMaxLoad;
    /** Initialization / termination lock. */
    RTSEMFASTMUTEX          hInitLock;
    /** The number of users (protected by hInitLock). */
    uint32_t                cUsers;
    /** Array containing a copy of the original page tables.
     * The entries are either X86PTE or X86PTEPAE according to fLegacyMode. */
    void                   *pvSavedPTs;
} PGMR0DYNMAP;
/** Pointer to the ring-0 dynamic mapping cache. */
typedef PGMR0DYNMAP *PPGMR0DYNMAP;

/** PGMR0DYNMAP::u32Magic. (Jens Christian Bugge Wesseltoft) */
#define PGMR0DYNMAP_MAGIC   0x19640201


/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
/** Pointer to the ring-0 dynamic mapping cache. */
static PPGMR0DYNMAP g_pPGMR0DynMap;


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static void pgmR0DynMapReleasePage(PPGMR0DYNMAP pThis, uint32_t iPage, uint32_t cRefs);
static int  pgmR0DynMapSetup(PPGMR0DYNMAP pThis);
static int  pgmR0DynMapGrow(PPGMR0DYNMAP pThis);
static void pgmR0DynMapTearDown(PPGMR0DYNMAP pThis);


/**
 * Initializes the ring-0 dynamic mapping cache.
 *
 * @returns VBox status code.
 */
VMMR0DECL(int) PGMR0DynMapInit(void)
{
    Assert(!g_pPGMR0DynMap);

    /*
     * Create and initialize the cache instance.
     */
    PPGMR0DYNMAP pThis = (PPGMR0DYNMAP)RTMemAllocZ(sizeof(*pThis));
    AssertLogRelReturn(pThis, VERR_NO_MEMORY);
    int             rc = VINF_SUCCESS;
    SUPPAGINGMODE   enmMode = SUPR0GetPagingMode();
    switch (enmMode)
    {
        case SUPPAGINGMODE_32_BIT:
        case SUPPAGINGMODE_32_BIT_GLOBAL:
            pThis->fLegacyMode = true;  /* 32-bit legacy page tables. */
            break;
        case SUPPAGINGMODE_PAE:
        case SUPPAGINGMODE_PAE_GLOBAL:
        case SUPPAGINGMODE_PAE_NX:
        case SUPPAGINGMODE_PAE_GLOBAL_NX:
        case SUPPAGINGMODE_AMD64:
        case SUPPAGINGMODE_AMD64_GLOBAL:
        case SUPPAGINGMODE_AMD64_NX:
        case SUPPAGINGMODE_AMD64_GLOBAL_NX:
            pThis->fLegacyMode = false;
            break;
        default:
            rc = VERR_INTERNAL_ERROR;
            break;
    }
    if (RT_SUCCESS(rc))
    {
        rc = RTSemFastMutexCreate(&pThis->hInitLock);
        if (RT_SUCCESS(rc))
        {
            rc = RTSpinlockCreate(&pThis->hSpinlock);
            if (RT_SUCCESS(rc))
            {
                pThis->u32Magic = PGMR0DYNMAP_MAGIC;
                g_pPGMR0DynMap = pThis;
                return VINF_SUCCESS;
            }
            RTSemFastMutexDestroy(pThis->hInitLock);
        }
    }
    RTMemFree(pThis);
    return rc;
}


/**
 * Terminates the ring-0 dynamic mapping cache.
 */
VMMR0DECL(void) PGMR0DynMapTerm(void)
{
    /*
     * Destroy the cache.
     *
     * There are not supposed to be any races here, the loader should
     * make sure about that. So, don't bother locking anything.
     *
     * The VM objects should all be destroyed by now, so there are no
     * dangling users or anything like that to clean up. This routine
     * is just a mirror image of PGMR0DynMapInit.
     */
    PPGMR0DYNMAP pThis = g_pPGMR0DynMap;
    if (pThis)
    {
        AssertPtr(pThis);
        g_pPGMR0DynMap = NULL;

        AssertLogRelMsg(!pThis->cUsers && !pThis->paPages && !pThis->cPages,
                        ("cUsers=%d paPages=%p cPages=%#x\n",
                         pThis->cUsers, pThis->paPages, pThis->cPages));

        /* Free the associated resources. */
        RTSemFastMutexDestroy(pThis->hInitLock);
        pThis->hInitLock = NIL_RTSEMFASTMUTEX;
        RTSpinlockDestroy(pThis->hSpinlock);
        pThis->hSpinlock = NIL_RTSPINLOCK;
        pThis->u32Magic = UINT32_MAX;
        RTMemFree(pThis);
    }
}


/**
 * Initializes the dynamic mapping cache for a new VM.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the shared VM structure.
 */
VMMR0DECL(int) PGMR0DynMapInitVM(PVM pVM)
{
    /*
     * Initialize the auto sets.
     */
    VMCPUID idCpu = pVM->cCPUs;
    while (idCpu-- > 0)
    {
        PPGMMAPSET pSet = &pVM->aCpus[idCpu].pgm.s.AutoSet;
        uint32_t j = RT_ELEMENTS(pSet->aEntries);
        while (j-- > 0)
        {
            pSet->aEntries[j].iPage = UINT16_MAX;
            pSet->aEntries[j].cRefs = 0;
        }
        pSet->cEntries = PGMMAPSET_CLOSED;
    }

    /*
     * Do we need the cache? Skip the last bit if we don't.
     */
    Assert(!pVM->pgm.s.pvR0DynMapUsed);
    pVM->pgm.s.pvR0DynMapUsed = NULL;
    if (!HWACCMIsEnabled(pVM))
        return VINF_SUCCESS;

    /*
     * Reference and if necessary setup or grow the cache.
     */
    PPGMR0DYNMAP pThis = g_pPGMR0DynMap;
    AssertPtrReturn(pThis, VERR_INTERNAL_ERROR);
    int rc = RTSemFastMutexRequest(pThis->hInitLock);
    AssertLogRelRCReturn(rc, rc);

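    /* The first user sets the cache up; later users grow it if the previous
       peak load went above half of the current capacity. */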
    pThis->cUsers++;
    if (pThis->cUsers == 1)
        rc = pgmR0DynMapSetup(pThis);
    else if (pThis->cMaxLoad > pThis->cPages / 2)
        rc = pgmR0DynMapGrow(pThis);
    if (RT_FAILURE(rc))
        pThis->cUsers--;

    RTSemFastMutexRelease(pThis->hInitLock);

    return rc;
}


/**
 * Terminates the dynamic mapping cache usage for a VM.
 *
 * @param   pVM     Pointer to the shared VM structure.
 */
VMMR0DECL(void) PGMR0DynMapTermVM(PVM pVM)
{
    /*
     * Return immediately if we're not using the cache.
     */
    if (!pVM->pgm.s.pvR0DynMapUsed)
        return;

    PPGMR0DYNMAP pThis = g_pPGMR0DynMap;
    AssertPtrReturnVoid(pThis);

    int rc = RTSemFastMutexRequest(pThis->hInitLock);
    AssertLogRelRCReturnVoid(rc);

    if (pVM->pgm.s.pvR0DynMapUsed == pThis)
    {
        pVM->pgm.s.pvR0DynMapUsed = NULL;

        /*
         * Clean up and check the auto sets.
         */
        VMCPUID idCpu = pVM->cCPUs;
        while (idCpu-- > 0)
        {
            PPGMMAPSET pSet = &pVM->aCpus[idCpu].pgm.s.AutoSet;
            uint32_t j = pSet->cEntries;
            if (j <= RT_ELEMENTS(pSet->aEntries))
            {
                /*
                 * The set is open, close it.
                 */
                while (j-- > 0)
                {
                    int32_t  cRefs = pSet->aEntries[j].cRefs;
                    uint32_t iPage = pSet->aEntries[j].iPage;
                    LogRel(("PGMR0DynMapTermVM: %d dangling refs to %#x\n", cRefs, iPage));
                    if (iPage < pThis->cPages && cRefs > 0)
                        pgmR0DynMapReleasePage(pThis, iPage, cRefs);
                    else
                        AssertMsgFailed(("cRefs=%d iPage=%#x cPages=%u\n", cRefs, iPage, pThis->cPages));

                    pSet->aEntries[j].iPage = UINT16_MAX;
                    pSet->aEntries[j].cRefs = 0;
                }
                pSet->cEntries = PGMMAPSET_CLOSED;
            }

            j = RT_ELEMENTS(pSet->aEntries);
            while (j-- > 0)
            {
                Assert(pSet->aEntries[j].iPage == UINT16_MAX);
                Assert(!pSet->aEntries[j].cRefs);
            }
        }

        /*
         * Release our reference to the mapping cache.
         */
        Assert(pThis->cUsers > 0);
        pThis->cUsers--;
        if (!pThis->cUsers)
            pgmR0DynMapTearDown(pThis);
    }
    else
        AssertMsgFailed(("pvR0DynMapUsed=%p pThis=%p\n", pVM->pgm.s.pvR0DynMapUsed, pThis));

    RTSemFastMutexRelease(pThis->hInitLock);
}


/**
 * Called by PGMR0DynMapInitVM under the init lock.
 *
 * @returns VBox status code.
 * @param   pThis   The dynamic mapping cache instance.
 */
static int pgmR0DynMapSetup(PPGMR0DYNMAP pThis)
{
    return VINF_SUCCESS;
}


/**
 * Called by PGMR0DynMapInitVM under the init lock.
 *
 * @returns VBox status code.
 * @param   pThis   The dynamic mapping cache instance.
 */
static int pgmR0DynMapGrow(PPGMR0DYNMAP pThis)
{
    return VINF_SUCCESS;
}


/**
 * Called by PGMR0DynMapTermVM under the init lock.
 *
 * @param   pThis   The dynamic mapping cache instance.
 */
static void pgmR0DynMapTearDown(PPGMR0DYNMAP pThis)
{
}


/**
 * Release references to a page, caller owns the spin lock.
 *
 * @param   pThis   The dynamic mapping cache instance.
 * @param   iPage   The page.
 * @param   cRefs   The number of references to release.
 */
DECLINLINE(void) pgmR0DynMapReleasePageLocked(PPGMR0DYNMAP pThis, uint32_t iPage, int32_t cRefs)
{
    /* ASMAtomicSubS32 returns the old value, so subtract once more to get the remaining count. */
    cRefs = ASMAtomicSubS32(&pThis->paPages[iPage].cRefs, cRefs) - cRefs;
    AssertMsg(cRefs >= 0, ("%d\n", cRefs));
    if (!cRefs)
        pThis->cLoad--;
}


/**
 * Release references to a page, caller does not own the spin lock.
 *
 * @param   pThis   The dynamic mapping cache instance.
 * @param   iPage   The page.
 * @param   cRefs   The number of references to release.
 */
static void pgmR0DynMapReleasePage(PPGMR0DYNMAP pThis, uint32_t iPage, uint32_t cRefs)
{
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
    pgmR0DynMapReleasePageLocked(pThis, iPage, cRefs);
    RTSpinlockRelease(pThis->hSpinlock, &Tmp);
}


/**
 * pgmR0DynMapPage worker that deals with the tedious bits.
 *
 * @returns The page index on success, UINT32_MAX on failure.
 * @param   pThis   The dynamic mapping cache instance.
 * @param   HCPhys  The address of the page to be mapped.
 * @param   iPage   The page index pgmR0DynMapPage hashed HCPhys to.
 */
static uint32_t pgmR0DynMapPageSlow(PPGMR0DYNMAP pThis, RTHCPHYS HCPhys, uint32_t iPage)
{
    /*
     * Check if any of the first 5 pages are unreferenced since the caller
     * has already made sure they aren't matching.
     */
    uint32_t const      cPages  = pThis->cPages;
    PPGMR0DYNMAPENTRY   paPages = pThis->paPages;
    uint32_t            iFreePage;
    if (!paPages[iPage].cRefs)
        iFreePage = iPage;
    else if (!paPages[(iPage + 1) % cPages].cRefs)
        iFreePage = (iPage + 1) % cPages;
    else if (!paPages[(iPage + 2) % cPages].cRefs)
        iFreePage = (iPage + 2) % cPages;
    else if (!paPages[(iPage + 3) % cPages].cRefs)
        iFreePage = (iPage + 3) % cPages;
    else if (!paPages[(iPage + 4) % cPages].cRefs)
        iFreePage = (iPage + 4) % cPages;
    else
    {
        /*
         * Search for an unused or matching entry.
         */
        iFreePage = (iPage + 5) % pThis->cPages;
        for (;;)
        {
            if (paPages[iFreePage].HCPhys == HCPhys)
                return iFreePage;
            if (!paPages[iFreePage].cRefs)
                break;

            /* advance, giving up once we've wrapped all the way around. */
            iFreePage = (iFreePage + 1) % cPages;
            if (RT_UNLIKELY(iFreePage == iPage))
                return UINT32_MAX;
        }
    }

    /*
     * Setup the new entry.
     */
    paPages[iFreePage].HCPhys = HCPhys;
    RTCpuSetFill(&paPages[iFreePage].PendingSet);
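    /* Update the PTE atomically below. Nobody else is supposed to modify these
       PTEs concurrently (the cache owns this virtual range), so a failing
       compare-exchange triggers the assertion; the loop is just belt-and-braces. */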
    if (pThis->fLegacyMode)
    {
        X86PGUINT       uOld  = paPages[iFreePage].uPte.pLegacy->u;
        X86PGUINT       uOld2 = uOld; NOREF(uOld2);
        X86PGUINT       uNew  = (uOld & (X86_PTE_G | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT))
                              | X86_PTE_P | X86_PTE_A | X86_PTE_D
                              | (HCPhys & X86_PTE_PG_MASK);
        while (!ASMAtomicCmpXchgExU32(&paPages[iFreePage].uPte.pLegacy->u, uNew, uOld, &uOld))
            AssertMsgFailed(("uOld=%#x uOld2=%#x uNew=%#x\n", uOld, uOld2, uNew));
    }
    else
    {
        X86PGPAEUINT    uOld  = paPages[iFreePage].uPte.pPae->u;
        X86PGPAEUINT    uOld2 = uOld; NOREF(uOld2);
        X86PGPAEUINT    uNew  = (uOld & (X86_PTE_G | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT))
                              | X86_PTE_P | X86_PTE_A | X86_PTE_D
                              | (HCPhys & X86_PTE_PAE_PG_MASK);
        while (!ASMAtomicCmpXchgExU64(&paPages[iFreePage].uPte.pPae->u, uNew, uOld, &uOld))
            AssertMsgFailed(("uOld=%#llx uOld2=%#llx uNew=%#llx\n", uOld, uOld2, uNew));
    }
    return iFreePage;
}


/**
 * Maps a page into the pool.
 *
 * @returns Pointer to the mapping.
 * @param   pThis   The dynamic mapping cache instance.
 * @param   HCPhys  The address of the page to be mapped.
 * @param   piPage  Where to store the page index.
 */
DECLINLINE(void *) pgmR0DynMapPage(PPGMR0DYNMAP pThis, RTHCPHYS HCPhys, uint32_t *piPage)
{
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
    AssertMsg(!(HCPhys & PAGE_OFFSET_MASK), ("HCPhys=%RHp\n", HCPhys));

    /*
     * Find an entry, if possible a matching one. The HCPhys address is hashed
     * down to a page index, collisions are handled by linear searching. Optimize
     * for a hit in the first 5 pages.
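     * (Example, assuming a hypothetical cache size of 1024 pages: HCPhys
     * 0x0000000012345000 hashes to index (0x12345000 >> PAGE_SHIFT) % 1024
     * = 0x12345 % 1024 = 0x345.)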
     *
     * Do the cheap hits here and defer the tedious searching and inserting
     * to a helper function.
     */
    uint32_t const      cPages  = pThis->cPages;
    uint32_t            iPage   = (HCPhys >> PAGE_SHIFT) % cPages;
    PPGMR0DYNMAPENTRY   paPages = pThis->paPages;
    if (paPages[iPage].HCPhys != HCPhys)
    {
        uint32_t iPage2 = (iPage + 1) % cPages;
        if (paPages[iPage2].HCPhys != HCPhys)
        {
            iPage2 = (iPage + 2) % cPages;
            if (paPages[iPage2].HCPhys != HCPhys)
            {
                iPage2 = (iPage + 3) % cPages;
                if (paPages[iPage2].HCPhys != HCPhys)
                {
                    iPage2 = (iPage + 4) % cPages;
                    if (paPages[iPage2].HCPhys != HCPhys)
                    {
                        iPage = pgmR0DynMapPageSlow(pThis, HCPhys, iPage);
                        if (RT_UNLIKELY(iPage == UINT32_MAX))
                        {
                            RTSpinlockRelease(pThis->hSpinlock, &Tmp);
                            return NULL;
                        }
                    }
                    else
                        iPage = iPage2;
                }
                else
                    iPage = iPage2;
            }
            else
                iPage = iPage2;
        }
        else
            iPage = iPage2;
    }

    /*
     * Reference it, update statistics and get the return address.
     */
    if (ASMAtomicIncS32(&paPages[iPage].cRefs) == 1)
    {
        pThis->cLoad++;
        if (pThis->cLoad > pThis->cMaxLoad)
            pThis->cMaxLoad = pThis->cLoad;
        Assert(pThis->cLoad <= pThis->cPages);
    }
    void *pvPage = paPages[iPage].pvPage;

    /*
     * Invalidate the entry?
     */
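    /* Why: the TLB of this CPU may still hold a stale translation for this
       virtual page from an earlier mapping in the slot. If the CPU is still
       listed in the entry's pending set, clear it here and do the INVLPG below
       once the spinlock has been dropped. */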
    RTCPUID idRealCpu = RTMpCpuId();
    bool    fInvalidateIt = RTCpuSetIsMember(&paPages[iPage].PendingSet, idRealCpu);
    if (fInvalidateIt)
        RTCpuSetDel(&paPages[iPage].PendingSet, idRealCpu);

    RTSpinlockRelease(pThis->hSpinlock, &Tmp);

    /*
     * Do the actual invalidation outside the spinlock.
     */
    if (fInvalidateIt)
        ASMInvalidatePage(pvPage);

    *piPage = iPage;
    return pvPage;
}


/**
 * Signals the start of a new set of mappings.
 *
 * Mostly for strictness. PGMDynMapHCPage won't work unless this
 * API is called.
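 *
 * A rough usage sketch (illustrative only; all names are as used in this file):
 * @code
 *      PGMDynMapStartAutoSet(pVCpu);
 *      void *pvPage;
 *      int rc = PGMDynMapHCPage(pVM, HCPhys, &pvPage);
 *      if (RT_SUCCESS(rc))
 *      {
 *          ... access the guest page via pvPage ...
 *      }
 *      PGMDynMapReleaseAutoSet(pVCpu);
 * @endcode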
 *
 * @param   pVCpu   The shared data for the current virtual CPU.
 */
VMMDECL(void) PGMDynMapStartAutoSet(PVMCPU pVCpu)
{
    Assert(pVCpu->pgm.s.AutoSet.cEntries == PGMMAPSET_CLOSED);
    pVCpu->pgm.s.AutoSet.cEntries = 0;
}


/**
 * Releases the dynamic memory mappings made by PGMDynMapHCPage and associated
 * APIs since the PGMDynMapStartAutoSet call.
 *
 * @param   pVCpu   The shared data for the current virtual CPU.
 */
VMMDECL(void) PGMDynMapReleaseAutoSet(PVMCPU pVCpu)
{
    PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet;

    /* close the set */
    uint32_t i = pVCpu->pgm.s.AutoSet.cEntries;
    AssertMsg(i <= RT_ELEMENTS(pVCpu->pgm.s.AutoSet.aEntries), ("%#x (%u)\n", i, i));
    pVCpu->pgm.s.AutoSet.cEntries = PGMMAPSET_CLOSED;

    /* release any pages we're referencing. */
    if (i != 0 && RT_LIKELY(i <= RT_ELEMENTS(pVCpu->pgm.s.AutoSet.aEntries)))
    {
        PPGMR0DYNMAP    pThis = g_pPGMR0DynMap;
        RTSPINLOCKTMP   Tmp   = RTSPINLOCKTMP_INITIALIZER;
        RTSpinlockAcquire(pThis->hSpinlock, &Tmp);

        while (i-- > 0)
        {
            uint32_t iPage = pSet->aEntries[i].iPage;
            Assert(iPage < pThis->cPages);
            int32_t  cRefs = pSet->aEntries[i].cRefs;
            Assert(cRefs > 0);
            pgmR0DynMapReleasePageLocked(pThis, iPage, cRefs);

            pSet->aEntries[i].iPage = UINT16_MAX;
            pSet->aEntries[i].cRefs = 0;
        }

        Assert(pThis->cLoad <= pThis->cPages);
        RTSpinlockRelease(pThis->hSpinlock, &Tmp);
    }
}


/**
 * Migrates the automatic mapping set of the current vCPU if necessary.
 *
 * This is called when re-entering the hardware assisted execution mode after a
 * nip down to ring-3. We run the risk that the CPU might have changed, so we
 * make sure all the cache entries currently in the auto set will be valid on
 * the new CPU. If the CPU didn't change, nothing will happen since none of the
 * entries will be pending invalidation on it.
 *
 * @param   pVCpu   The shared data for the current virtual CPU.
 * @thread  EMT
 */
VMMDECL(void) PGMDynMapMigrateAutoSet(PVMCPU pVCpu)
{
    PPGMMAPSET  pSet = &pVCpu->pgm.s.AutoSet;
    uint32_t    i    = pVCpu->pgm.s.AutoSet.cEntries;
    AssertMsg(i <= RT_ELEMENTS(pVCpu->pgm.s.AutoSet.aEntries), ("%#x (%u)\n", i, i));
    if (i != 0 && RT_LIKELY(i <= RT_ELEMENTS(pVCpu->pgm.s.AutoSet.aEntries)))
    {
        PPGMR0DYNMAP pThis = g_pPGMR0DynMap;
        RTCPUID idRealCpu = RTMpCpuId();

        while (i-- > 0)
        {
            Assert(pSet->aEntries[i].cRefs > 0);
            uint32_t iPage = pSet->aEntries[i].iPage;
            Assert(iPage < pThis->cPages);
            if (RTCpuSetIsMember(&pThis->paPages[iPage].PendingSet, idRealCpu))
            {
                RTCpuSetDel(&pThis->paPages[iPage].PendingSet, idRealCpu);
                ASMInvalidatePage(pThis->paPages[iPage].pvPage);
            }
        }
    }
}


/**
 * As a final resort for a full auto set, try to merge duplicate entries.
 *
 * @param   pSet    The set.
 */
static void pgmDynMapOptimizeAutoSet(PPGMMAPSET pSet)
{
    for (uint32_t i = 0; i < pSet->cEntries; i++)
    {
        uint16_t const iPage = pSet->aEntries[i].iPage;
        uint32_t j = i + 1;
        while (j < pSet->cEntries)
        {
            if (pSet->aEntries[j].iPage != iPage)
                j++;
            else
            {
                /* merge j with i, removing j by moving the last entry into its slot. */
                pSet->aEntries[i].cRefs += pSet->aEntries[j].cRefs;
                pSet->cEntries--;
                if (j < pSet->cEntries)
                {
                    pSet->aEntries[j] = pSet->aEntries[pSet->cEntries];
                    pSet->aEntries[pSet->cEntries].iPage = UINT16_MAX;
                    pSet->aEntries[pSet->cEntries].cRefs = 0;
                }
                else
                {
                    pSet->aEntries[j].iPage = UINT16_MAX;
                    pSet->aEntries[j].cRefs = 0;
                }
            }
        }
    }
}


/* documented elsewhere - a bit of a mess. */
VMMDECL(int) PGMDynMapHCPage(PVM pVM, RTHCPHYS HCPhys, void **ppv)
{
    /*
     * Validate state.
     */
    AssertMsgReturn(pVM->pgm.s.pvR0DynMapUsed == g_pPGMR0DynMap,
                    ("%p != %p\n", pVM->pgm.s.pvR0DynMapUsed, g_pPGMR0DynMap),
                    VERR_ACCESS_DENIED);
    AssertMsg(!(HCPhys & PAGE_OFFSET_MASK), ("HCPhys=%RHp\n", HCPhys));
    PVMCPU pVCpu = VMMGetCpu(pVM);
    AssertPtrReturn(pVCpu, VERR_INTERNAL_ERROR);
    PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet;
    AssertMsgReturn(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries),
                    ("%#x (%u)\n", pSet->cEntries, pSet->cEntries), VERR_WRONG_ORDER);

    /*
     * Map it.
     */
    uint32_t iPage;
    void    *pvPage = pgmR0DynMapPage(g_pPGMR0DynMap, HCPhys, &iPage);
    if (RT_UNLIKELY(!pvPage))
    {
        static uint32_t s_cBitched = 0;
        if (++s_cBitched < 10)
            LogRel(("PGMDynMapHCPage: cLoad=%u/%u cPages=%u\n",
                    g_pPGMR0DynMap->cLoad, g_pPGMR0DynMap->cMaxLoad, g_pPGMR0DynMap->cPages));
        return VERR_PGM_DYNMAP_FAILED;
    }

    /*
     * Add the page to the auto reference set.
     * If it's less than half full, don't bother looking for duplicates.
     */
    if (pSet->cEntries < RT_ELEMENTS(pSet->aEntries) / 2)
    {
        pSet->aEntries[pSet->cEntries].cRefs = 1;
        pSet->aEntries[pSet->cEntries].iPage = iPage;
        pSet->cEntries++;
    }
    else
    {
        Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));
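        /* Look for an existing entry for this page first, so its reference
           count can be bumped instead of burning another slot on a duplicate. */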
        int32_t i = pSet->cEntries;
        while (i-- > 0)
            if (pSet->aEntries[i].iPage == iPage)
            {
                pSet->aEntries[i].cRefs++;
                break;
            }
        if (i < 0)
        {
            if (RT_UNLIKELY(pSet->cEntries >= RT_ELEMENTS(pSet->aEntries)))
                pgmDynMapOptimizeAutoSet(pSet);
            if (RT_LIKELY(pSet->cEntries < RT_ELEMENTS(pSet->aEntries)))
            {
                pSet->aEntries[pSet->cEntries].cRefs = 1;
                pSet->aEntries[pSet->cEntries].iPage = iPage;
                pSet->cEntries++;
            }
            else
            {
                /* We're screwed. */
                pgmR0DynMapReleasePage(g_pPGMR0DynMap, iPage, 1);

                static uint32_t s_cBitched = 0;
                if (++s_cBitched < 10)
                    LogRel(("PGMDynMapHCPage: set is full!\n"));
                return VERR_PGM_DYNMAP_FULL_SET;
            }
        }
    }

    *ppv = pvPage;
    return VINF_SUCCESS;
}
