VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp@31794

Last change on this file since 31794 was 31775, checked in by vboxsync, 15 years ago

PGM: Wrap up all access to PAE/LM PTEs so that we can treat the invalid entries used by PGM_WITH_MMIO_OPTIMIZATIONS as not-present.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 198.6 KB
1/* $Id: PGMAllPool.cpp 31775 2010-08-19 09:48:24Z vboxsync $ */
2/** @file
3 * PGM Shadow Page Pool.
4 */
5
6/*
7 * Copyright (C) 2006-2010 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_PGM_POOL
23#include <VBox/pgm.h>
24#include <VBox/mm.h>
25#include <VBox/em.h>
26#include <VBox/cpum.h>
27#ifdef IN_RC
28# include <VBox/patm.h>
29#endif
30#include "../PGMInternal.h"
31#include <VBox/vm.h>
32#include "../PGMInline.h"
33#include <VBox/disopcode.h>
34#include <VBox/hwacc_vmx.h>
35
36#include <VBox/log.h>
37#include <VBox/err.h>
38#include <iprt/asm.h>
39#include <iprt/asm-amd64-x86.h>
40#include <iprt/string.h>
41
42
43/*******************************************************************************
44* Internal Functions *
45*******************************************************************************/
46RT_C_DECLS_BEGIN
47static void pgmPoolFlushAllInt(PPGMPOOL pPool);
48DECLINLINE(unsigned) pgmPoolTrackGetShadowEntrySize(PGMPOOLKIND enmKind);
49DECLINLINE(unsigned) pgmPoolTrackGetGuestEntrySize(PGMPOOLKIND enmKind);
50static void pgmPoolTrackDeref(PPGMPOOL pPool, PPGMPOOLPAGE pPage);
51static int pgmPoolTrackAddUser(PPGMPOOL pPool, PPGMPOOLPAGE pPage, uint16_t iUser, uint32_t iUserTable);
52static void pgmPoolMonitorModifiedRemove(PPGMPOOL pPool, PPGMPOOLPAGE pPage);
53#ifndef IN_RING3
54DECLEXPORT(int) pgmPoolAccessHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser);
55#endif
56#ifdef LOG_ENABLED
57static const char *pgmPoolPoolKindToStr(uint8_t enmKind);
58#endif
59#if defined(VBOX_STRICT) && defined(PGMPOOL_WITH_OPTIMIZED_DIRTY_PT)
60static void pgmPoolTrackCheckPTPaePae(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PPGMSHWPTPAE pShwPT, PCX86PTPAE pGstPT);
61#endif
62
63int pgmPoolTrackFlushGCPhysPTsSlow(PVM pVM, PPGMPAGE pPhysPage);
64PPGMPOOLPHYSEXT pgmPoolTrackPhysExtAlloc(PVM pVM, uint16_t *piPhysExt);
65void pgmPoolTrackPhysExtFree(PVM pVM, uint16_t iPhysExt);
66void pgmPoolTrackPhysExtFreeList(PVM pVM, uint16_t iPhysExt);
67
68RT_C_DECLS_END
69
70
71/**
72 * Checks if the specified page pool kind is for a 4MB or 2MB guest page.
73 *
74 * @returns true if it's the shadow of a 4MB or 2MB guest page, otherwise false.
75 * @param enmKind The page kind.
76 */
77DECLINLINE(bool) pgmPoolIsBigPage(PGMPOOLKIND enmKind)
78{
79 switch (enmKind)
80 {
81 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB:
82 case PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB:
83 case PGMPOOLKIND_PAE_PT_FOR_PAE_2MB:
84 return true;
85 default:
86 return false;
87 }
88}
89
90
91/**
92 * Flushes a chain of pages sharing the same access monitor.
93 *
94 * @returns VBox status code suitable for scheduling.
95 * @param pPool The pool.
96 * @param pPage A page in the chain.
97 * @todo VBOXSTRICTRC
98 */
99int pgmPoolMonitorChainFlush(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
100{
101 LogFlow(("pgmPoolMonitorChainFlush: Flush page %RGp type=%d\n", pPage->GCPhys, pPage->enmKind));
102
103 /*
104 * Find the list head.
105 */
106 uint16_t idx = pPage->idx;
107 if (pPage->iMonitoredPrev != NIL_PGMPOOL_IDX)
108 {
109 while (pPage->iMonitoredPrev != NIL_PGMPOOL_IDX)
110 {
111 idx = pPage->iMonitoredPrev;
112 Assert(idx != pPage->idx);
113 pPage = &pPool->aPages[idx];
114 }
115 }
116
117 /*
118 * Iterate the list flushing each shadow page.
119 */
120 int rc = VINF_SUCCESS;
121 for (;;)
122 {
123 idx = pPage->iMonitoredNext;
124 Assert(idx != pPage->idx);
125 if (pPage->idx >= PGMPOOL_IDX_FIRST)
126 {
127 int rc2 = pgmPoolFlushPage(pPool, pPage);
128 AssertRC(rc2);
129 }
130 /* next */
131 if (idx == NIL_PGMPOOL_IDX)
132 break;
133 pPage = &pPool->aPages[idx];
134 }
135 return rc;
136}
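
/*
 * A minimal sketch (not from the original file) of how the monitor chain is
 * organized: pages sharing one physical access handler are linked by pool
 * index via iMonitoredPrev/iMonitoredNext, terminated with NIL_PGMPOOL_IDX.
 * Hypothetical indices, purely for illustration:
 *
 *     head: aPages[7] <-> aPages[12] <-> aPages[3] -> NIL_PGMPOOL_IDX
 *
 * pgmPoolMonitorChainFlush() above first walks iMonitoredPrev back to the
 * head, then flushes every page while following iMonitoredNext.
 */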
137
138
139/**
140 * Wrapper for reading the guest entry being modified via the current context.
141 *
142 * @returns VBox status code suitable for scheduling.
143 * @param pVM VM Handle.
144 * @param pvDst Destination address.
145 * @param pvSrc Source guest virtual address.
146 * @param GCPhysSrc The source guest physical address.
147 * @param cb Size of data to read.
148 */
149DECLINLINE(int) pgmPoolPhysSimpleReadGCPhys(PVM pVM, void *pvDst, CTXTYPE(RTGCPTR, RTHCPTR, RTGCPTR) pvSrc, RTGCPHYS GCPhysSrc, size_t cb)
150{
151#if defined(IN_RING3)
152 memcpy(pvDst, (RTHCPTR)((uintptr_t)pvSrc & ~(RTHCUINTPTR)(cb - 1)), cb);
153 return VINF_SUCCESS;
154#else
155 /** @todo in RC we could attempt to use the virtual address, although this can cause many faults (PAE Windows XP guest). */
156 return PGMPhysSimpleReadGCPhys(pVM, pvDst, GCPhysSrc & ~(RTGCPHYS)(cb - 1), cb);
157#endif
158}
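
/*
 * A minimal sketch (hypothetical values, not part of the original file) of
 * why the source addresses above are masked with ~(cb - 1): a guest write
 * need not be entry aligned, and aligning down guarantees that the complete
 * entry being hit is read back.
 */
#if 0 /* illustrative only */
    /* A 2-byte guest write at page offset 0x102 hits the 32-bit PTE that
       starts at offset 0x100; masking recovers the entry start. */
    uintptr_t offFault = 0x102;
    size_t    cbEntry  = sizeof(X86PTE);                        /* 4 bytes */
    uintptr_t offEntry = offFault & ~(uintptr_t)(cbEntry - 1);  /* 0x100 */
#endif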
159
160/**
161 * Process shadow entries before they are changed by the guest.
162 *
163 * For PT entries we will clear them. For PD entries, we'll simply check
164 * for mapping conflicts and set the SyncCR3 FF if found.
165 *
166 * @param pVCpu VMCPU handle
167 * @param pPool The pool.
168 * @param pPage The head page.
169 * @param GCPhysFault The guest physical fault address.
170 * @param uAddress In R0 and GC this is the guest context fault address (flat).
171 * In R3 this is the host context 'fault' address.
172 * @param cbWrite Write size; might be zero if the caller knows we're not crossing entry boundaries
173 */
174void pgmPoolMonitorChainChanging(PVMCPU pVCpu, PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTGCPHYS GCPhysFault, CTXTYPE(RTGCPTR, RTHCPTR, RTGCPTR) pvAddress, unsigned cbWrite)
175{
176 AssertMsg(pPage->iMonitoredPrev == NIL_PGMPOOL_IDX, ("%#x (idx=%#x)\n", pPage->iMonitoredPrev, pPage->idx));
177 const unsigned off = GCPhysFault & PAGE_OFFSET_MASK;
178 PVM pVM = pPool->CTX_SUFF(pVM);
179
180 LogFlow(("pgmPoolMonitorChainChanging: %RGv phys=%RGp cbWrite=%d\n", (RTGCPTR)(CTXTYPE(RTGCPTR, uintptr_t, RTGCPTR))pvAddress, GCPhysFault, cbWrite));
181
182 for (;;)
183 {
184 union
185 {
186 void *pv;
187 PX86PT pPT;
188 PPGMSHWPTPAE pPTPae;
189 PX86PD pPD;
190 PX86PDPAE pPDPae;
191 PX86PDPT pPDPT;
192 PX86PML4 pPML4;
193 } uShw;
194
195 LogFlow(("pgmPoolMonitorChainChanging: page idx=%d phys=%RGp (next=%d) kind=%s cbWrite=%d\n", pPage->idx, pPage->GCPhys, pPage->iMonitoredNext, pgmPoolPoolKindToStr(pPage->enmKind), cbWrite));
196
197 uShw.pv = NULL;
198 switch (pPage->enmKind)
199 {
200 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT:
201 {
202 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPT));
203 uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
204 const unsigned iShw = off / sizeof(X86PTE);
205 LogFlow(("PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT iShw=%x\n", iShw));
206 if (uShw.pPT->a[iShw].n.u1Present)
207 {
208 X86PTE GstPte;
209
210 int rc = pgmPoolPhysSimpleReadGCPhys(pVM, &GstPte, pvAddress, GCPhysFault, sizeof(GstPte));
211 AssertRC(rc);
212 Log4(("pgmPoolMonitorChainChanging 32_32: deref %016RX64 GCPhys %08RX32\n", uShw.pPT->a[iShw].u & X86_PTE_PAE_PG_MASK, GstPte.u & X86_PTE_PG_MASK));
213 pgmPoolTracDerefGCPhysHint(pPool, pPage,
214 uShw.pPT->a[iShw].u & X86_PTE_PAE_PG_MASK,
215 GstPte.u & X86_PTE_PG_MASK,
216 iShw);
217 ASMAtomicWriteSize(&uShw.pPT->a[iShw], 0);
218 }
219 break;
220 }
221
222 /* page/2 sized */
223 case PGMPOOLKIND_PAE_PT_FOR_32BIT_PT:
224 {
225 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPT));
226 uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
227 if (!((off ^ pPage->GCPhys) & (PAGE_SIZE / 2)))
228 {
229 const unsigned iShw = (off / sizeof(X86PTE)) & (X86_PG_PAE_ENTRIES - 1);
230 LogFlow(("PGMPOOLKIND_PAE_PT_FOR_32BIT_PT iShw=%x\n", iShw));
231 if (PGMSHWPTEPAE_IS_P(uShw.pPTPae->a[iShw]))
232 {
233 X86PTE GstPte;
234 int rc = pgmPoolPhysSimpleReadGCPhys(pVM, &GstPte, pvAddress, GCPhysFault, sizeof(GstPte));
235 AssertRC(rc);
236
237 Log4(("pgmPoolMonitorChainChanging pae_32: deref %016RX64 GCPhys %08RX32\n", PGMSHWPTEPAE_GET_LOG(uShw.pPTPae->a[iShw]), GstPte.u & X86_PTE_PG_MASK));
238 pgmPoolTracDerefGCPhysHint(pPool, pPage,
239 PGMSHWPTEPAE_GET_HCPHYS(uShw.pPTPae->a[iShw]),
240 GstPte.u & X86_PTE_PG_MASK,
241 iShw);
242 PGMSHWPTEPAE_ATOMIC_SET(uShw.pPTPae->a[iShw], 0);
243 }
244 }
245 break;
246 }
247
248 case PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD:
249 case PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD:
250 case PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD:
251 case PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD:
252 {
253 unsigned iGst = off / sizeof(X86PDE);
254 unsigned iShwPdpt = iGst / 256;
255 unsigned iShw = (iGst % 256) * 2;
256 uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
257
258 LogFlow(("pgmPoolMonitorChainChanging PAE for 32 bits: iGst=%x iShw=%x idx = %d page idx=%d\n", iGst, iShw, iShwPdpt, pPage->enmKind - PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD));
259 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPD));
260 if (iShwPdpt == pPage->enmKind - (unsigned)PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD)
261 {
262 for (unsigned i = 0; i < 2; i++)
263 {
264# ifndef IN_RING0
265 if ((uShw.pPDPae->a[iShw + i].u & (PGM_PDFLAGS_MAPPING | X86_PDE_P)) == (PGM_PDFLAGS_MAPPING | X86_PDE_P))
266 {
267 Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
268 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
269 LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShwPdpt=%#x iShw=%#x!\n", iShwPdpt, iShw+i));
270 break;
271 }
272 else
273# endif /* !IN_RING0 */
274 if (uShw.pPDPae->a[iShw+i].n.u1Present)
275 {
276 LogFlow(("pgmPoolMonitorChainChanging: pae pd iShw=%#x: %RX64 -> freeing it!\n", iShw+i, uShw.pPDPae->a[iShw+i].u));
277 pgmPoolFree(pVM,
278 uShw.pPDPae->a[iShw+i].u & X86_PDE_PAE_PG_MASK,
279 pPage->idx,
280 iShw + i);
281 ASMAtomicWriteSize(&uShw.pPDPae->a[iShw+i], 0);
282 }
283
284 /* paranoia / a bit assumptive. */
285 if ( (off & 3)
286 && (off & 3) + cbWrite > 4)
287 {
288 const unsigned iShw2 = iShw + 2 + i;
289 if (iShw2 < RT_ELEMENTS(uShw.pPDPae->a))
290 {
291# ifndef IN_RING0
292 if ((uShw.pPDPae->a[iShw2].u & (PGM_PDFLAGS_MAPPING | X86_PDE_P)) == (PGM_PDFLAGS_MAPPING | X86_PDE_P))
293 {
294 Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
295 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
296 LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShwPdpt=%#x iShw2=%#x!\n", iShwPdpt, iShw2));
297 break;
298 }
299 else
300# endif /* !IN_RING0 */
301 if (uShw.pPDPae->a[iShw2].n.u1Present)
302 {
303 LogFlow(("pgmPoolMonitorChainChanging: pae pd iShw=%#x: %RX64 -> freeing it!\n", iShw2, uShw.pPDPae->a[iShw2].u));
304 pgmPoolFree(pVM,
305 uShw.pPDPae->a[iShw2].u & X86_PDE_PAE_PG_MASK,
306 pPage->idx,
307 iShw2);
308 ASMAtomicWriteSize(&uShw.pPDPae->a[iShw2].u, 0);
309 }
310 }
311 }
312 }
313 }
314 break;
315 }
316
317 case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
318 {
319 uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
320 const unsigned iShw = off / sizeof(X86PTEPAE);
321 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPT));
322 if (PGMSHWPTEPAE_IS_P(uShw.pPTPae->a[iShw]))
323 {
324 X86PTEPAE GstPte;
325 int rc = pgmPoolPhysSimpleReadGCPhys(pVM, &GstPte, pvAddress, GCPhysFault, sizeof(GstPte));
326 AssertRC(rc);
327
328 Log4(("pgmPoolMonitorChainChanging pae: deref %016RX64 GCPhys %016RX64\n", PGMSHWPTEPAE_GET_HCPHYS(uShw.pPTPae->a[iShw]), GstPte.u & X86_PTE_PAE_PG_MASK));
329 pgmPoolTracDerefGCPhysHint(pPool, pPage,
330 PGMSHWPTEPAE_GET_HCPHYS(uShw.pPTPae->a[iShw]),
331 GstPte.u & X86_PTE_PAE_PG_MASK,
332 iShw);
333 PGMSHWPTEPAE_ATOMIC_SET(uShw.pPTPae->a[iShw], 0);
334 }
335
336 /* paranoia / a bit assumptive. */
337 if ( (off & 7)
338 && (off & 7) + cbWrite > sizeof(X86PTEPAE))
339 {
340 const unsigned iShw2 = (off + cbWrite - 1) / sizeof(X86PTEPAE);
341 AssertBreak(iShw2 < RT_ELEMENTS(uShw.pPTPae->a));
342
343 if (PGMSHWPTEPAE_IS_P(uShw.pPTPae->a[iShw2]))
344 {
345 X86PTEPAE GstPte;
346# ifdef IN_RING3
347 int rc = pgmPoolPhysSimpleReadGCPhys(pVM, &GstPte, (RTHCPTR)((RTHCUINTPTR)pvAddress + sizeof(GstPte)), GCPhysFault + sizeof(GstPte), sizeof(GstPte));
348# else
349 int rc = pgmPoolPhysSimpleReadGCPhys(pVM, &GstPte, pvAddress + sizeof(GstPte), GCPhysFault + sizeof(GstPte), sizeof(GstPte));
350# endif
351 AssertRC(rc);
352 Log4(("pgmPoolMonitorChainChanging pae: deref %016RX64 GCPhys %016RX64\n", PGMSHWPTEPAE_GET_HCPHYS(uShw.pPTPae->a[iShw2]), GstPte.u & X86_PTE_PAE_PG_MASK));
353 pgmPoolTracDerefGCPhysHint(pPool, pPage,
354 PGMSHWPTEPAE_GET_HCPHYS(uShw.pPTPae->a[iShw2]),
355 GstPte.u & X86_PTE_PAE_PG_MASK,
356 iShw2);
357 PGMSHWPTEPAE_ATOMIC_SET(uShw.pPTPae->a[iShw2], 0);
358 }
359 }
360 break;
361 }
362
363 case PGMPOOLKIND_32BIT_PD:
364 {
365 uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
366 const unsigned iShw = off / sizeof(X86PTE); // ASSUMING 32-bit guest paging!
367
368 LogFlow(("pgmPoolMonitorChainChanging: PGMPOOLKIND_32BIT_PD %x\n", iShw));
369 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPD));
370# ifndef IN_RING0
371 if (uShw.pPD->a[iShw].u & PGM_PDFLAGS_MAPPING)
372 {
373 Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
374 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
375 STAM_COUNTER_INC(&(pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestCR3WriteConflict));
376 LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw=%#x!\n", iShw));
377 break;
378 }
379# endif /* !IN_RING0 */
380# ifndef IN_RING0
381 else
382# endif /* !IN_RING0 */
383 {
384 if (uShw.pPD->a[iShw].n.u1Present)
385 {
386 LogFlow(("pgmPoolMonitorChainChanging: 32 bit pd iShw=%#x: %RX64 -> freeing it!\n", iShw, uShw.pPD->a[iShw].u));
387 pgmPoolFree(pVM,
388 uShw.pPD->a[iShw].u & X86_PDE_PAE_PG_MASK,
389 pPage->idx,
390 iShw);
391 ASMAtomicWriteSize(&uShw.pPD->a[iShw].u, 0);
392 }
393 }
394 /* paranoia / a bit assumptive. */
395 if ( (off & 3)
396 && (off & 3) + cbWrite > sizeof(X86PTE))
397 {
398 const unsigned iShw2 = (off + cbWrite - 1) / sizeof(X86PTE);
399 if ( iShw2 != iShw
400 && iShw2 < RT_ELEMENTS(uShw.pPD->a))
401 {
402# ifndef IN_RING0
403 if (uShw.pPD->a[iShw2].u & PGM_PDFLAGS_MAPPING)
404 {
405 Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
406 STAM_COUNTER_INC(&(pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestCR3WriteConflict));
407 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
408 LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw2=%#x!\n", iShw2));
409 break;
410 }
411# endif /* !IN_RING0 */
412# ifndef IN_RING0
413 else
414# endif /* !IN_RING0 */
415 {
416 if (uShw.pPD->a[iShw2].n.u1Present)
417 {
418 LogFlow(("pgmPoolMonitorChainChanging: 32 bit pd iShw=%#x: %RX64 -> freeing it!\n", iShw2, uShw.pPD->a[iShw2].u));
419 pgmPoolFree(pVM,
420 uShw.pPD->a[iShw2].u & X86_PDE_PAE_PG_MASK,
421 pPage->idx,
422 iShw2);
423 ASMAtomicWriteSize(&uShw.pPD->a[iShw2].u, 0);
424 }
425 }
426 }
427 }
428#if 0 /* useful when running PGMAssertCR3(), a bit too troublesome for general use (TLBs). */
429 if ( uShw.pPD->a[iShw].n.u1Present
430 && !VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3))
431 {
432 LogFlow(("pgmPoolMonitorChainChanging: iShw=%#x: %RX32 -> freeing it!\n", iShw, uShw.pPD->a[iShw].u));
433# ifdef IN_RC /* TLB load - we're pushing things a bit... */
434 ASMProbeReadByte(pvAddress);
435# endif
436 pgmPoolFree(pVM, uShw.pPD->a[iShw].u & X86_PDE_PG_MASK, pPage->idx, iShw);
437 ASMAtomicWriteSize(&uShw.pPD->a[iShw].u, 0);
438 }
439#endif
440 break;
441 }
442
443 case PGMPOOLKIND_PAE_PD_FOR_PAE_PD:
444 {
445 uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
446 const unsigned iShw = off / sizeof(X86PDEPAE);
447 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPD));
448#ifndef IN_RING0
449 if (uShw.pPDPae->a[iShw].u & PGM_PDFLAGS_MAPPING)
450 {
451 Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
452 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
453 STAM_COUNTER_INC(&(pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestCR3WriteConflict));
454 LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw=%#x!\n", iShw));
455 break;
456 }
457#endif /* !IN_RING0 */
458 /*
459 * Causes trouble when the guest uses a PDE to refer to the whole page table level
460 * structure. (Invalidate here; faults later on when it tries to change the page
461 * table entries -> recheck; probably only applies to the RC case.)
462 */
463# ifndef IN_RING0
464 else
465# endif /* !IN_RING0 */
466 {
467 if (uShw.pPDPae->a[iShw].n.u1Present)
468 {
469 LogFlow(("pgmPoolMonitorChainChanging: pae pd iShw=%#x: %RX64 -> freeing it!\n", iShw, uShw.pPDPae->a[iShw].u));
470 pgmPoolFree(pVM,
471 uShw.pPDPae->a[iShw].u & X86_PDE_PAE_PG_MASK,
472 pPage->idx,
473 iShw);
474 ASMAtomicWriteSize(&uShw.pPDPae->a[iShw].u, 0);
475 }
476 }
477 /* paranoia / a bit assumptive. */
478 if ( (off & 7)
479 && (off & 7) + cbWrite > sizeof(X86PDEPAE))
480 {
481 const unsigned iShw2 = (off + cbWrite - 1) / sizeof(X86PDEPAE);
482 AssertBreak(iShw2 < RT_ELEMENTS(uShw.pPDPae->a));
483
484#ifndef IN_RING0
485 if ( iShw2 != iShw
486 && uShw.pPDPae->a[iShw2].u & PGM_PDFLAGS_MAPPING)
487 {
488 Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
489 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
490 STAM_COUNTER_INC(&(pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestCR3WriteConflict));
491 LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw2=%#x!\n", iShw2));
492 break;
493 }
494#endif /* !IN_RING0 */
495# ifndef IN_RING0
496 else
497# endif /* !IN_RING0 */
498 if (uShw.pPDPae->a[iShw2].n.u1Present)
499 {
500 LogFlow(("pgmPoolMonitorChainChanging: pae pd iShw2=%#x: %RX64 -> freeing it!\n", iShw2, uShw.pPDPae->a[iShw2].u));
501 pgmPoolFree(pVM,
502 uShw.pPDPae->a[iShw2].u & X86_PDE_PAE_PG_MASK,
503 pPage->idx,
504 iShw2);
505 ASMAtomicWriteSize(&uShw.pPDPae->a[iShw2].u, 0);
506 }
507 }
508 break;
509 }
510
511 case PGMPOOLKIND_PAE_PDPT:
512 {
513 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPDPT));
514 /*
515 * Hopefully this doesn't happen very often:
516 * - touching unused parts of the page
517 * - messing with the bits of pd pointers without changing the physical address
518 */
519 /* PDPT roots are not page aligned; 32 byte only! */
520 const unsigned offPdpt = GCPhysFault - pPage->GCPhys;
521
522 uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
523 const unsigned iShw = offPdpt / sizeof(X86PDPE);
524 if (iShw < X86_PG_PAE_PDPE_ENTRIES) /* don't use RT_ELEMENTS(uShw.pPDPT->a), because that's for long mode only */
525 {
526# ifndef IN_RING0
527 if (uShw.pPDPT->a[iShw].u & PGM_PLXFLAGS_MAPPING)
528 {
529 Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
530 STAM_COUNTER_INC(&(pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestCR3WriteConflict));
531 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
532 LogFlow(("pgmPoolMonitorChainChanging: Detected pdpt conflict at iShw=%#x!\n", iShw));
533 break;
534 }
535# endif /* !IN_RING0 */
536# ifndef IN_RING0
537 else
538# endif /* !IN_RING0 */
539 if (uShw.pPDPT->a[iShw].n.u1Present)
540 {
541 LogFlow(("pgmPoolMonitorChainChanging: pae pdpt iShw=%#x: %RX64 -> freeing it!\n", iShw, uShw.pPDPT->a[iShw].u));
542 pgmPoolFree(pVM,
543 uShw.pPDPT->a[iShw].u & X86_PDPE_PG_MASK,
544 pPage->idx,
545 iShw);
546 ASMAtomicWriteSize(&uShw.pPDPT->a[iShw].u, 0);
547 }
548
549 /* paranoia / a bit assumptive. */
550 if ( (offPdpt & 7)
551 && (offPdpt & 7) + cbWrite > sizeof(X86PDPE))
552 {
553 const unsigned iShw2 = (offPdpt + cbWrite - 1) / sizeof(X86PDPE);
554 if ( iShw2 != iShw
555 && iShw2 < X86_PG_PAE_PDPE_ENTRIES)
556 {
557# ifndef IN_RING0
558 if (uShw.pPDPT->a[iShw2].u & PGM_PLXFLAGS_MAPPING)
559 {
560 Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
561 STAM_COUNTER_INC(&(pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestCR3WriteConflict));
562 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
563 LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw2=%#x!\n", iShw2));
564 break;
565 }
566# endif /* !IN_RING0 */
567# ifndef IN_RING0
568 else
569# endif /* !IN_RING0 */
570 if (uShw.pPDPT->a[iShw2].n.u1Present)
571 {
572 LogFlow(("pgmPoolMonitorChainChanging: pae pdpt iShw=%#x: %RX64 -> freeing it!\n", iShw2, uShw.pPDPT->a[iShw2].u));
573 pgmPoolFree(pVM,
574 uShw.pPDPT->a[iShw2].u & X86_PDPE_PG_MASK,
575 pPage->idx,
576 iShw2);
577 ASMAtomicWriteSize(&uShw.pPDPT->a[iShw2].u, 0);
578 }
579 }
580 }
581 }
582 break;
583 }
584
585#ifndef IN_RC
586 case PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD:
587 {
588 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPD));
589 uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
590 const unsigned iShw = off / sizeof(X86PDEPAE);
591 Assert(!(uShw.pPDPae->a[iShw].u & PGM_PDFLAGS_MAPPING));
592 if (uShw.pPDPae->a[iShw].n.u1Present)
593 {
594 LogFlow(("pgmPoolMonitorChainChanging: pae pd iShw=%#x: %RX64 -> freeing it!\n", iShw, uShw.pPDPae->a[iShw].u));
595 pgmPoolFree(pVM,
596 uShw.pPDPae->a[iShw].u & X86_PDE_PAE_PG_MASK,
597 pPage->idx,
598 iShw);
599 ASMAtomicWriteSize(&uShw.pPDPae->a[iShw].u, 0);
600 }
601 /* paranoia / a bit assumptive. */
602 if ( (off & 7)
603 && (off & 7) + cbWrite > sizeof(X86PDEPAE))
604 {
605 const unsigned iShw2 = (off + cbWrite - 1) / sizeof(X86PDEPAE);
606 AssertBreak(iShw2 < RT_ELEMENTS(uShw.pPDPae->a));
607
608 Assert(!(uShw.pPDPae->a[iShw2].u & PGM_PDFLAGS_MAPPING));
609 if (uShw.pPDPae->a[iShw2].n.u1Present)
610 {
611 LogFlow(("pgmPoolMonitorChainChanging: pae pd iShw2=%#x: %RX64 -> freeing it!\n", iShw2, uShw.pPDPae->a[iShw2].u));
612 pgmPoolFree(pVM,
613 uShw.pPDPae->a[iShw2].u & X86_PDE_PAE_PG_MASK,
614 pPage->idx,
615 iShw2);
616 ASMAtomicWriteSize(&uShw.pPDPae->a[iShw2].u, 0);
617 }
618 }
619 break;
620 }
621
622 case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
623 {
624 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPDPT));
625 /*
626 * Hopefully this doesn't happen very often:
627 * - messing with the bits of pd pointers without changing the physical address
628 */
629 uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
630 const unsigned iShw = off / sizeof(X86PDPE);
631 if (uShw.pPDPT->a[iShw].n.u1Present)
632 {
633 LogFlow(("pgmPoolMonitorChainChanging: pdpt iShw=%#x: %RX64 -> freeing it!\n", iShw, uShw.pPDPT->a[iShw].u));
634 pgmPoolFree(pVM, uShw.pPDPT->a[iShw].u & X86_PDPE_PG_MASK, pPage->idx, iShw);
635 ASMAtomicWriteSize(&uShw.pPDPT->a[iShw].u, 0);
636 }
637 /* paranoia / a bit assumptive. */
638 if ( (off & 7)
639 && (off & 7) + cbWrite > sizeof(X86PDPE))
640 {
641 const unsigned iShw2 = (off + cbWrite - 1) / sizeof(X86PDPE);
642 if (uShw.pPDPT->a[iShw2].n.u1Present)
643 {
644 LogFlow(("pgmPoolMonitorChainChanging: pdpt iShw2=%#x: %RX64 -> freeing it!\n", iShw2, uShw.pPDPT->a[iShw2].u));
645 pgmPoolFree(pVM, uShw.pPDPT->a[iShw2].u & X86_PDPE_PG_MASK, pPage->idx, iShw2);
646 ASMAtomicWriteSize(&uShw.pPDPT->a[iShw2].u, 0);
647 }
648 }
649 break;
650 }
651
652 case PGMPOOLKIND_64BIT_PML4:
653 {
654 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPML4));
655 /*
656 * Hopefully this doesn't happen very often:
657 * - messing with the bits of pd pointers without changing the physical address
658 */
659 uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
660 const unsigned iShw = off / sizeof(X86PDPE);
661 if (uShw.pPML4->a[iShw].n.u1Present)
662 {
663 LogFlow(("pgmPoolMonitorChainChanging: pml4 iShw=%#x: %RX64 -> freeing it!\n", iShw, uShw.pPML4->a[iShw].u));
664 pgmPoolFree(pVM, uShw.pPML4->a[iShw].u & X86_PML4E_PG_MASK, pPage->idx, iShw);
665 ASMAtomicWriteSize(&uShw.pPML4->a[iShw].u, 0);
666 }
667 /* paranoia / a bit assumptive. */
668 if ( (off & 7)
669 && (off & 7) + cbWrite > sizeof(X86PDPE))
670 {
671 const unsigned iShw2 = (off + cbWrite - 1) / sizeof(X86PML4E);
672 if (uShw.pPML4->a[iShw2].n.u1Present)
673 {
674 LogFlow(("pgmPoolMonitorChainChanging: pml4 iShw2=%#x: %RX64 -> freeing it!\n", iShw2, uShw.pPML4->a[iShw2].u));
675 pgmPoolFree(pVM, uShw.pPML4->a[iShw2].u & X86_PML4E_PG_MASK, pPage->idx, iShw2);
676 ASMAtomicWriteSize(&uShw.pPML4->a[iShw2].u, 0);
677 }
678 }
679 break;
680 }
681#endif /* !IN_RC */
682
683 default:
684 AssertFatalMsgFailed(("enmKind=%d\n", pPage->enmKind));
685 }
686 PGM_DYNMAP_UNUSED_HINT_VM(pVM, uShw.pv);
687
688 /* next */
689 if (pPage->iMonitoredNext == NIL_PGMPOOL_IDX)
690 return;
691 pPage = &pPool->aPages[pPage->iMonitoredNext];
692 }
693}
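
/*
 * A worked example (hypothetical numbers) of the "paranoia / a bit
 * assumptive" blocks above: an unaligned write can straddle two 8-byte
 * entries, so a second index iShw2 is derived from the last byte written.
 *
 *     off = 0x104, cbWrite = 8              (PAE PDEs, 8 bytes each)
 *     iShw  = 0x104 / 8            = 32     (first entry touched)
 *     (off & 7) = 4, 4 + 8 > 8     -> the write crosses an entry boundary
 *     iShw2 = (0x104 + 8 - 1) / 8  = 33     (second entry touched)
 *
 * Both entries are then checked and, if present, freed and cleared.
 */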
694
695# ifndef IN_RING3
696/**
697 * Checks if an access could be a fork operation in progress.
698 *
699 * Meaning that the guest is setting up the parent process for copy-on-write.
700 *
701 * @returns true if it's likely that we're forking, otherwise false.
702 * @param pPool The pool.
703 * @param pDis The disassembled instruction.
704 * @param offFault The access offset.
705 */
706DECLINLINE(bool) pgmPoolMonitorIsForking(PPGMPOOL pPool, PDISCPUSTATE pDis, unsigned offFault)
707{
708 /*
709 * i386 Linux uses btr to clear X86_PTE_RW.
710 * The functions involved are (2.6.16 source inspection):
711 * clear_bit
712 * ptep_set_wrprotect
713 * copy_one_pte
714 * copy_pte_range
715 * copy_pmd_range
716 * copy_pud_range
717 * copy_page_range
718 * dup_mmap
719 * dup_mm
720 * copy_mm
721 * copy_process
722 * do_fork
723 */
724 if ( pDis->pCurInstr->opcode == OP_BTR
725 && !(offFault & 4)
726 /** @todo Validate that the bit index is X86_PTE_RW. */
727 )
728 {
729 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,Fork));
730 return true;
731 }
732 return false;
733}
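
/*
 * A hedged sketch of the guest-side pattern matched above, assuming the
 * 2.6.16 call chain listed in the comment (illustrative, not from the
 * original file): ptep_set_wrprotect() ends up in
 * clear_bit(_PAGE_BIT_RW, ptep), which the compiler emits as a btr on the
 * low dword of the PTE. X86_PTE_RW is bit 1 and always lives in the low
 * dword, hence the !(offFault & 4) test for 8-byte PAE entries.
 *
 *     lock btr dword [ptep], 1    ; clear the R/W bit -> write-protect PTE
 */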
734
735
736/**
737 * Determine whether the page is likely to have been reused.
738 *
739 * @returns true if we consider the page as being reused for a different purpose.
740 * @returns false if we consider it to still be a paging page.
741 * @param pVM VM Handle.
742 * @param pVCpu VMCPU Handle.
743 * @param pRegFrame Trap register frame.
744 * @param pDis The disassembly info for the faulting instruction.
745 * @param pvFault The fault address.
746 *
747 * @remark The REP prefix check is left to the caller because of STOSD/W.
748 */
749DECLINLINE(bool) pgmPoolMonitorIsReused(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pDis, RTGCPTR pvFault)
750{
751#ifndef IN_RC
752 /** @todo could make this general, faulting close to rsp should be a safe reuse heuristic. */
753 if ( HWACCMHasPendingIrq(pVM)
754 && (pRegFrame->rsp - pvFault) < 32)
755 {
756 /* Fault caused by stack writes while trying to inject an interrupt event. */
757 Log(("pgmPoolMonitorIsReused: reused %RGv for interrupt stack (rsp=%RGv).\n", pvFault, pRegFrame->rsp));
758 return true;
759 }
760#else
761 NOREF(pVM); NOREF(pvFault);
762#endif
763
764 LogFlow(("Reused instr %RGv %d at %RGv param1.flags=%x param1.reg=%d\n", pRegFrame->rip, pDis->pCurInstr->opcode, pvFault, pDis->param1.flags, pDis->param1.base.reg_gen));
765
766 /* Non-supervisor mode write means it's used for something else. */
767 if (CPUMGetGuestCPL(pVCpu, pRegFrame) != 0)
768 return true;
769
770 switch (pDis->pCurInstr->opcode)
771 {
772 /* call implies the actual push of the return address faulted */
773 case OP_CALL:
774 Log4(("pgmPoolMonitorIsReused: CALL\n"));
775 return true;
776 case OP_PUSH:
777 Log4(("pgmPoolMonitorIsReused: PUSH\n"));
778 return true;
779 case OP_PUSHF:
780 Log4(("pgmPoolMonitorIsReused: PUSHF\n"));
781 return true;
782 case OP_PUSHA:
783 Log4(("pgmPoolMonitorIsReused: PUSHA\n"));
784 return true;
785 case OP_FXSAVE:
786 Log4(("pgmPoolMonitorIsReused: FXSAVE\n"));
787 return true;
788 case OP_MOVNTI: /* solaris - block_zero_no_xmm */
789 Log4(("pgmPoolMonitorIsReused: MOVNTI\n"));
790 return true;
791 case OP_MOVNTDQ: /* solaris - hwblkclr & hwblkpagecopy */
792 Log4(("pgmPoolMonitorIsReused: MOVNTDQ\n"));
793 return true;
794 case OP_MOVSWD:
795 case OP_STOSWD:
796 if ( pDis->prefix == (PREFIX_REP|PREFIX_REX)
797 && pRegFrame->rcx >= 0x40
798 )
799 {
800 Assert(pDis->mode == CPUMODE_64BIT);
801
802 Log(("pgmPoolMonitorIsReused: OP_STOSQ\n"));
803 return true;
804 }
805 return false;
806 }
807 if ( ( (pDis->param1.flags & USE_REG_GEN32)
808 || (pDis->param1.flags & USE_REG_GEN64))
809 && (pDis->param1.base.reg_gen == USE_REG_ESP))
810 {
811 Log4(("pgmPoolMonitorIsReused: ESP\n"));
812 return true;
813 }
814
815 return false;
816}
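
/*
 * Illustrative (hypothetical) faulting instructions the heuristic above
 * classifies as reuse; a page the guest pushes to or block-copies into is
 * almost certainly no longer a page table:
 *
 *     call [mem]          ; the implicit return-address push faulted
 *     push eax            ; the stack grew into the monitored page
 *     movnti [edi], eax   ; non-temporal block store (Solaris block zero)
 *     mov [esp+8], eax    ; any ESP-relative write
 *
 * Any CPL != 0 write is treated the same way, since guests do not edit page
 * tables from user mode.
 */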
817
818/**
819 * Flushes the page being accessed.
820 *
821 * @returns VBox status code suitable for scheduling.
822 * @param pVM The VM handle.
823 * @param pVCpu The VMCPU handle.
824 * @param pPool The pool.
825 * @param pPage The pool page (head).
826 * @param pDis The disassembly of the write instruction.
827 * @param pRegFrame The trap register frame.
828 * @param GCPhysFault The fault address as guest physical address.
829 * @param pvFault The fault address.
830 * @todo VBOXSTRICTRC
831 */
832static int pgmPoolAccessHandlerFlush(PVM pVM, PVMCPU pVCpu, PPGMPOOL pPool, PPGMPOOLPAGE pPage, PDISCPUSTATE pDis,
833 PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, RTGCPTR pvFault)
834{
835 /*
836 * First, do the flushing.
837 */
838 int rc = pgmPoolMonitorChainFlush(pPool, pPage);
839
840 /*
841 * Emulate the instruction (xp/w2k problem, requires pc/cr2/sp detection).
842 * Must do this in raw mode (!); XP boot will fail otherwise.
843 */
844 uint32_t cbWritten;
845 VBOXSTRICTRC rc2 = EMInterpretInstructionCPU(pVM, pVCpu, pDis, pRegFrame, pvFault, EMCODETYPE_ALL, &cbWritten);
846 if (RT_SUCCESS(rc2))
847 {
848 pRegFrame->rip += pDis->opsize;
849 AssertMsg(rc2 == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rc2))); /* ASSUMES no complicated stuff here. */
850 }
851 else if (rc2 == VERR_EM_INTERPRETER)
852 {
853#ifdef IN_RC
854 if (PATMIsPatchGCAddr(pVM, pRegFrame->eip))
855 {
856 LogFlow(("pgmPoolAccessHandlerPTWorker: Interpretation failed for patch code %04x:%RGv, ignoring.\n",
857 pRegFrame->cs, (RTGCPTR)pRegFrame->eip));
858 rc = VINF_SUCCESS;
859 STAM_COUNTER_INC(&pPool->StatMonitorRZIntrFailPatch2);
860 }
861 else
862#endif
863 {
864 rc = VINF_EM_RAW_EMULATE_INSTR;
865 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,EmulateInstr));
866 }
867 }
868 else
869 rc = VBOXSTRICTRC_VAL(rc2);
870
871 LogFlow(("pgmPoolAccessHandlerPT: returns %Rrc (flushed)\n", rc));
872 return rc;
873}
874
875/**
876 * Handles the STOSD write accesses.
877 *
878 * @returns VBox status code suitable for scheduling.
879 * @param pVM The VM handle.
880 * @param pPool The pool.
881 * @param pPage The pool page (head).
882 * @param pDis The disassembly of the write instruction.
883 * @param pRegFrame The trap register frame.
884 * @param GCPhysFault The fault address as guest physical address.
885 * @param pvFault The fault address.
886 */
887DECLINLINE(int) pgmPoolAccessHandlerSTOSD(PVM pVM, PPGMPOOL pPool, PPGMPOOLPAGE pPage, PDISCPUSTATE pDis,
888 PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, RTGCPTR pvFault)
889{
890 unsigned uIncrement = pDis->param1.size;
891
892 Assert(pDis->mode == CPUMODE_32BIT || pDis->mode == CPUMODE_64BIT);
893 Assert(pRegFrame->rcx <= 0x20);
894
895#ifdef VBOX_STRICT
896 if (pDis->opmode == CPUMODE_32BIT)
897 Assert(uIncrement == 4);
898 else
899 Assert(uIncrement == 8);
900#endif
901
902 Log3(("pgmPoolAccessHandlerSTOSD\n"));
903
904 /*
905 * Increment the modification counter and insert it into the list
906 * of modified pages the first time.
907 */
908 if (!pPage->cModifications++)
909 pgmPoolMonitorModifiedInsert(pPool, pPage);
910
911 /*
912 * Execute REP STOSD.
913 *
914 * This ASSUMES that we're not invoked by Trap0e in an out-of-sync
915 * write situation, meaning that it's safe to write here.
916 */
917 PVMCPU pVCpu = VMMGetCpu(pPool->CTX_SUFF(pVM));
918 RTGCUINTPTR pu32 = (RTGCUINTPTR)pvFault;
919 while (pRegFrame->rcx)
920 {
921#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
922 uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu);
923 pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, (RTGCPTR)pu32, uIncrement);
924 PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset);
925#else
926 pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, (RTGCPTR)pu32, uIncrement);
927#endif
928#ifdef IN_RC
929 *(uint32_t *)(uintptr_t)pu32 = pRegFrame->eax;
930#else
931 PGMPhysSimpleWriteGCPhys(pVM, GCPhysFault, &pRegFrame->rax, uIncrement);
932#endif
933 pu32 += uIncrement;
934 GCPhysFault += uIncrement;
935 pRegFrame->rdi += uIncrement;
936 pRegFrame->rcx--;
937 }
938 pRegFrame->rip += pDis->opsize;
939
940 LogFlow(("pgmPoolAccessHandlerSTOSD: returns\n"));
941 return VINF_SUCCESS;
942}
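
/*
 * An illustrative example (hypothetical values) of the guest pattern the
 * handler above emulates: a small, forward, dword-aligned page table clear
 * that satisfies the fValidStosd checks made by the caller.
 *
 *     cld                     ; forward direction (u1DF clear)
 *     mov edi, <pte address>  ; dword aligned, stays within the page
 *     mov ecx, 32             ; <= 0x20 iterations
 *     xor eax, eax            ; 0 and 0x80 are the observed fill values
 *     rep stosd               ; each store passes through the loop above
 */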
943
944
945/**
946 * Handles the simple write accesses.
947 *
948 * @returns VBox status code suitable for scheduling.
949 * @param pVM The VM handle.
950 * @param pVCpu The VMCPU handle.
951 * @param pPool The pool.
952 * @param pPage The pool page (head).
953 * @param pDis The disassembly of the write instruction.
954 * @param pRegFrame The trap register frame.
955 * @param GCPhysFault The fault address as guest physical address.
956 * @param pvFault The fault address.
957 * @param pfReused Reused state (out)
958 */
959DECLINLINE(int) pgmPoolAccessHandlerSimple(PVM pVM, PVMCPU pVCpu, PPGMPOOL pPool, PPGMPOOLPAGE pPage, PDISCPUSTATE pDis,
960 PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, RTGCPTR pvFault, bool *pfReused)
961{
962 Log3(("pgmPoolAccessHandlerSimple\n"));
963 /*
964 * Increment the modification counter and insert it into the list
965 * of modified pages the first time.
966 */
967 if (!pPage->cModifications++)
968 pgmPoolMonitorModifiedInsert(pPool, pPage);
969
970 /*
971 * Clear the affected entries in every page of the monitor chain. ASSUMES that pvFault is readable.
972 */
973#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
974 uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu);
975 pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, pvFault, DISGetParamSize(pDis, &pDis->param1));
976 PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset);
977#else
978 pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, pvFault, DISGetParamSize(pDis, &pDis->param1));
979#endif
980
981 /*
982 * Interpret the instruction.
983 */
984 uint32_t cb;
985 VBOXSTRICTRC rc = EMInterpretInstructionCPU(pVM, pVCpu, pDis, pRegFrame, pvFault, EMCODETYPE_ALL, &cb);
986 if (RT_SUCCESS(rc))
987 {
988 pRegFrame->rip += pDis->opsize;
989 AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rc))); /* ASSUMES no complicated stuff here. */
990 }
991 else if (rc == VERR_EM_INTERPRETER)
992 {
993 LogFlow(("pgmPoolAccessHandlerPTWorker: Interpretation failed for %04x:%RGv - opcode=%d\n",
994 pRegFrame->cs, (RTGCPTR)pRegFrame->rip, pDis->pCurInstr->opcode));
995 rc = VINF_EM_RAW_EMULATE_INSTR;
996 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,EmulateInstr));
997 }
998
999#if 0 /* experimental code */
1000 if (rc == VINF_SUCCESS)
1001 {
1002 switch (pPage->enmKind)
1003 {
1004 case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
1005 {
1006 X86PTEPAE GstPte;
1007 int rc = pgmPoolPhysSimpleReadGCPhys(pVM, &GstPte, pvFault, GCPhysFault, sizeof(GstPte));
1008 AssertRC(rc);
1009
1010 /* Check the new value written by the guest. If present and with a bogus physical address, then
1011 * it's fairly safe to assume the guest is reusing the PT.
1012 */
1013 if (PGMSHWPTEPAE_IS_P(GstPte))
1014 {
1015 RTHCPHYS HCPhys = -1;
1016 int rc = PGMPhysGCPhys2HCPhys(pVM, GstPte.u & X86_PTE_PAE_PG_MASK, &HCPhys);
1017 if (rc != VINF_SUCCESS)
1018 {
1019 *pfReused = true;
1020 STAM_COUNTER_INC(&pPool->StatForceFlushReused);
1021 }
1022 }
1023 break;
1024 }
1025 }
1026 }
1027#endif
1028
1029 LogFlow(("pgmPoolAccessHandlerSimple: returns %Rrc cb=%d\n", VBOXSTRICTRC_VAL(rc), cb));
1030 return VBOXSTRICTRC_VAL(rc);
1031}
1032
1033/**
1034 * \#PF Handler callback for PT write accesses.
1035 *
1036 * @returns VBox status code (appropriate for GC return).
1037 * @param pVM VM Handle.
1038 * @param uErrorCode CPU Error code.
1039 * @param pRegFrame Trap register frame.
1040 * NULL on DMA and other non CPU access.
1041 * @param pvFault The fault address (cr2).
1042 * @param GCPhysFault The GC physical address corresponding to pvFault.
1043 * @param pvUser User argument.
1044 */
1045DECLEXPORT(int) pgmPoolAccessHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
1046{
1047 STAM_PROFILE_START(&pVM->pgm.s.CTX_SUFF(pPool)->CTX_SUFF_Z(StatMonitor), a);
1048 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1049 PPGMPOOLPAGE pPage = (PPGMPOOLPAGE)pvUser;
1050 PVMCPU pVCpu = VMMGetCpu(pVM);
1051 unsigned cMaxModifications;
1052 bool fForcedFlush = false;
1053
1054 LogFlow(("pgmPoolAccessHandler: pvFault=%RGv pPage=%p:{.idx=%d} GCPhysFault=%RGp\n", pvFault, pPage, pPage->idx, GCPhysFault));
1055
1056 pgmLock(pVM);
1057 if (PHYS_PAGE_ADDRESS(GCPhysFault) != PHYS_PAGE_ADDRESS(pPage->GCPhys))
1058 {
1059 /* Pool page changed while we were waiting for the lock; ignore. */
1060 Log(("CPU%d: pgmPoolAccessHandler pgm pool page for %RGp changed (to %RGp) while waiting!\n", pVCpu->idCpu, PHYS_PAGE_ADDRESS(GCPhysFault), PHYS_PAGE_ADDRESS(pPage->GCPhys)));
1061 STAM_PROFILE_STOP_EX(&pVM->pgm.s.CTX_SUFF(pPool)->CTX_SUFF_Z(StatMonitor), &pPool->CTX_MID_Z(StatMonitor,Handled), a);
1062 pgmUnlock(pVM);
1063 return VINF_SUCCESS;
1064 }
1065#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
1066 if (pPage->fDirty)
1067 {
1068 Assert(VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TLB_FLUSH));
1069 pgmUnlock(pVM);
1070 return VINF_SUCCESS; /* SMP guest case where we were blocking on the pgm lock while the same page was being marked dirty. */
1071 }
1072#endif
1073
1074#if 0 /* test code defined(VBOX_STRICT) && defined(PGMPOOL_WITH_OPTIMIZED_DIRTY_PT) */
1075 if (pPage->enmKind == PGMPOOLKIND_PAE_PT_FOR_PAE_PT)
1076 {
1077 void *pvShw = PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage);
1078 void *pvGst;
1079 int rc = PGM_GCPHYS_2_PTR(pPool->CTX_SUFF(pVM), pPage->GCPhys, &pvGst); AssertReleaseRC(rc);
1080 pgmPoolTrackCheckPTPaePae(pPool, pPage, (PPGMSHWPTPAE)pvShw, (PCX86PTPAE)pvGst);
1081 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pvGst);
1082 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pvShw);
1083 }
1084#endif
1085
1086 /*
1087 * Disassemble the faulting instruction.
1088 */
1089 PDISCPUSTATE pDis = &pVCpu->pgm.s.DisState;
1090 int rc = EMInterpretDisasOne(pVM, pVCpu, pRegFrame, pDis, NULL);
1091 if (RT_UNLIKELY(rc != VINF_SUCCESS))
1092 {
1093 AssertMsg(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("Unexpected rc %d\n", rc));
1094 pgmUnlock(pVM);
1095 return rc;
1096 }
1097
1098 Assert(pPage->enmKind != PGMPOOLKIND_FREE);
1099
1100 /*
1101 * We should ALWAYS have the list head as user parameter. This
1102 * is because we use that page to record the changes.
1103 */
1104 Assert(pPage->iMonitoredPrev == NIL_PGMPOOL_IDX);
1105
1106#ifdef IN_RING0
1107 /* Maximum number of modifications depends on the page type. */
1108 if (pPage->enmKind == PGMPOOLKIND_PAE_PT_FOR_PAE_PT)
1109 cMaxModifications = 4;
1110 else
1111 cMaxModifications = 24;
1112#else
1113 cMaxModifications = 48;
1114#endif
1115
1116 /*
1117 * Incremental page table updates should weigh more than random ones.
1118 * (Only applies when started from offset 0)
1119 */
1120 pVCpu->pgm.s.cPoolAccessHandler++;
1121 if ( pPage->pvLastAccessHandlerRip >= pRegFrame->rip - 0x40 /* observed loops in Windows 7 x64 */
1122 && pPage->pvLastAccessHandlerRip < pRegFrame->rip + 0x40
1123 && pvFault == (pPage->pvLastAccessHandlerFault + pDis->param1.size)
1124 && pVCpu->pgm.s.cPoolAccessHandler == (pPage->cLastAccessHandlerCount + 1))
1125 {
1126 Log(("Possible page reuse cMods=%d -> %d (locked=%d type=%s)\n", pPage->cModifications, pPage->cModifications * 2, pgmPoolIsPageLocked(&pVM->pgm.s, pPage), pgmPoolPoolKindToStr(pPage->enmKind)));
1127 Assert(pPage->cModifications < 32000);
1128 pPage->cModifications = pPage->cModifications * 2;
1129 pPage->pvLastAccessHandlerFault = pvFault;
1130 pPage->cLastAccessHandlerCount = pVCpu->pgm.s.cPoolAccessHandler;
1131 if (pPage->cModifications >= cMaxModifications)
1132 {
1133 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FlushReinit));
1134 fForcedFlush = true;
1135 }
1136 }
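    /*
     * Worked example (hypothetical) of the weighting above: a guest loop
     * clearing consecutive PTEs from offset 0 doubles cModifications on
     * every adjacent fault (1, 2, 4, 8, ...), so it crosses the ring-0
     * cMaxModifications threshold (4 for PAE page tables, 24 otherwise)
     * after only a handful of faults and forces a flush, while scattered
     * writes only count up one by one.
     */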
1137
1138 if (pPage->cModifications >= cMaxModifications)
1139 Log(("Mod overflow %RGv cMods=%d (locked=%d type=%s)\n", pvFault, pPage->cModifications, pgmPoolIsPageLocked(&pVM->pgm.s, pPage), pgmPoolPoolKindToStr(pPage->enmKind)));
1140
1141 /*
1142 * Check if it's worth dealing with.
1143 */
1144 bool fReused = false;
1145 bool fNotReusedNotForking = false;
1146 if ( ( pPage->cModifications < cMaxModifications /** @todo #define */ /** @todo need to check that it's not mapping EIP. */ /** @todo adjust this! */
1147 || pgmPoolIsPageLocked(&pVM->pgm.s, pPage)
1148 )
1149 && !(fReused = pgmPoolMonitorIsReused(pVM, pVCpu, pRegFrame, pDis, pvFault))
1150 && !pgmPoolMonitorIsForking(pPool, pDis, GCPhysFault & PAGE_OFFSET_MASK))
1151 {
1152 /*
1153 * Simple instructions, no REP prefix.
1154 */
1155 if (!(pDis->prefix & (PREFIX_REP | PREFIX_REPNE)))
1156 {
1157 rc = pgmPoolAccessHandlerSimple(pVM, pVCpu, pPool, pPage, pDis, pRegFrame, GCPhysFault, pvFault, &fReused);
1158 if (fReused)
1159 goto flushPage;
1160
1161 /* A mov instruction to change the first page table entry will be remembered so we can detect
1162 * full page table changes early on. This will reduce the number of unnecessary traps we'll take.
1163 */
1164 if ( rc == VINF_SUCCESS
1165 && !pPage->cLocked /* only applies to unlocked pages as we can't free locked ones (e.g. cr3 root). */
1166 && pDis->pCurInstr->opcode == OP_MOV
1167 && (pvFault & PAGE_OFFSET_MASK) == 0)
1168 {
1169 pPage->pvLastAccessHandlerFault = pvFault;
1170 pPage->cLastAccessHandlerCount = pVCpu->pgm.s.cPoolAccessHandler;
1171 pPage->pvLastAccessHandlerRip = pRegFrame->rip;
1172 /* Make sure we don't kick out a page too quickly. */
1173 if (pPage->cModifications > 8)
1174 pPage->cModifications = 2;
1175 }
1176 else
1177 if (pPage->pvLastAccessHandlerFault == pvFault)
1178 {
1179 /* ignore the 2nd write to this page table entry. */
1180 pPage->cLastAccessHandlerCount = pVCpu->pgm.s.cPoolAccessHandler;
1181 }
1182 else
1183 {
1184 pPage->pvLastAccessHandlerFault = 0;
1185 pPage->pvLastAccessHandlerRip = 0;
1186 }
1187
1188 STAM_PROFILE_STOP_EX(&pVM->pgm.s.CTX_SUFF(pPool)->CTX_SUFF_Z(StatMonitor), &pPool->CTX_MID_Z(StatMonitor,Handled), a);
1189 pgmUnlock(pVM);
1190 return rc;
1191 }
1192
1193 /*
1194 * Windows frequently does small memset() operations (netio test 4k+).
1195 * We have to deal with these or we'll kill the cache and performance.
1196 */
1197 if ( pDis->pCurInstr->opcode == OP_STOSWD
1198 && !pRegFrame->eflags.Bits.u1DF
1199 && pDis->opmode == pDis->mode
1200 && pDis->addrmode == pDis->mode)
1201 {
1202 bool fValidStosd = false;
1203
1204 if ( pDis->mode == CPUMODE_32BIT
1205 && pDis->prefix == PREFIX_REP
1206 && pRegFrame->ecx <= 0x20
1207 && pRegFrame->ecx * 4 <= PAGE_SIZE - ((uintptr_t)pvFault & PAGE_OFFSET_MASK)
1208 && !((uintptr_t)pvFault & 3)
1209 && (pRegFrame->eax == 0 || pRegFrame->eax == 0x80) /* the two values observed. */
1210 )
1211 {
1212 fValidStosd = true;
1213 pRegFrame->rcx &= 0xffffffff; /* paranoia */
1214 }
1215 else
1216 if ( pDis->mode == CPUMODE_64BIT
1217 && pDis->prefix == (PREFIX_REP | PREFIX_REX)
1218 && pRegFrame->rcx <= 0x20
1219 && pRegFrame->rcx * 8 <= PAGE_SIZE - ((uintptr_t)pvFault & PAGE_OFFSET_MASK)
1220 && !((uintptr_t)pvFault & 7)
1221 && (pRegFrame->rax == 0 || pRegFrame->rax == 0x80) /* the two values observed. */
1222 )
1223 {
1224 fValidStosd = true;
1225 }
1226
1227 if (fValidStosd)
1228 {
1229 rc = pgmPoolAccessHandlerSTOSD(pVM, pPool, pPage, pDis, pRegFrame, GCPhysFault, pvFault);
1230 STAM_PROFILE_STOP_EX(&pVM->pgm.s.CTX_SUFF(pPool)->CTX_SUFF_Z(StatMonitor), &pPool->CTX_MID_Z(StatMonitor,RepStosd), a);
1231 pgmUnlock(pVM);
1232 return rc;
1233 }
1234 }
1235
1236 /* REP prefix, don't bother. */
1237 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,RepPrefix));
1238 Log4(("pgmPoolAccessHandler: eax=%#x ecx=%#x edi=%#x esi=%#x rip=%RGv opcode=%d prefix=%#x\n",
1239 pRegFrame->eax, pRegFrame->ecx, pRegFrame->edi, pRegFrame->esi, (RTGCPTR)pRegFrame->rip, pDis->pCurInstr->opcode, pDis->prefix));
1240 fNotReusedNotForking = true;
1241 }
1242
1243#if defined(PGMPOOL_WITH_OPTIMIZED_DIRTY_PT) && defined(IN_RING0)
1244 /* E.g. Windows 7 x64 initializes page tables and touches some pages in the table during the process. This
1245 * leads to pgm pool thrashing and an excessive number of write faults due to page monitoring.
1246 */
1247 if ( pPage->cModifications >= cMaxModifications
1248 && !fForcedFlush
1249 && pPage->enmKind == PGMPOOLKIND_PAE_PT_FOR_PAE_PT
1250 && ( fNotReusedNotForking
1251 || ( !pgmPoolMonitorIsReused(pVM, pVCpu, pRegFrame, pDis, pvFault)
1252 && !pgmPoolMonitorIsForking(pPool, pDis, GCPhysFault & PAGE_OFFSET_MASK))
1253 )
1254 )
1255 {
1256 Assert(!pgmPoolIsPageLocked(&pVM->pgm.s, pPage));
1257 Assert(pPage->fDirty == false);
1258
1259 /* Flush any monitored duplicates as we will disable write protection. */
1260 if ( pPage->iMonitoredNext != NIL_PGMPOOL_IDX
1261 || pPage->iMonitoredPrev != NIL_PGMPOOL_IDX)
1262 {
1263 PPGMPOOLPAGE pPageHead = pPage;
1264
1265 /* Find the monitor head. */
1266 while (pPageHead->iMonitoredPrev != NIL_PGMPOOL_IDX)
1267 pPageHead = &pPool->aPages[pPageHead->iMonitoredPrev];
1268
1269 while (pPageHead)
1270 {
1271 unsigned idxNext = pPageHead->iMonitoredNext;
1272
1273 if (pPageHead != pPage)
1274 {
1275 STAM_COUNTER_INC(&pPool->StatDirtyPageDupFlush);
1276 Log(("Flush duplicate page idx=%d GCPhys=%RGp type=%s\n", pPageHead->idx, pPageHead->GCPhys, pgmPoolPoolKindToStr(pPageHead->enmKind)));
1277 int rc2 = pgmPoolFlushPage(pPool, pPageHead);
1278 AssertRC(rc2);
1279 }
1280
1281 if (idxNext == NIL_PGMPOOL_IDX)
1282 break;
1283
1284 pPageHead = &pPool->aPages[idxNext];
1285 }
1286 }
1287
1288 /* The flushing above might fail for locked pages, so double check. */
1289 if ( pPage->iMonitoredNext == NIL_PGMPOOL_IDX
1290 && pPage->iMonitoredPrev == NIL_PGMPOOL_IDX)
1291 {
1292 pgmPoolAddDirtyPage(pVM, pPool, pPage);
1293
1294 /* Temporarily allow write access to the page table again. */
1295 rc = PGMHandlerPhysicalPageTempOff(pVM, pPage->GCPhys, pPage->GCPhys);
1296 if (rc == VINF_SUCCESS)
1297 {
1298 rc = PGMShwMakePageWritable(pVCpu, pvFault, PGM_MK_PG_IS_WRITE_FAULT);
1299 AssertMsg(rc == VINF_SUCCESS
1300 /* In the SMP case the page table might be removed while we wait for the PGM lock in the trap handler. */
1301 || rc == VERR_PAGE_TABLE_NOT_PRESENT
1302 || rc == VERR_PAGE_NOT_PRESENT,
1303 ("PGMShwModifyPage -> GCPtr=%RGv rc=%d\n", pvFault, rc));
1304
1305 pPage->pvDirtyFault = pvFault;
1306
1307 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pPool)->CTX_SUFF_Z(StatMonitor), a);
1308 pgmUnlock(pVM);
1309 return rc;
1310 }
1311 }
1312 }
1313#endif /* PGMPOOL_WITH_OPTIMIZED_DIRTY_PT */
1314
1315 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FlushModOverflow));
1316flushPage:
1317 /*
1318 * Not worth it, so flush it.
1319 *
1320 * If we considered it to be reused, don't go back to ring-3
1321 * to emulate failed instructions since we usually cannot
1322 * interpret them. This may be a bit risky, in which case
1323 * the reuse detection must be fixed.
1324 */
1325 rc = pgmPoolAccessHandlerFlush(pVM, pVCpu, pPool, pPage, pDis, pRegFrame, GCPhysFault, pvFault);
1326 if ( rc == VINF_EM_RAW_EMULATE_INSTR
1327 && fReused)
1328 {
1329 /* Make sure that the current instruction still has shadow page backing, otherwise we'll end up in a loop. */
1330 if (PGMShwGetPage(pVCpu, pRegFrame->rip, NULL, NULL) == VINF_SUCCESS)
1331 rc = VINF_SUCCESS; /* safe to restart the instruction. */
1332 }
1333 STAM_PROFILE_STOP_EX(&pVM->pgm.s.CTX_SUFF(pPool)->CTX_SUFF_Z(StatMonitor), &pPool->CTX_MID_Z(StatMonitor,FlushPage), a);
1334 pgmUnlock(pVM);
1335 return rc;
1336}
1337
1338# endif /* !IN_RING3 */
1339
1340# ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
1341
1342# ifdef VBOX_STRICT
1343/**
1344 * Check references to guest physical memory in a PAE / PAE page table.
1345 *
1346 * @param pPool The pool.
1347 * @param pPage The page.
1348 * @param pShwPT The shadow page table (mapping of the page).
1349 * @param pGstPT The guest page table.
1350 */
1351static void pgmPoolTrackCheckPTPaePae(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PPGMSHWPTPAE pShwPT, PCX86PTPAE pGstPT)
1352{
1353 unsigned cErrors = 0;
1354 int LastRc = -1; /* initialized to shut up gcc */
1355 unsigned LastPTE = ~0U; /* initialized to shut up gcc */
1356 RTHCPHYS LastHCPhys = NIL_RTHCPHYS; /* initialized to shut up gcc */
1357 PVM pVM = pPool->CTX_SUFF(pVM);
1358
1359#ifdef VBOX_STRICT
1360 for (unsigned i = 0; i < RT_MIN(RT_ELEMENTS(pShwPT->a), pPage->iFirstPresent); i++)
1361 AssertMsg(!PGMSHWPTEPAE_IS_P(pShwPT->a[i]), ("Unexpected PTE: idx=%d %RX64 (first=%d)\n", i, PGMSHWPTEPAE_GET_LOG(pShwPT->a[i]), pPage->iFirstPresent));
1362#endif
1363 for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pShwPT->a); i++)
1364 {
1365 if (PGMSHWPTEPAE_IS_P(pShwPT->a[i]))
1366 {
1367 RTHCPHYS HCPhys = NIL_RTHCPHYS;
1368 int rc = PGMPhysGCPhys2HCPhys(pVM, pGstPT->a[i].u & X86_PTE_PAE_PG_MASK, &HCPhys);
1369 if ( rc != VINF_SUCCESS
1370 || PGMSHWPTEPAE_GET_HCPHYS(pShwPT->a[i]) != HCPhys)
1371 {
1372 Log(("rc=%d idx=%d guest %RX64 shw=%RX64 vs %RHp\n", rc, i, pGstPT->a[i].u, PGMSHWPTEPAE_GET_LOG(pShwPT->a[i]), HCPhys));
1373 LastPTE = i;
1374 LastRc = rc;
1375 LastHCPhys = HCPhys;
1376 cErrors++;
1377
1378 RTHCPHYS HCPhysPT = NIL_RTHCPHYS;
1379 rc = PGMPhysGCPhys2HCPhys(pVM, pPage->GCPhys, &HCPhysPT);
1380 AssertRC(rc);
1381
1382 for (unsigned iPage = 0; iPage < pPool->cCurPages; iPage++)
1383 {
1384 PPGMPOOLPAGE pTempPage = &pPool->aPages[iPage];
1385
1386 if (pTempPage->enmKind == PGMPOOLKIND_PAE_PT_FOR_PAE_PT)
1387 {
1388 PPGMSHWPTPAE pShwPT2 = (PPGMSHWPTPAE)PGMPOOL_PAGE_2_PTR(pVM, pTempPage);
1389
1390 for (unsigned j = 0; j < RT_ELEMENTS(pShwPT->a); j++)
1391 {
1392 if ( PGMSHWPTEPAE_IS_P_RW(pShwPT2->a[j])
1393 && PGMSHWPTEPAE_GET_HCPHYS(pShwPT2->a[j]) == HCPhysPT)
1394 {
1395 Log(("GCPhys=%RGp idx=%d %RX64 vs %RX64\n", pTempPage->GCPhys, j, PGMSHWPTEPAE_GET_LOG(pShwPT->a[j]), PGMSHWPTEPAE_GET_LOG(pShwPT2->a[j])));
1396 }
1397 }
1398
1399 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pShwPT2);
1400 }
1401 }
1402 }
1403 }
1404 }
1405 AssertMsg(!cErrors, ("cErrors=%d: last rc=%d idx=%d guest %RX64 shw=%RX64 vs %RHp\n", cErrors, LastRc, LastPTE, pGstPT->a[LastPTE].u, PGMSHWPTEPAE_GET_LOG(pShwPT->a[LastPTE]), LastHCPhys));
1406}
1407# endif /* VBOX_STRICT */
1408
1409/**
1410 * Clear references to guest physical memory in a PAE / PAE page table.
1411 *
1412 * @returns Number of changed PTEs.
1413 * @param pPool The pool.
1414 * @param pPage The page.
1415 * @param pShwPT The shadow page table (mapping of the page).
1416 * @param pGstPT The guest page table.
1417 * @param pOldGstPT The old cached guest page table.
1418 * @param fAllowRemoval Whether to bail out and remove a reused page table as soon as an invalid PTE is encountered.
1419 * @param pfFlush Flush reused page table (out).
1420 */
1421DECLINLINE(unsigned) pgmPoolTrackFlushPTPaePae(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PPGMSHWPTPAE pShwPT, PCX86PTPAE pGstPT,
1422 PCX86PTPAE pOldGstPT, bool fAllowRemoval, bool *pfFlush)
1423{
1424 unsigned cChanged = 0;
1425
1426#ifdef VBOX_STRICT
1427 for (unsigned i = 0; i < RT_MIN(RT_ELEMENTS(pShwPT->a), pPage->iFirstPresent); i++)
1428 AssertMsg(!PGMSHWPTEPAE_IS_P(pShwPT->a[i]), ("Unexpected PTE: idx=%d %RX64 (first=%d)\n", i, PGMSHWPTEPAE_GET_LOG(pShwPT->a[i]), pPage->iFirstPresent));
1429#endif
1430 *pfFlush = false;
1431
1432 for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pShwPT->a); i++)
1433 {
1434 /* Check the new value written by the guest. If present and with a bogus physical address, then
1435 * it's fairly safe to assume the guest is reusing the PT.
1436 */
1437 if ( fAllowRemoval
1438 && pGstPT->a[i].n.u1Present)
1439 {
1440 if (!PGMPhysIsGCPhysValid(pPool->CTX_SUFF(pVM), pGstPT->a[i].u & X86_PTE_PAE_PG_MASK))
1441 {
1442 *pfFlush = true;
1443 return ++cChanged;
1444 }
1445 }
1446 if (PGMSHWPTEPAE_IS_P(pShwPT->a[i]))
1447 {
1448 /* If the old cached PTE is identical, then there's no need to flush the shadow copy. */
1449 if ((pGstPT->a[i].u & X86_PTE_PAE_PG_MASK) == (pOldGstPT->a[i].u & X86_PTE_PAE_PG_MASK))
1450 {
1451#ifdef VBOX_STRICT
1452 RTHCPHYS HCPhys = NIL_RTHCPHYS;
1453 int rc = PGMPhysGCPhys2HCPhys(pPool->CTX_SUFF(pVM), pGstPT->a[i].u & X86_PTE_PAE_PG_MASK, &HCPhys);
1454 AssertMsg(rc == VINF_SUCCESS && PGMSHWPTEPAE_GET_HCPHYS(pShwPT->a[i]) == HCPhys, ("rc=%d guest %RX64 old %RX64 shw=%RX64 vs %RHp\n", rc, pGstPT->a[i].u, pOldGstPT->a[i].u, PGMSHWPTEPAE_GET_LOG(pShwPT->a[i]), HCPhys));
1455#endif
1456 uint64_t uHostAttr = PGMSHWPTEPAE_GET_U(pShwPT->a[i]) & (X86_PTE_P | X86_PTE_US | X86_PTE_A | X86_PTE_D | X86_PTE_G | X86_PTE_PAE_NX);
1457 bool fHostRW = !!(PGMSHWPTEPAE_GET_U(pShwPT->a[i]) & X86_PTE_RW);
1458 uint64_t uGuestAttr = pGstPT->a[i].u & (X86_PTE_P | X86_PTE_US | X86_PTE_A | X86_PTE_D | X86_PTE_G | X86_PTE_PAE_NX);
1459 bool fGuestRW = !!(pGstPT->a[i].u & X86_PTE_RW);
1460
1461 if ( uHostAttr == uGuestAttr
1462 && fHostRW <= fGuestRW)
1463 continue;
1464 }
1465 cChanged++;
1466 /* Something was changed, so flush it. */
1467 Log4(("pgmPoolTrackDerefPTPaePae: i=%d pte=%RX64 hint=%RX64\n",
1468 i, PGMSHWPTEPAE_GET_HCPHYS(pShwPT->a[i]), pOldGstPT->a[i].u & X86_PTE_PAE_PG_MASK));
1469 pgmPoolTracDerefGCPhysHint(pPool, pPage, PGMSHWPTEPAE_GET_HCPHYS(pShwPT->a[i]), pOldGstPT->a[i].u & X86_PTE_PAE_PG_MASK, i);
1470 PGMSHWPTEPAE_ATOMIC_SET(pShwPT->a[i], 0);
1471 }
1472 }
1473 return cChanged;
1474}
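
/*
 * A note on the fHostRW <= fGuestRW test above (a restatement of the code,
 * assuming no extra semantics beyond it): the shadow PTE may legitimately
 * be more restrictive than the guest PTE, never less.
 *
 *     host RO / guest RO  -> identical, keep
 *     host RO / guest RW  -> shadow still write-monitored/unsynced, keep
 *     host RW / guest RO  -> shadow grants more than the guest, flush
 */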
1475
1476
1477/**
1478 * Flushes a dirty page.
1479 *
1480 * @param pVM VM Handle.
1481 * @param pPool The pool.
1482 * @param idxSlot Dirty array slot index.
1483 * @param fAllowRemoval Allow a reused page table to be removed.
1484 */
1485static void pgmPoolFlushDirtyPage(PVM pVM, PPGMPOOL pPool, unsigned idxSlot, bool fAllowRemoval = false)
1486{
1487 PPGMPOOLPAGE pPage;
1488 unsigned idxPage;
1489
1490 Assert(idxSlot < RT_ELEMENTS(pPool->aIdxDirtyPages));
1491 if (pPool->aIdxDirtyPages[idxSlot] == NIL_PGMPOOL_IDX)
1492 return;
1493
1494 idxPage = pPool->aIdxDirtyPages[idxSlot];
1495 AssertRelease(idxPage != NIL_PGMPOOL_IDX);
1496 pPage = &pPool->aPages[idxPage];
1497 Assert(pPage->idx == idxPage);
1498 Assert(pPage->iMonitoredNext == NIL_PGMPOOL_IDX && pPage->iMonitoredPrev == NIL_PGMPOOL_IDX);
1499
1500 AssertMsg(pPage->fDirty, ("Page %RGp (slot=%d) not marked dirty!", pPage->GCPhys, idxSlot));
1501 Log(("Flush dirty page %RGp cMods=%d\n", pPage->GCPhys, pPage->cModifications));
1502
1503 /* First write protect the page again to catch all write accesses. (before checking for changes -> SMP) */
1504 int rc = PGMHandlerPhysicalReset(pVM, pPage->GCPhys);
1505 Assert(rc == VINF_SUCCESS);
1506 pPage->fDirty = false;
1507
1508#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
1509 PVMCPU pVCpu = VMMGetCpu(pVM);
1510 uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu);
1511#endif
1512
1513#ifdef VBOX_STRICT
1514 uint64_t fFlags = 0;
1515 RTHCPHYS HCPhys;
1516 rc = PGMShwGetPage(VMMGetCpu(pVM), pPage->pvDirtyFault, &fFlags, &HCPhys);
1517 AssertMsg( ( rc == VINF_SUCCESS
1518 && (!(fFlags & X86_PTE_RW) || HCPhys != pPage->Core.Key))
1519 /* In the SMP case the page table might be removed while we wait for the PGM lock in the trap handler. */
1520 || rc == VERR_PAGE_TABLE_NOT_PRESENT
1521 || rc == VERR_PAGE_NOT_PRESENT,
1522 ("PGMShwGetPage -> GCPtr=%RGv rc=%d flags=%RX64\n", pPage->pvDirtyFault, rc, fFlags));
1523#endif
1524
1525 /* Flush those PTEs that have changed. */
1526 STAM_PROFILE_START(&pPool->StatTrackDeref,a);
1527 void *pvShw = PGMPOOL_PAGE_2_PTR(pVM, pPage);
1528 void *pvGst;
1529 rc = PGM_GCPHYS_2_PTR(pVM, pPage->GCPhys, &pvGst); AssertReleaseRC(rc);
1530 bool fFlush;
1531 unsigned cChanges = pgmPoolTrackFlushPTPaePae(pPool, pPage, (PPGMSHWPTPAE)pvShw, (PCX86PTPAE)pvGst,
1532 (PCX86PTPAE)&pPool->aDirtyPages[idxSlot][0], fAllowRemoval, &fFlush);
1533 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pvGst);
1534 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pvShw);
1535 STAM_PROFILE_STOP(&pPool->StatTrackDeref,a);
1536 /* Note: we might want to consider keeping the dirty page active in case there were many changes. */
1537
1538 /* This page is likely to be modified again, so reduce the number of modifications just a bit here. */
1539 Assert(pPage->cModifications);
1540 if (cChanges < 4)
1541 pPage->cModifications = 1; /* must use > 0 here */
1542 else
1543 pPage->cModifications = RT_MAX(1, pPage->cModifications / 2);
1544
1545 STAM_COUNTER_INC(&pPool->StatResetDirtyPages);
1546 if (pPool->cDirtyPages == RT_ELEMENTS(pPool->aIdxDirtyPages))
1547 pPool->idxFreeDirtyPage = idxSlot;
1548
1549 pPool->cDirtyPages--;
1550 pPool->aIdxDirtyPages[idxSlot] = NIL_PGMPOOL_IDX;
1551 Assert(pPool->cDirtyPages <= RT_ELEMENTS(pPool->aIdxDirtyPages));
1552 if (fFlush)
1553 {
1554 Assert(fAllowRemoval);
1555 Log(("Flush reused page table!\n"));
1556 pgmPoolFlushPage(pPool, pPage);
1557 STAM_COUNTER_INC(&pPool->StatForceFlushReused);
1558 }
1559 else
1560 Log(("Removed dirty page %RGp cMods=%d cChanges=%d\n", pPage->GCPhys, pPage->cModifications, cChanges));
1561
1562#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC) /* must match the PGMRZDynMapPushAutoSubset condition above */
1563 PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset);
1564#endif
1565}
1566
1567# ifndef IN_RING3
1568/**
1569 * Adds a new dirty page.
1570 *
1571 * @param pVM VM Handle.
1572 * @param pPool The pool.
1573 * @param pPage The page.
1574 */
1575void pgmPoolAddDirtyPage(PVM pVM, PPGMPOOL pPool, PPGMPOOLPAGE pPage)
1576{
1577 unsigned idxFree;
1578
1579 Assert(PGMIsLocked(pVM));
1580 AssertCompile(RT_ELEMENTS(pPool->aIdxDirtyPages) == 8 || RT_ELEMENTS(pPool->aIdxDirtyPages) == 16);
1581 Assert(!pPage->fDirty);
1582
1583 idxFree = pPool->idxFreeDirtyPage;
1584 Assert(idxFree < RT_ELEMENTS(pPool->aIdxDirtyPages));
1585 Assert(pPage->iMonitoredNext == NIL_PGMPOOL_IDX && pPage->iMonitoredPrev == NIL_PGMPOOL_IDX);
1586
1587 if (pPool->cDirtyPages >= RT_ELEMENTS(pPool->aIdxDirtyPages))
1588 {
1589 STAM_COUNTER_INC(&pPool->StatDirtyPageOverFlowFlush);
1590 pgmPoolFlushDirtyPage(pVM, pPool, idxFree, true /* allow removal of reused page tables */);
1591 }
1592 Assert(pPool->cDirtyPages < RT_ELEMENTS(pPool->aIdxDirtyPages));
1593 AssertMsg(pPool->aIdxDirtyPages[idxFree] == NIL_PGMPOOL_IDX, ("idxFree=%d cDirtyPages=%d\n", idxFree, pPool->cDirtyPages));
1594
1595 Log(("Add dirty page %RGp (slot=%d)\n", pPage->GCPhys, idxFree));
1596
1597 /*
1598 * Make a copy of the guest page table as we require valid GCPhys addresses
1599 * when removing references to physical pages.
1600 * (The HCPhys linear lookup is *extremely* expensive!)
1601 */
1602 void *pvGst;
1603 int rc = PGM_GCPHYS_2_PTR(pVM, pPage->GCPhys, &pvGst); AssertReleaseRC(rc);
1604 memcpy(&pPool->aDirtyPages[idxFree][0], pvGst, PAGE_SIZE);
1605#ifdef VBOX_STRICT
1606 void *pvShw = PGMPOOL_PAGE_2_PTR(pVM, pPage);
1607 pgmPoolTrackCheckPTPaePae(pPool, pPage, (PPGMSHWPTPAE)pvShw, (PCX86PTPAE)pvGst);
1608 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pvShw);
1609#endif
1610 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pvGst);
1611
1612 STAM_COUNTER_INC(&pPool->StatDirtyPage);
1613 pPage->fDirty = true;
1614 pPage->idxDirty = idxFree;
1615 pPool->aIdxDirtyPages[idxFree] = pPage->idx;
1616 pPool->cDirtyPages++;
1617
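 /* Advance the free index ring-buffer style: the array size is a power of two (see the AssertCompile above), so the
  * mask wraps the index, e.g. with 16 slots (15 + 1) & 15 == 0. If that slot is taken, the loop below scans for any free one. */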
1618 pPool->idxFreeDirtyPage = (pPool->idxFreeDirtyPage + 1) & (RT_ELEMENTS(pPool->aIdxDirtyPages) - 1);
1619 if ( pPool->cDirtyPages < RT_ELEMENTS(pPool->aIdxDirtyPages)
1620 && pPool->aIdxDirtyPages[pPool->idxFreeDirtyPage] != NIL_PGMPOOL_IDX)
1621 {
1622 unsigned i;
1623 for (i = 1; i < RT_ELEMENTS(pPool->aIdxDirtyPages); i++)
1624 {
1625 idxFree = (pPool->idxFreeDirtyPage + i) & (RT_ELEMENTS(pPool->aIdxDirtyPages) - 1);
1626 if (pPool->aIdxDirtyPages[idxFree] == NIL_PGMPOOL_IDX)
1627 {
1628 pPool->idxFreeDirtyPage = idxFree;
1629 break;
1630 }
1631 }
1632 Assert(i != RT_ELEMENTS(pPool->aIdxDirtyPages));
1633 }
1634
1635 Assert(pPool->cDirtyPages == RT_ELEMENTS(pPool->aIdxDirtyPages) || pPool->aIdxDirtyPages[pPool->idxFreeDirtyPage] == NIL_PGMPOOL_IDX);
1636 return;
1637}
1638# endif /* !IN_RING3 */
1639
1640/**
1641 * Checks if the specified page is dirty (i.e. not write monitored).
1642 *
1643 * @returns true if the page is dirty, false if not.
1644 * @param pVM VM Handle.
1645 * @param GCPhys Guest physical address
1646 */
1647bool pgmPoolIsDirtyPage(PVM pVM, RTGCPHYS GCPhys)
1648{
1649 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1650 Assert(PGMIsLocked(pVM));
1651 if (!pPool->cDirtyPages)
1652 return false;
1653
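 /* Compare page frames only; the dirty set contains full guest page tables, whose GCPhys is page aligned. */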
1654 GCPhys = GCPhys & ~(RTGCPHYS)(PAGE_SIZE - 1);
1655
1656 for (unsigned i = 0; i < RT_ELEMENTS(pPool->aIdxDirtyPages); i++)
1657 {
1658 if (pPool->aIdxDirtyPages[i] != NIL_PGMPOOL_IDX)
1659 {
1660 PPGMPOOLPAGE pPage;
1661 unsigned idxPage = pPool->aIdxDirtyPages[i];
1662
1663 pPage = &pPool->aPages[idxPage];
1664 if (pPage->GCPhys == GCPhys)
1665 return true;
1666 }
1667 }
1668 return false;
1669}
1670
1671/**
1672 * Resets all dirty pages by reinstating page monitoring.
1673 *
1674 * @param pVM VM Handle.
1675 */
1676void pgmPoolResetDirtyPages(PVM pVM)
1677{
1678 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1679 Assert(PGMIsLocked(pVM));
1680 Assert(pPool->cDirtyPages <= RT_ELEMENTS(pPool->aIdxDirtyPages));
1681
1682 if (!pPool->cDirtyPages)
1683 return;
1684
1685 Log(("pgmPoolResetDirtyPages\n"));
1686 for (unsigned i = 0; i < RT_ELEMENTS(pPool->aIdxDirtyPages); i++)
1687 pgmPoolFlushDirtyPage(pVM, pPool, i, true /* allow removal of reused page tables */);
1688
1689 pPool->idxFreeDirtyPage = 0;
1690 if ( pPool->cDirtyPages != RT_ELEMENTS(pPool->aIdxDirtyPages)
1691 && pPool->aIdxDirtyPages[pPool->idxFreeDirtyPage] != NIL_PGMPOOL_IDX)
1692 {
1693 unsigned i;
1694 for (i = 1; i < RT_ELEMENTS(pPool->aIdxDirtyPages); i++)
1695 {
1696 if (pPool->aIdxDirtyPages[i] == NIL_PGMPOOL_IDX)
1697 {
1698 pPool->idxFreeDirtyPage = i;
1699 break;
1700 }
1701 }
1702 AssertMsg(i != RT_ELEMENTS(pPool->aIdxDirtyPages), ("cDirtyPages %d", pPool->cDirtyPages));
1703 }
1704
1705 Assert(pPool->aIdxDirtyPages[pPool->idxFreeDirtyPage] == NIL_PGMPOOL_IDX || pPool->cDirtyPages == RT_ELEMENTS(pPool->aIdxDirtyPages));
1706 return;
1707}
1708
1709/**
1710 * Invalidates the dirty state of the page table at the given physical address, flushing it and reinstating write monitoring.
1711 *
1712 * @param pVM VM Handle.
1713 * @param GCPhysPT Physical address of the page table
1714 */
1715void pgmPoolInvalidateDirtyPage(PVM pVM, RTGCPHYS GCPhysPT)
1716{
1717 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1718 Assert(PGMIsLocked(pVM));
1719 Assert(pPool->cDirtyPages <= RT_ELEMENTS(pPool->aIdxDirtyPages));
1720 unsigned idxDirtyPage = RT_ELEMENTS(pPool->aIdxDirtyPages);
1721
1722 if (!pPool->cDirtyPages)
1723 return;
1724
1725 GCPhysPT = GCPhysPT & ~(RTGCPHYS)(PAGE_SIZE - 1);
1726
1727 for (unsigned i = 0; i < RT_ELEMENTS(pPool->aIdxDirtyPages); i++)
1728 {
1729 if (pPool->aIdxDirtyPages[i] != NIL_PGMPOOL_IDX)
1730 {
1731 unsigned idxPage = pPool->aIdxDirtyPages[i];
1732
1733 PPGMPOOLPAGE pPage = &pPool->aPages[idxPage];
1734 if (pPage->GCPhys == GCPhysPT)
1735 {
1736 idxDirtyPage = i;
1737 break;
1738 }
1739 }
1740 }
1741
1742 if (idxDirtyPage != RT_ELEMENTS(pPool->aIdxDirtyPages))
1743 {
1744 pgmPoolFlushDirtyPage(pVM, pPool, idxDirtyPage, true /* allow removal of reused page tables */);
1745 if ( pPool->cDirtyPages != RT_ELEMENTS(pPool->aIdxDirtyPages)
1746 && pPool->aIdxDirtyPages[pPool->idxFreeDirtyPage] != NIL_PGMPOOL_IDX)
1747 {
1748 unsigned i;
1749 for (i = 0; i < RT_ELEMENTS(pPool->aIdxDirtyPages); i++)
1750 {
1751 if (pPool->aIdxDirtyPages[i] == NIL_PGMPOOL_IDX)
1752 {
1753 pPool->idxFreeDirtyPage = i;
1754 break;
1755 }
1756 }
1757 AssertMsg(i != RT_ELEMENTS(pPool->aIdxDirtyPages), ("cDirtyPages %d", pPool->cDirtyPages));
1758 }
1759 }
1760}
1761
1762# endif /* PGMPOOL_WITH_OPTIMIZED_DIRTY_PT */
1763
1764/**
1765 * Inserts a page into the GCPhys hash table.
1766 *
1767 * @param pPool The pool.
1768 * @param pPage The page.
1769 */
1770DECLINLINE(void) pgmPoolHashInsert(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
1771{
1772 Log3(("pgmPoolHashInsert: %RGp\n", pPage->GCPhys));
1773 Assert(pPage->GCPhys != NIL_RTGCPHYS); Assert(pPage->iNext == NIL_PGMPOOL_IDX);
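 /* Chained hashing: PGMPOOL_HASH maps the GCPhys page frame to a bucket (presumably something like
  * ((GCPhys >> PAGE_SHIFT) & (RT_ELEMENTS(aiHash) - 1)); the exact macro lives in PGMInternal.h). Each bucket heads an
  * intrusive singly linked list threaded through the pages' iNext fields, and new pages are linked in at the head. */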
1774 uint16_t iHash = PGMPOOL_HASH(pPage->GCPhys);
1775 pPage->iNext = pPool->aiHash[iHash];
1776 pPool->aiHash[iHash] = pPage->idx;
1777}
1778
1779
1780/**
1781 * Removes a page from the GCPhys hash table.
1782 *
1783 * @param pPool The pool.
1784 * @param pPage The page.
1785 */
1786DECLINLINE(void) pgmPoolHashRemove(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
1787{
1788 Log3(("pgmPoolHashRemove: %RGp\n", pPage->GCPhys));
1789 uint16_t iHash = PGMPOOL_HASH(pPage->GCPhys);
1790 if (pPool->aiHash[iHash] == pPage->idx)
1791 pPool->aiHash[iHash] = pPage->iNext;
1792 else
1793 {
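 /* Not the chain head: walk the hash chain to find our predecessor and unlink ourselves. */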
1794 uint16_t iPrev = pPool->aiHash[iHash];
1795 for (;;)
1796 {
1797 const int16_t i = pPool->aPages[iPrev].iNext;
1798 if (i == pPage->idx)
1799 {
1800 pPool->aPages[iPrev].iNext = pPage->iNext;
1801 break;
1802 }
1803 if (i == NIL_PGMPOOL_IDX)
1804 {
1805 AssertReleaseMsgFailed(("GCPhys=%RGp idx=%#x\n", pPage->GCPhys, pPage->idx));
1806 break;
1807 }
1808 iPrev = i;
1809 }
1810 }
1811 pPage->iNext = NIL_PGMPOOL_IDX;
1812}
1813
1814
1815/**
1816 * Frees up one cache page.
1817 *
1818 * @returns VBox status code.
1819 * @retval VINF_SUCCESS on success.
1820 * @param pPool The pool.
1821 * @param iUser The user index.
1822 */
1823static int pgmPoolCacheFreeOne(PPGMPOOL pPool, uint16_t iUser)
1824{
1825#ifndef IN_RC
1826 const PVM pVM = pPool->CTX_SUFF(pVM);
1827#endif
1828 Assert(pPool->iAgeHead != pPool->iAgeTail); /* We shouldn't be here if there are fewer than 2 cached entries! */
1829 STAM_COUNTER_INC(&pPool->StatCacheFreeUpOne);
1830
1831 /*
1832 * Select one page from the tail of the age list.
1833 */
1834 PPGMPOOLPAGE pPage;
1835 for (unsigned iLoop = 0; ; iLoop++)
1836 {
1837 uint16_t iToFree = pPool->iAgeTail;
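 /* Never evict the page the caller is allocating for; fall back to the next oldest entry. */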
1838 if (iToFree == iUser)
1839 iToFree = pPool->aPages[iToFree].iAgePrev;
1840/* This is the alternative to the SyncCR3 pgmPoolCacheUsed calls.
1841 if (pPool->aPages[iToFree].iUserHead != NIL_PGMPOOL_USER_INDEX)
1842 {
1843 uint16_t i = pPool->aPages[iToFree].iAgePrev;
1844 for (unsigned j = 0; j < 10 && i != NIL_PGMPOOL_USER_INDEX; j++, i = pPool->aPages[i].iAgePrev)
1845 {
1846 if (pPool->aPages[iToFree].iUserHead == NIL_PGMPOOL_USER_INDEX)
1847 continue;
1848 iToFree = i;
1849 break;
1850 }
1851 }
1852*/
1853 Assert(iToFree != iUser);
1854 AssertRelease(iToFree != NIL_PGMPOOL_IDX);
1855 pPage = &pPool->aPages[iToFree];
1856
1857 /*
1858 * Reject any attempts at flushing the currently active shadow CR3 mapping.
1859 * Call pgmPoolCacheUsed to move the page to the head of the age list.
1860 */
1861 if (!pgmPoolIsPageLocked(&pPool->CTX_SUFF(pVM)->pgm.s, pPage))
1862 break;
1863 LogFlow(("pgmPoolCacheFreeOne: refuse CR3 mapping\n"));
1864 pgmPoolCacheUsed(pPool, pPage);
1865 AssertLogRelReturn(iLoop < 8192, VERR_INTERNAL_ERROR);
1866 }
1867
1868 /*
1869 * Found a usable page, flush it and return.
1870 */
1871 int rc = pgmPoolFlushPage(pPool, pPage);
1872 /* This flush was initiated by us and not the guest, so explicitly flush the TLB. */
1873 /** @todo find out why this is necessary; pgmPoolFlushPage should trigger a flush if one is really needed. */
1874 if (rc == VINF_SUCCESS)
1875 PGM_INVL_ALL_VCPU_TLBS(pVM);
1876 return rc;
1877}
1878
1879
1880/**
1881 * Checks if a kind mismatch is really a page being reused
1882 * or just a normal remapping.
1883 *
1884 * @returns true if reused and the cached page (enmKind1) should be flushed
1885 * @returns false if not reused.
1886 * @param enmKind1 The kind of the cached page.
1887 * @param enmKind2 The kind of the requested page.
1888 */
1889static bool pgmPoolCacheReusedByKind(PGMPOOLKIND enmKind1, PGMPOOLKIND enmKind2)
1890{
1891 switch (enmKind1)
1892 {
1893 /*
1894 * Never reuse them. There is no remapping in non-paging mode.
1895 */
1896 case PGMPOOLKIND_32BIT_PT_FOR_PHYS:
1897 case PGMPOOLKIND_32BIT_PD_PHYS:
1898 case PGMPOOLKIND_PAE_PT_FOR_PHYS:
1899 case PGMPOOLKIND_PAE_PD_PHYS:
1900 case PGMPOOLKIND_PAE_PDPT_PHYS:
1901 case PGMPOOLKIND_64BIT_PDPT_FOR_PHYS:
1902 case PGMPOOLKIND_64BIT_PD_FOR_PHYS:
1903 case PGMPOOLKIND_EPT_PT_FOR_PHYS:
1904 case PGMPOOLKIND_EPT_PD_FOR_PHYS:
1905 case PGMPOOLKIND_EPT_PDPT_FOR_PHYS:
1906 case PGMPOOLKIND_PAE_PDPT_FOR_32BIT: /* never reuse them for other types */
1907 return false;
1908
1909 /*
1910 * It's perfectly fine to reuse these, except for PAE and non-paging stuff.
1911 */
1912 case PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB:
1913 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB:
1914 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT:
1915 case PGMPOOLKIND_PAE_PT_FOR_32BIT_PT:
1916 case PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD:
1917 case PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD:
1918 case PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD:
1919 case PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD:
1920 case PGMPOOLKIND_32BIT_PD:
1921 case PGMPOOLKIND_PAE_PDPT:
1922 switch (enmKind2)
1923 {
1924 case PGMPOOLKIND_PAE_PD_FOR_PAE_PD:
1925 case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
1926 case PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD:
1927 case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
1928 case PGMPOOLKIND_64BIT_PML4:
1929 case PGMPOOLKIND_PAE_PT_FOR_PAE_2MB:
1930 case PGMPOOLKIND_32BIT_PT_FOR_PHYS:
1931 case PGMPOOLKIND_PAE_PT_FOR_PHYS:
1932 case PGMPOOLKIND_64BIT_PDPT_FOR_PHYS:
1933 case PGMPOOLKIND_64BIT_PD_FOR_PHYS:
1934 case PGMPOOLKIND_EPT_PDPT_FOR_PHYS:
1935 case PGMPOOLKIND_EPT_PD_FOR_PHYS:
1936 case PGMPOOLKIND_EPT_PT_FOR_PHYS:
1937 return true;
1938 default:
1939 return false;
1940 }
1941
1942 /*
1943 * It's perfectly fine to reuse these, except for PAE and non-paging stuff.
1944 */
1945 case PGMPOOLKIND_PAE_PD_FOR_PAE_PD:
1946 case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
1947 case PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD:
1948 case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
1949 case PGMPOOLKIND_64BIT_PML4:
1950 case PGMPOOLKIND_PAE_PT_FOR_PAE_2MB:
1951 switch (enmKind2)
1952 {
1953 case PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB:
1954 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB:
1955 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT:
1956 case PGMPOOLKIND_PAE_PT_FOR_32BIT_PT:
1957 case PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD:
1958 case PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD:
1959 case PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD:
1960 case PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD:
1961 case PGMPOOLKIND_32BIT_PT_FOR_PHYS:
1962 case PGMPOOLKIND_PAE_PT_FOR_PHYS:
1963 case PGMPOOLKIND_64BIT_PDPT_FOR_PHYS:
1964 case PGMPOOLKIND_64BIT_PD_FOR_PHYS:
1965 case PGMPOOLKIND_EPT_PDPT_FOR_PHYS:
1966 case PGMPOOLKIND_EPT_PD_FOR_PHYS:
1967 case PGMPOOLKIND_EPT_PT_FOR_PHYS:
1968 return true;
1969 default:
1970 return false;
1971 }
1972
1973 /*
1974 * These cannot be flushed, and it's common to reuse the PDs as PTs.
1975 */
1976 case PGMPOOLKIND_ROOT_NESTED:
1977 return false;
1978
1979 default:
1980 AssertFatalMsgFailed(("enmKind1=%d\n", enmKind1));
1981 }
1982}
1983
1984
1985/**
1986 * Attempts to satisfy a pgmPoolAlloc request from the cache.
1987 *
1988 * @returns VBox status code.
1989 * @retval VINF_PGM_CACHED_PAGE on success.
1990 * @retval VERR_FILE_NOT_FOUND if not found.
1991 * @param pPool The pool.
1992 * @param GCPhys The GC physical address of the page we're gonna shadow.
1993 * @param enmKind The kind of mapping.
1994 * @param enmAccess Access type for the mapping (only relevant for big pages)
1995 * @param iUser The shadow page pool index of the user table.
1996 * @param iUserTable The index into the user table (shadowed).
1997 * @param ppPage Where to store the pointer to the page.
1998 */
1999static int pgmPoolCacheAlloc(PPGMPOOL pPool, RTGCPHYS GCPhys, PGMPOOLKIND enmKind, PGMPOOLACCESS enmAccess, uint16_t iUser, uint32_t iUserTable, PPPGMPOOLPAGE ppPage)
2000{
2001#ifndef IN_RC
2002 const PVM pVM = pPool->CTX_SUFF(pVM);
2003#endif
2004 /*
2005 * Look up the GCPhys in the hash.
2006 */
2007 unsigned i = pPool->aiHash[PGMPOOL_HASH(GCPhys)];
2008 Log3(("pgmPoolCacheAlloc: %RGp kind %s iUser=%x iUserTable=%x SLOT=%d\n", GCPhys, pgmPoolPoolKindToStr(enmKind), iUser, iUserTable, i));
2009 if (i != NIL_PGMPOOL_IDX)
2010 {
2011 do
2012 {
2013 PPGMPOOLPAGE pPage = &pPool->aPages[i];
2014 Log4(("pgmPoolCacheAlloc: slot %d found page %RGp\n", i, pPage->GCPhys));
2015 if (pPage->GCPhys == GCPhys)
2016 {
2017 if ( (PGMPOOLKIND)pPage->enmKind == enmKind
2018 && (PGMPOOLACCESS)pPage->enmAccess == enmAccess)
2019 {
2020 /* Put it at the start of the use list to make sure pgmPoolTrackAddUser
2021 * doesn't flush it in case there are no more free use records.
2022 */
2023 pgmPoolCacheUsed(pPool, pPage);
2024
2025 int rc = pgmPoolTrackAddUser(pPool, pPage, iUser, iUserTable);
2026 if (RT_SUCCESS(rc))
2027 {
2028 Assert((PGMPOOLKIND)pPage->enmKind == enmKind);
2029 *ppPage = pPage;
2030 if (pPage->cModifications)
2031 pPage->cModifications = 1; /* reset counter (can't use 0, or else it will be reinserted in the modified list) */
2032 STAM_COUNTER_INC(&pPool->StatCacheHits);
2033 return VINF_PGM_CACHED_PAGE;
2034 }
2035 return rc;
2036 }
2037
2038 if ((PGMPOOLKIND)pPage->enmKind != enmKind)
2039 {
2040 /*
2041 * The kind is different. In some cases we should now flush the page
2042 * as it has been reused, but in most cases this is normal remapping
2043 * of PDs as PT or big pages using the GCPhys field in a slightly
2044 * different way than the other kinds.
2045 */
2046 if (pgmPoolCacheReusedByKind((PGMPOOLKIND)pPage->enmKind, enmKind))
2047 {
2048 STAM_COUNTER_INC(&pPool->StatCacheKindMismatches);
2049 pgmPoolFlushPage(pPool, pPage);
2050 break;
2051 }
2052 }
2053 }
2054
2055 /* next */
2056 i = pPage->iNext;
2057 } while (i != NIL_PGMPOOL_IDX);
2058 }
2059
2060 Log3(("pgmPoolCacheAlloc: Missed GCPhys=%RGp enmKind=%s\n", GCPhys, pgmPoolPoolKindToStr(enmKind)));
2061 STAM_COUNTER_INC(&pPool->StatCacheMisses);
2062 return VERR_FILE_NOT_FOUND;
2063}
2064
2065
2066/**
2067 * Inserts a page into the cache.
2068 *
2069 * @param pPool The pool.
2070 * @param pPage The cached page.
2071 * @param fCanBeCached Set if the page is fit for caching from the caller's point of view.
2072 */
2073static void pgmPoolCacheInsert(PPGMPOOL pPool, PPGMPOOLPAGE pPage, bool fCanBeCached)
2074{
2075 /*
2076 * Insert into the GCPhys hash if the page is fit for that.
2077 */
2078 Assert(!pPage->fCached);
2079 if (fCanBeCached)
2080 {
2081 pPage->fCached = true;
2082 pgmPoolHashInsert(pPool, pPage);
2083 Log3(("pgmPoolCacheInsert: Caching %p:{.Core=%RHp, .idx=%d, .enmKind=%s, GCPhys=%RGp}\n",
2084 pPage, pPage->Core.Key, pPage->idx, pgmPoolPoolKindToStr(pPage->enmKind), pPage->GCPhys));
2085 STAM_COUNTER_INC(&pPool->StatCacheCacheable);
2086 }
2087 else
2088 {
2089 Log3(("pgmPoolCacheInsert: Not caching %p:{.Core=%RHp, .idx=%d, .enmKind=%s, GCPhys=%RGp}\n",
2090 pPage, pPage->Core.Key, pPage->idx, pgmPoolPoolKindToStr(pPage->enmKind), pPage->GCPhys));
2091 STAM_COUNTER_INC(&pPool->StatCacheUncacheable);
2092 }
2093
2094 /*
2095 * Insert at the head of the age list.
2096 */
2097 pPage->iAgePrev = NIL_PGMPOOL_IDX;
2098 pPage->iAgeNext = pPool->iAgeHead;
2099 if (pPool->iAgeHead != NIL_PGMPOOL_IDX)
2100 pPool->aPages[pPool->iAgeHead].iAgePrev = pPage->idx;
2101 else
2102 pPool->iAgeTail = pPage->idx;
2103 pPool->iAgeHead = pPage->idx;
2104}
2105
2106
2107/**
2108 * Flushes a cached page.
2109 *
2110 * @param pPool The pool.
2111 * @param pPage The cached page.
2112 */
2113static void pgmPoolCacheFlushPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
2114{
2115 Log3(("pgmPoolCacheFlushPage: %RGp\n", pPage->GCPhys));
2116
2117 /*
2118 * Remove the page from the hash.
2119 */
2120 if (pPage->fCached)
2121 {
2122 pPage->fCached = false;
2123 pgmPoolHashRemove(pPool, pPage);
2124 }
2125 else
2126 Assert(pPage->iNext == NIL_PGMPOOL_IDX);
2127
2128 /*
2129 * Remove it from the age list.
2130 */
2131 if (pPage->iAgeNext != NIL_PGMPOOL_IDX)
2132 pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->iAgePrev;
2133 else
2134 pPool->iAgeTail = pPage->iAgePrev;
2135 if (pPage->iAgePrev != NIL_PGMPOOL_IDX)
2136 pPool->aPages[pPage->iAgePrev].iAgeNext = pPage->iAgeNext;
2137 else
2138 pPool->iAgeHead = pPage->iAgeNext;
2139 pPage->iAgeNext = NIL_PGMPOOL_IDX;
2140 pPage->iAgePrev = NIL_PGMPOOL_IDX;
2141}
2142
2143
2144/**
2145 * Looks for pages sharing the monitor.
2146 *
2147 * @returns Pointer to the head page.
2148 * @returns NULL if not found.
2149 * @param pPool The pool.
2150 * @param pNewPage The page which is going to be monitored.
2151 */
2152static PPGMPOOLPAGE pgmPoolMonitorGetPageByGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pNewPage)
2153{
2154 /*
2155 * Look up the GCPhys in the hash.
2156 */
2157 RTGCPHYS GCPhys = pNewPage->GCPhys & ~(RTGCPHYS)(PAGE_SIZE - 1);
2158 unsigned i = pPool->aiHash[PGMPOOL_HASH(GCPhys)];
2159 if (i == NIL_PGMPOOL_IDX)
2160 return NULL;
2161 do
2162 {
2163 PPGMPOOLPAGE pPage = &pPool->aPages[i];
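 /* Unsigned wrap-around trick: the subtraction below matches any page whose GCPhys lies in the same 4KB frame,
  * since some kinds (e.g. the PAE-for-32-bit PD quarters) keep a sub-page offset in GCPhys. */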
2164 if ( pPage->GCPhys - GCPhys < PAGE_SIZE
2165 && pPage != pNewPage)
2166 {
2167 switch (pPage->enmKind)
2168 {
2169 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT:
2170 case PGMPOOLKIND_PAE_PT_FOR_32BIT_PT:
2171 case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
2172 case PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD:
2173 case PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD:
2174 case PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD:
2175 case PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD:
2176 case PGMPOOLKIND_PAE_PD_FOR_PAE_PD:
2177 case PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD:
2178 case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
2179 case PGMPOOLKIND_64BIT_PML4:
2180 case PGMPOOLKIND_32BIT_PD:
2181 case PGMPOOLKIND_PAE_PDPT:
2182 {
2183 /* find the head */
2184 while (pPage->iMonitoredPrev != NIL_PGMPOOL_IDX)
2185 {
2186 Assert(pPage->iMonitoredPrev != pPage->idx);
2187 pPage = &pPool->aPages[pPage->iMonitoredPrev];
2188 }
2189 return pPage;
2190 }
2191
2192 /* ignore, no monitoring. */
2193 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB:
2194 case PGMPOOLKIND_PAE_PT_FOR_PAE_2MB:
2195 case PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB:
2196 case PGMPOOLKIND_32BIT_PT_FOR_PHYS:
2197 case PGMPOOLKIND_PAE_PT_FOR_PHYS:
2198 case PGMPOOLKIND_64BIT_PDPT_FOR_PHYS:
2199 case PGMPOOLKIND_64BIT_PD_FOR_PHYS:
2200 case PGMPOOLKIND_EPT_PDPT_FOR_PHYS:
2201 case PGMPOOLKIND_EPT_PD_FOR_PHYS:
2202 case PGMPOOLKIND_EPT_PT_FOR_PHYS:
2203 case PGMPOOLKIND_ROOT_NESTED:
2204 case PGMPOOLKIND_PAE_PD_PHYS:
2205 case PGMPOOLKIND_PAE_PDPT_PHYS:
2206 case PGMPOOLKIND_32BIT_PD_PHYS:
2207 case PGMPOOLKIND_PAE_PDPT_FOR_32BIT:
2208 break;
2209 default:
2210 AssertFatalMsgFailed(("enmKind=%d idx=%d\n", pPage->enmKind, pPage->idx));
2211 }
2212 }
2213
2214 /* next */
2215 i = pPage->iNext;
2216 } while (i != NIL_PGMPOOL_IDX);
2217 return NULL;
2218}
2219
2220
2221/**
2222 * Enables write monitoring of a guest page.
2223 *
2224 * @returns VBox status code.
2225 * @retval VINF_SUCCESS on success.
2226 * @param pPool The pool.
2227 * @param pPage The cached page.
2228 */
2229static int pgmPoolMonitorInsert(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
2230{
2231 LogFlow(("pgmPoolMonitorInsert %RGp\n", pPage->GCPhys & ~(RTGCPHYS)(PAGE_SIZE - 1)));
2232
2233 /*
2234 * Filter out the relevant kinds.
2235 */
2236 switch (pPage->enmKind)
2237 {
2238 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT:
2239 case PGMPOOLKIND_PAE_PT_FOR_32BIT_PT:
2240 case PGMPOOLKIND_PAE_PD_FOR_PAE_PD:
2241 case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
2242 case PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD:
2243 case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
2244 case PGMPOOLKIND_64BIT_PML4:
2245 case PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD:
2246 case PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD:
2247 case PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD:
2248 case PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD:
2249 case PGMPOOLKIND_32BIT_PD:
2250 case PGMPOOLKIND_PAE_PDPT:
2251 break;
2252
2253 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB:
2254 case PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB:
2255 case PGMPOOLKIND_PAE_PT_FOR_PAE_2MB:
2256 case PGMPOOLKIND_32BIT_PT_FOR_PHYS:
2257 case PGMPOOLKIND_PAE_PT_FOR_PHYS:
2258 case PGMPOOLKIND_64BIT_PDPT_FOR_PHYS:
2259 case PGMPOOLKIND_64BIT_PD_FOR_PHYS:
2260 case PGMPOOLKIND_EPT_PDPT_FOR_PHYS:
2261 case PGMPOOLKIND_EPT_PD_FOR_PHYS:
2262 case PGMPOOLKIND_EPT_PT_FOR_PHYS:
2263 case PGMPOOLKIND_ROOT_NESTED:
2264 /* Nothing to monitor here. */
2265 return VINF_SUCCESS;
2266
2267 case PGMPOOLKIND_32BIT_PD_PHYS:
2268 case PGMPOOLKIND_PAE_PDPT_PHYS:
2269 case PGMPOOLKIND_PAE_PD_PHYS:
2270 case PGMPOOLKIND_PAE_PDPT_FOR_32BIT:
2271 /* Nothing to monitor here. */
2272 return VINF_SUCCESS;
2273 default:
2274 AssertFatalMsgFailed(("This can't happen! enmKind=%d\n", pPage->enmKind));
2275 }
2276
2277 /*
2278 * Install handler.
2279 */
2280 int rc;
2281 PPGMPOOLPAGE pPageHead = pgmPoolMonitorGetPageByGCPhys(pPool, pPage);
2282 if (pPageHead)
2283 {
2284 Assert(pPageHead != pPage); Assert(pPageHead->iMonitoredNext != pPage->idx);
2285 Assert(pPageHead->iMonitoredPrev != pPage->idx);
2286
2287#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
2288 if (pPageHead->fDirty)
2289 pgmPoolFlushDirtyPage(pPool->CTX_SUFF(pVM), pPool, pPageHead->idxDirty, false /* do not remove */);
2290#endif
2291
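 /* A physical handler is already registered for this guest page, so just link ourselves into the monitored chain right behind the head. */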
2292 pPage->iMonitoredPrev = pPageHead->idx;
2293 pPage->iMonitoredNext = pPageHead->iMonitoredNext;
2294 if (pPageHead->iMonitoredNext != NIL_PGMPOOL_IDX)
2295 pPool->aPages[pPageHead->iMonitoredNext].iMonitoredPrev = pPage->idx;
2296 pPageHead->iMonitoredNext = pPage->idx;
2297 rc = VINF_SUCCESS;
2298 }
2299 else
2300 {
2301 Assert(pPage->iMonitoredNext == NIL_PGMPOOL_IDX); Assert(pPage->iMonitoredPrev == NIL_PGMPOOL_IDX);
2302 PVM pVM = pPool->CTX_SUFF(pVM);
2303 const RTGCPHYS GCPhysPage = pPage->GCPhys & ~(RTGCPHYS)(PAGE_SIZE - 1);
2304 rc = PGMHandlerPhysicalRegisterEx(pVM, PGMPHYSHANDLERTYPE_PHYSICAL_WRITE,
2305 GCPhysPage, GCPhysPage + (PAGE_SIZE - 1),
2306 pPool->pfnAccessHandlerR3, MMHyperCCToR3(pVM, pPage),
2307 pPool->pfnAccessHandlerR0, MMHyperCCToR0(pVM, pPage),
2308 pPool->pfnAccessHandlerRC, MMHyperCCToRC(pVM, pPage),
2309 pPool->pszAccessHandler);
2310 /** @todo we should probably deal with out-of-memory conditions here, but for now increasing
2311 * the heap size should suffice. */
2312 AssertFatalMsgRC(rc, ("PGMHandlerPhysicalRegisterEx %RGp failed with %Rrc\n", GCPhysPage, rc));
2313 PVMCPU pVCpu = VMMGetCpu(pVM);
2314 AssertFatalMsg(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL) || VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), ("fSyncFlags=%x syncff=%d\n", pVCpu->pgm.s.fSyncFlags, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3)));
2315 }
2316 pPage->fMonitored = true;
2317 return rc;
2318}
2319
2320
2321/**
2322 * Disables write monitoring of a guest page.
2323 *
2324 * @returns VBox status code.
2325 * @retval VINF_SUCCESS on success.
2326 * @param pPool The pool.
2327 * @param pPage The cached page.
2328 */
2329static int pgmPoolMonitorFlush(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
2330{
2331 /*
2332 * Filter out the relevant kinds.
2333 */
2334 switch (pPage->enmKind)
2335 {
2336 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT:
2337 case PGMPOOLKIND_PAE_PT_FOR_32BIT_PT:
2338 case PGMPOOLKIND_PAE_PD_FOR_PAE_PD:
2339 case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
2340 case PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD:
2341 case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
2342 case PGMPOOLKIND_64BIT_PML4:
2343 case PGMPOOLKIND_32BIT_PD:
2344 case PGMPOOLKIND_PAE_PDPT:
2345 case PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD:
2346 case PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD:
2347 case PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD:
2348 case PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD:
2349 break;
2350
2351 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB:
2352 case PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB:
2353 case PGMPOOLKIND_PAE_PT_FOR_PAE_2MB:
2354 case PGMPOOLKIND_32BIT_PT_FOR_PHYS:
2355 case PGMPOOLKIND_PAE_PT_FOR_PHYS:
2356 case PGMPOOLKIND_64BIT_PDPT_FOR_PHYS:
2357 case PGMPOOLKIND_64BIT_PD_FOR_PHYS:
2358 case PGMPOOLKIND_EPT_PDPT_FOR_PHYS:
2359 case PGMPOOLKIND_EPT_PD_FOR_PHYS:
2360 case PGMPOOLKIND_EPT_PT_FOR_PHYS:
2361 case PGMPOOLKIND_ROOT_NESTED:
2362 case PGMPOOLKIND_PAE_PD_PHYS:
2363 case PGMPOOLKIND_PAE_PDPT_PHYS:
2364 case PGMPOOLKIND_32BIT_PD_PHYS:
2365 /* Nothing to monitor here. */
2366 Assert(!pPage->fMonitored);
2367 return VINF_SUCCESS;
2368
2369 default:
2370 AssertFatalMsgFailed(("This can't happen! enmKind=%d\n", pPage->enmKind));
2371 }
2372 Assert(pPage->fMonitored);
2373
2374 /*
2375 * Remove the page from the monitored list or uninstall it if last.
2376 */
2377 const PVM pVM = pPool->CTX_SUFF(pVM);
2378 int rc;
2379 if ( pPage->iMonitoredNext != NIL_PGMPOOL_IDX
2380 || pPage->iMonitoredPrev != NIL_PGMPOOL_IDX)
2381 {
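 /* Other pages share this monitor. If we are the chain head, the registered handler's user argument points at us
  * and must be handed over to the next page before we unlink. */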
2382 if (pPage->iMonitoredPrev == NIL_PGMPOOL_IDX)
2383 {
2384 PPGMPOOLPAGE pNewHead = &pPool->aPages[pPage->iMonitoredNext];
2385 pNewHead->iMonitoredPrev = NIL_PGMPOOL_IDX;
2386 rc = PGMHandlerPhysicalChangeCallbacks(pVM, pPage->GCPhys & ~(RTGCPHYS)(PAGE_SIZE - 1),
2387 pPool->pfnAccessHandlerR3, MMHyperCCToR3(pVM, pNewHead),
2388 pPool->pfnAccessHandlerR0, MMHyperCCToR0(pVM, pNewHead),
2389 pPool->pfnAccessHandlerRC, MMHyperCCToRC(pVM, pNewHead),
2390 pPool->pszAccessHandler);
2391 AssertFatalRCSuccess(rc);
2392 pPage->iMonitoredNext = NIL_PGMPOOL_IDX;
2393 }
2394 else
2395 {
2396 pPool->aPages[pPage->iMonitoredPrev].iMonitoredNext = pPage->iMonitoredNext;
2397 if (pPage->iMonitoredNext != NIL_PGMPOOL_IDX)
2398 {
2399 pPool->aPages[pPage->iMonitoredNext].iMonitoredPrev = pPage->iMonitoredPrev;
2400 pPage->iMonitoredNext = NIL_PGMPOOL_IDX;
2401 }
2402 pPage->iMonitoredPrev = NIL_PGMPOOL_IDX;
2403 rc = VINF_SUCCESS;
2404 }
2405 }
2406 else
2407 {
2408 rc = PGMHandlerPhysicalDeregister(pVM, pPage->GCPhys & ~(RTGCPHYS)(PAGE_SIZE - 1));
2409 AssertFatalRC(rc);
2410 PVMCPU pVCpu = VMMGetCpu(pVM);
2411 AssertFatalMsg(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL) || VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3),
2412 ("%#x %#x\n", pVCpu->pgm.s.fSyncFlags, pVM->fGlobalForcedActions));
2413 }
2414 pPage->fMonitored = false;
2415
2416 /*
2417 * Remove it from the list of modified pages (if in it).
2418 */
2419 pgmPoolMonitorModifiedRemove(pPool, pPage);
2420
2421 return rc;
2422}
2423
2424
2425/**
2426 * Inserts the page into the list of modified pages.
2427 *
2428 * @param pPool The pool.
2429 * @param pPage The page.
2430 */
2431void pgmPoolMonitorModifiedInsert(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
2432{
2433 Log3(("pgmPoolMonitorModifiedInsert: idx=%d\n", pPage->idx));
2434 AssertMsg( pPage->iModifiedNext == NIL_PGMPOOL_IDX
2435 && pPage->iModifiedPrev == NIL_PGMPOOL_IDX
2436 && pPool->iModifiedHead != pPage->idx,
2437 ("Next=%d Prev=%d idx=%d cModifications=%d Head=%d cModifiedPages=%d\n",
2438 pPage->iModifiedNext, pPage->iModifiedPrev, pPage->idx, pPage->cModifications,
2439 pPool->iModifiedHead, pPool->cModifiedPages));
2440
2441 pPage->iModifiedNext = pPool->iModifiedHead;
2442 if (pPool->iModifiedHead != NIL_PGMPOOL_IDX)
2443 pPool->aPages[pPool->iModifiedHead].iModifiedPrev = pPage->idx;
2444 pPool->iModifiedHead = pPage->idx;
2445 pPool->cModifiedPages++;
2446#ifdef VBOX_WITH_STATISTICS
2447 if (pPool->cModifiedPages > pPool->cModifiedPagesHigh)
2448 pPool->cModifiedPagesHigh = pPool->cModifiedPages;
2449#endif
2450}
2451
2452
2453/**
2454 * Removes the page from the list of modified pages and resets the
2455 * modification counter.
2456 *
2457 * @param pPool The pool.
2458 * @param pPage The page which is believed to be in the list of modified pages.
2459 */
2460static void pgmPoolMonitorModifiedRemove(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
2461{
2462 Log3(("pgmPoolMonitorModifiedRemove: idx=%d cModifications=%d\n", pPage->idx, pPage->cModifications));
2463 if (pPool->iModifiedHead == pPage->idx)
2464 {
2465 Assert(pPage->iModifiedPrev == NIL_PGMPOOL_IDX);
2466 pPool->iModifiedHead = pPage->iModifiedNext;
2467 if (pPage->iModifiedNext != NIL_PGMPOOL_IDX)
2468 {
2469 pPool->aPages[pPage->iModifiedNext].iModifiedPrev = NIL_PGMPOOL_IDX;
2470 pPage->iModifiedNext = NIL_PGMPOOL_IDX;
2471 }
2472 pPool->cModifiedPages--;
2473 }
2474 else if (pPage->iModifiedPrev != NIL_PGMPOOL_IDX)
2475 {
2476 pPool->aPages[pPage->iModifiedPrev].iModifiedNext = pPage->iModifiedNext;
2477 if (pPage->iModifiedNext != NIL_PGMPOOL_IDX)
2478 {
2479 pPool->aPages[pPage->iModifiedNext].iModifiedPrev = pPage->iModifiedPrev;
2480 pPage->iModifiedNext = NIL_PGMPOOL_IDX;
2481 }
2482 pPage->iModifiedPrev = NIL_PGMPOOL_IDX;
2483 pPool->cModifiedPages--;
2484 }
2485 else
2486 Assert(pPage->iModifiedPrev == NIL_PGMPOOL_IDX);
2487 pPage->cModifications = 0;
2488}
2489
2490
2491/**
2492 * Zaps the list of modified pages, resetting their modification counters in the process.
2493 *
2494 * @param pVM The VM handle.
2495 */
2496static void pgmPoolMonitorModifiedClearAll(PVM pVM)
2497{
2498 pgmLock(pVM);
2499 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
2500 LogFlow(("pgmPoolMonitorModifiedClearAll: cModifiedPages=%d\n", pPool->cModifiedPages));
2501
2502 unsigned cPages = 0; NOREF(cPages);
2503
2504#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
2505 pgmPoolResetDirtyPages(pVM);
2506#endif
2507
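 /* Detach the whole modified list up front and then clear each node; cPages merely cross-checks the count in strict builds. */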
2508 uint16_t idx = pPool->iModifiedHead;
2509 pPool->iModifiedHead = NIL_PGMPOOL_IDX;
2510 while (idx != NIL_PGMPOOL_IDX)
2511 {
2512 PPGMPOOLPAGE pPage = &pPool->aPages[idx];
2513 idx = pPage->iModifiedNext;
2514 pPage->iModifiedNext = NIL_PGMPOOL_IDX;
2515 pPage->iModifiedPrev = NIL_PGMPOOL_IDX;
2516 pPage->cModifications = 0;
2517 Assert(++cPages);
2518 }
2519 AssertMsg(cPages == pPool->cModifiedPages, ("%d != %d\n", cPages, pPool->cModifiedPages));
2520 pPool->cModifiedPages = 0;
2521 pgmUnlock(pVM);
2522}
2523
2524
2525/**
2526 * Handles SyncCR3 pool tasks.
2527 *
2528 * @returns VBox status code.
2529 * @retval VINF_SUCCESS on success.
2530 * @retval VINF_PGM_SYNC_CR3 if it needs to be deferred to ring 3 (GC only).
2531 * @param pVCpu The VMCPU handle.
2532 * @remark Should only be used when monitoring is available, thus placed in
2533 * the PGMPOOL_WITH_MONITORING #ifdef.
2534 */
2535int pgmPoolSyncCR3(PVMCPU pVCpu)
2536{
2537 PVM pVM = pVCpu->CTX_SUFF(pVM);
2538 LogFlow(("pgmPoolSyncCR3 fSyncFlags=%x\n", pVCpu->pgm.s.fSyncFlags));
2539
2540 /*
2541 * When monitoring shadowed pages, we reset the modification counters on CR3 sync.
2542 * Occasionally we will have to clear all the shadow page tables because we wanted
2543 * to monitor a page which was mapped by too many shadowed page tables. This operation
2544 * is sometimes referred to as a 'lightweight flush'.
2545 */
2546# ifdef IN_RING3 /* Don't flush in ring-0 or raw mode, it's taking too long. */
2547 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)
2548 pgmR3PoolClearAll(pVM, false /*fFlushRemTlb*/);
2549# else /* !IN_RING3 */
2550 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)
2551 {
2552 Log(("SyncCR3: PGM_SYNC_CLEAR_PGM_POOL is set -> VINF_PGM_SYNC_CR3\n"));
2553 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3); /** @todo no need to do global sync, right? */
2554
2555 /* Make sure all other VCPUs return to ring 3. */
2556 if (pVM->cCpus > 1)
2557 {
2558 VM_FF_SET(pVM, VM_FF_PGM_POOL_FLUSH_PENDING);
2559 PGM_INVL_ALL_VCPU_TLBS(pVM);
2560 }
2561 return VINF_PGM_SYNC_CR3;
2562 }
2563# endif /* !IN_RING3 */
2564 else
2565 {
2566 pgmPoolMonitorModifiedClearAll(pVM);
2567
2568 /* pgmPoolMonitorModifiedClearAll can cause a pgm pool flush (dirty page clearing), so make sure we handle this! */
2569 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)
2570 {
2571 Log(("pgmPoolMonitorModifiedClearAll caused a pgm flush -> call pgmPoolSyncCR3 again!\n"));
2572 return pgmPoolSyncCR3(pVCpu);
2573 }
2574 }
2575 return VINF_SUCCESS;
2576}
2577
2578
2579/**
2580 * Frees up at least one user entry.
2581 *
2582 * @returns VBox status code.
2583 * @retval VINF_SUCCESS on success.
2584 * @retval VERR_PGM_POOL_FLUSHED if the pool was flushed.
2585 * @param pPool The pool.
2586 * @param iUser The user index.
2587 */
2588static int pgmPoolTrackFreeOneUser(PPGMPOOL pPool, uint16_t iUser)
2589{
2590 STAM_COUNTER_INC(&pPool->StatTrackFreeUpOneUser);
2591 /*
2592 * Just free cached pages in a braindead fashion.
2593 */
2594 /** @todo walk the age list backwards and free the first with usage. */
2595 int rc = VINF_SUCCESS;
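 /* Keep evicting cache pages until at least one user record has been returned to the free list. */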
2596 do
2597 {
2598 int rc2 = pgmPoolCacheFreeOne(pPool, iUser);
2599 if (RT_FAILURE(rc2) && rc == VINF_SUCCESS)
2600 rc = rc2;
2601 } while (pPool->iUserFreeHead == NIL_PGMPOOL_USER_INDEX);
2602 return rc;
2603}
2604
2605
2606/**
2607 * Inserts a page into the cache.
2608 *
2609 * This will create a user node for the page, insert it into the GCPhys
2610 * hash, and insert it into the age list.
2611 *
2612 * @returns VBox status code.
2613 * @retval VINF_SUCCESS if successfully added.
2614 * @retval VERR_PGM_POOL_FLUSHED if the pool was flushed.
2615 * @param pPool The pool.
2616 * @param pPage The cached page.
2617 * @param GCPhys The GC physical address of the page we're gonna shadow.
2618 * @param iUser The user index.
2619 * @param iUserTable The user table index.
2620 */
2621DECLINLINE(int) pgmPoolTrackInsert(PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTGCPHYS GCPhys, uint16_t iUser, uint32_t iUserTable)
2622{
2623 int rc = VINF_SUCCESS;
2624 PPGMPOOLUSER paUsers = pPool->CTX_SUFF(paUsers);
2625
2626 LogFlow(("pgmPoolTrackInsert GCPhys=%RGp iUser %x iUserTable %x\n", GCPhys, iUser, iUserTable));
2627
2628#ifdef VBOX_STRICT
2629 /*
2630 * Check that the entry doesn't already exist.
2631 */
2632 if (pPage->iUserHead != NIL_PGMPOOL_USER_INDEX)
2633 {
2634 uint16_t i = pPage->iUserHead;
2635 do
2636 {
2637 Assert(i < pPool->cMaxUsers);
2638 AssertMsg(paUsers[i].iUser != iUser || paUsers[i].iUserTable != iUserTable, ("%x %x vs new %x %x\n", paUsers[i].iUser, paUsers[i].iUserTable, iUser, iUserTable));
2639 i = paUsers[i].iNext;
2640 } while (i != NIL_PGMPOOL_USER_INDEX);
2641 }
2642#endif
2643
2644 /*
2645 * Find a free user node.
2646 */
2647 uint16_t i = pPool->iUserFreeHead;
2648 if (i == NIL_PGMPOOL_USER_INDEX)
2649 {
2650 rc = pgmPoolTrackFreeOneUser(pPool, iUser);
2651 if (RT_FAILURE(rc))
2652 return rc;
2653 i = pPool->iUserFreeHead;
2654 }
2655
2656 /*
2657 * Unlink the user node from the free list,
2658 * initialize and insert it into the user list.
2659 */
2660 pPool->iUserFreeHead = paUsers[i].iNext;
2661 paUsers[i].iNext = NIL_PGMPOOL_USER_INDEX;
2662 paUsers[i].iUser = iUser;
2663 paUsers[i].iUserTable = iUserTable;
2664 pPage->iUserHead = i;
2665
2666 /*
2667 * Insert into cache and enable monitoring of the guest page if enabled.
2668 *
2669 * Until we implement caching of all levels, including the CR3 one, we'll
2670 * have to make sure we don't try monitor & cache any recursive reuse of
2671 * a monitored CR3 page. Because all windows versions are doing this we'll
2672 * have to be able to do combined access monitoring, CR3 + PT and
2673 * PD + PT (guest PAE).
2674 *
2675 * Update:
2676 * We're now cooperating with the CR3 monitor if an uncachable page is found.
2677 */
2678 const bool fCanBeMonitored = true;
2679 pgmPoolCacheInsert(pPool, pPage, fCanBeMonitored); /* This can be expanded. */
2680 if (fCanBeMonitored)
2681 {
2682 rc = pgmPoolMonitorInsert(pPool, pPage);
2683 AssertRC(rc);
2684 }
2685 return rc;
2686}
2687
2688
2689/**
2690 * Adds a user reference to a page.
2691 *
2692 * This will move the page to the head of the age list.
2693 *
2694 * @returns VBox status code.
2695 * @retval VINF_SUCCESS if successfully added.
2696 * @retval VERR_PGM_POOL_FLUSHED if the pool was flushed.
2697 * @param pPool The pool.
2698 * @param pPage The cached page.
2699 * @param iUser The user index.
2700 * @param iUserTable The user table.
2701 */
2702static int pgmPoolTrackAddUser(PPGMPOOL pPool, PPGMPOOLPAGE pPage, uint16_t iUser, uint32_t iUserTable)
2703{
2704 PPGMPOOLUSER paUsers = pPool->CTX_SUFF(paUsers);
2705
2706 Log3(("pgmPoolTrackAddUser GCPhys = %RGp iUser %x iUserTable %x\n", pPage->GCPhys, iUser, iUserTable));
2707
2708# ifdef VBOX_STRICT
2709 /*
2710 * Check that the entry doesn't already exist. We only allow multiple users of top-level paging structures (SHW_POOL_ROOT_IDX).
2711 */
2712 if (pPage->iUserHead != NIL_PGMPOOL_USER_INDEX)
2713 {
2714 uint16_t i = pPage->iUserHead;
2715 do
2716 {
2717 Assert(i < pPool->cMaxUsers);
2718 AssertMsg( iUser == PGMPOOL_IDX_PD || iUser == PGMPOOL_IDX_PDPT || iUser == PGMPOOL_IDX_NESTED_ROOT || iUser == PGMPOOL_IDX_AMD64_CR3
2719 || paUsers[i].iUser != iUser || paUsers[i].iUserTable != iUserTable, ("%x %x vs new %x %x\n", paUsers[i].iUser, paUsers[i].iUserTable, iUser, iUserTable));
2720 i = paUsers[i].iNext;
2721 } while (i != NIL_PGMPOOL_USER_INDEX);
2722 }
2723# endif
2724
2725 /*
2726 * Allocate a user node.
2727 */
2728 uint16_t i = pPool->iUserFreeHead;
2729 if (i == NIL_PGMPOOL_USER_INDEX)
2730 {
2731 int rc = pgmPoolTrackFreeOneUser(pPool, iUser);
2732 if (RT_FAILURE(rc))
2733 return rc;
2734 i = pPool->iUserFreeHead;
2735 }
2736 pPool->iUserFreeHead = paUsers[i].iNext;
2737
2738 /*
2739 * Initialize the user node and insert it.
2740 */
2741 paUsers[i].iNext = pPage->iUserHead;
2742 paUsers[i].iUser = iUser;
2743 paUsers[i].iUserTable = iUserTable;
2744 pPage->iUserHead = i;
2745
2746# ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
2747 if (pPage->fDirty)
2748 pgmPoolFlushDirtyPage(pPool->CTX_SUFF(pVM), pPool, pPage->idxDirty, false /* do not remove */);
2749# endif
2750
2751 /*
2752 * Tell the cache to update its replacement stats for this page.
2753 */
2754 pgmPoolCacheUsed(pPool, pPage);
2755 return VINF_SUCCESS;
2756}
2757
2758
2759/**
2760 * Frees a user record associated with a page.
2761 *
2762 * This does not clear the entry in the user table, it simply returns the
2763 * user record to the chain of free records.
2764 *
2765 * @param pPool The pool.
2766 * @param pPage The shadow page.
2767 * @param iUser The shadow page pool index of the user table.
2768 * @param iUserTable The index into the user table (shadowed).
2769 */
2770static void pgmPoolTrackFreeUser(PPGMPOOL pPool, PPGMPOOLPAGE pPage, uint16_t iUser, uint32_t iUserTable)
2771{
2772 /*
2773 * Unlink and free the specified user entry.
2774 */
2775 PPGMPOOLUSER paUsers = pPool->CTX_SUFF(paUsers);
2776
2777 Log3(("pgmPoolTrackFreeUser %RGp %x %x\n", pPage->GCPhys, iUser, iUserTable));
2778 /* Special: For PAE and 32-bit paging, there is usually no more than one user. */
2779 uint16_t i = pPage->iUserHead;
2780 if ( i != NIL_PGMPOOL_USER_INDEX
2781 && paUsers[i].iUser == iUser
2782 && paUsers[i].iUserTable == iUserTable)
2783 {
2784 pPage->iUserHead = paUsers[i].iNext;
2785
2786 paUsers[i].iUser = NIL_PGMPOOL_IDX;
2787 paUsers[i].iNext = pPool->iUserFreeHead;
2788 pPool->iUserFreeHead = i;
2789 return;
2790 }
2791
2792 /* General: Linear search. */
2793 uint16_t iPrev = NIL_PGMPOOL_USER_INDEX;
2794 while (i != NIL_PGMPOOL_USER_INDEX)
2795 {
2796 if ( paUsers[i].iUser == iUser
2797 && paUsers[i].iUserTable == iUserTable)
2798 {
2799 if (iPrev != NIL_PGMPOOL_USER_INDEX)
2800 paUsers[iPrev].iNext = paUsers[i].iNext;
2801 else
2802 pPage->iUserHead = paUsers[i].iNext;
2803
2804 paUsers[i].iUser = NIL_PGMPOOL_IDX;
2805 paUsers[i].iNext = pPool->iUserFreeHead;
2806 pPool->iUserFreeHead = i;
2807 return;
2808 }
2809 iPrev = i;
2810 i = paUsers[i].iNext;
2811 }
2812
2813 /* Fatal: didn't find it */
2814 AssertFatalMsgFailed(("Didn't find the user entry! iUser=%#x iUserTable=%#x GCPhys=%RGp\n",
2815 iUser, iUserTable, pPage->GCPhys));
2816}
2817
2818
2819/**
2820 * Gets the entry size of a shadow table.
2821 *
2822 * @param enmKind The kind of page.
2823 *
2824 * @returns The size of the entry in bytes. That is, 4 or 8.
2825 * @returns If the kind is not for a table, a fatal assertion is
2826 * raised.
2827 */
2828DECLINLINE(unsigned) pgmPoolTrackGetShadowEntrySize(PGMPOOLKIND enmKind)
2829{
2830 switch (enmKind)
2831 {
2832 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT:
2833 case PGMPOOLKIND_32BIT_PT_FOR_PHYS:
2834 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB:
2835 case PGMPOOLKIND_32BIT_PD:
2836 case PGMPOOLKIND_32BIT_PD_PHYS:
2837 return 4;
2838
2839 case PGMPOOLKIND_PAE_PT_FOR_PHYS:
2840 case PGMPOOLKIND_PAE_PT_FOR_32BIT_PT:
2841 case PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB:
2842 case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
2843 case PGMPOOLKIND_PAE_PT_FOR_PAE_2MB:
2844 case PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD:
2845 case PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD:
2846 case PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD:
2847 case PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD:
2848 case PGMPOOLKIND_PAE_PD_FOR_PAE_PD:
2849 case PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD:
2850 case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
2851 case PGMPOOLKIND_64BIT_PML4:
2852 case PGMPOOLKIND_PAE_PDPT:
2853 case PGMPOOLKIND_ROOT_NESTED:
2854 case PGMPOOLKIND_64BIT_PDPT_FOR_PHYS:
2855 case PGMPOOLKIND_64BIT_PD_FOR_PHYS:
2856 case PGMPOOLKIND_EPT_PDPT_FOR_PHYS:
2857 case PGMPOOLKIND_EPT_PD_FOR_PHYS:
2858 case PGMPOOLKIND_EPT_PT_FOR_PHYS:
2859 case PGMPOOLKIND_PAE_PD_PHYS:
2860 case PGMPOOLKIND_PAE_PDPT_PHYS:
2861 return 8;
2862
2863 default:
2864 AssertFatalMsgFailed(("enmKind=%d\n", enmKind));
2865 }
2866}
2867
2868
2869/**
2870 * Gets the entry size of a guest table.
2871 *
2872 * @param enmKind The kind of page.
2873 *
2874 * @returns The size of the entry in bytes. That is, 0, 4 or 8.
2875 * @returns If the kind is not for a table, an assertion is raised and 0 is
2876 * returned.
2877 */
2878DECLINLINE(unsigned) pgmPoolTrackGetGuestEntrySize(PGMPOOLKIND enmKind)
2879{
2880 switch (enmKind)
2881 {
2882 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT:
2883 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB:
2884 case PGMPOOLKIND_32BIT_PD:
2885 case PGMPOOLKIND_PAE_PT_FOR_32BIT_PT:
2886 case PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB:
2887 case PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD:
2888 case PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD:
2889 case PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD:
2890 case PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD:
2891 return 4;
2892
2893 case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
2894 case PGMPOOLKIND_PAE_PT_FOR_PAE_2MB:
2895 case PGMPOOLKIND_PAE_PD_FOR_PAE_PD:
2896 case PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD:
2897 case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
2898 case PGMPOOLKIND_64BIT_PML4:
2899 case PGMPOOLKIND_PAE_PDPT:
2900 return 8;
2901
2902 case PGMPOOLKIND_32BIT_PT_FOR_PHYS:
2903 case PGMPOOLKIND_PAE_PT_FOR_PHYS:
2904 case PGMPOOLKIND_64BIT_PDPT_FOR_PHYS:
2905 case PGMPOOLKIND_64BIT_PD_FOR_PHYS:
2906 case PGMPOOLKIND_EPT_PDPT_FOR_PHYS:
2907 case PGMPOOLKIND_EPT_PD_FOR_PHYS:
2908 case PGMPOOLKIND_EPT_PT_FOR_PHYS:
2909 case PGMPOOLKIND_ROOT_NESTED:
2910 case PGMPOOLKIND_PAE_PD_PHYS:
2911 case PGMPOOLKIND_PAE_PDPT_PHYS:
2912 case PGMPOOLKIND_32BIT_PD_PHYS:
2913 /** @todo can we return 0? (nobody is calling this...) */
2914 AssertFailed();
2915 return 0;
2916
2917 default:
2918 AssertFatalMsgFailed(("enmKind=%d\n", enmKind));
2919 }
2920}
2921
2922
2923/**
2924 * Checks one shadow page table entry for a mapping of a physical page.
2925 *
2926 * @returns true if any PTEs were kept (merely updated), false if all relevant PTEs were removed.
2927 *
2928 * @param pVM The VM handle.
2929 * @param pPhysPage The guest page in question.
2930 * @param fFlushPTEs Flush PTEs or allow them to be updated (e.g. in case of an RW bit change)
2931 * @param iShw The shadow page table.
2932 * @param iPte The page table entry index (asserted to be valid below).
2933 */
2934static bool pgmPoolTrackFlushGCPhysPTInt(PVM pVM, PCPGMPAGE pPhysPage, bool fFlushPTEs, uint16_t iShw, uint16_t iPte)
2935{
2936 LogFlow(("pgmPoolTrackFlushGCPhysPTInt: pPhysPage=%RHp iShw=%d iPte=%d\n", PGM_PAGE_GET_HCPHYS(pPhysPage), iShw, iPte));
2937 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
2938 bool fRet = false;
2939
2940 /*
2941 * Assert sanity.
2942 */
2943 Assert(iPte != NIL_PGMPOOL_PHYSEXT_IDX_PTE);
2944 AssertFatalMsg(iShw < pPool->cCurPages && iShw != NIL_PGMPOOL_IDX, ("iShw=%d\n", iShw));
2945 PPGMPOOLPAGE pPage = &pPool->aPages[iShw];
2946
2947 /*
2948 * Then, clear the actual mappings to the page in the shadow PT.
2949 */
2950 switch (pPage->enmKind)
2951 {
2952 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT:
2953 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB:
2954 case PGMPOOLKIND_32BIT_PT_FOR_PHYS:
2955 {
2956 const uint32_t u32 = PGM_PAGE_GET_HCPHYS(pPhysPage) | X86_PTE_P;
2957 PX86PT pPT = (PX86PT)PGMPOOL_PAGE_2_PTR(pVM, pPage);
2958 uint32_t u32AndMask = 0;
2959 uint32_t u32OrMask = 0;
2960
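 /* When the PTE may be kept, the masks rewrite it in place below; u32AndMask left at zero means the entry is zapped
  * and the present counters are decremented. */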
2961 if (!fFlushPTEs)
2962 {
2963 switch (PGM_PAGE_GET_HNDL_PHYS_STATE(pPhysPage))
2964 {
2965 case PGM_PAGE_HNDL_PHYS_STATE_NONE: /* No handler installed. */
2966 case PGM_PAGE_HNDL_PHYS_STATE_DISABLED: /* Monitoring is temporarily disabled. */
2967 u32OrMask = X86_PTE_RW;
2968 u32AndMask = UINT32_MAX;
2969 fRet = true;
2970 STAM_COUNTER_INC(&pPool->StatTrackFlushEntryKeep);
2971 break;
2972
2973 case PGM_PAGE_HNDL_PHYS_STATE_WRITE: /* Write access is monitored. */
2974 u32OrMask = 0;
2975 u32AndMask = ~X86_PTE_RW;
2976 fRet = true;
2977 STAM_COUNTER_INC(&pPool->StatTrackFlushEntryKeep);
2978 break;
2979 default:
2980 /* (shouldn't be here, will assert below) */
2981 STAM_COUNTER_INC(&pPool->StatTrackFlushEntry);
2982 break;
2983 }
2984 }
2985 else
2986 STAM_COUNTER_INC(&pPool->StatTrackFlushEntry);
2987
2988 /* Update the counter if we're removing references. */
2989 if (!u32AndMask)
2990 {
2991 Assert(pPage->cPresent);
2992 Assert(pPool->cPresent);
2993 pPage->cPresent--;
2994 pPool->cPresent--;
2995 }
2996
2997 if ((pPT->a[iPte].u & (X86_PTE_PG_MASK | X86_PTE_P)) == u32)
2998 {
2999 X86PTE Pte;
3000
3001 Log4(("pgmPoolTrackFlushGCPhysPTs: i=%d pte=%RX32\n", iPte, pPT->a[iPte]));
3002 Pte.u = (pPT->a[iPte].u & u32AndMask) | u32OrMask;
3003 if (Pte.u & PGM_PTFLAGS_TRACK_DIRTY)
3004 Pte.n.u1Write = 0; /* need to disallow writes when dirty bit tracking is still active. */
3005
3006 ASMAtomicWriteSize(&pPT->a[iPte].u, Pte.u);
3007 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pPT);
3008 return fRet;
3009 }
3010#ifdef LOG_ENABLED
3011 Log(("iFirstPresent=%d cPresent=%d\n", pPage->iFirstPresent, pPage->cPresent));
3012 for (unsigned i = 0, cFound = 0; i < RT_ELEMENTS(pPT->a); i++)
3013 if ((pPT->a[i].u & (X86_PTE_PG_MASK | X86_PTE_P)) == u32)
3014 {
3015 Log(("i=%d cFound=%d\n", i, ++cFound));
3016 }
3017#endif
3018 AssertFatalMsgFailed(("iFirstPresent=%d cPresent=%d u32=%RX32 poolkind=%x\n", pPage->iFirstPresent, pPage->cPresent, u32, pPage->enmKind));
3019 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pPT);
3020 break;
3021 }
3022
3023 case PGMPOOLKIND_PAE_PT_FOR_32BIT_PT:
3024 case PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB:
3025 case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
3026 case PGMPOOLKIND_PAE_PT_FOR_PAE_2MB:
3027 case PGMPOOLKIND_PAE_PT_FOR_PHYS:
3028 case PGMPOOLKIND_EPT_PT_FOR_PHYS: /* physical mask the same as PAE; RW bit as well; be careful! */
3029 {
3030 const uint64_t u64 = PGM_PAGE_GET_HCPHYS(pPhysPage) | X86_PTE_P;
3031 PPGMSHWPTPAE pPT = (PPGMSHWPTPAE)PGMPOOL_PAGE_2_PTR(pVM, pPage);
3032 uint64_t u64OrMask = 0;
3033 uint64_t u64AndMask = 0;
3034
3035 if (!fFlushPTEs)
3036 {
3037 switch (PGM_PAGE_GET_HNDL_PHYS_STATE(pPhysPage))
3038 {
3039 case PGM_PAGE_HNDL_PHYS_STATE_NONE: /* No handler installed. */
3040 case PGM_PAGE_HNDL_PHYS_STATE_DISABLED: /* Monitoring is temporarily disabled. */
3041 u64OrMask = X86_PTE_RW;
3042 u64AndMask = UINT64_MAX;
3043 fRet = true;
3044 STAM_COUNTER_INC(&pPool->StatTrackFlushEntryKeep);
3045 break;
3046
3047 case PGM_PAGE_HNDL_PHYS_STATE_WRITE: /* Write access is monitored. */
3048 u64OrMask = 0;
3049 u64AndMask = ~((uint64_t)X86_PTE_RW);
3050 fRet = true;
3051 STAM_COUNTER_INC(&pPool->StatTrackFlushEntryKeep);
3052 break;
3053
3054 default:
3055 /* (shouldn't be here, will assert below) */
3056 STAM_COUNTER_INC(&pPool->StatTrackFlushEntry);
3057 break;
3058 }
3059 }
3060 else
3061 STAM_COUNTER_INC(&pPool->StatTrackFlushEntry);
3062
3063 /* Update the counter if we're removing references. */
3064 if (!u64AndMask)
3065 {
3066 Assert(pPage->cPresent);
3067 Assert(pPool->cPresent);
3068 pPage->cPresent--;
3069 pPool->cPresent--;
3070 }
3071
3072 if ((PGMSHWPTEPAE_GET_U(pPT->a[iPte]) & (X86_PTE_PAE_PG_MASK | X86_PTE_P | X86_PTE_PAE_MBZ_MASK_NX)) == u64)
3073 {
3074 X86PTEPAE Pte;
3075
3076 Log4(("pgmPoolTrackFlushGCPhysPTs: i=%d pte=%RX64\n", iPte, PGMSHWPTEPAE_GET_LOG(pPT->a[iPte])));
3077 Pte.u = (PGMSHWPTEPAE_GET_U(pPT->a[iPte]) & u64AndMask) | u64OrMask;
3078 if (Pte.u & PGM_PTFLAGS_TRACK_DIRTY)
3079 Pte.n.u1Write = 0; /* need to disallow writes when dirty bit tracking is still active. */
3080
3081 PGMSHWPTEPAE_ATOMIC_SET(pPT->a[iPte], Pte.u);
3082 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pPT);
3083 return fRet;
3084 }
3085#ifdef LOG_ENABLED
3086 Log(("iFirstPresent=%d cPresent=%d\n", pPage->iFirstPresent, pPage->cPresent));
3087 Log(("Found %RX64 expected %RX64\n", PGMSHWPTEPAE_GET_U(pPT->a[iPte]) & (X86_PTE_PAE_PG_MASK | X86_PTE_P | X86_PTE_PAE_MBZ_MASK_NX), u64));
3088 for (unsigned i = 0, cFound = 0; i < RT_ELEMENTS(pPT->a); i++)
3089 if ((PGMSHWPTEPAE_GET_U(pPT->a[i]) & (X86_PTE_PAE_PG_MASK | X86_PTE_P | X86_PTE_PAE_MBZ_MASK_NX)) == u64)
3090 Log(("i=%d cFound=%d\n", i, ++cFound));
3091#endif
3092 AssertFatalMsgFailed(("iFirstPresent=%d cPresent=%d u64=%RX64 poolkind=%x iPte=%d PT=%RX64\n", pPage->iFirstPresent, pPage->cPresent, u64, pPage->enmKind, iPte, PGMSHWPTEPAE_GET_LOG(pPT->a[iPte])));
3093 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pPT);
3094 break;
3095 }
3096
3097#ifdef PGM_WITH_LARGE_PAGES
3098 /* Large page case only. */
3099 case PGMPOOLKIND_EPT_PD_FOR_PHYS:
3100 {
3101 Assert(pVM->pgm.s.fNestedPaging);
3102
3103 const uint64_t u64 = PGM_PAGE_GET_HCPHYS(pPhysPage) | X86_PDE4M_P | X86_PDE4M_PS;
3104 PEPTPD pPD = (PEPTPD)PGMPOOL_PAGE_2_PTR(pVM, pPage);
3105
3106 if ((pPD->a[iPte].u & (EPT_PDE2M_PG_MASK | X86_PDE4M_P | X86_PDE4M_PS)) == u64)
3107 {
3108 Log4(("pgmPoolTrackFlushGCPhysPTs: i=%d pde=%RX64\n", iPte, pPD->a[iPte]));
3109 STAM_COUNTER_INC(&pPool->StatTrackFlushEntry);
3110 pPD->a[iPte].u = 0;
3111 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pPD);
3112
3113 /* Update the counter as we're removing references. */
3114 Assert(pPage->cPresent);
3115 Assert(pPool->cPresent);
3116 pPage->cPresent--;
3117 pPool->cPresent--;
3118
3119 return fRet;
3120 }
3121# ifdef LOG_ENABLED
3122 Log(("iFirstPresent=%d cPresent=%d\n", pPage->iFirstPresent, pPage->cPresent));
3123 for (unsigned i = 0, cFound = 0; i < RT_ELEMENTS(pPD->a); i++)
3124 if ((pPD->a[i].u & (EPT_PDE2M_PG_MASK | X86_PDE4M_P | X86_PDE4M_PS)) == u64)
3125 Log(("i=%d cFound=%d\n", i, ++cFound));
3126# endif
3127 AssertFatalMsgFailed(("iFirstPresent=%d cPresent=%d\n", pPage->iFirstPresent, pPage->cPresent));
3128 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pPD);
3129 break;
3130 }
3131
3132 /* AMD-V nested paging */ /** @todo merge with EPT as we only check the parts that are identical. */
3133 case PGMPOOLKIND_PAE_PD_PHYS:
3134 {
3135 Assert(pVM->pgm.s.fNestedPaging);
3136
3137 const uint64_t u64 = PGM_PAGE_GET_HCPHYS(pPhysPage) | X86_PDE4M_P | X86_PDE4M_PS;
3138 PX86PD pPD = (PX86PD)PGMPOOL_PAGE_2_PTR(pVM, pPage);
3139
3140 if ((pPD->a[iPte].u & (X86_PDE2M_PAE_PG_MASK | X86_PDE4M_P | X86_PDE4M_PS)) == u64)
3141 {
3142 Log4(("pgmPoolTrackFlushGCPhysPTs: i=%d pde=%RX64\n", iPte, pPD->a[iPte]));
3143 STAM_COUNTER_INC(&pPool->StatTrackFlushEntry);
3144 pPD->a[iPte].u = 0;
3145 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pPD);
3146
3147 /* Update the counter as we're removing references. */
3148 Assert(pPage->cPresent);
3149 Assert(pPool->cPresent);
3150 pPage->cPresent--;
3151 pPool->cPresent--;
3152 return fRet;
3153 }
3154# ifdef LOG_ENABLED
3155 Log(("iFirstPresent=%d cPresent=%d\n", pPage->iFirstPresent, pPage->cPresent));
3156 for (unsigned i = 0, cFound = 0; i < RT_ELEMENTS(pPD->a); i++)
3157 if ((pPD->a[i].u & (X86_PDE2M_PAE_PG_MASK | X86_PDE4M_P | X86_PDE4M_PS)) == u64)
3158 Log(("i=%d cFound=%d\n", i, ++cFound));
3159# endif
3160 AssertFatalMsgFailed(("iFirstPresent=%d cPresent=%d\n", pPage->iFirstPresent, pPage->cPresent));
3161 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pPD);
3162 break;
3163 }
3164#endif /* PGM_WITH_LARGE_PAGES */
3165
3166 default:
3167 AssertFatalMsgFailed(("enmKind=%d iShw=%d\n", pPage->enmKind, iShw));
3168 }
3169 return fRet;
3170}
3171
3172
3173/**
3174 * Scans one shadow page table for mappings of a physical page.
3175 *
3176 * @param pVM The VM handle.
3177 * @param pPhysPage The guest page in question.
3178 * @param fFlushPTEs Flush PTEs or allow them to be updated (e.g. in case of an RW bit change)
3179 * @param iShw The shadow page table.
3180 */
3181static void pgmPoolTrackFlushGCPhysPT(PVM pVM, PPGMPAGE pPhysPage, bool fFlushPTEs, uint16_t iShw)
3182{
3183 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool); NOREF(pPool);
3184
3185 /* We should only come here when there's only one reference to this physical page. */
3186 Assert(PGMPOOL_TD_GET_CREFS(PGM_PAGE_GET_TRACKING(pPhysPage)) == 1);
3187
3188 Log2(("pgmPoolTrackFlushGCPhysPT: pPhysPage=%RHp iShw=%d\n", PGM_PAGE_GET_HCPHYS(pPhysPage), iShw));
3189 STAM_PROFILE_START(&pPool->StatTrackFlushGCPhysPT, f);
3190 bool fKeptPTEs = pgmPoolTrackFlushGCPhysPTInt(pVM, pPhysPage, fFlushPTEs, iShw, PGM_PAGE_GET_PTE_INDEX(pPhysPage));
3191 if (!fKeptPTEs)
3192 PGM_PAGE_SET_TRACKING(pPhysPage, 0);
3193 STAM_PROFILE_STOP(&pPool->StatTrackFlushGCPhysPT, f);
3194}
3195
3196
3197/**
3198 * Flushes a list of shadow page tables mapping the same physical page.
3199 *
3200 * @param pVM The VM handle.
3201 * @param pPhysPage The guest page in question.
3202 * @param fFlushPTEs Flush PTEs or allow them to be updated (e.g. in case of an RW bit change)
3203 * @param iPhysExt The physical cross reference extent list to flush.
3204 */
3205static void pgmPoolTrackFlushGCPhysPTs(PVM pVM, PPGMPAGE pPhysPage, bool fFlushPTEs, uint16_t iPhysExt)
3206{
3207 Assert(PGMIsLockOwner(pVM));
3208 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
3209 bool fKeepList = false;
3210
3211 STAM_PROFILE_START(&pPool->StatTrackFlushGCPhysPTs, f);
3212 Log2(("pgmPoolTrackFlushGCPhysPTs: pPhysPage=%RHp iPhysExt\n", PGM_PAGE_GET_HCPHYS(pPhysPage), iPhysExt));
3213
3214 const uint16_t iPhysExtStart = iPhysExt;
3215 PPGMPOOLPHYSEXT pPhysExt;
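 /* Walk the chain of physical cross-reference extents; each extent holds a small array of (shadow page index, PTE index)
  * pairs referencing this guest page. */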
3216 do
3217 {
3218 Assert(iPhysExt < pPool->cMaxPhysExts);
3219 pPhysExt = &pPool->CTX_SUFF(paPhysExts)[iPhysExt];
3220 for (unsigned i = 0; i < RT_ELEMENTS(pPhysExt->aidx); i++)
3221 {
3222 if (pPhysExt->aidx[i] != NIL_PGMPOOL_IDX)
3223 {
3224 bool fKeptPTEs = pgmPoolTrackFlushGCPhysPTInt(pVM, pPhysPage, fFlushPTEs, pPhysExt->aidx[i], pPhysExt->apte[i]);
3225 if (!fKeptPTEs)
3226 {
3227 pPhysExt->aidx[i] = NIL_PGMPOOL_IDX;
3228 pPhysExt->apte[i] = NIL_PGMPOOL_PHYSEXT_IDX_PTE;
3229 }
3230 else
3231 fKeepList = true;
3232 }
3233 }
3234 /* next */
3235 iPhysExt = pPhysExt->iNext;
3236 } while (iPhysExt != NIL_PGMPOOL_PHYSEXT_INDEX);
3237
3238 if (!fKeepList)
3239 {
3240 /* insert the list into the free list and clear the ram range entry. */
3241 pPhysExt->iNext = pPool->iPhysExtFreeHead;
3242 pPool->iPhysExtFreeHead = iPhysExtStart;
3243 /* Invalidate the tracking data. */
3244 PGM_PAGE_SET_TRACKING(pPhysPage, 0);
3245 }
3246
3247 STAM_PROFILE_STOP(&pPool->StatTrackFlushGCPhysPTs, f);
3248}
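
/*
 * Editor's sketch (not part of the original file): the iNext chain walk used
 * above, reduced to a standalone fragment.  ExampleExt and NIL_EXAMPLE_IDX
 * are simplified stand-ins for PGMPOOLPHYSEXT and NIL_PGMPOOL_PHYSEXT_INDEX.
 */
#define NIL_EXAMPLE_IDX UINT16_MAX
typedef struct ExampleExt { uint16_t aidx[3]; uint16_t iNext; } ExampleExt;
static void exampleWalkExtChain(ExampleExt *paExts, uint16_t iHead)
{
    uint16_t i = iHead;
    do
    {
        /* ... visit paExts[i].aidx[0..2] here ... */
        i = paExts[i].iNext;            /* follow the singly linked chain */
    } while (i != NIL_EXAMPLE_IDX);     /* the NIL index terminates the list */
}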
3249
3250
3251/**
3252 * Flushes all shadow page table mappings of the given guest page.
3253 *
3254 * This is typically called when the host page backing the guest one has been
3255 * replaced or when the page protection was changed due to a guest access
3256 * caught by the monitoring.
3257 *
3258 * @returns VBox status code.
3259 * @retval VINF_SUCCESS if all references have been successfully cleared.
3260 * @retval VINF_PGM_SYNC_CR3 if we're better off with a CR3 sync and a page
3261 * pool cleaning. FF and sync flags are set.
3262 *
3263 * @param pVM The VM handle.
3264 * @param GCPhysPage GC physical address of the page in question
3265 * @param pPhysPage The guest page in question.
3266 * @param fFlushPTEs Flush PTEs or allow them to be updated (e.g. in case of an RW bit change)
3267 * @param pfFlushTLBs This is set to @a true if the shadow TLBs should be
3268 * flushed; it is NOT touched if flushing isn't necessary.
3269 * The caller MUST initialize this to @a false.
3270 */
3271int pgmPoolTrackUpdateGCPhys(PVM pVM, RTGCPHYS GCPhysPage, PPGMPAGE pPhysPage, bool fFlushPTEs, bool *pfFlushTLBs)
3272{
3273 PVMCPU pVCpu = VMMGetCpu(pVM);
3274 pgmLock(pVM);
3275 int rc = VINF_SUCCESS;
3276
3277#ifdef PGM_WITH_LARGE_PAGES
3278 /* Is this page part of a large page? */
3279 if (PGM_PAGE_GET_PDE_TYPE(pPhysPage) == PGM_PAGE_PDE_TYPE_PDE)
3280 {
3281 PPGMPAGE pPhysBase;
3282 RTGCPHYS GCPhysBase = GCPhysPage & X86_PDE2M_PAE_PG_MASK;
3283
3284 GCPhysPage &= X86_PDE_PAE_PG_MASK;
3285
3286 /* Fetch the large page base. */
3287 if (GCPhysBase != GCPhysPage)
3288 {
3289 pPhysBase = pgmPhysGetPage(&pVM->pgm.s, GCPhysBase);
3290 AssertFatal(pPhysBase);
3291 }
3292 else
3293 pPhysBase = pPhysPage;
3294
3295 Log(("pgmPoolTrackUpdateGCPhys: update large page PDE for %RGp (%RGp)\n", GCPhysBase, GCPhysPage));
3296
3297 if (PGM_PAGE_GET_PDE_TYPE(pPhysBase) == PGM_PAGE_PDE_TYPE_PDE)
3298 {
3299 /* Mark the large page as disabled as we need to break it up to change a single page in the 2 MB range. */
3300 PGM_PAGE_SET_PDE_TYPE(pPhysBase, PGM_PAGE_PDE_TYPE_PDE_DISABLED);
3301
3302 /* Update the base page as *only* that one has a reference and there's only one PDE to clear. */
3303 rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhysBase, pPhysBase, fFlushPTEs, pfFlushTLBs);
3304
3305 *pfFlushTLBs = true;
3306 pgmUnlock(pVM);
3307 return rc;
3308 }
3309 }
3310#else
3311 NOREF(GCPhysPage);
3312#endif /* PGM_WITH_LARGE_PAGES */
3313
3314 const uint16_t u16 = PGM_PAGE_GET_TRACKING(pPhysPage);
3315 if (u16)
3316 {
3317 /*
3318 * The zero page is currently screwing up the tracking and we'll
3319 * have to flush the whole shebang. Unless VBOX_WITH_NEW_LAZY_PAGE_ALLOC
3320 * is defined, zero pages won't normally be mapped. Some kind of solution
3321 * will be needed for this problem of course, but it will have to wait...
3322 */
3323 if ( PGM_PAGE_IS_ZERO(pPhysPage)
3324 || PGM_PAGE_IS_BALLOONED(pPhysPage))
3325 rc = VINF_PGM_GCPHYS_ALIASED;
3326 else
3327 {
3328# if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC) /** @todo we can drop this now. */
3329 /* Start a subset here because pgmPoolTrackFlushGCPhysPTsSlow and
3330 pgmPoolTrackFlushGCPhysPTs will/may kill the pool otherwise. */
3331 uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu);
3332# endif
3333
3334 if (PGMPOOL_TD_GET_CREFS(u16) != PGMPOOL_TD_CREFS_PHYSEXT)
3335 {
3336 Assert(PGMPOOL_TD_GET_CREFS(u16) == 1);
3337 pgmPoolTrackFlushGCPhysPT(pVM,
3338 pPhysPage,
3339 fFlushPTEs,
3340 PGMPOOL_TD_GET_IDX(u16));
3341 }
3342 else if (u16 != PGMPOOL_TD_MAKE(PGMPOOL_TD_CREFS_PHYSEXT, PGMPOOL_TD_IDX_OVERFLOWED))
3343 pgmPoolTrackFlushGCPhysPTs(pVM, pPhysPage, fFlushPTEs, PGMPOOL_TD_GET_IDX(u16));
3344 else
3345 rc = pgmPoolTrackFlushGCPhysPTsSlow(pVM, pPhysPage);
3346 *pfFlushTLBs = true;
3347
3348# if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
3349 PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset);
3350# endif
3351 }
3352 }
3353
3354 if (rc == VINF_PGM_GCPHYS_ALIASED)
3355 {
3356 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
3357 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3358 rc = VINF_PGM_SYNC_CR3;
3359 }
3360 pgmUnlock(pVM);
3361 return rc;
3362}
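
/*
 * Editor's sketch (not part of the original file): the pfFlushTLBs contract
 * documented above, seen from the caller's side.  pgmPoolTrackUpdateGCPhys
 * and PGM_INVL_ALL_VCPU_TLBS are the real APIs used in this file; the
 * wrapper function itself is hypothetical.
 */
static int examplePoolTrackUpdate(PVM pVM, RTGCPHYS GCPhysPage, PPGMPAGE pPhysPage)
{
    bool fFlushTLBs = false;    /* the callee only ever sets this, so it MUST start out false */
    int rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhysPage, pPhysPage, true /*fFlushPTEs*/, &fFlushTLBs);
    if (fFlushTLBs)
        PGM_INVL_ALL_VCPU_TLBS(pVM);    /* flush the shadow TLBs of all VCPUs exactly once */
    return rc;
}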
3363
3364
3365/**
3366 * Scans all shadow page tables for mappings of a physical page.
3367 *
3368 * This may be slow, but it's most likely more efficient than cleaning
3369 * out the entire page pool / cache.
3370 *
3371 * @returns VBox status code.
3372 * @retval VINF_SUCCESS if all references have been successfully cleared.
3373 * @retval VINF_PGM_GCPHYS_ALIASED if we're better off with a CR3 sync and
3374 * a page pool cleaning.
3375 *
3376 * @param pVM The VM handle.
3377 * @param pPhysPage The guest page in question.
3378 */
3379int pgmPoolTrackFlushGCPhysPTsSlow(PVM pVM, PPGMPAGE pPhysPage)
3380{
3381 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
3382 STAM_PROFILE_START(&pPool->StatTrackFlushGCPhysPTsSlow, s);
3383 LogFlow(("pgmPoolTrackFlushGCPhysPTsSlow: cUsedPages=%d cPresent=%d pPhysPage=%R[pgmpage]\n",
3384 pPool->cUsedPages, pPool->cPresent, pPhysPage));
3385
3386 /*
3387 * There is a limit to what makes sense.
3388 */
3389 if ( pPool->cPresent > 1024
3390 && pVM->cCpus == 1)
3391 {
3392 LogFlow(("pgmPoolTrackFlushGCPhysPTsSlow: giving up... (cPresent=%d)\n", pPool->cPresent));
3393 STAM_PROFILE_STOP(&pPool->StatTrackFlushGCPhysPTsSlow, s);
3394 return VINF_PGM_GCPHYS_ALIASED;
3395 }
3396
3397 /*
3398 * Iterate all the pages until we've encountered all that are in use.
3399 * This is a simple but not quite optimal solution.
3400 */
3401 const uint64_t u64 = PGM_PAGE_GET_HCPHYS(pPhysPage) | X86_PTE_P; /** @todo drop X86_PTE_P here as we always test if present separately, anyway. */
3402 const uint32_t u32 = u64; /** @todo move into the 32BIT_PT_xx case */
3403 unsigned cLeft = pPool->cUsedPages;
3404 unsigned iPage = pPool->cCurPages;
3405 while (--iPage >= PGMPOOL_IDX_FIRST)
3406 {
3407 PPGMPOOLPAGE pPage = &pPool->aPages[iPage];
3408 if ( pPage->GCPhys != NIL_RTGCPHYS
3409 && pPage->cPresent)
3410 {
3411 switch (pPage->enmKind)
3412 {
3413 /*
3414 * We only care about shadow page tables.
3415 */
3416 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT:
3417 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB:
3418 case PGMPOOLKIND_32BIT_PT_FOR_PHYS:
3419 {
3420 unsigned cPresent = pPage->cPresent;
3421 PX86PT pPT = (PX86PT)PGMPOOL_PAGE_2_PTR(pVM, pPage);
3422 for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pPT->a); i++)
3423 if (pPT->a[i].n.u1Present)
3424 {
3425 if ((pPT->a[i].u & (X86_PTE_PG_MASK | X86_PTE_P)) == u32)
3426 {
3427 //Log4(("pgmPoolTrackFlushGCPhysPTsSlow: idx=%d i=%d pte=%RX32\n", iPage, i, pPT->a[i]));
3428 pPT->a[i].u = 0;
3429
3430 /* Update the counter as we're removing references. */
3431 Assert(pPage->cPresent);
3432 Assert(pPool->cPresent);
3433 pPage->cPresent--;
3434 pPool->cPresent--;
3435 }
3436 if (!--cPresent)
3437 break;
3438 }
3439 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pPT);
3440 break;
3441 }
3442
3443 case PGMPOOLKIND_PAE_PT_FOR_32BIT_PT:
3444 case PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB:
3445 case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
3446 case PGMPOOLKIND_PAE_PT_FOR_PAE_2MB:
3447 case PGMPOOLKIND_PAE_PT_FOR_PHYS:
3448 {
3449 unsigned cPresent = pPage->cPresent;
3450 PPGMSHWPTPAE pPT = (PPGMSHWPTPAE)PGMPOOL_PAGE_2_PTR(pVM, pPage);
3451 for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pPT->a); i++)
3452 if (PGMSHWPTEPAE_IS_P(pPT->a[i]))
3453 {
3454 if ((PGMSHWPTEPAE_GET_U(pPT->a[i]) & (X86_PTE_PAE_PG_MASK | X86_PTE_P)) == u64)
3455 {
3456 //Log4(("pgmPoolTrackFlushGCPhysPTsSlow: idx=%d i=%d pte=%RX64\n", iPage, i, pPT->a[i]));
3457 PGMSHWPTEPAE_SET(pPT->a[i], 0); /// @todo why not atomic?
3458
3459 /* Update the counter as we're removing references. */
3460 Assert(pPage->cPresent);
3461 Assert(pPool->cPresent);
3462 pPage->cPresent--;
3463 pPool->cPresent--;
3464 }
3465 if (!--cPresent)
3466 break;
3467 }
3468 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pPT);
3469 break;
3470 }
3471#ifndef IN_RC
3472 case PGMPOOLKIND_EPT_PT_FOR_PHYS:
3473 {
3474 unsigned cPresent = pPage->cPresent;
3475 PEPTPT pPT = (PEPTPT)PGMPOOL_PAGE_2_PTR(pVM, pPage);
3476 for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pPT->a); i++)
3477 if (pPT->a[i].n.u1Present)
3478 {
3479 if ((pPT->a[i].u & (EPT_PTE_PG_MASK | X86_PTE_P)) == u64)
3480 {
3481 //Log4(("pgmPoolTrackFlushGCPhysPTsSlow: idx=%d i=%d pte=%RX64\n", iPage, i, pPT->a[i]));
3482 pPT->a[i].u = 0;
3483
3484 /* Update the counter as we're removing references. */
3485 Assert(pPage->cPresent);
3486 Assert(pPool->cPresent);
3487 pPage->cPresent--;
3488 pPool->cPresent--;
3489 }
3490 if (!--cPresent)
3491 break;
3492 }
3493 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pPT);
3494 break;
3495 }
3496#endif
3497 }
3498 if (!--cLeft)
3499 break;
3500 }
3501 }
3502
3503 PGM_PAGE_SET_TRACKING(pPhysPage, 0);
3504 STAM_PROFILE_STOP(&pPool->StatTrackFlushGCPhysPTsSlow, s);
3505
3506 /*
3507 * There is a limit to what makes sense. The above search is very expensive, so force a pgm pool flush.
3508 */
3509 if (pPool->cPresent > 1024)
3510 {
3511 LogFlow(("pgmPoolTrackFlushGCPhysPTsSlow: giving up... (cPresent=%d)\n", pPool->cPresent));
3512 return VINF_PGM_GCPHYS_ALIASED;
3513 }
3514
3515 return VINF_SUCCESS;
3516}
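
/*
 * Editor's sketch (not part of the original file): the per-entry match test
 * used in the loops above checks the page-frame bits and the present bit in
 * a single masked compare.  The constants below mirror the x86 PAE layout
 * (bit 0 = P, bits 12..51 = page frame) purely for illustration.
 */
static bool examplePteMapsPage(uint64_t uPte, uint64_t HCPhysPage)
{
    const uint64_t fPteP      = UINT64_C(0x0000000000000001);
    const uint64_t fPaePgMask = UINT64_C(0x000ffffffffff000);
    return (uPte & (fPaePgMask | fPteP)) == (HCPhysPage | fPteP);
}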
3517
3518
3519/**
3520 * Clears the user entry in a user table.
3521 *
3522 * This is used to remove all references to a page when flushing it.
3523 */
3524static void pgmPoolTrackClearPageUser(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PCPGMPOOLUSER pUser)
3525{
3526 Assert(pUser->iUser != NIL_PGMPOOL_IDX);
3527 Assert(pUser->iUser < pPool->cCurPages);
3528 uint32_t iUserTable = pUser->iUserTable;
3529
3530 /*
3531 * Map the user page.
3532 */
3533 PPGMPOOLPAGE pUserPage = &pPool->aPages[pUser->iUser];
3534 union
3535 {
3536 uint64_t *pau64;
3537 uint32_t *pau32;
3538 } u;
3539 u.pau64 = (uint64_t *)PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pUserPage);
3540
3541 LogFlow(("pgmPoolTrackClearPageUser: clear %x in %s (%RGp) (flushing %s)\n", iUserTable, pgmPoolPoolKindToStr(pUserPage->enmKind), pUserPage->Core.Key, pgmPoolPoolKindToStr(pPage->enmKind)));
3542
3543 /* Safety precaution in case we change the paging for other modes too in the future. */
3544 Assert(!pgmPoolIsPageLocked(&pPool->CTX_SUFF(pVM)->pgm.s, pPage));
3545
3546#ifdef VBOX_STRICT
3547 /*
3548 * Some sanity checks.
3549 */
3550 switch (pUserPage->enmKind)
3551 {
3552 case PGMPOOLKIND_32BIT_PD:
3553 case PGMPOOLKIND_32BIT_PD_PHYS:
3554 Assert(iUserTable < X86_PG_ENTRIES);
3555 break;
3556 case PGMPOOLKIND_PAE_PDPT:
3557 case PGMPOOLKIND_PAE_PDPT_FOR_32BIT:
3558 case PGMPOOLKIND_PAE_PDPT_PHYS:
3559 Assert(iUserTable < 4);
3560 Assert(!(u.pau64[iUserTable] & PGM_PLXFLAGS_PERMANENT));
3561 break;
3562 case PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD:
3563 case PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD:
3564 case PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD:
3565 case PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD:
3566 case PGMPOOLKIND_PAE_PD_FOR_PAE_PD:
3567 case PGMPOOLKIND_PAE_PD_PHYS:
3568 Assert(iUserTable < X86_PG_PAE_ENTRIES);
3569 break;
3570 case PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD:
3571 Assert(iUserTable < X86_PG_PAE_ENTRIES);
3572 Assert(!(u.pau64[iUserTable] & PGM_PDFLAGS_MAPPING));
3573 break;
3574 case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
3575 Assert(iUserTable < X86_PG_PAE_ENTRIES);
3576 Assert(!(u.pau64[iUserTable] & PGM_PLXFLAGS_PERMANENT));
3577 break;
3578 case PGMPOOLKIND_64BIT_PML4:
3579 Assert(!(u.pau64[iUserTable] & PGM_PLXFLAGS_PERMANENT));
3580 /* GCPhys >> PAGE_SHIFT is the index here */
3581 break;
3582 case PGMPOOLKIND_64BIT_PDPT_FOR_PHYS:
3583 case PGMPOOLKIND_64BIT_PD_FOR_PHYS:
3584 Assert(iUserTable < X86_PG_PAE_ENTRIES);
3585 break;
3586
3587 case PGMPOOLKIND_EPT_PDPT_FOR_PHYS:
3588 case PGMPOOLKIND_EPT_PD_FOR_PHYS:
3589 Assert(iUserTable < X86_PG_PAE_ENTRIES);
3590 break;
3591
3592 case PGMPOOLKIND_ROOT_NESTED:
3593 Assert(iUserTable < X86_PG_PAE_ENTRIES);
3594 break;
3595
3596 default:
3597 AssertMsgFailed(("enmKind=%d\n", pUserPage->enmKind));
3598 break;
3599 }
3600#endif /* VBOX_STRICT */
3601
3602 /*
3603 * Clear the entry in the user page.
3604 */
3605 switch (pUserPage->enmKind)
3606 {
3607 /* 32-bit entries */
3608 case PGMPOOLKIND_32BIT_PD:
3609 case PGMPOOLKIND_32BIT_PD_PHYS:
3610 ASMAtomicWriteSize(&u.pau32[iUserTable], 0);
3611 break;
3612
3613 /* 64-bit entries */
3614 case PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD:
3615 case PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD:
3616 case PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD:
3617 case PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD:
3618 case PGMPOOLKIND_PAE_PD_FOR_PAE_PD:
3619#if defined(IN_RC)
3620 /*
3621 * In 32-bit PAE mode we *must* invalidate the TLB when changing a
3622 * PDPT entry; the CPU fetches them only during cr3 load, so any
3623 * non-present PDPT will continue to cause page faults.
3624 */
3625 ASMReloadCR3();
3626 /* no break */
3627#endif
3628 case PGMPOOLKIND_PAE_PD_PHYS:
3629 case PGMPOOLKIND_PAE_PDPT_PHYS:
3630 case PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD:
3631 case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
3632 case PGMPOOLKIND_64BIT_PML4:
3633 case PGMPOOLKIND_64BIT_PDPT_FOR_PHYS:
3634 case PGMPOOLKIND_64BIT_PD_FOR_PHYS:
3635 case PGMPOOLKIND_PAE_PDPT:
3636 case PGMPOOLKIND_PAE_PDPT_FOR_32BIT:
3637 case PGMPOOLKIND_ROOT_NESTED:
3638 case PGMPOOLKIND_EPT_PDPT_FOR_PHYS:
3639 case PGMPOOLKIND_EPT_PD_FOR_PHYS:
3640 ASMAtomicWriteSize(&u.pau64[iUserTable], 0);
3641 break;
3642
3643 default:
3644 AssertFatalMsgFailed(("enmKind=%d iUser=%#x iUserTable=%#x\n", pUserPage->enmKind, pUser->iUser, pUser->iUserTable));
3645 }
3646 PGM_DYNMAP_UNUSED_HINT_VM(pPool->CTX_SUFF(pVM), u.pau64);
3647}
3648
3649
3650/**
3651 * Clears all users of a page.
3652 */
3653static void pgmPoolTrackClearPageUsers(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
3654{
3655 /*
3656 * Free all the user records.
3657 */
3658 LogFlow(("pgmPoolTrackClearPageUsers %RGp\n", pPage->GCPhys));
3659
3660 PPGMPOOLUSER paUsers = pPool->CTX_SUFF(paUsers);
3661 uint16_t i = pPage->iUserHead;
3662 while (i != NIL_PGMPOOL_USER_INDEX)
3663 {
3664 /* Clear the entry in the user table. */
3665 pgmPoolTrackClearPageUser(pPool, pPage, &paUsers[i]);
3666
3667 /* Free it. */
3668 const uint16_t iNext = paUsers[i].iNext;
3669 paUsers[i].iUser = NIL_PGMPOOL_IDX;
3670 paUsers[i].iNext = pPool->iUserFreeHead;
3671 pPool->iUserFreeHead = i;
3672
3673 /* Next. */
3674 i = iNext;
3675 }
3676 pPage->iUserHead = NIL_PGMPOOL_USER_INDEX;
3677}
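
/*
 * Editor's sketch (not part of the original file): the O(1) head insertion
 * into an index-based free list that the loop above performs for each user
 * record.  ExampleUser and exampleFreeUser are simplified stand-ins, not the
 * real PGMPOOLUSER machinery.
 */
typedef struct ExampleUser { uint16_t iNext; uint16_t iUser; uint32_t iUserTable; } ExampleUser;
static void exampleFreeUser(ExampleUser *paUsers, uint16_t *piFreeHead, uint16_t i)
{
    paUsers[i].iNext = *piFreeHead;     /* the old free head becomes our successor */
    *piFreeHead      = i;               /* and record i becomes the new head */
}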
3678
3679
3680/**
3681 * Allocates a new physical cross reference extent.
3682 *
3683 * @returns Pointer to the allocated extent on success. NULL if we're out of them.
3684 * @param pVM The VM handle.
3685 * @param piPhysExt Where to store the phys ext index.
3686 */
3687PPGMPOOLPHYSEXT pgmPoolTrackPhysExtAlloc(PVM pVM, uint16_t *piPhysExt)
3688{
3689 Assert(PGMIsLockOwner(pVM));
3690 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
3691 uint16_t iPhysExt = pPool->iPhysExtFreeHead;
3692 if (iPhysExt == NIL_PGMPOOL_PHYSEXT_INDEX)
3693 {
3694 STAM_COUNTER_INC(&pPool->StamTrackPhysExtAllocFailures);
3695 return NULL;
3696 }
3697 PPGMPOOLPHYSEXT pPhysExt = &pPool->CTX_SUFF(paPhysExts)[iPhysExt];
3698 pPool->iPhysExtFreeHead = pPhysExt->iNext;
3699 pPhysExt->iNext = NIL_PGMPOOL_PHYSEXT_INDEX;
3700 *piPhysExt = iPhysExt;
3701 return pPhysExt;
3702}
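
/*
 * Editor's sketch (not part of the original file): hedged usage of the
 * alloc/free pair.  exampleRecordFirstRefs is hypothetical; the two
 * pgmPoolTrackPhysExt* calls and the aidx/apte fields are the real ones.
 */
static bool exampleRecordFirstRefs(PVM pVM, uint16_t iShwPT, uint16_t iPte)
{
    uint16_t iPhysExt;
    PPGMPOOLPHYSEXT pExt = pgmPoolTrackPhysExtAlloc(pVM, &iPhysExt);
    if (!pExt)
        return false;               /* extent pool ran dry; callers fall back to "overflowed" */
    pExt->aidx[0] = iShwPT;         /* the first shadow PT referencing the page... */
    pExt->apte[0] = iPte;           /* ...and the PTE index within it */
    /* ... and when the last reference goes away: */
    pgmPoolTrackPhysExtFree(pVM, iPhysExt);
    return true;
}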
3703
3704
3705/**
3706 * Frees a physical cross reference extent.
3707 *
3708 * @param pVM The VM handle.
3709 * @param iPhysExt The extent to free.
3710 */
3711void pgmPoolTrackPhysExtFree(PVM pVM, uint16_t iPhysExt)
3712{
3713 Assert(PGMIsLockOwner(pVM));
3714 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
3715 Assert(iPhysExt < pPool->cMaxPhysExts);
3716 PPGMPOOLPHYSEXT pPhysExt = &pPool->CTX_SUFF(paPhysExts)[iPhysExt];
3717 for (unsigned i = 0; i < RT_ELEMENTS(pPhysExt->aidx); i++)
3718 {
3719 pPhysExt->aidx[i] = NIL_PGMPOOL_IDX;
3720 pPhysExt->apte[i] = NIL_PGMPOOL_PHYSEXT_IDX_PTE;
3721 }
3722 pPhysExt->iNext = pPool->iPhysExtFreeHead;
3723 pPool->iPhysExtFreeHead = iPhysExt;
3724}
3725
3726
3727/**
3728 * Frees a list of physical cross reference extents.
3729 *
3730 * @param pVM The VM handle.
3731 * @param iPhysExt The index of the head of the extent list to free.
3732 */
3733void pgmPoolTrackPhysExtFreeList(PVM pVM, uint16_t iPhysExt)
3734{
3735 Assert(PGMIsLockOwner(pVM));
3736 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
3737
3738 const uint16_t iPhysExtStart = iPhysExt;
3739 PPGMPOOLPHYSEXT pPhysExt;
3740 do
3741 {
3742 Assert(iPhysExt < pPool->cMaxPhysExts);
3743 pPhysExt = &pPool->CTX_SUFF(paPhysExts)[iPhysExt];
3744 for (unsigned i = 0; i < RT_ELEMENTS(pPhysExt->aidx); i++)
3745 {
3746 pPhysExt->aidx[i] = NIL_PGMPOOL_IDX;
3747 pPhysExt->apte[i] = NIL_PGMPOOL_PHYSEXT_IDX_PTE;
3748 }
3749
3750 /* next */
3751 iPhysExt = pPhysExt->iNext;
3752 } while (iPhysExt != NIL_PGMPOOL_PHYSEXT_INDEX);
3753
3754 pPhysExt->iNext = pPool->iPhysExtFreeHead;
3755 pPool->iPhysExtFreeHead = iPhysExtStart;
3756}
3757
3758
3759/**
3760 * Insert a reference into a list of physical cross reference extents.
3761 *
3762 * @returns The new tracking data for PGMPAGE.
3763 *
3764 * @param pVM The VM handle.
3765 * @param iPhysExt The physical extent index of the list head.
3766 * @param iShwPT The shadow page table index.
3767 * @param iPte Page table entry
3768 *
3769 */
3770static uint16_t pgmPoolTrackPhysExtInsert(PVM pVM, uint16_t iPhysExt, uint16_t iShwPT, uint16_t iPte)
3771{
3772 Assert(PGMIsLockOwner(pVM));
3773 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
3774 PPGMPOOLPHYSEXT paPhysExts = pPool->CTX_SUFF(paPhysExts);
3775
3776 /* special common case. */
3777 if (paPhysExts[iPhysExt].aidx[2] == NIL_PGMPOOL_IDX)
3778 {
3779 paPhysExts[iPhysExt].aidx[2] = iShwPT;
3780 paPhysExts[iPhysExt].apte[2] = iPte;
3781 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatTrackAliasedMany);
3782 LogFlow(("pgmPoolTrackPhysExtInsert: %d:{,,%d pte %d}\n", iPhysExt, iShwPT, iPte));
3783 return PGMPOOL_TD_MAKE(PGMPOOL_TD_CREFS_PHYSEXT, iPhysExt);
3784 }
3785
3786 /* general treatment. */
3787 const uint16_t iPhysExtStart = iPhysExt;
3788 unsigned cMax = 15;
3789 for (;;)
3790 {
3791 Assert(iPhysExt < pPool->cMaxPhysExts);
3792 for (unsigned i = 0; i < RT_ELEMENTS(paPhysExts[iPhysExt].aidx); i++)
3793 if (paPhysExts[iPhysExt].aidx[i] == NIL_PGMPOOL_IDX)
3794 {
3795 paPhysExts[iPhysExt].aidx[i] = iShwPT;
3796 paPhysExts[iPhysExt].apte[i] = iPte;
3797 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatTrackAliasedMany);
3798 LogFlow(("pgmPoolTrackPhysExtInsert: %d:{%d pte %d} i=%d cMax=%d\n", iPhysExt, iShwPT, iPte, i, cMax));
3799 return PGMPOOL_TD_MAKE(PGMPOOL_TD_CREFS_PHYSEXT, iPhysExtStart);
3800 }
3801 if (!--cMax)
3802 {
3803 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatTrackOverflows);
3804 pgmPoolTrackPhysExtFreeList(pVM, iPhysExtStart);
3805 LogFlow(("pgmPoolTrackPhysExtInsert: overflow (1) iShwPT=%d\n", iShwPT));
3806 return PGMPOOL_TD_MAKE(PGMPOOL_TD_CREFS_PHYSEXT, PGMPOOL_TD_IDX_OVERFLOWED);
3807 }
 /* Advance to the next extent; when the end of the list is reached, fall out
 of the loop so a fresh extent can be chained in front below.  (Without this
 advance the loop would rescan the same extent and the code below would be
 unreachable.) */
 iPhysExt = paPhysExts[iPhysExt].iNext;
 if (iPhysExt == NIL_PGMPOOL_PHYSEXT_INDEX)
 break;
3808 }
3809
3810 /* add another extent to the list. */
3811 PPGMPOOLPHYSEXT pNew = pgmPoolTrackPhysExtAlloc(pVM, &iPhysExt);
3812 if (!pNew)
3813 {
3814 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatTrackNoExtentsLeft);
3815 pgmPoolTrackPhysExtFreeList(pVM, iPhysExtStart);
3816 LogFlow(("pgmPoolTrackPhysExtInsert: pgmPoolTrackPhysExtAlloc failed iShwPT=%d\n", iShwPT));
3817 return PGMPOOL_TD_MAKE(PGMPOOL_TD_CREFS_PHYSEXT, PGMPOOL_TD_IDX_OVERFLOWED);
3818 }
3819 pNew->iNext = iPhysExtStart;
3820 pNew->aidx[0] = iShwPT;
3821 pNew->apte[0] = iPte;
3822 LogFlow(("pgmPoolTrackPhysExtInsert: added new extent %d:{%d pte %d}->%d\n", iPhysExt, iShwPT, iPte, iPhysExtStart));
3823 return PGMPOOL_TD_MAKE(PGMPOOL_TD_CREFS_PHYSEXT, iPhysExt);
3824}
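
/*
 * Editor's sketch (not part of the original file): the PGMPOOL_TD_MAKE and
 * PGMPOOL_TD_GET_* macros returned above pack a reference count and an index
 * into one 16-bit tracking word.  The 2+14 bit split below is purely
 * illustrative -- the real field widths live in PGMInternal.h -- but it
 * shows the shape of the encoding.
 */
DECLINLINE(uint16_t) exampleTdMake(uint16_t cRefs, uint16_t idx)
{
    return (uint16_t)((cRefs << 14) | (idx & 0x3fff));  /* hypothetical layout */
}
DECLINLINE(uint16_t) exampleTdGetCRefs(uint16_t u16) { return (uint16_t)(u16 >> 14); }
DECLINLINE(uint16_t) exampleTdGetIdx(uint16_t u16)   { return (uint16_t)(u16 & 0x3fff); }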
3825
3826
3827/**
3828 * Add a reference to guest physical page where extents are in use.
3829 *
3830 * @returns The new tracking data for PGMPAGE.
3831 *
3832 * @param pVM The VM handle.
3833 * @param pPhysPage Pointer to the aPages entry in the ram range.
3834 * @param u16 The ram range flags (top 16-bits).
3835 * @param iShwPT The shadow page table index.
3836 * @param iPte Page table entry
3837 */
3838uint16_t pgmPoolTrackPhysExtAddref(PVM pVM, PPGMPAGE pPhysPage, uint16_t u16, uint16_t iShwPT, uint16_t iPte)
3839{
3840 pgmLock(pVM);
3841 if (PGMPOOL_TD_GET_CREFS(u16) != PGMPOOL_TD_CREFS_PHYSEXT)
3842 {
3843 /*
3844 * Convert to extent list.
3845 */
3846 Assert(PGMPOOL_TD_GET_CREFS(u16) == 1);
3847 uint16_t iPhysExt;
3848 PPGMPOOLPHYSEXT pPhysExt = pgmPoolTrackPhysExtAlloc(pVM, &iPhysExt);
3849 if (pPhysExt)
3850 {
3851 LogFlow(("pgmPoolTrackPhysExtAddref: new extent: %d:{%d, %d}\n", iPhysExt, PGMPOOL_TD_GET_IDX(u16), iShwPT));
3852 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatTrackAliased);
3853 pPhysExt->aidx[0] = PGMPOOL_TD_GET_IDX(u16);
3854 pPhysExt->apte[0] = PGM_PAGE_GET_PTE_INDEX(pPhysPage);
3855 pPhysExt->aidx[1] = iShwPT;
3856 pPhysExt->apte[1] = iPte;
3857 u16 = PGMPOOL_TD_MAKE(PGMPOOL_TD_CREFS_PHYSEXT, iPhysExt);
3858 }
3859 else
3860 u16 = PGMPOOL_TD_MAKE(PGMPOOL_TD_CREFS_PHYSEXT, PGMPOOL_TD_IDX_OVERFLOWED);
3861 }
3862 else if (u16 != PGMPOOL_TD_MAKE(PGMPOOL_TD_CREFS_PHYSEXT, PGMPOOL_TD_IDX_OVERFLOWED))
3863 {
3864 /*
3865 * Insert into the extent list.
3866 */
3867 u16 = pgmPoolTrackPhysExtInsert(pVM, PGMPOOL_TD_GET_IDX(u16), iShwPT, iPte);
3868 }
3869 else
3870 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatTrackAliasedLots);
3871 pgmUnlock(pVM);
3872 return u16;
3873}
3874
3875
3876/**
3877 * Clear references to guest physical memory.
3878 *
3879 * @param pPool The pool.
3880 * @param pPage The page.
3881 * @param pPhysPage Pointer to the aPages entry in the ram range.
3882 * @param iPte Shadow PTE index
3883 */
3884void pgmPoolTrackPhysExtDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PPGMPAGE pPhysPage, uint16_t iPte)
3885{
3886 const unsigned cRefs = PGM_PAGE_GET_TD_CREFS(pPhysPage);
3887 AssertFatalMsg(cRefs == PGMPOOL_TD_CREFS_PHYSEXT, ("cRefs=%d pPhysPage=%R[pgmpage] pPage=%p:{.idx=%d}\n", cRefs, pPhysPage, pPage, pPage->idx));
3888
3889 uint16_t iPhysExt = PGM_PAGE_GET_TD_IDX(pPhysPage);
3890 if (iPhysExt != PGMPOOL_TD_IDX_OVERFLOWED)
3891 {
3892 PVM pVM = pPool->CTX_SUFF(pVM);
3893 pgmLock(pVM);
3894
3895 uint16_t iPhysExtPrev = NIL_PGMPOOL_PHYSEXT_INDEX;
3896 PPGMPOOLPHYSEXT paPhysExts = pPool->CTX_SUFF(paPhysExts);
3897 do
3898 {
3899 Assert(iPhysExt < pPool->cMaxPhysExts);
3900
3901 /*
3902 * Look for the shadow page and check if it's all freed.
3903 */
3904 for (unsigned i = 0; i < RT_ELEMENTS(paPhysExts[iPhysExt].aidx); i++)
3905 {
3906 if ( paPhysExts[iPhysExt].aidx[i] == pPage->idx
3907 && paPhysExts[iPhysExt].apte[i] == iPte)
3908 {
3909 paPhysExts[iPhysExt].aidx[i] = NIL_PGMPOOL_IDX;
3910 paPhysExts[iPhysExt].apte[i] = NIL_PGMPOOL_PHYSEXT_IDX_PTE;
3911
3912 for (i = 0; i < RT_ELEMENTS(paPhysExts[iPhysExt].aidx); i++)
3913 if (paPhysExts[iPhysExt].aidx[i] != NIL_PGMPOOL_IDX)
3914 {
3915 Log2(("pgmPoolTrackPhysExtDerefGCPhys: pPhysPage=%R[pgmpage] idx=%d\n", pPhysPage, pPage->idx));
3916 pgmUnlock(pVM);
3917 return;
3918 }
3919
3920 /* we can free the node. */
3921 const uint16_t iPhysExtNext = paPhysExts[iPhysExt].iNext;
3922 if ( iPhysExtPrev == NIL_PGMPOOL_PHYSEXT_INDEX
3923 && iPhysExtNext == NIL_PGMPOOL_PHYSEXT_INDEX)
3924 {
3925 /* lonely node */
3926 pgmPoolTrackPhysExtFree(pVM, iPhysExt);
3927 Log2(("pgmPoolTrackPhysExtDerefGCPhys: pPhysPage=%R[pgmpage] idx=%d lonely\n", pPhysPage, pPage->idx));
3928 PGM_PAGE_SET_TRACKING(pPhysPage, 0);
3929 }
3930 else if (iPhysExtPrev == NIL_PGMPOOL_PHYSEXT_INDEX)
3931 {
3932 /* head */
3933 Log2(("pgmPoolTrackPhysExtDerefGCPhys: pPhysPage=%R[pgmpage] idx=%d head\n", pPhysPage, pPage->idx));
3934 PGM_PAGE_SET_TRACKING(pPhysPage, PGMPOOL_TD_MAKE(PGMPOOL_TD_CREFS_PHYSEXT, iPhysExtNext));
3935 pgmPoolTrackPhysExtFree(pVM, iPhysExt);
3936 }
3937 else
3938 {
3939 /* in list */
3940 Log2(("pgmPoolTrackPhysExtDerefGCPhys: pPhysPage=%R[pgmpage] idx=%d in list\n", pPhysPage, pPage->idx));
3941 paPhysExts[iPhysExtPrev].iNext = iPhysExtNext;
3942 pgmPoolTrackPhysExtFree(pVM, iPhysExt);
3943 }
3944 iPhysExt = iPhysExtNext;
3945 pgmUnlock(pVM);
3946 return;
3947 }
3948 }
3949
3950 /* next */
3951 iPhysExtPrev = iPhysExt;
3952 iPhysExt = paPhysExts[iPhysExt].iNext;
3953 } while (iPhysExt != NIL_PGMPOOL_PHYSEXT_INDEX);
3954
3955 pgmUnlock(pVM);
3956 AssertFatalMsgFailed(("not-found! cRefs=%d pPhysPage=%R[pgmpage] pPage=%p:{.idx=%d}\n", cRefs, pPhysPage, pPage, pPage->idx));
3957 }
3958 else /* nothing to do */
3959 Log2(("pgmPoolTrackPhysExtDerefGCPhys: pPhysPage=%R[pgmpage]\n", pPhysPage));
3960}
3961
3962/**
3963 * Clear references to guest physical memory.
3964 *
3965 * This is the same as pgmPoolTracDerefGCPhysHint except that the guest physical address
3966 * is assumed to be correct, so the linear search can be skipped and we can assert
3967 * at an earlier point.
3968 *
3969 * @param pPool The pool.
3970 * @param pPage The page.
3971 * @param HCPhys The host physical address corresponding to the guest page.
3972 * @param GCPhys The guest physical address corresponding to HCPhys.
3973 * @param iPte Shadow PTE index
3974 */
3975static void pgmPoolTracDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTHCPHYS HCPhys, RTGCPHYS GCPhys, uint16_t iPte)
3976{
3977 /*
3978 * Walk range list.
3979 */
3980 PPGMRAMRANGE pRam = pPool->CTX_SUFF(pVM)->pgm.s.CTX_SUFF(pRamRanges);
3981 while (pRam)
3982 {
3983 RTGCPHYS off = GCPhys - pRam->GCPhys;
3984 if (off < pRam->cb)
3985 {
3986 /* does it match? */
3987 const unsigned iPage = off >> PAGE_SHIFT;
3988 Assert(PGM_PAGE_GET_HCPHYS(&pRam->aPages[iPage]));
3989#ifdef LOG_ENABLED
3990 RTHCPHYS HCPhysPage = PGM_PAGE_GET_HCPHYS(&pRam->aPages[iPage]);
3991 Log2(("pgmPoolTracDerefGCPhys %RHp vs %RHp\n", HCPhysPage, HCPhys));
3992#endif
3993 if (PGM_PAGE_GET_HCPHYS(&pRam->aPages[iPage]) == HCPhys)
3994 {
3995 Assert(pPage->cPresent);
3996 Assert(pPool->cPresent);
3997 pPage->cPresent--;
3998 pPool->cPresent--;
3999 pgmTrackDerefGCPhys(pPool, pPage, &pRam->aPages[iPage], iPte);
4000 return;
4001 }
4002 break;
4003 }
4004 pRam = pRam->CTX_SUFF(pNext);
4005 }
4006 AssertFatalMsgFailed(("HCPhys=%RHp GCPhys=%RGp\n", HCPhys, GCPhys));
4007}
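
/*
 * Editor's sketch (not part of the original file): the range walk above
 * relies on unsigned wraparound, so a single compare rejects addresses both
 * below the range (the subtraction wraps to a huge value) and above it.
 */
DECLINLINE(bool) exampleRangeContains(RTGCPHYS GCPhysStart, RTGCPHYS cb, RTGCPHYS GCPhys)
{
    return GCPhys - GCPhysStart < cb;   /* wraps when GCPhys < GCPhysStart */
}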
4008
4009
4010/**
4011 * Clear references to guest physical memory.
4012 *
4013 * @param pPool The pool.
4014 * @param pPage The page.
4015 * @param HCPhys The host physical address corresponding to the guest page.
4016 * @param GCPhysHint The guest physical address which may correspond to HCPhys.
4017 * @param iPte Shadow pte index
4018 */
4019void pgmPoolTracDerefGCPhysHint(PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTHCPHYS HCPhys, RTGCPHYS GCPhysHint, uint16_t iPte)
4020{
4021 RTHCPHYS HCPhysExpected = 0xDEADBEEFDEADBEEFULL;
4022
4023 Log4(("pgmPoolTracDerefGCPhysHint %RHp %RGp\n", HCPhys, GCPhysHint));
4024
4025 /*
4026 * Walk range list.
4027 */
4028 PPGMRAMRANGE pRam = pPool->CTX_SUFF(pVM)->pgm.s.CTX_SUFF(pRamRanges);
4029 while (pRam)
4030 {
4031 RTGCPHYS off = GCPhysHint - pRam->GCPhys;
4032 if (off < pRam->cb)
4033 {
4034 /* does it match? */
4035 const unsigned iPage = off >> PAGE_SHIFT;
4036 Assert(PGM_PAGE_GET_HCPHYS(&pRam->aPages[iPage]));
4037 if (PGM_PAGE_GET_HCPHYS(&pRam->aPages[iPage]) == HCPhys)
4038 {
4039 Assert(pPage->cPresent);
4040 Assert(pPool->cPresent);
4041 pPage->cPresent--;
4042 pPool->cPresent--;
4043 pgmTrackDerefGCPhys(pPool, pPage, &pRam->aPages[iPage], iPte);
4044 return;
4045 }
4046 HCPhysExpected = PGM_PAGE_GET_HCPHYS(&pRam->aPages[iPage]);
4047 break;
4048 }
4049 pRam = pRam->CTX_SUFF(pNext);
4050 }
4051
4052 /*
4053 * Damn, the hint didn't work. We'll have to do an expensive linear search.
4054 */
4055 STAM_COUNTER_INC(&pPool->StatTrackLinearRamSearches);
4056 pRam = pPool->CTX_SUFF(pVM)->pgm.s.CTX_SUFF(pRamRanges);
4057 while (pRam)
4058 {
4059 unsigned iPage = pRam->cb >> PAGE_SHIFT;
4060 while (iPage-- > 0)
4061 {
4062 if (PGM_PAGE_GET_HCPHYS(&pRam->aPages[iPage]) == HCPhys)
4063 {
4064 Log4(("pgmPoolTracDerefGCPhysHint: Linear HCPhys=%RHp GCPhysHint=%RGp GCPhysReal=%RGp\n",
4065 HCPhys, GCPhysHint, pRam->GCPhys + (iPage << PAGE_SHIFT)));
4066 Assert(pPage->cPresent);
4067 Assert(pPool->cPresent);
4068 pPage->cPresent--;
4069 pPool->cPresent--;
4070 pgmTrackDerefGCPhys(pPool, pPage, &pRam->aPages[iPage], iPte);
4071 return;
4072 }
4073 }
4074 pRam = pRam->CTX_SUFF(pNext);
4075 }
4076
4077 AssertFatalMsgFailed(("HCPhys=%RHp GCPhysHint=%RGp (Expected HCPhys with hint = %RHp)\n", HCPhys, GCPhysHint, HCPhysExpected));
4078}
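
/*
 * Editor's sketch (not part of the original file): the hint pattern above in
 * miniature -- trust the cheap guess first, fall back to an exhaustive scan,
 * and let a statistics counter expose how often the hint misses.  The helper
 * name and flat array are illustrative only.
 */
static int exampleFindWithHint(const uint64_t *pau64, unsigned cEntries, unsigned iHint, uint64_t uKey)
{
    if (iHint < cEntries && pau64[iHint] == uKey)
        return (int)iHint;                  /* fast path: the hint was right */
    for (unsigned i = 0; i < cEntries; i++) /* slow path: linear search */
        if (pau64[i] == uKey)
            return (int)i;                  /* (a STAM counter would be bumped here) */
    return -1;                              /* not found; the code above treats this as fatal */
}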
4079
4080
4081/**
4082 * Clear references to guest physical memory in a 32-bit / 32-bit page table.
4083 *
4084 * @param pPool The pool.
4085 * @param pPage The page.
4086 * @param pShwPT The shadow page table (mapping of the page).
4087 * @param pGstPT The guest page table.
4088 */
4089DECLINLINE(void) pgmPoolTrackDerefPT32Bit32Bit(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PX86PT pShwPT, PCX86PT pGstPT)
4090{
4091 for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pShwPT->a); i++)
4092 if (pShwPT->a[i].n.u1Present)
4093 {
4094 Log4(("pgmPoolTrackDerefPT32Bit32Bit: i=%d pte=%RX32 hint=%RX32\n",
4095 i, pShwPT->a[i].u & X86_PTE_PG_MASK, pGstPT->a[i].u & X86_PTE_PG_MASK));
4096 pgmPoolTracDerefGCPhysHint(pPool, pPage, pShwPT->a[i].u & X86_PTE_PG_MASK, pGstPT->a[i].u & X86_PTE_PG_MASK, i);
4097 if (!pPage->cPresent)
4098 break;
4099 }
4100}
4101
4102
4103/**
4104 * Clear references to guest physical memory in a PAE / 32-bit page table.
4105 *
4106 * @param pPool The pool.
4107 * @param pPage The page.
4108 * @param pShwPT The shadow page table (mapping of the page).
4109 * @param pGstPT The guest page table (just a half one).
4110 */
4111DECLINLINE(void) pgmPoolTrackDerefPTPae32Bit(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PPGMSHWPTPAE pShwPT, PCX86PT pGstPT)
4112{
4113 for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pShwPT->a); i++)
4114 if (PGMSHWPTEPAE_IS_P(pShwPT->a[i]))
4115 {
4116 Log4(("pgmPoolTrackDerefPTPae32Bit: i=%d pte=%RX64 hint=%RX32\n",
4117 i, PGMSHWPTEPAE_GET_HCPHYS(pShwPT->a[i]), pGstPT->a[i].u & X86_PTE_PG_MASK));
4118 pgmPoolTracDerefGCPhysHint(pPool, pPage, PGMSHWPTEPAE_GET_HCPHYS(pShwPT->a[i]), pGstPT->a[i].u & X86_PTE_PG_MASK, i);
4119 if (!pPage->cPresent)
4120 break;
4121 }
4122}
4123
4124
4125/**
4126 * Clear references to guest physical memory in a PAE / PAE page table.
4127 *
4128 * @param pPool The pool.
4129 * @param pPage The page.
4130 * @param pShwPT The shadow page table (mapping of the page).
4131 * @param pGstPT The guest page table.
4132 */
4133DECLINLINE(void) pgmPoolTrackDerefPTPaePae(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PPGMSHWPTPAE pShwPT, PCX86PTPAE pGstPT)
4134{
4135 for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pShwPT->a); i++)
4136 if (PGMSHWPTEPAE_IS_P(pShwPT->a[i]))
4137 {
4138 Log4(("pgmPoolTrackDerefPTPaePae: i=%d pte=%RX32 hint=%RX32\n",
4139 i, PGMSHWPTEPAE_GET_HCPHYS(pShwPT->a[i]), pGstPT->a[i].u & X86_PTE_PAE_PG_MASK));
4140 pgmPoolTracDerefGCPhysHint(pPool, pPage, PGMSHWPTEPAE_GET_HCPHYS(pShwPT->a[i]), pGstPT->a[i].u & X86_PTE_PAE_PG_MASK, i);
4141 if (!pPage->cPresent)
4142 break;
4143 }
4144}
4145
4146
4147/**
4148 * Clear references to guest physical memory in a 32-bit / 4MB page table.
4149 *
4150 * @param pPool The pool.
4151 * @param pPage The page.
4152 * @param pShwPT The shadow page table (mapping of the page).
4153 */
4154DECLINLINE(void) pgmPoolTrackDerefPT32Bit4MB(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PX86PT pShwPT)
4155{
4156 RTGCPHYS GCPhys = pPage->GCPhys + PAGE_SIZE * pPage->iFirstPresent;
4157 for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pShwPT->a); i++, GCPhys += PAGE_SIZE)
4158 if (pShwPT->a[i].n.u1Present)
4159 {
4160 Log4(("pgmPoolTrackDerefPT32Bit4MB: i=%d pte=%RX32 GCPhys=%RGp\n",
4161 i, pShwPT->a[i].u & X86_PTE_PG_MASK, GCPhys));
4162 pgmPoolTracDerefGCPhys(pPool, pPage, pShwPT->a[i].u & X86_PTE_PG_MASK, GCPhys, i);
4163 if (!pPage->cPresent)
4164 break;
4165 }
4166}
4167
4168
4169/**
4170 * Clear references to guest physical memory in a PAE / 2/4MB page table.
4171 *
4172 * @param pPool The pool.
4173 * @param pPage The page.
4174 * @param pShwPT The shadow page table (mapping of the page).
4175 */
4176DECLINLINE(void) pgmPoolTrackDerefPTPaeBig(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PPGMSHWPTPAE pShwPT)
4177{
4178 RTGCPHYS GCPhys = pPage->GCPhys + PAGE_SIZE * pPage->iFirstPresent;
4179 for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pShwPT->a); i++, GCPhys += PAGE_SIZE)
4180 if (PGMSHWPTEPAE_IS_P(pShwPT->a[i]))
4181 {
4182 Log4(("pgmPoolTrackDerefPTPaeBig: i=%d pte=%RX64 hint=%RGp\n",
4183 i, PGMSHWPTEPAE_GET_HCPHYS(pShwPT->a[i]), GCPhys));
4184 pgmPoolTracDerefGCPhys(pPool, pPage, PGMSHWPTEPAE_GET_HCPHYS(pShwPT->a[i]), GCPhys, i);
4185 if (!pPage->cPresent)
4186 break;
4187 }
4188}
4189
4190
4191/**
4192 * Clear references to guest physical memory in an EPT page table.
4193 *
4194 * @param pPool The pool.
4195 * @param pPage The page.
4196 * @param pShwPT The shadow page table (mapping of the page).
4197 */
4198DECLINLINE(void) pgmPoolTrackDerefPTEPT(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PEPTPT pShwPT)
4199{
4200 RTGCPHYS GCPhys = pPage->GCPhys + PAGE_SIZE * pPage->iFirstPresent;
4201 for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pShwPT->a); i++, GCPhys += PAGE_SIZE)
4202 if (pShwPT->a[i].n.u1Present)
4203 {
4204 Log4(("pgmPoolTrackDerefPTEPT: i=%d pte=%RX64 GCPhys=%RX64\n",
4205 i, pShwPT->a[i].u & EPT_PTE_PG_MASK, pPage->GCPhys));
4206 pgmPoolTracDerefGCPhys(pPool, pPage, pShwPT->a[i].u & EPT_PTE_PG_MASK, GCPhys, i);
4207 if (!pPage->cPresent)
4208 break;
4209 }
4210}
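
/*
 * Editor's sketch (not part of the original file): for the *_FOR_PHYS kinds
 * there is no guest page table to consult, so the loops above derive the
 * guest address of entry i arithmetically from the page's base GCPhys.
 */
DECLINLINE(RTGCPHYS) exampleEntryGCPhys(RTGCPHYS GCPhysBase, unsigned iEntry)
{
    return GCPhysBase + (RTGCPHYS)iEntry * PAGE_SIZE;   /* each PTE covers one 4 KiB page */
}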
4211
4212
4213
4214/**
4215 * Clear references to shadowed pages in a 32-bit page directory.
4216 *
4217 * @param pPool The pool.
4218 * @param pPage The page.
4219 * @param pShwPD The shadow page directory (mapping of the page).
4220 */
4221DECLINLINE(void) pgmPoolTrackDerefPD(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PX86PD pShwPD)
4222{
4223 for (unsigned i = 0; i < RT_ELEMENTS(pShwPD->a); i++)
4224 {
4225 if ( pShwPD->a[i].n.u1Present
4226 && !(pShwPD->a[i].u & PGM_PDFLAGS_MAPPING)
4227 )
4228 {
4229 PPGMPOOLPAGE pSubPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, pShwPD->a[i].u & X86_PDE_PG_MASK);
4230 if (pSubPage)
4231 pgmPoolTrackFreeUser(pPool, pSubPage, pPage->idx, i);
4232 else
4233 AssertFatalMsgFailed(("%x\n", pShwPD->a[i].u & X86_PDE_PG_MASK));
4234 }
4235 }
4236}
4237
4238/**
4239 * Clear references to shadowed pages in a PAE (legacy or 64-bit) page directory.
4240 *
4241 * @param pPool The pool.
4242 * @param pPage The page.
4243 * @param pShwPD The shadow page directory (mapping of the page).
4244 */
4245DECLINLINE(void) pgmPoolTrackDerefPDPae(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PX86PDPAE pShwPD)
4246{
4247 for (unsigned i = 0; i < RT_ELEMENTS(pShwPD->a); i++)
4248 {
4249 if ( pShwPD->a[i].n.u1Present
4250 && !(pShwPD->a[i].u & PGM_PDFLAGS_MAPPING)
4251 )
4252 {
4253#ifdef PGM_WITH_LARGE_PAGES
4254 if (pShwPD->a[i].b.u1Size)
4255 {
4256 Log4(("pgmPoolTrackDerefPDPae: i=%d pde=%RX64 GCPhys=%RX64\n",
4257 i, pShwPD->a[i].u & X86_PDE2M_PAE_PG_MASK, pPage->GCPhys));
4258 pgmPoolTracDerefGCPhys(pPool, pPage, pShwPD->a[i].u & X86_PDE2M_PAE_PG_MASK, pPage->GCPhys /* == base of 2 MB page */, i);
4259 }
4260 else
4261#endif
4262 {
4263 PPGMPOOLPAGE pSubPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, pShwPD->a[i].u & X86_PDE_PAE_PG_MASK);
4264 if (pSubPage)
4265 pgmPoolTrackFreeUser(pPool, pSubPage, pPage->idx, i);
4266 else
4267 AssertFatalMsgFailed(("%RX64\n", pShwPD->a[i].u & X86_PDE_PAE_PG_MASK));
4268 /** @todo 64-bit guests: have to ensure that we're not exhausting the dynamic mappings! */
4269 }
4270 }
4271 }
4272}
4273
4274/**
4275 * Clear references to shadowed pages in a PAE page directory pointer table.
4276 *
4277 * @param pPool The pool.
4278 * @param pPage The page.
4279 * @param pShwPDPT The shadow page directory pointer table (mapping of the page).
4280 */
4281DECLINLINE(void) pgmPoolTrackDerefPDPTPae(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PX86PDPT pShwPDPT)
4282{
4283 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
4284 {
4285 if ( pShwPDPT->a[i].n.u1Present
4286 && !(pShwPDPT->a[i].u & PGM_PLXFLAGS_MAPPING)
4287 )
4288 {
4289 PPGMPOOLPAGE pSubPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, pShwPDPT->a[i].u & X86_PDPE_PG_MASK);
4290 if (pSubPage)
4291 pgmPoolTrackFreeUser(pPool, pSubPage, pPage->idx, i);
4292 else
4293 AssertFatalMsgFailed(("%RX64\n", pShwPDPT->a[i].u & X86_PDPE_PG_MASK));
4294 }
4295 }
4296}
4297
4298
4299/**
4300 * Clear references to shadowed pages in a 64-bit page directory pointer table.
4301 *
4302 * @param pPool The pool.
4303 * @param pPage The page.
4304 * @param pShwPDPT The shadow page directory pointer table (mapping of the page).
4305 */
4306DECLINLINE(void) pgmPoolTrackDerefPDPT64Bit(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PX86PDPT pShwPDPT)
4307{
4308 for (unsigned i = 0; i < RT_ELEMENTS(pShwPDPT->a); i++)
4309 {
4310 Assert(!(pShwPDPT->a[i].u & PGM_PLXFLAGS_MAPPING));
4311 if (pShwPDPT->a[i].n.u1Present)
4312 {
4313 PPGMPOOLPAGE pSubPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, pShwPDPT->a[i].u & X86_PDPE_PG_MASK);
4314 if (pSubPage)
4315 pgmPoolTrackFreeUser(pPool, pSubPage, pPage->idx, i);
4316 else
4317 AssertFatalMsgFailed(("%RX64\n", pShwPDPT->a[i].u & X86_PDPE_PG_MASK));
4318 /** @todo 64-bit guests: have to ensure that we're not exhausting the dynamic mappings! */
4319 }
4320 }
4321}
4322
4323
4324/**
4325 * Clear references to shadowed pages in a 64-bit level 4 page table.
4326 *
4327 * @param pPool The pool.
4328 * @param pPage The page.
4329 * @param pShwPML4 The shadow level 4 page table (mapping of the page).
4330 */
4331DECLINLINE(void) pgmPoolTrackDerefPML464Bit(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PX86PML4 pShwPML4)
4332{
4333 for (unsigned i = 0; i < RT_ELEMENTS(pShwPML4->a); i++)
4334 {
4335 if (pShwPML4->a[i].n.u1Present)
4336 {
4337 PPGMPOOLPAGE pSubPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, pShwPML4->a[i].u & X86_PDPE_PG_MASK);
4338 if (pSubPage)
4339 pgmPoolTrackFreeUser(pPool, pSubPage, pPage->idx, i);
4340 else
4341 AssertFatalMsgFailed(("%RX64\n", pShwPML4->a[i].u & X86_PML4E_PG_MASK));
4342 /** @todo 64-bit guests: have to ensure that we're not exhausting the dynamic mappings! */
4343 }
4344 }
4345}
4346
4347
4348/**
4349 * Clear references to shadowed pages in an EPT page directory.
4350 *
4351 * @param pPool The pool.
4352 * @param pPage The page.
4353 * @param pShwPD The shadow page directory (mapping of the page).
4354 */
4355DECLINLINE(void) pgmPoolTrackDerefPDEPT(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PEPTPD pShwPD)
4356{
4357 for (unsigned i = 0; i < RT_ELEMENTS(pShwPD->a); i++)
4358 {
4359 if (pShwPD->a[i].n.u1Present)
4360 {
4361#ifdef PGM_WITH_LARGE_PAGES
4362 if (pShwPD->a[i].b.u1Size)
4363 {
4364 Log4(("pgmPoolTrackDerefPDEPT: i=%d pde=%RX64 GCPhys=%RX64\n",
4365 i, pShwPD->a[i].u & X86_PDE2M_PAE_PG_MASK, pPage->GCPhys));
4366 pgmPoolTracDerefGCPhys(pPool, pPage, pShwPD->a[i].u & X86_PDE2M_PAE_PG_MASK, pPage->GCPhys /* == base of 2 MB page */, i);
4367 }
4368 else
4369#endif
4370 {
4371 PPGMPOOLPAGE pSubPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, pShwPD->a[i].u & EPT_PDE_PG_MASK);
4372 if (pSubPage)
4373 pgmPoolTrackFreeUser(pPool, pSubPage, pPage->idx, i);
4374 else
4375 AssertFatalMsgFailed(("%RX64\n", pShwPD->a[i].u & EPT_PDE_PG_MASK));
4376 }
4377 /** @todo 64-bit guests: have to ensure that we're not exhausting the dynamic mappings! */
4378 }
4379 }
4380}
4381
4382
4383/**
4384 * Clear references to shadowed pages in an EPT page directory pointer table.
4385 *
4386 * @param pPool The pool.
4387 * @param pPage The page.
4388 * @param pShwPDPT The shadow page directory pointer table (mapping of the page).
4389 */
4390DECLINLINE(void) pgmPoolTrackDerefPDPTEPT(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PEPTPDPT pShwPDPT)
4391{
4392 for (unsigned i = 0; i < RT_ELEMENTS(pShwPDPT->a); i++)
4393 {
4394 if (pShwPDPT->a[i].n.u1Present)
4395 {
4396 PPGMPOOLPAGE pSubPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, pShwPDPT->a[i].u & EPT_PDPTE_PG_MASK);
4397 if (pSubPage)
4398 pgmPoolTrackFreeUser(pPool, pSubPage, pPage->idx, i);
4399 else
4400 AssertFatalMsgFailed(("%RX64\n", pShwPDPT->a[i].u & EPT_PDPTE_PG_MASK));
4401 /** @todo 64-bit guests: have to ensure that we're not exhausting the dynamic mappings! */
4402 }
4403 }
4404}
4405
4406
4407/**
4408 * Clears all references made by this page.
4409 *
4410 * This includes other shadow pages and GC physical addresses.
4411 *
4412 * @param pPool The pool.
4413 * @param pPage The page.
4414 */
4415static void pgmPoolTrackDeref(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
4416{
4417 /*
4418 * Map the shadow page and take action according to the page kind.
4419 */
4420 PVM pVM = pPool->CTX_SUFF(pVM);
4421 void *pvShw = PGMPOOL_PAGE_2_PTR(pVM, pPage);
4422 switch (pPage->enmKind)
4423 {
4424 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT:
4425 {
4426 STAM_PROFILE_START(&pPool->StatTrackDerefGCPhys, g);
4427 void *pvGst;
4428 int rc = PGM_GCPHYS_2_PTR(pVM, pPage->GCPhys, &pvGst); AssertReleaseRC(rc);
4429 pgmPoolTrackDerefPT32Bit32Bit(pPool, pPage, (PX86PT)pvShw, (PCX86PT)pvGst);
4430 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pvGst);
4431 STAM_PROFILE_STOP(&pPool->StatTrackDerefGCPhys, g);
4432 break;
4433 }
4434
4435 case PGMPOOLKIND_PAE_PT_FOR_32BIT_PT:
4436 {
4437 STAM_PROFILE_START(&pPool->StatTrackDerefGCPhys, g);
4438 void *pvGst;
4439 int rc = PGM_GCPHYS_2_PTR_EX(pVM, pPage->GCPhys, &pvGst); AssertReleaseRC(rc);
4440 pgmPoolTrackDerefPTPae32Bit(pPool, pPage, (PPGMSHWPTPAE)pvShw, (PCX86PT)pvGst);
4441 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pvGst);
4442 STAM_PROFILE_STOP(&pPool->StatTrackDerefGCPhys, g);
4443 break;
4444 }
4445
4446 case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
4447 {
4448 STAM_PROFILE_START(&pPool->StatTrackDerefGCPhys, g);
4449 void *pvGst;
4450 int rc = PGM_GCPHYS_2_PTR(pVM, pPage->GCPhys, &pvGst); AssertReleaseRC(rc);
4451 pgmPoolTrackDerefPTPaePae(pPool, pPage, (PPGMSHWPTPAE)pvShw, (PCX86PTPAE)pvGst);
4452 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pvGst);
4453 STAM_PROFILE_STOP(&pPool->StatTrackDerefGCPhys, g);
4454 break;
4455 }
4456
4457 case PGMPOOLKIND_32BIT_PT_FOR_PHYS: /* treat it like a 4 MB page */
4458 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB:
4459 {
4460 STAM_PROFILE_START(&pPool->StatTrackDerefGCPhys, g);
4461 pgmPoolTrackDerefPT32Bit4MB(pPool, pPage, (PX86PT)pvShw);
4462 STAM_PROFILE_STOP(&pPool->StatTrackDerefGCPhys, g);
4463 break;
4464 }
4465
4466 case PGMPOOLKIND_PAE_PT_FOR_PHYS: /* treat it like a 2 MB page */
4467 case PGMPOOLKIND_PAE_PT_FOR_PAE_2MB:
4468 case PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB:
4469 {
4470 STAM_PROFILE_START(&pPool->StatTrackDerefGCPhys, g);
4471 pgmPoolTrackDerefPTPaeBig(pPool, pPage, (PPGMSHWPTPAE)pvShw);
4472 STAM_PROFILE_STOP(&pPool->StatTrackDerefGCPhys, g);
4473 break;
4474 }
4475
4476 case PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD:
4477 case PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD:
4478 case PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD:
4479 case PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD:
4480 case PGMPOOLKIND_PAE_PD_FOR_PAE_PD:
4481 case PGMPOOLKIND_PAE_PD_PHYS:
4482 case PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD:
4483 case PGMPOOLKIND_64BIT_PD_FOR_PHYS:
4484 pgmPoolTrackDerefPDPae(pPool, pPage, (PX86PDPAE)pvShw);
4485 break;
4486
4487 case PGMPOOLKIND_32BIT_PD_PHYS:
4488 case PGMPOOLKIND_32BIT_PD:
4489 pgmPoolTrackDerefPD(pPool, pPage, (PX86PD)pvShw);
4490 break;
4491
4492 case PGMPOOLKIND_PAE_PDPT_FOR_32BIT:
4493 case PGMPOOLKIND_PAE_PDPT:
4494 case PGMPOOLKIND_PAE_PDPT_PHYS:
4495 pgmPoolTrackDerefPDPTPae(pPool, pPage, (PX86PDPT)pvShw);
4496 break;
4497
4498 case PGMPOOLKIND_64BIT_PDPT_FOR_PHYS:
4499 case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
4500 pgmPoolTrackDerefPDPT64Bit(pPool, pPage, (PX86PDPT)pvShw);
4501 break;
4502
4503 case PGMPOOLKIND_64BIT_PML4:
4504 pgmPoolTrackDerefPML464Bit(pPool, pPage, (PX86PML4)pvShw);
4505 break;
4506
4507 case PGMPOOLKIND_EPT_PT_FOR_PHYS:
4508 pgmPoolTrackDerefPTEPT(pPool, pPage, (PEPTPT)pvShw);
4509 break;
4510
4511 case PGMPOOLKIND_EPT_PD_FOR_PHYS:
4512 pgmPoolTrackDerefPDEPT(pPool, pPage, (PEPTPD)pvShw);
4513 break;
4514
4515 case PGMPOOLKIND_EPT_PDPT_FOR_PHYS:
4516 pgmPoolTrackDerefPDPTEPT(pPool, pPage, (PEPTPDPT)pvShw);
4517 break;
4518
4519 default:
4520 AssertFatalMsgFailed(("enmKind=%d\n", pPage->enmKind));
4521 }
4522
4523 /* Paranoia: clear the shadow page. Remove this later (i.e. let Alloc and ClearAll do it). */
4524 STAM_PROFILE_START(&pPool->StatZeroPage, z);
4525 ASMMemZeroPage(pvShw);
4526 STAM_PROFILE_STOP(&pPool->StatZeroPage, z);
4527 pPage->fZeroed = true;
4528 Assert(!pPage->cPresent);
4529 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pvShw);
4530}
4531
4532/**
4533 * Flushes a pool page.
4534 *
4535 * This moves the page to the free list after removing all user references to it.
4536 *
4537 * @returns VBox status code.
4538 * @retval VINF_SUCCESS on success.
4539 * @param pPool The pool.
4540 * @param pPage The shadow page.
4541 * @param fFlush Flush the TLBs when required (should only be false in very specific use cases!!)
4542 */
4543int pgmPoolFlushPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage, bool fFlush)
4544{
4545 PVM pVM = pPool->CTX_SUFF(pVM);
4546 bool fFlushRequired = false;
4547
4548 int rc = VINF_SUCCESS;
4549 STAM_PROFILE_START(&pPool->StatFlushPage, f);
4550 LogFlow(("pgmPoolFlushPage: pPage=%p:{.Key=%RHp, .idx=%d, .enmKind=%s, .GCPhys=%RGp}\n",
4551 pPage, pPage->Core.Key, pPage->idx, pgmPoolPoolKindToStr(pPage->enmKind), pPage->GCPhys));
4552
4553 /*
4554 * Quietly reject any attempts at flushing any of the special root pages.
4555 */
4556 if (pPage->idx < PGMPOOL_IDX_FIRST)
4557 {
4558 AssertFailed(); /* can no longer happen */
4559 Log(("pgmPoolFlushPage: special root page, rejected. enmKind=%s idx=%d\n", pgmPoolPoolKindToStr(pPage->enmKind), pPage->idx));
4560 return VINF_SUCCESS;
4561 }
4562
4563 pgmLock(pVM);
4564
4565 /*
4566 * Quietly reject any attempts at flushing the currently active shadow CR3 mapping
4567 */
4568 if (pgmPoolIsPageLocked(&pVM->pgm.s, pPage))
4569 {
4570 AssertMsg( pPage->enmKind == PGMPOOLKIND_64BIT_PML4
4571 || pPage->enmKind == PGMPOOLKIND_PAE_PDPT
4572 || pPage->enmKind == PGMPOOLKIND_PAE_PDPT_FOR_32BIT
4573 || pPage->enmKind == PGMPOOLKIND_32BIT_PD
4574 || pPage->enmKind == PGMPOOLKIND_PAE_PD_FOR_PAE_PD
4575 || pPage->enmKind == PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD
4576 || pPage->enmKind == PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD
4577 || pPage->enmKind == PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD
4578 || pPage->enmKind == PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD,
4579 ("Can't free the shadow CR3! (%RHp vs %RHp kind=%d\n", PGMGetHyperCR3(VMMGetCpu(pVM)), pPage->Core.Key, pPage->enmKind));
4580 Log(("pgmPoolFlushPage: current active shadow CR3, rejected. enmKind=%s idx=%d\n", pgmPoolPoolKindToStr(pPage->enmKind), pPage->idx));
4581 pgmUnlock(pVM);
4582 return VINF_SUCCESS;
4583 }
4584
4585#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
4586 /* Start a subset so we won't run out of mapping space. */
4587 PVMCPU pVCpu = VMMGetCpu(pVM);
4588 uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu);
4589#endif
4590
4591 /*
4592 * Mark the page as being in need of an ASMMemZeroPage().
4593 */
4594 pPage->fZeroed = false;
4595
4596#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
4597 if (pPage->fDirty)
4598 pgmPoolFlushDirtyPage(pVM, pPool, pPage->idxDirty, false /* do not remove */);
4599#endif
4600
4601 /* If there are any users of this table, then we *must* issue a tlb flush on all VCPUs. */
4602 if (pPage->iUserHead != NIL_PGMPOOL_USER_INDEX)
4603 fFlushRequired = true;
4604
4605 /*
4606 * Clear the page.
4607 */
4608 pgmPoolTrackClearPageUsers(pPool, pPage);
4609 STAM_PROFILE_START(&pPool->StatTrackDeref,a);
4610 pgmPoolTrackDeref(pPool, pPage);
4611 STAM_PROFILE_STOP(&pPool->StatTrackDeref,a);
4612
4613 /*
4614 * Flush it from the cache.
4615 */
4616 pgmPoolCacheFlushPage(pPool, pPage);
4617
4618#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
4619 /* Heavy stuff done. */
4620 PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset);
4621#endif
4622
4623 /*
4624 * Deregister the monitoring.
4625 */
4626 if (pPage->fMonitored)
4627 rc = pgmPoolMonitorFlush(pPool, pPage);
4628
4629 /*
4630 * Free the page.
4631 */
4632 Assert(pPage->iNext == NIL_PGMPOOL_IDX);
4633 pPage->iNext = pPool->iFreeHead;
4634 pPool->iFreeHead = pPage->idx;
4635 pPage->enmKind = PGMPOOLKIND_FREE;
4636 pPage->enmAccess = PGMPOOLACCESS_DONTCARE;
4637 pPage->GCPhys = NIL_RTGCPHYS;
4638 pPage->fReusedFlushPending = false;
4639
4640 pPool->cUsedPages--;
4641
4642 /* Flush the TLBs of all VCPUs if required. */
4643 if ( fFlushRequired
4644 && fFlush)
4645 {
4646 PGM_INVL_ALL_VCPU_TLBS(pVM);
4647 }
4648
4649 pgmUnlock(pVM);
4650 STAM_PROFILE_STOP(&pPool->StatFlushPage, f);
4651 return rc;
4652}
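
/*
 * Editor's sketch (not part of the original file): a caller passing
 * fFlush=false takes over the TLB flushing duty itself, typically to batch
 * several flushes into one.  exampleBatchedFlush is hypothetical; the two
 * APIs are the real ones from this file.
 */
static void exampleBatchedFlush(PVM pVM, PPGMPOOL pPool, PPGMPOOLPAGE pPage1, PPGMPOOLPAGE pPage2)
{
    pgmPoolFlushPage(pPool, pPage1, false /*fFlush*/);  /* defer the TLB flush */
    pgmPoolFlushPage(pPool, pPage2, false /*fFlush*/);
    PGM_INVL_ALL_VCPU_TLBS(pVM);                        /* one combined flush for both pages */
}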
4653
4654
4655/**
4656 * Frees a usage of a pool page.
4657 *
4658 * The caller is responsible for updating the user table so that it no longer
4659 * references the shadow page.
4660 *
4661 * @param pPool The pool.
4662 * @param pPage The shadow page.
4663 * @param iUser The shadow page pool index of the user table.
4664 * @param iUserTable The index into the user table (shadowed).
4665 */
4666void pgmPoolFreeByPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage, uint16_t iUser, uint32_t iUserTable)
4667{
4668 PVM pVM = pPool->CTX_SUFF(pVM);
4669
4670 STAM_PROFILE_START(&pPool->StatFree, a);
4671 LogFlow(("pgmPoolFreeByPage: pPage=%p:{.Key=%RHp, .idx=%d, enmKind=%s} iUser=%#x iUserTable=%#x\n",
4672 pPage, pPage->Core.Key, pPage->idx, pgmPoolPoolKindToStr(pPage->enmKind), iUser, iUserTable));
4673 Assert(pPage->idx >= PGMPOOL_IDX_FIRST);
4674 pgmLock(pVM);
4675 pgmPoolTrackFreeUser(pPool, pPage, iUser, iUserTable);
4676 if (!pPage->fCached)
4677 pgmPoolFlushPage(pPool, pPage);
4678 pgmUnlock(pVM);
4679 STAM_PROFILE_STOP(&pPool->StatFree, a);
4680}
4681
4682
4683/**
4684 * Makes one or more pages free, either by growing the pool or by evicting a cached page.
4685 *
4686 * @returns VBox status code.
4687 * @retval VINF_SUCCESS on success.
4688 * @retval VERR_PGM_POOL_FLUSHED if the pool was flushed.
4689 *
4690 * @param pPool The pool.
4691 * @param enmKind Page table kind
4692 * @param iUser The user of the page.
4693 */
4694static int pgmPoolMakeMoreFreePages(PPGMPOOL pPool, PGMPOOLKIND enmKind, uint16_t iUser)
4695{
4696 PVM pVM = pPool->CTX_SUFF(pVM);
4697
4698 LogFlow(("pgmPoolMakeMoreFreePages: iUser=%#x\n", iUser));
4699
4700 /*
4701 * If the pool isn't fully grown yet, expand it.
4702 */
4703 if ( pPool->cCurPages < pPool->cMaxPages
4704#if defined(IN_RC)
4705 /* Hack alert: we can't deal with jumps to ring 3 when called from MapCR3 and allocating pages for PAE PDs. */
4706 && enmKind != PGMPOOLKIND_PAE_PD_FOR_PAE_PD
4707 && (enmKind < PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD || enmKind > PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD)
4708#endif
4709 )
4710 {
4711 STAM_PROFILE_ADV_SUSPEND(&pPool->StatAlloc, a);
4712#ifdef IN_RING3
4713 int rc = PGMR3PoolGrow(pVM);
4714#else
4715 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_POOL_GROW, 0);
4716#endif
4717 if (RT_FAILURE(rc))
4718 return rc;
4719 STAM_PROFILE_ADV_RESUME(&pPool->StatAlloc, a);
4720 if (pPool->iFreeHead != NIL_PGMPOOL_IDX)
4721 return VINF_SUCCESS;
4722 }
4723
4724 /*
4725 * Free one cached page.
4726 */
4727 return pgmPoolCacheFreeOne(pPool, iUser);
4728}
4729
4730/**
4731 * Allocates a page from the pool.
4732 *
4733 * This page may actually be a cached page and not in need of any processing
4734 * on the caller's part.
4735 *
4736 * @returns VBox status code.
4737 * @retval VINF_SUCCESS if a NEW page was allocated.
4738 * @retval VINF_PGM_CACHED_PAGE if a CACHED page was returned.
4739 * @retval VERR_PGM_POOL_FLUSHED if the pool was flushed.
4740 * @param pVM The VM handle.
4741 * @param GCPhys The GC physical address of the page we're going to shadow.
4742 * For 4MB and 2MB PD entries, it's the first address the
4743 * shadow PT is covering.
4744 * @param enmKind The kind of mapping.
4745 * @param enmAccess Access type for the mapping (only relevant for big pages)
4746 * @param iUser The shadow page pool index of the user table.
4747 * @param iUserTable The index into the user table (shadowed).
4748 * @param ppPage Where to store the pointer to the page. NULL is stored here on failure.
4749 * @param fLockPage Lock the page
4750 */
4751int pgmPoolAllocEx(PVM pVM, RTGCPHYS GCPhys, PGMPOOLKIND enmKind, PGMPOOLACCESS enmAccess, uint16_t iUser, uint32_t iUserTable, PPPGMPOOLPAGE ppPage, bool fLockPage)
4752{
4753 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
4754 STAM_PROFILE_ADV_START(&pPool->StatAlloc, a);
4755 LogFlow(("pgmPoolAlloc: GCPhys=%RGp enmKind=%s iUser=%#x iUserTable=%#x\n", GCPhys, pgmPoolPoolKindToStr(enmKind), iUser, iUserTable));
4756 *ppPage = NULL;
4757 /** @todo CSAM/PGMPrefetchPage messes up here during CSAMR3CheckGates
4758 * (TRPMR3SyncIDT) because of FF priority. Try fix that?
4759 * Assert(!(pVM->pgm.s.fGlobalSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)); */
4760
4761 pgmLock(pVM);
4762
4763 if (pPool->fCacheEnabled)
4764 {
4765 int rc2 = pgmPoolCacheAlloc(pPool, GCPhys, enmKind, enmAccess, iUser, iUserTable, ppPage);
4766 if (RT_SUCCESS(rc2))
4767 {
4768 if (fLockPage)
4769 pgmPoolLockPage(pPool, *ppPage);
4770 pgmUnlock(pVM);
4771 STAM_PROFILE_ADV_STOP(&pPool->StatAlloc, a);
4772 LogFlow(("pgmPoolAlloc: cached returns %Rrc *ppPage=%p:{.Key=%RHp, .idx=%d}\n", rc2, *ppPage, (*ppPage)->Core.Key, (*ppPage)->idx));
4773 return rc2;
4774 }
4775 }
4776
4777 /*
4778 * Allocate a new one.
4779 */
4780 int rc = VINF_SUCCESS;
4781 uint16_t iNew = pPool->iFreeHead;
4782 if (iNew == NIL_PGMPOOL_IDX)
4783 {
4784 rc = pgmPoolMakeMoreFreePages(pPool, enmKind, iUser);
4785 if (RT_FAILURE(rc))
4786 {
4787 pgmUnlock(pVM);
4788 Log(("pgmPoolAlloc: returns %Rrc (Free)\n", rc));
4789 STAM_PROFILE_ADV_STOP(&pPool->StatAlloc, a);
4790 return rc;
4791 }
4792 iNew = pPool->iFreeHead;
4793 AssertReleaseReturn(iNew != NIL_PGMPOOL_IDX, VERR_INTERNAL_ERROR);
4794 }
4795
4796 /* unlink the free head */
4797 PPGMPOOLPAGE pPage = &pPool->aPages[iNew];
4798 pPool->iFreeHead = pPage->iNext;
4799 pPage->iNext = NIL_PGMPOOL_IDX;
4800
4801 /*
4802 * Initialize it.
4803 */
4804 pPool->cUsedPages++; /* physical handler registration / pgmPoolTrackFlushGCPhysPTsSlow requirement. */
4805 pPage->enmKind = enmKind;
4806 pPage->enmAccess = enmAccess;
4807 pPage->GCPhys = GCPhys;
4808 pPage->fSeenNonGlobal = false; /* Set this to 'true' to disable this feature. */
4809 pPage->fMonitored = false;
4810 pPage->fCached = false;
4811#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
4812 pPage->fDirty = false;
4813#endif
4814 pPage->fReusedFlushPending = false;
4815 pPage->cModifications = 0;
4816 pPage->iModifiedNext = NIL_PGMPOOL_IDX;
4817 pPage->iModifiedPrev = NIL_PGMPOOL_IDX;
4818 pPage->cLocked = 0;
4819 pPage->cPresent = 0;
4820 pPage->iFirstPresent = NIL_PGMPOOL_PRESENT_INDEX;
4821 pPage->pvLastAccessHandlerFault = 0;
4822 pPage->cLastAccessHandlerCount = 0;
4823 pPage->pvLastAccessHandlerRip = 0;
4824
4825 /*
4826 * Insert into the tracking and cache. If this fails, free the page.
4827 */
4828 int rc3 = pgmPoolTrackInsert(pPool, pPage, GCPhys, iUser, iUserTable);
4829 if (RT_FAILURE(rc3))
4830 {
4831 pPool->cUsedPages--;
4832 pPage->enmKind = PGMPOOLKIND_FREE;
4833 pPage->enmAccess = PGMPOOLACCESS_DONTCARE;
4834 pPage->GCPhys = NIL_RTGCPHYS;
4835 pPage->iNext = pPool->iFreeHead;
4836 pPool->iFreeHead = pPage->idx;
4837 pgmUnlock(pVM);
4838 STAM_PROFILE_ADV_STOP(&pPool->StatAlloc, a);
4839 Log(("pgmPoolAlloc: returns %Rrc (Insert)\n", rc3));
4840 return rc3;
4841 }
4842
4843 /*
4844 * Commit the allocation, clear the page and return.
4845 */
4846#ifdef VBOX_WITH_STATISTICS
4847 if (pPool->cUsedPages > pPool->cUsedPagesHigh)
4848 pPool->cUsedPagesHigh = pPool->cUsedPages;
4849#endif
4850
4851 if (!pPage->fZeroed)
4852 {
4853 STAM_PROFILE_START(&pPool->StatZeroPage, z);
4854 void *pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
4855 ASMMemZeroPage(pv);
4856 STAM_PROFILE_STOP(&pPool->StatZeroPage, z);
4857 }
4858
4859 *ppPage = pPage;
4860 if (fLockPage)
4861 pgmPoolLockPage(pPool, pPage);
4862 pgmUnlock(pVM);
4863 LogFlow(("pgmPoolAlloc: returns %Rrc *ppPage=%p:{.Key=%RHp, .idx=%d, .fCached=%RTbool, .fMonitored=%RTbool}\n",
4864 rc, pPage, pPage->Core.Key, pPage->idx, pPage->fCached, pPage->fMonitored));
4865 STAM_PROFILE_ADV_STOP(&pPool->StatAlloc, a);
4866 return rc;
4867}
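
/*
 * Editor's sketch (not part of the original file): how a caller might tell
 * the two success codes apart.  exampleShadowPT is hypothetical; the kind,
 * access and status values are the real ones used with pgmPoolAllocEx.
 */
static int exampleShadowPT(PVM pVM, RTGCPHYS GCPhys, uint16_t iUser, uint32_t iUserTable)
{
    PPGMPOOLPAGE pShwPage;
    int rc = pgmPoolAllocEx(pVM, GCPhys, PGMPOOLKIND_PAE_PT_FOR_PAE_PT, PGMPOOLACCESS_DONTCARE,
                            iUser, iUserTable, &pShwPage, false /*fLockPage*/);
    if (rc == VINF_PGM_CACHED_PAGE)
        return VINF_SUCCESS;    /* cached: the contents are already valid, nothing to sync */
    if (RT_SUCCESS(rc))
    {
        /* fresh, zeroed page: populate the shadow PT from the guest PT here */
    }
    return rc;
}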


/**
 * Frees a usage of a pool page.
 *
 * @param   pVM         The VM handle.
 * @param   HCPhys      The HC physical address of the shadow page.
 * @param   iUser       The shadow page pool index of the user table.
 * @param   iUserTable  The index into the user table (shadowed).
 */
void pgmPoolFree(PVM pVM, RTHCPHYS HCPhys, uint16_t iUser, uint32_t iUserTable)
{
    LogFlow(("pgmPoolFree: HCPhys=%RHp iUser=%#x iUserTable=%#x\n", HCPhys, iUser, iUserTable));
    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
    pgmPoolFreeByPage(pPool, pgmPoolGetPage(pPool, HCPhys), iUser, iUserTable);
}

/**
 * Internal worker for finding an 'in-use' shadow page given by its physical address.
 *
 * @returns Pointer to the shadow page structure.
 * @param   pPool       The pool.
 * @param   HCPhys      The HC physical address of the shadow page.
 */
PPGMPOOLPAGE pgmPoolGetPage(PPGMPOOL pPool, RTHCPHYS HCPhys)
{
    PVM pVM = pPool->CTX_SUFF(pVM);

    Assert(PGMIsLockOwner(pVM));

    /*
     * Look up the page.
     */
    PPGMPOOLPAGE pPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, HCPhys & X86_PTE_PAE_PG_MASK);

    AssertFatalMsg(pPage && pPage->enmKind != PGMPOOLKIND_FREE, ("HCPhys=%RHp pPage=%p idx=%d\n", HCPhys, pPage, (pPage) ? pPage->idx : 0));
    return pPage;
}
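
/*
 * Illustrative note (not part of the original source): the AVL lookup above
 * keys pool pages by their page-aligned host-physical address, so callers
 * may pass any address within the page.  A minimal sketch of the masking
 * step follows; the mask value shown is an assumption that mirrors the PAE
 * PTE address bits (12..51) -- the authoritative definition is
 * X86_PTE_PAE_PG_MASK in the x86 headers.
 */
#if 0 /* illustrative sketch only, never built */
# include <stdint.h>

/* Assumed equivalent of X86_PTE_PAE_PG_MASK: address bits 12..51. */
# define EXAMPLE_PAE_PG_MASK UINT64_C(0x000ffffffffff000)

/* Reduce an arbitrary host-physical address to its 4 KB page frame,
   i.e. the kind of key used in the pool's HCPhysTree. */
static uint64_t examplePageFrame(uint64_t HCPhys)
{
    return HCPhys & EXAMPLE_PAE_PG_MASK;
}
#endif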

#ifdef IN_RING3 /* currently only used in ring 3; save some space in the R0 & GC modules (left it here as we might need it elsewhere later on) */
/**
 * Flushes the specified page if present.
 *
 * @param   pVM     The VM handle.
 * @param   GCPhys  The guest physical address of the page to flush.
 */
void pgmPoolFlushPageByGCPhys(PVM pVM, RTGCPHYS GCPhys)
{
    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);

    VM_ASSERT_EMT(pVM);

    /*
     * Look up the GCPhys in the hash.
     */
    GCPhys = GCPhys & ~(RTGCPHYS)(PAGE_SIZE - 1);
    unsigned i = pPool->aiHash[PGMPOOL_HASH(GCPhys)];
    if (i == NIL_PGMPOOL_IDX)
        return;

    do
    {
        PPGMPOOLPAGE pPage = &pPool->aPages[i];
        /* Unsigned wrap-around: true exactly when pPage->GCPhys lies within the flushed page. */
        if (pPage->GCPhys - GCPhys < PAGE_SIZE)
        {
            switch (pPage->enmKind)
            {
                case PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT:
                case PGMPOOLKIND_PAE_PT_FOR_32BIT_PT:
                case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
                case PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD:
                case PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD:
                case PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD:
                case PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD:
                case PGMPOOLKIND_PAE_PD_FOR_PAE_PD:
                case PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD:
                case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
                case PGMPOOLKIND_64BIT_PML4:
                case PGMPOOLKIND_32BIT_PD:
                case PGMPOOLKIND_PAE_PDPT:
                {
                    Log(("PGMPoolFlushPage: found pgm pool pages for %RGp\n", GCPhys));
#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
                    if (pPage->fDirty)
                        STAM_COUNTER_INC(&pPool->StatForceFlushDirtyPage);
                    else
#endif
                        STAM_COUNTER_INC(&pPool->StatForceFlushPage);
                    Assert(!pgmPoolIsPageLocked(&pVM->pgm.s, pPage));
                    pgmPoolMonitorChainFlush(pPool, pPage);
                    return;
                }

                /* ignore, no monitoring. */
                case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB:
                case PGMPOOLKIND_PAE_PT_FOR_PAE_2MB:
                case PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB:
                case PGMPOOLKIND_32BIT_PT_FOR_PHYS:
                case PGMPOOLKIND_PAE_PT_FOR_PHYS:
                case PGMPOOLKIND_64BIT_PDPT_FOR_PHYS:
                case PGMPOOLKIND_64BIT_PD_FOR_PHYS:
                case PGMPOOLKIND_EPT_PDPT_FOR_PHYS:
                case PGMPOOLKIND_EPT_PD_FOR_PHYS:
                case PGMPOOLKIND_EPT_PT_FOR_PHYS:
                case PGMPOOLKIND_ROOT_NESTED:
                case PGMPOOLKIND_PAE_PD_PHYS:
                case PGMPOOLKIND_PAE_PDPT_PHYS:
                case PGMPOOLKIND_32BIT_PD_PHYS:
                case PGMPOOLKIND_PAE_PDPT_FOR_32BIT:
                    break;

                default:
                    AssertFatalMsgFailed(("enmKind=%d idx=%d\n", pPage->enmKind, pPage->idx));
            }
        }

        /* next */
        i = pPage->iNext;
    } while (i != NIL_PGMPOOL_IDX);
    return;
}
#endif /* IN_RING3 */
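
/*
 * Illustrative note (not part of the original source): the flush routine
 * above resolves a GCPhys through a small chained hash -- aiHash[] holds the
 * bucket heads and pages in the same bucket chain through iNext.  Below is a
 * minimal, self-contained sketch of that walk with hypothetical names and a
 * simplified hash function; the real PGMPOOL_HASH may differ.
 */
#if 0 /* illustrative sketch only, never built */
# include <stdint.h>
# define EXAMPLE_PAGE_SIZE  4096u
# define EXAMPLE_HASH_SIZE  64u
# define EXAMPLE_NIL_IDX    UINT16_MAX
# define EXAMPLE_HASH(Phys) ((unsigned)((Phys) >> 12) & (EXAMPLE_HASH_SIZE - 1))

typedef struct EXAMPLEPAGE
{
    uint64_t GCPhys; /* guest-physical address this shadow page maps */
    uint16_t iNext;  /* next page in the same hash bucket, or EXAMPLE_NIL_IDX */
} EXAMPLEPAGE;

typedef struct EXAMPLEPOOL
{
    uint16_t    aiHash[EXAMPLE_HASH_SIZE]; /* bucket heads */
    EXAMPLEPAGE aPages[256];
} EXAMPLEPOOL;

/* Find the first pool page whose GCPhys lies within the given guest page,
   mirroring the bucket walk in pgmPoolFlushPageByGCPhys. */
static uint16_t exampleLookup(EXAMPLEPOOL *pPool, uint64_t GCPhys)
{
    GCPhys &= ~(uint64_t)(EXAMPLE_PAGE_SIZE - 1);
    uint16_t i = pPool->aiHash[EXAMPLE_HASH(GCPhys)];
    while (i != EXAMPLE_NIL_IDX)
    {
        /* Same unsigned wrap-around containment test as the code above. */
        if (pPool->aPages[i].GCPhys - GCPhys < EXAMPLE_PAGE_SIZE)
            return i;
        i = pPool->aPages[i].iNext;
    }
    return EXAMPLE_NIL_IDX;
}
#endif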

#ifdef IN_RING3


/**
 * Resets the shadow paging state of a CPU when it is hot unplugged.
 *
 * @param   pVM     The VM handle.
 * @param   pVCpu   The virtual CPU.
 */
void pgmR3PoolResetUnpluggedCpu(PVM pVM, PVMCPU pVCpu)
{
    pgmR3ExitShadowModeBeforePoolFlush(pVM, pVCpu);

    pgmR3ReEnterShadowModeAfterPoolFlush(pVM, pVCpu);
    VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
}
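
/*
 * Illustrative note (not part of the original source): the VMCPU_FF_SET
 * calls above record deferred work in a per-CPU force-flag bitmask; the
 * execution loop inspects the mask before resuming the guest and performs
 * the CR3 sync / TLB flush then.  A minimal sketch of that pattern with
 * hypothetical names; the real macros use atomic operations.
 */
#if 0 /* illustrative sketch only, never built */
# include <stdint.h>

# define EXAMPLE_FF_SYNC_CR3  UINT32_C(0x00000001)
# define EXAMPLE_FF_TLB_FLUSH UINT32_C(0x00000002)

typedef struct EXAMPLECPU
{
    volatile uint32_t fForcedActions; /* pending-work bits */
} EXAMPLECPU;

/* Record deferred work (the real code sets the bit atomically). */
static void exampleFFSet(EXAMPLECPU *pCpu, uint32_t fFlag)
{
    pCpu->fForcedActions |= fFlag;
}

/* Test whether a given piece of work is pending. */
static int exampleFFIsSet(EXAMPLECPU *pCpu, uint32_t fFlag)
{
    return (pCpu->fForcedActions & fFlag) != 0;
}
#endif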


/**
 * Flushes the entire cache.
 *
 * It will assert a global CR3 flush (FF) and assumes the caller is aware of
 * this and will execute the CR3 flush.
 *
 * @param   pVM     The VM handle.
 */
void pgmR3PoolReset(PVM pVM)
{
    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);

    Assert(PGMIsLockOwner(pVM));
    STAM_PROFILE_START(&pPool->StatR3Reset, a);
    LogFlow(("pgmR3PoolReset:\n"));

    /*
     * If there are no pages in the pool, there is nothing to do.
     */
    if (pPool->cCurPages <= PGMPOOL_IDX_FIRST)
    {
        STAM_PROFILE_STOP(&pPool->StatR3Reset, a);
        return;
    }

    /*
     * Exit the shadow mode since we're going to clear everything,
     * including the root page.
     */
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];
        pgmR3ExitShadowModeBeforePoolFlush(pVM, pVCpu);
    }

    /*
     * Nuke the free list and reinsert all pages into it.
     */
    for (unsigned i = pPool->cCurPages - 1; i >= PGMPOOL_IDX_FIRST; i--)
    {
        PPGMPOOLPAGE pPage = &pPool->aPages[i];

        Assert(pPage->Core.Key == MMPage2Phys(pVM, pPage->pvPageR3));
        if (pPage->fMonitored)
            pgmPoolMonitorFlush(pPool, pPage);
        pPage->iModifiedNext = NIL_PGMPOOL_IDX;
        pPage->iModifiedPrev = NIL_PGMPOOL_IDX;
        pPage->iMonitoredNext = NIL_PGMPOOL_IDX;
        pPage->iMonitoredPrev = NIL_PGMPOOL_IDX;
        pPage->cModifications = 0;
        pPage->GCPhys = NIL_RTGCPHYS;
        pPage->enmKind = PGMPOOLKIND_FREE;
        pPage->enmAccess = PGMPOOLACCESS_DONTCARE;
        Assert(pPage->idx == i);
        pPage->iNext = i + 1;
        pPage->fZeroed = false; /* This could probably be optimized, but better safe than sorry. */
        pPage->fSeenNonGlobal = false;
        pPage->fMonitored = false;
#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
        pPage->fDirty = false;
#endif
        pPage->fCached = false;
        pPage->fReusedFlushPending = false;
        pPage->iUserHead = NIL_PGMPOOL_USER_INDEX;
        pPage->iAgeNext = NIL_PGMPOOL_IDX;
        pPage->iAgePrev = NIL_PGMPOOL_IDX;
        pPage->cLocked = 0;
    }
    pPool->aPages[pPool->cCurPages - 1].iNext = NIL_PGMPOOL_IDX;
    pPool->iFreeHead = PGMPOOL_IDX_FIRST;
    pPool->cUsedPages = 0;

    /*
     * Zap and reinitialize the user records.
     */
    pPool->cPresent = 0;
    pPool->iUserFreeHead = 0;
    PPGMPOOLUSER paUsers = pPool->CTX_SUFF(paUsers);
    const unsigned cMaxUsers = pPool->cMaxUsers;
    for (unsigned i = 0; i < cMaxUsers; i++)
    {
        paUsers[i].iNext = i + 1;
        paUsers[i].iUser = NIL_PGMPOOL_IDX;
        paUsers[i].iUserTable = 0xfffffffe;
    }
    paUsers[cMaxUsers - 1].iNext = NIL_PGMPOOL_USER_INDEX;

    /*
     * Clear all the GCPhys links and rebuild the phys ext free list.
     */
    for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
         pRam;
         pRam = pRam->CTX_SUFF(pNext))
    {
        unsigned iPage = pRam->cb >> PAGE_SHIFT;
        while (iPage-- > 0)
            PGM_PAGE_SET_TRACKING(&pRam->aPages[iPage], 0);
    }

    pPool->iPhysExtFreeHead = 0;
    PPGMPOOLPHYSEXT paPhysExts = pPool->CTX_SUFF(paPhysExts);
    const unsigned cMaxPhysExts = pPool->cMaxPhysExts;
    for (unsigned i = 0; i < cMaxPhysExts; i++)
    {
        paPhysExts[i].iNext = i + 1;
        paPhysExts[i].aidx[0] = NIL_PGMPOOL_IDX;
        paPhysExts[i].apte[0] = NIL_PGMPOOL_PHYSEXT_IDX_PTE;
        paPhysExts[i].aidx[1] = NIL_PGMPOOL_IDX;
        paPhysExts[i].apte[1] = NIL_PGMPOOL_PHYSEXT_IDX_PTE;
        paPhysExts[i].aidx[2] = NIL_PGMPOOL_IDX;
        paPhysExts[i].apte[2] = NIL_PGMPOOL_PHYSEXT_IDX_PTE;
    }
    paPhysExts[cMaxPhysExts - 1].iNext = NIL_PGMPOOL_PHYSEXT_INDEX;

    /*
     * Just zap the modified list.
     */
    pPool->cModifiedPages = 0;
    pPool->iModifiedHead = NIL_PGMPOOL_IDX;

    /*
     * Clear the GCPhys hash and the age list.
     */
    for (unsigned i = 0; i < RT_ELEMENTS(pPool->aiHash); i++)
        pPool->aiHash[i] = NIL_PGMPOOL_IDX;
    pPool->iAgeHead = NIL_PGMPOOL_IDX;
    pPool->iAgeTail = NIL_PGMPOOL_IDX;

#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
    /* Clear all dirty pages. */
    pPool->idxFreeDirtyPage = 0;
    pPool->cDirtyPages = 0;
    for (unsigned i = 0; i < RT_ELEMENTS(pPool->aIdxDirtyPages); i++)
        pPool->aIdxDirtyPages[i] = NIL_PGMPOOL_IDX;
#endif

    /*
     * Reinsert active pages into the hash and ensure monitoring chains are correct.
     */
    for (unsigned i = PGMPOOL_IDX_FIRST_SPECIAL; i < PGMPOOL_IDX_FIRST; i++)
    {
        PPGMPOOLPAGE pPage = &pPool->aPages[i];
        pPage->iNext = NIL_PGMPOOL_IDX;
        pPage->iModifiedNext = NIL_PGMPOOL_IDX;
        pPage->iModifiedPrev = NIL_PGMPOOL_IDX;
        pPage->cModifications = 0;
        /* ASSUMES that we're not sharing with any of the other special pages (safe for now). */
        pPage->iMonitoredNext = NIL_PGMPOOL_IDX;
        pPage->iMonitoredPrev = NIL_PGMPOOL_IDX;
        if (pPage->fMonitored)
        {
            int rc = PGMHandlerPhysicalChangeCallbacks(pVM, pPage->GCPhys & ~(RTGCPHYS)(PAGE_SIZE - 1),
                                                       pPool->pfnAccessHandlerR3, MMHyperCCToR3(pVM, pPage),
                                                       pPool->pfnAccessHandlerR0, MMHyperCCToR0(pVM, pPage),
                                                       pPool->pfnAccessHandlerRC, MMHyperCCToRC(pVM, pPage),
                                                       pPool->pszAccessHandler);
            AssertFatalRCSuccess(rc);
            pgmPoolHashInsert(pPool, pPage);
        }
        Assert(pPage->iUserHead == NIL_PGMPOOL_USER_INDEX); /* for now */
        Assert(pPage->iAgeNext == NIL_PGMPOOL_IDX);
        Assert(pPage->iAgePrev == NIL_PGMPOOL_IDX);
    }

    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        /*
         * Re-enter the shadowing mode and assert Sync CR3 FF.
         */
        PVMCPU pVCpu = &pVM->aCpus[i];
        pgmR3ReEnterShadowModeAfterPoolFlush(pVM, pVCpu);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    }

    STAM_PROFILE_STOP(&pPool->StatR3Reset, a);
}
#endif /* IN_RING3 */
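
/*
 * Illustrative note (not part of the original source): pgmR3PoolReset
 * rebuilds each of its index-linked free lists (pool pages, user records,
 * phys-ext records) with the same relink-everything pass -- element i points
 * at i + 1 and the last element gets the NIL sentinel.  Below is a minimal,
 * self-contained sketch of that pass with hypothetical names.
 */
#if 0 /* illustrative sketch only, never built */
# include <stdint.h>
# define EXAMPLE_NIL_IDX UINT16_MAX

typedef struct EXAMPLEREC
{
    uint16_t iNext; /* next free record, or EXAMPLE_NIL_IDX */
} EXAMPLEREC;

/* Reinitialize a free list over an array so every entry is free again,
   mirroring the paUsers/paPhysExts rebuild loops above.  Assumes cRecs >= 1.
   Returns the new free head (the first record). */
static uint16_t exampleRebuildFreeList(EXAMPLEREC *paRecs, uint16_t cRecs)
{
    for (uint16_t i = 0; i < cRecs; i++)
        paRecs[i].iNext = (uint16_t)(i + 1);
    paRecs[cRecs - 1].iNext = EXAMPLE_NIL_IDX;
    return 0;
}
#endif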

#ifdef LOG_ENABLED
static const char *pgmPoolPoolKindToStr(uint8_t enmKind)
{
    switch (enmKind)
    {
        case PGMPOOLKIND_INVALID:
            return "PGMPOOLKIND_INVALID";
        case PGMPOOLKIND_FREE:
            return "PGMPOOLKIND_FREE";
        case PGMPOOLKIND_32BIT_PT_FOR_PHYS:
            return "PGMPOOLKIND_32BIT_PT_FOR_PHYS";
        case PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT:
            return "PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT";
        case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB:
            return "PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB";
        case PGMPOOLKIND_PAE_PT_FOR_PHYS:
            return "PGMPOOLKIND_PAE_PT_FOR_PHYS";
        case PGMPOOLKIND_PAE_PT_FOR_32BIT_PT:
            return "PGMPOOLKIND_PAE_PT_FOR_32BIT_PT";
        case PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB:
            return "PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB";
        case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
            return "PGMPOOLKIND_PAE_PT_FOR_PAE_PT";
        case PGMPOOLKIND_PAE_PT_FOR_PAE_2MB:
            return "PGMPOOLKIND_PAE_PT_FOR_PAE_2MB";
        case PGMPOOLKIND_32BIT_PD:
            return "PGMPOOLKIND_32BIT_PD";
        case PGMPOOLKIND_32BIT_PD_PHYS:
            return "PGMPOOLKIND_32BIT_PD_PHYS";
        case PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD:
            return "PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD";
        case PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD:
            return "PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD";
        case PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD:
            return "PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD";
        case PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD:
            return "PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD";
        case PGMPOOLKIND_PAE_PD_FOR_PAE_PD:
            return "PGMPOOLKIND_PAE_PD_FOR_PAE_PD";
        case PGMPOOLKIND_PAE_PD_PHYS:
            return "PGMPOOLKIND_PAE_PD_PHYS";
        case PGMPOOLKIND_PAE_PDPT_FOR_32BIT:
            return "PGMPOOLKIND_PAE_PDPT_FOR_32BIT";
        case PGMPOOLKIND_PAE_PDPT:
            return "PGMPOOLKIND_PAE_PDPT";
        case PGMPOOLKIND_PAE_PDPT_PHYS:
            return "PGMPOOLKIND_PAE_PDPT_PHYS";
        case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
            return "PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT";
        case PGMPOOLKIND_64BIT_PDPT_FOR_PHYS:
            return "PGMPOOLKIND_64BIT_PDPT_FOR_PHYS";
        case PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD:
            return "PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD";
        case PGMPOOLKIND_64BIT_PD_FOR_PHYS:
            return "PGMPOOLKIND_64BIT_PD_FOR_PHYS";
        case PGMPOOLKIND_64BIT_PML4:
            return "PGMPOOLKIND_64BIT_PML4";
        case PGMPOOLKIND_EPT_PDPT_FOR_PHYS:
            return "PGMPOOLKIND_EPT_PDPT_FOR_PHYS";
        case PGMPOOLKIND_EPT_PD_FOR_PHYS:
            return "PGMPOOLKIND_EPT_PD_FOR_PHYS";
        case PGMPOOLKIND_EPT_PT_FOR_PHYS:
            return "PGMPOOLKIND_EPT_PT_FOR_PHYS";
        case PGMPOOLKIND_ROOT_NESTED:
            return "PGMPOOLKIND_ROOT_NESTED";
    }
    return "Unknown kind!";
}
#endif /* LOG_ENABLED */
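
/*
 * Illustrative note (not part of the original source): the helper above
 * exists purely for logging; a typical (hypothetical) call site would look
 * like the line below -- the file itself uses the same pattern in its
 * LogFlow statements.
 */
#if 0 /* illustrative sketch only, never built */
    Log(("pgmPool: page idx=%d kind=%s\n", pPage->idx, pgmPoolPoolKindToStr(pPage->enmKind)));
#endif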