VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp@61212

Last change on this file since 61212 was 61212, checked in by vboxsync, 9 years ago

pgmPoolMonitorIsReused: Subject non-REP MOVSWDQ and STOSWDQ to the new tests too.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 214.6 KB
1/* $Id: PGMAllPool.cpp 61212 2016-05-26 14:14:27Z vboxsync $ */
2/** @file
3 * PGM Shadow Page Pool.
4 */
5
6/*
7 * Copyright (C) 2006-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_PGM_POOL
23#include <VBox/vmm/pgm.h>
24#include <VBox/vmm/mm.h>
25#include <VBox/vmm/em.h>
26#include <VBox/vmm/cpum.h>
27#ifdef IN_RC
28# include <VBox/vmm/patm.h>
29#endif
30#include "PGMInternal.h"
31#include <VBox/vmm/vm.h>
32#include "PGMInline.h"
33#include <VBox/disopcode.h>
34#include <VBox/vmm/hm_vmx.h>
35
36#include <VBox/log.h>
37#include <VBox/err.h>
38#include <iprt/asm.h>
39#include <iprt/asm-amd64-x86.h>
40#include <iprt/string.h>
41
42
43/*********************************************************************************************************************************
44* Internal Functions *
45*********************************************************************************************************************************/
46RT_C_DECLS_BEGIN
47DECLINLINE(unsigned) pgmPoolTrackGetShadowEntrySize(PGMPOOLKIND enmKind);
48DECLINLINE(unsigned) pgmPoolTrackGetGuestEntrySize(PGMPOOLKIND enmKind);
49static void pgmPoolTrackClearPageUsers(PPGMPOOL pPool, PPGMPOOLPAGE pPage);
50static void pgmPoolTrackDeref(PPGMPOOL pPool, PPGMPOOLPAGE pPage);
51static int pgmPoolTrackAddUser(PPGMPOOL pPool, PPGMPOOLPAGE pPage, uint16_t iUser, uint32_t iUserTable);
52static void pgmPoolMonitorModifiedRemove(PPGMPOOL pPool, PPGMPOOLPAGE pPage);
53#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
54static const char *pgmPoolPoolKindToStr(uint8_t enmKind);
55#endif
56#if 0 /*defined(VBOX_STRICT) && defined(PGMPOOL_WITH_OPTIMIZED_DIRTY_PT)*/
57static void pgmPoolTrackCheckPTPaePae(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PPGMSHWPTPAE pShwPT, PCX86PTPAE pGstPT);
58#endif
59
60int pgmPoolTrackFlushGCPhysPTsSlow(PVM pVM, PPGMPAGE pPhysPage);
61PPGMPOOLPHYSEXT pgmPoolTrackPhysExtAlloc(PVM pVM, uint16_t *piPhysExt);
62void pgmPoolTrackPhysExtFree(PVM pVM, uint16_t iPhysExt);
63void pgmPoolTrackPhysExtFreeList(PVM pVM, uint16_t iPhysExt);
64
65RT_C_DECLS_END
66
67
68/**
69 * Checks if the specified page pool kind is for a 4MB or 2MB guest page.
70 *
71 * @returns true if it's the shadow of a 4MB or 2MB guest page, otherwise false.
72 * @param enmKind The page kind.
73 */
74DECLINLINE(bool) pgmPoolIsBigPage(PGMPOOLKIND enmKind)
75{
76 switch (enmKind)
77 {
78 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB:
79 case PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB:
80 case PGMPOOLKIND_PAE_PT_FOR_PAE_2MB:
81 return true;
82 default:
83 return false;
84 }
85}
86
87
88/**
89 * Flushes a chain of pages sharing the same access monitor.
90 *
91 * @returns VBox status code suitable for scheduling.
92 * @param pPool The pool.
93 * @param pPage A page in the chain.
94 * @todo VBOXSTRICTRC
95 */
96int pgmPoolMonitorChainFlush(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
97{
98 LogFlow(("pgmPoolMonitorChainFlush: Flush page %RGp type=%d\n", pPage->GCPhys, pPage->enmKind));
99
100 /*
101 * Find the list head.
102 */
103 uint16_t idx = pPage->idx;
104 if (pPage->iMonitoredPrev != NIL_PGMPOOL_IDX)
105 {
106 while (pPage->iMonitoredPrev != NIL_PGMPOOL_IDX)
107 {
108 idx = pPage->iMonitoredPrev;
109 Assert(idx != pPage->idx);
110 pPage = &pPool->aPages[idx];
111 }
112 }
113
114 /*
115 * Iterate the list flushing each shadow page.
116 */
117 int rc = VINF_SUCCESS;
118 for (;;)
119 {
120 idx = pPage->iMonitoredNext;
121 Assert(idx != pPage->idx);
122 if (pPage->idx >= PGMPOOL_IDX_FIRST)
123 {
124 int rc2 = pgmPoolFlushPage(pPool, pPage);
125 AssertRC(rc2);
126 }
127 /* next */
128 if (idx == NIL_PGMPOOL_IDX)
129 break;
130 pPage = &pPool->aPages[idx];
131 }
132 return rc;
133}
134
135
136/**
137 * Wrapper for reading the guest entry being modified, using whatever access the current context provides.
138 *
139 * @returns VBox status code suitable for scheduling.
140 * @param pVM The cross context VM structure.
141 * @param pvDst Destination address.
142 * @param pvSrc Pointer to the mapping of @a GCPhysSrc or NULL depending
143 * on the context (e.g. \#PF in R0 & RC).
144 * @param GCPhysSrc The source guest physical address.
145 * @param cb Size of data to read.
146 */
147DECLINLINE(int) pgmPoolPhysSimpleReadGCPhys(PVM pVM, void *pvDst, void const *pvSrc, RTGCPHYS GCPhysSrc, size_t cb)
148{
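/* Note: the source address is masked down to an entry-sized boundary below, so a
   write that merely overlaps the entry still reads the whole (aligned) guest entry;
   cb is assumed to be a power of two (4 or 8) here. */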
149#if defined(IN_RING3)
150 NOREF(pVM); NOREF(GCPhysSrc);
151 memcpy(pvDst, (RTHCPTR)((uintptr_t)pvSrc & ~(RTHCUINTPTR)(cb - 1)), cb);
152 return VINF_SUCCESS;
153#else
154 /* @todo in RC we could attempt to use the virtual address, although this can cause many faults (PAE Windows XP guest). */
155 NOREF(pvSrc);
156 return PGMPhysSimpleReadGCPhys(pVM, pvDst, GCPhysSrc & ~(RTGCPHYS)(cb - 1), cb);
157#endif
158}
159
160
161/**
162 * Process shadow entries before they are changed by the guest.
163 *
164 * For PT entries we will clear them. For PD entries, we'll simply check
165 * for mapping conflicts and set the SyncCR3 FF if found.
166 *
167 * @param pVCpu The cross context virtual CPU structure.
168 * @param pPool The pool.
169 * @param pPage The head page.
170 * @param GCPhysFault The guest physical fault address.
171 * @param pvAddress Pointer to the mapping of @a GCPhysFault or NULL
172 * depending on the context (e.g. \#PF in R0 & RC).
173 * @param cbWrite Write size; might be zero if the caller knows we're not crossing entry boundaries
174 */
175static void pgmPoolMonitorChainChanging(PVMCPU pVCpu, PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTGCPHYS GCPhysFault,
176 void const *pvAddress, unsigned cbWrite)
177{
178 AssertMsg(pPage->iMonitoredPrev == NIL_PGMPOOL_IDX, ("%u (idx=%u)\n", pPage->iMonitoredPrev, pPage->idx));
179 const unsigned off = GCPhysFault & PAGE_OFFSET_MASK;
180 PVM pVM = pPool->CTX_SUFF(pVM);
181 NOREF(pVCpu);
182
183 LogFlow(("pgmPoolMonitorChainChanging: %RGv phys=%RGp cbWrite=%d\n", (RTGCPTR)(CTXTYPE(RTGCPTR, uintptr_t, RTGCPTR))pvAddress, GCPhysFault, cbWrite));
184
185 for (;;)
186 {
187 union
188 {
189 void *pv;
190 PX86PT pPT;
191 PPGMSHWPTPAE pPTPae;
192 PX86PD pPD;
193 PX86PDPAE pPDPae;
194 PX86PDPT pPDPT;
195 PX86PML4 pPML4;
196 } uShw;
197
198 LogFlow(("pgmPoolMonitorChainChanging: page idx=%d phys=%RGp (next=%d) kind=%s write=%#x\n",
199 pPage->idx, pPage->GCPhys, pPage->iMonitoredNext, pgmPoolPoolKindToStr(pPage->enmKind), cbWrite));
200
201 uShw.pv = NULL;
202 switch (pPage->enmKind)
203 {
204 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT:
205 {
206 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPT));
207 uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
208 const unsigned iShw = off / sizeof(X86PTE);
209 LogFlow(("PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT iShw=%x\n", iShw));
210 if (uShw.pPT->a[iShw].n.u1Present)
211 {
212 X86PTE GstPte;
213
214 int rc = pgmPoolPhysSimpleReadGCPhys(pVM, &GstPte, pvAddress, GCPhysFault, sizeof(GstPte));
215 AssertRC(rc);
216 Log4(("pgmPoolMonitorChainChanging 32_32: deref %016RX64 GCPhys %08RX32\n", uShw.pPT->a[iShw].u & X86_PTE_PAE_PG_MASK, GstPte.u & X86_PTE_PG_MASK));
217 pgmPoolTracDerefGCPhysHint(pPool, pPage,
218 uShw.pPT->a[iShw].u & X86_PTE_PAE_PG_MASK,
219 GstPte.u & X86_PTE_PG_MASK,
220 iShw);
221 ASMAtomicWriteU32(&uShw.pPT->a[iShw].u, 0);
222 }
223 break;
224 }
225
226 /* page/2 sized */
227 case PGMPOOLKIND_PAE_PT_FOR_32BIT_PT:
228 {
229 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPT));
230 uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
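/* A PAE shadow PT covers 2MB, i.e. only half of the 4MB mapped by the 32-bit guest PT,
   so only react when the write hits the half this shadow page actually mirrors. */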
231 if (!((off ^ pPage->GCPhys) & (PAGE_SIZE / 2)))
232 {
233 const unsigned iShw = (off / sizeof(X86PTE)) & (X86_PG_PAE_ENTRIES - 1);
234 LogFlow(("PGMPOOLKIND_PAE_PT_FOR_32BIT_PT iShw=%x\n", iShw));
235 if (PGMSHWPTEPAE_IS_P(uShw.pPTPae->a[iShw]))
236 {
237 X86PTE GstPte;
238 int rc = pgmPoolPhysSimpleReadGCPhys(pVM, &GstPte, pvAddress, GCPhysFault, sizeof(GstPte));
239 AssertRC(rc);
240
241 Log4(("pgmPoolMonitorChainChanging pae_32: deref %016RX64 GCPhys %08RX32\n", uShw.pPT->a[iShw].u & X86_PTE_PAE_PG_MASK, GstPte.u & X86_PTE_PG_MASK));
242 pgmPoolTracDerefGCPhysHint(pPool, pPage,
243 PGMSHWPTEPAE_GET_HCPHYS(uShw.pPTPae->a[iShw]),
244 GstPte.u & X86_PTE_PG_MASK,
245 iShw);
246 PGMSHWPTEPAE_ATOMIC_SET(uShw.pPTPae->a[iShw], 0);
247 }
248 }
249 break;
250 }
251
252 case PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD:
253 case PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD:
254 case PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD:
255 case PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD:
256 {
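/* Index math: a 32-bit guest PD has 1024 entries covering 4GB, while each shadow PAE PD
   covers 1GB (256 guest entries); every 4MB guest PDE is shadowed by two 2MB PAE PDEs,
   hence the factor of two below. */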
257 unsigned iGst = off / sizeof(X86PDE);
258 unsigned iShwPdpt = iGst / 256;
259 unsigned iShw = (iGst % 256) * 2;
260 uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
261
262 LogFlow(("pgmPoolMonitorChainChanging PAE for 32 bits: iGst=%x iShw=%x idx = %d page idx=%d\n", iGst, iShw, iShwPdpt, pPage->enmKind - PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD));
263 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPD));
264 if (iShwPdpt == pPage->enmKind - (unsigned)PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD)
265 {
266 for (unsigned i = 0; i < 2; i++)
267 {
268# ifdef VBOX_WITH_RAW_MODE_NOT_R0
269 if ((uShw.pPDPae->a[iShw + i].u & (PGM_PDFLAGS_MAPPING | X86_PDE_P)) == (PGM_PDFLAGS_MAPPING | X86_PDE_P))
270 {
271 Assert(pgmMapAreMappingsEnabled(pVM));
272 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
273 LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShwPdpt=%#x iShw=%#x!\n", iShwPdpt, iShw+i));
274 break;
275 }
276# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
277 if (uShw.pPDPae->a[iShw+i].n.u1Present)
278 {
279 LogFlow(("pgmPoolMonitorChainChanging: pae pd iShw=%#x: %RX64 -> freeing it!\n", iShw+i, uShw.pPDPae->a[iShw+i].u));
280 pgmPoolFree(pVM,
281 uShw.pPDPae->a[iShw+i].u & X86_PDE_PAE_PG_MASK,
282 pPage->idx,
283 iShw + i);
284 ASMAtomicWriteU64(&uShw.pPDPae->a[iShw+i].u, 0);
285 }
286
287 /* paranoia / a bit assumptive. */
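/* A misaligned write spilling past this 4-byte guest PDE may also touch the next one,
   whose shadow pair starts two PAE entries further on; clear that as well. */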
288 if ( (off & 3)
289 && (off & 3) + cbWrite > 4)
290 {
291 const unsigned iShw2 = iShw + 2 + i;
292 if (iShw2 < RT_ELEMENTS(uShw.pPDPae->a))
293 {
294# ifdef VBOX_WITH_RAW_MODE_NOT_R0
295 if ((uShw.pPDPae->a[iShw2].u & (PGM_PDFLAGS_MAPPING | X86_PDE_P)) == (PGM_PDFLAGS_MAPPING | X86_PDE_P))
296 {
297 Assert(pgmMapAreMappingsEnabled(pVM));
298 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
299 LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShwPdpt=%#x iShw2=%#x!\n", iShwPdpt, iShw2));
300 break;
301 }
302# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
303 if (uShw.pPDPae->a[iShw2].n.u1Present)
304 {
305 LogFlow(("pgmPoolMonitorChainChanging: pae pd iShw=%#x: %RX64 -> freeing it!\n", iShw2, uShw.pPDPae->a[iShw2].u));
306 pgmPoolFree(pVM,
307 uShw.pPDPae->a[iShw2].u & X86_PDE_PAE_PG_MASK,
308 pPage->idx,
309 iShw2);
310 ASMAtomicWriteU64(&uShw.pPDPae->a[iShw2].u, 0);
311 }
312 }
313 }
314 }
315 }
316 break;
317 }
318
319 case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
320 {
321 uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
322 const unsigned iShw = off / sizeof(X86PTEPAE);
323 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPT));
324 if (PGMSHWPTEPAE_IS_P(uShw.pPTPae->a[iShw]))
325 {
326 X86PTEPAE GstPte;
327 int rc = pgmPoolPhysSimpleReadGCPhys(pVM, &GstPte, pvAddress, GCPhysFault, sizeof(GstPte));
328 AssertRC(rc);
329
330 Log4(("pgmPoolMonitorChainChanging pae: deref %016RX64 GCPhys %016RX64\n", PGMSHWPTEPAE_GET_HCPHYS(uShw.pPTPae->a[iShw]), GstPte.u & X86_PTE_PAE_PG_MASK));
331 pgmPoolTracDerefGCPhysHint(pPool, pPage,
332 PGMSHWPTEPAE_GET_HCPHYS(uShw.pPTPae->a[iShw]),
333 GstPte.u & X86_PTE_PAE_PG_MASK,
334 iShw);
335 PGMSHWPTEPAE_ATOMIC_SET(uShw.pPTPae->a[iShw], 0);
336 }
337
338 /* paranoia / a bit assumptive. */
339 if ( (off & 7)
340 && (off & 7) + cbWrite > sizeof(X86PTEPAE))
341 {
342 const unsigned iShw2 = (off + cbWrite - 1) / sizeof(X86PTEPAE);
343 AssertBreak(iShw2 < RT_ELEMENTS(uShw.pPTPae->a));
344
345 if (PGMSHWPTEPAE_IS_P(uShw.pPTPae->a[iShw2]))
346 {
347 X86PTEPAE GstPte;
348 int rc = pgmPoolPhysSimpleReadGCPhys(pVM, &GstPte,
349 pvAddress ? (uint8_t const *)pvAddress + sizeof(GstPte) : NULL,
350 GCPhysFault + sizeof(GstPte), sizeof(GstPte));
351 AssertRC(rc);
352 Log4(("pgmPoolMonitorChainChanging pae: deref %016RX64 GCPhys %016RX64\n", PGMSHWPTEPAE_GET_HCPHYS(uShw.pPTPae->a[iShw2]), GstPte.u & X86_PTE_PAE_PG_MASK));
353 pgmPoolTracDerefGCPhysHint(pPool, pPage,
354 PGMSHWPTEPAE_GET_HCPHYS(uShw.pPTPae->a[iShw2]),
355 GstPte.u & X86_PTE_PAE_PG_MASK,
356 iShw2);
357 PGMSHWPTEPAE_ATOMIC_SET(uShw.pPTPae->a[iShw2], 0);
358 }
359 }
360 break;
361 }
362
363 case PGMPOOLKIND_32BIT_PD:
364 {
365 uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
366 const unsigned iShw = off / sizeof(X86PTE); // ASSUMING 32-bit guest paging!
367
368 LogFlow(("pgmPoolMonitorChainChanging: PGMPOOLKIND_32BIT_PD %x\n", iShw));
369 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPD));
370# ifdef VBOX_WITH_RAW_MODE_NOT_R0
371 if (uShw.pPD->a[iShw].u & PGM_PDFLAGS_MAPPING)
372 {
373 Assert(pgmMapAreMappingsEnabled(pVM));
374 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
375 STAM_COUNTER_INC(&(pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestCR3WriteConflict));
376 LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw=%#x!\n", iShw));
377 break;
378 }
379 else
380# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
381 {
382 if (uShw.pPD->a[iShw].n.u1Present)
383 {
384 LogFlow(("pgmPoolMonitorChainChanging: 32 bit pd iShw=%#x: %RX64 -> freeing it!\n", iShw, uShw.pPD->a[iShw].u));
385 pgmPoolFree(pVM,
386 uShw.pPD->a[iShw].u & X86_PDE_PAE_PG_MASK,
387 pPage->idx,
388 iShw);
389 ASMAtomicWriteU32(&uShw.pPD->a[iShw].u, 0);
390 }
391 }
392 /* paranoia / a bit assumptive. */
393 if ( (off & 3)
394 && (off & 3) + cbWrite > sizeof(X86PTE))
395 {
396 const unsigned iShw2 = (off + cbWrite - 1) / sizeof(X86PTE);
397 if ( iShw2 != iShw
398 && iShw2 < RT_ELEMENTS(uShw.pPD->a))
399 {
400# ifdef VBOX_WITH_RAW_MODE_NOT_R0
401 if (uShw.pPD->a[iShw2].u & PGM_PDFLAGS_MAPPING)
402 {
403 Assert(pgmMapAreMappingsEnabled(pVM));
404 STAM_COUNTER_INC(&(pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestCR3WriteConflict));
405 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
406 LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw2=%#x!\n", iShw2));
407 break;
408 }
409# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
410 if (uShw.pPD->a[iShw2].n.u1Present)
411 {
412 LogFlow(("pgmPoolMonitorChainChanging: 32 bit pd iShw=%#x: %RX64 -> freeing it!\n", iShw2, uShw.pPD->a[iShw2].u));
413 pgmPoolFree(pVM,
414 uShw.pPD->a[iShw2].u & X86_PDE_PAE_PG_MASK,
415 pPage->idx,
416 iShw2);
417 ASMAtomicWriteU32(&uShw.pPD->a[iShw2].u, 0);
418 }
419 }
420 }
421#if 0 /* useful when running PGMAssertCR3(), a bit too troublesome for general use (TLBs). - not working any longer... */
422 if ( uShw.pPD->a[iShw].n.u1Present
423 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3))
424 {
425 LogFlow(("pgmPoolMonitorChainChanging: iShw=%#x: %RX32 -> freeing it!\n", iShw, uShw.pPD->a[iShw].u));
426# ifdef IN_RC /* TLB load - we're pushing things a bit... */
427 ASMProbeReadByte(pvAddress);
428# endif
429 pgmPoolFree(pVM, uShw.pPD->a[iShw].u & X86_PDE_PG_MASK, pPage->idx, iShw);
430 ASMAtomicWriteU32(&uShw.pPD->a[iShw].u, 0);
431 }
432#endif
433 break;
434 }
435
436 case PGMPOOLKIND_PAE_PD_FOR_PAE_PD:
437 {
438 uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
439 const unsigned iShw = off / sizeof(X86PDEPAE);
440 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPD));
441#ifdef VBOX_WITH_RAW_MODE_NOT_R0
442 if (uShw.pPDPae->a[iShw].u & PGM_PDFLAGS_MAPPING)
443 {
444 Assert(pgmMapAreMappingsEnabled(pVM));
445 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
446 STAM_COUNTER_INC(&(pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestCR3WriteConflict));
447 LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw=%#x!\n", iShw));
448 break;
449 }
450#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
451 /*
452 * Causes trouble when the guest uses a PDE to refer to the whole page table level
453 * structure. (Invalidate here; faults later on when it tries to change the page
454 * table entries -> recheck; probably only applies to the RC case.)
455 */
456#ifdef VBOX_WITH_RAW_MODE_NOT_R0
457 else
458#endif
459 {
460 if (uShw.pPDPae->a[iShw].n.u1Present)
461 {
462 LogFlow(("pgmPoolMonitorChainChanging: pae pd iShw=%#x: %RX64 -> freeing it!\n", iShw, uShw.pPDPae->a[iShw].u));
463 pgmPoolFree(pVM,
464 uShw.pPDPae->a[iShw].u & X86_PDE_PAE_PG_MASK,
465 pPage->idx,
466 iShw);
467 ASMAtomicWriteU64(&uShw.pPDPae->a[iShw].u, 0);
468 }
469 }
470 /* paranoia / a bit assumptive. */
471 if ( (off & 7)
472 && (off & 7) + cbWrite > sizeof(X86PDEPAE))
473 {
474 const unsigned iShw2 = (off + cbWrite - 1) / sizeof(X86PDEPAE);
475 AssertBreak(iShw2 < RT_ELEMENTS(uShw.pPDPae->a));
476
477#ifdef VBOX_WITH_RAW_MODE_NOT_R0
478 if ( iShw2 != iShw
479 && uShw.pPDPae->a[iShw2].u & PGM_PDFLAGS_MAPPING)
480 {
481 Assert(pgmMapAreMappingsEnabled(pVM));
482 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
483 STAM_COUNTER_INC(&(pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestCR3WriteConflict));
484 LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw2=%#x!\n", iShw2));
485 break;
486 }
487 else
488#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
489 if (uShw.pPDPae->a[iShw2].n.u1Present)
490 {
491 LogFlow(("pgmPoolMonitorChainChanging: pae pd iShw2=%#x: %RX64 -> freeing it!\n", iShw2, uShw.pPDPae->a[iShw2].u));
492 pgmPoolFree(pVM,
493 uShw.pPDPae->a[iShw2].u & X86_PDE_PAE_PG_MASK,
494 pPage->idx,
495 iShw2);
496 ASMAtomicWriteU64(&uShw.pPDPae->a[iShw2].u, 0);
497 }
498 }
499 break;
500 }
501
502 case PGMPOOLKIND_PAE_PDPT:
503 {
504 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPDPT));
505 /*
506 * Hopefully this doesn't happen very often:
507 * - touching unused parts of the page
508 * - messing with the bits of pd pointers without changing the physical address
509 */
510 /* PDPT roots are not page aligned; 32 byte only! */
511 const unsigned offPdpt = GCPhysFault - pPage->GCPhys;
512
513 uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
514 const unsigned iShw = offPdpt / sizeof(X86PDPE);
515 if (iShw < X86_PG_PAE_PDPE_ENTRIES) /* don't use RT_ELEMENTS(uShw.pPDPT->a), because that's for long mode only */
516 {
517# ifdef VBOX_WITH_RAW_MODE_NOT_R0
518 if (uShw.pPDPT->a[iShw].u & PGM_PLXFLAGS_MAPPING)
519 {
520 Assert(pgmMapAreMappingsEnabled(pVM));
521 STAM_COUNTER_INC(&(pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestCR3WriteConflict));
522 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
523 LogFlow(("pgmPoolMonitorChainChanging: Detected pdpt conflict at iShw=%#x!\n", iShw));
524 break;
525 }
526 else
527# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
528 if (uShw.pPDPT->a[iShw].n.u1Present)
529 {
530 LogFlow(("pgmPoolMonitorChainChanging: pae pdpt iShw=%#x: %RX64 -> freeing it!\n", iShw, uShw.pPDPT->a[iShw].u));
531 pgmPoolFree(pVM,
532 uShw.pPDPT->a[iShw].u & X86_PDPE_PG_MASK,
533 pPage->idx,
534 iShw);
535 ASMAtomicWriteU64(&uShw.pPDPT->a[iShw].u, 0);
536 }
537
538 /* paranoia / a bit assumptive. */
539 if ( (offPdpt & 7)
540 && (offPdpt & 7) + cbWrite > sizeof(X86PDPE))
541 {
542 const unsigned iShw2 = (offPdpt + cbWrite - 1) / sizeof(X86PDPE);
543 if ( iShw2 != iShw
544 && iShw2 < X86_PG_PAE_PDPE_ENTRIES)
545 {
546# ifdef VBOX_WITH_RAW_MODE_NOT_R0
547 if (uShw.pPDPT->a[iShw2].u & PGM_PLXFLAGS_MAPPING)
548 {
549 Assert(pgmMapAreMappingsEnabled(pVM));
550 STAM_COUNTER_INC(&(pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestCR3WriteConflict));
551 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
552 LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw2=%#x!\n", iShw2));
553 break;
554 }
555 else
556# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
557 if (uShw.pPDPT->a[iShw2].n.u1Present)
558 {
559 LogFlow(("pgmPoolMonitorChainChanging: pae pdpt iShw=%#x: %RX64 -> freeing it!\n", iShw2, uShw.pPDPT->a[iShw2].u));
560 pgmPoolFree(pVM,
561 uShw.pPDPT->a[iShw2].u & X86_PDPE_PG_MASK,
562 pPage->idx,
563 iShw2);
564 ASMAtomicWriteU64(&uShw.pPDPT->a[iShw2].u, 0);
565 }
566 }
567 }
568 }
569 break;
570 }
571
572#ifndef IN_RC
573 case PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD:
574 {
575 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPD));
576 uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
577 const unsigned iShw = off / sizeof(X86PDEPAE);
578 Assert(!(uShw.pPDPae->a[iShw].u & PGM_PDFLAGS_MAPPING));
579 if (uShw.pPDPae->a[iShw].n.u1Present)
580 {
581 LogFlow(("pgmPoolMonitorChainChanging: pae pd iShw=%#x: %RX64 -> freeing it!\n", iShw, uShw.pPDPae->a[iShw].u));
582 pgmPoolFree(pVM,
583 uShw.pPDPae->a[iShw].u & X86_PDE_PAE_PG_MASK,
584 pPage->idx,
585 iShw);
586 ASMAtomicWriteU64(&uShw.pPDPae->a[iShw].u, 0);
587 }
588 /* paranoia / a bit assumptive. */
589 if ( (off & 7)
590 && (off & 7) + cbWrite > sizeof(X86PDEPAE))
591 {
592 const unsigned iShw2 = (off + cbWrite - 1) / sizeof(X86PDEPAE);
593 AssertBreak(iShw2 < RT_ELEMENTS(uShw.pPDPae->a));
594
595 Assert(!(uShw.pPDPae->a[iShw2].u & PGM_PDFLAGS_MAPPING));
596 if (uShw.pPDPae->a[iShw2].n.u1Present)
597 {
598 LogFlow(("pgmPoolMonitorChainChanging: pae pd iShw2=%#x: %RX64 -> freeing it!\n", iShw2, uShw.pPDPae->a[iShw2].u));
599 pgmPoolFree(pVM,
600 uShw.pPDPae->a[iShw2].u & X86_PDE_PAE_PG_MASK,
601 pPage->idx,
602 iShw2);
603 ASMAtomicWriteU64(&uShw.pPDPae->a[iShw2].u, 0);
604 }
605 }
606 break;
607 }
608
609 case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
610 {
611 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPDPT));
612 /*
613 * Hopefully this doesn't happen very often:
614 * - messing with the bits of pd pointers without changing the physical address
615 */
616 uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
617 const unsigned iShw = off / sizeof(X86PDPE);
618 if (uShw.pPDPT->a[iShw].n.u1Present)
619 {
620 LogFlow(("pgmPoolMonitorChainChanging: pdpt iShw=%#x: %RX64 -> freeing it!\n", iShw, uShw.pPDPT->a[iShw].u));
621 pgmPoolFree(pVM, uShw.pPDPT->a[iShw].u & X86_PDPE_PG_MASK, pPage->idx, iShw);
622 ASMAtomicWriteU64(&uShw.pPDPT->a[iShw].u, 0);
623 }
624 /* paranoia / a bit assumptive. */
625 if ( (off & 7)
626 && (off & 7) + cbWrite > sizeof(X86PDPE))
627 {
628 const unsigned iShw2 = (off + cbWrite - 1) / sizeof(X86PDPE);
629 if (uShw.pPDPT->a[iShw2].n.u1Present)
630 {
631 LogFlow(("pgmPoolMonitorChainChanging: pdpt iShw2=%#x: %RX64 -> freeing it!\n", iShw2, uShw.pPDPT->a[iShw2].u));
632 pgmPoolFree(pVM, uShw.pPDPT->a[iShw2].u & X86_PDPE_PG_MASK, pPage->idx, iShw2);
633 ASMAtomicWriteU64(&uShw.pPDPT->a[iShw2].u, 0);
634 }
635 }
636 break;
637 }
638
639 case PGMPOOLKIND_64BIT_PML4:
640 {
641 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FaultPML4));
642 /*
643 * Hopefully this doesn't happen very often:
644 * - messing with the bits of pd pointers without changing the physical address
645 */
646 uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
647 const unsigned iShw = off / sizeof(X86PDPE);
648 if (uShw.pPML4->a[iShw].n.u1Present)
649 {
650 LogFlow(("pgmPoolMonitorChainChanging: pml4 iShw=%#x: %RX64 -> freeing it!\n", iShw, uShw.pPML4->a[iShw].u));
651 pgmPoolFree(pVM, uShw.pPML4->a[iShw].u & X86_PML4E_PG_MASK, pPage->idx, iShw);
652 ASMAtomicWriteU64(&uShw.pPML4->a[iShw].u, 0);
653 }
654 /* paranoia / a bit assumptive. */
655 if ( (off & 7)
656 && (off & 7) + cbWrite > sizeof(X86PDPE))
657 {
658 const unsigned iShw2 = (off + cbWrite - 1) / sizeof(X86PML4E);
659 if (uShw.pPML4->a[iShw2].n.u1Present)
660 {
661 LogFlow(("pgmPoolMonitorChainChanging: pml4 iShw2=%#x: %RX64 -> freeing it!\n", iShw2, uShw.pPML4->a[iShw2].u));
662 pgmPoolFree(pVM, uShw.pPML4->a[iShw2].u & X86_PML4E_PG_MASK, pPage->idx, iShw2);
663 ASMAtomicWriteU64(&uShw.pPML4->a[iShw2].u, 0);
664 }
665 }
666 break;
667 }
668#endif /* !IN_RC */
669
670 default:
671 AssertFatalMsgFailed(("enmKind=%d\n", pPage->enmKind));
672 }
673 PGM_DYNMAP_UNUSED_HINT_VM(pVM, uShw.pv);
674
675 /* next */
676 if (pPage->iMonitoredNext == NIL_PGMPOOL_IDX)
677 return;
678 pPage = &pPool->aPages[pPage->iMonitoredNext];
679 }
680}
681
682# ifndef IN_RING3
683
684/**
685 * Checks if an access could be a fork operation in progress.
686 *
687 * Meaning that the guest is setting up the parent process for Copy-On-Write.
688 *
689 * @returns true if it's likely that we're forking, otherwise false.
690 * @param pPool The pool.
691 * @param pDis The disassembled instruction.
692 * @param offFault The access offset.
693 */
694DECLINLINE(bool) pgmPoolMonitorIsForking(PPGMPOOL pPool, PDISCPUSTATE pDis, unsigned offFault)
695{
696 /*
697 * i386 linux is using btr to clear X86_PTE_RW.
698 * The functions involved are (2.6.16 source inspection):
699 * clear_bit
700 * ptep_set_wrprotect
701 * copy_one_pte
702 * copy_pte_range
703 * copy_pmd_range
704 * copy_pud_range
705 * copy_page_range
706 * dup_mmap
707 * dup_mm
708 * copy_mm
709 * copy_process
710 * do_fork
711 */
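/* The (offFault & 4) test presumably restricts this to the low dword of an 8-byte PAE
   PTE, where the R/W bit lives; see the @todo in the condition below. */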
712 if ( pDis->pCurInstr->uOpcode == OP_BTR
713 && !(offFault & 4)
714 /** @todo Validate that the bit index is X86_PTE_RW. */
715 )
716 {
717 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,Fork));
718 return true;
719 }
720 return false;
721}
722
723
724/**
725 * Determines whether the page is likely to have been reused.
726 *
727 * @returns true if we consider the page as being reused for a different purpose.
728 * @returns false if we consider it to still be a paging page.
729 * @param pVM The cross context VM structure.
730 * @param pVCpu The cross context virtual CPU structure.
731 * @param pRegFrame Trap register frame.
732 * @param pDis The disassembly info for the faulting instruction.
733 * @param pvFault The fault address.
734 *
735 * @remark The REP prefix check is left to the caller because of STOSD/W.
736 */
737DECLINLINE(bool) pgmPoolMonitorIsReused(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pDis, RTGCPTR pvFault)
738{
739#ifndef IN_RC
740 /** @todo could make this general, faulting close to rsp should be a safe reuse heuristic. */
741 if ( HMHasPendingIrq(pVM)
742 && (pRegFrame->rsp - pvFault) < 32)
743 {
744 /* Fault caused by stack writes while trying to inject an interrupt event. */
745 Log(("pgmPoolMonitorIsReused: reused %RGv for interrupt stack (rsp=%RGv).\n", pvFault, pRegFrame->rsp));
746 return true;
747 }
748#else
749 NOREF(pVM); NOREF(pvFault);
750#endif
751
752 LogFlow(("Reused instr %RGv %d at %RGv param1.fUse=%llx param1.reg=%d\n", pRegFrame->rip, pDis->pCurInstr->uOpcode, pvFault, pDis->Param1.fUse, pDis->Param1.Base.idxGenReg));
753
754 /* Non-supervisor mode write means it's used for something else. */
755 if (CPUMGetGuestCPL(pVCpu) == 3)
756 return true;
757
758 switch (pDis->pCurInstr->uOpcode)
759 {
760 /* call implies the actual push of the return address faulted */
761 case OP_CALL:
762 Log4(("pgmPoolMonitorIsReused: CALL\n"));
763 return true;
764 case OP_PUSH:
765 Log4(("pgmPoolMonitorIsReused: PUSH\n"));
766 return true;
767 case OP_PUSHF:
768 Log4(("pgmPoolMonitorIsReused: PUSHF\n"));
769 return true;
770 case OP_PUSHA:
771 Log4(("pgmPoolMonitorIsReused: PUSHA\n"));
772 return true;
773 case OP_FXSAVE:
774 Log4(("pgmPoolMonitorIsReused: FXSAVE\n"));
775 return true;
776 case OP_MOVNTI: /* solaris - block_zero_no_xmm */
777 Log4(("pgmPoolMonitorIsReused: MOVNTI\n"));
778 return true;
779 case OP_MOVNTDQ: /* solaris - hwblkclr & hwblkpagecopy */
780 Log4(("pgmPoolMonitorIsReused: MOVNTDQ\n"));
781 return true;
782 case OP_MOVSWD:
783 case OP_STOSWD:
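/* A REP MOVSQ/STOSQ touching 64 or more qwords is far more likely to be a memcpy/memset
   of a reused page than a page table update, hence the rcx >= 0x40 threshold below. */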
784 if ( pDis->fPrefix == (DISPREFIX_REP|DISPREFIX_REX)
785 && pRegFrame->rcx >= 0x40
786 )
787 {
788 Assert(pDis->uCpuMode == DISCPUMODE_64BIT);
789
790 Log(("pgmPoolMonitorIsReused: OP_STOSQ\n"));
791 return true;
792 }
793 break;
794
795 default:
796 /*
797 * Anything having ESP on the left side means stack writes.
798 */
799 if ( ( (pDis->Param1.fUse & DISUSE_REG_GEN32)
800 || (pDis->Param1.fUse & DISUSE_REG_GEN64))
801 && (pDis->Param1.Base.idxGenReg == DISGREG_ESP))
802 {
803 Log4(("pgmPoolMonitorIsReused: ESP\n"));
804 return true;
805 }
806 break;
807 }
808
809 /*
810 * Page table updates are very, very unlikely to cross page boundaries,
811 * and we don't want to deal with that in pgmPoolMonitorChainChanging and such.
812 */
813 uint32_t const cbWrite = DISGetParamSize(pDis, &pDis->Param1);
814 if ( (((uintptr_t)pvFault + cbWrite) >> X86_PAGE_SHIFT) != ((uintptr_t)pvFault >> X86_PAGE_SHIFT) )
815 {
816 Log4(("pgmPoolMonitorIsReused: cross page write\n"));
817 return true;
818 }
819
820 /*
821 * Nobody does an unaligned 8-byte write to a page table, right?
822 */
823 if (cbWrite >= 8 && ((uintptr_t)pvFault & 7) != 0)
824 {
825 Log4(("pgmPoolMonitorIsReused: Unaligned 8+ byte write\n"));
826 return true;
827 }
828
829 return false;
830}
831
832
833/**
834 * Flushes the page being accessed.
835 *
836 * @returns VBox status code suitable for scheduling.
837 * @param pVM The cross context VM structure.
838 * @param pVCpu The cross context virtual CPU structure.
839 * @param pPool The pool.
840 * @param pPage The pool page (head).
841 * @param pDis The disassembly of the write instruction.
842 * @param pRegFrame The trap register frame.
843 * @param GCPhysFault The fault address as guest physical address.
844 * @param pvFault The fault address.
845 * @todo VBOXSTRICTRC
846 */
847static int pgmPoolAccessPfHandlerFlush(PVM pVM, PVMCPU pVCpu, PPGMPOOL pPool, PPGMPOOLPAGE pPage, PDISCPUSTATE pDis,
848 PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, RTGCPTR pvFault)
849{
850 NOREF(pVM); NOREF(GCPhysFault);
851
852 /*
853 * First, do the flushing.
854 */
855 int rc = pgmPoolMonitorChainFlush(pPool, pPage);
856
857 /*
858 * Emulate the instruction (xp/w2k problem, requires pc/cr2/sp detection).
859 * Must do this in raw mode (!); XP boot will fail otherwise.
860 */
861 VBOXSTRICTRC rc2 = EMInterpretInstructionDisasState(pVCpu, pDis, pRegFrame, pvFault, EMCODETYPE_ALL);
862 if (rc2 == VINF_SUCCESS)
863 { /* do nothing */ }
864 else if (rc2 == VINF_EM_RESCHEDULE)
865 {
866 if (rc == VINF_SUCCESS)
867 rc = VBOXSTRICTRC_VAL(rc2);
868#ifndef IN_RING3
869 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
870#endif
871 }
872 else if (rc2 == VERR_EM_INTERPRETER)
873 {
874#ifdef IN_RC
875 if (PATMIsPatchGCAddr(pVM, pRegFrame->eip))
876 {
877 LogFlow(("pgmPoolAccessPfHandlerPTWorker: Interpretation failed for patch code %04x:%RGv, ignoring.\n",
878 pRegFrame->cs.Sel, (RTGCPTR)pRegFrame->eip));
879 rc = VINF_SUCCESS;
880 STAM_COUNTER_INC(&pPool->StatMonitorRZIntrFailPatch2);
881 }
882 else
883#endif
884 {
885 rc = VINF_EM_RAW_EMULATE_INSTR;
886 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,EmulateInstr));
887 }
888 }
889 else if (RT_FAILURE_NP(rc2))
890 rc = VBOXSTRICTRC_VAL(rc2);
891 else
892 AssertMsgFailed(("%Rrc\n", VBOXSTRICTRC_VAL(rc2))); /* ASSUMES no complicated stuff here. */
893
894 LogFlow(("pgmPoolAccessPfHandlerPT: returns %Rrc (flushed)\n", rc));
895 return rc;
896}
897
898
899/**
900 * Handles the STOSD write accesses.
901 *
902 * @returns VBox status code suitable for scheduling.
903 * @param pVM The cross context VM structure.
904 * @param pPool The pool.
905 * @param pPage The pool page (head).
906 * @param pDis The disassembly of the write instruction.
907 * @param pRegFrame The trap register frame.
908 * @param GCPhysFault The fault address as guest physical address.
909 * @param pvFault The fault address.
910 */
911DECLINLINE(int) pgmPoolAccessPfHandlerSTOSD(PVM pVM, PPGMPOOL pPool, PPGMPOOLPAGE pPage, PDISCPUSTATE pDis,
912 PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, RTGCPTR pvFault)
913{
914 unsigned uIncrement = pDis->Param1.cb;
915 NOREF(pVM);
916
917 Assert(pDis->uCpuMode == DISCPUMODE_32BIT || pDis->uCpuMode == DISCPUMODE_64BIT);
918 Assert(pRegFrame->rcx <= 0x20);
919
920#ifdef VBOX_STRICT
921 if (pDis->uOpMode == DISCPUMODE_32BIT)
922 Assert(uIncrement == 4);
923 else
924 Assert(uIncrement == 8);
925#endif
926
927 Log3(("pgmPoolAccessPfHandlerSTOSD\n"));
928
929 /*
930 * Increment the modification counter and insert it into the list
931 * of modified pages the first time.
932 */
933 if (!pPage->cModifications++)
934 pgmPoolMonitorModifiedInsert(pPool, pPage);
935
936 /*
937 * Execute REP STOSD.
938 *
939 * This ASSUMES that we're not invoked by Trap0e in an out-of-sync
940 * write situation, meaning that it's safe to write here.
941 */
942 PVMCPU pVCpu = VMMGetCpu(pPool->CTX_SUFF(pVM));
943 RTGCUINTPTR pu32 = (RTGCUINTPTR)pvFault;
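/* Each iteration lets the monitor code clear the affected shadow entries first, then
   performs the guest's store and advances RDI/RCX just like the CPU would have. */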
944 while (pRegFrame->rcx)
945 {
946#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
947 uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu);
948 pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, NULL, uIncrement);
949 PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset);
950#else
951 pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, NULL, uIncrement);
952#endif
953#ifdef IN_RC
954 *(uint32_t *)(uintptr_t)pu32 = pRegFrame->eax;
955#else
956 PGMPhysSimpleWriteGCPhys(pVM, GCPhysFault, &pRegFrame->rax, uIncrement);
957#endif
958 pu32 += uIncrement;
959 GCPhysFault += uIncrement;
960 pRegFrame->rdi += uIncrement;
961 pRegFrame->rcx--;
962 }
963 pRegFrame->rip += pDis->cbInstr;
964
965 LogFlow(("pgmPoolAccessPfHandlerSTOSD: returns\n"));
966 return VINF_SUCCESS;
967}
968
969
970/**
971 * Handles the simple write accesses.
972 *
973 * @returns VBox status code suitable for scheduling.
974 * @param pVM The cross context VM structure.
975 * @param pVCpu The cross context virtual CPU structure.
976 * @param pPool The pool.
977 * @param pPage The pool page (head).
978 * @param pDis The disassembly of the write instruction.
979 * @param pRegFrame The trap register frame.
980 * @param GCPhysFault The fault address as guest physical address.
981 * @param pvFault The fault address.
982 * @param pfReused Reused state (in/out)
983 */
984DECLINLINE(int) pgmPoolAccessPfHandlerSimple(PVM pVM, PVMCPU pVCpu, PPGMPOOL pPool, PPGMPOOLPAGE pPage, PDISCPUSTATE pDis,
985 PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, RTGCPTR pvFault, bool *pfReused)
986{
987 Log3(("pgmPoolAccessPfHandlerSimple\n"));
988 NOREF(pVM);
989 NOREF(pfReused); /* initialized by caller */
990
991 /*
992 * Increment the modification counter and insert it into the list
993 * of modified pages the first time.
994 */
995 if (!pPage->cModifications++)
996 pgmPoolMonitorModifiedInsert(pPool, pPage);
997
998 /*
999 * Clear all the pages. ASSUMES that pvFault is readable.
1000 */
1001#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
1002 uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu);
1003#endif
1004
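/* pgmPoolMonitorChainChanging is written for small writes (one entry plus a possible
   spill into the next), so wider accesses (up to 32 bytes can reach us here) are fed
   to it in chunks of at most 8 bytes. */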
1005 uint32_t cbWrite = DISGetParamSize(pDis, &pDis->Param1);
1006 if (cbWrite <= 8)
1007 pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, NULL, cbWrite);
1008 else if (cbWrite <= 16)
1009 {
1010 pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, NULL, 8);
1011 pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault + 8, NULL, cbWrite - 8);
1012 }
1013 else
1014 {
1015 Assert(cbWrite <= 32);
1016 for (uint32_t off = 0; off < cbWrite; off += 8)
1017 pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault + off, NULL, RT_MIN(8, cbWrite - off));
1018 }
1019
1020#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
1021 PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset);
1022#endif
1023
1024 /*
1025 * Interpret the instruction.
1026 */
1027 VBOXSTRICTRC rc = EMInterpretInstructionDisasState(pVCpu, pDis, pRegFrame, pvFault, EMCODETYPE_ALL);
1028 if (RT_SUCCESS(rc))
1029 AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rc))); /* ASSUMES no complicated stuff here. */
1030 else if (rc == VERR_EM_INTERPRETER)
1031 {
1032 LogFlow(("pgmPoolAccessPfHandlerPTWorker: Interpretation failed for %04x:%RGv - opcode=%d\n",
1033 pRegFrame->cs.Sel, (RTGCPTR)pRegFrame->rip, pDis->pCurInstr->uOpcode));
1034 rc = VINF_EM_RAW_EMULATE_INSTR;
1035 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,EmulateInstr));
1036 }
1037
1038#if 0 /* experimental code */
1039 if (rc == VINF_SUCCESS)
1040 {
1041 switch (pPage->enmKind)
1042 {
1043 case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
1044 {
1045 X86PTEPAE GstPte;
1046 int rc = pgmPoolPhysSimpleReadGCPhys(pVM, &GstPte, pvFault, GCPhysFault, sizeof(GstPte));
1047 AssertRC(rc);
1048
1049 /* Check the new value written by the guest. If present and with a bogus physical address, then
1050 * it's fairly safe to assume the guest is reusing the PT.
1051 */
1052 if (GstPte.n.u1Present)
1053 {
1054 RTHCPHYS HCPhys = -1;
1055 int rc = PGMPhysGCPhys2HCPhys(pVM, GstPte.u & X86_PTE_PAE_PG_MASK, &HCPhys);
1056 if (rc != VINF_SUCCESS)
1057 {
1058 *pfReused = true;
1059 STAM_COUNTER_INC(&pPool->StatForceFlushReused);
1060 }
1061 }
1062 break;
1063 }
1064 }
1065 }
1066#endif
1067
1068 LogFlow(("pgmPoolAccessPfHandlerSimple: returns %Rrc\n", VBOXSTRICTRC_VAL(rc)));
1069 return VBOXSTRICTRC_VAL(rc);
1070}
1071
1072
1073/**
1074 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
1075 * \#PF access handler callback for page table pages.}
1076 *
1077 * @remarks The @a pvUser argument points to the PGMPOOLPAGE.
1078 */
1079DECLEXPORT(VBOXSTRICTRC) pgmPoolAccessPfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
1080 RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
1081{
1082 STAM_PROFILE_START(&pVM->pgm.s.CTX_SUFF(pPool)->CTX_SUFF_Z(StatMonitor), a);
1083 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1084 PPGMPOOLPAGE pPage = (PPGMPOOLPAGE)pvUser;
1085 unsigned cMaxModifications;
1086 bool fForcedFlush = false;
1087 NOREF(uErrorCode);
1088
1089 LogFlow(("pgmPoolAccessPfHandler: pvFault=%RGv pPage=%p:{.idx=%d} GCPhysFault=%RGp\n", pvFault, pPage, pPage->idx, GCPhysFault));
1090
1091 pgmLock(pVM);
1092 if (PHYS_PAGE_ADDRESS(GCPhysFault) != PHYS_PAGE_ADDRESS(pPage->GCPhys))
1093 {
1094 /* Pool page changed while we were waiting for the lock; ignore. */
1095 Log(("CPU%d: pgmPoolAccessPfHandler pgm pool page for %RGp changed (to %RGp) while waiting!\n", pVCpu->idCpu, PHYS_PAGE_ADDRESS(GCPhysFault), PHYS_PAGE_ADDRESS(pPage->GCPhys)));
1096 STAM_PROFILE_STOP_EX(&pVM->pgm.s.CTX_SUFF(pPool)->CTX_SUFF_Z(StatMonitor), &pPool->CTX_MID_Z(StatMonitor,Handled), a);
1097 pgmUnlock(pVM);
1098 return VINF_SUCCESS;
1099 }
1100#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
1101 if (pPage->fDirty)
1102 {
1103 Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH));
1104 pgmUnlock(pVM);
1105 return VINF_SUCCESS; /* SMP guest case where we were blocking on the pgm lock while the same page was being marked dirty. */
1106 }
1107#endif
1108
1109#if 0 /* test code defined(VBOX_STRICT) && defined(PGMPOOL_WITH_OPTIMIZED_DIRTY_PT) */
1110 if (pPage->enmKind == PGMPOOLKIND_PAE_PT_FOR_PAE_PT)
1111 {
1112 void *pvShw = PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage);
1113 void *pvGst;
1114 int rc = PGM_GCPHYS_2_PTR(pPool->CTX_SUFF(pVM), pPage->GCPhys, &pvGst); AssertReleaseRC(rc);
1115 pgmPoolTrackCheckPTPaePae(pPool, pPage, (PPGMSHWPTPAE)pvShw, (PCX86PTPAE)pvGst);
1116 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pvGst);
1117 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pvShw);
1118 }
1119#endif
1120
1121 /*
1122 * Disassemble the faulting instruction.
1123 */
1124 PDISCPUSTATE pDis = &pVCpu->pgm.s.DisState;
1125 int rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, NULL);
1126 if (RT_UNLIKELY(rc != VINF_SUCCESS))
1127 {
1128 AssertMsg(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("Unexpected rc %d\n", rc));
1129 pgmUnlock(pVM);
1130 return rc;
1131 }
1132
1133 Assert(pPage->enmKind != PGMPOOLKIND_FREE);
1134
1135 /*
1136 * We should ALWAYS have the list head as user parameter. This
1137 * is because we use that page to record the changes.
1138 */
1139 Assert(pPage->iMonitoredPrev == NIL_PGMPOOL_IDX);
1140
1141#ifdef IN_RING0
1142 /* Maximum nr of modifications depends on the page type. */
1143 if ( pPage->enmKind == PGMPOOLKIND_PAE_PT_FOR_PAE_PT
1144 || pPage->enmKind == PGMPOOLKIND_PAE_PT_FOR_32BIT_PT)
1145 cMaxModifications = 4;
1146 else
1147 cMaxModifications = 24;
1148#else
1149 cMaxModifications = 48;
1150#endif
1151
1152 /*
1153 * Incremental page table updates should weigh more than random ones.
1154 * (Only applies when started from offset 0)
1155 */
1156 pVCpu->pgm.s.cPoolAccessHandler++;
1157 if ( pPage->GCPtrLastAccessHandlerRip >= pRegFrame->rip - 0x40 /* observed loops in Windows 7 x64 */
1158 && pPage->GCPtrLastAccessHandlerRip < pRegFrame->rip + 0x40
1159 && pvFault == (pPage->GCPtrLastAccessHandlerFault + pDis->Param1.cb)
1160 && pVCpu->pgm.s.cPoolAccessHandler == pPage->cLastAccessHandler + 1)
1161 {
1162 Log(("Possible page reuse cMods=%d -> %d (locked=%d type=%s)\n", pPage->cModifications, pPage->cModifications * 2, pgmPoolIsPageLocked(pPage), pgmPoolPoolKindToStr(pPage->enmKind)));
1163 Assert(pPage->cModifications < 32000);
1164 pPage->cModifications = pPage->cModifications * 2;
1165 pPage->GCPtrLastAccessHandlerFault = pvFault;
1166 pPage->cLastAccessHandler = pVCpu->pgm.s.cPoolAccessHandler;
1167 if (pPage->cModifications >= cMaxModifications)
1168 {
1169 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FlushReinit));
1170 fForcedFlush = true;
1171 }
1172 }
1173
1174 if (pPage->cModifications >= cMaxModifications)
1175 Log(("Mod overflow %RGv cMods=%d (locked=%d type=%s)\n", pvFault, pPage->cModifications, pgmPoolIsPageLocked(pPage), pgmPoolPoolKindToStr(pPage->enmKind)));
1176
1177 /*
1178 * Check if it's worth dealing with.
1179 */
1180 bool fReused = false;
1181 bool fNotReusedNotForking = false;
1182 if ( ( pPage->cModifications < cMaxModifications /** @todo \#define */ /** @todo need to check that it's not mapping EIP. */ /** @todo adjust this! */
1183 || pgmPoolIsPageLocked(pPage)
1184 )
1185 && !(fReused = pgmPoolMonitorIsReused(pVM, pVCpu, pRegFrame, pDis, pvFault))
1186 && !pgmPoolMonitorIsForking(pPool, pDis, GCPhysFault & PAGE_OFFSET_MASK))
1187 {
1188 /*
1189 * Simple instructions, no REP prefix.
1190 */
1191 if (!(pDis->fPrefix & (DISPREFIX_REP | DISPREFIX_REPNE)))
1192 {
1193 rc = pgmPoolAccessPfHandlerSimple(pVM, pVCpu, pPool, pPage, pDis, pRegFrame, GCPhysFault, pvFault, &fReused);
1194 if (fReused)
1195 goto flushPage;
1196
1197 /* A mov instruction to change the first page table entry will be remembered so we can detect
1198 * full page table changes early on. This will reduce the number of unnecessary traps we'll take.
1199 */
1200 if ( rc == VINF_SUCCESS
1201 && !pPage->cLocked /* only applies to unlocked pages as we can't free locked ones (e.g. cr3 root). */
1202 && pDis->pCurInstr->uOpcode == OP_MOV
1203 && (pvFault & PAGE_OFFSET_MASK) == 0)
1204 {
1205 pPage->GCPtrLastAccessHandlerFault = pvFault;
1206 pPage->cLastAccessHandler = pVCpu->pgm.s.cPoolAccessHandler;
1207 pPage->GCPtrLastAccessHandlerRip = pRegFrame->rip;
1208 /* Make sure we don't kick out a page too quickly. */
1209 if (pPage->cModifications > 8)
1210 pPage->cModifications = 2;
1211 }
1212 else if (pPage->GCPtrLastAccessHandlerFault == pvFault)
1213 {
1214 /* ignore the 2nd write to this page table entry. */
1215 pPage->cLastAccessHandler = pVCpu->pgm.s.cPoolAccessHandler;
1216 }
1217 else
1218 {
1219 pPage->GCPtrLastAccessHandlerFault = NIL_RTGCPTR;
1220 pPage->GCPtrLastAccessHandlerRip = 0;
1221 }
1222
1223 STAM_PROFILE_STOP_EX(&pVM->pgm.s.CTX_SUFF(pPool)->CTX_SUFF_Z(StatMonitor), &pPool->CTX_MID_Z(StatMonitor,Handled), a);
1224 pgmUnlock(pVM);
1225 return rc;
1226 }
1227
1228 /*
1229 * Windows is frequently doing small memset() operations (netio test 4k+).
1230 * We have to deal with these or we'll kill the cache and performance.
1231 */
1232 if ( pDis->pCurInstr->uOpcode == OP_STOSWD
1233 && !pRegFrame->eflags.Bits.u1DF
1234 && pDis->uOpMode == pDis->uCpuMode
1235 && pDis->uAddrMode == pDis->uCpuMode)
1236 {
1237 bool fValidStosd = false;
1238
1239 if ( pDis->uCpuMode == DISCPUMODE_32BIT
1240 && pDis->fPrefix == DISPREFIX_REP
1241 && pRegFrame->ecx <= 0x20
1242 && pRegFrame->ecx * 4 <= PAGE_SIZE - ((uintptr_t)pvFault & PAGE_OFFSET_MASK)
1243 && !((uintptr_t)pvFault & 3)
1244 && (pRegFrame->eax == 0 || pRegFrame->eax == 0x80) /* the two values observed. */
1245 )
1246 {
1247 fValidStosd = true;
1248 pRegFrame->rcx &= 0xffffffff; /* paranoia */
1249 }
1250 else
1251 if ( pDis->uCpuMode == DISCPUMODE_64BIT
1252 && pDis->fPrefix == (DISPREFIX_REP | DISPREFIX_REX)
1253 && pRegFrame->rcx <= 0x20
1254 && pRegFrame->rcx * 8 <= PAGE_SIZE - ((uintptr_t)pvFault & PAGE_OFFSET_MASK)
1255 && !((uintptr_t)pvFault & 7)
1256 && (pRegFrame->rax == 0 || pRegFrame->rax == 0x80) /* the two values observed. */
1257 )
1258 {
1259 fValidStosd = true;
1260 }
1261
1262 if (fValidStosd)
1263 {
1264 rc = pgmPoolAccessPfHandlerSTOSD(pVM, pPool, pPage, pDis, pRegFrame, GCPhysFault, pvFault);
1265 STAM_PROFILE_STOP_EX(&pVM->pgm.s.CTX_SUFF(pPool)->CTX_SUFF_Z(StatMonitor), &pPool->CTX_MID_Z(StatMonitor,RepStosd), a);
1266 pgmUnlock(pVM);
1267 return rc;
1268 }
1269 }
1270
1271 /* REP prefix, don't bother. */
1272 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,RepPrefix));
1273 Log4(("pgmPoolAccessPfHandler: eax=%#x ecx=%#x edi=%#x esi=%#x rip=%RGv opcode=%d prefix=%#x\n",
1274 pRegFrame->eax, pRegFrame->ecx, pRegFrame->edi, pRegFrame->esi, (RTGCPTR)pRegFrame->rip, pDis->pCurInstr->uOpcode, pDis->fPrefix));
1275 fNotReusedNotForking = true;
1276 }
1277
1278#if defined(PGMPOOL_WITH_OPTIMIZED_DIRTY_PT) && defined(IN_RING0)
1279 /* E.g. Windows 7 x64 initializes page tables and touches some pages in the table during the process. This
1280 * leads to pgm pool trashing and an excessive number of write faults due to page monitoring.
1281 */
1282 if ( pPage->cModifications >= cMaxModifications
1283 && !fForcedFlush
1284 && (pPage->enmKind == PGMPOOLKIND_PAE_PT_FOR_PAE_PT || pPage->enmKind == PGMPOOLKIND_PAE_PT_FOR_32BIT_PT)
1285 && ( fNotReusedNotForking
1286 || ( !pgmPoolMonitorIsReused(pVM, pVCpu, pRegFrame, pDis, pvFault)
1287 && !pgmPoolMonitorIsForking(pPool, pDis, GCPhysFault & PAGE_OFFSET_MASK))
1288 )
1289 )
1290 {
1291 Assert(!pgmPoolIsPageLocked(pPage));
1292 Assert(pPage->fDirty == false);
1293
1294 /* Flush any monitored duplicates as we will disable write protection. */
1295 if ( pPage->iMonitoredNext != NIL_PGMPOOL_IDX
1296 || pPage->iMonitoredPrev != NIL_PGMPOOL_IDX)
1297 {
1298 PPGMPOOLPAGE pPageHead = pPage;
1299
1300 /* Find the monitor head. */
1301 while (pPageHead->iMonitoredPrev != NIL_PGMPOOL_IDX)
1302 pPageHead = &pPool->aPages[pPageHead->iMonitoredPrev];
1303
1304 while (pPageHead)
1305 {
1306 unsigned idxNext = pPageHead->iMonitoredNext;
1307
1308 if (pPageHead != pPage)
1309 {
1310 STAM_COUNTER_INC(&pPool->StatDirtyPageDupFlush);
1311 Log(("Flush duplicate page idx=%d GCPhys=%RGp type=%s\n", pPageHead->idx, pPageHead->GCPhys, pgmPoolPoolKindToStr(pPageHead->enmKind)));
1312 int rc2 = pgmPoolFlushPage(pPool, pPageHead);
1313 AssertRC(rc2);
1314 }
1315
1316 if (idxNext == NIL_PGMPOOL_IDX)
1317 break;
1318
1319 pPageHead = &pPool->aPages[idxNext];
1320 }
1321 }
1322
1323 /* The flushing above might fail for locked pages, so double check. */
1324 if ( pPage->iMonitoredNext == NIL_PGMPOOL_IDX
1325 && pPage->iMonitoredPrev == NIL_PGMPOOL_IDX)
1326 {
1327 pgmPoolAddDirtyPage(pVM, pPool, pPage);
1328
1329 /* Temporarily allow write access to the page table again. */
1330 rc = PGMHandlerPhysicalPageTempOff(pVM, pPage->GCPhys & PAGE_BASE_GC_MASK, pPage->GCPhys & PAGE_BASE_GC_MASK);
1331 if (rc == VINF_SUCCESS)
1332 {
1333 rc = PGMShwMakePageWritable(pVCpu, pvFault, PGM_MK_PG_IS_WRITE_FAULT);
1334 AssertMsg(rc == VINF_SUCCESS
1335 /* In the SMP case the page table might be removed while we wait for the PGM lock in the trap handler. */
1336 || rc == VERR_PAGE_TABLE_NOT_PRESENT
1337 || rc == VERR_PAGE_NOT_PRESENT,
1338 ("PGMShwModifyPage -> GCPtr=%RGv rc=%d\n", pvFault, rc));
1339# ifdef VBOX_STRICT
1340 pPage->GCPtrDirtyFault = pvFault;
1341# endif
1342
1343 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pPool)->CTX_SUFF_Z(StatMonitor), a);
1344 pgmUnlock(pVM);
1345 return rc;
1346 }
1347 }
1348 }
1349#endif /* PGMPOOL_WITH_OPTIMIZED_DIRTY_PT */
1350
1351 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FlushModOverflow));
1352flushPage:
1353 /*
1354 * Not worth it, so flush it.
1355 *
1356 * If we considered it to be reused, don't go back to ring-3
1357 * to emulate failed instructions since we usually cannot
1358 * interpret them. This may be a bit risky, in which case
1359 * the reuse detection must be fixed.
1360 */
1361 rc = pgmPoolAccessPfHandlerFlush(pVM, pVCpu, pPool, pPage, pDis, pRegFrame, GCPhysFault, pvFault);
1362 if ( rc == VINF_EM_RAW_EMULATE_INSTR
1363 && fReused)
1364 {
1365 /* Make sure that the current instruction still has shadow page backing, otherwise we'll end up in a loop. */
1366 if (PGMShwGetPage(pVCpu, pRegFrame->rip, NULL, NULL) == VINF_SUCCESS)
1367 rc = VINF_SUCCESS; /* safe to restart the instruction. */
1368 }
1369 STAM_PROFILE_STOP_EX(&pVM->pgm.s.CTX_SUFF(pPool)->CTX_SUFF_Z(StatMonitor), &pPool->CTX_MID_Z(StatMonitor,FlushPage), a);
1370 pgmUnlock(pVM);
1371 return rc;
1372}
1373
1374# endif /* !IN_RING3 */
1375
1376/**
1377 * @callback_method_impl{FNPGMPHYSHANDLER,
1378 * Access handler for shadowed page table pages.}
1379 *
1380 * @remarks Only uses the VINF_PGM_HANDLER_DO_DEFAULT status.
1381 */
1382PGM_ALL_CB2_DECL(VBOXSTRICTRC)
1383pgmPoolAccessHandler(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
1384 PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
1385{
1386 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1387 STAM_PROFILE_START(&pPool->StatMonitorR3, a);
1388 PPGMPOOLPAGE pPage = (PPGMPOOLPAGE)pvUser;
1389 LogFlow(("PGM_ALL_CB_DECL: GCPhys=%RGp %p:{.Core=%RHp, .idx=%d, .GCPhys=%RGp, .enmType=%d}\n",
1390 GCPhys, pPage, pPage->Core.Key, pPage->idx, pPage->GCPhys, pPage->enmKind));
1391
1392 NOREF(pvPhys); NOREF(pvBuf); NOREF(enmAccessType);
1393
1394 /*
1395 * Make sure the pool page wasn't modified by a different CPU.
1396 */
1397 pgmLock(pVM);
1398 if (PHYS_PAGE_ADDRESS(GCPhys) == PHYS_PAGE_ADDRESS(pPage->GCPhys))
1399 {
1400 Assert(pPage->enmKind != PGMPOOLKIND_FREE);
1401
1402 /* The max modification count before flushing depends on the context and page type. */
1403#ifdef IN_RING3
1404 uint16_t const cMaxModifications = 96; /* it's cheaper here, right? */
1405#else
1406 uint16_t cMaxModifications;
1407 if ( pPage->enmKind == PGMPOOLKIND_PAE_PT_FOR_PAE_PT
1408 || pPage->enmKind == PGMPOOLKIND_PAE_PT_FOR_32BIT_PT)
1409 cMaxModifications = 4;
1410 else
1411 cMaxModifications = 24;
1412# ifdef IN_RC
1413 cMaxModifications *= 2; /* traps are cheaper than exits. */
1414# endif
1415#endif
1416
1417 /*
1418 * We don't have to be very sophisticated about this since there are relatively few calls here.
1419 * However, we must try our best to detect any non-cpu accesses (disk / networking).
1420 */
1421 if ( ( pPage->cModifications < cMaxModifications
1422 || pgmPoolIsPageLocked(pPage) )
1423 && enmOrigin != PGMACCESSORIGIN_DEVICE
1424 && cbBuf <= 16)
1425 {
1426 /* Clear the shadow entry. */
1427 if (!pPage->cModifications++)
1428 pgmPoolMonitorModifiedInsert(pPool, pPage);
1429
1430 if (cbBuf <= 8)
1431 pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhys, pvBuf, (uint32_t)cbBuf);
1432 else
1433 {
1434 pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhys, pvBuf, 8);
1435 pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhys + 8, (uint8_t *)pvBuf + 8, (uint32_t)cbBuf - 8);
1436 }
1437 }
1438 else
1439 {
1440 /* ASSUME that VERR_PGM_POOL_CLEARED can be ignored here and that FFs will deal with it in due time. */
1441 pgmPoolMonitorChainFlush(pPool, pPage);
1442 }
1443
1444 STAM_PROFILE_STOP_EX(&pPool->StatMonitorR3, &pPool->StatMonitorR3FlushPage, a);
1445 }
1446 else
1447 Log(("CPU%d: PGM_ALL_CB_DECL pgm pool page for %RGp changed (to %RGp) while waiting!\n", pVCpu->idCpu, PHYS_PAGE_ADDRESS(GCPhys), PHYS_PAGE_ADDRESS(pPage->GCPhys)));
1448 pgmUnlock(pVM);
1449 return VINF_PGM_HANDLER_DO_DEFAULT;
1450}
1451
1452
1453# ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
1454
1455# if defined(VBOX_STRICT) && !defined(IN_RING3)
1456
1457/**
1458 * Check references to guest physical memory in a PAE / PAE page table.
1459 *
1460 * @param pPool The pool.
1461 * @param pPage The page.
1462 * @param pShwPT The shadow page table (mapping of the page).
1463 * @param pGstPT The guest page table.
1464 */
1465static void pgmPoolTrackCheckPTPaePae(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PPGMSHWPTPAE pShwPT, PCX86PTPAE pGstPT)
1466{
1467 unsigned cErrors = 0;
1468 int LastRc = -1; /* initialized to shut up gcc */
1469 unsigned LastPTE = ~0U; /* initialized to shut up gcc */
1470 RTHCPHYS LastHCPhys = NIL_RTHCPHYS; /* initialized to shut up gcc */
1471 PVM pVM = pPool->CTX_SUFF(pVM);
1472
1473#ifdef VBOX_STRICT
1474 for (unsigned i = 0; i < RT_MIN(RT_ELEMENTS(pShwPT->a), pPage->iFirstPresent); i++)
1475 AssertMsg(!PGMSHWPTEPAE_IS_P(pShwPT->a[i]), ("Unexpected PTE: idx=%d %RX64 (first=%d)\n", i, PGMSHWPTEPAE_GET_LOG(pShwPT->a[i]), pPage->iFirstPresent));
1476#endif
1477 for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pShwPT->a); i++)
1478 {
1479 if (PGMSHWPTEPAE_IS_P(pShwPT->a[i]))
1480 {
1481 RTHCPHYS HCPhys = NIL_RTHCPHYS;
1482 int rc = PGMPhysGCPhys2HCPhys(pVM, pGstPT->a[i].u & X86_PTE_PAE_PG_MASK, &HCPhys);
1483 if ( rc != VINF_SUCCESS
1484 || PGMSHWPTEPAE_GET_HCPHYS(pShwPT->a[i]) != HCPhys)
1485 {
1486 Log(("rc=%d idx=%d guest %RX64 shw=%RX64 vs %RHp\n", rc, i, pGstPT->a[i].u, PGMSHWPTEPAE_GET_LOG(pShwPT->a[i]), HCPhys));
1487 LastPTE = i;
1488 LastRc = rc;
1489 LastHCPhys = HCPhys;
1490 cErrors++;
1491
1492 RTHCPHYS HCPhysPT = NIL_RTHCPHYS;
1493 rc = PGMPhysGCPhys2HCPhys(pVM, pPage->GCPhys, &HCPhysPT);
1494 AssertRC(rc);
1495
1496 for (unsigned iPage = 0; iPage < pPool->cCurPages; iPage++)
1497 {
1498 PPGMPOOLPAGE pTempPage = &pPool->aPages[iPage];
1499
1500 if (pTempPage->enmKind == PGMPOOLKIND_PAE_PT_FOR_PAE_PT)
1501 {
1502 PPGMSHWPTPAE pShwPT2 = (PPGMSHWPTPAE)PGMPOOL_PAGE_2_PTR(pVM, pTempPage);
1503
1504 for (unsigned j = 0; j < RT_ELEMENTS(pShwPT->a); j++)
1505 {
1506 if ( PGMSHWPTEPAE_IS_P_RW(pShwPT2->a[j])
1507 && PGMSHWPTEPAE_GET_HCPHYS(pShwPT2->a[j]) == HCPhysPT)
1508 {
1509 Log(("GCPhys=%RGp idx=%d %RX64 vs %RX64\n", pTempPage->GCPhys, j, PGMSHWPTEPAE_GET_LOG(pShwPT->a[j]), PGMSHWPTEPAE_GET_LOG(pShwPT2->a[j])));
1510 }
1511 }
1512
1513 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pShwPT2);
1514 }
1515 }
1516 }
1517 }
1518 }
1519 AssertMsg(!cErrors, ("cErrors=%d: last rc=%d idx=%d guest %RX64 shw=%RX64 vs %RHp\n", cErrors, LastRc, LastPTE, pGstPT->a[LastPTE].u, PGMSHWPTEPAE_GET_LOG(pShwPT->a[LastPTE]), LastHCPhys));
1520}
1521
1522
1523/**
1524 * Check references to guest physical memory in a PAE / 32-bit page table.
1525 *
1526 * @param pPool The pool.
1527 * @param pPage The page.
1528 * @param pShwPT The shadow page table (mapping of the page).
1529 * @param pGstPT The guest page table.
1530 */
1531static void pgmPoolTrackCheckPTPae32Bit(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PPGMSHWPTPAE pShwPT, PCX86PT pGstPT)
1532{
1533 unsigned cErrors = 0;
1534 int LastRc = -1; /* initialized to shut up gcc */
1535 unsigned LastPTE = ~0U; /* initialized to shut up gcc */
1536 RTHCPHYS LastHCPhys = NIL_RTHCPHYS; /* initialized to shut up gcc */
1537 PVM pVM = pPool->CTX_SUFF(pVM);
1538
1539#ifdef VBOX_STRICT
1540 for (unsigned i = 0; i < RT_MIN(RT_ELEMENTS(pShwPT->a), pPage->iFirstPresent); i++)
1541 AssertMsg(!PGMSHWPTEPAE_IS_P(pShwPT->a[i]), ("Unexpected PTE: idx=%d %RX64 (first=%d)\n", i, PGMSHWPTEPAE_GET_LOG(pShwPT->a[i]), pPage->iFirstPresent));
1542#endif
1543 for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pShwPT->a); i++)
1544 {
1545 if (PGMSHWPTEPAE_IS_P(pShwPT->a[i]))
1546 {
1547 RTHCPHYS HCPhys = NIL_RTHCPHYS;
1548 int rc = PGMPhysGCPhys2HCPhys(pVM, pGstPT->a[i].u & X86_PTE_PG_MASK, &HCPhys);
1549 if ( rc != VINF_SUCCESS
1550 || PGMSHWPTEPAE_GET_HCPHYS(pShwPT->a[i]) != HCPhys)
1551 {
1552 Log(("rc=%d idx=%d guest %x shw=%RX64 vs %RHp\n", rc, i, pGstPT->a[i].u, PGMSHWPTEPAE_GET_LOG(pShwPT->a[i]), HCPhys));
1553 LastPTE = i;
1554 LastRc = rc;
1555 LastHCPhys = HCPhys;
1556 cErrors++;
1557
1558 RTHCPHYS HCPhysPT = NIL_RTHCPHYS;
1559 rc = PGMPhysGCPhys2HCPhys(pVM, pPage->GCPhys, &HCPhysPT);
1560 AssertRC(rc);
1561
1562 for (unsigned iPage = 0; iPage < pPool->cCurPages; iPage++)
1563 {
1564 PPGMPOOLPAGE pTempPage = &pPool->aPages[iPage];
1565
1566 if (pTempPage->enmKind == PGMPOOLKIND_PAE_PT_FOR_32BIT_PT)
1567 {
1568 PPGMSHWPTPAE pShwPT2 = (PPGMSHWPTPAE)PGMPOOL_PAGE_2_PTR(pVM, pTempPage);
1569
1570 for (unsigned j = 0; j < RT_ELEMENTS(pShwPT->a); j++)
1571 {
1572 if ( PGMSHWPTEPAE_IS_P_RW(pShwPT2->a[j])
1573 && PGMSHWPTEPAE_GET_HCPHYS(pShwPT2->a[j]) == HCPhysPT)
1574 {
1575 Log(("GCPhys=%RGp idx=%d %RX64 vs %RX64\n", pTempPage->GCPhys, j, PGMSHWPTEPAE_GET_LOG(pShwPT->a[j]), PGMSHWPTEPAE_GET_LOG(pShwPT2->a[j])));
1576 }
1577 }
1578
1579 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pShwPT2);
1580 }
1581 }
1582 }
1583 }
1584 }
1585 AssertMsg(!cErrors, ("cErrors=%d: last rc=%d idx=%d guest %x shw=%RX64 vs %RHp\n", cErrors, LastRc, LastPTE, pGstPT->a[LastPTE].u, PGMSHWPTEPAE_GET_LOG(pShwPT->a[LastPTE]), LastHCPhys));
1586}
1587
1588# endif /* VBOX_STRICT && !IN_RING3 */
1589
1590/**
1591 * Clear references to guest physical memory in a PAE / PAE page table.
1592 *
1593 * @returns nr of changed PTEs
1594 * @param pPool The pool.
1595 * @param pPage The page.
1596 * @param pShwPT The shadow page table (mapping of the page).
1597 * @param pGstPT The guest page table.
1598 * @param pOldGstPT The old cached guest page table.
1599 * @param fAllowRemoval Bail out as soon as we encounter an invalid PTE
1600 * @param pfFlush Flush reused page table (out)
1601 */
1602DECLINLINE(unsigned) pgmPoolTrackFlushPTPaePae(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PPGMSHWPTPAE pShwPT, PCX86PTPAE pGstPT,
1603 PCX86PTPAE pOldGstPT, bool fAllowRemoval, bool *pfFlush)
1604{
1605 unsigned cChanged = 0;
1606
1607#ifdef VBOX_STRICT
1608 for (unsigned i = 0; i < RT_MIN(RT_ELEMENTS(pShwPT->a), pPage->iFirstPresent); i++)
1609 AssertMsg(!PGMSHWPTEPAE_IS_P(pShwPT->a[i]), ("Unexpected PTE: idx=%d %RX64 (first=%d)\n", i, PGMSHWPTEPAE_GET_LOG(pShwPT->a[i]), pPage->iFirstPresent));
1610#endif
1611 *pfFlush = false;
1612
1613 for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pShwPT->a); i++)
1614 {
1615 /* Check the new value written by the guest. If present and with a bogus physical address, then
1616 * it's fairly safe to assume the guest is reusing the PT.
1617 */
1618 if ( fAllowRemoval
1619 && pGstPT->a[i].n.u1Present)
1620 {
1621 if (!PGMPhysIsGCPhysValid(pPool->CTX_SUFF(pVM), pGstPT->a[i].u & X86_PTE_PAE_PG_MASK))
1622 {
1623 *pfFlush = true;
1624 return ++cChanged;
1625 }
1626 }
1627 if (PGMSHWPTEPAE_IS_P(pShwPT->a[i]))
1628 {
1629 /* If the old cached PTE is identical, then there's no need to flush the shadow copy. */
1630 if ((pGstPT->a[i].u & X86_PTE_PAE_PG_MASK) == (pOldGstPT->a[i].u & X86_PTE_PAE_PG_MASK))
1631 {
1632#ifdef VBOX_STRICT
1633 RTHCPHYS HCPhys = NIL_RTHCPHYS;
1634 int rc = PGMPhysGCPhys2HCPhys(pPool->CTX_SUFF(pVM), pGstPT->a[i].u & X86_PTE_PAE_PG_MASK, &HCPhys);
1635 AssertMsg(rc == VINF_SUCCESS && PGMSHWPTEPAE_GET_HCPHYS(pShwPT->a[i]) == HCPhys, ("rc=%d guest %RX64 old %RX64 shw=%RX64 vs %RHp\n", rc, pGstPT->a[i].u, pOldGstPT->a[i].u, PGMSHWPTEPAE_GET_LOG(pShwPT->a[i]), HCPhys));
1636#endif
1637 uint64_t uHostAttr = PGMSHWPTEPAE_GET_U(pShwPT->a[i]) & (X86_PTE_P | X86_PTE_US | X86_PTE_A | X86_PTE_D | X86_PTE_G | X86_PTE_PAE_NX);
1638 bool fHostRW = !!(PGMSHWPTEPAE_GET_U(pShwPT->a[i]) & X86_PTE_RW);
1639 uint64_t uGuestAttr = pGstPT->a[i].u & (X86_PTE_P | X86_PTE_US | X86_PTE_A | X86_PTE_D | X86_PTE_G | X86_PTE_PAE_NX);
1640 bool fGuestRW = !!(pGstPT->a[i].u & X86_PTE_RW);
1641
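/* Keep the shadow entry only if the attributes match and it is not more write-permissive than the guest entry (it may be less, e.g. while dirty-bit tracking is still active). */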
1642 if ( uHostAttr == uGuestAttr
1643 && fHostRW <= fGuestRW)
1644 continue;
1645 }
1646 cChanged++;
1647 /* Something was changed, so flush it. */
1648 Log4(("pgmPoolTrackFlushPTPaePae: i=%d pte=%RX64 hint=%RX64\n",
1649 i, PGMSHWPTEPAE_GET_HCPHYS(pShwPT->a[i]), pOldGstPT->a[i].u & X86_PTE_PAE_PG_MASK));
1650 pgmPoolTracDerefGCPhysHint(pPool, pPage, PGMSHWPTEPAE_GET_HCPHYS(pShwPT->a[i]), pOldGstPT->a[i].u & X86_PTE_PAE_PG_MASK, i);
1651 PGMSHWPTEPAE_ATOMIC_SET(pShwPT->a[i], 0);
1652 }
1653 }
1654 return cChanged;
1655}
1656
1657
1658/**
1659 * Clear references to guest physical memory in a PAE / 32-bit page table.
1660 *
1661 * @returns nr of changed PTEs
1662 * @param pPool The pool.
1663 * @param pPage The page.
1664 * @param pShwPT The shadow page table (mapping of the page).
1665 * @param pGstPT The guest page table.
1666 * @param pOldGstPT The old cached guest page table.
1667 * @param fAllowRemoval Bail out as soon as we encounter an invalid PTE
1668 * @param pfFlush Flush reused page table (out)
1669 */
1670DECLINLINE(unsigned) pgmPoolTrackFlushPTPae32Bit(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PPGMSHWPTPAE pShwPT, PCX86PT pGstPT,
1671 PCX86PT pOldGstPT, bool fAllowRemoval, bool *pfFlush)
1672{
1673 unsigned cChanged = 0;
1674
1675#ifdef VBOX_STRICT
1676 for (unsigned i = 0; i < RT_MIN(RT_ELEMENTS(pShwPT->a), pPage->iFirstPresent); i++)
1677 AssertMsg(!PGMSHWPTEPAE_IS_P(pShwPT->a[i]), ("Unexpected PTE: idx=%d %RX64 (first=%d)\n", i, PGMSHWPTEPAE_GET_LOG(pShwPT->a[i]), pPage->iFirstPresent));
1678#endif
1679 *pfFlush = false;
1680
1681 for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pShwPT->a); i++)
1682 {
1683 /* Check the new value written by the guest. If present and with a bogus physical address, then
1684 * it's fairly safe to assume the guest is reusing the PT.
1685 */
1686 if ( fAllowRemoval
1687 && pGstPT->a[i].n.u1Present)
1688 {
1689 if (!PGMPhysIsGCPhysValid(pPool->CTX_SUFF(pVM), pGstPT->a[i].u & X86_PTE_PG_MASK))
1690 {
1691 *pfFlush = true;
1692 return ++cChanged;
1693 }
1694 }
1695 if (PGMSHWPTEPAE_IS_P(pShwPT->a[i]))
1696 {
1697 /* If the old cached PTE is identical, then there's no need to flush the shadow copy. */
1698 if ((pGstPT->a[i].u & X86_PTE_PG_MASK) == (pOldGstPT->a[i].u & X86_PTE_PG_MASK))
1699 {
1700#ifdef VBOX_STRICT
1701 RTHCPHYS HCPhys = NIL_RTHCPHYS;
1702 int rc = PGMPhysGCPhys2HCPhys(pPool->CTX_SUFF(pVM), pGstPT->a[i].u & X86_PTE_PG_MASK, &HCPhys);
1703 AssertMsg(rc == VINF_SUCCESS && PGMSHWPTEPAE_GET_HCPHYS(pShwPT->a[i]) == HCPhys, ("rc=%d guest %x old %x shw=%RX64 vs %RHp\n", rc, pGstPT->a[i].u, pOldGstPT->a[i].u, PGMSHWPTEPAE_GET_LOG(pShwPT->a[i]), HCPhys));
1704#endif
1705 uint64_t uHostAttr = PGMSHWPTEPAE_GET_U(pShwPT->a[i]) & (X86_PTE_P | X86_PTE_US | X86_PTE_A | X86_PTE_D | X86_PTE_G);
1706 bool fHostRW = !!(PGMSHWPTEPAE_GET_U(pShwPT->a[i]) & X86_PTE_RW);
1707 uint64_t uGuestAttr = pGstPT->a[i].u & (X86_PTE_P | X86_PTE_US | X86_PTE_A | X86_PTE_D | X86_PTE_G);
1708 bool fGuestRW = !!(pGstPT->a[i].u & X86_PTE_RW);
1709
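/* Same check as in the PAE-on-PAE variant above: attributes must match and the shadow PTE must not be more write-permissive than the guest PTE. */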
1710 if ( uHostAttr == uGuestAttr
1711 && fHostRW <= fGuestRW)
1712 continue;
1713 }
1714 cChanged++;
1715 /* Something was changed, so flush it. */
1716 Log4(("pgmPoolTrackFlushPTPae32Bit: i=%d pte=%RX64 hint=%x\n",
1717 i, PGMSHWPTEPAE_GET_HCPHYS(pShwPT->a[i]), pOldGstPT->a[i].u & X86_PTE_PG_MASK));
1718 pgmPoolTracDerefGCPhysHint(pPool, pPage, PGMSHWPTEPAE_GET_HCPHYS(pShwPT->a[i]), pOldGstPT->a[i].u & X86_PTE_PG_MASK, i);
1719 PGMSHWPTEPAE_ATOMIC_SET(pShwPT->a[i], 0);
1720 }
1721 }
1722 return cChanged;
1723}
1724
1725
1726/**
1727 * Flush a dirty page
1728 *
1729 * @param pVM The cross context VM structure.
1730 * @param pPool The pool.
1731 * @param idxSlot Dirty array slot index
1732 * @param fAllowRemoval Allow a reused page table to be removed
1733 */
1734static void pgmPoolFlushDirtyPage(PVM pVM, PPGMPOOL pPool, unsigned idxSlot, bool fAllowRemoval = false)
1735{
1736 PPGMPOOLPAGE pPage;
1737 unsigned idxPage;
1738
1739 Assert(idxSlot < RT_ELEMENTS(pPool->aDirtyPages));
1740 if (pPool->aDirtyPages[idxSlot].uIdx == NIL_PGMPOOL_IDX)
1741 return;
1742
1743 idxPage = pPool->aDirtyPages[idxSlot].uIdx;
1744 AssertRelease(idxPage != NIL_PGMPOOL_IDX);
1745 pPage = &pPool->aPages[idxPage];
1746 Assert(pPage->idx == idxPage);
1747 Assert(pPage->iMonitoredNext == NIL_PGMPOOL_IDX && pPage->iMonitoredPrev == NIL_PGMPOOL_IDX);
1748
1749 AssertMsg(pPage->fDirty, ("Page %RGp (slot=%d) not marked dirty!", pPage->GCPhys, idxSlot));
1750 Log(("Flush dirty page %RGp cMods=%d\n", pPage->GCPhys, pPage->cModifications));
1751
1752#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
1753 PVMCPU pVCpu = VMMGetCpu(pVM);
1754 uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu);
1755#endif
1756
1757 /* First write protect the page again to catch all write accesses. (before checking for changes -> SMP) */
1758 int rc = PGMHandlerPhysicalReset(pVM, pPage->GCPhys & PAGE_BASE_GC_MASK);
1759 Assert(rc == VINF_SUCCESS);
1760 pPage->fDirty = false;
1761
1762#ifdef VBOX_STRICT
1763 uint64_t fFlags = 0;
1764 RTHCPHYS HCPhys;
1765 rc = PGMShwGetPage(VMMGetCpu(pVM), pPage->GCPtrDirtyFault, &fFlags, &HCPhys);
1766 AssertMsg( ( rc == VINF_SUCCESS
1767 && (!(fFlags & X86_PTE_RW) || HCPhys != pPage->Core.Key))
1768 /* In the SMP case the page table might be removed while we wait for the PGM lock in the trap handler. */
1769 || rc == VERR_PAGE_TABLE_NOT_PRESENT
1770 || rc == VERR_PAGE_NOT_PRESENT,
1771 ("PGMShwGetPage -> GCPtr=%RGv rc=%d flags=%RX64\n", pPage->GCPtrDirtyFault, rc, fFlags));
1772#endif
1773
1774 /* Flush those PTEs that have changed. */
1775 STAM_PROFILE_START(&pPool->StatTrackDeref,a);
1776 void *pvShw = PGMPOOL_PAGE_2_PTR(pVM, pPage);
1777 void *pvGst;
1778 rc = PGM_GCPHYS_2_PTR_EX(pVM, pPage->GCPhys, &pvGst); AssertReleaseRC(rc);
1779 bool fFlush;
1780 unsigned cChanges;
1781
1782 if (pPage->enmKind == PGMPOOLKIND_PAE_PT_FOR_PAE_PT)
1783 cChanges = pgmPoolTrackFlushPTPaePae(pPool, pPage, (PPGMSHWPTPAE)pvShw, (PCX86PTPAE)pvGst,
1784 (PCX86PTPAE)&pPool->aDirtyPages[idxSlot].aPage[0], fAllowRemoval, &fFlush);
1785 else
1786 cChanges = pgmPoolTrackFlushPTPae32Bit(pPool, pPage, (PPGMSHWPTPAE)pvShw, (PCX86PT)pvGst,
1787 (PCX86PT)&pPool->aDirtyPages[idxSlot].aPage[0], fAllowRemoval, &fFlush);
1788
1789 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pvGst);
1790 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pvShw);
1791 STAM_PROFILE_STOP(&pPool->StatTrackDeref,a);
1792 /* Note: we might want to consider keeping the dirty page active in case there were many changes. */
1793
1794 /* This page is likely to be modified again, so reduce the nr of modifications just a bit here. */
1795 Assert(pPage->cModifications);
1796 if (cChanges < 4)
1797 pPage->cModifications = 1; /* must use > 0 here */
1798 else
1799 pPage->cModifications = RT_MAX(1, pPage->cModifications / 2);
1800
1801 STAM_COUNTER_INC(&pPool->StatResetDirtyPages);
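/* If the dirty array was full, the slot being freed becomes the next free slot. */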
1802 if (pPool->cDirtyPages == RT_ELEMENTS(pPool->aDirtyPages))
1803 pPool->idxFreeDirtyPage = idxSlot;
1804
1805 pPool->cDirtyPages--;
1806 pPool->aDirtyPages[idxSlot].uIdx = NIL_PGMPOOL_IDX;
1807 Assert(pPool->cDirtyPages <= RT_ELEMENTS(pPool->aDirtyPages));
1808 if (fFlush)
1809 {
1810 Assert(fAllowRemoval);
1811 Log(("Flush reused page table!\n"));
1812 pgmPoolFlushPage(pPool, pPage);
1813 STAM_COUNTER_INC(&pPool->StatForceFlushReused);
1814 }
1815 else
1816 Log(("Removed dirty page %RGp cMods=%d cChanges=%d\n", pPage->GCPhys, pPage->cModifications, cChanges));
1817
1818#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
1819 PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset);
1820#endif
1821}
1822
1823
1824# ifndef IN_RING3
1825/**
1826 * Add a new dirty page
1827 *
1828 * @param pVM The cross context VM structure.
1829 * @param pPool The pool.
1830 * @param pPage The page.
1831 */
1832void pgmPoolAddDirtyPage(PVM pVM, PPGMPOOL pPool, PPGMPOOLPAGE pPage)
1833{
1834 unsigned idxFree;
1835
1836 PGM_LOCK_ASSERT_OWNER(pVM);
1837 AssertCompile(RT_ELEMENTS(pPool->aDirtyPages) == 8 || RT_ELEMENTS(pPool->aDirtyPages) == 16);
1838 Assert(!pPage->fDirty);
1839
1840 idxFree = pPool->idxFreeDirtyPage;
1841 Assert(idxFree < RT_ELEMENTS(pPool->aDirtyPages));
1842 Assert(pPage->iMonitoredNext == NIL_PGMPOOL_IDX && pPage->iMonitoredPrev == NIL_PGMPOOL_IDX);
1843
1844 if (pPool->cDirtyPages >= RT_ELEMENTS(pPool->aDirtyPages))
1845 {
1846 STAM_COUNTER_INC(&pPool->StatDirtyPageOverFlowFlush);
1847 pgmPoolFlushDirtyPage(pVM, pPool, idxFree, true /* allow removal of reused page tables*/);
1848 }
1849 Assert(pPool->cDirtyPages < RT_ELEMENTS(pPool->aDirtyPages));
1850 AssertMsg(pPool->aDirtyPages[idxFree].uIdx == NIL_PGMPOOL_IDX, ("idxFree=%d cDirtyPages=%d\n", idxFree, pPool->cDirtyPages));
1851
1852 Log(("Add dirty page %RGp (slot=%d)\n", pPage->GCPhys, idxFree));
1853
1854 /*
1855 * Make a copy of the guest page table as we require valid GCPhys addresses
1856 * when removing references to physical pages.
1857 * (The HCPhys linear lookup is *extremely* expensive!)
1858 */
1859 void *pvGst;
1860 int rc = PGM_GCPHYS_2_PTR_EX(pVM, pPage->GCPhys, &pvGst); AssertReleaseRC(rc);
1861 memcpy(&pPool->aDirtyPages[idxFree].aPage[0], pvGst, (pPage->enmKind == PGMPOOLKIND_PAE_PT_FOR_PAE_PT) ? PAGE_SIZE : PAGE_SIZE/2);
1862# ifdef VBOX_STRICT
1863 void *pvShw = PGMPOOL_PAGE_2_PTR(pVM, pPage);
1864 if (pPage->enmKind == PGMPOOLKIND_PAE_PT_FOR_PAE_PT)
1865 pgmPoolTrackCheckPTPaePae(pPool, pPage, (PPGMSHWPTPAE)pvShw, (PCX86PTPAE)pvGst);
1866 else
1867 pgmPoolTrackCheckPTPae32Bit(pPool, pPage, (PPGMSHWPTPAE)pvShw, (PCX86PT)pvGst);
1868 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pvShw);
1869# endif
1870 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pvGst);
1871
1872 STAM_COUNTER_INC(&pPool->StatDirtyPage);
1873 pPage->fDirty = true;
1874 pPage->idxDirtyEntry = (uint8_t)idxFree; Assert(pPage->idxDirtyEntry == idxFree);
1875 pPool->aDirtyPages[idxFree].uIdx = pPage->idx;
1876 pPool->cDirtyPages++;
1877
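/* Advance the free index (the array size is a power of two, so the mask wraps it); if that slot is still occupied and the array isn't full, scan forward for a free one. */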
1878 pPool->idxFreeDirtyPage = (pPool->idxFreeDirtyPage + 1) & (RT_ELEMENTS(pPool->aDirtyPages) - 1);
1879 if ( pPool->cDirtyPages < RT_ELEMENTS(pPool->aDirtyPages)
1880 && pPool->aDirtyPages[pPool->idxFreeDirtyPage].uIdx != NIL_PGMPOOL_IDX)
1881 {
1882 unsigned i;
1883 for (i = 1; i < RT_ELEMENTS(pPool->aDirtyPages); i++)
1884 {
1885 idxFree = (pPool->idxFreeDirtyPage + i) & (RT_ELEMENTS(pPool->aDirtyPages) - 1);
1886 if (pPool->aDirtyPages[idxFree].uIdx == NIL_PGMPOOL_IDX)
1887 {
1888 pPool->idxFreeDirtyPage = idxFree;
1889 break;
1890 }
1891 }
1892 Assert(i != RT_ELEMENTS(pPool->aDirtyPages));
1893 }
1894
1895 Assert(pPool->cDirtyPages == RT_ELEMENTS(pPool->aDirtyPages) || pPool->aDirtyPages[pPool->idxFreeDirtyPage].uIdx == NIL_PGMPOOL_IDX);
1896
1897 /*
1898 * Clear all references to this shadow table. See @bugref{7298}.
1899 */
1900 pgmPoolTrackClearPageUsers(pPool, pPage);
1901}
1902# endif /* !IN_RING3 */
1903
1904
1905/**
1906 * Check if the specified page is dirty (not write monitored)
1907 *
1908 * @return dirty or not
1909 * @param pVM The cross context VM structure.
1910 * @param GCPhys Guest physical address
1911 */
1912bool pgmPoolIsDirtyPage(PVM pVM, RTGCPHYS GCPhys)
1913{
1914 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1915 PGM_LOCK_ASSERT_OWNER(pVM);
1916 if (!pPool->cDirtyPages)
1917 return false;
1918
1919 GCPhys = GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;
1920
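/* Linear scan of the small dirty page array (8 or 16 entries). */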
1921 for (unsigned i = 0; i < RT_ELEMENTS(pPool->aDirtyPages); i++)
1922 {
1923 if (pPool->aDirtyPages[i].uIdx != NIL_PGMPOOL_IDX)
1924 {
1925 PPGMPOOLPAGE pPage;
1926 unsigned idxPage = pPool->aDirtyPages[i].uIdx;
1927
1928 pPage = &pPool->aPages[idxPage];
1929 if (pPage->GCPhys == GCPhys)
1930 return true;
1931 }
1932 }
1933 return false;
1934}
1935
1936
1937/**
1938 * Reset all dirty pages by reinstating page monitoring.
1939 *
1940 * @param pVM The cross context VM structure.
1941 */
1942void pgmPoolResetDirtyPages(PVM pVM)
1943{
1944 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1945 PGM_LOCK_ASSERT_OWNER(pVM);
1946 Assert(pPool->cDirtyPages <= RT_ELEMENTS(pPool->aDirtyPages));
1947
1948 if (!pPool->cDirtyPages)
1949 return;
1950
1951 Log(("pgmPoolResetDirtyPages\n"));
1952 for (unsigned i = 0; i < RT_ELEMENTS(pPool->aDirtyPages); i++)
1953 pgmPoolFlushDirtyPage(pVM, pPool, i, true /* allow removal of reused page tables*/);
1954
1955 pPool->idxFreeDirtyPage = 0;
1956 if ( pPool->cDirtyPages != RT_ELEMENTS(pPool->aDirtyPages)
1957 && pPool->aDirtyPages[pPool->idxFreeDirtyPage].uIdx != NIL_PGMPOOL_IDX)
1958 {
1959 unsigned i;
1960 for (i = 1; i < RT_ELEMENTS(pPool->aDirtyPages); i++)
1961 {
1962 if (pPool->aDirtyPages[i].uIdx == NIL_PGMPOOL_IDX)
1963 {
1964 pPool->idxFreeDirtyPage = i;
1965 break;
1966 }
1967 }
1968 AssertMsg(i != RT_ELEMENTS(pPool->aDirtyPages), ("cDirtyPages %d", pPool->cDirtyPages));
1969 }
1970
1971 Assert(pPool->aDirtyPages[pPool->idxFreeDirtyPage].uIdx == NIL_PGMPOOL_IDX || pPool->cDirtyPages == RT_ELEMENTS(pPool->aDirtyPages));
1972 return;
1973}
1974
1975
1976/**
1977 * Invalidate the PT entry for the specified page
1978 *
1979 * @param pVM The cross context VM structure.
1980 * @param GCPtrPage Guest page to invalidate
1981 */
1982void pgmPoolResetDirtyPage(PVM pVM, RTGCPTR GCPtrPage)
1983{
1984 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1985 PGM_LOCK_ASSERT_OWNER(pVM);
1986 Assert(pPool->cDirtyPages <= RT_ELEMENTS(pPool->aDirtyPages));
1987
1988 if (!pPool->cDirtyPages)
1989 return;
1990
1991 Log(("pgmPoolResetDirtyPage %RGv\n", GCPtrPage));
1992 for (unsigned i = 0; i < RT_ELEMENTS(pPool->aDirtyPages); i++)
1993 {
1994 }
1995}
1996
1997
1998/**
1999 * Flushes the dirty state of the page table at the given guest physical address and reinstates write monitoring.
2000 *
2001 * @param pVM The cross context VM structure.
2002 * @param GCPhysPT Physical address of the page table
2003 */
2004void pgmPoolInvalidateDirtyPage(PVM pVM, RTGCPHYS GCPhysPT)
2005{
2006 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
2007 PGM_LOCK_ASSERT_OWNER(pVM);
2008 Assert(pPool->cDirtyPages <= RT_ELEMENTS(pPool->aDirtyPages));
2009 unsigned idxDirtyPage = RT_ELEMENTS(pPool->aDirtyPages);
2010
2011 if (!pPool->cDirtyPages)
2012 return;
2013
2014 GCPhysPT = GCPhysPT & ~(RTGCPHYS)PAGE_OFFSET_MASK;
2015
2016 for (unsigned i = 0; i < RT_ELEMENTS(pPool->aDirtyPages); i++)
2017 {
2018 if (pPool->aDirtyPages[i].uIdx != NIL_PGMPOOL_IDX)
2019 {
2020 unsigned idxPage = pPool->aDirtyPages[i].uIdx;
2021
2022 PPGMPOOLPAGE pPage = &pPool->aPages[idxPage];
2023 if (pPage->GCPhys == GCPhysPT)
2024 {
2025 idxDirtyPage = i;
2026 break;
2027 }
2028 }
2029 }
2030
2031 if (idxDirtyPage != RT_ELEMENTS(pPool->aDirtyPages))
2032 {
2033 pgmPoolFlushDirtyPage(pVM, pPool, idxDirtyPage, true /* allow removal of reused page tables*/);
2034 if ( pPool->cDirtyPages != RT_ELEMENTS(pPool->aDirtyPages)
2035 && pPool->aDirtyPages[pPool->idxFreeDirtyPage].uIdx != NIL_PGMPOOL_IDX)
2036 {
2037 unsigned i;
2038 for (i = 0; i < RT_ELEMENTS(pPool->aDirtyPages); i++)
2039 {
2040 if (pPool->aDirtyPages[i].uIdx == NIL_PGMPOOL_IDX)
2041 {
2042 pPool->idxFreeDirtyPage = i;
2043 break;
2044 }
2045 }
2046 AssertMsg(i != RT_ELEMENTS(pPool->aDirtyPages), ("cDirtyPages %d", pPool->cDirtyPages));
2047 }
2048 }
2049}
2050
2051# endif /* PGMPOOL_WITH_OPTIMIZED_DIRTY_PT */
2052
2053/**
2054 * Inserts a page into the GCPhys hash table.
2055 *
2056 * @param pPool The pool.
2057 * @param pPage The page.
2058 */
2059DECLINLINE(void) pgmPoolHashInsert(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
2060{
2061 Log3(("pgmPoolHashInsert: %RGp\n", pPage->GCPhys));
2062 Assert(pPage->GCPhys != NIL_RTGCPHYS); Assert(pPage->iNext == NIL_PGMPOOL_IDX);
2063 uint16_t iHash = PGMPOOL_HASH(pPage->GCPhys);
2064 pPage->iNext = pPool->aiHash[iHash];
2065 pPool->aiHash[iHash] = pPage->idx;
2066}
2067
2068
2069/**
2070 * Removes a page from the GCPhys hash table.
2071 *
2072 * @param pPool The pool.
2073 * @param pPage The page.
2074 */
2075DECLINLINE(void) pgmPoolHashRemove(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
2076{
2077 Log3(("pgmPoolHashRemove: %RGp\n", pPage->GCPhys));
2078 uint16_t iHash = PGMPOOL_HASH(pPage->GCPhys);
2079 if (pPool->aiHash[iHash] == pPage->idx)
2080 pPool->aiHash[iHash] = pPage->iNext;
2081 else
2082 {
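/* Not the head of the hash chain; walk it to find the predecessor and unlink the page. */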
2083 uint16_t iPrev = pPool->aiHash[iHash];
2084 for (;;)
2085 {
2086 const int16_t i = pPool->aPages[iPrev].iNext;
2087 if (i == pPage->idx)
2088 {
2089 pPool->aPages[iPrev].iNext = pPage->iNext;
2090 break;
2091 }
2092 if (i == NIL_PGMPOOL_IDX)
2093 {
2094 AssertReleaseMsgFailed(("GCPhys=%RGp idx=%d\n", pPage->GCPhys, pPage->idx));
2095 break;
2096 }
2097 iPrev = i;
2098 }
2099 }
2100 pPage->iNext = NIL_PGMPOOL_IDX;
2101}
2102
2103
2104/**
2105 * Frees up one cache page.
2106 *
2107 * @returns VBox status code.
2108 * @retval VINF_SUCCESS on success.
2109 * @param pPool The pool.
2110 * @param iUser The user index.
2111 */
2112static int pgmPoolCacheFreeOne(PPGMPOOL pPool, uint16_t iUser)
2113{
2114#ifndef IN_RC
2115 const PVM pVM = pPool->CTX_SUFF(pVM);
2116#endif
2117 Assert(pPool->iAgeHead != pPool->iAgeTail); /* We shouldn't be here if there are fewer than 2 cached entries! */
2118 STAM_COUNTER_INC(&pPool->StatCacheFreeUpOne);
2119
2120 /*
2121 * Select one page from the tail of the age list.
2122 */
2123 PPGMPOOLPAGE pPage;
2124 for (unsigned iLoop = 0; ; iLoop++)
2125 {
2126 uint16_t iToFree = pPool->iAgeTail;
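/* Never evict the caller's own page (iUser); take the next oldest page instead. */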
2127 if (iToFree == iUser && iUser != NIL_PGMPOOL_IDX)
2128 iToFree = pPool->aPages[iToFree].iAgePrev;
2129/* This is the alternative to the SyncCR3 pgmPoolCacheUsed calls.
2130 if (pPool->aPages[iToFree].iUserHead != NIL_PGMPOOL_USER_INDEX)
2131 {
2132 uint16_t i = pPool->aPages[iToFree].iAgePrev;
2133 for (unsigned j = 0; j < 10 && i != NIL_PGMPOOL_USER_INDEX; j++, i = pPool->aPages[i].iAgePrev)
2134 {
2135 if (pPool->aPages[iToFree].iUserHead == NIL_PGMPOOL_USER_INDEX)
2136 continue;
2137 iToFree = i;
2138 break;
2139 }
2140 }
2141*/
2142 Assert(iToFree != iUser);
2143 AssertRelease(iToFree != NIL_PGMPOOL_IDX);
2144 pPage = &pPool->aPages[iToFree];
2145
2146 /*
2147 * Reject any attempts at flushing the currently active shadow CR3 mapping.
2148 * Call pgmPoolCacheUsed to move the page to the head of the age list.
2149 */
2150 if ( !pgmPoolIsPageLocked(pPage)
2151 && pPage->idx >= PGMPOOL_IDX_FIRST /* paranoia (#6349) */)
2152 break;
2153 LogFlow(("pgmPoolCacheFreeOne: refuse CR3 mapping\n"));
2154 pgmPoolCacheUsed(pPool, pPage);
2155 AssertLogRelReturn(iLoop < 8192, VERR_PGM_POOL_TOO_MANY_LOOPS);
2156 }
2157
2158 /*
2159 * Found a usable page, flush it and return.
2160 */
2161 int rc = pgmPoolFlushPage(pPool, pPage);
2162 /* This flush was initiated by us and not the guest, so explicitly flush the TLB. */
2163 /* todo: find out why this is necessary; pgmPoolFlushPage should trigger a flush if one is really needed. */
2164 if (rc == VINF_SUCCESS)
2165 PGM_INVL_ALL_VCPU_TLBS(pVM);
2166 return rc;
2167}
2168
2169
2170/**
2171 * Checks if a kind mismatch is really a page being reused
2172 * or if it's just normal remappings.
2173 *
2174 * @returns true if reused and the cached page (enmKind1) should be flushed
2175 * @returns false if not reused.
2176 * @param enmKind1 The kind of the cached page.
2177 * @param enmKind2 The kind of the requested page.
2178 */
2179static bool pgmPoolCacheReusedByKind(PGMPOOLKIND enmKind1, PGMPOOLKIND enmKind2)
2180{
2181 switch (enmKind1)
2182 {
2183 /*
2184 * Never reuse them. There is no remapping in non-paging mode.
2185 */
2186 case PGMPOOLKIND_32BIT_PT_FOR_PHYS:
2187 case PGMPOOLKIND_32BIT_PD_PHYS:
2188 case PGMPOOLKIND_PAE_PT_FOR_PHYS:
2189 case PGMPOOLKIND_PAE_PD_PHYS:
2190 case PGMPOOLKIND_PAE_PDPT_PHYS:
2191 case PGMPOOLKIND_64BIT_PDPT_FOR_PHYS:
2192 case PGMPOOLKIND_64BIT_PD_FOR_PHYS:
2193 case PGMPOOLKIND_EPT_PT_FOR_PHYS:
2194 case PGMPOOLKIND_EPT_PD_FOR_PHYS:
2195 case PGMPOOLKIND_EPT_PDPT_FOR_PHYS:
2196 case PGMPOOLKIND_PAE_PDPT_FOR_32BIT: /* never reuse them for other types */
2197 return false;
2198
2199 /*
2200 * It's perfectly fine to reuse these, except for PAE and non-paging stuff.
2201 */
2202 case PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB:
2203 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB:
2204 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT:
2205 case PGMPOOLKIND_PAE_PT_FOR_32BIT_PT:
2206 case PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD:
2207 case PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD:
2208 case PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD:
2209 case PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD:
2210 case PGMPOOLKIND_32BIT_PD:
2211 case PGMPOOLKIND_PAE_PDPT:
2212 switch (enmKind2)
2213 {
2214 case PGMPOOLKIND_PAE_PD_FOR_PAE_PD:
2215 case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
2216 case PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD:
2217 case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
2218 case PGMPOOLKIND_64BIT_PML4:
2219 case PGMPOOLKIND_PAE_PT_FOR_PAE_2MB:
2220 case PGMPOOLKIND_32BIT_PT_FOR_PHYS:
2221 case PGMPOOLKIND_PAE_PT_FOR_PHYS:
2222 case PGMPOOLKIND_64BIT_PDPT_FOR_PHYS:
2223 case PGMPOOLKIND_64BIT_PD_FOR_PHYS:
2224 case PGMPOOLKIND_EPT_PDPT_FOR_PHYS:
2225 case PGMPOOLKIND_EPT_PD_FOR_PHYS:
2226 case PGMPOOLKIND_EPT_PT_FOR_PHYS:
2227 return true;
2228 default:
2229 return false;
2230 }
2231
2232 /*
2233 * It's perfectly fine to reuse these, except for PAE and non-paging stuff.
2234 */
2235 case PGMPOOLKIND_PAE_PD_FOR_PAE_PD:
2236 case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
2237 case PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD:
2238 case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
2239 case PGMPOOLKIND_64BIT_PML4:
2240 case PGMPOOLKIND_PAE_PT_FOR_PAE_2MB:
2241 switch (enmKind2)
2242 {
2243 case PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB:
2244 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB:
2245 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT:
2246 case PGMPOOLKIND_PAE_PT_FOR_32BIT_PT:
2247 case PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD:
2248 case PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD:
2249 case PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD:
2250 case PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD:
2251 case PGMPOOLKIND_32BIT_PT_FOR_PHYS:
2252 case PGMPOOLKIND_PAE_PT_FOR_PHYS:
2253 case PGMPOOLKIND_64BIT_PDPT_FOR_PHYS:
2254 case PGMPOOLKIND_64BIT_PD_FOR_PHYS:
2255 case PGMPOOLKIND_EPT_PDPT_FOR_PHYS:
2256 case PGMPOOLKIND_EPT_PD_FOR_PHYS:
2257 case PGMPOOLKIND_EPT_PT_FOR_PHYS:
2258 return true;
2259 default:
2260 return false;
2261 }
2262
2263 /*
2264 * These cannot be flushed, and it's common to reuse the PDs as PTs.
2265 */
2266 case PGMPOOLKIND_ROOT_NESTED:
2267 return false;
2268
2269 default:
2270 AssertFatalMsgFailed(("enmKind1=%d\n", enmKind1));
2271 }
2272}
2273
2274
2275/**
2276 * Attempts to satisfy a pgmPoolAlloc request from the cache.
2277 *
2278 * @returns VBox status code.
2279 * @retval VINF_PGM_CACHED_PAGE on success.
2280 * @retval VERR_FILE_NOT_FOUND if not found.
2281 * @param pPool The pool.
2282 * @param GCPhys The GC physical address of the page we're gonna shadow.
2283 * @param enmKind The kind of mapping.
2284 * @param enmAccess Access type for the mapping (only relevant for big pages)
2285 * @param fA20Enabled Whether the CPU has the A20 gate enabled.
2286 * @param iUser The shadow page pool index of the user table. This is
2287 * NIL_PGMPOOL_IDX for root pages.
2288 * @param iUserTable The index into the user table (shadowed). Ignored if
2289 * root page
2290 * @param ppPage Where to store the pointer to the page.
2291 */
2292static int pgmPoolCacheAlloc(PPGMPOOL pPool, RTGCPHYS GCPhys, PGMPOOLKIND enmKind, PGMPOOLACCESS enmAccess, bool fA20Enabled,
2293 uint16_t iUser, uint32_t iUserTable, PPPGMPOOLPAGE ppPage)
2294{
2295 /*
2296 * Look up the GCPhys in the hash.
2297 */
2298 unsigned i = pPool->aiHash[PGMPOOL_HASH(GCPhys)];
2299 Log3(("pgmPoolCacheAlloc: %RGp kind %s iUser=%d iUserTable=%x SLOT=%d\n", GCPhys, pgmPoolPoolKindToStr(enmKind), iUser, iUserTable, i));
2300 if (i != NIL_PGMPOOL_IDX)
2301 {
2302 do
2303 {
2304 PPGMPOOLPAGE pPage = &pPool->aPages[i];
2305 Log4(("pgmPoolCacheAlloc: slot %d found page %RGp\n", i, pPage->GCPhys));
2306 if (pPage->GCPhys == GCPhys)
2307 {
2308 if ( (PGMPOOLKIND)pPage->enmKind == enmKind
2309 && (PGMPOOLACCESS)pPage->enmAccess == enmAccess
2310 && pPage->fA20Enabled == fA20Enabled)
2311 {
2312 /* Put it at the start of the use list to make sure pgmPoolTrackAddUser
2313 * doesn't flush it in case there are no more free use records.
2314 */
2315 pgmPoolCacheUsed(pPool, pPage);
2316
2317 int rc = VINF_SUCCESS;
2318 if (iUser != NIL_PGMPOOL_IDX)
2319 rc = pgmPoolTrackAddUser(pPool, pPage, iUser, iUserTable);
2320 if (RT_SUCCESS(rc))
2321 {
2322 Assert((PGMPOOLKIND)pPage->enmKind == enmKind);
2323 *ppPage = pPage;
2324 if (pPage->cModifications)
2325 pPage->cModifications = 1; /* reset counter (can't use 0, or else it will be reinserted in the modified list) */
2326 STAM_COUNTER_INC(&pPool->StatCacheHits);
2327 return VINF_PGM_CACHED_PAGE;
2328 }
2329 return rc;
2330 }
2331
2332 if ((PGMPOOLKIND)pPage->enmKind != enmKind)
2333 {
2334 /*
2335 * The kind is different. In some cases we should now flush the page
2336 * as it has been reused, but in most cases this is normal remapping
2337 * of PDs as PT or big pages using the GCPhys field in a slightly
2338 * different way than the other kinds.
2339 */
2340 if (pgmPoolCacheReusedByKind((PGMPOOLKIND)pPage->enmKind, enmKind))
2341 {
2342 STAM_COUNTER_INC(&pPool->StatCacheKindMismatches);
2343 pgmPoolFlushPage(pPool, pPage);
2344 break;
2345 }
2346 }
2347 }
2348
2349 /* next */
2350 i = pPage->iNext;
2351 } while (i != NIL_PGMPOOL_IDX);
2352 }
2353
2354 Log3(("pgmPoolCacheAlloc: Missed GCPhys=%RGp enmKind=%s\n", GCPhys, pgmPoolPoolKindToStr(enmKind)));
2355 STAM_COUNTER_INC(&pPool->StatCacheMisses);
2356 return VERR_FILE_NOT_FOUND;
2357}
2358
2359
2360/**
2361 * Inserts a page into the cache.
2362 *
2363 * @param pPool The pool.
2364 * @param pPage The cached page.
2365 * @param fCanBeCached Set if the page is fit for caching from the caller's point of view.
2366 */
2367static void pgmPoolCacheInsert(PPGMPOOL pPool, PPGMPOOLPAGE pPage, bool fCanBeCached)
2368{
2369 /*
2370 * Insert into the GCPhys hash if the page is fit for that.
2371 */
2372 Assert(!pPage->fCached);
2373 if (fCanBeCached)
2374 {
2375 pPage->fCached = true;
2376 pgmPoolHashInsert(pPool, pPage);
2377 Log3(("pgmPoolCacheInsert: Caching %p:{.Core=%RHp, .idx=%d, .enmKind=%s, GCPhys=%RGp}\n",
2378 pPage, pPage->Core.Key, pPage->idx, pgmPoolPoolKindToStr(pPage->enmKind), pPage->GCPhys));
2379 STAM_COUNTER_INC(&pPool->StatCacheCacheable);
2380 }
2381 else
2382 {
2383 Log3(("pgmPoolCacheInsert: Not caching %p:{.Core=%RHp, .idx=%d, .enmKind=%s, GCPhys=%RGp}\n",
2384 pPage, pPage->Core.Key, pPage->idx, pgmPoolPoolKindToStr(pPage->enmKind), pPage->GCPhys));
2385 STAM_COUNTER_INC(&pPool->StatCacheUncacheable);
2386 }
2387
2388 /*
2389 * Insert at the head of the age list.
2390 */
2391 pPage->iAgePrev = NIL_PGMPOOL_IDX;
2392 pPage->iAgeNext = pPool->iAgeHead;
2393 if (pPool->iAgeHead != NIL_PGMPOOL_IDX)
2394 pPool->aPages[pPool->iAgeHead].iAgePrev = pPage->idx;
2395 else
2396 pPool->iAgeTail = pPage->idx;
2397 pPool->iAgeHead = pPage->idx;
2398}
2399
2400
2401/**
2402 * Flushes a cached page.
2403 *
2404 * @param pPool The pool.
2405 * @param pPage The cached page.
2406 */
2407static void pgmPoolCacheFlushPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
2408{
2409 Log3(("pgmPoolCacheFlushPage: %RGp\n", pPage->GCPhys));
2410
2411 /*
2412 * Remove the page from the hash.
2413 */
2414 if (pPage->fCached)
2415 {
2416 pPage->fCached = false;
2417 pgmPoolHashRemove(pPool, pPage);
2418 }
2419 else
2420 Assert(pPage->iNext == NIL_PGMPOOL_IDX);
2421
2422 /*
2423 * Remove it from the age list.
2424 */
2425 if (pPage->iAgeNext != NIL_PGMPOOL_IDX)
2426 pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->iAgePrev;
2427 else
2428 pPool->iAgeTail = pPage->iAgePrev;
2429 if (pPage->iAgePrev != NIL_PGMPOOL_IDX)
2430 pPool->aPages[pPage->iAgePrev].iAgeNext = pPage->iAgeNext;
2431 else
2432 pPool->iAgeHead = pPage->iAgeNext;
2433 pPage->iAgeNext = NIL_PGMPOOL_IDX;
2434 pPage->iAgePrev = NIL_PGMPOOL_IDX;
2435}
2436
2437
2438/**
2439 * Looks for pages sharing the monitor.
2440 *
2441 * @returns Pointer to the head page.
2442 * @returns NULL if not found.
2443 * @param pPool The Pool
2444 * @param pNewPage The page which is going to be monitored.
2445 */
2446static PPGMPOOLPAGE pgmPoolMonitorGetPageByGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pNewPage)
2447{
2448 /*
2449 * Look up the GCPhys in the hash.
2450 */
2451 RTGCPHYS GCPhys = pNewPage->GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;
2452 unsigned i = pPool->aiHash[PGMPOOL_HASH(GCPhys)];
2453 if (i == NIL_PGMPOOL_IDX)
2454 return NULL;
2455 do
2456 {
2457 PPGMPOOLPAGE pPage = &pPool->aPages[i];
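/* Any other pool page whose GCPhys lies within the same guest physical page shares the write monitor. */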
2458 if ( pPage->GCPhys - GCPhys < PAGE_SIZE
2459 && pPage != pNewPage)
2460 {
2461 switch (pPage->enmKind)
2462 {
2463 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT:
2464 case PGMPOOLKIND_PAE_PT_FOR_32BIT_PT:
2465 case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
2466 case PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD:
2467 case PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD:
2468 case PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD:
2469 case PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD:
2470 case PGMPOOLKIND_PAE_PD_FOR_PAE_PD:
2471 case PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD:
2472 case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
2473 case PGMPOOLKIND_64BIT_PML4:
2474 case PGMPOOLKIND_32BIT_PD:
2475 case PGMPOOLKIND_PAE_PDPT:
2476 {
2477 /* find the head */
2478 while (pPage->iMonitoredPrev != NIL_PGMPOOL_IDX)
2479 {
2480 Assert(pPage->iMonitoredPrev != pPage->idx);
2481 pPage = &pPool->aPages[pPage->iMonitoredPrev];
2482 }
2483 return pPage;
2484 }
2485
2486 /* ignore, no monitoring. */
2487 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB:
2488 case PGMPOOLKIND_PAE_PT_FOR_PAE_2MB:
2489 case PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB:
2490 case PGMPOOLKIND_32BIT_PT_FOR_PHYS:
2491 case PGMPOOLKIND_PAE_PT_FOR_PHYS:
2492 case PGMPOOLKIND_64BIT_PDPT_FOR_PHYS:
2493 case PGMPOOLKIND_64BIT_PD_FOR_PHYS:
2494 case PGMPOOLKIND_EPT_PDPT_FOR_PHYS:
2495 case PGMPOOLKIND_EPT_PD_FOR_PHYS:
2496 case PGMPOOLKIND_EPT_PT_FOR_PHYS:
2497 case PGMPOOLKIND_ROOT_NESTED:
2498 case PGMPOOLKIND_PAE_PD_PHYS:
2499 case PGMPOOLKIND_PAE_PDPT_PHYS:
2500 case PGMPOOLKIND_32BIT_PD_PHYS:
2501 case PGMPOOLKIND_PAE_PDPT_FOR_32BIT:
2502 break;
2503 default:
2504 AssertFatalMsgFailed(("enmKind=%d idx=%d\n", pPage->enmKind, pPage->idx));
2505 }
2506 }
2507
2508 /* next */
2509 i = pPage->iNext;
2510 } while (i != NIL_PGMPOOL_IDX);
2511 return NULL;
2512}
2513
2514
2515/**
2516 * Enables write monitoring of a guest page.
2517 *
2518 * @returns VBox status code.
2519 * @retval VINF_SUCCESS on success.
2520 * @param pPool The pool.
2521 * @param pPage The cached page.
2522 */
2523static int pgmPoolMonitorInsert(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
2524{
2525 LogFlow(("pgmPoolMonitorInsert %RGp\n", pPage->GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK));
2526
2527 /*
2528 * Filter out the relevant kinds.
2529 */
2530 switch (pPage->enmKind)
2531 {
2532 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT:
2533 case PGMPOOLKIND_PAE_PT_FOR_32BIT_PT:
2534 case PGMPOOLKIND_PAE_PD_FOR_PAE_PD:
2535 case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
2536 case PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD:
2537 case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
2538 case PGMPOOLKIND_64BIT_PML4:
2539 case PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD:
2540 case PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD:
2541 case PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD:
2542 case PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD:
2543 case PGMPOOLKIND_32BIT_PD:
2544 case PGMPOOLKIND_PAE_PDPT:
2545 break;
2546
2547 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB:
2548 case PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB:
2549 case PGMPOOLKIND_PAE_PT_FOR_PAE_2MB:
2550 case PGMPOOLKIND_32BIT_PT_FOR_PHYS:
2551 case PGMPOOLKIND_PAE_PT_FOR_PHYS:
2552 case PGMPOOLKIND_64BIT_PDPT_FOR_PHYS:
2553 case PGMPOOLKIND_64BIT_PD_FOR_PHYS:
2554 case PGMPOOLKIND_EPT_PDPT_FOR_PHYS:
2555 case PGMPOOLKIND_EPT_PD_FOR_PHYS:
2556 case PGMPOOLKIND_EPT_PT_FOR_PHYS:
2557 case PGMPOOLKIND_ROOT_NESTED:
2558 /* Nothing to monitor here. */
2559 return VINF_SUCCESS;
2560
2561 case PGMPOOLKIND_32BIT_PD_PHYS:
2562 case PGMPOOLKIND_PAE_PDPT_PHYS:
2563 case PGMPOOLKIND_PAE_PD_PHYS:
2564 case PGMPOOLKIND_PAE_PDPT_FOR_32BIT:
2565 /* Nothing to monitor here. */
2566 return VINF_SUCCESS;
2567 default:
2568 AssertFatalMsgFailed(("This can't happen! enmKind=%d\n", pPage->enmKind));
2569 }
2570
2571 /*
2572 * Install handler.
2573 */
2574 int rc;
2575 PPGMPOOLPAGE pPageHead = pgmPoolMonitorGetPageByGCPhys(pPool, pPage);
2576 if (pPageHead)
2577 {
2578 Assert(pPageHead != pPage); Assert(pPageHead->iMonitoredNext != pPage->idx);
2579 Assert(pPageHead->iMonitoredPrev != pPage->idx);
2580
2581#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
2582 if (pPageHead->fDirty)
2583 pgmPoolFlushDirtyPage(pPool->CTX_SUFF(pVM), pPool, pPageHead->idxDirtyEntry, false /* do not remove */);
2584#endif
2585
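/* The physical access handler is already registered for this guest page (owned by the chain head); just link the new page into the monitor chain right after the head. */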
2586 pPage->iMonitoredPrev = pPageHead->idx;
2587 pPage->iMonitoredNext = pPageHead->iMonitoredNext;
2588 if (pPageHead->iMonitoredNext != NIL_PGMPOOL_IDX)
2589 pPool->aPages[pPageHead->iMonitoredNext].iMonitoredPrev = pPage->idx;
2590 pPageHead->iMonitoredNext = pPage->idx;
2591 rc = VINF_SUCCESS;
2592 }
2593 else
2594 {
2595 Assert(pPage->iMonitoredNext == NIL_PGMPOOL_IDX); Assert(pPage->iMonitoredPrev == NIL_PGMPOOL_IDX);
2596 PVM pVM = pPool->CTX_SUFF(pVM);
2597 const RTGCPHYS GCPhysPage = pPage->GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;
2598 rc = PGMHandlerPhysicalRegister(pVM, GCPhysPage, GCPhysPage + PAGE_OFFSET_MASK, pPool->hAccessHandlerType,
2599 MMHyperCCToR3(pVM, pPage), MMHyperCCToR0(pVM, pPage), MMHyperCCToRC(pVM, pPage),
2600 NIL_RTR3PTR /*pszDesc*/);
2601 /** @todo we should probably deal with out-of-memory conditions here, but for now increasing
2602 * the heap size should suffice. */
2603 AssertFatalMsgRC(rc, ("PGMHandlerPhysicalRegisterEx %RGp failed with %Rrc\n", GCPhysPage, rc));
2604 PVMCPU pVCpu = VMMGetCpu(pVM);
2605 AssertFatalMsg(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL) || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), ("fSyncFlags=%x syncff=%d\n", pVCpu->pgm.s.fSyncFlags, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3)));
2606 }
2607 pPage->fMonitored = true;
2608 return rc;
2609}
2610
2611
2612/**
2613 * Disables write monitoring of a guest page.
2614 *
2615 * @returns VBox status code.
2616 * @retval VINF_SUCCESS on success.
2617 * @param pPool The pool.
2618 * @param pPage The cached page.
2619 */
2620static int pgmPoolMonitorFlush(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
2621{
2622 /*
2623 * Filter out the relevant kinds.
2624 */
2625 switch (pPage->enmKind)
2626 {
2627 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT:
2628 case PGMPOOLKIND_PAE_PT_FOR_32BIT_PT:
2629 case PGMPOOLKIND_PAE_PD_FOR_PAE_PD:
2630 case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
2631 case PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD:
2632 case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
2633 case PGMPOOLKIND_64BIT_PML4:
2634 case PGMPOOLKIND_32BIT_PD:
2635 case PGMPOOLKIND_PAE_PDPT:
2636 case PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD:
2637 case PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD:
2638 case PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD:
2639 case PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD:
2640 break;
2641
2642 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB:
2643 case PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB:
2644 case PGMPOOLKIND_PAE_PT_FOR_PAE_2MB:
2645 case PGMPOOLKIND_32BIT_PT_FOR_PHYS:
2646 case PGMPOOLKIND_PAE_PT_FOR_PHYS:
2647 case PGMPOOLKIND_64BIT_PDPT_FOR_PHYS:
2648 case PGMPOOLKIND_64BIT_PD_FOR_PHYS:
2649 case PGMPOOLKIND_EPT_PDPT_FOR_PHYS:
2650 case PGMPOOLKIND_EPT_PD_FOR_PHYS:
2651 case PGMPOOLKIND_EPT_PT_FOR_PHYS:
2652 case PGMPOOLKIND_ROOT_NESTED:
2653 case PGMPOOLKIND_PAE_PD_PHYS:
2654 case PGMPOOLKIND_PAE_PDPT_PHYS:
2655 case PGMPOOLKIND_32BIT_PD_PHYS:
2656 /* Nothing to monitor here. */
2657 Assert(!pPage->fMonitored);
2658 return VINF_SUCCESS;
2659
2660 default:
2661 AssertFatalMsgFailed(("This can't happen! enmKind=%d\n", pPage->enmKind));
2662 }
2663 Assert(pPage->fMonitored);
2664
2665 /*
2666 * Remove the page from the monitored list or uninstall it if last.
2667 */
2668 const PVM pVM = pPool->CTX_SUFF(pVM);
2669 int rc;
2670 if ( pPage->iMonitoredNext != NIL_PGMPOOL_IDX
2671 || pPage->iMonitoredPrev != NIL_PGMPOOL_IDX)
2672 {
2673 if (pPage->iMonitoredPrev == NIL_PGMPOOL_IDX)
2674 {
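/* This page is the head of the monitor chain; promote the next page to head and re-point the physical handler's user arguments at it. */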
2675 PPGMPOOLPAGE pNewHead = &pPool->aPages[pPage->iMonitoredNext];
2676 pNewHead->iMonitoredPrev = NIL_PGMPOOL_IDX;
2677 rc = PGMHandlerPhysicalChangeUserArgs(pVM, pPage->GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK, MMHyperCCToR3(pVM, pNewHead),
2678 MMHyperCCToR0(pVM, pNewHead), MMHyperCCToRC(pVM, pNewHead));
2679
2680 AssertFatalRCSuccess(rc);
2681 pPage->iMonitoredNext = NIL_PGMPOOL_IDX;
2682 }
2683 else
2684 {
2685 pPool->aPages[pPage->iMonitoredPrev].iMonitoredNext = pPage->iMonitoredNext;
2686 if (pPage->iMonitoredNext != NIL_PGMPOOL_IDX)
2687 {
2688 pPool->aPages[pPage->iMonitoredNext].iMonitoredPrev = pPage->iMonitoredPrev;
2689 pPage->iMonitoredNext = NIL_PGMPOOL_IDX;
2690 }
2691 pPage->iMonitoredPrev = NIL_PGMPOOL_IDX;
2692 rc = VINF_SUCCESS;
2693 }
2694 }
2695 else
2696 {
2697 rc = PGMHandlerPhysicalDeregister(pVM, pPage->GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK);
2698 AssertFatalRC(rc);
2699 PVMCPU pVCpu = VMMGetCpu(pVM);
2700 AssertFatalMsg(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL) || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3),
2701 ("%#x %#x\n", pVCpu->pgm.s.fSyncFlags, pVM->fGlobalForcedActions));
2702 }
2703 pPage->fMonitored = false;
2704
2705 /*
2706 * Remove it from the list of modified pages (if in it).
2707 */
2708 pgmPoolMonitorModifiedRemove(pPool, pPage);
2709
2710 return rc;
2711}
2712
2713
2714/**
2715 * Inserts the page into the list of modified pages.
2716 *
2717 * @param pPool The pool.
2718 * @param pPage The page.
2719 */
2720void pgmPoolMonitorModifiedInsert(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
2721{
2722 Log3(("pgmPoolMonitorModifiedInsert: idx=%d\n", pPage->idx));
2723 AssertMsg( pPage->iModifiedNext == NIL_PGMPOOL_IDX
2724 && pPage->iModifiedPrev == NIL_PGMPOOL_IDX
2725 && pPool->iModifiedHead != pPage->idx,
2726 ("Next=%d Prev=%d idx=%d cModifications=%d Head=%d cModifiedPages=%d\n",
2727 pPage->iModifiedNext, pPage->iModifiedPrev, pPage->idx, pPage->cModifications,
2728 pPool->iModifiedHead, pPool->cModifiedPages));
2729
2730 pPage->iModifiedNext = pPool->iModifiedHead;
2731 if (pPool->iModifiedHead != NIL_PGMPOOL_IDX)
2732 pPool->aPages[pPool->iModifiedHead].iModifiedPrev = pPage->idx;
2733 pPool->iModifiedHead = pPage->idx;
2734 pPool->cModifiedPages++;
2735#ifdef VBOX_WITH_STATISTICS
2736 if (pPool->cModifiedPages > pPool->cModifiedPagesHigh)
2737 pPool->cModifiedPagesHigh = pPool->cModifiedPages;
2738#endif
2739}
2740
2741
2742/**
2743 * Removes the page from the list of modified pages and resets the
2744 * modification counter.
2745 *
2746 * @param pPool The pool.
2747 * @param pPage The page which is believed to be in the list of modified pages.
2748 */
2749static void pgmPoolMonitorModifiedRemove(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
2750{
2751 Log3(("pgmPoolMonitorModifiedRemove: idx=%d cModifications=%d\n", pPage->idx, pPage->cModifications));
2752 if (pPool->iModifiedHead == pPage->idx)
2753 {
2754 Assert(pPage->iModifiedPrev == NIL_PGMPOOL_IDX);
2755 pPool->iModifiedHead = pPage->iModifiedNext;
2756 if (pPage->iModifiedNext != NIL_PGMPOOL_IDX)
2757 {
2758 pPool->aPages[pPage->iModifiedNext].iModifiedPrev = NIL_PGMPOOL_IDX;
2759 pPage->iModifiedNext = NIL_PGMPOOL_IDX;
2760 }
2761 pPool->cModifiedPages--;
2762 }
2763 else if (pPage->iModifiedPrev != NIL_PGMPOOL_IDX)
2764 {
2765 pPool->aPages[pPage->iModifiedPrev].iModifiedNext = pPage->iModifiedNext;
2766 if (pPage->iModifiedNext != NIL_PGMPOOL_IDX)
2767 {
2768 pPool->aPages[pPage->iModifiedNext].iModifiedPrev = pPage->iModifiedPrev;
2769 pPage->iModifiedNext = NIL_PGMPOOL_IDX;
2770 }
2771 pPage->iModifiedPrev = NIL_PGMPOOL_IDX;
2772 pPool->cModifiedPages--;
2773 }
2774 else
2775 Assert(pPage->iModifiedPrev == NIL_PGMPOOL_IDX);
2776 pPage->cModifications = 0;
2777}
2778
2779
2780/**
2781 * Zaps the list of modified pages, resetting their modification counters in the process.
2782 *
2783 * @param pVM The cross context VM structure.
2784 */
2785static void pgmPoolMonitorModifiedClearAll(PVM pVM)
2786{
2787 pgmLock(pVM);
2788 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
2789 LogFlow(("pgmPoolMonitorModifiedClearAll: cModifiedPages=%d\n", pPool->cModifiedPages));
2790
2791 unsigned cPages = 0; NOREF(cPages);
2792
2793#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
2794 pgmPoolResetDirtyPages(pVM);
2795#endif
2796
2797 uint16_t idx = pPool->iModifiedHead;
2798 pPool->iModifiedHead = NIL_PGMPOOL_IDX;
2799 while (idx != NIL_PGMPOOL_IDX)
2800 {
2801 PPGMPOOLPAGE pPage = &pPool->aPages[idx];
2802 idx = pPage->iModifiedNext;
2803 pPage->iModifiedNext = NIL_PGMPOOL_IDX;
2804 pPage->iModifiedPrev = NIL_PGMPOOL_IDX;
2805 pPage->cModifications = 0;
2806 Assert(++cPages);
2807 }
2808 AssertMsg(cPages == pPool->cModifiedPages, ("%d != %d\n", cPages, pPool->cModifiedPages));
2809 pPool->cModifiedPages = 0;
2810 pgmUnlock(pVM);
2811}
2812
2813
2814/**
2815 * Handle SyncCR3 pool tasks
2816 *
2817 * @returns VBox status code.
2818 * @retval VINF_SUCCESS on success.
2819 * @retval VINF_PGM_SYNC_CR3 if it needs to be deferred to ring 3 (GC only).
2820 * @param pVCpu The cross context virtual CPU structure.
2821 * @remark Should only be used when monitoring is available, thus placed in
2822 * the PGMPOOL_WITH_MONITORING \#ifdef.
2823 */
2824int pgmPoolSyncCR3(PVMCPU pVCpu)
2825{
2826 PVM pVM = pVCpu->CTX_SUFF(pVM);
2827 LogFlow(("pgmPoolSyncCR3 fSyncFlags=%x\n", pVCpu->pgm.s.fSyncFlags));
2828
2829 /*
2830 * When monitoring shadowed pages, we reset the modification counters on CR3 sync.
2831 * Occasionally we will have to clear all the shadow page tables because we wanted
2832 * to monitor a page which was mapped by too many shadowed page tables. This operation
2833 * is sometimes referred to as a 'lightweight flush'.
2834 */
2835# ifdef IN_RING3 /* Don't flush in ring-0 or raw mode, it's taking too long. */
2836 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)
2837 pgmR3PoolClearAll(pVM, false /*fFlushRemTlb*/);
2838# else /* !IN_RING3 */
2839 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)
2840 {
2841 Log(("SyncCR3: PGM_SYNC_CLEAR_PGM_POOL is set -> VINF_PGM_SYNC_CR3\n"));
2842 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3); /** @todo no need to do global sync, right? */
2843
2844 /* Make sure all other VCPUs return to ring 3. */
2845 if (pVM->cCpus > 1)
2846 {
2847 VM_FF_SET(pVM, VM_FF_PGM_POOL_FLUSH_PENDING);
2848 PGM_INVL_ALL_VCPU_TLBS(pVM);
2849 }
2850 return VINF_PGM_SYNC_CR3;
2851 }
2852# endif /* !IN_RING3 */
2853 else
2854 {
2855 pgmPoolMonitorModifiedClearAll(pVM);
2856
2857 /* pgmPoolMonitorModifiedClearAll can cause a pgm pool flush (dirty page clearing), so make sure we handle this! */
2858 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)
2859 {
2860 Log(("pgmPoolMonitorModifiedClearAll caused a pgm flush -> call pgmPoolSyncCR3 again!\n"));
2861 return pgmPoolSyncCR3(pVCpu);
2862 }
2863 }
2864 return VINF_SUCCESS;
2865}
2866
2867
2868/**
2869 * Frees up at least one user entry.
2870 *
2871 * @returns VBox status code.
2872 * @retval VINF_SUCCESS on success.
2873 * @retval VERR_PGM_POOL_FLUSHED if the pool was flushed.
2874 * @param pPool The pool.
2875 * @param iUser The user index.
2876 */
2877static int pgmPoolTrackFreeOneUser(PPGMPOOL pPool, uint16_t iUser)
2878{
2879 STAM_COUNTER_INC(&pPool->StatTrackFreeUpOneUser);
2880 /*
2881 * Just free cached pages in a braindead fashion.
2882 */
2883 /** @todo walk the age list backwards and free the first with usage. */
2884 int rc = VINF_SUCCESS;
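/* Evict cached pages until one of the flushes returns a user record to the free list. */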
2885 do
2886 {
2887 int rc2 = pgmPoolCacheFreeOne(pPool, iUser);
2888 if (RT_FAILURE(rc2) && rc == VINF_SUCCESS)
2889 rc = rc2;
2890 } while (pPool->iUserFreeHead == NIL_PGMPOOL_USER_INDEX);
2891 return rc;
2892}
2893
2894
2895/**
2896 * Inserts a page into the cache.
2897 *
2898 * This will create a user node for the page, insert it into the GCPhys
2899 * hash, and insert it into the age list.
2900 *
2901 * @returns VBox status code.
2902 * @retval VINF_SUCCESS if successfully added.
2903 * @retval VERR_PGM_POOL_FLUSHED if the pool was flushed.
2904 * @param pPool The pool.
2905 * @param pPage The cached page.
2906 * @param GCPhys The GC physical address of the page we're gonna shadow.
2907 * @param iUser The user index.
2908 * @param iUserTable The user table index.
2909 */
2910DECLINLINE(int) pgmPoolTrackInsert(PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTGCPHYS GCPhys, uint16_t iUser, uint32_t iUserTable)
2911{
2912 int rc = VINF_SUCCESS;
2913 PPGMPOOLUSER paUsers = pPool->CTX_SUFF(paUsers);
2914
2915 LogFlow(("pgmPoolTrackInsert GCPhys=%RGp iUser=%d iUserTable=%x\n", GCPhys, iUser, iUserTable));
2916
2917 if (iUser != NIL_PGMPOOL_IDX)
2918 {
2919#ifdef VBOX_STRICT
2920 /*
2921 * Check that the entry doesn't already exist.
2922 */
2923 if (pPage->iUserHead != NIL_PGMPOOL_USER_INDEX)
2924 {
2925 uint16_t i = pPage->iUserHead;
2926 do
2927 {
2928 Assert(i < pPool->cMaxUsers);
2929 AssertMsg(paUsers[i].iUser != iUser || paUsers[i].iUserTable != iUserTable, ("%x %x vs new %x %x\n", paUsers[i].iUser, paUsers[i].iUserTable, iUser, iUserTable));
2930 i = paUsers[i].iNext;
2931 } while (i != NIL_PGMPOOL_USER_INDEX);
2932 }
2933#endif
2934
2935 /*
2936 * Find a free user node.
2937 */
2938 uint16_t i = pPool->iUserFreeHead;
2939 if (i == NIL_PGMPOOL_USER_INDEX)
2940 {
2941 rc = pgmPoolTrackFreeOneUser(pPool, iUser);
2942 if (RT_FAILURE(rc))
2943 return rc;
2944 i = pPool->iUserFreeHead;
2945 }
2946
2947 /*
2948 * Unlink the user node from the free list,
2949 * initialize and insert it into the user list.
2950 */
2951 pPool->iUserFreeHead = paUsers[i].iNext;
2952 paUsers[i].iNext = NIL_PGMPOOL_USER_INDEX;
2953 paUsers[i].iUser = iUser;
2954 paUsers[i].iUserTable = iUserTable;
2955 pPage->iUserHead = i;
2956 }
2957 else
2958 pPage->iUserHead = NIL_PGMPOOL_USER_INDEX;
2959
2960
2961 /*
2962 * Insert into cache and enable monitoring of the guest page if enabled.
2963 *
2964 * Until we implement caching of all levels, including the CR3 one, we'll
2965 * have to make sure we don't try monitor & cache any recursive reuse of
2966 * a monitored CR3 page. Because all windows versions are doing this we'll
2967 * have to be able to do combined access monitoring, CR3 + PT and
2968 * PD + PT (guest PAE).
2969 *
2970 * Update:
2971 * We're now cooperating with the CR3 monitor if an uncachable page is found.
2972 */
2973 const bool fCanBeMonitored = true;
2974 pgmPoolCacheInsert(pPool, pPage, fCanBeMonitored); /* This can be expanded. */
2975 if (fCanBeMonitored)
2976 {
2977 rc = pgmPoolMonitorInsert(pPool, pPage);
2978 AssertRC(rc);
2979 }
2980 return rc;
2981}
2982
2983
2984/**
2985 * Adds a user reference to a page.
2986 *
2987 * This will move the page to the head of the cache's age list.
2988 *
2989 * @returns VBox status code.
2990 * @retval VINF_SUCCESS if successfully added.
2991 * @retval VERR_PGM_POOL_FLUSHED if the pool was flushed.
2992 * @param pPool The pool.
2993 * @param pPage The cached page.
2994 * @param iUser The user index.
2995 * @param iUserTable The user table.
2996 */
2997static int pgmPoolTrackAddUser(PPGMPOOL pPool, PPGMPOOLPAGE pPage, uint16_t iUser, uint32_t iUserTable)
2998{
2999 Log3(("pgmPoolTrackAddUser: GCPhys=%RGp iUser=%x iUserTable=%x\n", pPage->GCPhys, iUser, iUserTable));
3000 PPGMPOOLUSER paUsers = pPool->CTX_SUFF(paUsers);
3001 Assert(iUser != NIL_PGMPOOL_IDX);
3002
3003# ifdef VBOX_STRICT
3004 /*
3005 * Check that the entry doesn't already exist. We only allow multiple
3006 * users of top-level paging structures (SHW_POOL_ROOT_IDX).
3007 */
3008 if (pPage->iUserHead != NIL_PGMPOOL_USER_INDEX)
3009 {
3010 uint16_t i = pPage->iUserHead;
3011 do
3012 {
3013 Assert(i < pPool->cMaxUsers);
3014 /** @todo this assertion looks odd... Shouldn't it be && here? */
3015 AssertMsg(paUsers[i].iUser != iUser || paUsers[i].iUserTable != iUserTable, ("%x %x vs new %x %x\n", paUsers[i].iUser, paUsers[i].iUserTable, iUser, iUserTable));
3016 i = paUsers[i].iNext;
3017 } while (i != NIL_PGMPOOL_USER_INDEX);
3018 }
3019# endif
3020
3021 /*
3022 * Allocate a user node.
3023 */
3024 uint16_t i = pPool->iUserFreeHead;
3025 if (i == NIL_PGMPOOL_USER_INDEX)
3026 {
3027 int rc = pgmPoolTrackFreeOneUser(pPool, iUser);
3028 if (RT_FAILURE(rc))
3029 return rc;
3030 i = pPool->iUserFreeHead;
3031 }
3032 pPool->iUserFreeHead = paUsers[i].iNext;
3033
3034 /*
3035 * Initialize the user node and insert it.
3036 */
3037 paUsers[i].iNext = pPage->iUserHead;
3038 paUsers[i].iUser = iUser;
3039 paUsers[i].iUserTable = iUserTable;
3040 pPage->iUserHead = i;
3041
3042# ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
3043 if (pPage->fDirty)
3044 pgmPoolFlushDirtyPage(pPool->CTX_SUFF(pVM), pPool, pPage->idxDirtyEntry, false /* do not remove */);
3045# endif
3046
3047 /*
3048 * Tell the cache to update its replacement stats for this page.
3049 */
3050 pgmPoolCacheUsed(pPool, pPage);
3051 return VINF_SUCCESS;
3052}
3053
3054
3055/**
3056 * Frees a user record associated with a page.
3057 *
3058 * This does not clear the entry in the user table; it simply returns the
3059 * user record to the chain of free records.
3060 *
3061 * @param pPool The pool.
3062 * @param pPage The shadow page.
3063 * @param iUser The shadow page pool index of the user table.
3064 * @param iUserTable The index into the user table (shadowed).
3065 *
3066 * @remarks Don't call this for root pages.
3067 */
3068static void pgmPoolTrackFreeUser(PPGMPOOL pPool, PPGMPOOLPAGE pPage, uint16_t iUser, uint32_t iUserTable)
3069{
3070 Log3(("pgmPoolTrackFreeUser %RGp %x %x\n", pPage->GCPhys, iUser, iUserTable));
3071 PPGMPOOLUSER paUsers = pPool->CTX_SUFF(paUsers);
3072 Assert(iUser != NIL_PGMPOOL_IDX);
3073
3074 /*
3075 * Unlink and free the specified user entry.
3076 */
3077
3078 /* Special: For PAE and 32-bit paging, there is usually no more than one user. */
3079 uint16_t i = pPage->iUserHead;
3080 if ( i != NIL_PGMPOOL_USER_INDEX
3081 && paUsers[i].iUser == iUser
3082 && paUsers[i].iUserTable == iUserTable)
3083 {
3084 pPage->iUserHead = paUsers[i].iNext;
3085
3086 paUsers[i].iUser = NIL_PGMPOOL_IDX;
3087 paUsers[i].iNext = pPool->iUserFreeHead;
3088 pPool->iUserFreeHead = i;
3089 return;
3090 }
3091
3092 /* General: Linear search. */
3093 uint16_t iPrev = NIL_PGMPOOL_USER_INDEX;
3094 while (i != NIL_PGMPOOL_USER_INDEX)
3095 {
3096 if ( paUsers[i].iUser == iUser
3097 && paUsers[i].iUserTable == iUserTable)
3098 {
3099 if (iPrev != NIL_PGMPOOL_USER_INDEX)
3100 paUsers[iPrev].iNext = paUsers[i].iNext;
3101 else
3102 pPage->iUserHead = paUsers[i].iNext;
3103
3104 paUsers[i].iUser = NIL_PGMPOOL_IDX;
3105 paUsers[i].iNext = pPool->iUserFreeHead;
3106 pPool->iUserFreeHead = i;
3107 return;
3108 }
3109 iPrev = i;
3110 i = paUsers[i].iNext;
3111 }
3112
3113 /* Fatal: didn't find it */
3114 AssertFatalMsgFailed(("Didn't find the user entry! iUser=%d iUserTable=%#x GCPhys=%RGp\n",
3115 iUser, iUserTable, pPage->GCPhys));
3116}
3117
3118
3119/**
3120 * Gets the entry size of a shadow table.
3121 *
3122 * @param enmKind The kind of page.
3123 *
3124 * @returns The size of the entry in bytes. That is, 4 or 8.
3125 * @returns If the kind is not for a table, an assertion is raised and 0 is
3126 * returned.
3127 */
3128DECLINLINE(unsigned) pgmPoolTrackGetShadowEntrySize(PGMPOOLKIND enmKind)
3129{
3130 switch (enmKind)
3131 {
3132 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT:
3133 case PGMPOOLKIND_32BIT_PT_FOR_PHYS:
3134 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB:
3135 case PGMPOOLKIND_32BIT_PD:
3136 case PGMPOOLKIND_32BIT_PD_PHYS:
3137 return 4;
3138
3139 case PGMPOOLKIND_PAE_PT_FOR_PHYS:
3140 case PGMPOOLKIND_PAE_PT_FOR_32BIT_PT:
3141 case PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB:
3142 case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
3143 case PGMPOOLKIND_PAE_PT_FOR_PAE_2MB:
3144 case PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD:
3145 case PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD:
3146 case PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD:
3147 case PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD:
3148 case PGMPOOLKIND_PAE_PD_FOR_PAE_PD:
3149 case PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD:
3150 case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
3151 case PGMPOOLKIND_64BIT_PML4:
3152 case PGMPOOLKIND_PAE_PDPT:
3153 case PGMPOOLKIND_ROOT_NESTED:
3154 case PGMPOOLKIND_64BIT_PDPT_FOR_PHYS:
3155 case PGMPOOLKIND_64BIT_PD_FOR_PHYS:
3156 case PGMPOOLKIND_EPT_PDPT_FOR_PHYS:
3157 case PGMPOOLKIND_EPT_PD_FOR_PHYS:
3158 case PGMPOOLKIND_EPT_PT_FOR_PHYS:
3159 case PGMPOOLKIND_PAE_PD_PHYS:
3160 case PGMPOOLKIND_PAE_PDPT_PHYS:
3161 return 8;
3162
3163 default:
3164 AssertFatalMsgFailed(("enmKind=%d\n", enmKind));
3165 }
3166}
3167
3168
3169/**
3170 * Gets the entry size of a guest table.
3171 *
3172 * @param enmKind The kind of page.
3173 *
3174 * @returns The size of the entry in bytes. That is, 0, 4 or 8.
3175 * @returns If the kind is not for a table, an assertion is raised and 0 is
3176 * returned.
3177 */
3178DECLINLINE(unsigned) pgmPoolTrackGetGuestEntrySize(PGMPOOLKIND enmKind)
3179{
3180 switch (enmKind)
3181 {
3182 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT:
3183 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB:
3184 case PGMPOOLKIND_32BIT_PD:
3185 case PGMPOOLKIND_PAE_PT_FOR_32BIT_PT:
3186 case PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB:
3187 case PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD:
3188 case PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD:
3189 case PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD:
3190 case PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD:
3191 return 4;
3192
3193 case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
3194 case PGMPOOLKIND_PAE_PT_FOR_PAE_2MB:
3195 case PGMPOOLKIND_PAE_PD_FOR_PAE_PD:
3196 case PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD:
3197 case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
3198 case PGMPOOLKIND_64BIT_PML4:
3199 case PGMPOOLKIND_PAE_PDPT:
3200 return 8;
3201
3202 case PGMPOOLKIND_32BIT_PT_FOR_PHYS:
3203 case PGMPOOLKIND_PAE_PT_FOR_PHYS:
3204 case PGMPOOLKIND_64BIT_PDPT_FOR_PHYS:
3205 case PGMPOOLKIND_64BIT_PD_FOR_PHYS:
3206 case PGMPOOLKIND_EPT_PDPT_FOR_PHYS:
3207 case PGMPOOLKIND_EPT_PD_FOR_PHYS:
3208 case PGMPOOLKIND_EPT_PT_FOR_PHYS:
3209 case PGMPOOLKIND_ROOT_NESTED:
3210 case PGMPOOLKIND_PAE_PD_PHYS:
3211 case PGMPOOLKIND_PAE_PDPT_PHYS:
3212 case PGMPOOLKIND_32BIT_PD_PHYS:
3213 /** @todo can we return 0? (nobody is calling this...) */
3214 AssertFailed();
3215 return 0;
3216
3217 default:
3218 AssertFatalMsgFailed(("enmKind=%d\n", enmKind));
3219 }
3220}
3221
3222
3223/**
3224 * Checks one shadow page table entry for a mapping of a physical page.
3225 *
3226 * @returns true if the PTE was kept (merely updated), false if all relevant PTEs were removed.
3227 *
3228 * @param pVM The cross context VM structure.
3229 * @param pPhysPage The guest page in question.
3230 * @param fFlushPTEs Flush PTEs or allow them to be updated (e.g. in case of an RW bit change)
3231 * @param iShw The shadow page table.
3232 * @param iPte Page table entry or NIL_PGMPOOL_PHYSEXT_IDX_PTE if unknown
3233 */
3234static bool pgmPoolTrackFlushGCPhysPTInt(PVM pVM, PCPGMPAGE pPhysPage, bool fFlushPTEs, uint16_t iShw, uint16_t iPte)
3235{
3236 LogFlow(("pgmPoolTrackFlushGCPhysPTInt: pPhysPage=%RHp iShw=%d iPte=%d\n", PGM_PAGE_GET_HCPHYS(pPhysPage), iShw, iPte));
3237 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
3238 bool fRet = false;
3239
3240 /*
3241 * Assert sanity.
3242 */
3243 Assert(iPte != NIL_PGMPOOL_PHYSEXT_IDX_PTE);
3244 AssertFatalMsg(iShw < pPool->cCurPages && iShw != NIL_PGMPOOL_IDX, ("iShw=%d\n", iShw));
3245 PPGMPOOLPAGE pPage = &pPool->aPages[iShw];
3246
3247 /*
3248 * Then, clear the actual mappings to the page in the shadow PT.
3249 */
3250 switch (pPage->enmKind)
3251 {
3252 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT:
3253 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB:
3254 case PGMPOOLKIND_32BIT_PT_FOR_PHYS:
3255 {
3256 const uint32_t u32 = PGM_PAGE_GET_HCPHYS(pPhysPage) | X86_PTE_P;
3257 PX86PT pPT = (PX86PT)PGMPOOL_PAGE_2_PTR(pVM, pPage);
3258 uint32_t u32AndMask = 0;
3259 uint32_t u32OrMask = 0;
3260
3261 if (!fFlushPTEs)
3262 {
3263 switch (PGM_PAGE_GET_HNDL_PHYS_STATE(pPhysPage))
3264 {
3265                case PGM_PAGE_HNDL_PHYS_STATE_NONE:     /* No handler installed. */
3266                case PGM_PAGE_HNDL_PHYS_STATE_DISABLED: /* Monitoring is temporarily disabled. */
3267 u32OrMask = X86_PTE_RW;
3268 u32AndMask = UINT32_MAX;
3269 fRet = true;
3270 STAM_COUNTER_INC(&pPool->StatTrackFlushEntryKeep);
3271 break;
3272
3273                case PGM_PAGE_HNDL_PHYS_STATE_WRITE:    /* Write access is monitored. */
3274 u32OrMask = 0;
3275 u32AndMask = ~X86_PTE_RW;
3276 fRet = true;
3277 STAM_COUNTER_INC(&pPool->StatTrackFlushEntryKeep);
3278 break;
3279 default:
3280 /* (shouldn't be here, will assert below) */
3281 STAM_COUNTER_INC(&pPool->StatTrackFlushEntry);
3282 break;
3283 }
3284 }
3285 else
3286 STAM_COUNTER_INC(&pPool->StatTrackFlushEntry);
3287
3288 /* Update the counter if we're removing references. */
3289 if (!u32AndMask)
3290 {
3291 Assert(pPage->cPresent);
3292 Assert(pPool->cPresent);
3293 pPage->cPresent--;
3294 pPool->cPresent--;
3295 }
3296
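            /* If this PTE still maps the physical page, either update it in place
               (keep it present, adjusting the RW bit) or clear it atomically. */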
3297 if ((pPT->a[iPte].u & (X86_PTE_PG_MASK | X86_PTE_P)) == u32)
3298 {
3299 X86PTE Pte;
3300
3301 Log4(("pgmPoolTrackFlushGCPhysPTs: i=%d pte=%RX32\n", iPte, pPT->a[iPte]));
3302 Pte.u = (pPT->a[iPte].u & u32AndMask) | u32OrMask;
3303 if (Pte.u & PGM_PTFLAGS_TRACK_DIRTY)
3304 Pte.n.u1Write = 0; /* need to disallow writes when dirty bit tracking is still active. */
3305
3306 ASMAtomicWriteU32(&pPT->a[iPte].u, Pte.u);
3307 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pPT);
3308 return fRet;
3309 }
3310#ifdef LOG_ENABLED
3311 Log(("iFirstPresent=%d cPresent=%d\n", pPage->iFirstPresent, pPage->cPresent));
3312 for (unsigned i = 0, cFound = 0; i < RT_ELEMENTS(pPT->a); i++)
3313 if ((pPT->a[i].u & (X86_PTE_PG_MASK | X86_PTE_P)) == u32)
3314 {
3315 Log(("i=%d cFound=%d\n", i, ++cFound));
3316 }
3317#endif
3318 AssertFatalMsgFailed(("iFirstPresent=%d cPresent=%d u32=%RX32 poolkind=%x\n", pPage->iFirstPresent, pPage->cPresent, u32, pPage->enmKind));
3319 /*PGM_DYNMAP_UNUSED_HINT_VM(pVM, pPT);*/
3320 break;
3321 }
3322
3323 case PGMPOOLKIND_PAE_PT_FOR_32BIT_PT:
3324 case PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB:
3325 case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
3326 case PGMPOOLKIND_PAE_PT_FOR_PAE_2MB:
3327 case PGMPOOLKIND_PAE_PT_FOR_PHYS:
3328 case PGMPOOLKIND_EPT_PT_FOR_PHYS: /* physical mask the same as PAE; RW bit as well; be careful! */
3329 {
3330 const uint64_t u64 = PGM_PAGE_GET_HCPHYS(pPhysPage) | X86_PTE_P;
3331 PPGMSHWPTPAE pPT = (PPGMSHWPTPAE)PGMPOOL_PAGE_2_PTR(pVM, pPage);
3332 uint64_t u64OrMask = 0;
3333 uint64_t u64AndMask = 0;
3334
3335 if (!fFlushPTEs)
3336 {
3337 switch (PGM_PAGE_GET_HNDL_PHYS_STATE(pPhysPage))
3338 {
3339 case PGM_PAGE_HNDL_PHYS_STATE_NONE: /* No handler installed. */
3340 case PGM_PAGE_HNDL_PHYS_STATE_DISABLED: /* Monitoring is temporarily disabled. */
3341 u64OrMask = X86_PTE_RW;
3342 u64AndMask = UINT64_MAX;
3343 fRet = true;
3344 STAM_COUNTER_INC(&pPool->StatTrackFlushEntryKeep);
3345 break;
3346
3347 case PGM_PAGE_HNDL_PHYS_STATE_WRITE: /* Write access is monitored. */
3348 u64OrMask = 0;
3349 u64AndMask = ~(uint64_t)X86_PTE_RW;
3350 fRet = true;
3351 STAM_COUNTER_INC(&pPool->StatTrackFlushEntryKeep);
3352 break;
3353
3354 default:
3355 /* (shouldn't be here, will assert below) */
3356 STAM_COUNTER_INC(&pPool->StatTrackFlushEntry);
3357 break;
3358 }
3359 }
3360 else
3361 STAM_COUNTER_INC(&pPool->StatTrackFlushEntry);
3362
3363 /* Update the counter if we're removing references. */
3364 if (!u64AndMask)
3365 {
3366 Assert(pPage->cPresent);
3367 Assert(pPool->cPresent);
3368 pPage->cPresent--;
3369 pPool->cPresent--;
3370 }
3371
3372 if ((PGMSHWPTEPAE_GET_U(pPT->a[iPte]) & (X86_PTE_PAE_PG_MASK | X86_PTE_P | X86_PTE_PAE_MBZ_MASK_NX)) == u64)
3373 {
3374 X86PTEPAE Pte;
3375
3376 Log4(("pgmPoolTrackFlushGCPhysPTs: i=%d pte=%RX64\n", iPte, PGMSHWPTEPAE_GET_LOG(pPT->a[iPte])));
3377 Pte.u = (PGMSHWPTEPAE_GET_U(pPT->a[iPte]) & u64AndMask) | u64OrMask;
3378 if (Pte.u & PGM_PTFLAGS_TRACK_DIRTY)
3379 Pte.n.u1Write = 0; /* need to disallow writes when dirty bit tracking is still active. */
3380
3381 PGMSHWPTEPAE_ATOMIC_SET(pPT->a[iPte], Pte.u);
3382 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pPT);
3383 return fRet;
3384 }
3385#ifdef LOG_ENABLED
3386 Log(("iFirstPresent=%d cPresent=%d\n", pPage->iFirstPresent, pPage->cPresent));
3387 Log(("Found %RX64 expected %RX64\n", PGMSHWPTEPAE_GET_U(pPT->a[iPte]) & (X86_PTE_PAE_PG_MASK | X86_PTE_P | X86_PTE_PAE_MBZ_MASK_NX), u64));
3388 for (unsigned i = 0, cFound = 0; i < RT_ELEMENTS(pPT->a); i++)
3389 if ((PGMSHWPTEPAE_GET_U(pPT->a[i]) & (X86_PTE_PAE_PG_MASK | X86_PTE_P | X86_PTE_PAE_MBZ_MASK_NX)) == u64)
3390 Log(("i=%d cFound=%d\n", i, ++cFound));
3391#endif
3392 AssertFatalMsgFailed(("iFirstPresent=%d cPresent=%d u64=%RX64 poolkind=%x iPte=%d PT=%RX64\n", pPage->iFirstPresent, pPage->cPresent, u64, pPage->enmKind, iPte, PGMSHWPTEPAE_GET_LOG(pPT->a[iPte])));
3393 /*PGM_DYNMAP_UNUSED_HINT_VM(pVM, pPT);*/
3394 break;
3395 }
3396
3397#ifdef PGM_WITH_LARGE_PAGES
3398 /* Large page case only. */
3399 case PGMPOOLKIND_EPT_PD_FOR_PHYS:
3400 {
3401 Assert(pVM->pgm.s.fNestedPaging);
3402
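            /* Note: large page entries are always cleared here; there is no keep/update path. */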
3403 const uint64_t u64 = PGM_PAGE_GET_HCPHYS(pPhysPage) | X86_PDE4M_P | X86_PDE4M_PS;
3404 PEPTPD pPD = (PEPTPD)PGMPOOL_PAGE_2_PTR(pVM, pPage);
3405
3406 if ((pPD->a[iPte].u & (EPT_PDE2M_PG_MASK | X86_PDE4M_P | X86_PDE4M_PS)) == u64)
3407 {
3408 Log4(("pgmPoolTrackFlushGCPhysPTs: i=%d pde=%RX64\n", iPte, pPD->a[iPte]));
3409 STAM_COUNTER_INC(&pPool->StatTrackFlushEntry);
3410 pPD->a[iPte].u = 0;
3411 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pPD);
3412
3413 /* Update the counter as we're removing references. */
3414 Assert(pPage->cPresent);
3415 Assert(pPool->cPresent);
3416 pPage->cPresent--;
3417 pPool->cPresent--;
3418
3419 return fRet;
3420 }
3421# ifdef LOG_ENABLED
3422 Log(("iFirstPresent=%d cPresent=%d\n", pPage->iFirstPresent, pPage->cPresent));
3423 for (unsigned i = 0, cFound = 0; i < RT_ELEMENTS(pPD->a); i++)
3424 if ((pPD->a[i].u & (EPT_PDE2M_PG_MASK | X86_PDE4M_P | X86_PDE4M_PS)) == u64)
3425 Log(("i=%d cFound=%d\n", i, ++cFound));
3426# endif
3427 AssertFatalMsgFailed(("iFirstPresent=%d cPresent=%d\n", pPage->iFirstPresent, pPage->cPresent));
3428 /*PGM_DYNMAP_UNUSED_HINT_VM(pVM, pPD);*/
3429 break;
3430 }
3431
3432 /* AMD-V nested paging */ /** @todo merge with EPT as we only check the parts that are identical. */
3433 case PGMPOOLKIND_PAE_PD_PHYS:
3434 {
3435 Assert(pVM->pgm.s.fNestedPaging);
3436
3437 const uint64_t u64 = PGM_PAGE_GET_HCPHYS(pPhysPage) | X86_PDE4M_P | X86_PDE4M_PS;
3438 PX86PD pPD = (PX86PD)PGMPOOL_PAGE_2_PTR(pVM, pPage);
3439
3440 if ((pPD->a[iPte].u & (X86_PDE2M_PAE_PG_MASK | X86_PDE4M_P | X86_PDE4M_PS)) == u64)
3441 {
3442 Log4(("pgmPoolTrackFlushGCPhysPTs: i=%d pde=%RX64\n", iPte, pPD->a[iPte]));
3443 STAM_COUNTER_INC(&pPool->StatTrackFlushEntry);
3444 pPD->a[iPte].u = 0;
3445 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pPD);
3446
3447 /* Update the counter as we're removing references. */
3448 Assert(pPage->cPresent);
3449 Assert(pPool->cPresent);
3450 pPage->cPresent--;
3451 pPool->cPresent--;
3452 return fRet;
3453 }
3454# ifdef LOG_ENABLED
3455 Log(("iFirstPresent=%d cPresent=%d\n", pPage->iFirstPresent, pPage->cPresent));
3456 for (unsigned i = 0, cFound = 0; i < RT_ELEMENTS(pPD->a); i++)
3457 if ((pPD->a[i].u & (X86_PDE2M_PAE_PG_MASK | X86_PDE4M_P | X86_PDE4M_PS)) == u64)
3458 Log(("i=%d cFound=%d\n", i, ++cFound));
3459# endif
3460 AssertFatalMsgFailed(("iFirstPresent=%d cPresent=%d\n", pPage->iFirstPresent, pPage->cPresent));
3461 /*PGM_DYNMAP_UNUSED_HINT_VM(pVM, pPD);*/
3462 break;
3463 }
3464#endif /* PGM_WITH_LARGE_PAGES */
3465
3466 default:
3467 AssertFatalMsgFailed(("enmKind=%d iShw=%d\n", pPage->enmKind, iShw));
3468 }
3469
3470 /* not reached. */
3471#ifndef _MSC_VER
3472 return fRet;
3473#endif
3474}
3475
3476
3477/**
3478 * Scans one shadow page table for mappings of a physical page.
3479 *
3480 * @param pVM The cross context VM structure.
3481 * @param pPhysPage The guest page in question.
3482 * @param fFlushPTEs Flush PTEs or allow them to be updated (e.g. in case of an RW bit change)
3483 * @param iShw The shadow page table.
3484 */
3485static void pgmPoolTrackFlushGCPhysPT(PVM pVM, PPGMPAGE pPhysPage, bool fFlushPTEs, uint16_t iShw)
3486{
3487 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool); NOREF(pPool);
3488
3489    /* We should only come here when there's only one reference to this physical page. */
3490 Assert(PGMPOOL_TD_GET_CREFS(PGM_PAGE_GET_TRACKING(pPhysPage)) == 1);
3491
3492 Log2(("pgmPoolTrackFlushGCPhysPT: pPhysPage=%RHp iShw=%d\n", PGM_PAGE_GET_HCPHYS(pPhysPage), iShw));
3493 STAM_PROFILE_START(&pPool->StatTrackFlushGCPhysPT, f);
3494 bool fKeptPTEs = pgmPoolTrackFlushGCPhysPTInt(pVM, pPhysPage, fFlushPTEs, iShw, PGM_PAGE_GET_PTE_INDEX(pPhysPage));
3495 if (!fKeptPTEs)
3496 PGM_PAGE_SET_TRACKING(pVM, pPhysPage, 0);
3497 STAM_PROFILE_STOP(&pPool->StatTrackFlushGCPhysPT, f);
3498}
3499
3500
3501/**
3502 * Flushes a list of shadow page tables mapping the same physical page.
3503 *
3504 * @param pVM The cross context VM structure.
3505 * @param pPhysPage The guest page in question.
3506 * @param fFlushPTEs Flush PTEs or allow them to be updated (e.g. in case of an RW bit change)
3507 * @param iPhysExt The physical cross reference extent list to flush.
3508 */
3509static void pgmPoolTrackFlushGCPhysPTs(PVM pVM, PPGMPAGE pPhysPage, bool fFlushPTEs, uint16_t iPhysExt)
3510{
3511 PGM_LOCK_ASSERT_OWNER(pVM);
3512 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
3513 bool fKeepList = false;
3514
3515 STAM_PROFILE_START(&pPool->StatTrackFlushGCPhysPTs, f);
3516 Log2(("pgmPoolTrackFlushGCPhysPTs: pPhysPage=%RHp iPhysExt=%u\n", PGM_PAGE_GET_HCPHYS(pPhysPage), iPhysExt));
3517
3518 const uint16_t iPhysExtStart = iPhysExt;
3519 PPGMPOOLPHYSEXT pPhysExt;
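    /* Walk the whole extent chain, flushing or updating every shadow PT entry it references. */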
3520 do
3521 {
3522 Assert(iPhysExt < pPool->cMaxPhysExts);
3523 pPhysExt = &pPool->CTX_SUFF(paPhysExts)[iPhysExt];
3524 for (unsigned i = 0; i < RT_ELEMENTS(pPhysExt->aidx); i++)
3525 {
3526 if (pPhysExt->aidx[i] != NIL_PGMPOOL_IDX)
3527 {
3528 bool fKeptPTEs = pgmPoolTrackFlushGCPhysPTInt(pVM, pPhysPage, fFlushPTEs, pPhysExt->aidx[i], pPhysExt->apte[i]);
3529 if (!fKeptPTEs)
3530 {
3531 pPhysExt->aidx[i] = NIL_PGMPOOL_IDX;
3532 pPhysExt->apte[i] = NIL_PGMPOOL_PHYSEXT_IDX_PTE;
3533 }
3534 else
3535 fKeepList = true;
3536 }
3537 }
3538 /* next */
3539 iPhysExt = pPhysExt->iNext;
3540 } while (iPhysExt != NIL_PGMPOOL_PHYSEXT_INDEX);
3541
3542 if (!fKeepList)
3543 {
3544 /* insert the list into the free list and clear the ram range entry. */
3545 pPhysExt->iNext = pPool->iPhysExtFreeHead;
3546 pPool->iPhysExtFreeHead = iPhysExtStart;
3547 /* Invalidate the tracking data. */
3548 PGM_PAGE_SET_TRACKING(pVM, pPhysPage, 0);
3549 }
3550
3551 STAM_PROFILE_STOP(&pPool->StatTrackFlushGCPhysPTs, f);
3552}
3553
3554
3555/**
3556 * Flushes all shadow page table mappings of the given guest page.
3557 *
3558 * This is typically called when the host page backing the guest one has been
3559 * replaced or when the page protection was changed due to a guest access
3560 * caught by the monitoring.
3561 *
3562 * @returns VBox status code.
3563 * @retval  VINF_SUCCESS if all references have been successfully cleared.
3564 * @retval VINF_PGM_SYNC_CR3 if we're better off with a CR3 sync and a page
3565 * pool cleaning. FF and sync flags are set.
3566 *
3567 * @param pVM The cross context VM structure.
3568 * @param GCPhysPage GC physical address of the page in question
3569 * @param pPhysPage The guest page in question.
3570 * @param fFlushPTEs Flush PTEs or allow them to be updated (e.g. in case of an RW bit change)
3571 * @param pfFlushTLBs This is set to @a true if the shadow TLBs should be
3572 *                      flushed; it is NOT touched if this isn't necessary.
3573 *                      The caller MUST initialize this to @a false.
3574 */
3575int pgmPoolTrackUpdateGCPhys(PVM pVM, RTGCPHYS GCPhysPage, PPGMPAGE pPhysPage, bool fFlushPTEs, bool *pfFlushTLBs)
3576{
3577 PVMCPU pVCpu = VMMGetCpu(pVM);
3578 pgmLock(pVM);
3579 int rc = VINF_SUCCESS;
3580
3581#ifdef PGM_WITH_LARGE_PAGES
3582 /* Is this page part of a large page? */
3583 if (PGM_PAGE_GET_PDE_TYPE(pPhysPage) == PGM_PAGE_PDE_TYPE_PDE)
3584 {
3585 RTGCPHYS GCPhysBase = GCPhysPage & X86_PDE2M_PAE_PG_MASK;
3586 GCPhysPage &= X86_PDE_PAE_PG_MASK;
3587
3588 /* Fetch the large page base. */
3589 PPGMPAGE pLargePage;
3590 if (GCPhysBase != GCPhysPage)
3591 {
3592 pLargePage = pgmPhysGetPage(pVM, GCPhysBase);
3593 AssertFatal(pLargePage);
3594 }
3595 else
3596 pLargePage = pPhysPage;
3597
3598 Log(("pgmPoolTrackUpdateGCPhys: update large page PDE for %RGp (%RGp)\n", GCPhysBase, GCPhysPage));
3599
3600 if (PGM_PAGE_GET_PDE_TYPE(pLargePage) == PGM_PAGE_PDE_TYPE_PDE)
3601 {
3602 /* Mark the large page as disabled as we need to break it up to change a single page in the 2 MB range. */
3603 PGM_PAGE_SET_PDE_TYPE(pVM, pLargePage, PGM_PAGE_PDE_TYPE_PDE_DISABLED);
3604 pVM->pgm.s.cLargePagesDisabled++;
3605
3606            /* Update the base as *only* that one has a reference and there's only one PDE to clear. */
3607 rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhysBase, pLargePage, fFlushPTEs, pfFlushTLBs);
3608
3609 *pfFlushTLBs = true;
3610 pgmUnlock(pVM);
3611 return rc;
3612 }
3613 }
3614#else
3615 NOREF(GCPhysPage);
3616#endif /* PGM_WITH_LARGE_PAGES */
3617
3618 const uint16_t u16 = PGM_PAGE_GET_TRACKING(pPhysPage);
3619 if (u16)
3620 {
3621 /*
3622 * The zero page is currently screwing up the tracking and we'll
3623 * have to flush the whole shebang. Unless VBOX_WITH_NEW_LAZY_PAGE_ALLOC
3624 * is defined, zero pages won't normally be mapped. Some kind of solution
3625 * will be needed for this problem of course, but it will have to wait...
3626 */
3627 if ( PGM_PAGE_IS_ZERO(pPhysPage)
3628 || PGM_PAGE_IS_BALLOONED(pPhysPage))
3629 rc = VINF_PGM_GCPHYS_ALIASED;
3630 else
3631 {
3632# if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC) /** @todo we can drop this now. */
3633 /* Start a subset here because pgmPoolTrackFlushGCPhysPTsSlow and
3634 pgmPoolTrackFlushGCPhysPTs will/may kill the pool otherwise. */
3635 uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu);
3636# endif
3637
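            /* Dispatch on the tracking data: a single cross reference names the shadow
               page table directly, a phys ext index refers to a list of references, and
               the overflow marker forces the slow scan of the whole pool. */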
3638 if (PGMPOOL_TD_GET_CREFS(u16) != PGMPOOL_TD_CREFS_PHYSEXT)
3639 {
3640 Assert(PGMPOOL_TD_GET_CREFS(u16) == 1);
3641 pgmPoolTrackFlushGCPhysPT(pVM,
3642 pPhysPage,
3643 fFlushPTEs,
3644 PGMPOOL_TD_GET_IDX(u16));
3645 }
3646 else if (u16 != PGMPOOL_TD_MAKE(PGMPOOL_TD_CREFS_PHYSEXT, PGMPOOL_TD_IDX_OVERFLOWED))
3647 pgmPoolTrackFlushGCPhysPTs(pVM, pPhysPage, fFlushPTEs, PGMPOOL_TD_GET_IDX(u16));
3648 else
3649 rc = pgmPoolTrackFlushGCPhysPTsSlow(pVM, pPhysPage);
3650 *pfFlushTLBs = true;
3651
3652# if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
3653 PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset);
3654# endif
3655 }
3656 }
3657
3658 if (rc == VINF_PGM_GCPHYS_ALIASED)
3659 {
3660 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
3661 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3662 rc = VINF_PGM_SYNC_CR3;
3663 }
3664 pgmUnlock(pVM);
3665 return rc;
3666}
3667
3668
3669/**
3670 * Scans all shadow page tables for mappings of a physical page.
3671 *
3672 * This may be slow, but it's most likely more efficient than cleaning
3673 * out the entire page pool / cache.
3674 *
3675 * @returns VBox status code.
3676 * @retval  VINF_SUCCESS if all references have been successfully cleared.
3677 * @retval VINF_PGM_GCPHYS_ALIASED if we're better off with a CR3 sync and
3678 * a page pool cleaning.
3679 *
3680 * @param pVM The cross context VM structure.
3681 * @param pPhysPage The guest page in question.
3682 */
3683int pgmPoolTrackFlushGCPhysPTsSlow(PVM pVM, PPGMPAGE pPhysPage)
3684{
3685 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
3686 STAM_PROFILE_START(&pPool->StatTrackFlushGCPhysPTsSlow, s);
3687 LogFlow(("pgmPoolTrackFlushGCPhysPTsSlow: cUsedPages=%d cPresent=%d pPhysPage=%R[pgmpage]\n",
3688 pPool->cUsedPages, pPool->cPresent, pPhysPage));
3689
3690 /*
3691 * There is a limit to what makes sense.
3692 */
3693 if ( pPool->cPresent > 1024
3694 && pVM->cCpus == 1)
3695 {
3696 LogFlow(("pgmPoolTrackFlushGCPhysPTsSlow: giving up... (cPresent=%d)\n", pPool->cPresent));
3697 STAM_PROFILE_STOP(&pPool->StatTrackFlushGCPhysPTsSlow, s);
3698 return VINF_PGM_GCPHYS_ALIASED;
3699 }
3700
3701 /*
3702     * Iterate all the pages until we've encountered all those in use.
3703     * This is a simple but not quite optimal solution.
3704 */
3705 const uint64_t u64 = PGM_PAGE_GET_HCPHYS(pPhysPage) | X86_PTE_P; /** @todo drop X86_PTE_P here as we always test if present separately, anyway. */
3706 const uint32_t u32 = u64; /** @todo move into the 32BIT_PT_xx case */
3707 unsigned cLeft = pPool->cUsedPages;
3708 unsigned iPage = pPool->cCurPages;
3709 while (--iPage >= PGMPOOL_IDX_FIRST)
3710 {
3711 PPGMPOOLPAGE pPage = &pPool->aPages[iPage];
3712 if ( pPage->GCPhys != NIL_RTGCPHYS
3713 && pPage->cPresent)
3714 {
3715 switch (pPage->enmKind)
3716 {
3717 /*
3718 * We only care about shadow page tables.
3719 */
3720 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT:
3721 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB:
3722 case PGMPOOLKIND_32BIT_PT_FOR_PHYS:
3723 {
3724 unsigned cPresent = pPage->cPresent;
3725 PX86PT pPT = (PX86PT)PGMPOOL_PAGE_2_PTR(pVM, pPage);
3726 for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pPT->a); i++)
3727 if (pPT->a[i].n.u1Present)
3728 {
3729 if ((pPT->a[i].u & (X86_PTE_PG_MASK | X86_PTE_P)) == u32)
3730 {
3731 //Log4(("pgmPoolTrackFlushGCPhysPTsSlow: idx=%d i=%d pte=%RX32\n", iPage, i, pPT->a[i]));
3732 pPT->a[i].u = 0;
3733
3734 /* Update the counter as we're removing references. */
3735 Assert(pPage->cPresent);
3736 Assert(pPool->cPresent);
3737 pPage->cPresent--;
3738 pPool->cPresent--;
3739 }
3740 if (!--cPresent)
3741 break;
3742 }
3743 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pPT);
3744 break;
3745 }
3746
3747 case PGMPOOLKIND_PAE_PT_FOR_32BIT_PT:
3748 case PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB:
3749 case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
3750 case PGMPOOLKIND_PAE_PT_FOR_PAE_2MB:
3751 case PGMPOOLKIND_PAE_PT_FOR_PHYS:
3752 {
3753 unsigned cPresent = pPage->cPresent;
3754 PPGMSHWPTPAE pPT = (PPGMSHWPTPAE)PGMPOOL_PAGE_2_PTR(pVM, pPage);
3755 for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pPT->a); i++)
3756 if (PGMSHWPTEPAE_IS_P(pPT->a[i]))
3757 {
3758 if ((PGMSHWPTEPAE_GET_U(pPT->a[i]) & (X86_PTE_PAE_PG_MASK | X86_PTE_P)) == u64)
3759 {
3760 //Log4(("pgmPoolTrackFlushGCPhysPTsSlow: idx=%d i=%d pte=%RX64\n", iPage, i, pPT->a[i]));
3761 PGMSHWPTEPAE_SET(pPT->a[i], 0); /// @todo why not atomic?
3762
3763 /* Update the counter as we're removing references. */
3764 Assert(pPage->cPresent);
3765 Assert(pPool->cPresent);
3766 pPage->cPresent--;
3767 pPool->cPresent--;
3768 }
3769 if (!--cPresent)
3770 break;
3771 }
3772 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pPT);
3773 break;
3774 }
3775#ifndef IN_RC
3776 case PGMPOOLKIND_EPT_PT_FOR_PHYS:
3777 {
3778 unsigned cPresent = pPage->cPresent;
3779 PEPTPT pPT = (PEPTPT)PGMPOOL_PAGE_2_PTR(pVM, pPage);
3780 for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pPT->a); i++)
3781 if (pPT->a[i].n.u1Present)
3782 {
3783 if ((pPT->a[i].u & (EPT_PTE_PG_MASK | X86_PTE_P)) == u64)
3784 {
3785 //Log4(("pgmPoolTrackFlushGCPhysPTsSlow: idx=%d i=%d pte=%RX64\n", iPage, i, pPT->a[i]));
3786 pPT->a[i].u = 0;
3787
3788 /* Update the counter as we're removing references. */
3789 Assert(pPage->cPresent);
3790 Assert(pPool->cPresent);
3791 pPage->cPresent--;
3792 pPool->cPresent--;
3793 }
3794 if (!--cPresent)
3795 break;
3796 }
3797 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pPT);
3798 break;
3799 }
3800#endif
3801 }
3802 if (!--cLeft)
3803 break;
3804 }
3805 }
3806
3807 PGM_PAGE_SET_TRACKING(pVM, pPhysPage, 0);
3808 STAM_PROFILE_STOP(&pPool->StatTrackFlushGCPhysPTsSlow, s);
3809
3810 /*
3811 * There is a limit to what makes sense. The above search is very expensive, so force a pgm pool flush.
3812 */
3813 if (pPool->cPresent > 1024)
3814 {
3815 LogFlow(("pgmPoolTrackFlushGCPhysPTsSlow: giving up... (cPresent=%d)\n", pPool->cPresent));
3816 return VINF_PGM_GCPHYS_ALIASED;
3817 }
3818
3819 return VINF_SUCCESS;
3820}
3821
3822
3823/**
3824 * Clears the user entry in a user table.
3825 *
3826 * This is used to remove all references to a page when flushing it.
3827 */
3828static void pgmPoolTrackClearPageUser(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PCPGMPOOLUSER pUser)
3829{
3830 Assert(pUser->iUser != NIL_PGMPOOL_IDX);
3831 Assert(pUser->iUser < pPool->cCurPages);
3832 uint32_t iUserTable = pUser->iUserTable;
3833
3834 /*
3835 * Map the user page. Ignore references made by fictitious pages.
3836 */
3837 PPGMPOOLPAGE pUserPage = &pPool->aPages[pUser->iUser];
3838 LogFlow(("pgmPoolTrackClearPageUser: clear %x in %s (%RGp) (flushing %s)\n", iUserTable, pgmPoolPoolKindToStr(pUserPage->enmKind), pUserPage->Core.Key, pgmPoolPoolKindToStr(pPage->enmKind)));
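    /* The user page is accessed either as 32-bit or 64-bit entries, depending on its kind. */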
3839 union
3840 {
3841 uint64_t *pau64;
3842 uint32_t *pau32;
3843 } u;
3844 if (pUserPage->idx < PGMPOOL_IDX_FIRST)
3845 {
3846 Assert(!pUserPage->pvPageR3);
3847 return;
3848 }
3849 u.pau64 = (uint64_t *)PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pUserPage);
3850
3851
3852 /* Safety precaution in case we change the paging for other modes too in the future. */
3853 Assert(!pgmPoolIsPageLocked(pPage));
3854
3855#ifdef VBOX_STRICT
3856 /*
3857 * Some sanity checks.
3858 */
3859 switch (pUserPage->enmKind)
3860 {
3861 case PGMPOOLKIND_32BIT_PD:
3862 case PGMPOOLKIND_32BIT_PD_PHYS:
3863 Assert(iUserTable < X86_PG_ENTRIES);
3864 break;
3865 case PGMPOOLKIND_PAE_PDPT:
3866 case PGMPOOLKIND_PAE_PDPT_FOR_32BIT:
3867 case PGMPOOLKIND_PAE_PDPT_PHYS:
3868 Assert(iUserTable < 4);
3869 Assert(!(u.pau64[iUserTable] & PGM_PLXFLAGS_PERMANENT));
3870 break;
3871 case PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD:
3872 case PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD:
3873 case PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD:
3874 case PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD:
3875 case PGMPOOLKIND_PAE_PD_FOR_PAE_PD:
3876 case PGMPOOLKIND_PAE_PD_PHYS:
3877 Assert(iUserTable < X86_PG_PAE_ENTRIES);
3878 break;
3879 case PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD:
3880 Assert(iUserTable < X86_PG_PAE_ENTRIES);
3881 Assert(!(u.pau64[iUserTable] & PGM_PDFLAGS_MAPPING));
3882 break;
3883 case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
3884 Assert(iUserTable < X86_PG_PAE_ENTRIES);
3885 Assert(!(u.pau64[iUserTable] & PGM_PLXFLAGS_PERMANENT));
3886 break;
3887 case PGMPOOLKIND_64BIT_PML4:
3888 Assert(!(u.pau64[iUserTable] & PGM_PLXFLAGS_PERMANENT));
3889 /* GCPhys >> PAGE_SHIFT is the index here */
3890 break;
3891 case PGMPOOLKIND_64BIT_PDPT_FOR_PHYS:
3892 case PGMPOOLKIND_64BIT_PD_FOR_PHYS:
3893 Assert(iUserTable < X86_PG_PAE_ENTRIES);
3894 break;
3895
3896 case PGMPOOLKIND_EPT_PDPT_FOR_PHYS:
3897 case PGMPOOLKIND_EPT_PD_FOR_PHYS:
3898 Assert(iUserTable < X86_PG_PAE_ENTRIES);
3899 break;
3900
3901 case PGMPOOLKIND_ROOT_NESTED:
3902 Assert(iUserTable < X86_PG_PAE_ENTRIES);
3903 break;
3904
3905 default:
3906 AssertMsgFailed(("enmKind=%d\n", pUserPage->enmKind));
3907 break;
3908 }
3909#endif /* VBOX_STRICT */
3910
3911 /*
3912 * Clear the entry in the user page.
3913 */
3914 switch (pUserPage->enmKind)
3915 {
3916 /* 32-bit entries */
3917 case PGMPOOLKIND_32BIT_PD:
3918 case PGMPOOLKIND_32BIT_PD_PHYS:
3919 ASMAtomicWriteU32(&u.pau32[iUserTable], 0);
3920 break;
3921
3922 /* 64-bit entries */
3923 case PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD:
3924 case PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD:
3925 case PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD:
3926 case PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD:
3927 case PGMPOOLKIND_PAE_PD_FOR_PAE_PD:
3928#ifdef IN_RC
3929 /*
3930            * In 32-bit PAE mode we *must* invalidate the TLB when changing a
3931 * PDPT entry; the CPU fetches them only during cr3 load, so any
3932 * non-present PDPT will continue to cause page faults.
3933 */
3934 ASMReloadCR3();
3935 /* no break */
3936#endif
3937 case PGMPOOLKIND_PAE_PD_PHYS:
3938 case PGMPOOLKIND_PAE_PDPT_PHYS:
3939 case PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD:
3940 case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
3941 case PGMPOOLKIND_64BIT_PML4:
3942 case PGMPOOLKIND_64BIT_PDPT_FOR_PHYS:
3943 case PGMPOOLKIND_64BIT_PD_FOR_PHYS:
3944 case PGMPOOLKIND_PAE_PDPT:
3945 case PGMPOOLKIND_PAE_PDPT_FOR_32BIT:
3946 case PGMPOOLKIND_ROOT_NESTED:
3947 case PGMPOOLKIND_EPT_PDPT_FOR_PHYS:
3948 case PGMPOOLKIND_EPT_PD_FOR_PHYS:
3949 ASMAtomicWriteU64(&u.pau64[iUserTable], 0);
3950 break;
3951
3952 default:
3953 AssertFatalMsgFailed(("enmKind=%d iUser=%d iUserTable=%#x\n", pUserPage->enmKind, pUser->iUser, pUser->iUserTable));
3954 }
3955 PGM_DYNMAP_UNUSED_HINT_VM(pPool->CTX_SUFF(pVM), u.pau64);
3956}
3957
3958
3959/**
3960 * Clears all users of a page.
3961 */
3962static void pgmPoolTrackClearPageUsers(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
3963{
3964 /*
3965 * Free all the user records.
3966 */
3967 LogFlow(("pgmPoolTrackClearPageUsers %RGp\n", pPage->GCPhys));
3968
3969 PPGMPOOLUSER paUsers = pPool->CTX_SUFF(paUsers);
3970 uint16_t i = pPage->iUserHead;
3971 while (i != NIL_PGMPOOL_USER_INDEX)
3972 {
3973        /* Clear entry in user table. */
3974 pgmPoolTrackClearPageUser(pPool, pPage, &paUsers[i]);
3975
3976 /* Free it. */
3977 const uint16_t iNext = paUsers[i].iNext;
3978 paUsers[i].iUser = NIL_PGMPOOL_IDX;
3979 paUsers[i].iNext = pPool->iUserFreeHead;
3980 pPool->iUserFreeHead = i;
3981
3982 /* Next. */
3983 i = iNext;
3984 }
3985 pPage->iUserHead = NIL_PGMPOOL_USER_INDEX;
3986}
3987
3988
3989/**
3990 * Allocates a new physical cross reference extent.
3991 *
3992 * @returns Pointer to the allocated extent on success. NULL if we're out of them.
3993 * @param pVM The cross context VM structure.
3994 * @param piPhysExt Where to store the phys ext index.
3995 */
3996PPGMPOOLPHYSEXT pgmPoolTrackPhysExtAlloc(PVM pVM, uint16_t *piPhysExt)
3997{
3998 PGM_LOCK_ASSERT_OWNER(pVM);
3999 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
4000 uint16_t iPhysExt = pPool->iPhysExtFreeHead;
4001 if (iPhysExt == NIL_PGMPOOL_PHYSEXT_INDEX)
4002 {
4003 STAM_COUNTER_INC(&pPool->StamTrackPhysExtAllocFailures);
4004 return NULL;
4005 }
4006 PPGMPOOLPHYSEXT pPhysExt = &pPool->CTX_SUFF(paPhysExts)[iPhysExt];
4007 pPool->iPhysExtFreeHead = pPhysExt->iNext;
4008 pPhysExt->iNext = NIL_PGMPOOL_PHYSEXT_INDEX;
4009 *piPhysExt = iPhysExt;
4010 return pPhysExt;
4011}
4012
4013
4014/**
4015 * Frees a physical cross reference extent.
4016 *
4017 * @param pVM The cross context VM structure.
4018 * @param iPhysExt The extent to free.
4019 */
4020void pgmPoolTrackPhysExtFree(PVM pVM, uint16_t iPhysExt)
4021{
4022 PGM_LOCK_ASSERT_OWNER(pVM);
4023 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
4024 Assert(iPhysExt < pPool->cMaxPhysExts);
4025 PPGMPOOLPHYSEXT pPhysExt = &pPool->CTX_SUFF(paPhysExts)[iPhysExt];
4026 for (unsigned i = 0; i < RT_ELEMENTS(pPhysExt->aidx); i++)
4027 {
4028 pPhysExt->aidx[i] = NIL_PGMPOOL_IDX;
4029 pPhysExt->apte[i] = NIL_PGMPOOL_PHYSEXT_IDX_PTE;
4030 }
4031 pPhysExt->iNext = pPool->iPhysExtFreeHead;
4032 pPool->iPhysExtFreeHead = iPhysExt;
4033}
4034
4035
4036/**
4037 * Frees a list of physical cross reference extents.
4038 *
4039 * @param pVM The cross context VM structure.
4040 * @param   iPhysExt    The index of the first extent in the list to free.
4041 */
4042void pgmPoolTrackPhysExtFreeList(PVM pVM, uint16_t iPhysExt)
4043{
4044 PGM_LOCK_ASSERT_OWNER(pVM);
4045 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
4046
4047 const uint16_t iPhysExtStart = iPhysExt;
4048 PPGMPOOLPHYSEXT pPhysExt;
4049 do
4050 {
4051 Assert(iPhysExt < pPool->cMaxPhysExts);
4052 pPhysExt = &pPool->CTX_SUFF(paPhysExts)[iPhysExt];
4053 for (unsigned i = 0; i < RT_ELEMENTS(pPhysExt->aidx); i++)
4054 {
4055 pPhysExt->aidx[i] = NIL_PGMPOOL_IDX;
4056 pPhysExt->apte[i] = NIL_PGMPOOL_PHYSEXT_IDX_PTE;
4057 }
4058
4059 /* next */
4060 iPhysExt = pPhysExt->iNext;
4061 } while (iPhysExt != NIL_PGMPOOL_PHYSEXT_INDEX);
4062
4063 pPhysExt->iNext = pPool->iPhysExtFreeHead;
4064 pPool->iPhysExtFreeHead = iPhysExtStart;
4065}
4066
4067
4068/**
4069 * Insert a reference into a list of physical cross reference extents.
4070 *
4071 * @returns The new tracking data for PGMPAGE.
4072 *
4073 * @param pVM The cross context VM structure.
4074 * @param iPhysExt The physical extent index of the list head.
4075 * @param iShwPT The shadow page table index.
4076 * @param iPte Page table entry
4077 *
4078 */
4079static uint16_t pgmPoolTrackPhysExtInsert(PVM pVM, uint16_t iPhysExt, uint16_t iShwPT, uint16_t iPte)
4080{
4081 PGM_LOCK_ASSERT_OWNER(pVM);
4082 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
4083 PPGMPOOLPHYSEXT paPhysExts = pPool->CTX_SUFF(paPhysExts);
4084
4085 /*
4086 * Special common cases.
4087 */
4088 if (paPhysExts[iPhysExt].aidx[1] == NIL_PGMPOOL_IDX)
4089 {
4090 paPhysExts[iPhysExt].aidx[1] = iShwPT;
4091 paPhysExts[iPhysExt].apte[1] = iPte;
4092 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatTrackAliasedMany);
4093 LogFlow(("pgmPoolTrackPhysExtInsert: %d:{,%d pte %d,}\n", iPhysExt, iShwPT, iPte));
4094 return PGMPOOL_TD_MAKE(PGMPOOL_TD_CREFS_PHYSEXT, iPhysExt);
4095 }
4096 if (paPhysExts[iPhysExt].aidx[2] == NIL_PGMPOOL_IDX)
4097 {
4098 paPhysExts[iPhysExt].aidx[2] = iShwPT;
4099 paPhysExts[iPhysExt].apte[2] = iPte;
4100 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatTrackAliasedMany);
4101 LogFlow(("pgmPoolTrackPhysExtInsert: %d:{,,%d pte %d}\n", iPhysExt, iShwPT, iPte));
4102 return PGMPOOL_TD_MAKE(PGMPOOL_TD_CREFS_PHYSEXT, iPhysExt);
4103 }
4104 AssertCompile(RT_ELEMENTS(paPhysExts[iPhysExt].aidx) == 3);
4105
4106 /*
4107 * General treatment.
4108 */
4109 const uint16_t iPhysExtStart = iPhysExt;
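    /* Cap the list walk; once the chain gets this long we give up and fall back
       to the cheaper "overflowed" tracking state. */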
4110 unsigned cMax = 15;
4111 for (;;)
4112 {
4113 Assert(iPhysExt < pPool->cMaxPhysExts);
4114 for (unsigned i = 0; i < RT_ELEMENTS(paPhysExts[iPhysExt].aidx); i++)
4115 if (paPhysExts[iPhysExt].aidx[i] == NIL_PGMPOOL_IDX)
4116 {
4117 paPhysExts[iPhysExt].aidx[i] = iShwPT;
4118 paPhysExts[iPhysExt].apte[i] = iPte;
4119 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatTrackAliasedMany);
4120 LogFlow(("pgmPoolTrackPhysExtInsert: %d:{%d pte %d} i=%d cMax=%d\n", iPhysExt, iShwPT, iPte, i, cMax));
4121 return PGMPOOL_TD_MAKE(PGMPOOL_TD_CREFS_PHYSEXT, iPhysExtStart);
4122 }
4123 if (!--cMax)
4124 {
4125 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatTrackOverflows);
4126 pgmPoolTrackPhysExtFreeList(pVM, iPhysExtStart);
4127 LogFlow(("pgmPoolTrackPhysExtInsert: overflow (1) iShwPT=%d\n", iShwPT));
4128 return PGMPOOL_TD_MAKE(PGMPOOL_TD_CREFS_PHYSEXT, PGMPOOL_TD_IDX_OVERFLOWED);
4129 }
4130
4131 /* advance */
4132 iPhysExt = paPhysExts[iPhysExt].iNext;
4133 if (iPhysExt == NIL_PGMPOOL_PHYSEXT_INDEX)
4134 break;
4135 }
4136
4137 /*
4138 * Add another extent to the list.
4139 */
4140 PPGMPOOLPHYSEXT pNew = pgmPoolTrackPhysExtAlloc(pVM, &iPhysExt);
4141 if (!pNew)
4142 {
4143 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatTrackNoExtentsLeft);
4144 pgmPoolTrackPhysExtFreeList(pVM, iPhysExtStart);
4145 LogFlow(("pgmPoolTrackPhysExtInsert: pgmPoolTrackPhysExtAlloc failed iShwPT=%d\n", iShwPT));
4146 return PGMPOOL_TD_MAKE(PGMPOOL_TD_CREFS_PHYSEXT, PGMPOOL_TD_IDX_OVERFLOWED);
4147 }
4148 pNew->iNext = iPhysExtStart;
4149 pNew->aidx[0] = iShwPT;
4150 pNew->apte[0] = iPte;
4151 LogFlow(("pgmPoolTrackPhysExtInsert: added new extent %d:{%d pte %d}->%d\n", iPhysExt, iShwPT, iPte, iPhysExtStart));
4152 return PGMPOOL_TD_MAKE(PGMPOOL_TD_CREFS_PHYSEXT, iPhysExt);
4153}
4154
4155
4156/**
4157 * Add a reference to a guest physical page where extents are in use.
4158 *
4159 * @returns The new tracking data for PGMPAGE.
4160 *
4161 * @param pVM The cross context VM structure.
4162 * @param pPhysPage Pointer to the aPages entry in the ram range.
4163 * @param u16 The ram range flags (top 16-bits).
4164 * @param iShwPT The shadow page table index.
4165 * @param iPte Page table entry
4166 */
4167uint16_t pgmPoolTrackPhysExtAddref(PVM pVM, PPGMPAGE pPhysPage, uint16_t u16, uint16_t iShwPT, uint16_t iPte)
4168{
4169 pgmLock(pVM);
4170 if (PGMPOOL_TD_GET_CREFS(u16) != PGMPOOL_TD_CREFS_PHYSEXT)
4171 {
4172 /*
4173 * Convert to extent list.
4174 */
4175 Assert(PGMPOOL_TD_GET_CREFS(u16) == 1);
4176 uint16_t iPhysExt;
4177 PPGMPOOLPHYSEXT pPhysExt = pgmPoolTrackPhysExtAlloc(pVM, &iPhysExt);
4178 if (pPhysExt)
4179 {
4180 LogFlow(("pgmPoolTrackPhysExtAddref: new extent: %d:{%d, %d}\n", iPhysExt, PGMPOOL_TD_GET_IDX(u16), iShwPT));
4181 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatTrackAliased);
4182 pPhysExt->aidx[0] = PGMPOOL_TD_GET_IDX(u16);
4183 pPhysExt->apte[0] = PGM_PAGE_GET_PTE_INDEX(pPhysPage);
4184 pPhysExt->aidx[1] = iShwPT;
4185 pPhysExt->apte[1] = iPte;
4186 u16 = PGMPOOL_TD_MAKE(PGMPOOL_TD_CREFS_PHYSEXT, iPhysExt);
4187 }
4188 else
4189 u16 = PGMPOOL_TD_MAKE(PGMPOOL_TD_CREFS_PHYSEXT, PGMPOOL_TD_IDX_OVERFLOWED);
4190 }
4191 else if (u16 != PGMPOOL_TD_MAKE(PGMPOOL_TD_CREFS_PHYSEXT, PGMPOOL_TD_IDX_OVERFLOWED))
4192 {
4193 /*
4194 * Insert into the extent list.
4195 */
4196 u16 = pgmPoolTrackPhysExtInsert(pVM, PGMPOOL_TD_GET_IDX(u16), iShwPT, iPte);
4197 }
4198 else
4199 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatTrackAliasedLots);
4200 pgmUnlock(pVM);
4201 return u16;
4202}
4203
4204
4205/**
4206 * Clear references to guest physical memory.
4207 *
4208 * @param pPool The pool.
4209 * @param pPage The page.
4210 * @param pPhysPage Pointer to the aPages entry in the ram range.
4211 * @param iPte Shadow PTE index
4212 */
4213void pgmPoolTrackPhysExtDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PPGMPAGE pPhysPage, uint16_t iPte)
4214{
4215 PVM pVM = pPool->CTX_SUFF(pVM);
4216 const unsigned cRefs = PGM_PAGE_GET_TD_CREFS(pPhysPage);
4217 AssertFatalMsg(cRefs == PGMPOOL_TD_CREFS_PHYSEXT, ("cRefs=%d pPhysPage=%R[pgmpage] pPage=%p:{.idx=%d}\n", cRefs, pPhysPage, pPage, pPage->idx));
4218
4219 uint16_t iPhysExt = PGM_PAGE_GET_TD_IDX(pPhysPage);
4220 if (iPhysExt != PGMPOOL_TD_IDX_OVERFLOWED)
4221 {
4222 pgmLock(pVM);
4223
4224 uint16_t iPhysExtPrev = NIL_PGMPOOL_PHYSEXT_INDEX;
4225 PPGMPOOLPHYSEXT paPhysExts = pPool->CTX_SUFF(paPhysExts);
4226 do
4227 {
4228 Assert(iPhysExt < pPool->cMaxPhysExts);
4229
4230 /*
4231 * Look for the shadow page and check if it's all freed.
4232 */
4233 for (unsigned i = 0; i < RT_ELEMENTS(paPhysExts[iPhysExt].aidx); i++)
4234 {
4235 if ( paPhysExts[iPhysExt].aidx[i] == pPage->idx
4236 && paPhysExts[iPhysExt].apte[i] == iPte)
4237 {
4238 paPhysExts[iPhysExt].aidx[i] = NIL_PGMPOOL_IDX;
4239 paPhysExts[iPhysExt].apte[i] = NIL_PGMPOOL_PHYSEXT_IDX_PTE;
4240
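                    /* If the extent still holds other references, leave the list alone. */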
4241 for (i = 0; i < RT_ELEMENTS(paPhysExts[iPhysExt].aidx); i++)
4242 if (paPhysExts[iPhysExt].aidx[i] != NIL_PGMPOOL_IDX)
4243 {
4244 Log2(("pgmPoolTrackPhysExtDerefGCPhys: pPhysPage=%R[pgmpage] idx=%d\n", pPhysPage, pPage->idx));
4245 pgmUnlock(pVM);
4246 return;
4247 }
4248
4249 /* we can free the node. */
4250 const uint16_t iPhysExtNext = paPhysExts[iPhysExt].iNext;
4251 if ( iPhysExtPrev == NIL_PGMPOOL_PHYSEXT_INDEX
4252 && iPhysExtNext == NIL_PGMPOOL_PHYSEXT_INDEX)
4253 {
4254 /* lonely node */
4255 pgmPoolTrackPhysExtFree(pVM, iPhysExt);
4256 Log2(("pgmPoolTrackPhysExtDerefGCPhys: pPhysPage=%R[pgmpage] idx=%d lonely\n", pPhysPage, pPage->idx));
4257 PGM_PAGE_SET_TRACKING(pVM, pPhysPage, 0);
4258 }
4259 else if (iPhysExtPrev == NIL_PGMPOOL_PHYSEXT_INDEX)
4260 {
4261 /* head */
4262 Log2(("pgmPoolTrackPhysExtDerefGCPhys: pPhysPage=%R[pgmpage] idx=%d head\n", pPhysPage, pPage->idx));
4263 PGM_PAGE_SET_TRACKING(pVM, pPhysPage, PGMPOOL_TD_MAKE(PGMPOOL_TD_CREFS_PHYSEXT, iPhysExtNext));
4264 pgmPoolTrackPhysExtFree(pVM, iPhysExt);
4265 }
4266 else
4267 {
4268 /* in list */
4269 Log2(("pgmPoolTrackPhysExtDerefGCPhys: pPhysPage=%R[pgmpage] idx=%d in list\n", pPhysPage, pPage->idx));
4270 paPhysExts[iPhysExtPrev].iNext = iPhysExtNext;
4271 pgmPoolTrackPhysExtFree(pVM, iPhysExt);
4272 }
4273 iPhysExt = iPhysExtNext;
4274 pgmUnlock(pVM);
4275 return;
4276 }
4277 }
4278
4279 /* next */
4280 iPhysExtPrev = iPhysExt;
4281 iPhysExt = paPhysExts[iPhysExt].iNext;
4282 } while (iPhysExt != NIL_PGMPOOL_PHYSEXT_INDEX);
4283
4284 pgmUnlock(pVM);
4285 AssertFatalMsgFailed(("not-found! cRefs=%d pPhysPage=%R[pgmpage] pPage=%p:{.idx=%d}\n", cRefs, pPhysPage, pPage, pPage->idx));
4286 }
4287 else /* nothing to do */
4288 Log2(("pgmPoolTrackPhysExtDerefGCPhys: pPhysPage=%R[pgmpage]\n", pPhysPage));
4289}
4290
4291/**
4292 * Clear references to guest physical memory.
4293 *
4294 * This is the same as pgmPoolTracDerefGCPhysHint except that the guest
4295 * physical address is assumed to be correct, so the linear search can be
4296 * skipped and we can assert at an earlier point.
4297 *
4298 * @param pPool The pool.
4299 * @param pPage The page.
4300 * @param HCPhys The host physical address corresponding to the guest page.
4301 * @param GCPhys The guest physical address corresponding to HCPhys.
4302 * @param iPte Shadow PTE index
4303 */
4304static void pgmPoolTracDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTHCPHYS HCPhys, RTGCPHYS GCPhys, uint16_t iPte)
4305{
4306 /*
4307 * Lookup the page and check if it checks out before derefing it.
4308 */
4309 PVM pVM = pPool->CTX_SUFF(pVM);
4310 PPGMPAGE pPhysPage = pgmPhysGetPage(pVM, GCPhys);
4311 if (pPhysPage)
4312 {
4313 Assert(PGM_PAGE_GET_HCPHYS(pPhysPage));
4314#ifdef LOG_ENABLED
4315 RTHCPHYS HCPhysPage = PGM_PAGE_GET_HCPHYS(pPhysPage);
4316 Log2(("pgmPoolTracDerefGCPhys %RHp vs %RHp\n", HCPhysPage, HCPhys));
4317#endif
4318 if (PGM_PAGE_GET_HCPHYS(pPhysPage) == HCPhys)
4319 {
4320 Assert(pPage->cPresent);
4321 Assert(pPool->cPresent);
4322 pPage->cPresent--;
4323 pPool->cPresent--;
4324 pgmTrackDerefGCPhys(pPool, pPage, pPhysPage, iPte);
4325 return;
4326 }
4327
4328 AssertFatalMsgFailed(("HCPhys=%RHp GCPhys=%RGp; found page has HCPhys=%RHp\n",
4329 HCPhys, GCPhys, PGM_PAGE_GET_HCPHYS(pPhysPage)));
4330 }
4331 AssertFatalMsgFailed(("HCPhys=%RHp GCPhys=%RGp\n", HCPhys, GCPhys));
4332}
4333
4334
4335/**
4336 * Clear references to guest physical memory.
4337 *
4338 * @param pPool The pool.
4339 * @param pPage The page.
4340 * @param HCPhys The host physical address corresponding to the guest page.
4341 * @param   pPage       The page.
 * @param   HCPhys      The host physical address corresponding to the guest page.
4342 * @param   GCPhysHint  The guest physical address which may correspond to HCPhys.
4342 * @param iPte Shadow pte index
4343 */
4344void pgmPoolTracDerefGCPhysHint(PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTHCPHYS HCPhys, RTGCPHYS GCPhysHint, uint16_t iPte)
4345{
4346 Log4(("pgmPoolTracDerefGCPhysHint %RHp %RGp\n", HCPhys, GCPhysHint));
4347
4348 /*
4349 * Try the hint first.
4350 */
4351 RTHCPHYS HCPhysHinted;
4352 PVM pVM = pPool->CTX_SUFF(pVM);
4353 PPGMPAGE pPhysPage = pgmPhysGetPage(pVM, GCPhysHint);
4354 if (pPhysPage)
4355 {
4356 HCPhysHinted = PGM_PAGE_GET_HCPHYS(pPhysPage);
4357 Assert(HCPhysHinted);
4358 if (HCPhysHinted == HCPhys)
4359 {
4360 Assert(pPage->cPresent);
4361 Assert(pPool->cPresent);
4362 pPage->cPresent--;
4363 pPool->cPresent--;
4364 pgmTrackDerefGCPhys(pPool, pPage, pPhysPage, iPte);
4365 return;
4366 }
4367 }
4368 else
4369 HCPhysHinted = UINT64_C(0xdeadbeefdeadbeef);
4370
4371 /*
4372 * Damn, the hint didn't work. We'll have to do an expensive linear search.
4373 */
4374 STAM_COUNTER_INC(&pPool->StatTrackLinearRamSearches);
4375 PPGMRAMRANGE pRam = pPool->CTX_SUFF(pVM)->pgm.s.CTX_SUFF(pRamRangesX);
4376 while (pRam)
4377 {
4378 unsigned iPage = pRam->cb >> PAGE_SHIFT;
4379 while (iPage-- > 0)
4380 {
4381 if (PGM_PAGE_GET_HCPHYS(&pRam->aPages[iPage]) == HCPhys)
4382 {
4383 Log4(("pgmPoolTracDerefGCPhysHint: Linear HCPhys=%RHp GCPhysHint=%RGp GCPhysReal=%RGp\n",
4384 HCPhys, GCPhysHint, pRam->GCPhys + (iPage << PAGE_SHIFT)));
4385 Assert(pPage->cPresent);
4386 Assert(pPool->cPresent);
4387 pPage->cPresent--;
4388 pPool->cPresent--;
4389 pgmTrackDerefGCPhys(pPool, pPage, &pRam->aPages[iPage], iPte);
4390 return;
4391 }
4392 }
4393 pRam = pRam->CTX_SUFF(pNext);
4394 }
4395
4396 AssertFatalMsgFailed(("HCPhys=%RHp GCPhysHint=%RGp (Hinted page has HCPhys = %RHp)\n", HCPhys, GCPhysHint, HCPhysHinted));
4397}
4398
4399
4400/**
4401 * Clear references to guest physical memory in a 32-bit / 32-bit page table.
4402 *
4403 * @param pPool The pool.
4404 * @param pPage The page.
4405 * @param pShwPT The shadow page table (mapping of the page).
4406 * @param pGstPT The guest page table.
4407 */
4408DECLINLINE(void) pgmPoolTrackDerefPT32Bit32Bit(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PX86PT pShwPT, PCX86PT pGstPT)
4409{
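    /* When the A20 gate is disabled, bit 20 must be masked out of the guest physical address. */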
4410 RTGCPHYS32 const fPgMask = pPage->fA20Enabled ? X86_PTE_PG_MASK : X86_PTE_PG_MASK & ~RT_BIT_32(20);
4411 for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pShwPT->a); i++)
4412 {
4413 Assert(!(pShwPT->a[i].u & RT_BIT_32(10)));
4414 if (pShwPT->a[i].n.u1Present)
4415 {
4416 Log4(("pgmPoolTrackDerefPT32Bit32Bit: i=%d pte=%RX32 hint=%RX32\n",
4417 i, pShwPT->a[i].u & X86_PTE_PG_MASK, pGstPT->a[i].u & X86_PTE_PG_MASK));
4418 pgmPoolTracDerefGCPhysHint(pPool, pPage, pShwPT->a[i].u & X86_PTE_PG_MASK, pGstPT->a[i].u & fPgMask, i);
4419 if (!pPage->cPresent)
4420 break;
4421 }
4422 }
4423}
4424
4425
4426/**
4427 * Clear references to guest physical memory in a PAE / 32-bit page table.
4428 *
4429 * @param pPool The pool.
4430 * @param pPage The page.
4431 * @param pShwPT The shadow page table (mapping of the page).
4432 * @param pGstPT The guest page table (just a half one).
4433 */
4434DECLINLINE(void) pgmPoolTrackDerefPTPae32Bit(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PPGMSHWPTPAE pShwPT, PCX86PT pGstPT)
4435{
4436 RTGCPHYS32 const fPgMask = pPage->fA20Enabled ? X86_PTE_PG_MASK : X86_PTE_PG_MASK & ~RT_BIT_32(20);
4437 for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pShwPT->a); i++)
4438 {
4439 Assert( (PGMSHWPTEPAE_GET_U(pShwPT->a[i]) & UINT64_C(0x7ff0000000000400)) == 0
4440 || (PGMSHWPTEPAE_GET_U(pShwPT->a[i]) & UINT64_C(0x7ff0000000000400)) == UINT64_C(0x7ff0000000000000));
4441 if (PGMSHWPTEPAE_IS_P(pShwPT->a[i]))
4442 {
4443 Log4(("pgmPoolTrackDerefPTPae32Bit: i=%d pte=%RX64 hint=%RX32\n",
4444 i, PGMSHWPTEPAE_GET_HCPHYS(pShwPT->a[i]), pGstPT->a[i].u & X86_PTE_PG_MASK));
4445 pgmPoolTracDerefGCPhysHint(pPool, pPage, PGMSHWPTEPAE_GET_HCPHYS(pShwPT->a[i]), pGstPT->a[i].u & fPgMask, i);
4446 if (!pPage->cPresent)
4447 break;
4448 }
4449 }
4450}
4451
4452
4453/**
4454 * Clear references to guest physical memory in a PAE / PAE page table.
4455 *
4456 * @param pPool The pool.
4457 * @param pPage The page.
4458 * @param pShwPT The shadow page table (mapping of the page).
4459 * @param pGstPT The guest page table.
4460 */
4461DECLINLINE(void) pgmPoolTrackDerefPTPaePae(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PPGMSHWPTPAE pShwPT, PCX86PTPAE pGstPT)
4462{
4463 RTGCPHYS const fPgMask = pPage->fA20Enabled ? X86_PTE_PAE_PG_MASK : X86_PTE_PAE_PG_MASK & ~RT_BIT_64(20);
4464 for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pShwPT->a); i++)
4465 {
4466 Assert( (PGMSHWPTEPAE_GET_U(pShwPT->a[i]) & UINT64_C(0x7ff0000000000400)) == 0
4467 || (PGMSHWPTEPAE_GET_U(pShwPT->a[i]) & UINT64_C(0x7ff0000000000400)) == UINT64_C(0x7ff0000000000000));
4468 if (PGMSHWPTEPAE_IS_P(pShwPT->a[i]))
4469 {
4470 Log4(("pgmPoolTrackDerefPTPaePae: i=%d pte=%RX32 hint=%RX32\n",
4471 i, PGMSHWPTEPAE_GET_HCPHYS(pShwPT->a[i]), pGstPT->a[i].u & X86_PTE_PAE_PG_MASK));
4472 pgmPoolTracDerefGCPhysHint(pPool, pPage, PGMSHWPTEPAE_GET_HCPHYS(pShwPT->a[i]), pGstPT->a[i].u & fPgMask, i);
4473 if (!pPage->cPresent)
4474 break;
4475 }
4476 }
4477}
4478
4479
4480/**
4481 * Clear references to guest physical memory in a 32-bit / 4MB page table.
4482 *
4483 * @param pPool The pool.
4484 * @param pPage The page.
4485 * @param pShwPT The shadow page table (mapping of the page).
4486 */
4487DECLINLINE(void) pgmPoolTrackDerefPT32Bit4MB(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PX86PT pShwPT)
4488{
4489 RTGCPHYS const GCPhysA20Mask = pPage->fA20Enabled ? UINT64_MAX : ~RT_BIT_64(20);
4490 RTGCPHYS GCPhys = pPage->GCPhys + PAGE_SIZE * pPage->iFirstPresent;
4491 for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pShwPT->a); i++, GCPhys += PAGE_SIZE)
4492 {
4493 Assert(!(pShwPT->a[i].u & RT_BIT_32(10)));
4494 if (pShwPT->a[i].n.u1Present)
4495 {
4496 Log4(("pgmPoolTrackDerefPT32Bit4MB: i=%d pte=%RX32 GCPhys=%RGp\n",
4497 i, pShwPT->a[i].u & X86_PTE_PG_MASK, GCPhys));
4498 pgmPoolTracDerefGCPhys(pPool, pPage, pShwPT->a[i].u & X86_PTE_PG_MASK, GCPhys & GCPhysA20Mask, i);
4499 if (!pPage->cPresent)
4500 break;
4501 }
4502 }
4503}
4504
4505
4506/**
4507 * Clear references to guest physical memory in a PAE / 2/4MB page table.
4508 *
4509 * @param pPool The pool.
4510 * @param pPage The page.
4511 * @param pShwPT The shadow page table (mapping of the page).
4512 */
4513DECLINLINE(void) pgmPoolTrackDerefPTPaeBig(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PPGMSHWPTPAE pShwPT)
4514{
4515 RTGCPHYS const GCPhysA20Mask = pPage->fA20Enabled ? UINT64_MAX : ~RT_BIT_64(20);
4516 RTGCPHYS GCPhys = pPage->GCPhys + PAGE_SIZE * pPage->iFirstPresent;
4517 for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pShwPT->a); i++, GCPhys += PAGE_SIZE)
4518 {
4519 Assert( (PGMSHWPTEPAE_GET_U(pShwPT->a[i]) & UINT64_C(0x7ff0000000000400)) == 0
4520 || (PGMSHWPTEPAE_GET_U(pShwPT->a[i]) & UINT64_C(0x7ff0000000000400)) == UINT64_C(0x7ff0000000000000));
4521 if (PGMSHWPTEPAE_IS_P(pShwPT->a[i]))
4522 {
4523 Log4(("pgmPoolTrackDerefPTPaeBig: i=%d pte=%RX64 hint=%RGp\n",
4524 i, PGMSHWPTEPAE_GET_HCPHYS(pShwPT->a[i]), GCPhys));
4525 pgmPoolTracDerefGCPhys(pPool, pPage, PGMSHWPTEPAE_GET_HCPHYS(pShwPT->a[i]), GCPhys & GCPhysA20Mask, i);
4526 if (!pPage->cPresent)
4527 break;
4528 }
4529 }
4530}
4531
4532
4533/**
4534 * Clear references to shadowed pages in an EPT page table.
4535 *
4536 * @param pPool The pool.
4537 * @param pPage The page.
4538 * @param   pShwPT      The shadow page table (mapping of the
4539 *                      page).
4540 */
4541DECLINLINE(void) pgmPoolTrackDerefPTEPT(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PEPTPT pShwPT)
4542{
4543 RTGCPHYS const GCPhysA20Mask = pPage->fA20Enabled ? UINT64_MAX : ~RT_BIT_64(20);
4544 RTGCPHYS GCPhys = pPage->GCPhys + PAGE_SIZE * pPage->iFirstPresent;
4545 for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pShwPT->a); i++, GCPhys += PAGE_SIZE)
4546 {
4547 Assert((pShwPT->a[i].u & UINT64_C(0xfff0000000000f80)) == 0);
4548 if (pShwPT->a[i].n.u1Present)
4549 {
4550 Log4(("pgmPoolTrackDerefPTEPT: i=%d pte=%RX64 GCPhys=%RX64\n",
4551 i, pShwPT->a[i].u & EPT_PTE_PG_MASK, pPage->GCPhys));
4552 pgmPoolTracDerefGCPhys(pPool, pPage, pShwPT->a[i].u & EPT_PTE_PG_MASK, GCPhys & GCPhysA20Mask, i);
4553 if (!pPage->cPresent)
4554 break;
4555 }
4556 }
4557}
4558
4559
4560/**
4561 * Clear references to shadowed pages in a 32-bit page directory.
4562 *
4563 * @param pPool The pool.
4564 * @param pPage The page.
4565 * @param pShwPD The shadow page directory (mapping of the page).
4566 */
4567DECLINLINE(void) pgmPoolTrackDerefPD(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PX86PD pShwPD)
4568{
4569 for (unsigned i = 0; i < RT_ELEMENTS(pShwPD->a); i++)
4570 {
4571 Assert(!(pShwPD->a[i].u & RT_BIT_32(9)));
4572 if ( pShwPD->a[i].n.u1Present
4573 && !(pShwPD->a[i].u & PGM_PDFLAGS_MAPPING)
4574 )
4575 {
4576 PPGMPOOLPAGE pSubPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, pShwPD->a[i].u & X86_PDE_PG_MASK);
4577 if (pSubPage)
4578 pgmPoolTrackFreeUser(pPool, pSubPage, pPage->idx, i);
4579 else
4580 AssertFatalMsgFailed(("%x\n", pShwPD->a[i].u & X86_PDE_PG_MASK));
4581 }
4582 }
4583}
4584
4585
4586/**
4587 * Clear references to shadowed pages in a PAE (legacy or 64-bit) page directory.
4588 *
4589 * @param pPool The pool.
4590 * @param pPage The page.
4591 * @param pShwPD The shadow page directory (mapping of the page).
4592 */
4593DECLINLINE(void) pgmPoolTrackDerefPDPae(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PX86PDPAE pShwPD)
4594{
4595 for (unsigned i = 0; i < RT_ELEMENTS(pShwPD->a); i++)
4596 {
4597 if ( pShwPD->a[i].n.u1Present
4598 && !(pShwPD->a[i].u & PGM_PDFLAGS_MAPPING))
4599 {
4600#ifdef PGM_WITH_LARGE_PAGES
4601 if (pShwPD->a[i].b.u1Size)
4602 {
4603 Log4(("pgmPoolTrackDerefPDPae: i=%d pde=%RX64 GCPhys=%RX64\n",
4604 i, pShwPD->a[i].u & X86_PDE2M_PAE_PG_MASK, pPage->GCPhys));
4605 pgmPoolTracDerefGCPhys(pPool, pPage, pShwPD->a[i].u & X86_PDE2M_PAE_PG_MASK,
4606 pPage->GCPhys + i * 2 * _1M /* pPage->GCPhys = base address of the memory described by the PD */,
4607 i);
4608 }
4609 else
4610#endif
4611 {
4612 Assert((pShwPD->a[i].u & (X86_PDE_PAE_MBZ_MASK_NX | UINT64_C(0x7ff0000000000200))) == 0);
4613 PPGMPOOLPAGE pSubPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, pShwPD->a[i].u & X86_PDE_PAE_PG_MASK);
4614 if (pSubPage)
4615 pgmPoolTrackFreeUser(pPool, pSubPage, pPage->idx, i);
4616 else
4617 AssertFatalMsgFailed(("%RX64\n", pShwPD->a[i].u & X86_PDE_PAE_PG_MASK));
4618 /** @todo 64-bit guests: have to ensure that we're not exhausting the dynamic mappings! */
4619 }
4620 }
4621 }
4622}
4623
4624
4625/**
4626 * Clear references to shadowed pages in a PAE page directory pointer table.
4627 *
4628 * @param pPool The pool.
4629 * @param pPage The page.
4630 * @param pShwPDPT The shadow page directory pointer table (mapping of the page).
4631 */
4632DECLINLINE(void) pgmPoolTrackDerefPDPTPae(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PX86PDPT pShwPDPT)
4633{
4634 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
4635 {
4636 Assert((pShwPDPT->a[i].u & (X86_PDPE_PAE_MBZ_MASK | UINT64_C(0x7ff0000000000200))) == 0);
4637 if ( pShwPDPT->a[i].n.u1Present
4638 && !(pShwPDPT->a[i].u & PGM_PLXFLAGS_MAPPING)
4639 )
4640 {
4641 PPGMPOOLPAGE pSubPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, pShwPDPT->a[i].u & X86_PDPE_PG_MASK);
4642 if (pSubPage)
4643 pgmPoolTrackFreeUser(pPool, pSubPage, pPage->idx, i);
4644 else
4645 AssertFatalMsgFailed(("%RX64\n", pShwPDPT->a[i].u & X86_PDPE_PG_MASK));
4646 }
4647 }
4648}
4649
4650
4651/**
4652 * Clear references to shadowed pages in a 64-bit page directory pointer table.
4653 *
4654 * @param pPool The pool.
4655 * @param pPage The page.
4656 * @param pShwPDPT The shadow page directory pointer table (mapping of the page).
4657 */
4658DECLINLINE(void) pgmPoolTrackDerefPDPT64Bit(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PX86PDPT pShwPDPT)
4659{
4660 for (unsigned i = 0; i < RT_ELEMENTS(pShwPDPT->a); i++)
4661 {
4662 Assert((pShwPDPT->a[i].u & (X86_PDPE_LM_MBZ_MASK_NX | UINT64_C(0x7ff0000000000200))) == 0);
4663 if (pShwPDPT->a[i].n.u1Present)
4664 {
4665 PPGMPOOLPAGE pSubPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, pShwPDPT->a[i].u & X86_PDPE_PG_MASK);
4666 if (pSubPage)
4667 pgmPoolTrackFreeUser(pPool, pSubPage, pPage->idx, i);
4668 else
4669 AssertFatalMsgFailed(("%RX64\n", pShwPDPT->a[i].u & X86_PDPE_PG_MASK));
4670 /** @todo 64-bit guests: have to ensure that we're not exhausting the dynamic mappings! */
4671 }
4672 }
4673}
4674
4675
4676/**
4677 * Clear references to shadowed pages in a 64-bit level 4 page table.
4678 *
4679 * @param pPool The pool.
4680 * @param pPage The page.
4681 * @param   pShwPML4    The shadow page-map level-4 table (mapping of the page).
4682 */
4683DECLINLINE(void) pgmPoolTrackDerefPML464Bit(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PX86PML4 pShwPML4)
4684{
4685 for (unsigned i = 0; i < RT_ELEMENTS(pShwPML4->a); i++)
4686 {
4687 Assert((pShwPML4->a[i].u & (X86_PML4E_MBZ_MASK_NX | UINT64_C(0x7ff0000000000200))) == 0);
4688 if (pShwPML4->a[i].n.u1Present)
4689 {
4690 PPGMPOOLPAGE pSubPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, pShwPML4->a[i].u & X86_PDPE_PG_MASK);
4691 if (pSubPage)
4692 pgmPoolTrackFreeUser(pPool, pSubPage, pPage->idx, i);
4693 else
4694 AssertFatalMsgFailed(("%RX64\n", pShwPML4->a[i].u & X86_PML4E_PG_MASK));
4695 /** @todo 64-bit guests: have to ensure that we're not exhausting the dynamic mappings! */
4696 }
4697 }
4698}
4699
4700
4701/**
4702 * Clear references to shadowed pages in an EPT page directory.
4703 *
4704 * @param pPool The pool.
4705 * @param pPage The page.
4706 * @param pShwPD The shadow page directory (mapping of the page).
4707 */
4708DECLINLINE(void) pgmPoolTrackDerefPDEPT(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PEPTPD pShwPD)
4709{
4710 for (unsigned i = 0; i < RT_ELEMENTS(pShwPD->a); i++)
4711 {
4712 Assert((pShwPD->a[i].u & UINT64_C(0xfff0000000000f80)) == 0);
4713 if (pShwPD->a[i].n.u1Present)
4714 {
4715#ifdef PGM_WITH_LARGE_PAGES
4716 if (pShwPD->a[i].b.u1Size)
4717 {
4718 Log4(("pgmPoolTrackDerefPDEPT: i=%d pde=%RX64 GCPhys=%RX64\n",
4719 i, pShwPD->a[i].u & X86_PDE2M_PAE_PG_MASK, pPage->GCPhys));
4720 pgmPoolTracDerefGCPhys(pPool, pPage, pShwPD->a[i].u & X86_PDE2M_PAE_PG_MASK,
4721 pPage->GCPhys + i * 2 * _1M /* pPage->GCPhys = base address of the memory described by the PD */,
4722 i);
4723 }
4724 else
4725#endif
4726 {
4727 PPGMPOOLPAGE pSubPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, pShwPD->a[i].u & EPT_PDE_PG_MASK);
4728 if (pSubPage)
4729 pgmPoolTrackFreeUser(pPool, pSubPage, pPage->idx, i);
4730 else
4731 AssertFatalMsgFailed(("%RX64\n", pShwPD->a[i].u & EPT_PDE_PG_MASK));
4732 }
4733 /** @todo 64-bit guests: have to ensure that we're not exhausting the dynamic mappings! */
4734 }
4735 }
4736}
4737
4738
4739/**
4740 * Clear references to shadowed pages in an EPT page directory pointer table.
4741 *
4742 * @param pPool The pool.
4743 * @param pPage The page.
4744 * @param pShwPDPT The shadow page directory pointer table (mapping of the page).
4745 */
4746DECLINLINE(void) pgmPoolTrackDerefPDPTEPT(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PEPTPDPT pShwPDPT)
4747{
4748 for (unsigned i = 0; i < RT_ELEMENTS(pShwPDPT->a); i++)
4749 {
4750 Assert((pShwPDPT->a[i].u & UINT64_C(0xfff0000000000f80)) == 0);
4751 if (pShwPDPT->a[i].n.u1Present)
4752 {
4753 PPGMPOOLPAGE pSubPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, pShwPDPT->a[i].u & EPT_PDPTE_PG_MASK);
4754 if (pSubPage)
4755 pgmPoolTrackFreeUser(pPool, pSubPage, pPage->idx, i);
4756 else
4757 AssertFatalMsgFailed(("%RX64\n", pShwPDPT->a[i].u & EPT_PDPTE_PG_MASK));
4758 /** @todo 64-bit guests: have to ensure that we're not exhausting the dynamic mappings! */
4759 }
4760 }
4761}
4762
4763
4764/**
4765 * Clears all references made by this page.
4766 *
4767 * This includes other shadow pages and GC physical addresses.
4768 *
4769 * @param pPool The pool.
4770 * @param pPage The page.
4771 */
4772static void pgmPoolTrackDeref(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
4773{
4774 /*
4775 * Map the shadow page and take action according to the page kind.
4776 */
4777 PVM pVM = pPool->CTX_SUFF(pVM);
4778 void *pvShw = PGMPOOL_PAGE_2_PTR(pVM, pPage);
4779 switch (pPage->enmKind)
4780 {
4781 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT:
4782 {
4783 STAM_PROFILE_START(&pPool->StatTrackDerefGCPhys, g);
4784 void *pvGst;
4785 int rc = PGM_GCPHYS_2_PTR(pVM, pPage->GCPhys, &pvGst); AssertReleaseRC(rc);
4786 pgmPoolTrackDerefPT32Bit32Bit(pPool, pPage, (PX86PT)pvShw, (PCX86PT)pvGst);
4787 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pvGst);
4788 STAM_PROFILE_STOP(&pPool->StatTrackDerefGCPhys, g);
4789 break;
4790 }
4791
4792 case PGMPOOLKIND_PAE_PT_FOR_32BIT_PT:
4793 {
4794 STAM_PROFILE_START(&pPool->StatTrackDerefGCPhys, g);
4795 void *pvGst;
4796 int rc = PGM_GCPHYS_2_PTR_EX(pVM, pPage->GCPhys, &pvGst); AssertReleaseRC(rc);
4797 pgmPoolTrackDerefPTPae32Bit(pPool, pPage, (PPGMSHWPTPAE)pvShw, (PCX86PT)pvGst);
4798 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pvGst);
4799 STAM_PROFILE_STOP(&pPool->StatTrackDerefGCPhys, g);
4800 break;
4801 }
4802
4803 case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
4804 {
4805 STAM_PROFILE_START(&pPool->StatTrackDerefGCPhys, g);
4806 void *pvGst;
4807 int rc = PGM_GCPHYS_2_PTR(pVM, pPage->GCPhys, &pvGst); AssertReleaseRC(rc);
4808 pgmPoolTrackDerefPTPaePae(pPool, pPage, (PPGMSHWPTPAE)pvShw, (PCX86PTPAE)pvGst);
4809 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pvGst);
4810 STAM_PROFILE_STOP(&pPool->StatTrackDerefGCPhys, g);
4811 break;
4812 }
4813
4814 case PGMPOOLKIND_32BIT_PT_FOR_PHYS: /* treat it like a 4 MB page */
4815 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB:
4816 {
4817 STAM_PROFILE_START(&pPool->StatTrackDerefGCPhys, g);
4818 pgmPoolTrackDerefPT32Bit4MB(pPool, pPage, (PX86PT)pvShw);
4819 STAM_PROFILE_STOP(&pPool->StatTrackDerefGCPhys, g);
4820 break;
4821 }
4822
4823 case PGMPOOLKIND_PAE_PT_FOR_PHYS: /* treat it like a 2 MB page */
4824 case PGMPOOLKIND_PAE_PT_FOR_PAE_2MB:
4825 case PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB:
4826 {
4827 STAM_PROFILE_START(&pPool->StatTrackDerefGCPhys, g);
4828 pgmPoolTrackDerefPTPaeBig(pPool, pPage, (PPGMSHWPTPAE)pvShw);
4829 STAM_PROFILE_STOP(&pPool->StatTrackDerefGCPhys, g);
4830 break;
4831 }
4832
4833 case PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD:
4834 case PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD:
4835 case PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD:
4836 case PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD:
4837 case PGMPOOLKIND_PAE_PD_FOR_PAE_PD:
4838 case PGMPOOLKIND_PAE_PD_PHYS:
4839 case PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD:
4840 case PGMPOOLKIND_64BIT_PD_FOR_PHYS:
4841 pgmPoolTrackDerefPDPae(pPool, pPage, (PX86PDPAE)pvShw);
4842 break;
4843
4844 case PGMPOOLKIND_32BIT_PD_PHYS:
4845 case PGMPOOLKIND_32BIT_PD:
4846 pgmPoolTrackDerefPD(pPool, pPage, (PX86PD)pvShw);
4847 break;
4848
4849 case PGMPOOLKIND_PAE_PDPT_FOR_32BIT:
4850 case PGMPOOLKIND_PAE_PDPT:
4851 case PGMPOOLKIND_PAE_PDPT_PHYS:
4852 pgmPoolTrackDerefPDPTPae(pPool, pPage, (PX86PDPT)pvShw);
4853 break;
4854
4855 case PGMPOOLKIND_64BIT_PDPT_FOR_PHYS:
4856 case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
4857 pgmPoolTrackDerefPDPT64Bit(pPool, pPage, (PX86PDPT)pvShw);
4858 break;
4859
4860 case PGMPOOLKIND_64BIT_PML4:
4861 pgmPoolTrackDerefPML464Bit(pPool, pPage, (PX86PML4)pvShw);
4862 break;
4863
4864 case PGMPOOLKIND_EPT_PT_FOR_PHYS:
4865 pgmPoolTrackDerefPTEPT(pPool, pPage, (PEPTPT)pvShw);
4866 break;
4867
4868 case PGMPOOLKIND_EPT_PD_FOR_PHYS:
4869 pgmPoolTrackDerefPDEPT(pPool, pPage, (PEPTPD)pvShw);
4870 break;
4871
4872 case PGMPOOLKIND_EPT_PDPT_FOR_PHYS:
4873 pgmPoolTrackDerefPDPTEPT(pPool, pPage, (PEPTPDPT)pvShw);
4874 break;
4875
4876 default:
4877 AssertFatalMsgFailed(("enmKind=%d\n", pPage->enmKind));
4878 }
4879
4880 /* paranoia, clear the shadow page. Remove this later (i.e. let Alloc and ClearAll do it). */
4881 STAM_PROFILE_START(&pPool->StatZeroPage, z);
4882 ASMMemZeroPage(pvShw);
4883 STAM_PROFILE_STOP(&pPool->StatZeroPage, z);
4884 pPage->fZeroed = true;
4885 Assert(!pPage->cPresent);
4886 PGM_DYNMAP_UNUSED_HINT_VM(pVM, pvShw);
4887}
4888
4889
4890/**
4891 * Flushes a pool page.
4892 *
4893 * This moves the page to the free list after removing all user references to it.
4894 *
4895 * @returns VBox status code.
4896 * @retval VINF_SUCCESS on success.
4897 * @param pPool The pool.
4898 * @param pPage The shadow page.
4899 * @param fFlush Flush the TLBs when required (should only be false in very specific use cases!!)
4900 */
4901int pgmPoolFlushPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage, bool fFlush)
4902{
4903 PVM pVM = pPool->CTX_SUFF(pVM);
4904 bool fFlushRequired = false;
4905
4906 int rc = VINF_SUCCESS;
4907 STAM_PROFILE_START(&pPool->StatFlushPage, f);
4908 LogFlow(("pgmPoolFlushPage: pPage=%p:{.Key=%RHp, .idx=%d, .enmKind=%s, .GCPhys=%RGp}\n",
4909 pPage, pPage->Core.Key, pPage->idx, pgmPoolPoolKindToStr(pPage->enmKind), pPage->GCPhys));
4910
4911 /*
4912 * Reject any attempts at flushing any of the special root pages (shall
4913 * not happen).
4914 */
4915 AssertMsgReturn(pPage->idx >= PGMPOOL_IDX_FIRST,
4916 ("pgmPoolFlushPage: special root page, rejected. enmKind=%s idx=%d\n",
4917 pgmPoolPoolKindToStr(pPage->enmKind), pPage->idx),
4918 VINF_SUCCESS);
4919
4920 pgmLock(pVM);
4921
4922 /*
4923 * Quietly reject any attempts at flushing the currently active shadow CR3 mapping
4924 */
4925 if (pgmPoolIsPageLocked(pPage))
4926 {
4927 AssertMsg( pPage->enmKind == PGMPOOLKIND_64BIT_PML4
4928 || pPage->enmKind == PGMPOOLKIND_PAE_PDPT
4929 || pPage->enmKind == PGMPOOLKIND_PAE_PDPT_FOR_32BIT
4930 || pPage->enmKind == PGMPOOLKIND_32BIT_PD
4931 || pPage->enmKind == PGMPOOLKIND_PAE_PD_FOR_PAE_PD
4932 || pPage->enmKind == PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD
4933 || pPage->enmKind == PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD
4934 || pPage->enmKind == PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD
4935 || pPage->enmKind == PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD
4936 || pPage->enmKind == PGMPOOLKIND_ROOT_NESTED,
4937 ("Can't free the shadow CR3! (%RHp vs %RHp kind=%d\n", PGMGetHyperCR3(VMMGetCpu(pVM)), pPage->Core.Key, pPage->enmKind));
4938 Log(("pgmPoolFlushPage: current active shadow CR3, rejected. enmKind=%s idx=%d\n", pgmPoolPoolKindToStr(pPage->enmKind), pPage->idx));
4939 pgmUnlock(pVM);
4940 return VINF_SUCCESS;
4941 }
4942
4943#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
4944 /* Start a subset so we won't run out of mapping space. */
4945 PVMCPU pVCpu = VMMGetCpu(pVM);
4946 uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu);
4947#endif
4948
4949 /*
4950 * Mark the page as being in need of an ASMMemZeroPage().
4951 */
4952 pPage->fZeroed = false;
4953
4954#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
4955 if (pPage->fDirty)
4956 pgmPoolFlushDirtyPage(pVM, pPool, pPage->idxDirtyEntry, false /* do not remove */);
4957#endif
4958
4959 /* If there are any users of this table, then we *must* issue a tlb flush on all VCPUs. */
4960 if (pPage->iUserHead != NIL_PGMPOOL_USER_INDEX)
4961 fFlushRequired = true;
4962
4963 /*
4964 * Clear the page.
4965 */
4966 pgmPoolTrackClearPageUsers(pPool, pPage);
4967 STAM_PROFILE_START(&pPool->StatTrackDeref,a);
4968 pgmPoolTrackDeref(pPool, pPage);
4969 STAM_PROFILE_STOP(&pPool->StatTrackDeref,a);
4970
4971 /*
4972 * Flush it from the cache.
4973 */
4974 pgmPoolCacheFlushPage(pPool, pPage);
4975
4976#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
4977 /* Heavy stuff done. */
4978 PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset);
4979#endif
4980
4981 /*
4982 * Deregister the monitoring.
4983 */
4984 if (pPage->fMonitored)
4985 rc = pgmPoolMonitorFlush(pPool, pPage);
4986
4987 /*
4988 * Free the page.
4989 */
4990 Assert(pPage->iNext == NIL_PGMPOOL_IDX);
4991 pPage->iNext = pPool->iFreeHead;
4992 pPool->iFreeHead = pPage->idx;
4993 pPage->enmKind = PGMPOOLKIND_FREE;
4994 pPage->enmAccess = PGMPOOLACCESS_DONTCARE;
4995 pPage->GCPhys = NIL_RTGCPHYS;
4996 pPage->fReusedFlushPending = false;
4997
4998 pPool->cUsedPages--;
4999
5000 /* Flush the TLBs of all VCPUs if required. */
5001 if ( fFlushRequired
5002 && fFlush)
5003 {
5004 PGM_INVL_ALL_VCPU_TLBS(pVM);
5005 }
5006
5007 pgmUnlock(pVM);
5008 STAM_PROFILE_STOP(&pPool->StatFlushPage, f);
5009 return rc;
5010}
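
/*
 * Illustrative usage sketch (not part of this file): a hypothetical caller that
 * looks up a shadow page by its host physical address and flushes it.  pVM and
 * HCPhysShw are assumed to exist in the caller's context; the PGM lock nests,
 * so the extra lock/unlock pair around the lookup is harmless.  fFlush is
 * passed as true so all VCPU TLBs get invalidated if the page still had users.
 *
 *     PPGMPOOL     pPool = pVM->pgm.s.CTX_SUFF(pPool);
 *     pgmLock(pVM);
 *     PPGMPOOLPAGE pPage = pgmPoolGetPage(pPool, HCPhysShw);
 *     int rc = pgmPoolFlushPage(pPool, pPage, true);
 *     pgmUnlock(pVM);
 *     AssertRC(rc);
 */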
5011
5012
5013/**
5014 * Frees a usage of a pool page.
5015 *
5016 * The caller is responsible for updating the user table so that it no longer
5017 * references the shadow page.
5018 *
5019 * @param pPool The pool.
5020 * @param pPage The shadow page.
5021 * @param iUser The shadow page pool index of the user table.
5022 * NIL_PGMPOOL_IDX for root pages.
5023 * @param iUserTable The index into the user table (shadowed). Ignored if
5024 * root page.
5025 */
5026void pgmPoolFreeByPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage, uint16_t iUser, uint32_t iUserTable)
5027{
5028 PVM pVM = pPool->CTX_SUFF(pVM);
5029
5030 STAM_PROFILE_START(&pPool->StatFree, a);
5031 LogFlow(("pgmPoolFreeByPage: pPage=%p:{.Key=%RHp, .idx=%d, enmKind=%s} iUser=%d iUserTable=%#x\n",
5032 pPage, pPage->Core.Key, pPage->idx, pgmPoolPoolKindToStr(pPage->enmKind), iUser, iUserTable));
5033 AssertReturnVoid(pPage->idx >= PGMPOOL_IDX_FIRST); /* paranoia (#6349) */
5034
5035 pgmLock(pVM);
5036 if (iUser != NIL_PGMPOOL_IDX)
5037 pgmPoolTrackFreeUser(pPool, pPage, iUser, iUserTable);
5038 if (!pPage->fCached)
5039 pgmPoolFlushPage(pPool, pPage);
5040 pgmUnlock(pVM);
5041 STAM_PROFILE_STOP(&pPool->StatFree, a);
5042}
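
/*
 * Illustrative sketch (hypothetical caller, not part of this file): dropping a
 * shadow page table referenced from a shadow PDE.  pShwPd (the mapped shadow
 * PD), pShwPdPage (its pool page), iPde and pShwPtPage are assumed names; the
 * referencing entry is cleared first, as the contract above requires.
 *
 *     ASMAtomicWriteU64(&pShwPd->a[iPde].u, 0);
 *     pgmPoolFreeByPage(pPool, pShwPtPage, pShwPdPage->idx, iPde);
 */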
5043
5044
5045/**
5046 * Makes one or more pages free.
5047 *
5048 * @returns VBox status code.
5049 * @retval VINF_SUCCESS on success.
5050 * @retval VERR_PGM_POOL_FLUSHED if the pool was flushed.
5051 *
5052 * @param pPool The pool.
5053 * @param enmKind Page table kind
5054 * @param iUser The user of the page.
5055 */
5056static int pgmPoolMakeMoreFreePages(PPGMPOOL pPool, PGMPOOLKIND enmKind, uint16_t iUser)
5057{
5058 PVM pVM = pPool->CTX_SUFF(pVM);
5059 LogFlow(("pgmPoolMakeMoreFreePages: enmKind=%d iUser=%d\n", enmKind, iUser));
5060 NOREF(enmKind);
5061
5062 /*
5063 * If the pool isn't fully grown yet, expand it.
5064 */
5065 if ( pPool->cCurPages < pPool->cMaxPages
5066#if defined(IN_RC)
5067 /* Hack alert: we can't deal with jumps to ring 3 when called from MapCR3 and allocating pages for PAE PDs. */
5068 && enmKind != PGMPOOLKIND_PAE_PD_FOR_PAE_PD
5069 && (enmKind < PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD || enmKind > PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD)
5070#endif
5071 )
5072 {
5073 STAM_PROFILE_ADV_SUSPEND(&pPool->StatAlloc, a);
5074#ifdef IN_RING3
5075 int rc = PGMR3PoolGrow(pVM);
5076#else
5077 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_POOL_GROW, 0);
5078#endif
5079 if (RT_FAILURE(rc))
5080 return rc;
5081 STAM_PROFILE_ADV_RESUME(&pPool->StatAlloc, a);
5082 if (pPool->iFreeHead != NIL_PGMPOOL_IDX)
5083 return VINF_SUCCESS;
5084 }
5085
5086 /*
5087 * Free one cached page.
5088 */
5089 return pgmPoolCacheFreeOne(pPool, iUser);
5090}
5091
5092
5093/**
5094 * Allocates a page from the pool.
5095 *
5096 * This page may actually be a cached page and not in need of any processing
5097 * on the caller's part.
5098 *
5099 * @returns VBox status code.
5100 * @retval VINF_SUCCESS if a NEW page was allocated.
5101 * @retval VINF_PGM_CACHED_PAGE if a CACHED page was returned.
5102 * @retval VERR_PGM_POOL_FLUSHED if the pool was flushed.
5103 *
5104 * @param pVM The cross context VM structure.
5105 * @param GCPhys The GC physical address of the page we're going to shadow.
5106 * For 4MB and 2MB PD entries, it's the first address the
5107 * shadow PT is covering.
5108 * @param enmKind The kind of mapping.
5109 * @param enmAccess Access type for the mapping (only relevant for big pages)
5110 * @param fA20Enabled Whether the A20 gate is enabled or not.
5111 * @param iUser The shadow page pool index of the user table. Root
5112 * pages should pass NIL_PGMPOOL_IDX.
5113 * @param iUserTable The index into the user table (shadowed). Ignored for
5114 * root pages (iUser == NIL_PGMPOOL_IDX).
5115 * @param fLockPage Lock the page
5116 * @param ppPage Where to store the pointer to the page. NULL is stored here on failure.
5117 */
5118int pgmPoolAlloc(PVM pVM, RTGCPHYS GCPhys, PGMPOOLKIND enmKind, PGMPOOLACCESS enmAccess, bool fA20Enabled,
5119 uint16_t iUser, uint32_t iUserTable, bool fLockPage, PPPGMPOOLPAGE ppPage)
5120{
5121 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
5122 STAM_PROFILE_ADV_START(&pPool->StatAlloc, a);
5123 LogFlow(("pgmPoolAlloc: GCPhys=%RGp enmKind=%s iUser=%d iUserTable=%#x\n", GCPhys, pgmPoolPoolKindToStr(enmKind), iUser, iUserTable));
5124 *ppPage = NULL;
5125 /** @todo CSAM/PGMPrefetchPage messes up here during CSAMR3CheckGates
5126 * (TRPMR3SyncIDT) because of FF priority. Try fix that?
5127 * Assert(!(pVM->pgm.s.fGlobalSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)); */
5128
5129 pgmLock(pVM);
5130
5131 if (pPool->fCacheEnabled)
5132 {
5133 int rc2 = pgmPoolCacheAlloc(pPool, GCPhys, enmKind, enmAccess, fA20Enabled, iUser, iUserTable, ppPage);
5134 if (RT_SUCCESS(rc2))
5135 {
5136 if (fLockPage)
5137 pgmPoolLockPage(pPool, *ppPage);
5138 pgmUnlock(pVM);
5139 STAM_PROFILE_ADV_STOP(&pPool->StatAlloc, a);
5140 LogFlow(("pgmPoolAlloc: cached returns %Rrc *ppPage=%p:{.Key=%RHp, .idx=%d}\n", rc2, *ppPage, (*ppPage)->Core.Key, (*ppPage)->idx));
5141 return rc2;
5142 }
5143 }
5144
5145 /*
5146 * Allocate a new one.
5147 */
5148 int rc = VINF_SUCCESS;
5149 uint16_t iNew = pPool->iFreeHead;
5150 if (iNew == NIL_PGMPOOL_IDX)
5151 {
5152 rc = pgmPoolMakeMoreFreePages(pPool, enmKind, iUser);
5153 if (RT_FAILURE(rc))
5154 {
5155 pgmUnlock(pVM);
5156 Log(("pgmPoolAlloc: returns %Rrc (Free)\n", rc));
5157 STAM_PROFILE_ADV_STOP(&pPool->StatAlloc, a);
5158 return rc;
5159 }
5160 iNew = pPool->iFreeHead;
5161 AssertReleaseReturn(iNew != NIL_PGMPOOL_IDX, VERR_PGM_POOL_IPE);
5162 }
5163
5164 /* unlink the free head */
5165 PPGMPOOLPAGE pPage = &pPool->aPages[iNew];
5166 pPool->iFreeHead = pPage->iNext;
5167 pPage->iNext = NIL_PGMPOOL_IDX;
5168
5169 /*
5170 * Initialize it.
5171 */
5172 pPool->cUsedPages++; /* physical handler registration / pgmPoolTrackFlushGCPhysPTsSlow requirement. */
5173 pPage->enmKind = enmKind;
5174 pPage->enmAccess = enmAccess;
5175 pPage->GCPhys = GCPhys;
5176 pPage->fA20Enabled = fA20Enabled;
5177 pPage->fSeenNonGlobal = false; /* Set this to 'true' to disable this feature. */
5178 pPage->fMonitored = false;
5179 pPage->fCached = false;
5180 pPage->fDirty = false;
5181 pPage->fReusedFlushPending = false;
5182 pPage->cModifications = 0;
5183 pPage->iModifiedNext = NIL_PGMPOOL_IDX;
5184 pPage->iModifiedPrev = NIL_PGMPOOL_IDX;
5185 pPage->cPresent = 0;
5186 pPage->iFirstPresent = NIL_PGMPOOL_PRESENT_INDEX;
5187 pPage->idxDirtyEntry = 0;
5188 pPage->GCPtrLastAccessHandlerFault = NIL_RTGCPTR;
5189 pPage->GCPtrLastAccessHandlerRip = NIL_RTGCPTR;
5190 pPage->cLastAccessHandler = 0;
5191 pPage->cLocked = 0;
5192# ifdef VBOX_STRICT
5193 pPage->GCPtrDirtyFault = NIL_RTGCPTR;
5194# endif
5195
5196 /*
5197 * Insert into the tracking and cache. If this fails, free the page.
5198 */
5199 int rc3 = pgmPoolTrackInsert(pPool, pPage, GCPhys, iUser, iUserTable);
5200 if (RT_FAILURE(rc3))
5201 {
5202 pPool->cUsedPages--;
5203 pPage->enmKind = PGMPOOLKIND_FREE;
5204 pPage->enmAccess = PGMPOOLACCESS_DONTCARE;
5205 pPage->GCPhys = NIL_RTGCPHYS;
5206 pPage->iNext = pPool->iFreeHead;
5207 pPool->iFreeHead = pPage->idx;
5208 pgmUnlock(pVM);
5209 STAM_PROFILE_ADV_STOP(&pPool->StatAlloc, a);
5210 Log(("pgmPoolAlloc: returns %Rrc (Insert)\n", rc3));
5211 return rc3;
5212 }
5213
5214 /*
5215 * Commit the allocation, clear the page and return.
5216 */
5217#ifdef VBOX_WITH_STATISTICS
5218 if (pPool->cUsedPages > pPool->cUsedPagesHigh)
5219 pPool->cUsedPagesHigh = pPool->cUsedPages;
5220#endif
5221
5222 if (!pPage->fZeroed)
5223 {
5224 STAM_PROFILE_START(&pPool->StatZeroPage, z);
5225 void *pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
5226 ASMMemZeroPage(pv);
5227 STAM_PROFILE_STOP(&pPool->StatZeroPage, z);
5228 }
5229
5230 *ppPage = pPage;
5231 if (fLockPage)
5232 pgmPoolLockPage(pPool, pPage);
5233 pgmUnlock(pVM);
5234 LogFlow(("pgmPoolAlloc: returns %Rrc *ppPage=%p:{.Key=%RHp, .idx=%d, .fCached=%RTbool, .fMonitored=%RTbool}\n",
5235 rc, pPage, pPage->Core.Key, pPage->idx, pPage->fCached, pPage->fMonitored));
5236 STAM_PROFILE_ADV_STOP(&pPool->StatAlloc, a);
5237 return rc;
5238}
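
/*
 * Illustrative sketch (hypothetical caller, not part of this file): allocating
 * a shadow page table for a guest PAE page table.  GCPhysGstPT, fA20Enabled,
 * pShwPdPage and iPde are assumed caller variables; VINF_PGM_CACHED_PAGE means
 * an existing, already populated shadow PT was returned from the cache.
 *
 *     PPGMPOOLPAGE pShwPage = NULL;
 *     int rc = pgmPoolAlloc(pVM, GCPhysGstPT, PGMPOOLKIND_PAE_PT_FOR_PAE_PT, PGMPOOLACCESS_DONTCARE,
 *                           fA20Enabled, pShwPdPage->idx, iPde, false, &pShwPage);
 *     AssertReturn(RT_SUCCESS(rc), rc);
 *     Assert(rc == VINF_SUCCESS || rc == VINF_PGM_CACHED_PAGE);
 */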
5239
5240
5241/**
5242 * Frees a usage of a pool page.
5243 *
5244 * @param pVM The cross context VM structure.
5245 * @param HCPhys The HC physical address of the shadow page.
5246 * @param iUser The shadow page pool index of the user table.
5247 * NIL_PGMPOOL_IDX if root page.
5248 * @param iUserTable The index into the user table (shadowed). Ignored if
5249 * root page.
5250 */
5251void pgmPoolFree(PVM pVM, RTHCPHYS HCPhys, uint16_t iUser, uint32_t iUserTable)
5252{
5253 LogFlow(("pgmPoolFree: HCPhys=%RHp iUser=%d iUserTable=%#x\n", HCPhys, iUser, iUserTable));
5254 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
5255 pgmPoolFreeByPage(pPool, pgmPoolGetPage(pPool, HCPhys), iUser, iUserTable);
5256}
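
/*
 * Illustrative sketch (hypothetical caller, not part of this file): freeing a
 * shadow PT by the host physical address taken straight out of a present
 * shadow PDE.  pShwPd, pShwPdPage and iPde are assumed caller variables.
 *
 *     X86PDEPAE Pde = pShwPd->a[iPde];
 *     if (Pde.n.u1Present)
 *     {
 *         pShwPd->a[iPde].u = 0;
 *         pgmPoolFree(pVM, Pde.u & X86_PDE_PAE_PG_MASK, pShwPdPage->idx, iPde);
 *     }
 */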
5257
5258
5259/**
5260 * Internal worker for finding an 'in-use' shadow page given by its physical address.
5261 *
5262 * @returns Pointer to the shadow page structure.
5263 * @param pPool The pool.
5264 * @param HCPhys The HC physical address of the shadow page.
5265 */
5266PPGMPOOLPAGE pgmPoolGetPage(PPGMPOOL pPool, RTHCPHYS HCPhys)
5267{
5268 PGM_LOCK_ASSERT_OWNER(pPool->CTX_SUFF(pVM));
5269
5270 /*
5271 * Look up the page.
5272 */
5273 PPGMPOOLPAGE pPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, HCPhys & X86_PTE_PAE_PG_MASK);
5274
5275 AssertFatalMsg(pPage && pPage->enmKind != PGMPOOLKIND_FREE, ("HCPhys=%RHp pPage=%p idx=%d\n", HCPhys, pPage, (pPage) ? pPage->idx : 0));
5276 return pPage;
5277}
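
/*
 * Illustrative sketch (hypothetical caller, not part of this file): resolving a
 * pool page from a shadow table address and mapping it for inspection.  pVM and
 * HCPhysShwPT are assumed; the PGM lock must be owned around both calls.
 *
 *     pgmLock(pVM);
 *     PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, HCPhysShwPT);
 *     PX86PTPAE    pShwPT   = (PX86PTPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
 *     Log(("idx=%d kind=%s pte0=%RX64\n", pShwPage->idx, pgmPoolPoolKindToStr(pShwPage->enmKind), pShwPT->a[0].u));
 *     pgmUnlock(pVM);
 */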
5278
5279
5280/**
5281 * Internal worker for finding a page for debugging purposes, no assertions.
5282 *
5283 * @returns Pointer to the shadow page structure, or NULL if not found.
5284 * @param pPool The pool.
5285 * @param HCPhys The HC physical address of the shadow page.
5286 */
5287PPGMPOOLPAGE pgmPoolQueryPageForDbg(PPGMPOOL pPool, RTHCPHYS HCPhys)
5288{
5289 PGM_LOCK_ASSERT_OWNER(pPool->CTX_SUFF(pVM));
5290 return (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, HCPhys & X86_PTE_PAE_PG_MASK);
5291}
5292
5293#ifdef IN_RING3 /* currently only used in ring 3; save some space in the R0 & GC modules (left it here as we might need it elsewhere later on) */
5294
5295/**
5296 * Flushes the specified page if present.
5297 *
5298 * @param pVM The cross context VM structure.
5299 * @param GCPhys Guest physical address of the page to flush
5300 */
5301void pgmPoolFlushPageByGCPhys(PVM pVM, RTGCPHYS GCPhys)
5302{
5303 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
5304
5305 VM_ASSERT_EMT(pVM);
5306
5307 /*
5308 * Look up the GCPhys in the hash.
5309 */
5310 GCPhys = GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;
5311 unsigned i = pPool->aiHash[PGMPOOL_HASH(GCPhys)];
5312 if (i == NIL_PGMPOOL_IDX)
5313 return;
5314
5315 do
5316 {
5317 PPGMPOOLPAGE pPage = &pPool->aPages[i];
5318 if (pPage->GCPhys - GCPhys < PAGE_SIZE)
5319 {
5320 switch (pPage->enmKind)
5321 {
5322 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT:
5323 case PGMPOOLKIND_PAE_PT_FOR_32BIT_PT:
5324 case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
5325 case PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD:
5326 case PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD:
5327 case PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD:
5328 case PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD:
5329 case PGMPOOLKIND_PAE_PD_FOR_PAE_PD:
5330 case PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD:
5331 case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
5332 case PGMPOOLKIND_64BIT_PML4:
5333 case PGMPOOLKIND_32BIT_PD:
5334 case PGMPOOLKIND_PAE_PDPT:
5335 {
5336 Log(("PGMPoolFlushPage: found pgm pool pages for %RGp\n", GCPhys));
5337#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
5338 if (pPage->fDirty)
5339 STAM_COUNTER_INC(&pPool->StatForceFlushDirtyPage);
5340 else
5341#endif
5342 STAM_COUNTER_INC(&pPool->StatForceFlushPage);
5343 Assert(!pgmPoolIsPageLocked(pPage));
5344 pgmPoolMonitorChainFlush(pPool, pPage);
5345 return;
5346 }
5347
5348 /* ignore, no monitoring. */
5349 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB:
5350 case PGMPOOLKIND_PAE_PT_FOR_PAE_2MB:
5351 case PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB:
5352 case PGMPOOLKIND_32BIT_PT_FOR_PHYS:
5353 case PGMPOOLKIND_PAE_PT_FOR_PHYS:
5354 case PGMPOOLKIND_64BIT_PDPT_FOR_PHYS:
5355 case PGMPOOLKIND_64BIT_PD_FOR_PHYS:
5356 case PGMPOOLKIND_EPT_PDPT_FOR_PHYS:
5357 case PGMPOOLKIND_EPT_PD_FOR_PHYS:
5358 case PGMPOOLKIND_EPT_PT_FOR_PHYS:
5359 case PGMPOOLKIND_ROOT_NESTED:
5360 case PGMPOOLKIND_PAE_PD_PHYS:
5361 case PGMPOOLKIND_PAE_PDPT_PHYS:
5362 case PGMPOOLKIND_32BIT_PD_PHYS:
5363 case PGMPOOLKIND_PAE_PDPT_FOR_32BIT:
5364 break;
5365
5366 default:
5367 AssertFatalMsgFailed(("enmKind=%d idx=%d\n", pPage->enmKind, pPage->idx));
5368 }
5369 }
5370
5371 /* next */
5372 i = pPage->iNext;
5373 } while (i != NIL_PGMPOOL_IDX);
5374 return;
5375}
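
/*
 * Illustrative sketch (hypothetical ring-3 caller, not part of this file):
 * forcing out any monitored shadow page tables derived from a guest page whose
 * contents have just been rewritten from the outside.  GCPhysGuestPT is an
 * assumed variable; the PGM lock nests safely here.
 *
 *     pgmLock(pVM);
 *     pgmPoolFlushPageByGCPhys(pVM, GCPhysGuestPT);
 *     pgmUnlock(pVM);
 */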
5376
5377#endif /* IN_RING3 */
5378#ifdef IN_RING3
5379
5380/**
5381 * Resets the CPU on hot plugging.
5382 *
5383 * @param pVM The cross context VM structure.
5384 * @param pVCpu The cross context virtual CPU structure.
5385 */
5386void pgmR3PoolResetUnpluggedCpu(PVM pVM, PVMCPU pVCpu)
5387{
5388 pgmR3ExitShadowModeBeforePoolFlush(pVCpu);
5389
5390 pgmR3ReEnterShadowModeAfterPoolFlush(pVM, pVCpu);
5391 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
5392 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
5393}
5394
5395
5396/**
5397 * Flushes the entire cache.
5398 *
5399 * It will assert a global CR3 flush (FF) and assumes the caller is aware of
5400 * this and execute this CR3 flush.
5401 *
5402 * @param pVM The cross context VM structure.
5403 */
5404void pgmR3PoolReset(PVM pVM)
5405{
5406 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
5407
5408 PGM_LOCK_ASSERT_OWNER(pVM);
5409 STAM_PROFILE_START(&pPool->StatR3Reset, a);
5410 LogFlow(("pgmR3PoolReset:\n"));
5411
5412 /*
5413 * If there are no pages in the pool, there is nothing to do.
5414 */
5415 if (pPool->cCurPages <= PGMPOOL_IDX_FIRST)
5416 {
5417 STAM_PROFILE_STOP(&pPool->StatR3Reset, a);
5418 return;
5419 }
5420
5421 /*
5422 * Exit the shadow mode since we're going to clear everything,
5423 * including the root page.
5424 */
5425 for (VMCPUID i = 0; i < pVM->cCpus; i++)
5426 pgmR3ExitShadowModeBeforePoolFlush(&pVM->aCpus[i]);
5427
5428 /*
5429 * Nuke the free list and reinsert all pages into it.
5430 */
5431 for (unsigned i = pPool->cCurPages - 1; i >= PGMPOOL_IDX_FIRST; i--)
5432 {
5433 PPGMPOOLPAGE pPage = &pPool->aPages[i];
5434
5435 Assert(pPage->Core.Key == MMPage2Phys(pVM, pPage->pvPageR3));
5436 if (pPage->fMonitored)
5437 pgmPoolMonitorFlush(pPool, pPage);
5438 pPage->iModifiedNext = NIL_PGMPOOL_IDX;
5439 pPage->iModifiedPrev = NIL_PGMPOOL_IDX;
5440 pPage->iMonitoredNext = NIL_PGMPOOL_IDX;
5441 pPage->iMonitoredPrev = NIL_PGMPOOL_IDX;
5442 pPage->cModifications = 0;
5443 pPage->GCPhys = NIL_RTGCPHYS;
5444 pPage->enmKind = PGMPOOLKIND_FREE;
5445 pPage->enmAccess = PGMPOOLACCESS_DONTCARE;
5446 Assert(pPage->idx == i);
5447 pPage->iNext = i + 1;
5448 pPage->fA20Enabled = true;
5449 pPage->fZeroed = false; /* This could probably be optimized, but better safe than sorry. */
5450 pPage->fSeenNonGlobal = false;
5451 pPage->fMonitored = false;
5452 pPage->fDirty = false;
5453 pPage->fCached = false;
5454 pPage->fReusedFlushPending = false;
5455 pPage->iUserHead = NIL_PGMPOOL_USER_INDEX;
5456 pPage->iAgeNext = NIL_PGMPOOL_IDX;
5457 pPage->iAgePrev = NIL_PGMPOOL_IDX;
5458 pPage->GCPtrLastAccessHandlerRip = NIL_RTGCPTR;
5459 pPage->GCPtrLastAccessHandlerFault = NIL_RTGCPTR;
5460 pPage->cLastAccessHandler = 0;
5461 pPage->cLocked = 0;
5462#ifdef VBOX_STRICT
5463 pPage->GCPtrDirtyFault = NIL_RTGCPTR;
5464#endif
5465 }
5466 pPool->aPages[pPool->cCurPages - 1].iNext = NIL_PGMPOOL_IDX;
5467 pPool->iFreeHead = PGMPOOL_IDX_FIRST;
5468 pPool->cUsedPages = 0;
5469
5470 /*
5471 * Zap and reinitialize the user records.
5472 */
5473 pPool->cPresent = 0;
5474 pPool->iUserFreeHead = 0;
5475 PPGMPOOLUSER paUsers = pPool->CTX_SUFF(paUsers);
5476 const unsigned cMaxUsers = pPool->cMaxUsers;
5477 for (unsigned i = 0; i < cMaxUsers; i++)
5478 {
5479 paUsers[i].iNext = i + 1;
5480 paUsers[i].iUser = NIL_PGMPOOL_IDX;
5481 paUsers[i].iUserTable = 0xfffffffe;
5482 }
5483 paUsers[cMaxUsers - 1].iNext = NIL_PGMPOOL_USER_INDEX;
5484
5485 /*
5486 * Clear all the GCPhys links and rebuild the phys ext free list.
5487 */
5488 for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX);
5489 pRam;
5490 pRam = pRam->CTX_SUFF(pNext))
5491 {
5492 unsigned iPage = pRam->cb >> PAGE_SHIFT;
5493 while (iPage-- > 0)
5494 PGM_PAGE_SET_TRACKING(pVM, &pRam->aPages[iPage], 0);
5495 }
5496
5497 pPool->iPhysExtFreeHead = 0;
5498 PPGMPOOLPHYSEXT paPhysExts = pPool->CTX_SUFF(paPhysExts);
5499 const unsigned cMaxPhysExts = pPool->cMaxPhysExts;
5500 for (unsigned i = 0; i < cMaxPhysExts; i++)
5501 {
5502 paPhysExts[i].iNext = i + 1;
5503 paPhysExts[i].aidx[0] = NIL_PGMPOOL_IDX;
5504 paPhysExts[i].apte[0] = NIL_PGMPOOL_PHYSEXT_IDX_PTE;
5505 paPhysExts[i].aidx[1] = NIL_PGMPOOL_IDX;
5506 paPhysExts[i].apte[1] = NIL_PGMPOOL_PHYSEXT_IDX_PTE;
5507 paPhysExts[i].aidx[2] = NIL_PGMPOOL_IDX;
5508 paPhysExts[i].apte[2] = NIL_PGMPOOL_PHYSEXT_IDX_PTE;
5509 }
5510 paPhysExts[cMaxPhysExts - 1].iNext = NIL_PGMPOOL_PHYSEXT_INDEX;
5511
5512 /*
5513 * Just zap the modified list.
5514 */
5515 pPool->cModifiedPages = 0;
5516 pPool->iModifiedHead = NIL_PGMPOOL_IDX;
5517
5518 /*
5519 * Clear the GCPhys hash and the age list.
5520 */
5521 for (unsigned i = 0; i < RT_ELEMENTS(pPool->aiHash); i++)
5522 pPool->aiHash[i] = NIL_PGMPOOL_IDX;
5523 pPool->iAgeHead = NIL_PGMPOOL_IDX;
5524 pPool->iAgeTail = NIL_PGMPOOL_IDX;
5525
5526#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
5527 /* Clear all dirty pages. */
5528 pPool->idxFreeDirtyPage = 0;
5529 pPool->cDirtyPages = 0;
5530 for (unsigned i = 0; i < RT_ELEMENTS(pPool->aDirtyPages); i++)
5531 pPool->aDirtyPages[i].uIdx = NIL_PGMPOOL_IDX;
5532#endif
5533
5534 /*
5535 * Reinsert active pages into the hash and ensure monitoring chains are correct.
5536 */
5537 for (VMCPUID i = 0; i < pVM->cCpus; i++)
5538 {
5539 /*
5540 * Re-enter the shadowing mode and assert Sync CR3 FF.
5541 */
5542 PVMCPU pVCpu = &pVM->aCpus[i];
5543 pgmR3ReEnterShadowModeAfterPoolFlush(pVM, pVCpu);
5544 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
5545 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
5546 }
5547
5548 STAM_PROFILE_STOP(&pPool->StatR3Reset, a);
5549}
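
/*
 * Illustrative sketch (hypothetical ring-3 caller, not part of this file): the
 * reset only queues work, so the caller must let each EMT act on the forced
 * action flags it sets, e.g. via PGMSyncCR3 on the next execution round.
 *
 *     pgmLock(pVM);
 *     pgmR3PoolReset(pVM);
 *     pgmUnlock(pVM);
 *     for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
 *         Assert(VMCPU_FF_IS_SET(&pVM->aCpus[idCpu], VMCPU_FF_PGM_SYNC_CR3));
 */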
5550
5551#endif /* IN_RING3 */
5552
5553#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
5554/**
5555 * Stringifies a PGMPOOLKIND value.
5556 */
5557static const char *pgmPoolPoolKindToStr(uint8_t enmKind)
5558{
5559 switch ((PGMPOOLKIND)enmKind)
5560 {
5561 case PGMPOOLKIND_INVALID:
5562 return "PGMPOOLKIND_INVALID";
5563 case PGMPOOLKIND_FREE:
5564 return "PGMPOOLKIND_FREE";
5565 case PGMPOOLKIND_32BIT_PT_FOR_PHYS:
5566 return "PGMPOOLKIND_32BIT_PT_FOR_PHYS";
5567 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT:
5568 return "PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT";
5569 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB:
5570 return "PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB";
5571 case PGMPOOLKIND_PAE_PT_FOR_PHYS:
5572 return "PGMPOOLKIND_PAE_PT_FOR_PHYS";
5573 case PGMPOOLKIND_PAE_PT_FOR_32BIT_PT:
5574 return "PGMPOOLKIND_PAE_PT_FOR_32BIT_PT";
5575 case PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB:
5576 return "PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB";
5577 case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
5578 return "PGMPOOLKIND_PAE_PT_FOR_PAE_PT";
5579 case PGMPOOLKIND_PAE_PT_FOR_PAE_2MB:
5580 return "PGMPOOLKIND_PAE_PT_FOR_PAE_2MB";
5581 case PGMPOOLKIND_32BIT_PD:
5582 return "PGMPOOLKIND_32BIT_PD";
5583 case PGMPOOLKIND_32BIT_PD_PHYS:
5584 return "PGMPOOLKIND_32BIT_PD_PHYS";
5585 case PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD:
5586 return "PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD";
5587 case PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD:
5588 return "PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD";
5589 case PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD:
5590 return "PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD";
5591 case PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD:
5592 return "PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD";
5593 case PGMPOOLKIND_PAE_PD_FOR_PAE_PD:
5594 return "PGMPOOLKIND_PAE_PD_FOR_PAE_PD";
5595 case PGMPOOLKIND_PAE_PD_PHYS:
5596 return "PGMPOOLKIND_PAE_PD_PHYS";
5597 case PGMPOOLKIND_PAE_PDPT_FOR_32BIT:
5598 return "PGMPOOLKIND_PAE_PDPT_FOR_32BIT";
5599 case PGMPOOLKIND_PAE_PDPT:
5600 return "PGMPOOLKIND_PAE_PDPT";
5601 case PGMPOOLKIND_PAE_PDPT_PHYS:
5602 return "PGMPOOLKIND_PAE_PDPT_PHYS";
5603 case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
5604 return "PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT";
5605 case PGMPOOLKIND_64BIT_PDPT_FOR_PHYS:
5606 return "PGMPOOLKIND_64BIT_PDPT_FOR_PHYS";
5607 case PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD:
5608 return "PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD";
5609 case PGMPOOLKIND_64BIT_PD_FOR_PHYS:
5610 return "PGMPOOLKIND_64BIT_PD_FOR_PHYS";
5611 case PGMPOOLKIND_64BIT_PML4:
5612 return "PGMPOOLKIND_64BIT_PML4";
5613 case PGMPOOLKIND_EPT_PDPT_FOR_PHYS:
5614 return "PGMPOOLKIND_EPT_PDPT_FOR_PHYS";
5615 case PGMPOOLKIND_EPT_PD_FOR_PHYS:
5616 return "PGMPOOLKIND_EPT_PD_FOR_PHYS";
5617 case PGMPOOLKIND_EPT_PT_FOR_PHYS:
5618 return "PGMPOOLKIND_EPT_PT_FOR_PHYS";
5619 case PGMPOOLKIND_ROOT_NESTED:
5620 return "PGMPOOLKIND_ROOT_NESTED";
5621 }
5622 return "Unknown kind!";
5623}
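
/*
 * Illustrative sketch (not part of this file): the stringifier above is meant
 * for logging and assertions only, e.g.:
 *
 *     Log(("pgmPool: page %d is %s\n", pPage->idx, pgmPoolPoolKindToStr(pPage->enmKind)));
 */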
5624#endif /* LOG_ENABLED || VBOX_STRICT */
5625