VirtualBox

source: vbox/trunk/src/VBox/VMM/PGMInternal.h@17282

Last change on this file since 17282 was 17279, checked in by vboxsync, 16 years ago

PGM,MM: Attacking the shadow page pool tracking info stored in PGMPPAGE, replacing direct access with PGM_PAGE_SET/GET_XXXX access. Required to get rid of MM_RAM_FLAGS_* and to be able to restructure PGMPAGE. MM_RAM_FLAGS_NO_REFS_MASK fully eliminated already.

1/* $Id: PGMInternal.h 17279 2009-03-03 14:05:15Z vboxsync $ */
2/** @file
3 * PGM - Internal header file.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22#ifndef ___PGMInternal_h
23#define ___PGMInternal_h
24
25#include <VBox/cdefs.h>
26#include <VBox/types.h>
27#include <VBox/err.h>
28#include <VBox/stam.h>
29#include <VBox/param.h>
30#include <VBox/vmm.h>
31#include <VBox/mm.h>
32#include <VBox/pdmcritsect.h>
33#include <VBox/pdmapi.h>
34#include <VBox/dis.h>
35#include <VBox/dbgf.h>
36#include <VBox/log.h>
37#include <VBox/gmm.h>
38#include <VBox/hwaccm.h>
39#include <iprt/avl.h>
40#include <iprt/assert.h>
41#include <iprt/critsect.h>
42
43
44
45/** @defgroup grp_pgm_int Internals
46 * @ingroup grp_pgm
47 * @internal
48 * @{
49 */
50
51
52/** @name PGM Compile Time Config
53 * @{
54 */
55
56/*
57 * Enable to use the PGM pool for all levels in the paging chain in all paging modes.
58 */
59#define VBOX_WITH_PGMPOOL_PAGING_ONLY
60
61/**
62 * Solves page-out-of-sync issues inside the Guest Context (in PGMGC.cpp).
63 * Comment it out if it breaks something.
64 */
65#define PGM_OUT_OF_SYNC_IN_GC
66
67/**
68 * Check and skip global PDEs for non-global flushes
69 */
70#define PGM_SKIP_GLOBAL_PAGEDIRS_ON_NONGLOBAL_FLUSH
71
72/**
73 * Sync N pages instead of a whole page table
74 */
75#define PGM_SYNC_N_PAGES
76
77/**
78 * Number of pages to sync during a page fault
79 *
80 * When PGMPOOL_WITH_GCPHYS_TRACKING is enabled, using high values here
81 * causes a lot of unnecessary extents and is also slower than taking more \#PFs.
82 */
83#define PGM_SYNC_NR_PAGES 8
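/* A sketch (an assumption, not the actual #PF handler logic) of how such a
 * window is typically centred on the faulting page table entry:
 *     unsigned iFirst = iPage >= PGM_SYNC_NR_PAGES / 2 ? iPage - PGM_SYNC_NR_PAGES / 2 : 0;
 *     unsigned iLast  = RT_MIN(iFirst + PGM_SYNC_NR_PAGES, cPTEntries);
 *     for (unsigned i = iFirst; i < iLast; i++)
 *         // ...sync page table entry i...
 */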
84
85/**
86 * Number of PGMPhysRead/Write cache entries (must be <= 64, the number of bits in the PGMPHYSCACHE::aEntries bitmap)
87 */
88#define PGM_MAX_PHYSCACHE_ENTRIES 64
89#define PGM_MAX_PHYSCACHE_ENTRIES_MASK (PGM_MAX_PHYSCACHE_ENTRIES-1)
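/* A sketch (an assumption about the indexing scheme) of how the mask pairs
 * with the uint64_t valid-entry bitmap of PGMPHYSCACHE further down:
 *     unsigned iEntry = (GCPhys >> PAGE_SHIFT) & PGM_MAX_PHYSCACHE_ENTRIES_MASK;
 *     bool     fValid = RT_BOOL(pCache->aEntries & RT_BIT_64(iEntry));
 */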
90
91/**
92 * Enable caching of PGMR3PhysRead/WriteByte/Word/Dword
93 */
94#define PGM_PHYSMEMACCESS_CACHING
95
96/** @def PGMPOOL_WITH_CACHE
97 * Enable aggressive caching using the page pool.
98 *
99 * This requires PGMPOOL_WITH_USER_TRACKING and PGMPOOL_WITH_MONITORING.
100 */
101#define PGMPOOL_WITH_CACHE
102
103/** @def PGMPOOL_WITH_MIXED_PT_CR3
104 * When defined, we'll deal with 'uncachable' pages.
105 */
106#ifdef PGMPOOL_WITH_CACHE
107# define PGMPOOL_WITH_MIXED_PT_CR3
108#endif
109
110/** @def PGMPOOL_WITH_MONITORING
111 * Monitor the guest pages which are shadowed.
112 * When this is enabled, PGMPOOL_WITH_CACHE or PGMPOOL_WITH_GCPHYS_TRACKING must
113 * be enabled as well.
114 * @remark doesn't really work without caching now. (Mixed PT/CR3 change.)
115 */
116#ifdef PGMPOOL_WITH_CACHE
117# define PGMPOOL_WITH_MONITORING
118#endif
119
120/** @def PGMPOOL_WITH_GCPHYS_TRACKING
121 * Tracking of shadow pages mapping guest physical pages.
122 *
123 * This is very expensive; the current cache prototype is trying to figure out
124 * whether it will be acceptable with an aggressive caching policy.
125 */
126#if defined(PGMPOOL_WITH_CACHE) || defined(PGMPOOL_WITH_MONITORING)
127# define PGMPOOL_WITH_GCPHYS_TRACKING
128#endif
129
130/** @def PGMPOOL_WITH_USER_TRACKING
131 * Tracking users of shadow pages. This is required for the linking of shadow page
132 * tables and physical guest addresses.
133 */
134#if defined(PGMPOOL_WITH_GCPHYS_TRACKING) || defined(PGMPOOL_WITH_CACHE) || defined(PGMPOOL_WITH_MONITORING)
135# define PGMPOOL_WITH_USER_TRACKING
136#endif
137
138/** @def PGMPOOL_CFG_MAX_GROW
139 * The maximum number of pages to add to the pool in one go.
140 */
141#define PGMPOOL_CFG_MAX_GROW (_256K >> PAGE_SHIFT)
142
143/** @def VBOX_STRICT_PGM_HANDLER_VIRTUAL
144 * Enables some extra assertions for virtual handlers (mainly phys2virt related).
145 */
146#ifdef VBOX_STRICT
147# define VBOX_STRICT_PGM_HANDLER_VIRTUAL
148#endif
149/** @} */
150
151
152/** @name PDPT and PML4 flags.
153 * These are placed in the three bits available for system programs in
154 * the PDPT and PML4 entries.
155 * @{ */
156/** The entry is a permanent one and must always be present.
157 * Never free such an entry. */
158#define PGM_PLXFLAGS_PERMANENT RT_BIT_64(10)
159/** Mapping (hypervisor allocated pagetable). */
160#define PGM_PLXFLAGS_MAPPING RT_BIT_64(11)
161/** @} */
162
163/** @name Page directory flags.
164 * These are placed in the three bits available for system programs in
165 * the page directory entries.
166 * @{ */
167/** Mapping (hypervisor allocated pagetable). */
168#define PGM_PDFLAGS_MAPPING RT_BIT_64(10)
169/** Made read-only to facilitate dirty bit tracking. */
170#define PGM_PDFLAGS_TRACK_DIRTY RT_BIT_64(11)
171/** @} */
172
173/** @name Page flags.
174 * These are placed in the three bits available for system programs in
175 * the page entries.
176 * @{ */
177/** Made read-only to facilitate dirty bit tracking. */
178#define PGM_PTFLAGS_TRACK_DIRTY RT_BIT_64(9)
179
180#ifndef PGM_PTFLAGS_CSAM_VALIDATED
181/** Scanned and approved by CSAM (tm).
182 * NOTE: Must be identical to the one defined in CSAMInternal.h!!
183 * @todo Move PGM_PTFLAGS_* and PGM_PDFLAGS_* to VBox/pgm.h. */
184#define PGM_PTFLAGS_CSAM_VALIDATED RT_BIT_64(11)
185#endif
186
187/** Mark a dynamic map entry (PGMDynMapHCPage) as locked. */
188#define PGM_PTFLAGS_DYN_LOCKED RT_BIT(9)
189
190/** @} */
191
192/** @name Defines used to indicate the shadow and guest paging in the templates.
193 * @{ */
194#define PGM_TYPE_REAL 1
195#define PGM_TYPE_PROT 2
196#define PGM_TYPE_32BIT 3
197#define PGM_TYPE_PAE 4
198#define PGM_TYPE_AMD64 5
199#define PGM_TYPE_NESTED 6
200#define PGM_TYPE_EPT 7
201#define PGM_TYPE_MAX PGM_TYPE_EPT
202/** @} */
203
204/** Macro for checking if the guest is using paging.
205 * @param uGstType PGM_TYPE_*
206 * @param uShwType PGM_TYPE_*
207 * @remark ASSUMES certain order of the PGM_TYPE_* values.
208 */
209#define PGM_WITH_PAGING(uGstType, uShwType) \
210 ( (uGstType) >= PGM_TYPE_32BIT \
211 && (uShwType) != PGM_TYPE_NESTED \
212 && (uShwType) != PGM_TYPE_EPT)
213
214/** Macro for checking if the guest supports the NX bit.
215 * @param uGstType PGM_TYPE_*
216 * @param uShwType PGM_TYPE_*
217 * @remark ASSUMES certain order of the PGM_TYPE_* values.
218 */
219#define PGM_WITH_NX(uGstType, uShwType) \
220 ( (uGstType) >= PGM_TYPE_PAE \
221 && (uShwType) != PGM_TYPE_NESTED \
222 && (uShwType) != PGM_TYPE_EPT)
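/* The ASSUMES remarks above rely on the numeric ordering of the PGM_TYPE_*
 * defines. A compile-time check along these lines (a sketch, not present in
 * the original) would pin the assumption down:
 *     AssertCompile(PGM_TYPE_REAL  < PGM_TYPE_32BIT);
 *     AssertCompile(PGM_TYPE_32BIT < PGM_TYPE_PAE);
 *     AssertCompile(PGM_TYPE_PAE   < PGM_TYPE_AMD64);
 */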
223
224
225/** @def PGM_HCPHYS_2_PTR
226 * Maps a HC physical page pool address to a virtual address.
227 *
228 * @returns VBox status code.
229 * @param pVM The VM handle.
230 * @param HCPhys The HC physical address to map to a virtual one.
231 * @param ppv Where to store the virtual address. No need to cast this.
232 *
233 * @remark In GC this uses PGMGCDynMapHCPage(), so it will consume part of the
234 * small page window employed by that function. Be careful.
235 * @remark There is no need to assert on the result.
236 */
237#ifdef IN_RC
238# define PGM_HCPHYS_2_PTR(pVM, HCPhys, ppv) \
239 PGMDynMapHCPage(pVM, HCPhys, (void **)(ppv))
240#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
241# define PGM_HCPHYS_2_PTR(pVM, HCPhys, ppv) \
242 pgmR0DynMapHCPageInlined(&(pVM)->pgm.s, HCPhys, (void **)(ppv))
243#else
244# define PGM_HCPHYS_2_PTR(pVM, HCPhys, ppv) \
245 MMPagePhys2PageEx(pVM, HCPhys, (void **)(ppv))
246#endif
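/* A minimal usage sketch (hypothetical call site). Per the doc comment above
 * there is no need to cast ppv, so a typed pointer can be passed directly:
 *     PX86PT pPT;
 *     int rc = PGM_HCPHYS_2_PTR(pVM, HCPhys, &pPT);
 *     if (RT_FAILURE(rc))
 *         return rc;
 * In GC/R0 this may consume a scarce dynamic mapping entry, so the pointer
 * should be used promptly rather than stashed away. The same pattern applies
 * to the PGM_GCPHYS_2_PTR* variants below.
 */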
247
248/** @def PGM_HCPHYS_2_PTR_BY_PGM
249 * Maps a HC physical page pool address to a virtual address.
250 *
251 * @returns VBox status code.
252 * @param pPGM The PGM instance data.
253 * @param HCPhys The HC physical address to map to a virtual one.
254 * @param ppv Where to store the virtual address. No need to cast this.
255 *
256 * @remark In GC this uses PGMGCDynMapHCPage(), so it will consume part of the
257 * small page window employed by that function. Be careful.
258 * @remark There is no need to assert on the result.
259 */
260#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
261# define PGM_HCPHYS_2_PTR_BY_PGM(pPGM, HCPhys, ppv) \
262 pgmR0DynMapHCPageInlined(pPGM, HCPhys, (void **)(ppv))
263#else
264# define PGM_HCPHYS_2_PTR_BY_PGM(pPGM, HCPhys, ppv) \
265 PGM_HCPHYS_2_PTR(PGM2VM(pPGM), HCPhys, (void **)(ppv))
266#endif
267
268/** @def PGM_GCPHYS_2_PTR
269 * Maps a GC physical page address to a virtual address.
270 *
271 * @returns VBox status code.
272 * @param pVM The VM handle.
273 * @param GCPhys The GC physical address to map to a virtual one.
274 * @param ppv Where to store the virtual address. No need to cast this.
275 *
276 * @remark In GC this uses PGMGCDynMapGCPage(), so it will consume part of the
277 * small page window employed by that function. Be careful.
278 * @remark There is no need to assert on the result.
279 */
280#ifdef IN_RC
281# define PGM_GCPHYS_2_PTR(pVM, GCPhys, ppv) \
282 PGMDynMapGCPage(pVM, GCPhys, (void **)(ppv))
283#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
284# define PGM_GCPHYS_2_PTR(pVM, GCPhys, ppv) \
285 pgmR0DynMapGCPageInlined(&(pVM)->pgm.s, GCPhys, (void **)(ppv))
286#else
287# define PGM_GCPHYS_2_PTR(pVM, GCPhys, ppv) \
288 PGMPhysGCPhys2R3Ptr(pVM, GCPhys, 1 /* one page only */, (PRTR3PTR)(ppv)) /** @todo this isn't asserting, use PGMRamGCPhys2HCPtr! */
289#endif
290
291/** @def PGM_GCPHYS_2_PTR_BY_PGM
292 * Maps a GC physical page address to a virtual address.
293 *
294 * @returns VBox status code.
295 * @param pPGM Pointer to the PGM instance data.
296 * @param GCPhys The GC physical address to map to a virtual one.
297 * @param ppv Where to store the virtual address. No need to cast this.
298 *
299 * @remark In GC this uses PGMGCDynMapGCPage(), so it will consume part of the
300 * small page window employed by that function. Be careful.
301 * @remark There is no need to assert on the result.
302 */
303#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
304# define PGM_GCPHYS_2_PTR_BY_PGM(pPGM, GCPhys, ppv) \
305 pgmR0DynMapGCPageInlined(pPGM, GCPhys, (void **)(ppv))
306#else
307# define PGM_GCPHYS_2_PTR_BY_PGM(pPGM, GCPhys, ppv) \
308 PGM_GCPHYS_2_PTR(PGM2VM(pPGM), GCPhys, ppv)
309#endif
310
311/** @def PGM_GCPHYS_2_PTR_EX
312 * Maps an unaligned GC physical page address to a virtual address.
313 *
314 * @returns VBox status code.
315 * @param pVM The VM handle.
316 * @param GCPhys The GC physical address to map to a virtual one.
317 * @param ppv Where to store the virtual address. No need to cast this.
318 *
319 * @remark In GC this uses PGMGCDynMapGCPage(), so it will consume part of the
320 * small page window employed by that function. Be careful.
321 * @remark There is no need to assert on the result.
322 */
323#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
324# define PGM_GCPHYS_2_PTR_EX(pVM, GCPhys, ppv) \
325 PGMDynMapGCPageOff(pVM, GCPhys, (void **)(ppv))
326#else
327# define PGM_GCPHYS_2_PTR_EX(pVM, GCPhys, ppv) \
328 PGMPhysGCPhys2R3Ptr(pVM, GCPhys, 1 /* one page only */, (PRTR3PTR)(ppv)) /** @todo this isn't asserting, use PGMRamGCPhys2HCPtr! */
329#endif
330
331/** @def PGM_INVL_PG
332 * Invalidates a page: directly when in GC, via HWACCM when in HC.
333 *
334 * @param GCVirt The virtual address of the page to invalidate.
335 */
336#ifdef IN_RC
337# define PGM_INVL_PG(GCVirt) ASMInvalidatePage((void *)(GCVirt))
338#elif defined(IN_RING0)
339# define PGM_INVL_PG(GCVirt) HWACCMInvalidatePage(pVM, (RTGCPTR)(GCVirt))
340#else
341# define PGM_INVL_PG(GCVirt) HWACCMInvalidatePage(pVM, (RTGCPTR)(GCVirt))
342#endif
343
344/** @def PGM_INVL_BIG_PG
345 * Invalidates a 4MB page directory entry: by CR3 reload in GC, via a HWACCM TLB flush in HC.
346 *
347 * @param GCVirt The virtual address within the page directory to invalidate.
348 */
349#ifdef IN_RC
350# define PGM_INVL_BIG_PG(GCVirt) ASMReloadCR3()
351#elif defined(IN_RING0)
352# define PGM_INVL_BIG_PG(GCVirt) HWACCMFlushTLB(pVM)
353#else
354# define PGM_INVL_BIG_PG(GCVirt) HWACCMFlushTLB(pVM)
355#endif
356
357/** @def PGM_INVL_GUEST_TLBS()
358 * Invalidates all guest TLBs.
359 */
360#ifdef IN_RC
361# define PGM_INVL_GUEST_TLBS() ASMReloadCR3()
362#elif defined(IN_RING0)
363# define PGM_INVL_GUEST_TLBS() HWACCMFlushTLB(pVM)
364#else
365# define PGM_INVL_GUEST_TLBS() HWACCMFlushTLB(pVM)
366#endif
367
368
369/**
370 * Structure for tracking GC Mappings.
371 *
372 * This structure is used in linked lists in both GC and HC.
373 */
374typedef struct PGMMAPPING
375{
376 /** Pointer to next entry. */
377 R3PTRTYPE(struct PGMMAPPING *) pNextR3;
378 /** Pointer to next entry. */
379 R0PTRTYPE(struct PGMMAPPING *) pNextR0;
380 /** Pointer to next entry. */
381 RCPTRTYPE(struct PGMMAPPING *) pNextRC;
382 /** Indicate whether this entry is finalized. */
383 bool fFinalized;
384 /** Start Virtual address. */
385 RTGCPTR GCPtr;
386 /** Last Virtual address (inclusive). */
387 RTGCPTR GCPtrLast;
388 /** Range size (bytes). */
389 RTGCPTR cb;
390 /** Pointer to relocation callback function. */
391 R3PTRTYPE(PFNPGMRELOCATE) pfnRelocate;
392 /** User argument to the callback. */
393 R3PTRTYPE(void *) pvUser;
394 /** Mapping description / name. For easing debugging. */
395 R3PTRTYPE(const char *) pszDesc;
396 /** Number of page tables. */
397 uint32_t cPTs;
398#if HC_ARCH_BITS != GC_ARCH_BITS || GC_ARCH_BITS == 64
399 uint32_t uPadding1; /**< Alignment padding. */
400#endif
401 /** Array of page table mapping data. Each entry
402 * describes one page table. The array can be longer
403 * than the declared length.
404 */
405 struct
406 {
407 /** The HC physical address of the page table. */
408 RTHCPHYS HCPhysPT;
409 /** The HC physical address of the first PAE page table. */
410 RTHCPHYS HCPhysPaePT0;
411 /** The HC physical address of the second PAE page table. */
412 RTHCPHYS HCPhysPaePT1;
413 /** The HC virtual address of the 32-bit page table. */
414 R3PTRTYPE(PX86PT) pPTR3;
415 /** The HC virtual address of the two PAE page tables (i.e. 1024 entries instead of 512). */
416 R3PTRTYPE(PX86PTPAE) paPaePTsR3;
417 /** The GC virtual address of the 32-bit page table. */
418 RCPTRTYPE(PX86PT) pPTRC;
419 /** The GC virtual address of the two PAE page tables. */
420 RCPTRTYPE(PX86PTPAE) paPaePTsRC;
421 /** The R0 virtual address of the 32-bit page table. */
422 R0PTRTYPE(PX86PT) pPTR0;
423 /** The R0 virtual address of the two PAE page tables. */
424 R0PTRTYPE(PX86PTPAE) paPaePTsR0;
425 } aPTs[1];
426} PGMMAPPING;
427/** Pointer to structure for tracking GC Mappings. */
428typedef struct PGMMAPPING *PPGMMAPPING;
429
430
431/**
432 * Physical page access handler structure.
433 *
434 * This is used to keep track of physical address ranges
435 * which are being monitored in some kind of way.
436 */
437typedef struct PGMPHYSHANDLER
438{
439 AVLROGCPHYSNODECORE Core;
440 /** Access type. */
441 PGMPHYSHANDLERTYPE enmType;
442 /** Number of pages to update. */
443 uint32_t cPages;
444 /** Pointer to R3 callback function. */
445 R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnHandlerR3;
446 /** User argument for R3 handlers. */
447 R3PTRTYPE(void *) pvUserR3;
448 /** Pointer to R0 callback function. */
449 R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnHandlerR0;
450 /** User argument for R0 handlers. */
451 R0PTRTYPE(void *) pvUserR0;
452 /** Pointer to GC callback function. */
453 RCPTRTYPE(PFNPGMRCPHYSHANDLER) pfnHandlerRC;
454 /** User argument for RC handlers. */
455 RCPTRTYPE(void *) pvUserRC;
456 /** Description / Name. For easing debugging. */
457 R3PTRTYPE(const char *) pszDesc;
458#ifdef VBOX_WITH_STATISTICS
459 /** Profiling of this handler. */
460 STAMPROFILE Stat;
461#endif
462} PGMPHYSHANDLER;
463/** Pointer to a physical page access handler structure. */
464typedef PGMPHYSHANDLER *PPGMPHYSHANDLER;
465
466
467/**
468 * Cache node for the physical addresses covered by a virtual handler.
469 */
470typedef struct PGMPHYS2VIRTHANDLER
471{
472 /** Core node for the tree based on physical ranges. */
473 AVLROGCPHYSNODECORE Core;
474 /** Offset from this struct to the PGMVIRTHANDLER structure. */
475 int32_t offVirtHandler;
476 /** Offset of the next alias relative to this one.
477 * Bit 0 is used for indicating whether we're in the tree.
478 * Bit 1 is used for indicating that we're the head node.
479 */
480 int32_t offNextAlias;
481} PGMPHYS2VIRTHANDLER;
482/** Pointer to a phys to virtual handler structure. */
483typedef PGMPHYS2VIRTHANDLER *PPGMPHYS2VIRTHANDLER;
484
485/** The bit in PGMPHYS2VIRTHANDLER::offNextAlias used to indicate that the
486 * node is in the tree. */
487#define PGMPHYS2VIRTHANDLER_IN_TREE RT_BIT(0)
488/** The bit in PGMPHYS2VIRTHANDLER::offNextAlias used to indicate that the
489 * node is in the head of an alias chain.
490 * The PGMPHYS2VIRTHANDLER_IN_TREE is always set if this bit is set. */
491#define PGMPHYS2VIRTHANDLER_IS_HEAD RT_BIT(1)
492/** The mask to apply to PGMPHYS2VIRTHANDLER::offNextAlias to get the offset. */
493#define PGMPHYS2VIRTHANDLER_OFF_MASK (~(int32_t)3)
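/* A decoding sketch (an assumption based on the defines above): the low two
 * bits are flags and the remainder is a byte offset to the next alias:
 *     PPGMPHYS2VIRTHANDLER pNext = (PPGMPHYS2VIRTHANDLER)
 *         ((intptr_t)pCur + (pCur->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
 *     bool fInTree = RT_BOOL(pCur->offNextAlias & PGMPHYS2VIRTHANDLER_IN_TREE);
 */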
494
495
496/**
497 * Virtual page access handler structure.
498 *
499 * This is used to keep track of virtual address ranges
500 * which are being monitored in some kind of way.
501 */
502typedef struct PGMVIRTHANDLER
503{
504 /** Core node for the tree based on virtual ranges. */
505 AVLROGCPTRNODECORE Core;
506 /** Size of the range (in bytes). */
507 RTGCPTR cb;
508 /** Number of cache pages. */
509 uint32_t cPages;
510 /** Access type. */
511 PGMVIRTHANDLERTYPE enmType;
512 /** Pointer to the RC callback function. */
513 RCPTRTYPE(PFNPGMRCVIRTHANDLER) pfnHandlerRC;
514#if HC_ARCH_BITS == 64
515 RTRCPTR padding;
516#endif
517 /** Pointer to the R3 callback function for invalidation. */
518 R3PTRTYPE(PFNPGMR3VIRTINVALIDATE) pfnInvalidateR3;
519 /** Pointer to the R3 callback function. */
520 R3PTRTYPE(PFNPGMR3VIRTHANDLER) pfnHandlerR3;
521 /** Description / Name. For easing debugging. */
522 R3PTRTYPE(const char *) pszDesc;
523#ifdef VBOX_WITH_STATISTICS
524 /** Profiling of this handler. */
525 STAMPROFILE Stat;
526#endif
527 /** Array of cached physical addresses for the monitored range. */
528 PGMPHYS2VIRTHANDLER aPhysToVirt[HC_ARCH_BITS == 32 ? 1 : 2];
529} PGMVIRTHANDLER;
530/** Pointer to a virtual page access handler structure. */
531typedef PGMVIRTHANDLER *PPGMVIRTHANDLER;
532
533
534/**
535 * Page type.
536 * @remarks This enum has to fit in a 3-bit field (see PGMPAGE::u3Type).
537 * @todo convert to \#defines.
538 */
539typedef enum PGMPAGETYPE
540{
541 /** The usual invalid zero entry. */
542 PGMPAGETYPE_INVALID = 0,
543 /** RAM page. (RWX) */
544 PGMPAGETYPE_RAM,
545 /** MMIO2 page. (RWX) */
546 PGMPAGETYPE_MMIO2,
547 /** Shadowed ROM. (RWX) */
548 PGMPAGETYPE_ROM_SHADOW,
549 /** ROM page. (R-X) */
550 PGMPAGETYPE_ROM,
551 /** MMIO page. (---) */
552 PGMPAGETYPE_MMIO,
553 /** End of valid entries. */
554 PGMPAGETYPE_END
555} PGMPAGETYPE;
556AssertCompile(PGMPAGETYPE_END < 7);
557
558/** @name Page type predicates.
559 * @{ */
560#define PGMPAGETYPE_IS_READABLE(type) ( (type) <= PGMPAGETYPE_ROM )
561#define PGMPAGETYPE_IS_WRITEABLE(type) ( (type) <= PGMPAGETYPE_ROM_SHADOW )
562#define PGMPAGETYPE_IS_RWX(type) ( (type) <= PGMPAGETYPE_ROM_SHADOW )
563#define PGMPAGETYPE_IS_ROX(type) ( (type) == PGMPAGETYPE_ROM )
564#define PGMPAGETYPE_IS_NP(type) ( (type) == PGMPAGETYPE_MMIO )
565/** @} */
566
567
568/**
569 * A Physical Guest Page tracking structure.
570 *
571 * The format of this structure is complicated because we have to fit a lot
572 * of information into as few bits as possible. The format is also subject
573 * to change (a new one is coming up soon), which means that we'll be
574 * using the PGM_PAGE_GET_*, PGM_PAGE_IS_* and PGM_PAGE_SET_* macros for *all*
575 * accesses to the structure.
576 */
577typedef struct PGMPAGE
578{
579 /** The physical address and a whole lot of other stuff. All bits are used! */
580 RTHCPHYS HCPhys;
581 /** The page state. */
582 uint32_t u2StateX : 2;
583 /** Flag indicating that a write monitored page was written to when set. */
584 uint32_t fWrittenToX : 1;
585 /** For later. */
586 uint32_t fSomethingElse : 1;
587 /** The Page ID.
588 * @todo Merge with HCPhys once we've liberated HCPhys of its stuff.
589 * The HCPhys will be 100% static. */
590 uint32_t idPageX : 28;
591 /** The page type (PGMPAGETYPE). */
592 uint32_t u3Type : 3;
593 /** The physical handler state (PGM_PAGE_HNDL_PHYS_STATE*) */
594 uint32_t u2HandlerPhysStateX : 2;
595 /** The virtual handler state (PGM_PAGE_HNDL_VIRT_STATE*) */
596 uint32_t u2HandlerVirtStateX : 2;
597 uint32_t u29B : 25;
598} PGMPAGE;
599AssertCompileSize(PGMPAGE, 16);
600/** Pointer to a physical guest page. */
601typedef PGMPAGE *PPGMPAGE;
602/** Pointer to a const physical guest page. */
603typedef const PGMPAGE *PCPGMPAGE;
604/** Pointer to a physical guest page pointer. */
605typedef PPGMPAGE *PPPGMPAGE;
606
607
608/**
609 * Clears the page structure.
610 * @param pPage Pointer to the physical guest page tracking structure.
611 */
612#define PGM_PAGE_CLEAR(pPage) \
613 do { \
614 (pPage)->HCPhys = 0; \
615 (pPage)->u2StateX = 0; \
616 (pPage)->fWrittenToX = 0; \
617 (pPage)->fSomethingElse = 0; \
618 (pPage)->idPageX = 0; \
619 (pPage)->u3Type = 0; \
620 (pPage)->u29B = 0; \
621 } while (0)
622
623/**
624 * Initializes the page structure.
625 * @param pPage Pointer to the physical guest page tracking structure.
626 */
627#define PGM_PAGE_INIT(pPage, _HCPhys, _idPage, _uType, _uState) \
628 do { \
629 (pPage)->HCPhys = (_HCPhys); \
630 (pPage)->u2StateX = (_uState); \
631 (pPage)->fWrittenToX = 0; \
632 (pPage)->fSomethingElse = 0; \
633 (pPage)->idPageX = (_idPage); \
634 /*(pPage)->u3Type = (_uType); - later */ \
635 PGM_PAGE_SET_TYPE(pPage, _uType); \
636 (pPage)->u29B = 0; \
637 } while (0)
638
639/**
640 * Initializes the page structure of a ZERO page.
641 * @param pPage Pointer to the physical guest page tracking structure.
642 */
643#ifdef VBOX_WITH_NEW_PHYS_CODE
644# define PGM_PAGE_INIT_ZERO(pPage, pVM, _uType) \
645 PGM_PAGE_INIT(pPage, (pVM)->pgm.s.HCPhysZeroPg, NIL_GMM_PAGEID, (_uType), PGM_PAGE_STATE_ZERO)
646#else
647# define PGM_PAGE_INIT_ZERO(pPage, pVM, _uType) \
648 PGM_PAGE_INIT(pPage, 0, NIL_GMM_PAGEID, (_uType), PGM_PAGE_STATE_ZERO)
649#endif
650/** Temporary hack. Replaced by PGM_PAGE_INIT_ZERO once the old code is kicked out. */
651# define PGM_PAGE_INIT_ZERO_REAL(pPage, pVM, _uType) \
652 PGM_PAGE_INIT(pPage, (pVM)->pgm.s.HCPhysZeroPg, NIL_GMM_PAGEID, (_uType), PGM_PAGE_STATE_ZERO)
653
654
655/** @name The Page state, PGMPAGE::u2StateX.
656 * @{ */
657/** The zero page.
658 * This is a per-VM page that's never ever mapped writable. */
659#define PGM_PAGE_STATE_ZERO 0
660/** An allocated page.
661 * This is a per-VM page allocated from the page pool (or wherever
662 * we get MMIO2 pages from if the type is MMIO2).
663 */
664#define PGM_PAGE_STATE_ALLOCATED 1
665/** An allocated page that's being monitored for writes.
666 * The shadow page table mappings are read-only. When a write occurs, the
667 * fWrittenTo member is set, the page is remapped read-write and the state
668 * is moved back to allocated. */
669#define PGM_PAGE_STATE_WRITE_MONITORED 2
670/** The page is shared, aka. copy-on-write.
671 * This is a page that's shared with other VMs. */
672#define PGM_PAGE_STATE_SHARED 3
673/** @} */
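/* A sketch of the write-monitoring transition described above (a hypothetical
 * call site, using only the PGM_PAGE_* accessors defined in this file):
 *     if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
 *     {
 *         PGM_PAGE_SET_WRITTEN_TO(pPage);
 *         PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
 *         // ...remap the page read-write...
 *     }
 */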
674
675
676/**
677 * Gets the page state.
678 * @returns page state (PGM_PAGE_STATE_*).
679 * @param pPage Pointer to the physical guest page tracking structure.
680 */
681#define PGM_PAGE_GET_STATE(pPage) ( (pPage)->u2StateX )
682
683/**
684 * Sets the page state.
685 * @param pPage Pointer to the physical guest page tracking structure.
686 * @param _uState The new page state.
687 */
688#define PGM_PAGE_SET_STATE(pPage, _uState) \
689 do { (pPage)->u2StateX = (_uState); } while (0)
690
691
692/**
693 * Gets the host physical address of the guest page.
694 * @returns host physical address (RTHCPHYS).
695 * @param pPage Pointer to the physical guest page tracking structure.
696 */
697#define PGM_PAGE_GET_HCPHYS(pPage) ( (pPage)->HCPhys & UINT64_C(0x0000fffffffff000) )
698
699/**
700 * Sets the host physical address of the guest page.
701 * @param pPage Pointer to the physical guest page tracking structure.
702 * @param _HCPhys The new host physical address.
703 */
704#define PGM_PAGE_SET_HCPHYS(pPage, _HCPhys) \
705 do { (pPage)->HCPhys = (((pPage)->HCPhys) & UINT64_C(0xffff000000000fff)) \
706 | ((_HCPhys) & UINT64_C(0x0000fffffffff000)); } while (0)
707
708/**
709 * Get the Page ID.
710 * @returns The Page ID; NIL_GMM_PAGEID if it's a ZERO page.
711 * @param pPage Pointer to the physical guest page tracking structure.
712 */
713#define PGM_PAGE_GET_PAGEID(pPage) ( (pPage)->idPageX )
714/* later:
715#define PGM_PAGE_GET_PAGEID(pPage) ( ((uint32_t)(pPage)->HCPhys >> (48 - 12))
716 | ((uint32_t)(pPage)->HCPhys & 0xfff) )
717*/
718/**
719 * Sets the Page ID.
720 * @param pPage Pointer to the physical guest page tracking structure.
721 */
722#define PGM_PAGE_SET_PAGEID(pPage, _idPage) do { (pPage)->idPageX = (_idPage); } while (0)
723/* later:
724#define PGM_PAGE_SET_PAGEID(pPage, _idPage) do { (pPage)->HCPhys = (((pPage)->HCPhys) & UINT64_C(0x0000fffffffff000)) \
725 | ((_idPage) & 0xfff) \
726 | (((_idPage) & 0x0ffff000) << (48-12)); } while (0)
727*/
728
729/**
730 * Get the Chunk ID.
731 * @returns The Chunk ID; NIL_GMM_CHUNKID if it's a ZERO page.
732 * @param pPage Pointer to the physical guest page tracking structure.
733 */
734#define PGM_PAGE_GET_CHUNKID(pPage) ( (pPage)->idPageX >> GMM_CHUNKID_SHIFT )
735/* later:
736#if GMM_CHUNKID_SHIFT == 12
737# define PGM_PAGE_GET_CHUNKID(pPage) ( (uint32_t)((pPage)->HCPhys >> 48) )
738#elif GMM_CHUNKID_SHIFT > 12
739# define PGM_PAGE_GET_CHUNKID(pPage) ( (uint32_t)((pPage)->HCPhys >> (48 + (GMM_CHUNKID_SHIFT - 12)) )
740#elif GMM_CHUNKID_SHIFT < 12
741# define PGM_PAGE_GET_CHUNKID(pPage) ( ( (uint32_t)((pPage)->HCPhys >> 48) << (12 - GMM_CHUNKID_SHIFT) ) \
742 | ( (uint32_t)((pPage)->HCPhys & 0xfff) >> GMM_CHUNKID_SHIFT ) )
743#else
744# error "GMM_CHUNKID_SHIFT isn't defined or something."
745#endif
746*/
747
748/**
749 * Get the index of the page within the allocation chunk.
750 * @returns The page index.
751 * @param pPage Pointer to the physical guest page tracking structure.
752 */
753#define PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) ( (pPage)->idPageX & GMM_PAGEID_IDX_MASK )
754/* later:
755#if GMM_CHUNKID_SHIFT <= 12
756# define PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) ( (uint32_t)((pPage)->HCPhys & GMM_PAGEID_IDX_MASK) )
757#else
758# define PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) ( (uint32_t)((pPage)->HCPhys & 0xfff) \
759 | ( (uint32_t)((pPage)->HCPhys >> 48) & (RT_BIT_32(GMM_CHUNKID_SHIFT - 12) - 1) ) )
760#endif
761*/
762
763
764/**
765 * Gets the page type.
766 * @returns The page type.
767 * @param pPage Pointer to the physical guest page tracking structure.
768 */
769#define PGM_PAGE_GET_TYPE(pPage) (pPage)->u3Type
770
771/**
772 * Sets the page type.
773 * @param pPage Pointer to the physical guest page tracking structure.
774 * @param _enmType The new page type (PGMPAGETYPE).
775 */
776#ifdef VBOX_WITH_NEW_PHYS_CODE
777#define PGM_PAGE_SET_TYPE(pPage, _enmType) \
778 do { (pPage)->u3Type = (_enmType); } while (0)
779#else
780#define PGM_PAGE_SET_TYPE(pPage, _enmType) \
781 do { \
782 (pPage)->u3Type = (_enmType); \
783 if ((_enmType) == PGMPAGETYPE_ROM) \
784 (pPage)->HCPhys |= MM_RAM_FLAGS_ROM; \
785 else if ((_enmType) == PGMPAGETYPE_ROM_SHADOW) \
786 (pPage)->HCPhys |= MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO2; \
787 else if ((_enmType) == PGMPAGETYPE_MMIO2) \
788 (pPage)->HCPhys |= MM_RAM_FLAGS_MMIO2; \
789 } while (0)
790#endif
791
792
793/**
794 * Checks if the page is 'reserved'.
795 * @returns true/false.
796 * @param pPage Pointer to the physical guest page tracking structure.
797 */
798#define PGM_PAGE_IS_RESERVED(pPage) ( !!((pPage)->HCPhys & MM_RAM_FLAGS_RESERVED) )
799
800/**
801 * Checks if the page is marked for MMIO.
802 * @returns true/false.
803 * @param pPage Pointer to the physical guest page tracking structure.
804 */
805#define PGM_PAGE_IS_MMIO(pPage) ( !!((pPage)->HCPhys & MM_RAM_FLAGS_MMIO) )
806
807/**
808 * Checks if the page is backed by the ZERO page.
809 * @returns true/false.
810 * @param pPage Pointer to the physical guest page tracking structure.
811 */
812#define PGM_PAGE_IS_ZERO(pPage) ( (pPage)->u2StateX == PGM_PAGE_STATE_ZERO )
813
814/**
815 * Checks if the page is backed by a SHARED page.
816 * @returns true/false.
817 * @param pPage Pointer to the physical guest page tracking structure.
818 */
819#define PGM_PAGE_IS_SHARED(pPage) ( (pPage)->u2StateX == PGM_PAGE_STATE_SHARED )
820
821
822/**
823 * Marks the page as written to (for GMM change monitoring).
824 * @param pPage Pointer to the physical guest page tracking structure.
825 */
826#define PGM_PAGE_SET_WRITTEN_TO(pPage) do { (pPage)->fWrittenToX = 1; } while (0)
827
828/**
829 * Clears the written-to indicator.
830 * @param pPage Pointer to the physical guest page tracking structure.
831 */
832#define PGM_PAGE_CLEAR_WRITTEN_TO(pPage) do { (pPage)->fWrittenToX = 0; } while (0)
833
834/**
835 * Checks if the page was marked as written-to.
836 * @returns true/false.
837 * @param pPage Pointer to the physical guest page tracking structure.
838 */
839#define PGM_PAGE_IS_WRITTEN_TO(pPage) ( (pPage)->fWrittenToX )
840
841
842/** @name Physical Access Handler State values (PGMPAGE::u2HandlerPhysStateX).
843 *
844 * @remarks The values are assigned in order of priority, so we can calculate
845 * the correct state for a page with different handlers installed.
846 * @{ */
847/** No handler installed. */
848#define PGM_PAGE_HNDL_PHYS_STATE_NONE 0
849/** Monitoring is temporarily disabled. */
850#define PGM_PAGE_HNDL_PHYS_STATE_DISABLED 1
851/** Write access is monitored. */
852#define PGM_PAGE_HNDL_PHYS_STATE_WRITE 2
853/** All access is monitored. */
854#define PGM_PAGE_HNDL_PHYS_STATE_ALL 3
855/** @} */
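/* Because the values are assigned in order of priority, the effective state
 * of a page with several handlers installed reduces to a maximum; a sketch:
 *     unsigned uState = RT_MAX(uStateHandler1, uStateHandler2);
 *     PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, uState);
 */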
856
857/**
858 * Gets the physical access handler state of a page.
859 * @returns PGM_PAGE_HNDL_PHYS_STATE_* value.
860 * @param pPage Pointer to the physical guest page tracking structure.
861 */
862#define PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) ( (pPage)->u2HandlerPhysStateX )
863
864/**
865 * Sets the physical access handler state of a page.
866 * @param pPage Pointer to the physical guest page tracking structure.
867 * @param _uState The new state value.
868 */
869#define PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, _uState) \
870 do { (pPage)->u2HandlerPhysStateX = (_uState); } while (0)
871
872/**
873 * Checks if the page has any physical access handlers, including temporarily disabled ones.
874 * @returns true/false
875 * @param pPage Pointer to the physical guest page tracking structure.
876 */
877#define PGM_PAGE_HAS_ANY_PHYSICAL_HANDLERS(pPage) ( (pPage)->u2HandlerPhysStateX != PGM_PAGE_HNDL_PHYS_STATE_NONE )
878
879/**
880 * Checks if the page has any active physical access handlers.
881 * @returns true/false
882 * @param pPage Pointer to the physical guest page tracking structure.
883 */
884#define PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage) ( (pPage)->u2HandlerPhysStateX >= PGM_PAGE_HNDL_PHYS_STATE_WRITE )
885
886
887/** @name Virtual Access Handler State values (PGMPAGE::u2HandlerVirtStateX).
888 *
889 * @remarks The values are assigned in order of priority, so we can calculate
890 * the correct state for a page with different handlers installed.
891 * @{ */
892/** No handler installed. */
893#define PGM_PAGE_HNDL_VIRT_STATE_NONE 0
894/* 1 is reserved so the lineup is identical with the physical ones. */
895/** Write access is monitored. */
896#define PGM_PAGE_HNDL_VIRT_STATE_WRITE 2
897/** All access is monitored. */
898#define PGM_PAGE_HNDL_VIRT_STATE_ALL 3
899/** @} */
900
901/**
902 * Gets the virtual access handler state of a page.
903 * @returns PGM_PAGE_HNDL_VIRT_STATE_* value.
904 * @param pPage Pointer to the physical guest page tracking structure.
905 */
906#define PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) ( (pPage)->u2HandlerVirtStateX )
907
908/**
909 * Sets the virtual access handler state of a page.
910 * @param pPage Pointer to the physical guest page tracking structure.
911 * @param _uState The new state value.
912 */
913#define PGM_PAGE_SET_HNDL_VIRT_STATE(pPage, _uState) \
914 do { (pPage)->u2HandlerVirtStateX = (_uState); } while (0)
915
916/**
917 * Checks if the page has any virtual access handlers.
918 * @returns true/false
919 * @param pPage Pointer to the physical guest page tracking structure.
920 */
921#define PGM_PAGE_HAS_ANY_VIRTUAL_HANDLERS(pPage) ( (pPage)->u2HandlerVirtStateX != PGM_PAGE_HNDL_VIRT_STATE_NONE )
922
923/**
924 * Same as PGM_PAGE_HAS_ANY_VIRTUAL_HANDLERS - pages cannot be temporarily
925 * disabled for virtual handlers.
926 * @returns true/false
927 * @param pPage Pointer to the physical guest page tracking structure.
928 */
929#define PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage) PGM_PAGE_HAS_ANY_VIRTUAL_HANDLERS(pPage)
930
931
932
933/**
934 * Checks if the page has any access handlers, including temporarily disabled ones.
935 * @returns true/false
936 * @param pPage Pointer to the physical guest page tracking structure.
937 */
938#define PGM_PAGE_HAS_ANY_HANDLERS(pPage) \
939 ( (pPage)->u2HandlerPhysStateX != PGM_PAGE_HNDL_PHYS_STATE_NONE \
940 || (pPage)->u2HandlerVirtStateX != PGM_PAGE_HNDL_VIRT_STATE_NONE )
941
942/**
943 * Checks if the page has any active access handlers.
944 * @returns true/false
945 * @param pPage Pointer to the physical guest page tracking structure.
946 */
947#define PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) \
948 ( (pPage)->u2HandlerPhysStateX >= PGM_PAGE_HNDL_PHYS_STATE_WRITE \
949 || (pPage)->u2HandlerVirtStateX >= PGM_PAGE_HNDL_VIRT_STATE_WRITE )
950
951/**
952 * Checks if the page has any active access handlers catching all accesses.
953 * @returns true/false
954 * @param pPage Pointer to the physical guest page tracking structure.
955 */
956#define PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage) \
957 ( (pPage)->u2HandlerPhysStateX == PGM_PAGE_HNDL_PHYS_STATE_ALL \
958 || (pPage)->u2HandlerVirtStateX == PGM_PAGE_HNDL_VIRT_STATE_ALL )
959
960
961
962
963/** @def PGM_PAGE_GET_TRACKING
964 * Gets the packed shadow page pool tracking data associated with a guest page.
965 * @returns uint16_t containing the data.
966 * @param pPage Pointer to the physical guest page tracking structure.
967 */
968#define PGM_PAGE_GET_TRACKING(pPage) \
969 ( *((uint16_t *)&(pPage)->HCPhys + 3) )
970
971/** @def PGM_PAGE_SET_TRACKING
972 * Sets the packed shadow page pool tracking data associated with a guest page.
973 * @param pPage Pointer to the physical guest page tracking structure.
974 * @param u16TrackingData The tracking data to store.
975 */
976#define PGM_PAGE_SET_TRACKING(pPage, u16TrackingData) \
977 do { *((uint16_t *)&(pPage)->HCPhys + 3) = (u16TrackingData); } while (0)
978
979/** @def PGM_PAGE_GET_TD_CREFS
980 * Gets the @a cRefs tracking data member.
981 * @returns cRefs.
982 * @param pPage Pointer to the physical guest page tracking structure.
983 */
984#define PGM_PAGE_GET_TD_CREFS(pPage) \
985 ((PGM_PAGE_GET_TRACKING(pPage) >> PGMPOOL_TD_CREFS_SHIFT) & PGMPOOL_TD_CREFS_MASK)
986
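/** @def PGM_PAGE_GET_TD_IDX
 * Gets the @a idx tracking data member.
 * @returns idx.
 * @param pPage Pointer to the physical guest page tracking structure.
 */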
987#define PGM_PAGE_GET_TD_IDX(pPage) \
988 ((PGM_PAGE_GET_TRACKING(pPage) >> PGMPOOL_TD_IDX_SHIFT) & PGMPOOL_TD_IDX_MASK)
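/* On a little-endian host the pointer arithmetic above addresses bytes 6 and
 * 7 of HCPhys, so the GET macro is equivalent to this sketch:
 *     uint16_t u16TrackingData = (uint16_t)((pPage)->HCPhys >> 48);
 * i.e. the packed tracking word lives in the top 16 bits of HCPhys.
 */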
989
990/**
991 * Ram range for GC Phys to HC Phys conversion.
992 *
993 * Can be used for HC Virt to GC Phys and HC Virt to HC Phys
994 * conversions too, but we'll let MM handle that for now.
995 *
996 * This structure is used by linked lists in both GC and HC.
997 */
998typedef struct PGMRAMRANGE
999{
1000 /** Pointer to the next RAM range - for R3. */
1001 R3PTRTYPE(struct PGMRAMRANGE *) pNextR3;
1002 /** Pointer to the next RAM range - for R0. */
1003 R0PTRTYPE(struct PGMRAMRANGE *) pNextR0;
1004 /** Pointer to the next RAM range - for RC. */
1005 RCPTRTYPE(struct PGMRAMRANGE *) pNextRC;
1006 /** Pointer alignment. */
1007 RTRCPTR RCPtrAlignment;
1008 /** Start of the range. Page aligned. */
1009 RTGCPHYS GCPhys;
1010 /** Last address in the range (inclusive). Page aligned (-1). */
1011 RTGCPHYS GCPhysLast;
1012 /** Size of the range. (Page aligned of course). */
1013 RTGCPHYS cb;
1014 /** MM_RAM_* flags */
1015 uint32_t fFlags;
1016 uint32_t u32Alignment; /**< alignment. */
1017#ifndef VBOX_WITH_NEW_PHYS_CODE
1018 /** R3 virtual lookup ranges for chunks.
1019 * Currently only used with MM_RAM_FLAGS_DYNAMIC_ALLOC ranges.
1020 * @remarks This is occasionally accessed from ring-0!! (not darwin) */
1021# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1022 R3PTRTYPE(PRTR3UINTPTR) paChunkR3Ptrs;
1023# else
1024 R3R0PTRTYPE(PRTR3UINTPTR) paChunkR3Ptrs;
1025# endif
1026#endif
1027 /** Start of the HC mapping of the range. This is only used for MMIO2. */
1028 R3PTRTYPE(void *) pvR3;
1029 /** The range description. */
1030 R3PTRTYPE(const char *) pszDesc;
1031
1032 /** Padding to make aPages aligned on sizeof(PGMPAGE). */
1033#ifdef VBOX_WITH_NEW_PHYS_CODE
1034 uint32_t au32Reserved[2];
1035#elif HC_ARCH_BITS == 32
1036 uint32_t au32Reserved[1];
1037#endif
1038
1039 /** Array of physical guest page tracking structures. */
1040 PGMPAGE aPages[1];
1041} PGMRAMRANGE;
1042/** Pointer to Ram range for GC Phys to HC Phys conversion. */
1043typedef PGMRAMRANGE *PPGMRAMRANGE;
1044
1045 /** Returns the HC pointer corresponding to the RAM range and physical offset. */
1046#define PGMRAMRANGE_GETHCPTR(pRam, off) \
1047 (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC) ? (RTHCPTR)((pRam)->paChunkR3Ptrs[(off) >> PGM_DYNAMIC_CHUNK_SHIFT] + ((off) & PGM_DYNAMIC_CHUNK_OFFSET_MASK)) \
1048 : (RTHCPTR)((RTR3UINTPTR)(pRam)->pvR3 + (off));
1049
1050/**
1051 * Per page tracking structure for ROM image.
1052 *
1053 * A ROM image may have a shadow page, in which case we may have
1054 * two pages backing it. This structure contains the PGMPAGE for
1055 * both, while PGMRAMRANGE has a copy of the active one. It is
1056 * important that these aren't out of sync in any regard other
1057 * than page pool tracking data.
1058 */
1059typedef struct PGMROMPAGE
1060{
1061 /** The page structure for the virgin ROM page. */
1062 PGMPAGE Virgin;
1063 /** The page structure for the shadow RAM page. */
1064 PGMPAGE Shadow;
1065 /** The current protection setting. */
1066 PGMROMPROT enmProt;
1067 /** Pad the structure size to a multiple of 8. */
1068 uint32_t u32Padding;
1069} PGMROMPAGE;
1070/** Pointer to a ROM page tracking structure. */
1071typedef PGMROMPAGE *PPGMROMPAGE;
1072
1073
1074/**
1075 * A registered ROM image.
1076 *
1077 * This is needed to keep track of ROM images since they generally
1078 * intrude into a PGMRAMRANGE. It also keeps track of additional
1079 * info like the two page sets (read-only virgin and read-write shadow)
1080 * and the current state of each page.
1081 *
1082 * Because access handlers cannot easily be executed in a different
1083 * context, the ROM ranges need to be accessible in all contexts.
1084 */
1085typedef struct PGMROMRANGE
1086{
1087 /** Pointer to the next range - R3. */
1088 R3PTRTYPE(struct PGMROMRANGE *) pNextR3;
1089 /** Pointer to the next range - R0. */
1090 R0PTRTYPE(struct PGMROMRANGE *) pNextR0;
1091 /** Pointer to the next range - RC. */
1092 RCPTRTYPE(struct PGMROMRANGE *) pNextRC;
1093 /** Pointer alignment */
1094 RTRCPTR GCPtrAlignment;
1095 /** Address of the range. */
1096 RTGCPHYS GCPhys;
1097 /** Address of the last byte in the range. */
1098 RTGCPHYS GCPhysLast;
1099 /** Size of the range. */
1100 RTGCPHYS cb;
1101 /** The flags (PGMPHYS_ROM_FLAG_*). */
1102 uint32_t fFlags;
1103 /** Alignment padding ensuring that aPages is sizeof(PGMROMPAGE) aligned. */
1104 uint32_t au32Alignemnt[HC_ARCH_BITS == 32 ? 7 : 3];
1105 /** Pointer to the original bits when PGMPHYS_ROM_FLAG_PERMANENT_BINARY was specified.
1106 * This is used for strictness checks. */
1107 R3PTRTYPE(const void *) pvOriginal;
1108 /** The ROM description. */
1109 R3PTRTYPE(const char *) pszDesc;
1110 /** The per page tracking structures. */
1111 PGMROMPAGE aPages[1];
1112} PGMROMRANGE;
1113/** Pointer to a ROM range. */
1114typedef PGMROMRANGE *PPGMROMRANGE;
1115
1116
1117/**
1118 * A registered MMIO2 (= Device RAM) range.
1119 *
1120 * There are a few reasons why we need to keep track of these
1121 * registrations. One of them is the deregistration & cleanup
1122 * stuff, while another is that the PGMRAMRANGE associated with
1123 * such a region may have to be removed from the ram range list.
1124 *
1125 * Overlapping with a RAM range has to be 100% or none at all. The
1126 * pages in the existing RAM range must not be ROM nor MMIO. A guru
1127 * meditation will be raised if a partial overlap or an overlap of
1128 * ROM pages is encountered. On an overlap we will free all the
1129 * existing RAM pages and put in the ram range pages instead.
1130 */
1131typedef struct PGMMMIO2RANGE
1132{
1133 /** The owner of the range. (a device) */
1134 PPDMDEVINSR3 pDevInsR3;
1135 /** Pointer to the ring-3 mapping of the allocation. */
1136 RTR3PTR pvR3;
1137 /** Pointer to the next range - R3. */
1138 R3PTRTYPE(struct PGMMMIO2RANGE *) pNextR3;
1139 /** Whether it's mapped or not. */
1140 bool fMapped;
1141 /** Whether it's overlapping or not. */
1142 bool fOverlapping;
1143 /** The PCI region number.
1144 * @remarks This ASSUMES that nobody will ever really need multiple MMIO2
1145 * ranges with matching region numbers on a single device. */
1146 uint8_t iRegion;
1147 /** Alignment padding for putting the ram range on a PGMPAGE alignment boundary. */
1148 uint8_t abAlignemnt[HC_ARCH_BITS == 32 ? 1 : 5];
1149 /** The associated RAM range. */
1150 PGMRAMRANGE RamRange;
1151} PGMMMIO2RANGE;
1152/** Pointer to a MMIO2 range. */
1153typedef PGMMMIO2RANGE *PPGMMMIO2RANGE;
1154
1155
1156
1157
1158/**
1159 * PGMPhysRead/Write cache entry
1160 */
1161typedef struct PGMPHYSCACHEENTRY
1162{
1163 /** R3 pointer to physical page. */
1164 R3PTRTYPE(uint8_t *) pbR3;
1165 /** GC Physical address for cache entry */
1166 RTGCPHYS GCPhys;
1167#if HC_ARCH_BITS == 64 && GC_ARCH_BITS == 32
1168 RTGCPHYS u32Padding0; /**< alignment padding. */
1169#endif
1170} PGMPHYSCACHEENTRY;
1171
1172/**
1173 * PGMPhysRead/Write cache to reduce REM memory access overhead
1174 */
1175typedef struct PGMPHYSCACHE
1176{
1177 /** Bitmap of valid cache entries */
1178 uint64_t aEntries;
1179 /** Cache entries */
1180 PGMPHYSCACHEENTRY Entry[PGM_MAX_PHYSCACHE_ENTRIES];
1181} PGMPHYSCACHE;
1182
1183
1184/** Pointer to an allocation chunk ring-3 mapping. */
1185typedef struct PGMCHUNKR3MAP *PPGMCHUNKR3MAP;
1186/** Pointer to an allocation chunk ring-3 mapping pointer. */
1187typedef PPGMCHUNKR3MAP *PPPGMCHUNKR3MAP;
1188
1189/**
1190 * Ring-3 tracking structure for an allocation chunk ring-3 mapping.
1191 *
1192 * The primary tree (Core) uses the chunk id as key.
1193 * The secondary tree (AgeCore) is used for ageing and uses ageing sequence number as key.
1194 */
1195typedef struct PGMCHUNKR3MAP
1196{
1197 /** The key is the chunk id. */
1198 AVLU32NODECORE Core;
1199 /** The key is the ageing sequence number. */
1200 AVLLU32NODECORE AgeCore;
1201 /** The current age thingy. */
1202 uint32_t iAge;
1203 /** The current reference count. */
1204 uint32_t volatile cRefs;
1205 /** The current permanent reference count. */
1206 uint32_t volatile cPermRefs;
1207 /** The mapping address. */
1208 void *pv;
1209} PGMCHUNKR3MAP;
1210
1211/**
1212 * Allocation chunk ring-3 mapping TLB entry.
1213 */
1214typedef struct PGMCHUNKR3MAPTLBE
1215{
1216 /** The chunk id. */
1217 uint32_t volatile idChunk;
1218#if HC_ARCH_BITS == 64
1219 uint32_t u32Padding; /**< alignment padding. */
1220#endif
1221 /** The chunk map. */
1222#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
1223 R3PTRTYPE(PPGMCHUNKR3MAP) volatile pChunk;
1224#else
1225 R3R0PTRTYPE(PPGMCHUNKR3MAP) volatile pChunk;
1226#endif
1227} PGMCHUNKR3MAPTLBE;
1228/** Pointer to the an allocation chunk ring-3 mapping TLB entry. */
1229typedef PGMCHUNKR3MAPTLBE *PPGMCHUNKR3MAPTLBE;
1230
1231/** The number of TLB entries in PGMCHUNKR3MAPTLB.
1232 * @remark Must be a power of two value. */
1233#define PGM_CHUNKR3MAPTLB_ENTRIES 32
1234
1235/**
1236 * Allocation chunk ring-3 mapping TLB.
1237 *
1238 * @remarks We use a TLB to speed up lookups by avoiding walking the AVL.
1239 * At first glance this might look kinda odd since AVL trees are
1240 * supposed to give the most optimal lookup times of all trees
1241 * due to their balancing. However, take a tree with 1023 nodes
1242 * in it, that's 10 levels, meaning that most searches have to go
1243 * down 9 levels before they find what they want. This isn't fast
1244 * compared to a TLB hit. There is the factor of cache misses,
1245 * and of course the problem with trees and branch prediction.
1246 * This is why we use TLBs in front of most of the trees.
1247 *
1248 * @todo Generalize this TLB + AVL stuff, shouldn't be all that
1249 * difficult when we switch to the new inlined AVL trees (from kStuff).
1250 */
1251typedef struct PGMCHUNKR3MAPTLB
1252{
1253 /** The TLB entries. */
1254 PGMCHUNKR3MAPTLBE aEntries[PGM_CHUNKR3MAPTLB_ENTRIES];
1255} PGMCHUNKR3MAPTLB;
1256
1257/**
1258 * Calculates the index of a guest page in the Ring-3 Chunk TLB.
1259 * @returns Chunk TLB index.
1260 * @param idChunk The Chunk ID.
1261 */
1262#define PGM_CHUNKR3MAPTLB_IDX(idChunk) ( (idChunk) & (PGM_CHUNKR3MAPTLB_ENTRIES - 1) )
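/* A lookup sketch of the TLB-first strategy described above (simplified; the
 * tree root name pChunkTree is hypothetical):
 *     PPGMCHUNKR3MAPTLBE pTlbe = &pTlb->aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
 *     PPGMCHUNKR3MAP pMap;
 *     if (pTlbe->idChunk == idChunk)
 *         pMap = pTlbe->pChunk;                               // TLB hit
 *     else
 *     {
 *         pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pChunkTree, idChunk);
 *         pTlbe->idChunk = idChunk;                           // cache for next time
 *         pTlbe->pChunk  = pMap;
 *     }
 * The cast works because Core is the first member of PGMCHUNKR3MAP.
 */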
1263
1264
1265/**
1266 * Ring-3 guest page mapping TLB entry.
1267 * @remarks used in ring-0 as well at the moment.
1268 */
1269typedef struct PGMPAGER3MAPTLBE
1270{
1271 /** Address of the page. */
1272 RTGCPHYS volatile GCPhys;
1273 /** The guest page. */
1274#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
1275 R3PTRTYPE(PPGMPAGE) volatile pPage;
1276#else
1277 R3R0PTRTYPE(PPGMPAGE) volatile pPage;
1278#endif
1279 /** Pointer to the page mapping tracking structure, PGMCHUNKR3MAP. */
1280#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
1281 R3PTRTYPE(PPGMCHUNKR3MAP) volatile pMap;
1282#else
1283 R3R0PTRTYPE(PPGMCHUNKR3MAP) volatile pMap;
1284#endif
1285 /** The address */
1286#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
1287 R3PTRTYPE(void *) volatile pv;
1288#else
1289 R3R0PTRTYPE(void *) volatile pv;
1290#endif
1291#if HC_ARCH_BITS == 32
1292 uint32_t u32Padding; /**< alignment padding. */
1293#endif
1294} PGMPAGER3MAPTLBE;
1295/** Pointer to an entry in the HC physical TLB. */
1296typedef PGMPAGER3MAPTLBE *PPGMPAGER3MAPTLBE;
1297
1298
1299/** The number of entries in the ring-3 guest page mapping TLB.
1300 * @remarks The value must be a power of two. */
1301#define PGM_PAGER3MAPTLB_ENTRIES 64
1302
1303/**
1304 * Ring-3 guest page mapping TLB.
1305 * @remarks used in ring-0 as well at the moment.
1306 */
1307typedef struct PGMPAGER3MAPTLB
1308{
1309 /** The TLB entries. */
1310 PGMPAGER3MAPTLBE aEntries[PGM_PAGER3MAPTLB_ENTRIES];
1311} PGMPAGER3MAPTLB;
1312/** Pointer to the ring-3 guest page mapping TLB. */
1313typedef PGMPAGER3MAPTLB *PPGMPAGER3MAPTLB;
1314
1315/**
1316 * Calculates the index of the TLB entry for the specified guest page.
1317 * @returns Physical TLB index.
1318 * @param GCPhys The guest physical address.
1319 */
1320#define PGM_PAGER3MAPTLB_IDX(GCPhys) ( ((GCPhys) >> PAGE_SHIFT) & (PGM_PAGER3MAPTLB_ENTRIES - 1) )
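/* A lookup sketch pairing the index macro with the TLB above (the Tlb
 * instance name is hypothetical):
 *     PPGMPAGER3MAPTLBE pTlbe = &Tlb.aEntries[PGM_PAGER3MAPTLB_IDX(GCPhys)];
 *     bool fHit = pTlbe->GCPhys == (GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK);
 */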
1321
1322
1323/**
1324 * Mapping cache usage set entry.
1325 *
1326 * @remarks 16-bit ints were chosen as the set is not expected to be used beyond
1327 * the dynamic ring-0 and (to some extent) raw-mode context mapping
1328 * cache. If it's extended to include ring-3, well, then something will
1329 * have to be changed here...
1330 */
1331typedef struct PGMMAPSETENTRY
1332{
1333 /** The mapping cache index. */
1334 uint16_t iPage;
1335 /** The number of references.
1336 * The max is UINT16_MAX - 1. */
1337 uint16_t cRefs;
1338 /** Pointer to the page. */
1339 RTR0PTR pvPage;
1340 /** The physical address for this entry. */
1341 RTHCPHYS HCPhys;
1342} PGMMAPSETENTRY;
1343/** Pointer to a mapping cache usage set entry. */
1344typedef PGMMAPSETENTRY *PPGMMAPSETENTRY;
1345
1346/**
1347 * Mapping cache usage set.
1348 *
1349 * This is used in ring-0 and the raw-mode context to track dynamic mappings
1350 * done during exits / traps. The set is closed by storing PGMMAPSET_CLOSED
1351 * in cEntries when no dynamic mappings are to be made. */
1352typedef struct PGMMAPSET
1353{
1354 /** The number of occupied entries.
1355 * This is PGMMAPSET_CLOSED if the set is closed and we're not supposed to do
1356 * dynamic mappings. */
1357 uint32_t cEntries;
1358 /** The start of the current subset.
1359 * This is UINT32_MAX if no subset is currently open. */
1360 uint32_t iSubset;
1361 /** The index of the current CPU, only valid if the set is open. */
1362 int32_t iCpu;
1363 /** The entries. */
1364 PGMMAPSETENTRY aEntries[64];
1365 /** HCPhys -> iEntry fast lookup table.
1366 * Use PGMMAPSET_HASH for hashing.
1367 * The entries may or may not be valid, check against cEntries. */
1368 uint8_t aiHashTable[128];
1369} PGMMAPSET;
1370/** Pointer to the mapping cache set. */
1371typedef PGMMAPSET *PPGMMAPSET;
1372
1373/** PGMMAPSET::cEntries value for a closed set. */
1374#define PGMMAPSET_CLOSED UINT32_C(0xdeadc0fe)
1375
1376/** Hash function for aiHashTable. */
1377#define PGMMAPSET_HASH(HCPhys) (((HCPhys) >> PAGE_SHIFT) & 127)
1378
1379/** The max fill size (strict builds). */
1380#define PGMMAPSET_MAX_FILL (64U * 80U / 100U)
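/* A probing sketch (an assumption) for the aiHashTable fast lookup; the
 * entries may be stale, so they must be validated against cEntries:
 *     unsigned iHash  = PGMMAPSET_HASH(HCPhys);
 *     unsigned iEntry = pSet->aiHashTable[iHash];
 *     if (   iEntry < pSet->cEntries
 *         && pSet->aEntries[iEntry].HCPhys == HCPhys)
 *         pSet->aEntries[iEntry].cRefs++;
 */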
1381
1382
1383/** @name Context neutral page mapper TLB.
1384 *
1385 * Hoping to avoid some code and bug duplication, parts of the GCxxx->CCPtr
1386 * code are written in a kind of context neutral way. Time will show whether
1387 * this actually makes sense or not...
1388 *
1389 * @todo this needs to be reconsidered and dropped/redone since the ring-0
1390 * context ends up using a global mapping cache on some platforms
1391 * (darwin).
1392 *
1393 * @{ */
1394/** @typedef PPGMPAGEMAPTLB
1395 * The page mapper TLB pointer type for the current context. */
1396/** @typedef PPGMPAGEMAPTLBE
1397 * The page mapper TLB entry pointer type for the current context. */
1398/** @typedef PPPGMPAGEMAPTLBE
1399 * The page mapper TLB entry pointer pointer type for the current context. */
1400/** @def PGM_PAGEMAPTLB_ENTRIES
1401 * The number of TLB entries in the page mapper TLB for the current context. */
1402/** @def PGM_PAGEMAPTLB_IDX
1403 * Calculate the TLB index for a guest physical address.
1404 * @returns The TLB index.
1405 * @param GCPhys The guest physical address. */
1406/** @typedef PPGMPAGEMAP
1407 * Pointer to a page mapper unit for current context. */
1408/** @typedef PPPGMPAGEMAP
1409 * Pointer to a page mapper unit pointer for current context. */
1410#ifdef IN_RC
1411// typedef PPGMPAGEGCMAPTLB PPGMPAGEMAPTLB;
1412// typedef PPGMPAGEGCMAPTLBE PPGMPAGEMAPTLBE;
1413// typedef PPGMPAGEGCMAPTLBE *PPPGMPAGEMAPTLBE;
1414# define PGM_PAGEMAPTLB_ENTRIES PGM_PAGEGCMAPTLB_ENTRIES
1415# define PGM_PAGEMAPTLB_IDX(GCPhys) PGM_PAGEGCMAPTLB_IDX(GCPhys)
1416 typedef void * PPGMPAGEMAP;
1417 typedef void ** PPPGMPAGEMAP;
1418//#elif IN_RING0
1419// typedef PPGMPAGER0MAPTLB PPGMPAGEMAPTLB;
1420// typedef PPGMPAGER0MAPTLBE PPGMPAGEMAPTLBE;
1421// typedef PPGMPAGER0MAPTLBE *PPPGMPAGEMAPTLBE;
1422//# define PGM_PAGEMAPTLB_ENTRIES PGM_PAGER0MAPTLB_ENTRIES
1423//# define PGM_PAGEMAPTLB_IDX(GCPhys) PGM_PAGER0MAPTLB_IDX(GCPhys)
1424// typedef PPGMCHUNKR0MAP PPGMPAGEMAP;
1425// typedef PPPGMCHUNKR0MAP PPPGMPAGEMAP;
1426#else
1427 typedef PPGMPAGER3MAPTLB PPGMPAGEMAPTLB;
1428 typedef PPGMPAGER3MAPTLBE PPGMPAGEMAPTLBE;
1429 typedef PPGMPAGER3MAPTLBE *PPPGMPAGEMAPTLBE;
1430# define PGM_PAGEMAPTLB_ENTRIES PGM_PAGER3MAPTLB_ENTRIES
1431# define PGM_PAGEMAPTLB_IDX(GCPhys) PGM_PAGER3MAPTLB_IDX(GCPhys)
1432 typedef PPGMCHUNKR3MAP PPGMPAGEMAP;
1433 typedef PPPGMCHUNKR3MAP PPPGMPAGEMAP;
1434#endif
1435/** @} */
1436
1437
1438/** @name PGM Pool Indexes.
1439 * Aka. the unique shadow page identifier.
1440 * @{ */
1441/** NIL page pool IDX. */
1442#define NIL_PGMPOOL_IDX 0
1443/** The first special index. */
1444#define PGMPOOL_IDX_FIRST_SPECIAL 1
1445#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
1446/** Page directory (32-bit root). */
1447#define PGMPOOL_IDX_PD 1
1448/** Page Directory Pointer Table (PAE root). */
1449#define PGMPOOL_IDX_PDPT 2
1450/** AMD64 CR3 level index.*/
1451#define PGMPOOL_IDX_AMD64_CR3 3
1452/** Nested paging root.*/
1453#define PGMPOOL_IDX_NESTED_ROOT 4
1454/** The first normal index. */
1455#define PGMPOOL_IDX_FIRST 5
1456#else
1457/** Page directory (32-bit root). */
1458#define PGMPOOL_IDX_PD 1
1459/** The extended PAE page directory (2048 entries, works as root currently). */
1460#define PGMPOOL_IDX_PAE_PD 2
1461/** PAE Page Directory Table 0. */
1462#define PGMPOOL_IDX_PAE_PD_0 3
1463/** PAE Page Directory Table 1. */
1464#define PGMPOOL_IDX_PAE_PD_1 4
1465/** PAE Page Directory Table 2. */
1466#define PGMPOOL_IDX_PAE_PD_2 5
1467/** PAE Page Directory Table 3. */
1468#define PGMPOOL_IDX_PAE_PD_3 6
1469/** Page Directory Pointer Table (PAE root, not currently used). */
1470#define PGMPOOL_IDX_PDPT 7
1471/** AMD64 CR3 level index.*/
1472#define PGMPOOL_IDX_AMD64_CR3 8
1473/** Nested paging root.*/
1474#define PGMPOOL_IDX_NESTED_ROOT 9
1475/** The first normal index. */
1476#define PGMPOOL_IDX_FIRST 10
1477#endif
1478/** The last valid index. (inclusive, 14 bits) */
1479#define PGMPOOL_IDX_LAST 0x3fff
1480/** @} */
1481
1482/** The NIL index for the parent chain. */
1483#define NIL_PGMPOOL_USER_INDEX ((uint16_t)0xffff)
1484
1485/**
1486 * Node in the chain linking a shadowed page to its parent (user).
1487 */
1488#pragma pack(1)
1489typedef struct PGMPOOLUSER
1490{
1491 /** The index to the next item in the chain. NIL_PGMPOOL_USER_INDEX is no next. */
1492 uint16_t iNext;
1493 /** The user page index. */
1494 uint16_t iUser;
1495 /** Index into the user table. */
1496 uint32_t iUserTable;
1497} PGMPOOLUSER, *PPGMPOOLUSER;
1498typedef const PGMPOOLUSER *PCPGMPOOLUSER;
1499#pragma pack()
1500
1501
1502/** The NIL index for the phys ext chain. */
1503#define NIL_PGMPOOL_PHYSEXT_INDEX ((uint16_t)0xffff)
1504
1505/**
1506 * Node in the chain of physical cross reference extents.
1507 * @todo Calling this an 'extent' is not quite right, find a better name.
1508 */
1509#pragma pack(1)
1510typedef struct PGMPOOLPHYSEXT
1511{
1512 /** The index to the next item in the chain. NIL_PGMPOOL_PHYSEXT_INDEX is no next. */
1513 uint16_t iNext;
1514 /** Pool indexes of up to three shadow pages referencing the guest page. */
1515 uint16_t aidx[3];
1516} PGMPOOLPHYSEXT, *PPGMPOOLPHYSEXT;
1517typedef const PGMPOOLPHYSEXT *PCPGMPOOLPHYSEXT;
1518#pragma pack()
1519
1520
/**
 * The kind of page that's being shadowed.
 */
typedef enum PGMPOOLKIND
{
    /** The virtual invalid 0 entry. */
    PGMPOOLKIND_INVALID = 0,
    /** The entry is free (=unused). */
    PGMPOOLKIND_FREE,

    /** Shw: 32-bit page table; Gst: no paging. */
    PGMPOOLKIND_32BIT_PT_FOR_PHYS,
    /** Shw: 32-bit page table; Gst: 32-bit page table. */
    PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT,
    /** Shw: 32-bit page table; Gst: 4MB page. */
    PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB,
    /** Shw: PAE page table; Gst: no paging. */
    PGMPOOLKIND_PAE_PT_FOR_PHYS,
    /** Shw: PAE page table; Gst: 32-bit page table. */
    PGMPOOLKIND_PAE_PT_FOR_32BIT_PT,
    /** Shw: PAE page table; Gst: Half of a 4MB page. */
    PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB,
    /** Shw: PAE page table; Gst: PAE page table. */
    PGMPOOLKIND_PAE_PT_FOR_PAE_PT,
    /** Shw: PAE page table; Gst: 2MB page. */
    PGMPOOLKIND_PAE_PT_FOR_PAE_2MB,

    /** Shw: 32-bit page directory; Gst: 32-bit page directory. */
    PGMPOOLKIND_32BIT_PD,
    /** Shw: 32-bit page directory; Gst: no paging. */
    PGMPOOLKIND_32BIT_PD_PHYS,
    /** Shw: PAE page directory 0; Gst: 32-bit page directory. */
    PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD,
    /** Shw: PAE page directory 1; Gst: 32-bit page directory. */
    PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD,
    /** Shw: PAE page directory 2; Gst: 32-bit page directory. */
    PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD,
    /** Shw: PAE page directory 3; Gst: 32-bit page directory. */
    PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD,
    /** Shw: PAE page directory; Gst: PAE page directory. */
    PGMPOOLKIND_PAE_PD_FOR_PAE_PD,
    /** Shw: PAE page directory; Gst: no paging. */
    PGMPOOLKIND_PAE_PD_PHYS,

    /** Shw: PAE page directory pointer table (legacy, 4 entries); Gst: 32-bit paging. */
    PGMPOOLKIND_PAE_PDPT_FOR_32BIT,
    /** Shw: PAE page directory pointer table (legacy, 4 entries); Gst: PAE PDPT. */
    PGMPOOLKIND_PAE_PDPT,
    /** Shw: PAE page directory pointer table (legacy, 4 entries); Gst: no paging. */
    PGMPOOLKIND_PAE_PDPT_PHYS,

    /** Shw: 64-bit page directory pointer table; Gst: 64-bit page directory pointer table. */
    PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT,
    /** Shw: 64-bit page directory pointer table; Gst: no paging. */
    PGMPOOLKIND_64BIT_PDPT_FOR_PHYS,
    /** Shw: 64-bit page directory table; Gst: 64-bit page directory table. */
    PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD,
    /** Shw: 64-bit page directory table; Gst: no paging. */
    PGMPOOLKIND_64BIT_PD_FOR_PHYS, /* 22 */

    /** Shw: 64-bit PML4; Gst: 64-bit PML4. */
    PGMPOOLKIND_64BIT_PML4,

    /** Shw: EPT page directory pointer table; Gst: no paging. */
    PGMPOOLKIND_EPT_PDPT_FOR_PHYS,
    /** Shw: EPT page directory table; Gst: no paging. */
    PGMPOOLKIND_EPT_PD_FOR_PHYS,
    /** Shw: EPT page table; Gst: no paging. */
    PGMPOOLKIND_EPT_PT_FOR_PHYS,

#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    /** Shw: Root 32-bit page directory. */
    PGMPOOLKIND_ROOT_32BIT_PD,
    /** Shw: Root PAE page directory. */
    PGMPOOLKIND_ROOT_PAE_PD,
    /** Shw: Root PAE page directory pointer table (legacy, 4 entries). */
    PGMPOOLKIND_ROOT_PDPT,
#endif
    /** Shw: Root nested paging table. */
    PGMPOOLKIND_ROOT_NESTED,

    /** The last valid entry. */
    PGMPOOLKIND_LAST = PGMPOOLKIND_ROOT_NESTED
} PGMPOOLKIND;


/**
 * The tracking data for a page in the pool.
 */
typedef struct PGMPOOLPAGE
{
    /** AVL node core with the (R3) physical address of this page. */
    AVLOHCPHYSNODECORE  Core;
    /** Pointer to the R3 mapping of the page. */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    R3PTRTYPE(void *)   pvPageR3;
#else
    R3R0PTRTYPE(void *) pvPageR3;
#endif
    /** The guest physical address. */
#if HC_ARCH_BITS == 32 && GC_ARCH_BITS == 64
    uint32_t            Alignment0;
#endif
    RTGCPHYS            GCPhys;
    /** The kind of page we're shadowing. (This is really a PGMPOOLKIND enum.) */
    uint8_t             enmKind;
    uint8_t             bPadding;
    /** The index of this page. */
    uint16_t            idx;
    /** The next entry in the list this page currently resides in.
     * It's either in the free list or in the GCPhys hash. */
    uint16_t            iNext;
#ifdef PGMPOOL_WITH_USER_TRACKING
    /** Head of the user chain. NIL_PGMPOOL_USER_INDEX if not currently in use. */
    uint16_t            iUserHead;
    /** The number of present entries. */
    uint16_t            cPresent;
    /** The first entry in the table which is present. */
    uint16_t            iFirstPresent;
#endif
#ifdef PGMPOOL_WITH_MONITORING
    /** The number of modifications to the monitored page. */
    uint16_t            cModifications;
    /** The next modified page. NIL_PGMPOOL_IDX if tail. */
    uint16_t            iModifiedNext;
    /** The previous modified page. NIL_PGMPOOL_IDX if head. */
    uint16_t            iModifiedPrev;
    /** The next page sharing access handler. NIL_PGMPOOL_IDX if tail. */
    uint16_t            iMonitoredNext;
    /** The previous page sharing access handler. NIL_PGMPOOL_IDX if head. */
    uint16_t            iMonitoredPrev;
#endif
#ifdef PGMPOOL_WITH_CACHE
    /** The next page in the age list. */
    uint16_t            iAgeNext;
    /** The previous page in the age list. */
    uint16_t            iAgePrev;
#endif /* PGMPOOL_WITH_CACHE */
    /** Used to indicate that the page is zeroed. */
    bool                fZeroed;
    /** Used to indicate that a PT has non-global entries. */
    bool                fSeenNonGlobal;
    /** Used to indicate that we're monitoring writes to the guest page. */
    bool                fMonitored;
    /** Used to indicate that the page is in the cache (e.g. in the GCPhys hash).
     * (All pages are in the age list.) */
    bool                fCached;
    /** This is used by the R3 access handlers when invoked by an async thread.
     * It's a hack required because of REMR3NotifyHandlerPhysicalDeregister. */
    bool volatile       fReusedFlushPending;
#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    /** Used to indicate that this page can't be flushed. Important for cr3 root pages and shadow pae pd pages. */
    bool                fLocked;
#else
    /** Used to indicate that the guest page we're monitoring is also being used as a CR3.
     * In these cases the access handler acts differently and will check
     * for mapping conflicts like the normal CR3 handler.
     * @todo When we change the CR3 shadowing to use pool pages, this flag can be
     *       replaced by a list of pages which share access handler.
     */
    bool                fCR3Mix;
#endif
} PGMPOOLPAGE, *PPGMPOOLPAGE, **PPPGMPOOLPAGE;


#ifdef PGMPOOL_WITH_CACHE
/** The hash table size. */
# define PGMPOOL_HASH_SIZE      0x40
/** The hash function. */
# define PGMPOOL_HASH(GCPhys)   ( ((GCPhys) >> PAGE_SHIFT) & (PGMPOOL_HASH_SIZE - 1) )
#endif


/**
 * The shadow page pool instance data.
 *
 * It's all one big allocation made at init time, except for the
 * pages themselves. The user nodes follow immediately after the
 * page structures.
 */
typedef struct PGMPOOL
{
    /** The VM handle - R3 Ptr. */
    PVMR3                       pVMR3;
    /** The VM handle - R0 Ptr. */
    PVMR0                       pVMR0;
    /** The VM handle - RC Ptr. */
    PVMRC                       pVMRC;
    /** The max pool size. This includes the special IDs. */
    uint16_t                    cMaxPages;
    /** The current pool size. */
    uint16_t                    cCurPages;
    /** The head of the free page list. */
    uint16_t                    iFreeHead;
    /* Padding. */
    uint16_t                    u16Padding;
#ifdef PGMPOOL_WITH_USER_TRACKING
    /** Head of the chain of free user nodes. */
    uint16_t                    iUserFreeHead;
    /** The number of user nodes we've allocated. */
    uint16_t                    cMaxUsers;
    /** The number of present page table entries in the entire pool. */
    uint32_t                    cPresent;
    /** Pointer to the array of user nodes - RC pointer. */
    RCPTRTYPE(PPGMPOOLUSER)     paUsersRC;
    /** Pointer to the array of user nodes - R3 pointer. */
    R3PTRTYPE(PPGMPOOLUSER)     paUsersR3;
    /** Pointer to the array of user nodes - R0 pointer. */
    R0PTRTYPE(PPGMPOOLUSER)     paUsersR0;
#endif /* PGMPOOL_WITH_USER_TRACKING */
#ifdef PGMPOOL_WITH_GCPHYS_TRACKING
    /** Head of the chain of free phys ext nodes. */
    uint16_t                    iPhysExtFreeHead;
    /** The number of phys ext nodes we've allocated. */
    uint16_t                    cMaxPhysExts;
    /** Pointer to the array of physical xref extent nodes - RC pointer. */
    RCPTRTYPE(PPGMPOOLPHYSEXT)  paPhysExtsRC;
    /** Pointer to the array of physical xref extent nodes - R3 pointer. */
    R3PTRTYPE(PPGMPOOLPHYSEXT)  paPhysExtsR3;
    /** Pointer to the array of physical xref extent nodes - R0 pointer. */
    R0PTRTYPE(PPGMPOOLPHYSEXT)  paPhysExtsR0;
#endif /* PGMPOOL_WITH_GCPHYS_TRACKING */
#ifdef PGMPOOL_WITH_CACHE
    /** Hash table for GCPhys addresses. */
    uint16_t                    aiHash[PGMPOOL_HASH_SIZE];
    /** The head of the age list. */
    uint16_t                    iAgeHead;
    /** The tail of the age list. */
    uint16_t                    iAgeTail;
    /** Set if the cache is enabled. */
    bool                        fCacheEnabled;
#endif /* PGMPOOL_WITH_CACHE */
#ifdef PGMPOOL_WITH_MONITORING
    /** Head of the list of modified pages. */
    uint16_t                    iModifiedHead;
    /** The current number of modified pages. */
    uint16_t                    cModifiedPages;
    /** Access handler, RC. */
    RCPTRTYPE(PFNPGMRCPHYSHANDLER)  pfnAccessHandlerRC;
    /** Access handler, R0. */
    R0PTRTYPE(PFNPGMR0PHYSHANDLER)  pfnAccessHandlerR0;
    /** Access handler, R3. */
    R3PTRTYPE(PFNPGMR3PHYSHANDLER)  pfnAccessHandlerR3;
    /** The access handler description (HC ptr). */
    R3PTRTYPE(const char *)         pszAccessHandler;
#endif /* PGMPOOL_WITH_MONITORING */
    /** The number of pages currently in use. */
    uint16_t                    cUsedPages;
#ifdef VBOX_WITH_STATISTICS
    /** The high water mark for cUsedPages. */
    uint16_t                    cUsedPagesHigh;
    uint32_t                    Alignment1;     /**< Align the next member on a 64-bit boundary. */
    /** Profiling pgmPoolAlloc(). */
    STAMPROFILEADV              StatAlloc;
    /** Profiling pgmPoolClearAll(). */
    STAMPROFILE                 StatClearAll;
    /** Profiling pgmPoolFlushAllInt(). */
    STAMPROFILE                 StatFlushAllInt;
    /** Profiling pgmPoolFlushPage(). */
    STAMPROFILE                 StatFlushPage;
    /** Profiling pgmPoolFree(). */
    STAMPROFILE                 StatFree;
    /** Profiling time spent zeroing pages. */
    STAMPROFILE                 StatZeroPage;
# ifdef PGMPOOL_WITH_USER_TRACKING
    /** Profiling of pgmPoolTrackDeref. */
    STAMPROFILE                 StatTrackDeref;
    /** Profiling pgmTrackFlushGCPhysPT. */
    STAMPROFILE                 StatTrackFlushGCPhysPT;
    /** Profiling pgmTrackFlushGCPhysPTs. */
    STAMPROFILE                 StatTrackFlushGCPhysPTs;
    /** Profiling pgmTrackFlushGCPhysPTsSlow. */
    STAMPROFILE                 StatTrackFlushGCPhysPTsSlow;
    /** Number of times we've been out of user records. */
    STAMCOUNTER                 StatTrackFreeUpOneUser;
# endif
# ifdef PGMPOOL_WITH_GCPHYS_TRACKING
    /** Profiling deref activity related tracking GC physical pages. */
    STAMPROFILE                 StatTrackDerefGCPhys;
    /** Number of linear searches for a HCPhys in the ram ranges. */
    STAMCOUNTER                 StatTrackLinearRamSearches;
    /** The number of failing pgmPoolTrackPhysExtAlloc calls. */
    STAMCOUNTER                 StamTrackPhysExtAllocFailures;
# endif
# ifdef PGMPOOL_WITH_MONITORING
    /** Profiling the RC/R0 access handler. */
    STAMPROFILE                 StatMonitorRZ;
    /** Times we've failed interpreting the instruction. */
    STAMCOUNTER                 StatMonitorRZEmulateInstr;
    /** Profiling the pgmPoolFlushPage calls made from the RC/R0 access handler. */
    STAMPROFILE                 StatMonitorRZFlushPage;
    /** Times we've detected fork(). */
    STAMCOUNTER                 StatMonitorRZFork;
    /** Profiling the RC/R0 access we've handled (except REP STOSD). */
    STAMPROFILE                 StatMonitorRZHandled;
    /** Times we've failed interpreting a patch code instruction. */
    STAMCOUNTER                 StatMonitorRZIntrFailPatch1;
    /** Times we've failed interpreting a patch code instruction during flushing. */
    STAMCOUNTER                 StatMonitorRZIntrFailPatch2;
    /** The number of times we've seen rep prefixes we can't handle. */
    STAMCOUNTER                 StatMonitorRZRepPrefix;
    /** Profiling the REP STOSD cases we've handled. */
    STAMPROFILE                 StatMonitorRZRepStosd;

    /** Profiling the R3 access handler. */
    STAMPROFILE                 StatMonitorR3;
    /** Times we've failed interpreting the instruction. */
    STAMCOUNTER                 StatMonitorR3EmulateInstr;
    /** Profiling the pgmPoolFlushPage calls made from the R3 access handler. */
    STAMPROFILE                 StatMonitorR3FlushPage;
    /** Times we've detected fork(). */
    STAMCOUNTER                 StatMonitorR3Fork;
    /** Profiling the R3 access we've handled (except REP STOSD). */
    STAMPROFILE                 StatMonitorR3Handled;
    /** The number of times we've seen rep prefixes we can't handle. */
    STAMCOUNTER                 StatMonitorR3RepPrefix;
    /** Profiling the REP STOSD cases we've handled. */
    STAMPROFILE                 StatMonitorR3RepStosd;
    /** The number of times we're called in an async thread and need to flush. */
    STAMCOUNTER                 StatMonitorR3Async;
    /** The high water mark for cModifiedPages. */
    uint16_t                    cModifiedPagesHigh;
    uint16_t                    Alignment2[3];  /**< Align the next member on a 64-bit boundary. */
# endif
# ifdef PGMPOOL_WITH_CACHE
    /** The number of cache hits. */
    STAMCOUNTER                 StatCacheHits;
    /** The number of cache misses. */
    STAMCOUNTER                 StatCacheMisses;
    /** The number of times we've got a conflict of 'kind' in the cache. */
    STAMCOUNTER                 StatCacheKindMismatches;
    /** Number of times we've been out of pages. */
    STAMCOUNTER                 StatCacheFreeUpOne;
    /** The number of cacheable allocations. */
    STAMCOUNTER                 StatCacheCacheable;
    /** The number of uncacheable allocations. */
    STAMCOUNTER                 StatCacheUncacheable;
# endif
#elif HC_ARCH_BITS == 64
    uint32_t                    Alignment3;     /**< Align the next member on a 64-bit boundary. */
#endif
    /** The AVL tree for looking up a page by its HC physical address. */
    AVLOHCPHYSTREE              HCPhysTree;
    uint32_t                    Alignment4;     /**< Align the next member on a 64-bit boundary. */
    /** Array of pages. (cMaxPages in length)
     * The Id is the index into this array.
     */
    PGMPOOLPAGE                 aPages[PGMPOOL_IDX_FIRST];
} PGMPOOL, *PPGMPOOL, **PPPGMPOOL;
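
/*
 * Illustrative sketch of a GCPhys cache lookup using the hash declared above
 * (requires PGMPOOL_WITH_CACHE). This is hypothetical code, not a function
 * from this file; pPool and GCPhys are assumed inputs, and NIL_PGMPOOL_IDX
 * (defined earlier in this header) terminates the chain.
 *
 * @code
 *    uint16_t i = pPool->aiHash[PGMPOOL_HASH(GCPhys)];
 *    while (i != NIL_PGMPOOL_IDX)
 *    {
 *        PPGMPOOLPAGE pPage = &pPool->aPages[i];
 *        if (pPage->GCPhys == GCPhys)
 *            break; // candidate hit; the caller still has to check enmKind.
 *        i = pPage->iNext;
 *    }
 * @endcode
 */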


/** @def PGMPOOL_PAGE_2_PTR
 * Maps a pool page into the current context.
 *
 * @returns Pointer to the page.
 * @param   pVM     The VM handle.
 * @param   pPage   The pool page.
 *
 * @remark  In RC this uses PGMGCDynMapHCPage(), so it will consume one of the
 *          small page window slots employed by that function. Be careful.
 * @remark  There is no need to assert on the result.
 */
#if defined(IN_RC)
# define PGMPOOL_PAGE_2_PTR(pVM, pPage)     pgmPoolMapPageInlined(&(pVM)->pgm.s, (pPage))
#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
# define PGMPOOL_PAGE_2_PTR(pVM, pPage)     pgmPoolMapPageInlined(&(pVM)->pgm.s, (pPage))
#elif defined(VBOX_STRICT)
# define PGMPOOL_PAGE_2_PTR(pVM, pPage)     pgmPoolMapPageStrict(pPage)
DECLINLINE(void *) pgmPoolMapPageStrict(PPGMPOOLPAGE pPage)
{
    Assert(pPage->pvPageR3);
    return pPage->pvPageR3;
}
#else
# define PGMPOOL_PAGE_2_PTR(pVM, pPage)     ((pPage)->pvPageR3)
#endif
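
/*
 * Illustrative usage sketch: mapping a shadow PAE page table and reading an
 * entry through the macro above. Hypothetical code; pVM, pPage and iPte are
 * assumed, and PX86PTPAE/X86PTEPAE come from the x86 structure definitions.
 *
 * @code
 *    PX86PTPAE pShwPT = (PX86PTPAE)PGMPOOL_PAGE_2_PTR(pVM, pPage);
 *    X86PTEPAE Pte    = pShwPT->a[iPte]; // no need to assert on the mapping.
 * @endcode
 */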

/** @def PGMPOOL_PAGE_2_PTR_BY_PGM
 * Maps a pool page into the current context.
 *
 * @returns Pointer to the page.
 * @param   pPGM    Pointer to the PGM instance data.
 * @param   pPage   The pool page.
 *
 * @remark  In RC this uses PGMGCDynMapHCPage(), so it will consume one of the
 *          small page window slots employed by that function. Be careful.
 * @remark  There is no need to assert on the result.
 */
#if defined(IN_RC)
# define PGMPOOL_PAGE_2_PTR_BY_PGM(pPGM, pPage)     pgmPoolMapPageInlined((pPGM), (pPage))
#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
# define PGMPOOL_PAGE_2_PTR_BY_PGM(pPGM, pPage)     pgmPoolMapPageInlined((pPGM), (pPage))
#else
# define PGMPOOL_PAGE_2_PTR_BY_PGM(pPGM, pPage)     PGMPOOL_PAGE_2_PTR(PGM2VM(pPGM), pPage)
#endif


/** @name Per guest page tracking data.
 * This is currently stored as a 16-bit word in the PGMPAGE structure. The idea
 * though is to use more bits for it and split it up later on; for now we'll
 * play it safe and change as little as possible.
 *
 * The 16-bit word has two parts:
 *
 * The first 14 bits form the @a idx field. It is either the index of a page in
 * the shadow page pool, or an index into the extent list.
 *
 * The 2 topmost bits make up the @a cRefs field, which counts the number of
 * shadow page pool references to the page. If cRefs equals
 * PGMPOOL_TD_CREFS_PHYSEXT, then the @a idx field is an index into the extent
 * (misnomer) table and not the shadow page pool.
 *
 * See PGM_PAGE_GET_TRACKING and PGM_PAGE_SET_TRACKING for how to get and set
 * the 16-bit word.
 *
 * @{ */
/** The shift count for getting to the cRefs part. */
#define PGMPOOL_TD_CREFS_SHIFT          14
/** The mask applied after shifting the tracking data down by
 * PGMPOOL_TD_CREFS_SHIFT. */
#define PGMPOOL_TD_CREFS_MASK           0x3
/** The cRefs value used to indicate that the idx is the head of a
 * physical cross reference list. */
#define PGMPOOL_TD_CREFS_PHYSEXT        PGMPOOL_TD_CREFS_MASK
/** The shift used to get idx. */
#define PGMPOOL_TD_IDX_SHIFT            0
/** The mask applied to the idx after shifting down by PGMPOOL_TD_IDX_SHIFT. */
#define PGMPOOL_TD_IDX_MASK             0x3fff
/** The idx value when we're out of PGMPOOLPHYSEXT entries and/or there are
 * simply too many mappings of this page. */
#define PGMPOOL_TD_IDX_OVERFLOWED       PGMPOOL_TD_IDX_MASK
/** @} */
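
/*
 * Illustrative sketch of composing and decomposing the 16-bit tracking word
 * with the definitions above. Hypothetical code; cRefs and idx are assumed
 * inputs.
 *
 * @code
 *    uint16_t u16 = (uint16_t)(  ((cRefs & PGMPOOL_TD_CREFS_MASK) << PGMPOOL_TD_CREFS_SHIFT)
 *                              | ((idx   & PGMPOOL_TD_IDX_MASK)   << PGMPOOL_TD_IDX_SHIFT));
 *    uint16_t cRefs2 = (u16 >> PGMPOOL_TD_CREFS_SHIFT) & PGMPOOL_TD_CREFS_MASK;
 *    uint16_t idx2   = (u16 >> PGMPOOL_TD_IDX_SHIFT)   & PGMPOOL_TD_IDX_MASK;
 *    // cRefs2 == PGMPOOL_TD_CREFS_PHYSEXT means idx2 indexes the extent list.
 * @endcode
 */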

#ifdef MM_RAM_FLAGS_CREFS_SHIFT
# if MM_RAM_FLAGS_CREFS_SHIFT - MM_RAM_FLAGS_IDX_SHIFT != PGMPOOL_TD_CREFS_SHIFT
#  error "MM_RAM_FLAGS_CREFS_SHIFT - MM_RAM_FLAGS_IDX_SHIFT != PGMPOOL_TD_CREFS_SHIFT"
# endif
# if MM_RAM_FLAGS_CREFS_MASK != PGMPOOL_TD_CREFS_MASK
#  error "MM_RAM_FLAGS_CREFS_MASK != PGMPOOL_TD_CREFS_MASK"
# endif
# if MM_RAM_FLAGS_CREFS_PHYSEXT != PGMPOOL_TD_CREFS_PHYSEXT
#  error "MM_RAM_FLAGS_CREFS_PHYSEXT != PGMPOOL_TD_CREFS_PHYSEXT"
# endif
# if MM_RAM_FLAGS_IDX_SHIFT - 48 != PGMPOOL_TD_IDX_SHIFT
#  error "MM_RAM_FLAGS_IDX_SHIFT - 48 != PGMPOOL_TD_IDX_SHIFT"
# endif
# if MM_RAM_FLAGS_IDX_MASK != PGMPOOL_TD_IDX_MASK
#  error "MM_RAM_FLAGS_IDX_MASK != PGMPOOL_TD_IDX_MASK"
# endif
# if MM_RAM_FLAGS_IDX_OVERFLOWED != PGMPOOL_TD_IDX_OVERFLOWED
#  error "MM_RAM_FLAGS_IDX_OVERFLOWED != PGMPOOL_TD_IDX_OVERFLOWED"
# endif
#endif


/**
 * The trees use self-relative offsets as pointers, so all their data,
 * including the root pointers, must live on the heap for HC and GC
 * to share the same layout.
 */
typedef struct PGMTREES
{
    /** Physical access handlers (AVL range+offsetptr tree). */
    AVLROGCPHYSTREE                 PhysHandlers;
    /** Virtual access handlers (AVL range + GC ptr tree). */
    AVLROGCPTRTREE                  VirtHandlers;
    /** Virtual access handlers (Phys range AVL range + offsetptr tree). */
    AVLROGCPHYSTREE                 PhysToVirtHandlers;
    /** Virtual access handlers for the hypervisor (AVL range + GC ptr tree). */
    AVLROGCPTRTREE                  HyperVirtHandlers;
} PGMTREES;
/** Pointer to PGM trees. */
typedef PGMTREES *PPGMTREES;
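
/*
 * Illustrative sketch: because the trees store self-relative offsets, the
 * same lookup code works in every context once the context's tree pointer is
 * used. A ring-3 physical handler lookup might look like the line below; this
 * is hypothetical code assuming pPGM and GCPhys, the PGM::pTreesR3 member
 * declared later in this file, and the iprt/avl.h range lookup routine.
 *
 * @code
 *    PPGMPHYSHANDLER pHandler = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pPGM->pTreesR3->PhysHandlers, GCPhys);
 * @endcode
 */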


/** @name Paging mode macros
 * @{ */
#ifdef IN_RC
# define PGM_CTX(a,b)                   a##RC##b
# define PGM_CTX_STR(a,b)               a "GC" b
# define PGM_CTX_DECL(type)             VMMRCDECL(type)
#else
# ifdef IN_RING3
#  define PGM_CTX(a,b)                  a##R3##b
#  define PGM_CTX_STR(a,b)              a "R3" b
#  define PGM_CTX_DECL(type)            DECLCALLBACK(type)
# else
#  define PGM_CTX(a,b)                  a##R0##b
#  define PGM_CTX_STR(a,b)              a "R0" b
#  define PGM_CTX_DECL(type)            VMMDECL(type)
# endif
#endif

#define PGM_GST_NAME_REAL(name)                 PGM_CTX(pgm,GstReal##name)
#define PGM_GST_NAME_RC_REAL_STR(name)          "pgmRCGstReal" #name
#define PGM_GST_NAME_R0_REAL_STR(name)          "pgmR0GstReal" #name
#define PGM_GST_NAME_PROT(name)                 PGM_CTX(pgm,GstProt##name)
#define PGM_GST_NAME_RC_PROT_STR(name)          "pgmRCGstProt" #name
#define PGM_GST_NAME_R0_PROT_STR(name)          "pgmR0GstProt" #name
#define PGM_GST_NAME_32BIT(name)                PGM_CTX(pgm,Gst32Bit##name)
#define PGM_GST_NAME_RC_32BIT_STR(name)         "pgmRCGst32Bit" #name
#define PGM_GST_NAME_R0_32BIT_STR(name)         "pgmR0Gst32Bit" #name
#define PGM_GST_NAME_PAE(name)                  PGM_CTX(pgm,GstPAE##name)
#define PGM_GST_NAME_RC_PAE_STR(name)           "pgmRCGstPAE" #name
#define PGM_GST_NAME_R0_PAE_STR(name)           "pgmR0GstPAE" #name
#define PGM_GST_NAME_AMD64(name)                PGM_CTX(pgm,GstAMD64##name)
#define PGM_GST_NAME_RC_AMD64_STR(name)         "pgmRCGstAMD64" #name
#define PGM_GST_NAME_R0_AMD64_STR(name)         "pgmR0GstAMD64" #name
#define PGM_GST_PFN(name, pVM)                  ((pVM)->pgm.s.PGM_CTX(pfn,Gst##name))
#define PGM_GST_DECL(type, name)                PGM_CTX_DECL(type) PGM_GST_NAME(name)

#define PGM_SHW_NAME_32BIT(name)                PGM_CTX(pgm,Shw32Bit##name)
#define PGM_SHW_NAME_RC_32BIT_STR(name)         "pgmRCShw32Bit" #name
#define PGM_SHW_NAME_R0_32BIT_STR(name)         "pgmR0Shw32Bit" #name
#define PGM_SHW_NAME_PAE(name)                  PGM_CTX(pgm,ShwPAE##name)
#define PGM_SHW_NAME_RC_PAE_STR(name)           "pgmRCShwPAE" #name
#define PGM_SHW_NAME_R0_PAE_STR(name)           "pgmR0ShwPAE" #name
#define PGM_SHW_NAME_AMD64(name)                PGM_CTX(pgm,ShwAMD64##name)
#define PGM_SHW_NAME_RC_AMD64_STR(name)         "pgmRCShwAMD64" #name
#define PGM_SHW_NAME_R0_AMD64_STR(name)         "pgmR0ShwAMD64" #name
#define PGM_SHW_NAME_NESTED(name)               PGM_CTX(pgm,ShwNested##name)
#define PGM_SHW_NAME_RC_NESTED_STR(name)        "pgmRCShwNested" #name
#define PGM_SHW_NAME_R0_NESTED_STR(name)        "pgmR0ShwNested" #name
#define PGM_SHW_NAME_EPT(name)                  PGM_CTX(pgm,ShwEPT##name)
#define PGM_SHW_NAME_RC_EPT_STR(name)           "pgmRCShwEPT" #name
#define PGM_SHW_NAME_R0_EPT_STR(name)           "pgmR0ShwEPT" #name
#define PGM_SHW_DECL(type, name)                PGM_CTX_DECL(type) PGM_SHW_NAME(name)
#define PGM_SHW_PFN(name, pVM)                  ((pVM)->pgm.s.PGM_CTX(pfn,Shw##name))

/* Shw_Gst */
#define PGM_BTH_NAME_32BIT_REAL(name)           PGM_CTX(pgm,Bth32BitReal##name)
#define PGM_BTH_NAME_32BIT_PROT(name)           PGM_CTX(pgm,Bth32BitProt##name)
#define PGM_BTH_NAME_32BIT_32BIT(name)          PGM_CTX(pgm,Bth32Bit32Bit##name)
#define PGM_BTH_NAME_PAE_REAL(name)             PGM_CTX(pgm,BthPAEReal##name)
#define PGM_BTH_NAME_PAE_PROT(name)             PGM_CTX(pgm,BthPAEProt##name)
#define PGM_BTH_NAME_PAE_32BIT(name)            PGM_CTX(pgm,BthPAE32Bit##name)
#define PGM_BTH_NAME_PAE_PAE(name)              PGM_CTX(pgm,BthPAEPAE##name)
#define PGM_BTH_NAME_AMD64_PROT(name)           PGM_CTX(pgm,BthAMD64Prot##name)
#define PGM_BTH_NAME_AMD64_AMD64(name)          PGM_CTX(pgm,BthAMD64AMD64##name)
#define PGM_BTH_NAME_NESTED_REAL(name)          PGM_CTX(pgm,BthNestedReal##name)
#define PGM_BTH_NAME_NESTED_PROT(name)          PGM_CTX(pgm,BthNestedProt##name)
#define PGM_BTH_NAME_NESTED_32BIT(name)         PGM_CTX(pgm,BthNested32Bit##name)
#define PGM_BTH_NAME_NESTED_PAE(name)           PGM_CTX(pgm,BthNestedPAE##name)
#define PGM_BTH_NAME_NESTED_AMD64(name)         PGM_CTX(pgm,BthNestedAMD64##name)
#define PGM_BTH_NAME_EPT_REAL(name)             PGM_CTX(pgm,BthEPTReal##name)
#define PGM_BTH_NAME_EPT_PROT(name)             PGM_CTX(pgm,BthEPTProt##name)
#define PGM_BTH_NAME_EPT_32BIT(name)            PGM_CTX(pgm,BthEPT32Bit##name)
#define PGM_BTH_NAME_EPT_PAE(name)              PGM_CTX(pgm,BthEPTPAE##name)
#define PGM_BTH_NAME_EPT_AMD64(name)            PGM_CTX(pgm,BthEPTAMD64##name)

#define PGM_BTH_NAME_RC_32BIT_REAL_STR(name)    "pgmRCBth32BitReal" #name
#define PGM_BTH_NAME_RC_32BIT_PROT_STR(name)    "pgmRCBth32BitProt" #name
#define PGM_BTH_NAME_RC_32BIT_32BIT_STR(name)   "pgmRCBth32Bit32Bit" #name
#define PGM_BTH_NAME_RC_PAE_REAL_STR(name)      "pgmRCBthPAEReal" #name
#define PGM_BTH_NAME_RC_PAE_PROT_STR(name)      "pgmRCBthPAEProt" #name
#define PGM_BTH_NAME_RC_PAE_32BIT_STR(name)     "pgmRCBthPAE32Bit" #name
#define PGM_BTH_NAME_RC_PAE_PAE_STR(name)       "pgmRCBthPAEPAE" #name
#define PGM_BTH_NAME_RC_AMD64_AMD64_STR(name)   "pgmRCBthAMD64AMD64" #name
#define PGM_BTH_NAME_RC_NESTED_REAL_STR(name)   "pgmRCBthNestedReal" #name
#define PGM_BTH_NAME_RC_NESTED_PROT_STR(name)   "pgmRCBthNestedProt" #name
#define PGM_BTH_NAME_RC_NESTED_32BIT_STR(name)  "pgmRCBthNested32Bit" #name
#define PGM_BTH_NAME_RC_NESTED_PAE_STR(name)    "pgmRCBthNestedPAE" #name
#define PGM_BTH_NAME_RC_NESTED_AMD64_STR(name)  "pgmRCBthNestedAMD64" #name
#define PGM_BTH_NAME_RC_EPT_REAL_STR(name)      "pgmRCBthEPTReal" #name
#define PGM_BTH_NAME_RC_EPT_PROT_STR(name)      "pgmRCBthEPTProt" #name
#define PGM_BTH_NAME_RC_EPT_32BIT_STR(name)     "pgmRCBthEPT32Bit" #name
#define PGM_BTH_NAME_RC_EPT_PAE_STR(name)       "pgmRCBthEPTPAE" #name
#define PGM_BTH_NAME_RC_EPT_AMD64_STR(name)     "pgmRCBthEPTAMD64" #name
#define PGM_BTH_NAME_R0_32BIT_REAL_STR(name)    "pgmR0Bth32BitReal" #name
#define PGM_BTH_NAME_R0_32BIT_PROT_STR(name)    "pgmR0Bth32BitProt" #name
#define PGM_BTH_NAME_R0_32BIT_32BIT_STR(name)   "pgmR0Bth32Bit32Bit" #name
#define PGM_BTH_NAME_R0_PAE_REAL_STR(name)      "pgmR0BthPAEReal" #name
#define PGM_BTH_NAME_R0_PAE_PROT_STR(name)      "pgmR0BthPAEProt" #name
#define PGM_BTH_NAME_R0_PAE_32BIT_STR(name)     "pgmR0BthPAE32Bit" #name
#define PGM_BTH_NAME_R0_PAE_PAE_STR(name)       "pgmR0BthPAEPAE" #name
#define PGM_BTH_NAME_R0_AMD64_PROT_STR(name)    "pgmR0BthAMD64Prot" #name
#define PGM_BTH_NAME_R0_AMD64_AMD64_STR(name)   "pgmR0BthAMD64AMD64" #name
#define PGM_BTH_NAME_R0_NESTED_REAL_STR(name)   "pgmR0BthNestedReal" #name
#define PGM_BTH_NAME_R0_NESTED_PROT_STR(name)   "pgmR0BthNestedProt" #name
#define PGM_BTH_NAME_R0_NESTED_32BIT_STR(name)  "pgmR0BthNested32Bit" #name
#define PGM_BTH_NAME_R0_NESTED_PAE_STR(name)    "pgmR0BthNestedPAE" #name
#define PGM_BTH_NAME_R0_NESTED_AMD64_STR(name)  "pgmR0BthNestedAMD64" #name
#define PGM_BTH_NAME_R0_EPT_REAL_STR(name)      "pgmR0BthEPTReal" #name
#define PGM_BTH_NAME_R0_EPT_PROT_STR(name)      "pgmR0BthEPTProt" #name
#define PGM_BTH_NAME_R0_EPT_32BIT_STR(name)     "pgmR0BthEPT32Bit" #name
#define PGM_BTH_NAME_R0_EPT_PAE_STR(name)       "pgmR0BthEPTPAE" #name
#define PGM_BTH_NAME_R0_EPT_AMD64_STR(name)     "pgmR0BthEPTAMD64" #name

#define PGM_BTH_DECL(type, name)                PGM_CTX_DECL(type) PGM_BTH_NAME(name)
#define PGM_BTH_PFN(name, pVM)                  ((pVM)->pgm.s.PGM_CTX(pfn,Bth##name))
/** @} */
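
/*
 * Illustrative note: the macros above merely paste the context infix into a
 * symbol name at compile time. Hypothetical ring-3 expansions:
 *
 * @code
 *    PGM_GST_NAME_PAE(GetPage)   // -> pgmR3GstPAEGetPage
 *    PGM_BTH_PFN(SyncCR3, pVM)   // -> (pVM)->pgm.s.pfnR3BthSyncCR3
 * @endcode
 *
 * The same source compiled for ring-0 or raw-mode context yields the R0/RC
 * variants instead.
 */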

/**
 * Data for each paging mode.
 */
typedef struct PGMMODEDATA
{
    /** The guest mode type. */
    uint32_t                        uGstType;
    /** The shadow mode type. */
    uint32_t                        uShwType;

    /** @name Function pointers for Shadow paging.
     * @{
     */
    DECLR3CALLBACKMEMBER(int, pfnR3ShwRelocate,(PVM pVM, RTGCPTR offDelta));
    DECLR3CALLBACKMEMBER(int, pfnR3ShwExit,(PVM pVM));
    DECLR3CALLBACKMEMBER(int, pfnR3ShwGetPage,(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
    DECLR3CALLBACKMEMBER(int, pfnR3ShwModifyPage,(PVM pVM, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));

    DECLRCCALLBACKMEMBER(int, pfnRCShwGetPage,(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
    DECLRCCALLBACKMEMBER(int, pfnRCShwModifyPage,(PVM pVM, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));

    DECLR0CALLBACKMEMBER(int, pfnR0ShwGetPage,(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
    DECLR0CALLBACKMEMBER(int, pfnR0ShwModifyPage,(PVM pVM, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
    /** @} */

    /** @name Function pointers for Guest paging.
     * @{
     */
    DECLR3CALLBACKMEMBER(int, pfnR3GstRelocate,(PVM pVM, RTGCPTR offDelta));
    DECLR3CALLBACKMEMBER(int, pfnR3GstExit,(PVM pVM));
    DECLR3CALLBACKMEMBER(int, pfnR3GstGetPage,(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
    DECLR3CALLBACKMEMBER(int, pfnR3GstModifyPage,(PVM pVM, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
    DECLR3CALLBACKMEMBER(int, pfnR3GstGetPDE,(PVM pVM, RTGCPTR GCPtr, PX86PDEPAE pPde));
#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    DECLR3CALLBACKMEMBER(int, pfnR3GstMonitorCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
    DECLR3CALLBACKMEMBER(int, pfnR3GstUnmonitorCR3,(PVM pVM));
#endif
#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    R3PTRTYPE(PFNPGMR3PHYSHANDLER)  pfnR3GstWriteHandlerCR3;
    R3PTRTYPE(const char *)         pszR3GstWriteHandlerCR3;
    R3PTRTYPE(PFNPGMR3PHYSHANDLER)  pfnR3GstPAEWriteHandlerCR3;
    R3PTRTYPE(const char *)         pszR3GstPAEWriteHandlerCR3;
#endif
    DECLRCCALLBACKMEMBER(int, pfnRCGstGetPage,(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
    DECLRCCALLBACKMEMBER(int, pfnRCGstModifyPage,(PVM pVM, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
    DECLRCCALLBACKMEMBER(int, pfnRCGstGetPDE,(PVM pVM, RTGCPTR GCPtr, PX86PDEPAE pPde));
#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    DECLRCCALLBACKMEMBER(int, pfnRCGstMonitorCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
    DECLRCCALLBACKMEMBER(int, pfnRCGstUnmonitorCR3,(PVM pVM));
#endif
#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    RCPTRTYPE(PFNPGMRCPHYSHANDLER)  pfnRCGstWriteHandlerCR3;
    RCPTRTYPE(PFNPGMRCPHYSHANDLER)  pfnRCGstPAEWriteHandlerCR3;
#endif
    DECLR0CALLBACKMEMBER(int, pfnR0GstGetPage,(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
    DECLR0CALLBACKMEMBER(int, pfnR0GstModifyPage,(PVM pVM, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
    DECLR0CALLBACKMEMBER(int, pfnR0GstGetPDE,(PVM pVM, RTGCPTR GCPtr, PX86PDEPAE pPde));
#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    DECLR0CALLBACKMEMBER(int, pfnR0GstMonitorCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
    DECLR0CALLBACKMEMBER(int, pfnR0GstUnmonitorCR3,(PVM pVM));
#endif
#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    R0PTRTYPE(PFNPGMRCPHYSHANDLER)  pfnR0GstWriteHandlerCR3;
    R0PTRTYPE(PFNPGMRCPHYSHANDLER)  pfnR0GstPAEWriteHandlerCR3;
#endif
    /** @} */

    /** @name Function pointers for Both Shadow and Guest paging.
     * @{
     */
    DECLR3CALLBACKMEMBER(int, pfnR3BthRelocate,(PVM pVM, RTGCPTR offDelta));
    /* no pfnR3BthTrap0eHandler */
    DECLR3CALLBACKMEMBER(int, pfnR3BthInvalidatePage,(PVM pVM, RTGCPTR GCPtrPage));
    DECLR3CALLBACKMEMBER(int, pfnR3BthSyncCR3,(PVM pVM, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
    DECLR3CALLBACKMEMBER(int, pfnR3BthSyncPage,(PVM pVM, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
    DECLR3CALLBACKMEMBER(int, pfnR3BthPrefetchPage,(PVM pVM, RTGCPTR GCPtrPage));
    DECLR3CALLBACKMEMBER(int, pfnR3BthVerifyAccessSyncPage,(PVM pVM, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
#ifdef VBOX_STRICT
    DECLR3CALLBACKMEMBER(unsigned, pfnR3BthAssertCR3,(PVM pVM, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
#endif
    DECLR3CALLBACKMEMBER(int, pfnR3BthMapCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
    DECLR3CALLBACKMEMBER(int, pfnR3BthUnmapCR3,(PVM pVM));

    DECLRCCALLBACKMEMBER(int, pfnRCBthTrap0eHandler,(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
    DECLRCCALLBACKMEMBER(int, pfnRCBthInvalidatePage,(PVM pVM, RTGCPTR GCPtrPage));
    DECLRCCALLBACKMEMBER(int, pfnRCBthSyncCR3,(PVM pVM, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
    DECLRCCALLBACKMEMBER(int, pfnRCBthSyncPage,(PVM pVM, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
    DECLRCCALLBACKMEMBER(int, pfnRCBthPrefetchPage,(PVM pVM, RTGCPTR GCPtrPage));
    DECLRCCALLBACKMEMBER(int, pfnRCBthVerifyAccessSyncPage,(PVM pVM, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
#ifdef VBOX_STRICT
    DECLRCCALLBACKMEMBER(unsigned, pfnRCBthAssertCR3,(PVM pVM, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
#endif
    DECLRCCALLBACKMEMBER(int, pfnRCBthMapCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
    DECLRCCALLBACKMEMBER(int, pfnRCBthUnmapCR3,(PVM pVM));

    DECLR0CALLBACKMEMBER(int, pfnR0BthTrap0eHandler,(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
    DECLR0CALLBACKMEMBER(int, pfnR0BthInvalidatePage,(PVM pVM, RTGCPTR GCPtrPage));
    DECLR0CALLBACKMEMBER(int, pfnR0BthSyncCR3,(PVM pVM, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
    DECLR0CALLBACKMEMBER(int, pfnR0BthSyncPage,(PVM pVM, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
    DECLR0CALLBACKMEMBER(int, pfnR0BthPrefetchPage,(PVM pVM, RTGCPTR GCPtrPage));
    DECLR0CALLBACKMEMBER(int, pfnR0BthVerifyAccessSyncPage,(PVM pVM, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
#ifdef VBOX_STRICT
    DECLR0CALLBACKMEMBER(unsigned, pfnR0BthAssertCR3,(PVM pVM, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
#endif
    DECLR0CALLBACKMEMBER(int, pfnR0BthMapCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
    DECLR0CALLBACKMEMBER(int, pfnR0BthUnmapCR3,(PVM pVM));
    /** @} */
} PGMMODEDATA, *PPGMMODEDATA;



/**
 * Converts a PGM pointer into a VM pointer.
 * @returns Pointer to the VM structure the PGM is part of.
 * @param   pPGM    Pointer to PGM instance data.
 */
#define PGM2VM(pPGM)    ( (PVM)((char*)pPGM - pPGM->offVM) )
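
/*
 * Illustrative note: PGM2VM is the usual containing-structure trick, undoing
 * the offset recorded in PGM::offVM at init time. Hypothetical use:
 *
 * @code
 *    PVM pVM = PGM2VM(pPGM);
 * @endcode
 */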

/**
 * PGM Data (part of VM)
 */
typedef struct PGM
{
    /** Offset to the VM structure. */
    RTINT                       offVM;
    /** Offset of the PGMCPU structure relative to VMCPU. */
    int32_t                     offVCpu;
    /** Alignment padding. */
    int32_t                     i32Alignment;

    /*
     * This will be redefined at least two more times before we're done, I'm sure.
     * The current code is only to get on with the coding.
     *    - 2004-06-10: initial version, bird.
     *    - 2004-07-02: 1st time, bird.
     *    - 2004-10-18: 2nd time, bird.
     *    - 2005-07-xx: 3rd time, bird.
     */

    /** Pointer to the page table entries for the dynamic page mapping area - GCPtr. */
    RCPTRTYPE(PX86PTE)          paDynPageMap32BitPTEsGC;
    /** Pointer to the page table entries for the dynamic page mapping area - GCPtr. */
    RCPTRTYPE(PX86PTEPAE)       paDynPageMapPaePTEsGC;

    /** The host paging mode. (This is what SUPLib reports.) */
    SUPPAGINGMODE               enmHostMode;
    /** The shadow paging mode. */
    PGMMODE                     enmShadowMode;
    /** The guest paging mode. */
    PGMMODE                     enmGuestMode;

    /** The physical address currently in the guest CR3 register. */
    RTGCPHYS                    GCPhysCR3;
    /** Pointer to the 5 page CR3 content mapping.
     * The first page is always the CR3 (in some form) while the 4 other pages
     * are used for the PDs in PAE mode. */
    RTGCPTR                     GCPtrCR3Mapping;
#if HC_ARCH_BITS == 64 && GC_ARCH_BITS == 32
    uint32_t                    u32Alignment;
#endif
#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    /** The physical address of the currently monitored guest CR3 page.
     * When this value is NIL_RTGCPHYS no page is being monitored. */
    RTGCPHYS                    GCPhysGstCR3Monitored;
#endif
    /** @name 32-bit Guest Paging.
     * @{ */
    /** The guest's page directory, R3 pointer. */
    R3PTRTYPE(PX86PD)           pGst32BitPdR3;
#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    /** The guest's page directory, R0 pointer. */
    R0PTRTYPE(PX86PD)           pGst32BitPdR0;
#endif
    /** The guest's page directory, static RC mapping. */
    RCPTRTYPE(PX86PD)           pGst32BitPdRC;
    /** @} */

    /** @name PAE Guest Paging.
     * @{ */
    /** The guest's page directory pointer table, static RC mapping. */
    RCPTRTYPE(PX86PDPT)         pGstPaePdptRC;
    /** The guest's page directory pointer table, R3 pointer. */
    R3PTRTYPE(PX86PDPT)         pGstPaePdptR3;
#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    /** The guest's page directory pointer table, R0 pointer. */
    R0PTRTYPE(PX86PDPT)         pGstPaePdptR0;
#endif

    /** The guest's page directories, R3 pointers.
     * These are individual pointers and don't have to be adjacent.
     * These don't have to be up-to-date - use pgmGstGetPaePD() to access them. */
    R3PTRTYPE(PX86PDPAE)        apGstPaePDsR3[4];
    /** The guest's page directories, R0 pointers.
     * Same restrictions as apGstPaePDsR3. */
#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    R0PTRTYPE(PX86PDPAE)        apGstPaePDsR0[4];
#endif
    /** The guest's page directories, static GC mapping.
     * Unlike the R3/R0 array the first entry can be accessed as a 2048 entry PD.
     * These don't have to be up-to-date - use pgmGstGetPaePD() to access them. */
    RCPTRTYPE(PX86PDPAE)        apGstPaePDsRC[4];
    /** The physical addresses of the guest page directories (PAE) pointed to by apGstPagePDsHC/GC. */
    RTGCPHYS                    aGCPhysGstPaePDs[4];
    /** The physical addresses of the monitored guest page directories (PAE). */
    RTGCPHYS                    aGCPhysGstPaePDsMonitored[4];
    /** @} */

    /** @name AMD64 Guest Paging.
     * @{ */
    /** The guest's page-map level 4 table, R3 pointer. */
    R3PTRTYPE(PX86PML4)         pGstAmd64Pml4R3;
#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    /** The guest's page-map level 4 table, R0 pointer. */
    R0PTRTYPE(PX86PML4)         pGstAmd64Pml4R0;
#endif
    /** @} */

    /** @name Shadow paging
     * @{ */
    /** The root page table - R3 Ptr. */
    R3PTRTYPE(void *)           pShwRootR3;
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    /** The root page table - R0 Ptr. */
    R0PTRTYPE(void *)           pShwRootR0;
# endif
    /** The root page table - RC Ptr. */
    RCPTRTYPE(void *)           pShwRootRC;
# if HC_ARCH_BITS == 64
    uint32_t                    u32Padding1;    /**< alignment padding. */
# endif
    /** The Physical Address (HC) of the current active shadow CR3. */
    RTHCPHYS                    HCPhysShwCR3;
    /** Pointer to the page of the current active CR3 - R3 Ptr. */
    R3PTRTYPE(PPGMPOOLPAGE)     pShwPageCR3R3;
    /** Pointer to the page of the current active CR3 - R0 Ptr. */
    R0PTRTYPE(PPGMPOOLPAGE)     pShwPageCR3R0;
    /** Pointer to the page of the current active CR3 - RC Ptr. */
    RCPTRTYPE(PPGMPOOLPAGE)     pShwPageCR3RC;
    /* The shadow page pool index of the user table as specified during allocation; useful for freeing root pages. */
    uint32_t                    iShwUser;
    /* The index into the user table (shadowed) as specified during allocation; useful for freeing root pages. */
    uint32_t                    iShwUserTable;
# if HC_ARCH_BITS == 64
    RTRCPTR                     alignment6;     /**< structure size alignment. */
# endif
    /** @} */
#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    /** @name 32-bit Shadow Paging
     * @{ */
    /** The 32-Bit PD - R3 Ptr. */
    R3PTRTYPE(PX86PD)           pShw32BitPdR3;
    /** The 32-Bit PD - R0 Ptr. */
    R0PTRTYPE(PX86PD)           pShw32BitPdR0;
    /** The 32-Bit PD - RC Ptr. */
    RCPTRTYPE(PX86PD)           pShw32BitPdRC;
# if HC_ARCH_BITS == 64
    uint32_t                    u32Padding10;   /**< alignment padding. */
# endif
    /** The Physical Address (HC) of the 32-Bit PD. */
    RTHCPHYS                    HCPhysShw32BitPD;
    /** @} */

    /** @name PAE Shadow Paging
     * @{ */
    /** The four PDs for the low 4GB - R3 Ptr.
     * Even though these are 4 pointers, what they point at is a single table.
     * Thus, it's possible to walk the 2048 entries starting where apShwPaePDsR3[0] points. */
    R3PTRTYPE(PX86PDPAE)        apShwPaePDsR3[4];
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    /** The four PDs for the low 4GB - R0 Ptr.
     * Same kind of mapping as apShwPaePDsR3. */
    R0PTRTYPE(PX86PDPAE)        apShwPaePDsR0[4];
# endif
    /** The four PDs for the low 4GB - RC Ptr.
     * Same kind of mapping as apShwPaePDsR3. */
    RCPTRTYPE(PX86PDPAE)        apShwPaePDsRC[4];
    /** The Physical Address (HC) of the four PDs for the low 4GB.
     * These are *NOT* 4 contiguous pages. */
    RTHCPHYS                    aHCPhysPaePDs[4];
    /** The Physical Address (HC) of the PAE PDPT. */
    RTHCPHYS                    HCPhysShwPaePdpt;
    /** The PAE PDPT - R3 Ptr. */
    R3PTRTYPE(PX86PDPT)         pShwPaePdptR3;
    /** The PAE PDPT - R0 Ptr. */
    R0PTRTYPE(PX86PDPT)         pShwPaePdptR0;
    /** The PAE PDPT - RC Ptr. */
    RCPTRTYPE(PX86PDPT)         pShwPaePdptRC;
    /** @} */
# if HC_ARCH_BITS == 64
    RTRCPTR                     alignment5;     /**< structure size alignment. */
# endif
#endif /* !VBOX_WITH_PGMPOOL_PAGING_ONLY */
    /** @name Nested Shadow Paging
     * @{ */
    /** Root table; format depends on the host paging mode (AMD-V) or EPT - R3 pointer. */
    RTR3PTR                     pShwNestedRootR3;
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    /** Root table; format depends on the host paging mode (AMD-V) or EPT - R0 pointer. */
    RTR0PTR                     pShwNestedRootR0;
# endif
    /** The Physical Address (HC) of the nested paging root. */
    RTHCPHYS                    HCPhysShwNestedRoot;
    /** @} */

    /** @name Function pointers for Shadow paging.
     * @{
     */
    DECLR3CALLBACKMEMBER(int, pfnR3ShwRelocate,(PVM pVM, RTGCPTR offDelta));
    DECLR3CALLBACKMEMBER(int, pfnR3ShwExit,(PVM pVM));
    DECLR3CALLBACKMEMBER(int, pfnR3ShwGetPage,(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
    DECLR3CALLBACKMEMBER(int, pfnR3ShwModifyPage,(PVM pVM, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));

    DECLRCCALLBACKMEMBER(int, pfnRCShwGetPage,(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
    DECLRCCALLBACKMEMBER(int, pfnRCShwModifyPage,(PVM pVM, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));

    DECLR0CALLBACKMEMBER(int, pfnR0ShwGetPage,(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
    DECLR0CALLBACKMEMBER(int, pfnR0ShwModifyPage,(PVM pVM, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));

    /** @} */

    /** @name Function pointers for Guest paging.
     * @{
     */
    DECLR3CALLBACKMEMBER(int, pfnR3GstRelocate,(PVM pVM, RTGCPTR offDelta));
    DECLR3CALLBACKMEMBER(int, pfnR3GstExit,(PVM pVM));
    DECLR3CALLBACKMEMBER(int, pfnR3GstGetPage,(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
    DECLR3CALLBACKMEMBER(int, pfnR3GstModifyPage,(PVM pVM, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
    DECLR3CALLBACKMEMBER(int, pfnR3GstGetPDE,(PVM pVM, RTGCPTR GCPtr, PX86PDEPAE pPde));
#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    DECLR3CALLBACKMEMBER(int, pfnR3GstMonitorCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
    DECLR3CALLBACKMEMBER(int, pfnR3GstUnmonitorCR3,(PVM pVM));
#endif
#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    R3PTRTYPE(PFNPGMR3PHYSHANDLER)  pfnR3GstWriteHandlerCR3;
    R3PTRTYPE(const char *)         pszR3GstWriteHandlerCR3;
    R3PTRTYPE(PFNPGMR3PHYSHANDLER)  pfnR3GstPAEWriteHandlerCR3;
    R3PTRTYPE(const char *)         pszR3GstPAEWriteHandlerCR3;
#endif
    DECLRCCALLBACKMEMBER(int, pfnRCGstGetPage,(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
    DECLRCCALLBACKMEMBER(int, pfnRCGstModifyPage,(PVM pVM, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
    DECLRCCALLBACKMEMBER(int, pfnRCGstGetPDE,(PVM pVM, RTGCPTR GCPtr, PX86PDEPAE pPde));
#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    DECLRCCALLBACKMEMBER(int, pfnRCGstMonitorCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
    DECLRCCALLBACKMEMBER(int, pfnRCGstUnmonitorCR3,(PVM pVM));
#endif
#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    RCPTRTYPE(PFNPGMRCPHYSHANDLER)  pfnRCGstWriteHandlerCR3;
    RCPTRTYPE(PFNPGMRCPHYSHANDLER)  pfnRCGstPAEWriteHandlerCR3;
#endif
#if HC_ARCH_BITS == 64
    RTRCPTR                         alignment3; /**< structure size alignment. */
#endif

    DECLR0CALLBACKMEMBER(int, pfnR0GstGetPage,(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
    DECLR0CALLBACKMEMBER(int, pfnR0GstModifyPage,(PVM pVM, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
    DECLR0CALLBACKMEMBER(int, pfnR0GstGetPDE,(PVM pVM, RTGCPTR GCPtr, PX86PDEPAE pPde));
#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    DECLR0CALLBACKMEMBER(int, pfnR0GstMonitorCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
    DECLR0CALLBACKMEMBER(int, pfnR0GstUnmonitorCR3,(PVM pVM));
#endif
#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    R0PTRTYPE(PFNPGMRCPHYSHANDLER)  pfnR0GstWriteHandlerCR3;
    R0PTRTYPE(PFNPGMRCPHYSHANDLER)  pfnR0GstPAEWriteHandlerCR3;
#endif
    /** @} */

    /** @name Function pointers for Both Shadow and Guest paging.
     * @{
     */
    DECLR3CALLBACKMEMBER(int, pfnR3BthRelocate,(PVM pVM, RTGCPTR offDelta));
    /* no pfnR3BthTrap0eHandler */
    DECLR3CALLBACKMEMBER(int, pfnR3BthInvalidatePage,(PVM pVM, RTGCPTR GCPtrPage));
    DECLR3CALLBACKMEMBER(int, pfnR3BthSyncCR3,(PVM pVM, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
    DECLR3CALLBACKMEMBER(int, pfnR3BthSyncPage,(PVM pVM, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
    DECLR3CALLBACKMEMBER(int, pfnR3BthPrefetchPage,(PVM pVM, RTGCPTR GCPtrPage));
    DECLR3CALLBACKMEMBER(int, pfnR3BthVerifyAccessSyncPage,(PVM pVM, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
    DECLR3CALLBACKMEMBER(unsigned, pfnR3BthAssertCR3,(PVM pVM, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
    DECLR3CALLBACKMEMBER(int, pfnR3BthMapCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
    DECLR3CALLBACKMEMBER(int, pfnR3BthUnmapCR3,(PVM pVM));

    DECLR0CALLBACKMEMBER(int, pfnR0BthTrap0eHandler,(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
    DECLR0CALLBACKMEMBER(int, pfnR0BthInvalidatePage,(PVM pVM, RTGCPTR GCPtrPage));
    DECLR0CALLBACKMEMBER(int, pfnR0BthSyncCR3,(PVM pVM, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
    DECLR0CALLBACKMEMBER(int, pfnR0BthSyncPage,(PVM pVM, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
    DECLR0CALLBACKMEMBER(int, pfnR0BthPrefetchPage,(PVM pVM, RTGCPTR GCPtrPage));
    DECLR0CALLBACKMEMBER(int, pfnR0BthVerifyAccessSyncPage,(PVM pVM, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
    DECLR0CALLBACKMEMBER(unsigned, pfnR0BthAssertCR3,(PVM pVM, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
    DECLR0CALLBACKMEMBER(int, pfnR0BthMapCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
    DECLR0CALLBACKMEMBER(int, pfnR0BthUnmapCR3,(PVM pVM));

    DECLRCCALLBACKMEMBER(int, pfnRCBthTrap0eHandler,(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
    DECLRCCALLBACKMEMBER(int, pfnRCBthInvalidatePage,(PVM pVM, RTGCPTR GCPtrPage));
    DECLRCCALLBACKMEMBER(int, pfnRCBthSyncCR3,(PVM pVM, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
    DECLRCCALLBACKMEMBER(int, pfnRCBthSyncPage,(PVM pVM, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
    DECLRCCALLBACKMEMBER(int, pfnRCBthPrefetchPage,(PVM pVM, RTGCPTR GCPtrPage));
    DECLRCCALLBACKMEMBER(int, pfnRCBthVerifyAccessSyncPage,(PVM pVM, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
    DECLRCCALLBACKMEMBER(unsigned, pfnRCBthAssertCR3,(PVM pVM, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
    DECLRCCALLBACKMEMBER(int, pfnRCBthMapCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
    DECLRCCALLBACKMEMBER(int, pfnRCBthUnmapCR3,(PVM pVM));
#if HC_ARCH_BITS == 64
    RTRCPTR                         alignment2; /**< structure size alignment. */
#endif
    /** @} */

    /** Pointer to SHW+GST mode data (function pointers).
     * The index into this table is made up from */
    R3PTRTYPE(PPGMMODEDATA)     paModeData;

    /** Pointer to the list of RAM ranges (Phys GC -> Phys HC conversion) - for R3.
     * This is sorted by physical address and contains no overlapping ranges. */
    R3PTRTYPE(PPGMRAMRANGE)     pRamRangesR3;
    /** R0 pointer corresponding to PGM::pRamRangesR3. */
    R0PTRTYPE(PPGMRAMRANGE)     pRamRangesR0;
    /** RC pointer corresponding to PGM::pRamRangesR3. */
    RCPTRTYPE(PPGMRAMRANGE)     pRamRangesRC;
    /** The configured RAM size. */
    RTUINT                      cbRamSize;

    /** Pointer to the list of ROM ranges - for R3.
     * This is sorted by physical address and contains no overlapping ranges. */
    R3PTRTYPE(PPGMROMRANGE)     pRomRangesR3;
    /** R0 pointer corresponding to PGM::pRomRangesR3. */
    R0PTRTYPE(PPGMROMRANGE)     pRomRangesR0;
    /** RC pointer corresponding to PGM::pRomRangesR3. */
    RCPTRTYPE(PPGMROMRANGE)     pRomRangesRC;
    /** Alignment padding. */
    RTRCPTR                     GCPtrPadding2;

    /** Pointer to the list of MMIO2 ranges - for R3.
     * Registration order. */
    R3PTRTYPE(PPGMMMIO2RANGE)   pMmio2RangesR3;

    /** PGM offset based trees - R3 Ptr. */
    R3PTRTYPE(PPGMTREES)        pTreesR3;
    /** PGM offset based trees - R0 Ptr. */
    R0PTRTYPE(PPGMTREES)        pTreesR0;
    /** PGM offset based trees - RC Ptr. */
    RCPTRTYPE(PPGMTREES)        pTreesRC;

    /** Linked list of GC mappings - for RC.
     * The list is sorted ascending on address.
     */
    RCPTRTYPE(PPGMMAPPING)      pMappingsRC;
    /** Linked list of GC mappings - for HC.
     * The list is sorted ascending on address.
     */
    R3PTRTYPE(PPGMMAPPING)      pMappingsR3;
    /** Linked list of GC mappings - for R0.
     * The list is sorted ascending on address.
     */
    R0PTRTYPE(PPGMMAPPING)      pMappingsR0;

    /** Indicates that PGMR3FinalizeMappings has been called and that further
     * PGMR3MapIntermediate calls will be rejected. */
    bool                        fFinalizedMappings;
    /** If set no conflict checks are required. (boolean) */
    bool                        fMappingsFixed;
    /** If set, then no mappings are put into the shadow page table. (boolean) */
    bool                        fDisableMappings;
    /** Size of fixed mapping */
    uint32_t                    cbMappingFixed;
    /** Base address (GC) of fixed mapping */
    RTGCPTR                     GCPtrMappingFixed;
#if HC_ARCH_BITS == 64 && GC_ARCH_BITS == 32
    uint32_t                    u32Padding0;    /**< alignment padding. */
#endif


    /** @name Intermediate Context
     * @{ */
    /** Pointer to the intermediate page directory - Normal. */
    R3PTRTYPE(PX86PD)           pInterPD;
    /** Pointer to the intermediate page tables - Normal.
     * There are two page tables, one for the identity mapping and one for
     * the host context mapping (of the core code). */
    R3PTRTYPE(PX86PT)           apInterPTs[2];
    /** Pointer to the intermediate page tables - PAE. */
    R3PTRTYPE(PX86PTPAE)        apInterPaePTs[2];
    /** Pointer to the intermediate page directories - PAE. */
    R3PTRTYPE(PX86PDPAE)        apInterPaePDs[4];
    /** Pointer to the intermediate page directory pointer table - PAE. */
    R3PTRTYPE(PX86PDPT)         pInterPaePDPT;
    /** Pointer to the intermediate page-map level 4 - AMD64. */
    R3PTRTYPE(PX86PML4)         pInterPaePML4;
    /** Pointer to the intermediate page directory pointer table - AMD64. */
    R3PTRTYPE(PX86PDPT)         pInterPaePDPT64;
    /** The Physical Address (HC) of the intermediate Page Directory - Normal. */
    RTHCPHYS                    HCPhysInterPD;
    /** The Physical Address (HC) of the intermediate Page Directory Pointer Table - PAE. */
    RTHCPHYS                    HCPhysInterPaePDPT;
    /** The Physical Address (HC) of the intermediate Page Map Level 4 table - AMD64. */
    RTHCPHYS                    HCPhysInterPaePML4;
    /** @} */

    /** Base address of the dynamic page mapping area.
     * The array is MM_HYPER_DYNAMIC_SIZE bytes big.
     */
    RCPTRTYPE(uint8_t *)        pbDynPageMapBaseGC;
    /** The index of the last entry used in the dynamic page mapping area. */
    RTUINT                      iDynPageMapLast;
    /** Cache containing the last entries in the dynamic page mapping area.
     * The cache covers half of the mapping area. */
    RTHCPHYS                    aHCPhysDynPageMapCache[MM_HYPER_DYNAMIC_SIZE >> (PAGE_SHIFT + 1)];

    /** The address of the ring-0 mapping cache if we're making use of it. */
    RTR0PTR                     pvR0DynMapUsed;
#if HC_ARCH_BITS == 32
    RTR0PTR                     R0PtrPadding0;  /**< Alignment. */
#endif


    /** 4 MB page mask; 32 or 36 bits, depending on PSE-36. */
    RTGCPHYS                    GCPhys4MBPSEMask;

    /** A20 gate mask.
     * Our current approach to A20 emulation is to let REM do it and not bother
     * anywhere else. The interesting guests will be operating with it enabled anyway.
     * But should the need arise, we'll subject physical addresses to this mask. */
    RTGCPHYS                    GCPhysA20Mask;
    /** A20 gate state - boolean! */
    RTUINT                      fA20Enabled;

    /** What needs syncing (PGM_SYNC_*).
     * This is used to queue operations for PGMSyncCR3, PGMInvalidatePage,
     * PGMFlushTLB, and PGMR3Load. */
    RTUINT                      fSyncFlags;

    /** PGM critical section.
     * This protects the physical & virtual access handlers, ram ranges,
     * and the page flag updating (some of it anyway).
     */
    PDMCRITSECT                 CritSect;

    /** Shadow Page Pool - R3 Ptr. */
    R3PTRTYPE(PPGMPOOL)         pPoolR3;
    /** Shadow Page Pool - R0 Ptr. */
    R0PTRTYPE(PPGMPOOL)         pPoolR0;
    /** Shadow Page Pool - RC Ptr. */
    RCPTRTYPE(PPGMPOOL)         pPoolRC;

    /** We're not in a state which permits writes to guest memory.
     * (Only used in strict builds.) */
    bool                        fNoMorePhysWrites;

    /** Flush the cache on the next access. */
    bool                        fPhysCacheFlushPending;
/** @todo r=bird: Fix member names!*/
    /** PGMPhysRead cache */
    PGMPHYSCACHE                pgmphysreadcache;
    /** PGMPhysWrite cache */
    PGMPHYSCACHE                pgmphyswritecache;

    /**
     * Data associated with managing the ring-3 mappings of the allocation chunks.
     */
    struct
    {
        /** The chunk tree, ordered by chunk id. */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
        R3PTRTYPE(PAVLU32NODECORE)      pTree;
#else
        R3R0PTRTYPE(PAVLU32NODECORE)    pTree;
#endif
        /** The chunk mapping TLB. */
        PGMCHUNKR3MAPTLB                Tlb;
        /** The number of mapped chunks. */
        uint32_t                        c;
        /** The maximum number of mapped chunks.
         * @cfgm    PGM/MaxRing3Chunks */
        uint32_t                        cMax;
        /** The chunk age tree, ordered by ageing sequence number. */
        R3PTRTYPE(PAVLLU32NODECORE)     pAgeTree;
        /** The current time. */
        uint32_t                        iNow;
        /** Number of pgmR3PhysChunkFindUnmapCandidate calls left to the next ageing. */
        uint32_t                        AgeingCountdown;
    } ChunkR3Map;

    /**
     * The page mapping TLB for ring-3 and (for the time being) ring-0.
     */
    PGMPAGER3MAPTLB             PhysTlbHC;

    /** @name The zero page.
     * @{ */
    /** The host physical address of the zero page. */
    RTHCPHYS                    HCPhysZeroPg;
    /** The ring-3 mapping of the zero page. */
    RTR3PTR                     pvZeroPgR3;
    /** The ring-0 mapping of the zero page. */
    RTR0PTR                     pvZeroPgR0;
    /** The GC mapping of the zero page. */
    RTGCPTR                     pvZeroPgGC;
#if GC_ARCH_BITS != 32
    uint32_t                    u32ZeroAlignment;   /**< Alignment padding. */
#endif
    /** @}*/

    /** The number of handy pages. */
    uint32_t                    cHandyPages;
    /**
     * Array of handy pages.
     *
     * This array is used in a two-way communication between pgmPhysAllocPage
     * and GMMR0AllocateHandyPages, with PGMR3PhysAllocateHandyPages serving as
     * an intermediary.
     *
     * The size of this array is important, see pgmPhysEnsureHandyPage for details.
     * (The current size of 32 pages means 128 KB of handy memory.)
     */
    GMMPAGEDESC                 aHandyPages[32];

    /** @name Release Statistics
     * @{ */
    uint32_t                    cAllPages;          /**< The total number of pages. (Should be Private + Shared + Zero.) */
    uint32_t                    cPrivatePages;      /**< The number of private pages. */
    uint32_t                    cSharedPages;       /**< The number of shared pages. */
    uint32_t                    cZeroPages;         /**< The number of zero backed pages. */
    /** The number of times the guest has switched mode since last reset or statistics reset. */
    STAMCOUNTER                 cGuestModeChanges;
    /** The number of times we were forced to change the hypervisor region location. */
    STAMCOUNTER                 cRelocations;
    /** @} */

2737#ifdef VBOX_WITH_STATISTICS /** @todo move this chunk to the heap. */
2738 /** RC: Which statistic this \#PF should be attributed to. */
2739 RCPTRTYPE(PSTAMPROFILE) pStatTrap0eAttributionRC;
2740 RTRCPTR padding0;
2741 /** R0: Which statistic this \#PF should be attributed to. */
2742 R0PTRTYPE(PSTAMPROFILE) pStatTrap0eAttributionR0;
2743 RTR0PTR padding1;
2744
2745 /* Common */
2746# ifdef PGMPOOL_WITH_GCPHYS_TRACKING
2747 STAMCOUNTER StatTrackVirgin; /**< The number of first time shadowings. */
2748 STAMCOUNTER StatTrackAliased; /**< The number of times switching to cRef2, i.e. the page is being shadowed by two PTs. */
2749 STAMCOUNTER StatTrackAliasedMany; /**< The number of times we're tracking using cRef2. */
2750 STAMCOUNTER StatTrackAliasedLots; /**< The number of times we're hitting pages which has overflowed cRef2. */
2751 STAMCOUNTER StatTrackOverflows; /**< The number of times the extent list grows to long. */
2752 STAMPROFILE StatTrackDeref; /**< Profiling of SyncPageWorkerTrackDeref (expensive). */
2753# endif
2754 STAMCOUNTER StatSyncPtPD[X86_PG_ENTRIES]; /**< SyncPT - PD distribution. */
2755 STAMCOUNTER StatSyncPagePD[X86_PG_ENTRIES]; /**< SyncPage - PD distribution. */
2756
2757 /* R3 only: */
2758 STAMCOUNTER StatR3DetectedConflicts; /**< R3: Number of times PGMR3MapHasConflicts() detected a conflict. */
2759 STAMPROFILE StatR3ResolveConflict; /**< R3: pgmR3SyncPTResolveConflict() profiling (includes the entire relocation). */
2760 STAMCOUNTER StatR3GuestPDWrite; /**< R3: The total number of times pgmHCGuestPDWriteHandler() was called. */
2761 STAMCOUNTER StatR3GuestPDWriteConflict; /**< R3: The number of times GuestPDWriteContlict() detected a conflict. */
2762 STAMCOUNTER StatR3DynRamTotal; /**< R3: Allocated MBs of guest ram */
2763 STAMCOUNTER StatR3DynRamGrow; /**< R3: Nr of pgmr3PhysGrowRange calls. */
2764
2765 /* R0 only: */
2766 STAMCOUNTER StatR0DynMapMigrateInvlPg; /**< R0: invlpg in PGMDynMapMigrateAutoSet. */
2767 STAMPROFILE StatR0DynMapGCPageInl; /**< R0: Calls to pgmR0DynMapGCPageInlined. */
2768 STAMCOUNTER StatR0DynMapGCPageInlHits; /**< R0: Hash table lookup hits. */
2769 STAMCOUNTER StatR0DynMapGCPageInlMisses; /**< R0: Misses that falls back to code common with PGMDynMapHCPage. */
2770 STAMCOUNTER StatR0DynMapGCPageInlRamHits; /**< R0: 1st ram range hits. */
2771 STAMCOUNTER StatR0DynMapGCPageInlRamMisses; /**< R0: 1st ram range misses, takes slow path. */
2772 STAMPROFILE StatR0DynMapHCPageInl; /**< R0: Calls to pgmR0DynMapHCPageInlined. */
2773 STAMCOUNTER StatR0DynMapHCPageInlHits; /**< R0: Hash table lookup hits. */
2774 STAMCOUNTER StatR0DynMapHCPageInlMisses; /**< R0: Misses that falls back to code common with PGMDynMapHCPage. */
2775 STAMPROFILE StatR0DynMapHCPage; /**< R0: Calls to PGMDynMapHCPage. */
2776 STAMCOUNTER StatR0DynMapSetOptimize; /**< R0: Calls to pgmDynMapOptimizeAutoSet. */
2777    STAMCOUNTER StatR0DynMapSetSearchFlushes;       /**< R0: Set search resorting to subset flushes. */
2778 STAMCOUNTER StatR0DynMapSetSearchHits; /**< R0: Set search hits. */
2779 STAMCOUNTER StatR0DynMapSetSearchMisses; /**< R0: Set search misses. */
2780 STAMCOUNTER StatR0DynMapPage; /**< R0: Calls to pgmR0DynMapPage. */
2781 STAMCOUNTER StatR0DynMapPageHits0; /**< R0: Hits at iPage+0. */
2782 STAMCOUNTER StatR0DynMapPageHits1; /**< R0: Hits at iPage+1. */
2783 STAMCOUNTER StatR0DynMapPageHits2; /**< R0: Hits at iPage+2. */
2784 STAMCOUNTER StatR0DynMapPageInvlPg; /**< R0: invlpg. */
2785 STAMCOUNTER StatR0DynMapPageSlow; /**< R0: Calls to pgmR0DynMapPageSlow. */
2786 STAMCOUNTER StatR0DynMapPageSlowLoopHits; /**< R0: Hits in the pgmR0DynMapPageSlow search loop. */
2787 STAMCOUNTER StatR0DynMapPageSlowLoopMisses; /**< R0: Misses in the pgmR0DynMapPageSlow search loop. */
2788 //STAMCOUNTER StatR0DynMapPageSlowLostHits; /**< R0: Lost hits. */
2789 STAMCOUNTER StatR0DynMapSubsets; /**< R0: Times PGMDynMapPushAutoSubset was called. */
2790 STAMCOUNTER StatR0DynMapPopFlushes; /**< R0: Times PGMDynMapPopAutoSubset flushes the subset. */
2791 STAMCOUNTER aStatR0DynMapSetSize[11]; /**< R0: Set size distribution. */
2792
2793 /* RC only: */
2794    STAMCOUNTER StatRCDynMapCacheMisses;            /**< RC: The number of dynamic page mapping cache misses. */
2795    STAMCOUNTER StatRCDynMapCacheHits;              /**< RC: The number of dynamic page mapping cache hits. */
2796 STAMCOUNTER StatRCInvlPgConflict; /**< RC: Number of times PGMInvalidatePage() detected a mapping conflict. */
2797 STAMCOUNTER StatRCInvlPgSyncMonCR3; /**< RC: Number of times PGMInvalidatePage() ran into PGM_SYNC_MONITOR_CR3. */
2798
2799 /* RZ only: */
2800 STAMPROFILE StatRZTrap0e; /**< RC/R0: PGMTrap0eHandler() profiling. */
2801 STAMPROFILE StatRZTrap0eTimeCheckPageFault;
2802 STAMPROFILE StatRZTrap0eTimeSyncPT;
2803 STAMPROFILE StatRZTrap0eTimeMapping;
2804 STAMPROFILE StatRZTrap0eTimeOutOfSync;
2805 STAMPROFILE StatRZTrap0eTimeHandlers;
2806 STAMPROFILE StatRZTrap0eTime2CSAM; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is CSAM. */
2807 STAMPROFILE StatRZTrap0eTime2DirtyAndAccessed; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is dirty and/or accessed bit emulation. */
2808 STAMPROFILE StatRZTrap0eTime2GuestTrap; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is a guest trap. */
2809 STAMPROFILE StatRZTrap0eTime2HndPhys; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is a physical handler. */
2810 STAMPROFILE StatRZTrap0eTime2HndVirt; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is a virtual handler. */
2811 STAMPROFILE StatRZTrap0eTime2HndUnhandled; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is access outside the monitored areas of a monitored page. */
2812 STAMPROFILE StatRZTrap0eTime2Misc; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is not known. */
2813 STAMPROFILE StatRZTrap0eTime2OutOfSync; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is an out-of-sync page. */
2814 STAMPROFILE StatRZTrap0eTime2OutOfSyncHndPhys; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is an out-of-sync physical handler page. */
2815 STAMPROFILE StatRZTrap0eTime2OutOfSyncHndVirt; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is an out-of-sync virtual handler page. */
2816 STAMPROFILE StatRZTrap0eTime2OutOfSyncHndObs; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is an obsolete handler page. */
2817 STAMPROFILE StatRZTrap0eTime2SyncPT; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is lazy syncing of a PT. */
2818 STAMCOUNTER StatRZTrap0eConflicts; /**< RC/R0: The number of times \#PF was caused by an undetected conflict. */
2819 STAMCOUNTER StatRZTrap0eHandlersMapping; /**< RC/R0: Number of traps due to access handlers in mappings. */
2820 STAMCOUNTER StatRZTrap0eHandlersOutOfSync; /**< RC/R0: Number of out-of-sync handled pages. */
2821 STAMCOUNTER StatRZTrap0eHandlersPhysical; /**< RC/R0: Number of traps due to physical access handlers. */
2822 STAMCOUNTER StatRZTrap0eHandlersVirtual; /**< RC/R0: Number of traps due to virtual access handlers. */
2823 STAMCOUNTER StatRZTrap0eHandlersVirtualByPhys; /**< RC/R0: Number of traps due to virtual access handlers found by physical address. */
2824 STAMCOUNTER StatRZTrap0eHandlersVirtualUnmarked;/**< RC/R0: Number of traps due to virtual access handlers found by virtual address (without proper physical flags). */
2825 STAMCOUNTER StatRZTrap0eHandlersUnhandled; /**< RC/R0: Number of traps due to access outside range of monitored page(s). */
2826 STAMCOUNTER StatRZTrap0eHandlersInvalid; /**< RC/R0: Number of traps due to access to invalid physical memory. */
2827 STAMCOUNTER StatRZTrap0eUSNotPresentRead; /**< RC/R0: #PF err kind */
2828 STAMCOUNTER StatRZTrap0eUSNotPresentWrite; /**< RC/R0: #PF err kind */
2829 STAMCOUNTER StatRZTrap0eUSWrite; /**< RC/R0: #PF err kind */
2830 STAMCOUNTER StatRZTrap0eUSReserved; /**< RC/R0: #PF err kind */
2831 STAMCOUNTER StatRZTrap0eUSNXE; /**< RC/R0: #PF err kind */
2832 STAMCOUNTER StatRZTrap0eUSRead; /**< RC/R0: #PF err kind */
2833 STAMCOUNTER StatRZTrap0eSVNotPresentRead; /**< RC/R0: #PF err kind */
2834 STAMCOUNTER StatRZTrap0eSVNotPresentWrite; /**< RC/R0: #PF err kind */
2835 STAMCOUNTER StatRZTrap0eSVWrite; /**< RC/R0: #PF err kind */
2836 STAMCOUNTER StatRZTrap0eSVReserved; /**< RC/R0: #PF err kind */
2837 STAMCOUNTER StatRZTrap0eSNXE; /**< RC/R0: #PF err kind */
2838 STAMCOUNTER StatRZTrap0eGuestPF; /**< RC/R0: Real guest #PFs. */
2839 STAMCOUNTER StatRZTrap0eGuestPFUnh; /**< RC/R0: Real guest #PF ending up at the end of the #PF code. */
2840 STAMCOUNTER StatRZTrap0eGuestPFMapping; /**< RC/R0: Real guest #PF to HMA or other mapping. */
2841 STAMCOUNTER StatRZTrap0eWPEmulInRZ; /**< RC/R0: WP=0 virtualization trap, handled. */
2842 STAMCOUNTER StatRZTrap0eWPEmulToR3; /**< RC/R0: WP=0 virtualization trap, chickened out. */
2843 STAMCOUNTER StatRZTrap0ePD[X86_PG_ENTRIES]; /**< RC/R0: PD distribution of the #PFs. */
2844 STAMCOUNTER StatRZGuestCR3WriteHandled; /**< RC/R0: The number of times WriteHandlerCR3() was successfully called. */
2845 STAMCOUNTER StatRZGuestCR3WriteUnhandled; /**< RC/R0: The number of times WriteHandlerCR3() was called and we had to fall back to the recompiler. */
2846 STAMCOUNTER StatRZGuestCR3WriteConflict; /**< RC/R0: The number of times WriteHandlerCR3() was called and a conflict was detected. */
2847 STAMCOUNTER StatRZGuestROMWriteHandled; /**< RC/R0: The number of times pgmPhysRomWriteHandler() was successfully called. */
2848    STAMCOUNTER StatRZGuestROMWriteUnhandled;       /**< RC/R0: The number of times pgmPhysRomWriteHandler() was called and we had to fall back to the recompiler. */
2849
2850 /* HC - R3 and (maybe) R0: */
2851
2852 /* RZ & R3: */
2853 STAMPROFILE StatRZSyncCR3; /**< RC/R0: PGMSyncCR3() profiling. */
2854 STAMPROFILE StatRZSyncCR3Handlers; /**< RC/R0: Profiling of the PGMSyncCR3() update handler section. */
2855 STAMPROFILE StatRZSyncCR3HandlerVirtualReset; /**< RC/R0: Profiling of the virtual handler resets. */
2856 STAMPROFILE StatRZSyncCR3HandlerVirtualUpdate; /**< RC/R0: Profiling of the virtual handler updates. */
2857 STAMCOUNTER StatRZSyncCR3Global; /**< RC/R0: The number of global CR3 syncs. */
2858 STAMCOUNTER StatRZSyncCR3NotGlobal; /**< RC/R0: The number of non-global CR3 syncs. */
2859 STAMCOUNTER StatRZSyncCR3DstCacheHit; /**< RC/R0: The number of times we got some kind of cache hit on a page table. */
2860 STAMCOUNTER StatRZSyncCR3DstFreed; /**< RC/R0: The number of times we've had to free a shadow entry. */
2861 STAMCOUNTER StatRZSyncCR3DstFreedSrcNP; /**< RC/R0: The number of times we've had to free a shadow entry for which the source entry was not present. */
2862 STAMCOUNTER StatRZSyncCR3DstNotPresent; /**< RC/R0: The number of times we've encountered a not present shadow entry for a present guest entry. */
2863 STAMCOUNTER StatRZSyncCR3DstSkippedGlobalPD; /**< RC/R0: The number of times a global page directory wasn't flushed. */
2864 STAMCOUNTER StatRZSyncCR3DstSkippedGlobalPT; /**< RC/R0: The number of times a page table with only global entries wasn't flushed. */
2865 STAMPROFILE StatRZSyncPT; /**< RC/R0: PGMSyncPT() profiling. */
2866 STAMCOUNTER StatRZSyncPTFailed; /**< RC/R0: The number of times PGMSyncPT() failed. */
2867 STAMCOUNTER StatRZSyncPT4K; /**< RC/R0: Number of 4KB syncs. */
2868 STAMCOUNTER StatRZSyncPT4M; /**< RC/R0: Number of 4MB syncs. */
2869    STAMCOUNTER StatRZSyncPagePDNAs;                /**< RC/R0: The number of times we've marked a PD not present from SyncPage to virtualize the accessed bit. */
2870    STAMCOUNTER StatRZSyncPagePDOutOfSync;          /**< RC/R0: The number of times we've encountered an out-of-sync PD in SyncPage. */
2871 STAMCOUNTER StatRZAccessedPage; /**< RC/R0: The number of pages marked not present for accessed bit emulation. */
2872    STAMPROFILE StatRZDirtyBitTracking;             /**< RC/R0: Profiling the dirty bit tracking in CheckPageFault(). */
2873 STAMCOUNTER StatRZDirtyPage; /**< RC/R0: The number of pages marked read-only for dirty bit tracking. */
2874    STAMCOUNTER StatRZDirtyPageBig;                 /**< RC/R0: The number of big (4MB) pages marked read-only for dirty bit tracking. */
2875 STAMCOUNTER StatRZDirtyPageSkipped; /**< RC/R0: The number of pages already dirty or readonly. */
2876 STAMCOUNTER StatRZDirtyPageTrap; /**< RC/R0: The number of traps generated for dirty bit tracking. */
2877    STAMCOUNTER StatRZDirtyTrackRealPF;             /**< RC/R0: The number of real page faults during dirty bit tracking. */
2878 STAMCOUNTER StatRZDirtiedPage; /**< RC/R0: The number of pages marked dirty because of write accesses. */
2879 STAMCOUNTER StatRZPageAlreadyDirty; /**< RC/R0: The number of pages already marked dirty because of write accesses. */
2880 STAMPROFILE StatRZInvalidatePage; /**< RC/R0: PGMInvalidatePage() profiling. */
2881 STAMCOUNTER StatRZInvalidatePage4KBPages; /**< RC/R0: The number of times PGMInvalidatePage() was called for a 4KB page. */
2882 STAMCOUNTER StatRZInvalidatePage4MBPages; /**< RC/R0: The number of times PGMInvalidatePage() was called for a 4MB page. */
2883 STAMCOUNTER StatRZInvalidatePage4MBPagesSkip; /**< RC/R0: The number of times PGMInvalidatePage() skipped a 4MB page. */
2884 STAMCOUNTER StatRZInvalidatePagePDMappings; /**< RC/R0: The number of times PGMInvalidatePage() was called for a page directory containing mappings (no conflict). */
2885 STAMCOUNTER StatRZInvalidatePagePDNAs; /**< RC/R0: The number of times PGMInvalidatePage() was called for a not accessed page directory. */
2886 STAMCOUNTER StatRZInvalidatePagePDNPs; /**< RC/R0: The number of times PGMInvalidatePage() was called for a not present page directory. */
2887 STAMCOUNTER StatRZInvalidatePagePDOutOfSync; /**< RC/R0: The number of times PGMInvalidatePage() was called for an out of sync page directory. */
2888    STAMCOUNTER StatRZInvalidatePageSkipped;        /**< RC/R0: The number of times PGMInvalidatePage() was skipped due to not present shw or pending SyncCR3. */
2889 STAMPROFILE StatRZVirtHandlerSearchByPhys; /**< RC/R0: Profiling of pgmHandlerVirtualFindByPhysAddr. */
2890 STAMCOUNTER StatRZPhysHandlerReset; /**< RC/R0: The number of times PGMHandlerPhysicalReset is called. */
2891    STAMCOUNTER StatRZPageOutOfSyncUser;            /**< RC/R0: The number of times an out-of-sync user page was detected in #PF or VerifyAccessSyncPage. */
2892    STAMCOUNTER StatRZPageOutOfSyncSupervisor;      /**< RC/R0: The number of times an out-of-sync supervisor page was detected in #PF or VerifyAccessSyncPage. */
2893 STAMPROFILE StatRZPrefetch; /**< RC/R0: PGMPrefetchPage. */
2894 STAMCOUNTER StatRZChunkR3MapTlbHits; /**< RC/R0: Ring-3/0 chunk mapper TLB hits. */
2895 STAMCOUNTER StatRZChunkR3MapTlbMisses; /**< RC/R0: Ring-3/0 chunk mapper TLB misses. */
2896 STAMCOUNTER StatRZPageMapTlbHits; /**< RC/R0: Ring-3/0 page mapper TLB hits. */
2897 STAMCOUNTER StatRZPageMapTlbMisses; /**< RC/R0: Ring-3/0 page mapper TLB misses. */
2898 STAMCOUNTER StatRZPageReplaceShared; /**< RC/R0: Times a shared page has been replaced by a private one. */
2899 STAMCOUNTER StatRZPageReplaceZero; /**< RC/R0: Times the zero page has been replaced by a private one. */
2900/// @todo STAMCOUNTER StatRZPageHandyAllocs; /**< RC/R0: The number of times we've executed GMMR3AllocateHandyPages. */
2901 STAMPROFILE StatRZFlushTLB; /**< RC/R0: Profiling of the PGMFlushTLB() body. */
2902 STAMCOUNTER StatRZFlushTLBNewCR3; /**< RC/R0: The number of times PGMFlushTLB was called with a new CR3, non-global. (switch) */
2903 STAMCOUNTER StatRZFlushTLBNewCR3Global; /**< RC/R0: The number of times PGMFlushTLB was called with a new CR3, global. (switch) */
2904 STAMCOUNTER StatRZFlushTLBSameCR3; /**< RC/R0: The number of times PGMFlushTLB was called with the same CR3, non-global. (flush) */
2905 STAMCOUNTER StatRZFlushTLBSameCR3Global; /**< RC/R0: The number of times PGMFlushTLB was called with the same CR3, global. (flush) */
2906 STAMPROFILE StatRZGstModifyPage; /**< RC/R0: Profiling of the PGMGstModifyPage() body */
2907
2908 STAMPROFILE StatR3SyncCR3; /**< R3: PGMSyncCR3() profiling. */
2909 STAMPROFILE StatR3SyncCR3Handlers; /**< R3: Profiling of the PGMSyncCR3() update handler section. */
2910 STAMPROFILE StatR3SyncCR3HandlerVirtualReset; /**< R3: Profiling of the virtual handler resets. */
2911 STAMPROFILE StatR3SyncCR3HandlerVirtualUpdate; /**< R3: Profiling of the virtual handler updates. */
2912 STAMCOUNTER StatR3SyncCR3Global; /**< R3: The number of global CR3 syncs. */
2913 STAMCOUNTER StatR3SyncCR3NotGlobal; /**< R3: The number of non-global CR3 syncs. */
2914 STAMCOUNTER StatR3SyncCR3DstFreed; /**< R3: The number of times we've had to free a shadow entry. */
2915 STAMCOUNTER StatR3SyncCR3DstFreedSrcNP; /**< R3: The number of times we've had to free a shadow entry for which the source entry was not present. */
2916 STAMCOUNTER StatR3SyncCR3DstNotPresent; /**< R3: The number of times we've encountered a not present shadow entry for a present guest entry. */
2917 STAMCOUNTER StatR3SyncCR3DstSkippedGlobalPD; /**< R3: The number of times a global page directory wasn't flushed. */
2918 STAMCOUNTER StatR3SyncCR3DstSkippedGlobalPT; /**< R3: The number of times a page table with only global entries wasn't flushed. */
2919 STAMCOUNTER StatR3SyncCR3DstCacheHit; /**< R3: The number of times we got some kind of cache hit on a page table. */
2920 STAMPROFILE StatR3SyncPT; /**< R3: PGMSyncPT() profiling. */
2921 STAMCOUNTER StatR3SyncPTFailed; /**< R3: The number of times PGMSyncPT() failed. */
2922 STAMCOUNTER StatR3SyncPT4K; /**< R3: Number of 4KB syncs. */
2923 STAMCOUNTER StatR3SyncPT4M; /**< R3: Number of 4MB syncs. */
2924    STAMCOUNTER StatR3SyncPagePDNAs;                /**< R3: The number of times we've marked a PD not present from SyncPage to virtualize the accessed bit. */
2925    STAMCOUNTER StatR3SyncPagePDOutOfSync;          /**< R3: The number of times we've encountered an out-of-sync PD in SyncPage. */
2926 STAMCOUNTER StatR3AccessedPage; /**< R3: The number of pages marked not present for accessed bit emulation. */
2927 STAMPROFILE StatR3DirtyBitTracking; /**< R3: Profiling the dirty bit tracking in CheckPageFault(). */
2928 STAMCOUNTER StatR3DirtyPage; /**< R3: The number of pages marked read-only for dirty bit tracking. */
2929    STAMCOUNTER StatR3DirtyPageBig;                 /**< R3: The number of big (4MB) pages marked read-only for dirty bit tracking. */
2930 STAMCOUNTER StatR3DirtyPageSkipped; /**< R3: The number of pages already dirty or readonly. */
2931 STAMCOUNTER StatR3DirtyPageTrap; /**< R3: The number of traps generated for dirty bit tracking. */
2932    STAMCOUNTER StatR3DirtyTrackRealPF;             /**< R3: The number of real page faults during dirty bit tracking. */
2933 STAMCOUNTER StatR3DirtiedPage; /**< R3: The number of pages marked dirty because of write accesses. */
2934 STAMCOUNTER StatR3PageAlreadyDirty; /**< R3: The number of pages already marked dirty because of write accesses. */
2935 STAMPROFILE StatR3InvalidatePage; /**< R3: PGMInvalidatePage() profiling. */
2936 STAMCOUNTER StatR3InvalidatePage4KBPages; /**< R3: The number of times PGMInvalidatePage() was called for a 4KB page. */
2937 STAMCOUNTER StatR3InvalidatePage4MBPages; /**< R3: The number of times PGMInvalidatePage() was called for a 4MB page. */
2938 STAMCOUNTER StatR3InvalidatePage4MBPagesSkip; /**< R3: The number of times PGMInvalidatePage() skipped a 4MB page. */
2939 STAMCOUNTER StatR3InvalidatePagePDNAs; /**< R3: The number of times PGMInvalidatePage() was called for a not accessed page directory. */
2940 STAMCOUNTER StatR3InvalidatePagePDNPs; /**< R3: The number of times PGMInvalidatePage() was called for a not present page directory. */
2941 STAMCOUNTER StatR3InvalidatePagePDMappings; /**< R3: The number of times PGMInvalidatePage() was called for a page directory containing mappings (no conflict). */
2942 STAMCOUNTER StatR3InvalidatePagePDOutOfSync; /**< R3: The number of times PGMInvalidatePage() was called for an out of sync page directory. */
2943    STAMCOUNTER StatR3InvalidatePageSkipped;        /**< R3: The number of times PGMInvalidatePage() was skipped due to not present shw or pending SyncCR3. */
2944 STAMPROFILE StatR3VirtHandlerSearchByPhys; /**< R3: Profiling of pgmHandlerVirtualFindByPhysAddr. */
2945 STAMCOUNTER StatR3PhysHandlerReset; /**< R3: The number of times PGMHandlerPhysicalReset is called. */
2946    STAMCOUNTER StatR3PageOutOfSyncUser;            /**< R3: The number of times an out-of-sync user page was detected in #PF or VerifyAccessSyncPage. */
2947    STAMCOUNTER StatR3PageOutOfSyncSupervisor;      /**< R3: The number of times an out-of-sync supervisor page was detected in #PF or VerifyAccessSyncPage. */
2948 STAMPROFILE StatR3Prefetch; /**< R3: PGMPrefetchPage. */
2949 STAMCOUNTER StatR3ChunkR3MapTlbHits; /**< R3: Ring-3/0 chunk mapper TLB hits. */
2950 STAMCOUNTER StatR3ChunkR3MapTlbMisses; /**< R3: Ring-3/0 chunk mapper TLB misses. */
2951 STAMCOUNTER StatR3PageMapTlbHits; /**< R3: Ring-3/0 page mapper TLB hits. */
2952 STAMCOUNTER StatR3PageMapTlbMisses; /**< R3: Ring-3/0 page mapper TLB misses. */
2953 STAMCOUNTER StatR3PageReplaceShared; /**< R3: Times a shared page has been replaced by a private one. */
2954 STAMCOUNTER StatR3PageReplaceZero; /**< R3: Times the zero page has been replaced by a private one. */
2955/// @todo STAMCOUNTER StatR3PageHandyAllocs; /**< R3: The number of times we've executed GMMR3AllocateHandyPages. */
2956 STAMPROFILE StatR3FlushTLB; /**< R3: Profiling of the PGMFlushTLB() body. */
2957 STAMCOUNTER StatR3FlushTLBNewCR3; /**< R3: The number of times PGMFlushTLB was called with a new CR3, non-global. (switch) */
2958 STAMCOUNTER StatR3FlushTLBNewCR3Global; /**< R3: The number of times PGMFlushTLB was called with a new CR3, global. (switch) */
2959 STAMCOUNTER StatR3FlushTLBSameCR3; /**< R3: The number of times PGMFlushTLB was called with the same CR3, non-global. (flush) */
2960 STAMCOUNTER StatR3FlushTLBSameCR3Global; /**< R3: The number of times PGMFlushTLB was called with the same CR3, global. (flush) */
2961 STAMPROFILE StatR3GstModifyPage; /**< R3: Profiling of the PGMGstModifyPage() body */
2962#endif /* VBOX_WITH_STATISTICS */
2963} PGM;
2964/** Pointer to the PGM instance data. */
2965typedef PGM *PPGM;
2966
2967
2968/**
2969 * PGMCPU Data (part of VMCPU).
2970 */
2971typedef struct PGMCPU
2972{
2973 /** Offset to the VMCPU structure. */
2974 RTINT offVMCPU;
2975 /** Automatically tracked physical memory mapping set.
2976 * Ring-0 and strict raw-mode builds. */
2977 PGMMAPSET AutoSet;
2978} PGMCPU;
2979/** Pointer to the per-cpu PGM data. */
2980typedef PGMCPU *PPGMCPU;
2981
2982
2983/** @name PGM::fSyncFlags Flags
2984 * @{
2985 */
2986/** Updates the virtual access handler state bit in PGMPAGE. */
2987#define PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL RT_BIT(0)
2988/** Always sync CR3. */
2989#define PGM_SYNC_ALWAYS RT_BIT(1)
2990/** Check monitoring on next CR3 (re)load and invalidate page. */
2991#define PGM_SYNC_MONITOR_CR3 RT_BIT(2)
2992/** Check guest mapping in SyncCR3. */
2993#define PGM_SYNC_MAP_CR3 RT_BIT(3)
2994/** Clear the page pool (a light weight flush). */
2995#define PGM_SYNC_CLEAR_PGM_POOL RT_BIT(8)
2996/** @} */
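
/*
 * Illustrative sketch (hypothetical call site): how one of these sync flags is
 * typically tested and consumed, assuming the usual pVM->pgm.s access pattern
 * used elsewhere in PGM.
 *
 *      if (pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)
 *      {
 *          pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_CLEAR_PGM_POOL;
 *          pgmPoolClearAll(pVM);
 *      }
 */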
2997
2998
2999__BEGIN_DECLS
3000
3001int pgmLock(PVM pVM);
3002void pgmUnlock(PVM pVM);
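
/*
 * Illustrative sketch (hypothetical call site): the expected lock/unlock
 * bracket around any access to shared PGM state.
 *
 *      int rc = pgmLock(pVM);
 *      AssertRC(rc);
 *      ... read or update PGM state ...
 *      pgmUnlock(pVM);
 */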
3003
3004VMMRCDECL(int) pgmGCGuestPDWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, void *pvFault, RTGCPHYS GCPhysFault, void *pvUser);
3005VMMDECL(int) pgmPhysRomWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, void *pvFault, RTGCPHYS GCPhysFault, void *pvUser);
3006
3007int pgmR3SyncPTResolveConflict(PVM pVM, PPGMMAPPING pMapping, PX86PD pPDSrc, RTGCPTR GCPtrOldMapping);
3008int pgmR3SyncPTResolveConflictPAE(PVM pVM, PPGMMAPPING pMapping, RTGCPTR GCPtrOldMapping);
3009PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr);
3010void pgmR3MapRelocate(PVM pVM, PPGMMAPPING pMapping, RTGCPTR GCPtrOldMapping, RTGCPTR GCPtrNewMapping);
3011DECLCALLBACK(void) pgmR3MapInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
3012
3013void pgmR3HandlerPhysicalUpdateAll(PVM pVM);
3014bool pgmHandlerPhysicalIsAll(PVM pVM, RTGCPHYS GCPhys);
3015int pgmHandlerVirtualFindByPhysAddr(PVM pVM, RTGCPHYS GCPhys, PPGMVIRTHANDLER *ppVirt, unsigned *piPage);
3016DECLCALLBACK(int) pgmHandlerVirtualResetOne(PAVLROGCPTRNODECORE pNode, void *pvUser);
3017#if defined(VBOX_STRICT) || defined(LOG_ENABLED)
3018void pgmHandlerVirtualDumpPhysPages(PVM pVM);
3019#else
3020# define pgmHandlerVirtualDumpPhysPages(a) do { } while (0)
3021#endif
3022DECLCALLBACK(void) pgmR3InfoHandlers(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
3023
3024
3025void pgmPhysFreePage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys);
3026int pgmPhysPageLoadIntoTlb(PPGM pPGM, RTGCPHYS GCPhys);
3027int pgmPhysPageMakeWritable(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys);
3028int pgmPhysPageMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv);
3029#ifdef IN_RING3
3030int pgmR3PhysChunkMap(PVM pVM, uint32_t idChunk, PPPGMCHUNKR3MAP ppChunk);
3031int pgmR3PhysRamReset(PVM pVM);
3032int pgmR3PhysRomReset(PVM pVM);
3033# ifndef VBOX_WITH_NEW_PHYS_CODE
3034int pgmr3PhysGrowRange(PVM pVM, RTGCPHYS GCPhys);
3035# endif
3036
3037int pgmR3PoolInit(PVM pVM);
3038void pgmR3PoolRelocate(PVM pVM);
3039void pgmR3PoolReset(PVM pVM);
3040
3041#endif /* IN_RING3 */
3042#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3043int pgmR0DynMapHCPageCommon(PVM pVM, PPGMMAPSET pSet, RTHCPHYS HCPhys, void **ppv);
3044#endif
3045#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
3046void *pgmPoolMapPageFallback(PPGM pPGM, PPGMPOOLPAGE pPage);
3047#endif
3048int pgmPoolAlloc(PVM pVM, RTGCPHYS GCPhys, PGMPOOLKIND enmKind, uint16_t iUser, uint32_t iUserTable, PPPGMPOOLPAGE ppPage);
3049PPGMPOOLPAGE pgmPoolGetPageByHCPhys(PVM pVM, RTHCPHYS HCPhys);
3050void pgmPoolFree(PVM pVM, RTHCPHYS HCPhys, uint16_t iUser, uint32_t iUserTable);
3051void pgmPoolFreeByPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage, uint16_t iUser, uint32_t iUserTable);
3052int pgmPoolFlushPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage);
3053void pgmPoolFlushAll(PVM pVM);
3054void pgmPoolClearAll(PVM pVM);
3055int pgmPoolSyncCR3(PVM pVM);
3056void pgmPoolTrackFlushGCPhysPT(PVM pVM, PPGMPAGE pPhysPage, uint16_t iShw, uint16_t cRefs);
3057void pgmPoolTrackFlushGCPhysPTs(PVM pVM, PPGMPAGE pPhysPage, uint16_t iPhysExt);
3058int pgmPoolTrackFlushGCPhysPTsSlow(PVM pVM, PPGMPAGE pPhysPage);
3059PPGMPOOLPHYSEXT pgmPoolTrackPhysExtAlloc(PVM pVM, uint16_t *piPhysExt);
3060void pgmPoolTrackPhysExtFree(PVM pVM, uint16_t iPhysExt);
3061void pgmPoolTrackPhysExtFreeList(PVM pVM, uint16_t iPhysExt);
3062uint16_t pgmPoolTrackPhysExtAddref(PVM pVM, uint16_t u16, uint16_t iShwPT);
3063void pgmPoolTrackPhysExtDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPoolPage, PPGMPAGE pPhysPage);
3064#ifdef PGMPOOL_WITH_MONITORING
3065void pgmPoolMonitorChainChanging(PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTGCPHYS GCPhysFault, CTXTYPE(RTGCPTR, RTHCPTR, RTGCPTR) pvAddress, PDISCPUSTATE pCpu);
3066int pgmPoolMonitorChainFlush(PPGMPOOL pPool, PPGMPOOLPAGE pPage);
3067void pgmPoolMonitorModifiedInsert(PPGMPOOL pPool, PPGMPOOLPAGE pPage);
3068void pgmPoolMonitorModifiedClearAll(PVM pVM);
3069int pgmPoolMonitorMonitorCR3(PPGMPOOL pPool, uint16_t idxRoot, RTGCPHYS GCPhysCR3);
3070int pgmPoolMonitorUnmonitorCR3(PPGMPOOL pPool, uint16_t idxRoot);
3071#endif
3072
3073#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
3074void pgmMapClearShadowPDEs(PVM pVM, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iOldPDE);
3075void pgmMapSetShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE);
3076int pgmShwSyncPaePDPtr(PVM pVM, RTGCPTR GCPtr, PX86PDPE pGstPdpe, PX86PDPAE *ppPD);
3077#endif
3078int pgmMapDeactivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3);
3079int pgmMapActivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3);
3080
3081#ifndef IN_RC
3082int pgmShwSyncLongModePDPtr(PVM pVM, RTGCPTR64 GCPtr, PX86PML4E pGstPml4e, PX86PDPE pGstPdpe, PX86PDPAE *ppPD);
3083#endif
3084int pgmShwGetEPTPDPtr(PVM pVM, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD);
3085
3086__END_DECLS
3087
3088
3089/**
3090 * Gets the PGMRAMRANGE structure for a guest page.
3091 *
3092 * @returns Pointer to the RAM range on success.
3093 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
3094 *
3095 * @param pPGM PGM handle.
3096 * @param GCPhys The GC physical address.
3097 */
3098DECLINLINE(PPGMRAMRANGE) pgmPhysGetRange(PPGM pPGM, RTGCPHYS GCPhys)
3099{
3100 /*
3101 * Optimize for the first range.
3102 */
3103 PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
3104 RTGCPHYS off = GCPhys - pRam->GCPhys;
3105 if (RT_UNLIKELY(off >= pRam->cb))
3106 {
3107 do
3108 {
3109 pRam = pRam->CTX_SUFF(pNext);
3110 if (RT_UNLIKELY(!pRam))
3111 break;
3112 off = GCPhys - pRam->GCPhys;
3113 } while (off >= pRam->cb);
3114 }
3115 return pRam;
3116}
3117
3118
3119/**
3120 * Gets the PGMPAGE structure for a guest page.
3121 *
3122 * @returns Pointer to the page on success.
3123 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
3124 *
3125 * @param pPGM PGM handle.
3126 * @param GCPhys The GC physical address.
3127 */
3128DECLINLINE(PPGMPAGE) pgmPhysGetPage(PPGM pPGM, RTGCPHYS GCPhys)
3129{
3130 /*
3131 * Optimize for the first range.
3132 */
3133 PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
3134 RTGCPHYS off = GCPhys - pRam->GCPhys;
3135 if (RT_UNLIKELY(off >= pRam->cb))
3136 {
3137 do
3138 {
3139 pRam = pRam->CTX_SUFF(pNext);
3140 if (RT_UNLIKELY(!pRam))
3141 return NULL;
3142 off = GCPhys - pRam->GCPhys;
3143 } while (off >= pRam->cb);
3144 }
3145 return &pRam->aPages[off >> PAGE_SHIFT];
3146}
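
/*
 * Minimal usage sketch for pgmPhysGetPage (hypothetical caller): NULL signals
 * an invalid guest physical address and must be checked before the PGMPAGE is
 * touched.
 *
 *      PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
 *      if (!pPage)
 *          return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
 *      RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
 */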
3147
3148
3149/**
3150 * Gets the PGMPAGE structure for a guest page.
3151 *
3152 * Old Phys code: Will make sure the page is present.
3153 *
3154 * @returns VBox status code.
3155 * @retval VINF_SUCCESS and a valid *ppPage on success.
3156 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
3157 *
3158 * @param pPGM PGM handle.
3159 * @param GCPhys The GC physical address.
3160 * @param   ppPage      Where to store the page pointer on success.
3161 */
3162DECLINLINE(int) pgmPhysGetPageEx(PPGM pPGM, RTGCPHYS GCPhys, PPPGMPAGE ppPage)
3163{
3164 /*
3165 * Optimize for the first range.
3166 */
3167 PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
3168 RTGCPHYS off = GCPhys - pRam->GCPhys;
3169 if (RT_UNLIKELY(off >= pRam->cb))
3170 {
3171 do
3172 {
3173 pRam = pRam->CTX_SUFF(pNext);
3174 if (RT_UNLIKELY(!pRam))
3175 {
3176 *ppPage = NULL; /* avoid incorrect and very annoying GCC warnings */
3177 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
3178 }
3179 off = GCPhys - pRam->GCPhys;
3180 } while (off >= pRam->cb);
3181 }
3182 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
3183#ifndef VBOX_WITH_NEW_PHYS_CODE
3184
3185 /*
3186 * Make sure it's present.
3187 */
3188 if (RT_UNLIKELY( !PGM_PAGE_GET_HCPHYS(*ppPage)
3189 && (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)))
3190 {
3191#ifdef IN_RING3
3192 int rc = pgmr3PhysGrowRange(PGM2VM(pPGM), GCPhys);
3193#else
3194 int rc = CTXALLMID(VMM, CallHost)(PGM2VM(pPGM), VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
3195#endif
3196 if (RT_FAILURE(rc))
3197 {
3198 *ppPage = NULL; /* avoid incorrect and very annoying GCC warnings */
3199 return rc;
3200 }
3201 Assert(rc == VINF_SUCCESS);
3202 }
3203#endif
3204 return VINF_SUCCESS;
3205}
3206
3207
3210/**
3211 * Gets the PGMPAGE structure for a guest page.
3212 *
3213 * Old Phys code: Will make sure the page is present.
3214 *
3215 * @returns VBox status code.
3216 * @retval VINF_SUCCESS and a valid *ppPage on success.
3217 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
3218 *
3219 * @param pPGM PGM handle.
3220 * @param GCPhys The GC physical address.
3221 * @param   ppPage      Where to store the page pointer on success.
3222 * @param ppRamHint Where to read and store the ram list hint.
3223 * The caller initializes this to NULL before the call.
3224 */
3225DECLINLINE(int) pgmPhysGetPageWithHintEx(PPGM pPGM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRamHint)
3226{
3227 RTGCPHYS off;
3228 PPGMRAMRANGE pRam = *ppRamHint;
3229 if ( !pRam
3230 || RT_UNLIKELY((off = GCPhys - pRam->GCPhys) >= pRam->cb))
3231 {
3232 pRam = pPGM->CTX_SUFF(pRamRanges);
3233 off = GCPhys - pRam->GCPhys;
3234 if (RT_UNLIKELY(off >= pRam->cb))
3235 {
3236 do
3237 {
3238 pRam = pRam->CTX_SUFF(pNext);
3239 if (RT_UNLIKELY(!pRam))
3240 {
3241 *ppPage = NULL; /* Kill the incorrect and extremely annoying GCC warnings. */
3242 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
3243 }
3244 off = GCPhys - pRam->GCPhys;
3245 } while (off >= pRam->cb);
3246 }
3247 *ppRamHint = pRam;
3248 }
3249 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
3250#ifndef VBOX_WITH_NEW_PHYS_CODE
3251
3252 /*
3253 * Make sure it's present.
3254 */
3255 if (RT_UNLIKELY( !PGM_PAGE_GET_HCPHYS(*ppPage)
3256 && (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)))
3257 {
3258#ifdef IN_RING3
3259 int rc = pgmr3PhysGrowRange(PGM2VM(pPGM), GCPhys);
3260#else
3261 int rc = CTXALLMID(VMM, CallHost)(PGM2VM(pPGM), VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
3262#endif
3263 if (RT_FAILURE(rc))
3264 {
3265 *ppPage = NULL; /* Shut up annoying smart ass. */
3266 return rc;
3267 }
3268 Assert(rc == VINF_SUCCESS);
3269 }
3270#endif
3271 return VINF_SUCCESS;
3272}
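
/*
 * Illustrative sketch of the ram range hint pattern (hypothetical caller):
 * the hint starts out NULL and is reused across consecutive lookups, so pages
 * falling into the same range skip the list walk entirely.
 *
 *      PPGMRAMRANGE pRamHint = NULL;
 *      for (uint32_t iPage = 0; iPage < cPages; iPage++)
 *      {
 *          PPGMPAGE pPage;
 *          int rc = pgmPhysGetPageWithHintEx(&pVM->pgm.s,
 *                                            GCPhysFirst + ((RTGCPHYS)iPage << PAGE_SHIFT),
 *                                            &pPage, &pRamHint);
 *          if (RT_FAILURE(rc))
 *              return rc;
 *          ... use pPage ...
 *      }
 */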
3273
3274
3275/**
3276 * Gets the PGMPAGE structure for a guest page together with the PGMRAMRANGE.
3277 *
3278 * @returns Pointer to the page on success.
3279 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
3280 *
3281 * @param pPGM PGM handle.
3282 * @param GCPhys The GC physical address.
3283 * @param ppRam Where to store the pointer to the PGMRAMRANGE.
3284 */
3285DECLINLINE(PPGMPAGE) pgmPhysGetPageAndRange(PPGM pPGM, RTGCPHYS GCPhys, PPGMRAMRANGE *ppRam)
3286{
3287 /*
3288 * Optimize for the first range.
3289 */
3290 PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
3291 RTGCPHYS off = GCPhys - pRam->GCPhys;
3292 if (RT_UNLIKELY(off >= pRam->cb))
3293 {
3294 do
3295 {
3296 pRam = pRam->CTX_SUFF(pNext);
3297 if (RT_UNLIKELY(!pRam))
3298 return NULL;
3299 off = GCPhys - pRam->GCPhys;
3300 } while (off >= pRam->cb);
3301 }
3302 *ppRam = pRam;
3303 return &pRam->aPages[off >> PAGE_SHIFT];
3304}
3305
3306
3307/**
3308 * Gets the PGMPAGE structure for a guest page together with the PGMRAMRANGE.
3309 *
3310 * @returns Pointer to the page on success.
3311 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
3312 *
3313 * @param pPGM PGM handle.
3314 * @param GCPhys The GC physical address.
3315 * @param ppPage Where to store the pointer to the PGMPAGE structure.
3316 * @param ppRam Where to store the pointer to the PGMRAMRANGE structure.
3317 */
3318DECLINLINE(int) pgmPhysGetPageAndRangeEx(PPGM pPGM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam)
3319{
3320 /*
3321 * Optimize for the first range.
3322 */
3323 PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
3324 RTGCPHYS off = GCPhys - pRam->GCPhys;
3325 if (RT_UNLIKELY(off >= pRam->cb))
3326 {
3327 do
3328 {
3329 pRam = pRam->CTX_SUFF(pNext);
3330 if (RT_UNLIKELY(!pRam))
3331 {
3332 *ppRam = NULL; /* Shut up silly GCC warnings. */
3333 *ppPage = NULL; /* ditto */
3334 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
3335 }
3336 off = GCPhys - pRam->GCPhys;
3337 } while (off >= pRam->cb);
3338 }
3339 *ppRam = pRam;
3340 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
3341#ifndef VBOX_WITH_NEW_PHYS_CODE
3342
3343 /*
3344 * Make sure it's present.
3345 */
3346 if (RT_UNLIKELY( !PGM_PAGE_GET_HCPHYS(*ppPage)
3347 && (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)))
3348 {
3349#ifdef IN_RING3
3350 int rc = pgmr3PhysGrowRange(PGM2VM(pPGM), GCPhys);
3351#else
3352 int rc = CTXALLMID(VMM, CallHost)(PGM2VM(pPGM), VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
3353#endif
3354 if (RT_FAILURE(rc))
3355 {
3356            *ppRam = NULL;  /* Shut up silly GCC warnings. */
3357            *ppPage = NULL; /* ditto */
3358 return rc;
3359 }
3360 Assert(rc == VINF_SUCCESS);
3361
3362 }
3363#endif
3364 return VINF_SUCCESS;
3365}
3366
3367
3368/**
3369 * Convert GC Phys to HC Phys.
3370 *
3371 * @returns VBox status.
3372 * @param pPGM PGM handle.
3373 * @param GCPhys The GC physical address.
3374 * @param pHCPhys Where to store the corresponding HC physical address.
3375 *
3376 * @deprecated Doesn't deal with zero, shared or write monitored pages.
3377 * Avoid when writing new code!
3378 */
3379DECLINLINE(int) pgmRamGCPhys2HCPhys(PPGM pPGM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
3380{
3381 PPGMPAGE pPage;
3382 int rc = pgmPhysGetPageEx(pPGM, GCPhys, &pPage);
3383 if (RT_FAILURE(rc))
3384 return rc;
3385 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
3386 return VINF_SUCCESS;
3387}
3388
3389#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3390
3391/**
3392 * Inlined version of the ring-0 version of PGMDynMapHCPage that
3393 * optimizes access to pages already in the set.
3394 *
3395 * @returns VINF_SUCCESS. Will bail out to ring-3 on failure.
3396 * @param   pPGM        Pointer to the PGM instance data.
3397 * @param HCPhys The physical address of the page.
3398 * @param ppv Where to store the mapping address.
3399 */
3400DECLINLINE(int) pgmR0DynMapHCPageInlined(PPGM pPGM, RTHCPHYS HCPhys, void **ppv)
3401{
3402 STAM_PROFILE_START(&pPGM->StatR0DynMapHCPageInl, a);
3403 PPGMMAPSET pSet = &((PPGMCPU)((uint8_t *)VMMGetCpu(PGM2VM(pPGM)) + pPGM->offVCpu))->AutoSet; /* very pretty ;-) */
3404 Assert(!(HCPhys & PAGE_OFFSET_MASK));
3405 Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));
3406
3407 unsigned iHash = PGMMAPSET_HASH(HCPhys);
3408 unsigned iEntry = pSet->aiHashTable[iHash];
3409 if ( iEntry < pSet->cEntries
3410 && pSet->aEntries[iEntry].HCPhys == HCPhys)
3411 {
3412 *ppv = pSet->aEntries[iEntry].pvPage;
3413 STAM_COUNTER_INC(&pPGM->StatR0DynMapHCPageInlHits);
3414 }
3415 else
3416 {
3417 STAM_COUNTER_INC(&pPGM->StatR0DynMapHCPageInlMisses);
3418 pgmR0DynMapHCPageCommon(PGM2VM(pPGM), pSet, HCPhys, ppv);
3419 }
3420
3421 STAM_PROFILE_STOP(&pPGM->StatR0DynMapHCPageInl, a);
3422 return VINF_SUCCESS;
3423}
3424
3425
3426/**
3427 * Inlined version of the ring-0 version of PGMDynMapGCPage that optimizes
3428 * access to pages already in the set.
3429 *
3430 * @returns See PGMDynMapGCPage.
3431 * @param   pPGM        Pointer to the PGM instance data.
3432 * @param   GCPhys      The guest physical address of the page.
3433 * @param ppv Where to store the mapping address.
3434 */
3435DECLINLINE(int) pgmR0DynMapGCPageInlined(PPGM pPGM, RTGCPHYS GCPhys, void **ppv)
3436{
3437 STAM_PROFILE_START(&pPGM->StatR0DynMapGCPageInl, a);
3438 Assert(!(GCPhys & PAGE_OFFSET_MASK));
3439
3440 /*
3441 * Get the ram range.
3442 */
3443 PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
3444 RTGCPHYS off = GCPhys - pRam->GCPhys;
3445 if (RT_UNLIKELY(off >= pRam->cb
3446 /** @todo || page state stuff */))
3447 {
3448 /* This case is not counted into StatR0DynMapGCPageInl. */
3449 STAM_COUNTER_INC(&pPGM->StatR0DynMapGCPageInlRamMisses);
3450 return PGMDynMapGCPage(PGM2VM(pPGM), GCPhys, ppv);
3451 }
3452
3453 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[off >> PAGE_SHIFT]);
3454 STAM_COUNTER_INC(&pPGM->StatR0DynMapGCPageInlRamHits);
3455
3456 /*
3457     * pgmR0DynMapHCPageInlined without the stats.
3458 */
3459 PPGMMAPSET pSet = &((PPGMCPU)((uint8_t *)VMMGetCpu(PGM2VM(pPGM)) + pPGM->offVCpu))->AutoSet; /* very pretty ;-) */
3460 Assert(!(HCPhys & PAGE_OFFSET_MASK));
3461 Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));
3462
3463 unsigned iHash = PGMMAPSET_HASH(HCPhys);
3464 unsigned iEntry = pSet->aiHashTable[iHash];
3465 if ( iEntry < pSet->cEntries
3466 && pSet->aEntries[iEntry].HCPhys == HCPhys)
3467 {
3468 *ppv = pSet->aEntries[iEntry].pvPage;
3469 STAM_COUNTER_INC(&pPGM->StatR0DynMapGCPageInlHits);
3470 }
3471 else
3472 {
3473 STAM_COUNTER_INC(&pPGM->StatR0DynMapGCPageInlMisses);
3474 pgmR0DynMapHCPageCommon(PGM2VM(pPGM), pSet, HCPhys, ppv);
3475 }
3476
3477 STAM_PROFILE_STOP(&pPGM->StatR0DynMapGCPageInl, a);
3478 return VINF_SUCCESS;
3479}
3480
3481#endif /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
3482
3483#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
3484/**
3485 * Maps the page into current context (RC and maybe R0).
3486 *
3487 * @returns pointer to the mapping.
3488 * @param   pPGM        Pointer to the PGM instance data.
3489 * @param pPage The page.
3490 */
3491DECLINLINE(void *) pgmPoolMapPageInlined(PPGM pPGM, PPGMPOOLPAGE pPage)
3492{
3493 if (pPage->idx >= PGMPOOL_IDX_FIRST)
3494 {
3495 Assert(pPage->idx < pPGM->CTX_SUFF(pPool)->cCurPages);
3496 void *pv;
3497# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3498 pgmR0DynMapHCPageInlined(pPGM, pPage->Core.Key, &pv);
3499# else
3500 PGMDynMapHCPage(PGM2VM(pPGM), pPage->Core.Key, &pv);
3501# endif
3502 return pv;
3503 }
3504 return pgmPoolMapPageFallback(pPGM, pPage);
3505}
3506
3507/**
3508 * Temporarily maps one host page specified by HC physical address, returning
3509 * pointer within the page.
3510 *
3511 * Be WARNED that the dynamic page mapping area is small; with only 8 pages the
3512 * space is reused after 8 mappings (or perhaps a few more if you score with the cache).
3513 *
3514 * @returns The address corresponding to HCPhys.
3515 * @param   pPGM        Pointer to the PGM instance data.
3516 * @param HCPhys HC Physical address of the page.
3517 */
3518DECLINLINE(void *) pgmDynMapHCPageOff(PPGM pPGM, RTHCPHYS HCPhys)
3519{
3520 void *pv;
3521# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3522 pgmR0DynMapHCPageInlined(pPGM, HCPhys & ~(RTHCPHYS)PAGE_OFFSET_MASK, &pv);
3523# else
3524 PGMDynMapHCPage(PGM2VM(pPGM), HCPhys & ~(RTHCPHYS)PAGE_OFFSET_MASK, &pv);
3525# endif
3526 pv = (void *)((uintptr_t)pv | (HCPhys & PAGE_OFFSET_MASK));
3527 return pv;
3528}
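
/*
 * Illustrative sketch (hypothetical caller): since the low bits are carried
 * over into the returned pointer, an unaligned HCPhys can be dereferenced at
 * exactly the right offset within the mapped page.
 *
 *      uint32_t u32 = *(uint32_t *)pgmDynMapHCPageOff(&pVM->pgm.s, HCPhys + offPage);
 */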
3529#endif /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 || IN_RC */
3530
3531
3532#ifndef IN_RC
3533/**
3534 * Queries the Physical TLB entry for a physical guest page,
3535 * attempting to load the TLB entry if necessary.
3536 *
3537 * @returns VBox status code.
3538 * @retval VINF_SUCCESS on success
3539 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
3540 * @param pPGM The PGM instance handle.
3541 * @param GCPhys The address of the guest page.
3542 * @param ppTlbe Where to store the pointer to the TLB entry.
3543 */
3545DECLINLINE(int) pgmPhysPageQueryTlbe(PPGM pPGM, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe)
3546{
3547 int rc;
3548 PPGMPAGEMAPTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
3549 if (pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK))
3550 {
3551 STAM_COUNTER_INC(&pPGM->CTX_MID_Z(Stat,PageMapTlbHits));
3552 rc = VINF_SUCCESS;
3553 }
3554 else
3555 rc = pgmPhysPageLoadIntoTlb(pPGM, GCPhys);
3556 *ppTlbe = pTlbe;
3557 return rc;
3558}
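
/*
 * Typical use (illustrative sketch; the pPage member of the TLB entry is
 * assumed here for illustration):
 *
 *      PPGMPAGEMAPTLBE pTlbe;
 *      int rc = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
 *      if (RT_SUCCESS(rc))
 *          pPage = pTlbe->pPage;
 */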
3559#endif /* !IN_RC */
3560
3561#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
3562
3563# ifndef VBOX_WITH_NEW_PHYS_CODE
3564/**
3565 * Convert GC Phys to HC Virt.
3566 *
3567 * @returns VBox status.
3568 * @param pPGM PGM handle.
3569 * @param GCPhys The GC physical address.
3570 * @param pHCPtr Where to store the corresponding HC virtual address.
3571 *
3572 * @deprecated This will be eliminated by PGMPhysGCPhys2CCPtr. Only user is
3573 * pgmPoolMonitorGCPtr2CCPtr.
3574 */
3575DECLINLINE(int) pgmRamGCPhys2HCPtr(PPGM pPGM, RTGCPHYS GCPhys, PRTHCPTR pHCPtr)
3576{
3577 PPGMRAMRANGE pRam;
3578 PPGMPAGE pPage;
3579 int rc = pgmPhysGetPageAndRangeEx(pPGM, GCPhys, &pPage, &pRam);
3580 if (RT_FAILURE(rc))
3581 {
3582 *pHCPtr = 0; /* Shut up silly GCC warnings. */
3583 return rc;
3584 }
3585 RTGCPHYS off = GCPhys - pRam->GCPhys;
3586
3587 if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
3588 {
3589 unsigned iChunk = off >> PGM_DYNAMIC_CHUNK_SHIFT;
3590 *pHCPtr = (RTHCPTR)(pRam->paChunkR3Ptrs[iChunk] + (off & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
3591 return VINF_SUCCESS;
3592 }
3593 if (pRam->pvR3)
3594 {
3595 *pHCPtr = (RTHCPTR)((RTHCUINTPTR)pRam->pvR3 + off);
3596 return VINF_SUCCESS;
3597 }
3598 *pHCPtr = 0; /* Shut up silly GCC warnings. */
3599 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
3600}
3601# endif /* !VBOX_WITH_NEW_PHYS_CODE */
3602#endif /* !IN_RC && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) */
3603
3604/**
3605 * Convert GC Phys to HC Virt and HC Phys.
3606 *
3607 * @returns VBox status.
3608 * @param pPGM PGM handle.
3609 * @param GCPhys The GC physical address.
3610 * @param pHCPtr Where to store the corresponding HC virtual address.
3611 * @param pHCPhys Where to store the HC Physical address and its flags.
3612 *
3613 * @deprecated Will go away or be changed. Only user is MapCR3. MapCR3 will have to do ring-3
3614 *             and ring-0 locking of the CR3 in a lazy fashion, I fear... or perhaps not. We'll see.
3615 */
3616DECLINLINE(int) pgmRamGCPhys2HCPtrAndHCPhysWithFlags(PPGM pPGM, RTGCPHYS GCPhys, PRTHCPTR pHCPtr, PRTHCPHYS pHCPhys)
3617{
3618 PPGMRAMRANGE pRam;
3619 PPGMPAGE pPage;
3620 int rc = pgmPhysGetPageAndRangeEx(pPGM, GCPhys, &pPage, &pRam);
3621 if (RT_FAILURE(rc))
3622 {
3623 *pHCPtr = 0; /* Shut up crappy GCC warnings */
3624 *pHCPhys = 0; /* ditto */
3625 return rc;
3626 }
3627 RTGCPHYS off = GCPhys - pRam->GCPhys;
3628
3629 *pHCPhys = pPage->HCPhys; /** @todo PAGE FLAGS */
3630 if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
3631 {
3632 unsigned idx = (off >> PGM_DYNAMIC_CHUNK_SHIFT);
3633#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) /* ASSUMES only MapCR3 usage. */
3634 PRTR3UINTPTR paChunkR3Ptrs = (PRTR3UINTPTR)MMHyperR3ToCC(PGM2VM(pPGM), pRam->paChunkR3Ptrs);
3635 *pHCPtr = (RTHCPTR)(paChunkR3Ptrs[idx] + (off & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
3636#else
3637 *pHCPtr = (RTHCPTR)(pRam->paChunkR3Ptrs[idx] + (off & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
3638#endif
3639 return VINF_SUCCESS;
3640 }
3641 if (pRam->pvR3)
3642 {
3643 *pHCPtr = (RTHCPTR)((RTHCUINTPTR)pRam->pvR3 + off);
3644 return VINF_SUCCESS;
3645 }
3646 *pHCPtr = 0;
3647 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
3648}
3649
3650
3651/**
3652 * Clears flags associated with a RAM address.
3653 *
3654 * @returns VBox status code.
3655 * @param pPGM PGM handle.
3656 * @param GCPhys Guest context physical address.
3657 * @param fFlags fFlags to clear. (Bits 0-11.)
3658 */
3659DECLINLINE(int) pgmRamFlagsClearByGCPhys(PPGM pPGM, RTGCPHYS GCPhys, unsigned fFlags)
3660{
3661 PPGMPAGE pPage;
3662 int rc = pgmPhysGetPageEx(pPGM, GCPhys, &pPage);
3663 if (RT_FAILURE(rc))
3664 return rc;
3665
3666 fFlags &= ~X86_PTE_PAE_PG_MASK;
3667 pPage->HCPhys &= ~(RTHCPHYS)fFlags; /** @todo PAGE FLAGS */
3668 return VINF_SUCCESS;
3669}
3670
3671
3672/**
3673 * Clears flags associated with a RAM address.
3674 *
3675 * @returns VBox status code.
3676 * @param pPGM PGM handle.
3677 * @param GCPhys Guest context physical address.
3678 * @param fFlags fFlags to clear. (Bits 0-11.)
3679 * @param ppRamHint Where to read and store the ram list hint.
3680 * The caller initializes this to NULL before the call.
3681 */
3682DECLINLINE(int) pgmRamFlagsClearByGCPhysWithHint(PPGM pPGM, RTGCPHYS GCPhys, unsigned fFlags, PPGMRAMRANGE *ppRamHint)
3683{
3684 PPGMPAGE pPage;
3685 int rc = pgmPhysGetPageWithHintEx(pPGM, GCPhys, &pPage, ppRamHint);
3686 if (RT_FAILURE(rc))
3687 return rc;
3688
3689 fFlags &= ~X86_PTE_PAE_PG_MASK;
3690 pPage->HCPhys &= ~(RTHCPHYS)fFlags; /** @todo PAGE FLAGS */
3691 return VINF_SUCCESS;
3692}
3693
3694
3695/**
3696 * Sets (bitwise OR) flags associated with a RAM address.
3697 *
3698 * @returns VBox status code.
3699 * @param pPGM PGM handle.
3700 * @param GCPhys Guest context physical address.
3701 * @param   fFlags      fFlags to set. (Bits 0-11.)
3702 */
3703DECLINLINE(int) pgmRamFlagsSetByGCPhys(PPGM pPGM, RTGCPHYS GCPhys, unsigned fFlags)
3704{
3705 PPGMPAGE pPage;
3706 int rc = pgmPhysGetPageEx(pPGM, GCPhys, &pPage);
3707 if (RT_FAILURE(rc))
3708 return rc;
3709
3710 fFlags &= ~X86_PTE_PAE_PG_MASK;
3711 pPage->HCPhys |= fFlags; /** @todo PAGE FLAGS */
3712 return VINF_SUCCESS;
3713}
3714
3715
3716/**
3717 * Sets (bitwise OR) flags associated with a RAM address.
3718 *
3719 * @returns VBox status code.
3720 * @param pPGM PGM handle.
3721 * @param GCPhys Guest context physical address.
3722 * @param   fFlags      fFlags to set. (Bits 0-11.)
3723 * @param ppRamHint Where to read and store the ram list hint.
3724 * The caller initializes this to NULL before the call.
3725 */
3726DECLINLINE(int) pgmRamFlagsSetByGCPhysWithHint(PPGM pPGM, RTGCPHYS GCPhys, unsigned fFlags, PPGMRAMRANGE *ppRamHint)
3727{
3728 PPGMPAGE pPage;
3729 int rc = pgmPhysGetPageWithHintEx(pPGM, GCPhys, &pPage, ppRamHint);
3730 if (RT_FAILURE(rc))
3731 return rc;
3732
3733 fFlags &= ~X86_PTE_PAE_PG_MASK;
3734 pPage->HCPhys |= fFlags; /** @todo PAGE FLAGS */
3735 return VINF_SUCCESS;
3736}
3737
3738
3739/**
3740 * Calculates the guest physical address of the large (4 MB) page in 32-bit paging mode.
3741 * Takes PSE-36 into account.
3742 *
3743 * @returns guest physical address
3744 * @param pPGM Pointer to the PGM instance data.
3745 * @param Pde Guest Pde
3746 */
3747DECLINLINE(RTGCPHYS) pgmGstGet4MBPhysPage(PPGM pPGM, X86PDE Pde)
3748{
3749 RTGCPHYS GCPhys = Pde.u & X86_PDE4M_PG_MASK;
3750 GCPhys |= (RTGCPHYS)Pde.b.u8PageNoHigh << 32;
3751
3752 return GCPhys & pPGM->GCPhys4MBPSEMask;
3753}
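
/*
 * Worked example (illustrative): a 4MB PDE with the value 0x40002083 has its
 * base address in bits 31:22 (0x40000000) and, with PSE-36, physical address
 * bits 39:32 in PDE bits 20:13 (here 0x01, i.e. bit 13 set). The resulting
 * guest physical address is 0x40000000 | ((RTGCPHYS)0x01 << 32) = 0x140000000,
 * subject to the GCPhys4MBPSEMask limit for the supported physical width.
 */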
3754
3755
3756/**
3757 * Gets the page directory entry for the specified address (32-bit paging).
3758 *
3759 * @returns The page directory entry in question.
3760 * @param pPGM Pointer to the PGM instance data.
3761 * @param GCPtr The address.
3762 */
3763DECLINLINE(X86PDE) pgmGstGet32bitPDE(PPGM pPGM, RTGCPTR GCPtr)
3764{
3765#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3766 PCX86PD pGuestPD = 0;
3767 int rc = pgmR0DynMapGCPageInlined(pPGM, pPGM->GCPhysCR3, (void **)&pGuestPD);
3768 if (RT_FAILURE(rc))
3769 {
3770 X86PDE ZeroPde = {0};
3771 AssertMsgFailedReturn(("%Rrc\n", rc), ZeroPde);
3772 }
3773 return pGuestPD->a[GCPtr >> X86_PD_SHIFT];
3774#else
3775 return pPGM->CTX_SUFF(pGst32BitPd)->a[GCPtr >> X86_PD_SHIFT];
3776#endif
3777}
3778
3779
3780/**
3781 * Gets the address of a specific page directory entry (32-bit paging).
3782 *
3783 * @returns Pointer to the page directory entry in question.
3784 * @param pPGM Pointer to the PGM instance data.
3785 * @param GCPtr The address.
3786 */
3787DECLINLINE(PX86PDE) pgmGstGet32bitPDEPtr(PPGM pPGM, RTGCPTR GCPtr)
3788{
3789#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3790 PX86PD pGuestPD = 0;
3791 int rc = pgmR0DynMapGCPageInlined(pPGM, pPGM->GCPhysCR3, (void **)&pGuestPD);
3792 AssertRCReturn(rc, 0);
3793 return &pGuestPD->a[GCPtr >> X86_PD_SHIFT];
3794#else
3795 return &pPGM->CTX_SUFF(pGst32BitPd)->a[GCPtr >> X86_PD_SHIFT];
3796#endif
3797}
3798
3799
3800/**
3801 * Gets the address of the guest page directory (32-bit paging).
3802 *
3803 * @returns Pointer to the guest page directory in question.
3804 * @param pPGM Pointer to the PGM instance data.
3805 */
3806DECLINLINE(PX86PD) pgmGstGet32bitPDPtr(PPGM pPGM)
3807{
3808#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3809 PX86PD pGuestPD = 0;
3810 int rc = pgmR0DynMapGCPageInlined(pPGM, pPGM->GCPhysCR3, (void **)&pGuestPD);
3811 AssertRCReturn(rc, 0);
3812 return pGuestPD;
3813#else
3814 return pPGM->CTX_SUFF(pGst32BitPd);
3815#endif
3816}
3817
3818
3819/**
3820 * Gets the guest page directory pointer table.
3821 *
3822 * @returns Pointer to the page directory pointer table in question.
3823 * @returns NULL if the page directory pointer table is not present or on an invalid page.
3824 * @param pPGM Pointer to the PGM instance data.
3825 */
3826DECLINLINE(PX86PDPT) pgmGstGetPaePDPTPtr(PPGM pPGM)
3827{
3828#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3829 PX86PDPT pGuestPDPT = 0;
3830 int rc = pgmR0DynMapGCPageInlined(pPGM, pPGM->GCPhysCR3, (void **)&pGuestPDPT);
3831 AssertRCReturn(rc, 0);
3832 return pGuestPDPT;
3833#else
3834 return pPGM->CTX_SUFF(pGstPaePdpt);
3835#endif
3836}
3837
3838
3839/**
3840 * Gets the guest page directory pointer table entry for the specified address.
3841 *
3842 * @returns Pointer to the page directory pointer table entry in question.
3843 * @returns NULL if the page directory pointer table is not present or on an invalid page.
3844 * @param pPGM Pointer to the PGM instance data.
3845 * @param GCPtr The address.
3846 */
3847DECLINLINE(PX86PDPE) pgmGstGetPaePDPEPtr(PPGM pPGM, RTGCPTR GCPtr)
3848{
3849 AssertGCPtr32(GCPtr);
3850
3851#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3852 PX86PDPT pGuestPDPT = 0;
3853 int rc = pgmR0DynMapGCPageInlined(pPGM, pPGM->GCPhysCR3, (void **)&pGuestPDPT);
3854 AssertRCReturn(rc, 0);
3855 return &pGuestPDPT->a[(GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE];
3856#else
3857 return &pPGM->CTX_SUFF(pGstPaePdpt)->a[(GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE];
3858#endif
3859}
3860
3861
3862/**
3863 * Gets the page directory for the specified address.
3864 *
3865 * @returns Pointer to the page directory in question.
3866 * @returns NULL if the page directory is not present or on an invalid page.
3867 * @param pPGM Pointer to the PGM instance data.
3868 * @param GCPtr The address.
3869 */
3870DECLINLINE(PX86PDPAE) pgmGstGetPaePD(PPGM pPGM, RTGCPTR GCPtr)
3871{
3872 AssertGCPtr32(GCPtr);
3873
3874#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3875 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pPGM);
3876 AssertReturn(pGuestPDPT, 0);
3877#else
3878 PX86PDPT pGuestPDPT = pPGM->CTX_SUFF(pGstPaePdpt);
3879#endif
3880 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
3881 if (pGuestPDPT->a[iPdPt].n.u1Present)
3882 {
3883#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3884 if ((pGuestPDPT->a[iPdPt].u & X86_PDPE_PG_MASK) == pPGM->aGCPhysGstPaePDs[iPdPt])
3885 return pPGM->CTX_SUFF(apGstPaePDs)[iPdPt];
3886#endif
3887
3888 /* cache is out-of-sync. */
3889 PX86PDPAE pPD;
3890 int rc = PGM_GCPHYS_2_PTR_BY_PGM(pPGM, pGuestPDPT->a[iPdPt].u & X86_PDPE_PG_MASK, &pPD);
3891 if (RT_SUCCESS(rc))
3892 return pPD;
3893 AssertMsgFailed(("Impossible! rc=%d PDPE=%#llx\n", rc, pGuestPDPT->a[iPdPt].u));
3894 /* returning NULL is ok if we assume it's just an invalid page of some kind emulated as all 0s. (not quite true) */
3895 }
3896 return NULL;
3897}
3898
3899
3900/**
3901 * Gets the page directory entry for the specified address.
3902 *
3903 * @returns Pointer to the page directory entry in question.
3904 * @returns NULL if the page directory is not present or on an invalid page.
3905 * @param pPGM Pointer to the PGM instance data.
3906 * @param GCPtr The address.
3907 */
3908DECLINLINE(PX86PDEPAE) pgmGstGetPaePDEPtr(PPGM pPGM, RTGCPTR GCPtr)
3909{
3910 AssertGCPtr32(GCPtr);
3911
3912#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3913 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pPGM);
3914 AssertReturn(pGuestPDPT, 0);
3915#else
3916 PX86PDPT pGuestPDPT = pPGM->CTX_SUFF(pGstPaePdpt);
3917#endif
3918 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
3919 if (pGuestPDPT->a[iPdPt].n.u1Present)
3920 {
3921 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
3922#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3923 if ((pGuestPDPT->a[iPdPt].u & X86_PDPE_PG_MASK) == pPGM->aGCPhysGstPaePDs[iPdPt])
3924 return &pPGM->CTX_SUFF(apGstPaePDs)[iPdPt]->a[iPD];
3925#endif
3926
3927 /* The cache is out-of-sync. */
3928 PX86PDPAE pPD;
3929 int rc = PGM_GCPHYS_2_PTR_BY_PGM(pPGM, pGuestPDPT->a[iPdPt].u & X86_PDPE_PG_MASK, &pPD);
3930 if (RT_SUCCESS(rc))
3931 return &pPD->a[iPD];
3932 AssertMsgFailed(("Impossible! rc=%Rrc PDPE=%RX64\n", rc, pGuestPDPT->a[iPdPt].u));
3933        /* returning NULL is ok if we assume it's just an invalid page or something which we'll emulate as all 0s. (not quite true) */
3934 }
3935 return NULL;
3936}
3937
3938
3939/**
3940 * Gets the page directory entry for the specified address.
3941 *
3942 * @returns The page directory entry in question.
3943 * @returns A non-present entry if the page directory is not present or on an invalid page.
3944 * @param pPGM Pointer to the PGM instance data.
3945 * @param GCPtr The address.
3946 */
3947DECLINLINE(X86PDEPAE) pgmGstGetPaePDE(PPGM pPGM, RTGCPTR GCPtr)
3948{
3949 AssertGCPtr32(GCPtr);
3950
3951#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3952 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pPGM);
3953 if (RT_LIKELY(pGuestPDPT))
3954#else
3955 PX86PDPT pGuestPDPT = pPGM->CTX_SUFF(pGstPaePdpt);
3956#endif
3957 {
3958 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
3959 if (pGuestPDPT->a[iPdPt].n.u1Present)
3960 {
3961 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
3962#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3963 if ((pGuestPDPT->a[iPdPt].u & X86_PDPE_PG_MASK) == pPGM->aGCPhysGstPaePDs[iPdPt])
3964 return pPGM->CTX_SUFF(apGstPaePDs)[iPdPt]->a[iPD];
3965#endif
3966
3967 /* cache is out-of-sync. */
3968 PX86PDPAE pPD;
3969 int rc = PGM_GCPHYS_2_PTR_BY_PGM(pPGM, pGuestPDPT->a[iPdPt].u & X86_PDPE_PG_MASK, &pPD);
3970 if (RT_SUCCESS(rc))
3971 return pPD->a[iPD];
3972            AssertMsgFailed(("Impossible! rc=%d PDPE=%#llx\n", rc, pGuestPDPT->a[iPdPt].u));
3973 }
3974 }
3975 X86PDEPAE ZeroPde = {0};
3976 return ZeroPde;
3977}
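
/*
 * Minimal sketch (hypothetical caller): fetch the PAE PDE for an address and
 * test the present bit; a zero entry is returned when the directory itself is
 * absent, so no separate NULL check is needed.
 *
 *      X86PDEPAE Pde = pgmGstGetPaePDE(&pVM->pgm.s, GCPtr);
 *      if (!Pde.n.u1Present)
 *          return VERR_PAGE_TABLE_NOT_PRESENT;
 */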
3978
3979
3980/**
3981 * Gets the page directory pointer table entry for the specified address
3982 * and returns the index into the page directory.
3983 *
3984 * @returns Pointer to the page directory in question.
3985 * @returns NULL if the page directory is not present or on an invalid page.
3986 * @param pPGM Pointer to the PGM instance data.
3987 * @param GCPtr The address.
3988 * @param piPD Receives the index into the returned page directory
3989 * @param pPdpe Receives the page directory pointer entry. Optional.
3990 */
3991DECLINLINE(PX86PDPAE) pgmGstGetPaePDPtr(PPGM pPGM, RTGCPTR GCPtr, unsigned *piPD, PX86PDPE pPdpe)
3992{
3993 AssertGCPtr32(GCPtr);
3994
3995#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3996 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pPGM);
3997 AssertReturn(pGuestPDPT, 0);
3998#else
3999 PX86PDPT pGuestPDPT = pPGM->CTX_SUFF(pGstPaePdpt);
4000#endif
4001 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
4002 if (pPdpe)
4003 *pPdpe = pGuestPDPT->a[iPdPt];
4004 if (pGuestPDPT->a[iPdPt].n.u1Present)
4005 {
4006 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
4007#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
4008 if ((pGuestPDPT->a[iPdPt].u & X86_PDPE_PG_MASK) == pPGM->aGCPhysGstPaePDs[iPdPt])
4009 {
4010 *piPD = iPD;
4011 return pPGM->CTX_SUFF(apGstPaePDs)[iPdPt];
4012 }
4013#endif
4014
4015 /* cache is out-of-sync. */
4016 PX86PDPAE pPD;
4017 int rc = PGM_GCPHYS_2_PTR_BY_PGM(pPGM, pGuestPDPT->a[iPdPt].u & X86_PDPE_PG_MASK, &pPD);
4018 if (RT_SUCCESS(rc))
4019 {
4020 *piPD = iPD;
4021 return pPD;
4022 }
4023 AssertMsgFailed(("Impossible! rc=%d PDPE=%#llx\n", rc, pGuestPDPT->a[iPdPt].u));
4024        /* returning NULL is ok if we assume it's just an invalid page of some kind emulated as all 0s. */
4025 }
4026 return NULL;
4027}

#ifndef IN_RC

/**
 * Gets the page map level-4 pointer for the guest.
 *
 * @returns Pointer to the PML4 page.
 * @param   pPGM        Pointer to the PGM instance data.
 */
DECLINLINE(PX86PML4) pgmGstGetLongModePML4Ptr(PPGM pPGM)
{
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    PX86PML4 pGuestPml4;
    int rc = pgmR0DynMapGCPageInlined(pPGM, pPGM->GCPhysCR3, (void **)&pGuestPml4);
    AssertRCReturn(rc, NULL);
    return pGuestPml4;
#else
    Assert(pPGM->CTX_SUFF(pGstAmd64Pml4));
    return pPGM->CTX_SUFF(pGstAmd64Pml4);
#endif
}


/**
 * Gets the pointer to a page map level-4 entry.
 *
 * @returns Pointer to the PML4 entry.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   iPml4       The index.
 */
DECLINLINE(PX86PML4E) pgmGstGetLongModePML4EPtr(PPGM pPGM, unsigned int iPml4)
{
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    PX86PML4 pGuestPml4;
    int rc = pgmR0DynMapGCPageInlined(pPGM, pPGM->GCPhysCR3, (void **)&pGuestPml4);
    AssertRCReturn(rc, NULL);
    return &pGuestPml4->a[iPml4];
#else
    Assert(pPGM->CTX_SUFF(pGstAmd64Pml4));
    return &pPGM->CTX_SUFF(pGstAmd64Pml4)->a[iPml4];
#endif
}


/**
 * Gets a page map level-4 entry.
 *
 * @returns The PML4 entry.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   iPml4       The index.
 */
DECLINLINE(X86PML4E) pgmGstGetLongModePML4E(PPGM pPGM, unsigned int iPml4)
{
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    PX86PML4 pGuestPml4;
    int rc = pgmR0DynMapGCPageInlined(pPGM, pPGM->GCPhysCR3, (void **)&pGuestPml4);
    if (RT_FAILURE(rc))
    {
        X86PML4E ZeroPml4e = {0};
        AssertMsgFailedReturn(("%Rrc\n", rc), ZeroPml4e);
    }
    return pGuestPml4->a[iPml4];
#else
    Assert(pPGM->CTX_SUFF(pGstAmd64Pml4));
    return pPGM->CTX_SUFF(pGstAmd64Pml4)->a[iPml4];
#endif
}

/**
 * Gets the page directory pointer entry for the specified address.
 *
 * @returns Pointer to the page directory pointer entry in question.
 * @returns NULL if the page map level-4 entry is not present or on an invalid page.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 * @param   ppPml4e     Page Map Level-4 Entry (out).
 */
DECLINLINE(PX86PDPE) pgmGstGetLongModePDPTPtr(PPGM pPGM, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e)
{
    PX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
    const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    PCX86PML4E pPml4e = *ppPml4e = &pGuestPml4->a[iPml4];
    if (pPml4e->n.u1Present)
    {
        PX86PDPT pPdpt;
        int rc = PGM_GCPHYS_2_PTR_BY_PGM(pPGM, pPml4e->u & X86_PML4E_PG_MASK, &pPdpt);
        AssertRCReturn(rc, NULL);

        const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
        return &pPdpt->a[iPdPt];
    }
    return NULL;
}


/**
 * Gets the page directory entry for the specified address.
 *
 * @returns The page directory entry in question.
 * @returns A non-present entry if the page directory is not present or on an invalid page.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 * @param   ppPml4e     Page Map Level-4 Entry (out).
 * @param   pPdpe       Page directory pointer table entry (out).
 */
DECLINLINE(X86PDEPAE) pgmGstGetLongModePDEEx(PPGM pPGM, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPE pPdpe)
{
    X86PDEPAE ZeroPde = {0};
    PX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
    const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    PCX86PML4E pPml4e = *ppPml4e = &pGuestPml4->a[iPml4];
    if (pPml4e->n.u1Present)
    {
        PCX86PDPT pPdptTemp;
        int rc = PGM_GCPHYS_2_PTR_BY_PGM(pPGM, pPml4e->u & X86_PML4E_PG_MASK, &pPdptTemp);
        AssertRCReturn(rc, ZeroPde);

        const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
        *pPdpe = pPdptTemp->a[iPdPt];
        if (pPdptTemp->a[iPdPt].n.u1Present)
        {
            PCX86PDPAE pPD;
            rc = PGM_GCPHYS_2_PTR_BY_PGM(pPGM, pPdptTemp->a[iPdPt].u & X86_PDPE_PG_MASK, &pPD);
            AssertRCReturn(rc, ZeroPde);

            const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
            return pPD->a[iPD];
        }
    }

    return ZeroPde;
}

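/* For reference, the long mode (4-level) linear address decomposition used by
 * these walkers (a sketch; the shift/mask constants come from x86.h):
 *
 *      63    48 47    39 38    30 29    21 20    12 11          0
 *     +--------+--------+--------+--------+--------+-------------+
 *     |  sign  | iPml4  | iPdPt  |  iPD   |  iPT   | page offset |
 *     +--------+--------+--------+--------+--------+-------------+
 *
 *     iPml4 = (GCPtr >> X86_PML4_SHIFT)   & X86_PML4_MASK;        // bits 39..47
 *     iPdPt = (GCPtr >> X86_PDPT_SHIFT)   & X86_PDPT_MASK_AMD64;  // bits 30..38
 *     iPD   = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;      // bits 21..29
 */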

/**
 * Gets the page directory entry for the specified address.
 *
 * @returns The page directory entry in question.
 * @returns A non-present entry if the page directory is not present or on an invalid page.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 */
DECLINLINE(X86PDEPAE) pgmGstGetLongModePDE(PPGM pPGM, RTGCPTR64 GCPtr)
{
    X86PDEPAE ZeroPde = {0};
    PCX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
    const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    if (pGuestPml4->a[iPml4].n.u1Present)
    {
        PCX86PDPT pPdptTemp;
        int rc = PGM_GCPHYS_2_PTR_BY_PGM(pPGM, pGuestPml4->a[iPml4].u & X86_PML4E_PG_MASK, &pPdptTemp);
        AssertRCReturn(rc, ZeroPde);

        const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
        if (pPdptTemp->a[iPdPt].n.u1Present)
        {
            PCX86PDPAE pPD;
            rc = PGM_GCPHYS_2_PTR_BY_PGM(pPGM, pPdptTemp->a[iPdPt].u & X86_PDPE_PG_MASK, &pPD);
            AssertRCReturn(rc, ZeroPde);

            const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
            return pPD->a[iPD];
        }
    }
    return ZeroPde;
}


/**
 * Gets a pointer to the page directory entry for the specified address.
 *
 * @returns Pointer to the page directory entry in question.
 * @returns NULL if the page directory is not present or on an invalid page.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 */
DECLINLINE(PX86PDEPAE) pgmGstGetLongModePDEPtr(PPGM pPGM, RTGCPTR64 GCPtr)
{
    PCX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
    const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    if (pGuestPml4->a[iPml4].n.u1Present)
    {
        PCX86PDPT pPdptTemp;
        int rc = PGM_GCPHYS_2_PTR_BY_PGM(pPGM, pGuestPml4->a[iPml4].u & X86_PML4E_PG_MASK, &pPdptTemp);
        AssertRCReturn(rc, NULL);

        const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
        if (pPdptTemp->a[iPdPt].n.u1Present)
        {
            PX86PDPAE pPD;
            rc = PGM_GCPHYS_2_PTR_BY_PGM(pPGM, pPdptTemp->a[iPdPt].u & X86_PDPE_PG_MASK, &pPD);
            AssertRCReturn(rc, NULL);

            const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
            return &pPD->a[iPD];
        }
    }
    return NULL;
}


/**
 * Gets the GUEST page directory pointer for the specified address.
 *
 * @returns Pointer to the page directory in question.
 * @returns NULL if the page directory is not present or on an invalid page.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 * @param   ppPml4e     Page Map Level-4 Entry (out).
 * @param   pPdpe       Page directory pointer table entry (out).
 * @param   piPD        Receives the index into the returned page directory.
 */
DECLINLINE(PX86PDPAE) pgmGstGetLongModePDPtr(PPGM pPGM, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPE pPdpe, unsigned *piPD)
{
    PX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
    const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    PCX86PML4E pPml4e = *ppPml4e = &pGuestPml4->a[iPml4];
    if (pPml4e->n.u1Present)
    {
        PCX86PDPT pPdptTemp;
        int rc = PGM_GCPHYS_2_PTR_BY_PGM(pPGM, pPml4e->u & X86_PML4E_PG_MASK, &pPdptTemp);
        AssertRCReturn(rc, NULL);

        const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
        *pPdpe = pPdptTemp->a[iPdPt];
        if (pPdptTemp->a[iPdPt].n.u1Present)
        {
            PX86PDPAE pPD;
            rc = PGM_GCPHYS_2_PTR_BY_PGM(pPGM, pPdptTemp->a[iPdPt].u & X86_PDPE_PG_MASK, &pPD);
            AssertRCReturn(rc, NULL);

            *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
            return pPD;
        }
    }
    return NULL;
}

#endif /* !IN_RC */


/**
 * Gets the shadow page directory, 32-bit.
 *
 * @returns Pointer to the shadow 32-bit PD.
 * @param   pPGM        Pointer to the PGM instance data.
 */
DECLINLINE(PX86PD) pgmShwGet32BitPDPtr(PPGM pPGM)
{
#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    return (PX86PD)PGMPOOL_PAGE_2_PTR_BY_PGM(pPGM, pPGM->CTX_SUFF(pShwPageCR3));
#else
# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    PX86PD pShwPd;
    Assert(pPGM->HCPhysShw32BitPD != 0 && pPGM->HCPhysShw32BitPD != NIL_RTHCPHYS);
    int rc = PGM_HCPHYS_2_PTR_BY_PGM(pPGM, pPGM->HCPhysShw32BitPD, &pShwPd);
    AssertRCReturn(rc, NULL);
    return pShwPd;
# else
    return pPGM->CTX_SUFF(pShw32BitPd);
# endif
#endif
}


/**
 * Gets the shadow page directory entry for the specified address, 32-bit.
 *
 * @returns Shadow 32-bit PDE.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 */
DECLINLINE(X86PDE) pgmShwGet32BitPDE(PPGM pPGM, RTGCPTR GCPtr)
{
    const unsigned iPd = (GCPtr >> X86_PD_SHIFT) & X86_PD_MASK;

    PX86PD pShwPde = pgmShwGet32BitPDPtr(pPGM);
    if (!pShwPde)
    {
        X86PDE ZeroPde = {0};
        return ZeroPde;
    }
    return pShwPde->a[iPd];
}


/**
 * Gets the pointer to the shadow page directory entry for the specified
 * address, 32-bit.
 *
 * @returns Pointer to the shadow 32-bit PDE.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 */
DECLINLINE(PX86PDE) pgmShwGet32BitPDEPtr(PPGM pPGM, RTGCPTR GCPtr)
{
    const unsigned iPd = (GCPtr >> X86_PD_SHIFT) & X86_PD_MASK;

    PX86PD pPde = pgmShwGet32BitPDPtr(pPGM);
    AssertReturn(pPde, NULL);
    return &pPde->a[iPd];
}

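/* For reference (sketch): 32-bit paging uses a single 10-bit page directory
 * index, while PAE splits the same address range into a 2-bit PDPT index and
 * a 9-bit page directory index:
 *
 *     iPd32 = (GCPtr >> X86_PD_SHIFT)     & X86_PD_MASK;        // bits 22..31
 *     iPdpt = (GCPtr >> X86_PDPT_SHIFT)   & X86_PDPT_MASK_PAE;  // bits 30..31
 *     iPd   = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;    // bits 21..29
 */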

/**
 * Gets the shadow page directory pointer table, PAE.
 *
 * @returns Pointer to the shadow PAE PDPT.
 * @param   pPGM        Pointer to the PGM instance data.
 */
DECLINLINE(PX86PDPT) pgmShwGetPaePDPTPtr(PPGM pPGM)
{
#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    return (PX86PDPT)PGMPOOL_PAGE_2_PTR_BY_PGM(pPGM, pPGM->CTX_SUFF(pShwPageCR3));
#else
# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    PX86PDPT pShwPdpt;
    Assert(pPGM->HCPhysShwPaePdpt != 0 && pPGM->HCPhysShwPaePdpt != NIL_RTHCPHYS);
    int rc = PGM_HCPHYS_2_PTR_BY_PGM(pPGM, pPGM->HCPhysShwPaePdpt, &pShwPdpt);
    AssertRCReturn(rc, NULL);
    return pShwPdpt;
# else
    return pPGM->CTX_SUFF(pShwPaePdpt);
# endif
#endif
}


/**
 * Gets the shadow page directory for the specified address, PAE.
 *
 * @returns Pointer to the shadow PD.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 */
DECLINLINE(PX86PDPAE) pgmShwGetPaePDPtr(PPGM pPGM, RTGCPTR GCPtr)
{
#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
    PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pPGM);

    if (!pPdpt->a[iPdpt].n.u1Present)
        return NULL;

    /* Fetch the pgm pool shadow descriptor. */
    PPGMPOOLPAGE pShwPde = pgmPoolGetPageByHCPhys(PGM2VM(pPGM), pPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
    AssertReturn(pShwPde, NULL);

    return (PX86PDPAE)PGMPOOL_PAGE_2_PTR_BY_PGM(pPGM, pShwPde);
#else
    const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    PX86PDPAE pPD;
    int rc = PGM_HCPHYS_2_PTR_BY_PGM(pPGM, pPGM->aHCPhysPaePDs[iPdpt], &pPD);
    AssertRCReturn(rc, NULL);
    return pPD;
# else
    PX86PDPAE pPD = pPGM->CTX_SUFF(apShwPaePDs)[iPdpt];
    Assert(pPD);
    return pPD;
# endif
#endif
}


/**
 * Gets the shadow page directory for the specified address, PAE, using a
 * caller supplied shadow PDPT.
 *
 * @returns Pointer to the shadow PD.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   pPdpt       Pointer to the shadow PDPT to use.
 * @param   GCPtr       The address.
 */
DECLINLINE(PX86PDPAE) pgmShwGetPaePDPtr(PPGM pPGM, PX86PDPT pPdpt, RTGCPTR GCPtr)
{
#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;

    if (!pPdpt->a[iPdpt].n.u1Present)
        return NULL;

    /* Fetch the pgm pool shadow descriptor. */
    PPGMPOOLPAGE pShwPde = pgmPoolGetPageByHCPhys(PGM2VM(pPGM), pPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
    AssertReturn(pShwPde, NULL);

    return (PX86PDPAE)PGMPOOL_PAGE_2_PTR_BY_PGM(pPGM, pShwPde);
#else
    AssertFailed();
    return NULL;
#endif
}


/**
 * Gets the shadow page directory entry, PAE.
 *
 * @returns PDE.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 */
DECLINLINE(X86PDEPAE) pgmShwGetPaePDE(PPGM pPGM, RTGCPTR GCPtr)
{
    const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;

    PX86PDPAE pShwPde = pgmShwGetPaePDPtr(pPGM, GCPtr);
    if (!pShwPde)
    {
        X86PDEPAE ZeroPde = {0};
        return ZeroPde;
    }
    return pShwPde->a[iPd];
}


/**
 * Gets the pointer to the shadow page directory entry for an address, PAE.
 *
 * @returns Pointer to the PDE.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 */
DECLINLINE(PX86PDEPAE) pgmShwGetPaePDEPtr(PPGM pPGM, RTGCPTR GCPtr)
{
    const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;

    PX86PDPAE pPde = pgmShwGetPaePDPtr(pPGM, GCPtr);
    AssertReturn(pPde, NULL);
    return &pPde->a[iPd];
}
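
/* Usage sketch (hypothetical caller): inspecting the shadow PDE currently
 * mapping GCPtr, e.g. to see whether anything has been shadowed yet:
 *
 *     X86PDEPAE ShwPde = pgmShwGetPaePDE(pPGM, GCPtr);
 *     if (!ShwPde.n.u1Present)
 *     {
 *         // Nothing is shadowed yet; a #PF at GCPtr would go through SyncPT.
 *     }
 */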

#ifndef IN_RC

/**
 * Gets the shadow page map level-4 pointer.
 *
 * @returns Pointer to the shadow PML4.
 * @param   pPGM        Pointer to the PGM instance data.
 */
DECLINLINE(PX86PML4) pgmShwGetLongModePML4Ptr(PPGM pPGM)
{
#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    return (PX86PML4)PGMPOOL_PAGE_2_PTR_BY_PGM(pPGM, pPGM->CTX_SUFF(pShwPageCR3));
#else
# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    PX86PML4 pShwPml4;
    Assert(pPGM->HCPhysShwCR3 != 0 && pPGM->HCPhysShwCR3 != NIL_RTHCPHYS);
    int rc = PGM_HCPHYS_2_PTR_BY_PGM(pPGM, pPGM->HCPhysShwCR3, &pShwPml4);
    AssertRCReturn(rc, NULL);
    return pShwPml4;
# else
    Assert(pPGM->CTX_SUFF(pShwRoot));
    return (PX86PML4)pPGM->CTX_SUFF(pShwRoot);
# endif
#endif
}


/**
 * Gets the shadow page map level-4 entry for the specified address.
 *
 * @returns The entry.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 */
DECLINLINE(X86PML4E) pgmShwGetLongModePML4E(PPGM pPGM, RTGCPTR GCPtr)
{
    const unsigned iPml4 = ((RTGCUINTPTR64)GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    PX86PML4 pShwPml4 = pgmShwGetLongModePML4Ptr(pPGM);

    if (!pShwPml4)
    {
        X86PML4E ZeroPml4e = {0};
        return ZeroPml4e;
    }
    return pShwPml4->a[iPml4];
}


/**
 * Gets the pointer to the specified shadow page map level-4 entry.
 *
 * @returns Pointer to the entry, NULL if the shadow PML4 is not mapped.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   iPml4       The PML4 index.
 */
DECLINLINE(PX86PML4E) pgmShwGetLongModePML4EPtr(PPGM pPGM, unsigned int iPml4)
{
    PX86PML4 pShwPml4 = pgmShwGetLongModePML4Ptr(pPGM);
    if (!pShwPml4)
        return NULL;
    return &pShwPml4->a[iPml4];
}


/**
 * Gets the GUEST page directory pointer for the specified address.
 *
 * @returns Pointer to the page directory in question.
 * @returns NULL if the page directory is not present or on an invalid page.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 * @param   piPD        Receives the index into the returned page directory.
 */
DECLINLINE(PX86PDPAE) pgmGstGetLongModePDPtr(PPGM pPGM, RTGCPTR64 GCPtr, unsigned *piPD)
{
    PCX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
    const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    if (pGuestPml4->a[iPml4].n.u1Present)
    {
        PCX86PDPT pPdptTemp;
        int rc = PGM_GCPHYS_2_PTR_BY_PGM(pPGM, pGuestPml4->a[iPml4].u & X86_PML4E_PG_MASK, &pPdptTemp);
        AssertRCReturn(rc, NULL);

        const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
        if (pPdptTemp->a[iPdPt].n.u1Present)
        {
            PX86PDPAE pPD;
            rc = PGM_GCPHYS_2_PTR_BY_PGM(pPGM, pPdptTemp->a[iPdPt].u & X86_PDPE_PG_MASK, &pPD);
            AssertRCReturn(rc, NULL);

            *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
            return pPD;
        }
    }
    return NULL;
}

#endif /* !IN_RC */

/**
 * Checks if any of the specified page flags are set for the given page.
 *
 * @returns true if any of the flags are set.
 * @returns false if all the flags are clear.
 * @param   pPGM        PGM handle.
 * @param   GCPhys      The GC physical address.
 * @param   fFlags      The flags to check for.
 */
DECLINLINE(bool) pgmRamTestFlags(PPGM pPGM, RTGCPHYS GCPhys, uint64_t fFlags)
{
    PPGMPAGE pPage = pgmPhysGetPage(pPGM, GCPhys);
    return pPage
        && (pPage->HCPhys & fFlags) != 0; /** @todo PAGE FLAGS */
}


/**
 * Gets the page state for a physical handler.
 *
 * @returns The physical handler page state.
 * @param   pCur    The physical handler in question.
 */
DECLINLINE(unsigned) pgmHandlerPhysicalCalcState(PPGMPHYSHANDLER pCur)
{
    switch (pCur->enmType)
    {
        case PGMPHYSHANDLERTYPE_PHYSICAL_WRITE:
            return PGM_PAGE_HNDL_PHYS_STATE_WRITE;

        case PGMPHYSHANDLERTYPE_MMIO:
        case PGMPHYSHANDLERTYPE_PHYSICAL_ALL:
            return PGM_PAGE_HNDL_PHYS_STATE_ALL;

        default:
            AssertFatalMsgFailed(("Invalid type %d\n", pCur->enmType));
    }
}


/**
 * Gets the page state for a virtual handler.
 *
 * @returns The virtual handler page state.
 * @param   pCur    The virtual handler in question.
 * @remarks This should never be used on a hypervisor access handler.
 */
DECLINLINE(unsigned) pgmHandlerVirtualCalcState(PPGMVIRTHANDLER pCur)
{
    switch (pCur->enmType)
    {
        case PGMVIRTHANDLERTYPE_WRITE:
            return PGM_PAGE_HNDL_VIRT_STATE_WRITE;
        case PGMVIRTHANDLERTYPE_ALL:
            return PGM_PAGE_HNDL_VIRT_STATE_ALL;
        default:
            AssertFatalMsgFailed(("Invalid type %d\n", pCur->enmType));
    }
}


/**
 * Clears one physical page of a virtual handler.
 *
 * @param   pPGM    Pointer to the PGM instance.
 * @param   pCur    The virtual handler structure.
 * @param   iPage   The physical page index.
 *
 * @remark  Only used when PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL is being set, so no
 *          need to care about other handlers in the same page.
 */
DECLINLINE(void) pgmHandlerVirtualClearPage(PPGM pPGM, PPGMVIRTHANDLER pCur, unsigned iPage)
{
    const PPGMPHYS2VIRTHANDLER pPhys2Virt = &pCur->aPhysToVirt[iPage];

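    /*
     * Note: offNextAlias packs the chain state bits (PGMPHYS2VIRTHANDLER_IS_HEAD,
     * PGMPHYS2VIRTHANDLER_IN_TREE) together with a byte offset
     * (PGMPHYS2VIRTHANDLER_OFF_MASK) to the next handler aliasing the same
     * physical page, which is why the chain is walked by pointer arithmetic
     * below.
     */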
    /*
     * Remove the node from the tree (it's supposed to be in the tree if we get here!).
     */
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
    AssertReleaseMsg(pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_IN_TREE,
                     ("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
                      pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias));
#endif
    if (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_IS_HEAD)
    {
        /* We're the head of the alias chain. */
        PPGMPHYS2VIRTHANDLER pRemove = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysRemove(&pPGM->CTX_SUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key); NOREF(pRemove);
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
        AssertReleaseMsg(pRemove != NULL,
                         ("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
                          pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias));
        AssertReleaseMsg(pRemove == pPhys2Virt,
                         ("wanted: pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n"
                          "   got:    pRemove=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
                          pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias,
                          pRemove, pRemove->Core.Key, pRemove->Core.KeyLast, pRemove->offVirtHandler, pRemove->offNextAlias));
#endif
        if (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK)
        {
            /* Insert the next node in the alias chain into the tree. */
            PPGMPHYS2VIRTHANDLER pNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPhys2Virt + (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
            AssertReleaseMsg(pNext->offNextAlias & PGMPHYS2VIRTHANDLER_IN_TREE,
                             ("pNext=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
                              pNext, pNext->Core.Key, pNext->Core.KeyLast, pNext->offVirtHandler, pNext->offNextAlias));
#endif
            pNext->offNextAlias |= PGMPHYS2VIRTHANDLER_IS_HEAD;
            bool fRc = RTAvlroGCPhysInsert(&pPGM->CTX_SUFF(pTrees)->PhysToVirtHandlers, &pNext->Core);
            AssertRelease(fRc);
        }
    }
    else
    {
        /* Locate the previous node in the alias chain. */
        PPGMPHYS2VIRTHANDLER pPrev = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGet(&pPGM->CTX_SUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key);
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
        AssertReleaseMsg(pPrev != pPhys2Virt,
                         ("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} pPrev=%p\n",
                          pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias, pPrev));
#endif
        for (;;)
        {
            PPGMPHYS2VIRTHANDLER pNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPrev + (pPrev->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
            if (pNext == pPhys2Virt)
            {
                /* unlink. */
                LogFlow(("pgmHandlerVirtualClearPage: removed %p:{.offNextAlias=%#RX32} from alias chain. prev %p:{.offNextAlias=%#RX32} [%RGp-%RGp]\n",
                         pPhys2Virt, pPhys2Virt->offNextAlias, pPrev, pPrev->offNextAlias, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast));
                if (!(pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK))
                    pPrev->offNextAlias &= ~PGMPHYS2VIRTHANDLER_OFF_MASK;
                else
                {
                    PPGMPHYS2VIRTHANDLER pNewNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPhys2Virt + (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
                    pPrev->offNextAlias = ((intptr_t)pNewNext - (intptr_t)pPrev)
                                        | (pPrev->offNextAlias & ~PGMPHYS2VIRTHANDLER_OFF_MASK);
                }
                break;
            }

            /* next */
            if (pNext == pPrev)
            {
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
                AssertReleaseMsg(pNext != pPrev,
                                 ("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} pPrev=%p\n",
                                  pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias, pPrev));
#endif
                break;
            }
            pPrev = pNext;
        }
    }
    Log2(("PHYS2VIRT: Removing %RGp-%RGp %#RX32 %s\n",
          pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias, R3STRING(pCur->pszDesc)));
    pPhys2Virt->offNextAlias = 0;
    pPhys2Virt->Core.KeyLast = NIL_RTGCPHYS; /* require reinsert */

    /*
     * Clear the ram flags for this page.
     */
    PPGMPAGE pPage = pgmPhysGetPage(pPGM, pPhys2Virt->Core.Key);
    AssertReturnVoid(pPage);
    PGM_PAGE_SET_HNDL_VIRT_STATE(pPage, PGM_PAGE_HNDL_VIRT_STATE_NONE);
}


/**
 * Internal worker for finding an 'in-use' shadow page given its host physical address.
 *
 * @returns Pointer to the shadow page structure.
 * @param   pPool       The pool.
 * @param   HCPhys      The HC physical address of the shadow page.
 */
DECLINLINE(PPGMPOOLPAGE) pgmPoolGetPage(PPGMPOOL pPool, RTHCPHYS HCPhys)
{
    /*
     * Look up the page.
     */
    PPGMPOOLPAGE pPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, HCPhys & X86_PTE_PAE_PG_MASK);
    AssertFatalMsg(pPage && pPage->enmKind != PGMPOOLKIND_FREE, ("HCPhys=%RHp pPage=%p idx=%d\n", HCPhys, pPage, (pPage) ? pPage->idx : 0));
    return pPage;
}


/**
 * Internal worker for finding an 'in-use' shadow page given its pool index.
 *
 * @returns Pointer to the shadow page structure.
 * @param   pPool       The pool.
 * @param   idx         The pool page index.
 */
DECLINLINE(PPGMPOOLPAGE) pgmPoolGetPageByIdx(PPGMPOOL pPool, unsigned idx)
{
    AssertFatalMsg(idx >= PGMPOOL_IDX_FIRST && idx < pPool->cCurPages, ("idx=%d\n", idx));
    return &pPool->aPages[idx];
}


#ifdef PGMPOOL_WITH_GCPHYS_TRACKING
/**
 * Clear references to guest physical memory.
 *
 * @param   pPool       The pool.
 * @param   pPoolPage   The pool page.
 * @param   pPhysPage   The physical guest page tracking structure.
 */
DECLINLINE(void) pgmTrackDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPoolPage, PPGMPAGE pPhysPage)
{
    /*
     * Just deal with the simple case here.
     */
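    /*
     * The simple case is a single reference kept inline in the PGMPAGE
     * tracking data (cRefs == 1); once several pool pages reference the same
     * physical page, the references live in extent lists, which
     * pgmPoolTrackPhysExtDerefGCPhys takes care of below.
     */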
# ifdef LOG_ENABLED
    const unsigned uOrg = PGM_PAGE_GET_TRACKING(pPhysPage);
# endif
    const unsigned cRefs = PGM_PAGE_GET_TD_CREFS(pPhysPage);
    if (cRefs == 1)
    {
        Assert(pPoolPage->idx == PGM_PAGE_GET_TD_IDX(pPhysPage));
        PGM_PAGE_SET_TRACKING(pPhysPage, 0);
    }
    else
        pgmPoolTrackPhysExtDerefGCPhys(pPool, pPoolPage, pPhysPage);
    Log2(("pgmTrackDerefGCPhys: %x -> %x HCPhys=%RHp\n", uOrg, PGM_PAGE_GET_TRACKING(pPhysPage), PGM_PAGE_GET_HCPHYS(pPhysPage) ));
}
#endif /* PGMPOOL_WITH_GCPHYS_TRACKING */


#ifdef PGMPOOL_WITH_CACHE
/**
 * Moves the page to the head of the age list.
 *
 * This is done when the cached page is used in one way or another.
 *
 * @param   pPool       The pool.
 * @param   pPage       The cached page.
 */
DECLINLINE(void) pgmPoolCacheUsed(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
{
    /*
     * Move to the head of the age list.
     */
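    /*
     * The age list is a doubly linked LRU list threaded through the pool
     * pages by index (iAgePrev/iAgeNext): iAgeHead is the most recently used
     * page and iAgeTail the first candidate when the pool needs to reuse a
     * page.
     */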
    if (pPage->iAgePrev != NIL_PGMPOOL_IDX)
    {
        /* unlink */
        pPool->aPages[pPage->iAgePrev].iAgeNext = pPage->iAgeNext;
        if (pPage->iAgeNext != NIL_PGMPOOL_IDX)
            pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->iAgePrev;
        else
            pPool->iAgeTail = pPage->iAgePrev;

        /* insert at head */
        pPage->iAgePrev = NIL_PGMPOOL_IDX;
        pPage->iAgeNext = pPool->iAgeHead;
        Assert(pPage->iAgeNext != NIL_PGMPOOL_IDX); /* we would've already been head then */
        pPool->iAgeHead = pPage->idx;
        pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->idx;
    }
}
#endif /* PGMPOOL_WITH_CACHE */

#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
/**
 * Locks a page to prevent flushing (important for CR3 root pages or shadow PAE PD pages).
 *
 * @returns VBox status code.
 * @param   pPool       The pool.
 * @param   pPage       The pool page.
 */
DECLINLINE(int) pgmPoolLockPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
{
    Assert(!pPage->fLocked);
    pPage->fLocked = true;
    return VINF_SUCCESS;
}

/**
 * Unlocks a page to allow flushing again.
 *
 * @returns VBox status code.
 * @param   pPool       The pool.
 * @param   pPage       The pool page.
 */
DECLINLINE(int) pgmPoolUnlockPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
{
    Assert(pPage->fLocked);
    pPage->fLocked = false;
    return VINF_SUCCESS;
}

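/* Hypothetical usage sketch (the helper name is an assumption, not a real
 * PGM API): pin a pool page across an operation that may flush pool pages.
 *
 *     pgmPoolLockPage(pPool, pPage);
 *     rc = doSomethingThatMayFlush(pPool);    // assumed helper
 *     pgmPoolUnlockPage(pPool, pPage);
 */
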
/**
 * Checks if the page is locked (e.g. the active CR3 or one of the four PDs of a PAE PDPT).
 *
 * @returns true if the page is locked, false if not.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   pPage       The pool page.
 */
DECLINLINE(bool) pgmPoolIsPageLocked(PPGM pPGM, PPGMPOOLPAGE pPage)
{
    if (pPage->fLocked)
    {
        LogFlow(("pgmPoolIsPageLocked found root page %d\n", pPage->enmKind));
        if (pPage->cModifications)
            pPage->cModifications = 1; /* reset counter (can't use 0, or else it will be reinserted in the modified list) */
        return true;
    }
    return false;
}
#endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY */

/**
 * Tells if mappings are to be put into the shadow page table or not.
 *
 * @returns boolean result
 * @param   pPGM        Pointer to the PGM instance data.
 */
DECLINLINE(bool) pgmMapAreMappingsEnabled(PPGM pPGM)
{
#ifdef IN_RING0
    /* There are no mappings in VT-x and AMD-V mode. */
    Assert(pPGM->fDisableMappings);
    return false;
#else
    return !pPGM->fDisableMappings;
#endif
}

/** @} */

#endif /* !___PGMInternal_h */
