/* $Id: PGMInternal.h 17622 2009-03-10 12:32:23Z vboxsync $ */
/** @file
 * PGM - Internal header file.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

#ifndef ___PGMInternal_h
#define ___PGMInternal_h

#include <VBox/cdefs.h>
#include <VBox/types.h>
#include <VBox/err.h>
#include <VBox/stam.h>
#include <VBox/param.h>
#include <VBox/vmm.h>
#include <VBox/mm.h>
#include <VBox/pdmcritsect.h>
#include <VBox/pdmapi.h>
#include <VBox/dis.h>
#include <VBox/dbgf.h>
#include <VBox/log.h>
#include <VBox/gmm.h>
#include <VBox/hwaccm.h>
#include <iprt/avl.h>
#include <iprt/assert.h>
#include <iprt/critsect.h>


/** @defgroup grp_pgm_int Internals
 * @ingroup grp_pgm
 * @internal
 * @{
 */


/** @name PGM Compile Time Config
 * @{
 */

/**
 * Resolve page out-of-sync issues inside the Guest Context (in PGMGC.cpp).
 * Comment it out if it breaks something.
 */
#define PGM_OUT_OF_SYNC_IN_GC

/**
 * Check and skip global PDEs for non-global flushes.
 */
#define PGM_SKIP_GLOBAL_PAGEDIRS_ON_NONGLOBAL_FLUSH

/**
 * Sync N pages instead of a whole page table.
 */
#define PGM_SYNC_N_PAGES

/**
 * Number of pages to sync during a page fault.
 *
 * When PGMPOOL_WITH_GCPHYS_TRACKING is enabled, using high values here
 * causes a lot of unnecessary extents and is also slower than taking more \#PFs.
 */
#define PGM_SYNC_NR_PAGES               8

/**
 * Number of PGMPhysRead/Write cache entries (must be <= sizeof(uint64_t) * 8,
 * the number of bits in the valid-entry bitmap).
 */
#define PGM_MAX_PHYSCACHE_ENTRIES       64
#define PGM_MAX_PHYSCACHE_ENTRIES_MASK  (PGM_MAX_PHYSCACHE_ENTRIES - 1)

/**
 * Enable caching of PGMR3PhysRead/WriteByte/Word/Dword.
 */
#define PGM_PHYSMEMACCESS_CACHING

/** @def PGMPOOL_WITH_CACHE
 * Enable aggressive caching using the page pool.
 *
 * This requires PGMPOOL_WITH_USER_TRACKING and PGMPOOL_WITH_MONITORING.
 */
#define PGMPOOL_WITH_CACHE

/** @def PGMPOOL_WITH_MIXED_PT_CR3
 * When defined, we'll deal with 'uncacheable' pages.
 */
#ifdef PGMPOOL_WITH_CACHE
# define PGMPOOL_WITH_MIXED_PT_CR3
#endif

/** @def PGMPOOL_WITH_MONITORING
 * Monitor the guest pages which are shadowed.
 * When this is enabled, PGMPOOL_WITH_CACHE or PGMPOOL_WITH_GCPHYS_TRACKING must
 * be enabled as well.
 * @remark doesn't really work without caching now. (Mixed PT/CR3 change.)
 */
#ifdef PGMPOOL_WITH_CACHE
# define PGMPOOL_WITH_MONITORING
#endif

/** @def PGMPOOL_WITH_GCPHYS_TRACKING
 * Tracking of shadow pages mapping guest physical pages.
 *
 * This is very expensive; the current cache prototype is trying to figure out
 * whether it will be acceptable with an aggressive caching policy.
 */
#if defined(PGMPOOL_WITH_CACHE) || defined(PGMPOOL_WITH_MONITORING)
# define PGMPOOL_WITH_GCPHYS_TRACKING
#endif

/** @def PGMPOOL_WITH_USER_TRACKING
 * Tracking users of shadow pages. This is required for the linking of shadow page
 * tables and physical guest addresses.
 */
#if defined(PGMPOOL_WITH_GCPHYS_TRACKING) || defined(PGMPOOL_WITH_CACHE) || defined(PGMPOOL_WITH_MONITORING)
# define PGMPOOL_WITH_USER_TRACKING
#endif

/** @def PGMPOOL_CFG_MAX_GROW
 * The maximum number of pages to add to the pool in one go.
 */
#define PGMPOOL_CFG_MAX_GROW            (_256K >> PAGE_SHIFT)

/** @def VBOX_STRICT_PGM_HANDLER_VIRTUAL
 * Enables some extra assertions for virtual handlers (mainly phys2virt related).
 */
#ifdef VBOX_STRICT
# define VBOX_STRICT_PGM_HANDLER_VIRTUAL
#endif

#ifdef VBOX_WITH_NEW_PHYS_CODE
/** @def VBOX_WITH_NEW_LAZY_PAGE_ALLOC
 * Enables the experimental lazy page allocation code. */
/*# define VBOX_WITH_NEW_LAZY_PAGE_ALLOC */
#endif
/** @} */


/** @name PDPT and PML4 flags.
 * These are placed in the three bits available for system programs in
 * the PDPT and PML4 entries.
 * @{ */
/** The entry is a permanent one and it must always be present.
 * Never free such an entry. */
#define PGM_PLXFLAGS_PERMANENT          RT_BIT_64(10)
/** Mapping (hypervisor allocated pagetable). */
#define PGM_PLXFLAGS_MAPPING            RT_BIT_64(11)
/** @} */

/** @name Page directory flags.
 * These are placed in the three bits available for system programs in
 * the page directory entries.
 * @{ */
/** Mapping (hypervisor allocated pagetable). */
#define PGM_PDFLAGS_MAPPING             RT_BIT_64(10)
/** Made read-only to facilitate dirty bit tracking. */
#define PGM_PDFLAGS_TRACK_DIRTY         RT_BIT_64(11)
/** @} */

/** @name Page flags.
 * These are placed in the three bits available for system programs in
 * the page entries.
 * @{ */
/** Made read-only to facilitate dirty bit tracking. */
#define PGM_PTFLAGS_TRACK_DIRTY         RT_BIT_64(9)

#ifndef PGM_PTFLAGS_CSAM_VALIDATED
/** Scanned and approved by CSAM (tm).
 * NOTE: Must be identical to the one defined in CSAMInternal.h!!
 * @todo Move PGM_PTFLAGS_* and PGM_PDFLAGS_* to VBox/pgm.h. */
#define PGM_PTFLAGS_CSAM_VALIDATED      RT_BIT_64(11)
#endif

/** @} */

/** @name Defines used to indicate the shadow and guest paging in the templates.
 * @{ */
#define PGM_TYPE_REAL                   1
#define PGM_TYPE_PROT                   2
#define PGM_TYPE_32BIT                  3
#define PGM_TYPE_PAE                    4
#define PGM_TYPE_AMD64                  5
#define PGM_TYPE_NESTED                 6
#define PGM_TYPE_EPT                    7
#define PGM_TYPE_MAX                    PGM_TYPE_EPT
/** @} */

/** Macro for checking if the guest is using paging.
 * @param   uGstType    PGM_TYPE_*
 * @param   uShwType    PGM_TYPE_*
 * @remark  ASSUMES certain order of the PGM_TYPE_* values.
 */
#define PGM_WITH_PAGING(uGstType, uShwType) \
    (   (uGstType) >= PGM_TYPE_32BIT \
     && (uShwType) != PGM_TYPE_NESTED \
     && (uShwType) != PGM_TYPE_EPT)

/** Macro for checking if the guest supports the NX bit.
 * @param   uGstType    PGM_TYPE_*
 * @param   uShwType    PGM_TYPE_*
 * @remark  ASSUMES certain order of the PGM_TYPE_* values.
 */
#define PGM_WITH_NX(uGstType, uShwType) \
    (   (uGstType) >= PGM_TYPE_PAE \
     && (uShwType) != PGM_TYPE_NESTED \
     && (uShwType) != PGM_TYPE_EPT)

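/* Illustrative sketch (not part of the original header): the PGMAll*.h
 * template code is instantiated once per (guest, shadow) mode pair, and
 * these predicates gate the paging-specific paths at compile time.
 * PGM_GST_TYPE and PGM_SHW_TYPE are the per-instantiation defines; the
 * exact guards shown here are an assumption for illustration only. */
#if 0
# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
    /* Guest paging is active: walk the guest page tables here. */
# endif
# if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE)
    /* The guest may use the NX bit: propagate it into the shadow PTEs. */
# endif
#endif
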

/** @def PGM_HCPHYS_2_PTR
 * Maps a HC physical page pool address to a virtual address.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 * @param   HCPhys  The HC physical address to map to a virtual one.
 * @param   ppv     Where to store the virtual address. No need to cast this.
 *
 * @remark  In GC this uses PGMGCDynMapHCPage(), so it will consume one of the
 *          small page window entries employed by that function. Be careful.
 * @remark  There is no need to assert on the result.
 */
#ifdef IN_RC
# define PGM_HCPHYS_2_PTR(pVM, HCPhys, ppv) \
    PGMDynMapHCPage(pVM, HCPhys, (void **)(ppv))
#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
# define PGM_HCPHYS_2_PTR(pVM, HCPhys, ppv) \
    pgmR0DynMapHCPageInlined(&(pVM)->pgm.s, HCPhys, (void **)(ppv))
#else
# define PGM_HCPHYS_2_PTR(pVM, HCPhys, ppv) \
    MMPagePhys2PageEx(pVM, HCPhys, (void **)(ppv))
#endif

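/* Illustrative usage sketch (not part of the original header): mapping a
 * shadow page-table page by its HC physical address. The variable names
 * (HCPhysPT, iPte) are assumptions. Per the remark above the result needs
 * no assertion; a status check is shown anyway for completeness. */
#if 0
PX86PT pPT;
int rc = PGM_HCPHYS_2_PTR(pVM, HCPhysPT, &pPT); /* ppv needs no cast */
if (RT_SUCCESS(rc))
    X86PGUINT u = pPT->a[iPte].u;
#endif
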
/** @def PGM_HCPHYS_2_PTR_BY_PGM
 * Maps a HC physical page pool address to a virtual address.
 *
 * @returns VBox status code.
 * @param   pPGM    The PGM instance data.
 * @param   HCPhys  The HC physical address to map to a virtual one.
 * @param   ppv     Where to store the virtual address. No need to cast this.
 *
 * @remark  In GC this uses PGMGCDynMapHCPage(), so it will consume one of the
 *          small page window entries employed by that function. Be careful.
 * @remark  There is no need to assert on the result.
 */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
# define PGM_HCPHYS_2_PTR_BY_PGM(pPGM, HCPhys, ppv) \
    pgmR0DynMapHCPageInlined(pPGM, HCPhys, (void **)(ppv))
#else
# define PGM_HCPHYS_2_PTR_BY_PGM(pPGM, HCPhys, ppv) \
    PGM_HCPHYS_2_PTR(PGM2VM(pPGM), HCPhys, (void **)(ppv))
#endif

/** @def PGM_GCPHYS_2_PTR
 * Maps a GC physical page address to a virtual address.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 * @param   GCPhys  The GC physical address to map to a virtual one.
 * @param   ppv     Where to store the virtual address. No need to cast this.
 *
 * @remark  In GC this uses PGMGCDynMapGCPage(), so it will consume one of the
 *          small page window entries employed by that function. Be careful.
 * @remark  There is no need to assert on the result.
 */
#ifdef IN_RC
# define PGM_GCPHYS_2_PTR(pVM, GCPhys, ppv) \
    PGMDynMapGCPage(pVM, GCPhys, (void **)(ppv))
#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
# define PGM_GCPHYS_2_PTR(pVM, GCPhys, ppv) \
    pgmR0DynMapGCPageInlined(&(pVM)->pgm.s, GCPhys, (void **)(ppv))
#else
# define PGM_GCPHYS_2_PTR(pVM, GCPhys, ppv) \
    PGMPhysGCPhys2R3Ptr(pVM, GCPhys, 1 /* one page only */, (PRTR3PTR)(ppv)) /** @todo this isn't asserting, use PGMRamGCPhys2HCPtr! */
#endif

/** @def PGM_GCPHYS_2_PTR_BY_PGM
 * Maps a GC physical page address to a virtual address.
 *
 * @returns VBox status code.
 * @param   pPGM    Pointer to the PGM instance data.
 * @param   GCPhys  The GC physical address to map to a virtual one.
 * @param   ppv     Where to store the virtual address. No need to cast this.
 *
 * @remark  In GC this uses PGMGCDynMapGCPage(), so it will consume one of the
 *          small page window entries employed by that function. Be careful.
 * @remark  There is no need to assert on the result.
 */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
# define PGM_GCPHYS_2_PTR_BY_PGM(pPGM, GCPhys, ppv) \
    pgmR0DynMapGCPageInlined(pPGM, GCPhys, (void **)(ppv))
#else
# define PGM_GCPHYS_2_PTR_BY_PGM(pPGM, GCPhys, ppv) \
    PGM_GCPHYS_2_PTR(PGM2VM(pPGM), GCPhys, ppv)
#endif

/** @def PGM_GCPHYS_2_PTR_EX
 * Maps an unaligned GC physical page address to a virtual address.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 * @param   GCPhys  The GC physical address to map to a virtual one.
 * @param   ppv     Where to store the virtual address. No need to cast this.
 *
 * @remark  In GC this uses PGMGCDynMapGCPage(), so it will consume one of the
 *          small page window entries employed by that function. Be careful.
 * @remark  There is no need to assert on the result.
 */
#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
# define PGM_GCPHYS_2_PTR_EX(pVM, GCPhys, ppv) \
    PGMDynMapGCPageOff(pVM, GCPhys, (void **)(ppv))
#else
# define PGM_GCPHYS_2_PTR_EX(pVM, GCPhys, ppv) \
    PGMPhysGCPhys2R3Ptr(pVM, GCPhys, 1 /* one page only */, (PRTR3PTR)(ppv)) /** @todo this isn't asserting, use PGMRamGCPhys2HCPtr! */
#endif

/** @def PGM_INVL_PG
 * Invalidates a page when in GC; in HC it is forwarded to HWACCMInvalidatePage.
 *
 * @param   GCVirt  The virtual address of the page to invalidate.
 */
#ifdef IN_RC
# define PGM_INVL_PG(GCVirt)        ASMInvalidatePage((void *)(GCVirt))
#elif defined(IN_RING0)
# define PGM_INVL_PG(GCVirt)        HWACCMInvalidatePage(pVM, (RTGCPTR)(GCVirt))
#else
# define PGM_INVL_PG(GCVirt)        HWACCMInvalidatePage(pVM, (RTGCPTR)(GCVirt))
#endif

/** @def PGM_INVL_BIG_PG
 * Invalidates a 4MB page directory entry when in GC; in HC it flushes the TLB
 * via HWACCMFlushTLB.
 *
 * @param   GCVirt  The virtual address within the page directory to invalidate.
 */
#ifdef IN_RC
# define PGM_INVL_BIG_PG(GCVirt)    ASMReloadCR3()
#elif defined(IN_RING0)
# define PGM_INVL_BIG_PG(GCVirt)    HWACCMFlushTLB(pVM)
#else
# define PGM_INVL_BIG_PG(GCVirt)    HWACCMFlushTLB(pVM)
#endif

/** @def PGM_INVL_GUEST_TLBS()
 * Invalidates all guest TLBs.
 */
#ifdef IN_RC
# define PGM_INVL_GUEST_TLBS()      ASMReloadCR3()
#elif defined(IN_RING0)
# define PGM_INVL_GUEST_TLBS()      HWACCMFlushTLB(pVM)
#else
# define PGM_INVL_GUEST_TLBS()      HWACCMFlushTLB(pVM)
#endif

/** Size of the GCPtrConflict array in PGMMAPPING. */
#define PGMMAPPING_CONFLICT_MAX     8

/**
 * Structure for tracking GC Mappings.
 *
 * This structure is used by a linked list in both GC and HC.
 */
typedef struct PGMMAPPING
{
    /** Pointer to next entry. */
    R3PTRTYPE(struct PGMMAPPING *)  pNextR3;
    /** Pointer to next entry. */
    R0PTRTYPE(struct PGMMAPPING *)  pNextR0;
    /** Pointer to next entry. */
    RCPTRTYPE(struct PGMMAPPING *)  pNextRC;
    /** Indicates whether this entry is finalized. */
    bool                            fFinalized;
    /** Start Virtual address. */
    RTGCPTR                         GCPtr;
    /** Last Virtual address (inclusive). */
    RTGCPTR                         GCPtrLast;
    /** Range size (bytes). */
    RTGCPTR                         cb;
    /** Pointer to relocation callback function. */
    R3PTRTYPE(PFNPGMRELOCATE)       pfnRelocate;
    /** User argument to the callback. */
    R3PTRTYPE(void *)               pvUser;
    /** Mapping description / name. For easing debugging. */
    R3PTRTYPE(const char *)         pszDesc;
    /** Last 8 addresses that caused conflicts. */
    RTGCPTR                         GCPtrConflict[PGMMAPPING_CONFLICT_MAX];
    /** Number of conflicts for this hypervisor mapping. */
    uint32_t                        cConflicts;
    /** Number of page tables. */
    uint32_t                        cPTs;

    /** Array of page table mapping data. Each entry
     * describes one page table. The array can be longer
     * than the declared length.
     */
    struct
    {
        /** The HC physical address of the page table. */
        RTHCPHYS                    HCPhysPT;
        /** The HC physical address of the first PAE page table. */
        RTHCPHYS                    HCPhysPaePT0;
        /** The HC physical address of the second PAE page table. */
        RTHCPHYS                    HCPhysPaePT1;
        /** The HC virtual address of the 32-bit page table. */
        R3PTRTYPE(PX86PT)           pPTR3;
        /** The HC virtual address of the two PAE page tables (i.e. 1024 entries instead of 512). */
        R3PTRTYPE(PX86PTPAE)        paPaePTsR3;
        /** The GC virtual address of the 32-bit page table. */
        RCPTRTYPE(PX86PT)           pPTRC;
        /** The GC virtual address of the two PAE page tables. */
        RCPTRTYPE(PX86PTPAE)        paPaePTsRC;
        /** The R0 virtual address of the 32-bit page table. */
        R0PTRTYPE(PX86PT)           pPTR0;
        /** The R0 virtual address of the two PAE page tables. */
        R0PTRTYPE(PX86PTPAE)        paPaePTsR0;
    } aPTs[1];
} PGMMAPPING;
/** Pointer to structure for tracking GC Mappings. */
typedef struct PGMMAPPING *PPGMMAPPING;

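/* Illustrative sketch (not part of the original header): scanning the
 * mapping list for the entry covering a guest address. Using pMappingsR3
 * as the R3 list head is an assumption matching the pNextR3/R0/RC members
 * above. */
#if 0
for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
    if (GCPtr >= pCur->GCPtr && GCPtr <= pCur->GCPtrLast)
        return pCur;    /* GCPtrLast is inclusive */
#endif
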

/**
 * Physical page access handler structure.
 *
 * This is used to keep track of physical address ranges
 * which are being monitored in some kind of way.
 */
typedef struct PGMPHYSHANDLER
{
    AVLROGCPHYSNODECORE             Core;
    /** Access type. */
    PGMPHYSHANDLERTYPE              enmType;
    /** Number of pages to update. */
    uint32_t                        cPages;
    /** Pointer to R3 callback function. */
    R3PTRTYPE(PFNPGMR3PHYSHANDLER)  pfnHandlerR3;
    /** User argument for R3 handlers. */
    R3PTRTYPE(void *)               pvUserR3;
    /** Pointer to R0 callback function. */
    R0PTRTYPE(PFNPGMR0PHYSHANDLER)  pfnHandlerR0;
    /** User argument for R0 handlers. */
    R0PTRTYPE(void *)               pvUserR0;
    /** Pointer to RC callback function. */
    RCPTRTYPE(PFNPGMRCPHYSHANDLER)  pfnHandlerRC;
    /** User argument for RC handlers. */
    RCPTRTYPE(void *)               pvUserRC;
    /** Description / Name. For easing debugging. */
    R3PTRTYPE(const char *)         pszDesc;
#ifdef VBOX_WITH_STATISTICS
    /** Profiling of this handler. */
    STAMPROFILE                     Stat;
#endif
} PGMPHYSHANDLER;
/** Pointer to a physical page access handler structure. */
typedef PGMPHYSHANDLER *PPGMPHYSHANDLER;


/**
 * Cache node for the physical addresses covered by a virtual handler.
 */
typedef struct PGMPHYS2VIRTHANDLER
{
    /** Core node for the tree based on physical ranges. */
    AVLROGCPHYSNODECORE             Core;
    /** Offset from this struct to the PGMVIRTHANDLER structure. */
    int32_t                         offVirtHandler;
    /** Offset of the next alias relative to this one.
     * Bit 0 is used for indicating whether we're in the tree.
     * Bit 1 is used for indicating that we're the head node.
     */
    int32_t                         offNextAlias;
} PGMPHYS2VIRTHANDLER;
/** Pointer to a phys to virtual handler structure. */
typedef PGMPHYS2VIRTHANDLER *PPGMPHYS2VIRTHANDLER;

/** The bit in PGMPHYS2VIRTHANDLER::offNextAlias used to indicate that the
 * node is in the tree. */
#define PGMPHYS2VIRTHANDLER_IN_TREE     RT_BIT(0)
/** The bit in PGMPHYS2VIRTHANDLER::offNextAlias used to indicate that the
 * node is the head of an alias chain.
 * The PGMPHYS2VIRTHANDLER_IN_TREE is always set if this bit is set. */
#define PGMPHYS2VIRTHANDLER_IS_HEAD     RT_BIT(1)
/** The mask to apply to PGMPHYS2VIRTHANDLER::offNextAlias to get the offset. */
#define PGMPHYS2VIRTHANDLER_OFF_MASK    (~(int32_t)3)

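/* Illustrative sketch (not part of the original header): following an
 * alias chain. offNextAlias and offVirtHandler are self-relative byte
 * offsets, so the two flag bits must be masked off before adding. */
#if 0
PPGMPHYS2VIRTHANDLER pCur = pHead;
while (pCur->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK)
    pCur = (PPGMPHYS2VIRTHANDLER)(  (intptr_t)pCur
                                  + (pCur->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
PPGMVIRTHANDLER pVirt = (PPGMVIRTHANDLER)((intptr_t)pCur + pCur->offVirtHandler);
#endif
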

/**
 * Virtual page access handler structure.
 *
 * This is used to keep track of virtual address ranges
 * which are being monitored in some kind of way.
 */
typedef struct PGMVIRTHANDLER
{
    /** Core node for the tree based on virtual ranges. */
    AVLROGCPTRNODECORE                  Core;
    /** Size of the range (in bytes). */
    RTGCPTR                             cb;
    /** Number of cache pages. */
    uint32_t                            cPages;
    /** Access type. */
    PGMVIRTHANDLERTYPE                  enmType;
    /** Pointer to the RC callback function. */
    RCPTRTYPE(PFNPGMRCVIRTHANDLER)      pfnHandlerRC;
#if HC_ARCH_BITS == 64
    RTRCPTR                             padding;
#endif
    /** Pointer to the R3 callback function for invalidation. */
    R3PTRTYPE(PFNPGMR3VIRTINVALIDATE)   pfnInvalidateR3;
    /** Pointer to the R3 callback function. */
    R3PTRTYPE(PFNPGMR3VIRTHANDLER)      pfnHandlerR3;
    /** Description / Name. For easing debugging. */
    R3PTRTYPE(const char *)             pszDesc;
#ifdef VBOX_WITH_STATISTICS
    /** Profiling of this handler. */
    STAMPROFILE                         Stat;
#endif
    /** Array of cached physical addresses for the monitored range. */
    PGMPHYS2VIRTHANDLER                 aPhysToVirt[HC_ARCH_BITS == 32 ? 1 : 2];
} PGMVIRTHANDLER;
/** Pointer to a virtual page access handler structure. */
typedef PGMVIRTHANDLER *PPGMVIRTHANDLER;


/**
 * Page type.
 * @remarks This enum has to fit in a 3-bit field (see PGMPAGE::u3Type).
 * @todo convert to \#defines.
 */
typedef enum PGMPAGETYPE
{
    /** The usual invalid zero entry. */
    PGMPAGETYPE_INVALID = 0,
    /** RAM page. (RWX) */
    PGMPAGETYPE_RAM,
    /** MMIO2 page. (RWX) */
    PGMPAGETYPE_MMIO2,
    /** MMIO2 page aliased over an MMIO page. (RWX)
     * See PGMHandlerPhysicalPageAlias(). */
    PGMPAGETYPE_MMIO2_ALIAS_MMIO,
    /** Shadowed ROM. (RWX) */
    PGMPAGETYPE_ROM_SHADOW,
    /** ROM page. (R-X) */
    PGMPAGETYPE_ROM,
    /** MMIO page. (---) */
    PGMPAGETYPE_MMIO,
    /** End of valid entries. */
    PGMPAGETYPE_END
} PGMPAGETYPE;
AssertCompile(PGMPAGETYPE_END <= 7);

/** @name Page type predicates.
 * @{ */
#define PGMPAGETYPE_IS_READABLE(type)   ( (type) <= PGMPAGETYPE_ROM )
#define PGMPAGETYPE_IS_WRITEABLE(type)  ( (type) <= PGMPAGETYPE_ROM_SHADOW )
#define PGMPAGETYPE_IS_RWX(type)        ( (type) <= PGMPAGETYPE_ROM_SHADOW )
#define PGMPAGETYPE_IS_ROX(type)        ( (type) == PGMPAGETYPE_ROM )
#define PGMPAGETYPE_IS_NP(type)         ( (type) == PGMPAGETYPE_MMIO )
/** @} */

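/* Illustrative sketch (not part of the original header): the predicates
 * above rely on the declaration order asserted by AssertCompile, e.g.
 * every type up to and including PGMPAGETYPE_ROM_SHADOW is writable.
 * A typical use might look like: */
#if 0
if (PGMPAGETYPE_IS_WRITEABLE((PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage)))
    /* the page may be mapped writable in the shadow page tables */;
#endif
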

/**
 * A Physical Guest Page tracking structure.
 *
 * The format of this structure is complicated because we have to fit a lot
 * of information into as few bits as possible. The format is also subject
 * to change (there is one coming up soon), which means that we'll be using
 * the PGM_PAGE_GET_*, PGM_PAGE_IS_* and PGM_PAGE_SET_* macros for *all*
 * accesses to the structure.
 */
typedef struct PGMPAGE
{
    /** The physical address and a whole lot of other stuff. All bits are used! */
#ifdef VBOX_WITH_NEW_PHYS_CODE
    RTHCPHYS    HCPhysX;
#else
    RTHCPHYS    HCPhys;
#define HCPhysX HCPhys /**< Temporary while in the process of eliminating direct access to PGMPAGE::HCPhys. */
#endif
    /** The page state. */
    uint32_t    u2StateX : 2;
    /** Flag indicating that a write monitored page was written to when set. */
    uint32_t    fWrittenToX : 1;
    /** For later. */
    uint32_t    fSomethingElse : 1;
    /** The Page ID.
     * @todo Merge with HCPhysX once we've liberated HCPhysX of its stuff.
     *       The HCPhysX will then be 100% static. */
    uint32_t    idPageX : 28;
    /** The page type (PGMPAGETYPE). */
    uint32_t    u3Type : 3;
    /** The physical handler state (PGM_PAGE_HNDL_PHYS_STATE*) */
    uint32_t    u2HandlerPhysStateX : 2;
    /** The virtual handler state (PGM_PAGE_HNDL_VIRT_STATE*) */
    uint32_t    u2HandlerVirtStateX : 2;
    uint32_t    u29B : 25;
} PGMPAGE;
AssertCompileSize(PGMPAGE, 16);
/** Pointer to a physical guest page. */
typedef PGMPAGE *PPGMPAGE;
/** Pointer to a const physical guest page. */
typedef const PGMPAGE *PCPGMPAGE;
/** Pointer to a physical guest page pointer. */
typedef PPGMPAGE *PPPGMPAGE;


/**
 * Clears the page structure.
 * @param   pPage   Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_CLEAR(pPage) \
    do { \
        (pPage)->HCPhysX        = 0; \
        (pPage)->u2StateX       = 0; \
        (pPage)->fWrittenToX    = 0; \
        (pPage)->fSomethingElse = 0; \
        (pPage)->idPageX        = 0; \
        (pPage)->u3Type         = 0; \
        (pPage)->u29B           = 0; \
    } while (0)

/**
 * Initializes the page structure.
 * @param   pPage   Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_INIT(pPage, _HCPhys, _idPage, _uType, _uState) \
    do { \
        (pPage)->HCPhysX        = (_HCPhys); \
        (pPage)->u2StateX       = (_uState); \
        (pPage)->fWrittenToX    = 0; \
        (pPage)->fSomethingElse = 0; \
        (pPage)->idPageX        = (_idPage); \
        /*(pPage)->u3Type      = (_uType); - later */ \
        PGM_PAGE_SET_TYPE(pPage, _uType); \
        (pPage)->u29B           = 0; \
    } while (0)

/**
 * Initializes the page structure of a ZERO page.
 * @param   pPage   Pointer to the physical guest page tracking structure.
 */
#ifdef VBOX_WITH_NEW_PHYS_CODE
# define PGM_PAGE_INIT_ZERO(pPage, pVM, _uType) \
    PGM_PAGE_INIT(pPage, (pVM)->pgm.s.HCPhysZeroPg, NIL_GMM_PAGEID, (_uType), PGM_PAGE_STATE_ZERO)
#else
# define PGM_PAGE_INIT_ZERO(pPage, pVM, _uType) \
    PGM_PAGE_INIT(pPage, 0, NIL_GMM_PAGEID, (_uType), PGM_PAGE_STATE_ZERO)
#endif
/** Temporary hack. Replaced by PGM_PAGE_INIT_ZERO once the old code is kicked out. */
# define PGM_PAGE_INIT_ZERO_REAL(pPage, pVM, _uType) \
    PGM_PAGE_INIT(pPage, (pVM)->pgm.s.HCPhysZeroPg, NIL_GMM_PAGEID, (_uType), PGM_PAGE_STATE_ZERO)

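/* Illustrative usage sketch (not part of the original header): pages of a
 * freshly registered RAM range would typically start out as ZERO pages of
 * type RAM; the loop below is an assumption for illustration only. */
#if 0
for (uint32_t iPage = 0; iPage < cPages; iPage++)
    PGM_PAGE_INIT_ZERO(&pRam->aPages[iPage], pVM, PGMPAGETYPE_RAM);
#endif
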

/** @name The Page state, PGMPAGE::u2StateX.
 * @{ */
/** The zero page.
 * This is a per-VM page that's never ever mapped writable. */
#define PGM_PAGE_STATE_ZERO             0
/** An allocated page.
 * This is a per-VM page allocated from the page pool (or wherever
 * we get MMIO2 pages from if the type is MMIO2).
 */
#define PGM_PAGE_STATE_ALLOCATED        1
/** An allocated page that's being monitored for writes.
 * The shadow page table mappings are read-only. When a write occurs, the
 * fWrittenTo member is set, the page is remapped read-write and the state
 * moved back to allocated. */
#define PGM_PAGE_STATE_WRITE_MONITORED  2
/** The page is shared, aka. copy-on-write.
 * This is a page that's shared with other VMs. */
#define PGM_PAGE_STATE_SHARED           3
/** @} */


/**
 * Gets the page state.
 * @returns page state (PGM_PAGE_STATE_*).
 * @param   pPage   Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_STATE(pPage)       ( (pPage)->u2StateX )

/**
 * Sets the page state.
 * @param   pPage   Pointer to the physical guest page tracking structure.
 * @param   _uState The new page state.
 */
#define PGM_PAGE_SET_STATE(pPage, _uState) \
    do { (pPage)->u2StateX = (_uState); } while (0)


/**
 * Gets the host physical address of the guest page.
 * @returns host physical address (RTHCPHYS).
 * @param   pPage   Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_HCPHYS(pPage)      ( (pPage)->HCPhysX & UINT64_C(0x0000fffffffff000) )

/**
 * Sets the host physical address of the guest page.
 * @param   pPage   Pointer to the physical guest page tracking structure.
 * @param   _HCPhys The new host physical address.
 */
#define PGM_PAGE_SET_HCPHYS(pPage, _HCPhys) \
    do { (pPage)->HCPhysX = (((pPage)->HCPhysX) & UINT64_C(0xffff000000000fff)) \
                          | ((_HCPhys) & UINT64_C(0x0000fffffffff000)); } while (0)

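/* Illustrative note (not part of the original header): per the masks above,
 * HCPhysX packs a page-aligned host physical address into bits 12..47;
 * bits 0..11 and 48..63 carry other data, which the setter preserves. */
#if 0
RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(pPage);   /* page aligned, bits 12..47 */
PGM_PAGE_SET_HCPHYS(pPage, HCPhysNew);          /* the packed extra bits survive */
#endif
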
/**
 * Get the Page ID.
 * @returns The Page ID; NIL_GMM_PAGEID if it's a ZERO page.
 * @param   pPage   Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_PAGEID(pPage)      ( (pPage)->idPageX )
/* later:
#define PGM_PAGE_GET_PAGEID(pPage)      (  ((uint32_t)(pPage)->HCPhysX >> (48 - 12))
                                         | ((uint32_t)(pPage)->HCPhysX & 0xfff) )
*/
/**
 * Sets the Page ID.
 * @param   pPage   Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_SET_PAGEID(pPage, _idPage)  do { (pPage)->idPageX = (_idPage); } while (0)
/* later:
#define PGM_PAGE_SET_PAGEID(pPage, _idPage)  do { (pPage)->HCPhysX = (((pPage)->HCPhysX) & UINT64_C(0x0000fffffffff000)) \
                                                                   | ((_idPage) & 0xfff) \
                                                                   | (((_idPage) & 0x0ffff000) << (48-12)); } while (0)
*/

/**
 * Get the Chunk ID.
 * @returns The Chunk ID; NIL_GMM_CHUNKID if it's a ZERO page.
 * @param   pPage   Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_CHUNKID(pPage)     ( (pPage)->idPageX >> GMM_CHUNKID_SHIFT )
/* later:
#if GMM_CHUNKID_SHIFT == 12
# define PGM_PAGE_GET_CHUNKID(pPage)    ( (uint32_t)((pPage)->HCPhysX >> 48) )
#elif GMM_CHUNKID_SHIFT > 12
# define PGM_PAGE_GET_CHUNKID(pPage)    ( (uint32_t)((pPage)->HCPhysX >> (48 + (GMM_CHUNKID_SHIFT - 12)) )
#elif GMM_CHUNKID_SHIFT < 12
# define PGM_PAGE_GET_CHUNKID(pPage)    (  ( (uint32_t)((pPage)->HCPhysX >> 48) << (12 - GMM_CHUNKID_SHIFT) ) \
                                         | ( (uint32_t)((pPage)->HCPhysX & 0xfff) >> GMM_CHUNKID_SHIFT ) )
#else
# error "GMM_CHUNKID_SHIFT isn't defined or something."
#endif
*/

/**
 * Get the index of the page within the allocation chunk.
 * @returns The page index.
 * @param   pPage   Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_PAGE_IN_CHUNK(pPage)   ( (pPage)->idPageX & GMM_PAGEID_IDX_MASK )
/* later:
#if GMM_CHUNKID_SHIFT <= 12
# define PGM_PAGE_GET_PAGE_IN_CHUNK(pPage)  ( (uint32_t)((pPage)->HCPhysX & GMM_PAGEID_IDX_MASK) )
#else
# define PGM_PAGE_GET_PAGE_IN_CHUNK(pPage)  (  (uint32_t)((pPage)->HCPhysX & 0xfff) \
                                             | ( (uint32_t)((pPage)->HCPhysX >> 48) & (RT_BIT_32(GMM_CHUNKID_SHIFT - 12) - 1) ) )
#endif
*/


/**
 * Gets the page type.
 * @returns The page type.
 * @param   pPage   Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_TYPE(pPage)        (pPage)->u3Type

/**
 * Sets the page type.
 * @param   pPage    Pointer to the physical guest page tracking structure.
 * @param   _enmType The new page type (PGMPAGETYPE).
 */
#ifdef VBOX_WITH_NEW_PHYS_CODE
#define PGM_PAGE_SET_TYPE(pPage, _enmType) \
    do { (pPage)->u3Type = (_enmType); } while (0)
#else
#define PGM_PAGE_SET_TYPE(pPage, _enmType) \
    do { \
        (pPage)->u3Type = (_enmType); \
        if ((_enmType) == PGMPAGETYPE_ROM) \
            (pPage)->HCPhysX |= MM_RAM_FLAGS_ROM; \
        else if ((_enmType) == PGMPAGETYPE_ROM_SHADOW) \
            (pPage)->HCPhysX |= MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO2; \
        else if ((_enmType) == PGMPAGETYPE_MMIO2) \
            (pPage)->HCPhysX |= MM_RAM_FLAGS_MMIO2; \
    } while (0)
#endif


/**
 * Checks if the page is 'reserved'.
 * @returns true/false.
 * @param   pPage   Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_IS_RESERVED(pPage)     ( !!((pPage)->HCPhysX & MM_RAM_FLAGS_RESERVED) )

/**
 * Checks if the page is marked for MMIO.
 * @returns true/false.
 * @param   pPage   Pointer to the physical guest page tracking structure.
 */
#ifdef VBOX_WITH_NEW_PHYS_CODE
# define PGM_PAGE_IS_MMIO(pPage)        ( (pPage)->u3Type == PGMPAGETYPE_MMIO )
#else
# define PGM_PAGE_IS_MMIO(pPage)        ( !!((pPage)->HCPhysX & MM_RAM_FLAGS_MMIO) )
#endif

/**
 * Checks if the page is backed by the ZERO page.
 * @returns true/false.
 * @param   pPage   Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_IS_ZERO(pPage)         ( (pPage)->u2StateX == PGM_PAGE_STATE_ZERO )

/**
 * Checks if the page is backed by a SHARED page.
 * @returns true/false.
 * @param   pPage   Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_IS_SHARED(pPage)       ( (pPage)->u2StateX == PGM_PAGE_STATE_SHARED )


/**
 * Marks the page as written to (for GMM change monitoring).
 * @param   pPage   Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_SET_WRITTEN_TO(pPage)  do { (pPage)->fWrittenToX = 1; } while (0)

/**
 * Clears the written-to indicator.
 * @param   pPage   Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_CLEAR_WRITTEN_TO(pPage) do { (pPage)->fWrittenToX = 0; } while (0)

/**
 * Checks if the page was marked as written-to.
 * @returns true/false.
 * @param   pPage   Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_IS_WRITTEN_TO(pPage)   ( (pPage)->fWrittenToX )


/** @name Physical Access Handler State values (PGMPAGE::u2HandlerPhysStateX).
 *
 * @remarks The values are assigned in order of priority, so we can calculate
 *          the correct state for a page with different handlers installed.
 * @{ */
/** No handler installed. */
#define PGM_PAGE_HNDL_PHYS_STATE_NONE       0
/** Monitoring is temporarily disabled. */
#define PGM_PAGE_HNDL_PHYS_STATE_DISABLED   1
/** Write access is monitored. */
#define PGM_PAGE_HNDL_PHYS_STATE_WRITE      2
/** All access is monitored. */
#define PGM_PAGE_HNDL_PHYS_STATE_ALL        3
/** @} */

/**
 * Gets the physical access handler state of a page.
 * @returns PGM_PAGE_HNDL_PHYS_STATE_* value.
 * @param   pPage   Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) ( (pPage)->u2HandlerPhysStateX )

/**
 * Sets the physical access handler state of a page.
 * @param   pPage   Pointer to the physical guest page tracking structure.
 * @param   _uState The new state value.
 */
#define PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, _uState) \
    do { (pPage)->u2HandlerPhysStateX = (_uState); } while (0)

/**
 * Checks if the page has any physical access handlers, including temporarily disabled ones.
 * @returns true/false
 * @param   pPage   Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_HAS_ANY_PHYSICAL_HANDLERS(pPage) ( (pPage)->u2HandlerPhysStateX != PGM_PAGE_HNDL_PHYS_STATE_NONE )

/**
 * Checks if the page has any active physical access handlers.
 * @returns true/false
 * @param   pPage   Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage) ( (pPage)->u2HandlerPhysStateX >= PGM_PAGE_HNDL_PHYS_STATE_WRITE )


/** @name Virtual Access Handler State values (PGMPAGE::u2HandlerVirtStateX).
 *
 * @remarks The values are assigned in order of priority, so we can calculate
 *          the correct state for a page with different handlers installed.
 * @{ */
/** No handler installed. */
#define PGM_PAGE_HNDL_VIRT_STATE_NONE       0
/* 1 is reserved so the lineup is identical with the physical ones. */
/** Write access is monitored. */
#define PGM_PAGE_HNDL_VIRT_STATE_WRITE      2
/** All access is monitored. */
#define PGM_PAGE_HNDL_VIRT_STATE_ALL        3
/** @} */

/**
 * Gets the virtual access handler state of a page.
 * @returns PGM_PAGE_HNDL_VIRT_STATE_* value.
 * @param   pPage   Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) ( (pPage)->u2HandlerVirtStateX )

/**
 * Sets the virtual access handler state of a page.
 * @param   pPage   Pointer to the physical guest page tracking structure.
 * @param   _uState The new state value.
 */
#define PGM_PAGE_SET_HNDL_VIRT_STATE(pPage, _uState) \
    do { (pPage)->u2HandlerVirtStateX = (_uState); } while (0)

/**
 * Checks if the page has any virtual access handlers.
 * @returns true/false
 * @param   pPage   Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_HAS_ANY_VIRTUAL_HANDLERS(pPage) ( (pPage)->u2HandlerVirtStateX != PGM_PAGE_HNDL_VIRT_STATE_NONE )

/**
 * Same as PGM_PAGE_HAS_ANY_VIRTUAL_HANDLERS - can't disable pages in
 * virtual handlers.
 * @returns true/false
 * @param   pPage   Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage) PGM_PAGE_HAS_ANY_VIRTUAL_HANDLERS(pPage)


/**
 * Checks if the page has any access handlers, including temporarily disabled ones.
 * @returns true/false
 * @param   pPage   Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_HAS_ANY_HANDLERS(pPage) \
    (   (pPage)->u2HandlerPhysStateX != PGM_PAGE_HNDL_PHYS_STATE_NONE \
     || (pPage)->u2HandlerVirtStateX != PGM_PAGE_HNDL_VIRT_STATE_NONE )

/**
 * Checks if the page has any active access handlers.
 * @returns true/false
 * @param   pPage   Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) \
    (   (pPage)->u2HandlerPhysStateX >= PGM_PAGE_HNDL_PHYS_STATE_WRITE \
     || (pPage)->u2HandlerVirtStateX >= PGM_PAGE_HNDL_VIRT_STATE_WRITE )

/**
 * Checks if the page has any active access handlers catching all accesses.
 * @returns true/false
 * @param   pPage   Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage) \
    (   (pPage)->u2HandlerPhysStateX == PGM_PAGE_HNDL_PHYS_STATE_ALL \
     || (pPage)->u2HandlerVirtStateX == PGM_PAGE_HNDL_VIRT_STATE_ALL )


/** @def PGM_PAGE_GET_TRACKING
 * Gets the packed shadow page pool tracking data associated with a guest page.
 * @returns uint16_t containing the data.
 * @param   pPage   Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_TRACKING(pPage) \
    ( *((uint16_t *)&(pPage)->HCPhysX + 3) )

/** @def PGM_PAGE_SET_TRACKING
 * Sets the packed shadow page pool tracking data associated with a guest page.
 * @param   pPage           Pointer to the physical guest page tracking structure.
 * @param   u16TrackingData The tracking data to store.
 */
#define PGM_PAGE_SET_TRACKING(pPage, u16TrackingData) \
    do { *((uint16_t *)&(pPage)->HCPhysX + 3) = (u16TrackingData); } while (0)

/** @def PGM_PAGE_GET_TD_CREFS
 * Gets the @a cRefs tracking data member.
 * @returns cRefs.
 * @param   pPage   Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_TD_CREFS(pPage) \
    ((PGM_PAGE_GET_TRACKING(pPage) >> PGMPOOL_TD_CREFS_SHIFT) & PGMPOOL_TD_CREFS_MASK)

/** @def PGM_PAGE_GET_TD_IDX
 * Gets the @a idx tracking data member.
 * @returns idx.
 * @param   pPage   Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_TD_IDX(pPage) \
    ((PGM_PAGE_GET_TRACKING(pPage) >> PGMPOOL_TD_IDX_SHIFT) & PGMPOOL_TD_IDX_MASK)

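/* Illustrative note (not part of the original header): on a little-endian
 * host the expression above selects the most significant 16 bits of
 * HCPhysX (uint16_t word index 3, i.e. bits 48..63). Decoding the packed
 * word by hand would look like: */
#if 0
uint16_t u16   = PGM_PAGE_GET_TRACKING(pPage);
uint16_t cRefs = (u16 >> PGMPOOL_TD_CREFS_SHIFT) & PGMPOOL_TD_CREFS_MASK;
uint16_t idx   = (u16 >> PGMPOOL_TD_IDX_SHIFT)   & PGMPOOL_TD_IDX_MASK;
#endif
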
/**
 * RAM range for GC Phys to HC Phys conversion.
 *
 * Can be used for HC Virt to GC Phys and HC Virt to HC Phys
 * conversions too, but we'll let MM handle that for now.
 *
 * This structure is used by linked lists in both GC and HC.
 */
typedef struct PGMRAMRANGE
{
    /** Pointer to the next RAM range - for R3. */
    R3PTRTYPE(struct PGMRAMRANGE *)     pNextR3;
    /** Pointer to the next RAM range - for R0. */
    R0PTRTYPE(struct PGMRAMRANGE *)     pNextR0;
    /** Pointer to the next RAM range - for RC. */
    RCPTRTYPE(struct PGMRAMRANGE *)     pNextRC;
    /** Pointer alignment. */
    RTRCPTR                             RCPtrAlignment;
    /** Start of the range. Page aligned. */
    RTGCPHYS                            GCPhys;
    /** Last address in the range (inclusive). Page aligned (-1). */
    RTGCPHYS                            GCPhysLast;
    /** Size of the range. (Page aligned of course). */
    RTGCPHYS                            cb;
    /** MM_RAM_* flags */
    uint32_t                            fFlags;
    uint32_t                            u32Alignment; /**< alignment. */
#ifndef VBOX_WITH_NEW_PHYS_CODE
    /** R3 virtual lookup ranges for chunks.
     * Currently only used with MM_RAM_FLAGS_DYNAMIC_ALLOC ranges.
     * @remarks This is occasionally accessed from ring-0!! (not darwin) */
# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    R3PTRTYPE(PRTR3UINTPTR)             paChunkR3Ptrs;
# else
    R3R0PTRTYPE(PRTR3UINTPTR)           paChunkR3Ptrs;
# endif
#endif
    /** Start of the HC mapping of the range. This is only used for MMIO2. */
    R3PTRTYPE(void *)                   pvR3;
    /** The range description. */
    R3PTRTYPE(const char *)             pszDesc;

    /** Padding to make aPage aligned on sizeof(PGMPAGE). */
#ifdef VBOX_WITH_NEW_PHYS_CODE
    uint32_t                            au32Reserved[2];
#elif HC_ARCH_BITS == 32
    uint32_t                            au32Reserved[1];
#endif

    /** Array of physical guest page tracking structures. */
    PGMPAGE                             aPages[1];
} PGMRAMRANGE;
/** Pointer to a RAM range for GC Phys to HC Phys conversion. */
typedef PGMRAMRANGE *PPGMRAMRANGE;

#ifndef VBOX_WITH_NEW_PHYS_CODE
/** Returns the HC pointer corresponding to the given RAM range and physical offset. */
#define PGMRAMRANGE_GETHCPTR(pRam, off) \
    (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC) ? (RTHCPTR)((pRam)->paChunkR3Ptrs[(off) >> PGM_DYNAMIC_CHUNK_SHIFT] + ((off) & PGM_DYNAMIC_CHUNK_OFFSET_MASK)) \
                                                : (RTHCPTR)((RTR3UINTPTR)(pRam)->pvR3 + (off));
#endif

/**
 * Per page tracking structure for ROM image.
 *
 * A ROM image may have a shadow page, in which case we may have
 * two pages backing it. This structure contains the PGMPAGE for
 * both while PGMRAMRANGE has a copy of the active one. It is
 * important that these aren't out of sync in any regard other
 * than page pool tracking data.
 */
typedef struct PGMROMPAGE
{
    /** The page structure for the virgin ROM page. */
    PGMPAGE     Virgin;
    /** The page structure for the shadow RAM page. */
    PGMPAGE     Shadow;
    /** The current protection setting. */
    PGMROMPROT  enmProt;
    /** Pad the structure size to a multiple of 8. */
    uint32_t    u32Padding;
} PGMROMPAGE;
/** Pointer to a ROM page tracking structure. */
typedef PGMROMPAGE *PPGMROMPAGE;


/**
 * A registered ROM image.
 *
 * This is needed to keep track of ROM images since they generally
 * intrude into a PGMRAMRANGE. It also keeps track of additional
 * info like the two page sets (read-only virgin and read-write shadow)
 * and the current state of each page.
 *
 * Because access handlers cannot easily be executed in a different
 * context, the ROM ranges need to be accessible in all contexts.
 */
typedef struct PGMROMRANGE
{
    /** Pointer to the next range - R3. */
    R3PTRTYPE(struct PGMROMRANGE *)     pNextR3;
    /** Pointer to the next range - R0. */
    R0PTRTYPE(struct PGMROMRANGE *)     pNextR0;
    /** Pointer to the next range - RC. */
    RCPTRTYPE(struct PGMROMRANGE *)     pNextRC;
    /** Pointer alignment */
    RTRCPTR                             GCPtrAlignment;
    /** Address of the range. */
    RTGCPHYS                            GCPhys;
    /** Address of the last byte in the range. */
    RTGCPHYS                            GCPhysLast;
    /** Size of the range. */
    RTGCPHYS                            cb;
    /** The flags (PGMPHYS_ROM_FLAG_*). */
    uint32_t                            fFlags;
    /** Alignment padding ensuring that aPages is sizeof(PGMROMPAGE) aligned. */
    uint32_t                            au32Alignemnt[HC_ARCH_BITS == 32 ? 7 : 3];
    /** Pointer to the original bits when PGMPHYS_ROM_FLAG_PERMANENT_BINARY was specified.
     * This is used for strictness checks. */
    R3PTRTYPE(const void *)             pvOriginal;
    /** The ROM description. */
    R3PTRTYPE(const char *)             pszDesc;
    /** The per page tracking structures. */
    PGMROMPAGE                          aPages[1];
} PGMROMRANGE;
/** Pointer to a ROM range. */
typedef PGMROMRANGE *PPGMROMRANGE;


/**
 * A registered MMIO2 (= Device RAM) range.
 *
 * There are a few reasons why we need to keep track of these
 * registrations. One of them is the deregistration & cleanup
 * stuff, while another is that the PGMRAMRANGE associated with
 * such a region may have to be removed from the ram range list.
 *
 * Overlapping with a RAM range has to be 100% or none at all. The
 * pages in the existing RAM range must not be ROM nor MMIO. A guru
 * meditation will be raised if a partial overlap or an overlap of
 * ROM pages is encountered. On an overlap we will free all the
 * existing RAM pages and put in the ram range pages instead.
 */
typedef struct PGMMMIO2RANGE
{
    /** The owner of the range. (a device) */
    PPDMDEVINSR3                        pDevInsR3;
    /** Pointer to the ring-3 mapping of the allocation. */
    RTR3PTR                             pvR3;
    /** Pointer to the next range - R3. */
    R3PTRTYPE(struct PGMMMIO2RANGE *)   pNextR3;
    /** Whether it's mapped or not. */
    bool                                fMapped;
    /** Whether it's overlapping or not. */
    bool                                fOverlapping;
    /** The PCI region number.
     * @remarks This ASSUMES that nobody will ever really need to have multiple
     *          PCI devices with matching MMIO region numbers on a single device. */
    uint8_t                             iRegion;
    /** Alignment padding for putting the ram range on a PGMPAGE alignment boundary. */
    uint8_t                             abAlignemnt[HC_ARCH_BITS == 32 ? 1 : 5];
    /** The associated RAM range. */
    PGMRAMRANGE                         RamRange;
} PGMMMIO2RANGE;
/** Pointer to a MMIO2 range. */
typedef PGMMMIO2RANGE *PPGMMMIO2RANGE;




/**
 * PGMPhysRead/Write cache entry.
 */
typedef struct PGMPHYSCACHEENTRY
{
    /** R3 pointer to physical page. */
    R3PTRTYPE(uint8_t *)    pbR3;
    /** GC physical address of the cache entry. */
    RTGCPHYS                GCPhys;
#if HC_ARCH_BITS == 64 && GC_ARCH_BITS == 32
    RTGCPHYS                u32Padding0; /**< alignment padding. */
#endif
} PGMPHYSCACHEENTRY;

/**
 * PGMPhysRead/Write cache to reduce REM memory access overhead.
 */
typedef struct PGMPHYSCACHE
{
    /** Bitmap of valid cache entries. */
    uint64_t                aEntries;
    /** Cache entries. */
    PGMPHYSCACHEENTRY       Entry[PGM_MAX_PHYSCACHE_ENTRIES];
} PGMPHYSCACHE;

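/* Illustrative sketch (not part of the original header): aEntries is a
 * validity bitmap with one bit per Entry slot. The slot selection scheme
 * below is an assumption for illustration only. */
#if 0
unsigned iEntry = (GCPhys >> PAGE_SHIFT) & PGM_MAX_PHYSCACHE_ENTRIES_MASK;
if (   (pCache->aEntries & RT_BIT_64(iEntry))                                   /* valid? */
    && pCache->Entry[iEntry].GCPhys == (GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK))  /* same page? */
    pb = pCache->Entry[iEntry].pbR3 + (GCPhys & PAGE_OFFSET_MASK);              /* cache hit */
#endif
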

/** Pointer to an allocation chunk ring-3 mapping. */
typedef struct PGMCHUNKR3MAP *PPGMCHUNKR3MAP;
/** Pointer to an allocation chunk ring-3 mapping pointer. */
typedef PPGMCHUNKR3MAP *PPPGMCHUNKR3MAP;

/**
 * Ring-3 tracking structure for an allocation chunk ring-3 mapping.
 *
 * The primary tree (Core) uses the chunk id as key.
 * The secondary tree (AgeCore) is used for ageing and uses the ageing sequence number as key.
 */
typedef struct PGMCHUNKR3MAP
{
    /** The key is the chunk id. */
    AVLU32NODECORE                      Core;
    /** The key is the ageing sequence number. */
    AVLLU32NODECORE                     AgeCore;
    /** The current age thingy. */
    uint32_t                            iAge;
    /** The current reference count. */
    uint32_t volatile                   cRefs;
    /** The current permanent reference count. */
    uint32_t volatile                   cPermRefs;
    /** The mapping address. */
    void                               *pv;
} PGMCHUNKR3MAP;

/**
 * Allocation chunk ring-3 mapping TLB entry.
 */
typedef struct PGMCHUNKR3MAPTLBE
{
    /** The chunk id. */
    uint32_t volatile                   idChunk;
#if HC_ARCH_BITS == 64
    uint32_t                            u32Padding; /**< alignment padding. */
#endif
    /** The chunk map. */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    R3PTRTYPE(PPGMCHUNKR3MAP) volatile  pChunk;
#else
    R3R0PTRTYPE(PPGMCHUNKR3MAP) volatile pChunk;
#endif
} PGMCHUNKR3MAPTLBE;
/** Pointer to an allocation chunk ring-3 mapping TLB entry. */
typedef PGMCHUNKR3MAPTLBE *PPGMCHUNKR3MAPTLBE;

/** The number of TLB entries in PGMCHUNKR3MAPTLB.
 * @remark Must be a power of two value. */
#define PGM_CHUNKR3MAPTLB_ENTRIES   32

/**
 * Allocation chunk ring-3 mapping TLB.
 *
 * @remarks We use a TLB to speed up lookups by avoiding walking the AVL.
 *          At first glance this might look kinda odd since AVL trees are
 *          supposed to give the most optimal lookup times of all trees
 *          due to their balancing. However, take a tree with 1023 nodes
 *          in it, that's 10 levels, meaning that most searches have to go
 *          down 9 levels before they find what they want. This isn't fast
 *          compared to a TLB hit. There is the factor of cache misses,
 *          and of course the problem with trees and branch prediction.
 *          This is why we use TLBs in front of most of the trees.
 *
 * @todo    Generalize this TLB + AVL stuff, shouldn't be all that
 *          difficult when we switch to the new inlined AVL trees (from kStuff).
 */
typedef struct PGMCHUNKR3MAPTLB
{
    /** The TLB entries. */
    PGMCHUNKR3MAPTLBE   aEntries[PGM_CHUNKR3MAPTLB_ENTRIES];
} PGMCHUNKR3MAPTLB;

/**
 * Calculates the index of a guest page in the Ring-3 Chunk TLB.
 * @returns Chunk TLB index.
 * @param   idChunk     The Chunk ID.
 */
#define PGM_CHUNKR3MAPTLB_IDX(idChunk)  ( (idChunk) & (PGM_CHUNKR3MAPTLB_ENTRIES - 1) )

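/* Illustrative sketch (not part of the original header): a typical chunk
 * TLB probe, falling back to the Core AVL tree on a miss. Both pTlb and
 * the slow-path helper name are assumptions for illustration only. */
#if 0
PPGMCHUNKR3MAPTLBE pTlbe = &pTlb->aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
PPGMCHUNKR3MAP     pMap;
if (pTlbe->idChunk == idChunk)
    pMap = pTlbe->pChunk;                                   /* TLB hit */
else
    pMap = pgmR3ChunkMapLookupSlow(pVM, idChunk, pTlbe);    /* hypothetical AVL miss path */
#endif
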

/**
 * Ring-3 guest page mapping TLB entry.
 * @remarks used in ring-0 as well at the moment.
 */
typedef struct PGMPAGER3MAPTLBE
{
    /** Address of the page. */
    RTGCPHYS volatile                   GCPhys;
    /** The guest page. */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    R3PTRTYPE(PPGMPAGE) volatile        pPage;
#else
    R3R0PTRTYPE(PPGMPAGE) volatile      pPage;
#endif
    /** Pointer to the page mapping tracking structure, PGMCHUNKR3MAP. */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    R3PTRTYPE(PPGMCHUNKR3MAP) volatile  pMap;
#else
    R3R0PTRTYPE(PPGMCHUNKR3MAP) volatile pMap;
#endif
    /** The address */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    R3PTRTYPE(void *) volatile          pv;
#else
    R3R0PTRTYPE(void *) volatile        pv;
#endif
#if HC_ARCH_BITS == 32
    uint32_t                            u32Padding; /**< alignment padding. */
#endif
} PGMPAGER3MAPTLBE;
/** Pointer to an entry in the HC physical TLB. */
typedef PGMPAGER3MAPTLBE *PPGMPAGER3MAPTLBE;


/** The number of entries in the ring-3 guest page mapping TLB.
 * @remarks The value must be a power of two. */
#define PGM_PAGER3MAPTLB_ENTRIES 64

/**
 * Ring-3 guest page mapping TLB.
 * @remarks used in ring-0 as well at the moment.
 */
typedef struct PGMPAGER3MAPTLB
{
    /** The TLB entries. */
    PGMPAGER3MAPTLBE    aEntries[PGM_PAGER3MAPTLB_ENTRIES];
} PGMPAGER3MAPTLB;
/** Pointer to the ring-3 guest page mapping TLB. */
typedef PGMPAGER3MAPTLB *PPGMPAGER3MAPTLB;

/**
 * Calculates the index of the TLB entry for the specified guest page.
 * @returns Physical TLB index.
 * @param   GCPhys  The guest physical address.
 */
#define PGM_PAGER3MAPTLB_IDX(GCPhys)    ( ((GCPhys) >> PAGE_SHIFT) & (PGM_PAGER3MAPTLB_ENTRIES - 1) )

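/* Illustrative sketch (not part of the original header): probing the page
 * map TLB for an already mapped guest page; on a hit, pv points at the
 * page within the mapped chunk. Variable names are assumptions. */
#if 0
PPGMPAGER3MAPTLBE pTlbe = &pTlb->aEntries[PGM_PAGER3MAPTLB_IDX(GCPhys)];
if (pTlbe->GCPhys == (GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK))
    pv = (uint8_t *)pTlbe->pv + (GCPhys & PAGE_OFFSET_MASK);    /* TLB hit */
#endif
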

/**
 * Mapping cache usage set entry.
 *
 * @remarks 16-bit ints were chosen as the set is not expected to be used beyond
 *          the dynamic ring-0 and (to some extent) raw-mode context mapping
 *          cache. If it's extended to include ring-3, well, then something will
 *          have to be changed here...
 */
typedef struct PGMMAPSETENTRY
{
    /** The mapping cache index. */
    uint16_t        iPage;
    /** The number of references.
     * The max is UINT16_MAX - 1. */
    uint16_t        cRefs;
    /** Pointer to the page. */
    RTR0PTR         pvPage;
    /** The physical address for this entry. */
    RTHCPHYS        HCPhys;
} PGMMAPSETENTRY;
/** Pointer to a mapping cache usage set entry. */
typedef PGMMAPSETENTRY *PPGMMAPSETENTRY;

/**
 * Mapping cache usage set.
 *
 * This is used in ring-0 and the raw-mode context to track dynamic mappings
 * done during exits / traps. The set is
 */
typedef struct PGMMAPSET
{
    /** The number of occupied entries.
     * This is PGMMAPSET_CLOSED if the set is closed and we're not supposed to do
     * dynamic mappings. */
    uint32_t        cEntries;
    /** The start of the current subset.
     * This is UINT32_MAX if no subset is currently open. */
    uint32_t        iSubset;
    /** The index of the current CPU, only valid if the set is open. */
    int32_t         iCpu;
    /** The entries. */
    PGMMAPSETENTRY  aEntries[64];
    /** HCPhys -> iEntry fast lookup table.
     * Use PGMMAPSET_HASH for hashing.
     * The entries may or may not be valid, check against cEntries. */
    uint8_t         aiHashTable[128];
} PGMMAPSET;
/** Pointer to the mapping cache set. */
typedef PGMMAPSET *PPGMMAPSET;

/** PGMMAPSET::cEntries value for a closed set. */
#define PGMMAPSET_CLOSED            UINT32_C(0xdeadc0fe)

/** Hash function for aiHashTable. */
#define PGMMAPSET_HASH(HCPhys)      (((HCPhys) >> PAGE_SHIFT) & 127)

/** The max fill size (strict builds). */
#define PGMMAPSET_MAX_FILL          (64U * 80U / 100U)

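/* Illustrative sketch (not part of the original header): the hash table
 * provides a fast first probe into aEntries; per the comments above, a
 * slot must be validated against cEntries (and the entry's HCPhys) since
 * stale slots are not cleared. The cRefs bump on a hit is an assumption. */
#if 0
uint32_t iHash  = PGMMAPSET_HASH(HCPhys);
uint32_t iEntry = pSet->aiHashTable[iHash];
if (   iEntry < pSet->cEntries
    && pSet->aEntries[iEntry].HCPhys == HCPhys)
    pSet->aEntries[iEntry].cRefs++;     /* hit: just reference the existing mapping */
#endif
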

/** @name Context neutral page mapper TLB.
 *
 * Hoping to avoid some code and bug duplication, parts of the GCxxx->CCPtr
 * code are written in a kind of context-neutral way. Time will show whether
 * this actually makes sense or not...
 *
 * @todo this needs to be reconsidered and dropped/redone since the ring-0
 *       context ends up using a global mapping cache on some platforms
 *       (darwin).
 *
 * @{ */
/** @typedef PPGMPAGEMAPTLB
 * The page mapper TLB pointer type for the current context. */
/** @typedef PPGMPAGEMAPTLBE
 * The page mapper TLB entry pointer type for the current context. */
/** @typedef PPPGMPAGEMAPTLBE
 * The page mapper TLB entry pointer pointer type for the current context. */
/** @def PGM_PAGEMAPTLB_ENTRIES
 * The number of TLB entries in the page mapper TLB for the current context. */
/** @def PGM_PAGEMAPTLB_IDX
 * Calculate the TLB index for a guest physical address.
 * @returns The TLB index.
 * @param   GCPhys  The guest physical address. */
/** @typedef PPGMPAGEMAP
 * Pointer to a page mapper unit for current context. */
/** @typedef PPPGMPAGEMAP
 * Pointer to a page mapper unit pointer for current context. */
#ifdef IN_RC
// typedef PPGMPAGEGCMAPTLB       PPGMPAGEMAPTLB;
// typedef PPGMPAGEGCMAPTLBE      PPGMPAGEMAPTLBE;
// typedef PPGMPAGEGCMAPTLBE     *PPPGMPAGEMAPTLBE;
# define PGM_PAGEMAPTLB_ENTRIES   PGM_PAGEGCMAPTLB_ENTRIES
# define PGM_PAGEMAPTLB_IDX(GCPhys) PGM_PAGEGCMAPTLB_IDX(GCPhys)
 typedef void *                   PPGMPAGEMAP;
 typedef void **                  PPPGMPAGEMAP;
//#elif IN_RING0
// typedef PPGMPAGER0MAPTLB       PPGMPAGEMAPTLB;
// typedef PPGMPAGER0MAPTLBE      PPGMPAGEMAPTLBE;
// typedef PPGMPAGER0MAPTLBE     *PPPGMPAGEMAPTLBE;
//# define PGM_PAGEMAPTLB_ENTRIES  PGM_PAGER0MAPTLB_ENTRIES
//# define PGM_PAGEMAPTLB_IDX(GCPhys) PGM_PAGER0MAPTLB_IDX(GCPhys)
// typedef PPGMCHUNKR0MAP         PPGMPAGEMAP;
// typedef PPPGMCHUNKR0MAP        PPPGMPAGEMAP;
#else
 typedef PPGMPAGER3MAPTLB         PPGMPAGEMAPTLB;
 typedef PPGMPAGER3MAPTLBE        PPGMPAGEMAPTLBE;
 typedef PPGMPAGER3MAPTLBE       *PPPGMPAGEMAPTLBE;
# define PGM_PAGEMAPTLB_ENTRIES   PGM_PAGER3MAPTLB_ENTRIES
# define PGM_PAGEMAPTLB_IDX(GCPhys) PGM_PAGER3MAPTLB_IDX(GCPhys)
 typedef PPGMCHUNKR3MAP           PPGMPAGEMAP;
 typedef PPPGMCHUNKR3MAP          PPPGMPAGEMAP;
#endif
/** @} */


/** @name PGM Pool Indexes.
 * Aka. the unique shadow page identifier.
 * @{ */
/** NIL page pool IDX. */
#define NIL_PGMPOOL_IDX                 0
/** The first special index. */
#define PGMPOOL_IDX_FIRST_SPECIAL       1
/** Page directory (32-bit root). */
#define PGMPOOL_IDX_PD                  1
/** Page Directory Pointer Table (PAE root). */
#define PGMPOOL_IDX_PDPT                2
/** AMD64 CR3 level index.*/
#define PGMPOOL_IDX_AMD64_CR3           3
/** Nested paging root.*/
#define PGMPOOL_IDX_NESTED_ROOT         4
/** The first normal index. */
#define PGMPOOL_IDX_FIRST               5
/** The last valid index. (inclusive, 14 bits) */
#define PGMPOOL_IDX_LAST                0x3fff
/** @} */

/** The NIL index for the parent chain. */
#define NIL_PGMPOOL_USER_INDEX          ((uint16_t)0xffff)

/**
 * Node in the chain linking a shadowed page to its parent (user).
 */
#pragma pack(1)
typedef struct PGMPOOLUSER
{
    /** The index to the next item in the chain. NIL_PGMPOOL_USER_INDEX is no next. */
    uint16_t    iNext;
    /** The user page index. */
    uint16_t    iUser;
    /** Index into the user table. */
    uint32_t    iUserTable;
} PGMPOOLUSER, *PPGMPOOLUSER;
typedef const PGMPOOLUSER *PCPGMPOOLUSER;
#pragma pack()


/** The NIL index for the phys ext chain. */
#define NIL_PGMPOOL_PHYSEXT_INDEX       ((uint16_t)0xffff)

/**
 * Node in the chain of physical cross reference extents.
 * @todo Calling this an 'extent' is not quite right, find a better name.
 */
#pragma pack(1)
typedef struct PGMPOOLPHYSEXT
{
    /** The index to the next item in the chain. NIL_PGMPOOL_PHYSEXT_INDEX is no next. */
    uint16_t    iNext;
    /** The user page index. */
    uint16_t    aidx[3];
} PGMPOOLPHYSEXT, *PPGMPOOLPHYSEXT;
typedef const PGMPOOLPHYSEXT *PCPGMPOOLPHYSEXT;
#pragma pack()


/**
 * The kind of page that's being shadowed.
 */
typedef enum PGMPOOLKIND
{
    /** The virtual invalid 0 entry. */
    PGMPOOLKIND_INVALID = 0,
    /** The entry is free (=unused). */
    PGMPOOLKIND_FREE,

    /** Shw: 32-bit page table;     Gst: no paging. */
    PGMPOOLKIND_32BIT_PT_FOR_PHYS,
    /** Shw: 32-bit page table;     Gst: 32-bit page table. */
    PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT,
    /** Shw: 32-bit page table;     Gst: 4MB page. */
    PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB,
    /** Shw: PAE page table;        Gst: no paging. */
    PGMPOOLKIND_PAE_PT_FOR_PHYS,
    /** Shw: PAE page table;        Gst: 32-bit page table. */
    PGMPOOLKIND_PAE_PT_FOR_32BIT_PT,
    /** Shw: PAE page table;        Gst: Half of a 4MB page. */
    PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB,
    /** Shw: PAE page table;        Gst: PAE page table. */
    PGMPOOLKIND_PAE_PT_FOR_PAE_PT,
    /** Shw: PAE page table;        Gst: 2MB page. */
    PGMPOOLKIND_PAE_PT_FOR_PAE_2MB,

    /** Shw: 32-bit page directory. Gst: 32-bit page directory. */
    PGMPOOLKIND_32BIT_PD,
    /** Shw: 32-bit page directory. Gst: no paging. */
    PGMPOOLKIND_32BIT_PD_PHYS,
    /** Shw: PAE page directory 0;  Gst: 32-bit page directory. */
    PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD,
    /** Shw: PAE page directory 1;  Gst: 32-bit page directory. */
    PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD,
    /** Shw: PAE page directory 2;  Gst: 32-bit page directory. */
    PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD,
    /** Shw: PAE page directory 3;  Gst: 32-bit page directory. */
    PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD,
    /** Shw: PAE page directory;    Gst: PAE page directory. */
    PGMPOOLKIND_PAE_PD_FOR_PAE_PD,
    /** Shw: PAE page directory;    Gst: no paging. */
    PGMPOOLKIND_PAE_PD_PHYS,

    /** Shw: PAE page directory pointer table (legacy, 4 entries);  Gst: 32-bit paging. */
    PGMPOOLKIND_PAE_PDPT_FOR_32BIT,
    /** Shw: PAE page directory pointer table (legacy, 4 entries);  Gst: PAE PDPT. */
    PGMPOOLKIND_PAE_PDPT,
    /** Shw: PAE page directory pointer table (legacy, 4 entries);  Gst: no paging. */
    PGMPOOLKIND_PAE_PDPT_PHYS,

    /** Shw: 64-bit page directory pointer table;   Gst: 64-bit page directory pointer table. */
    PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT,
    /** Shw: 64-bit page directory pointer table;   Gst: no paging. */
    PGMPOOLKIND_64BIT_PDPT_FOR_PHYS,
    /** Shw: 64-bit page directory table;           Gst: 64-bit page directory table. */
    PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD,
    /** Shw: 64-bit page directory table;           Gst: no paging. */
    PGMPOOLKIND_64BIT_PD_FOR_PHYS, /* 22 */

    /** Shw: 64-bit PML4;           Gst: 64-bit PML4. */
    PGMPOOLKIND_64BIT_PML4,

    /** Shw: EPT page directory pointer table;  Gst: no paging. */
    PGMPOOLKIND_EPT_PDPT_FOR_PHYS,
    /** Shw: EPT page directory table;          Gst: no paging. */
    PGMPOOLKIND_EPT_PD_FOR_PHYS,
    /** Shw: EPT page table;                    Gst: no paging. */
    PGMPOOLKIND_EPT_PT_FOR_PHYS,

    /** Shw: Root Nested paging table. */
    PGMPOOLKIND_ROOT_NESTED,

    /** The last valid entry. */
    PGMPOOLKIND_LAST = PGMPOOLKIND_ROOT_NESTED
} PGMPOOLKIND;


1592/**
1593 * The tracking data for a page in the pool.
1594 */
1595typedef struct PGMPOOLPAGE
1596{
1597 /** AVL node core with the host (HC) physical address of this page. */
1598 AVLOHCPHYSNODECORE Core;
1599 /** Pointer to the R3 mapping of the page. */
1600#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
1601 R3PTRTYPE(void *) pvPageR3;
1602#else
1603 R3R0PTRTYPE(void *) pvPageR3;
1604#endif
1605 /** The guest physical address. */
1606#if HC_ARCH_BITS == 32 && GC_ARCH_BITS == 64
1607 uint32_t Alignment0;
1608#endif
1609 RTGCPHYS GCPhys;
1610 /** The kind of page we're shadowing. (This is really a PGMPOOLKIND enum.) */
1611 uint8_t enmKind;
1612 uint8_t bPadding;
1613 /** The index of this page. */
1614 uint16_t idx;
1615 /** The next entry in the list this page currently resides in.
1616 * It's either in the free list or in the GCPhys hash. */
1617 uint16_t iNext;
1618#ifdef PGMPOOL_WITH_USER_TRACKING
1619 /** Head of the user chain. NIL_PGMPOOL_USER_INDEX if not currently in use. */
1620 uint16_t iUserHead;
1621 /** The number of present entries. */
1622 uint16_t cPresent;
1623 /** The first entry in the table which is present. */
1624 uint16_t iFirstPresent;
1625#endif
1626#ifdef PGMPOOL_WITH_MONITORING
1627 /** The number of modifications to the monitored page. */
1628 uint16_t cModifications;
1629 /** The next modified page. NIL_PGMPOOL_IDX if tail. */
1630 uint16_t iModifiedNext;
1631 /** The previous modified page. NIL_PGMPOOL_IDX if head. */
1632 uint16_t iModifiedPrev;
1633 /** The next page sharing access handler. NIL_PGMPOOL_IDX if tail. */
1634 uint16_t iMonitoredNext;
1635 /** The previous page sharing access handler. NIL_PGMPOOL_IDX if head. */
1636 uint16_t iMonitoredPrev;
1637#endif
1638#ifdef PGMPOOL_WITH_CACHE
1639 /** The next page in the age list. */
1640 uint16_t iAgeNext;
1641 /** The previous page in the age list. */
1642 uint16_t iAgePrev;
1643#endif /* PGMPOOL_WITH_CACHE */
1644 /** Used to indicate that the page is zeroed. */
1645 bool fZeroed;
1646 /** Used to indicate that a PT has non-global entries. */
1647 bool fSeenNonGlobal;
1648 /** Used to indicate that we're monitoring writes to the guest page. */
1649 bool fMonitored;
1650 /** Used to indicate that the page is in the cache (e.g. in the GCPhys hash).
1651 * (All pages are in the age list.) */
1652 bool fCached;
1653 /** This is used by the R3 access handlers when invoked by an async thread.
1654 * It's a hack required because of REMR3NotifyHandlerPhysicalDeregister. */
1655 bool volatile fReusedFlushPending;
1656 /** Used to indicate that this page can't be flushed (important for CR3 root pages and shadow PAE PD pages). */
1657 bool fLocked;
1658} PGMPOOLPAGE, *PPGMPOOLPAGE, **PPPGMPOOLPAGE;
1659
1660
1661#ifdef PGMPOOL_WITH_CACHE
1662/** The hash table size. */
1663# define PGMPOOL_HASH_SIZE 0x40
1664/** The hash function. */
1665# define PGMPOOL_HASH(GCPhys) ( ((GCPhys) >> PAGE_SHIFT) & (PGMPOOL_HASH_SIZE - 1) )
1666#endif
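/*
 * Example (sketch): how the GCPhys hash is typically consulted. The buckets
 * in PGMPOOL::aiHash hold page indexes into PGMPOOL::aPages, and hash
 * collisions are chained through PGMPOOLPAGE::iNext:
 *
 *     for (uint16_t i = pPool->aiHash[PGMPOOL_HASH(GCPhys)];
 *          i != NIL_PGMPOOL_IDX;
 *          i = pPool->aPages[i].iNext)
 *         if (pPool->aPages[i].GCPhys == GCPhys)
 *             return &pPool->aPages[i];
 */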
1667
1668
1669/**
1670 * The shadow page pool instance data.
1671 *
1672 * It's all one big allocation made at init time, except for the
1673 * pages themselves. The user nodes follow immediately after the
1674 * page structures.
1675 */
1676typedef struct PGMPOOL
1677{
1678 /** The VM handle - R3 Ptr. */
1679 PVMR3 pVMR3;
1680 /** The VM handle - R0 Ptr. */
1681 PVMR0 pVMR0;
1682 /** The VM handle - RC Ptr. */
1683 PVMRC pVMRC;
1684 /** The max pool size. This includes the special IDs. */
1685 uint16_t cMaxPages;
1686 /** The current pool size. */
1687 uint16_t cCurPages;
1688 /** The head of the free page list. */
1689 uint16_t iFreeHead;
1690 /* Padding. */
1691 uint16_t u16Padding;
1692#ifdef PGMPOOL_WITH_USER_TRACKING
1693 /** Head of the chain of free user nodes. */
1694 uint16_t iUserFreeHead;
1695 /** The number of user nodes we've allocated. */
1696 uint16_t cMaxUsers;
1697 /** The number of present page table entries in the entire pool. */
1698 uint32_t cPresent;
1699 /** Pointer to the array of user nodes - RC pointer. */
1700 RCPTRTYPE(PPGMPOOLUSER) paUsersRC;
1701 /** Pointer to the array of user nodes - R3 pointer. */
1702 R3PTRTYPE(PPGMPOOLUSER) paUsersR3;
1703 /** Pointer to the array of user nodes - R0 pointer. */
1704 R0PTRTYPE(PPGMPOOLUSER) paUsersR0;
1705#endif /* PGMPOOL_WITH_USER_TRACKING */
1706#ifdef PGMPOOL_WITH_GCPHYS_TRACKING
1707 /** Head of the chain of free phys ext nodes. */
1708 uint16_t iPhysExtFreeHead;
1709 /** The number of phys ext nodes we've allocated. */
1710 uint16_t cMaxPhysExts;
1711 /** Pointer to the array of physical xref extent - RC pointer. */
1712 RCPTRTYPE(PPGMPOOLPHYSEXT) paPhysExtsRC;
1713 /** Pointer to the array of physical xref extent nodes - R3 pointer. */
1714 R3PTRTYPE(PPGMPOOLPHYSEXT) paPhysExtsR3;
1715 /** Pointer to the array of physical xref extent nodes - R0 pointer. */
1716 R0PTRTYPE(PPGMPOOLPHYSEXT) paPhysExtsR0;
1717#endif /* PGMPOOL_WITH_GCPHYS_TRACKING */
1718#ifdef PGMPOOL_WITH_CACHE
1719 /** Hash table for GCPhys addresses. */
1720 uint16_t aiHash[PGMPOOL_HASH_SIZE];
1721 /** The head of the age list. */
1722 uint16_t iAgeHead;
1723 /** The tail of the age list. */
1724 uint16_t iAgeTail;
1725 /** Set if the cache is enabled. */
1726 bool fCacheEnabled;
1727#endif /* PGMPOOL_WITH_CACHE */
1728#ifdef PGMPOOL_WITH_MONITORING
1729 /** Head of the list of modified pages. */
1730 uint16_t iModifiedHead;
1731 /** The current number of modified pages. */
1732 uint16_t cModifiedPages;
1733 /** Access handler, RC. */
1734 RCPTRTYPE(PFNPGMRCPHYSHANDLER) pfnAccessHandlerRC;
1735 /** Access handler, R0. */
1736 R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnAccessHandlerR0;
1737 /** Access handler, R3. */
1738 R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnAccessHandlerR3;
1739 /** The access handler description (HC ptr). */
1740 R3PTRTYPE(const char *) pszAccessHandler;
1741#endif /* PGMPOOL_WITH_MONITORING */
1742 /** The number of pages currently in use. */
1743 uint16_t cUsedPages;
1744#ifdef VBOX_WITH_STATISTICS
1745 /** The high water mark for cUsedPages. */
1746 uint16_t cUsedPagesHigh;
1747 uint32_t Alignment1; /**< Align the next member on a 64-bit boundary. */
1748 /** Profiling pgmPoolAlloc(). */
1749 STAMPROFILEADV StatAlloc;
1750 /** Profiling pgmPoolClearAll(). */
1751 STAMPROFILE StatClearAll;
1752 /** Profiling pgmPoolFlushAllInt(). */
1753 STAMPROFILE StatFlushAllInt;
1754 /** Profiling pgmPoolFlushPage(). */
1755 STAMPROFILE StatFlushPage;
1756 /** Profiling pgmPoolFree(). */
1757 STAMPROFILE StatFree;
1758 /** Profiling time spent zeroing pages. */
1759 STAMPROFILE StatZeroPage;
1760# ifdef PGMPOOL_WITH_USER_TRACKING
1761 /** Profiling of pgmPoolTrackDeref. */
1762 STAMPROFILE StatTrackDeref;
1763 /** Profiling pgmTrackFlushGCPhysPT. */
1764 STAMPROFILE StatTrackFlushGCPhysPT;
1765 /** Profiling pgmTrackFlushGCPhysPTs. */
1766 STAMPROFILE StatTrackFlushGCPhysPTs;
1767 /** Profiling pgmTrackFlushGCPhysPTsSlow. */
1768 STAMPROFILE StatTrackFlushGCPhysPTsSlow;
1769 /** Number of times we've been out of user records. */
1770 STAMCOUNTER StatTrackFreeUpOneUser;
1771# endif
1772# ifdef PGMPOOL_WITH_GCPHYS_TRACKING
1773 /** Profiling deref activity related tracking GC physical pages. */
1774 STAMPROFILE StatTrackDerefGCPhys;
1775 /** Number of linear searches for a HCPhys in the ram ranges. */
1776 STAMCOUNTER StatTrackLinearRamSearches;
1777 /** The number of failing pgmPoolTrackPhysExtAlloc calls. */
1778 STAMCOUNTER StamTrackPhysExtAllocFailures;
1779# endif
1780# ifdef PGMPOOL_WITH_MONITORING
1781 /** Profiling the RC/R0 access handler. */
1782 STAMPROFILE StatMonitorRZ;
1783 /** Times we've failed interpreting the instruction. */
1784 STAMCOUNTER StatMonitorRZEmulateInstr;
1785 /** Profiling the pgmPoolFlushPage calls made from the RC/R0 access handler. */
1786 STAMPROFILE StatMonitorRZFlushPage;
1787 /** Times we've detected fork(). */
1788 STAMCOUNTER StatMonitorRZFork;
1789 /** Profiling the RC/R0 access we've handled (except REP STOSD). */
1790 STAMPROFILE StatMonitorRZHandled;
1791 /** Times we've failed interpreting a patch code instruction. */
1792 STAMCOUNTER StatMonitorRZIntrFailPatch1;
1793 /** Times we've failed interpreting a patch code instruction during flushing. */
1794 STAMCOUNTER StatMonitorRZIntrFailPatch2;
1795 /** The number of times we've seen rep prefixes we can't handle. */
1796 STAMCOUNTER StatMonitorRZRepPrefix;
1797 /** Profiling the REP STOSD cases we've handled. */
1798 STAMPROFILE StatMonitorRZRepStosd;
1799
1800 /** Profiling the R3 access handler. */
1801 STAMPROFILE StatMonitorR3;
1802 /** Times we've failed interpreting the instruction. */
1803 STAMCOUNTER StatMonitorR3EmulateInstr;
1804 /** Profiling the pgmPoolFlushPage calls made from the R3 access handler. */
1805 STAMPROFILE StatMonitorR3FlushPage;
1806 /** Times we've detected fork(). */
1807 STAMCOUNTER StatMonitorR3Fork;
1808 /** Profiling the R3 access we've handled (except REP STOSD). */
1809 STAMPROFILE StatMonitorR3Handled;
1810 /** The number of times we've seen rep prefixes we can't handle. */
1811 STAMCOUNTER StatMonitorR3RepPrefix;
1812 /** Profiling the REP STOSD cases we've handled. */
1813 STAMPROFILE StatMonitorR3RepStosd;
1814 /** The number of times we're called in an async thread and need to flush. */
1815 STAMCOUNTER StatMonitorR3Async;
1816 /** The high water mark for cModifiedPages. */
1817 uint16_t cModifiedPagesHigh;
1818 uint16_t Alignment2[3]; /**< Align the next member on a 64-bit boundary. */
1819# endif
1820# ifdef PGMPOOL_WITH_CACHE
1821 /** The number of cache hits. */
1822 STAMCOUNTER StatCacheHits;
1823 /** The number of cache misses. */
1824 STAMCOUNTER StatCacheMisses;
1825 /** The number of times we've got a conflict of 'kind' in the cache. */
1826 STAMCOUNTER StatCacheKindMismatches;
1827 /** Number of times we've been out of pages. */
1828 STAMCOUNTER StatCacheFreeUpOne;
1829 /** The number of cacheable allocations. */
1830 STAMCOUNTER StatCacheCacheable;
1831 /** The number of uncacheable allocations. */
1832 STAMCOUNTER StatCacheUncacheable;
1833# endif
1834#elif HC_ARCH_BITS == 64
1835 uint32_t Alignment3; /**< Align the next member on a 64-bit boundary. */
1836#endif
1837 /** The AVL tree for looking up a page by its HC physical address. */
1838 AVLOHCPHYSTREE HCPhysTree;
1839 uint32_t Alignment4; /**< Align the next member on a 64-bit boundary. */
1840 /** Array of pages. (cMaxPages in length)
1841 * The Id is the index into this array.
1842 */
1843 PGMPOOLPAGE aPages[PGMPOOL_IDX_FIRST];
1844} PGMPOOL, *PPGMPOOL, **PPPGMPOOL;
1845
1846
1847/** @def PGMPOOL_PAGE_2_PTR
1848 * Maps a pool page into the current context.
1849 *
1850 * @returns Pointer to the page.
1851 * @param pVM The VM handle.
1852 * @param pPage The pool page.
1853 *
1854 * @remark In RC this uses PGMGCDynMapHCPage(), so it will consume one of the
1855 * small page window slots employed by that function. Be careful.
1856 * @remark There is no need to assert on the result.
1857 */
1858#if defined(IN_RC)
1859# define PGMPOOL_PAGE_2_PTR(pVM, pPage) pgmPoolMapPageInlined(&(pVM)->pgm.s, (pPage))
1860#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1861# define PGMPOOL_PAGE_2_PTR(pVM, pPage) pgmPoolMapPageInlined(&(pVM)->pgm.s, (pPage))
1862#elif defined(VBOX_STRICT)
1863# define PGMPOOL_PAGE_2_PTR(pVM, pPage) pgmPoolMapPageStrict(pPage)
1864DECLINLINE(void *) pgmPoolMapPageStrict(PPGMPOOLPAGE pPage)
1865{
1866 Assert(pPage && pPage->pvPageR3);
1867 return pPage->pvPageR3;
1868}
1869#else
1870# define PGMPOOL_PAGE_2_PTR(pVM, pPage) ((pPage)->pvPageR3)
1871#endif
1872
1873/** @def PGMPOOL_PAGE_2_PTR_BY_PGM
1874 * Maps a pool page into the current context.
1875 *
1876 * @returns Pointer to the page.
1877 * @param pPGM Pointer to the PGM instance data.
1878 * @param pPage The pool page.
1879 *
1880 * @remark In RC this uses PGMGCDynMapHCPage(), so it will consume one of the
1881 * small page window slots employed by that function. Be careful.
1882 * @remark There is no need to assert on the result.
1883 */
1884#if defined(IN_RC)
1885# define PGMPOOL_PAGE_2_PTR_BY_PGM(pPGM, pPage) pgmPoolMapPageInlined((pPGM), (pPage))
1886#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1887# define PGMPOOL_PAGE_2_PTR_BY_PGM(pPGM, pPage) pgmPoolMapPageInlined((pPGM), (pPage))
1888#else
1889# define PGMPOOL_PAGE_2_PTR_BY_PGM(pPGM, pPage) PGMPOOL_PAGE_2_PTR(PGM2VM(pPGM), pPage)
1890#endif
1891
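/*
 * Example (sketch): mapping a pool page in order to inspect a shadow PAE PTE;
 * pPage is assumed to be one of the PGMPOOLKIND_PAE_PT_* pages and iPte a
 * valid entry index:
 *
 *     PX86PTPAE pShwPT = (PX86PTPAE)PGMPOOL_PAGE_2_PTR(pVM, pPage);
 *     if (pShwPT->a[iPte].n.u1Present)
 *         Log(("pte %u: %RX64\n", iPte, pShwPT->a[iPte].u));
 */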
1892
1893/** @name Per guest page tracking data.
1894 * This is currently a 16-bit word in the PGMPAGE structure; the idea, though,
1895 * is to use more bits for it and split it up later on. But for now we'll play
1896 * safe and change as little as possible.
1897 *
1898 * The 16-bit word has two parts:
1899 *
1900 * The first 14 bits form the @a idx field. It is either the index of a page in
1901 * the shadow page pool, or an index into the extent list.
1902 *
1903 * The 2 topmost bits make up the @a cRefs field, which counts the number of
1904 * shadow page pool references to the page. If cRefs equals
1905 * PGMPOOL_TD_CREFS_PHYSEXT, then the @a idx field is an index into the extent
1906 * (misnomer) table and not the shadow page pool.
1907 *
1908 * See PGM_PAGE_GET_TRACKING and PGM_PAGE_SET_TRACKING for how to get and set
1909 * the 16-bit word.
1910 *
1911 * @{ */
1912/** The shift count for getting to the cRefs part. */
1913#define PGMPOOL_TD_CREFS_SHIFT 14
1914/** The mask applied after shifting the tracking data down by
1915 * PGMPOOL_TD_CREFS_SHIFT. */
1916#define PGMPOOL_TD_CREFS_MASK 0x3
1917 /** The cRef value used to indicate that the idx is the head of a
1918 * physical cross reference list. */
1919#define PGMPOOL_TD_CREFS_PHYSEXT PGMPOOL_TD_CREFS_MASK
1920/** The shift used to get idx. */
1921#define PGMPOOL_TD_IDX_SHIFT 0
1922/** The mask applied to the idx after shifting down by PGMPOOL_TD_IDX_SHIFT. */
1923#define PGMPOOL_TD_IDX_MASK 0x3fff
1924 /** The idx value when we're out of PGMPOOLPHYSEXT entries and/or there are
1925 * simply too many mappings of this page. */
1926#define PGMPOOL_TD_IDX_OVERFLOWED PGMPOOL_TD_IDX_MASK
1927
1928/** @def PGMPOOL_TD_MAKE
1929 * Makes a 16-bit tracking data word.
1930 *
1931 * @returns tracking data.
1932 * @param cRefs The @a cRefs field. Must be within bounds!
1933 * @param idx The @a idx field. Must also be within bounds! */
1934#define PGMPOOL_TD_MAKE(cRefs, idx) ( ((cRefs) << PGMPOOL_TD_CREFS_SHIFT) | (idx) )
1935
1936/** @def PGMPOOL_TD_GET_CREFS
1937 * Get the @a cRefs field from a tracking data word.
1938 *
1939 * @returns The @a cRefs field
1940 * @param u16 The tracking data word. */
1941#define PGMPOOL_TD_GET_CREFS(u16) ( ((u16) >> PGMPOOL_TD_CREFS_SHIFT) & PGMPOOL_TD_CREFS_MASK )
1942
1943/** @def PGMPOOL_TD_GET_IDX
1944 * Get the @a idx field from a tracking data word.
1945 *
1946 * @returns The @a idx field
1947 * @param u16 The tracking data word. */
1948#define PGMPOOL_TD_GET_IDX(u16) ( ((u16) >> PGMPOOL_TD_IDX_SHIFT) & PGMPOOL_TD_IDX_MASK )
1949/** @} */
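/*
 * Worked example: a guest page referenced by exactly one shadow page table,
 * say pool page index 0x123, gets the tracking word
 *     PGMPOOL_TD_MAKE(1, 0x123) = (1 << 14) | 0x123 = 0x4123
 * so that PGMPOOL_TD_GET_CREFS(0x4123) = 1 and
 * PGMPOOL_TD_GET_IDX(0x4123) = 0x123. When cRefs reads
 * PGMPOOL_TD_CREFS_PHYSEXT (3), the idx part is a PGMPOOLPHYSEXT list index
 * instead of a pool page index.
 */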
1950
1951
1952/**
1953 * The trees use self-relative offsets as pointers.
1954 * Hence all tree data, including the root pointers, must be in the heap for HC and GC
1955 * to have the same layout.
1956 */
1957typedef struct PGMTREES
1958{
1959 /** Physical access handlers (AVL range+offsetptr tree). */
1960 AVLROGCPHYSTREE PhysHandlers;
1961 /** Virtual access handlers (AVL range + GC ptr tree). */
1962 AVLROGCPTRTREE VirtHandlers;
1963 /** Virtual access handlers indexed by physical address (AVL range + offsetptr tree). */
1964 AVLROGCPHYSTREE PhysToVirtHandlers;
1965 /** Virtual access handlers for the hypervisor (AVL range + GC ptr tree). */
1966 AVLROGCPTRTREE HyperVirtHandlers;
1967} PGMTREES;
1968/** Pointer to PGM trees. */
1969typedef PGMTREES *PPGMTREES;
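/*
 * Example (sketch): because the trees use the offset based AVL types, lookups
 * go through the ordinary IPRT accessors, e.g. finding the physical access
 * handler whose range covers GCPhys:
 *
 *     PPGMPHYSHANDLER pHandler = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pPGM->CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
 */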
1970
1971
1972/** @name Paging mode macros
1973 * @{ */
1974#ifdef IN_RC
1975# define PGM_CTX(a,b) a##RC##b
1976# define PGM_CTX_STR(a,b) a "GC" b
1977# define PGM_CTX_DECL(type) VMMRCDECL(type)
1978#else
1979# ifdef IN_RING3
1980# define PGM_CTX(a,b) a##R3##b
1981# define PGM_CTX_STR(a,b) a "R3" b
1982# define PGM_CTX_DECL(type) DECLCALLBACK(type)
1983# else
1984# define PGM_CTX(a,b) a##R0##b
1985# define PGM_CTX_STR(a,b) a "R0" b
1986# define PGM_CTX_DECL(type) VMMDECL(type)
1987# endif
1988#endif
1989
1990#define PGM_GST_NAME_REAL(name) PGM_CTX(pgm,GstReal##name)
1991#define PGM_GST_NAME_RC_REAL_STR(name) "pgmRCGstReal" #name
1992#define PGM_GST_NAME_R0_REAL_STR(name) "pgmR0GstReal" #name
1993#define PGM_GST_NAME_PROT(name) PGM_CTX(pgm,GstProt##name)
1994#define PGM_GST_NAME_RC_PROT_STR(name) "pgmRCGstProt" #name
1995#define PGM_GST_NAME_R0_PROT_STR(name) "pgmR0GstProt" #name
1996#define PGM_GST_NAME_32BIT(name) PGM_CTX(pgm,Gst32Bit##name)
1997#define PGM_GST_NAME_RC_32BIT_STR(name) "pgmRCGst32Bit" #name
1998#define PGM_GST_NAME_R0_32BIT_STR(name) "pgmR0Gst32Bit" #name
1999#define PGM_GST_NAME_PAE(name) PGM_CTX(pgm,GstPAE##name)
2000#define PGM_GST_NAME_RC_PAE_STR(name) "pgmRCGstPAE" #name
2001#define PGM_GST_NAME_R0_PAE_STR(name) "pgmR0GstPAE" #name
2002#define PGM_GST_NAME_AMD64(name) PGM_CTX(pgm,GstAMD64##name)
2003#define PGM_GST_NAME_RC_AMD64_STR(name) "pgmRCGstAMD64" #name
2004#define PGM_GST_NAME_R0_AMD64_STR(name) "pgmR0GstAMD64" #name
2005#define PGM_GST_PFN(name, pVM) ((pVM)->pgm.s.PGM_CTX(pfn,Gst##name))
2006#define PGM_GST_DECL(type, name) PGM_CTX_DECL(type) PGM_GST_NAME(name)
2007
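/*
 * Example: with PGM_GST_NAME defined as PGM_GST_NAME_32BIT (the guest
 * template instantiation does this elsewhere), a ring-3 build expands
 *     PGM_GST_DECL(int, GetPage)
 * into
 *     DECLCALLBACK(int) pgmR3Gst32BitGetPage
 * while ring-0 and raw-mode builds of the same template produce
 * pgmR0Gst32BitGetPage and pgmRCGst32BitGetPage respectively.
 */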
2008#define PGM_SHW_NAME_32BIT(name) PGM_CTX(pgm,Shw32Bit##name)
2009#define PGM_SHW_NAME_RC_32BIT_STR(name) "pgmRCShw32Bit" #name
2010#define PGM_SHW_NAME_R0_32BIT_STR(name) "pgmR0Shw32Bit" #name
2011#define PGM_SHW_NAME_PAE(name) PGM_CTX(pgm,ShwPAE##name)
2012#define PGM_SHW_NAME_RC_PAE_STR(name) "pgmRCShwPAE" #name
2013#define PGM_SHW_NAME_R0_PAE_STR(name) "pgmR0ShwPAE" #name
2014#define PGM_SHW_NAME_AMD64(name) PGM_CTX(pgm,ShwAMD64##name)
2015#define PGM_SHW_NAME_RC_AMD64_STR(name) "pgmRCShwAMD64" #name
2016#define PGM_SHW_NAME_R0_AMD64_STR(name) "pgmR0ShwAMD64" #name
2017#define PGM_SHW_NAME_NESTED(name) PGM_CTX(pgm,ShwNested##name)
2018#define PGM_SHW_NAME_RC_NESTED_STR(name) "pgmRCShwNested" #name
2019#define PGM_SHW_NAME_R0_NESTED_STR(name) "pgmR0ShwNested" #name
2020#define PGM_SHW_NAME_EPT(name) PGM_CTX(pgm,ShwEPT##name)
2021#define PGM_SHW_NAME_RC_EPT_STR(name) "pgmRCShwEPT" #name
2022#define PGM_SHW_NAME_R0_EPT_STR(name) "pgmR0ShwEPT" #name
2023#define PGM_SHW_DECL(type, name) PGM_CTX_DECL(type) PGM_SHW_NAME(name)
2024#define PGM_SHW_PFN(name, pVM) ((pVM)->pgm.s.PGM_CTX(pfn,Shw##name))
2025
2026/* Shw_Gst */
2027#define PGM_BTH_NAME_32BIT_REAL(name) PGM_CTX(pgm,Bth32BitReal##name)
2028#define PGM_BTH_NAME_32BIT_PROT(name) PGM_CTX(pgm,Bth32BitProt##name)
2029#define PGM_BTH_NAME_32BIT_32BIT(name) PGM_CTX(pgm,Bth32Bit32Bit##name)
2030#define PGM_BTH_NAME_PAE_REAL(name) PGM_CTX(pgm,BthPAEReal##name)
2031#define PGM_BTH_NAME_PAE_PROT(name) PGM_CTX(pgm,BthPAEProt##name)
2032#define PGM_BTH_NAME_PAE_32BIT(name) PGM_CTX(pgm,BthPAE32Bit##name)
2033#define PGM_BTH_NAME_PAE_PAE(name) PGM_CTX(pgm,BthPAEPAE##name)
2034#define PGM_BTH_NAME_AMD64_PROT(name) PGM_CTX(pgm,BthAMD64Prot##name)
2035#define PGM_BTH_NAME_AMD64_AMD64(name) PGM_CTX(pgm,BthAMD64AMD64##name)
2036#define PGM_BTH_NAME_NESTED_REAL(name) PGM_CTX(pgm,BthNestedReal##name)
2037#define PGM_BTH_NAME_NESTED_PROT(name) PGM_CTX(pgm,BthNestedProt##name)
2038#define PGM_BTH_NAME_NESTED_32BIT(name) PGM_CTX(pgm,BthNested32Bit##name)
2039#define PGM_BTH_NAME_NESTED_PAE(name) PGM_CTX(pgm,BthNestedPAE##name)
2040#define PGM_BTH_NAME_NESTED_AMD64(name) PGM_CTX(pgm,BthNestedAMD64##name)
2041#define PGM_BTH_NAME_EPT_REAL(name) PGM_CTX(pgm,BthEPTReal##name)
2042#define PGM_BTH_NAME_EPT_PROT(name) PGM_CTX(pgm,BthEPTProt##name)
2043#define PGM_BTH_NAME_EPT_32BIT(name) PGM_CTX(pgm,BthEPT32Bit##name)
2044#define PGM_BTH_NAME_EPT_PAE(name) PGM_CTX(pgm,BthEPTPAE##name)
2045#define PGM_BTH_NAME_EPT_AMD64(name) PGM_CTX(pgm,BthEPTAMD64##name)
2046
2047#define PGM_BTH_NAME_RC_32BIT_REAL_STR(name) "pgmRCBth32BitReal" #name
2048#define PGM_BTH_NAME_RC_32BIT_PROT_STR(name) "pgmRCBth32BitProt" #name
2049#define PGM_BTH_NAME_RC_32BIT_32BIT_STR(name) "pgmRCBth32Bit32Bit" #name
2050#define PGM_BTH_NAME_RC_PAE_REAL_STR(name) "pgmRCBthPAEReal" #name
2051#define PGM_BTH_NAME_RC_PAE_PROT_STR(name) "pgmRCBthPAEProt" #name
2052#define PGM_BTH_NAME_RC_PAE_32BIT_STR(name) "pgmRCBthPAE32Bit" #name
2053#define PGM_BTH_NAME_RC_PAE_PAE_STR(name) "pgmRCBthPAEPAE" #name
2054#define PGM_BTH_NAME_RC_AMD64_AMD64_STR(name) "pgmRCBthAMD64AMD64" #name
2055#define PGM_BTH_NAME_RC_NESTED_REAL_STR(name) "pgmRCBthNestedReal" #name
2056#define PGM_BTH_NAME_RC_NESTED_PROT_STR(name) "pgmRCBthNestedProt" #name
2057#define PGM_BTH_NAME_RC_NESTED_32BIT_STR(name) "pgmRCBthNested32Bit" #name
2058#define PGM_BTH_NAME_RC_NESTED_PAE_STR(name) "pgmRCBthNestedPAE" #name
2059#define PGM_BTH_NAME_RC_NESTED_AMD64_STR(name) "pgmRCBthNestedAMD64" #name
2060#define PGM_BTH_NAME_RC_EPT_REAL_STR(name) "pgmRCBthEPTReal" #name
2061#define PGM_BTH_NAME_RC_EPT_PROT_STR(name) "pgmRCBthEPTProt" #name
2062#define PGM_BTH_NAME_RC_EPT_32BIT_STR(name) "pgmRCBthEPT32Bit" #name
2063#define PGM_BTH_NAME_RC_EPT_PAE_STR(name) "pgmRCBthEPTPAE" #name
2064#define PGM_BTH_NAME_RC_EPT_AMD64_STR(name) "pgmRCBthEPTAMD64" #name
2065#define PGM_BTH_NAME_R0_32BIT_REAL_STR(name) "pgmR0Bth32BitReal" #name
2066#define PGM_BTH_NAME_R0_32BIT_PROT_STR(name) "pgmR0Bth32BitProt" #name
2067#define PGM_BTH_NAME_R0_32BIT_32BIT_STR(name) "pgmR0Bth32Bit32Bit" #name
2068#define PGM_BTH_NAME_R0_PAE_REAL_STR(name) "pgmR0BthPAEReal" #name
2069#define PGM_BTH_NAME_R0_PAE_PROT_STR(name) "pgmR0BthPAEProt" #name
2070#define PGM_BTH_NAME_R0_PAE_32BIT_STR(name) "pgmR0BthPAE32Bit" #name
2071#define PGM_BTH_NAME_R0_PAE_PAE_STR(name) "pgmR0BthPAEPAE" #name
2072#define PGM_BTH_NAME_R0_AMD64_PROT_STR(name) "pgmR0BthAMD64Prot" #name
2073#define PGM_BTH_NAME_R0_AMD64_AMD64_STR(name) "pgmR0BthAMD64AMD64" #name
2074#define PGM_BTH_NAME_R0_NESTED_REAL_STR(name) "pgmR0BthNestedReal" #name
2075#define PGM_BTH_NAME_R0_NESTED_PROT_STR(name) "pgmR0BthNestedProt" #name
2076#define PGM_BTH_NAME_R0_NESTED_32BIT_STR(name) "pgmR0BthNested32Bit" #name
2077#define PGM_BTH_NAME_R0_NESTED_PAE_STR(name) "pgmR0BthNestedPAE" #name
2078#define PGM_BTH_NAME_R0_NESTED_AMD64_STR(name) "pgmR0BthNestedAMD64" #name
2079#define PGM_BTH_NAME_R0_EPT_REAL_STR(name) "pgmR0BthEPTReal" #name
2080#define PGM_BTH_NAME_R0_EPT_PROT_STR(name) "pgmR0BthEPTProt" #name
2081#define PGM_BTH_NAME_R0_EPT_32BIT_STR(name) "pgmR0BthEPT32Bit" #name
2082#define PGM_BTH_NAME_R0_EPT_PAE_STR(name) "pgmR0BthEPTPAE" #name
2083#define PGM_BTH_NAME_R0_EPT_AMD64_STR(name) "pgmR0BthEPTAMD64" #name
2084
2085#define PGM_BTH_DECL(type, name) PGM_CTX_DECL(type) PGM_BTH_NAME(name)
2086#define PGM_BTH_PFN(name, pVM) ((pVM)->pgm.s.PGM_CTX(pfn,Bth##name))
2087/** @} */
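/*
 * Example (sketch): the mode specific workers are reached through these
 * macros; e.g. the RC/R0 \#PF handler dispatches via
 *
 *     rc = PGM_BTH_PFN(Trap0eHandler, pVM)(pVM, uErr, pRegFrame, pvFault);
 *
 * which resolves to the pfnRCBthTrap0eHandler / pfnR0BthTrap0eHandler member
 * for the currently active shadow+guest mode pair.
 */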
2088
2089/**
2090 * Data for each paging mode.
2091 */
2092typedef struct PGMMODEDATA
2093{
2094 /** The guest mode type. */
2095 uint32_t uGstType;
2096 /** The shadow mode type. */
2097 uint32_t uShwType;
2098
2099 /** @name Function pointers for Shadow paging.
2100 * @{
2101 */
2102 DECLR3CALLBACKMEMBER(int, pfnR3ShwRelocate,(PVM pVM, RTGCPTR offDelta));
2103 DECLR3CALLBACKMEMBER(int, pfnR3ShwExit,(PVM pVM));
2104 DECLR3CALLBACKMEMBER(int, pfnR3ShwGetPage,(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
2105 DECLR3CALLBACKMEMBER(int, pfnR3ShwModifyPage,(PVM pVM, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
2106
2107 DECLRCCALLBACKMEMBER(int, pfnRCShwGetPage,(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
2108 DECLRCCALLBACKMEMBER(int, pfnRCShwModifyPage,(PVM pVM, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
2109
2110 DECLR0CALLBACKMEMBER(int, pfnR0ShwGetPage,(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
2111 DECLR0CALLBACKMEMBER(int, pfnR0ShwModifyPage,(PVM pVM, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
2112 /** @} */
2113
2114 /** @name Function pointers for Guest paging.
2115 * @{
2116 */
2117 DECLR3CALLBACKMEMBER(int, pfnR3GstRelocate,(PVM pVM, RTGCPTR offDelta));
2118 DECLR3CALLBACKMEMBER(int, pfnR3GstExit,(PVM pVM));
2119 DECLR3CALLBACKMEMBER(int, pfnR3GstGetPage,(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
2120 DECLR3CALLBACKMEMBER(int, pfnR3GstModifyPage,(PVM pVM, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
2121 DECLR3CALLBACKMEMBER(int, pfnR3GstGetPDE,(PVM pVM, RTGCPTR GCPtr, PX86PDEPAE pPde));
2122 DECLRCCALLBACKMEMBER(int, pfnRCGstGetPage,(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
2123 DECLRCCALLBACKMEMBER(int, pfnRCGstModifyPage,(PVM pVM, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
2124 DECLRCCALLBACKMEMBER(int, pfnRCGstGetPDE,(PVM pVM, RTGCPTR GCPtr, PX86PDEPAE pPde));
2125 DECLR0CALLBACKMEMBER(int, pfnR0GstGetPage,(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
2126 DECLR0CALLBACKMEMBER(int, pfnR0GstModifyPage,(PVM pVM, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
2127 DECLR0CALLBACKMEMBER(int, pfnR0GstGetPDE,(PVM pVM, RTGCPTR GCPtr, PX86PDEPAE pPde));
2128 /** @} */
2129
2130 /** @name Function pointers for Both Shadow and Guest paging.
2131 * @{
2132 */
2133 DECLR3CALLBACKMEMBER(int, pfnR3BthRelocate,(PVM pVM, RTGCPTR offDelta));
2134 /* no pfnR3BthTrap0eHandler */
2135 DECLR3CALLBACKMEMBER(int, pfnR3BthInvalidatePage,(PVM pVM, RTGCPTR GCPtrPage));
2136 DECLR3CALLBACKMEMBER(int, pfnR3BthSyncCR3,(PVM pVM, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
2137 DECLR3CALLBACKMEMBER(int, pfnR3BthSyncPage,(PVM pVM, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
2138 DECLR3CALLBACKMEMBER(int, pfnR3BthPrefetchPage,(PVM pVM, RTGCPTR GCPtrPage));
2139 DECLR3CALLBACKMEMBER(int, pfnR3BthVerifyAccessSyncPage,(PVM pVM, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
2140#ifdef VBOX_STRICT
2141 DECLR3CALLBACKMEMBER(unsigned, pfnR3BthAssertCR3,(PVM pVM, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
2142#endif
2143 DECLR3CALLBACKMEMBER(int, pfnR3BthMapCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
2144 DECLR3CALLBACKMEMBER(int, pfnR3BthUnmapCR3,(PVM pVM));
2145
2146 DECLRCCALLBACKMEMBER(int, pfnRCBthTrap0eHandler,(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
2147 DECLRCCALLBACKMEMBER(int, pfnRCBthInvalidatePage,(PVM pVM, RTGCPTR GCPtrPage));
2148 DECLRCCALLBACKMEMBER(int, pfnRCBthSyncCR3,(PVM pVM, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
2149 DECLRCCALLBACKMEMBER(int, pfnRCBthSyncPage,(PVM pVM, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
2150 DECLRCCALLBACKMEMBER(int, pfnRCBthPrefetchPage,(PVM pVM, RTGCPTR GCPtrPage));
2151 DECLRCCALLBACKMEMBER(int, pfnRCBthVerifyAccessSyncPage,(PVM pVM, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
2152#ifdef VBOX_STRICT
2153 DECLRCCALLBACKMEMBER(unsigned, pfnRCBthAssertCR3,(PVM pVM, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
2154#endif
2155 DECLRCCALLBACKMEMBER(int, pfnRCBthMapCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
2156 DECLRCCALLBACKMEMBER(int, pfnRCBthUnmapCR3,(PVM pVM));
2157
2158 DECLR0CALLBACKMEMBER(int, pfnR0BthTrap0eHandler,(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
2159 DECLR0CALLBACKMEMBER(int, pfnR0BthInvalidatePage,(PVM pVM, RTGCPTR GCPtrPage));
2160 DECLR0CALLBACKMEMBER(int, pfnR0BthSyncCR3,(PVM pVM, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
2161 DECLR0CALLBACKMEMBER(int, pfnR0BthSyncPage,(PVM pVM, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
2162 DECLR0CALLBACKMEMBER(int, pfnR0BthPrefetchPage,(PVM pVM, RTGCPTR GCPtrPage));
2163 DECLR0CALLBACKMEMBER(int, pfnR0BthVerifyAccessSyncPage,(PVM pVM, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
2164#ifdef VBOX_STRICT
2165 DECLR0CALLBACKMEMBER(unsigned, pfnR0BthAssertCR3,(PVM pVM, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
2166#endif
2167 DECLR0CALLBACKMEMBER(int, pfnR0BthMapCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
2168 DECLR0CALLBACKMEMBER(int, pfnR0BthUnmapCR3,(PVM pVM));
2169 /** @} */
2170} PGMMODEDATA, *PPGMMODEDATA;
2171
2172
2173
2174/**
2175 * Converts a PGM pointer into a VM pointer.
2176 * @returns Pointer to the VM structure the PGM is part of.
2177 * @param pPGM Pointer to PGM instance data.
2178 */
2179#define PGM2VM(pPGM) ( (PVM)((char*)pPGM - pPGM->offVM) )
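/*
 * Example: given PPGM pPGM = &pVM->pgm.s, PGM2VM(pPGM) yields pVM again;
 * this works because the PGM instance data is embedded in the VM structure
 * and PGM::offVM records the distance back to its start.
 */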
2180
2181/**
2182 * PGM Data (part of VM)
2183 */
2184typedef struct PGM
2185{
2186 /** Offset to the VM structure. */
2187 RTINT offVM;
2188 /** Offset of the PGMCPU structure relative to VMCPU. */
2189 int32_t offVCpu;
2190 /** @cfgm{PGM/RamPreAlloc, bool, false}
2191 * Whether to preallocate all the guest RAM or not. */
2192 bool fRamPreAlloc;
2193 /** Alignment padding. */
2194 bool afAlignment0[3];
2195
2196
2197 /*
2198 * This will be redefined at least two more times before we're done, I'm sure.
2199 * The current code is only to get on with the coding.
2200 * - 2004-06-10: initial version, bird.
2201 * - 2004-07-02: 1st time, bird.
2202 * - 2004-10-18: 2nd time, bird.
2203 * - 2005-07-xx: 3rd time, bird.
2204 */
2205
2206 /** Pointer to the page table entries for the dynamic page mapping area - GCPtr. */
2207 RCPTRTYPE(PX86PTE) paDynPageMap32BitPTEsGC;
2208 /** Pointer to the page table entries for the dynamic page mapping area - GCPtr. */
2209 RCPTRTYPE(PX86PTEPAE) paDynPageMapPaePTEsGC;
2210
2211 /** The host paging mode. (This is what SUPLib reports.) */
2212 SUPPAGINGMODE enmHostMode;
2213 /** The shadow paging mode. */
2214 PGMMODE enmShadowMode;
2215 /** The guest paging mode. */
2216 PGMMODE enmGuestMode;
2217
2218 /** The physical address currently held in the guest CR3 register. */
2219 RTGCPHYS GCPhysCR3;
2220 /** Pointer to the 5-page CR3 content mapping.
2221 * The first page is always the CR3 (in some form) while the 4 other pages
2222 * are used for the PDs in PAE mode. */
2223 RTGCPTR GCPtrCR3Mapping;
2224#if HC_ARCH_BITS == 64 && GC_ARCH_BITS == 32
2225 uint32_t u32Alignment;
2226#endif
2227 /** @name 32-bit Guest Paging.
2228 * @{ */
2229 /** The guest's page directory, R3 pointer. */
2230 R3PTRTYPE(PX86PD) pGst32BitPdR3;
2231#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
2232 /** The guest's page directory, R0 pointer. */
2233 R0PTRTYPE(PX86PD) pGst32BitPdR0;
2234#endif
2235 /** The guest's page directory, static RC mapping. */
2236 RCPTRTYPE(PX86PD) pGst32BitPdRC;
2237 /** @} */
2238
2239 /** @name PAE Guest Paging.
2240 * @{ */
2241 /** The guest's page directory pointer table, static RC mapping. */
2242 RCPTRTYPE(PX86PDPT) pGstPaePdptRC;
2243 /** The guest's page directory pointer table, R3 pointer. */
2244 R3PTRTYPE(PX86PDPT) pGstPaePdptR3;
2245#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
2246 /** The guest's page directory pointer table, R0 pointer. */
2247 R0PTRTYPE(PX86PDPT) pGstPaePdptR0;
2248#endif
2249
2250 /** The guest's page directories, R3 pointers.
2251 * These are individual pointers and don't have to be adjacent.
2252 * These don't have to be up-to-date - use pgmGstGetPaePD() to access them. */
2253 R3PTRTYPE(PX86PDPAE) apGstPaePDsR3[4];
2254 /** The guest's page directories, R0 pointers.
2255 * Same restrictions as apGstPaePDsR3. */
2256#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
2257 R0PTRTYPE(PX86PDPAE) apGstPaePDsR0[4];
2258#endif
2259 /** The guest's page directories, static GC mapping.
2260 * Unlike the R3/R0 array the first entry can be accessed as a 2048 entry PD.
2261 * These don't have to be up-to-date - use pgmGstGetPaePD() to access them. */
2262 RCPTRTYPE(PX86PDPAE) apGstPaePDsRC[4];
2263 /** The physical addresses of the guest page directories (PAE) pointed to by apGstPaePDsR3/R0/RC. */
2264 RTGCPHYS aGCPhysGstPaePDs[4];
2265 /** The physical addresses of the monitored guest page directories (PAE). */
2266 RTGCPHYS aGCPhysGstPaePDsMonitored[4];
2267 /** @} */
2268
2269 /** @name AMD64 Guest Paging.
2270 * @{ */
2271 /** The guest's page directory pointer table, R3 pointer. */
2272 R3PTRTYPE(PX86PML4) pGstAmd64Pml4R3;
2273#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
2274 /** The guest's page directory pointer table, R0 pointer. */
2275 R0PTRTYPE(PX86PML4) pGstAmd64Pml4R0;
2276#endif
2277 /** @} */
2278
2279 /** Pointer to the page of the current active CR3 - R3 Ptr. */
2280 R3PTRTYPE(PPGMPOOLPAGE) pShwPageCR3R3;
2281 /** Pointer to the page of the current active CR3 - R0 Ptr. */
2282 R0PTRTYPE(PPGMPOOLPAGE) pShwPageCR3R0;
2283 /** Pointer to the page of the current active CR3 - RC Ptr. */
2284 RCPTRTYPE(PPGMPOOLPAGE) pShwPageCR3RC;
2285 /** The shadow page pool index of the user table as specified during allocation; useful for freeing root pages. */
2286 uint32_t iShwUser;
2287 /** The index into the user table (shadowed) as specified during allocation; useful for freeing root pages. */
2288 uint32_t iShwUserTable;
2289# if HC_ARCH_BITS == 64
2290 RTRCPTR alignment6; /**< structure size alignment. */
2291# endif
2292 /** @} */
2293
2294 /** @name Function pointers for Shadow paging.
2295 * @{
2296 */
2297 DECLR3CALLBACKMEMBER(int, pfnR3ShwRelocate,(PVM pVM, RTGCPTR offDelta));
2298 DECLR3CALLBACKMEMBER(int, pfnR3ShwExit,(PVM pVM));
2299 DECLR3CALLBACKMEMBER(int, pfnR3ShwGetPage,(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
2300 DECLR3CALLBACKMEMBER(int, pfnR3ShwModifyPage,(PVM pVM, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
2301
2302 DECLRCCALLBACKMEMBER(int, pfnRCShwGetPage,(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
2303 DECLRCCALLBACKMEMBER(int, pfnRCShwModifyPage,(PVM pVM, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
2304
2305 DECLR0CALLBACKMEMBER(int, pfnR0ShwGetPage,(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
2306 DECLR0CALLBACKMEMBER(int, pfnR0ShwModifyPage,(PVM pVM, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
2307
2308 /** @} */
2309
2310 /** @name Function pointers for Guest paging.
2311 * @{
2312 */
2313 DECLR3CALLBACKMEMBER(int, pfnR3GstRelocate,(PVM pVM, RTGCPTR offDelta));
2314 DECLR3CALLBACKMEMBER(int, pfnR3GstExit,(PVM pVM));
2315 DECLR3CALLBACKMEMBER(int, pfnR3GstGetPage,(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
2316 DECLR3CALLBACKMEMBER(int, pfnR3GstModifyPage,(PVM pVM, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
2317 DECLR3CALLBACKMEMBER(int, pfnR3GstGetPDE,(PVM pVM, RTGCPTR GCPtr, PX86PDEPAE pPde));
2318 DECLRCCALLBACKMEMBER(int, pfnRCGstGetPage,(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
2319 DECLRCCALLBACKMEMBER(int, pfnRCGstModifyPage,(PVM pVM, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
2320 DECLRCCALLBACKMEMBER(int, pfnRCGstGetPDE,(PVM pVM, RTGCPTR GCPtr, PX86PDEPAE pPde));
2321#if HC_ARCH_BITS == 64
2322 RTRCPTR alignment3; /**< structure size alignment. */
2323#endif
2324
2325 DECLR0CALLBACKMEMBER(int, pfnR0GstGetPage,(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
2326 DECLR0CALLBACKMEMBER(int, pfnR0GstModifyPage,(PVM pVM, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
2327 DECLR0CALLBACKMEMBER(int, pfnR0GstGetPDE,(PVM pVM, RTGCPTR GCPtr, PX86PDEPAE pPde));
2328 /** @} */
2329
2330 /** @name Function pointers for Both Shadow and Guest paging.
2331 * @{
2332 */
2333 DECLR3CALLBACKMEMBER(int, pfnR3BthRelocate,(PVM pVM, RTGCPTR offDelta));
2334 /* no pfnR3BthTrap0eHandler */
2335 DECLR3CALLBACKMEMBER(int, pfnR3BthInvalidatePage,(PVM pVM, RTGCPTR GCPtrPage));
2336 DECLR3CALLBACKMEMBER(int, pfnR3BthSyncCR3,(PVM pVM, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
2337 DECLR3CALLBACKMEMBER(int, pfnR3BthSyncPage,(PVM pVM, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
2338 DECLR3CALLBACKMEMBER(int, pfnR3BthPrefetchPage,(PVM pVM, RTGCPTR GCPtrPage));
2339 DECLR3CALLBACKMEMBER(int, pfnR3BthVerifyAccessSyncPage,(PVM pVM, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
2340 DECLR3CALLBACKMEMBER(unsigned, pfnR3BthAssertCR3,(PVM pVM, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
2341 DECLR3CALLBACKMEMBER(int, pfnR3BthMapCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
2342 DECLR3CALLBACKMEMBER(int, pfnR3BthUnmapCR3,(PVM pVM));
2343
2344 DECLR0CALLBACKMEMBER(int, pfnR0BthTrap0eHandler,(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
2345 DECLR0CALLBACKMEMBER(int, pfnR0BthInvalidatePage,(PVM pVM, RTGCPTR GCPtrPage));
2346 DECLR0CALLBACKMEMBER(int, pfnR0BthSyncCR3,(PVM pVM, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
2347 DECLR0CALLBACKMEMBER(int, pfnR0BthSyncPage,(PVM pVM, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
2348 DECLR0CALLBACKMEMBER(int, pfnR0BthPrefetchPage,(PVM pVM, RTGCPTR GCPtrPage));
2349 DECLR0CALLBACKMEMBER(int, pfnR0BthVerifyAccessSyncPage,(PVM pVM, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
2350 DECLR0CALLBACKMEMBER(unsigned, pfnR0BthAssertCR3,(PVM pVM, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
2351 DECLR0CALLBACKMEMBER(int, pfnR0BthMapCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
2352 DECLR0CALLBACKMEMBER(int, pfnR0BthUnmapCR3,(PVM pVM));
2353
2354 DECLRCCALLBACKMEMBER(int, pfnRCBthTrap0eHandler,(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
2355 DECLRCCALLBACKMEMBER(int, pfnRCBthInvalidatePage,(PVM pVM, RTGCPTR GCPtrPage));
2356 DECLRCCALLBACKMEMBER(int, pfnRCBthSyncCR3,(PVM pVM, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
2357 DECLRCCALLBACKMEMBER(int, pfnRCBthSyncPage,(PVM pVM, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
2358 DECLRCCALLBACKMEMBER(int, pfnRCBthPrefetchPage,(PVM pVM, RTGCPTR GCPtrPage));
2359 DECLRCCALLBACKMEMBER(int, pfnRCBthVerifyAccessSyncPage,(PVM pVM, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
2360 DECLRCCALLBACKMEMBER(unsigned, pfnRCBthAssertCR3,(PVM pVM, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
2361 DECLRCCALLBACKMEMBER(int, pfnRCBthMapCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
2362 DECLRCCALLBACKMEMBER(int, pfnRCBthUnmapCR3,(PVM pVM));
2363#if HC_ARCH_BITS == 64
2364 RTRCPTR alignment2; /**< structure size alignment. */
2365#endif
2366 /** @} */
2367
2368 /** Pointer to SHW+GST mode data (function pointers).
2369 * The index into this table is made up from the shadow and guest paging mode types. */
2370 R3PTRTYPE(PPGMMODEDATA) paModeData;
2371
2372 /** Pointer to the list of RAM ranges (Phys GC -> Phys HC conversion) - for R3.
2373 * This is sorted by physical address and contains no overlapping ranges. */
2374 R3PTRTYPE(PPGMRAMRANGE) pRamRangesR3;
2375 /** R0 pointer corresponding to PGM::pRamRangesR3. */
2376 R0PTRTYPE(PPGMRAMRANGE) pRamRangesR0;
2377 /** RC pointer corresponding to PGM::pRamRangesR3. */
2378 RCPTRTYPE(PPGMRAMRANGE) pRamRangesRC;
2379 /** The configured RAM size. */
2380 RTUINT cbRamSize;
2381
2382 /** Pointer to the list of ROM ranges - for R3.
2383 * This is sorted by physical address and contains no overlapping ranges. */
2384 R3PTRTYPE(PPGMROMRANGE) pRomRangesR3;
2385 /** R0 pointer corresponding to PGM::pRomRangesR3. */
2386 R0PTRTYPE(PPGMROMRANGE) pRomRangesR0;
2387 /** RC pointer corresponding to PGM::pRomRangesR3. */
2388 RCPTRTYPE(PPGMROMRANGE) pRomRangesRC;
2389 /** Alignment padding. */
2390 RTRCPTR GCPtrPadding2;
2391
2392 /** Pointer to the list of MMIO2 ranges - for R3.
2393 * Registration order. */
2394 R3PTRTYPE(PPGMMMIO2RANGE) pMmio2RangesR3;
2395
2396 /** PGM offset based trees - R3 Ptr. */
2397 R3PTRTYPE(PPGMTREES) pTreesR3;
2398 /** PGM offset based trees - R0 Ptr. */
2399 R0PTRTYPE(PPGMTREES) pTreesR0;
2400 /** PGM offset based trees - RC Ptr. */
2401 RCPTRTYPE(PPGMTREES) pTreesRC;
2402
2403 /** Linked list of GC mappings - for RC.
2404 * The list is sorted ascending on address.
2405 */
2406 RCPTRTYPE(PPGMMAPPING) pMappingsRC;
2407 /** Linked list of GC mappings - for HC.
2408 * The list is sorted ascending on address.
2409 */
2410 R3PTRTYPE(PPGMMAPPING) pMappingsR3;
2411 /** Linked list of GC mappings - for R0.
2412 * The list is sorted ascending on address.
2413 */
2414 R0PTRTYPE(PPGMMAPPING) pMappingsR0;
2415
2416 /** Indicates that PGMR3FinalizeMappings has been called and that further
2417 * PGMR3MapIntermediate calls will be rejected. */
2418 bool fFinalizedMappings;
2419 /** If set no conflict checks are required. (boolean) */
2420 bool fMappingsFixed;
2421 /** If set, then no mappings are put into the shadow page table. (boolean) */
2422 bool fDisableMappings;
2423 /** Size of fixed mapping */
2424 uint32_t cbMappingFixed;
2425 /** Base address (GC) of fixed mapping */
2426 RTGCPTR GCPtrMappingFixed;
2427#if HC_ARCH_BITS == 64 && GC_ARCH_BITS == 32
2428 uint32_t u32Padding0; /**< alignment padding. */
2429#endif
2430
2431
2432 /** @name Intermediate Context
2433 * @{ */
2434 /** Pointer to the intermediate page directory - Normal. */
2435 R3PTRTYPE(PX86PD) pInterPD;
2436 /** Pointer to the intermediate page tables - Normal.
2437 * There are two page tables, one for the identity mapping and one for
2438 * the host context mapping (of the core code). */
2439 R3PTRTYPE(PX86PT) apInterPTs[2];
2440 /** Pointer to the intermediate page tables - PAE. */
2441 R3PTRTYPE(PX86PTPAE) apInterPaePTs[2];
2442 /** Pointer to the intermediate page directory - PAE. */
2443 R3PTRTYPE(PX86PDPAE) apInterPaePDs[4];
2444 /** Pointer to the intermediate page directory pointer table - PAE. */
2445 R3PTRTYPE(PX86PDPT) pInterPaePDPT;
2446 /** Pointer to the intermediate page-map level 4 - AMD64. */
2447 R3PTRTYPE(PX86PML4) pInterPaePML4;
2448 /** Pointer to the intermediate page directory pointer table - AMD64. */
2449 R3PTRTYPE(PX86PDPT) pInterPaePDPT64;
2450 /** The Physical Address (HC) of the intermediate Page Directory - Normal. */
2451 RTHCPHYS HCPhysInterPD;
2452 /** The Physical Address (HC) of the intermediate Page Directory Pointer Table - PAE. */
2453 RTHCPHYS HCPhysInterPaePDPT;
2454 /** The Physical Address (HC) of the intermediate Page Map Level 4 table - AMD64. */
2455 RTHCPHYS HCPhysInterPaePML4;
2456 /** @} */
2457
2458 /** Base address of the dynamic page mapping area.
2459 * The array is MM_HYPER_DYNAMIC_SIZE bytes big.
2460 */
2461 RCPTRTYPE(uint8_t *) pbDynPageMapBaseGC;
2462 /** The index of the last entry used in the dynamic page mapping area. */
2463 RTUINT iDynPageMapLast;
2464 /** Cache containing the last entries in the dynamic page mapping area.
2465 * The cache covers half of the mapping area. */
2466 RTHCPHYS aHCPhysDynPageMapCache[MM_HYPER_DYNAMIC_SIZE >> (PAGE_SHIFT + 1)];
2467 uint32_t aLockedDynPageMapCache[MM_HYPER_DYNAMIC_SIZE >> (PAGE_SHIFT + 1)];
2468
2469 /** The address of the ring-0 mapping cache if we're making use of it. */
2470 RTR0PTR pvR0DynMapUsed;
2471#if HC_ARCH_BITS == 32
2472 RTR0PTR R0PtrPadding0; /**< Alignment. */
2473#endif
2474
2475
2476 /** 4 MB page mask; 32 or 36 bits depending on PSE-36 */
2477 RTGCPHYS GCPhys4MBPSEMask;
2478
2479 /** A20 gate mask.
2480 * Our current approach to A20 emulation is to let REM do it and not bother
2481 * anywhere else. The interesting guests will be operating with it enabled anyway.
2482 * But should the need arise, we'll subject physical addresses to this mask. */
2483 RTGCPHYS GCPhysA20Mask;
2484 /** A20 gate state - boolean! */
2485 RTUINT fA20Enabled;
2486
2487 /** What needs syncing (PGM_SYNC_*).
2488 * This is used to queue operations for PGMSyncCR3, PGMInvalidatePage,
2489 * PGMFlushTLB, and PGMR3Load. */
2490 RTUINT fSyncFlags;
2491
2492 /** PGM critical section.
2493 * This protects the physical & virtual access handlers, ram ranges,
2494 * and the page flag updating (some of it anyway).
2495 */
2496 PDMCRITSECT CritSect;
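    /*
     * Example (sketch): the section is entered and left with the ordinary PDM
     * critsect API, e.g.:
     *
     *     int rc = PDMCritSectEnter(&pVM->pgm.s.CritSect, VERR_SEM_BUSY);
     *     AssertRC(rc);
     *     ...
     *     PDMCritSectLeave(&pVM->pgm.s.CritSect);
     */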
2497
2498 /** Shadow Page Pool - R3 Ptr. */
2499 R3PTRTYPE(PPGMPOOL) pPoolR3;
2500 /** Shadow Page Pool - R0 Ptr. */
2501 R0PTRTYPE(PPGMPOOL) pPoolR0;
2502 /** Shadow Page Pool - RC Ptr. */
2503 RCPTRTYPE(PPGMPOOL) pPoolRC;
2504
2505 /** We're not in a state which permits writes to guest memory.
2506 * (Only used in strict builds.) */
2507 bool fNoMorePhysWrites;
2508
2509 /** Flush the cache on the next access. */
2510 bool fPhysCacheFlushPending;
2511/** @todo r=bird: Fix member names!*/
2512 /** PGMPhysRead cache */
2513 PGMPHYSCACHE pgmphysreadcache;
2514 /** PGMPhysWrite cache */
2515 PGMPHYSCACHE pgmphyswritecache;
2516
2517 /**
2518 * Data associated with managing the ring-3 mappings of the allocation chunks.
2519 */
2520 struct
2521 {
2522 /** The chunk tree, ordered by chunk id. */
2523#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
2524 R3PTRTYPE(PAVLU32NODECORE) pTree;
2525#else
2526 R3R0PTRTYPE(PAVLU32NODECORE) pTree;
2527#endif
2528 /** The chunk mapping TLB. */
2529 PGMCHUNKR3MAPTLB Tlb;
2530 /** The number of mapped chunks. */
2531 uint32_t c;
2532 /** The maximum number of mapped chunks.
2533 * @cfgm PGM/MaxRing3Chunks */
2534 uint32_t cMax;
2535 /** The chunk age tree, ordered by ageing sequence number. */
2536 R3PTRTYPE(PAVLLU32NODECORE) pAgeTree;
2538 /** The current ageing sequence number ('time'). */
2538 uint32_t iNow;
2539 /** Number of pgmR3PhysChunkFindUnmapCandidate calls left to the next ageing. */
2540 uint32_t AgeingCountdown;
2541 } ChunkR3Map;
2542
2543 /**
2544 * The page mapping TLB for ring-3 and (for the time being) ring-0.
2545 */
2546 PGMPAGER3MAPTLB PhysTlbHC;
2547
2548 /** @name The zero page.
2549 * @{ */
2550 /** The host physical address of the zero page. */
2551 RTHCPHYS HCPhysZeroPg;
2552 /** The ring-3 mapping of the zero page. */
2553 RTR3PTR pvZeroPgR3;
2554 /** The ring-0 mapping of the zero page. */
2555 RTR0PTR pvZeroPgR0;
2556 /** The GC mapping of the zero page. */
2557 RTGCPTR pvZeroPgGC;
2558#if GC_ARCH_BITS != 32
2559 uint32_t u32ZeroAlignment; /**< Alignment padding. */
2560#endif
2561 /** @}*/
2562
2563 /** The number of handy pages. */
2564 uint32_t cHandyPages;
2565 /**
2566 * Array of handy pages.
2567 *
2568 * This array is used in a two-way communication between pgmPhysAllocPage
2569 * and GMMR0AllocateHandyPages, with PGMR3PhysAllocateHandyPages serving as
2570 * an intermediary.
2571 *
2572 * The size of this array is important, see pgmPhysEnsureHandyPage for details.
2573 * (The current size of 32 pages means 128 KB of handy memory.)
2574 */
2575 GMMPAGEDESC aHandyPages[32];
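    /*
     * Example (sketch, simplified): the allocator consumes descriptors from
     * the tail of the array, while ring-0 refills them and fills in the
     * GMMPAGEDESC fields:
     *
     *     uint32_t iHandy = --pVM->pgm.s.cHandyPages;
     *     RTHCPHYS HCPhys = pVM->pgm.s.aHandyPages[iHandy].HCPhysGCPhys;
     *     uint32_t idPage = pVM->pgm.s.aHandyPages[iHandy].idPage;
     */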
2576
2577 /** @name Release Statistics
2578 * @{ */
2579 uint32_t cAllPages; /**< The total number of pages. (Should be Private + Shared + Zero.) */
2580 uint32_t cPrivatePages; /**< The number of private pages. */
2581 uint32_t cSharedPages; /**< The number of shared pages. */
2582 uint32_t cZeroPages; /**< The number of zero backed pages. */
2583 /** The number of times the guest has switched mode since last reset or statistics reset. */
2584 STAMCOUNTER cGuestModeChanges;
2585 /** The number of times we were forced to change the hypervisor region location. */
2586 STAMCOUNTER cRelocations;
2587 /** @} */
2588
2589#ifdef VBOX_WITH_STATISTICS /** @todo move this chunk to the heap. */
2590 /** RC: Which statistic this \#PF should be attributed to. */
2591 RCPTRTYPE(PSTAMPROFILE) pStatTrap0eAttributionRC;
2592 RTRCPTR padding0;
2593 /** R0: Which statistic this \#PF should be attributed to. */
2594 R0PTRTYPE(PSTAMPROFILE) pStatTrap0eAttributionR0;
2595 RTR0PTR padding1;
2596
2597 /* Common */
2598# ifdef PGMPOOL_WITH_GCPHYS_TRACKING
2599 STAMCOUNTER StatTrackVirgin; /**< The number of first time shadowings. */
2600 STAMCOUNTER StatTrackAliased; /**< The number of times switching to cRef2, i.e. the page is being shadowed by two PTs. */
2601 STAMCOUNTER StatTrackAliasedMany; /**< The number of times we're tracking using cRef2. */
2602 STAMCOUNTER StatTrackAliasedLots; /**< The number of times we're hitting pages which have overflowed cRef2. */
2603 STAMCOUNTER StatTrackOverflows; /**< The number of times the extent list grows too long. */
2604 STAMPROFILE StatTrackDeref; /**< Profiling of SyncPageWorkerTrackDeref (expensive). */
2605# endif
2606 STAMCOUNTER StatSyncPtPD[X86_PG_ENTRIES]; /**< SyncPT - PD distribution. */
2607 STAMCOUNTER StatSyncPagePD[X86_PG_ENTRIES]; /**< SyncPage - PD distribution. */
2608
2609 /* R3 only: */
2610 STAMCOUNTER StatR3DetectedConflicts; /**< R3: Number of times PGMR3MapHasConflicts() detected a conflict. */
2611 STAMPROFILE StatR3ResolveConflict; /**< R3: pgmR3SyncPTResolveConflict() profiling (includes the entire relocation). */
2612 STAMCOUNTER StatR3GuestPDWrite; /**< R3: The total number of times pgmHCGuestPDWriteHandler() was called. */
2613 STAMCOUNTER StatR3GuestPDWriteConflict; /**< R3: The number of times GuestPDWriteContlict() detected a conflict. */
2614#ifndef VBOX_WITH_NEW_PHYS_CODE
2615 STAMCOUNTER StatR3DynRamTotal; /**< R3: Allocated MBs of guest ram */
2616 STAMCOUNTER StatR3DynRamGrow; /**< R3: Nr of pgmr3PhysGrowRange calls. */
2617#endif
2618
2619 /* R0 only: */
2620 STAMCOUNTER StatR0DynMapMigrateInvlPg; /**< R0: invlpg in PGMDynMapMigrateAutoSet. */
2621 STAMPROFILE StatR0DynMapGCPageInl; /**< R0: Calls to pgmR0DynMapGCPageInlined. */
2622 STAMCOUNTER StatR0DynMapGCPageInlHits; /**< R0: Hash table lookup hits. */
2623 STAMCOUNTER StatR0DynMapGCPageInlMisses; /**< R0: Misses that fall back to code common with PGMDynMapHCPage. */
2624 STAMCOUNTER StatR0DynMapGCPageInlRamHits; /**< R0: 1st ram range hits. */
2625 STAMCOUNTER StatR0DynMapGCPageInlRamMisses; /**< R0: 1st ram range misses, takes slow path. */
2626 STAMPROFILE StatR0DynMapHCPageInl; /**< R0: Calls to pgmR0DynMapHCPageInlined. */
2627 STAMCOUNTER StatR0DynMapHCPageInlHits; /**< R0: Hash table lookup hits. */
2628 STAMCOUNTER StatR0DynMapHCPageInlMisses; /**< R0: Misses that fall back to code common with PGMDynMapHCPage. */
2629 STAMPROFILE StatR0DynMapHCPage; /**< R0: Calls to PGMDynMapHCPage. */
2630 STAMCOUNTER StatR0DynMapSetOptimize; /**< R0: Calls to pgmDynMapOptimizeAutoSet. */
2631 STAMCOUNTER StatR0DynMapSetSearchFlushes; /**< R0: Set search resorting to subset flushes. */
2632 STAMCOUNTER StatR0DynMapSetSearchHits; /**< R0: Set search hits. */
2633 STAMCOUNTER StatR0DynMapSetSearchMisses; /**< R0: Set search misses. */
2634 STAMCOUNTER StatR0DynMapPage; /**< R0: Calls to pgmR0DynMapPage. */
2635 STAMCOUNTER StatR0DynMapPageHits0; /**< R0: Hits at iPage+0. */
2636 STAMCOUNTER StatR0DynMapPageHits1; /**< R0: Hits at iPage+1. */
2637 STAMCOUNTER StatR0DynMapPageHits2; /**< R0: Hits at iPage+2. */
2638 STAMCOUNTER StatR0DynMapPageInvlPg; /**< R0: invlpg. */
2639 STAMCOUNTER StatR0DynMapPageSlow; /**< R0: Calls to pgmR0DynMapPageSlow. */
2640 STAMCOUNTER StatR0DynMapPageSlowLoopHits; /**< R0: Hits in the pgmR0DynMapPageSlow search loop. */
2641 STAMCOUNTER StatR0DynMapPageSlowLoopMisses; /**< R0: Misses in the pgmR0DynMapPageSlow search loop. */
2642 //STAMCOUNTER StatR0DynMapPageSlowLostHits; /**< R0: Lost hits. */
2643 STAMCOUNTER StatR0DynMapSubsets; /**< R0: Times PGMDynMapPushAutoSubset was called. */
2644 STAMCOUNTER StatR0DynMapPopFlushes; /**< R0: Times PGMDynMapPopAutoSubset flushes the subset. */
2645 STAMCOUNTER aStatR0DynMapSetSize[11]; /**< R0: Set size distribution. */
2646
2647 /* RC only: */
2648 STAMCOUNTER StatRCDynMapCacheMisses; /**< RC: The number of dynamic page mapping cache misses. */
2649 STAMCOUNTER StatRCDynMapCacheHits; /**< RC: The number of dynamic page mapping cache hits. */
2650 STAMCOUNTER StatRCInvlPgConflict; /**< RC: Number of times PGMInvalidatePage() detected a mapping conflict. */
2651 STAMCOUNTER StatRCInvlPgSyncMonCR3; /**< RC: Number of times PGMInvalidatePage() ran into PGM_SYNC_MONITOR_CR3. */
2652
2653 /* RZ only: */
2654 STAMPROFILE StatRZTrap0e; /**< RC/R0: PGMTrap0eHandler() profiling. */
2655 STAMPROFILE StatRZTrap0eTimeCheckPageFault;
2656 STAMPROFILE StatRZTrap0eTimeSyncPT;
2657 STAMPROFILE StatRZTrap0eTimeMapping;
2658 STAMPROFILE StatRZTrap0eTimeOutOfSync;
2659 STAMPROFILE StatRZTrap0eTimeHandlers;
2660 STAMPROFILE StatRZTrap0eTime2CSAM; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is CSAM. */
2661 STAMPROFILE StatRZTrap0eTime2DirtyAndAccessed; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is dirty and/or accessed bit emulation. */
2662 STAMPROFILE StatRZTrap0eTime2GuestTrap; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is a guest trap. */
2663 STAMPROFILE StatRZTrap0eTime2HndPhys; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is a physical handler. */
2664 STAMPROFILE StatRZTrap0eTime2HndVirt; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is a virtual handler. */
2665 STAMPROFILE StatRZTrap0eTime2HndUnhandled; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is access outside the monitored areas of a monitored page. */
2666 STAMPROFILE StatRZTrap0eTime2Misc; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is not known. */
2667 STAMPROFILE StatRZTrap0eTime2OutOfSync; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is an out-of-sync page. */
2668 STAMPROFILE StatRZTrap0eTime2OutOfSyncHndPhys; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is an out-of-sync physical handler page. */
2669 STAMPROFILE StatRZTrap0eTime2OutOfSyncHndVirt; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is an out-of-sync virtual handler page. */
2670 STAMPROFILE StatRZTrap0eTime2OutOfSyncHndObs; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is an obsolete handler page. */
2671 STAMPROFILE StatRZTrap0eTime2SyncPT; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is lazy syncing of a PT. */
2672 STAMCOUNTER StatRZTrap0eConflicts; /**< RC/R0: The number of times \#PF was caused by an undetected conflict. */
2673 STAMCOUNTER StatRZTrap0eHandlersMapping; /**< RC/R0: Number of traps due to access handlers in mappings. */
2674 STAMCOUNTER StatRZTrap0eHandlersOutOfSync; /**< RC/R0: Number of out-of-sync handled pages. */
2675 STAMCOUNTER StatRZTrap0eHandlersPhysical; /**< RC/R0: Number of traps due to physical access handlers. */
2676 STAMCOUNTER StatRZTrap0eHandlersVirtual; /**< RC/R0: Number of traps due to virtual access handlers. */
2677 STAMCOUNTER StatRZTrap0eHandlersVirtualByPhys; /**< RC/R0: Number of traps due to virtual access handlers found by physical address. */
2678 STAMCOUNTER StatRZTrap0eHandlersVirtualUnmarked;/**< RC/R0: Number of traps due to virtual access handlers found by virtual address (without proper physical flags). */
2679 STAMCOUNTER StatRZTrap0eHandlersUnhandled; /**< RC/R0: Number of traps due to access outside range of monitored page(s). */
2680 STAMCOUNTER StatRZTrap0eHandlersInvalid; /**< RC/R0: Number of traps due to access to invalid physical memory. */
2681 STAMCOUNTER StatRZTrap0eUSNotPresentRead; /**< RC/R0: #PF err kind */
2682 STAMCOUNTER StatRZTrap0eUSNotPresentWrite; /**< RC/R0: #PF err kind */
2683 STAMCOUNTER StatRZTrap0eUSWrite; /**< RC/R0: #PF err kind */
2684 STAMCOUNTER StatRZTrap0eUSReserved; /**< RC/R0: #PF err kind */
2685 STAMCOUNTER StatRZTrap0eUSNXE; /**< RC/R0: #PF err kind */
2686 STAMCOUNTER StatRZTrap0eUSRead; /**< RC/R0: #PF err kind */
2687 STAMCOUNTER StatRZTrap0eSVNotPresentRead; /**< RC/R0: #PF err kind */
2688 STAMCOUNTER StatRZTrap0eSVNotPresentWrite; /**< RC/R0: #PF err kind */
2689 STAMCOUNTER StatRZTrap0eSVWrite; /**< RC/R0: #PF err kind */
2690 STAMCOUNTER StatRZTrap0eSVReserved; /**< RC/R0: #PF err kind */
2691 STAMCOUNTER StatRZTrap0eSNXE; /**< RC/R0: #PF err kind */
2692 STAMCOUNTER StatRZTrap0eGuestPF; /**< RC/R0: Real guest #PFs. */
2693 STAMCOUNTER StatRZTrap0eGuestPFUnh; /**< RC/R0: Real guest #PF ending up at the end of the #PF code. */
2694 STAMCOUNTER StatRZTrap0eGuestPFMapping; /**< RC/R0: Real guest #PF to HMA or other mapping. */
2695 STAMCOUNTER StatRZTrap0eWPEmulInRZ; /**< RC/R0: WP=0 virtualization trap, handled. */
2696 STAMCOUNTER StatRZTrap0eWPEmulToR3; /**< RC/R0: WP=0 virtualization trap, chickened out. */
2697 STAMCOUNTER StatRZTrap0ePD[X86_PG_ENTRIES]; /**< RC/R0: PD distribution of the #PFs. */
2698 STAMCOUNTER StatRZGuestCR3WriteHandled; /**< RC/R0: The number of times WriteHandlerCR3() was successfully called. */
2699 STAMCOUNTER StatRZGuestCR3WriteUnhandled; /**< RC/R0: The number of times WriteHandlerCR3() was called and we had to fall back to the recompiler. */
2700 STAMCOUNTER StatRZGuestCR3WriteConflict; /**< RC/R0: The number of times WriteHandlerCR3() was called and a conflict was detected. */
2701 STAMCOUNTER StatRZGuestROMWriteHandled; /**< RC/R0: The number of times pgmPhysRomWriteHandler() was successfully called. */
2702 STAMCOUNTER StatRZGuestROMWriteUnhandled; /**< RC/R0: The number of times pgmPhysRomWriteHandler() was called and we had to fall back to the recompiler. */
2703
2704 /* HC - R3 and (maybe) R0: */
2705
2706 /* RZ & R3: */
2707 STAMPROFILE StatRZSyncCR3; /**< RC/R0: PGMSyncCR3() profiling. */
2708 STAMPROFILE StatRZSyncCR3Handlers; /**< RC/R0: Profiling of the PGMSyncCR3() update handler section. */
2709 STAMPROFILE StatRZSyncCR3HandlerVirtualReset; /**< RC/R0: Profiling of the virtual handler resets. */
2710 STAMPROFILE StatRZSyncCR3HandlerVirtualUpdate; /**< RC/R0: Profiling of the virtual handler updates. */
2711 STAMCOUNTER StatRZSyncCR3Global; /**< RC/R0: The number of global CR3 syncs. */
2712 STAMCOUNTER StatRZSyncCR3NotGlobal; /**< RC/R0: The number of non-global CR3 syncs. */
2713 STAMCOUNTER StatRZSyncCR3DstCacheHit; /**< RC/R0: The number of times we got some kind of cache hit on a page table. */
2714 STAMCOUNTER StatRZSyncCR3DstFreed; /**< RC/R0: The number of times we've had to free a shadow entry. */
2715 STAMCOUNTER StatRZSyncCR3DstFreedSrcNP; /**< RC/R0: The number of times we've had to free a shadow entry for which the source entry was not present. */
2716 STAMCOUNTER StatRZSyncCR3DstNotPresent; /**< RC/R0: The number of times we've encountered a not present shadow entry for a present guest entry. */
2717 STAMCOUNTER StatRZSyncCR3DstSkippedGlobalPD; /**< RC/R0: The number of times a global page directory wasn't flushed. */
2718 STAMCOUNTER StatRZSyncCR3DstSkippedGlobalPT; /**< RC/R0: The number of times a page table with only global entries wasn't flushed. */
2719 STAMPROFILE StatRZSyncPT; /**< RC/R0: PGMSyncPT() profiling. */
2720 STAMCOUNTER StatRZSyncPTFailed; /**< RC/R0: The number of times PGMSyncPT() failed. */
2721 STAMCOUNTER StatRZSyncPT4K; /**< RC/R0: Number of 4KB syncs. */
2722 STAMCOUNTER StatRZSyncPT4M; /**< RC/R0: Number of 4MB syncs. */
2723 STAMCOUNTER StatRZSyncPagePDNAs; /**< RC/R0: The number of times we've marked a PD not present from SyncPage to virtualize the accessed bit. */
2724 STAMCOUNTER StatRZSyncPagePDOutOfSync; /**< RC/R0: The number of times we've encountered an out-of-sync PD in SyncPage. */
2725 STAMCOUNTER StatRZAccessedPage; /**< RC/R0: The number of pages marked not present for accessed bit emulation. */
2726 STAMPROFILE StatRZDirtyBitTracking; /**< RC/R0: Profiling the dirty bit tracking in CheckPageFault(). */
2727 STAMCOUNTER StatRZDirtyPage; /**< RC/R0: The number of pages marked read-only for dirty bit tracking. */
2728 STAMCOUNTER StatRZDirtyPageBig; /**< RC/R0: The number of pages marked read-only for dirty bit tracking. */
2729 STAMCOUNTER StatRZDirtyPageSkipped; /**< RC/R0: The number of pages already dirty or readonly. */
2730 STAMCOUNTER StatRZDirtyPageTrap; /**< RC/R0: The number of traps generated for dirty bit tracking. */
2731 STAMCOUNTER StatRZDirtyTrackRealPF; /**< RC/R0: The number of real page faults during dirty bit tracking. */
2732 STAMCOUNTER StatRZDirtiedPage; /**< RC/R0: The number of pages marked dirty because of write accesses. */
2733 STAMCOUNTER StatRZPageAlreadyDirty; /**< RC/R0: The number of pages already marked dirty because of write accesses. */
2734 STAMPROFILE StatRZInvalidatePage; /**< RC/R0: PGMInvalidatePage() profiling. */
2735 STAMCOUNTER StatRZInvalidatePage4KBPages; /**< RC/R0: The number of times PGMInvalidatePage() was called for a 4KB page. */
2736 STAMCOUNTER StatRZInvalidatePage4MBPages; /**< RC/R0: The number of times PGMInvalidatePage() was called for a 4MB page. */
2737 STAMCOUNTER StatRZInvalidatePage4MBPagesSkip; /**< RC/R0: The number of times PGMInvalidatePage() skipped a 4MB page. */
2738 STAMCOUNTER StatRZInvalidatePagePDMappings; /**< RC/R0: The number of times PGMInvalidatePage() was called for a page directory containing mappings (no conflict). */
2739 STAMCOUNTER StatRZInvalidatePagePDNAs; /**< RC/R0: The number of times PGMInvalidatePage() was called for a not accessed page directory. */
2740 STAMCOUNTER StatRZInvalidatePagePDNPs; /**< RC/R0: The number of times PGMInvalidatePage() was called for a not present page directory. */
2741 STAMCOUNTER StatRZInvalidatePagePDOutOfSync; /**< RC/R0: The number of times PGMInvalidatePage() was called for an out of sync page directory. */
2742 STAMCOUNTER StatRZInvalidatePageSkipped; /**< RC/R0: The number of times PGMInvalidatePage() was skipped due to a not-present shadow page directory or a pending SyncCR3. */
2743 STAMPROFILE StatRZVirtHandlerSearchByPhys; /**< RC/R0: Profiling of pgmHandlerVirtualFindByPhysAddr. */
2744 STAMCOUNTER StatRZPhysHandlerReset; /**< RC/R0: The number of times PGMHandlerPhysicalReset is called. */
2745 STAMCOUNTER StatRZPageOutOfSyncUser; /**< RC/R0: The number of times an out-of-sync user page was detected in #PF or VerifyAccessSyncPage. */
2746 STAMCOUNTER StatRZPageOutOfSyncSupervisor; /**< RC/R0: The number of times an out-of-sync supervisor page was detected in #PF or VerifyAccessSyncPage. */
2747 STAMPROFILE StatRZPrefetch; /**< RC/R0: PGMPrefetchPage. */
2748 STAMCOUNTER StatRZChunkR3MapTlbHits; /**< RC/R0: Ring-3/0 chunk mapper TLB hits. */
2749 STAMCOUNTER StatRZChunkR3MapTlbMisses; /**< RC/R0: Ring-3/0 chunk mapper TLB misses. */
2750 STAMCOUNTER StatRZPageMapTlbHits; /**< RC/R0: Ring-3/0 page mapper TLB hits. */
2751 STAMCOUNTER StatRZPageMapTlbMisses; /**< RC/R0: Ring-3/0 page mapper TLB misses. */
2752 STAMCOUNTER StatRZPageReplaceShared; /**< RC/R0: Times a shared page has been replaced by a private one. */
2753 STAMCOUNTER StatRZPageReplaceZero; /**< RC/R0: Times the zero page has been replaced by a private one. */
2754/// @todo STAMCOUNTER StatRZPageHandyAllocs; /**< RC/R0: The number of times we've executed GMMR3AllocateHandyPages. */
2755 STAMPROFILE StatRZFlushTLB; /**< RC/R0: Profiling of the PGMFlushTLB() body. */
2756 STAMCOUNTER StatRZFlushTLBNewCR3; /**< RC/R0: The number of times PGMFlushTLB was called with a new CR3, non-global. (switch) */
2757 STAMCOUNTER StatRZFlushTLBNewCR3Global; /**< RC/R0: The number of times PGMFlushTLB was called with a new CR3, global. (switch) */
2758 STAMCOUNTER StatRZFlushTLBSameCR3; /**< RC/R0: The number of times PGMFlushTLB was called with the same CR3, non-global. (flush) */
2759 STAMCOUNTER StatRZFlushTLBSameCR3Global; /**< RC/R0: The number of times PGMFlushTLB was called with the same CR3, global. (flush) */
2760 STAMPROFILE StatRZGstModifyPage; /**< RC/R0: Profiling of the PGMGstModifyPage() body. */
2761
2762 STAMPROFILE StatR3SyncCR3; /**< R3: PGMSyncCR3() profiling. */
2763 STAMPROFILE StatR3SyncCR3Handlers; /**< R3: Profiling of the PGMSyncCR3() update handler section. */
2764 STAMPROFILE StatR3SyncCR3HandlerVirtualReset; /**< R3: Profiling of the virtual handler resets. */
2765 STAMPROFILE StatR3SyncCR3HandlerVirtualUpdate; /**< R3: Profiling of the virtual handler updates. */
2766 STAMCOUNTER StatR3SyncCR3Global; /**< R3: The number of global CR3 syncs. */
2767 STAMCOUNTER StatR3SyncCR3NotGlobal; /**< R3: The number of non-global CR3 syncs. */
2768 STAMCOUNTER StatR3SyncCR3DstFreed; /**< R3: The number of times we've had to free a shadow entry. */
2769 STAMCOUNTER StatR3SyncCR3DstFreedSrcNP; /**< R3: The number of times we've had to free a shadow entry for which the source entry was not present. */
2770 STAMCOUNTER StatR3SyncCR3DstNotPresent; /**< R3: The number of times we've encountered a not present shadow entry for a present guest entry. */
2771 STAMCOUNTER StatR3SyncCR3DstSkippedGlobalPD; /**< R3: The number of times a global page directory wasn't flushed. */
2772 STAMCOUNTER StatR3SyncCR3DstSkippedGlobalPT; /**< R3: The number of times a page table with only global entries wasn't flushed. */
2773 STAMCOUNTER StatR3SyncCR3DstCacheHit; /**< R3: The number of times we got some kind of cache hit on a page table. */
2774 STAMPROFILE StatR3SyncPT; /**< R3: PGMSyncPT() profiling. */
2775 STAMCOUNTER StatR3SyncPTFailed; /**< R3: The number of times PGMSyncPT() failed. */
2776 STAMCOUNTER StatR3SyncPT4K; /**< R3: Number of 4KB syncs. */
2777 STAMCOUNTER StatR3SyncPT4M; /**< R3: Number of 4MB syncs. */
2778 STAMCOUNTER StatR3SyncPagePDNAs; /**< R3: The number of times we've marked a PD not present from SyncPage to virtualize the accessed bit. */
2779 STAMCOUNTER StatR3SyncPagePDOutOfSync; /**< R3: The number of times we've encountered an out-of-sync PD in SyncPage. */
2780 STAMCOUNTER StatR3AccessedPage; /**< R3: The number of pages marked not present for accessed bit emulation. */
2781 STAMPROFILE StatR3DirtyBitTracking; /**< R3: Profiling the dirty bit tracking in CheckPageFault(). */
2782 STAMCOUNTER StatR3DirtyPage; /**< R3: The number of pages marked read-only for dirty bit tracking. */
2783 STAMCOUNTER StatR3DirtyPageBig; /**< R3: The number of pages marked read-only for dirty bit tracking. */
2784 STAMCOUNTER StatR3DirtyPageSkipped; /**< R3: The number of pages already dirty or readonly. */
2785 STAMCOUNTER StatR3DirtyPageTrap; /**< R3: The number of traps generated for dirty bit tracking. */
2786 STAMCOUNTER StatR3DirtyTrackRealPF; /**< R3: The number of real page faults during dirty bit tracking. */
2787 STAMCOUNTER StatR3DirtiedPage; /**< R3: The number of pages marked dirty because of write accesses. */
2788 STAMCOUNTER StatR3PageAlreadyDirty; /**< R3: The number of pages already marked dirty because of write accesses. */
2789 STAMPROFILE StatR3InvalidatePage; /**< R3: PGMInvalidatePage() profiling. */
2790 STAMCOUNTER StatR3InvalidatePage4KBPages; /**< R3: The number of times PGMInvalidatePage() was called for a 4KB page. */
2791 STAMCOUNTER StatR3InvalidatePage4MBPages; /**< R3: The number of times PGMInvalidatePage() was called for a 4MB page. */
2792 STAMCOUNTER StatR3InvalidatePage4MBPagesSkip; /**< R3: The number of times PGMInvalidatePage() skipped a 4MB page. */
2793 STAMCOUNTER StatR3InvalidatePagePDNAs; /**< R3: The number of times PGMInvalidatePage() was called for a not accessed page directory. */
2794 STAMCOUNTER StatR3InvalidatePagePDNPs; /**< R3: The number of times PGMInvalidatePage() was called for a not present page directory. */
2795 STAMCOUNTER StatR3InvalidatePagePDMappings; /**< R3: The number of times PGMInvalidatePage() was called for a page directory containing mappings (no conflict). */
2796 STAMCOUNTER StatR3InvalidatePagePDOutOfSync; /**< R3: The number of times PGMInvalidatePage() was called for an out of sync page directory. */
2797 STAMCOUNTER StatR3InvalidatePageSkipped; /**< R3: The number of times PGMInvalidatePage() was skipped due to a not-present shadow page directory or a pending SyncCR3. */
2798 STAMPROFILE StatR3VirtHandlerSearchByPhys; /**< R3: Profiling of pgmHandlerVirtualFindByPhysAddr. */
2799 STAMCOUNTER StatR3PhysHandlerReset; /**< R3: The number of times PGMHandlerPhysicalReset is called. */
2800 STAMCOUNTER StatR3PageOutOfSyncUser; /**< R3: The number of times an out-of-sync user page was detected in #PF or VerifyAccessSyncPage. */
2801 STAMCOUNTER StatR3PageOutOfSyncSupervisor; /**< R3: The number of times an out-of-sync supervisor page was detected in #PF or VerifyAccessSyncPage. */
2802 STAMPROFILE StatR3Prefetch; /**< R3: PGMPrefetchPage. */
2803 STAMCOUNTER StatR3ChunkR3MapTlbHits; /**< R3: Ring-3/0 chunk mapper TLB hits. */
2804 STAMCOUNTER StatR3ChunkR3MapTlbMisses; /**< R3: Ring-3/0 chunk mapper TLB misses. */
2805 STAMCOUNTER StatR3PageMapTlbHits; /**< R3: Ring-3/0 page mapper TLB hits. */
2806 STAMCOUNTER StatR3PageMapTlbMisses; /**< R3: Ring-3/0 page mapper TLB misses. */
2807 STAMCOUNTER StatR3PageReplaceShared; /**< R3: Times a shared page has been replaced by a private one. */
2808 STAMCOUNTER StatR3PageReplaceZero; /**< R3: Times the zero page has been replaced by a private one. */
2809/// @todo STAMCOUNTER StatR3PageHandyAllocs; /**< R3: The number of times we've executed GMMR3AllocateHandyPages. */
2810 STAMPROFILE StatR3FlushTLB; /**< R3: Profiling of the PGMFlushTLB() body. */
2811 STAMCOUNTER StatR3FlushTLBNewCR3; /**< R3: The number of times PGMFlushTLB was called with a new CR3, non-global. (switch) */
2812 STAMCOUNTER StatR3FlushTLBNewCR3Global; /**< R3: The number of times PGMFlushTLB was called with a new CR3, global. (switch) */
2813 STAMCOUNTER StatR3FlushTLBSameCR3; /**< R3: The number of times PGMFlushTLB was called with the same CR3, non-global. (flush) */
2814 STAMCOUNTER StatR3FlushTLBSameCR3Global; /**< R3: The number of times PGMFlushTLB was called with the same CR3, global. (flush) */
2815 STAMPROFILE StatR3GstModifyPage; /**< R3: Profiling of the PGMGstModifyPage() body. */
2816#endif /* VBOX_WITH_STATISTICS */
2817} PGM;
2818/** Pointer to the PGM instance data. */
2819typedef PGM *PPGM;
2820
2821
2822/**
2823 * PGMCPU Data (part of VMCPU).
2824 */
2825typedef struct PGMCPU
2826{
2827 /** Offset to the VMCPU structure. */
2828 RTINT offVMCPU;
2829 /** Automatically tracked physical memory mapping set.
2830 * Ring-0 and strict raw-mode builds. */
2831 PGMMAPSET AutoSet;
2832} PGMCPU;
2833/** Pointer to the per-cpu PGM data. */
2834typedef PGMCPU *PPGMCPU;
2835
2836
2837/** @name PGM::fSyncFlags Flags
2838 * @{
2839 */
2840/** Updates the virtual access handler state bit in PGMPAGE. */
2841#define PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL RT_BIT(0)
2842/** Always sync CR3. */
2843#define PGM_SYNC_ALWAYS RT_BIT(1)
2844/** Check monitoring on next CR3 (re)load and invalidate page. */
2845#define PGM_SYNC_MONITOR_CR3 RT_BIT(2)
2846/** Check guest mapping in SyncCR3. */
2847#define PGM_SYNC_MAP_CR3 RT_BIT(3)
2848/** Clear the page pool (a lightweight flush). */
2849#define PGM_SYNC_CLEAR_PGM_POOL RT_BIT(8)
2850/** @} */
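/* Editorial sketch (not part of the original source): how the fSyncFlags
 * bits above are typically set by a producer and consumed test-and-clear
 * style. The pVM->pgm.s access path is an assumption based on the usual
 * VMM instance-data layout.
 *
 *      // producer: request a CR3 monitor check and a lightweight pool clear
 *      pVM->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3 | PGM_SYNC_CLEAR_PGM_POOL;
 *
 *      // consumer (e.g. the next CR3 sync): act on the bit and clear it
 *      if (pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)
 *      {
 *          pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_CLEAR_PGM_POOL;
 *          pgmPoolClearAll(pVM);
 *      }
 */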
2851
2852
2853__BEGIN_DECLS
2854
2855int pgmLock(PVM pVM);
2856void pgmUnlock(PVM pVM);
2857
2858int pgmR3SyncPTResolveConflict(PVM pVM, PPGMMAPPING pMapping, PX86PD pPDSrc, RTGCPTR GCPtrOldMapping);
2859int pgmR3SyncPTResolveConflictPAE(PVM pVM, PPGMMAPPING pMapping, RTGCPTR GCPtrOldMapping);
2860PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr);
2861void pgmR3MapRelocate(PVM pVM, PPGMMAPPING pMapping, RTGCPTR GCPtrOldMapping, RTGCPTR GCPtrNewMapping);
2862DECLCALLBACK(void) pgmR3MapInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
2863
2864void pgmR3HandlerPhysicalUpdateAll(PVM pVM);
2865bool pgmHandlerPhysicalIsAll(PVM pVM, RTGCPHYS GCPhys);
2866int pgmHandlerVirtualFindByPhysAddr(PVM pVM, RTGCPHYS GCPhys, PPGMVIRTHANDLER *ppVirt, unsigned *piPage);
2867DECLCALLBACK(int) pgmHandlerVirtualResetOne(PAVLROGCPTRNODECORE pNode, void *pvUser);
2868#if defined(VBOX_STRICT) || defined(LOG_ENABLED)
2869void pgmHandlerVirtualDumpPhysPages(PVM pVM);
2870#else
2871# define pgmHandlerVirtualDumpPhysPages(a) do { } while (0)
2872#endif
2873DECLCALLBACK(void) pgmR3InfoHandlers(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
2874
2875
2876int pgmPhysPageLoadIntoTlb(PPGM pPGM, RTGCPHYS GCPhys);
2877int pgmPhysPageLoadIntoTlbWithPage(PPGM pPGM, PPGMPAGE pPage, RTGCPHYS GCPhys);
2878int pgmPhysPageMakeWritable(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys);
2879int pgmPhysPageMakeWritableUnlocked(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys);
2880int pgmPhysPageMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv);
2881int pgmPhysPageMapByPageID(PVM pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv);
2882int pgmPhysGCPhys2CCPtrInternal(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv);
2883int pgmPhysGCPhys2CCPtrInternalReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv);
2884VMMDECL(int) pgmPhysRomWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser);
2885#ifdef IN_RING3
2886int pgmR3PhysChunkMap(PVM pVM, uint32_t idChunk, PPPGMCHUNKR3MAP ppChunk);
2887int pgmR3PhysRamReset(PVM pVM);
2888int pgmR3PhysRomReset(PVM pVM);
2889# ifndef VBOX_WITH_NEW_PHYS_CODE
2890int pgmr3PhysGrowRange(PVM pVM, RTGCPHYS GCPhys);
2891# endif
2892
2893int pgmR3PoolInit(PVM pVM);
2894void pgmR3PoolRelocate(PVM pVM);
2895void pgmR3PoolReset(PVM pVM);
2896
2897#endif /* IN_RING3 */
2898#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
2899int pgmR0DynMapHCPageCommon(PVM pVM, PPGMMAPSET pSet, RTHCPHYS HCPhys, void **ppv);
2900#endif
2901int pgmPoolAlloc(PVM pVM, RTGCPHYS GCPhys, PGMPOOLKIND enmKind, uint16_t iUser, uint32_t iUserTable, PPPGMPOOLPAGE ppPage);
2902PPGMPOOLPAGE pgmPoolGetPageByHCPhys(PVM pVM, RTHCPHYS HCPhys);
2903void pgmPoolFree(PVM pVM, RTHCPHYS HCPhys, uint16_t iUser, uint32_t iUserTable);
2904void pgmPoolFreeByPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage, uint16_t iUser, uint32_t iUserTable);
2905int pgmPoolFlushPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage);
2906void pgmPoolFlushAll(PVM pVM);
2907void pgmPoolClearAll(PVM pVM);
2908int pgmPoolSyncCR3(PVM pVM);
2909int pgmPoolTrackFlushGCPhys(PVM pVM, PPGMPAGE pPhysPage, bool *pfFlushTLBs);
2910void pgmPoolTrackFlushGCPhysPT(PVM pVM, PPGMPAGE pPhysPage, uint16_t iShw, uint16_t cRefs);
2911void pgmPoolTrackFlushGCPhysPTs(PVM pVM, PPGMPAGE pPhysPage, uint16_t iPhysExt);
2912int pgmPoolTrackFlushGCPhysPTsSlow(PVM pVM, PPGMPAGE pPhysPage);
2913PPGMPOOLPHYSEXT pgmPoolTrackPhysExtAlloc(PVM pVM, uint16_t *piPhysExt);
2914void pgmPoolTrackPhysExtFree(PVM pVM, uint16_t iPhysExt);
2915void pgmPoolTrackPhysExtFreeList(PVM pVM, uint16_t iPhysExt);
2916uint16_t pgmPoolTrackPhysExtAddref(PVM pVM, uint16_t u16, uint16_t iShwPT);
2917void pgmPoolTrackPhysExtDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPoolPage, PPGMPAGE pPhysPage);
2918#ifdef PGMPOOL_WITH_MONITORING
2919void pgmPoolMonitorChainChanging(PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTGCPHYS GCPhysFault, CTXTYPE(RTGCPTR, RTHCPTR, RTGCPTR) pvAddress, PDISCPUSTATE pCpu);
2920int pgmPoolMonitorChainFlush(PPGMPOOL pPool, PPGMPOOLPAGE pPage);
2921void pgmPoolMonitorModifiedInsert(PPGMPOOL pPool, PPGMPOOLPAGE pPage);
2922void pgmPoolMonitorModifiedClearAll(PVM pVM);
2923int pgmPoolMonitorMonitorCR3(PPGMPOOL pPool, uint16_t idxRoot, RTGCPHYS GCPhysCR3);
2924int pgmPoolMonitorUnmonitorCR3(PPGMPOOL pPool, uint16_t idxRoot);
2925#endif
2926
2927void pgmMapClearShadowPDEs(PVM pVM, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iOldPDE);
2928void pgmMapSetShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE);
2929int pgmShwSyncPaePDPtr(PVM pVM, RTGCPTR GCPtr, PX86PDPE pGstPdpe, PX86PDPAE *ppPD);
2930int pgmMapDeactivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3);
2931int pgmMapActivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3);
2932
2933#ifndef IN_RC
2934int pgmShwSyncLongModePDPtr(PVM pVM, RTGCPTR64 GCPtr, PX86PML4E pGstPml4e, PX86PDPE pGstPdpe, PX86PDPAE *ppPD);
2935#endif
2936int pgmShwGetEPTPDPtr(PVM pVM, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD);
2937
2938__END_DECLS
2939
2940
2941/**
2942 * Gets the PGMRAMRANGE structure for a guest page.
2943 *
2944 * @returns Pointer to the RAM range on success.
2945 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
2946 *
2947 * @param pPGM PGM handle.
2948 * @param GCPhys The GC physical address.
2949 */
2950DECLINLINE(PPGMRAMRANGE) pgmPhysGetRange(PPGM pPGM, RTGCPHYS GCPhys)
2951{
2952 /*
2953 * Optimize for the first range.
2954 */
2955 PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
2956 RTGCPHYS off = GCPhys - pRam->GCPhys;
2957 if (RT_UNLIKELY(off >= pRam->cb))
2958 {
2959 do
2960 {
2961 pRam = pRam->CTX_SUFF(pNext);
2962 if (RT_UNLIKELY(!pRam))
2963 break;
2964 off = GCPhys - pRam->GCPhys;
2965 } while (off >= pRam->cb);
2966 }
2967 return pRam;
2968}
2969
2970
2971/**
2972 * Gets the PGMPAGE structure for a guest page.
2973 *
2974 * @returns Pointer to the page on success.
2975 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
2976 *
2977 * @param pPGM PGM handle.
2978 * @param GCPhys The GC physical address.
2979 */
2980DECLINLINE(PPGMPAGE) pgmPhysGetPage(PPGM pPGM, RTGCPHYS GCPhys)
2981{
2982 /*
2983 * Optimize for the first range.
2984 */
2985 PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
2986 RTGCPHYS off = GCPhys - pRam->GCPhys;
2987 if (RT_UNLIKELY(off >= pRam->cb))
2988 {
2989 do
2990 {
2991 pRam = pRam->CTX_SUFF(pNext);
2992 if (RT_UNLIKELY(!pRam))
2993 return NULL;
2994 off = GCPhys - pRam->GCPhys;
2995 } while (off >= pRam->cb);
2996 }
2997 return &pRam->aPages[off >> PAGE_SHIFT];
2998}
2999
3000
3001/**
3002 * Gets the PGMPAGE structure for a guest page.
3003 *
3004 * Old Phys code: Will make sure the page is present.
3005 *
3006 * @returns VBox status code.
3007 * @retval VINF_SUCCESS and a valid *ppPage on success.
3008 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
3009 *
3010 * @param pPGM PGM handle.
3011 * @param GCPhys The GC physical address.
3012 * @param ppPage Where to store the page pointer on success.
3013 */
3014DECLINLINE(int) pgmPhysGetPageEx(PPGM pPGM, RTGCPHYS GCPhys, PPPGMPAGE ppPage)
3015{
3016 /*
3017 * Optimize for the first range.
3018 */
3019 PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
3020 RTGCPHYS off = GCPhys - pRam->GCPhys;
3021 if (RT_UNLIKELY(off >= pRam->cb))
3022 {
3023 do
3024 {
3025 pRam = pRam->CTX_SUFF(pNext);
3026 if (RT_UNLIKELY(!pRam))
3027 {
3028 *ppPage = NULL; /* avoid incorrect and very annoying GCC warnings */
3029 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
3030 }
3031 off = GCPhys - pRam->GCPhys;
3032 } while (off >= pRam->cb);
3033 }
3034 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
3035#ifndef VBOX_WITH_NEW_PHYS_CODE
3036
3037 /*
3038 * Make sure it's present.
3039 */
3040 if (RT_UNLIKELY( !PGM_PAGE_GET_HCPHYS(*ppPage)
3041 && (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)))
3042 {
3043#ifdef IN_RING3
3044 int rc = pgmr3PhysGrowRange(PGM2VM(pPGM), GCPhys);
3045#else
3046 int rc = CTXALLMID(VMM, CallHost)(PGM2VM(pPGM), VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
3047#endif
3048 if (RT_FAILURE(rc))
3049 {
3050 *ppPage = NULL; /* avoid incorrect and very annoying GCC warnings */
3051 return rc;
3052 }
3053 Assert(rc == VINF_SUCCESS);
3054 }
3055#endif
3056 return VINF_SUCCESS;
3057}
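/* Editorial sketch (assumption, not from the source): the typical caller
 * pattern for pgmPhysGetPageEx, where failure simply means an invalid
 * guest physical address.
 *
 *      PPGMPAGE pPage;
 *      int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
 *      if (RT_FAILURE(rc))
 *          return rc;                  // VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS
 *      // pPage is valid here (and present with the old phys code).
 */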
3058
3059
3060
3061
3062/**
3063 * Gets the PGMPAGE structure for a guest page.
3064 *
3065 * Old Phys code: Will make sure the page is present.
3066 *
3067 * @returns VBox status code.
3068 * @retval VINF_SUCCESS and a valid *ppPage on success.
3069 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
3070 *
3071 * @param pPGM PGM handle.
3072 * @param GCPhys The GC physical address.
3073 * @param ppPage Where to store the page pointer on success.
3074 * @param ppRamHint Where to read and store the ram list hint.
3075 * The caller initializes this to NULL before the call.
3076 */
3077DECLINLINE(int) pgmPhysGetPageWithHintEx(PPGM pPGM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRamHint)
3078{
3079 RTGCPHYS off;
3080 PPGMRAMRANGE pRam = *ppRamHint;
3081 if ( !pRam
3082 || RT_UNLIKELY((off = GCPhys - pRam->GCPhys) >= pRam->cb))
3083 {
3084 pRam = pPGM->CTX_SUFF(pRamRanges);
3085 off = GCPhys - pRam->GCPhys;
3086 if (RT_UNLIKELY(off >= pRam->cb))
3087 {
3088 do
3089 {
3090 pRam = pRam->CTX_SUFF(pNext);
3091 if (RT_UNLIKELY(!pRam))
3092 {
3093 *ppPage = NULL; /* Kill the incorrect and extremely annoying GCC warnings. */
3094 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
3095 }
3096 off = GCPhys - pRam->GCPhys;
3097 } while (off >= pRam->cb);
3098 }
3099 *ppRamHint = pRam;
3100 }
3101 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
3102#ifndef VBOX_WITH_NEW_PHYS_CODE
3103
3104 /*
3105 * Make sure it's present.
3106 */
3107 if (RT_UNLIKELY( !PGM_PAGE_GET_HCPHYS(*ppPage)
3108 && (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)))
3109 {
3110#ifdef IN_RING3
3111 int rc = pgmr3PhysGrowRange(PGM2VM(pPGM), GCPhys);
3112#else
3113 int rc = CTXALLMID(VMM, CallHost)(PGM2VM(pPGM), VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
3114#endif
3115 if (RT_FAILURE(rc))
3116 {
3117 *ppPage = NULL; /* Shut up annoying smart ass. */
3118 return rc;
3119 }
3120 Assert(rc == VINF_SUCCESS);
3121 }
3122#endif
3123 return VINF_SUCCESS;
3124}
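/* Editorial sketch (an assumption based on the doc comment above, not code
 * from the source): the ram-range hint must start out NULL and is then
 * reused across consecutive lookups, so pages falling into the same range
 * skip the linked-list walk entirely.
 *
 *      PPGMRAMRANGE pRamHint = NULL;
 *      for (RTGCPHYS GCPhys = GCPhysFirst; GCPhys < GCPhysLast; GCPhys += PAGE_SIZE)
 *      {
 *          PPGMPAGE pPage;
 *          int rc = pgmPhysGetPageWithHintEx(&pVM->pgm.s, GCPhys, &pPage, &pRamHint);
 *          if (RT_FAILURE(rc))
 *              break;
 *          // ... use pPage ...
 *      }
 */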
3125
3126
3127/**
3128 * Gets the PGMPAGE structure for a guest page together with the PGMRAMRANGE.
3129 *
3130 * @returns Pointer to the page on success.
3131 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
3132 *
3133 * @param pPGM PGM handle.
3134 * @param GCPhys The GC physical address.
3135 * @param ppRam Where to store the pointer to the PGMRAMRANGE.
3136 */
3137DECLINLINE(PPGMPAGE) pgmPhysGetPageAndRange(PPGM pPGM, RTGCPHYS GCPhys, PPGMRAMRANGE *ppRam)
3138{
3139 /*
3140 * Optimize for the first range.
3141 */
3142 PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
3143 RTGCPHYS off = GCPhys - pRam->GCPhys;
3144 if (RT_UNLIKELY(off >= pRam->cb))
3145 {
3146 do
3147 {
3148 pRam = pRam->CTX_SUFF(pNext);
3149 if (RT_UNLIKELY(!pRam))
3150 return NULL;
3151 off = GCPhys - pRam->GCPhys;
3152 } while (off >= pRam->cb);
3153 }
3154 *ppRam = pRam;
3155 return &pRam->aPages[off >> PAGE_SHIFT];
3156}
3157
3158
3159/**
3160 * Gets the PGMPAGE structure for a guest page together with the PGMRAMRANGE.
3161 *
3162 * @returns Pointer to the page on success.
3163 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
3164 *
3165 * @param pPGM PGM handle.
3166 * @param GCPhys The GC physical address.
3167 * @param ppPage Where to store the pointer to the PGMPAGE structure.
3168 * @param ppRam Where to store the pointer to the PGMRAMRANGE structure.
3169 */
3170DECLINLINE(int) pgmPhysGetPageAndRangeEx(PPGM pPGM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam)
3171{
3172 /*
3173 * Optimize for the first range.
3174 */
3175 PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
3176 RTGCPHYS off = GCPhys - pRam->GCPhys;
3177 if (RT_UNLIKELY(off >= pRam->cb))
3178 {
3179 do
3180 {
3181 pRam = pRam->CTX_SUFF(pNext);
3182 if (RT_UNLIKELY(!pRam))
3183 {
3184 *ppRam = NULL; /* Shut up silly GCC warnings. */
3185 *ppPage = NULL; /* ditto */
3186 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
3187 }
3188 off = GCPhys - pRam->GCPhys;
3189 } while (off >= pRam->cb);
3190 }
3191 *ppRam = pRam;
3192 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
3193#ifndef VBOX_WITH_NEW_PHYS_CODE
3194
3195 /*
3196 * Make sure it's present.
3197 */
3198 if (RT_UNLIKELY( !PGM_PAGE_GET_HCPHYS(*ppPage)
3199 && (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)))
3200 {
3201#ifdef IN_RING3
3202 int rc = pgmr3PhysGrowRange(PGM2VM(pPGM), GCPhys);
3203#else
3204 int rc = CTXALLMID(VMM, CallHost)(PGM2VM(pPGM), VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
3205#endif
3206 if (RT_FAILURE(rc))
3207 {
3208 *ppRam = NULL; /* Shut up silly GCC warnings. */
3209 *ppPage = NULL; /* ditto */
3210 return rc;
3211 }
3212 Assert(rc == VINF_SUCCESS);
3213
3214 }
3215#endif
3216 return VINF_SUCCESS;
3217}
3218
3219
3220/**
3221 * Convert GC Phys to HC Phys.
3222 *
3223 * @returns VBox status.
3224 * @param pPGM PGM handle.
3225 * @param GCPhys The GC physical address.
3226 * @param pHCPhys Where to store the corresponding HC physical address.
3227 *
3228 * @deprecated Doesn't deal with zero, shared or write monitored pages.
3229 * Avoid when writing new code!
3230 */
3231DECLINLINE(int) pgmRamGCPhys2HCPhys(PPGM pPGM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
3232{
3233 PPGMPAGE pPage;
3234 int rc = pgmPhysGetPageEx(pPGM, GCPhys, &pPage);
3235 if (RT_FAILURE(rc))
3236 return rc;
3237 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
3238 return VINF_SUCCESS;
3239}
3240
3241#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3242
3243/**
3244 * Inlined version of the ring-0 version of PGMDynMapHCPage that
3245 * optimizes access to pages already in the set.
3246 *
3247 * @returns VINF_SUCCESS. Will bail out to ring-3 on failure.
3248 * @param pPGM Pointer to the PGM instance data.
3249 * @param HCPhys The physical address of the page.
3250 * @param ppv Where to store the mapping address.
3251 */
3252DECLINLINE(int) pgmR0DynMapHCPageInlined(PPGM pPGM, RTHCPHYS HCPhys, void **ppv)
3253{
3254 STAM_PROFILE_START(&pPGM->StatR0DynMapHCPageInl, a);
3255 PPGMMAPSET pSet = &((PPGMCPU)((uint8_t *)VMMGetCpu(PGM2VM(pPGM)) + pPGM->offVCpu))->AutoSet; /* very pretty ;-) */
3256 Assert(!(HCPhys & PAGE_OFFSET_MASK));
3257 Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));
3258
3259 unsigned iHash = PGMMAPSET_HASH(HCPhys);
3260 unsigned iEntry = pSet->aiHashTable[iHash];
3261 if ( iEntry < pSet->cEntries
3262 && pSet->aEntries[iEntry].HCPhys == HCPhys)
3263 {
3264 *ppv = pSet->aEntries[iEntry].pvPage;
3265 STAM_COUNTER_INC(&pPGM->StatR0DynMapHCPageInlHits);
3266 }
3267 else
3268 {
3269 STAM_COUNTER_INC(&pPGM->StatR0DynMapHCPageInlMisses);
3270 pgmR0DynMapHCPageCommon(PGM2VM(pPGM), pSet, HCPhys, ppv);
3271 }
3272
3273 STAM_PROFILE_STOP(&pPGM->StatR0DynMapHCPageInl, a);
3274 return VINF_SUCCESS;
3275}
3276
3277
3278/**
3279 * Inlined version of the ring-0 version of PGMDynMapGCPage that optimizes
3280 * access to pages already in the set.
3281 *
3282 * @returns See PGMDynMapGCPage.
3283 * @param pPGM Pointer to the PGM instance data.
3284 * @param GCPhys The guest physical address of the page.
3285 * @param ppv Where to store the mapping address.
3286 */
3287DECLINLINE(int) pgmR0DynMapGCPageInlined(PPGM pPGM, RTGCPHYS GCPhys, void **ppv)
3288{
3289 STAM_PROFILE_START(&pPGM->StatR0DynMapGCPageInl, a);
3290 Assert(!(GCPhys & PAGE_OFFSET_MASK));
3291
3292 /*
3293 * Get the ram range.
3294 */
3295 PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
3296 RTGCPHYS off = GCPhys - pRam->GCPhys;
3297 if (RT_UNLIKELY(off >= pRam->cb
3298 /** @todo || page state stuff */))
3299 {
3300 /* This case is not counted into StatR0DynMapGCPageInl. */
3301 STAM_COUNTER_INC(&pPGM->StatR0DynMapGCPageInlRamMisses);
3302 return PGMDynMapGCPage(PGM2VM(pPGM), GCPhys, ppv);
3303 }
3304
3305 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[off >> PAGE_SHIFT]);
3306 STAM_COUNTER_INC(&pPGM->StatR0DynMapGCPageInlRamHits);
3307
3308 /*
3309 * Same as pgmR0DynMapHCPageInlined, but without the stats.
3310 */
3311 PPGMMAPSET pSet = &((PPGMCPU)((uint8_t *)VMMGetCpu(PGM2VM(pPGM)) + pPGM->offVCpu))->AutoSet; /* very pretty ;-) */
3312 Assert(!(HCPhys & PAGE_OFFSET_MASK));
3313 Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));
3314
3315 unsigned iHash = PGMMAPSET_HASH(HCPhys);
3316 unsigned iEntry = pSet->aiHashTable[iHash];
3317 if ( iEntry < pSet->cEntries
3318 && pSet->aEntries[iEntry].HCPhys == HCPhys)
3319 {
3320 *ppv = pSet->aEntries[iEntry].pvPage;
3321 STAM_COUNTER_INC(&pPGM->StatR0DynMapGCPageInlHits);
3322 }
3323 else
3324 {
3325 STAM_COUNTER_INC(&pPGM->StatR0DynMapGCPageInlMisses);
3326 pgmR0DynMapHCPageCommon(PGM2VM(pPGM), pSet, HCPhys, ppv);
3327 }
3328
3329 STAM_PROFILE_STOP(&pPGM->StatR0DynMapGCPageInl, a);
3330 return VINF_SUCCESS;
3331}
3332
3333#endif /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
3334#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
3335
3336/**
3337 * Maps the page into current context (RC and maybe R0).
3338 *
3339 * @returns pointer to the mapping.
3340 * @param pPGM Pointer to the PGM instance data.
3341 * @param pPage The page.
3342 */
3343DECLINLINE(void *) pgmPoolMapPageInlined(PPGM pPGM, PPGMPOOLPAGE pPage)
3344{
3345 if (pPage->idx >= PGMPOOL_IDX_FIRST)
3346 {
3347 Assert(pPage->idx < pPGM->CTX_SUFF(pPool)->cCurPages);
3348 void *pv;
3349# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3350 pgmR0DynMapHCPageInlined(pPGM, pPage->Core.Key, &pv);
3351# else
3352 PGMDynMapHCPage(PGM2VM(pPGM), pPage->Core.Key, &pv);
3353# endif
3354 return pv;
3355 }
3356 AssertFatalMsgFailed(("pgmPoolMapPageInlined invalid page index %x\n", pPage->idx));
3357}
3358
3359/**
3360 * Temporarily maps one host page specified by HC physical address, returning
3361 * pointer within the page.
3362 *
3363 * Be WARNED that the dynamic page mapping area is small (8 pages); the space is thus
3364 * reused after 8 mappings (or perhaps a few more if you score with the cache).
3365 *
3366 * @returns The address corresponding to HCPhys.
3367 * @param pPGM Pointer to the PGM instance data.
3368 * @param HCPhys HC Physical address of the page.
3369 */
3370DECLINLINE(void *) pgmDynMapHCPageOff(PPGM pPGM, RTHCPHYS HCPhys)
3371{
3372 void *pv;
3373# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3374 pgmR0DynMapHCPageInlined(pPGM, HCPhys & ~(RTHCPHYS)PAGE_OFFSET_MASK, &pv);
3375# else
3376 PGMDynMapHCPage(PGM2VM(pPGM), HCPhys & ~(RTHCPHYS)PAGE_OFFSET_MASK, &pv);
3377# endif
3378 pv = (void *)((uintptr_t)pv | (HCPhys & PAGE_OFFSET_MASK));
3379 return pv;
3380}
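/* Editorial sketch (not from the source): what pgmDynMapHCPageOff buys the
 * caller -- the page offset survives the mapping, so an unaligned physical
 * address yields a directly usable pointer. Values are illustrative.
 *
 *      // HCPhys = 0x12345678: maps the page at 0x12345000 and returns
 *      // the mapping base with the low 12 bits (0x678) OR'ed back in.
 *      uint32_t u32 = *(uint32_t *)pgmDynMapHCPageOff(&pVM->pgm.s, HCPhys);
 */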
3381
3382#endif /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 || IN_RC */
3383
3384#ifndef IN_RC
3385/**
3386 * Queries the Physical TLB entry for a physical guest page,
3387 * attempting to load the TLB entry if necessary.
3388 *
3389 * @returns VBox status code.
3390 * @retval VINF_SUCCESS on success
3391 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
3392 *
3393 * @param pPGM The PGM instance handle.
3394 * @param GCPhys The address of the guest page.
3395 * @param ppTlbe Where to store the pointer to the TLB entry.
3396 */
3397DECLINLINE(int) pgmPhysPageQueryTlbe(PPGM pPGM, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe)
3398{
3399 int rc;
3400 PPGMPAGEMAPTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
3401 if (pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK))
3402 {
3403 STAM_COUNTER_INC(&pPGM->CTX_MID_Z(Stat,PageMapTlbHits));
3404 rc = VINF_SUCCESS;
3405 }
3406 else
3407 rc = pgmPhysPageLoadIntoTlb(pPGM, GCPhys);
3408 *ppTlbe = pTlbe;
3409 return rc;
3410}
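/* Editorial sketch (assumed caller pattern, not from the source) for the
 * physical TLB query above:
 *
 *      PPGMPAGEMAPTLBE pTlbe;
 *      int rc = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
 *      if (RT_FAILURE(rc))
 *          return rc;          // not a valid guest physical address
 *      // pTlbe now points at the (possibly freshly loaded) TLB entry.
 */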
3411
3412
3413/**
3414 * Queries the Physical TLB entry for a physical guest page,
3415 * attempting to load the TLB entry if necessary.
3416 *
3417 * @returns VBox status code.
3418 * @retval VINF_SUCCESS on success
3419 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
3420 *
3421 * @param pPGM The PGM instance handle.
3422 * @param pPage Pointer to the PGMPAGE structure corresponding to
3423 * GCPhys.
3424 * @param GCPhys The address of the guest page.
3425 * @param ppTlbe Where to store the pointer to the TLB entry.
3426 */
3427DECLINLINE(int) pgmPhysPageQueryTlbeWithPage(PPGM pPGM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe)
3428{
3429 int rc;
3430 PPGMPAGEMAPTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
3431 if (pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK))
3432 {
3433 STAM_COUNTER_INC(&pPGM->CTX_MID_Z(Stat,PageMapTlbHits));
3434 rc = VINF_SUCCESS;
3435 }
3436 else
3437 rc = pgmPhysPageLoadIntoTlbWithPage(pPGM, pPage, GCPhys);
3438 *ppTlbe = pTlbe;
3439 return rc;
3440}
3441#endif /* !IN_RC */
3442
3443
3444#ifndef VBOX_WITH_NEW_PHYS_CODE
3445/**
3446 * Convert GC Phys to HC Virt and HC Phys.
3447 *
3448 * @returns VBox status.
3449 * @param pPGM PGM handle.
3450 * @param GCPhys The GC physical address.
3451 * @param pHCPtr Where to store the corresponding HC virtual address.
3452 * @param pHCPhys Where to store the HC Physical address and its flags.
3453 *
3454 * @deprecated Will go away or be changed. The only user is MapCR3, which will have to do ring-3
3455 * and ring-0 locking of the CR3 in a lazy fashion, I fear... or perhaps not; we'll see.
3456 * Either way, we have to make sure the page is writable in MapCR3.
3457 */
3458DECLINLINE(int) pgmRamGCPhys2HCPtrAndHCPhys(PPGM pPGM, RTGCPHYS GCPhys, PRTHCPTR pHCPtr, PRTHCPHYS pHCPhys)
3459{
3460 PPGMRAMRANGE pRam;
3461 PPGMPAGE pPage;
3462 int rc = pgmPhysGetPageAndRangeEx(pPGM, GCPhys, &pPage, &pRam);
3463 if (RT_FAILURE(rc))
3464 {
3465 *pHCPtr = 0; /* Shut up crappy GCC warnings */
3466 *pHCPhys = 0; /* ditto */
3467 return rc;
3468 }
3469 RTGCPHYS off = GCPhys - pRam->GCPhys;
3470
3471 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage);
3472 if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
3473 {
3474 unsigned idx = (off >> PGM_DYNAMIC_CHUNK_SHIFT);
3475#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) /* ASSUMES only MapCR3 usage. */
3476 PRTR3UINTPTR paChunkR3Ptrs = (PRTR3UINTPTR)MMHyperR3ToCC(PGM2VM(pPGM), pRam->paChunkR3Ptrs);
3477 *pHCPtr = (RTHCPTR)(paChunkR3Ptrs[idx] + (off & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
3478#else
3479 *pHCPtr = (RTHCPTR)(pRam->paChunkR3Ptrs[idx] + (off & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
3480#endif
3481 return VINF_SUCCESS;
3482 }
3483 if (pRam->pvR3)
3484 {
3485 *pHCPtr = (RTHCPTR)((RTHCUINTPTR)pRam->pvR3 + off);
3486 return VINF_SUCCESS;
3487 }
3488 *pHCPtr = 0;
3489 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
3490}
3491#endif /* VBOX_WITH_NEW_PHYS_CODE */
3492
3493
3494/**
3495 * Calculates the guest physical address of the large (4 MB) page in 32-bit paging mode.
3496 * Takes PSE-36 into account.
3497 *
3498 * @returns guest physical address
3499 * @param pPGM Pointer to the PGM instance data.
3500 * @param Pde Guest Pde
3501 */
3502DECLINLINE(RTGCPHYS) pgmGstGet4MBPhysPage(PPGM pPGM, X86PDE Pde)
3503{
3504 RTGCPHYS GCPhys = Pde.u & X86_PDE4M_PG_MASK;
3505 GCPhys |= (RTGCPHYS)Pde.b.u8PageNoHigh << 32;
3506
3507 return GCPhys & pPGM->GCPhys4MBPSEMask;
3508}
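/* Editorial worked example (illustrative, not from the source) for the
 * PSE-36 math above: PDE bits 31:22 give GCPhys bits 31:22, while PDE bits
 * 20:13 (u8PageNoHigh) supply GCPhys bits 39:32 before the final
 * GCPhys4MBPSEMask clamp to the supported physical address width.
 *
 *      // Pde.u = 0xF0002083 (P|RW|PS, base 0xF0000000, u8PageNoHigh = 1)
 *      //   -> 0xF0000000 | ((RTGCPHYS)1 << 32) = 0x1F0000000
 */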
3509
3510
3511/**
3512 * Gets the page directory entry for the specified address (32-bit paging).
3513 *
3514 * @returns The page directory entry in question.
3515 * @param pPGM Pointer to the PGM instance data.
3516 * @param GCPtr The address.
3517 */
3518DECLINLINE(X86PDE) pgmGstGet32bitPDE(PPGM pPGM, RTGCPTR GCPtr)
3519{
3520#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3521 PCX86PD pGuestPD = 0;
3522 int rc = pgmR0DynMapGCPageInlined(pPGM, pPGM->GCPhysCR3, (void **)&pGuestPD);
3523 if (RT_FAILURE(rc))
3524 {
3525 X86PDE ZeroPde = {0};
3526 AssertMsgFailedReturn(("%Rrc\n", rc), ZeroPde);
3527 }
3528 return pGuestPD->a[GCPtr >> X86_PD_SHIFT];
3529#else
3530 return pPGM->CTX_SUFF(pGst32BitPd)->a[GCPtr >> X86_PD_SHIFT];
3531#endif
3532}
3533
3534
3535/**
3536 * Gets the address of a specific page directory entry (32-bit paging).
3537 *
3538 * @returns Pointer to the page directory entry in question.
3539 * @param pPGM Pointer to the PGM instance data.
3540 * @param GCPtr The address.
3541 */
3542DECLINLINE(PX86PDE) pgmGstGet32bitPDEPtr(PPGM pPGM, RTGCPTR GCPtr)
3543{
3544#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3545 PX86PD pGuestPD = 0;
3546 int rc = pgmR0DynMapGCPageInlined(pPGM, pPGM->GCPhysCR3, (void **)&pGuestPD);
3547 AssertRCReturn(rc, 0);
3548 return &pGuestPD->a[GCPtr >> X86_PD_SHIFT];
3549#else
3550 return &pPGM->CTX_SUFF(pGst32BitPd)->a[GCPtr >> X86_PD_SHIFT];
3551#endif
3552}
3553
3554
3555/**
3556 * Gets the address of the guest page directory (32-bit paging).
3557 *
3558 * @returns Pointer to the page directory in question.
3559 * @param pPGM Pointer to the PGM instance data.
3560 */
3561DECLINLINE(PX86PD) pgmGstGet32bitPDPtr(PPGM pPGM)
3562{
3563#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3564 PX86PD pGuestPD = 0;
3565 int rc = pgmR0DynMapGCPageInlined(pPGM, pPGM->GCPhysCR3, (void **)&pGuestPD);
3566 AssertRCReturn(rc, 0);
3567 return pGuestPD;
3568#else
3569 return pPGM->CTX_SUFF(pGst32BitPd);
3570#endif
3571}
3572
3573
3574/**
3575 * Gets the guest page directory pointer table.
3576 *
3577 * @returns Pointer to the page directory pointer table in question.
3578 * @returns NULL if the page directory pointer table is not present or on an invalid page.
3579 * @param pPGM Pointer to the PGM instance data.
3580 */
3581DECLINLINE(PX86PDPT) pgmGstGetPaePDPTPtr(PPGM pPGM)
3582{
3583#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3584 PX86PDPT pGuestPDPT = 0;
3585 int rc = pgmR0DynMapGCPageInlined(pPGM, pPGM->GCPhysCR3, (void **)&pGuestPDPT);
3586 AssertRCReturn(rc, 0);
3587 return pGuestPDPT;
3588#else
3589 return pPGM->CTX_SUFF(pGstPaePdpt);
3590#endif
3591}
3592
3593
3594/**
3595 * Gets the guest page directory pointer table entry for the specified address.
3596 *
3597 * @returns Pointer to the page directory pointer table entry in question.
3598 * @returns NULL if the page directory pointer table is not present or on an invalid page.
3599 * @param pPGM Pointer to the PGM instance data.
3600 * @param GCPtr The address.
3601 */
3602DECLINLINE(PX86PDPE) pgmGstGetPaePDPEPtr(PPGM pPGM, RTGCPTR GCPtr)
3603{
3604 AssertGCPtr32(GCPtr);
3605
3606#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3607 PX86PDPT pGuestPDPT = 0;
3608 int rc = pgmR0DynMapGCPageInlined(pPGM, pPGM->GCPhysCR3, (void **)&pGuestPDPT);
3609 AssertRCReturn(rc, 0);
3610 return &pGuestPDPT->a[(GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE];
3611#else
3612 return &pPGM->CTX_SUFF(pGstPaePdpt)->a[(GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE];
3613#endif
3614}
3615
3616
3617/**
3618 * Gets the page directory for the specified address.
3619 *
3620 * @returns Pointer to the page directory in question.
3621 * @returns NULL if the page directory is not present or on an invalid page.
3622 * @param pPGM Pointer to the PGM instance data.
3623 * @param GCPtr The address.
3624 */
3625DECLINLINE(PX86PDPAE) pgmGstGetPaePD(PPGM pPGM, RTGCPTR GCPtr)
3626{
3627 AssertGCPtr32(GCPtr);
3628
3629#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3630 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pPGM);
3631 AssertReturn(pGuestPDPT, 0);
3632#else
3633 PX86PDPT pGuestPDPT = pPGM->CTX_SUFF(pGstPaePdpt);
3634#endif
3635 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
3636 if (pGuestPDPT->a[iPdPt].n.u1Present)
3637 {
3638#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3639 if ((pGuestPDPT->a[iPdPt].u & X86_PDPE_PG_MASK) == pPGM->aGCPhysGstPaePDs[iPdPt])
3640 return pPGM->CTX_SUFF(apGstPaePDs)[iPdPt];
3641#endif
3642
3643 /* cache is out-of-sync. */
3644 PX86PDPAE pPD;
3645 int rc = PGM_GCPHYS_2_PTR_BY_PGM(pPGM, pGuestPDPT->a[iPdPt].u & X86_PDPE_PG_MASK, &pPD);
3646 if (RT_SUCCESS(rc))
3647 return pPD;
3648 AssertMsgFailed(("Impossible! rc=%d PDPE=%#llx\n", rc, pGuestPDPT->a[iPdPt].u));
3649 /* returning NULL is ok if we assume it's just an invalid page of some kind emulated as all 0s. (not quite true) */
3650 }
3651 return NULL;
3652}
3653
3654
3655/**
3656 * Gets the page directory entry for the specified address.
3657 *
3658 * @returns Pointer to the page directory entry in question.
3659 * @returns NULL if the page directory is not present or on an invalid page.
3660 * @param pPGM Pointer to the PGM instance data.
3661 * @param GCPtr The address.
3662 */
3663DECLINLINE(PX86PDEPAE) pgmGstGetPaePDEPtr(PPGM pPGM, RTGCPTR GCPtr)
3664{
3665 AssertGCPtr32(GCPtr);
3666
3667#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3668 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pPGM);
3669 AssertReturn(pGuestPDPT, 0);
3670#else
3671 PX86PDPT pGuestPDPT = pPGM->CTX_SUFF(pGstPaePdpt);
3672#endif
3673 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
3674 if (pGuestPDPT->a[iPdPt].n.u1Present)
3675 {
3676 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
3677#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3678 if ((pGuestPDPT->a[iPdPt].u & X86_PDPE_PG_MASK) == pPGM->aGCPhysGstPaePDs[iPdPt])
3679 return &pPGM->CTX_SUFF(apGstPaePDs)[iPdPt]->a[iPD];
3680#endif
3681
3682 /* The cache is out-of-sync. */
3683 PX86PDPAE pPD;
3684 int rc = PGM_GCPHYS_2_PTR_BY_PGM(pPGM, pGuestPDPT->a[iPdPt].u & X86_PDPE_PG_MASK, &pPD);
3685 if (RT_SUCCESS(rc))
3686 return &pPD->a[iPD];
3687 AssertMsgFailed(("Impossible! rc=%Rrc PDPE=%RX64\n", rc, pGuestPDPT->a[iPdPt].u));
3688 /* returning NULL is ok if we assume it's just an invalid page or something which we'll emulate as all 0s. (not quite true) */
3689 }
3690 return NULL;
3691}
3692
3693
3694/**
3695 * Gets the page directory entry for the specified address.
3696 *
3697 * @returns The page directory entry in question.
3698 * @returns A non-present entry if the page directory is not present or on an invalid page.
3699 * @param pPGM Pointer to the PGM instance data.
3700 * @param GCPtr The address.
3701 */
3702DECLINLINE(X86PDEPAE) pgmGstGetPaePDE(PPGM pPGM, RTGCPTR GCPtr)
3703{
3704 AssertGCPtr32(GCPtr);
3705
3706#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3707 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pPGM);
3708 if (RT_LIKELY(pGuestPDPT))
3709#else
3710 PX86PDPT pGuestPDPT = pPGM->CTX_SUFF(pGstPaePdpt);
3711#endif
3712 {
3713 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
3714 if (pGuestPDPT->a[iPdPt].n.u1Present)
3715 {
3716 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
3717#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3718 if ((pGuestPDPT->a[iPdPt].u & X86_PDPE_PG_MASK) == pPGM->aGCPhysGstPaePDs[iPdPt])
3719 return pPGM->CTX_SUFF(apGstPaePDs)[iPdPt]->a[iPD];
3720#endif
3721
3722 /* cache is out-of-sync. */
3723 PX86PDPAE pPD;
3724 int rc = PGM_GCPHYS_2_PTR_BY_PGM(pPGM, pGuestPDPT->a[iPdPt].u & X86_PDPE_PG_MASK, &pPD);
3725 if (RT_SUCCESS(rc))
3726 return pPD->a[iPD];
3727 AssertMsgFailed(("Impossible! rc=%d PDPE=%#llx\n", rc, pGuestPDPT->a[iPdPt].u));
3728 }
3729 }
3730 X86PDEPAE ZeroPde = {0};
3731 return ZeroPde;
3732}
3733
3734
3735/**
3736 * Gets the page directory pointer table entry for the specified address
3737 * and returns the index into the page directory.
3738 *
3739 * @returns Pointer to the page directory in question.
3740 * @returns NULL if the page directory is not present or on an invalid page.
3741 * @param pPGM Pointer to the PGM instance data.
3742 * @param GCPtr The address.
3743 * @param piPD Receives the index into the returned page directory.
3744 * @param pPdpe Receives the page directory pointer entry. Optional.
3745 */
3746DECLINLINE(PX86PDPAE) pgmGstGetPaePDPtr(PPGM pPGM, RTGCPTR GCPtr, unsigned *piPD, PX86PDPE pPdpe)
3747{
3748 AssertGCPtr32(GCPtr);
3749
3750#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3751 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pPGM);
3752 AssertReturn(pGuestPDPT, 0);
3753#else
3754 PX86PDPT pGuestPDPT = pPGM->CTX_SUFF(pGstPaePdpt);
3755#endif
3756 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
3757 if (pPdpe)
3758 *pPdpe = pGuestPDPT->a[iPdPt];
3759 if (pGuestPDPT->a[iPdPt].n.u1Present)
3760 {
3761 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
3762#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3763 if ((pGuestPDPT->a[iPdPt].u & X86_PDPE_PG_MASK) == pPGM->aGCPhysGstPaePDs[iPdPt])
3764 {
3765 *piPD = iPD;
3766 return pPGM->CTX_SUFF(apGstPaePDs)[iPdPt];
3767 }
3768#endif
3769
3770 /* cache is out-of-sync. */
3771 PX86PDPAE pPD;
3772 int rc = PGM_GCPHYS_2_PTR_BY_PGM(pPGM, pGuestPDPT->a[iPdPt].u & X86_PDPE_PG_MASK, &pPD);
3773 if (RT_SUCCESS(rc))
3774 {
3775 *piPD = iPD;
3776 return pPD;
3777 }
3778 AssertMsgFailed(("Impossible! rc=%d PDPE=%#llx\n", rc, pGuestPDPT->a[iPdPt].u));
3779 /* returning NULL is ok if we assume it's just an invalid page of some kind emulated as all 0s. */
3780 }
3781 return NULL;
3782}
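/* Editorial sketch (assumption, not from the source): resolving a guest
 * virtual address to its PAE PDE via the getter above.
 *
 *      unsigned iPD;
 *      X86PDPE Pdpe;
 *      X86PDEPAE Pde = { 0 };
 *      PX86PDPAE pPD = pgmGstGetPaePDPtr(&pVM->pgm.s, GCPtr, &iPD, &Pdpe);
 *      if (pPD)
 *          Pde = pPD->a[iPD];          // else: treated as not present
 */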
3783
3784#ifndef IN_RC
3785
3786/**
3787 * Gets the page map level-4 pointer for the guest.
3788 *
3789 * @returns Pointer to the PML4 page.
3790 * @param pPGM Pointer to the PGM instance data.
3791 */
3792DECLINLINE(PX86PML4) pgmGstGetLongModePML4Ptr(PPGM pPGM)
3793{
3794#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3795 PX86PML4 pGuestPml4;
3796 int rc = pgmR0DynMapGCPageInlined(pPGM, pPGM->GCPhysCR3, (void **)&pGuestPml4);
3797 AssertRCReturn(rc, NULL);
3798 return pGuestPml4;
3799#else
3800 Assert(pPGM->CTX_SUFF(pGstAmd64Pml4));
3801 return pPGM->CTX_SUFF(pGstAmd64Pml4);
3802#endif
3803}
3804
3805
3806/**
3807 * Gets the pointer to a page map level-4 entry.
3808 *
3809 * @returns Pointer to the PML4 entry.
3810 * @param pPGM Pointer to the PGM instance data.
3811 * @param iPml4 The index.
3812 */
3813DECLINLINE(PX86PML4E) pgmGstGetLongModePML4EPtr(PPGM pPGM, unsigned int iPml4)
3814{
3815#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3816 PX86PML4 pGuestPml4;
3817 int rc = pgmR0DynMapGCPageInlined(pPGM, pPGM->GCPhysCR3, (void **)&pGuestPml4);
3818 AssertRCReturn(rc, NULL);
3819 return &pGuestPml4->a[iPml4];
3820#else
3821 Assert(pPGM->CTX_SUFF(pGstAmd64Pml4));
3822 return &pPGM->CTX_SUFF(pGstAmd64Pml4)->a[iPml4];
3823#endif
3824}
3825
3826
3827/**
3828 * Gets a page map level-4 entry.
3829 *
3830 * @returns The PML4 entry.
3831 * @param pPGM Pointer to the PGM instance data.
3832 * @param iPml4 The index.
3833 */
3834DECLINLINE(X86PML4E) pgmGstGetLongModePML4E(PPGM pPGM, unsigned int iPml4)
3835{
3836#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3837 PX86PML4 pGuestPml4;
3838 int rc = pgmR0DynMapGCPageInlined(pPGM, pPGM->GCPhysCR3, (void **)&pGuestPml4);
3839 if (RT_FAILURE(rc))
3840 {
3841 X86PML4E ZeroPml4e = {0};
3842 AssertMsgFailedReturn(("%Rrc\n", rc), ZeroPml4e);
3843 }
3844 return pGuestPml4->a[iPml4];
3845#else
3846 Assert(pPGM->CTX_SUFF(pGstAmd64Pml4));
3847 return pPGM->CTX_SUFF(pGstAmd64Pml4)->a[iPml4];
3848#endif
3849}
3850
3851
3852/**
3853 * Gets the page directory pointer entry for the specified address.
3854 *
3855 * @returns Pointer to the page directory pointer entry in question.
3856 * @returns NULL if the page directory is not present or on an invalid page.
3857 * @param pPGM Pointer to the PGM instance data.
3858 * @param GCPtr The address.
3859 * @param ppPml4e Page Map Level-4 Entry (out)
3860 */
3861DECLINLINE(PX86PDPE) pgmGstGetLongModePDPTPtr(PPGM pPGM, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e)
3862{
3863 PX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
3864 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
3865 PCX86PML4E pPml4e = *ppPml4e = &pGuestPml4->a[iPml4];
3866 if (pPml4e->n.u1Present)
3867 {
3868 PX86PDPT pPdpt;
3869 int rc = PGM_GCPHYS_2_PTR_BY_PGM(pPGM, pPml4e->u & X86_PML4E_PG_MASK, &pPdpt);
3870 AssertRCReturn(rc, NULL);
3871
3872 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
3873 return &pPdpt->a[iPdPt];
3874 }
3875 return NULL;
3876}
3877
3878
3879/**
3880 * Gets the page directory entry for the specified address.
3881 *
3882 * @returns The page directory entry in question.
3883 * @returns A non-present entry if the page directory is not present or on an invalid page.
3884 * @param pPGM Pointer to the PGM instance data.
3885 * @param GCPtr The address.
3886 * @param ppPml4e Page Map Level-4 Entry (out)
3887 * @param pPdpe Page directory pointer table entry (out)
3888 */
3889DECLINLINE(X86PDEPAE) pgmGstGetLongModePDEEx(PPGM pPGM, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPE pPdpe)
3890{
3891 X86PDEPAE ZeroPde = {0};
3892 PX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
3893 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
3894 PCX86PML4E pPml4e = *ppPml4e = &pGuestPml4->a[iPml4];
3895 if (pPml4e->n.u1Present)
3896 {
3897 PCX86PDPT pPdptTemp;
3898 int rc = PGM_GCPHYS_2_PTR_BY_PGM(pPGM, pPml4e->u & X86_PML4E_PG_MASK, &pPdptTemp);
3899 AssertRCReturn(rc, ZeroPde);
3900
3901 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
3902 *pPdpe = pPdptTemp->a[iPdPt];
3903 if (pPdptTemp->a[iPdPt].n.u1Present)
3904 {
3905 PCX86PDPAE pPD;
3906 rc = PGM_GCPHYS_2_PTR_BY_PGM(pPGM, pPdptTemp->a[iPdPt].u & X86_PDPE_PG_MASK, &pPD);
3907 AssertRCReturn(rc, ZeroPde);
3908
3909 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
3910 return pPD->a[iPD];
3911 }
3912 }
3913
3914 return ZeroPde;
3915}
3916
3917
3918/**
3919 * Gets the page directory entry for the specified address.
3920 *
3921 * @returns The page directory entry in question.
3922 * @returns A non-present entry if the page directory is not present or on an invalid page.
3923 * @param pPGM Pointer to the PGM instance data.
3924 * @param GCPtr The address.
3925 */
3926DECLINLINE(X86PDEPAE) pgmGstGetLongModePDE(PPGM pPGM, RTGCPTR64 GCPtr)
3927{
3928 X86PDEPAE ZeroPde = {0};
3929 PCX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
3930 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
3931 if (pGuestPml4->a[iPml4].n.u1Present)
3932 {
3933 PCX86PDPT pPdptTemp;
3934 int rc = PGM_GCPHYS_2_PTR_BY_PGM(pPGM, pGuestPml4->a[iPml4].u & X86_PML4E_PG_MASK, &pPdptTemp);
3935 AssertRCReturn(rc, ZeroPde);
3936
3937 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
3938 if (pPdptTemp->a[iPdPt].n.u1Present)
3939 {
3940 PCX86PDPAE pPD;
3941 rc = PGM_GCPHYS_2_PTR_BY_PGM(pPGM, pPdptTemp->a[iPdPt].u & X86_PDPE_PG_MASK, &pPD);
3942 AssertRCReturn(rc, ZeroPde);
3943
3944 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
3945 return pPD->a[iPD];
3946 }
3947 }
3948 return ZeroPde;
3949}
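/* Editorial sketch (illustrative, not source code): a long-mode lookup
 * using the getter above; the all-zero PDE doubles as the not-present
 * result, so a single present check suffices.
 *
 *      X86PDEPAE Pde = pgmGstGetLongModePDE(&pVM->pgm.s, GCPtr);
 *      if (Pde.n.u1Present)
 *          GCPhysPage = Pde.u & X86_PDE_PAE_PG_MASK;
 */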
3950
3951
3952/**
3953 * Gets the page directory entry for the specified address.
3954 *
3955 * @returns Pointer to the page directory entry in question.
3956 * @returns NULL if the page directory is not present or on an invalid page.
3957 * @param pPGM Pointer to the PGM instance data.
3958 * @param GCPtr The address.
3959 */
3960DECLINLINE(PX86PDEPAE) pgmGstGetLongModePDEPtr(PPGM pPGM, RTGCPTR64 GCPtr)
3961{
3962 PCX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
3963 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
3964 if (pGuestPml4->a[iPml4].n.u1Present)
3965 {
3966 PCX86PDPT pPdptTemp;
3967 int rc = PGM_GCPHYS_2_PTR_BY_PGM(pPGM, pGuestPml4->a[iPml4].u & X86_PML4E_PG_MASK, &pPdptTemp);
3968 AssertRCReturn(rc, NULL);
3969
3970 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
3971 if (pPdptTemp->a[iPdPt].n.u1Present)
3972 {
3973 PX86PDPAE pPD;
3974 rc = PGM_GCPHYS_2_PTR_BY_PGM(pPGM, pPdptTemp->a[iPdPt].u & X86_PDPE_PG_MASK, &pPD);
3975 AssertRCReturn(rc, NULL);
3976
3977 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
3978 return &pPD->a[iPD];
3979 }
3980 }
3981 return NULL;
3982}
3983
3984
3985/**
3986 * Gets the GUEST page directory pointer for the specified address.
3987 *
3988 * @returns Pointer to the page directory in question.
3989 * @returns NULL if the page directory is not present or on an invalid page.
3990 * @param pPGM Pointer to the PGM instance data.
3991 * @param GCPtr The address.
3992 * @param ppPml4e Page Map Level-4 Entry (out)
3993 * @param pPdpe Page directory pointer table entry (out)
3994 * @param piPD Receives the index into the returned page directory.
3995 */
3996DECLINLINE(PX86PDPAE) pgmGstGetLongModePDPtr(PPGM pPGM, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPE pPdpe, unsigned *piPD)
3997{
3998 PX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
3999 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
4000 PCX86PML4E pPml4e = *ppPml4e = &pGuestPml4->a[iPml4];
4001 if (pPml4e->n.u1Present)
4002 {
4003 PCX86PDPT pPdptTemp;
4004 int rc = PGM_GCPHYS_2_PTR_BY_PGM(pPGM, pPml4e->u & X86_PML4E_PG_MASK, &pPdptTemp);
4005 AssertRCReturn(rc, NULL);
4006
4007 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
4008 *pPdpe = pPdptTemp->a[iPdPt];
4009 if (pPdptTemp->a[iPdPt].n.u1Present)
4010 {
4011 PX86PDPAE pPD;
4012 rc = PGM_GCPHYS_2_PTR_BY_PGM(pPGM, pPdptTemp->a[iPdPt].u & X86_PDPE_PG_MASK, &pPD);
4013 AssertRCReturn(rc, NULL);
4014
4015 *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
4016 return pPD;
4017 }
4018 }
4019 return NULL;
4020}
4021
4022#endif /* !IN_RC */
4023
4024/**
4025 * Gets the shadow page directory, 32-bit.
4026 *
4027 * @returns Pointer to the shadow 32-bit PD.
4028 * @param pPGM Pointer to the PGM instance data.
4029 */
4030DECLINLINE(PX86PD) pgmShwGet32BitPDPtr(PPGM pPGM)
4031{
4032 return (PX86PD)PGMPOOL_PAGE_2_PTR_BY_PGM(pPGM, pPGM->CTX_SUFF(pShwPageCR3));
4033}
4034
4035
4036/**
4037 * Gets the shadow page directory entry for the specified address, 32-bit.
4038 *
4039 * @returns Shadow 32-bit PDE.
4040 * @param pPGM Pointer to the PGM instance data.
4041 * @param GCPtr The address.
4042 */
4043DECLINLINE(X86PDE) pgmShwGet32BitPDE(PPGM pPGM, RTGCPTR GCPtr)
4044{
4045 const unsigned iPd = (GCPtr >> X86_PD_SHIFT) & X86_PD_MASK;
4046
4047 PX86PD pShwPde = pgmShwGet32BitPDPtr(pPGM);
4048 if (!pShwPde)
4049 {
4050 X86PDE ZeroPde = {0};
4051 return ZeroPde;
4052 }
4053 return pShwPde->a[iPd];
4054}
4055
4056
4057/**
4058 * Gets the pointer to the shadow page directory entry for the specified
4059 * address, 32-bit.
4060 *
4061 * @returns Pointer to the shadow 32-bit PDE.
4062 * @param pPGM Pointer to the PGM instance data.
4063 * @param GCPtr The address.
4064 */
4065DECLINLINE(PX86PDE) pgmShwGet32BitPDEPtr(PPGM pPGM, RTGCPTR GCPtr)
4066{
4067 const unsigned iPd = (GCPtr >> X86_PD_SHIFT) & X86_PD_MASK;
4068
4069 PX86PD pPde = pgmShwGet32BitPDPtr(pPGM);
4070 AssertReturn(pPde, NULL);
4071 return &pPde->a[iPd];
4072}
4073
4074
4075/**
4076 * Gets the shadow page pointer table, PAE.
4077 *
4078 * @returns Pointer to the shadow PAE PDPT.
4079 * @param pPGM Pointer to the PGM instance data.
4080 */
4081DECLINLINE(PX86PDPT) pgmShwGetPaePDPTPtr(PPGM pPGM)
4082{
4083 return (PX86PDPT)PGMPOOL_PAGE_2_PTR_BY_PGM(pPGM, pPGM->CTX_SUFF(pShwPageCR3));
4084}
4085
4086
4087/**
4088 * Gets the shadow page directory for the specified address, PAE.
4089 *
4090 * @returns Pointer to the shadow PD.
4091 * @param pPGM Pointer to the PGM instance data.
4092 * @param GCPtr The address.
4093 */
4094DECLINLINE(PX86PDPAE) pgmShwGetPaePDPtr(PPGM pPGM, RTGCPTR GCPtr)
4095{
4096 const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
4097 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pPGM);
4098
4099 if (!pPdpt->a[iPdpt].n.u1Present)
4100 return NULL;
4101
4102 /* Fetch the pgm pool shadow descriptor. */
4103 PPGMPOOLPAGE pShwPde = pgmPoolGetPageByHCPhys(PGM2VM(pPGM), pPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
4104 AssertReturn(pShwPde, NULL);
4105
4106 return (PX86PDPAE)PGMPOOL_PAGE_2_PTR_BY_PGM(pPGM, pShwPde);
4107}
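/* Usage sketch (hypothetical caller): a NULL return means the PDPT entry is not
 * present; otherwise the caller typically indexes the PD right away using the
 * same shift/mask scheme as the accessors below:
 *
 *   PX86PDPAE pPD = pgmShwGetPaePDPtr(pPGM, GCPtr);
 *   if (pPD)
 *   {
 *       const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
 *       X86PDEPAE Pde = pPD->a[iPd];
 *   }
 */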
4108
4109
4110/**
4111 * Gets the shadow page directory for the specified address, PAE (variant taking a caller-supplied PDPT).
4112 *
4113 * @returns Pointer to the shadow PD.
4114 * @param pPGM Pointer to the PGM instance data.
 * @param pPdpt Pointer to the shadow PDPT fetched by the caller.
4115 * @param GCPtr The address.
4116 */
4117DECLINLINE(PX86PDPAE) pgmShwGetPaePDPtr(PPGM pPGM, PX86PDPT pPdpt, RTGCPTR GCPtr)
4118{
4119 const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
4120
4121 if (!pPdpt->a[iPdpt].n.u1Present)
4122 return NULL;
4123
4124 /* Fetch the pgm pool shadow descriptor. */
4125 PPGMPOOLPAGE pShwPde = pgmPoolGetPageByHCPhys(PGM2VM(pPGM), pPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
4126 AssertReturn(pShwPde, NULL);
4127
4128 return (PX86PDPAE)PGMPOOL_PAGE_2_PTR_BY_PGM(pPGM, pShwPde);
4129}
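/* Usage sketch (hypothetical): this variant exists so a caller that already
 * holds the shadow PDPT can skip the refetch performed by the variant above:
 *
 *   PX86PDPT  pPdpt = pgmShwGetPaePDPTPtr(pPGM);
 *   PX86PDPAE pPD   = pgmShwGetPaePDPtr(pPGM, pPdpt, GCPtr);
 */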
4130
4131
4132/**
4133 * Gets the shadow page directory entry, PAE.
4134 *
4135 * @returns PDE.
4136 * @param pPGM Pointer to the PGM instance data.
4137 * @param GCPtr The address.
4138 */
4139DECLINLINE(X86PDEPAE) pgmShwGetPaePDE(PPGM pPGM, RTGCPTR GCPtr)
4140{
4141 const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
4142
4143 PX86PDPAE pShwPde = pgmShwGetPaePDPtr(pPGM, GCPtr);
4144 if (!pShwPde)
4145 {
4146 X86PDEPAE ZeroPde = {0};
4147 return ZeroPde;
4148 }
4149 return pShwPde->a[iPd];
4150}
4151
4152
4153/**
4154 * Gets the pointer to the shadow page directory entry for an address, PAE.
4155 *
4156 * @returns Pointer to the PDE.
4157 * @param pPGM Pointer to the PGM instance data.
4158 * @param GCPtr The address.
4159 */
4160DECLINLINE(PX86PDEPAE) pgmShwGetPaePDEPtr(PPGM pPGM, RTGCPTR GCPtr)
4161{
4162 const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
4163
4164 PX86PDPAE pPde = pgmShwGetPaePDPtr(pPGM, GCPtr);
4165 AssertReturn(pPde, NULL);
4166 return &pPde->a[iPd];
4167}
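/* Usage sketch (hypothetical caller): the pointer variant is for callers that
 * need to inspect or update the live shadow PDE rather than a copy:
 *
 *   PX86PDEPAE pPde = pgmShwGetPaePDEPtr(pPGM, GCPtr);
 *   if (pPde && pPde->n.u1Present)
 *   {
 *       // operate on *pPde in place.
 *   }
 */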
4168
4169#ifndef IN_RC
4170
4171/**
4172 * Gets the shadow page map level-4 pointer.
4173 *
4174 * @returns Pointer to the shadow PML4.
4175 * @param pPGM Pointer to the PGM instance data.
4176 */
4177DECLINLINE(PX86PML4) pgmShwGetLongModePML4Ptr(PPGM pPGM)
4178{
4179 return (PX86PML4)PGMPOOL_PAGE_2_PTR_BY_PGM(pPGM, pPGM->CTX_SUFF(pShwPageCR3));
4180}
4181
4182
4183/**
4184 * Gets the shadow page map level-4 entry for the specified address.
4185 *
4186 * @returns The entry.
4187 * @param pPGM Pointer to the PGM instance data.
4188 * @param GCPtr The address.
4189 */
4190DECLINLINE(X86PML4E) pgmShwGetLongModePML4E(PPGM pPGM, RTGCPTR GCPtr)
4191{
4192 const unsigned iPml4 = ((RTGCUINTPTR64)GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
4193 PX86PML4 pShwPml4 = pgmShwGetLongModePML4Ptr(pPGM);
4194
4195 if (!pShwPml4)
4196 {
4197 X86PML4E ZeroPml4e = {0};
4198 return ZeroPml4e;
4199 }
4200 return pShwPml4->a[iPml4];
4201}
4202
4203
4204/**
4205 * Gets the pointer to the specified shadow page map level-4 entry.
4206 *
4207 * @returns Pointer to the entry, or NULL if no shadow PML4 is mapped.
4208 * @param pPGM Pointer to the PGM instance data.
4209 * @param iPml4 The PML4 index.
4210 */
4211DECLINLINE(PX86PML4E) pgmShwGetLongModePML4EPtr(PPGM pPGM, unsigned int iPml4)
4212{
4213 PX86PML4 pShwPml4 = pgmShwGetLongModePML4Ptr(pPGM);
4214 if (!pShwPml4)
4215 return NULL;
4216 return &pShwPml4->a[iPml4];
4217}
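/* Usage sketch (hypothetical): unlike the by-address accessor above, this one
 * takes the PML4 index directly, so the caller derives it first:
 *
 *   const unsigned iPml4  = ((RTGCUINTPTR64)GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
 *   PX86PML4E      pPml4e = pgmShwGetLongModePML4EPtr(pPGM, iPml4);
 */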
4218
4219
4220/**
4221 * Gets the GUEST page directory pointer for the specified address.
4222 *
4223 * @returns The page directory in question.
4224 * @returns NULL if the page directory is not present or on an invalid page.
4225 * @param pPGM Pointer to the PGM instance data.
4226 * @param GCPtr The address.
4227 * @param piPD Receives the index into the returned page directory
4228 */
4229DECLINLINE(PX86PDPAE) pgmGstGetLongModePDPtr(PPGM pPGM, RTGCPTR64 GCPtr, unsigned *piPD)
4230{
4231 PCX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
4232 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
4233 if (pGuestPml4->a[iPml4].n.u1Present)
4234 {
4235 PCX86PDPT pPdptTemp;
4236 int rc = PGM_GCPHYS_2_PTR_BY_PGM(pPGM, pGuestPml4->a[iPml4].u & X86_PML4E_PG_MASK, &pPdptTemp);
4237 AssertRCReturn(rc, NULL);
4238
4239 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
4240 if (pPdptTemp->a[iPdPt].n.u1Present)
4241 {
4242 PX86PDPAE pPD;
4243 rc = PGM_GCPHYS_2_PTR_BY_PGM(pPGM, pPdptTemp->a[iPdPt].u & X86_PDPE_PG_MASK, &pPD);
4244 AssertRCReturn(rc, NULL);
4245
4246 *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
4247 return pPD;
4248 }
4249 }
4250 return NULL;
4251}
4252
4253#endif /* !IN_RC */
4254
4255/**
4256 * Gets the page state for a physical handler.
4257 *
4258 * @returns The physical handler page state.
4259 * @param pCur The physical handler in question.
4260 */
4261DECLINLINE(unsigned) pgmHandlerPhysicalCalcState(PPGMPHYSHANDLER pCur)
4262{
4263 switch (pCur->enmType)
4264 {
4265 case PGMPHYSHANDLERTYPE_PHYSICAL_WRITE:
4266 return PGM_PAGE_HNDL_PHYS_STATE_WRITE;
4267
4268 case PGMPHYSHANDLERTYPE_MMIO:
4269 case PGMPHYSHANDLERTYPE_PHYSICAL_ALL:
4270 return PGM_PAGE_HNDL_PHYS_STATE_ALL;
4271
4272 default:
4273 AssertFatalMsgFailed(("Invalid type %d\n", pCur->enmType));
4274 }
4275}
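/* Usage sketch (hypothetical caller): the computed state is what gets stamped
 * into the PGMPAGE flags when a physical handler is (re)installed; the setter
 * name mirrors the virtual-handler one used further down in this file:
 *
 *   unsigned uState = pgmHandlerPhysicalCalcState(pCur);
 *   PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, uState);
 */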
4276
4277
4278/**
4279 * Gets the page state for a virtual handler.
4280 *
4281 * @returns The virtual handler page state.
4282 * @param pCur The virtual handler in question.
4283 * @remarks This should never be used on a hypervisor access handler.
4284 */
4285DECLINLINE(unsigned) pgmHandlerVirtualCalcState(PPGMVIRTHANDLER pCur)
4286{
4287 switch (pCur->enmType)
4288 {
4289 case PGMVIRTHANDLERTYPE_WRITE:
4290 return PGM_PAGE_HNDL_VIRT_STATE_WRITE;
4291 case PGMVIRTHANDLERTYPE_ALL:
4292 return PGM_PAGE_HNDL_VIRT_STATE_ALL;
4293 default:
4294 AssertFatalMsgFailed(("Invalid type %d\n", pCur->enmType));
4295 }
4296}
4297
4298
4299/**
4300 * Clears one physical page of a virtual handler.
4301 *
4302 * @param pPGM Pointer to the PGM instance.
4303 * @param pCur The virtual handler structure.
4304 * @param iPage The physical page index.
4305 *
4306 * @remark Only used when PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL is being set, so no
4307 * need to care about other handlers in the same page.
4308 */
4309DECLINLINE(void) pgmHandlerVirtualClearPage(PPGM pPGM, PPGMVIRTHANDLER pCur, unsigned iPage)
4310{
4311 const PPGMPHYS2VIRTHANDLER pPhys2Virt = &pCur->aPhysToVirt[iPage];
4312
4313 /*
4314 * Remove the node from the tree (it's supposed to be in the tree if we get here!).
4315 */
4316#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
4317 AssertReleaseMsg(pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_IN_TREE,
4318 ("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
4319 pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias));
4320#endif
4321 if (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_IS_HEAD)
4322 {
4323 /* We're the head of the alias chain. */
4324 PPGMPHYS2VIRTHANDLER pRemove = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysRemove(&pPGM->CTX_SUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key); NOREF(pRemove);
4325#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
4326 AssertReleaseMsg(pRemove != NULL,
4327 ("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
4328 pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias));
4329 AssertReleaseMsg(pRemove == pPhys2Virt,
4330 ("wanted: pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n"
4331 " got: pRemove=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
4332 pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias,
4333 pRemove, pRemove->Core.Key, pRemove->Core.KeyLast, pRemove->offVirtHandler, pRemove->offNextAlias));
4334#endif
4335 if (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK)
4336 {
4337 /* Insert the next list in the alias chain into the tree. */
4338 PPGMPHYS2VIRTHANDLER pNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPhys2Virt + (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
4339#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
4340 AssertReleaseMsg(pNext->offNextAlias & PGMPHYS2VIRTHANDLER_IN_TREE,
4341 ("pNext=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
4342 pNext, pNext->Core.Key, pNext->Core.KeyLast, pNext->offVirtHandler, pNext->offNextAlias));
4343#endif
4344 pNext->offNextAlias |= PGMPHYS2VIRTHANDLER_IS_HEAD;
4345 bool fRc = RTAvlroGCPhysInsert(&pPGM->CTX_SUFF(pTrees)->PhysToVirtHandlers, &pNext->Core);
4346 AssertRelease(fRc);
4347 }
4348 }
4349 else
4350 {
4351 /* Locate the previous node in the alias chain. */
4352 PPGMPHYS2VIRTHANDLER pPrev = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGet(&pPGM->CTX_SUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key);
4353#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
4354 AssertReleaseMsg(pPrev != pPhys2Virt,
4355 ("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} pPrev=%p\n",
4356 pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias, pPrev));
4357#endif
4358 for (;;)
4359 {
4360 PPGMPHYS2VIRTHANDLER pNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPrev + (pPrev->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
4361 if (pNext == pPhys2Virt)
4362 {
4363 /* unlink. */
4364 LogFlow(("pgmHandlerVirtualClearPage: removed %p:{.offNextAlias=%#RX32} from alias chain. prev %p:{.offNextAlias=%#RX32} [%RGp-%RGp]\n",
4365 pPhys2Virt, pPhys2Virt->offNextAlias, pPrev, pPrev->offNextAlias, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast));
4366 if (!(pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK))
4367 pPrev->offNextAlias &= ~PGMPHYS2VIRTHANDLER_OFF_MASK;
4368 else
4369 {
4370 PPGMPHYS2VIRTHANDLER pNewNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPhys2Virt + (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
4371 pPrev->offNextAlias = ((intptr_t)pNewNext - (intptr_t)pPrev)
4372 | (pPrev->offNextAlias & ~PGMPHYS2VIRTHANDLER_OFF_MASK);
4373 }
4374 break;
4375 }
4376
4377 /* next */
4378 if (pNext == pPrev)
4379 {
4380#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
4381 AssertReleaseMsg(pNext != pPrev,
4382 ("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} pPrev=%p\n",
4383 pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias, pPrev));
4384#endif
4385 break;
4386 }
4387 pPrev = pNext;
4388 }
4389 }
4390 Log2(("PHYS2VIRT: Removing %RGp-%RGp %#RX32 %s\n",
4391 pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias, R3STRING(pCur->pszDesc)));
4392 pPhys2Virt->offNextAlias = 0;
4393 pPhys2Virt->Core.KeyLast = NIL_RTGCPHYS; /* require reinsert */
4394
4395 /*
4396 * Clear the ram flags for this page.
4397 */
4398 PPGMPAGE pPage = pgmPhysGetPage(pPGM, pPhys2Virt->Core.Key);
4399 AssertReturnVoid(pPage);
4400 PGM_PAGE_SET_HNDL_VIRT_STATE(pPage, PGM_PAGE_HNDL_VIRT_STATE_NONE);
4401}
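/* A worked note on the alias-chain encoding manipulated above (pP2V is a
 * hypothetical node name): offNextAlias packs a self-relative byte offset in
 * PGMPHYS2VIRTHANDLER_OFF_MASK together with the IN_TREE/IS_HEAD flag bits, so
 * hopping to the next alias is pointer arithmetic on the node itself:
 *
 *   PPGMPHYS2VIRTHANDLER pNext = (PPGMPHYS2VIRTHANDLER)(  (intptr_t)pP2V
 *                                                       + (pP2V->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
 *
 * A zero masked offset marks the end of the chain, which is why the unlink code
 * clears PGMPHYS2VIRTHANDLER_OFF_MASK on the predecessor when removing the tail.
 */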
4402
4403
4404/**
4405 * Internal worker for finding an 'in-use' shadow page given by its host physical address.
4406 *
4407 * @returns Pointer to the shadow page structure.
4408 * @param pPool The pool.
4409 * @param HCPhys The HC physical address of the shadow page.
4410 */
4411DECLINLINE(PPGMPOOLPAGE) pgmPoolGetPage(PPGMPOOL pPool, RTHCPHYS HCPhys)
4412{
4413 /*
4414 * Look up the page.
4415 */
4416 PPGMPOOLPAGE pPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, HCPhys & X86_PTE_PAE_PG_MASK);
4417 AssertFatalMsg(pPage && pPage->enmKind != PGMPOOLKIND_FREE, ("HCPhys=%RHp pPage=%p idx=%d\n", HCPhys, pPage, (pPage) ? pPage->idx : 0));
4418 return pPage;
4419}
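/* Usage sketch (hypothetical): a caller usually holds a shadow paging-structure
 * entry (Pde here is an assumed X86PDEPAE) and wants the pool page descriptor
 * behind the physical address it names:
 *
 *   PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, Pde.u & X86_PDE_PAE_PG_MASK);
 */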
4420
4421
4422/**
4423 * Internal worker for finding an 'in-use' shadow page given by its pool page index.
4424 *
4425 * @returns Pointer to the shadow page structure.
4426 * @param pPool The pool.
4427 * @param idx The pool page index.
4428 */
4429DECLINLINE(PPGMPOOLPAGE) pgmPoolGetPageByIdx(PPGMPOOL pPool, unsigned idx)
4430{
4431 AssertFatalMsg(idx >= PGMPOOL_IDX_FIRST && idx < pPool->cCurPages, ("idx=%d\n", idx));
4432 return &pPool->aPages[idx];
4433}
4434
4435
4436#ifdef PGMPOOL_WITH_GCPHYS_TRACKING
4437/**
4438 * Clear references to guest physical memory.
4439 *
4440 * @param pPool The pool.
4441 * @param pPoolPage The pool page.
4442 * @param pPhysPage The physical guest page tracking structure.
4443 */
4444DECLINLINE(void) pgmTrackDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPoolPage, PPGMPAGE pPhysPage)
4445{
4446 /*
4447 * Just deal with the simple case here.
4448 */
4449# ifdef LOG_ENABLED
4450 const unsigned uOrg = PGM_PAGE_GET_TRACKING(pPhysPage);
4451# endif
4452 const unsigned cRefs = PGM_PAGE_GET_TD_CREFS(pPhysPage);
4453 if (cRefs == 1)
4454 {
4455 Assert(pPoolPage->idx == PGM_PAGE_GET_TD_IDX(pPhysPage));
4456 PGM_PAGE_SET_TRACKING(pPhysPage, 0);
4457 }
4458 else
4459 pgmPoolTrackPhysExtDerefGCPhys(pPool, pPoolPage, pPhysPage);
4460 Log2(("pgmTrackDerefGCPhys: %x -> %x pPhysPage=%R[pgmpage]\n", uOrg, PGM_PAGE_GET_TRACKING(pPhysPage), pPhysPage ));
4461}
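/* The fast path above relies on the tracking-word layout: with a single
 * reference the PGMPAGE tracking field holds the owning pool page index and a
 * reference count of one, so clearing it is a plain reset; anything more goes
 * through the extent list via pgmPoolTrackPhysExtDerefGCPhys. A sketch of the
 * same decision, using the accessors from this header:
 *
 *   if (PGM_PAGE_GET_TD_CREFS(pPhysPage) == 1)
 *       PGM_PAGE_SET_TRACKING(pPhysPage, 0); // sole owner: drop tracking outright.
 */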
4462#endif /* PGMPOOL_WITH_GCPHYS_TRACKING */
4463
4464
4465#ifdef PGMPOOL_WITH_CACHE
4466/**
4467 * Moves the page to the head of the age list.
4468 *
4469 * This is done when the cached page is used in one way or another.
4470 *
4471 * @param pPool The pool.
4472 * @param pPage The cached page.
4474 */
4475DECLINLINE(void) pgmPoolCacheUsed(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
4476{
4477 /*
4478 * Move to the head of the age list.
4479 */
4480 if (pPage->iAgePrev != NIL_PGMPOOL_IDX)
4481 {
4482 /* unlink */
4483 pPool->aPages[pPage->iAgePrev].iAgeNext = pPage->iAgeNext;
4484 if (pPage->iAgeNext != NIL_PGMPOOL_IDX)
4485 pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->iAgePrev;
4486 else
4487 pPool->iAgeTail = pPage->iAgePrev;
4488
4489 /* insert at head */
4490 pPage->iAgePrev = NIL_PGMPOOL_IDX;
4491 pPage->iAgeNext = pPool->iAgeHead;
4492 Assert(pPage->iAgeNext != NIL_PGMPOOL_IDX); /* we would've already been head then */
4493 pPool->iAgeHead = pPage->idx;
4494 pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->idx;
4495 }
4496}
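/* Usage sketch (hypothetical): cache hits call this so that eviction, which
 * scans from pPool->iAgeTail, sees the least recently used pages first:
 *
 *   PPGMPOOLPAGE pPage = pgmPoolGetPage(pPool, HCPhys);
 *   pgmPoolCacheUsed(pPool, pPage); // keep the hot page away from the eviction end.
 */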
4497#endif /* PGMPOOL_WITH_CACHE */
4498
4499/**
4500 * Locks a page to prevent flushing (important for cr3 root pages or shadow pae pd pages).
4501 *
4502 * @param pPool The pool.
4503 * @param pPage The pool page.
4504 */
4505DECLINLINE(void) pgmPoolLockPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
4506{
4507 Assert(!pPage->fLocked);
4508 pPage->fLocked = true;
4509}
4510
4511
4512/**
4513 * Unlocks a page to allow flushing again.
4514 *
4515 * @param pPool The pool.
4516 * @param pPage The pool page.
4517 */
4518DECLINLINE(void) pgmPoolUnlockPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
4519{
4520 Assert(pPage->fLocked);
4521 pPage->fLocked = false;
4522}
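/* Usage sketch (hypothetical): lock/unlock bracket a window in which a root
 * page, e.g. the shadow CR3, must not be flushed out from under the caller:
 *
 *   pgmPoolLockPage(pPool, pShwPageCR3);
 *   // ... work that must not see the page reused ...
 *   pgmPoolUnlockPage(pPool, pShwPageCR3);
 */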
4523
4524
4525/**
4526 * Checks if the page is locked (e.g. the active CR3 or one of the four PDs of a PAE PDPT).
4527 *
4528 * @returns true if the page is locked, false otherwise.
 * @param pPGM Pointer to the PGM instance data.
4529 * @param pPage The pool page.
4530 */
4531DECLINLINE(bool) pgmPoolIsPageLocked(PPGM pPGM, PPGMPOOLPAGE pPage)
4532{
4533 if (pPage->fLocked)
4534 {
4535 LogFlow(("pgmPoolIsPageLocked found root page %d\n", pPage->enmKind));
4536 if (pPage->cModifications)
4537 pPage->cModifications = 1; /* reset counter (can't use 0, or else it will be reinserted in the modified list) */
4538 return true;
4539 }
4540 return false;
4541}
4542
4543/**
4544 * Tells if mappings are to be put into the shadow page table or not.
4545 *
4546 * @returns true if mappings are enabled, false if not.
4547 * @param pPGM Pointer to the PGM instance data.
4548 */
4549DECLINLINE(bool) pgmMapAreMappingsEnabled(PPGM pPGM)
4550{
4551#ifdef IN_RING0
4552 /* There are no mappings in VT-x and AMD-V mode. */
4553 Assert(pPGM->fDisableMappings);
4554 return false;
4555#else
4556 return !pPGM->fDisableMappings;
4557#endif
4558}
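/* Usage sketch (hypothetical): callers guard mapping-related shadow page table
 * work on this predicate so the VT-x/AMD-V (ring-0) configuration skips it;
 * pVM->pgm.s is the usual way a VM handle reaches its PGM instance data:
 *
 *   if (pgmMapAreMappingsEnabled(&pVM->pgm.s))
 *   {
 *       // sync guest/hypervisor mappings into the shadow page tables.
 *   }
 */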
4559
4560/** @} */
4561
4562#endif /* !___PGMInternal_h */
4563