VirtualBox

source: vbox/trunk/src/VBox/VMM/PGMInternal.h@ 22695

Last change on this file since 22695 was 22695, checked in by vboxsync, 16 years ago

Must flush pgm pool pages in PGMR3PhysGCPhys2CCPtrExternal to avoid changing dormant pgm pool pages.

/* $Id: PGMInternal.h 22695 2009-09-02 08:41:52Z vboxsync $ */
/** @file
 * PGM - Internal header file.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

#ifndef ___PGMInternal_h
#define ___PGMInternal_h

#include <VBox/cdefs.h>
#include <VBox/types.h>
#include <VBox/err.h>
#include <VBox/stam.h>
#include <VBox/param.h>
#include <VBox/vmm.h>
#include <VBox/mm.h>
#include <VBox/pdmcritsect.h>
#include <VBox/pdmapi.h>
#include <VBox/dis.h>
#include <VBox/dbgf.h>
#include <VBox/log.h>
#include <VBox/gmm.h>
#include <VBox/hwaccm.h>
#include <iprt/avl.h>
#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/critsect.h>



/** @defgroup grp_pgm_int Internals
 * @ingroup grp_pgm
 * @internal
 * @{
 */


/** @name PGM Compile Time Config
 * @{
 */

/**
 * Resolve page-out-of-sync issues inside the Guest Context (in PGMGC.cpp).
 * Comment this out if it breaks something.
 */
#define PGM_OUT_OF_SYNC_IN_GC

/**
 * Check and skip global PDEs for non-global flushes.
 */
#define PGM_SKIP_GLOBAL_PAGEDIRS_ON_NONGLOBAL_FLUSH

/**
 * Optimization for PAE page tables that are modified often.
 */
#ifndef IN_RC
////# define PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
#endif

/**
 * Sync N pages instead of a whole page table.
 */
#define PGM_SYNC_N_PAGES

/**
 * Number of pages to sync during a page fault.
 *
 * When PGMPOOL_WITH_GCPHYS_TRACKING is enabled, using high values here
 * causes a lot of unnecessary extents and is also slower than taking more \#PFs.
 */
#define PGM_SYNC_NR_PAGES               8

/**
 * Number of PGMPhysRead/Write cache entries (must be <= 64, the number of
 * bits in the uint64_t valid-entry bitmap).
 */
#define PGM_MAX_PHYSCACHE_ENTRIES       64
#define PGM_MAX_PHYSCACHE_ENTRIES_MASK  (PGM_MAX_PHYSCACHE_ENTRIES-1)

/** @def PGMPOOL_WITH_CACHE
 * Enable aggressive caching using the page pool.
 *
 * This requires PGMPOOL_WITH_USER_TRACKING and PGMPOOL_WITH_MONITORING.
 */
#define PGMPOOL_WITH_CACHE

/** @def PGMPOOL_WITH_MIXED_PT_CR3
 * When defined, we'll deal with 'uncacheable' pages.
 */
#ifdef PGMPOOL_WITH_CACHE
# define PGMPOOL_WITH_MIXED_PT_CR3
#endif

/** @def PGMPOOL_WITH_MONITORING
 * Monitor the guest pages which are shadowed.
 * When this is enabled, PGMPOOL_WITH_CACHE or PGMPOOL_WITH_GCPHYS_TRACKING must
 * be enabled as well.
 * @remark doesn't really work without caching now. (Mixed PT/CR3 change.)
 */
#ifdef PGMPOOL_WITH_CACHE
# define PGMPOOL_WITH_MONITORING
#endif

/** @def PGMPOOL_WITH_GCPHYS_TRACKING
 * Tracking of shadow pages mapping guest physical pages.
 *
 * This is very expensive; the current cache prototype is trying to figure out
 * whether it will be acceptable with an aggressive caching policy.
 */
#if defined(PGMPOOL_WITH_CACHE) || defined(PGMPOOL_WITH_MONITORING)
# define PGMPOOL_WITH_GCPHYS_TRACKING
#endif

/** @def PGMPOOL_WITH_USER_TRACKING
 * Tracking users of shadow pages. This is required for the linking of shadow page
 * tables and physical guest addresses.
 */
#if defined(PGMPOOL_WITH_GCPHYS_TRACKING) || defined(PGMPOOL_WITH_CACHE) || defined(PGMPOOL_WITH_MONITORING)
# define PGMPOOL_WITH_USER_TRACKING
#endif

/** @def PGMPOOL_CFG_MAX_GROW
 * The maximum number of pages to add to the pool in one go.
 */
#define PGMPOOL_CFG_MAX_GROW            (_256K >> PAGE_SHIFT)

/** @def VBOX_STRICT_PGM_HANDLER_VIRTUAL
 * Enables some extra assertions for virtual handlers (mainly phys2virt related).
 */
#ifdef VBOX_STRICT
# define VBOX_STRICT_PGM_HANDLER_VIRTUAL
#endif

/** @def VBOX_WITH_NEW_LAZY_PAGE_ALLOC
 * Enables the experimental lazy page allocation code. */
/*# define VBOX_WITH_NEW_LAZY_PAGE_ALLOC */

/** @} */


/** @name PDPT and PML4 flags.
 * These are placed in the three bits available for system programs in
 * the PDPT and PML4 entries.
 * @{ */
/** The entry is a permanent one and it must always be present.
 * Never free such an entry. */
#define PGM_PLXFLAGS_PERMANENT          RT_BIT_64(10)
/** Mapping (hypervisor allocated pagetable). */
#define PGM_PLXFLAGS_MAPPING            RT_BIT_64(11)
/** @} */

/** @name Page directory flags.
 * These are placed in the three bits available for system programs in
 * the page directory entries.
 * @{ */
/** Mapping (hypervisor allocated pagetable). */
#define PGM_PDFLAGS_MAPPING             RT_BIT_64(10)
/** Made read-only to facilitate dirty bit tracking. */
#define PGM_PDFLAGS_TRACK_DIRTY         RT_BIT_64(11)
/** @} */

/** @name Page flags.
 * These are placed in the three bits available for system programs in
 * the page entries.
 * @{ */
/** Made read-only to facilitate dirty bit tracking. */
#define PGM_PTFLAGS_TRACK_DIRTY         RT_BIT_64(9)

#ifndef PGM_PTFLAGS_CSAM_VALIDATED
/** Scanned and approved by CSAM (tm).
 * NOTE: Must be identical to the one defined in CSAMInternal.h!!
 * @todo Move PGM_PTFLAGS_* and PGM_PDFLAGS_* to VBox/pgm.h. */
#define PGM_PTFLAGS_CSAM_VALIDATED      RT_BIT_64(11)
#endif

/** @} */

/** @name Defines used to indicate the shadow and guest paging in the templates.
 * @{ */
#define PGM_TYPE_REAL                   1
#define PGM_TYPE_PROT                   2
#define PGM_TYPE_32BIT                  3
#define PGM_TYPE_PAE                    4
#define PGM_TYPE_AMD64                  5
#define PGM_TYPE_NESTED                 6
#define PGM_TYPE_EPT                    7
#define PGM_TYPE_MAX                    PGM_TYPE_EPT
/** @} */

/** Macro for checking if the guest is using paging.
 * @param   uGstType    PGM_TYPE_*
 * @param   uShwType    PGM_TYPE_*
 * @remark  ASSUMES certain order of the PGM_TYPE_* values.
 */
#define PGM_WITH_PAGING(uGstType, uShwType) \
    (   (uGstType) >= PGM_TYPE_32BIT \
     && (uShwType) != PGM_TYPE_NESTED \
     && (uShwType) != PGM_TYPE_EPT)

/** Macro for checking if the guest supports the NX bit.
 * @param   uGstType    PGM_TYPE_*
 * @param   uShwType    PGM_TYPE_*
 * @remark  ASSUMES certain order of the PGM_TYPE_* values.
 */
#define PGM_WITH_NX(uGstType, uShwType) \
    (   (uGstType) >= PGM_TYPE_PAE \
     && (uShwType) != PGM_TYPE_NESTED \
     && (uShwType) != PGM_TYPE_EPT)
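
/*
 * Editor's note (illustrative sketch, not part of the original header): the
 * two predicates above are consumed by the mode templates, which are compiled
 * once per (guest, shadow) mode pair with PGM_GST_TYPE / PGM_SHW_TYPE defined,
 * so the predicates fold to compile-time constants and the dead branch is
 * eliminated.  The function below is hypothetical; only the macro names are real.
 */
#if 0 /* example only */
static int pgmExampleSyncHandler(void)
{
    if (PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE))
    {
        /* Guest paging is active and shadowed: the guest page tables must be consulted. */
    }
    if (PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE))
    {
        /* The guest can use the NX bit: propagate it to the shadow entries. */
    }
    return VINF_SUCCESS;
}
#endif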


/** @def PGM_HCPHYS_2_PTR
 * Maps a HC physical page pool address to a virtual address.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 * @param   HCPhys  The HC physical address to map to a virtual one.
 * @param   ppv     Where to store the virtual address. No need to cast this.
 *
 * @remark  In GC this uses PGMGCDynMapHCPage(), so it will consume part of the
 *          small page window employed by that function. Be careful.
 * @remark  There is no need to assert on the result.
 */
#ifdef IN_RC
# define PGM_HCPHYS_2_PTR(pVM, HCPhys, ppv) \
    PGMDynMapHCPage(pVM, HCPhys, (void **)(ppv))
#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
# define PGM_HCPHYS_2_PTR(pVM, HCPhys, ppv) \
    pgmR0DynMapHCPageInlined(&(pVM)->pgm.s, HCPhys, (void **)(ppv))
#else
# define PGM_HCPHYS_2_PTR(pVM, HCPhys, ppv) \
    MMPagePhys2PageEx(pVM, HCPhys, (void **)(ppv))
#endif

/** @def PGM_HCPHYS_2_PTR_BY_PGM
 * Maps a HC physical page pool address to a virtual address.
 *
 * @returns VBox status code.
 * @param   pPGM    The PGM instance data.
 * @param   HCPhys  The HC physical address to map to a virtual one.
 * @param   ppv     Where to store the virtual address. No need to cast this.
 *
 * @remark  In GC this uses PGMGCDynMapHCPage(), so it will consume part of the
 *          small page window employed by that function. Be careful.
 * @remark  There is no need to assert on the result.
 */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
# define PGM_HCPHYS_2_PTR_BY_PGM(pPGM, HCPhys, ppv) \
    pgmR0DynMapHCPageInlined(pPGM, HCPhys, (void **)(ppv))
#else
# define PGM_HCPHYS_2_PTR_BY_PGM(pPGM, HCPhys, ppv) \
    PGM_HCPHYS_2_PTR(PGM2VM(pPGM), HCPhys, (void **)(ppv))
#endif

/** @def PGM_GCPHYS_2_PTR
 * Maps a GC physical page address to a virtual address.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 * @param   GCPhys  The GC physical address to map to a virtual one.
 * @param   ppv     Where to store the virtual address. No need to cast this.
 *
 * @remark  In GC this uses PGMGCDynMapGCPage(), so it will consume part of the
 *          small page window employed by that function. Be careful.
 * @remark  There is no need to assert on the result.
 */
#ifdef IN_RC
# define PGM_GCPHYS_2_PTR(pVM, GCPhys, ppv) \
    PGMDynMapGCPage(pVM, GCPhys, (void **)(ppv))
#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
# define PGM_GCPHYS_2_PTR(pVM, GCPhys, ppv) \
    pgmR0DynMapGCPageInlined(&(pVM)->pgm.s, GCPhys, (void **)(ppv))
#else
# define PGM_GCPHYS_2_PTR(pVM, GCPhys, ppv) \
    PGMPhysGCPhys2R3Ptr(pVM, GCPhys, 1 /* one page only */, (PRTR3PTR)(ppv)) /** @todo this isn't asserting, use PGMRamGCPhys2HCPtr! */
#endif

/** @def PGM_GCPHYS_2_PTR_BY_PGMCPU
 * Maps a GC physical page address to a virtual address.
 *
 * @returns VBox status code.
 * @param   pPGM    Pointer to the PGM instance data.
 * @param   GCPhys  The GC physical address to map to a virtual one.
 * @param   ppv     Where to store the virtual address. No need to cast this.
 *
 * @remark  In GC this uses PGMGCDynMapGCPage(), so it will consume part of the
 *          small page window employed by that function. Be careful.
 * @remark  There is no need to assert on the result.
 */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
# define PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, GCPhys, ppv) \
    pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), GCPhys, (void **)(ppv))
#else
# define PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, GCPhys, ppv) \
    PGM_GCPHYS_2_PTR(PGMCPU2VM(pPGM), GCPhys, ppv)
#endif

/** @def PGM_GCPHYS_2_PTR_EX
 * Maps an unaligned GC physical page address to a virtual address.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 * @param   GCPhys  The GC physical address to map to a virtual one.
 * @param   ppv     Where to store the virtual address. No need to cast this.
 *
 * @remark  In GC this uses PGMGCDynMapGCPage(), so it will consume part of the
 *          small page window employed by that function. Be careful.
 * @remark  There is no need to assert on the result.
 */
#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
# define PGM_GCPHYS_2_PTR_EX(pVM, GCPhys, ppv) \
    PGMDynMapGCPageOff(pVM, GCPhys, (void **)(ppv))
#else
# define PGM_GCPHYS_2_PTR_EX(pVM, GCPhys, ppv) \
    PGMPhysGCPhys2R3Ptr(pVM, GCPhys, 1 /* one page only */, (PRTR3PTR)(ppv)) /** @todo this isn't asserting, use PGMRamGCPhys2HCPtr! */
#endif
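
/*
 * Editor's note (illustrative sketch, not part of the original header): a
 * typical use of the mapping macros above.  The status code must be checked;
 * the returned pointer is only valid for the current page (and in RC/R0 only
 * until the dynamic mapping slot is recycled).  AssertRCReturn and NOREF are
 * real VBox/IPRT macros; the function itself is hypothetical.
 */
#if 0 /* example only */
static int pgmExampleReadGstPT(PVM pVM, RTGCPHYS GCPhys)
{
    PX86PT pGstPT;
    int rc = PGM_GCPHYS_2_PTR(pVM, GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK, &pGstPT);
    AssertRCReturn(rc, rc);
    /* ... read guest PTEs through pGstPT, within this one page only ... */
    NOREF(pGstPT);
    return VINF_SUCCESS;
}
#endif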

/** @def PGM_INVL_PG
 * Invalidates a page.
 *
 * @param   pVCpu   The VMCPU handle.
 * @param   GCVirt  The virtual address of the page to invalidate.
 */
#ifdef IN_RC
# define PGM_INVL_PG(pVCpu, GCVirt)             ASMInvalidatePage((void *)(GCVirt))
#elif defined(IN_RING0)
# define PGM_INVL_PG(pVCpu, GCVirt)             HWACCMInvalidatePage(pVCpu, (RTGCPTR)(GCVirt))
#else
# define PGM_INVL_PG(pVCpu, GCVirt)             HWACCMInvalidatePage(pVCpu, (RTGCPTR)(GCVirt))
#endif

/** @def PGM_INVL_PG_ALL_VCPU
 * Invalidates a page on all VCPUs.
 *
 * @param   pVM     The VM handle.
 * @param   GCVirt  The virtual address of the page to invalidate.
 */
#ifdef IN_RC
# define PGM_INVL_PG_ALL_VCPU(pVM, GCVirt)      ASMInvalidatePage((void *)(GCVirt))
#elif defined(IN_RING0)
# define PGM_INVL_PG_ALL_VCPU(pVM, GCVirt)      HWACCMInvalidatePageOnAllVCpus(pVM, (RTGCPTR)(GCVirt))
#else
# define PGM_INVL_PG_ALL_VCPU(pVM, GCVirt)      HWACCMInvalidatePageOnAllVCpus(pVM, (RTGCPTR)(GCVirt))
#endif

/** @def PGM_INVL_BIG_PG
 * Invalidates a 4MB page directory entry.
 *
 * @param   pVCpu   The VMCPU handle.
 * @param   GCVirt  The virtual address within the page directory to invalidate.
 */
#ifdef IN_RC
# define PGM_INVL_BIG_PG(pVCpu, GCVirt)         ASMReloadCR3()
#elif defined(IN_RING0)
# define PGM_INVL_BIG_PG(pVCpu, GCVirt)         HWACCMFlushTLB(pVCpu)
#else
# define PGM_INVL_BIG_PG(pVCpu, GCVirt)         HWACCMFlushTLB(pVCpu)
#endif

/** @def PGM_INVL_VCPU_TLBS()
 * Invalidates the TLBs of the specified VCPU.
 *
 * @param   pVCpu   The VMCPU handle.
 */
#ifdef IN_RC
# define PGM_INVL_VCPU_TLBS(pVCpu)              ASMReloadCR3()
#elif defined(IN_RING0)
# define PGM_INVL_VCPU_TLBS(pVCpu)              HWACCMFlushTLB(pVCpu)
#else
# define PGM_INVL_VCPU_TLBS(pVCpu)              HWACCMFlushTLB(pVCpu)
#endif

/** @def PGM_INVL_ALL_VCPU_TLBS()
 * Invalidates the TLBs of all VCPUs.
 *
 * @param   pVM     The VM handle.
 */
#ifdef IN_RC
# define PGM_INVL_ALL_VCPU_TLBS(pVM)            ASMReloadCR3()
#elif defined(IN_RING0)
# define PGM_INVL_ALL_VCPU_TLBS(pVM)            HWACCMFlushTLBOnAllVCpus(pVM)
#else
# define PGM_INVL_ALL_VCPU_TLBS(pVM)            HWACCMFlushTLBOnAllVCpus(pVM)
#endif

/** Size of the GCPtrConflict array in PGMMAPPING.
 * @remarks Must be a power of two. */
#define PGMMAPPING_CONFLICT_MAX         8

/**
 * Structure for tracking GC Mappings.
 *
 * This structure is used in linked lists in both GC and HC.
 */
typedef struct PGMMAPPING
{
    /** Pointer to next entry. */
    R3PTRTYPE(struct PGMMAPPING *)      pNextR3;
    /** Pointer to next entry. */
    R0PTRTYPE(struct PGMMAPPING *)      pNextR0;
    /** Pointer to next entry. */
    RCPTRTYPE(struct PGMMAPPING *)      pNextRC;
    /** Indicates whether this entry is finalized. */
    bool                                fFinalized;
    /** Start virtual address. */
    RTGCPTR                             GCPtr;
    /** Last virtual address (inclusive). */
    RTGCPTR                             GCPtrLast;
    /** Range size (bytes). */
    RTGCPTR                             cb;
    /** Pointer to relocation callback function. */
    R3PTRTYPE(PFNPGMRELOCATE)           pfnRelocate;
    /** User argument to the callback. */
    R3PTRTYPE(void *)                   pvUser;
    /** Mapping description / name. For easing debugging. */
    R3PTRTYPE(const char *)             pszDesc;
    /** Last 8 addresses that caused conflicts. */
    RTGCPTR                             aGCPtrConflicts[PGMMAPPING_CONFLICT_MAX];
    /** Number of conflicts for this hypervisor mapping. */
    uint32_t                            cConflicts;
    /** Number of page tables. */
    uint32_t                            cPTs;

    /** Array of page table mapping data. Each entry
     * describes one page table. The array can be longer
     * than the declared length.
     */
    struct
    {
        /** The HC physical address of the page table. */
        RTHCPHYS                        HCPhysPT;
        /** The HC physical address of the first PAE page table. */
        RTHCPHYS                        HCPhysPaePT0;
        /** The HC physical address of the second PAE page table. */
        RTHCPHYS                        HCPhysPaePT1;
        /** The HC virtual address of the 32-bit page table. */
        R3PTRTYPE(PX86PT)               pPTR3;
        /** The HC virtual address of the two PAE page tables (i.e. 1024 entries instead of 512). */
        R3PTRTYPE(PX86PTPAE)            paPaePTsR3;
        /** The RC virtual address of the 32-bit page table. */
        RCPTRTYPE(PX86PT)               pPTRC;
        /** The RC virtual address of the two PAE page tables. */
        RCPTRTYPE(PX86PTPAE)            paPaePTsRC;
        /** The R0 virtual address of the 32-bit page table. */
        R0PTRTYPE(PX86PT)               pPTR0;
        /** The R0 virtual address of the two PAE page tables. */
        R0PTRTYPE(PX86PTPAE)            paPaePTsR0;
    } aPTs[1];
} PGMMAPPING;
/** Pointer to structure for tracking GC Mappings. */
typedef struct PGMMAPPING *PPGMMAPPING;


/**
 * Physical page access handler structure.
 *
 * This is used to keep track of physical address ranges
 * which are being monitored in some kind of way.
 */
typedef struct PGMPHYSHANDLER
{
    AVLROGCPHYSNODECORE                 Core;
    /** Access type. */
    PGMPHYSHANDLERTYPE                  enmType;
    /** Number of pages to update. */
    uint32_t                            cPages;
    /** Pointer to R3 callback function. */
    R3PTRTYPE(PFNPGMR3PHYSHANDLER)      pfnHandlerR3;
    /** User argument for R3 handlers. */
    R3PTRTYPE(void *)                   pvUserR3;
    /** Pointer to R0 callback function. */
    R0PTRTYPE(PFNPGMR0PHYSHANDLER)      pfnHandlerR0;
    /** User argument for R0 handlers. */
    R0PTRTYPE(void *)                   pvUserR0;
    /** Pointer to RC callback function. */
    RCPTRTYPE(PFNPGMRCPHYSHANDLER)      pfnHandlerRC;
    /** User argument for RC handlers. */
    RCPTRTYPE(void *)                   pvUserRC;
    /** Description / Name. For easing debugging. */
    R3PTRTYPE(const char *)             pszDesc;
#ifdef VBOX_WITH_STATISTICS
    /** Profiling of this handler. */
    STAMPROFILE                         Stat;
#endif
} PGMPHYSHANDLER;
/** Pointer to a physical page access handler structure. */
typedef PGMPHYSHANDLER *PPGMPHYSHANDLER;


/**
 * Cache node for the physical addresses covered by a virtual handler.
 */
typedef struct PGMPHYS2VIRTHANDLER
{
    /** Core node for the tree based on physical ranges. */
    AVLROGCPHYSNODECORE                 Core;
    /** Offset from this struct to the PGMVIRTHANDLER structure. */
    int32_t                             offVirtHandler;
    /** Offset of the next alias relative to this one.
     * Bit 0 is used for indicating whether we're in the tree.
     * Bit 1 is used for indicating that we're the head node.
     */
    int32_t                             offNextAlias;
} PGMPHYS2VIRTHANDLER;
/** Pointer to a phys to virtual handler structure. */
typedef PGMPHYS2VIRTHANDLER *PPGMPHYS2VIRTHANDLER;

/** The bit in PGMPHYS2VIRTHANDLER::offNextAlias used to indicate that the
 * node is in the tree. */
#define PGMPHYS2VIRTHANDLER_IN_TREE     RT_BIT(0)
/** The bit in PGMPHYS2VIRTHANDLER::offNextAlias used to indicate that the
 * node is the head of an alias chain.
 * The PGMPHYS2VIRTHANDLER_IN_TREE is always set if this bit is set. */
#define PGMPHYS2VIRTHANDLER_IS_HEAD     RT_BIT(1)
/** The mask to apply to PGMPHYS2VIRTHANDLER::offNextAlias to get the offset. */
#define PGMPHYS2VIRTHANDLER_OFF_MASK    (~(int32_t)3)
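
/*
 * Editor's note (illustrative sketch, not part of the original header):
 * decoding PGMPHYS2VIRTHANDLER::offNextAlias.  The low two bits are the
 * IN_TREE / IS_HEAD flags defined above; the remaining bits are a signed
 * byte offset to the next alias node.  The helper below is hypothetical,
 * including the assumption that a zero offset terminates the chain; the
 * structure, flag and mask names are real.
 */
#if 0 /* example only */
static PPGMPHYS2VIRTHANDLER pgmExampleNextAlias(PPGMPHYS2VIRTHANDLER pPhys2Virt)
{
    int32_t off = pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK;
    if (!off)
        return NULL; /* assumed end of the alias chain */
    return (PPGMPHYS2VIRTHANDLER)((intptr_t)pPhys2Virt + off);
}
#endif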


/**
 * Virtual page access handler structure.
 *
 * This is used to keep track of virtual address ranges
 * which are being monitored in some kind of way.
 */
typedef struct PGMVIRTHANDLER
{
    /** Core node for the tree based on virtual ranges. */
    AVLROGCPTRNODECORE                  Core;
    /** Size of the range (in bytes). */
    RTGCPTR                             cb;
    /** Number of cache pages. */
    uint32_t                            cPages;
    /** Access type. */
    PGMVIRTHANDLERTYPE                  enmType;
    /** Pointer to the RC callback function. */
    RCPTRTYPE(PFNPGMRCVIRTHANDLER)      pfnHandlerRC;
#if HC_ARCH_BITS == 64
    RTRCPTR                             padding;
#endif
    /** Pointer to the R3 callback function for invalidation. */
    R3PTRTYPE(PFNPGMR3VIRTINVALIDATE)   pfnInvalidateR3;
    /** Pointer to the R3 callback function. */
    R3PTRTYPE(PFNPGMR3VIRTHANDLER)      pfnHandlerR3;
    /** Description / Name. For easing debugging. */
    R3PTRTYPE(const char *)             pszDesc;
#ifdef VBOX_WITH_STATISTICS
    /** Profiling of this handler. */
    STAMPROFILE                         Stat;
#endif
    /** Array of cached physical addresses for the monitored range. */
    PGMPHYS2VIRTHANDLER                 aPhysToVirt[HC_ARCH_BITS == 32 ? 1 : 2];
} PGMVIRTHANDLER;
/** Pointer to a virtual page access handler structure. */
typedef PGMVIRTHANDLER *PPGMVIRTHANDLER;


/**
 * Page type.
 *
 * @remarks This enum has to fit in a 3-bit field (see PGMPAGE::u3Type).
 * @remarks This is used in the saved state, so changes to it require bumping
 *          the saved state version.
 * @todo    So, convert to \#defines!
 */
typedef enum PGMPAGETYPE
{
    /** The usual invalid zero entry. */
    PGMPAGETYPE_INVALID = 0,
    /** RAM page. (RWX) */
    PGMPAGETYPE_RAM,
    /** MMIO2 page. (RWX) */
    PGMPAGETYPE_MMIO2,
    /** MMIO2 page aliased over an MMIO page. (RWX)
     * See PGMHandlerPhysicalPageAlias(). */
    PGMPAGETYPE_MMIO2_ALIAS_MMIO,
    /** Shadowed ROM. (RWX) */
    PGMPAGETYPE_ROM_SHADOW,
    /** ROM page. (R-X) */
    PGMPAGETYPE_ROM,
    /** MMIO page. (---) */
    PGMPAGETYPE_MMIO,
    /** End of valid entries. */
    PGMPAGETYPE_END
} PGMPAGETYPE;
AssertCompile(PGMPAGETYPE_END <= 7);

/** @name Page type predicates.
 * @{ */
#define PGMPAGETYPE_IS_READABLE(type)   ( (type) <= PGMPAGETYPE_ROM )
#define PGMPAGETYPE_IS_WRITEABLE(type)  ( (type) <= PGMPAGETYPE_ROM_SHADOW )
#define PGMPAGETYPE_IS_RWX(type)        ( (type) <= PGMPAGETYPE_ROM_SHADOW )
#define PGMPAGETYPE_IS_ROX(type)        ( (type) == PGMPAGETYPE_ROM )
#define PGMPAGETYPE_IS_NP(type)         ( (type) == PGMPAGETYPE_MMIO )
/** @} */


/**
 * A Physical Guest Page tracking structure.
 *
 * The format of this structure is complicated because we have to fit a lot
 * of information into as few bits as possible. The format is also subject
 * to change (there is one coming up soon), which means that we'll be using
 * the PGM_PAGE_GET_*, PGM_PAGE_IS_* and PGM_PAGE_SET_* macros for *all*
 * accesses to the structure.
 */
typedef struct PGMPAGE
{
    /** The physical address and a whole lot of other stuff. All bits are used! */
    RTHCPHYS    HCPhysX;
    /** The page state. */
    uint32_t    u2StateX : 2;
    /** Flag indicating that a write monitored page was written to when set. */
    uint32_t    fWrittenToX : 1;
    /** For later. */
    uint32_t    fSomethingElse : 1;
    /** The Page ID.
     * @todo Merge with HCPhysX once we've liberated HCPhysX of its stuff.
     *       The HCPhysX will then be 100% static. */
    uint32_t    idPageX : 28;
    /** The page type (PGMPAGETYPE). */
    uint32_t    u3Type : 3;
    /** The physical handler state (PGM_PAGE_HNDL_PHYS_STATE*). */
    uint32_t    u2HandlerPhysStateX : 2;
    /** The virtual handler state (PGM_PAGE_HNDL_VIRT_STATE*). */
    uint32_t    u2HandlerVirtStateX : 2;
    uint32_t    u29B : 25;
} PGMPAGE;
AssertCompileSize(PGMPAGE, 16);
/** Pointer to a physical guest page. */
typedef PGMPAGE *PPGMPAGE;
/** Pointer to a const physical guest page. */
typedef const PGMPAGE *PCPGMPAGE;
/** Pointer to a physical guest page pointer. */
typedef PPGMPAGE *PPPGMPAGE;


/**
 * Clears the page structure.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_CLEAR(pPage) \
    do { \
        (pPage)->HCPhysX        = 0; \
        (pPage)->u2StateX       = 0; \
        (pPage)->fWrittenToX    = 0; \
        (pPage)->fSomethingElse = 0; \
        (pPage)->idPageX        = 0; \
        (pPage)->u3Type         = 0; \
        (pPage)->u29B           = 0; \
    } while (0)

/**
 * Initializes the page structure.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 * @param   _HCPhys     The host physical address of the page.
 * @param   _idPage     The page ID.
 * @param   _uType      The page type (PGMPAGETYPE).
 * @param   _uState     The page state (PGM_PAGE_STATE_*).
 */
#define PGM_PAGE_INIT(pPage, _HCPhys, _idPage, _uType, _uState) \
    do { \
        (pPage)->HCPhysX        = (_HCPhys); \
        (pPage)->u2StateX       = (_uState); \
        (pPage)->fWrittenToX    = 0; \
        (pPage)->fSomethingElse = 0; \
        (pPage)->idPageX        = (_idPage); \
        /*(pPage)->u3Type       = (_uType); - later */ \
        PGM_PAGE_SET_TYPE(pPage, _uType); \
        (pPage)->u29B           = 0; \
    } while (0)

/**
 * Initializes the page structure of a ZERO page.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_INIT_ZERO(pPage, pVM, _uType) \
    PGM_PAGE_INIT(pPage, (pVM)->pgm.s.HCPhysZeroPg, NIL_GMM_PAGEID, (_uType), PGM_PAGE_STATE_ZERO)
/** Temporary hack. Replaced by PGM_PAGE_INIT_ZERO once the old code is kicked out. */
# define PGM_PAGE_INIT_ZERO_REAL(pPage, pVM, _uType) \
    PGM_PAGE_INIT(pPage, (pVM)->pgm.s.HCPhysZeroPg, NIL_GMM_PAGEID, (_uType), PGM_PAGE_STATE_ZERO)


/** @name The Page state, PGMPAGE::u2StateX.
 * @{ */
/** The zero page.
 * This is a per-VM page that's never ever mapped writable. */
#define PGM_PAGE_STATE_ZERO             0
/** An allocated page.
 * This is a per-VM page allocated from the page pool (or wherever
 * we get MMIO2 pages from if the type is MMIO2).
 */
#define PGM_PAGE_STATE_ALLOCATED        1
/** An allocated page that's being monitored for writes.
 * The shadow page table mappings are read-only. When a write occurs, the
 * fWrittenTo member is set, the page is remapped read-write and the state
 * moves back to allocated. */
#define PGM_PAGE_STATE_WRITE_MONITORED  2
/** The page is shared, aka. copy-on-write.
 * This is a page that's shared with other VMs. */
#define PGM_PAGE_STATE_SHARED           3
/** @} */


/**
 * Gets the page state.
 * @returns page state (PGM_PAGE_STATE_*).
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_STATE(pPage)       ( (pPage)->u2StateX )

/**
 * Sets the page state.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 * @param   _uState     The new page state.
 */
#define PGM_PAGE_SET_STATE(pPage, _uState) \
    do { (pPage)->u2StateX = (_uState); } while (0)


/**
 * Gets the host physical address of the guest page.
 * @returns host physical address (RTHCPHYS).
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_HCPHYS(pPage)      ( (pPage)->HCPhysX & UINT64_C(0x0000fffffffff000) )

/**
 * Sets the host physical address of the guest page.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 * @param   _HCPhys     The new host physical address.
 */
#define PGM_PAGE_SET_HCPHYS(pPage, _HCPhys) \
    do { (pPage)->HCPhysX = (((pPage)->HCPhysX) & UINT64_C(0xffff000000000fff)) \
                          | ((_HCPhys) & UINT64_C(0x0000fffffffff000)); } while (0)

/**
 * Gets the Page ID.
 * @returns The Page ID; NIL_GMM_PAGEID if it's a ZERO page.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_PAGEID(pPage)      ( (pPage)->idPageX )
/* later:
#define PGM_PAGE_GET_PAGEID(pPage)      (  ((uint32_t)(pPage)->HCPhysX >> (48 - 12))
                                         | ((uint32_t)(pPage)->HCPhysX & 0xfff) )
*/
/**
 * Sets the Page ID.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 * @param   _idPage     The new page ID.
 */
#define PGM_PAGE_SET_PAGEID(pPage, _idPage)  do { (pPage)->idPageX = (_idPage); } while (0)
/* later:
#define PGM_PAGE_SET_PAGEID(pPage, _idPage)  do { (pPage)->HCPhysX = (((pPage)->HCPhysX) & UINT64_C(0x0000fffffffff000)) \
                                                                   | ((_idPage) & 0xfff) \
                                                                   | (((_idPage) & 0x0ffff000) << (48-12)); } while (0)
*/

/**
 * Gets the Chunk ID.
 * @returns The Chunk ID; NIL_GMM_CHUNKID if it's a ZERO page.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_CHUNKID(pPage)     ( (pPage)->idPageX >> GMM_CHUNKID_SHIFT )
/* later:
#if GMM_CHUNKID_SHIFT == 12
# define PGM_PAGE_GET_CHUNKID(pPage)    ( (uint32_t)((pPage)->HCPhysX >> 48) )
#elif GMM_CHUNKID_SHIFT > 12
# define PGM_PAGE_GET_CHUNKID(pPage)    ( (uint32_t)((pPage)->HCPhysX >> (48 + (GMM_CHUNKID_SHIFT - 12)) )
#elif GMM_CHUNKID_SHIFT < 12
# define PGM_PAGE_GET_CHUNKID(pPage)    (   ( (uint32_t)((pPage)->HCPhysX >> 48) << (12 - GMM_CHUNKID_SHIFT) ) \
                                          | ( (uint32_t)((pPage)->HCPhysX & 0xfff) >> GMM_CHUNKID_SHIFT ) )
#else
# error "GMM_CHUNKID_SHIFT isn't defined or something."
#endif
*/

/**
 * Gets the index of the page within the allocation chunk.
 * @returns The page index.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_PAGE_IN_CHUNK(pPage)   ( (pPage)->idPageX & GMM_PAGEID_IDX_MASK )
/* later:
#if GMM_CHUNKID_SHIFT <= 12
# define PGM_PAGE_GET_PAGE_IN_CHUNK(pPage)  ( (uint32_t)((pPage)->HCPhysX & GMM_PAGEID_IDX_MASK) )
#else
# define PGM_PAGE_GET_PAGE_IN_CHUNK(pPage)  (   (uint32_t)((pPage)->HCPhysX & 0xfff) \
                                              | ( (uint32_t)((pPage)->HCPhysX >> 48) & (RT_BIT_32(GMM_CHUNKID_SHIFT - 12) - 1) ) )
#endif
*/


/**
 * Gets the page type.
 * @returns The page type.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_TYPE(pPage)        (pPage)->u3Type

/**
 * Sets the page type.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 * @param   _enmType    The new page type (PGMPAGETYPE).
 */
#define PGM_PAGE_SET_TYPE(pPage, _enmType) \
    do { (pPage)->u3Type = (_enmType); } while (0)

/**
 * Checks if the page is marked for MMIO.
 * @returns true/false.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_IS_MMIO(pPage)         ( (pPage)->u3Type == PGMPAGETYPE_MMIO )

/**
 * Checks if the page is backed by the ZERO page.
 * @returns true/false.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_IS_ZERO(pPage)         ( (pPage)->u2StateX == PGM_PAGE_STATE_ZERO )

/**
 * Checks if the page is backed by a SHARED page.
 * @returns true/false.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_IS_SHARED(pPage)       ( (pPage)->u2StateX == PGM_PAGE_STATE_SHARED )


/**
 * Marks the page as written to (for GMM change monitoring).
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_SET_WRITTEN_TO(pPage)  do { (pPage)->fWrittenToX = 1; } while (0)

/**
 * Clears the written-to indicator.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_CLEAR_WRITTEN_TO(pPage) do { (pPage)->fWrittenToX = 0; } while (0)

/**
 * Checks if the page was marked as written-to.
 * @returns true/false.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_IS_WRITTEN_TO(pPage)   ( (pPage)->fWrittenToX )
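
/*
 * Editor's note (illustrative sketch, not part of the original header): the
 * write-monitoring lifecycle documented with PGM_PAGE_STATE_WRITE_MONITORED,
 * expressed with the accessor macros above.  When a write hits a monitored
 * page, the page is flagged written-to and returned to the ALLOCATED state
 * so GMM can spot modified pages.  The function is hypothetical; the macros
 * and state values are real.
 */
#if 0 /* example only */
static void pgmExampleHandleWriteToMonitoredPage(PPGMPAGE pPage)
{
    if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
    {
        PGM_PAGE_SET_WRITTEN_TO(pPage);
        PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
        /* ... the shadow mapping is then remapped read-write ... */
    }
}
#endif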


/** @name Physical Access Handler State values (PGMPAGE::u2HandlerPhysStateX).
 *
 * @remarks The values are assigned in order of priority, so we can calculate
 *          the correct state for a page with different handlers installed.
 * @{ */
/** No handler installed. */
#define PGM_PAGE_HNDL_PHYS_STATE_NONE       0
/** Monitoring is temporarily disabled. */
#define PGM_PAGE_HNDL_PHYS_STATE_DISABLED   1
/** Write access is monitored. */
#define PGM_PAGE_HNDL_PHYS_STATE_WRITE      2
/** All access is monitored. */
#define PGM_PAGE_HNDL_PHYS_STATE_ALL        3
/** @} */

/**
 * Gets the physical access handler state of a page.
 * @returns PGM_PAGE_HNDL_PHYS_STATE_* value.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)     ( (pPage)->u2HandlerPhysStateX )

/**
 * Sets the physical access handler state of a page.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 * @param   _uState     The new state value.
 */
#define PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, _uState) \
    do { (pPage)->u2HandlerPhysStateX = (_uState); } while (0)

/**
 * Checks if the page has any physical access handlers, including temporarily disabled ones.
 * @returns true/false
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_HAS_ANY_PHYSICAL_HANDLERS(pPage)    ( (pPage)->u2HandlerPhysStateX != PGM_PAGE_HNDL_PHYS_STATE_NONE )

/**
 * Checks if the page has any active physical access handlers.
 * @returns true/false
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage) ( (pPage)->u2HandlerPhysStateX >= PGM_PAGE_HNDL_PHYS_STATE_WRITE )


/** @name Virtual Access Handler State values (PGMPAGE::u2HandlerVirtStateX).
 *
 * @remarks The values are assigned in order of priority, so we can calculate
 *          the correct state for a page with different handlers installed.
 * @{ */
/** No handler installed. */
#define PGM_PAGE_HNDL_VIRT_STATE_NONE       0
/* 1 is reserved so the lineup is identical with the physical ones. */
/** Write access is monitored. */
#define PGM_PAGE_HNDL_VIRT_STATE_WRITE      2
/** All access is monitored. */
#define PGM_PAGE_HNDL_VIRT_STATE_ALL        3
/** @} */

/**
 * Gets the virtual access handler state of a page.
 * @returns PGM_PAGE_HNDL_VIRT_STATE_* value.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_HNDL_VIRT_STATE(pPage)     ( (pPage)->u2HandlerVirtStateX )

/**
 * Sets the virtual access handler state of a page.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 * @param   _uState     The new state value.
 */
#define PGM_PAGE_SET_HNDL_VIRT_STATE(pPage, _uState) \
    do { (pPage)->u2HandlerVirtStateX = (_uState); } while (0)

/**
 * Checks if the page has any virtual access handlers.
 * @returns true/false
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_HAS_ANY_VIRTUAL_HANDLERS(pPage)    ( (pPage)->u2HandlerVirtStateX != PGM_PAGE_HNDL_VIRT_STATE_NONE )

/**
 * Same as PGM_PAGE_HAS_ANY_VIRTUAL_HANDLERS - virtual handler pages cannot
 * be temporarily disabled.
 * @returns true/false
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage) PGM_PAGE_HAS_ANY_VIRTUAL_HANDLERS(pPage)



/**
 * Checks if the page has any access handlers, including temporarily disabled ones.
 * @returns true/false
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_HAS_ANY_HANDLERS(pPage) \
    (   (pPage)->u2HandlerPhysStateX != PGM_PAGE_HNDL_PHYS_STATE_NONE \
     || (pPage)->u2HandlerVirtStateX != PGM_PAGE_HNDL_VIRT_STATE_NONE )

/**
 * Checks if the page has any active access handlers.
 * @returns true/false
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) \
    (   (pPage)->u2HandlerPhysStateX >= PGM_PAGE_HNDL_PHYS_STATE_WRITE \
     || (pPage)->u2HandlerVirtStateX >= PGM_PAGE_HNDL_VIRT_STATE_WRITE )

/**
 * Checks if the page has any active access handlers catching all accesses.
 * @returns true/false
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage) \
    (   (pPage)->u2HandlerPhysStateX == PGM_PAGE_HNDL_PHYS_STATE_ALL \
     || (pPage)->u2HandlerVirtStateX == PGM_PAGE_HNDL_VIRT_STATE_ALL )




/** @def PGM_PAGE_GET_TRACKING
 * Gets the packed shadow page pool tracking data associated with a guest page.
 * @returns uint16_t containing the data.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_TRACKING(pPage) \
    ( *((uint16_t *)&(pPage)->HCPhysX + 3) )

/** @def PGM_PAGE_SET_TRACKING
 * Sets the packed shadow page pool tracking data associated with a guest page.
 * @param   pPage           Pointer to the physical guest page tracking structure.
 * @param   u16TrackingData The tracking data to store.
 */
#define PGM_PAGE_SET_TRACKING(pPage, u16TrackingData) \
    do { *((uint16_t *)&(pPage)->HCPhysX + 3) = (u16TrackingData); } while (0)

/** @def PGM_PAGE_GET_TD_CREFS
 * Gets the @a cRefs tracking data member.
 * @returns cRefs.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_TD_CREFS(pPage) \
    ((PGM_PAGE_GET_TRACKING(pPage) >> PGMPOOL_TD_CREFS_SHIFT) & PGMPOOL_TD_CREFS_MASK)

/** @def PGM_PAGE_GET_TD_IDX
 * Gets the @a idx tracking data member (shadow page pool index).
 * @returns idx.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_TD_IDX(pPage) \
    ((PGM_PAGE_GET_TRACKING(pPage) >> PGMPOOL_TD_IDX_SHIFT) & PGMPOOL_TD_IDX_MASK)
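
/*
 * Editor's note (illustrative sketch, not part of the original header): the
 * tracking word packs a reference count and a shadow page pool index; the
 * PGMPOOL_TD_* shift/mask constants are defined with the page pool types
 * elsewhere in this header.  The function below is a hypothetical decoding
 * example; LogFlow is the real VBox logging macro.
 */
#if 0 /* example only */
static void pgmExampleDumpTracking(PPGMPAGE pPage)
{
    uint16_t const u16 = PGM_PAGE_GET_TRACKING(pPage);
    if (u16) /* non-zero means at least one shadow page references this page */
        LogFlow(("tracking: cRefs=%u idx=%#x\n",
                 PGM_PAGE_GET_TD_CREFS(pPage), PGM_PAGE_GET_TD_IDX(pPage)));
}
#endif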

/**
 * Ram range for GC Phys to HC Phys conversion.
 *
 * Can be used for HC Virt to GC Phys and HC Virt to HC Phys
 * conversions too, but we'll let MM handle that for now.
 *
 * This structure is used in linked lists in both GC and HC.
 */
typedef struct PGMRAMRANGE
{
    /** Start of the range. Page aligned. */
    RTGCPHYS                            GCPhys;
    /** Size of the range. (Page aligned of course). */
    RTGCPHYS                            cb;
    /** Pointer to the next RAM range - for R3. */
    R3PTRTYPE(struct PGMRAMRANGE *)     pNextR3;
    /** Pointer to the next RAM range - for R0. */
    R0PTRTYPE(struct PGMRAMRANGE *)     pNextR0;
    /** Pointer to the next RAM range - for RC. */
    RCPTRTYPE(struct PGMRAMRANGE *)     pNextRC;
    /** PGM_RAM_RANGE_FLAGS_* flags. */
    uint32_t                            fFlags;
    /** Last address in the range (inclusive). Page aligned (-1). */
    RTGCPHYS                            GCPhysLast;
    /** Start of the HC mapping of the range. This is only used for MMIO2. */
    R3PTRTYPE(void *)                   pvR3;
    /** The range description. */
    R3PTRTYPE(const char *)             pszDesc;
    /** Pointer to self - R0 pointer. */
    R0PTRTYPE(struct PGMRAMRANGE *)     pSelfR0;
    /** Pointer to self - RC pointer. */
    RCPTRTYPE(struct PGMRAMRANGE *)     pSelfRC;
    /** Padding to make aPages aligned on sizeof(PGMPAGE). */
    uint32_t                            au32Alignment2[HC_ARCH_BITS == 32 ? 2 : 1];
    /** Array of physical guest page tracking structures. */
    PGMPAGE                             aPages[1];
} PGMRAMRANGE;
/** Pointer to Ram range for GC Phys to HC Phys conversion. */
typedef PGMRAMRANGE *PPGMRAMRANGE;

/** @name PGMRAMRANGE::fFlags
 * @{ */
/** The RAM range is floating around as an independent guest mapping. */
#define PGM_RAM_RANGE_FLAGS_FLOATING    RT_BIT(20)
/** @} */
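
/*
 * Editor's note (illustrative sketch, not part of the original header): a
 * naive GCPhys -> PGMPAGE lookup over the RAM range list.  The real lookup
 * code in PGM does more (status codes, special-case handling); this helper
 * is hypothetical and takes the list head explicitly instead of digging it
 * out of the PGM instance data.
 */
#if 0 /* example only */
static PPGMPAGE pgmExamplePhysGetPage(PPGMRAMRANGE pRamHead, RTGCPHYS GCPhys)
{
    for (PPGMRAMRANGE pRam = pRamHead; pRam; pRam = pRam->pNextR3)
        if (GCPhys - pRam->GCPhys < pRam->cb) /* unsigned wrap-around range check */
            return &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
    return NULL; /* not backed by any registered RAM range */
}
#endif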


/**
 * Per page tracking structure for ROM image.
 *
 * A ROM image may have a shadow page, in which case we may have
 * two pages backing it. This structure contains the PGMPAGE for
 * both while PGMRAMRANGE has a copy of the active one. It is
 * important that these aren't out of sync in any regard other
 * than page pool tracking data.
 */
typedef struct PGMROMPAGE
{
    /** The page structure for the virgin ROM page. */
    PGMPAGE     Virgin;
    /** The page structure for the shadow RAM page. */
    PGMPAGE     Shadow;
    /** The current protection setting. */
    PGMROMPROT  enmProt;
    /** Pad the structure size to a multiple of 8. */
    uint32_t    u32Padding;
} PGMROMPAGE;
/** Pointer to a ROM page tracking structure. */
typedef PGMROMPAGE *PPGMROMPAGE;


/**
 * A registered ROM image.
 *
 * This is needed to keep track of ROM images since they generally
 * intrude into a PGMRAMRANGE. It also keeps track of additional
 * info like the two page sets (read-only virgin and read-write shadow)
 * and the current state of each page.
 *
 * Because access handlers cannot easily be executed in a different
 * context, the ROM ranges need to be accessible in all contexts.
 */
typedef struct PGMROMRANGE
{
    /** Pointer to the next range - R3. */
    R3PTRTYPE(struct PGMROMRANGE *)     pNextR3;
    /** Pointer to the next range - R0. */
    R0PTRTYPE(struct PGMROMRANGE *)     pNextR0;
    /** Pointer to the next range - RC. */
    RCPTRTYPE(struct PGMROMRANGE *)     pNextRC;
    /** Pointer alignment. */
    RTRCPTR                             GCPtrAlignment;
    /** Address of the range. */
    RTGCPHYS                            GCPhys;
    /** Address of the last byte in the range. */
    RTGCPHYS                            GCPhysLast;
    /** Size of the range. */
    RTGCPHYS                            cb;
    /** The flags (PGMPHYS_ROM_FLAG_*). */
    uint32_t                            fFlags;
    /** Alignment padding ensuring that aPages is sizeof(PGMROMPAGE) aligned. */
    uint32_t                            au32Alignemnt[HC_ARCH_BITS == 32 ? 7 : 3];
    /** Pointer to the original bits when PGMPHYS_ROM_FLAGS_PERMANENT_BINARY was specified.
     * This is used for strictness checks. */
    R3PTRTYPE(const void *)             pvOriginal;
    /** The ROM description. */
    R3PTRTYPE(const char *)             pszDesc;
    /** The per page tracking structures. */
    PGMROMPAGE                          aPages[1];
} PGMROMRANGE;
/** Pointer to a ROM range. */
typedef PGMROMRANGE *PPGMROMRANGE;


/**
 * A registered MMIO2 (= Device RAM) range.
 *
 * There are a few reasons why we need to keep track of these
 * registrations. One of them is the deregistration & cleanup
 * stuff, while another is that the PGMRAMRANGE associated with
 * such a region may have to be removed from the ram range list.
 *
 * Overlapping with a RAM range has to be 100% or none at all. The
 * pages in the existing RAM range must not be ROM nor MMIO. A guru
 * meditation will be raised if a partial overlap or an overlap of
 * ROM pages is encountered. On an overlap we will free all the
 * existing RAM pages and put in the ram range pages instead.
 */
typedef struct PGMMMIO2RANGE
{
    /** The owner of the range (a device). */
    PPDMDEVINSR3                        pDevInsR3;
    /** Pointer to the ring-3 mapping of the allocation. */
    RTR3PTR                             pvR3;
    /** Pointer to the next range - R3. */
    R3PTRTYPE(struct PGMMMIO2RANGE *)   pNextR3;
    /** Whether it's mapped or not. */
    bool                                fMapped;
    /** Whether it's overlapping or not. */
    bool                                fOverlapping;
    /** The PCI region number.
     * @remarks This ASSUMES that nobody will ever really need to have multiple
     *          PCI devices with matching MMIO region numbers on a single device. */
    uint8_t                             iRegion;
    /** Alignment padding for putting the ram range on a PGMPAGE alignment boundary. */
    uint8_t                             abAlignemnt[HC_ARCH_BITS == 32 ? 1 : 5];
    /** The associated RAM range. */
    PGMRAMRANGE                         RamRange;
} PGMMMIO2RANGE;
/** Pointer to a MMIO2 range. */
typedef PGMMMIO2RANGE *PPGMMMIO2RANGE;




/**
 * PGMPhysRead/Write cache entry.
 */
typedef struct PGMPHYSCACHEENTRY
{
    /** R3 pointer to physical page. */
    R3PTRTYPE(uint8_t *)    pbR3;
    /** GC Physical address for cache entry. */
    RTGCPHYS                GCPhys;
#if HC_ARCH_BITS == 64 && GC_ARCH_BITS == 32
    RTGCPHYS                u32Padding0; /**< alignment padding. */
#endif
} PGMPHYSCACHEENTRY;

/**
 * PGMPhysRead/Write cache to reduce REM memory access overhead.
 */
typedef struct PGMPHYSCACHE
{
    /** Bitmap of valid cache entries. */
    uint64_t                aEntries;
    /** Cache entries. */
    PGMPHYSCACHEENTRY       Entry[PGM_MAX_PHYSCACHE_ENTRIES];
} PGMPHYSCACHE;
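
/*
 * Editor's note (illustrative sketch, not part of the original header): how
 * the cache above is indexed.  The page frame number selects a slot via
 * PGM_MAX_PHYSCACHE_ENTRIES_MASK and the aEntries bitmap records which slots
 * hold valid data.  The lookup function below is a hypothetical example;
 * RT_BIT_64 and PAGE_OFFSET_MASK are real IPRT/VBox definitions.
 */
#if 0 /* example only */
static uint8_t *pgmExamplePhysCacheLookup(PGMPHYSCACHE *pCache, RTGCPHYS GCPhys)
{
    uint64_t iEntry = (GCPhys >> PAGE_SHIFT) & PGM_MAX_PHYSCACHE_ENTRIES_MASK;
    if (   (pCache->aEntries & RT_BIT_64(iEntry))
        && pCache->Entry[iEntry].GCPhys == (GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK))
        return pCache->Entry[iEntry].pbR3 + (GCPhys & PAGE_OFFSET_MASK); /* hit */
    return NULL; /* miss: caller resolves the page and refills the slot */
}
#endif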


/** Pointer to an allocation chunk ring-3 mapping. */
typedef struct PGMCHUNKR3MAP *PPGMCHUNKR3MAP;
/** Pointer to an allocation chunk ring-3 mapping pointer. */
typedef PPGMCHUNKR3MAP *PPPGMCHUNKR3MAP;

/**
 * Ring-3 tracking structure for an allocation chunk ring-3 mapping.
 *
 * The primary tree (Core) uses the chunk id as key.
 * The secondary tree (AgeCore) is used for ageing and uses the ageing sequence number as key.
 */
typedef struct PGMCHUNKR3MAP
{
    /** The key is the chunk id. */
    AVLU32NODECORE                      Core;
    /** The key is the ageing sequence number. */
    AVLLU32NODECORE                     AgeCore;
    /** The current age thingy. */
    uint32_t                            iAge;
    /** The current reference count. */
    uint32_t volatile                   cRefs;
    /** The current permanent reference count. */
    uint32_t volatile                   cPermRefs;
    /** The mapping address. */
    void                               *pv;
} PGMCHUNKR3MAP;

/**
 * Allocation chunk ring-3 mapping TLB entry.
 */
typedef struct PGMCHUNKR3MAPTLBE
{
    /** The chunk id. */
    uint32_t volatile                   idChunk;
#if HC_ARCH_BITS == 64
    uint32_t                            u32Padding; /**< alignment padding. */
#endif
    /** The chunk map. */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    R3PTRTYPE(PPGMCHUNKR3MAP) volatile  pChunk;
#else
    R3R0PTRTYPE(PPGMCHUNKR3MAP) volatile pChunk;
#endif
} PGMCHUNKR3MAPTLBE;
/** Pointer to an allocation chunk ring-3 mapping TLB entry. */
typedef PGMCHUNKR3MAPTLBE *PPGMCHUNKR3MAPTLBE;

/** The number of TLB entries in PGMCHUNKR3MAPTLB.
 * @remark Must be a power of two value. */
#define PGM_CHUNKR3MAPTLB_ENTRIES       32

/**
 * Allocation chunk ring-3 mapping TLB.
 *
 * @remarks We use a TLB to speed up lookups by avoiding walking the AVL.
 *          At first glance this might look kinda odd since AVL trees are
 *          supposed to give the most optimal lookup times of all trees
 *          due to their balancing. However, take a tree with 1023 nodes
 *          in it, that's 10 levels, meaning that most searches have to go
 *          down 9 levels before they find what they want. This isn't fast
 *          compared to a TLB hit. There is the factor of cache misses,
 *          and of course the problem with trees and branch prediction.
 *          This is why we use TLBs in front of most of the trees.
 *
 * @todo    Generalize this TLB + AVL stuff, shouldn't be all that
 *          difficult when we switch to the new inlined AVL trees (from kStuff).
 */
typedef struct PGMCHUNKR3MAPTLB
{
    /** The TLB entries. */
    PGMCHUNKR3MAPTLBE   aEntries[PGM_CHUNKR3MAPTLB_ENTRIES];
} PGMCHUNKR3MAPTLB;

/**
 * Calculates the index of a guest page in the Ring-3 Chunk TLB.
 * @returns Chunk TLB index.
 * @param   idChunk     The Chunk ID.
 */
#define PGM_CHUNKR3MAPTLB_IDX(idChunk)  ( (idChunk) & (PGM_CHUNKR3MAPTLB_ENTRIES - 1) )
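
/*
 * Editor's note (illustrative sketch, not part of the original header): the
 * TLB-then-tree lookup pattern described in the remarks above.  RTAvlU32Get
 * is the real IPRT AVL accessor (iprt/avl.h); the helper, its parameters and
 * the refill policy are hypothetical.  Locking is omitted.
 */
#if 0 /* example only */
static PPGMCHUNKR3MAP pgmExampleChunkLookup(PGMCHUNKR3MAPTLB *pTlb, PAVLU32TREE pTree, uint32_t idChunk)
{
    PGMCHUNKR3MAPTLBE *pTlbe = &pTlb->aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
    if (pTlbe->idChunk == idChunk)
        return pTlbe->pChunk;                                          /* TLB hit */
    PPGMCHUNKR3MAP pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(pTree, idChunk); /* slow path */
    if (pMap)
    {
        pTlbe->idChunk = idChunk;                                      /* refill the slot */
        pTlbe->pChunk  = pMap;
    }
    return pMap;
}
#endif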


/**
 * Ring-3 guest page mapping TLB entry.
 * @remarks used in ring-0 as well at the moment.
 */
typedef struct PGMPAGER3MAPTLBE
{
    /** Address of the page. */
    RTGCPHYS volatile                   GCPhys;
    /** The guest page. */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    R3PTRTYPE(PPGMPAGE) volatile        pPage;
#else
    R3R0PTRTYPE(PPGMPAGE) volatile      pPage;
#endif
    /** Pointer to the page mapping tracking structure, PGMCHUNKR3MAP. */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    R3PTRTYPE(PPGMCHUNKR3MAP) volatile  pMap;
#else
    R3R0PTRTYPE(PPGMCHUNKR3MAP) volatile pMap;
#endif
    /** The address. */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    R3PTRTYPE(void *) volatile          pv;
#else
    R3R0PTRTYPE(void *) volatile        pv;
#endif
#if HC_ARCH_BITS == 32
    uint32_t                            u32Padding; /**< alignment padding. */
#endif
} PGMPAGER3MAPTLBE;
/** Pointer to an entry in the HC physical TLB. */
typedef PGMPAGER3MAPTLBE *PPGMPAGER3MAPTLBE;


/** The number of entries in the ring-3 guest page mapping TLB.
 * @remarks The value must be a power of two. */
#define PGM_PAGER3MAPTLB_ENTRIES        64

/**
 * Ring-3 guest page mapping TLB.
 * @remarks used in ring-0 as well at the moment.
 */
typedef struct PGMPAGER3MAPTLB
{
    /** The TLB entries. */
    PGMPAGER3MAPTLBE    aEntries[PGM_PAGER3MAPTLB_ENTRIES];
} PGMPAGER3MAPTLB;
/** Pointer to the ring-3 guest page mapping TLB. */
typedef PGMPAGER3MAPTLB *PPGMPAGER3MAPTLB;

/**
 * Calculates the index of the TLB entry for the specified guest page.
 * @returns Physical TLB index.
 * @param   GCPhys      The guest physical address.
 */
#define PGM_PAGER3MAPTLB_IDX(GCPhys)    ( ((GCPhys) >> PAGE_SHIFT) & (PGM_PAGER3MAPTLB_ENTRIES - 1) )


/**
 * Mapping cache usage set entry.
 *
 * @remarks 16-bit ints were chosen as the set is not expected to be used beyond
 *          the dynamic ring-0 and (to some extent) raw-mode context mapping
 *          cache. If it's extended to include ring-3, well, then something will
 *          have to be changed here...
 */
typedef struct PGMMAPSETENTRY
{
    /** The mapping cache index. */
    uint16_t                    iPage;
    /** The number of references.
     * The max is UINT16_MAX - 1. */
    uint16_t                    cRefs;
#if HC_ARCH_BITS == 64
    uint32_t                    alignment;
#endif
    /** Pointer to the page. */
    RTR0PTR                     pvPage;
    /** The physical address for this entry. */
    RTHCPHYS                    HCPhys;
} PGMMAPSETENTRY;
/** Pointer to a mapping cache usage set entry. */
typedef PGMMAPSETENTRY *PPGMMAPSETENTRY;

/**
 * Mapping cache usage set.
 *
 * This is used in ring-0 and the raw-mode context to track dynamic mappings
 * done during exits / traps.
 */
typedef struct PGMMAPSET
{
    /** The number of occupied entries.
     * This is PGMMAPSET_CLOSED if the set is closed and we're not supposed to do
     * dynamic mappings. */
    uint32_t                    cEntries;
    /** The start of the current subset.
     * This is UINT32_MAX if no subset is currently open. */
    uint32_t                    iSubset;
    /** The index of the current CPU, only valid if the set is open. */
    int32_t                     iCpu;
#if HC_ARCH_BITS == 64
    uint32_t                    alignment;
#endif
    /** The entries. */
    PGMMAPSETENTRY              aEntries[64];
    /** HCPhys -> iEntry fast lookup table.
     * Use PGMMAPSET_HASH for hashing.
     * The entries may or may not be valid, check against cEntries. */
    uint8_t                     aiHashTable[128];
} PGMMAPSET;
/** Pointer to the mapping cache set. */
typedef PGMMAPSET *PPGMMAPSET;

/** PGMMAPSET::cEntries value for a closed set. */
#define PGMMAPSET_CLOSED            UINT32_C(0xdeadc0fe)

/** Hash function for aiHashTable. */
#define PGMMAPSET_HASH(HCPhys)      (((HCPhys) >> PAGE_SHIFT) & 127)

/** The max fill size (strict builds). */
#define PGMMAPSET_MAX_FILL          (64U * 80U / 100U)
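
/*
 * Editor's note (illustrative sketch, not part of the original header): how
 * the aiHashTable fast path is meant to work.  A request hashes the physical
 * address; if the hashed slot points at a matching entry its reference count
 * is bumped, otherwise the set is searched and extended.  The function is
 * hypothetical; the structure members and PGMMAPSET_HASH are real.
 */
#if 0 /* example only */
static void pgmExampleMapSetRef(PPGMMAPSET pSet, RTHCPHYS HCPhys)
{
    unsigned iHash  = PGMMAPSET_HASH(HCPhys);
    unsigned iEntry = pSet->aiHashTable[iHash];
    if (   iEntry < pSet->cEntries
        && pSet->aEntries[iEntry].HCPhys == HCPhys)
        pSet->aEntries[iEntry].cRefs++;     /* fast path: hash hit */
    else
    {
        /* slow path: linear search; on a miss, append a new entry and
           record its index in aiHashTable[iHash] (omitted here). */
    }
}
#endif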


/** @name Context neutral page mapper TLB.
 *
 * Hoping to avoid some code and bug duplication, parts of the GCxxx->CCPtr
 * code are written in a kind of context neutral way. Time will show whether
 * this actually makes sense or not...
 *
 * @todo this needs to be reconsidered and dropped/redone since the ring-0
 *       context ends up using a global mapping cache on some platforms
 *       (darwin).
 *
 * @{ */
/** @typedef PPGMPAGEMAPTLB
 * The page mapper TLB pointer type for the current context. */
/** @typedef PPGMPAGEMAPTLBE
 * The page mapper TLB entry pointer type for the current context. */
/** @typedef PPPGMPAGEMAPTLBE
 * The page mapper TLB entry pointer pointer type for the current context. */
/** @def PGM_PAGEMAPTLB_ENTRIES
 * The number of TLB entries in the page mapper TLB for the current context. */
/** @def PGM_PAGEMAPTLB_IDX
 * Calculate the TLB index for a guest physical address.
 * @returns The TLB index.
 * @param   GCPhys      The guest physical address. */
/** @typedef PPGMPAGEMAP
 * Pointer to a page mapper unit for current context. */
/** @typedef PPPGMPAGEMAP
 * Pointer to a page mapper unit pointer for current context. */
#ifdef IN_RC
// typedef PPGMPAGEGCMAPTLB               PPGMPAGEMAPTLB;
// typedef PPGMPAGEGCMAPTLBE              PPGMPAGEMAPTLBE;
// typedef PPGMPAGEGCMAPTLBE             *PPPGMPAGEMAPTLBE;
# define PGM_PAGEMAPTLB_ENTRIES         PGM_PAGEGCMAPTLB_ENTRIES
# define PGM_PAGEMAPTLB_IDX(GCPhys)     PGM_PAGEGCMAPTLB_IDX(GCPhys)
 typedef void *                         PPGMPAGEMAP;
 typedef void **                        PPPGMPAGEMAP;
//#elif IN_RING0
// typedef PPGMPAGER0MAPTLB               PPGMPAGEMAPTLB;
// typedef PPGMPAGER0MAPTLBE              PPGMPAGEMAPTLBE;
// typedef PPGMPAGER0MAPTLBE             *PPPGMPAGEMAPTLBE;
//# define PGM_PAGEMAPTLB_ENTRIES        PGM_PAGER0MAPTLB_ENTRIES
//# define PGM_PAGEMAPTLB_IDX(GCPhys)    PGM_PAGER0MAPTLB_IDX(GCPhys)
// typedef PPGMCHUNKR0MAP                 PPGMPAGEMAP;
// typedef PPPGMCHUNKR0MAP                PPPGMPAGEMAP;
#else
 typedef PPGMPAGER3MAPTLB               PPGMPAGEMAPTLB;
 typedef PPGMPAGER3MAPTLBE              PPGMPAGEMAPTLBE;
 typedef PPGMPAGER3MAPTLBE             *PPPGMPAGEMAPTLBE;
# define PGM_PAGEMAPTLB_ENTRIES         PGM_PAGER3MAPTLB_ENTRIES
# define PGM_PAGEMAPTLB_IDX(GCPhys)     PGM_PAGER3MAPTLB_IDX(GCPhys)
 typedef PPGMCHUNKR3MAP                 PPGMPAGEMAP;
 typedef PPPGMCHUNKR3MAP                PPPGMPAGEMAP;
#endif
/** @} */


/** @name PGM Pool Indexes.
 * Aka. the unique shadow page identifier.
 * @{ */
/** NIL page pool IDX. */
#define NIL_PGMPOOL_IDX                 0
/** The first special index. */
#define PGMPOOL_IDX_FIRST_SPECIAL       1
/** Page directory (32-bit root). */
#define PGMPOOL_IDX_PD                  1
/** Page Directory Pointer Table (PAE root). */
#define PGMPOOL_IDX_PDPT                2
/** AMD64 CR3 level index. */
#define PGMPOOL_IDX_AMD64_CR3           3
/** Nested paging root. */
#define PGMPOOL_IDX_NESTED_ROOT         4
/** The first normal index. */
#define PGMPOOL_IDX_FIRST               5
/** The last valid index. (inclusive, 14 bits) */
#define PGMPOOL_IDX_LAST                0x3fff
/** @} */

/** The NIL index for the parent chain. */
#define NIL_PGMPOOL_USER_INDEX          ((uint16_t)0xffff)

/**
 * Node in the chain linking a shadowed page to its parent (user).
 */
#pragma pack(1)
typedef struct PGMPOOLUSER
{
    /** The index to the next item in the chain. NIL_PGMPOOL_USER_INDEX is no next. */
    uint16_t            iNext;
    /** The user page index. */
    uint16_t            iUser;
    /** Index into the user table. */
    uint32_t            iUserTable;
} PGMPOOLUSER, *PPGMPOOLUSER;
typedef const PGMPOOLUSER *PCPGMPOOLUSER;
#pragma pack()
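
/*
 * Editor's note (illustrative sketch, not part of the original header):
 * walking the user chain of a shadowed page.  Each PGMPOOLUSER records which
 * parent page (iUser) references the shadowed page and at which table slot
 * (iUserTable); iNext links the records, terminated by
 * NIL_PGMPOOL_USER_INDEX.  The paUsers array parameter and the helper are
 * hypothetical; LogFlow is the real VBox logging macro.
 */
#if 0 /* example only */
static void pgmExampleEnumUsers(PCPGMPOOLUSER paUsers, uint16_t iUserHead)
{
    for (uint16_t i = iUserHead; i != NIL_PGMPOOL_USER_INDEX; i = paUsers[i].iNext)
        LogFlow(("user: iUser=%#x iUserTable=%#x\n", paUsers[i].iUser, paUsers[i].iUserTable));
}
#endif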
1486
1487
1488/** The NIL index for the phys ext chain. */
1489#define NIL_PGMPOOL_PHYSEXT_INDEX ((uint16_t)0xffff)
1490
1491/**
1492 * Node in the chain of physical cross reference extents.
1493 * @todo Calling this an 'extent' is not quite right, find a better name.
1494 */
1495#pragma pack(1)
1496typedef struct PGMPOOLPHYSEXT
1497{
1498 /** The index to the next item in the chain. NIL_PGMPOOL_PHYSEXT_INDEX is no next. */
1499 uint16_t iNext;
1500 /** The user page index. */
1501 uint16_t aidx[3];
1502} PGMPOOLPHYSEXT, *PPGMPOOLPHYSEXT;
1503typedef const PGMPOOLPHYSEXT *PCPGMPOOLPHYSEXT;
1504#pragma pack()
1505
1506
1507/**
1508 * The kind of page that's being shadowed.
1509 */
1510typedef enum PGMPOOLKIND
1511{
1512 /** The virtual invalid 0 entry. */
1513 PGMPOOLKIND_INVALID = 0,
1514 /** The entry is free (=unused). */
1515 PGMPOOLKIND_FREE,
1516
1517 /** Shw: 32-bit page table; Gst: no paging */
1518 PGMPOOLKIND_32BIT_PT_FOR_PHYS,
1519 /** Shw: 32-bit page table; Gst: 32-bit page table. */
1520 PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT,
1521 /** Shw: 32-bit page table; Gst: 4MB page. */
1522 PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB,
1523 /** Shw: PAE page table; Gst: no paging */
1524 PGMPOOLKIND_PAE_PT_FOR_PHYS,
1525 /** Shw: PAE page table; Gst: 32-bit page table. */
1526 PGMPOOLKIND_PAE_PT_FOR_32BIT_PT,
1527 /** Shw: PAE page table; Gst: Half of a 4MB page. */
1528 PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB,
1529 /** Shw: PAE page table; Gst: PAE page table. */
1530 PGMPOOLKIND_PAE_PT_FOR_PAE_PT,
1531 /** Shw: PAE page table; Gst: 2MB page. */
1532 PGMPOOLKIND_PAE_PT_FOR_PAE_2MB,
1533
1534 /** Shw: 32-bit page directory. Gst: 32-bit page directory. */
1535 PGMPOOLKIND_32BIT_PD,
1536 /** Shw: 32-bit page directory. Gst: no paging. */
1537 PGMPOOLKIND_32BIT_PD_PHYS,
1538 /** Shw: PAE page directory 0; Gst: 32-bit page directory. */
1539 PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD,
1540 /** Shw: PAE page directory 1; Gst: 32-bit page directory. */
1541 PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD,
1542 /** Shw: PAE page directory 2; Gst: 32-bit page directory. */
1543 PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD,
1544 /** Shw: PAE page directory 3; Gst: 32-bit page directory. */
1545 PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD,
1546 /** Shw: PAE page directory; Gst: PAE page directory. */
1547 PGMPOOLKIND_PAE_PD_FOR_PAE_PD,
1548 /** Shw: PAE page directory; Gst: no paging. */
1549 PGMPOOLKIND_PAE_PD_PHYS,
1550
1551 /** Shw: PAE page directory pointer table (legacy, 4 entries); Gst 32 bits paging. */
1552 PGMPOOLKIND_PAE_PDPT_FOR_32BIT,
1553 /** Shw: PAE page directory pointer table (legacy, 4 entries); Gst PAE PDPT. */
1554 PGMPOOLKIND_PAE_PDPT,
1555 /** Shw: PAE page directory pointer table (legacy, 4 entries); Gst: no paging. */
1556 PGMPOOLKIND_PAE_PDPT_PHYS,
1557
1558 /** Shw: 64-bit page directory pointer table; Gst: 64-bit page directory pointer table. */
1559 PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT,
1560 /** Shw: 64-bit page directory pointer table; Gst: no paging */
1561 PGMPOOLKIND_64BIT_PDPT_FOR_PHYS,
1562 /** Shw: 64-bit page directory table; Gst: 64-bit page directory table. */
1563 PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD,
1564 /** Shw: 64-bit page directory table; Gst: no paging */
1565 PGMPOOLKIND_64BIT_PD_FOR_PHYS, /* 22 */
1566
1567 /** Shw: 64-bit PML4; Gst: 64-bit PML4. */
1568 PGMPOOLKIND_64BIT_PML4,
1569
1570 /** Shw: EPT page directory pointer table; Gst: no paging */
1571 PGMPOOLKIND_EPT_PDPT_FOR_PHYS,
1572 /** Shw: EPT page directory table; Gst: no paging */
1573 PGMPOOLKIND_EPT_PD_FOR_PHYS,
1574 /** Shw: EPT page table; Gst: no paging */
1575 PGMPOOLKIND_EPT_PT_FOR_PHYS,
1576
1577 /** Shw: Root Nested paging table. */
1578 PGMPOOLKIND_ROOT_NESTED,
1579
1580 /** The last valid entry. */
1581 PGMPOOLKIND_LAST = PGMPOOLKIND_ROOT_NESTED
1582} PGMPOOLKIND;
1583
1584/**
1585 * The access attributes of the page; only applies to big pages.
1586 */
1587typedef enum
1588{
1589 PGMPOOLACCESS_DONTCARE = 0,
1590 PGMPOOLACCESS_USER_RW,
1591 PGMPOOLACCESS_USER_R,
1592 PGMPOOLACCESS_USER_RW_NX,
1593 PGMPOOLACCESS_USER_R_NX,
1594 PGMPOOLACCESS_SUPERVISOR_RW,
1595 PGMPOOLACCESS_SUPERVISOR_R,
1596 PGMPOOLACCESS_SUPERVISOR_RW_NX,
1597 PGMPOOLACCESS_SUPERVISOR_R_NX
1598} PGMPOOLACCESS;
1599
1600/**
1601 * The tracking data for a page in the pool.
1602 */
1603typedef struct PGMPOOLPAGE
1604{
1605 /** AVL node core with the (R3) physical address of this page. */
1606 AVLOHCPHYSNODECORE Core;
1607 /** Pointer to the R3 mapping of the page. */
1608#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
1609 R3PTRTYPE(void *) pvPageR3;
1610#else
1611 R3R0PTRTYPE(void *) pvPageR3;
1612#endif
1613 /** The guest physical address. */
1614#if HC_ARCH_BITS == 32 && GC_ARCH_BITS == 64
1615 uint32_t Alignment0;
1616#endif
1617 RTGCPHYS GCPhys;
1618
1619 /** Access handler statistics to determine whether the guest is (re)initializing a page table. */
1620 RTGCPTR pvLastAccessHandlerRip;
1621 RTGCPTR pvLastAccessHandlerFault;
1622 uint64_t cLastAccessHandlerCount;
1623
1624 /** The kind of page we're shadowing. (This is really a PGMPOOLKIND enum.) */
1625 uint8_t enmKind;
1626 /** The subkind of page we're shadowing. (This is really a PGMPOOLACCESS enum.) */
1627 uint8_t enmAccess;
1628 /** The index of this page. */
1629 uint16_t idx;
1630 /** The next entry in the list this page currently resides in.
1631 * It's either in the free list or in the GCPhys hash. */
1632 uint16_t iNext;
1633#ifdef PGMPOOL_WITH_USER_TRACKING
1634 /** Head of the user chain. NIL_PGMPOOL_USER_INDEX if not currently in use. */
1635 uint16_t iUserHead;
1636 /** The number of present entries. */
1637 uint16_t cPresent;
1638 /** The first entry in the table which is present. */
1639 uint16_t iFirstPresent;
1640#endif
1641#ifdef PGMPOOL_WITH_MONITORING
1642 /** The number of modifications to the monitored page. */
1643 uint16_t cModifications;
1644 /** The next modified page. NIL_PGMPOOL_IDX if tail. */
1645 uint16_t iModifiedNext;
1646 /** The previous modified page. NIL_PGMPOOL_IDX if head. */
1647 uint16_t iModifiedPrev;
1648 /** The next page sharing access handler. NIL_PGMPOOL_IDX if tail. */
1649 uint16_t iMonitoredNext;
1650 /** The previous page sharing access handler. NIL_PGMPOOL_IDX if head. */
1651 uint16_t iMonitoredPrev;
1652#endif
1653#ifdef PGMPOOL_WITH_CACHE
1654 /** The next page in the age list. */
1655 uint16_t iAgeNext;
1656 /** The previous page in the age list. */
1657 uint16_t iAgePrev;
1658#endif /* PGMPOOL_WITH_CACHE */
1659 /** Used to indicate that the page is zeroed. */
1660 bool fZeroed;
1661 /** Used to indicate that a PT has non-global entries. */
1662 bool fSeenNonGlobal;
1663 /** Used to indicate that we're monitoring writes to the guest page. */
1664 bool fMonitored;
1665 /** Used to indicate that the page is in the cache (e.g. in the GCPhys hash).
1666 * (All pages are in the age list.) */
1667 bool fCached;
1668 /** This is used by the R3 access handlers when invoked by an async thread.
1669 * It's a hack required because of REMR3NotifyHandlerPhysicalDeregister. */
1670 bool volatile fReusedFlushPending;
1671#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
1672 /** Used to mark the page as dirty (write monitoring is temporarily off). */
1673 bool fDirty;
1674#else
1675 bool bPadding1;
1676#endif
1677
1678 /** Used to indicate that this page can't be flushed (important for CR3 root pages and shadow PAE PD pages). */
1679 uint32_t cLocked;
1680#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
1681 uint32_t idxDirty;
1682 RTGCPTR pvDirtyFault;
1683#else
1684 uint32_t bPadding2;
1685#endif
1686} PGMPOOLPAGE, *PPGMPOOLPAGE, **PPPGMPOOLPAGE;
1687/** Pointer to a const pool page. */
1688typedef PGMPOOLPAGE const *PCPGMPOOLPAGE;
1689
1690
1691#ifdef PGMPOOL_WITH_CACHE
1692/** The hash table size. */
1693# define PGMPOOL_HASH_SIZE 0x40
1694/** The hash function. */
1695# define PGMPOOL_HASH(GCPhys) ( ((GCPhys) >> PAGE_SHIFT) & (PGMPOOL_HASH_SIZE - 1) )
1696#endif
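/** @par Example
 * The hash is simply the low bits of the guest page frame number. Assuming
 * the usual PAGE_SHIFT of 12, GCPhys 0x00321000 hashes to:
 * @code
 *  PGMPOOL_HASH(0x00321000) == ((0x00321000 >> 12) & 0x3f) == (0x321 & 0x3f) == 0x21
 * @endcode
 * Pool pages sharing a hash slot are chained via PGMPOOLPAGE::iNext,
 * starting at PGMPOOL::aiHash[].
 */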
1697
1698
1699/**
1700 * The shadow page pool instance data.
1701 *
1702 * It's all one big allocation made at init time, except for the
1703 * pages themselves. The user nodes follow immediately after the
1704 * page structures.
1705 */
1706typedef struct PGMPOOL
1707{
1708 /** The VM handle - R3 Ptr. */
1709 PVMR3 pVMR3;
1710 /** The VM handle - R0 Ptr. */
1711 PVMR0 pVMR0;
1712 /** The VM handle - RC Ptr. */
1713 PVMRC pVMRC;
1714 /** The max pool size. This includes the special IDs. */
1715 uint16_t cMaxPages;
1716 /** The current pool size. */
1717 uint16_t cCurPages;
1718 /** The head of the free page list. */
1719 uint16_t iFreeHead;
1720 /* Padding. */
1721 uint16_t u16Padding;
1722#ifdef PGMPOOL_WITH_USER_TRACKING
1723 /** Head of the chain of free user nodes. */
1724 uint16_t iUserFreeHead;
1725 /** The number of user nodes we've allocated. */
1726 uint16_t cMaxUsers;
1727 /** The number of present page table entries in the entire pool. */
1728 uint32_t cPresent;
1729 /** Pointer to the array of user nodes - RC pointer. */
1730 RCPTRTYPE(PPGMPOOLUSER) paUsersRC;
1731 /** Pointer to the array of user nodes - R3 pointer. */
1732 R3PTRTYPE(PPGMPOOLUSER) paUsersR3;
1733 /** Pointer to the array of user nodes - R0 pointer. */
1734 R0PTRTYPE(PPGMPOOLUSER) paUsersR0;
1735#endif /* PGMPOOL_WITH_USER_TRACKING */
1736#ifdef PGMPOOL_WITH_GCPHYS_TRACKING
1737 /** Head of the chain of free phys ext nodes. */
1738 uint16_t iPhysExtFreeHead;
1739 /** The number of phys ext nodes we've allocated. */
1740 uint16_t cMaxPhysExts;
1741 /** Pointer to the array of physical xref extent nodes - RC pointer. */
1742 RCPTRTYPE(PPGMPOOLPHYSEXT) paPhysExtsRC;
1743 /** Pointer to the array of physical xref extent nodes - R3 pointer. */
1744 R3PTRTYPE(PPGMPOOLPHYSEXT) paPhysExtsR3;
1745 /** Pointer to the array of physical xref extent nodes - R0 pointer. */
1746 R0PTRTYPE(PPGMPOOLPHYSEXT) paPhysExtsR0;
1747#endif /* PGMPOOL_WITH_GCPHYS_TRACKING */
1748#ifdef PGMPOOL_WITH_CACHE
1749 /** Hash table for GCPhys addresses. */
1750 uint16_t aiHash[PGMPOOL_HASH_SIZE];
1751 /** The head of the age list. */
1752 uint16_t iAgeHead;
1753 /** The tail of the age list. */
1754 uint16_t iAgeTail;
1755 /** Set if the cache is enabled. */
1756 bool fCacheEnabled;
1757#endif /* PGMPOOL_WITH_CACHE */
1758#ifdef PGMPOOL_WITH_MONITORING
1759 /** Head of the list of modified pages. */
1760 uint16_t iModifiedHead;
1761 /** The current number of modified pages. */
1762 uint16_t cModifiedPages;
1763 /** Access handler, RC. */
1764 RCPTRTYPE(PFNPGMRCPHYSHANDLER) pfnAccessHandlerRC;
1765 /** Access handler, R0. */
1766 R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnAccessHandlerR0;
1767 /** Access handler, R3. */
1768 R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnAccessHandlerR3;
1769 /** The access handler description (HC ptr). */
1770 R3PTRTYPE(const char *) pszAccessHandler;
1771# ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
1772 /** Next available slot. */
1773 uint32_t idxFreeDirtyPage;
1774 /** Number of active dirty pages. */
1775 uint32_t cDirtyPages;
1776 /** Array of current dirty pgm pool page indices. */
1777 uint16_t aIdxDirtyPages[8];
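 /** Snapshots of page table content for each dirty slot (one full 512-entry
  * PAE table, 4 KB, per slot); assumed to be what the dirty page
  * optimization compares against when cleaning the page again. */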
1778 uint64_t aDirtyPages[8][512];
1779# endif /* PGMPOOL_WITH_OPTIMIZED_DIRTY_PT */
1780#endif /* PGMPOOL_WITH_MONITORING */
1781 /** The number of pages currently in use. */
1782 uint16_t cUsedPages;
1783#ifdef VBOX_WITH_STATISTICS
1784 /** The high water mark for cUsedPages. */
1785 uint16_t cUsedPagesHigh;
1786 uint32_t Alignment1; /**< Align the next member on a 64-bit boundary. */
1787 /** Profiling pgmPoolAlloc(). */
1788 STAMPROFILEADV StatAlloc;
1789 /** Profiling pgmPoolClearAll(). */
1790 STAMPROFILE StatClearAll;
1791 /** Profiling pgmPoolFlushAllInt(). */
1792 STAMPROFILE StatFlushAllInt;
1793 /** Profiling pgmPoolFlushPage(). */
1794 STAMPROFILE StatFlushPage;
1795 /** Profiling pgmPoolFree(). */
1796 STAMPROFILE StatFree;
1797 /** Counting explicit flushes by PGMPoolFlushPage(). */
1798 STAMCOUNTER StatForceFlushPage;
1799 /** Profiling time spent zeroing pages. */
1800 STAMPROFILE StatZeroPage;
1801# ifdef PGMPOOL_WITH_USER_TRACKING
1802 /** Profiling of pgmPoolTrackDeref. */
1803 STAMPROFILE StatTrackDeref;
1804 /** Profiling pgmTrackFlushGCPhysPT. */
1805 STAMPROFILE StatTrackFlushGCPhysPT;
1806 /** Profiling pgmTrackFlushGCPhysPTs. */
1807 STAMPROFILE StatTrackFlushGCPhysPTs;
1808 /** Profiling pgmTrackFlushGCPhysPTsSlow. */
1809 STAMPROFILE StatTrackFlushGCPhysPTsSlow;
1810 /** Number of times we've been out of user records. */
1811 STAMCOUNTER StatTrackFreeUpOneUser;
1812# endif
1813# ifdef PGMPOOL_WITH_GCPHYS_TRACKING
1814 /** Profiling deref activity related tracking GC physical pages. */
1815 STAMPROFILE StatTrackDerefGCPhys;
1816 /** Number of linear searches for a HCPhys in the ram ranges. */
1817 STAMCOUNTER StatTrackLinearRamSearches;
1818 /** The number of failing pgmPoolTrackPhysExtAlloc calls. */
1819 STAMCOUNTER StamTrackPhysExtAllocFailures;
1820# endif
1821# ifdef PGMPOOL_WITH_MONITORING
1822 /** Profiling the RC/R0 access handler. */
1823 STAMPROFILE StatMonitorRZ;
1824 /** Times we've failed interpreting the instruction. */
1825 STAMCOUNTER StatMonitorRZEmulateInstr;
1826 /** Profiling the pgmPoolFlushPage calls made from the RC/R0 access handler. */
1827 STAMPROFILE StatMonitorRZFlushPage;
1828 /** Times we've detected a page table reinit. */
1829 STAMCOUNTER StatMonitorRZFlushReinit;
1830 /** Times we've detected fork(). */
1831 STAMCOUNTER StatMonitorRZFork;
1832 /** Profiling the RC/R0 access we've handled (except REP STOSD). */
1833 STAMPROFILE StatMonitorRZHandled;
1834 /** Times we've failed interpreting a patch code instruction. */
1835 STAMCOUNTER StatMonitorRZIntrFailPatch1;
1836 /** Times we've failed interpreting a patch code instruction during flushing. */
1837 STAMCOUNTER StatMonitorRZIntrFailPatch2;
1838 /** The number of times we've seen rep prefixes we can't handle. */
1839 STAMCOUNTER StatMonitorRZRepPrefix;
1840 /** Profiling the REP STOSD cases we've handled. */
1841 STAMPROFILE StatMonitorRZRepStosd;
1842
1843 /** Profiling the R3 access handler. */
1844 STAMPROFILE StatMonitorR3;
1845 /** Times we've failed interpreting the instruction. */
1846 STAMCOUNTER StatMonitorR3EmulateInstr;
1847 /** Profiling the pgmPoolFlushPage calls made from the R3 access handler. */
1848 STAMPROFILE StatMonitorR3FlushPage;
1850 /** Times we've detected a page table reinit. */
1850 STAMCOUNTER StatMonitorR3FlushReinit;
1851 /** Times we've detected fork(). */
1852 STAMCOUNTER StatMonitorR3Fork;
1853 /** Profiling the R3 access we've handled (except REP STOSD). */
1854 STAMPROFILE StatMonitorR3Handled;
1855 /** The number of times we've seen rep prefixes we can't handle. */
1856 STAMCOUNTER StatMonitorR3RepPrefix;
1857 /** Profiling the REP STOSD cases we've handled. */
1858 STAMPROFILE StatMonitorR3RepStosd;
1859 /** The number of times we're called in an async thread and need to flush. */
1860 STAMCOUNTER StatMonitorR3Async;
1861 /** Times we've called pgmPoolResetDirtyPages (and there were dirty pages). */
1862 STAMCOUNTER StatResetDirtyPages;
1863 /** Times we've called pgmPoolAddDirtyPage. */
1864 STAMCOUNTER StatDirtyPage;
1865 /** Times we've had to flush duplicates for dirty page management. */
1866 STAMCOUNTER StatDirtyPageDupFlush;
1867
1868 /** The high water mark for cModifiedPages. */
1869 uint16_t cModifiedPagesHigh;
1870 uint16_t Alignment2[3]; /**< Align the next member on a 64-bit boundary. */
1871# endif
1872# ifdef PGMPOOL_WITH_CACHE
1873 /** The number of cache hits. */
1874 STAMCOUNTER StatCacheHits;
1875 /** The number of cache misses. */
1876 STAMCOUNTER StatCacheMisses;
1877 /** The number of times we've got a conflict of 'kind' in the cache. */
1878 STAMCOUNTER StatCacheKindMismatches;
1879 /** Number of times we've been out of pages. */
1880 STAMCOUNTER StatCacheFreeUpOne;
1881 /** The number of cacheable allocations. */
1882 STAMCOUNTER StatCacheCacheable;
1883 /** The number of uncacheable allocations. */
1884 STAMCOUNTER StatCacheUncacheable;
1885# endif
1886#elif HC_ARCH_BITS == 64
1887 uint32_t Alignment3; /**< Align the next member on a 64-bit boundary. */
1888#endif
1889 /** The AVL tree for looking up a page by its HC physical address. */
1890 AVLOHCPHYSTREE HCPhysTree;
1891 uint32_t Alignment4; /**< Align the next member on a 64-bit boundary. */
1892 /** Array of pages. (cMaxPages in length)
1893 * The Id is the index into this array.
1894 */
1895 PGMPOOLPAGE aPages[PGMPOOL_IDX_FIRST];
1896} PGMPOOL, *PPGMPOOL, **PPPGMPOOL;
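/** @par Example
 * A pool page index (as stored in iNext, aiHash[], the user nodes, etc.)
 * maps straight into PGMPOOL::aPages (sketch):
 * @code
 *  PPGMPOOLPAGE pPage = &pPool->aPages[idx]; // idx < pPool->cCurPages
 *  Assert(pPage->idx == idx);
 * @endcode
 */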
1897
1898
1899/** @def PGMPOOL_PAGE_2_PTR
1900 * Maps a pool page into the current context.
1901 *
1902 * @returns Pointer to the page.
1903 * @param pVM The VM handle.
1904 * @param pPage The pool page.
1905 *
1906 * @remark In RC this uses PGMGCDynMapHCPage(), so it will consume part of the
1907 * small page window employed by that function. Be careful.
1908 * @remark There is no need to assert on the result.
1909 */
1910#if defined(IN_RC)
1911# define PGMPOOL_PAGE_2_PTR(pVM, pPage) pgmPoolMapPageInlined(&(pVM)->pgm.s, (pPage))
1912#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1913# define PGMPOOL_PAGE_2_PTR(pVM, pPage) pgmPoolMapPageInlined(&(pVM)->pgm.s, (pPage))
1914#elif defined(VBOX_STRICT)
1915# define PGMPOOL_PAGE_2_PTR(pVM, pPage) pgmPoolMapPageStrict(pPage)
1916DECLINLINE(void *) pgmPoolMapPageStrict(PPGMPOOLPAGE pPage)
1917{
1918 Assert(pPage && pPage->pvPageR3);
1919 return pPage->pvPageR3;
1920}
1921#else
1922# define PGMPOOL_PAGE_2_PTR(pVM, pPage) ((pPage)->pvPageR3)
1923#endif
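/** @par Example
 * An illustrative sketch mapping a shadow PAE page table and peeking at
 * one of its entries in whatever context the code is compiled for:
 * @code
 *  PX86PTPAE pShwPT = (PX86PTPAE)PGMPOOL_PAGE_2_PTR(pVM, pPage);
 *  X86PTEPAE Pte    = pShwPT->a[iPte]; // iPte in [0..511] for a PAE PT
 * @endcode
 */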
1924
1925/** @def PGMPOOL_PAGE_2_PTR_BY_PGM
1926 * Maps a pool page into the current context.
1927 *
1928 * @returns Pointer to the page.
1929 * @param pPGM Pointer to the PGM instance data.
1930 * @param pPage The pool page.
1931 *
1932 * @remark In RC this uses PGMGCDynMapHCPage(), so it will consume part of the
1933 * small page window employed by that function. Be careful.
1934 * @remark There is no need to assert on the result.
1935 */
1936#if defined(IN_RC)
1937# define PGMPOOL_PAGE_2_PTR_BY_PGM(pPGM, pPage) pgmPoolMapPageInlined(pPGM, (pPage))
1938#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1939# define PGMPOOL_PAGE_2_PTR_BY_PGM(pPGM, pPage) pgmPoolMapPageInlined(pPGM, (pPage))
1940#else
1941# define PGMPOOL_PAGE_2_PTR_BY_PGM(pPGM, pPage) PGMPOOL_PAGE_2_PTR(PGM2VM(pPGM), pPage)
1942#endif
1943
1944/** @def PGMPOOL_PAGE_2_PTR_BY_PGMCPU
1945 * Maps a pool page into the current context.
1946 *
1947 * @returns Pointer to the page.
1948 * @param pPGM Pointer to the PGMCPU instance data.
1949 * @param pPage The pool page.
1950 *
1951 * @remark In RC this uses PGMGCDynMapHCPage(), so it will consume part of the
1952 * small page window employed by that function. Be careful.
1953 * @remark There is no need to assert on the result.
1954 */
1955#if defined(IN_RC)
1956# define PGMPOOL_PAGE_2_PTR_BY_PGMCPU(pPGM, pPage) pgmPoolMapPageInlined(PGMCPU2PGM(pPGM), (pPage))
1957#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1958# define PGMPOOL_PAGE_2_PTR_BY_PGMCPU(pPGM, pPage) pgmPoolMapPageInlined(PGMCPU2PGM(pPGM), (pPage))
1959#else
1960# define PGMPOOL_PAGE_2_PTR_BY_PGMCPU(pPGM, pPage) PGMPOOL_PAGE_2_PTR(PGMCPU2VM(pPGM), pPage)
1961#endif
1962
1963
1964/** @name Per guest page tracking data.
1965 * This is currently a 16-bit word in the PGMPAGE structure; the idea, though,
1966 * is to use more bits for it and split it up later on. But for now we'll play
1967 * safe and change as little as possible.
1968 *
1969 * The 16-bit word has two parts:
1970 *
1971 * The first 14 bits form the @a idx field. It is either the index of a page in
1972 * the shadow page pool, or an index into the extent list.
1973 *
1974 * The 2 topmost bits make up the @a cRefs field, which counts the number of
1975 * shadow page pool references to the page. If cRefs equals
1976 * PGMPOOL_TD_CREFS_PHYSEXT, then the @a idx field is an index into the extent
1977 * (misnomer) table and not the shadow page pool.
1978 *
1979 * See PGM_PAGE_GET_TRACKING and PGM_PAGE_SET_TRACKING for how to get and set
1980 * the 16-bit word.
1981 *
1982 * @{ */
1983/** The shift count for getting to the cRefs part. */
1984#define PGMPOOL_TD_CREFS_SHIFT 14
1985/** The mask applied after shifting the tracking data down by
1986 * PGMPOOL_TD_CREFS_SHIFT. */
1987#define PGMPOOL_TD_CREFS_MASK 0x3
1988/** The cRefs value used to indicate that the idx is the head of a
1989 * physical cross reference list. */
1990#define PGMPOOL_TD_CREFS_PHYSEXT PGMPOOL_TD_CREFS_MASK
1991/** The shift used to get idx. */
1992#define PGMPOOL_TD_IDX_SHIFT 0
1993/** The mask applied to the idx after shifting down by PGMPOOL_TD_IDX_SHIFT. */
1994#define PGMPOOL_TD_IDX_MASK 0x3fff
1995/** The idx value when we're out of PGMPOOLPHYSEXT entries and/or there are
1996 * simply too many mappings of this page. */
1997#define PGMPOOL_TD_IDX_OVERFLOWED PGMPOOL_TD_IDX_MASK
1998
1999/** @def PGMPOOL_TD_MAKE
2000 * Makes a 16-bit tracking data word.
2001 *
2002 * @returns tracking data.
2003 * @param cRefs The @a cRefs field. Must be within bounds!
2004 * @param idx The @a idx field. Must also be within bounds! */
2005#define PGMPOOL_TD_MAKE(cRefs, idx) ( ((cRefs) << PGMPOOL_TD_CREFS_SHIFT) | (idx) )
2006
2007/** @def PGMPOOL_TD_GET_CREFS
2008 * Get the @a cRefs field from a tracking data word.
2009 *
2010 * @returns The @a cRefs field
2011 * @param u16 The tracking data word. */
2012#define PGMPOOL_TD_GET_CREFS(u16) ( ((u16) >> PGMPOOL_TD_CREFS_SHIFT) & PGMPOOL_TD_CREFS_MASK )
2013
2014/** @def PGMPOOL_TD_GET_IDX
2015 * Get the @a idx field from a tracking data word.
2016 *
2017 * @returns The @a idx field
2018 * @param u16 The tracking data word. */
2019#define PGMPOOL_TD_GET_IDX(u16) ( ((u16) >> PGMPOOL_TD_IDX_SHIFT) & PGMPOOL_TD_IDX_MASK )
2020/** @} */
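/** @par Example
 * A worked example of the tracking word: a single reference from pool page
 * 0x123 packs and unpacks as follows:
 * @code
 *  uint16_t u16 = PGMPOOL_TD_MAKE(1, 0x123);   // (1 << 14) | 0x123 == 0x4123
 *  Assert(PGMPOOL_TD_GET_CREFS(u16) == 1);     // 0x4123 >> 14 == 1
 *  Assert(PGMPOOL_TD_GET_IDX(u16)   == 0x123); // 0x4123 & 0x3fff == 0x123
 * @endcode
 * Once cRefs equals PGMPOOL_TD_CREFS_PHYSEXT (3), idx no longer names a
 * pool page but the head of a PGMPOOLPHYSEXT chain.
 */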
2021
2022
2023/**
2024 * The trees use self-relative offsets as pointers.
2025 * So all their data, including the root pointers, must be in the heap for HC and GC
2026 * to have the same layout.
2027 */
2028typedef struct PGMTREES
2029{
2030 /** Physical access handlers (AVL range+offsetptr tree). */
2031 AVLROGCPHYSTREE PhysHandlers;
2032 /** Virtual access handlers (AVL range + GC ptr tree). */
2033 AVLROGCPTRTREE VirtHandlers;
2034 /** Physical ranges of the virtual access handlers (AVL range + offsetptr tree). */
2035 AVLROGCPHYSTREE PhysToVirtHandlers;
2036 /** Virtual access handlers for the hypervisor (AVL range + GC ptr tree). */
2037 AVLROGCPTRTREE HyperVirtHandlers;
2038} PGMTREES;
2039/** Pointer to PGM trees. */
2040typedef PGMTREES *PPGMTREES;
2041
2042
2043/** @name Paging mode macros
2044 * @{ */
2045#ifdef IN_RC
2046# define PGM_CTX(a,b) a##RC##b
2047# define PGM_CTX_STR(a,b) a "GC" b
2048# define PGM_CTX_DECL(type) VMMRCDECL(type)
2049#else
2050# ifdef IN_RING3
2051# define PGM_CTX(a,b) a##R3##b
2052# define PGM_CTX_STR(a,b) a "R3" b
2053# define PGM_CTX_DECL(type) DECLCALLBACK(type)
2054# else
2055# define PGM_CTX(a,b) a##R0##b
2056# define PGM_CTX_STR(a,b) a "R0" b
2057# define PGM_CTX_DECL(type) VMMDECL(type)
2058# endif
2059#endif
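/** @par Example
 * These macros paste the context infix into symbol names so that one
 * source file can be compiled for R3, R0 and RC. Compiled in ring-3, for
 * instance:
 * @code
 *  PGM_CTX(pgm, PhysRead)    // expands to pgmR3PhysRead
 *  PGM_GST_NAME_PAE(GetPage) // expands to pgmR3GstPAEGetPage
 * @endcode
 * (Note that the string variant deliberately says "GC" rather than "RC" in
 * raw-mode context.)
 */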
2060
2061#define PGM_GST_NAME_REAL(name) PGM_CTX(pgm,GstReal##name)
2062#define PGM_GST_NAME_RC_REAL_STR(name) "pgmRCGstReal" #name
2063#define PGM_GST_NAME_R0_REAL_STR(name) "pgmR0GstReal" #name
2064#define PGM_GST_NAME_PROT(name) PGM_CTX(pgm,GstProt##name)
2065#define PGM_GST_NAME_RC_PROT_STR(name) "pgmRCGstProt" #name
2066#define PGM_GST_NAME_R0_PROT_STR(name) "pgmR0GstProt" #name
2067#define PGM_GST_NAME_32BIT(name) PGM_CTX(pgm,Gst32Bit##name)
2068#define PGM_GST_NAME_RC_32BIT_STR(name) "pgmRCGst32Bit" #name
2069#define PGM_GST_NAME_R0_32BIT_STR(name) "pgmR0Gst32Bit" #name
2070#define PGM_GST_NAME_PAE(name) PGM_CTX(pgm,GstPAE##name)
2071#define PGM_GST_NAME_RC_PAE_STR(name) "pgmRCGstPAE" #name
2072#define PGM_GST_NAME_R0_PAE_STR(name) "pgmR0GstPAE" #name
2073#define PGM_GST_NAME_AMD64(name) PGM_CTX(pgm,GstAMD64##name)
2074#define PGM_GST_NAME_RC_AMD64_STR(name) "pgmRCGstAMD64" #name
2075#define PGM_GST_NAME_R0_AMD64_STR(name) "pgmR0GstAMD64" #name
2076#define PGM_GST_PFN(name, pVCpu) ((pVCpu)->pgm.s.PGM_CTX(pfn,Gst##name))
2077#define PGM_GST_DECL(type, name) PGM_CTX_DECL(type) PGM_GST_NAME(name)
2078
2079#define PGM_SHW_NAME_32BIT(name) PGM_CTX(pgm,Shw32Bit##name)
2080#define PGM_SHW_NAME_RC_32BIT_STR(name) "pgmRCShw32Bit" #name
2081#define PGM_SHW_NAME_R0_32BIT_STR(name) "pgmR0Shw32Bit" #name
2082#define PGM_SHW_NAME_PAE(name) PGM_CTX(pgm,ShwPAE##name)
2083#define PGM_SHW_NAME_RC_PAE_STR(name) "pgmRCShwPAE" #name
2084#define PGM_SHW_NAME_R0_PAE_STR(name) "pgmR0ShwPAE" #name
2085#define PGM_SHW_NAME_AMD64(name) PGM_CTX(pgm,ShwAMD64##name)
2086#define PGM_SHW_NAME_RC_AMD64_STR(name) "pgmRCShwAMD64" #name
2087#define PGM_SHW_NAME_R0_AMD64_STR(name) "pgmR0ShwAMD64" #name
2088#define PGM_SHW_NAME_NESTED(name) PGM_CTX(pgm,ShwNested##name)
2089#define PGM_SHW_NAME_RC_NESTED_STR(name) "pgmRCShwNested" #name
2090#define PGM_SHW_NAME_R0_NESTED_STR(name) "pgmR0ShwNested" #name
2091#define PGM_SHW_NAME_EPT(name) PGM_CTX(pgm,ShwEPT##name)
2092#define PGM_SHW_NAME_RC_EPT_STR(name) "pgmRCShwEPT" #name
2093#define PGM_SHW_NAME_R0_EPT_STR(name) "pgmR0ShwEPT" #name
2094#define PGM_SHW_DECL(type, name) PGM_CTX_DECL(type) PGM_SHW_NAME(name)
2095#define PGM_SHW_PFN(name, pVCpu) ((pVCpu)->pgm.s.PGM_CTX(pfn,Shw##name))
2096
2097/* Shw_Gst */
2098#define PGM_BTH_NAME_32BIT_REAL(name) PGM_CTX(pgm,Bth32BitReal##name)
2099#define PGM_BTH_NAME_32BIT_PROT(name) PGM_CTX(pgm,Bth32BitProt##name)
2100#define PGM_BTH_NAME_32BIT_32BIT(name) PGM_CTX(pgm,Bth32Bit32Bit##name)
2101#define PGM_BTH_NAME_PAE_REAL(name) PGM_CTX(pgm,BthPAEReal##name)
2102#define PGM_BTH_NAME_PAE_PROT(name) PGM_CTX(pgm,BthPAEProt##name)
2103#define PGM_BTH_NAME_PAE_32BIT(name) PGM_CTX(pgm,BthPAE32Bit##name)
2104#define PGM_BTH_NAME_PAE_PAE(name) PGM_CTX(pgm,BthPAEPAE##name)
2105#define PGM_BTH_NAME_AMD64_PROT(name) PGM_CTX(pgm,BthAMD64Prot##name)
2106#define PGM_BTH_NAME_AMD64_AMD64(name) PGM_CTX(pgm,BthAMD64AMD64##name)
2107#define PGM_BTH_NAME_NESTED_REAL(name) PGM_CTX(pgm,BthNestedReal##name)
2108#define PGM_BTH_NAME_NESTED_PROT(name) PGM_CTX(pgm,BthNestedProt##name)
2109#define PGM_BTH_NAME_NESTED_32BIT(name) PGM_CTX(pgm,BthNested32Bit##name)
2110#define PGM_BTH_NAME_NESTED_PAE(name) PGM_CTX(pgm,BthNestedPAE##name)
2111#define PGM_BTH_NAME_NESTED_AMD64(name) PGM_CTX(pgm,BthNestedAMD64##name)
2112#define PGM_BTH_NAME_EPT_REAL(name) PGM_CTX(pgm,BthEPTReal##name)
2113#define PGM_BTH_NAME_EPT_PROT(name) PGM_CTX(pgm,BthEPTProt##name)
2114#define PGM_BTH_NAME_EPT_32BIT(name) PGM_CTX(pgm,BthEPT32Bit##name)
2115#define PGM_BTH_NAME_EPT_PAE(name) PGM_CTX(pgm,BthEPTPAE##name)
2116#define PGM_BTH_NAME_EPT_AMD64(name) PGM_CTX(pgm,BthEPTAMD64##name)
2117
2118#define PGM_BTH_NAME_RC_32BIT_REAL_STR(name) "pgmRCBth32BitReal" #name
2119#define PGM_BTH_NAME_RC_32BIT_PROT_STR(name) "pgmRCBth32BitProt" #name
2120#define PGM_BTH_NAME_RC_32BIT_32BIT_STR(name) "pgmRCBth32Bit32Bit" #name
2121#define PGM_BTH_NAME_RC_PAE_REAL_STR(name) "pgmRCBthPAEReal" #name
2122#define PGM_BTH_NAME_RC_PAE_PROT_STR(name) "pgmRCBthPAEProt" #name
2123#define PGM_BTH_NAME_RC_PAE_32BIT_STR(name) "pgmRCBthPAE32Bit" #name
2124#define PGM_BTH_NAME_RC_PAE_PAE_STR(name) "pgmRCBthPAEPAE" #name
2125#define PGM_BTH_NAME_RC_AMD64_AMD64_STR(name) "pgmRCBthAMD64AMD64" #name
2126#define PGM_BTH_NAME_RC_NESTED_REAL_STR(name) "pgmRCBthNestedReal" #name
2127#define PGM_BTH_NAME_RC_NESTED_PROT_STR(name) "pgmRCBthNestedProt" #name
2128#define PGM_BTH_NAME_RC_NESTED_32BIT_STR(name) "pgmRCBthNested32Bit" #name
2129#define PGM_BTH_NAME_RC_NESTED_PAE_STR(name) "pgmRCBthNestedPAE" #name
2130#define PGM_BTH_NAME_RC_NESTED_AMD64_STR(name) "pgmRCBthNestedAMD64" #name
2131#define PGM_BTH_NAME_RC_EPT_REAL_STR(name) "pgmRCBthEPTReal" #name
2132#define PGM_BTH_NAME_RC_EPT_PROT_STR(name) "pgmRCBthEPTProt" #name
2133#define PGM_BTH_NAME_RC_EPT_32BIT_STR(name) "pgmRCBthEPT32Bit" #name
2134#define PGM_BTH_NAME_RC_EPT_PAE_STR(name) "pgmRCBthEPTPAE" #name
2135#define PGM_BTH_NAME_RC_EPT_AMD64_STR(name) "pgmRCBthEPTAMD64" #name
2136#define PGM_BTH_NAME_R0_32BIT_REAL_STR(name) "pgmR0Bth32BitReal" #name
2137#define PGM_BTH_NAME_R0_32BIT_PROT_STR(name) "pgmR0Bth32BitProt" #name
2138#define PGM_BTH_NAME_R0_32BIT_32BIT_STR(name) "pgmR0Bth32Bit32Bit" #name
2139#define PGM_BTH_NAME_R0_PAE_REAL_STR(name) "pgmR0BthPAEReal" #name
2140#define PGM_BTH_NAME_R0_PAE_PROT_STR(name) "pgmR0BthPAEProt" #name
2141#define PGM_BTH_NAME_R0_PAE_32BIT_STR(name) "pgmR0BthPAE32Bit" #name
2142#define PGM_BTH_NAME_R0_PAE_PAE_STR(name) "pgmR0BthPAEPAE" #name
2143#define PGM_BTH_NAME_R0_AMD64_PROT_STR(name) "pgmR0BthAMD64Prot" #name
2144#define PGM_BTH_NAME_R0_AMD64_AMD64_STR(name) "pgmR0BthAMD64AMD64" #name
2145#define PGM_BTH_NAME_R0_NESTED_REAL_STR(name) "pgmR0BthNestedReal" #name
2146#define PGM_BTH_NAME_R0_NESTED_PROT_STR(name) "pgmR0BthNestedProt" #name
2147#define PGM_BTH_NAME_R0_NESTED_32BIT_STR(name) "pgmR0BthNested32Bit" #name
2148#define PGM_BTH_NAME_R0_NESTED_PAE_STR(name) "pgmR0BthNestedPAE" #name
2149#define PGM_BTH_NAME_R0_NESTED_AMD64_STR(name) "pgmR0BthNestedAMD64" #name
2150#define PGM_BTH_NAME_R0_EPT_REAL_STR(name) "pgmR0BthEPTReal" #name
2151#define PGM_BTH_NAME_R0_EPT_PROT_STR(name) "pgmR0BthEPTProt" #name
2152#define PGM_BTH_NAME_R0_EPT_32BIT_STR(name) "pgmR0BthEPT32Bit" #name
2153#define PGM_BTH_NAME_R0_EPT_PAE_STR(name) "pgmR0BthEPTPAE" #name
2154#define PGM_BTH_NAME_R0_EPT_AMD64_STR(name) "pgmR0BthEPTAMD64" #name
2155
2156#define PGM_BTH_DECL(type, name) PGM_CTX_DECL(type) PGM_BTH_NAME(name)
2157#define PGM_BTH_PFN(name, pVCpu) ((pVCpu)->pgm.s.PGM_CTX(pfn,Bth##name))
2158/** @} */
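/** @par Example
 * The PFN macros dispatch through the per-VCPU mode data set up for the
 * current shadow+guest mode pair; a typical invocation (sketch):
 * @code
 *  int rc = PGM_BTH_PFN(SyncCR3, pVCpu)(pVCpu, cr0, cr3, cr4, fGlobal);
 * @endcode
 * In ring-3 this reads pVCpu->pgm.s.pfnR3BthSyncCR3 and calls it.
 */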
2159
2160/**
2161 * Data for each paging mode.
2162 */
2163typedef struct PGMMODEDATA
2164{
2165 /** The guest mode type. */
2166 uint32_t uGstType;
2167 /** The shadow mode type. */
2168 uint32_t uShwType;
2169
2170 /** @name Function pointers for Shadow paging.
2171 * @{
2172 */
2173 DECLR3CALLBACKMEMBER(int, pfnR3ShwRelocate,(PVMCPU pVCpu, RTGCPTR offDelta));
2174 DECLR3CALLBACKMEMBER(int, pfnR3ShwExit,(PVMCPU pVCpu));
2175 DECLR3CALLBACKMEMBER(int, pfnR3ShwGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
2176 DECLR3CALLBACKMEMBER(int, pfnR3ShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
2177
2178 DECLRCCALLBACKMEMBER(int, pfnRCShwGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
2179 DECLRCCALLBACKMEMBER(int, pfnRCShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
2180
2181 DECLR0CALLBACKMEMBER(int, pfnR0ShwGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
2182 DECLR0CALLBACKMEMBER(int, pfnR0ShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
2183 /** @} */
2184
2185 /** @name Function pointers for Guest paging.
2186 * @{
2187 */
2188 DECLR3CALLBACKMEMBER(int, pfnR3GstRelocate,(PVMCPU pVCpu, RTGCPTR offDelta));
2189 DECLR3CALLBACKMEMBER(int, pfnR3GstExit,(PVMCPU pVCpu));
2190 DECLR3CALLBACKMEMBER(int, pfnR3GstGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
2191 DECLR3CALLBACKMEMBER(int, pfnR3GstModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
2192 DECLR3CALLBACKMEMBER(int, pfnR3GstGetPDE,(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDEPAE pPde));
2193 DECLRCCALLBACKMEMBER(int, pfnRCGstGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
2194 DECLRCCALLBACKMEMBER(int, pfnRCGstModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
2195 DECLRCCALLBACKMEMBER(int, pfnRCGstGetPDE,(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDEPAE pPde));
2196 DECLR0CALLBACKMEMBER(int, pfnR0GstGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
2197 DECLR0CALLBACKMEMBER(int, pfnR0GstModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
2198 DECLR0CALLBACKMEMBER(int, pfnR0GstGetPDE,(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDEPAE pPde));
2199 /** @} */
2200
2201 /** @name Function pointers for Both Shadow and Guest paging.
2202 * @{
2203 */
2204 DECLR3CALLBACKMEMBER(int, pfnR3BthRelocate,(PVMCPU pVCpu, RTGCPTR offDelta));
2205 /* no pfnR3BthTrap0eHandler */
2206 DECLR3CALLBACKMEMBER(int, pfnR3BthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
2207 DECLR3CALLBACKMEMBER(int, pfnR3BthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
2208 DECLR3CALLBACKMEMBER(int, pfnR3BthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
2209 DECLR3CALLBACKMEMBER(int, pfnR3BthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
2210 DECLR3CALLBACKMEMBER(int, pfnR3BthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
2211#ifdef VBOX_STRICT
2212 DECLR3CALLBACKMEMBER(unsigned, pfnR3BthAssertCR3,(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
2213#endif
2214 DECLR3CALLBACKMEMBER(int, pfnR3BthMapCR3,(PVMCPU pVCpu, RTGCPHYS GCPhysCR3));
2215 DECLR3CALLBACKMEMBER(int, pfnR3BthUnmapCR3,(PVMCPU pVCpu));
2216
2217 DECLRCCALLBACKMEMBER(int, pfnRCBthTrap0eHandler,(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
2218 DECLRCCALLBACKMEMBER(int, pfnRCBthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
2219 DECLRCCALLBACKMEMBER(int, pfnRCBthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
2220 DECLRCCALLBACKMEMBER(int, pfnRCBthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
2221 DECLRCCALLBACKMEMBER(int, pfnRCBthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
2222 DECLRCCALLBACKMEMBER(int, pfnRCBthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
2223#ifdef VBOX_STRICT
2224 DECLRCCALLBACKMEMBER(unsigned, pfnRCBthAssertCR3,(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
2225#endif
2226 DECLRCCALLBACKMEMBER(int, pfnRCBthMapCR3,(PVMCPU pVCpu, RTGCPHYS GCPhysCR3));
2227 DECLRCCALLBACKMEMBER(int, pfnRCBthUnmapCR3,(PVMCPU pVCpu));
2228
2229 DECLR0CALLBACKMEMBER(int, pfnR0BthTrap0eHandler,(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
2230 DECLR0CALLBACKMEMBER(int, pfnR0BthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
2231 DECLR0CALLBACKMEMBER(int, pfnR0BthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
2232 DECLR0CALLBACKMEMBER(int, pfnR0BthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
2233 DECLR0CALLBACKMEMBER(int, pfnR0BthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
2234 DECLR0CALLBACKMEMBER(int, pfnR0BthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
2235#ifdef VBOX_STRICT
2236 DECLR0CALLBACKMEMBER(unsigned, pfnR0BthAssertCR3,(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
2237#endif
2238 DECLR0CALLBACKMEMBER(int, pfnR0BthMapCR3,(PVMCPU pVCpu, RTGCPHYS GCPhysCR3));
2239 DECLR0CALLBACKMEMBER(int, pfnR0BthUnmapCR3,(PVMCPU pVCpu));
2240 /** @} */
2241} PGMMODEDATA, *PPGMMODEDATA;
2242
2243
2244
2245/**
2246 * Converts a PGM pointer into a VM pointer.
2247 * @returns Pointer to the VM structure the PGM is part of.
2248 * @param pPGM Pointer to PGM instance data.
2249 */
2250#define PGM2VM(pPGM) ( (PVM)((char*)pPGM - pPGM->offVM) )
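/** @par Example
 * Because offVM is a byte offset rather than a pointer, the conversion
 * works unchanged in R3, R0 and RC even though the structures are mapped
 * at different addresses in each context (sketch):
 * @code
 *  PPGM pPGM = &pVM->pgm.s;     // PGM is embedded in the VM structure
 *  Assert(PGM2VM(pPGM) == pVM); // recovered by subtracting pPGM->offVM
 * @endcode
 */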
2251
2252/**
2253 * PGM Data (part of VM)
2254 */
2255typedef struct PGM
2256{
2257 /** Offset to the VM structure. */
2258 RTINT offVM;
2259 /** Offset of the PGMCPU structure relative to VMCPU. */
2260 RTINT offVCpuPGM;
2261
2262 /** @cfgm{RamPreAlloc, boolean, false}
2263 * Indicates whether the base RAM should all be allocated before starting
2264 * the VM (default), or if it should be allocated when first written to.
2265 */
2266 bool fRamPreAlloc;
2267 /** Alignment padding. */
2268 bool afAlignment0[11];
2269
2270 /*
2271 * This will be redefined at least two more times before we're done, I'm sure.
2272 * The current code is only to get on with the coding.
2273 * - 2004-06-10: initial version, bird.
2274 * - 2004-07-02: 1st time, bird.
2275 * - 2004-10-18: 2nd time, bird.
2276 * - 2005-07-xx: 3rd time, bird.
2277 */
2278
2279 /** Pointer to the 32-bit page table entries for the dynamic page mapping area - GCPtr. */
2280 RCPTRTYPE(PX86PTE) paDynPageMap32BitPTEsGC;
2281 /** Pointer to the PAE page table entries for the dynamic page mapping area - GCPtr. */
2282 RCPTRTYPE(PX86PTEPAE) paDynPageMapPaePTEsGC;
2283
2284 /** The host paging mode. (This is what SUPLib reports.) */
2285 SUPPAGINGMODE enmHostMode;
2286
2287 /** 4 MB page mask; 32 or 36 bits depending on PSE-36 (identical for all VCPUs) */
2288 RTGCPHYS GCPhys4MBPSEMask;
2289
2290 /** Pointer to the list of RAM ranges (Phys GC -> Phys HC conversion) - for R3.
2291 * This is sorted by physical address and contains no overlapping ranges. */
2292 R3PTRTYPE(PPGMRAMRANGE) pRamRangesR3;
2293 /** R0 pointer corresponding to PGM::pRamRangesR3. */
2294 R0PTRTYPE(PPGMRAMRANGE) pRamRangesR0;
2295 /** RC pointer corresponding to PGM::pRamRangesR3. */
2296 RCPTRTYPE(PPGMRAMRANGE) pRamRangesRC;
2297 RTRCPTR alignment4; /**< structure alignment. */
2298
2299 /** Pointer to the list of ROM ranges - for R3.
2300 * This is sorted by physical address and contains no overlapping ranges. */
2301 R3PTRTYPE(PPGMROMRANGE) pRomRangesR3;
2302 /** R0 pointer corresponding to PGM::pRomRangesR3. */
2303 R0PTRTYPE(PPGMROMRANGE) pRomRangesR0;
2304 /** RC pointer corresponding to PGM::pRomRangesR3. */
2305 RCPTRTYPE(PPGMROMRANGE) pRomRangesRC;
2306 /** Alignment padding. */
2307 RTRCPTR GCPtrPadding2;
2308
2309 /** Pointer to the list of MMIO2 ranges - for R3.
2310 * Registration order. */
2311 R3PTRTYPE(PPGMMMIO2RANGE) pMmio2RangesR3;
2312
2313 /** PGM offset based trees - R3 Ptr. */
2314 R3PTRTYPE(PPGMTREES) pTreesR3;
2315 /** PGM offset based trees - R0 Ptr. */
2316 R0PTRTYPE(PPGMTREES) pTreesR0;
2317 /** PGM offset based trees - RC Ptr. */
2318 RCPTRTYPE(PPGMTREES) pTreesRC;
2319
2320 /** Linked list of GC mappings - for RC.
2321 * The list is sorted ascending on address.
2322 */
2323 RCPTRTYPE(PPGMMAPPING) pMappingsRC;
2324 /** Linked list of GC mappings - for HC.
2325 * The list is sorted ascending on address.
2326 */
2327 R3PTRTYPE(PPGMMAPPING) pMappingsR3;
2328 /** Linked list of GC mappings - for R0.
2329 * The list is sorted ascending on address.
2330 */
2331 R0PTRTYPE(PPGMMAPPING) pMappingsR0;
2332
2333 /** Pointer to the 5 page CR3 content mapping.
2334 * The first page is always the CR3 (in some form) while the 4 other pages
2335 * are used for the PDs in PAE mode. */
2336 RTGCPTR GCPtrCR3Mapping;
2337#if HC_ARCH_BITS == 64 && GC_ARCH_BITS == 32
2338 uint32_t u32Alignment;
2339#endif
2340
2341 /** Indicates that PGMR3FinalizeMappings has been called and that further
2342 * PGMR3MapIntermediate calls will be rejected. */
2343 bool fFinalizedMappings;
2344 /** If set no conflict checks are required. (boolean) */
2345 bool fMappingsFixed;
2346 /** If set, then no mappings are put into the shadow page table. (boolean) */
2347 bool fDisableMappings;
2348 /** Size of fixed mapping */
2349 uint32_t cbMappingFixed;
2350 /** Base address (GC) of fixed mapping */
2351 RTGCPTR GCPtrMappingFixed;
2352 /** The address of the previous RAM range mapping. */
2353 RTGCPTR GCPtrPrevRamRangeMapping;
2354
2355 /** @name Intermediate Context
2356 * @{ */
2357 /** Pointer to the intermediate page directory - Normal. */
2358 R3PTRTYPE(PX86PD) pInterPD;
2359 /** Pointer to the intermediate page tables - Normal.
2360 * There are two page tables, one for the identity mapping and one for
2361 * the host context mapping (of the core code). */
2362 R3PTRTYPE(PX86PT) apInterPTs[2];
2363 /** Pointer to the intermediate page tables - PAE. */
2364 R3PTRTYPE(PX86PTPAE) apInterPaePTs[2];
2365 /** Pointer to the intermediate page directories - PAE. */
2366 R3PTRTYPE(PX86PDPAE) apInterPaePDs[4];
2367 /** Pointer to the intermediate page directory pointer table - PAE. */
2368 R3PTRTYPE(PX86PDPT) pInterPaePDPT;
2369 /** Pointer to the intermediate page-map level 4 - AMD64. */
2370 R3PTRTYPE(PX86PML4) pInterPaePML4;
2371 /** Pointer to the intermediate page directory pointer table - AMD64. */
2372 R3PTRTYPE(PX86PDPT) pInterPaePDPT64;
2373 /** The Physical Address (HC) of the intermediate Page Directory - Normal. */
2374 RTHCPHYS HCPhysInterPD;
2375 /** The Physical Address (HC) of the intermediate Page Directory Pointer Table - PAE. */
2376 RTHCPHYS HCPhysInterPaePDPT;
2377 /** The Physical Address (HC) of the intermediate Page Map Level 4 table - AMD64. */
2378 RTHCPHYS HCPhysInterPaePML4;
2379 /** @} */
2380
2381 /** Base address of the dynamic page mapping area.
2382 * The array is MM_HYPER_DYNAMIC_SIZE bytes big.
2383 */
2384 RCPTRTYPE(uint8_t *) pbDynPageMapBaseGC;
2385 /** The index of the last entry used in the dynamic page mapping area. */
2386 RTUINT iDynPageMapLast;
2387 /** Cache containing the last entries in the dynamic page mapping area.
2388 * The cache size covers half of the mapping area. */
2389 RTHCPHYS aHCPhysDynPageMapCache[MM_HYPER_DYNAMIC_SIZE >> (PAGE_SHIFT + 1)];
2390 /** Keep a lock counter for the full (!) mapping area. */
2391 uint32_t aLockedDynPageMapCache[MM_HYPER_DYNAMIC_SIZE >> (PAGE_SHIFT)];
2392
2393 /** The address of the ring-0 mapping cache if we're making use of it. */
2394 RTR0PTR pvR0DynMapUsed;
2395
2396 /** PGM critical section.
2397 * This protects the physical & virtual access handlers, ram ranges,
2398 * and the page flag updating (some of it anyway).
2399 */
2400 PDMCRITSECT CritSect;
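 /** @par Example
  * A sketch of the expected usage pattern, assuming the pgmLock/pgmUnlock
  * helpers declared elsewhere in this file:
  * @code
  *  pgmLock(pVM);
  *  // ... walk pRamRangesR3, update access handlers, etc. ...
  *  pgmUnlock(pVM);
  * @endcode
  */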
2401
2402 /** Pointer to SHW+GST mode data (function pointers).
2403 * The index into this table is made up from the shadow and guest paging mode types. */
2404 R3PTRTYPE(PPGMMODEDATA) paModeData;
2405
2406 /** Shadow Page Pool - R3 Ptr. */
2407 R3PTRTYPE(PPGMPOOL) pPoolR3;
2408 /** Shadow Page Pool - R0 Ptr. */
2409 R0PTRTYPE(PPGMPOOL) pPoolR0;
2410 /** Shadow Page Pool - RC Ptr. */
2411 RCPTRTYPE(PPGMPOOL) pPoolRC;
2412
2413 /** We're not in a state which permits writes to guest memory.
2414 * (Only used in strict builds.) */
2415 bool fNoMorePhysWrites;
2416
2417 /**
2418 * Data associated with managing the ring-3 mappings of the allocation chunks.
2419 */
2420 struct
2421 {
2422 /** The chunk tree, ordered by chunk id. */
2423#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
2424 R3PTRTYPE(PAVLU32NODECORE) pTree;
2425#else
2426 R3R0PTRTYPE(PAVLU32NODECORE) pTree;
2427#endif
2428 /** The chunk mapping TLB. */
2429 PGMCHUNKR3MAPTLB Tlb;
2430 /** The number of mapped chunks. */
2431 uint32_t c;
2432 /** The maximum number of mapped chunks.
2433 * @cfgm PGM/MaxRing3Chunks */
2434 uint32_t cMax;
2435 /** The chunk age tree, ordered by ageing sequence number. */
2436 R3PTRTYPE(PAVLLU32NODECORE) pAgeTree;
2437 /** The current time. */
2438 uint32_t iNow;
2439 /** Number of pgmR3PhysChunkFindUnmapCandidate calls left to the next ageing. */
2440 uint32_t AgeingCountdown;
2441 } ChunkR3Map;
2442
2443 /**
2444 * The page mapping TLB for ring-3 and (for the time being) ring-0.
2445 */
2446 PGMPAGER3MAPTLB PhysTlbHC;
2447
2448 /** @name The zero page.
2449 * @{ */
2450 /** The host physical address of the zero page. */
2451 RTHCPHYS HCPhysZeroPg;
2452 /** The ring-3 mapping of the zero page. */
2453 RTR3PTR pvZeroPgR3;
2454 /** The ring-0 mapping of the zero page. */
2455 RTR0PTR pvZeroPgR0;
2456 /** The GC mapping of the zero page. */
2457 RTGCPTR pvZeroPgRC;
2458#if GC_ARCH_BITS != 32
2459 uint32_t u32ZeroAlignment; /**< Alignment padding. */
2460#endif
2461 /** @}*/
2462
2463 /** The number of handy pages. */
2464 uint32_t cHandyPages;
2465 /**
2466 * Array of handy pages.
2467 *
2468 * This array is used in a two way communication between pgmPhysAllocPage
2469 * and GMMR0AllocateHandyPages, with PGMR3PhysAllocateHandyPages serving as
2470 * an intermediary.
2471 *
2472 * The size of this array is important, see pgmPhysEnsureHandyPage for details.
2473 * (The current size of 32 pages means 128 KB of handy memory.)
2474 */
2475 GMMPAGEDESC aHandyPages[PGM_HANDY_PAGES];
2476
2477 /** @name Error injection.
2478 * @{ */
2479 /** Inject handy page allocation errors pretending we're completely out of
2480 * memory. */
2481 bool volatile fErrInjHandyPages;
2482 /** Padding. */
2483 bool afReserved[7];
2484 /** @} */
2485
2486 /** @name Release Statistics
2487 * @{ */
2488 uint32_t cAllPages; /**< The total number of pages. (Should be Private + Shared + Zero.) */
2489 uint32_t cPrivatePages; /**< The number of private pages. */
2490 uint32_t cSharedPages; /**< The number of shared pages. */
2491 uint32_t cZeroPages; /**< The number of zero backed pages. */
2492
2493 /** The number of times we were forced to change the hypervisor region location. */
2494 STAMCOUNTER cRelocations;
2495 /** @} */
2496
2497#ifdef VBOX_WITH_STATISTICS /** @todo move this chunk to the heap. */
2498 /* R3 only: */
2499 STAMCOUNTER StatR3DetectedConflicts; /**< R3: Number of times PGMR3MapHasConflicts() detected a conflict. */
2500 STAMPROFILE StatR3ResolveConflict; /**< R3: pgmR3SyncPTResolveConflict() profiling (includes the entire relocation). */
2501
2502 STAMCOUNTER StatRZChunkR3MapTlbHits; /**< RC/R0: Ring-3/0 chunk mapper TLB hits. */
2503 STAMCOUNTER StatRZChunkR3MapTlbMisses; /**< RC/R0: Ring-3/0 chunk mapper TLB misses. */
2504 STAMCOUNTER StatRZPageMapTlbHits; /**< RC/R0: Ring-3/0 page mapper TLB hits. */
2505 STAMCOUNTER StatRZPageMapTlbMisses; /**< RC/R0: Ring-3/0 page mapper TLB misses. */
2506 STAMCOUNTER StatR3ChunkR3MapTlbHits; /**< R3: Ring-3/0 chunk mapper TLB hits. */
2507 STAMCOUNTER StatR3ChunkR3MapTlbMisses; /**< R3: Ring-3/0 chunk mapper TLB misses. */
2508 STAMCOUNTER StatR3PageMapTlbHits; /**< R3: Ring-3/0 page mapper TLB hits. */
2509 STAMCOUNTER StatR3PageMapTlbMisses; /**< R3: Ring-3/0 page mapper TLB misses. */
2510 STAMPROFILE StatRZSyncCR3HandlerVirtualReset; /**< RC/R0: Profiling of the virtual handler resets. */
2511 STAMPROFILE StatRZSyncCR3HandlerVirtualUpdate; /**< RC/R0: Profiling of the virtual handler updates. */
2512 STAMPROFILE StatR3SyncCR3HandlerVirtualReset; /**< R3: Profiling of the virtual handler resets. */
2513 STAMPROFILE StatR3SyncCR3HandlerVirtualUpdate; /**< R3: Profiling of the virtual handler updates. */
2514 STAMCOUNTER StatR3PhysHandlerReset; /**< R3: The number of times PGMHandlerPhysicalReset is called. */
2515 STAMCOUNTER StatRZPhysHandlerReset; /**< RC/R0: The number of times PGMHandlerPhysicalReset is called. */
2516 STAMPROFILE StatRZVirtHandlerSearchByPhys; /**< RC/R0: Profiling of pgmHandlerVirtualFindByPhysAddr. */
2517 STAMPROFILE StatR3VirtHandlerSearchByPhys; /**< R3: Profiling of pgmHandlerVirtualFindByPhysAddr. */
2518 STAMCOUNTER StatRZPageReplaceShared; /**< RC/R0: Times a shared page has been replaced by a private one. */
2519 STAMCOUNTER StatRZPageReplaceZero; /**< RC/R0: Times the zero page has been replaced by a private one. */
2520/// @todo STAMCOUNTER StatRZPageHandyAllocs; /**< RC/R0: The number of times we've executed GMMR3AllocateHandyPages. */
2521 STAMCOUNTER StatR3PageReplaceShared; /**< R3: Times a shared page has been replaced by a private one. */
2522 STAMCOUNTER StatR3PageReplaceZero; /**< R3: Times the zero page has been replaced by a private one. */
2523/// @todo STAMCOUNTER StatR3PageHandyAllocs; /**< R3: The number of times we've executed GMMR3AllocateHandyPages. */
2524
2525 /* RC only: */
2526 STAMCOUNTER StatRCDynMapCacheMisses; /**< RC: The number of dynamic page mapping cache misses */
2527 STAMCOUNTER StatRCDynMapCacheHits; /**< RC: The number of dynamic page mapping cache hits */
2528 STAMCOUNTER StatRCInvlPgConflict; /**< RC: Number of times PGMInvalidatePage() detected a mapping conflict. */
2529 STAMCOUNTER StatRCInvlPgSyncMonCR3; /**< RC: Number of times PGMInvalidatePage() ran into PGM_SYNC_MONITOR_CR3. */
2530
2531 STAMCOUNTER StatRZPhysRead;
2532 STAMCOUNTER StatRZPhysReadBytes;
2533 STAMCOUNTER StatRZPhysWrite;
2534 STAMCOUNTER StatRZPhysWriteBytes;
2535 STAMCOUNTER StatR3PhysRead;
2536 STAMCOUNTER StatR3PhysReadBytes;
2537 STAMCOUNTER StatR3PhysWrite;
2538 STAMCOUNTER StatR3PhysWriteBytes;
2539 STAMCOUNTER StatRCPhysRead;
2540 STAMCOUNTER StatRCPhysReadBytes;
2541 STAMCOUNTER StatRCPhysWrite;
2542 STAMCOUNTER StatRCPhysWriteBytes;
2543
2544 STAMCOUNTER StatRZPhysSimpleRead;
2545 STAMCOUNTER StatRZPhysSimpleReadBytes;
2546 STAMCOUNTER StatRZPhysSimpleWrite;
2547 STAMCOUNTER StatRZPhysSimpleWriteBytes;
2548 STAMCOUNTER StatR3PhysSimpleRead;
2549 STAMCOUNTER StatR3PhysSimpleReadBytes;
2550 STAMCOUNTER StatR3PhysSimpleWrite;
2551 STAMCOUNTER StatR3PhysSimpleWriteBytes;
2552 STAMCOUNTER StatRCPhysSimpleRead;
2553 STAMCOUNTER StatRCPhysSimpleReadBytes;
2554 STAMCOUNTER StatRCPhysSimpleWrite;
2555 STAMCOUNTER StatRCPhysSimpleWriteBytes;
2556
2557# ifdef PGMPOOL_WITH_GCPHYS_TRACKING
2558 STAMCOUNTER StatTrackVirgin; /**< The number of first time shadowings. */
2559 STAMCOUNTER StatTrackAliased; /**< The number of times switching to cRef2, i.e. the page is being shadowed by two PTs. */
2560 STAMCOUNTER StatTrackAliasedMany; /**< The number of times we're tracking using cRef2. */
2561 STAMCOUNTER StatTrackAliasedLots; /**< The number of times we're hitting pages which have overflowed cRef2. */
2562 STAMCOUNTER StatTrackOverflows; /**< The number of times the extent list grows too long. */
2563 STAMPROFILE StatTrackDeref; /**< Profiling of SyncPageWorkerTrackDeref (expensive). */
2564# endif
2565#endif
2566} PGM;
2567/** Pointer to the PGM instance data. */
2568typedef PGM *PPGM;
2569
2570
2571/**
2572 * Converts a PGMCPU pointer into a VM pointer.
2573 * @returns Pointer to the VM structure the PGM is part of.
2574 * @param pPGM Pointer to PGMCPU instance data.
2575 */
2576#define PGMCPU2VM(pPGM) ( (PVM)((char*)pPGM - pPGM->offVM) )
2577
2578/**
2579 * Converts a PGMCPU pointer into a PGM pointer.
2580 * @returns Pointer to the PGM instance data the PGMCPU is part of.
2581 * @param pPGMCpu Pointer to PGMCPU instance data.
2582 */
2583#define PGMCPU2PGM(pPGMCpu) ( (PPGM)((char*)pPGMCpu - pPGMCpu->offPGM) )
2584
2585/**
2586 * PGMCPU Data (part of VMCPU).
2587 */
2588typedef struct PGMCPU
2589{
2590 /** Offset to the VM structure. */
2591 RTINT offVM;
2592 /** Offset to the VMCPU structure. */
2593 RTINT offVCpu;
2594 /** Offset of the PGM structure relative to VMCPU. */
2595 RTINT offPGM;
2596 RTINT uPadding0; /**< structure size alignment. */
2597
2598#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
2599 /** Automatically tracked physical memory mapping set.
2600 * Ring-0 and strict raw-mode builds. */
2601 PGMMAPSET AutoSet;
2602#endif
2603
2604 /** A20 gate mask.
2605 * Our current approach to A20 emulation is to let REM do it and not bother
2606 * anywhere else. The interesting guests will be operating with it enabled anyway.
2607 * But should the need arise, we'll subject physical addresses to this mask. */
2608 RTGCPHYS GCPhysA20Mask;
2609 /** A20 gate state - boolean! */
2610 bool fA20Enabled;
2611
2612 /** What needs syncing (PGM_SYNC_*).
2613 * This is used to queue operations for PGMSyncCR3, PGMInvalidatePage,
2614 * PGMFlushTLB, and PGMR3Load. */
2615 RTUINT fSyncFlags;
2616
2617 /** The shadow paging mode. */
2618 PGMMODE enmShadowMode;
2619 /** The guest paging mode. */
2620 PGMMODE enmGuestMode;
2621
2622 /** The physical address currently held in the guest CR3 register. */
2623 RTGCPHYS GCPhysCR3;
2624
2625 /** @name 32-bit Guest Paging.
2626 * @{ */
2627 /** The guest's page directory, R3 pointer. */
2628 R3PTRTYPE(PX86PD) pGst32BitPdR3;
2629#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
2630 /** The guest's page directory, R0 pointer. */
2631 R0PTRTYPE(PX86PD) pGst32BitPdR0;
2632#endif
2633 /** The guest's page directory, static RC mapping. */
2634 RCPTRTYPE(PX86PD) pGst32BitPdRC;
2635 /** @} */
2636
2637 /** @name PAE Guest Paging.
2638 * @{ */
2639 /** The guest's page directory pointer table, static RC mapping. */
2640 RCPTRTYPE(PX86PDPT) pGstPaePdptRC;
2641 /** The guest's page directory pointer table, R3 pointer. */
2642 R3PTRTYPE(PX86PDPT) pGstPaePdptR3;
2643#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
2644 /** The guest's page directory pointer table, R0 pointer. */
2645 R0PTRTYPE(PX86PDPT) pGstPaePdptR0;
2646#endif
2647
2648 /** The guest's page directories, R3 pointers.
2649 * These are individual pointers and don't have to be adjacent.
2650 * These don't have to be up-to-date - use pgmGstGetPaePD() to access them. */
2651 R3PTRTYPE(PX86PDPAE) apGstPaePDsR3[4];
2652 /** The guest's page directories, R0 pointers.
2653 * Same restrictions as apGstPaePDsR3. */
2654#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
2655 R0PTRTYPE(PX86PDPAE) apGstPaePDsR0[4];
2656#endif
2657 /** The guest's page directories, static GC mapping.
2658 * Unlike the R3/R0 array the first entry can be accessed as a 2048 entry PD.
2659 * These don't have to be up-to-date - use pgmGstGetPaePD() to access them. */
2660 RCPTRTYPE(PX86PDPAE) apGstPaePDsRC[4];
2661 /** The physical addresses of the guest page directories (PAE) pointed to by apGstPaePDsR3/R0/RC. */
2662 RTGCPHYS aGCPhysGstPaePDs[4];
2663 /** The physical addresses of the monitored guest page directories (PAE). */
2664 RTGCPHYS aGCPhysGstPaePDsMonitored[4];
2665 /** @} */
2666
2667 /** @name AMD64 Guest Paging.
2668 * @{ */
2669 /** The guest's page directory pointer table, R3 pointer. */
2670 R3PTRTYPE(PX86PML4) pGstAmd64Pml4R3;
2671#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
2672 /** The guest's page directory pointer table, R0 pointer. */
2673 R0PTRTYPE(PX86PML4) pGstAmd64Pml4R0;
2674#endif
2675 /** @} */
2676
2677 /** Pointer to the page of the current active CR3 - R3 Ptr. */
2678 R3PTRTYPE(PPGMPOOLPAGE) pShwPageCR3R3;
2679 /** Pointer to the page of the current active CR3 - R0 Ptr. */
2680 R0PTRTYPE(PPGMPOOLPAGE) pShwPageCR3R0;
2681 /** Pointer to the page of the current active CR3 - RC Ptr. */
2682 RCPTRTYPE(PPGMPOOLPAGE) pShwPageCR3RC;
2683 /** The shadow page pool index of the user table as specified during allocation; useful for freeing root pages. */
2684 uint32_t iShwUser;
2685 /** The index into the user table (shadowed) as specified during allocation; useful for freeing root pages. */
2686 uint32_t iShwUserTable;
2687# if HC_ARCH_BITS == 64
2688 RTRCPTR alignment6; /**< structure size alignment. */
2689# endif
2690 /** @} */
2691
2692 /** @name Function pointers for Shadow paging.
2693 * @{
2694 */
2695 DECLR3CALLBACKMEMBER(int, pfnR3ShwRelocate,(PVMCPU pVCpu, RTGCPTR offDelta));
2696 DECLR3CALLBACKMEMBER(int, pfnR3ShwExit,(PVMCPU pVCpu));
2697 DECLR3CALLBACKMEMBER(int, pfnR3ShwGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
2698 DECLR3CALLBACKMEMBER(int, pfnR3ShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
2699
2700 DECLRCCALLBACKMEMBER(int, pfnRCShwGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
2701 DECLRCCALLBACKMEMBER(int, pfnRCShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
2702
2703 DECLR0CALLBACKMEMBER(int, pfnR0ShwGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
2704 DECLR0CALLBACKMEMBER(int, pfnR0ShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
2705
2706 /** @} */
2707
2708 /** @name Function pointers for Guest paging.
2709 * @{
2710 */
2711 DECLR3CALLBACKMEMBER(int, pfnR3GstRelocate,(PVMCPU pVCpu, RTGCPTR offDelta));
2712 DECLR3CALLBACKMEMBER(int, pfnR3GstExit,(PVMCPU pVCpu));
2713 DECLR3CALLBACKMEMBER(int, pfnR3GstGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
2714 DECLR3CALLBACKMEMBER(int, pfnR3GstModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
2715 DECLR3CALLBACKMEMBER(int, pfnR3GstGetPDE,(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDEPAE pPde));
2716 DECLRCCALLBACKMEMBER(int, pfnRCGstGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
2717 DECLRCCALLBACKMEMBER(int, pfnRCGstModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
2718 DECLRCCALLBACKMEMBER(int, pfnRCGstGetPDE,(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDEPAE pPde));
2719#if HC_ARCH_BITS == 64
2720 RTRCPTR alignment3; /**< structure size alignment. */
2721#endif
2722
2723 DECLR0CALLBACKMEMBER(int, pfnR0GstGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
2724 DECLR0CALLBACKMEMBER(int, pfnR0GstModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
2725 DECLR0CALLBACKMEMBER(int, pfnR0GstGetPDE,(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDEPAE pPde));
2726 /** @} */
2727
2728 /** @name Function pointers for Both Shadow and Guest paging.
2729 * @{
2730 */
2731 DECLR3CALLBACKMEMBER(int, pfnR3BthRelocate,(PVMCPU pVCpu, RTGCPTR offDelta));
2732 /* no pfnR3BthTrap0eHandler */
2733 DECLR3CALLBACKMEMBER(int, pfnR3BthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
2734 DECLR3CALLBACKMEMBER(int, pfnR3BthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
2735 DECLR3CALLBACKMEMBER(int, pfnR3BthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
2736 DECLR3CALLBACKMEMBER(int, pfnR3BthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
2737 DECLR3CALLBACKMEMBER(int, pfnR3BthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
2738 DECLR3CALLBACKMEMBER(unsigned, pfnR3BthAssertCR3,(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
2739 DECLR3CALLBACKMEMBER(int, pfnR3BthMapCR3,(PVMCPU pVCpu, RTGCPHYS GCPhysCR3));
2740 DECLR3CALLBACKMEMBER(int, pfnR3BthUnmapCR3,(PVMCPU pVCpu));
2741
2742 DECLR0CALLBACKMEMBER(int, pfnR0BthTrap0eHandler,(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
2743 DECLR0CALLBACKMEMBER(int, pfnR0BthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
2744 DECLR0CALLBACKMEMBER(int, pfnR0BthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
2745 DECLR0CALLBACKMEMBER(int, pfnR0BthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
2746 DECLR0CALLBACKMEMBER(int, pfnR0BthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
2747 DECLR0CALLBACKMEMBER(int, pfnR0BthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
2748 DECLR0CALLBACKMEMBER(unsigned, pfnR0BthAssertCR3,(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
2749 DECLR0CALLBACKMEMBER(int, pfnR0BthMapCR3,(PVMCPU pVCpu, RTGCPHYS GCPhysCR3));
2750 DECLR0CALLBACKMEMBER(int, pfnR0BthUnmapCR3,(PVMCPU pVCpu));
2751
2752 DECLRCCALLBACKMEMBER(int, pfnRCBthTrap0eHandler,(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
2753 DECLRCCALLBACKMEMBER(int, pfnRCBthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
2754 DECLRCCALLBACKMEMBER(int, pfnRCBthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
2755 DECLRCCALLBACKMEMBER(int, pfnRCBthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
2756 DECLRCCALLBACKMEMBER(int, pfnRCBthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
2757 DECLRCCALLBACKMEMBER(int, pfnRCBthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
2758 DECLRCCALLBACKMEMBER(unsigned, pfnRCBthAssertCR3,(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
2759 DECLRCCALLBACKMEMBER(int, pfnRCBthMapCR3,(PVMCPU pVCpu, RTGCPHYS GCPhysCR3));
2760 DECLRCCALLBACKMEMBER(int, pfnRCBthUnmapCR3,(PVMCPU pVCpu));
2761#if HC_ARCH_BITS == 64
2762 RTRCPTR alignment2; /**< structure size alignment. */
2763#endif
2764 /** @} */
2765
2766 /** For saving stack space, the disassembler state is allocated here instead of
2767 * on the stack.
2768 * @note The DISCPUSTATE structure is not R3/R0/RZ clean! */
2769 union
2770 {
2771 /** The disassembler scratch space. */
2772 DISCPUSTATE DisState;
2773 /** Padding. */
2774 uint8_t abDisStatePadding[DISCPUSTATE_PADDING_SIZE];
2775 };
2776
2777    /* Counts the number of PGM pool access handler calls. */
2778 uint64_t cPoolAccessHandler;
2779
2780 /** @name Release Statistics
2781 * @{ */
2782 /** The number of times the guest has switched mode since last reset or statistics reset. */
2783 STAMCOUNTER cGuestModeChanges;
2784 /** @} */
2785
2786#ifdef VBOX_WITH_STATISTICS /** @todo move this chunk to the heap. */
2787 /** @name Statistics
2788 * @{ */
2789 /** RC: Which statistic this \#PF should be attributed to. */
2790 RCPTRTYPE(PSTAMPROFILE) pStatTrap0eAttributionRC;
2791 RTRCPTR padding0;
2792 /** R0: Which statistic this \#PF should be attributed to. */
2793 R0PTRTYPE(PSTAMPROFILE) pStatTrap0eAttributionR0;
2794 RTR0PTR padding1;
2795
2796 /* Common */
2797 STAMCOUNTER StatSyncPtPD[X86_PG_ENTRIES]; /**< SyncPT - PD distribution. */
2798 STAMCOUNTER StatSyncPagePD[X86_PG_ENTRIES]; /**< SyncPage - PD distribution. */
2799
2800 /* R0 only: */
2801 STAMCOUNTER StatR0DynMapMigrateInvlPg; /**< R0: invlpg in PGMDynMapMigrateAutoSet. */
2802 STAMPROFILE StatR0DynMapGCPageInl; /**< R0: Calls to pgmR0DynMapGCPageInlined. */
2803 STAMCOUNTER StatR0DynMapGCPageInlHits; /**< R0: Hash table lookup hits. */
2804    STAMCOUNTER StatR0DynMapGCPageInlMisses;      /**< R0: Misses that fall back to code common with PGMDynMapHCPage. */
2805 STAMCOUNTER StatR0DynMapGCPageInlRamHits; /**< R0: 1st ram range hits. */
2806 STAMCOUNTER StatR0DynMapGCPageInlRamMisses; /**< R0: 1st ram range misses, takes slow path. */
2807 STAMPROFILE StatR0DynMapHCPageInl; /**< R0: Calls to pgmR0DynMapHCPageInlined. */
2808 STAMCOUNTER StatR0DynMapHCPageInlHits; /**< R0: Hash table lookup hits. */
2809    STAMCOUNTER StatR0DynMapHCPageInlMisses;      /**< R0: Misses that fall back to code common with PGMDynMapHCPage. */
2810 STAMPROFILE StatR0DynMapHCPage; /**< R0: Calls to PGMDynMapHCPage. */
2811 STAMCOUNTER StatR0DynMapSetOptimize; /**< R0: Calls to pgmDynMapOptimizeAutoSet. */
2812    STAMCOUNTER StatR0DynMapSetSearchFlushes;     /**< R0: Set searches resorting to subset flushes. */
2813 STAMCOUNTER StatR0DynMapSetSearchHits; /**< R0: Set search hits. */
2814 STAMCOUNTER StatR0DynMapSetSearchMisses; /**< R0: Set search misses. */
2815 STAMCOUNTER StatR0DynMapPage; /**< R0: Calls to pgmR0DynMapPage. */
2816 STAMCOUNTER StatR0DynMapPageHits0; /**< R0: Hits at iPage+0. */
2817 STAMCOUNTER StatR0DynMapPageHits1; /**< R0: Hits at iPage+1. */
2818 STAMCOUNTER StatR0DynMapPageHits2; /**< R0: Hits at iPage+2. */
2819 STAMCOUNTER StatR0DynMapPageInvlPg; /**< R0: invlpg. */
2820 STAMCOUNTER StatR0DynMapPageSlow; /**< R0: Calls to pgmR0DynMapPageSlow. */
2821 STAMCOUNTER StatR0DynMapPageSlowLoopHits; /**< R0: Hits in the pgmR0DynMapPageSlow search loop. */
2822 STAMCOUNTER StatR0DynMapPageSlowLoopMisses; /**< R0: Misses in the pgmR0DynMapPageSlow search loop. */
2823 //STAMCOUNTER StatR0DynMapPageSlowLostHits; /**< R0: Lost hits. */
2824 STAMCOUNTER StatR0DynMapSubsets; /**< R0: Times PGMDynMapPushAutoSubset was called. */
2825 STAMCOUNTER StatR0DynMapPopFlushes; /**< R0: Times PGMDynMapPopAutoSubset flushes the subset. */
2826 STAMCOUNTER aStatR0DynMapSetSize[11]; /**< R0: Set size distribution. */
2827
2828 /* RZ only: */
2829 STAMPROFILE StatRZTrap0e; /**< RC/R0: PGMTrap0eHandler() profiling. */
2830    STAMPROFILE StatRZTrap0eTimeCheckPageFault;     /**< RC/R0: Profiling of the page fault check part of Trap0eHandler. */
2831    STAMPROFILE StatRZTrap0eTimeSyncPT;             /**< RC/R0: Profiling of the SyncPT part of Trap0eHandler. */
2832    STAMPROFILE StatRZTrap0eTimeMapping;            /**< RC/R0: Profiling of the mapping part of Trap0eHandler. */
2833    STAMPROFILE StatRZTrap0eTimeOutOfSync;          /**< RC/R0: Profiling of the out-of-sync part of Trap0eHandler. */
2834    STAMPROFILE StatRZTrap0eTimeHandlers;           /**< RC/R0: Profiling of the access handler part of Trap0eHandler. */
2835 STAMPROFILE StatRZTrap0eTime2CSAM; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is CSAM. */
2836 STAMPROFILE StatRZTrap0eTime2DirtyAndAccessed; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is dirty and/or accessed bit emulation. */
2837 STAMPROFILE StatRZTrap0eTime2GuestTrap; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is a guest trap. */
2838 STAMPROFILE StatRZTrap0eTime2HndPhys; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is a physical handler. */
2839 STAMPROFILE StatRZTrap0eTime2HndVirt; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is a virtual handler. */
2840 STAMPROFILE StatRZTrap0eTime2HndUnhandled; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is access outside the monitored areas of a monitored page. */
2841 STAMPROFILE StatRZTrap0eTime2Misc; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is not known. */
2842 STAMPROFILE StatRZTrap0eTime2OutOfSync; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is an out-of-sync page. */
2843 STAMPROFILE StatRZTrap0eTime2OutOfSyncHndPhys; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is an out-of-sync physical handler page. */
2844 STAMPROFILE StatRZTrap0eTime2OutOfSyncHndVirt; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is an out-of-sync virtual handler page. */
2845 STAMPROFILE StatRZTrap0eTime2OutOfSyncHndObs; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is an obsolete handler page. */
2846 STAMPROFILE StatRZTrap0eTime2SyncPT; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is lazy syncing of a PT. */
2847 STAMCOUNTER StatRZTrap0eConflicts; /**< RC/R0: The number of times \#PF was caused by an undetected conflict. */
2848 STAMCOUNTER StatRZTrap0eHandlersMapping; /**< RC/R0: Number of traps due to access handlers in mappings. */
2849 STAMCOUNTER StatRZTrap0eHandlersOutOfSync; /**< RC/R0: Number of out-of-sync handled pages. */
2850 STAMCOUNTER StatRZTrap0eHandlersPhysical; /**< RC/R0: Number of traps due to physical access handlers. */
2851 STAMCOUNTER StatRZTrap0eHandlersVirtual; /**< RC/R0: Number of traps due to virtual access handlers. */
2852 STAMCOUNTER StatRZTrap0eHandlersVirtualByPhys; /**< RC/R0: Number of traps due to virtual access handlers found by physical address. */
2853 STAMCOUNTER StatRZTrap0eHandlersVirtualUnmarked;/**< RC/R0: Number of traps due to virtual access handlers found by virtual address (without proper physical flags). */
2854 STAMCOUNTER StatRZTrap0eHandlersUnhandled; /**< RC/R0: Number of traps due to access outside range of monitored page(s). */
2855 STAMCOUNTER StatRZTrap0eHandlersInvalid; /**< RC/R0: Number of traps due to access to invalid physical memory. */
2856 STAMCOUNTER StatRZTrap0eUSNotPresentRead; /**< RC/R0: #PF err kind */
2857 STAMCOUNTER StatRZTrap0eUSNotPresentWrite; /**< RC/R0: #PF err kind */
2858 STAMCOUNTER StatRZTrap0eUSWrite; /**< RC/R0: #PF err kind */
2859 STAMCOUNTER StatRZTrap0eUSReserved; /**< RC/R0: #PF err kind */
2860 STAMCOUNTER StatRZTrap0eUSNXE; /**< RC/R0: #PF err kind */
2861 STAMCOUNTER StatRZTrap0eUSRead; /**< RC/R0: #PF err kind */
2862 STAMCOUNTER StatRZTrap0eSVNotPresentRead; /**< RC/R0: #PF err kind */
2863 STAMCOUNTER StatRZTrap0eSVNotPresentWrite; /**< RC/R0: #PF err kind */
2864 STAMCOUNTER StatRZTrap0eSVWrite; /**< RC/R0: #PF err kind */
2865 STAMCOUNTER StatRZTrap0eSVReserved; /**< RC/R0: #PF err kind */
2866 STAMCOUNTER StatRZTrap0eSNXE; /**< RC/R0: #PF err kind */
2867 STAMCOUNTER StatRZTrap0eGuestPF; /**< RC/R0: Real guest #PFs. */
2868 STAMCOUNTER StatRZTrap0eGuestPFUnh; /**< RC/R0: Real guest #PF ending up at the end of the #PF code. */
2869 STAMCOUNTER StatRZTrap0eGuestPFMapping; /**< RC/R0: Real guest #PF to HMA or other mapping. */
2870 STAMCOUNTER StatRZTrap0eWPEmulInRZ; /**< RC/R0: WP=0 virtualization trap, handled. */
2871 STAMCOUNTER StatRZTrap0eWPEmulToR3; /**< RC/R0: WP=0 virtualization trap, chickened out. */
2872 STAMCOUNTER StatRZTrap0ePD[X86_PG_ENTRIES]; /**< RC/R0: PD distribution of the #PFs. */
2873 STAMCOUNTER StatRZGuestCR3WriteHandled; /**< RC/R0: The number of times WriteHandlerCR3() was successfully called. */
2874 STAMCOUNTER StatRZGuestCR3WriteUnhandled; /**< RC/R0: The number of times WriteHandlerCR3() was called and we had to fall back to the recompiler. */
2875 STAMCOUNTER StatRZGuestCR3WriteConflict; /**< RC/R0: The number of times WriteHandlerCR3() was called and a conflict was detected. */
2876 STAMCOUNTER StatRZGuestROMWriteHandled; /**< RC/R0: The number of times pgmPhysRomWriteHandler() was successfully called. */
2877    STAMCOUNTER StatRZGuestROMWriteUnhandled;   /**< RC/R0: The number of times pgmPhysRomWriteHandler() was called and we had to fall back to the recompiler. */
2878
2879 /* HC - R3 and (maybe) R0: */
2880
2881 /* RZ & R3: */
2882 STAMPROFILE StatRZSyncCR3; /**< RC/R0: PGMSyncCR3() profiling. */
2883 STAMPROFILE StatRZSyncCR3Handlers; /**< RC/R0: Profiling of the PGMSyncCR3() update handler section. */
2884 STAMCOUNTER StatRZSyncCR3Global; /**< RC/R0: The number of global CR3 syncs. */
2885 STAMCOUNTER StatRZSyncCR3NotGlobal; /**< RC/R0: The number of non-global CR3 syncs. */
2886 STAMCOUNTER StatRZSyncCR3DstCacheHit; /**< RC/R0: The number of times we got some kind of cache hit on a page table. */
2887 STAMCOUNTER StatRZSyncCR3DstFreed; /**< RC/R0: The number of times we've had to free a shadow entry. */
2888 STAMCOUNTER StatRZSyncCR3DstFreedSrcNP; /**< RC/R0: The number of times we've had to free a shadow entry for which the source entry was not present. */
2889 STAMCOUNTER StatRZSyncCR3DstNotPresent; /**< RC/R0: The number of times we've encountered a not present shadow entry for a present guest entry. */
2890 STAMCOUNTER StatRZSyncCR3DstSkippedGlobalPD; /**< RC/R0: The number of times a global page directory wasn't flushed. */
2891 STAMCOUNTER StatRZSyncCR3DstSkippedGlobalPT; /**< RC/R0: The number of times a page table with only global entries wasn't flushed. */
2892 STAMPROFILE StatRZSyncPT; /**< RC/R0: PGMSyncPT() profiling. */
2893 STAMCOUNTER StatRZSyncPTFailed; /**< RC/R0: The number of times PGMSyncPT() failed. */
2894 STAMCOUNTER StatRZSyncPT4K; /**< RC/R0: Number of 4KB syncs. */
2895 STAMCOUNTER StatRZSyncPT4M; /**< RC/R0: Number of 4MB syncs. */
2896    STAMCOUNTER StatRZSyncPagePDNAs;                /**< RC/R0: The number of times we've marked a PD not present from SyncPage to virtualize the accessed bit. */
2897    STAMCOUNTER StatRZSyncPagePDOutOfSync;          /**< RC/R0: The number of times we've encountered an out-of-sync PD in SyncPage. */
2898 STAMCOUNTER StatRZAccessedPage; /**< RC/R0: The number of pages marked not present for accessed bit emulation. */
2899    STAMPROFILE StatRZDirtyBitTracking;             /**< RC/R0: Profiling the dirty bit tracking in CheckPageFault(). */
2900    STAMCOUNTER StatRZDirtyPage;                    /**< RC/R0: The number of pages marked read-only for dirty bit tracking. */
2901    STAMCOUNTER StatRZDirtyPageBig;                 /**< RC/R0: The number of big (4MB) pages marked read-only for dirty bit tracking. */
2902    STAMCOUNTER StatRZDirtyPageSkipped;             /**< RC/R0: The number of pages already dirty or read-only. */
2903    STAMCOUNTER StatRZDirtyPageTrap;                /**< RC/R0: The number of traps generated for dirty bit tracking. */
2904    STAMCOUNTER StatRZDirtyPageStale;               /**< RC/R0: The number of traps generated for dirty bit tracking (stale TLB entries). */
2905    STAMCOUNTER StatRZDirtyTrackRealPF;             /**< RC/R0: The number of real page faults during dirty bit tracking. */
2906 STAMCOUNTER StatRZDirtiedPage; /**< RC/R0: The number of pages marked dirty because of write accesses. */
2907 STAMCOUNTER StatRZPageAlreadyDirty; /**< RC/R0: The number of pages already marked dirty because of write accesses. */
2908 STAMPROFILE StatRZInvalidatePage; /**< RC/R0: PGMInvalidatePage() profiling. */
2909 STAMCOUNTER StatRZInvalidatePage4KBPages; /**< RC/R0: The number of times PGMInvalidatePage() was called for a 4KB page. */
2910 STAMCOUNTER StatRZInvalidatePage4MBPages; /**< RC/R0: The number of times PGMInvalidatePage() was called for a 4MB page. */
2911 STAMCOUNTER StatRZInvalidatePage4MBPagesSkip; /**< RC/R0: The number of times PGMInvalidatePage() skipped a 4MB page. */
2912 STAMCOUNTER StatRZInvalidatePagePDMappings; /**< RC/R0: The number of times PGMInvalidatePage() was called for a page directory containing mappings (no conflict). */
2913 STAMCOUNTER StatRZInvalidatePagePDNAs; /**< RC/R0: The number of times PGMInvalidatePage() was called for a not accessed page directory. */
2914 STAMCOUNTER StatRZInvalidatePagePDNPs; /**< RC/R0: The number of times PGMInvalidatePage() was called for a not present page directory. */
2915 STAMCOUNTER StatRZInvalidatePagePDOutOfSync; /**< RC/R0: The number of times PGMInvalidatePage() was called for an out of sync page directory. */
2916    STAMCOUNTER StatRZInvalidatePageSkipped;        /**< RC/R0: The number of times PGMInvalidatePage() was skipped due to a not-present shadow PDE or a pending SyncCR3. */
2917    STAMCOUNTER StatRZPageOutOfSyncUser;            /**< RC/R0: The number of times an out-of-sync user page was detected in \#PF or VerifyAccessSyncPage. */
2918    STAMCOUNTER StatRZPageOutOfSyncSupervisor;      /**< RC/R0: The number of times an out-of-sync supervisor page was detected in \#PF or VerifyAccessSyncPage. */
2919 STAMPROFILE StatRZPrefetch; /**< RC/R0: PGMPrefetchPage. */
2920 STAMPROFILE StatRZFlushTLB; /**< RC/R0: Profiling of the PGMFlushTLB() body. */
2921 STAMCOUNTER StatRZFlushTLBNewCR3; /**< RC/R0: The number of times PGMFlushTLB was called with a new CR3, non-global. (switch) */
2922 STAMCOUNTER StatRZFlushTLBNewCR3Global; /**< RC/R0: The number of times PGMFlushTLB was called with a new CR3, global. (switch) */
2923 STAMCOUNTER StatRZFlushTLBSameCR3; /**< RC/R0: The number of times PGMFlushTLB was called with the same CR3, non-global. (flush) */
2924 STAMCOUNTER StatRZFlushTLBSameCR3Global; /**< RC/R0: The number of times PGMFlushTLB was called with the same CR3, global. (flush) */
2925    STAMPROFILE StatRZGstModifyPage;                /**< RC/R0: Profiling of the PGMGstModifyPage() body. */
2926
2927 STAMPROFILE StatR3SyncCR3; /**< R3: PGMSyncCR3() profiling. */
2928 STAMPROFILE StatR3SyncCR3Handlers; /**< R3: Profiling of the PGMSyncCR3() update handler section. */
2929 STAMCOUNTER StatR3SyncCR3Global; /**< R3: The number of global CR3 syncs. */
2930 STAMCOUNTER StatR3SyncCR3NotGlobal; /**< R3: The number of non-global CR3 syncs. */
2931 STAMCOUNTER StatR3SyncCR3DstFreed; /**< R3: The number of times we've had to free a shadow entry. */
2932 STAMCOUNTER StatR3SyncCR3DstFreedSrcNP; /**< R3: The number of times we've had to free a shadow entry for which the source entry was not present. */
2933 STAMCOUNTER StatR3SyncCR3DstNotPresent; /**< R3: The number of times we've encountered a not present shadow entry for a present guest entry. */
2934 STAMCOUNTER StatR3SyncCR3DstSkippedGlobalPD; /**< R3: The number of times a global page directory wasn't flushed. */
2935 STAMCOUNTER StatR3SyncCR3DstSkippedGlobalPT; /**< R3: The number of times a page table with only global entries wasn't flushed. */
2936 STAMCOUNTER StatR3SyncCR3DstCacheHit; /**< R3: The number of times we got some kind of cache hit on a page table. */
2937 STAMPROFILE StatR3SyncPT; /**< R3: PGMSyncPT() profiling. */
2938 STAMCOUNTER StatR3SyncPTFailed; /**< R3: The number of times PGMSyncPT() failed. */
2939 STAMCOUNTER StatR3SyncPT4K; /**< R3: Number of 4KB syncs. */
2940 STAMCOUNTER StatR3SyncPT4M; /**< R3: Number of 4MB syncs. */
2941    STAMCOUNTER StatR3SyncPagePDNAs;                /**< R3: The number of times we've marked a PD not present from SyncPage to virtualize the accessed bit. */
2942    STAMCOUNTER StatR3SyncPagePDOutOfSync;          /**< R3: The number of times we've encountered an out-of-sync PD in SyncPage. */
2943 STAMCOUNTER StatR3AccessedPage; /**< R3: The number of pages marked not present for accessed bit emulation. */
2944 STAMPROFILE StatR3DirtyBitTracking; /**< R3: Profiling the dirty bit tracking in CheckPageFault(). */
2945 STAMCOUNTER StatR3DirtyPage; /**< R3: The number of pages marked read-only for dirty bit tracking. */
2946    STAMCOUNTER StatR3DirtyPageBig;                 /**< R3: The number of big (4MB) pages marked read-only for dirty bit tracking. */
2947    STAMCOUNTER StatR3DirtyPageSkipped;             /**< R3: The number of pages already dirty or read-only. */
2948    STAMCOUNTER StatR3DirtyPageTrap;                /**< R3: The number of traps generated for dirty bit tracking. */
2949    STAMCOUNTER StatR3DirtyTrackRealPF;             /**< R3: The number of real page faults during dirty bit tracking. */
2950 STAMCOUNTER StatR3DirtiedPage; /**< R3: The number of pages marked dirty because of write accesses. */
2951 STAMCOUNTER StatR3PageAlreadyDirty; /**< R3: The number of pages already marked dirty because of write accesses. */
2952 STAMPROFILE StatR3InvalidatePage; /**< R3: PGMInvalidatePage() profiling. */
2953 STAMCOUNTER StatR3InvalidatePage4KBPages; /**< R3: The number of times PGMInvalidatePage() was called for a 4KB page. */
2954 STAMCOUNTER StatR3InvalidatePage4MBPages; /**< R3: The number of times PGMInvalidatePage() was called for a 4MB page. */
2955 STAMCOUNTER StatR3InvalidatePage4MBPagesSkip; /**< R3: The number of times PGMInvalidatePage() skipped a 4MB page. */
2956 STAMCOUNTER StatR3InvalidatePagePDNAs; /**< R3: The number of times PGMInvalidatePage() was called for a not accessed page directory. */
2957 STAMCOUNTER StatR3InvalidatePagePDNPs; /**< R3: The number of times PGMInvalidatePage() was called for a not present page directory. */
2958 STAMCOUNTER StatR3InvalidatePagePDMappings; /**< R3: The number of times PGMInvalidatePage() was called for a page directory containing mappings (no conflict). */
2959 STAMCOUNTER StatR3InvalidatePagePDOutOfSync; /**< R3: The number of times PGMInvalidatePage() was called for an out of sync page directory. */
2960    STAMCOUNTER StatR3InvalidatePageSkipped;        /**< R3: The number of times PGMInvalidatePage() was skipped due to a not-present shadow PDE or a pending SyncCR3. */
2961    STAMCOUNTER StatR3PageOutOfSyncUser;            /**< R3: The number of times an out-of-sync user page was detected in \#PF or VerifyAccessSyncPage. */
2962    STAMCOUNTER StatR3PageOutOfSyncSupervisor;      /**< R3: The number of times an out-of-sync supervisor page was detected in \#PF or VerifyAccessSyncPage. */
2963 STAMPROFILE StatR3Prefetch; /**< R3: PGMPrefetchPage. */
2964 STAMPROFILE StatR3FlushTLB; /**< R3: Profiling of the PGMFlushTLB() body. */
2965 STAMCOUNTER StatR3FlushTLBNewCR3; /**< R3: The number of times PGMFlushTLB was called with a new CR3, non-global. (switch) */
2966 STAMCOUNTER StatR3FlushTLBNewCR3Global; /**< R3: The number of times PGMFlushTLB was called with a new CR3, global. (switch) */
2967 STAMCOUNTER StatR3FlushTLBSameCR3; /**< R3: The number of times PGMFlushTLB was called with the same CR3, non-global. (flush) */
2968 STAMCOUNTER StatR3FlushTLBSameCR3Global; /**< R3: The number of times PGMFlushTLB was called with the same CR3, global. (flush) */
2969    STAMPROFILE StatR3GstModifyPage;                /**< R3: Profiling of the PGMGstModifyPage() body. */
2970 /** @} */
2971#endif /* VBOX_WITH_STATISTICS */
2972} PGMCPU;
2973/** Pointer to the per-cpu PGM data. */
2974typedef PGMCPU *PPGMCPU;
2975
2976
2977/** @name PGM::fSyncFlags Flags
2978 * @{
2979 */
2980/** Updates the virtual access handler state bit in PGMPAGE. */
2981#define PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL RT_BIT(0)
2982/** Always sync CR3. */
2983#define PGM_SYNC_ALWAYS RT_BIT(1)
2984/** Check monitoring on next CR3 (re)load and invalidate page.
2985 * @todo This is obsolete now. Remove after 2.2.0 is branched off. */
2986#define PGM_SYNC_MONITOR_CR3 RT_BIT(2)
2987/** Check guest mapping in SyncCR3. */
2988#define PGM_SYNC_MAP_CR3 RT_BIT(3)
2989/** Clear the page pool (a lightweight flush). */
2990#define PGM_SYNC_CLEAR_PGM_POOL_BIT 8
2991#define PGM_SYNC_CLEAR_PGM_POOL RT_BIT(PGM_SYNC_CLEAR_PGM_POOL_BIT)
2992/** @} */
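/* A minimal usage sketch (illustrative only, not code from this file) of how
 * these bits are typically tested and cleared; the exact home of fSyncFlags
 * (pVM->pgm.s.fSyncFlags here) is an assumption made for the example:
 *
 *     if (pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)
 *     {
 *         pgmPoolClearAll(pVM);    <- the lightweight pool flush
 *         ASMAtomicAndU32(&pVM->pgm.s.fSyncFlags, ~PGM_SYNC_CLEAR_PGM_POOL);
 *     }
 */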
2993
2994
2995RT_C_DECLS_BEGIN
2996
2997int pgmLock(PVM pVM);
2998void pgmUnlock(PVM pVM);
2999
3000int pgmR3SyncPTResolveConflict(PVM pVM, PPGMMAPPING pMapping, PX86PD pPDSrc, RTGCPTR GCPtrOldMapping);
3001int pgmR3SyncPTResolveConflictPAE(PVM pVM, PPGMMAPPING pMapping, RTGCPTR GCPtrOldMapping);
3002PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr);
3003void pgmR3MapRelocate(PVM pVM, PPGMMAPPING pMapping, RTGCPTR GCPtrOldMapping, RTGCPTR GCPtrNewMapping);
3004DECLCALLBACK(void) pgmR3MapInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
3005
3006void pgmR3HandlerPhysicalUpdateAll(PVM pVM);
3007bool pgmHandlerPhysicalIsAll(PVM pVM, RTGCPHYS GCPhys);
3008void pgmHandlerPhysicalResetAliasedPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhysPage);
3009int pgmHandlerVirtualFindByPhysAddr(PVM pVM, RTGCPHYS GCPhys, PPGMVIRTHANDLER *ppVirt, unsigned *piPage);
3010DECLCALLBACK(int) pgmHandlerVirtualResetOne(PAVLROGCPTRNODECORE pNode, void *pvUser);
3011#if defined(VBOX_STRICT) || defined(LOG_ENABLED)
3012void pgmHandlerVirtualDumpPhysPages(PVM pVM);
3013#else
3014# define pgmHandlerVirtualDumpPhysPages(a) do { } while (0)
3015#endif
3016DECLCALLBACK(void) pgmR3InfoHandlers(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
3017
3018
3019int pgmPhysAllocPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys);
3020int pgmPhysPageLoadIntoTlb(PPGM pPGM, RTGCPHYS GCPhys);
3021int pgmPhysPageLoadIntoTlbWithPage(PPGM pPGM, PPGMPAGE pPage, RTGCPHYS GCPhys);
3022int pgmPhysPageMakeWritable(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys);
3023int pgmPhysPageMakeWritableUnlocked(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys);
3024int pgmPhysPageMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv);
3025int pgmPhysPageMapByPageID(PVM pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv);
3026int pgmPhysGCPhys2CCPtrInternal(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv);
3027int pgmPhysGCPhys2CCPtrInternalReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv);
3028VMMDECL(int) pgmPhysRomWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser);
3029#ifdef IN_RING3
3030void pgmR3PhysRelinkRamRanges(PVM pVM);
3031int pgmR3PhysRamPreAllocate(PVM pVM);
3032int pgmR3PhysRamReset(PVM pVM);
3033int pgmR3PhysRomReset(PVM pVM);
3034int pgmR3PhysChunkMap(PVM pVM, uint32_t idChunk, PPPGMCHUNKR3MAP ppChunk);
3035
3036int pgmR3PoolInit(PVM pVM);
3037void pgmR3PoolRelocate(PVM pVM);
3038void pgmR3PoolReset(PVM pVM);
3039
3040#endif /* IN_RING3 */
3041#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3042int pgmR0DynMapHCPageCommon(PVM pVM, PPGMMAPSET pSet, RTHCPHYS HCPhys, void **ppv);
3043#endif
3044int pgmPoolAllocEx(PVM pVM, RTGCPHYS GCPhys, PGMPOOLKIND enmKind, PGMPOOLACCESS enmAccess, uint16_t iUser, uint32_t iUserTable, PPPGMPOOLPAGE ppPage, bool fLockPage = false);
3045
3046DECLINLINE(int) pgmPoolAlloc(PVM pVM, RTGCPHYS GCPhys, PGMPOOLKIND enmKind, uint16_t iUser, uint32_t iUserTable, PPPGMPOOLPAGE ppPage, bool fLockPage = false)
3047{
3048 return pgmPoolAllocEx(pVM, GCPhys, enmKind, PGMPOOLACCESS_DONTCARE, iUser, iUserTable, ppPage, fLockPage);
3049}
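/* Call-pattern sketch (hypothetical caller values; the pool kind shown is
 * just one of the PGMPOOLKIND values defined earlier in this header):
 *
 *     PPGMPOOLPAGE pShwPage;
 *     int rc = pgmPoolAlloc(pVM, GCPhysPT, PGMPOOLKIND_PAE_PT_FOR_PAE_PT,
 *                           iUser, iUserTable, &pShwPage);
 *     if (RT_SUCCESS(rc))
 *         ... use pShwPage; release it with pgmPoolFreeByPage() when done ...
 */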
3050
3051void pgmPoolFree(PVM pVM, RTHCPHYS HCPhys, uint16_t iUser, uint32_t iUserTable);
3052void pgmPoolFreeByPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage, uint16_t iUser, uint32_t iUserTable);
3053int pgmPoolFlushPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage);
3054void pgmPoolClearAll(PVM pVM);
3055PPGMPOOLPAGE pgmPoolGetPage(PPGMPOOL pPool, RTHCPHYS HCPhys);
3056int pgmPoolSyncCR3(PVMCPU pVCpu);
3057int pgmPoolTrackFlushGCPhys(PVM pVM, PPGMPAGE pPhysPage, bool *pfFlushTLBs);
3058uint16_t pgmPoolTrackPhysExtAddref(PVM pVM, uint16_t u16, uint16_t iShwPT);
3059void pgmPoolTrackPhysExtDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPoolPage, PPGMPAGE pPhysPage);
3060void pgmPoolTracDerefGCPhysHint(PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTHCPHYS HCPhys, RTGCPHYS GCPhysHint);
3061#ifdef PGMPOOL_WITH_MONITORING
3062void pgmPoolMonitorChainChanging(PVMCPU pVCpu, PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTGCPHYS GCPhysFault, CTXTYPE(RTGCPTR, RTHCPTR, RTGCPTR) pvAddress, PDISCPUSTATE pCpu);
3063int pgmPoolMonitorChainFlush(PPGMPOOL pPool, PPGMPOOLPAGE pPage);
3064void pgmPoolMonitorModifiedInsert(PPGMPOOL pPool, PPGMPOOLPAGE pPage);
3065#endif
3066
3067void pgmPoolAddDirtyPage(PVM pVM, PPGMPOOL pPool, PPGMPOOLPAGE pPage);
3068void pgmPoolResetDirtyPages(PVM pVM, bool fForceRemoval = false);
3069
3070int pgmR3ExitShadowModeBeforePoolFlush(PVM pVM, PVMCPU pVCpu);
3071int pgmR3ReEnterShadowModeAfterPoolFlush(PVM pVM, PVMCPU pVCpu);
3072
3073void pgmMapSetShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE);
3074void pgmMapClearShadowPDEs(PVM pVM, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iOldPDE, bool fDeactivateCR3);
3075int pgmMapActivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3);
3076int pgmMapDeactivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3);
3077
3078int pgmShwSyncPaePDPtr(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDPE pGstPdpe, PX86PDPAE *ppPD);
3079#ifndef IN_RC
3080int pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E pGstPml4e, PX86PDPE pGstPdpe, PX86PDPAE *ppPD);
3081#endif
3082int pgmShwGetEPTPDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD);
3083
3084PX86PD pgmGstLazyMap32BitPD(PPGMCPU pPGM);
3085PX86PDPT pgmGstLazyMapPaePDPT(PPGMCPU pPGM);
3086PX86PDPAE pgmGstLazyMapPaePD(PPGMCPU pPGM, uint32_t iPdpt);
3087PX86PML4 pgmGstLazyMapPml4(PPGMCPU pPGM);
3088
3089RT_C_DECLS_END
3090
3091
3092/**
3093 * Gets the PGMRAMRANGE structure for a guest page.
3094 *
3095 * @returns Pointer to the RAM range on success.
3096 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
3097 *
3098 * @param pPGM PGM handle.
3099 * @param GCPhys The GC physical address.
3100 */
3101DECLINLINE(PPGMRAMRANGE) pgmPhysGetRange(PPGM pPGM, RTGCPHYS GCPhys)
3102{
3103 /*
3104 * Optimize for the first range.
3105 */
3106 PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
3107 RTGCPHYS off = GCPhys - pRam->GCPhys;
3108 if (RT_UNLIKELY(off >= pRam->cb))
3109 {
3110 do
3111 {
3112 pRam = pRam->CTX_SUFF(pNext);
3113 if (RT_UNLIKELY(!pRam))
3114 break;
3115 off = GCPhys - pRam->GCPhys;
3116 } while (off >= pRam->cb);
3117 }
3118 return pRam;
3119}
3120
3121
3122/**
3123 * Gets the PGMPAGE structure for a guest page.
3124 *
3125 * @returns Pointer to the page on success.
3126 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
3127 *
3128 * @param pPGM PGM handle.
3129 * @param GCPhys The GC physical address.
3130 */
3131DECLINLINE(PPGMPAGE) pgmPhysGetPage(PPGM pPGM, RTGCPHYS GCPhys)
3132{
3133 /*
3134 * Optimize for the first range.
3135 */
3136 PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
3137 RTGCPHYS off = GCPhys - pRam->GCPhys;
3138 if (RT_UNLIKELY(off >= pRam->cb))
3139 {
3140 do
3141 {
3142 pRam = pRam->CTX_SUFF(pNext);
3143 if (RT_UNLIKELY(!pRam))
3144 return NULL;
3145 off = GCPhys - pRam->GCPhys;
3146 } while (off >= pRam->cb);
3147 }
3148 return &pRam->aPages[off >> PAGE_SHIFT];
3149}
3150
3151
3152/**
3153 * Gets the PGMPAGE structure for a guest page.
3154 *
3155 * Old Phys code: Will make sure the page is present.
3156 *
3157 * @returns VBox status code.
3158 * @retval VINF_SUCCESS and a valid *ppPage on success.
3159 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
3160 *
3161 * @param pPGM PGM handle.
3162 * @param GCPhys The GC physical address.
3163 * @param ppPage Where to store the page pointer on success.
3164 */
3165DECLINLINE(int) pgmPhysGetPageEx(PPGM pPGM, RTGCPHYS GCPhys, PPPGMPAGE ppPage)
3166{
3167 /*
3168 * Optimize for the first range.
3169 */
3170 PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
3171 RTGCPHYS off = GCPhys - pRam->GCPhys;
3172 if (RT_UNLIKELY(off >= pRam->cb))
3173 {
3174 do
3175 {
3176 pRam = pRam->CTX_SUFF(pNext);
3177 if (RT_UNLIKELY(!pRam))
3178 {
3179 *ppPage = NULL; /* avoid incorrect and very annoying GCC warnings */
3180 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
3181 }
3182 off = GCPhys - pRam->GCPhys;
3183 } while (off >= pRam->cb);
3184 }
3185 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
3186 return VINF_SUCCESS;
3187}
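/* Typical call sequence (a sketch; pVM and GCPhys stand in for caller state):
 *
 *     PPGMPAGE pPage;
 *     int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
 *     if (RT_FAILURE(rc))
 *         return rc;    (the only failure is VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS)
 *     ... inspect or update *pPage ...
 */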
3188
3189
3190
3191
3192/**
3193 * Gets the PGMPAGE structure for a guest page.
3194 *
3195 * Old Phys code: Will make sure the page is present.
3196 *
3197 * @returns VBox status code.
3198 * @retval VINF_SUCCESS and a valid *ppPage on success.
3199 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
3200 *
3201 * @param pPGM PGM handle.
3202 * @param GCPhys The GC physical address.
3203 * @param ppPage Where to store the page pointer on success.
3204 * @param ppRamHint Where to read and store the ram list hint.
3205 * The caller initializes this to NULL before the call.
3206 */
3207DECLINLINE(int) pgmPhysGetPageWithHintEx(PPGM pPGM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRamHint)
3208{
3209 RTGCPHYS off;
3210 PPGMRAMRANGE pRam = *ppRamHint;
3211 if ( !pRam
3212 || RT_UNLIKELY((off = GCPhys - pRam->GCPhys) >= pRam->cb))
3213 {
3214 pRam = pPGM->CTX_SUFF(pRamRanges);
3215 off = GCPhys - pRam->GCPhys;
3216 if (RT_UNLIKELY(off >= pRam->cb))
3217 {
3218 do
3219 {
3220 pRam = pRam->CTX_SUFF(pNext);
3221 if (RT_UNLIKELY(!pRam))
3222 {
3223 *ppPage = NULL; /* Kill the incorrect and extremely annoying GCC warnings. */
3224 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
3225 }
3226 off = GCPhys - pRam->GCPhys;
3227 } while (off >= pRam->cb);
3228 }
3229 *ppRamHint = pRam;
3230 }
3231 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
3232 return VINF_SUCCESS;
3233}
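/* The hint pays off when successive lookups hit the same RAM range, e.g. when
 * walking a guest buffer page by page (a sketch; the loop bounds are
 * placeholders):
 *
 *     PPGMRAMRANGE pRamHint = NULL;    (the caller must start with NULL)
 *     for (RTGCPHYS GCPhysCur = GCPhysFirst; GCPhysCur < GCPhysLast; GCPhysCur += PAGE_SIZE)
 *     {
 *         PPGMPAGE pPage;
 *         int rc = pgmPhysGetPageWithHintEx(&pVM->pgm.s, GCPhysCur, &pPage, &pRamHint);
 *         ...
 *     }
 */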
3234
3235
3236/**
3237 * Gets the PGMPAGE structure for a guest page together with the PGMRAMRANGE.
3238 *
3239 * @returns Pointer to the page on success.
3240 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
3241 *
3242 * @param pPGM PGM handle.
3243 * @param GCPhys The GC physical address.
3244 * @param ppRam Where to store the pointer to the PGMRAMRANGE.
3245 */
3246DECLINLINE(PPGMPAGE) pgmPhysGetPageAndRange(PPGM pPGM, RTGCPHYS GCPhys, PPGMRAMRANGE *ppRam)
3247{
3248 /*
3249 * Optimize for the first range.
3250 */
3251 PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
3252 RTGCPHYS off = GCPhys - pRam->GCPhys;
3253 if (RT_UNLIKELY(off >= pRam->cb))
3254 {
3255 do
3256 {
3257 pRam = pRam->CTX_SUFF(pNext);
3258 if (RT_UNLIKELY(!pRam))
3259 return NULL;
3260 off = GCPhys - pRam->GCPhys;
3261 } while (off >= pRam->cb);
3262 }
3263 *ppRam = pRam;
3264 return &pRam->aPages[off >> PAGE_SHIFT];
3265}
3266
3267
3268/**
3269 * Gets the PGMPAGE structure for a guest page together with the PGMRAMRANGE.
3270 *
3271 * @returns Pointer to the page on success.
3272 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
3273 *
3274 * @param pPGM PGM handle.
3275 * @param GCPhys The GC physical address.
3276 * @param ppPage Where to store the pointer to the PGMPAGE structure.
3277 * @param ppRam Where to store the pointer to the PGMRAMRANGE structure.
3278 */
3279DECLINLINE(int) pgmPhysGetPageAndRangeEx(PPGM pPGM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam)
3280{
3281 /*
3282 * Optimize for the first range.
3283 */
3284 PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
3285 RTGCPHYS off = GCPhys - pRam->GCPhys;
3286 if (RT_UNLIKELY(off >= pRam->cb))
3287 {
3288 do
3289 {
3290 pRam = pRam->CTX_SUFF(pNext);
3291 if (RT_UNLIKELY(!pRam))
3292 {
3293 *ppRam = NULL; /* Shut up silly GCC warnings. */
3294 *ppPage = NULL; /* ditto */
3295 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
3296 }
3297 off = GCPhys - pRam->GCPhys;
3298 } while (off >= pRam->cb);
3299 }
3300 *ppRam = pRam;
3301 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
3302 return VINF_SUCCESS;
3303}
3304
3305
3306/**
3307 * Convert GC Phys to HC Phys.
3308 *
3309 * @returns VBox status code.
3310 * @param pPGM PGM handle.
3311 * @param GCPhys The GC physical address.
3312 * @param pHCPhys Where to store the corresponding HC physical address.
3313 *
3314 * @deprecated Doesn't deal with zero, shared or write monitored pages.
3315 * Avoid when writing new code!
3316 */
3317DECLINLINE(int) pgmRamGCPhys2HCPhys(PPGM pPGM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
3318{
3319 PPGMPAGE pPage;
3320 int rc = pgmPhysGetPageEx(pPGM, GCPhys, &pPage);
3321 if (RT_FAILURE(rc))
3322 return rc;
3323 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
3324 return VINF_SUCCESS;
3325}
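/* Worked example (illustrative numbers): for GCPhys 0x00101234 whose page is
 * backed by the host page at 0xABCDE000, the function stores
 *     0xABCDE000 | (0x00101234 & PAGE_OFFSET_MASK) = 0xABCDE234,
 * i.e. the host physical page address with the low 12 bits of GCPhys kept. */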
3326
3327#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3328
3329/**
3330 * Inlined version of the ring-0 version of PGMDynMapHCPage that
3331 * optimizes access to pages already in the set.
3332 *
3333 * @returns VINF_SUCCESS. Will bail out to ring-3 on failure.
3334 * @param pPGM Pointer to the PGM instance data.
3335 * @param HCPhys The physical address of the page.
3336 * @param ppv Where to store the mapping address.
3337 */
3338DECLINLINE(int) pgmR0DynMapHCPageInlined(PPGM pPGM, RTHCPHYS HCPhys, void **ppv)
3339{
3340 PVM pVM = PGM2VM(pPGM);
3341 PPGMCPU pPGMCPU = (PPGMCPU)((uint8_t *)VMMGetCpu(pVM) + pPGM->offVCpuPGM); /* very pretty ;-) */
3342 PPGMMAPSET pSet = &pPGMCPU->AutoSet;
3343
3344 STAM_PROFILE_START(&pPGMCPU->StatR0DynMapHCPageInl, a);
3345 Assert(!(HCPhys & PAGE_OFFSET_MASK));
3346 Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));
3347
3348 unsigned iHash = PGMMAPSET_HASH(HCPhys);
3349 unsigned iEntry = pSet->aiHashTable[iHash];
3350 if ( iEntry < pSet->cEntries
3351 && pSet->aEntries[iEntry].HCPhys == HCPhys)
3352 {
3353 *ppv = pSet->aEntries[iEntry].pvPage;
3354 STAM_COUNTER_INC(&pPGMCPU->StatR0DynMapHCPageInlHits);
3355 }
3356 else
3357 {
3358 STAM_COUNTER_INC(&pPGMCPU->StatR0DynMapHCPageInlMisses);
3359 pgmR0DynMapHCPageCommon(pVM, pSet, HCPhys, ppv);
3360 }
3361
3362 STAM_PROFILE_STOP(&pPGMCPU->StatR0DynMapHCPageInl, a);
3363 return VINF_SUCCESS;
3364}
3365
3366
3367/**
3368 * Inlined version of the ring-0 version of PGMDynMapGCPage that optimizes
3369 * access to pages already in the set.
3370 *
3371 * @returns See PGMDynMapGCPage.
3372 * @param pPGM Pointer to the PGM instance data.
3373 * @param GCPhys The guest physical address of the page.
3374 * @param ppv Where to store the mapping address.
3375 */
3376DECLINLINE(int) pgmR0DynMapGCPageInlined(PPGM pPGM, RTGCPHYS GCPhys, void **ppv)
3377{
3378 PVM pVM = PGM2VM(pPGM);
3379 PPGMCPU pPGMCPU = (PPGMCPU)((uint8_t *)VMMGetCpu(pVM) + pPGM->offVCpuPGM); /* very pretty ;-) */
3380
3381 STAM_PROFILE_START(&pPGMCPU->StatR0DynMapGCPageInl, a);
3382 AssertMsg(!(GCPhys & PAGE_OFFSET_MASK), ("%RGp\n", GCPhys));
3383
3384 /*
3385 * Get the ram range.
3386 */
3387 PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
3388 RTGCPHYS off = GCPhys - pRam->GCPhys;
3389 if (RT_UNLIKELY(off >= pRam->cb
3390 /** @todo || page state stuff */))
3391 {
3392 /* This case is not counted into StatR0DynMapGCPageInl. */
3393 STAM_COUNTER_INC(&pPGMCPU->StatR0DynMapGCPageInlRamMisses);
3394 return PGMDynMapGCPage(pVM, GCPhys, ppv);
3395 }
3396
3397 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[off >> PAGE_SHIFT]);
3398 STAM_COUNTER_INC(&pPGMCPU->StatR0DynMapGCPageInlRamHits);
3399
3400 /*
3401     * pgmR0DynMapHCPageInlined without the stats.
3402 */
3403 PPGMMAPSET pSet = &pPGMCPU->AutoSet;
3404 Assert(!(HCPhys & PAGE_OFFSET_MASK));
3405 Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));
3406
3407 unsigned iHash = PGMMAPSET_HASH(HCPhys);
3408 unsigned iEntry = pSet->aiHashTable[iHash];
3409 if ( iEntry < pSet->cEntries
3410 && pSet->aEntries[iEntry].HCPhys == HCPhys)
3411 {
3412 *ppv = pSet->aEntries[iEntry].pvPage;
3413 STAM_COUNTER_INC(&pPGMCPU->StatR0DynMapGCPageInlHits);
3414 }
3415 else
3416 {
3417 STAM_COUNTER_INC(&pPGMCPU->StatR0DynMapGCPageInlMisses);
3418 pgmR0DynMapHCPageCommon(pVM, pSet, HCPhys, ppv);
3419 }
3420
3421 STAM_PROFILE_STOP(&pPGMCPU->StatR0DynMapGCPageInl, a);
3422 return VINF_SUCCESS;
3423}
3424
3425
3426/**
3427 * Inlined version of the ring-0 version of PGMDynMapGCPageOff that optimizes
3428 * access to pages already in the set.
3429 *
3430 * @returns See PGMDynMapGCPageOff.
3431 * @param pPGM Pointer to the PGM instance data.
3432 * @param GCPhys The guest physical address of the page.
3433 * @param ppv Where to store the mapping address.
3434 */
3435DECLINLINE(int) pgmR0DynMapGCPageOffInlined(PPGM pPGM, RTGCPHYS GCPhys, void **ppv)
3436{
3437 PVM pVM = PGM2VM(pPGM);
3438 PPGMCPU pPGMCPU = (PPGMCPU)((uint8_t *)VMMGetCpu(pVM) + pPGM->offVCpuPGM); /* very pretty ;-) */
3439
3440 STAM_PROFILE_START(&pPGMCPU->StatR0DynMapGCPageInl, a);
3441
3442 /*
3443 * Get the ram range.
3444 */
3445 PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
3446 RTGCPHYS off = GCPhys - pRam->GCPhys;
3447 if (RT_UNLIKELY(off >= pRam->cb
3448 /** @todo || page state stuff */))
3449 {
3450 /* This case is not counted into StatR0DynMapGCPageInl. */
3451 STAM_COUNTER_INC(&pPGMCPU->StatR0DynMapGCPageInlRamMisses);
3452 return PGMDynMapGCPageOff(pVM, GCPhys, ppv);
3453 }
3454
3455 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[off >> PAGE_SHIFT]);
3456 STAM_COUNTER_INC(&pPGMCPU->StatR0DynMapGCPageInlRamHits);
3457
3458 /*
3459     * pgmR0DynMapHCPageInlined without the stats.
3460 */
3461 PPGMMAPSET pSet = &pPGMCPU->AutoSet;
3462 Assert(!(HCPhys & PAGE_OFFSET_MASK));
3463 Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));
3464
3465 unsigned iHash = PGMMAPSET_HASH(HCPhys);
3466 unsigned iEntry = pSet->aiHashTable[iHash];
3467 if ( iEntry < pSet->cEntries
3468 && pSet->aEntries[iEntry].HCPhys == HCPhys)
3469 {
3470 *ppv = (void *)((uintptr_t)pSet->aEntries[iEntry].pvPage | (PAGE_OFFSET_MASK & (uintptr_t)GCPhys));
3471 STAM_COUNTER_INC(&pPGMCPU->StatR0DynMapGCPageInlHits);
3472 }
3473 else
3474 {
3475 STAM_COUNTER_INC(&pPGMCPU->StatR0DynMapGCPageInlMisses);
3476 pgmR0DynMapHCPageCommon(pVM, pSet, HCPhys, ppv);
3477 *ppv = (void *)((uintptr_t)*ppv | (PAGE_OFFSET_MASK & (uintptr_t)GCPhys));
3478 }
3479
3480 STAM_PROFILE_STOP(&pPGMCPU->StatR0DynMapGCPageInl, a);
3481 return VINF_SUCCESS;
3482}
3483
3484#endif /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
3485#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
3486
3487/**
3488 * Maps the page into current context (RC and maybe R0).
3489 *
3490 * @returns pointer to the mapping.
3491 * @param pPGM Pointer to the PGM instance data.
3492 * @param pPage The page.
3493 */
3494DECLINLINE(void *) pgmPoolMapPageInlined(PPGM pPGM, PPGMPOOLPAGE pPage)
3495{
3496 if (pPage->idx >= PGMPOOL_IDX_FIRST)
3497 {
3498 Assert(pPage->idx < pPGM->CTX_SUFF(pPool)->cCurPages);
3499 void *pv;
3500# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3501 pgmR0DynMapHCPageInlined(pPGM, pPage->Core.Key, &pv);
3502# else
3503 PGMDynMapHCPage(PGM2VM(pPGM), pPage->Core.Key, &pv);
3504# endif
3505 return pv;
3506 }
3507 AssertFatalMsgFailed(("pgmPoolMapPageInlined invalid page index %x\n", pPage->idx));
3508}
3509
3510/**
3511 * Temporarily maps one host page specified by HC physical address, returning
3512 * a pointer within the page.
3513 *
3514 * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
3515 * reused after 8 mappings (or perhaps a few more if you score with the cache).
3516 *
3517 * @returns The address corresponding to HCPhys.
3518 * @param pPGM Pointer to the PGM instance data.
3519 * @param HCPhys HC Physical address of the page.
3520 */
3521DECLINLINE(void *) pgmDynMapHCPageOff(PPGM pPGM, RTHCPHYS HCPhys)
3522{
3523 void *pv;
3524# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3525 pgmR0DynMapHCPageInlined(pPGM, HCPhys & ~(RTHCPHYS)PAGE_OFFSET_MASK, &pv);
3526# else
3527 PGMDynMapHCPage(PGM2VM(pPGM), HCPhys & ~(RTHCPHYS)PAGE_OFFSET_MASK, &pv);
3528# endif
3529 pv = (void *)((uintptr_t)pv | (HCPhys & PAGE_OFFSET_MASK));
3530 return pv;
3531}
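/* Offset-preservation example (illustrative number): HCPhys 0x12345678 maps
 * the page at 0x12345000, and the returned pointer is the mapping address
 * ORed with the 0x678 byte offset, so callers get byte granularity even
 * though the mapper itself works on whole pages. */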
3532
3533#endif /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 || IN_RC */
3534#ifndef IN_RC
3535
3536/**
3537 * Queries the Physical TLB entry for a physical guest page,
3538 * attempting to load the TLB entry if necessary.
3539 *
3540 * @returns VBox status code.
3541 * @retval VINF_SUCCESS on success
3542 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
3543 *
3544 * @param pPGM The PGM instance handle.
3545 * @param GCPhys The address of the guest page.
3546 * @param ppTlbe Where to store the pointer to the TLB entry.
3547 */
3548DECLINLINE(int) pgmPhysPageQueryTlbe(PPGM pPGM, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe)
3549{
3550 int rc;
3551 PPGMPAGEMAPTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
3552 if (pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK))
3553 {
3554 STAM_COUNTER_INC(&pPGM->CTX_MID_Z(Stat,PageMapTlbHits));
3555 rc = VINF_SUCCESS;
3556 }
3557 else
3558 rc = pgmPhysPageLoadIntoTlb(pPGM, GCPhys);
3559 *ppTlbe = pTlbe;
3560 return rc;
3561}
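/* Intended call pattern (a sketch; pVM and GCPhys are placeholders, and the
 * pPage/pv members of the TLB entry are assumed as declared elsewhere in this
 * header):
 *
 *     PPGMPAGEMAPTLBE pTlbe;
 *     int rc = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
 *     if (RT_SUCCESS(rc))
 *         ... use pTlbe->pPage and pTlbe->pv until the TLB is flushed ...
 */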
3562
3563
3564/**
3565 * Queries the Physical TLB entry for a physical guest page,
3566 * attempting to load the TLB entry if necessary.
3567 *
3568 * @returns VBox status code.
3569 * @retval VINF_SUCCESS on success
3570 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
3571 *
3572 * @param pPGM The PGM instance handle.
3573 * @param pPage Pointer to the PGMPAGE structure corresponding to
3574 * GCPhys.
3575 * @param GCPhys The address of the guest page.
3576 * @param ppTlbe Where to store the pointer to the TLB entry.
3577 */
3578DECLINLINE(int) pgmPhysPageQueryTlbeWithPage(PPGM pPGM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe)
3579{
3580 int rc;
3581 PPGMPAGEMAPTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
3582 if (pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK))
3583 {
3584 STAM_COUNTER_INC(&pPGM->CTX_MID_Z(Stat,PageMapTlbHits));
3585 rc = VINF_SUCCESS;
3586 }
3587 else
3588 rc = pgmPhysPageLoadIntoTlbWithPage(pPGM, pPage, GCPhys);
3589 *ppTlbe = pTlbe;
3590 return rc;
3591}
3592
3593#endif /* !IN_RC */
3594
3595/**
3596 * Calculates the guest physical address of the large (4 MB) page in 32-bit paging mode.
3597 * Takes PSE-36 into account.
3598 *
3599 * @returns guest physical address
3600 * @param pPGM Pointer to the PGM instance data.
3601 * @param Pde The guest PDE.
3602 */
3603DECLINLINE(RTGCPHYS) pgmGstGet4MBPhysPage(PPGM pPGM, X86PDE Pde)
3604{
3605 RTGCPHYS GCPhys = Pde.u & X86_PDE4M_PG_MASK;
3606 GCPhys |= (RTGCPHYS)Pde.b.u8PageNoHigh << 32;
3607
3608 return GCPhys & pPGM->GCPhys4MBPSEMask;
3609}
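/* PSE-36 layout reminder: PDE bits 22..31 provide physical bits 22..31 and
 * PDE bits 13..20 (u8PageNoHigh) provide physical bits 32..39. Worked example
 * (illustrative values): a PDE with base 0x00C00000 and u8PageNoHigh 0x01
 * yields 0x00C00000 | ((RTGCPHYS)0x01 << 32) = 0x100C00000, which is then
 * clipped by GCPhys4MBPSEMask to what the guest CPU profile supports. */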
3610
3611
3612/**
3613 * Gets the page directory entry for the specified address (32-bit paging).
3614 *
3615 * @returns The page directory entry in question.
3616 * @param pPGM Pointer to the PGM instance data.
3617 * @param GCPtr The address.
3618 */
3619DECLINLINE(X86PDE) pgmGstGet32bitPDE(PPGMCPU pPGM, RTGCPTR GCPtr)
3620{
3621#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3622 PCX86PD pGuestPD = NULL;
3623 int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pPGM->GCPhysCR3, (void **)&pGuestPD);
3624 if (RT_FAILURE(rc))
3625 {
3626 X86PDE ZeroPde = {0};
3627 AssertMsgFailedReturn(("%Rrc\n", rc), ZeroPde);
3628 }
3629#else
3630 PX86PD pGuestPD = pPGM->CTX_SUFF(pGst32BitPd);
3631# ifdef IN_RING3
3632 if (!pGuestPD)
3633 pGuestPD = pgmGstLazyMap32BitPD(pPGM);
3634# endif
3635#endif
3636 return pGuestPD->a[GCPtr >> X86_PD_SHIFT];
3637}
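/* Index derivation example: with X86_PD_SHIFT being 22, a linear address such
 * as 0xC0123456 selects PD entry 0xC0123456 >> 22 = 0x300 out of the 1024
 * entries of a 32-bit page directory. */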
3638
3639
3640/**
3641 * Gets the address of a specific page directory entry (32-bit paging).
3642 *
3643 * @returns Pointer to the page directory entry in question.
3644 * @param pPGM Pointer to the PGM instance data.
3645 * @param GCPtr The address.
3646 */
3647DECLINLINE(PX86PDE) pgmGstGet32bitPDEPtr(PPGMCPU pPGM, RTGCPTR GCPtr)
3648{
3649#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3650 PX86PD pGuestPD = NULL;
3651 int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pPGM->GCPhysCR3, (void **)&pGuestPD);
3652 AssertRCReturn(rc, NULL);
3653#else
3654 PX86PD pGuestPD = pPGM->CTX_SUFF(pGst32BitPd);
3655# ifdef IN_RING3
3656 if (!pGuestPD)
3657 pGuestPD = pgmGstLazyMap32BitPD(pPGM);
3658# endif
3659#endif
3660 return &pGuestPD->a[GCPtr >> X86_PD_SHIFT];
3661}
3662
3663
3664/**
3665 * Gets the address of the guest page directory (32-bit paging).
3666 *
3667 * @returns Pointer to the page directory in question.
3668 * @param pPGM Pointer to the PGM instance data.
3669 */
3670DECLINLINE(PX86PD) pgmGstGet32bitPDPtr(PPGMCPU pPGM)
3671{
3672#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3673 PX86PD pGuestPD = NULL;
3674 int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pPGM->GCPhysCR3, (void **)&pGuestPD);
3675 AssertRCReturn(rc, NULL);
3676#else
3677 PX86PD pGuestPD = pPGM->CTX_SUFF(pGst32BitPd);
3678# ifdef IN_RING3
3679 if (!pGuestPD)
3680 pGuestPD = pgmGstLazyMap32BitPD(pPGM);
3681# endif
3682#endif
3683 return pGuestPD;
3684}
3685
3686
3687/**
3688 * Gets the guest page directory pointer table.
3689 *
3690 * @returns Pointer to the page directory pointer table in question.
3691 * @returns NULL if the page directory pointer table is not present or on an invalid page.
3692 * @param pPGM Pointer to the PGM instance data.
3693 */
3694DECLINLINE(PX86PDPT) pgmGstGetPaePDPTPtr(PPGMCPU pPGM)
3695{
3696#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3697 PX86PDPT pGuestPDPT = NULL;
3698 int rc = pgmR0DynMapGCPageOffInlined(PGMCPU2PGM(pPGM), pPGM->GCPhysCR3, (void **)&pGuestPDPT);
3699 AssertRCReturn(rc, NULL);
3700#else
3701 PX86PDPT pGuestPDPT = pPGM->CTX_SUFF(pGstPaePdpt);
3702# ifdef IN_RING3
3703 if (!pGuestPDPT)
3704 pGuestPDPT = pgmGstLazyMapPaePDPT(pPGM);
3705# endif
3706#endif
3707 return pGuestPDPT;
3708}
3709
3710
3711/**
3712 * Gets the guest page directory pointer table entry for the specified address.
3713 *
3714 * @returns Pointer to the page directory pointer table entry in question.
3715 * @returns NULL if the page directory pointer table is not present or on an invalid page.
3716 * @param pPGM Pointer to the PGM instance data.
3717 * @param GCPtr The address.
3718 */
3719DECLINLINE(PX86PDPE) pgmGstGetPaePDPEPtr(PPGMCPU pPGM, RTGCPTR GCPtr)
3720{
3721 AssertGCPtr32(GCPtr);
3722
3723#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3724    PX86PDPT pGuestPDPT = NULL;
3725    int rc = pgmR0DynMapGCPageOffInlined(PGMCPU2PGM(pPGM), pPGM->GCPhysCR3, (void **)&pGuestPDPT);
3726    AssertRCReturn(rc, NULL);
3727#else
3728 PX86PDPT pGuestPDPT = pPGM->CTX_SUFF(pGstPaePdpt);
3729# ifdef IN_RING3
3730 if (!pGuestPDPT)
3731 pGuestPDPT = pgmGstLazyMapPaePDPT(pPGM);
3732# endif
3733#endif
3734 return &pGuestPDPT->a[(GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE];
3735}
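/* In PAE mode the PDPT has only four entries, so the index expression reduces
 * to the top two bits of the 32-bit address: iPdpt = (GCPtr >> 30) & 3.
 * Example: GCPtr 0x80001000 selects PDPT entry 2. */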
3736
3737
3738/**
3739 * Gets the page directory for the specified address.
3740 *
3741 * @returns Pointer to the page directory in question.
3742 * @returns NULL if the page directory is not present or on an invalid page.
3743 * @param pPGM Pointer to the PGM instance data.
3744 * @param GCPtr The address.
3745 */
3746DECLINLINE(PX86PDPAE) pgmGstGetPaePD(PPGMCPU pPGM, RTGCPTR GCPtr)
3747{
3748 AssertGCPtr32(GCPtr);
3749
3750 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pPGM);
3751 AssertReturn(pGuestPDPT, NULL);
3752 const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
3753 if (pGuestPDPT->a[iPdpt].n.u1Present)
3754 {
3755#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3756 PX86PDPAE pGuestPD = NULL;
3757 int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK, (void **)&pGuestPD);
3758 AssertRCReturn(rc, NULL);
3759#else
3760 PX86PDPAE pGuestPD = pPGM->CTX_SUFF(apGstPaePDs)[iPdpt];
3761 if ( !pGuestPD
3762 || (pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) != pPGM->aGCPhysGstPaePDs[iPdpt])
3763 pGuestPD = pgmGstLazyMapPaePD(pPGM, iPdpt);
3764#endif
3765 return pGuestPD;
3766 /* returning NULL is ok if we assume it's just an invalid page of some kind emulated as all 0s. (not quite true) */
3767 }
3768 return NULL;
3769}
3770
3771
3772/**
3773 * Gets the page directory entry for the specified address.
3774 *
3775 * @returns Pointer to the page directory entry in question.
3776 * @returns NULL if the page directory is not present or on an invalid page.
3777 * @param pPGM Pointer to the PGM instance data.
3778 * @param GCPtr The address.
3779 */
3780DECLINLINE(PX86PDEPAE) pgmGstGetPaePDEPtr(PPGMCPU pPGM, RTGCPTR GCPtr)
3781{
3782 AssertGCPtr32(GCPtr);
3783
3784 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pPGM);
3785 AssertReturn(pGuestPDPT, NULL);
3786 const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
3787 if (pGuestPDPT->a[iPdpt].n.u1Present)
3788 {
3789 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
3790#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3791 PX86PDPAE pGuestPD = NULL;
3792 int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK, (void **)&pGuestPD);
3793 AssertRCReturn(rc, NULL);
3794#else
3795 PX86PDPAE pGuestPD = pPGM->CTX_SUFF(apGstPaePDs)[iPdpt];
3796 if ( !pGuestPD
3797 || (pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) != pPGM->aGCPhysGstPaePDs[iPdpt])
3798 pGuestPD = pgmGstLazyMapPaePD(pPGM, iPdpt);
3799#endif
3800 return &pGuestPD->a[iPD];
3801    /* returning NULL is ok if we assume it's just an invalid page or something which we'll emulate as all 0s. (not quite true) */
3802 }
3803 return NULL;
3804}
3805
3806
3807/**
3808 * Gets the page directory entry for the specified address.
3809 *
3810 * @returns The page directory entry in question.
3811 * @returns A non-present entry if the page directory is not present or on an invalid page.
3812 * @param pPGM Pointer to the PGM instance data.
3813 * @param GCPtr The address.
3814 */
3815DECLINLINE(X86PDEPAE) pgmGstGetPaePDE(PPGMCPU pPGM, RTGCPTR GCPtr)
3816{
3817 AssertGCPtr32(GCPtr);
3818 X86PDEPAE ZeroPde = {0};
3819 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pPGM);
3820 if (RT_LIKELY(pGuestPDPT))
3821 {
3822 const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
3823 if (pGuestPDPT->a[iPdpt].n.u1Present)
3824 {
3825 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
3826#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3827 PX86PDPAE pGuestPD = NULL;
3828 int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK, (void **)&pGuestPD);
3829 AssertRCReturn(rc, ZeroPde);
3830#else
3831 PX86PDPAE pGuestPD = pPGM->CTX_SUFF(apGstPaePDs)[iPdpt];
3832 if ( !pGuestPD
3833 || (pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) != pPGM->aGCPhysGstPaePDs[iPdpt])
3834 pGuestPD = pgmGstLazyMapPaePD(pPGM, iPdpt);
3835#endif
3836 return pGuestPD->a[iPD];
3837 }
3838 }
3839 return ZeroPde;
3840}
3841
3842
3843/**
3844 * Gets the page directory pointer table entry for the specified address
3845 * and returns the index into the page directory.
3846 *
3847 * @returns Pointer to the page directory in question.
3848 * @returns NULL if the page directory is not present or on an invalid page.
3849 * @param pPGM Pointer to the PGM instance data.
3850 * @param GCPtr The address.
3851 * @param piPD Receives the index into the returned page directory
3852 * @param pPdpe Receives the page directory pointer entry. Optional.
3853 */
3854DECLINLINE(PX86PDPAE) pgmGstGetPaePDPtr(PPGMCPU pPGM, RTGCPTR GCPtr, unsigned *piPD, PX86PDPE pPdpe)
3855{
3856 AssertGCPtr32(GCPtr);
3857
3858 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pPGM);
3859 AssertReturn(pGuestPDPT, NULL);
3860 const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
3861 if (pPdpe)
3862 *pPdpe = pGuestPDPT->a[iPdpt];
3863 if (pGuestPDPT->a[iPdpt].n.u1Present)
3864 {
3865 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
3866#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3867 PX86PDPAE pGuestPD = NULL;
3868 int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK, (void **)&pGuestPD);
3869 AssertRCReturn(rc, NULL);
3870#else
3871 PX86PDPAE pGuestPD = pPGM->CTX_SUFF(apGstPaePDs)[iPdpt];
3872 if ( !pGuestPD
3873 || (pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) != pPGM->aGCPhysGstPaePDs[iPdpt])
3874 pGuestPD = pgmGstLazyMapPaePD(pPGM, iPdpt);
3875#endif
3876 *piPD = iPD;
3877 return pGuestPD;
3878    /* returning NULL is ok if we assume it's just an invalid page of some kind emulated as all 0s. */
3879 }
3880 return NULL;
3881}
3882
3883#ifndef IN_RC
3884
3885/**
3886 * Gets the page map level-4 pointer for the guest.
3887 *
3888 * @returns Pointer to the PML4 page.
3889 * @param pPGM Pointer to the PGM instance data.
3890 */
3891DECLINLINE(PX86PML4) pgmGstGetLongModePML4Ptr(PPGMCPU pPGM)
3892{
3893#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3894 PX86PML4 pGuestPml4;
3895 int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pPGM->GCPhysCR3, (void **)&pGuestPml4);
3896 AssertRCReturn(rc, NULL);
3897#else
3898 PX86PML4 pGuestPml4 = pPGM->CTX_SUFF(pGstAmd64Pml4);
3899# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R3
3900 if (!pGuestPml4)
3901 pGuestPml4 = pgmGstLazyMapPml4(pPGM);
3902# endif
3903 Assert(pGuestPml4);
3904#endif
3905 return pGuestPml4;
3906}
3907
3908
3909/**
3910 * Gets the pointer to a page map level-4 entry.
3911 *
3912 * @returns Pointer to the PML4 entry.
3913 * @param pPGM Pointer to the PGM instance data.
3914 * @param iPml4 The index.
3915 */
3916DECLINLINE(PX86PML4E) pgmGstGetLongModePML4EPtr(PPGMCPU pPGM, unsigned int iPml4)
3917{
3918#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3919 PX86PML4 pGuestPml4;
3920 int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pPGM->GCPhysCR3, (void **)&pGuestPml4);
3921 AssertRCReturn(rc, NULL);
3922#else
3923 PX86PML4 pGuestPml4 = pPGM->CTX_SUFF(pGstAmd64Pml4);
3924# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R3
3925 if (!pGuestPml4)
3926 pGuestPml4 = pgmGstLazyMapPml4(pPGM);
3927# endif
3928 Assert(pGuestPml4);
3929#endif
3930 return &pGuestPml4->a[iPml4];
3931}
3932
3933
3934/**
3935 * Gets a page map level-4 entry.
3936 *
3937 * @returns The PML4 entry.
3938 * @param pPGM Pointer to the PGM instance data.
3939 * @param iPml4 The index.
3940 */
3941DECLINLINE(X86PML4E) pgmGstGetLongModePML4E(PPGMCPU pPGM, unsigned int iPml4)
3942{
3943#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3944 PX86PML4 pGuestPml4;
3945 int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pPGM->GCPhysCR3, (void **)&pGuestPml4);
3946 if (RT_FAILURE(rc))
3947 {
3948 X86PML4E ZeroPml4e = {0};
3949 AssertMsgFailedReturn(("%Rrc\n", rc), ZeroPml4e);
3950 }
3951#else
3952 PX86PML4 pGuestPml4 = pPGM->CTX_SUFF(pGstAmd64Pml4);
3953# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R3
3954 if (!pGuestPml4)
3955 pGuestPml4 = pgmGstLazyMapPml4(pPGM);
3956# endif
3957 Assert(pGuestPml4);
3958#endif
3959 return pGuestPml4->a[iPml4];
3960}
3961
3962
3963/**
3964 * Gets the page directory pointer entry for the specified address.
3965 *
3966 * @returns Pointer to the page directory pointer entry in question.
3967 * @returns NULL if the page directory is not present or on an invalid page.
3968 * @param pPGM Pointer to the PGM instance data.
3969 * @param GCPtr The address.
3970 * @param ppPml4e Page Map Level-4 Entry (out)
3971 */
3972DECLINLINE(PX86PDPE) pgmGstGetLongModePDPTPtr(PPGMCPU pPGM, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e)
3973{
3974 PX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
3975 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
3976 PCX86PML4E pPml4e = *ppPml4e = &pGuestPml4->a[iPml4];
3977 if (pPml4e->n.u1Present)
3978 {
3979 PX86PDPT pPdpt;
3980 int rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pPml4e->u & X86_PML4E_PG_MASK, &pPdpt);
3981 AssertRCReturn(rc, NULL);
3982
3983 const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
3984 return &pPdpt->a[iPdpt];
3985 }
3986 return NULL;
3987}
3988
3989
3990/**
3991 * Gets the page directory entry for the specified address.
3992 *
3993 * @returns The page directory entry in question.
3994 * @returns A non-present entry if the page directory is not present or on an invalid page.
3995 * @param pPGM Pointer to the PGM instance data.
3996 * @param GCPtr The address.
3997 * @param ppPml4e Page Map Level-4 Entry (out)
3998 * @param pPdpe Page directory pointer table entry (out)
3999 */
4000DECLINLINE(X86PDEPAE) pgmGstGetLongModePDEEx(PPGMCPU pPGM, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPE pPdpe)
4001{
4002 X86PDEPAE ZeroPde = {0};
4003 PX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
4004 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
4005 PCX86PML4E pPml4e = *ppPml4e = &pGuestPml4->a[iPml4];
4006 if (pPml4e->n.u1Present)
4007 {
4008 PCX86PDPT pPdptTemp;
4009 int rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pPml4e->u & X86_PML4E_PG_MASK, &pPdptTemp);
4010 AssertRCReturn(rc, ZeroPde);
4011
4012 const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
4013 *pPdpe = pPdptTemp->a[iPdpt];
4014 if (pPdptTemp->a[iPdpt].n.u1Present)
4015 {
4016 PCX86PDPAE pPD;
4017 rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pPdptTemp->a[iPdpt].u & X86_PDPE_PG_MASK, &pPD);
4018 AssertRCReturn(rc, ZeroPde);
4019
4020 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
4021 return pPD->a[iPD];
4022 }
4023 }
4024
4025 return ZeroPde;
4026}
4027
4028
4029/**
4030 * Gets the page directory entry for the specified address.
4031 *
4032 * @returns The page directory entry in question.
4033 * @returns A non-present entry if the page directory is not present or on an invalid page.
4034 * @param pPGM Pointer to the PGM instance data.
4035 * @param GCPtr The address.
4036 */
4037DECLINLINE(X86PDEPAE) pgmGstGetLongModePDE(PPGMCPU pPGM, RTGCPTR64 GCPtr)
4038{
4039 X86PDEPAE ZeroPde = {0};
4040 PCX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
4041 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
4042 if (pGuestPml4->a[iPml4].n.u1Present)
4043 {
4044 PCX86PDPT pPdptTemp;
4045 int rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pGuestPml4->a[iPml4].u & X86_PML4E_PG_MASK, &pPdptTemp);
4046 AssertRCReturn(rc, ZeroPde);
4047
4048 const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
4049 if (pPdptTemp->a[iPdpt].n.u1Present)
4050 {
4051 PCX86PDPAE pPD;
4052 rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pPdptTemp->a[iPdpt].u & X86_PDPE_PG_MASK, &pPD);
4053 AssertRCReturn(rc, ZeroPde);
4054
4055 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
4056 return pPD->a[iPD];
4057 }
4058 }
4059 return ZeroPde;
4060}
4061
4062
4063/**
4064 * Gets the page directory entry for the specified address.
4065 *
4066 * @returns Pointer to the page directory entry in question.
4067 * @returns NULL if the page directory is not present or on an invalid page.
4068 * @param pPGM Pointer to the PGM instance data.
4069 * @param GCPtr The address.
4070 */
4071DECLINLINE(PX86PDEPAE) pgmGstGetLongModePDEPtr(PPGMCPU pPGM, RTGCPTR64 GCPtr)
4072{
4073 PCX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
4074 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
4075 if (pGuestPml4->a[iPml4].n.u1Present)
4076 {
4077 PCX86PDPT pPdptTemp;
4078 int rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pGuestPml4->a[iPml4].u & X86_PML4E_PG_MASK, &pPdptTemp);
4079 AssertRCReturn(rc, NULL);
4080
4081 const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
4082 if (pPdptTemp->a[iPdpt].n.u1Present)
4083 {
4084 PX86PDPAE pPD;
4085 rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pPdptTemp->a[iPdpt].u & X86_PDPE_PG_MASK, &pPD);
4086 AssertRCReturn(rc, NULL);
4087
4088 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
4089 return &pPD->a[iPD];
4090 }
4091 }
4092 return NULL;
4093}
4094
4095
4096/**
4097 * Gets the GUEST page directory pointer for the specified address.
4098 *
4099 * @returns Pointer to the page directory in question.
4100 * @returns NULL if the page directory is not present or on an invalid page.
4101 * @param pPGM Pointer to the PGM instance data.
4102 * @param GCPtr The address.
4103 * @param ppPml4e Page Map Level-4 Entry (out)
4104 * @param pPdpe Page directory pointer table entry (out)
4105 * @param piPD Receives the index into the returned page directory
4106 */
4107DECLINLINE(PX86PDPAE) pgmGstGetLongModePDPtr(PPGMCPU pPGM, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPE pPdpe, unsigned *piPD)
4108{
4109 PX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
4110 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
4111 PCX86PML4E pPml4e = *ppPml4e = &pGuestPml4->a[iPml4];
4112 if (pPml4e->n.u1Present)
4113 {
4114 PCX86PDPT pPdptTemp;
4115 int rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pPml4e->u & X86_PML4E_PG_MASK, &pPdptTemp);
4116 AssertRCReturn(rc, NULL);
4117
4118 const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
4119 *pPdpe = pPdptTemp->a[iPdpt];
4120 if (pPdptTemp->a[iPdpt].n.u1Present)
4121 {
4122 PX86PDPAE pPD;
4123 rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pPdptTemp->a[iPdpt].u & X86_PDPE_PG_MASK, &pPD);
4124 AssertRCReturn(rc, NULL);
4125
4126 *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
4127 return pPD;
4128 }
4129 }
4130 return NULL;
4131}
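

/*
 * Illustrative usage sketch. pgmExampleGstLongModeGetPTBase is a
 * hypothetical helper, not a PGM API: it shows how the guest long-mode
 * getters above can be combined to resolve the guest-physical base of the
 * page table mapping an address. 2 MB (PS=1) pages are ignored for brevity;
 * X86_PDE_PAE_PG_MASK is assumed to be the usual page-table base mask.
 */
DECLINLINE(int) pgmExampleGstLongModeGetPTBase(PPGMCPU pPGM, RTGCPTR64 GCPtr, PRTGCPHYS pGCPhysPT)
{
    X86PDEPAE Pde = pgmGstGetLongModePDE(pPGM, GCPtr);
    if (!Pde.n.u1Present)
        return VERR_PAGE_TABLE_NOT_PRESENT;     /* nothing mapped at this address */
    *pGCPhysPT = Pde.u & X86_PDE_PAE_PG_MASK;   /* guest-physical base of the PT */
    return VINF_SUCCESS;
}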
4132
4133#endif /* !IN_RC */
4134
4135/**
4136 * Gets the shadow page directory, 32-bit.
4137 *
4138 * @returns Pointer to the shadow 32-bit PD.
4139 * @param pPGM Pointer to the PGM instance data.
4140 */
4141DECLINLINE(PX86PD) pgmShwGet32BitPDPtr(PPGMCPU pPGM)
4142{
4143 return (PX86PD)PGMPOOL_PAGE_2_PTR_BY_PGMCPU(pPGM, pPGM->CTX_SUFF(pShwPageCR3));
4144}
4145
4146
4147/**
4148 * Gets the shadow page directory entry for the specified address, 32-bit.
4149 *
4150 * @returns Shadow 32-bit PDE.
4151 * @param pPGM Pointer to the PGM instance data.
4152 * @param GCPtr The address.
4153 */
4154DECLINLINE(X86PDE) pgmShwGet32BitPDE(PPGMCPU pPGM, RTGCPTR GCPtr)
4155{
4156 const unsigned iPd = (GCPtr >> X86_PD_SHIFT) & X86_PD_MASK;
4157
4158 PX86PD pShwPde = pgmShwGet32BitPDPtr(pPGM);
4159 if (!pShwPde)
4160 {
4161 X86PDE ZeroPde = {0};
4162 return ZeroPde;
4163 }
4164 return pShwPde->a[iPd];
4165}
4166
4167
4168/**
4169 * Gets the pointer to the shadow page directory entry for the specified
4170 * address, 32-bit.
4171 *
4172 * @returns Pointer to the shadow 32-bit PDE.
4173 * @param pPGM Pointer to the PGM instance data.
4174 * @param GCPtr The address.
4175 */
4176DECLINLINE(PX86PDE) pgmShwGet32BitPDEPtr(PPGMCPU pPGM, RTGCPTR GCPtr)
4177{
4178 const unsigned iPd = (GCPtr >> X86_PD_SHIFT) & X86_PD_MASK;
4179
4180 PX86PD pPde = pgmShwGet32BitPDPtr(pPGM);
4181 AssertReturn(pPde, NULL);
4182 return &pPde->a[iPd];
4183}
4184
4185
4186/**
4187 * Gets the shadow page directory pointer table, PAE.
4188 *
4189 * @returns Pointer to the shadow PAE PDPT.
4190 * @param pPGM Pointer to the PGM instance data.
4191 */
4192DECLINLINE(PX86PDPT) pgmShwGetPaePDPTPtr(PPGMCPU pPGM)
4193{
4194 return (PX86PDPT)PGMPOOL_PAGE_2_PTR_BY_PGMCPU(pPGM, pPGM->CTX_SUFF(pShwPageCR3));
4195}
4196
4197
4198/**
4199 * Gets the shadow page directory for the specified address, PAE.
4200 *
4201 * @returns Pointer to the shadow PD.
4202 * @param pPGM Pointer to the PGM instance data.
4203 * @param GCPtr The address.
4204 */
4205DECLINLINE(PX86PDPAE) pgmShwGetPaePDPtr(PPGMCPU pPGM, RTGCPTR GCPtr)
4206{
4207 const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
4208 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pPGM);
4209
4210 if (!pPdpt->a[iPdpt].n.u1Present)
4211 return NULL;
4212
4213 /* Fetch the pgm pool shadow descriptor. */
4214 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(PGMCPU2PGM(pPGM)->CTX_SUFF(pPool), pPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
4215 AssertReturn(pShwPde, NULL);
4216
4217 return (PX86PDPAE)PGMPOOL_PAGE_2_PTR_BY_PGMCPU(pPGM, pShwPde);
4218}
4219
4220
4221/**
4222 * Gets the shadow page directory for the specified address, PAE (variant taking the shadow PDPT directly).
4223 * @returns Pointer to the shadow PD.
4224 * @param pPGM Pointer to the PGM instance data.
4225 * @param pPdpt Pointer to the shadow PDPT.
4226 * @param GCPtr The address.
4227 */
4228DECLINLINE(PX86PDPAE) pgmShwGetPaePDPtr(PPGMCPU pPGM, PX86PDPT pPdpt, RTGCPTR GCPtr)
4229{
4230 const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
4231
4232 if (!pPdpt->a[iPdpt].n.u1Present)
4233 return NULL;
4234
4235 /* Fetch the pgm pool shadow descriptor. */
4236 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(PGMCPU2PGM(pPGM)->CTX_SUFF(pPool), pPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
4237 AssertReturn(pShwPde, NULL);
4238
4239 return (PX86PDPAE)PGMPOOL_PAGE_2_PTR_BY_PGMCPU(pPGM, pShwPde);
4240}
4241
4242
4243/**
4244 * Gets the shadow page directory entry, PAE.
4245 *
4246 * @returns PDE.
4247 * @param pPGM Pointer to the PGM instance data.
4248 * @param GCPtr The address.
4249 */
4250DECLINLINE(X86PDEPAE) pgmShwGetPaePDE(PPGMCPU pPGM, RTGCPTR GCPtr)
4251{
4252 const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
4253
4254 PX86PDPAE pShwPde = pgmShwGetPaePDPtr(pPGM, GCPtr);
4255 if (!pShwPde)
4256 {
4257 X86PDEPAE ZeroPde = {0};
4258 return ZeroPde;
4259 }
4260 return pShwPde->a[iPd];
4261}
4262
4263
4264/**
4265 * Gets the pointer to the shadow page directory entry for an address, PAE.
4266 *
4267 * @returns Pointer to the PDE.
4268 * @param pPGM Pointer to the PGM instance data.
4269 * @param GCPtr The address.
4270 */
4271DECLINLINE(PX86PDEPAE) pgmShwGetPaePDEPtr(PPGMCPU pPGM, RTGCPTR GCPtr)
4272{
4273 const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
4274
4275 PX86PDPAE pPde = pgmShwGetPaePDPtr(pPGM, GCPtr);
4276 AssertReturn(pPde, NULL);
4277 return &pPde->a[iPd];
4278}
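

/*
 * Illustrative usage sketch. pgmExampleShwIsPaePdePresent is a hypothetical
 * helper, not a PGM API: it builds on the PAE shadow getters above to test
 * whether an address currently has a present shadow PDE. The PGM lock is
 * assumed to be held, as the getters require.
 */
DECLINLINE(bool) pgmExampleShwIsPaePdePresent(PPGMCPU pPGM, RTGCPTR GCPtr)
{
    X86PDEPAE Pde = pgmShwGetPaePDE(pPGM, GCPtr);   /* zero PDE if the shadow PD is absent */
    return RT_BOOL(Pde.n.u1Present);
}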
4279
4280#ifndef IN_RC
4281
4282/**
4283 * Gets the shadow page map level-4 pointer.
4284 *
4285 * @returns Pointer to the shadow PML4.
4286 * @param pPGM Pointer to the PGM instance data.
4287 */
4288DECLINLINE(PX86PML4) pgmShwGetLongModePML4Ptr(PPGMCPU pPGM)
4289{
4290 return (PX86PML4)PGMPOOL_PAGE_2_PTR_BY_PGMCPU(pPGM, pPGM->CTX_SUFF(pShwPageCR3));
4291}
4292
4293
4294/**
4295 * Gets the shadow page map level-4 entry for the specified address.
4296 *
4297 * @returns The entry.
4298 * @param pPGM Pointer to the PGM instance data.
4299 * @param GCPtr The address.
4300 */
4301DECLINLINE(X86PML4E) pgmShwGetLongModePML4E(PPGMCPU pPGM, RTGCPTR GCPtr)
4302{
4303 const unsigned iPml4 = ((RTGCUINTPTR64)GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
4304 PX86PML4 pShwPml4 = pgmShwGetLongModePML4Ptr(pPGM);
4305
4306 if (!pShwPml4)
4307 {
4308 X86PML4E ZeroPml4e = {0};
4309 return ZeroPml4e;
4310 }
4311 return pShwPml4->a[iPml4];
4312}
4313
4314
4315/**
4316 * Gets the pointer to the specified shadow page map level-4 entry.
4317 *
4318 * @returns Pointer to the shadow PML4 entry, or NULL if the shadow PML4 is not mapped.
4319 * @param pPGM Pointer to the PGM instance data.
4320 * @param iPml4 The PML4 index.
4321 */
4322DECLINLINE(PX86PML4E) pgmShwGetLongModePML4EPtr(PPGMCPU pPGM, unsigned int iPml4)
4323{
4324 PX86PML4 pShwPml4 = pgmShwGetLongModePML4Ptr(pPGM);
4325 if (!pShwPml4)
4326 return NULL;
4327 return &pShwPml4->a[iPml4];
4328}
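

/*
 * Illustrative usage sketch. pgmExampleShwGetPml4eForAddr is a hypothetical
 * helper, not a PGM API: it derives the PML4 index from a guest address the
 * same way pgmShwGetLongModePML4E does before calling the pointer getter
 * above.
 */
DECLINLINE(PX86PML4E) pgmExampleShwGetPml4eForAddr(PPGMCPU pPGM, RTGCPTR GCPtr)
{
    const unsigned iPml4 = ((RTGCUINTPTR64)GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    return pgmShwGetLongModePML4EPtr(pPGM, iPml4);  /* NULL if no shadow PML4 is mapped */
}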
4329
4330
4331/**
4332 * Gets the GUEST page directory pointer for the specified address.
4333 *
4334 * @returns Pointer to the page directory in question.
4335 * @returns NULL if the page directory is not present or on an invalid page.
4336 * @param pPGM Pointer to the PGM instance data.
4337 * @param GCPtr The address.
4338 * @param piPD Receives the index into the returned page directory
4339 */
4340DECLINLINE(PX86PDPAE) pgmGstGetLongModePDPtr(PPGMCPU pPGM, RTGCPTR64 GCPtr, unsigned *piPD)
4341{
4342 PCX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
4343 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
4344 if (pGuestPml4->a[iPml4].n.u1Present)
4345 {
4346 PCX86PDPT pPdptTemp;
4347 int rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pGuestPml4->a[iPml4].u & X86_PML4E_PG_MASK, &pPdptTemp);
4348 AssertRCReturn(rc, NULL);
4349
4350 const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
4351 if (pPdptTemp->a[iPdpt].n.u1Present)
4352 {
4353 PX86PDPAE pPD;
4354 rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pPdptTemp->a[iPdpt].u & X86_PDPE_PG_MASK, &pPD);
4355 AssertRCReturn(rc, NULL);
4356
4357 *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
4358 return pPD;
4359 }
4360 }
4361 return NULL;
4362}
4363
4364#endif /* !IN_RC */
4365
4366/**
4367 * Gets the page state for a physical handler.
4368 *
4369 * @returns The physical handler page state.
4370 * @param pCur The physical handler in question.
4371 */
4372DECLINLINE(unsigned) pgmHandlerPhysicalCalcState(PPGMPHYSHANDLER pCur)
4373{
4374 switch (pCur->enmType)
4375 {
4376 case PGMPHYSHANDLERTYPE_PHYSICAL_WRITE:
4377 return PGM_PAGE_HNDL_PHYS_STATE_WRITE;
4378
4379 case PGMPHYSHANDLERTYPE_MMIO:
4380 case PGMPHYSHANDLERTYPE_PHYSICAL_ALL:
4381 return PGM_PAGE_HNDL_PHYS_STATE_ALL;
4382
4383 default:
4384 AssertFatalMsgFailed(("Invalid type %d\n", pCur->enmType));
4385 }
4386}
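

/*
 * Illustrative usage sketch. pgmExampleApplyPhysHandlerState is a
 * hypothetical helper, not a PGM API: it shows the calculated state being
 * applied to a page, assuming PGM_PAGE_SET_HNDL_PHYS_STATE is the
 * physical-handler counterpart of the PGM_PAGE_SET_HNDL_VIRT_STATE macro
 * used further down in this file.
 */
DECLINLINE(void) pgmExampleApplyPhysHandlerState(PPGMPHYSHANDLER pCur, PPGMPAGE pPage)
{
    const unsigned uState = pgmHandlerPhysicalCalcState(pCur);
    PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, uState);
}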
4387
4388
4389/**
4390 * Gets the page state for a virtual handler.
4391 *
4392 * @returns The virtual handler page state.
4393 * @param pCur The virtual handler in question.
4394 * @remarks This should never be used on a hypervisor access handler.
4395 */
4396DECLINLINE(unsigned) pgmHandlerVirtualCalcState(PPGMVIRTHANDLER pCur)
4397{
4398 switch (pCur->enmType)
4399 {
4400 case PGMVIRTHANDLERTYPE_WRITE:
4401 return PGM_PAGE_HNDL_VIRT_STATE_WRITE;
4402 case PGMVIRTHANDLERTYPE_ALL:
4403 return PGM_PAGE_HNDL_VIRT_STATE_ALL;
4404 default:
4405 AssertFatalMsgFailed(("Invalid type %d\n", pCur->enmType));
4406 }
4407}
4408
4409
4410/**
4411 * Clears one physical page of a virtual handler.
4412 *
4413 * @param pPGM Pointer to the PGM instance.
4414 * @param pCur Virtual handler structure.
4415 * @param iPage Physical page index.
4416 *
4417 * @remark Only used when PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL is being set, so no
4418 * need to care about other handlers in the same page.
4419 */
4420DECLINLINE(void) pgmHandlerVirtualClearPage(PPGM pPGM, PPGMVIRTHANDLER pCur, unsigned iPage)
4421{
4422 const PPGMPHYS2VIRTHANDLER pPhys2Virt = &pCur->aPhysToVirt[iPage];
4423
4424 /*
4425 * Remove the node from the tree (it's supposed to be in the tree if we get here!).
4426 */
4427#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
4428 AssertReleaseMsg(pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_IN_TREE,
4429 ("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
4430 pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias));
4431#endif
4432 if (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_IS_HEAD)
4433 {
4434 /* We're the head of the alias chain. */
4435 PPGMPHYS2VIRTHANDLER pRemove = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysRemove(&pPGM->CTX_SUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key); NOREF(pRemove);
4436#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
4437 AssertReleaseMsg(pRemove != NULL,
4438 ("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
4439 pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias));
4440 AssertReleaseMsg(pRemove == pPhys2Virt,
4441 ("wanted: pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n"
4442 " got: pRemove=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
4443 pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias,
4444 pRemove, pRemove->Core.Key, pRemove->Core.KeyLast, pRemove->offVirtHandler, pRemove->offNextAlias));
4445#endif
4446 if (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK)
4447 {
4448 /* Insert the next list in the alias chain into the tree. */
4449 PPGMPHYS2VIRTHANDLER pNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPhys2Virt + (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
4450#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
4451 AssertReleaseMsg(pNext->offNextAlias & PGMPHYS2VIRTHANDLER_IN_TREE,
4452 ("pNext=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
4453 pNext, pNext->Core.Key, pNext->Core.KeyLast, pNext->offVirtHandler, pNext->offNextAlias));
4454#endif
4455 pNext->offNextAlias |= PGMPHYS2VIRTHANDLER_IS_HEAD;
4456 bool fRc = RTAvlroGCPhysInsert(&pPGM->CTX_SUFF(pTrees)->PhysToVirtHandlers, &pNext->Core);
4457 AssertRelease(fRc);
4458 }
4459 }
4460 else
4461 {
4462 /* Locate the previous node in the alias chain. */
4463 PPGMPHYS2VIRTHANDLER pPrev = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGet(&pPGM->CTX_SUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key);
4464#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
4465 AssertReleaseMsg(pPrev != pPhys2Virt,
4466 ("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} pPrev=%p\n",
4467 pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias, pPrev));
4468#endif
4469 for (;;)
4470 {
4471 PPGMPHYS2VIRTHANDLER pNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPrev + (pPrev->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
4472 if (pNext == pPhys2Virt)
4473 {
4474 /* unlink. */
4475 LogFlow(("pgmHandlerVirtualClearPage: removed %p:{.offNextAlias=%#RX32} from alias chain. prev %p:{.offNextAlias=%#RX32} [%RGp-%RGp]\n",
4476 pPhys2Virt, pPhys2Virt->offNextAlias, pPrev, pPrev->offNextAlias, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast));
4477 if (!(pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK))
4478 pPrev->offNextAlias &= ~PGMPHYS2VIRTHANDLER_OFF_MASK;
4479 else
4480 {
4481 PPGMPHYS2VIRTHANDLER pNewNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPhys2Virt + (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
4482 pPrev->offNextAlias = ((intptr_t)pNewNext - (intptr_t)pPrev)
4483 | (pPrev->offNextAlias & ~PGMPHYS2VIRTHANDLER_OFF_MASK);
4484 }
4485 break;
4486 }
4487
4488 /* next */
4489 if (pNext == pPrev)
4490 {
4491#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
4492 AssertReleaseMsg(pNext != pPrev,
4493 ("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} pPrev=%p\n",
4494 pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias, pPrev));
4495#endif
4496 break;
4497 }
4498 pPrev = pNext;
4499 }
4500 }
4501 Log2(("PHYS2VIRT: Removing %RGp-%RGp %#RX32 %s\n",
4502 pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias, R3STRING(pCur->pszDesc)));
4503 pPhys2Virt->offNextAlias = 0;
4504 pPhys2Virt->Core.KeyLast = NIL_RTGCPHYS; /* require reinsert */
4505
4506 /*
4507 * Clear the ram flags for this page.
4508 */
4509 PPGMPAGE pPage = pgmPhysGetPage(pPGM, pPhys2Virt->Core.Key);
4510 AssertReturnVoid(pPage);
4511 PGM_PAGE_SET_HNDL_VIRT_STATE(pPage, PGM_PAGE_HNDL_VIRT_STATE_NONE);
4512}
4513
4514
4515/**
4516 * Internal worker for finding an 'in-use' shadow page given by its index.
4517 *
4518 * @returns Pointer to the shadow page structure.
4519 * @param pPool The pool.
4520 * @param idx The pool page index.
4521 */
4522DECLINLINE(PPGMPOOLPAGE) pgmPoolGetPageByIdx(PPGMPOOL pPool, unsigned idx)
4523{
4524 AssertFatalMsg(idx >= PGMPOOL_IDX_FIRST && idx < pPool->cCurPages, ("idx=%d\n", idx));
4525 return &pPool->aPages[idx];
4526}
4527
4528
4529#ifdef PGMPOOL_WITH_GCPHYS_TRACKING
4530/**
4531 * Clear references to guest physical memory.
4532 *
4533 * @param pPool The pool.
4534 * @param pPoolPage The pool page.
4535 * @param pPhysPage The physical guest page tracking structure.
4536 */
4537DECLINLINE(void) pgmTrackDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPoolPage, PPGMPAGE pPhysPage)
4538{
4539 /*
4540 * Just deal with the simple case here.
4541 */
4542# ifdef LOG_ENABLED
4543 const unsigned uOrg = PGM_PAGE_GET_TRACKING(pPhysPage);
4544# endif
4545 const unsigned cRefs = PGM_PAGE_GET_TD_CREFS(pPhysPage);
4546 if (cRefs == 1)
4547 {
4548 Assert(pPoolPage->idx == PGM_PAGE_GET_TD_IDX(pPhysPage));
4549 PGM_PAGE_SET_TRACKING(pPhysPage, 0);
4550 }
4551 else
4552 pgmPoolTrackPhysExtDerefGCPhys(pPool, pPoolPage, pPhysPage);
4553 Log2(("pgmTrackDerefGCPhys: %x -> %x pPhysPage=%R[pgmpage]\n", uOrg, PGM_PAGE_GET_TRACKING(pPhysPage), pPhysPage ));
4554}
4555#endif /* PGMPOOL_WITH_GCPHYS_TRACKING */
4556
4557
4558#ifdef PGMPOOL_WITH_CACHE
4559/**
4560 * Moves the page to the head of the age list.
4561 *
4562 * This is done when the cached page is used in one way or another.
4563 *
4564 * @param pPool The pool.
4565 * @param pPage The cached page.
4566 */
4567DECLINLINE(void) pgmPoolCacheUsed(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
4568{
4569 PVM pVM = pPool->CTX_SUFF(pVM);
4570 pgmLock(pVM);
4571
4572 /*
4573 * Move to the head of the age list.
4574 */
4575 if (pPage->iAgePrev != NIL_PGMPOOL_IDX)
4576 {
4577 /* unlink */
4578 pPool->aPages[pPage->iAgePrev].iAgeNext = pPage->iAgeNext;
4579 if (pPage->iAgeNext != NIL_PGMPOOL_IDX)
4580 pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->iAgePrev;
4581 else
4582 pPool->iAgeTail = pPage->iAgePrev;
4583
4584 /* insert at head */
4585 pPage->iAgePrev = NIL_PGMPOOL_IDX;
4586 pPage->iAgeNext = pPool->iAgeHead;
4587 Assert(pPage->iAgeNext != NIL_PGMPOOL_IDX); /* we would've already been head then */
4588 pPool->iAgeHead = pPage->idx;
4589 pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->idx;
4590 }
4591 pgmUnlock(pVM);
4592}
4593#endif /* PGMPOOL_WITH_CACHE */
4594
4595/**
4596 * Locks a page to prevent flushing (important for CR3 root pages and shadow PAE PD pages).
4597 *
4598 * @param pPool The pool.
4599 * @param pPage The pool page.
4600 */
4601DECLINLINE(void) pgmPoolLockPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
4602{
4603 Assert(PGMIsLockOwner(pPool->CTX_SUFF(pVM)));
4604 ASMAtomicIncU32(&pPage->cLocked);
4605}
4606
4607
4608/**
4609 * Unlocks a page to allow flushing again.
4610 *
4611 * @param pPool The pool.
4612 * @param pPage The pool page.
4613 */
4614DECLINLINE(void) pgmPoolUnlockPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
4615{
4616 Assert(PGMIsLockOwner(pPool->CTX_SUFF(pVM)));
4617 Assert(pPage->cLocked);
4618 ASMAtomicDecU32(&pPage->cLocked);
4619}
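

/*
 * Illustrative usage sketch, not a PGM API: the intended pairing of the
 * lock/unlock helpers above around work that must not see the pool page
 * flushed. Both helpers assert that the caller owns the PGM lock.
 */
DECLINLINE(void) pgmExampleWithLockedPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
{
    pgmPoolLockPage(pPool, pPage);
    /* ... operate on the shadow page while it cannot be flushed ... */
    pgmPoolUnlockPage(pPool, pPage);
}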
4620
4621
4622/**
4623 * Checks if the page is locked (e.g. the active CR3 or one of the four PDs of a PAE PDPT).
4624 * @returns true if the page is locked, false otherwise.
4625 * @param pPGM Pointer to the PGM instance data.
4626 * @param pPage The pool page.
4627 */
4628DECLINLINE(bool) pgmPoolIsPageLocked(PPGM pPGM, PPGMPOOLPAGE pPage)
4629{
4630 if (pPage->cLocked)
4631 {
4632 LogFlow(("pgmPoolIsPageLocked found root page %d\n", pPage->enmKind));
4633 if (pPage->cModifications)
4634 pPage->cModifications = 1; /* reset counter (can't use 0, or else it will be reinserted in the modified list) */
4635 return true;
4636 }
4637 return false;
4638}
4639
4640/**
4641 * Tells whether guest mappings are to be put into the shadow page tables.
4642 *
4643 * @returns true if mappings are enabled, false otherwise.
4644 * @param pPGM Pointer to the PGM instance data.
4645 */
4646DECLINLINE(bool) pgmMapAreMappingsEnabled(PPGM pPGM)
4647{
4648#ifdef IN_RING0
4649 /* There are no mappings in VT-x and AMD-V mode. */
4650 Assert(pPGM->fDisableMappings);
4651 return false;
4652#else
4653 return !pPGM->fDisableMappings;
4654#endif
4655}
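

/*
 * Illustrative usage sketch, not a PGM API: guarding mapping-related work
 * on the predicate above. Reaching the PGM instance data via pVM->pgm.s is
 * an assumption based on the usual VMM data layout.
 */
DECLINLINE(void) pgmExampleSyncMappingsIfEnabled(PVM pVM)
{
    if (pgmMapAreMappingsEnabled(&pVM->pgm.s))
    {
        /* ... mapping conflict checks / sync work would go here ... */
    }
}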
4656
4657/** @} */
4658
4659#endif /* !___PGMInternal_h */
4660
4661