VirtualBox

source: vbox/trunk/src/VBox/VMM/PGMInternal.h@ 13050

Last change on this file since 13050 was 13047, checked in by vboxsync, 16 years ago

AVLROGCPTRNODECORE: alignment adjustment.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 156.8 KB
Line 
1/* $Id: PGMInternal.h 13047 2008-10-07 13:24:42Z vboxsync $ */
2/** @file
3 * PGM - Internal header file.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22#ifndef ___PGMInternal_h
23#define ___PGMInternal_h
24
25#include <VBox/cdefs.h>
26#include <VBox/types.h>
27#include <VBox/err.h>
28#include <VBox/stam.h>
29#include <VBox/param.h>
30#include <VBox/vmm.h>
31#include <VBox/mm.h>
32#include <VBox/pdmcritsect.h>
33#include <VBox/pdmapi.h>
34#include <VBox/dis.h>
35#include <VBox/dbgf.h>
36#include <VBox/log.h>
37#include <VBox/gmm.h>
38#include <VBox/hwaccm.h>
39#include <iprt/avl.h>
40#include <iprt/assert.h>
41#include <iprt/critsect.h>
42
43
44
45/** @defgroup grp_pgm_int Internals
46 * @ingroup grp_pgm
47 * @internal
48 * @{
49 */
50
51
52/** @name PGM Compile Time Config
53 * @{
54 */
55
56/**
57 * Solve page is out of sync issues inside Guest Context (in PGMGC.cpp).
58 * Comment it if it will break something.
59 */
60#define PGM_OUT_OF_SYNC_IN_GC
61
62/**
63 * Check and skip global PDEs for non-global flushes
64 */
65#define PGM_SKIP_GLOBAL_PAGEDIRS_ON_NONGLOBAL_FLUSH
66
67/**
68 * Sync N pages instead of a whole page table
69 */
70#define PGM_SYNC_N_PAGES
71
72/**
73 * Number of pages to sync during a page fault
74 *
75 * When PGMPOOL_WITH_GCPHYS_TRACKING is enabled using high values here
76 * causes a lot of unnecessary extents and also is slower than taking more \#PFs.
77 */
78#define PGM_SYNC_NR_PAGES 8
79
80/**
81 * Number of PGMPhysRead/Write cache entries (must be <= sizeof(uint64_t))
82 */
83#define PGM_MAX_PHYSCACHE_ENTRIES 64
84#define PGM_MAX_PHYSCACHE_ENTRIES_MASK (PGM_MAX_PHYSCACHE_ENTRIES-1)
85
86/**
87 * Enable caching of PGMR3PhysRead/WriteByte/Word/Dword
88 */
89#define PGM_PHYSMEMACCESS_CACHING
90
91/** @def PGMPOOL_WITH_CACHE
92 * Enable aggressive caching using the page pool.
93 *
94 * This requires PGMPOOL_WITH_USER_TRACKING and PGMPOOL_WITH_MONITORING.
95 */
96#define PGMPOOL_WITH_CACHE
97
98/** @def PGMPOOL_WITH_MIXED_PT_CR3
99 * When defined, we'll deal with 'uncachable' pages.
100 */
101#ifdef PGMPOOL_WITH_CACHE
102# define PGMPOOL_WITH_MIXED_PT_CR3
103#endif
104
105/** @def PGMPOOL_WITH_MONITORING
106 * Monitor the guest pages which are shadowed.
107 * When this is enabled, PGMPOOL_WITH_CACHE or PGMPOOL_WITH_GCPHYS_TRACKING must
108 * be enabled as well.
109 * @remark doesn't really work without caching now. (Mixed PT/CR3 change.)
110 */
111#ifdef PGMPOOL_WITH_CACHE
112# define PGMPOOL_WITH_MONITORING
113#endif
114
115/** @def PGMPOOL_WITH_GCPHYS_TRACKING
116 * Tracking of shadow pages mapping guest physical pages.
117 *
118 * This is very expensive, the current cache prototype is trying to figure out
119 * whether it will be acceptable with an aggressive caching policy.
120 */
121#if defined(PGMPOOL_WITH_CACHE) || defined(PGMPOOL_WITH_MONITORING)
122# define PGMPOOL_WITH_GCPHYS_TRACKING
123#endif
124
125/** @def PGMPOOL_WITH_USER_TRACKING
126 * Tracking users of shadow pages. This is required for the linking of shadow page
127 * tables and physical guest addresses.
128 */
129#if defined(PGMPOOL_WITH_GCPHYS_TRACKING) || defined(PGMPOOL_WITH_CACHE) || defined(PGMPOOL_WITH_MONITORING)
130# define PGMPOOL_WITH_USER_TRACKING
131#endif
132
133/** @def PGMPOOL_CFG_MAX_GROW
134 * The maximum number of pages to add to the pool in one go.
135 */
136#define PGMPOOL_CFG_MAX_GROW (_256K >> PAGE_SHIFT)
137
138/** @def VBOX_STRICT_PGM_HANDLER_VIRTUAL
139 * Enables some extra assertions for virtual handlers (mainly phys2virt related).
140 */
141#ifdef VBOX_STRICT
142# define VBOX_STRICT_PGM_HANDLER_VIRTUAL
143#endif
144/** @} */
145
146
147/** @name PDPT and PML4 flags.
148 * These are placed in the three bits available for system programs in
149 * the PDPT and PML4 entries.
150 * @{ */
151/** The entry is a permanent one and it's must always be present.
152 * Never free such an entry. */
153#define PGM_PLXFLAGS_PERMANENT RT_BIT_64(10)
154/** Mapping (hypervisor allocated pagetable). */
155#define PGM_PLXFLAGS_MAPPING RT_BIT_64(11)
156/** @} */
157
158/** @name Page directory flags.
159 * These are placed in the three bits available for system programs in
160 * the page directory entries.
161 * @{ */
162/** Mapping (hypervisor allocated pagetable). */
163#define PGM_PDFLAGS_MAPPING RT_BIT_64(10)
164/** Made read-only to facilitate dirty bit tracking. */
165#define PGM_PDFLAGS_TRACK_DIRTY RT_BIT_64(11)
166/** @} */
167
168/** @name Page flags.
169 * These are placed in the three bits available for system programs in
170 * the page entries.
171 * @{ */
172/** Made read-only to facilitate dirty bit tracking. */
173#define PGM_PTFLAGS_TRACK_DIRTY RT_BIT_64(9)
174
175#ifndef PGM_PTFLAGS_CSAM_VALIDATED
176/** Scanned and approved by CSAM (tm).
177 * NOTE: Must be identical to the one defined in CSAMInternal.h!!
178 * @todo Move PGM_PTFLAGS_* and PGM_PDFLAGS_* to VBox/pgm.h. */
179#define PGM_PTFLAGS_CSAM_VALIDATED RT_BIT_64(11)
180#endif
181/** @} */
182
183/** @name Defines used to indicate the shadow and guest paging in the templates.
184 * @{ */
185#define PGM_TYPE_REAL 1
186#define PGM_TYPE_PROT 2
187#define PGM_TYPE_32BIT 3
188#define PGM_TYPE_PAE 4
189#define PGM_TYPE_AMD64 5
190#define PGM_TYPE_NESTED 6
191#define PGM_TYPE_EPT 7
192#define PGM_TYPE_MAX PGM_TYPE_EPT
193/** @} */
194
195/** Macro for checking if the guest is using paging.
196 * @param uGstType PGM_TYPE_*
197 * @param uShwType PGM_TYPE_*
198 * @remark ASSUMES certain order of the PGM_TYPE_* values.
199 */
200#define PGM_WITH_PAGING(uGstType, uShwType) ((uGstType) >= PGM_TYPE_32BIT && (uShwType) != PGM_TYPE_NESTED && (uShwType) != PGM_TYPE_EPT)
201
202/** Macro for checking if the guest supports the NX bit.
203 * @param uGstType PGM_TYPE_*
204 * @param uShwType PGM_TYPE_*
205 * @remark ASSUMES certain order of the PGM_TYPE_* values.
206 */
207#define PGM_WITH_NX(uGstType, uShwType) ((uGstType) >= PGM_TYPE_PAE && (uShwType) != PGM_TYPE_NESTED && (uShwType) != PGM_TYPE_EPT)
208
209
210/** @def PGM_HCPHYS_2_PTR
211 * Maps a HC physical page pool address to a virtual address.
212 *
213 * @returns VBox status code.
214 * @param pVM The VM handle.
215 * @param HCPhys The HC physical address to map to a virtual one.
216 * @param ppv Where to store the virtual address. No need to cast this.
217 *
218 * @remark In GC this uses PGMGCDynMapHCPage(), so it will consume part of the
219 * small page window employed by that function. Be careful.
220 * @remark There is no need to assert on the result.
221 */
222#ifdef IN_GC
223# define PGM_HCPHYS_2_PTR(pVM, HCPhys, ppv) PGMGCDynMapHCPage(pVM, HCPhys, (void **)(ppv))
224#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
225# define PGM_HCPHYS_2_PTR(pVM, HCPhys, ppv) PGMR0DynMapHCPage(pVM, HCPhys, (void **)(ppv))
226#else
227# define PGM_HCPHYS_2_PTR(pVM, HCPhys, ppv) MMPagePhys2PageEx(pVM, HCPhys, (void **)(ppv))
228#endif
229
230/** @def PGM_GCPHYS_2_PTR
231 * Maps a GC physical page address to a virtual address.
232 *
233 * @returns VBox status code.
234 * @param pVM The VM handle.
235 * @param GCPhys The GC physical address to map to a virtual one.
236 * @param ppv Where to store the virtual address. No need to cast this.
237 *
238 * @remark In GC this uses PGMGCDynMapGCPage(), so it will consume part of the
239 * small page window employed by that function. Be careful.
240 * @remark There is no need to assert on the result.
241 */
242#ifdef IN_GC
243# define PGM_GCPHYS_2_PTR(pVM, GCPhys, ppv) PGMGCDynMapGCPage(pVM, GCPhys, (void **)(ppv))
244#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
245# define PGM_GCPHYS_2_PTR(pVM, GCPhys, ppv) PGMR0DynMapGCPage(pVM, GCPhys, (void **)(ppv))
246#else
247# define PGM_GCPHYS_2_PTR(pVM, GCPhys, ppv) PGMPhysGCPhys2HCPtr(pVM, GCPhys, 1 /* one page only */, (void **)(ppv)) /** @todo this isn't asserting, use PGMRamGCPhys2HCPtr! */
248#endif
249
250/** @def PGM_GCPHYS_2_PTR_EX
251 * Maps an unaligned GC physical page address to a virtual address.
252 *
253 * @returns VBox status code.
254 * @param pVM The VM handle.
255 * @param GCPhys The GC physical address to map to a virtual one.
256 * @param ppv Where to store the virtual address. No need to cast this.
257 *
258 * @remark In GC this uses PGMGCDynMapGCPage(), so it will consume part of the
259 * small page window employed by that function. Be careful.
260 * @remark There is no need to assert on the result.
261 */
262#ifdef IN_GC
263# define PGM_GCPHYS_2_PTR_EX(pVM, GCPhys, ppv) PGMGCDynMapGCPageEx(pVM, GCPhys, (void **)(ppv))
264#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
265# define PGM_GCPHYS_2_PTR_EX(pVM, GCPhys, ppv) PGMR0DynMapGCPageEx(pVM, GCPhys, (void **)(ppv))
266#else
267# define PGM_GCPHYS_2_PTR_EX(pVM, GCPhys, ppv) PGMPhysGCPhys2HCPtr(pVM, GCPhys, 1 /* one page only */, (void **)(ppv)) /** @todo this isn't asserting, use PGMRamGCPhys2HCPtr! */
268#endif
269
270/** @def PGM_INVL_PG
271 * Invalidates a page when in GC; does nothing in HC.
272 *
273 * @param GCVirt The virtual address of the page to invalidate.
274 */
275#ifdef IN_GC
276# define PGM_INVL_PG(GCVirt) ASMInvalidatePage((void *)(GCVirt))
277#elif defined(IN_RING0)
278# define PGM_INVL_PG(GCVirt) HWACCMInvalidatePage(pVM, (RTGCPTR)(GCVirt))
279#else
280# define PGM_INVL_PG(GCVirt) HWACCMInvalidatePage(pVM, (RTGCPTR)(GCVirt))
281#endif
282
283/** @def PGM_INVL_BIG_PG
284 * Invalidates a 4MB page directory entry when in GC; does nothing in HC.
285 *
286 * @param GCVirt The virtual address within the page directory to invalidate.
287 */
288#ifdef IN_GC
289# define PGM_INVL_BIG_PG(GCVirt) ASMReloadCR3()
290#elif defined(IN_RING0)
291# define PGM_INVL_BIG_PG(GCVirt) HWACCMFlushTLB(pVM)
292#else
293# define PGM_INVL_BIG_PG(GCVirt) HWACCMFlushTLB(pVM)
294#endif
295
296/** @def PGM_INVL_GUEST_TLBS()
297 * Invalidates all guest TLBs.
298 */
299#ifdef IN_GC
300# define PGM_INVL_GUEST_TLBS() ASMReloadCR3()
301#elif defined(IN_RING0)
302# define PGM_INVL_GUEST_TLBS() HWACCMFlushTLB(pVM)
303#else
304# define PGM_INVL_GUEST_TLBS() HWACCMFlushTLB(pVM)
305#endif
306
307
308/**
309 * Structure for tracking GC Mappings.
310 *
311 * This structure is used by a linked list in both GC and HC.
312 */
313typedef struct PGMMAPPING
314{
315 /** Pointer to next entry (ring-3 context). */
316 R3PTRTYPE(struct PGMMAPPING *) pNextR3;
317 /** Pointer to next entry (ring-0 context). */
318 R0PTRTYPE(struct PGMMAPPING *) pNextR0;
319 /** Pointer to next entry (raw-mode / GC context). */
320 RCPTRTYPE(struct PGMMAPPING *) pNextRC;
321#if GC_ARCH_BITS == 64
322 RTRCPTR padding0;
323#endif
324 /** Start Virtual address. */
325 RTGCUINTPTR GCPtr;
326 /** Last Virtual address (inclusive). */
327 RTGCUINTPTR GCPtrLast;
328 /** Range size (bytes). */
329 RTGCUINTPTR cb;
330 /** Pointer to relocation callback function. */
331 R3PTRTYPE(PFNPGMRELOCATE) pfnRelocate;
332 /** User argument to the callback. */
333 R3PTRTYPE(void *) pvUser;
334 /** Mapping description / name. For easing debugging. */
335 R3PTRTYPE(const char *) pszDesc;
336 /** Number of page tables. */
337 RTUINT cPTs;
338#if HC_ARCH_BITS != GC_ARCH_BITS || GC_ARCH_BITS == 64
339 RTUINT uPadding1; /**< Alignment padding. */
340#endif
341 /** Array of page table mapping data. Each entry
342 * describes one page table. The array can be longer
343 * than the declared length.
344 */
345 struct
346 {
347 /** The HC physical address of the page table. */
348 RTHCPHYS HCPhysPT;
349 /** The HC physical address of the first PAE page table. */
350 RTHCPHYS HCPhysPaePT0;
351 /** The HC physical address of the second PAE page table. */
352 RTHCPHYS HCPhysPaePT1;
353 /** The HC virtual address of the 32-bit page table. */
354 R3PTRTYPE(PX86PT) pPTR3;
355 /** The HC virtual address of the two PAE page tables (i.e. 1024 entries instead of 512). */
356 R3PTRTYPE(PX86PTPAE) paPaePTsR3;
357 /** The GC virtual address of the 32-bit page table. */
358 RCPTRTYPE(PX86PT) pPTRC;
359 /** The GC virtual address of the two PAE page tables. */
360 RCPTRTYPE(PX86PTPAE) paPaePTsRC;
361 /** The R0 virtual address of the 32-bit page table. */
362 R0PTRTYPE(PX86PT) pPTR0;
363 /** The R0 virtual address of the two PAE page tables. */
364 R0PTRTYPE(PX86PTPAE) paPaePTsR0;
365 } aPTs[1];
366} PGMMAPPING;
367/** Pointer to structure for tracking GC Mappings. */
368typedef struct PGMMAPPING *PPGMMAPPING;
369
370
371/**
372 * Physical page access handler structure.
373 *
374 * This is used to keep track of physical address ranges
375 * which are being monitored in some kind of way.
376 */
377typedef struct PGMPHYSHANDLER
378{
 /** Core node for the tree based on physical ranges. */
379 AVLROGCPHYSNODECORE Core;
380 /** Access type. */
381 PGMPHYSHANDLERTYPE enmType;
382 /** Number of pages to update. */
383 uint32_t cPages;
384 /** Pointer to R3 callback function. */
385 R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnHandlerR3;
386 /** User argument for R3 handlers. */
387 R3PTRTYPE(void *) pvUserR3;
388 /** Pointer to R0 callback function. */
389 R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnHandlerR0;
390 /** User argument for R0 handlers. */
391 R0PTRTYPE(void *) pvUserR0;
392 /** Pointer to GC callback function. */
393 RCPTRTYPE(PFNPGMRCPHYSHANDLER) pfnHandlerRC;
394 /** User argument for RC handlers. */
395 RCPTRTYPE(void *) pvUserRC;
396 /** Description / Name. For easing debugging. */
397 R3PTRTYPE(const char *) pszDesc;
398#ifdef VBOX_WITH_STATISTICS
399 /** Profiling of this handler. */
400 STAMPROFILE Stat;
401#endif
402} PGMPHYSHANDLER;
403/** Pointer to a physical page access handler structure. */
404typedef PGMPHYSHANDLER *PPGMPHYSHANDLER;
405
406
407/**
408 * Cache node for the physical addresses covered by a virtual handler.
409 */
410typedef struct PGMPHYS2VIRTHANDLER
411{
412 /** Core node for the tree based on physical ranges. */
413 AVLROGCPHYSNODECORE Core;
414 /** Offset (in bytes) from this struct to the PGMVIRTHANDLER structure. */
415 int32_t offVirtHandler;
416 /** Offset (in bytes) of the next alias relative to this one.
417 * Bit 0 is used for indicating whether we're in the tree.
418 * Bit 1 is used for indicating that we're the head node.
419 */
420 int32_t offNextAlias;
421} PGMPHYS2VIRTHANDLER;
422/** Pointer to a phys to virtual handler structure. */
423typedef PGMPHYS2VIRTHANDLER *PPGMPHYS2VIRTHANDLER;
424
425/** The bit in PGMPHYS2VIRTHANDLER::offNextAlias used to indicate that the
426 * node is in the tree. */
427#define PGMPHYS2VIRTHANDLER_IN_TREE RT_BIT(0)
428/** The bit in PGMPHYS2VIRTHANDLER::offNextAlias used to indicate that the
429 * node is in the head of an alias chain.
430 * The PGMPHYS2VIRTHANDLER_IN_TREE is always set if this bit is set. */
431#define PGMPHYS2VIRTHANDLER_IS_HEAD RT_BIT(1)
432/** The mask to apply to PGMPHYS2VIRTHANDLER::offNextAlias to get the offset. */
433#define PGMPHYS2VIRTHANDLER_OFF_MASK (~(int32_t)3)
434
435
436/**
437 * Virtual page access handler structure.
438 *
439 * This is used to keep track of virtual address ranges
440 * which are being monitored in some kind of way.
441 */
442typedef struct PGMVIRTHANDLER
443{
444 /** Core node for the tree based on virtual ranges. */
445 AVLROGCPTRNODECORE Core;
446 /** Size of the range (in bytes). */
447 RTGCUINTPTR cb;
448 /** Number of cache pages. */
449 uint32_t cPages;
450 /** Access type. */
451 PGMVIRTHANDLERTYPE enmType;
452 /** Pointer to the RC callback function. */
453 RCPTRTYPE(PFNPGMRCVIRTHANDLER) pfnHandlerRC;
454#if HC_ARCH_BITS == 64
455 RTRCPTR padding; /**< Alignment padding. */
456#endif
457 /** Pointer to the R3 callback function for invalidation. */
458 R3PTRTYPE(PFNPGMR3VIRTINVALIDATE) pfnInvalidateR3;
459 /** Pointer to the R3 callback function. */
460 R3PTRTYPE(PFNPGMR3VIRTHANDLER) pfnHandlerR3;
461 /** Description / Name. For easing debugging. */
462 R3PTRTYPE(const char *) pszDesc;
463#ifdef VBOX_WITH_STATISTICS
464 /** Profiling of this handler. */
465 STAMPROFILE Stat;
466#endif
467 /** Array of cached physical addresses for the monitored range. */
468 PGMPHYS2VIRTHANDLER aPhysToVirt[HC_ARCH_BITS == 32 ? 1 : 2];
469} PGMVIRTHANDLER;
470/** Pointer to a virtual page access handler structure. */
471typedef PGMVIRTHANDLER *PPGMVIRTHANDLER;
472
473
474/**
475 * Page type.
476 * @remarks This enum has to fit in a 3-bit field (see PGMPAGE::u3Type).
 * @remarks The value ordering matters: the PGMPAGETYPE_IS_* predicates
 *          below compare against these values with &lt;= / == and rely on it.
477 * @todo convert to \#defines.
478 */
479typedef enum PGMPAGETYPE
480{
481 /** The usual invalid zero entry. */
482 PGMPAGETYPE_INVALID = 0,
483 /** RAM page. (RWX) */
484 PGMPAGETYPE_RAM,
485 /** MMIO2 page. (RWX) */
486 PGMPAGETYPE_MMIO2,
487 /** Shadowed ROM. (RWX) */
488 PGMPAGETYPE_ROM_SHADOW,
489 /** ROM page. (R-X) */
490 PGMPAGETYPE_ROM,
491 /** MMIO page. (---) */
492 PGMPAGETYPE_MMIO,
493 /** End of valid entries. */
494 PGMPAGETYPE_END
495} PGMPAGETYPE;
496AssertCompile(PGMPAGETYPE_END < 7);
497
498/** @name Page type predicates.
499 * @{ */
500#define PGMPAGETYPE_IS_READABLE(type) ( (type) <= PGMPAGETYPE_ROM )
501#define PGMPAGETYPE_IS_WRITEABLE(type) ( (type) <= PGMPAGETYPE_ROM_SHADOW )
502#define PGMPAGETYPE_IS_RWX(type) ( (type) <= PGMPAGETYPE_ROM_SHADOW )
503#define PGMPAGETYPE_IS_ROX(type) ( (type) == PGMPAGETYPE_ROM )
504#define PGMPAGETYPE_IS_NP(type) ( (type) == PGMPAGETYPE_MMIO )
505/** @} */
506
507
508/**
509 * A Physical Guest Page tracking structure.
510 *
511 * The format of this structure is complicated because we have to fit a lot
512 * of information into as few bits as possible. The format is also subject
513 * to change (there is one coming up soon). Which means that from now on we'll
514 * be using PGM_PAGE_GET_*, PGM_PAGE_IS_ and PGM_PAGE_SET_* macros for *all*
515 * accesses to the structure.
516 */
517typedef struct PGMPAGE
518{
519 /** The physical address and a whole lot of other stuff. All bits are used! */
520 RTHCPHYS HCPhys;
521 /** The page state. */
522 uint32_t u2StateX : 2;
523 /** Flag indicating that a write monitored page was written to when set. */
524 uint32_t fWrittenToX : 1;
525 /** Reserved for future use. */
526 uint32_t fSomethingElse : 1;
527 /** The Page ID.
528 * @todo Merge with HCPhys once we've liberated HCPhys of its stuff.
529 * The HCPhys will be 100% static. */
530 uint32_t idPageX : 28;
531 /** The page type (PGMPAGETYPE). */
532 uint32_t u3Type : 3;
533 /** The physical handler state (PGM_PAGE_HNDL_PHYS_STATE*) */
534 uint32_t u2HandlerPhysStateX : 2;
535 /** The virtual handler state (PGM_PAGE_HNDL_VIRT_STATE*) */
536 uint32_t u2HandlerVirtStateX : 2;
537 uint32_t u29B : 25; /**< Currently unused bits (zeroed by PGM_PAGE_CLEAR / PGM_PAGE_INIT). */
538} PGMPAGE;
539AssertCompileSize(PGMPAGE, 16);
540/** Pointer to a physical guest page. */
541typedef PGMPAGE *PPGMPAGE;
542/** Pointer to a const physical guest page. */
543typedef const PGMPAGE *PCPGMPAGE;
544/** Pointer to a physical guest page pointer. */
545typedef PPGMPAGE *PPPGMPAGE;
546
547
548/**
549 * Clears the page structure.
550 * @param pPage Pointer to the physical guest page tracking structure.
551 */
552#define PGM_PAGE_CLEAR(pPage) \
553 do { \
554 (pPage)->HCPhys = 0; \
555 (pPage)->u2StateX = 0; \
556 (pPage)->fWrittenToX = 0; \
557 (pPage)->fSomethingElse = 0; \
558 (pPage)->idPageX = 0; \
559 (pPage)->u3Type = 0; \
560 (pPage)->u29B = 0; \
561 } while (0)
562
563/**
564 * Initializes the page structure.
565 * @param pPage Pointer to the physical guest page tracking structure.
566 */
567#define PGM_PAGE_INIT(pPage, _HCPhys, _idPage, _uType, _uState) \
568 do { \
569 (pPage)->HCPhys = (_HCPhys); \
570 (pPage)->u2StateX = (_uState); \
571 (pPage)->fWrittenToX = 0; \
572 (pPage)->fSomethingElse = 0; \
573 (pPage)->idPageX = (_idPage); \
574 /*(pPage)->u3Type = (_uType); - later */ \
575 PGM_PAGE_SET_TYPE(pPage, _uType); \
576 (pPage)->u29B = 0; \
577 } while (0)
578
579/**
580 * Initializes the page structure of a ZERO page.
581 * @param pPage Pointer to the physical guest page tracking structure.
582 */
583#ifdef VBOX_WITH_NEW_PHYS_CODE
584# define PGM_PAGE_INIT_ZERO(pPage, pVM, _uType) \
585 PGM_PAGE_INIT(pPage, (pVM)->pgm.s.HCPhysZeroPg, NIL_GMM_PAGEID, (_uType), PGM_PAGE_STATE_ZERO)
586#else
587# define PGM_PAGE_INIT_ZERO(pPage, pVM, _uType) \
588 PGM_PAGE_INIT(pPage, 0, NIL_GMM_PAGEID, (_uType), PGM_PAGE_STATE_ZERO)
589#endif
590/** Temporary hack. Replaced by PGM_PAGE_INIT_ZERO once the old code is kicked out. */
591# define PGM_PAGE_INIT_ZERO_REAL(pPage, pVM, _uType) \
592 PGM_PAGE_INIT(pPage, (pVM)->pgm.s.HCPhysZeroPg, NIL_GMM_PAGEID, (_uType), PGM_PAGE_STATE_ZERO)
593
594
595/** @name The Page state, PGMPAGE::u2StateX.
596 * @{ */
597/** The zero page.
598 * This is a per-VM page that's never ever mapped writable. */
599#define PGM_PAGE_STATE_ZERO 0
600/** A allocated page.
601 * This is a per-VM page allocated from the page pool (or wherever
602 * we get MMIO2 pages from if the type is MMIO2).
603 */
604#define PGM_PAGE_STATE_ALLOCATED 1
605/** A allocated page that's being monitored for writes.
606 * The shadow page table mappings are read-only. When a write occurs, the
607 * fWrittenTo member is set, the page remapped as read-write and the state
608 * moved back to allocated. */
609#define PGM_PAGE_STATE_WRITE_MONITORED 2
610/** The page is shared, aka. copy-on-write.
611 * This is a page that's shared with other VMs. */
612#define PGM_PAGE_STATE_SHARED 3
613/** @} */
614
615
616/**
617 * Gets the page state.
618 * @returns page state (PGM_PAGE_STATE_*).
619 * @param pPage Pointer to the physical guest page tracking structure.
620 */
621#define PGM_PAGE_GET_STATE(pPage) ( (pPage)->u2StateX )
622
623/**
624 * Sets the page state.
625 * @param pPage Pointer to the physical guest page tracking structure.
626 * @param _uState The new page state.
627 */
628#define PGM_PAGE_SET_STATE(pPage, _uState) \
629 do { (pPage)->u2StateX = (_uState); } while (0)
630
631
632/**
633 * Gets the host physical address of the guest page.
634 * @returns host physical address (RTHCPHYS).
635 * @param pPage Pointer to the physical guest page tracking structure.
636 */
637#define PGM_PAGE_GET_HCPHYS(pPage) ( (pPage)->HCPhys & UINT64_C(0x0000fffffffff000) )
638
639/**
640 * Sets the host physical address of the guest page.
641 * @param pPage Pointer to the physical guest page tracking structure.
642 * @param _HCPhys The new host physical address.
643 */
644#define PGM_PAGE_SET_HCPHYS(pPage, _HCPhys) \
645 do { (pPage)->HCPhys = (((pPage)->HCPhys) & UINT64_C(0xffff000000000fff)) \
646 | ((_HCPhys) & UINT64_C(0x0000fffffffff000)); } while (0)
647
648/**
649 * Get the Page ID.
650 * @returns The Page ID; NIL_GMM_PAGEID if it's a ZERO page.
651 * @param pPage Pointer to the physical guest page tracking structure.
652 */
653#define PGM_PAGE_GET_PAGEID(pPage) ( (pPage)->idPageX )
654/* later:
655#define PGM_PAGE_GET_PAGEID(pPage) ( ((uint32_t)(pPage)->HCPhys >> (48 - 12))
656 | ((uint32_t)(pPage)->HCPhys & 0xfff) )
657*/
658/**
659 * Sets the Page ID.
660 * @param pPage Pointer to the physical guest page tracking structure.
661 */
662#define PGM_PAGE_SET_PAGEID(pPage, _idPage) do { (pPage)->idPageX = (_idPage); } while (0)
663/* later:
664#define PGM_PAGE_SET_PAGEID(pPage, _idPage) do { (pPage)->HCPhys = (((pPage)->HCPhys) & UINT64_C(0x0000fffffffff000)) \
665 | ((_idPage) & 0xfff) \
666 | (((_idPage) & 0x0ffff000) << (48-12)); } while (0)
667*/
668
669/**
670 * Get the Chunk ID.
671 * @returns The Chunk ID; NIL_GMM_CHUNKID if it's a ZERO page.
672 * @param pPage Pointer to the physical guest page tracking structure.
673 */
674#define PGM_PAGE_GET_CHUNKID(pPage) ( (pPage)->idPageX >> GMM_CHUNKID_SHIFT )
675/* later:
676#if GMM_CHUNKID_SHIFT == 12
677# define PGM_PAGE_GET_CHUNKID(pPage) ( (uint32_t)((pPage)->HCPhys >> 48) )
678#elif GMM_CHUNKID_SHIFT > 12
679# define PGM_PAGE_GET_CHUNKID(pPage) ( (uint32_t)((pPage)->HCPhys >> (48 + (GMM_CHUNKID_SHIFT - 12)) )
680#elif GMM_CHUNKID_SHIFT < 12
681# define PGM_PAGE_GET_CHUNKID(pPage) ( ( (uint32_t)((pPage)->HCPhys >> 48) << (12 - GMM_CHUNKID_SHIFT) ) \
682 | ( (uint32_t)((pPage)->HCPhys & 0xfff) >> GMM_CHUNKID_SHIFT ) )
683#else
684# error "GMM_CHUNKID_SHIFT isn't defined or something."
685#endif
686*/
687
688/**
689 * Get the index of the page within the allocation chunk.
690 * @returns The page index.
691 * @param pPage Pointer to the physical guest page tracking structure.
692 */
693#define PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) ( (pPage)->idPageX & GMM_PAGEID_IDX_MASK )
694/* later:
695#if GMM_CHUNKID_SHIFT <= 12
696# define PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) ( (uint32_t)((pPage)->HCPhys & GMM_PAGEID_IDX_MASK) )
697#else
698# define PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) ( (uint32_t)((pPage)->HCPhys & 0xfff) \
699 | ( (uint32_t)((pPage)->HCPhys >> 48) & (RT_BIT_32(GMM_CHUNKID_SHIFT - 12) - 1) ) )
700#endif
701*/
702
703
704/**
705 * Gets the page type.
706 * @returns The page type.
707 * @param pPage Pointer to the physical guest page tracking structure.
708 */
709#define PGM_PAGE_GET_TYPE(pPage) (pPage)->u3Type
710
711/**
712 * Sets the page type.
713 * @param pPage Pointer to the physical guest page tracking structure.
714 * @param _enmType The new page type (PGMPAGETYPE).
715 */
716#ifdef VBOX_WITH_NEW_PHYS_CODE
717#define PGM_PAGE_SET_TYPE(pPage, _enmType) \
718 do { (pPage)->u3Type = (_enmType); } while (0)
719#else
720#define PGM_PAGE_SET_TYPE(pPage, _enmType) \
721 do { \
722 (pPage)->u3Type = (_enmType); \
723 if ((_enmType) == PGMPAGETYPE_ROM) \
724 (pPage)->HCPhys |= MM_RAM_FLAGS_ROM; \
725 else if ((_enmType) == PGMPAGETYPE_ROM_SHADOW) \
726 (pPage)->HCPhys |= MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO2; \
727 else if ((_enmType) == PGMPAGETYPE_MMIO2) \
728 (pPage)->HCPhys |= MM_RAM_FLAGS_MMIO2; \
729 } while (0)
730#endif
731
732
733/**
734 * Checks if the page is 'reserved'.
735 * @returns true/false.
736 * @param pPage Pointer to the physical guest page tracking structure.
737 */
738#define PGM_PAGE_IS_RESERVED(pPage) ( !!((pPage)->HCPhys & MM_RAM_FLAGS_RESERVED) )
739
740/**
741 * Checks if the page is marked for MMIO.
742 * @returns true/false.
743 * @param pPage Pointer to the physical guest page tracking structure.
744 */
745#define PGM_PAGE_IS_MMIO(pPage) ( !!((pPage)->HCPhys & MM_RAM_FLAGS_MMIO) )
746
747/**
748 * Checks if the page is backed by the ZERO page.
749 * @returns true/false.
750 * @param pPage Pointer to the physical guest page tracking structure.
751 */
752#define PGM_PAGE_IS_ZERO(pPage) ( (pPage)->u2StateX == PGM_PAGE_STATE_ZERO )
753
754/**
755 * Checks if the page is backed by a SHARED page.
756 * @returns true/false.
757 * @param pPage Pointer to the physical guest page tracking structure.
758 */
759#define PGM_PAGE_IS_SHARED(pPage) ( (pPage)->u2StateX == PGM_PAGE_STATE_SHARED )
760
761
762/**
763 * Marks the page as written to (for GMM change monitoring).
764 * @param pPage Pointer to the physical guest page tracking structure.
765 */
766#define PGM_PAGE_SET_WRITTEN_TO(pPage) do { (pPage)->fWrittenToX = 1; } while (0)
767
768/**
769 * Clears the written-to indicator.
770 * @param pPage Pointer to the physical guest page tracking structure.
771 */
772#define PGM_PAGE_CLEAR_WRITTEN_TO(pPage) do { (pPage)->fWrittenToX = 0; } while (0)
773
774/**
775 * Checks if the page was marked as written-to.
776 * @returns true/false.
777 * @param pPage Pointer to the physical guest page tracking structure.
778 */
779#define PGM_PAGE_IS_WRITTEN_TO(pPage) ( (pPage)->fWrittenToX )
780
781
782/** @name Physical Access Handler State values (PGMPAGE::u2HandlerPhysStateX).
783 *
784 * @remarks The values are assigned in order of priority, so we can calculate
785 * the correct state for a page with different handlers installed.
786 * @{ */
787/** No handler installed. */
788#define PGM_PAGE_HNDL_PHYS_STATE_NONE 0
789/** Monitoring is temporarily disabled. */
790#define PGM_PAGE_HNDL_PHYS_STATE_DISABLED 1
791/** Write access is monitored. */
792#define PGM_PAGE_HNDL_PHYS_STATE_WRITE 2
793/** All access is monitored. */
794#define PGM_PAGE_HNDL_PHYS_STATE_ALL 3
795/** @} */
796
797/**
798 * Gets the physical access handler state of a page.
799 * @returns PGM_PAGE_HNDL_PHYS_STATE_* value.
800 * @param pPage Pointer to the physical guest page tracking structure.
801 */
802#define PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) ( (pPage)->u2HandlerPhysStateX )
803
804/**
805 * Sets the physical access handler state of a page.
806 * @param pPage Pointer to the physical guest page tracking structure.
807 * @param _uState The new state value.
808 */
809#define PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, _uState) \
810 do { (pPage)->u2HandlerPhysStateX = (_uState); } while (0)
811
812/**
813 * Checks if the page has any physical access handlers, including temporariliy disabled ones.
814 * @returns true/false
815 * @param pPage Pointer to the physical guest page tracking structure.
816 */
817#define PGM_PAGE_HAS_ANY_PHYSICAL_HANDLERS(pPage) ( (pPage)->u2HandlerPhysStateX != PGM_PAGE_HNDL_PHYS_STATE_NONE )
818
819/**
820 * Checks if the page has any active physical access handlers.
821 * @returns true/false
822 * @param pPage Pointer to the physical guest page tracking structure.
823 */
824#define PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage) ( (pPage)->u2HandlerPhysStateX >= PGM_PAGE_HNDL_PHYS_STATE_WRITE )
825
826
827/** @name Virtual Access Handler State values (PGMPAGE::u2HandlerVirtStateX).
828 *
829 * @remarks The values are assigned in order of priority, so we can calculate
830 * the correct state for a page with different handlers installed.
831 * @{ */
/** No handler installed. */
#define PGM_PAGE_HNDL_VIRT_STATE_NONE       0
/* 1 is reserved so the lineup is identical with the physical ones. */
/** Write access is monitored. */
#define PGM_PAGE_HNDL_VIRT_STATE_WRITE      2
/** All access is monitored. */
#define PGM_PAGE_HNDL_VIRT_STATE_ALL        3
/** @} */

/**
 * Gets the virtual access handler state of a page.
 * @returns PGM_PAGE_HNDL_VIRT_STATE_* value.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) ( (pPage)->u2HandlerVirtStateX )

/**
 * Sets the virtual access handler state of a page.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 * @param   _uState     The new state value (PGM_PAGE_HNDL_VIRT_STATE_*).
 */
#define PGM_PAGE_SET_HNDL_VIRT_STATE(pPage, _uState) \
    do { (pPage)->u2HandlerVirtStateX = (_uState); } while (0)

/**
 * Checks if the page has any virtual access handlers.
 * @returns true/false
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_HAS_ANY_VIRTUAL_HANDLERS(pPage) ( (pPage)->u2HandlerVirtStateX != PGM_PAGE_HNDL_VIRT_STATE_NONE )

/**
 * Same as PGM_PAGE_HAS_ANY_VIRTUAL_HANDLERS - virtual handler pages cannot be
 * temporarily disabled, so 'any' and 'active' are equivalent for them.
 * @returns true/false
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage) PGM_PAGE_HAS_ANY_VIRTUAL_HANDLERS(pPage)



/**
 * Checks if the page has any access handlers, including temporarily disabled ones.
 * @returns true/false
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_HAS_ANY_HANDLERS(pPage) \
    (   (pPage)->u2HandlerPhysStateX != PGM_PAGE_HNDL_PHYS_STATE_NONE \
     || (pPage)->u2HandlerVirtStateX != PGM_PAGE_HNDL_VIRT_STATE_NONE )

/**
 * Checks if the page has any active access handlers.
 * @returns true/false
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) \
    (   (pPage)->u2HandlerPhysStateX >= PGM_PAGE_HNDL_PHYS_STATE_WRITE \
     || (pPage)->u2HandlerVirtStateX >= PGM_PAGE_HNDL_VIRT_STATE_WRITE )

/**
 * Checks if the page has any active access handlers catching all accesses.
 * @returns true/false
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage) \
    (   (pPage)->u2HandlerPhysStateX == PGM_PAGE_HNDL_PHYS_STATE_ALL \
     || (pPage)->u2HandlerVirtStateX == PGM_PAGE_HNDL_VIRT_STATE_ALL )
899
900
/**
 * Ram range for GC Phys to HC Phys conversion.
 *
 * Can be used for HC Virt to GC Phys and HC Virt to HC Phys
 * conversions too, but we'll let MM handle that for now.
 *
 * This structure is used by linked lists in both GC and HC.
 */
typedef struct PGMRAMRANGE
{
    /** Pointer to the next RAM range - for R3. */
    R3PTRTYPE(struct PGMRAMRANGE *)     pNextR3;
    /** Pointer to the next RAM range - for R0. */
    R0PTRTYPE(struct PGMRAMRANGE *)     pNextR0;
    /** Pointer to the next RAM range - for RC. */
    RCPTRTYPE(struct PGMRAMRANGE *)     pNextRC;
    /** Pointer alignment. */
    RTRCPTR                             RCPtrAlignment;
    /** Start of the range. Page aligned. */
    RTGCPHYS                            GCPhys;
    /** Last address in the range (inclusive). Page aligned (-1). */
    RTGCPHYS                            GCPhysLast;
    /** Size of the range. (Page aligned of course). */
    RTGCPHYS                            cb;
    /** MM_RAM_* flags. */
    uint32_t                            fFlags;
    uint32_t                            u32Alignment; /**< alignment. */
#ifndef VBOX_WITH_NEW_PHYS_CODE
    /** HC virtual lookup ranges for chunks - R3/R0 Ptr.
     * Currently only used with MM_RAM_FLAGS_DYNAMIC_ALLOC ranges. */
    R3R0PTRTYPE(PRTR3UINTPTR)           paChunkR3Ptrs;
#endif
    /** Start of the HC mapping of the range. This is only used for MMIO2. */
    R3PTRTYPE(void *)                   pvR3;
    /** The range description. */
    R3PTRTYPE(const char *)             pszDesc;

    /** Padding to make aPage aligned on sizeof(PGMPAGE). */
#ifdef VBOX_WITH_NEW_PHYS_CODE
    uint32_t                            au32Reserved[2];
#elif HC_ARCH_BITS == 32
    uint32_t                            au32Reserved[1];
#endif

    /** Array of physical guest page tracking structures.
     * @remarks Allocated with the real page count; [1] is only the pre-C99
     *          variable-length-trailer declaration idiom. */
    PGMPAGE                             aPages[1];
} PGMRAMRANGE;
/** Pointer to Ram range for GC Phys to HC Phys conversion. */
typedef PGMRAMRANGE *PPGMRAMRANGE;
950
/** Returns the HC (ring-3) pointer corresponding to a byte offset into a RAM range.
 * For MM_RAM_FLAGS_DYNAMIC_ALLOC ranges the address is resolved through the
 * per-chunk pointer table (paChunkR3Ptrs); otherwise it is a plain offset from
 * the R3 mapping (pvR3, i.e. MMIO2).
 * @param   pRam    Pointer to the PGMRAMRANGE.
 * @param   off     The byte offset into the range.
 * @remarks The expansion deliberately ends with a semicolon (historic quirk),
 *          so the macro must terminate a statement and cannot be embedded in a
 *          larger expression. The condition and macro arguments are now fully
 *          parenthesized for macro hygiene. */
#define PGMRAMRANGE_GETHCPTR(pRam, off) \
    ( ((pRam)->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC) \
      ? (RTHCPTR)((pRam)->paChunkR3Ptrs[(off) >> PGM_DYNAMIC_CHUNK_SHIFT] + ((off) & PGM_DYNAMIC_CHUNK_OFFSET_MASK)) \
      : (RTHCPTR)((RTR3UINTPTR)(pRam)->pvR3 + (off)) );
955
/**
 * Per page tracking structure for ROM image.
 *
 * A ROM image may have a shadow page, in which case we may have
 * two pages backing it. This structure contains the PGMPAGE for
 * both while PGMRAMRANGE have a copy of the active one. It is
 * important that these aren't out of sync in any regard other
 * than page pool tracking data.
 */
typedef struct PGMROMPAGE
{
    /** The page structure for the virgin ROM page (the unmodified read-only image). */
    PGMPAGE     Virgin;
    /** The page structure for the shadow RAM page (writable copy, if shadowed). */
    PGMPAGE     Shadow;
    /** The current protection setting. */
    PGMROMPROT  enmProt;
    /** Pad the structure size to a multiple of 8. */
    uint32_t    u32Padding;
} PGMROMPAGE;
/** Pointer to a ROM page tracking structure. */
typedef PGMROMPAGE *PPGMROMPAGE;
978
979
/**
 * A registered ROM image.
 *
 * This is needed to keep track of ROM image since they generally
 * intrude into a PGMRAMRANGE. It also keeps track of additional
 * info like the two page sets (read-only virgin and read-write shadow),
 * the current state of each page.
 *
 * Because access handlers cannot easily be executed in a different
 * context, the ROM ranges needs to be accessible and in all contexts.
 */
typedef struct PGMROMRANGE
{
    /** Pointer to the next range - R3. */
    R3PTRTYPE(struct PGMROMRANGE *)     pNextR3;
    /** Pointer to the next range - R0. */
    R0PTRTYPE(struct PGMROMRANGE *)     pNextR0;
    /** Pointer to the next range - GC. */
    RCPTRTYPE(struct PGMROMRANGE *)     pNextGC;
    /** Pointer alignment. */
    RTRCPTR                             GCPtrAlignment;
    /** Address of the range. */
    RTGCPHYS                            GCPhys;
    /** Address of the last byte in the range. */
    RTGCPHYS                            GCPhysLast;
    /** Size of the range. */
    RTGCPHYS                            cb;
    /** The flags (PGMPHYS_ROM_FLAG_*). */
    uint32_t                            fFlags;
    /** Alignment padding ensuring that aPages is sizeof(PGMROMPAGE) aligned.
     * (Member name carries a historic typo; kept for source compatibility.) */
    uint32_t                            au32Alignemnt[HC_ARCH_BITS == 32 ? 7 : 3];
    /** Pointer to the original bits when PGMPHYS_ROM_FLAG_PERMANENT_BINARY was specified.
     * This is used for strictness checks. */
    R3PTRTYPE(const void *)             pvOriginal;
    /** The ROM description. */
    R3PTRTYPE(const char *)             pszDesc;
    /** The per page tracking structures.
     * @remarks Variable sized; allocated with the real page count. */
    PGMROMPAGE                          aPages[1];
} PGMROMRANGE;
/** Pointer to a ROM range. */
typedef PGMROMRANGE *PPGMROMRANGE;
1021
1022
/**
 * A registered MMIO2 (= Device RAM) range.
 *
 * There are a few reason why we need to keep track of these
 * registrations. One of them is the deregistration & cleanup
 * stuff, while another is that the PGMRAMRANGE associated with
 * such a region may have to be removed from the ram range list.
 *
 * Overlapping with a RAM range has to be 100% or none at all. The
 * pages in the existing RAM range must not be ROM nor MMIO. A guru
 * meditation will be raised if a partial overlap or an overlap of
 * ROM pages is encountered. On an overlap we will free all the
 * existing RAM pages and put in the ram range pages instead.
 */
typedef struct PGMMMIO2RANGE
{
    /** The owner of the range. (a device) */
    PPDMDEVINSR3                        pDevInsR3;
    /** Pointer to the ring-3 mapping of the allocation. */
    RTR3PTR                             pvR3;
    /** Pointer to the next range - R3. */
    R3PTRTYPE(struct PGMMMIO2RANGE *)   pNextR3;
    /** Whether it's mapped or not. */
    bool                                fMapped;
    /** Whether it's overlapping or not. */
    bool                                fOverlapping;
    /** The PCI region number.
     * @remarks This ASSUMES that nobody will ever really need to have multiple
     *          PCI devices with matching MMIO region numbers on a single device. */
    uint8_t                             iRegion;
    /** Alignment padding for putting the ram range on a PGMPAGE alignment boundary.
     * (Member name carries a historic typo; kept for source compatibility.) */
    uint8_t                             abAlignemnt[HC_ARCH_BITS == 32 ? 1 : 5];
    /** The associated RAM range. */
    PGMRAMRANGE                         RamRange;
} PGMMMIO2RANGE;
/** Pointer to a MMIO2 range. */
typedef PGMMMIO2RANGE *PPGMMMIO2RANGE;
1060
1061
1062
1063
/** @todo r=bird: fix typename. */
/**
 * PGMPhysRead/Write cache entry.
 */
typedef struct PGMPHYSCACHE_ENTRY
{
    /** HC pointer to physical page. */
    R3PTRTYPE(uint8_t *)    pbHC;
    /** GC Physical address for cache entry. */
    RTGCPHYS                GCPhys;
#if HC_ARCH_BITS == 64 && GC_ARCH_BITS == 32
    /** Alignment padding.
     * NOTE(review): declared RTGCPHYS despite the u32 name prefix - looks
     * intentional for sizing, but confirm before renaming/retyping. */
    RTGCPHYS                u32Padding0;
#endif
} PGMPHYSCACHE_ENTRY;

/**
 * PGMPhysRead/Write cache to reduce REM memory access overhead.
 */
typedef struct PGMPHYSCACHE
{
    /** Bitmap of valid cache entries (one bit per Entry slot). */
    uint64_t            aEntries;
    /** Cache entries. */
    PGMPHYSCACHE_ENTRY  Entry[PGM_MAX_PHYSCACHE_ENTRIES];
} PGMPHYSCACHE;
1089
1090
/** Pointer to an allocation chunk ring-3 mapping. */
typedef struct PGMCHUNKR3MAP *PPGMCHUNKR3MAP;
/** Pointer to an allocation chunk ring-3 mapping pointer. */
typedef PPGMCHUNKR3MAP *PPPGMCHUNKR3MAP;

/**
 * Ring-3 tracking structure for an allocation chunk ring-3 mapping.
 *
 * The primary tree (Core) uses the chunk id as key.
 * The secondary tree (AgeCore) is used for ageing and uses ageing sequence number as key.
 */
typedef struct PGMCHUNKR3MAP
{
    /** The key is the chunk id. */
    AVLU32NODECORE      Core;
    /** The key is the ageing sequence number. */
    AVLLU32NODECORE     AgeCore;
    /** The current age thingy (ageing sequence number). */
    uint32_t            iAge;
    /** The current reference count. */
    uint32_t volatile   cRefs;
    /** The current permanent reference count. */
    uint32_t volatile   cPermRefs;
    /** The mapping address. */
    void               *pv;
} PGMCHUNKR3MAP;
1117
/**
 * Allocation chunk ring-3 mapping TLB entry.
 */
typedef struct PGMCHUNKR3MAPTLBE
{
    /** The chunk id. */
    uint32_t volatile                   idChunk;
#if HC_ARCH_BITS == 64
    uint32_t                            u32Padding; /**< alignment padding. */
#endif
    /** The chunk map. */
    R3R0PTRTYPE(PPGMCHUNKR3MAP) volatile pChunk;
} PGMCHUNKR3MAPTLBE;
/** Pointer to the an allocation chunk ring-3 mapping TLB entry. */
typedef PGMCHUNKR3MAPTLBE *PPGMCHUNKR3MAPTLBE;

/** The number of TLB entries in PGMCHUNKR3MAPTLB.
 * @remark  Must be a power of two value. */
#define PGM_CHUNKR3MAPTLB_ENTRIES   32

/**
 * Allocation chunk ring-3 mapping TLB.
 *
 * @remarks We use a TLB to speed up lookups by avoiding walking the AVL.
 *          At first glance this might look kinda odd since AVL trees are
 *          supposed to give the most optimal lookup times of all trees
 *          due to their balancing. However, take a tree with 1023 nodes
 *          in it, that's 10 levels, meaning that most searches have to go
 *          down 9 levels before they find what they want. This isn't fast
 *          compared to a TLB hit. There is the factor of cache misses,
 *          and of course the problem with trees and branch prediction.
 *          This is why we use TLBs in front of most of the trees.
 *
 * @todo    Generalize this TLB + AVL stuff, shouldn't be all that
 *          difficult when we switch to inlined AVL trees (from kStuff).
 */
typedef struct PGMCHUNKR3MAPTLB
{
    /** The TLB entries. */
    PGMCHUNKR3MAPTLBE   aEntries[PGM_CHUNKR3MAPTLB_ENTRIES];
} PGMCHUNKR3MAPTLB;

/**
 * Calculates the index of a guest page in the Ring-3 Chunk TLB.
 * @returns Chunk TLB index.
 * @param   idChunk     The Chunk ID.
 */
#define PGM_CHUNKR3MAPTLB_IDX(idChunk)  ( (idChunk) & (PGM_CHUNKR3MAPTLB_ENTRIES - 1) )
1166
1167
/**
 * Ring-3 guest page mapping TLB entry.
 * @remarks used in ring-0 as well at the moment.
 */
typedef struct PGMPAGER3MAPTLBE
{
    /** Address of the page. */
    RTGCPHYS volatile                   GCPhys;
    /** The guest page. */
    R3R0PTRTYPE(PPGMPAGE) volatile      pPage;
    /** Pointer to the page mapping tracking structure, PGMCHUNKR3MAP. */
    R3R0PTRTYPE(PPGMCHUNKR3MAP) volatile pMap;
    /** The address (ring-3 mapping of the page). */
    R3R0PTRTYPE(void *) volatile        pv;
#if HC_ARCH_BITS == 32
    uint32_t                            u32Padding; /**< alignment padding. */
#endif
} PGMPAGER3MAPTLBE;
/** Pointer to an entry in the HC physical TLB. */
typedef PGMPAGER3MAPTLBE *PPGMPAGER3MAPTLBE;


/** The number of entries in the ring-3 guest page mapping TLB.
 * @remarks The value must be a power of two. */
#define PGM_PAGER3MAPTLB_ENTRIES 64

/**
 * Ring-3 guest page mapping TLB.
 * @remarks used in ring-0 as well at the moment.
 */
typedef struct PGMPAGER3MAPTLB
{
    /** The TLB entries. */
    PGMPAGER3MAPTLBE    aEntries[PGM_PAGER3MAPTLB_ENTRIES];
} PGMPAGER3MAPTLB;
/** Pointer to the ring-3 guest page mapping TLB. */
typedef PGMPAGER3MAPTLB *PPGMPAGER3MAPTLB;

/**
 * Calculates the index of the TLB entry for the specified guest page.
 * @returns Physical TLB index.
 * @param   GCPhys      The guest physical address.
 */
#define PGM_PAGER3MAPTLB_IDX(GCPhys)    ( ((GCPhys) >> PAGE_SHIFT) & (PGM_PAGER3MAPTLB_ENTRIES - 1) )
1212
1213
/** @name Context neutral page mapper TLB.
 *
 * Hoping to avoid some code and bug duplication parts of the GCxxx->CCPtr
 * code is written in a kind of context neutral way. Time will show whether
 * this actually makes sense or not...
 *
 * @{ */
/** @typedef PPGMPAGEMAPTLB
 * The page mapper TLB pointer type for the current context. */
/** @typedef PPGMPAGEMAPTLBE
 * The page mapper TLB entry pointer type for the current context. */
/** @typedef PPPGMPAGEMAPTLBE
 * The page mapper TLB entry pointer pointer type for the current context. */
/** @def PGM_PAGEMAPTLB_ENTRIES
 * The number of TLB entries in the page mapper TLB for the current context. */
/** @def PGM_PAGEMAPTLB_IDX
 * Calculate the TLB index for a guest physical address.
 * @returns The TLB index.
 * @param   GCPhys      The guest physical address. */
/** @typedef PPGMPAGEMAP
 * Pointer to a page mapper unit for current context. */
/** @typedef PPPGMPAGEMAP
 * Pointer to a page mapper unit pointer for current context. */
#ifdef IN_GC
// typedef PPGMPAGEGCMAPTLB               PPGMPAGEMAPTLB;
// typedef PPGMPAGEGCMAPTLBE              PPGMPAGEMAPTLBE;
// typedef PPGMPAGEGCMAPTLBE             *PPPGMPAGEMAPTLBE;
# define PGM_PAGEMAPTLB_ENTRIES           PGM_PAGEGCMAPTLB_ENTRIES
# define PGM_PAGEMAPTLB_IDX(GCPhys)       PGM_PAGEGCMAPTLB_IDX(GCPhys)
 typedef void *                           PPGMPAGEMAP;
 typedef void **                          PPPGMPAGEMAP;
//#elif IN_RING0
// typedef PPGMPAGER0MAPTLB               PPGMPAGEMAPTLB;
// typedef PPGMPAGER0MAPTLBE              PPGMPAGEMAPTLBE;
// typedef PPGMPAGER0MAPTLBE             *PPPGMPAGEMAPTLBE;
//# define PGM_PAGEMAPTLB_ENTRIES          PGM_PAGER0MAPTLB_ENTRIES
//# define PGM_PAGEMAPTLB_IDX(GCPhys)      PGM_PAGER0MAPTLB_IDX(GCPhys)
// typedef PPGMCHUNKR0MAP                 PPGMPAGEMAP;
// typedef PPPGMCHUNKR0MAP                PPPGMPAGEMAP;
#else
 typedef PPGMPAGER3MAPTLB                 PPGMPAGEMAPTLB;
 typedef PPGMPAGER3MAPTLBE                PPGMPAGEMAPTLBE;
 typedef PPGMPAGER3MAPTLBE               *PPPGMPAGEMAPTLBE;
# define PGM_PAGEMAPTLB_ENTRIES           PGM_PAGER3MAPTLB_ENTRIES
# define PGM_PAGEMAPTLB_IDX(GCPhys)       PGM_PAGER3MAPTLB_IDX(GCPhys)
 typedef PPGMCHUNKR3MAP                   PPGMPAGEMAP;
 typedef PPPGMCHUNKR3MAP                  PPPGMPAGEMAP;
#endif
/** @} */
1263
1264
/** @name PGM Pool Indexes.
 * Aka. the unique shadow page identifier.
 * @{ */
/** NIL page pool IDX. */
#define NIL_PGMPOOL_IDX         0
/** The first special index (the special indexes are fixed root pages). */
#define PGMPOOL_IDX_FIRST_SPECIAL 1
/** Page directory (32-bit root). */
#define PGMPOOL_IDX_PD          1
/** The extended PAE page directory (2048 entries, works as root currently). */
#define PGMPOOL_IDX_PAE_PD      2
/** PAE Page Directory Table 0. */
#define PGMPOOL_IDX_PAE_PD_0    3
/** PAE Page Directory Table 1. */
#define PGMPOOL_IDX_PAE_PD_1    4
/** PAE Page Directory Table 2. */
#define PGMPOOL_IDX_PAE_PD_2    5
/** PAE Page Directory Table 3. */
#define PGMPOOL_IDX_PAE_PD_3    6
/** Page Directory Pointer Table (PAE root, not currently used). */
#define PGMPOOL_IDX_PDPT        7
/** AMD64 CR3 level index.*/
#define PGMPOOL_IDX_AMD64_CR3   8
/** Nested paging root.*/
#define PGMPOOL_IDX_NESTED_ROOT 9
/** The first normal index. */
#define PGMPOOL_IDX_FIRST       10
/** The last valid index. (inclusive, 14 bits) */
#define PGMPOOL_IDX_LAST        0x3fff
/** @} */

/** The NIL index for the parent chain. */
#define NIL_PGMPOOL_USER_INDEX  ((uint16_t)0xffff)
1298
/**
 * Node in the chain linking a shadowed page to it's parent (user).
 */
#pragma pack(1)
typedef struct PGMPOOLUSER
{
    /** The index to the next item in the chain. NIL_PGMPOOL_USER_INDEX is no next. */
    uint16_t            iNext;
    /** The user page index. */
    uint16_t            iUser;
    /** Index into the user table. */
    uint32_t            iUserTable;
} PGMPOOLUSER, *PPGMPOOLUSER;
typedef const PGMPOOLUSER *PCPGMPOOLUSER;
#pragma pack()


/** The NIL index for the phys ext chain. */
#define NIL_PGMPOOL_PHYSEXT_INDEX   ((uint16_t)0xffff)

/**
 * Node in the chain of physical cross reference extents.
 */
#pragma pack(1)
typedef struct PGMPOOLPHYSEXT
{
    /** The index to the next item in the chain. NIL_PGMPOOL_PHYSEXT_INDEX is no next. */
    uint16_t            iNext;
    /** The user page indexes (up to three references per extent node). */
    uint16_t            aidx[3];
} PGMPOOLPHYSEXT, *PPGMPOOLPHYSEXT;
typedef const PGMPOOLPHYSEXT *PCPGMPOOLPHYSEXT;
#pragma pack()
1332
1333
/**
 * The kind of page that's being shadowed.
 * (Encodes the shadow (Shw) paging structure type and the guest (Gst)
 * structure it shadows.)
 */
typedef enum PGMPOOLKIND
{
    /** The virtual invalid 0 entry. */
    PGMPOOLKIND_INVALID = 0,
    /** The entry is free (=unused). */
    PGMPOOLKIND_FREE,

    /** Shw: 32-bit page table;   Gst: no paging. */
    PGMPOOLKIND_32BIT_PT_FOR_PHYS,
    /** Shw: 32-bit page table;   Gst: 32-bit page table. */
    PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT,
    /** Shw: 32-bit page table;   Gst: 4MB page. */
    PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB,
    /** Shw: PAE page table;      Gst: no paging. */
    PGMPOOLKIND_PAE_PT_FOR_PHYS,
    /** Shw: PAE page table;      Gst: 32-bit page table. */
    PGMPOOLKIND_PAE_PT_FOR_32BIT_PT,
    /** Shw: PAE page table;      Gst: Half of a 4MB page. */
    PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB,
    /** Shw: PAE page table;      Gst: PAE page table. */
    PGMPOOLKIND_PAE_PT_FOR_PAE_PT,
    /** Shw: PAE page table;      Gst: 2MB page. */
    PGMPOOLKIND_PAE_PT_FOR_PAE_2MB,

    /** Shw: PAE page directory;  Gst: 32-bit page directory. */
    PGMPOOLKIND_PAE_PD_FOR_32BIT_PD,
    /** Shw: PAE page directory;  Gst: PAE page directory. */
    PGMPOOLKIND_PAE_PD_FOR_PAE_PD,

    /** Shw: 64-bit page directory pointer table;   Gst: 64-bit page directory pointer table. */
    PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT,
    /** Shw: 64-bit page directory pointer table;   Gst: no paging. */
    PGMPOOLKIND_64BIT_PDPT_FOR_PHYS,
    /** Shw: 64-bit page directory table;           Gst: 64-bit page directory table. */
    PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD,
    /** Shw: 64-bit page directory table;           Gst: no paging. */
    PGMPOOLKIND_64BIT_PD_FOR_PHYS,

    /** Shw: 64-bit PML4;         Gst: 64-bit PML4. */
    PGMPOOLKIND_64BIT_PML4_FOR_64BIT_PML4,

    /** Shw: EPT page directory pointer table;      Gst: no paging. */
    PGMPOOLKIND_EPT_PDPT_FOR_PHYS,
    /** Shw: EPT page directory table;              Gst: no paging. */
    PGMPOOLKIND_EPT_PD_FOR_PHYS,
    /** Shw: EPT page table;                        Gst: no paging. */
    PGMPOOLKIND_EPT_PT_FOR_PHYS,

    /** Shw: Root 32-bit page directory. */
    PGMPOOLKIND_ROOT_32BIT_PD,
    /** Shw: Root PAE page directory. */
    PGMPOOLKIND_ROOT_PAE_PD,
    /** Shw: Root PAE page directory pointer table (legacy, 4 entries). */
    PGMPOOLKIND_ROOT_PDPT,
    /** Shw: Root Nested paging table. */
    PGMPOOLKIND_ROOT_NESTED,

    /** The last valid entry. */
    PGMPOOLKIND_LAST = PGMPOOLKIND_ROOT_NESTED
} PGMPOOLKIND;
1397
1398
/**
 * The tracking data for a page in the pool.
 */
typedef struct PGMPOOLPAGE
{
    /** AVL node code with the (HC) physical address of this page. */
    AVLOHCPHYSNODECORE  Core;
    /** Pointer to the HC mapping of the page. */
    R3R0PTRTYPE(void *) pvPageHC;
    /** The guest physical address. */
#if HC_ARCH_BITS == 32 && GC_ARCH_BITS == 64
    uint32_t            Alignment0;
#endif
    RTGCPHYS            GCPhys;
    /** The kind of page we're shadowing. (This is really a PGMPOOLKIND enum.) */
    uint8_t             enmKind;
    uint8_t             bPadding;
    /** The index of this page. */
    uint16_t            idx;
    /** The next entry in the list this page currently resides in.
     * It's either in the free list or in the GCPhys hash. */
    uint16_t            iNext;
#ifdef PGMPOOL_WITH_USER_TRACKING
    /** Head of the user chain. NIL_PGMPOOL_USER_INDEX if not currently in use. */
    uint16_t            iUserHead;
    /** The number of present entries. */
    uint16_t            cPresent;
    /** The first entry in the table which is present. */
    uint16_t            iFirstPresent;
#endif
#ifdef PGMPOOL_WITH_MONITORING
    /** The number of modifications to the monitored page. */
    uint16_t            cModifications;
    /** The next modified page. NIL_PGMPOOL_IDX if tail. */
    uint16_t            iModifiedNext;
    /** The previous modified page. NIL_PGMPOOL_IDX if head. */
    uint16_t            iModifiedPrev;
    /** The next page sharing access handler. NIL_PGMPOOL_IDX if tail. */
    uint16_t            iMonitoredNext;
    /** The previous page sharing access handler. NIL_PGMPOOL_IDX if head. */
    uint16_t            iMonitoredPrev;
#endif
#ifdef PGMPOOL_WITH_CACHE
    /** The next page in the age list. */
    uint16_t            iAgeNext;
    /** The previous page in the age list. */
    uint16_t            iAgePrev;
#endif /* PGMPOOL_WITH_CACHE */
    /** Used to indicate that the page is zeroed. */
    bool                fZeroed;
    /** Used to indicate that a PT has non-global entries. */
    bool                fSeenNonGlobal;
    /** Used to indicate that we're monitoring writes to the guest page. */
    bool                fMonitored;
    /** Used to indicate that the page is in the cache (e.g. in the GCPhys hash).
     * (All pages are in the age list.) */
    bool                fCached;
    /** This is used by the R3 access handlers when invoked by an async thread.
     * It's a hack required because of REMR3NotifyHandlerPhysicalDeregister. */
    bool volatile       fReusedFlushPending;
    /** Used to indicate that the guest is mapping the page is also used as a CR3.
     * In these cases the access handler acts differently and will check
     * for mapping conflicts like the normal CR3 handler.
     * @todo When we change the CR3 shadowing to use pool pages, this flag can be
     *       replaced by a list of pages which share access handler.
     */
    bool                fCR3Mix;
} PGMPOOLPAGE, *PPGMPOOLPAGE, **PPPGMPOOLPAGE;
1467
1468
#ifdef PGMPOOL_WITH_CACHE
/** The hash table size (number of buckets; must be a power of two). */
# define PGMPOOL_HASH_SIZE      0x40
/** The hash function (maps a guest physical page address to a bucket index). */
# define PGMPOOL_HASH(GCPhys)   ( ((GCPhys) >> PAGE_SHIFT) & (PGMPOOL_HASH_SIZE - 1) )
#endif
1475
1476
/**
 * The shadow page pool instance data.
 *
 * It's all one big allocation made at init time, except for the
 * pages that is. The user nodes follows immediately after the
 * page structures.
 */
typedef struct PGMPOOL
{
    /** The VM handle - HC Ptr. */
    R3R0PTRTYPE(PVM)    pVMHC;
    /** The VM handle - GC Ptr. */
    RCPTRTYPE(PVM)      pVMGC;
    /** The max pool size. This includes the special IDs. */
    uint16_t            cMaxPages;
    /** The current pool size. */
    uint16_t            cCurPages;
    /** The head of the free page list. */
    uint16_t            iFreeHead;
    /* Padding. */
    uint16_t            u16Padding;
#ifdef PGMPOOL_WITH_USER_TRACKING
    /** Head of the chain of free user nodes. */
    uint16_t            iUserFreeHead;
    /** The number of user nodes we've allocated. */
    uint16_t            cMaxUsers;
    /** The number of present page table entries in the entire pool. */
    uint32_t            cPresent;
    /** Pointer to the array of user nodes - GC pointer. */
    RCPTRTYPE(PPGMPOOLUSER)     paUsersGC;
    /** Pointer to the array of user nodes - HC pointer. */
    R3R0PTRTYPE(PPGMPOOLUSER)   paUsersHC;
#endif /* PGMPOOL_WITH_USER_TRACKING */
#ifdef PGMPOOL_WITH_GCPHYS_TRACKING
    /** Head of the chain of free phys ext nodes. */
    uint16_t            iPhysExtFreeHead;
    /** The number of user nodes we've allocated. */
    uint16_t            cMaxPhysExts;
    /** Pointer to the array of physical xref extent - GC pointer. */
    RCPTRTYPE(PPGMPOOLPHYSEXT)  paPhysExtsGC;
    /** Pointer to the array of physical xref extent nodes - HC pointer. */
    R3R0PTRTYPE(PPGMPOOLPHYSEXT) paPhysExtsHC;
#endif /* PGMPOOL_WITH_GCPHYS_TRACKING */
#ifdef PGMPOOL_WITH_CACHE
    /** Hash table for GCPhys addresses. */
    uint16_t            aiHash[PGMPOOL_HASH_SIZE];
    /** The head of the age list. */
    uint16_t            iAgeHead;
    /** The tail of the age list. */
    uint16_t            iAgeTail;
    /** Set if the cache is enabled. */
    bool                fCacheEnabled;
#endif /* PGMPOOL_WITH_CACHE */
#ifdef PGMPOOL_WITH_MONITORING
    /** Head of the list of modified pages. */
    uint16_t            iModifiedHead;
    /** The current number of modified pages. */
    uint16_t            cModifiedPages;
    /** Access handler, RC. */
    RCPTRTYPE(PFNPGMRCPHYSHANDLER)  pfnAccessHandlerRC;
    /** Access handler, R0. */
    R0PTRTYPE(PFNPGMR0PHYSHANDLER)  pfnAccessHandlerR0;
    /** Access handler, R3. */
    R3PTRTYPE(PFNPGMR3PHYSHANDLER)  pfnAccessHandlerR3;
    /** The access handler description (HC ptr). */
    R3PTRTYPE(const char *)         pszAccessHandler;
#endif /* PGMPOOL_WITH_MONITORING */
    /** The number of pages currently in use. */
    uint16_t            cUsedPages;
#ifdef VBOX_WITH_STATISTICS
    /** The high water mark for cUsedPages. */
    uint16_t            cUsedPagesHigh;
    uint32_t            Alignment1;         /**< Align the next member on a 64-bit boundary. */
    /** Profiling pgmPoolAlloc(). */
    STAMPROFILEADV      StatAlloc;
    /** Profiling pgmPoolClearAll(). */
    STAMPROFILE         StatClearAll;
    /** Profiling pgmPoolFlushAllInt(). */
    STAMPROFILE         StatFlushAllInt;
    /** Profiling pgmPoolFlushPage(). */
    STAMPROFILE         StatFlushPage;
    /** Profiling pgmPoolFree(). */
    STAMPROFILE         StatFree;
    /** Profiling time spent zeroing pages. */
    STAMPROFILE         StatZeroPage;
# ifdef PGMPOOL_WITH_USER_TRACKING
    /** Profiling of pgmPoolTrackDeref. */
    STAMPROFILE         StatTrackDeref;
    /** Profiling pgmTrackFlushGCPhysPT. */
    STAMPROFILE         StatTrackFlushGCPhysPT;
    /** Profiling pgmTrackFlushGCPhysPTs. */
    STAMPROFILE         StatTrackFlushGCPhysPTs;
    /** Profiling pgmTrackFlushGCPhysPTsSlow. */
    STAMPROFILE         StatTrackFlushGCPhysPTsSlow;
    /** Number of times we've been out of user records. */
    STAMCOUNTER         StatTrackFreeUpOneUser;
# endif
# ifdef PGMPOOL_WITH_GCPHYS_TRACKING
    /** Profiling deref activity related tracking GC physical pages. */
    STAMPROFILE         StatTrackDerefGCPhys;
    /** Number of linear searches for a HCPhys in the ram ranges. */
    STAMCOUNTER         StatTrackLinearRamSearches;
    /** The number of failing pgmPoolTrackPhysExtAlloc calls. */
    STAMCOUNTER         StamTrackPhysExtAllocFailures;
# endif
# ifdef PGMPOOL_WITH_MONITORING
    /** Profiling the GC PT access handler. */
    STAMPROFILE         StatMonitorGC;
    /** Times we've failed interpreting the instruction. */
    STAMCOUNTER         StatMonitorGCEmulateInstr;
    /** Profiling the pgmPoolFlushPage calls made from the GC PT access handler. */
    STAMPROFILE         StatMonitorGCFlushPage;
    /** Times we've detected fork(). */
    STAMCOUNTER         StatMonitorGCFork;
    /** Profiling the GC access we've handled (except REP STOSD). */
    STAMPROFILE         StatMonitorGCHandled;
    /** Times we've failed interpreting a patch code instruction. */
    STAMCOUNTER         StatMonitorGCIntrFailPatch1;
    /** Times we've failed interpreting a patch code instruction during flushing. */
    STAMCOUNTER         StatMonitorGCIntrFailPatch2;
    /** The number of times we've seen rep prefixes we can't handle. */
    STAMCOUNTER         StatMonitorGCRepPrefix;
    /** Profiling the REP STOSD cases we've handled. */
    STAMPROFILE         StatMonitorGCRepStosd;

    /** Profiling the HC PT access handler. */
    STAMPROFILE         StatMonitorHC;
    /** Times we've failed interpreting the instruction. */
    STAMCOUNTER         StatMonitorHCEmulateInstr;
    /** Profiling the pgmPoolFlushPage calls made from the HC PT access handler. */
    STAMPROFILE         StatMonitorHCFlushPage;
    /** Times we've detected fork(). */
    STAMCOUNTER         StatMonitorHCFork;
    /** Profiling the HC access we've handled (except REP STOSD). */
    STAMPROFILE         StatMonitorHCHandled;
    /** The number of times we've seen rep prefixes we can't handle. */
    STAMCOUNTER         StatMonitorHCRepPrefix;
    /** Profiling the REP STOSD cases we've handled. */
    STAMPROFILE         StatMonitorHCRepStosd;
    /** The number of times we're called in an async thread an need to flush. */
    STAMCOUNTER         StatMonitorHCAsync;
    /** The high water mark for cModifiedPages. */
    uint16_t            cModifiedPagesHigh;
    uint16_t            Alignment2[3];      /**< Align the next member on a 64-bit boundary. */
# endif
# ifdef PGMPOOL_WITH_CACHE
    /** The number of cache hits. */
    STAMCOUNTER         StatCacheHits;
    /** The number of cache misses. */
    STAMCOUNTER         StatCacheMisses;
    /** The number of times we've got a conflict of 'kind' in the cache. */
    STAMCOUNTER         StatCacheKindMismatches;
    /** Number of times we've been out of pages. */
    STAMCOUNTER         StatCacheFreeUpOne;
    /** The number of cacheable allocations. */
    STAMCOUNTER         StatCacheCacheable;
    /** The number of uncacheable allocations. */
    STAMCOUNTER         StatCacheUncacheable;
# endif
#elif HC_ARCH_BITS == 64
    uint32_t            Alignment3;         /**< Align the next member on a 64-bit boundary. */
#endif
    /** The AVL tree for looking up a page by its HC physical address. */
    AVLOHCPHYSTREE      HCPhysTree;
    uint32_t            Alignment4;         /**< Align the next member on a 64-bit boundary. */
    /** Array of pages. (cMaxPages in length)
     * The Id is the index into this array.
     */
    PGMPOOLPAGE         aPages[PGMPOOL_IDX_FIRST];
} PGMPOOL, *PPGMPOOL, **PPPGMPOOL;
1647
1648
/** @def PGMPOOL_PAGE_2_PTR
 * Maps a pool page pool into the current context.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 * @param   pPage   The pool page.
 *
 * @remark  In GC (the pgmGCPoolMapPage() branch) this consumes one of the
 *          small dynamic page-mapping window entries employed by that
 *          function - be careful. (NOTE(review): original comment said "In
 *          HC", but the dynamic-mapping branch below is the IN_GC one.)
 * @remark  There is no need to assert on the result.
 */
#ifdef IN_GC
# define PGMPOOL_PAGE_2_PTR(pVM, pPage)  pgmGCPoolMapPage((pVM), (pPage))
#else
# define PGMPOOL_PAGE_2_PTR(pVM, pPage)  ((pPage)->pvPageHC)
#endif
1665
1666
/**
 * Trees are using self relative offsets as pointers.
 * So, all its data, including the root pointer, must be in the heap for HC and GC
 * to have the same layout.
 */
typedef struct PGMTREES
{
    /** Physical access handlers (AVL range+offsetptr tree). */
    AVLROGCPHYSTREE PhysHandlers;
    /** Virtual access handlers (AVL range + GC ptr tree). */
    AVLROGCPTRTREE  VirtHandlers;
    /** Virtual access handlers (Phys range AVL range + offsetptr tree). */
    AVLROGCPHYSTREE PhysToVirtHandlers;
    /** Virtual access handlers for the hypervisor (AVL range + GC ptr tree). */
    AVLROGCPTRTREE  HyperVirtHandlers;
} PGMTREES;
/** Pointer to PGM trees. */
typedef PGMTREES *PPGMTREES;
1685
1686
1687/** @name Paging mode macros
1688 * @{ */
1689#ifdef IN_GC
1690# define PGM_CTX(a,b) a##GC##b
1691# define PGM_CTX_STR(a,b) a "GC" b
1692# define PGM_CTX_DECL(type) VMMRCDECL(type)
1693#else
1694# ifdef IN_RING3
1695# define PGM_CTX(a,b) a##R3##b
1696# define PGM_CTX_STR(a,b) a "R3" b
1697# define PGM_CTX_DECL(type) DECLCALLBACK(type)
1698# else
1699# define PGM_CTX(a,b) a##R0##b
1700# define PGM_CTX_STR(a,b) a "R0" b
1701# define PGM_CTX_DECL(type) VMMDECL(type)
1702# endif
1703#endif
1704
/* Guest paging method name mangling.
 * PGM_GST_NAME_<MODE>(name) yields the context specific (GC/R3/R0)
 * identifier of a guest-mode worker, while the *_GC_*_STR / *_R0_*_STR
 * variants yield the plain symbol name strings (presumably used to resolve
 * the GC/R0 addresses of the workers by name - the resolving code is not
 * in this chunk). */
#define PGM_GST_NAME_REAL(name)         PGM_CTX(pgm,GstReal##name)
#define PGM_GST_NAME_GC_REAL_STR(name)  "pgmGCGstReal" #name
#define PGM_GST_NAME_R0_REAL_STR(name)  "pgmR0GstReal" #name
#define PGM_GST_NAME_PROT(name)         PGM_CTX(pgm,GstProt##name)
#define PGM_GST_NAME_GC_PROT_STR(name)  "pgmGCGstProt" #name
#define PGM_GST_NAME_R0_PROT_STR(name)  "pgmR0GstProt" #name
#define PGM_GST_NAME_32BIT(name)        PGM_CTX(pgm,Gst32Bit##name)
#define PGM_GST_NAME_GC_32BIT_STR(name) "pgmGCGst32Bit" #name
#define PGM_GST_NAME_R0_32BIT_STR(name) "pgmR0Gst32Bit" #name
#define PGM_GST_NAME_PAE(name)          PGM_CTX(pgm,GstPAE##name)
#define PGM_GST_NAME_GC_PAE_STR(name)   "pgmGCGstPAE" #name
#define PGM_GST_NAME_R0_PAE_STR(name)   "pgmR0GstPAE" #name
#define PGM_GST_NAME_AMD64(name)        PGM_CTX(pgm,GstAMD64##name)
#define PGM_GST_NAME_GC_AMD64_STR(name) "pgmGCGstAMD64" #name
#define PGM_GST_NAME_R0_AMD64_STR(name) "pgmR0GstAMD64" #name
/* Fetch the current-mode guest method pointer stored in the PGM data. */
#define PGM_GST_PFN(name, pVM)          ((pVM)->pgm.s.PGM_CTX(pfn,Gst##name))
/* Declare a guest method.  PGM_GST_NAME (without mode suffix) is defined by
 * the mode template instantiation before including the template body; that
 * definition is not visible in this chunk. */
#define PGM_GST_DECL(type, name)        PGM_CTX_DECL(type) PGM_GST_NAME(name)
1722
/* Shadow paging method name mangling, same scheme as the PGM_GST_NAME_*
 * macros above: per-mode identifier mangling plus GC/R0 symbol-name string
 * variants.  NESTED (AMD-V nested paging) and EPT (VT-x) are shadow-only
 * modes. */
#define PGM_SHW_NAME_32BIT(name)        PGM_CTX(pgm,Shw32Bit##name)
#define PGM_SHW_NAME_GC_32BIT_STR(name) "pgmGCShw32Bit" #name
#define PGM_SHW_NAME_R0_32BIT_STR(name) "pgmR0Shw32Bit" #name
#define PGM_SHW_NAME_PAE(name)          PGM_CTX(pgm,ShwPAE##name)
#define PGM_SHW_NAME_GC_PAE_STR(name)   "pgmGCShwPAE" #name
#define PGM_SHW_NAME_R0_PAE_STR(name)   "pgmR0ShwPAE" #name
#define PGM_SHW_NAME_AMD64(name)        PGM_CTX(pgm,ShwAMD64##name)
#define PGM_SHW_NAME_GC_AMD64_STR(name) "pgmGCShwAMD64" #name
#define PGM_SHW_NAME_R0_AMD64_STR(name) "pgmR0ShwAMD64" #name
#define PGM_SHW_NAME_NESTED(name)       PGM_CTX(pgm,ShwNested##name)
#define PGM_SHW_NAME_GC_NESTED_STR(name) "pgmGCShwNested" #name
#define PGM_SHW_NAME_R0_NESTED_STR(name) "pgmR0ShwNested" #name
#define PGM_SHW_NAME_EPT(name)          PGM_CTX(pgm,ShwEPT##name)
#define PGM_SHW_NAME_GC_EPT_STR(name)   "pgmGCShwEPT" #name
#define PGM_SHW_NAME_R0_EPT_STR(name)   "pgmR0ShwEPT" #name
/* Declare a shadow method; PGM_SHW_NAME is defined by the template
 * instantiation (not visible in this chunk). */
#define PGM_SHW_DECL(type, name)        PGM_CTX_DECL(type) PGM_SHW_NAME(name)
/* Fetch the current-mode shadow method pointer stored in the PGM data. */
#define PGM_SHW_PFN(name, pVM)          ((pVM)->pgm.s.PGM_CTX(pfn,Shw##name))
1740
/* Shw_Gst: combined Shadow+Guest ("both") method name mangling.
 * The mode pair is encoded as <SHADOW>_<GUEST>; only the combinations
 * listed below exist. */
#define PGM_BTH_NAME_32BIT_REAL(name)   PGM_CTX(pgm,Bth32BitReal##name)
#define PGM_BTH_NAME_32BIT_PROT(name)   PGM_CTX(pgm,Bth32BitProt##name)
#define PGM_BTH_NAME_32BIT_32BIT(name)  PGM_CTX(pgm,Bth32Bit32Bit##name)
#define PGM_BTH_NAME_PAE_REAL(name)     PGM_CTX(pgm,BthPAEReal##name)
#define PGM_BTH_NAME_PAE_PROT(name)     PGM_CTX(pgm,BthPAEProt##name)
#define PGM_BTH_NAME_PAE_32BIT(name)    PGM_CTX(pgm,BthPAE32Bit##name)
#define PGM_BTH_NAME_PAE_PAE(name)      PGM_CTX(pgm,BthPAEPAE##name)
#define PGM_BTH_NAME_AMD64_PROT(name)   PGM_CTX(pgm,BthAMD64Prot##name)
#define PGM_BTH_NAME_AMD64_AMD64(name)  PGM_CTX(pgm,BthAMD64AMD64##name)
#define PGM_BTH_NAME_NESTED_REAL(name)  PGM_CTX(pgm,BthNestedReal##name)
#define PGM_BTH_NAME_NESTED_PROT(name)  PGM_CTX(pgm,BthNestedProt##name)
#define PGM_BTH_NAME_NESTED_32BIT(name) PGM_CTX(pgm,BthNested32Bit##name)
#define PGM_BTH_NAME_NESTED_PAE(name)   PGM_CTX(pgm,BthNestedPAE##name)
#define PGM_BTH_NAME_NESTED_AMD64(name) PGM_CTX(pgm,BthNestedAMD64##name)
#define PGM_BTH_NAME_EPT_REAL(name)     PGM_CTX(pgm,BthEPTReal##name)
#define PGM_BTH_NAME_EPT_PROT(name)     PGM_CTX(pgm,BthEPTProt##name)
#define PGM_BTH_NAME_EPT_32BIT(name)    PGM_CTX(pgm,BthEPT32Bit##name)
#define PGM_BTH_NAME_EPT_PAE(name)      PGM_CTX(pgm,BthEPTPAE##name)
#define PGM_BTH_NAME_EPT_AMD64(name)    PGM_CTX(pgm,BthEPTAMD64##name)

/* Symbol name strings for GC address resolution.
 * NOTE(review): the GC list has no AMD64_PROT entry while the R0 list below
 * does - presumably that combination is never dispatched from the GC
 * context; confirm before relying on symmetry. */
#define PGM_BTH_NAME_GC_32BIT_REAL_STR(name)    "pgmGCBth32BitReal" #name
#define PGM_BTH_NAME_GC_32BIT_PROT_STR(name)    "pgmGCBth32BitProt" #name
#define PGM_BTH_NAME_GC_32BIT_32BIT_STR(name)   "pgmGCBth32Bit32Bit" #name
#define PGM_BTH_NAME_GC_PAE_REAL_STR(name)      "pgmGCBthPAEReal" #name
#define PGM_BTH_NAME_GC_PAE_PROT_STR(name)      "pgmGCBthPAEProt" #name
#define PGM_BTH_NAME_GC_PAE_32BIT_STR(name)     "pgmGCBthPAE32Bit" #name
#define PGM_BTH_NAME_GC_PAE_PAE_STR(name)       "pgmGCBthPAEPAE" #name
#define PGM_BTH_NAME_GC_AMD64_AMD64_STR(name)   "pgmGCBthAMD64AMD64" #name
#define PGM_BTH_NAME_GC_NESTED_REAL_STR(name)   "pgmGCBthNestedReal" #name
#define PGM_BTH_NAME_GC_NESTED_PROT_STR(name)   "pgmGCBthNestedProt" #name
#define PGM_BTH_NAME_GC_NESTED_32BIT_STR(name)  "pgmGCBthNested32Bit" #name
#define PGM_BTH_NAME_GC_NESTED_PAE_STR(name)    "pgmGCBthNestedPAE" #name
#define PGM_BTH_NAME_GC_NESTED_AMD64_STR(name)  "pgmGCBthNestedAMD64" #name
#define PGM_BTH_NAME_GC_EPT_REAL_STR(name)      "pgmGCBthEPTReal" #name
#define PGM_BTH_NAME_GC_EPT_PROT_STR(name)      "pgmGCBthEPTProt" #name
#define PGM_BTH_NAME_GC_EPT_32BIT_STR(name)     "pgmGCBthEPT32Bit" #name
#define PGM_BTH_NAME_GC_EPT_PAE_STR(name)       "pgmGCBthEPTPAE" #name
#define PGM_BTH_NAME_GC_EPT_AMD64_STR(name)     "pgmGCBthEPTAMD64" #name
/* Symbol name strings for R0 address resolution. */
#define PGM_BTH_NAME_R0_32BIT_REAL_STR(name)    "pgmR0Bth32BitReal" #name
#define PGM_BTH_NAME_R0_32BIT_PROT_STR(name)    "pgmR0Bth32BitProt" #name
#define PGM_BTH_NAME_R0_32BIT_32BIT_STR(name)   "pgmR0Bth32Bit32Bit" #name
#define PGM_BTH_NAME_R0_PAE_REAL_STR(name)      "pgmR0BthPAEReal" #name
#define PGM_BTH_NAME_R0_PAE_PROT_STR(name)      "pgmR0BthPAEProt" #name
#define PGM_BTH_NAME_R0_PAE_32BIT_STR(name)     "pgmR0BthPAE32Bit" #name
#define PGM_BTH_NAME_R0_PAE_PAE_STR(name)       "pgmR0BthPAEPAE" #name
#define PGM_BTH_NAME_R0_AMD64_PROT_STR(name)    "pgmR0BthAMD64Prot" #name
#define PGM_BTH_NAME_R0_AMD64_AMD64_STR(name)   "pgmR0BthAMD64AMD64" #name
#define PGM_BTH_NAME_R0_NESTED_REAL_STR(name)   "pgmR0BthNestedReal" #name
#define PGM_BTH_NAME_R0_NESTED_PROT_STR(name)   "pgmR0BthNestedProt" #name
#define PGM_BTH_NAME_R0_NESTED_32BIT_STR(name)  "pgmR0BthNested32Bit" #name
#define PGM_BTH_NAME_R0_NESTED_PAE_STR(name)    "pgmR0BthNestedPAE" #name
#define PGM_BTH_NAME_R0_NESTED_AMD64_STR(name)  "pgmR0BthNestedAMD64" #name
#define PGM_BTH_NAME_R0_EPT_REAL_STR(name)      "pgmR0BthEPTReal" #name
#define PGM_BTH_NAME_R0_EPT_PROT_STR(name)      "pgmR0BthEPTProt" #name
#define PGM_BTH_NAME_R0_EPT_32BIT_STR(name)     "pgmR0BthEPT32Bit" #name
#define PGM_BTH_NAME_R0_EPT_PAE_STR(name)       "pgmR0BthEPTPAE" #name
#define PGM_BTH_NAME_R0_EPT_AMD64_STR(name)     "pgmR0BthEPTAMD64" #name

/* Declare a both-mode method; PGM_BTH_NAME is defined by the template
 * instantiation (not visible in this chunk). */
#define PGM_BTH_DECL(type, name)        PGM_CTX_DECL(type) PGM_BTH_NAME(name)
/* Fetch the current-mode both-mode method pointer stored in the PGM data. */
#define PGM_BTH_PFN(name, pVM)          ((pVM)->pgm.s.PGM_CTX(pfn,Bth##name))
/** @} */
1803
/**
 * Data for each paging mode.
 *
 * One instance per supported shadow/guest mode pair (identified by
 * uShwType + uGstType), bundling the per-context (R3/GC/R0) worker
 * function pointers.  struct PGM has matching pfn members - presumably
 * filled from the active entry on mode switch by the ring-3 mode-change
 * code, which is not visible in this chunk.
 */
typedef struct PGMMODEDATA
{
    /** The guest mode type. */
    uint32_t uGstType;
    /** The shadow mode type. */
    uint32_t uShwType;

    /** @name Function pointers for Shadow paging.
     * Workers operating on the shadow page tables; one set per context.
     * @{
     */
    DECLR3CALLBACKMEMBER(int, pfnR3ShwRelocate,(PVM pVM, RTGCUINTPTR offDelta));
    DECLR3CALLBACKMEMBER(int, pfnR3ShwExit,(PVM pVM));
    DECLR3CALLBACKMEMBER(int, pfnR3ShwGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
    DECLR3CALLBACKMEMBER(int, pfnR3ShwModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));

    DECLRCCALLBACKMEMBER(int, pfnGCShwGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
    DECLRCCALLBACKMEMBER(int, pfnGCShwModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));

    DECLR0CALLBACKMEMBER(int, pfnR0ShwGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
    DECLR0CALLBACKMEMBER(int, pfnR0ShwModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
    /** @} */

    /** @name Function pointers for Guest paging.
     * Workers reading and monitoring the guest's own paging structures
     * (CR3 mapping/monitoring, PDE and page-flag queries); one set per
     * context.
     * @{
     */
    DECLR3CALLBACKMEMBER(int, pfnR3GstRelocate,(PVM pVM, RTGCUINTPTR offDelta));
    DECLR3CALLBACKMEMBER(int, pfnR3GstExit,(PVM pVM));
    DECLR3CALLBACKMEMBER(int, pfnR3GstGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
    DECLR3CALLBACKMEMBER(int, pfnR3GstModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
    DECLR3CALLBACKMEMBER(int, pfnR3GstGetPDE,(PVM pVM, RTGCUINTPTR GCPtr, PX86PDEPAE pPde));
    DECLR3CALLBACKMEMBER(int, pfnR3GstMonitorCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
    DECLR3CALLBACKMEMBER(int, pfnR3GstUnmonitorCR3,(PVM pVM));
    DECLR3CALLBACKMEMBER(int, pfnR3GstMapCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
    DECLR3CALLBACKMEMBER(int, pfnR3GstUnmapCR3,(PVM pVM));
    /** Write access handler for the guest CR3 page, ring-3 context. */
    R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnR3GstWriteHandlerCR3;
    /** Name string for the CR3 write handler (presumably passed at handler
     * registration - the registration site is not in this chunk). */
    R3PTRTYPE(const char *) pszR3GstWriteHandlerCR3;
    /** Write access handler for the guest PAE CR3 (PDPT) pages, ring-3 context. */
    R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnR3GstPAEWriteHandlerCR3;
    /** Name string for the PAE CR3 write handler. */
    R3PTRTYPE(const char *) pszR3GstPAEWriteHandlerCR3;

    DECLRCCALLBACKMEMBER(int, pfnGCGstGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
    DECLRCCALLBACKMEMBER(int, pfnGCGstModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
    DECLRCCALLBACKMEMBER(int, pfnGCGstGetPDE,(PVM pVM, RTGCUINTPTR GCPtr, PX86PDEPAE pPde));
    DECLRCCALLBACKMEMBER(int, pfnGCGstMonitorCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
    DECLRCCALLBACKMEMBER(int, pfnGCGstUnmonitorCR3,(PVM pVM));
    DECLRCCALLBACKMEMBER(int, pfnGCGstMapCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
    DECLRCCALLBACKMEMBER(int, pfnGCGstUnmapCR3,(PVM pVM));
    /** Write access handler for the guest CR3 page, GC context. */
    RCPTRTYPE(PFNPGMRCPHYSHANDLER) pfnGCGstWriteHandlerCR3;
    /** Write access handler for the guest PAE CR3 (PDPT) pages, GC context. */
    RCPTRTYPE(PFNPGMRCPHYSHANDLER) pfnGCGstPAEWriteHandlerCR3;

    DECLR0CALLBACKMEMBER(int, pfnR0GstGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
    DECLR0CALLBACKMEMBER(int, pfnR0GstModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
    DECLR0CALLBACKMEMBER(int, pfnR0GstGetPDE,(PVM pVM, RTGCUINTPTR GCPtr, PX86PDEPAE pPde));
    DECLR0CALLBACKMEMBER(int, pfnR0GstMonitorCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
    DECLR0CALLBACKMEMBER(int, pfnR0GstUnmonitorCR3,(PVM pVM));
    DECLR0CALLBACKMEMBER(int, pfnR0GstMapCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
    DECLR0CALLBACKMEMBER(int, pfnR0GstUnmapCR3,(PVM pVM));
    /** Write access handler for the guest CR3 page, ring-0 context. */
    R0PTRTYPE(PFNPGMRCPHYSHANDLER) pfnR0GstWriteHandlerCR3;
    /** Write access handler for the guest PAE CR3 (PDPT) pages, ring-0 context. */
    R0PTRTYPE(PFNPGMRCPHYSHANDLER) pfnR0GstPAEWriteHandlerCR3;
    /** @} */

    /** @name Function pointers for Both Shadow and Guest paging.
     * Workers that need to know both modes: \#PF handling, page/CR3
     * syncing, prefetching and access verification.  The AssertCR3
     * worker exists in strict builds only.
     * @{
     */
    DECLR3CALLBACKMEMBER(int, pfnR3BthRelocate,(PVM pVM, RTGCUINTPTR offDelta));
    DECLR3CALLBACKMEMBER(int, pfnR3BthTrap0eHandler,(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
    DECLR3CALLBACKMEMBER(int, pfnR3BthInvalidatePage,(PVM pVM, RTGCPTR GCPtrPage));
    DECLR3CALLBACKMEMBER(int, pfnR3BthSyncCR3,(PVM pVM, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
    DECLR3CALLBACKMEMBER(int, pfnR3BthSyncPage,(PVM pVM, X86PDE PdeSrc, RTGCUINTPTR GCPtrPage, unsigned cPages, unsigned uError));
    DECLR3CALLBACKMEMBER(int, pfnR3BthPrefetchPage,(PVM pVM, RTGCUINTPTR GCPtrPage));
    DECLR3CALLBACKMEMBER(int, pfnR3BthVerifyAccessSyncPage,(PVM pVM, RTGCUINTPTR GCPtrPage, unsigned fFlags, unsigned uError));
#ifdef VBOX_STRICT
    DECLR3CALLBACKMEMBER(unsigned, pfnR3BthAssertCR3,(PVM pVM, uint64_t cr3, uint64_t cr4, RTGCUINTPTR GCPtr, RTGCUINTPTR cb));
#endif

    DECLRCCALLBACKMEMBER(int, pfnGCBthTrap0eHandler,(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
    DECLRCCALLBACKMEMBER(int, pfnGCBthInvalidatePage,(PVM pVM, RTGCPTR GCPtrPage));
    DECLRCCALLBACKMEMBER(int, pfnGCBthSyncCR3,(PVM pVM, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
    DECLRCCALLBACKMEMBER(int, pfnGCBthSyncPage,(PVM pVM, X86PDE PdeSrc, RTGCUINTPTR GCPtrPage, unsigned cPages, unsigned uError));
    DECLRCCALLBACKMEMBER(int, pfnGCBthPrefetchPage,(PVM pVM, RTGCUINTPTR GCPtrPage));
    DECLRCCALLBACKMEMBER(int, pfnGCBthVerifyAccessSyncPage,(PVM pVM, RTGCUINTPTR GCPtrPage, unsigned fFlags, unsigned uError));
#ifdef VBOX_STRICT
    DECLRCCALLBACKMEMBER(unsigned, pfnGCBthAssertCR3,(PVM pVM, uint64_t cr3, uint64_t cr4, RTGCUINTPTR GCPtr, RTGCUINTPTR cb));
#endif

    DECLR0CALLBACKMEMBER(int, pfnR0BthTrap0eHandler,(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
    DECLR0CALLBACKMEMBER(int, pfnR0BthInvalidatePage,(PVM pVM, RTGCPTR GCPtrPage));
    DECLR0CALLBACKMEMBER(int, pfnR0BthSyncCR3,(PVM pVM, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
    DECLR0CALLBACKMEMBER(int, pfnR0BthSyncPage,(PVM pVM, X86PDE PdeSrc, RTGCUINTPTR GCPtrPage, unsigned cPages, unsigned uError));
    DECLR0CALLBACKMEMBER(int, pfnR0BthPrefetchPage,(PVM pVM, RTGCUINTPTR GCPtrPage));
    DECLR0CALLBACKMEMBER(int, pfnR0BthVerifyAccessSyncPage,(PVM pVM, RTGCUINTPTR GCPtrPage, unsigned fFlags, unsigned uError));
#ifdef VBOX_STRICT
    DECLR0CALLBACKMEMBER(unsigned, pfnR0BthAssertCR3,(PVM pVM, uint64_t cr3, uint64_t cr4, RTGCUINTPTR GCPtr, RTGCUINTPTR cb));
#endif
    /** @} */
} PGMMODEDATA, *PPGMMODEDATA;
1902
1903
1904
/**
 * Converts a PGM pointer into a VM pointer.
 *
 * Relies on PGM::offVM, the self-describing offset of the PGM data within
 * the VM structure.
 *
 * @returns Pointer to the VM structure the PGM is part of.
 * @param   pPGM    Pointer to PGM instance data.
 */
/* Fix: parenthesize every use of the macro argument (CERT PRE01-C).  The
 * old expansion '(char*)pPGM - pPGM->offVM' mis-binds when pPGM is a
 * non-trivial expression (e.g. a cast or conditional). */
#define PGM2VM(pPGM)  ( (PVM)((char *)(pPGM) - (pPGM)->offVM) )
1911
1912/**
1913 * PGM Data (part of VM)
1914 */
1915typedef struct PGM
1916{
1917 /** Offset to the VM structure. */
1918 RTINT offVM;
1919
1920 /*
1921 * This will be redefined at least two more times before we're done, I'm sure.
1922 * The current code is only to get on with the coding.
1923 * - 2004-06-10: initial version, bird.
1924 * - 2004-07-02: 1st time, bird.
1925 * - 2004-10-18: 2nd time, bird.
1926 * - 2005-07-xx: 3rd time, bird.
1927 */
1928
1929 /** Pointer to the page table entries for the dynamic page mapping area - GCPtr. */
1930 RCPTRTYPE(PX86PTE) paDynPageMap32BitPTEsGC;
1931 /** Pointer to the page table entries for the dynamic page mapping area - GCPtr. */
1932 RCPTRTYPE(PX86PTEPAE) paDynPageMapPaePTEsGC;
1933
1934 /** The host paging mode. (This is what SUPLib reports.) */
1935 SUPPAGINGMODE enmHostMode;
1936 /** The shadow paging mode. */
1937 PGMMODE enmShadowMode;
1938 /** The guest paging mode. */
1939 PGMMODE enmGuestMode;
1940
1941 /** The current physical address representing in the guest CR3 register. */
1942 RTGCPHYS GCPhysCR3;
1943 /** Pointer to the 5 page CR3 content mapping.
1944 * The first page is always the CR3 (in some form) while the 4 other pages
1945 * are used of the PDs in PAE mode. */
1946 RTGCPTR GCPtrCR3Mapping;
1947#if HC_ARCH_BITS == 64 && GC_ARCH_BITS == 32
1948 uint32_t u32Alignment;
1949#endif
1950 /** The physical address of the currently monitored guest CR3 page.
1951 * When this value is NIL_RTGCPHYS no page is being monitored. */
1952 RTGCPHYS GCPhysGstCR3Monitored;
1953
1954 /** @name 32-bit Guest Paging.
1955 * @{ */
1956 /** The guest's page directory, HC pointer. */
1957 R3R0PTRTYPE(PX86PD) pGuestPDHC;
1958 /** The guest's page directory, static GC mapping. */
1959 RCPTRTYPE(PX86PD) pGuestPDGC;
1960 /** @} */
1961
1962 /** @name PAE Guest Paging.
1963 * @{ */
1964 /** The guest's page directory pointer table, static GC mapping. */
1965 RCPTRTYPE(PX86PDPT) pGstPaePDPTGC;
1966 /** The guest's page directory pointer table, HC pointer. */
1967 R3R0PTRTYPE(PX86PDPT) pGstPaePDPTHC;
1968 /** The guest's page directories, HC pointers.
1969 * These are individual pointers and don't have to be adjacent.
1970 * These don't have to be up-to-date - use pgmGstGetPaePD() to access them. */
1971 R3R0PTRTYPE(PX86PDPAE) apGstPaePDsHC[4];
1972 /** The guest's page directories, static GC mapping.
1973 * Unlike the HC array the first entry can be accessed as a 2048 entry PD.
1974 * These don't have to be up-to-date - use pgmGstGetPaePD() to access them. */
1975 RCPTRTYPE(PX86PDPAE) apGstPaePDsGC[4];
1976 /** The physical addresses of the guest page directories (PAE) pointed to by apGstPagePDsHC/GC. */
1977 RTGCPHYS aGCPhysGstPaePDs[4];
1978 /** The physical addresses of the monitored guest page directories (PAE). */
1979 RTGCPHYS aGCPhysGstPaePDsMonitored[4];
1980 /** @} */
1981
1982 /** @name AMD64 Guest Paging.
1983 * @{ */
1984 /** The guest's page directory pointer table, HC pointer. */
1985 R3R0PTRTYPE(PX86PML4) pGstPaePML4HC;
1986 /** @} */
1987
1988 /** @name 32-bit Shadow Paging
1989 * @{ */
1990 /** The 32-Bit PD - HC Ptr. */
1991 R3R0PTRTYPE(PX86PD) pHC32BitPD;
1992 /** The 32-Bit PD - GC Ptr. */
1993 RCPTRTYPE(PX86PD) pGC32BitPD;
1994#if HC_ARCH_BITS == 64
1995 uint32_t u32Padding1; /**< alignment padding. */
1996#endif
1997 /** The Physical Address (HC) of the 32-Bit PD. */
1998 RTHCPHYS HCPhys32BitPD;
1999 /** @} */
2000
2001 /** @name PAE Shadow Paging
2002 * @{ */
2003 /** The four PDs for the low 4GB - HC Ptr.
2004 * Even though these are 4 pointers, what they point at is a single table.
2005 * Thus, it's possible to walk the 2048 entries starting where apHCPaePDs[0] points. */
2006 R3R0PTRTYPE(PX86PDPAE) apHCPaePDs[4];
2007 /** The four PDs for the low 4GB - GC Ptr.
2008 * Same kind of mapping as apHCPaePDs. */
2009 RCPTRTYPE(PX86PDPAE) apGCPaePDs[4];
2010 /** The Physical Address (HC) of the four PDs for the low 4GB.
2011 * These are *NOT* 4 contiguous pages. */
2012 RTHCPHYS aHCPhysPaePDs[4];
2013 /** The PAE PDP - HC Ptr. */
2014 R3R0PTRTYPE(PX86PDPT) pHCPaePDPT;
2015 /** The Physical Address (HC) of the PAE PDPT. */
2016 RTHCPHYS HCPhysPaePDPT;
2017 /** The PAE PDPT - GC Ptr. */
2018 RCPTRTYPE(PX86PDPT) pGCPaePDPT;
2019 /** @} */
2020
2021 /** @name AMD64 Shadow Paging
2022 * Extends PAE Paging.
2023 * @{ */
2024#if HC_ARCH_BITS == 64
2025 RTRCPTR alignment5; /**< structure size alignment. */
2026#endif
2027 /** The Page Map Level 4 table - HC Ptr. */
2028 R3R0PTRTYPE(PX86PML4) pHCPaePML4;
2029 /** The Physical Address (HC) of the Page Map Level 4 table. */
2030 RTHCPHYS HCPhysPaePML4;
2031 /** The pgm pool page descriptor for the current active CR3. */
2032 R3R0PTRTYPE(PPGMPOOLPAGE) pHCShwAmd64CR3;
2033
2034 /** @}*/
2035
2036 /** @name Nested Shadow Paging
2037 * @{ */
2038 /** Root table; format depends on the host paging mode (AMD-V) or EPT */
2039 R3R0PTRTYPE(void *) pHCNestedRoot;
2040 /** The Physical Address (HC) of the nested paging root. */
2041 RTHCPHYS HCPhysNestedRoot;
2042
2043 /** @name Function pointers for Shadow paging.
2044 * @{
2045 */
2046 DECLR3CALLBACKMEMBER(int, pfnR3ShwRelocate,(PVM pVM, RTGCUINTPTR offDelta));
2047 DECLR3CALLBACKMEMBER(int, pfnR3ShwExit,(PVM pVM));
2048 DECLR3CALLBACKMEMBER(int, pfnR3ShwGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
2049 DECLR3CALLBACKMEMBER(int, pfnR3ShwModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
2050
2051 DECLRCCALLBACKMEMBER(int, pfnGCShwGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
2052 DECLRCCALLBACKMEMBER(int, pfnGCShwModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
2053
2054 DECLR0CALLBACKMEMBER(int, pfnR0ShwGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
2055 DECLR0CALLBACKMEMBER(int, pfnR0ShwModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
2056
2057 /** @} */
2058
2059 /** @name Function pointers for Guest paging.
2060 * @{
2061 */
2062 DECLR3CALLBACKMEMBER(int, pfnR3GstRelocate,(PVM pVM, RTGCUINTPTR offDelta));
2063 DECLR3CALLBACKMEMBER(int, pfnR3GstExit,(PVM pVM));
2064 DECLR3CALLBACKMEMBER(int, pfnR3GstGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
2065 DECLR3CALLBACKMEMBER(int, pfnR3GstModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
2066 DECLR3CALLBACKMEMBER(int, pfnR3GstGetPDE,(PVM pVM, RTGCUINTPTR GCPtr, PX86PDEPAE pPde));
2067 DECLR3CALLBACKMEMBER(int, pfnR3GstMonitorCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
2068 DECLR3CALLBACKMEMBER(int, pfnR3GstUnmonitorCR3,(PVM pVM));
2069 DECLR3CALLBACKMEMBER(int, pfnR3GstMapCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
2070 DECLR3CALLBACKMEMBER(int, pfnR3GstUnmapCR3,(PVM pVM));
2071 R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnR3GstWriteHandlerCR3;
2072 R3PTRTYPE(const char *) pszR3GstWriteHandlerCR3;
2073 R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnR3GstPAEWriteHandlerCR3;
2074 R3PTRTYPE(const char *) pszR3GstPAEWriteHandlerCR3;
2075
2076 DECLRCCALLBACKMEMBER(int, pfnGCGstGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
2077 DECLRCCALLBACKMEMBER(int, pfnGCGstModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
2078 DECLRCCALLBACKMEMBER(int, pfnGCGstGetPDE,(PVM pVM, RTGCUINTPTR GCPtr, PX86PDEPAE pPde));
2079 DECLRCCALLBACKMEMBER(int, pfnGCGstMonitorCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
2080 DECLRCCALLBACKMEMBER(int, pfnGCGstUnmonitorCR3,(PVM pVM));
2081 DECLRCCALLBACKMEMBER(int, pfnGCGstMapCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
2082 DECLRCCALLBACKMEMBER(int, pfnGCGstUnmapCR3,(PVM pVM));
2083 RCPTRTYPE(PFNPGMRCPHYSHANDLER) pfnGCGstWriteHandlerCR3;
2084 RCPTRTYPE(PFNPGMRCPHYSHANDLER) pfnGCGstPAEWriteHandlerCR3;
2085#if HC_ARCH_BITS == 64
2086 RTRCPTR alignment3; /**< structure size alignment. */
2087#endif
2088
2089 DECLR0CALLBACKMEMBER(int, pfnR0GstGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
2090 DECLR0CALLBACKMEMBER(int, pfnR0GstModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
2091 DECLR0CALLBACKMEMBER(int, pfnR0GstGetPDE,(PVM pVM, RTGCUINTPTR GCPtr, PX86PDEPAE pPde));
2092 DECLR0CALLBACKMEMBER(int, pfnR0GstMonitorCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
2093 DECLR0CALLBACKMEMBER(int, pfnR0GstUnmonitorCR3,(PVM pVM));
2094 DECLR0CALLBACKMEMBER(int, pfnR0GstMapCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
2095 DECLR0CALLBACKMEMBER(int, pfnR0GstUnmapCR3,(PVM pVM));
2096 R0PTRTYPE(PFNPGMRCPHYSHANDLER) pfnR0GstWriteHandlerCR3;
2097 R0PTRTYPE(PFNPGMRCPHYSHANDLER) pfnR0GstPAEWriteHandlerCR3;
2098 /** @} */
2099
2100 /** @name Function pointers for Both Shadow and Guest paging.
2101 * @{
2102 */
2103 DECLR3CALLBACKMEMBER(int, pfnR3BthRelocate,(PVM pVM, RTGCUINTPTR offDelta));
2104 DECLR3CALLBACKMEMBER(int, pfnR3BthTrap0eHandler,(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
2105 DECLR3CALLBACKMEMBER(int, pfnR3BthInvalidatePage,(PVM pVM, RTGCPTR GCPtrPage));
2106 DECLR3CALLBACKMEMBER(int, pfnR3BthSyncCR3,(PVM pVM, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
2107 DECLR3CALLBACKMEMBER(int, pfnR3BthSyncPage,(PVM pVM, X86PDE PdeSrc, RTGCUINTPTR GCPtrPage, unsigned cPages, unsigned uError));
2108 DECLR3CALLBACKMEMBER(int, pfnR3BthPrefetchPage,(PVM pVM, RTGCUINTPTR GCPtrPage));
2109 DECLR3CALLBACKMEMBER(int, pfnR3BthVerifyAccessSyncPage,(PVM pVM, RTGCUINTPTR GCPtrPage, unsigned fFlags, unsigned uError));
2110 DECLR3CALLBACKMEMBER(unsigned, pfnR3BthAssertCR3,(PVM pVM, uint64_t cr3, uint64_t cr4, RTGCUINTPTR GCPtr, RTGCUINTPTR cb));
2111
2112 DECLR0CALLBACKMEMBER(int, pfnR0BthTrap0eHandler,(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
2113 DECLR0CALLBACKMEMBER(int, pfnR0BthInvalidatePage,(PVM pVM, RTGCPTR GCPtrPage));
2114 DECLR0CALLBACKMEMBER(int, pfnR0BthSyncCR3,(PVM pVM, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
2115 DECLR0CALLBACKMEMBER(int, pfnR0BthSyncPage,(PVM pVM, X86PDE PdeSrc, RTGCUINTPTR GCPtrPage, unsigned cPages, unsigned uError));
2116 DECLR0CALLBACKMEMBER(int, pfnR0BthPrefetchPage,(PVM pVM, RTGCUINTPTR GCPtrPage));
2117 DECLR0CALLBACKMEMBER(int, pfnR0BthVerifyAccessSyncPage,(PVM pVM, RTGCUINTPTR GCPtrPage, unsigned fFlags, unsigned uError));
2118 DECLR0CALLBACKMEMBER(unsigned, pfnR0BthAssertCR3,(PVM pVM, uint64_t cr3, uint64_t cr4, RTGCUINTPTR GCPtr, RTGCUINTPTR cb));
2119
2120 DECLRCCALLBACKMEMBER(int, pfnGCBthTrap0eHandler,(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
2121 DECLRCCALLBACKMEMBER(int, pfnGCBthInvalidatePage,(PVM pVM, RTGCPTR GCPtrPage));
2122 DECLRCCALLBACKMEMBER(int, pfnGCBthSyncCR3,(PVM pVM, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
2123 DECLRCCALLBACKMEMBER(int, pfnGCBthSyncPage,(PVM pVM, X86PDE PdeSrc, RTGCUINTPTR GCPtrPage, unsigned cPages, unsigned uError));
2124 DECLRCCALLBACKMEMBER(int, pfnGCBthPrefetchPage,(PVM pVM, RTGCUINTPTR GCPtrPage));
2125 DECLRCCALLBACKMEMBER(int, pfnGCBthVerifyAccessSyncPage,(PVM pVM, RTGCUINTPTR GCPtrPage, unsigned fFlags, unsigned uError));
2126 DECLRCCALLBACKMEMBER(unsigned, pfnGCBthAssertCR3,(PVM pVM, uint64_t cr3, uint64_t cr4, RTGCUINTPTR GCPtr, RTGCUINTPTR cb));
2127#if HC_ARCH_BITS == 64
2128 RTRCPTR alignment2; /**< structure size alignment. */
2129#endif
2130 /** @} */
2131
2132 /** Pointer to SHW+GST mode data (function pointers).
2133 * The index into this table is made up from */
2134 R3PTRTYPE(PPGMMODEDATA) paModeData;
2135
2136 /** Pointer to the list of RAM ranges (Phys GC -> Phys HC conversion) - for R3.
2137 * This is sorted by physical address and contains no overlapping ranges. */
2138 R3PTRTYPE(PPGMRAMRANGE) pRamRangesR3;
2139 /** R0 pointer corresponding to PGM::pRamRangesR3. */
2140 R0PTRTYPE(PPGMRAMRANGE) pRamRangesR0;
2141 /** RC pointer corresponding to PGM::pRamRangesR3. */
2142 RCPTRTYPE(PPGMRAMRANGE) pRamRangesRC;
2143 /** The configured RAM size. */
2144 RTUINT cbRamSize;
2145
2146 /** Pointer to the list of ROM ranges - for R3.
2147 * This is sorted by physical address and contains no overlapping ranges. */
2148 R3PTRTYPE(PPGMROMRANGE) pRomRangesR3;
2149 /** R0 pointer corresponding to PGM::pRomRangesR3. */
2150 R0PTRTYPE(PPGMRAMRANGE) pRomRangesR0;
2151 /** GC pointer corresponding to PGM::pRomRangesR3. */
2152 RCPTRTYPE(PPGMRAMRANGE) pRomRangesGC;
2153 /** Alignment padding. */
2154 RTRCPTR GCPtrPadding2;
2155
2156 /** Pointer to the list of MMIO2 ranges - for R3.
2157 * Registration order. */
2158 R3PTRTYPE(PPGMMMIO2RANGE) pMmio2RangesR3;
2159
2160 /** PGM offset based trees - HC Ptr. */
2161 R3R0PTRTYPE(PPGMTREES) pTreesHC;
2162 /** PGM offset based trees - GC Ptr. */
2163 RCPTRTYPE(PPGMTREES) pTreesGC;
2164
2165 /** Linked list of GC mappings - for GC.
2166 * The list is sorted ascending on address.
2167 */
2168 RCPTRTYPE(PPGMMAPPING) pMappingsRC;
2169 /** Linked list of GC mappings - for HC.
2170 * The list is sorted ascending on address.
2171 */
2172 R3PTRTYPE(PPGMMAPPING) pMappingsR3;
2173 /** Linked list of GC mappings - for R0.
2174 * The list is sorted ascending on address.
2175 */
2176 R0PTRTYPE(PPGMMAPPING) pMappingsR0;
2177
2178 /** If set no conflict checks are required. (boolean) */
2179 bool fMappingsFixed;
2180 /** If set, then no mappings are put into the shadow page table. (boolean) */
2181 bool fDisableMappings;
2182 /** Size of fixed mapping */
2183 uint32_t cbMappingFixed;
2184 /** Base address (GC) of fixed mapping */
2185 RTGCPTR GCPtrMappingFixed;
2186#if HC_ARCH_BITS == 64 && GC_ARCH_BITS == 32
2187 uint32_t u32Padding0; /**< alignment padding. */
2188#endif
2189
2190
2191 /** @name Intermediate Context
2192 * @{ */
2193 /** Pointer to the intermediate page directory - Normal. */
2194 R3PTRTYPE(PX86PD) pInterPD;
2195 /** Pointer to the intermediate page tables - Normal.
2196 * There are two page tables, one for the identity mapping and one for
2197 * the host context mapping (of the core code). */
2198 R3PTRTYPE(PX86PT) apInterPTs[2];
2199 /** Pointer to the intermediate page tables - PAE. */
2200 R3PTRTYPE(PX86PTPAE) apInterPaePTs[2];
2201 /** Pointer to the intermediate page directory - PAE. */
2202 R3PTRTYPE(PX86PDPAE) apInterPaePDs[4];
2203 /** Pointer to the intermediate page directory pointer table - PAE. */
2204 R3PTRTYPE(PX86PDPT) pInterPaePDPT;
2205 /** Pointer to the intermediate page-map level 4 - AMD64. */
2206 R3PTRTYPE(PX86PML4) pInterPaePML4;
2207 /** Pointer to the intermediate page directory pointer table - AMD64. */
2208 R3PTRTYPE(PX86PDPT) pInterPaePDPT64;
2209 /** The Physical Address (HC) of the intermediate Page Directory - Normal. */
2210 RTHCPHYS HCPhysInterPD;
2211 /** The Physical Address (HC) of the intermediate Page Directory Pointer Table - PAE. */
2212 RTHCPHYS HCPhysInterPaePDPT;
2213 /** The Physical Address (HC) of the intermediate Page Map Level 4 table - AMD64. */
2214 RTHCPHYS HCPhysInterPaePML4;
2215 /** @} */
2216
2217 /** Base address of the dynamic page mapping area.
2218 * The array is MM_HYPER_DYNAMIC_SIZE bytes big.
2219 */
2220 RCPTRTYPE(uint8_t *) pbDynPageMapBaseGC;
2221 /** The index of the last entry used in the dynamic page mapping area. */
2222 RTUINT iDynPageMapLast;
2223 /** Cache containing the last entries in the dynamic page mapping area.
2224 * The cache size is covering half of the mapping area. */
2225 RTHCPHYS aHCPhysDynPageMapCache[MM_HYPER_DYNAMIC_SIZE >> (PAGE_SHIFT + 1)];
2226
2227 /** 4 MB page mask; 32 or 36 bits depending on PSE-36 */
2228 RTGCPHYS GCPhys4MBPSEMask;
2229
2230 /** A20 gate mask.
2231 * Our current approach to A20 emulation is to let REM do it and don't bother
2232 * anywhere else. The interesting Guests will be operating with it enabled anyway.
2233 * But should the need arise, we'll subject physical addresses to this mask. */
2234 RTGCPHYS GCPhysA20Mask;
2235 /** A20 gate state - boolean! */
2236 RTUINT fA20Enabled;
2237
2238 /** What needs syncing (PGM_SYNC_*).
2239 * This is used to queue operations for PGMSyncCR3, PGMInvalidatePage,
2240 * PGMFlushTLB, and PGMR3Load. */
2241 RTUINT fSyncFlags;
2242
2243 /** PGM critical section.
2244 * This protects the physical & virtual access handlers, ram ranges,
2245 * and the page flag updating (some of it anyway).
2246 */
2247 PDMCRITSECT CritSect;
2248
2249 /** Shadow Page Pool - HC Ptr. */
2250 R3R0PTRTYPE(PPGMPOOL) pPoolHC;
2251 /** Shadow Page Pool - GC Ptr. */
2252 RCPTRTYPE(PPGMPOOL) pPoolGC;
2253
2254 /** We're not in a state which permits writes to guest memory.
2255 * (Only used in strict builds.) */
2256 bool fNoMorePhysWrites;
2257
2258 /** Flush the cache on the next access. */
2259 bool fPhysCacheFlushPending;
2260/** @todo r=bird: Fix member names!*/
2261 /** PGMPhysRead cache */
2262 PGMPHYSCACHE pgmphysreadcache;
2263 /** PGMPhysWrite cache */
2264 PGMPHYSCACHE pgmphyswritecache;
2265
2266 /**
2267 * Data associated with managing the ring-3 mappings of the allocation chunks.
2268 */
2269 struct
2270 {
2271 /** The chunk tree, ordered by chunk id. */
2272 R3R0PTRTYPE(PAVLU32NODECORE) pTree;
2273 /** The chunk mapping TLB. */
2274 PGMCHUNKR3MAPTLB Tlb;
2275 /** The number of mapped chunks. */
2276 uint32_t c;
2277 /** The maximum number of mapped chunks.
2278 * @cfgm PGM/MaxRing3Chunks */
2279 uint32_t cMax;
2280 /** The chunk age tree, ordered by ageing sequence number. */
2281 R3PTRTYPE(PAVLLU32NODECORE) pAgeTree;
2282 /** The current time. */
2283 uint32_t iNow;
2284 /** Number of pgmR3PhysChunkFindUnmapCandidate calls left to the next ageing. */
2285 uint32_t AgeingCountdown;
2286 } ChunkR3Map;
2287
2288 /**
2289 * The page mapping TLB for ring-3 and (for the time being) ring-0.
2290 */
2291 PGMPAGER3MAPTLB PhysTlbHC;
2292
2293 /** @name The zero page.
2294 * @{ */
2295 /** The host physical address of the zero page. */
2296 RTHCPHYS HCPhysZeroPg;
2297 /** The ring-3 mapping of the zero page. */
2298 RTR3PTR pvZeroPgR3;
2299 /** The ring-0 mapping of the zero page. */
2300 RTR0PTR pvZeroPgR0;
2301 /** The GC mapping of the zero page. */
2302 RTGCPTR pvZeroPgGC;
2303#if GC_ARCH_BITS != 32
2304 uint32_t u32ZeroAlignment; /**< Alignment padding. */
2305#endif
2306 /** @}*/
2307
2308 /** The number of handy pages. */
2309 uint32_t cHandyPages;
2310 /**
2311 * Array of handy pages.
2312 *
2313 * This array is used in a two way communication between pgmPhysAllocPage
2314 * and GMMR0AllocateHandyPages, with PGMR3PhysAllocateHandyPages serving as
2315 * an intermediary.
2316 *
2317 * The size of this array is important, see pgmPhysEnsureHandyPage for details.
2318 * (The current size of 32 pages, means 128 KB of handy memory.)
2319 */
2320 GMMPAGEDESC aHandyPages[32];
2321
2322 /** @name Release Statistics
2323 * @{ */
2324 uint32_t cAllPages; /**< The total number of pages. (Should be Private + Shared + Zero.) */
2325 uint32_t cPrivatePages; /**< The number of private pages. */
2326 uint32_t cSharedPages; /**< The number of shared pages. */
2327 uint32_t cZeroPages; /**< The number of zero backed pages. */
2328 /** The number of times the guest has switched mode since last reset or statistics reset. */
2329 STAMCOUNTER cGuestModeChanges;
2330 /** @} */
2331
2332#ifdef VBOX_WITH_STATISTICS
2333 /** GC: Which statistic this \#PF should be attributed to. */
2334 RCPTRTYPE(PSTAMPROFILE) pStatTrap0eAttributionGC;
2335 RTRCPTR padding0;
2336 /** HC: Which statistic this \#PF should be attributed to. */
2337 R3R0PTRTYPE(PSTAMPROFILE) pStatTrap0eAttributionHC;
2338 RTHCPTR padding1;
2339 STAMPROFILE StatGCTrap0e; /**< GC: PGMGCTrap0eHandler() profiling. */
2340 STAMPROFILE StatTrap0eCSAM; /**< Profiling of the Trap0eHandler body when the cause is CSAM. */
2341 STAMPROFILE StatTrap0eDirtyAndAccessedBits; /**< Profiling of the Trap0eHandler body when the cause is dirty and/or accessed bit emulation. */
2342 STAMPROFILE StatTrap0eGuestTrap; /**< Profiling of the Trap0eHandler body when the cause is a guest trap. */
2343 STAMPROFILE StatTrap0eHndPhys; /**< Profiling of the Trap0eHandler body when the cause is a physical handler. */
2344 STAMPROFILE StatTrap0eHndVirt; /**< Profiling of the Trap0eHandler body when the cause is a virtual handler. */
2345 STAMPROFILE StatTrap0eHndUnhandled; /**< Profiling of the Trap0eHandler body when the cause is access outside the monitored areas of a monitored page. */
2346 STAMPROFILE StatTrap0eMisc; /**< Profiling of the Trap0eHandler body when the cause is not known. */
2347 STAMPROFILE StatTrap0eOutOfSync; /**< Profiling of the Trap0eHandler body when the cause is an out-of-sync page. */
2348 STAMPROFILE StatTrap0eOutOfSyncHndPhys; /**< Profiling of the Trap0eHandler body when the cause is an out-of-sync physical handler page. */
2349 STAMPROFILE StatTrap0eOutOfSyncHndVirt; /**< Profiling of the Trap0eHandler body when the cause is an out-of-sync virtual handler page. */
2350 STAMPROFILE StatTrap0eOutOfSyncObsHnd; /**< Profiling of the Trap0eHandler body when the cause is an obsolete handler page. */
2351 STAMPROFILE StatTrap0eSyncPT; /**< Profiling of the Trap0eHandler body when the cause is lazy syncing of a PT. */
2352
2353 STAMCOUNTER StatTrap0eMapHandler; /**< Number of traps due to access handlers in mappings. */
2354 STAMCOUNTER StatGCTrap0eConflicts; /**< GC: The number of times \#PF was caused by an undetected conflict. */
2355
2356 STAMCOUNTER StatGCTrap0eUSNotPresentRead;
2357 STAMCOUNTER StatGCTrap0eUSNotPresentWrite;
2358 STAMCOUNTER StatGCTrap0eUSWrite;
2359 STAMCOUNTER StatGCTrap0eUSReserved;
2360 STAMCOUNTER StatGCTrap0eUSNXE;
2361 STAMCOUNTER StatGCTrap0eUSRead;
2362
2363 STAMCOUNTER StatGCTrap0eSVNotPresentRead;
2364 STAMCOUNTER StatGCTrap0eSVNotPresentWrite;
2365 STAMCOUNTER StatGCTrap0eSVWrite;
2366 STAMCOUNTER StatGCTrap0eSVReserved;
2367 STAMCOUNTER StatGCTrap0eSNXE;
2368
2369 STAMCOUNTER StatTrap0eWPEmulGC;
2370 STAMCOUNTER StatTrap0eWPEmulR3;
2371
2372 STAMCOUNTER StatGCTrap0eUnhandled;
2373 STAMCOUNTER StatGCTrap0eMap;
2374
2375 /** GC: PGMSyncPT() profiling. */
2376 STAMPROFILE StatGCSyncPT;
2377 /** GC: The number of times PGMSyncPT() needed to allocate page tables. */
2378 STAMCOUNTER StatGCSyncPTAlloc;
2379 /** GC: The number of times PGMSyncPT() detected conflicts. */
2380 STAMCOUNTER StatGCSyncPTConflict;
2381 /** GC: The number of times PGMSyncPT() failed. */
2382 STAMCOUNTER StatGCSyncPTFailed;
2383 /** GC: PGMGCInvalidatePage() profiling. */
2384 STAMPROFILE StatGCInvalidatePage;
2385 /** GC: The number of times PGMGCInvalidatePage() was called for a 4KB page. */
2386 STAMCOUNTER StatGCInvalidatePage4KBPages;
2387 /** GC: The number of times PGMGCInvalidatePage() was called for a 4MB page. */
2388 STAMCOUNTER StatGCInvalidatePage4MBPages;
2389 /** GC: The number of times PGMGCInvalidatePage() skipped a 4MB page. */
2390 STAMCOUNTER StatGCInvalidatePage4MBPagesSkip;
2391 /** GC: The number of times PGMGCInvalidatePage() was called for a not accessed page directory. */
2392 STAMCOUNTER StatGCInvalidatePagePDNAs;
2393 /** GC: The number of times PGMGCInvalidatePage() was called for a not present page directory. */
2394 STAMCOUNTER StatGCInvalidatePagePDNPs;
2395 /** GC: The number of times PGMGCInvalidatePage() was called for a page directory containing mappings (no conflict). */
2396 STAMCOUNTER StatGCInvalidatePagePDMappings;
2397 /** GC: The number of times PGMGCInvalidatePage() was called for an out of sync page directory. */
2398 STAMCOUNTER StatGCInvalidatePagePDOutOfSync;
    /** GC: The number of times PGMGCInvalidatePage() was skipped due to not present shw or pending SyncCR3. */
2400 STAMCOUNTER StatGCInvalidatePageSkipped;
2401 /** GC: The number of times user page is out of sync was detected in GC. */
2402 STAMCOUNTER StatGCPageOutOfSyncUser;
2403 /** GC: The number of times supervisor page is out of sync was detected in GC. */
2404 STAMCOUNTER StatGCPageOutOfSyncSupervisor;
    /** GC: The number of dynamic page mapping cache misses */
2406 STAMCOUNTER StatDynMapCacheMisses;
    /** GC: The number of dynamic page mapping cache hits */
2408 STAMCOUNTER StatDynMapCacheHits;
2409 /** GC: The number of times pgmGCGuestPDWriteHandler() was successfully called. */
2410 STAMCOUNTER StatGCGuestCR3WriteHandled;
2411 /** GC: The number of times pgmGCGuestPDWriteHandler() was called and we had to fall back to the recompiler. */
2412 STAMCOUNTER StatGCGuestCR3WriteUnhandled;
2413 /** GC: The number of times pgmGCGuestPDWriteHandler() was called and a conflict was detected. */
2414 STAMCOUNTER StatGCGuestCR3WriteConflict;
2415 /** GC: Number of out-of-sync handled pages. */
2416 STAMCOUNTER StatHandlersOutOfSync;
2417 /** GC: Number of traps due to physical access handlers. */
2418 STAMCOUNTER StatHandlersPhysical;
2419 /** GC: Number of traps due to virtual access handlers. */
2420 STAMCOUNTER StatHandlersVirtual;
2421 /** GC: Number of traps due to virtual access handlers found by physical address. */
2422 STAMCOUNTER StatHandlersVirtualByPhys;
2423 /** GC: Number of traps due to virtual access handlers found by virtual address (without proper physical flags). */
2424 STAMCOUNTER StatHandlersVirtualUnmarked;
2425 /** GC: Number of traps due to access outside range of monitored page(s). */
2426 STAMCOUNTER StatHandlersUnhandled;
2427 /** GC: Number of traps due to access to invalid physical memory. */
2428 STAMCOUNTER StatHandlersInvalid;
2429
2430 /** GC: The number of times pgmGCGuestROMWriteHandler() was successfully called. */
2431 STAMCOUNTER StatGCGuestROMWriteHandled;
2432 /** GC: The number of times pgmGCGuestROMWriteHandler() was called and we had to fall back to the recompiler */
2433 STAMCOUNTER StatGCGuestROMWriteUnhandled;
2434
2435 /** HC: PGMR3InvalidatePage() profiling. */
2436 STAMPROFILE StatHCInvalidatePage;
2437 /** HC: The number of times PGMR3InvalidatePage() was called for a 4KB page. */
2438 STAMCOUNTER StatHCInvalidatePage4KBPages;
2439 /** HC: The number of times PGMR3InvalidatePage() was called for a 4MB page. */
2440 STAMCOUNTER StatHCInvalidatePage4MBPages;
2441 /** HC: The number of times PGMR3InvalidatePage() skipped a 4MB page. */
2442 STAMCOUNTER StatHCInvalidatePage4MBPagesSkip;
2443 /** HC: The number of times PGMR3InvalidatePage() was called for a not accessed page directory. */
2444 STAMCOUNTER StatHCInvalidatePagePDNAs;
2445 /** HC: The number of times PGMR3InvalidatePage() was called for a not present page directory. */
2446 STAMCOUNTER StatHCInvalidatePagePDNPs;
2447 /** HC: The number of times PGMR3InvalidatePage() was called for a page directory containing mappings (no conflict). */
2448 STAMCOUNTER StatHCInvalidatePagePDMappings;
    /** HC: The number of times PGMR3InvalidatePage() was called for an out of sync page directory. */
2450 STAMCOUNTER StatHCInvalidatePagePDOutOfSync;
    /** HC: The number of times PGMR3InvalidatePage() was skipped due to not present shw or pending SyncCR3. */
2452 STAMCOUNTER StatHCInvalidatePageSkipped;
2453 /** HC: PGMR3SyncPT() profiling. */
2454 STAMPROFILE StatHCSyncPT;
2455 /** HC: pgmr3SyncPTResolveConflict() profiling (includes the entire relocation). */
2456 STAMPROFILE StatHCResolveConflict;
2457 /** HC: Number of times PGMR3CheckMappingConflicts() detected a conflict. */
2458 STAMCOUNTER StatHCDetectedConflicts;
2459 /** HC: The total number of times pgmHCGuestPDWriteHandler() was called. */
2460 STAMCOUNTER StatHCGuestPDWrite;
2461 /** HC: The number of times pgmHCGuestPDWriteHandler() detected a conflict */
2462 STAMCOUNTER StatHCGuestPDWriteConflict;
2463
2464 /** HC: The number of pages marked not present for accessed bit emulation. */
2465 STAMCOUNTER StatHCAccessedPage;
2466 /** HC: The number of pages marked read-only for dirty bit tracking. */
2467 STAMCOUNTER StatHCDirtyPage;
2468 /** HC: The number of pages marked read-only for dirty bit tracking. */
2469 STAMCOUNTER StatHCDirtyPageBig;
2470 /** HC: The number of traps generated for dirty bit tracking. */
2471 STAMCOUNTER StatHCDirtyPageTrap;
2472 /** HC: The number of pages already dirty or readonly. */
2473 STAMCOUNTER StatHCDirtyPageSkipped;
2474
2475 /** GC: The number of pages marked not present for accessed bit emulation. */
2476 STAMCOUNTER StatGCAccessedPage;
2477 /** GC: The number of pages marked read-only for dirty bit tracking. */
2478 STAMCOUNTER StatGCDirtyPage;
2479 /** GC: The number of pages marked read-only for dirty bit tracking. */
2480 STAMCOUNTER StatGCDirtyPageBig;
2481 /** GC: The number of traps generated for dirty bit tracking. */
2482 STAMCOUNTER StatGCDirtyPageTrap;
2483 /** GC: The number of pages already dirty or readonly. */
2484 STAMCOUNTER StatGCDirtyPageSkipped;
2485 /** GC: The number of pages marked dirty because of write accesses. */
2486 STAMCOUNTER StatGCDirtiedPage;
2487 /** GC: The number of pages already marked dirty because of write accesses. */
2488 STAMCOUNTER StatGCPageAlreadyDirty;
2489 /** GC: The number of real pages faults during dirty bit tracking. */
2490 STAMCOUNTER StatGCDirtyTrackRealPF;
2491
2492 /** GC: Profiling of the PGMTrackDirtyBit() body */
2493 STAMPROFILE StatGCDirtyBitTracking;
2494 /** HC: Profiling of the PGMTrackDirtyBit() body */
2495 STAMPROFILE StatHCDirtyBitTracking;
2496
2497 /** GC: Profiling of the PGMGstModifyPage() body */
2498 STAMPROFILE StatGCGstModifyPage;
2499 /** HC: Profiling of the PGMGstModifyPage() body */
2500 STAMPROFILE StatHCGstModifyPage;
2501
2502 /** GC: The number of time we've marked a PD not present from SyncPage to virtualize the accessed bit. */
2503 STAMCOUNTER StatGCSyncPagePDNAs;
2504 /** GC: The number of time we've encountered an out-of-sync PD in SyncPage. */
2505 STAMCOUNTER StatGCSyncPagePDOutOfSync;
2506 /** HC: The number of time we've marked a PD not present from SyncPage to virtualize the accessed bit. */
2507 STAMCOUNTER StatHCSyncPagePDNAs;
2508 /** HC: The number of time we've encountered an out-of-sync PD in SyncPage. */
2509 STAMCOUNTER StatHCSyncPagePDOutOfSync;
2510
2511 STAMCOUNTER StatSynPT4kGC;
2512 STAMCOUNTER StatSynPT4kHC;
2513 STAMCOUNTER StatSynPT4MGC;
2514 STAMCOUNTER StatSynPT4MHC;
2515
2516 /** Profiling of the PGMFlushTLB() body. */
2517 STAMPROFILE StatFlushTLB;
2518 /** The number of times PGMFlushTLB was called with a new CR3, non-global. (switch) */
2519 STAMCOUNTER StatFlushTLBNewCR3;
2520 /** The number of times PGMFlushTLB was called with a new CR3, global. (switch) */
2521 STAMCOUNTER StatFlushTLBNewCR3Global;
2522 /** The number of times PGMFlushTLB was called with the same CR3, non-global. (flush) */
2523 STAMCOUNTER StatFlushTLBSameCR3;
2524 /** The number of times PGMFlushTLB was called with the same CR3, global. (flush) */
2525 STAMCOUNTER StatFlushTLBSameCR3Global;
2526
2527 STAMPROFILE StatGCSyncCR3; /**< GC: PGMSyncCR3() profiling. */
2528 STAMPROFILE StatGCSyncCR3Handlers; /**< GC: Profiling of the PGMSyncCR3() update handler section. */
2529 STAMPROFILE StatGCSyncCR3HandlerVirtualReset; /**< GC: Profiling of the virtual handler resets. */
2530 STAMPROFILE StatGCSyncCR3HandlerVirtualUpdate; /**< GC: Profiling of the virtual handler updates. */
2531 STAMCOUNTER StatGCSyncCR3Global; /**< GC: The number of global CR3 syncs. */
2532 STAMCOUNTER StatGCSyncCR3NotGlobal; /**< GC: The number of non-global CR3 syncs. */
2533 STAMCOUNTER StatGCSyncCR3DstFreed; /**< GC: The number of times we've had to free a shadow entry. */
2534 STAMCOUNTER StatGCSyncCR3DstFreedSrcNP; /**< GC: The number of times we've had to free a shadow entry for which the source entry was not present. */
2535 STAMCOUNTER StatGCSyncCR3DstNotPresent; /**< GC: The number of times we've encountered a not present shadow entry for a present guest entry. */
2536 STAMCOUNTER StatGCSyncCR3DstSkippedGlobalPD; /**< GC: The number of times a global page directory wasn't flushed. */
2537 STAMCOUNTER StatGCSyncCR3DstSkippedGlobalPT; /**< GC: The number of times a page table with only global entries wasn't flushed. */
2538 STAMCOUNTER StatGCSyncCR3DstCacheHit; /**< GC: The number of times we got some kind of cache hit on a page table. */
2539
2540 STAMPROFILE StatHCSyncCR3; /**< HC: PGMSyncCR3() profiling. */
2541 STAMPROFILE StatHCSyncCR3Handlers; /**< HC: Profiling of the PGMSyncCR3() update handler section. */
2542 STAMPROFILE StatHCSyncCR3HandlerVirtualReset; /**< HC: Profiling of the virtual handler resets. */
2543 STAMPROFILE StatHCSyncCR3HandlerVirtualUpdate; /**< HC: Profiling of the virtual handler updates. */
2544 STAMCOUNTER StatHCSyncCR3Global; /**< HC: The number of global CR3 syncs. */
2545 STAMCOUNTER StatHCSyncCR3NotGlobal; /**< HC: The number of non-global CR3 syncs. */
2546 STAMCOUNTER StatHCSyncCR3DstFreed; /**< HC: The number of times we've had to free a shadow entry. */
2547 STAMCOUNTER StatHCSyncCR3DstFreedSrcNP; /**< HC: The number of times we've had to free a shadow entry for which the source entry was not present. */
2548 STAMCOUNTER StatHCSyncCR3DstNotPresent; /**< HC: The number of times we've encountered a not present shadow entry for a present guest entry. */
2549 STAMCOUNTER StatHCSyncCR3DstSkippedGlobalPD; /**< HC: The number of times a global page directory wasn't flushed. */
2550 STAMCOUNTER StatHCSyncCR3DstSkippedGlobalPT; /**< HC: The number of times a page table with only global entries wasn't flushed. */
2551 STAMCOUNTER StatHCSyncCR3DstCacheHit; /**< HC: The number of times we got some kind of cache hit on a page table. */
2552
2553 /** GC: Profiling of pgmHandlerVirtualFindByPhysAddr. */
2554 STAMPROFILE StatVirtHandleSearchByPhysGC;
2555 /** HC: Profiling of pgmHandlerVirtualFindByPhysAddr. */
2556 STAMPROFILE StatVirtHandleSearchByPhysHC;
2557 /** HC: The number of times PGMR3HandlerPhysicalReset is called. */
2558 STAMCOUNTER StatHandlePhysicalReset;
2559
2560 STAMPROFILE StatCheckPageFault;
2561 STAMPROFILE StatLazySyncPT;
2562 STAMPROFILE StatMapping;
2563 STAMPROFILE StatOutOfSync;
2564 STAMPROFILE StatHandlers;
2565 STAMPROFILE StatEIPHandlers;
2566 STAMPROFILE StatHCPrefetch;
2567
2568# ifdef PGMPOOL_WITH_GCPHYS_TRACKING
2569 /** The number of first time shadowings. */
2570 STAMCOUNTER StatTrackVirgin;
2571 /** The number of times switching to cRef2, i.e. the page is being shadowed by two PTs. */
2572 STAMCOUNTER StatTrackAliased;
2573 /** The number of times we're tracking using cRef2. */
2574 STAMCOUNTER StatTrackAliasedMany;
2575 /** The number of times we're hitting pages which has overflowed cRef2. */
2576 STAMCOUNTER StatTrackAliasedLots;
    /** The number of times the extent list grows too long. */
2578 STAMCOUNTER StatTrackOverflows;
2579 /** Profiling of SyncPageWorkerTrackDeref (expensive). */
2580 STAMPROFILE StatTrackDeref;
2581# endif
2582
2583 /** Ring-3/0 page mapper TLB hits. */
2584 STAMCOUNTER StatPageHCMapTlbHits;
2585 /** Ring-3/0 page mapper TLB misses. */
2586 STAMCOUNTER StatPageHCMapTlbMisses;
2587 /** Ring-3/0 chunk mapper TLB hits. */
2588 STAMCOUNTER StatChunkR3MapTlbHits;
2589 /** Ring-3/0 chunk mapper TLB misses. */
2590 STAMCOUNTER StatChunkR3MapTlbMisses;
2591 /** Times a shared page has been replaced by a private one. */
2592 STAMCOUNTER StatPageReplaceShared;
2593 /** Times the zero page has been replaced by a private one. */
2594 STAMCOUNTER StatPageReplaceZero;
2595 /** The number of times we've executed GMMR3AllocateHandyPages. */
2596 STAMCOUNTER StatPageHandyAllocs;
2597
2598 /** Allocated mbs of guest ram */
2599 STAMCOUNTER StatDynRamTotal;
2600 /** Nr of pgmr3PhysGrowRange calls. */
2601 STAMCOUNTER StatDynRamGrow;
2602
2603 STAMCOUNTER StatGCTrap0ePD[X86_PG_ENTRIES];
2604 STAMCOUNTER StatGCSyncPtPD[X86_PG_ENTRIES];
2605 STAMCOUNTER StatGCSyncPagePD[X86_PG_ENTRIES];
2606#endif
2607} PGM, *PPGM;
2608
2609
2610/** @name PGM::fSyncFlags Flags
2611 * @{
2612 */
2613/** Updates the virtual access handler state bit in PGMPAGE. */
2614#define PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL RT_BIT(0)
2615/** Always sync CR3. */
2616#define PGM_SYNC_ALWAYS RT_BIT(1)
2617/** Check monitoring on next CR3 (re)load and invalidate page. */
2618#define PGM_SYNC_MONITOR_CR3 RT_BIT(2)
2619/** Clear the page pool (a light weight flush). */
2620#define PGM_SYNC_CLEAR_PGM_POOL RT_BIT(8)
2621/** @} */
2622
2623
2624__BEGIN_DECLS
2625
2626int pgmLock(PVM pVM);
2627void pgmUnlock(PVM pVM);
2628
2629VMMRCDECL(int) pgmGCGuestPDWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, void *pvFault, RTGCPHYS GCPhysFault, void *pvUser);
2630VMMDECL(int) pgmPhysRomWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, void *pvFault, RTGCPHYS GCPhysFault, void *pvUser);
2631
2632int pgmR3SyncPTResolveConflict(PVM pVM, PPGMMAPPING pMapping, PX86PD pPDSrc, RTGCPTR GCPtrOldMapping);
2633int pgmR3SyncPTResolveConflictPAE(PVM pVM, PPGMMAPPING pMapping, RTGCPTR GCPtrOldMapping);
2634PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr);
2635void pgmR3MapRelocate(PVM pVM, PPGMMAPPING pMapping, RTGCPTR GCPtrOldMapping, RTGCPTR GCPtrNewMapping);
2636DECLCALLBACK(void) pgmR3MapInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
2637
2638void pgmR3HandlerPhysicalUpdateAll(PVM pVM);
2639int pgmHandlerVirtualFindByPhysAddr(PVM pVM, RTGCPHYS GCPhys, PPGMVIRTHANDLER *ppVirt, unsigned *piPage);
2640DECLCALLBACK(int) pgmHandlerVirtualResetOne(PAVLROGCPTRNODECORE pNode, void *pvUser);
2641#if defined(VBOX_STRICT) || defined(LOG_ENABLED)
2642void pgmHandlerVirtualDumpPhysPages(PVM pVM);
2643#else
2644# define pgmHandlerVirtualDumpPhysPages(a) do { } while (0)
2645#endif
2646DECLCALLBACK(void) pgmR3InfoHandlers(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
2647
2648
2649void pgmPhysFreePage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys);
2650int pgmPhysPageLoadIntoTlb(PPGM pPGM, RTGCPHYS GCPhys);
2651int pgmPhysPageMakeWritable(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys);
2652int pgmPhysPageMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv);
2653#ifdef IN_RING3
2654int pgmR3PhysChunkMap(PVM pVM, uint32_t idChunk, PPPGMCHUNKR3MAP ppChunk);
2655int pgmR3PhysRamReset(PVM pVM);
2656int pgmR3PhysRomReset(PVM pVM);
2657#ifndef VBOX_WITH_NEW_PHYS_CODE
2658int pgmr3PhysGrowRange(PVM pVM, RTGCPHYS GCPhys);
2659#endif
2660
2661int pgmR3PoolInit(PVM pVM);
2662void pgmR3PoolRelocate(PVM pVM);
2663void pgmR3PoolReset(PVM pVM);
2664
2665#endif /* IN_RING3 */
2666#ifdef IN_GC
2667void *pgmGCPoolMapPage(PVM pVM, PPGMPOOLPAGE pPage);
2668#endif
2669int pgmPoolAlloc(PVM pVM, RTGCPHYS GCPhys, PGMPOOLKIND enmKind, uint16_t iUser, uint32_t iUserTable, PPPGMPOOLPAGE ppPage);
2670PPGMPOOLPAGE pgmPoolGetPageByHCPhys(PVM pVM, RTHCPHYS HCPhys);
2671void pgmPoolFree(PVM pVM, RTHCPHYS HCPhys, uint16_t iUser, uint32_t iUserTable);
2672void pgmPoolFreeByPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage, uint16_t iUser, uint32_t iUserTable);
2673int pgmPoolFlushPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage);
2674void pgmPoolFlushAll(PVM pVM);
2675void pgmPoolClearAll(PVM pVM);
2676int pgmPoolSyncCR3(PVM pVM);
2677void pgmPoolTrackFlushGCPhysPT(PVM pVM, PPGMPAGE pPhysPage, uint16_t iShw, uint16_t cRefs);
2678void pgmPoolTrackFlushGCPhysPTs(PVM pVM, PPGMPAGE pPhysPage, uint16_t iPhysExt);
2679int pgmPoolTrackFlushGCPhysPTsSlow(PVM pVM, PPGMPAGE pPhysPage);
2680PPGMPOOLPHYSEXT pgmPoolTrackPhysExtAlloc(PVM pVM, uint16_t *piPhysExt);
2681void pgmPoolTrackPhysExtFree(PVM pVM, uint16_t iPhysExt);
2682void pgmPoolTrackPhysExtFreeList(PVM pVM, uint16_t iPhysExt);
2683uint16_t pgmPoolTrackPhysExtAddref(PVM pVM, uint16_t u16, uint16_t iShwPT);
2684void pgmPoolTrackPhysExtDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPoolPage, PPGMPAGE pPhysPage);
2685#ifdef PGMPOOL_WITH_MONITORING
2686# ifdef IN_RING3
2687void pgmPoolMonitorChainChanging(PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTGCPHYS GCPhysFault, RTHCPTR pvAddress, PDISCPUSTATE pCpu);
2688# else
2689void pgmPoolMonitorChainChanging(PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTGCPHYS GCPhysFault, RTGCPTR pvAddress, PDISCPUSTATE pCpu);
2690# endif
2691int pgmPoolMonitorChainFlush(PPGMPOOL pPool, PPGMPOOLPAGE pPage);
2692void pgmPoolMonitorModifiedInsert(PPGMPOOL pPool, PPGMPOOLPAGE pPage);
2693void pgmPoolMonitorModifiedClearAll(PVM pVM);
2694int pgmPoolMonitorMonitorCR3(PPGMPOOL pPool, uint16_t idxRoot, RTGCPHYS GCPhysCR3);
2695int pgmPoolMonitorUnmonitorCR3(PPGMPOOL pPool, uint16_t idxRoot);
2696#endif
2697
2698__END_DECLS
2699
2700
2701/**
2702 * Gets the PGMRAMRANGE structure for a guest page.
2703 *
2704 * @returns Pointer to the RAM range on success.
2705 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
2706 *
2707 * @param pPGM PGM handle.
2708 * @param GCPhys The GC physical address.
2709 */
2710DECLINLINE(PPGMRAMRANGE) pgmPhysGetRange(PPGM pPGM, RTGCPHYS GCPhys)
2711{
2712 /*
2713 * Optimize for the first range.
2714 */
2715 PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
2716 RTGCPHYS off = GCPhys - pRam->GCPhys;
2717 if (RT_UNLIKELY(off >= pRam->cb))
2718 {
2719 do
2720 {
2721 pRam = pRam->CTX_SUFF(pNext);
2722 if (RT_UNLIKELY(!pRam))
2723 break;
2724 off = GCPhys - pRam->GCPhys;
2725 } while (off >= pRam->cb);
2726 }
2727 return pRam;
2728}
2729
2730
2731/**
2732 * Gets the PGMPAGE structure for a guest page.
2733 *
2734 * @returns Pointer to the page on success.
2735 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
2736 *
2737 * @param pPGM PGM handle.
2738 * @param GCPhys The GC physical address.
2739 */
2740DECLINLINE(PPGMPAGE) pgmPhysGetPage(PPGM pPGM, RTGCPHYS GCPhys)
2741{
2742 /*
2743 * Optimize for the first range.
2744 */
2745 PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
2746 RTGCPHYS off = GCPhys - pRam->GCPhys;
2747 if (RT_UNLIKELY(off >= pRam->cb))
2748 {
2749 do
2750 {
2751 pRam = pRam->CTX_SUFF(pNext);
2752 if (RT_UNLIKELY(!pRam))
2753 return NULL;
2754 off = GCPhys - pRam->GCPhys;
2755 } while (off >= pRam->cb);
2756 }
2757 return &pRam->aPages[off >> PAGE_SHIFT];
2758}
2759
2760
/**
 * Gets the PGMPAGE structure for a guest page.
 *
 * Old Phys code: Will make sure the page is present.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS and a valid *ppPage on success.
 * @retval  VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
 *
 * @param   pPGM        PGM handle.
 * @param   GCPhys      The GC physical address.
 * @param   ppPage      Where to store the page pointer on success.
 */
DECLINLINE(int) pgmPhysGetPageEx(PPGM pPGM, RTGCPHYS GCPhys, PPPGMPAGE ppPage)
{
    /*
     * Optimize for the first range.
     * Note: the unsigned subtraction also rejects GCPhys below the range
     * start, since it wraps around to a huge offset.
     */
    PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
    RTGCPHYS off = GCPhys - pRam->GCPhys;
    if (RT_UNLIKELY(off >= pRam->cb))
    {
        do
        {
            pRam = pRam->CTX_SUFF(pNext);
            if (RT_UNLIKELY(!pRam))
            {
                *ppPage = NULL; /* avoid incorrect and very annoying GCC warnings */
                return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
            }
            off = GCPhys - pRam->GCPhys;
        } while (off >= pRam->cb);
    }
    *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
#ifndef VBOX_WITH_NEW_PHYS_CODE

    /*
     * Make sure it's present.
     * For dynamically allocated ranges the backing chunk may not exist yet;
     * grow it here (ring-3 directly, other contexts via a host call).
     */
    if (RT_UNLIKELY(    !PGM_PAGE_GET_HCPHYS(*ppPage)
                    &&  (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)))
    {
#ifdef IN_RING3
        int rc = pgmr3PhysGrowRange(PGM2VM(pPGM), GCPhys);
#else
        int rc = CTXALLMID(VMM, CallHost)(PGM2VM(pPGM), VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
#endif
        if (VBOX_FAILURE(rc))
        {
            *ppPage = NULL; /* avoid incorrect and very annoying GCC warnings */
            return rc;
        }
        Assert(rc == VINF_SUCCESS);
    }
#endif
    return VINF_SUCCESS;
}
2818
2819
2820
2821
/**
 * Gets the PGMPAGE structure for a guest page.
 *
 * Old Phys code: Will make sure the page is present.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS and a valid *ppPage on success.
 * @retval  VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
 *
 * @param   pPGM        PGM handle.
 * @param   GCPhys      The GC physical address.
 * @param   ppPage      Where to store the page pointer on success.
 * @param   ppRamHint   Where to read and store the ram list hint.
 *                      The caller initializes this to NULL before the call.
 */
DECLINLINE(int) pgmPhysGetPageWithHintEx(PPGM pPGM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRamHint)
{
    RTGCPHYS off;
    PPGMRAMRANGE pRam = *ppRamHint;
    /* Try the caller supplied hint first; off is assigned inside the condition. */
    if (    !pRam
        ||  RT_UNLIKELY((off = GCPhys - pRam->GCPhys) >= pRam->cb))
    {
        /* Hint missed: restart from the head of the RAM range list. */
        pRam = pPGM->CTX_SUFF(pRamRanges);
        off = GCPhys - pRam->GCPhys;
        if (RT_UNLIKELY(off >= pRam->cb))
        {
            do
            {
                pRam = pRam->CTX_SUFF(pNext);
                if (RT_UNLIKELY(!pRam))
                {
                    *ppPage = NULL; /* Kill the incorrect and extremely annoying GCC warnings. */
                    return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
                }
                off = GCPhys - pRam->GCPhys;
            } while (off >= pRam->cb);
        }
        *ppRamHint = pRam; /* Remember the range for the next lookup. */
    }
    *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
#ifndef VBOX_WITH_NEW_PHYS_CODE

    /*
     * Make sure it's present.
     * Dynamically allocated ranges may need their backing chunk grown first.
     */
    if (RT_UNLIKELY(    !PGM_PAGE_GET_HCPHYS(*ppPage)
                    &&  (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)))
    {
#ifdef IN_RING3
        int rc = pgmr3PhysGrowRange(PGM2VM(pPGM), GCPhys);
#else
        int rc = CTXALLMID(VMM, CallHost)(PGM2VM(pPGM), VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
#endif
        if (VBOX_FAILURE(rc))
        {
            *ppPage = NULL; /* Shut up annoying smart ass. */
            return rc;
        }
        Assert(rc == VINF_SUCCESS);
    }
#endif
    return VINF_SUCCESS;
}
2885
2886
2887/**
2888 * Gets the PGMPAGE structure for a guest page together with the PGMRAMRANGE.
2889 *
2890 * @returns Pointer to the page on success.
2891 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
2892 *
2893 * @param pPGM PGM handle.
2894 * @param GCPhys The GC physical address.
2895 * @param ppRam Where to store the pointer to the PGMRAMRANGE.
2896 */
2897DECLINLINE(PPGMPAGE) pgmPhysGetPageAndRange(PPGM pPGM, RTGCPHYS GCPhys, PPGMRAMRANGE *ppRam)
2898{
2899 /*
2900 * Optimize for the first range.
2901 */
2902 PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
2903 RTGCPHYS off = GCPhys - pRam->GCPhys;
2904 if (RT_UNLIKELY(off >= pRam->cb))
2905 {
2906 do
2907 {
2908 pRam = pRam->CTX_SUFF(pNext);
2909 if (RT_UNLIKELY(!pRam))
2910 return NULL;
2911 off = GCPhys - pRam->GCPhys;
2912 } while (off >= pRam->cb);
2913 }
2914 *ppRam = pRam;
2915 return &pRam->aPages[off >> PAGE_SHIFT];
2916}
2917
2918
2919
2920
2921/**
2922 * Gets the PGMPAGE structure for a guest page together with the PGMRAMRANGE.
2923 *
2924 * @returns Pointer to the page on success.
2925 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
2926 *
2927 * @param pPGM PGM handle.
2928 * @param GCPhys The GC physical address.
2929 * @param ppPage Where to store the pointer to the PGMPAGE structure.
2930 * @param ppRam Where to store the pointer to the PGMRAMRANGE structure.
2931 */
2932DECLINLINE(int) pgmPhysGetPageAndRangeEx(PPGM pPGM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam)
2933{
2934 /*
2935 * Optimize for the first range.
2936 */
2937 PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
2938 RTGCPHYS off = GCPhys - pRam->GCPhys;
2939 if (RT_UNLIKELY(off >= pRam->cb))
2940 {
2941 do
2942 {
2943 pRam = pRam->CTX_SUFF(pNext);
2944 if (RT_UNLIKELY(!pRam))
2945 {
2946 *ppRam = NULL; /* Shut up silly GCC warnings. */
2947 *ppPage = NULL; /* ditto */
2948 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2949 }
2950 off = GCPhys - pRam->GCPhys;
2951 } while (off >= pRam->cb);
2952 }
2953 *ppRam = pRam;
2954 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
2955#ifndef VBOX_WITH_NEW_PHYS_CODE
2956
2957 /*
2958 * Make sure it's present.
2959 */
2960 if (RT_UNLIKELY( !PGM_PAGE_GET_HCPHYS(*ppPage)
2961 && (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)))
2962 {
2963#ifdef IN_RING3
2964 int rc = pgmr3PhysGrowRange(PGM2VM(pPGM), GCPhys);
2965#else
2966 int rc = CTXALLMID(VMM, CallHost)(PGM2VM(pPGM), VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
2967#endif
2968 if (VBOX_FAILURE(rc))
2969 {
2970 *ppPage = NULL; /* Shut up silly GCC warnings. */
2971 *ppPage = NULL; /* ditto */
2972 return rc;
2973 }
2974 Assert(rc == VINF_SUCCESS);
2975
2976 }
2977#endif
2978 return VINF_SUCCESS;
2979}
2980
2981
2982/**
2983 * Convert GC Phys to HC Phys.
2984 *
2985 * @returns VBox status.
2986 * @param pPGM PGM handle.
2987 * @param GCPhys The GC physical address.
2988 * @param pHCPhys Where to store the corresponding HC physical address.
2989 *
2990 * @deprecated Doesn't deal with zero, shared or write monitored pages.
2991 * Avoid when writing new code!
2992 */
2993DECLINLINE(int) pgmRamGCPhys2HCPhys(PPGM pPGM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
2994{
2995 PPGMPAGE pPage;
2996 int rc = pgmPhysGetPageEx(pPGM, GCPhys, &pPage);
2997 if (VBOX_FAILURE(rc))
2998 return rc;
2999 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
3000 return VINF_SUCCESS;
3001}
3002
3003
3004#ifndef IN_GC
3005/**
3006 * Queries the Physical TLB entry for a physical guest page,
3007 * attemting to load the TLB entry if necessary.
3008 *
3009 * @returns VBox status code.
3010 * @retval VINF_SUCCESS on success
3011 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
3012 * @param pPGM The PGM instance handle.
3013 * @param GCPhys The address of the guest page.
3014 * @param ppTlbe Where to store the pointer to the TLB entry.
3015 */
3016
3017DECLINLINE(int) pgmPhysPageQueryTlbe(PPGM pPGM, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe)
3018{
3019 int rc;
3020 PPGMPAGEMAPTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
3021 if (pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK))
3022 {
3023 STAM_COUNTER_INC(&pPGM->CTXMID(StatPage,MapTlbHits));
3024 rc = VINF_SUCCESS;
3025 }
3026 else
3027 rc = pgmPhysPageLoadIntoTlb(pPGM, GCPhys);
3028 *ppTlbe = pTlbe;
3029 return rc;
3030}
3031#endif /* !IN_GC */
3032
3033#if !defined(IN_GC) /** @todo && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) */
3034
3035# ifndef VBOX_WITH_NEW_PHYS_CODE
3036/**
3037 * Convert GC Phys to HC Virt.
3038 *
3039 * @returns VBox status.
3040 * @param pPGM PGM handle.
3041 * @param GCPhys The GC physical address.
3042 * @param pHCPtr Where to store the corresponding HC virtual address.
3043 *
3044 * @deprecated This will be eliminated by PGMPhysGCPhys2CCPtr.
3045 */
3046DECLINLINE(int) pgmRamGCPhys2HCPtr(PPGM pPGM, RTGCPHYS GCPhys, PRTHCPTR pHCPtr)
3047{
3048 PPGMRAMRANGE pRam;
3049 PPGMPAGE pPage;
3050 int rc = pgmPhysGetPageAndRangeEx(pPGM, GCPhys, &pPage, &pRam);
3051 if (VBOX_FAILURE(rc))
3052 {
3053 *pHCPtr = 0; /* Shut up silly GCC warnings. */
3054 return rc;
3055 }
3056 RTGCPHYS off = GCPhys - pRam->GCPhys;
3057
3058 if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
3059 {
3060 unsigned iChunk = off >> PGM_DYNAMIC_CHUNK_SHIFT;
3061 *pHCPtr = (RTHCPTR)(pRam->paChunkR3Ptrs[iChunk] + (off & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
3062 return VINF_SUCCESS;
3063 }
3064 if (pRam->pvR3)
3065 {
3066 *pHCPtr = (RTHCPTR)((RTHCUINTPTR)pRam->pvR3 + off); /** @todo @bugref{1865,3202}: Code is converting R3 pointer and maybe using it in R0! */
3067 return VINF_SUCCESS;
3068 }
3069 *pHCPtr = 0; /* Shut up silly GCC warnings. */
3070 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
3071}
3072# endif /* !VBOX_WITH_NEW_PHYS_CODE */
3073
3074
/**
 * Convert GC Phys to HC Virt.
 *
 * @returns VBox status.
 * @param   pVM         VM handle.
 * @param   pRam        Ram range containing GCPhys (caller must ensure this;
 *                      only asserted here).
 * @param   GCPhys      The GC physical address.
 * @param   pHCPtr      Where to store the corresponding HC virtual address
 *                      (set to 0 on any failure).
 *
 * @deprecated  This will be eliminated. Don't use it.
 */
DECLINLINE(int) pgmRamGCPhys2HCPtrWithRange(PVM pVM, PPGMRAMRANGE pRam, RTGCPHYS GCPhys, PRTHCPTR pHCPtr)
{
    RTGCPHYS off = GCPhys - pRam->GCPhys;
    Assert(off < pRam->cb);

    if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
    {
        unsigned idx = (off >> PGM_DYNAMIC_CHUNK_SHIFT);
        /* Physical chunk in dynamically allocated range not present? */
        if (RT_UNLIKELY(!pRam->paChunkR3Ptrs[idx]))
        {
            /* Grow the range: directly in ring-3, via a host call elsewhere. */
#ifdef IN_RING3
            int rc = pgmr3PhysGrowRange(pVM, GCPhys);
#else
            int rc = CTXALLMID(VMM, CallHost)(pVM, VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
#endif
            if (rc != VINF_SUCCESS)
            {
                *pHCPtr = 0; /* Shut up silly GCC warnings. */
                return rc;
            }
        }
        *pHCPtr = (RTHCPTR)(pRam->paChunkR3Ptrs[idx] + (off & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
        return VINF_SUCCESS;
    }
    if (pRam->pvR3)
    {
        *pHCPtr = (RTHCPTR)((RTHCUINTPTR)pRam->pvR3 + off); /** @todo @bugref{1865,3202}: Code is converting R3 pointer and maybe using it in R0! */
        return VINF_SUCCESS;
    }
    *pHCPtr = 0; /* Shut up silly GCC warnings. */
    return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
}
3119
3120#endif /* !IN_GC && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) */
3121
/**
 * Convert GC Phys to HC Virt and HC Phys.
 *
 * @returns VBox status.
 * @param   pPGM    PGM handle.
 * @param   GCPhys  The GC physical address.
 * @param   pHCPtr  Where to store the corresponding HC virtual address.
 * @param   pHCPhys Where to store the HC Physical address and its flags
 *                  (the raw PGMPAGE::HCPhys field, which packs flag bits).
 *
 * @deprecated Will go away or be changed. Only user is MapCR3. MapCR3 will have to do ring-3
 *             and ring-0 locking of the CR3 in a lazy fashion I'm fear... or perhaps not. we'll see.
 */
DECLINLINE(int) pgmRamGCPhys2HCPtrAndHCPhysWithFlags(PPGM pPGM, RTGCPHYS GCPhys, PRTHCPTR pHCPtr, PRTHCPHYS pHCPhys)
{
    PPGMRAMRANGE pRam;
    PPGMPAGE pPage;
    int rc = pgmPhysGetPageAndRangeEx(pPGM, GCPhys, &pPage, &pRam);
    if (VBOX_FAILURE(rc))
    {
        *pHCPtr = 0; /* Shut up crappy GCC warnings */
        *pHCPhys = 0; /* ditto */
        return rc;
    }
    RTGCPHYS off = GCPhys - pRam->GCPhys;

    *pHCPhys = pPage->HCPhys; /** @todo PAGE FLAGS */
    if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
    {
        unsigned idx = (off >> PGM_DYNAMIC_CHUNK_SHIFT);
        /* The chunk pointer table lives in ring-3; in GC / split-address-space
           ring-0 it must be translated to the current context first. */
#if defined(IN_GC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) /* ASSUMES only MapCR3 usage. */
        PRTR3UINTPTR paChunkR3Ptrs = (PRTR3UINTPTR)MMHyperR3ToCC(PGM2VM(pPGM), pRam->paChunkR3Ptrs);
        *pHCPtr = paChunkR3Ptrs[idx] + (off & PGM_DYNAMIC_CHUNK_OFFSET_MASK);
#else
        *pHCPtr = (RTHCPTR)(pRam->paChunkR3Ptrs[idx] + (off & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
#endif
        return VINF_SUCCESS;
    }
    if (pRam->pvR3)
    {
        *pHCPtr = (RTHCPTR)((RTHCUINTPTR)pRam->pvR3 + off); /** @todo @bugref{1865,3202}: Code is converting R3 pointer and maybe using it in R0! */
        return VINF_SUCCESS;
    }
    *pHCPtr = 0;
    return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
}
3167
3168
3169/**
3170 * Clears flags associated with a RAM address.
3171 *
3172 * @returns VBox status code.
3173 * @param pPGM PGM handle.
3174 * @param GCPhys Guest context physical address.
3175 * @param fFlags fFlags to clear. (Bits 0-11.)
3176 */
3177DECLINLINE(int) pgmRamFlagsClearByGCPhys(PPGM pPGM, RTGCPHYS GCPhys, unsigned fFlags)
3178{
3179 PPGMPAGE pPage;
3180 int rc = pgmPhysGetPageEx(pPGM, GCPhys, &pPage);
3181 if (VBOX_FAILURE(rc))
3182 return rc;
3183
3184 fFlags &= ~X86_PTE_PAE_PG_MASK;
3185 pPage->HCPhys &= ~(RTHCPHYS)fFlags; /** @todo PAGE FLAGS */
3186 return VINF_SUCCESS;
3187}
3188
3189
3190/**
3191 * Clears flags associated with a RAM address.
3192 *
3193 * @returns VBox status code.
3194 * @param pPGM PGM handle.
3195 * @param GCPhys Guest context physical address.
3196 * @param fFlags fFlags to clear. (Bits 0-11.)
3197 * @param ppRamHint Where to read and store the ram list hint.
3198 * The caller initializes this to NULL before the call.
3199 */
3200DECLINLINE(int) pgmRamFlagsClearByGCPhysWithHint(PPGM pPGM, RTGCPHYS GCPhys, unsigned fFlags, PPGMRAMRANGE *ppRamHint)
3201{
3202 PPGMPAGE pPage;
3203 int rc = pgmPhysGetPageWithHintEx(pPGM, GCPhys, &pPage, ppRamHint);
3204 if (VBOX_FAILURE(rc))
3205 return rc;
3206
3207 fFlags &= ~X86_PTE_PAE_PG_MASK;
3208 pPage->HCPhys &= ~(RTHCPHYS)fFlags; /** @todo PAGE FLAGS */
3209 return VINF_SUCCESS;
3210}
3211
3212/**
3213 * Sets (bitwise OR) flags associated with a RAM address.
3214 *
3215 * @returns VBox status code.
3216 * @param pPGM PGM handle.
3217 * @param GCPhys Guest context physical address.
3218 * @param fFlags fFlags to set clear. (Bits 0-11.)
3219 */
3220DECLINLINE(int) pgmRamFlagsSetByGCPhys(PPGM pPGM, RTGCPHYS GCPhys, unsigned fFlags)
3221{
3222 PPGMPAGE pPage;
3223 int rc = pgmPhysGetPageEx(pPGM, GCPhys, &pPage);
3224 if (VBOX_FAILURE(rc))
3225 return rc;
3226
3227 fFlags &= ~X86_PTE_PAE_PG_MASK;
3228 pPage->HCPhys |= fFlags; /** @todo PAGE FLAGS */
3229 return VINF_SUCCESS;
3230}
3231
3232
3233/**
3234 * Sets (bitwise OR) flags associated with a RAM address.
3235 *
3236 * @returns VBox status code.
3237 * @param pPGM PGM handle.
3238 * @param GCPhys Guest context physical address.
3239 * @param fFlags fFlags to set clear. (Bits 0-11.)
3240 * @param ppRamHint Where to read and store the ram list hint.
3241 * The caller initializes this to NULL before the call.
3242 */
3243DECLINLINE(int) pgmRamFlagsSetByGCPhysWithHint(PPGM pPGM, RTGCPHYS GCPhys, unsigned fFlags, PPGMRAMRANGE *ppRamHint)
3244{
3245 PPGMPAGE pPage;
3246 int rc = pgmPhysGetPageWithHintEx(pPGM, GCPhys, &pPage, ppRamHint);
3247 if (VBOX_FAILURE(rc))
3248 return rc;
3249
3250 fFlags &= ~X86_PTE_PAE_PG_MASK;
3251 pPage->HCPhys |= fFlags; /** @todo PAGE FLAGS */
3252 return VINF_SUCCESS;
3253}
3254
3255/**
3256 * Calculated the guest physical address of the large (4 MB) page in 32 bits paging mode.
3257 * Takes PSE-36 into account.
3258 *
3259 * @returns guest physical address
3260 * @param pPGM Pointer to the PGM instance data.
3261 * @param Pde Guest Pde
3262 */
3263DECLINLINE(RTGCPHYS) pgmGstGet4MBPhysPage(PPGM pPGM, X86PDE Pde)
3264{
3265 RTGCPHYS GCPhys = Pde.u & X86_PDE4M_PG_MASK;
3266 GCPhys |= (RTGCPHYS)Pde.b.u8PageNoHigh << 32;
3267
3268 return GCPhys & pPGM->GCPhys4MBPSEMask;
3269}
3270
/**
 * Gets the page directory for the specified address.
 *
 * @returns Pointer to the page directory in question.
 * @returns NULL if the page directory is not present or on an invalid page.
 * @param   pPGM    Pointer to the PGM instance data.
 * @param   GCPtr   The address.
 */
DECLINLINE(PX86PDPAE) pgmGstGetPaePD(PPGM pPGM, RTGCUINTPTR GCPtr)
{
    const unsigned iPdPt = GCPtr >> X86_PDPT_SHIFT;
    if (CTXSUFF(pPGM->pGstPaePDPT)->a[iPdPt].n.u1Present)
    {
        /* Fast path: the cached mapping (apGstPaePDs) is valid as long as the
           cached physical address still matches the guest PDPT entry. */
        if ((CTXSUFF(pPGM->pGstPaePDPT)->a[iPdPt].u & X86_PDPE_PG_MASK) == pPGM->aGCPhysGstPaePDs[iPdPt])
            return CTXSUFF(pPGM->apGstPaePDs)[iPdPt];

        /* cache is out-of-sync. */
        PX86PDPAE pPD;
        int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), CTXSUFF(pPGM->pGstPaePDPT)->a[iPdPt].u & X86_PDPE_PG_MASK, &pPD);
        if (VBOX_SUCCESS(rc))
            return pPD;
        AssertMsgFailed(("Impossible! rc=%d PDPE=%#llx\n", rc, CTXSUFF(pPGM->pGstPaePDPT)->a[iPdPt].u));
        /* returning NIL_RTGCPHYS is ok if we assume it's just an invalid page of some kind emulated as all 0s. */
    }
    return NULL;
}
3297
3298
/**
 * Gets the page directory entry for the specified address.
 *
 * @returns Pointer to the page directory entry in question.
 * @returns NULL if the page directory is not present or on an invalid page.
 * @param   pPGM    Pointer to the PGM instance data.
 * @param   GCPtr   The address.
 */
DECLINLINE(PX86PDEPAE) pgmGstGetPaePDEPtr(PPGM pPGM, RTGCUINTPTR GCPtr)
{
    const unsigned iPdPt = GCPtr >> X86_PDPT_SHIFT;
    if (CTXSUFF(pPGM->pGstPaePDPT)->a[iPdPt].n.u1Present)
    {
        const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
        /* Fast path: use the cached PD mapping if it still matches the PDPT entry. */
        if ((CTXSUFF(pPGM->pGstPaePDPT)->a[iPdPt].u & X86_PDPE_PG_MASK) == pPGM->aGCPhysGstPaePDs[iPdPt])
            return &CTXSUFF(pPGM->apGstPaePDs)[iPdPt]->a[iPD];

        /* The cache is out-of-sync. */
        PX86PDPAE pPD;
        int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), CTXSUFF(pPGM->pGstPaePDPT)->a[iPdPt].u & X86_PDPE_PG_MASK, &pPD);
        if (VBOX_SUCCESS(rc))
            return &pPD->a[iPD];
        AssertMsgFailed(("Impossible! rc=%Vrc PDPE=%RX64\n", rc, CTXSUFF(pPGM->pGstPaePDPT)->a[iPdPt].u));
        /* returning NIL_RTGCPHYS is ok if we assume it's just an invalid page or something which we'll emulate as all 0s. */
    }
    return NULL;
}
3326
3327
/**
 * Gets the page directory entry for the specified address.
 *
 * @returns The page directory entry in question.
 * @returns A non-present entry (0) if the page directory is not present or on an invalid page.
 * @param   pPGM    Pointer to the PGM instance data.
 * @param   GCPtr   The address.
 */
DECLINLINE(uint64_t) pgmGstGetPaePDE(PPGM pPGM, RTGCUINTPTR GCPtr)
{
    const unsigned iPdPt = GCPtr >> X86_PDPT_SHIFT;
    if (CTXSUFF(pPGM->pGstPaePDPT)->a[iPdPt].n.u1Present)
    {
        const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
        /* Fast path: use the cached PD mapping if it still matches the PDPT entry. */
        if ((CTXSUFF(pPGM->pGstPaePDPT)->a[iPdPt].u & X86_PDPE_PG_MASK) == pPGM->aGCPhysGstPaePDs[iPdPt])
            return CTXSUFF(pPGM->apGstPaePDs)[iPdPt]->a[iPD].u;

        /* cache is out-of-sync. */
        PX86PDPAE pPD;
        int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), CTXSUFF(pPGM->pGstPaePDPT)->a[iPdPt].u & X86_PDPE_PG_MASK, &pPD);
        if (VBOX_SUCCESS(rc))
            return pPD->a[iPD].u;
        AssertMsgFailed(("Impossible! rc=%d PDPE=%#llx\n", rc, CTXSUFF(pPGM->pGstPaePDPT)->a[iPdPt].u));
    }
    return 0ULL;
}
3354
3355
/**
 * Gets the page directory pointer table entry for the specified address
 * and returns the index into the page directory
 *
 * @returns Pointer to the page directory in question.
 * @returns NULL if the page directory is not present or on an invalid page.
 * @param   pPGM    Pointer to the PGM instance data.
 * @param   GCPtr   The address.
 * @param   piPD    Receives the index into the returned page directory
 */
DECLINLINE(PX86PDPAE) pgmGstGetPaePDPtr(PPGM pPGM, RTGCUINTPTR GCPtr, unsigned *piPD)
{
    const unsigned iPdPt = GCPtr >> X86_PDPT_SHIFT;
    if (CTXSUFF(pPGM->pGstPaePDPT)->a[iPdPt].n.u1Present)
    {
        const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
        /* Fast path: use the cached PD mapping if it still matches the PDPT entry. */
        if ((CTXSUFF(pPGM->pGstPaePDPT)->a[iPdPt].u & X86_PDPE_PG_MASK) == pPGM->aGCPhysGstPaePDs[iPdPt])
        {
            *piPD = iPD;
            return CTXSUFF(pPGM->apGstPaePDs)[iPdPt];
        }

        /* cache is out-of-sync. */
        PX86PDPAE pPD;
        int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), CTXSUFF(pPGM->pGstPaePDPT)->a[iPdPt].u & X86_PDPE_PG_MASK, &pPD);
        if (VBOX_SUCCESS(rc))
        {
            *piPD = iPD;
            return pPD;
        }
        AssertMsgFailed(("Impossible! rc=%d PDPE=%#llx\n", rc, CTXSUFF(pPGM->pGstPaePDPT)->a[iPdPt].u));
        /* returning NIL_RTGCPHYS is ok if we assume it's just an invalid page of some kind emulated as all 0s. */
    }
    return NULL;
}
3391
3392#ifndef IN_GC
/**
 * Gets the page directory pointer entry for the specified address.
 *
 * Walks the guest long-mode (AMD64) PML4 one level down.
 *
 * @returns Pointer to the page directory pointer entry in question.
 * @returns NULL if the page directory is not present or on an invalid page.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 * @param   ppPml4e     Page Map Level-4 Entry (out); always set, even when
 *                      the entry is not present.
 */
DECLINLINE(PX86PDPE) pgmGstGetLongModePDPTPtr(PPGM pPGM, RTGCUINTPTR64 GCPtr, PX86PML4E *ppPml4e)
{
    const unsigned iPml4e = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;

    Assert(pPGM->pGstPaePML4HC);
    *ppPml4e = &pPGM->pGstPaePML4HC->a[iPml4e];
    if ((*ppPml4e)->n.u1Present)
    {
        PX86PDPT pPdpt;
        int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), (*ppPml4e)->u & X86_PML4E_PG_MASK, &pPdpt);
        if (VBOX_FAILURE(rc))
        {
            AssertFailed();
            return NULL;
        }
        const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
        return &pPdpt->a[iPdPt];
    }
    return NULL;
}
3422
/**
 * Gets the page directory entry for the specified address.
 *
 * Walks the guest long-mode (AMD64) paging hierarchy: PML4 -> PDPT -> PD,
 * returning the raw PDE value and the intermediate entries to the caller.
 *
 * @returns The page directory entry in question.
 * @returns A non-present entry (0) if the page directory is not present or on an invalid page.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 * @param   ppPml4e     Page Map Level-4 Entry (out); always set.
 * @param   pPdpe       Page directory pointer table entry (out); only set
 *                      when the PML4 entry is present.
 */
DECLINLINE(uint64_t) pgmGstGetLongModePDE(PPGM pPGM, RTGCUINTPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPE pPdpe)
{
    const unsigned iPml4e = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;

    Assert(pPGM->pGstPaePML4HC);
    *ppPml4e = &pPGM->pGstPaePML4HC->a[iPml4e];
    if ((*ppPml4e)->n.u1Present)
    {
        PX86PDPT pPdptTemp;
        int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), (*ppPml4e)->u & X86_PML4E_PG_MASK, &pPdptTemp);
        if (VBOX_FAILURE(rc))
        {
            AssertFailed();
            return 0ULL;
        }

        const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
        *pPdpe = pPdptTemp->a[iPdPt];
        if (pPdpe->n.u1Present)
        {
            PX86PDPAE pPD;

            rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), pPdpe->u & X86_PDPE_PG_MASK, &pPD);
            if (VBOX_FAILURE(rc))
            {
                AssertFailed();
                return 0ULL;
            }
            const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
            return pPD->a[iPD].u;
        }
    }
    return 0ULL;
}
3467
3468/**
3469 * Gets the page directory entry for the specified address.
3470 *
3471 * @returns The page directory entry in question.
3472 * @returns A non-present entry if the page directory is not present or on an invalid page.
3473 * @param pPGM Pointer to the PGM instance data.
3474 * @param GCPtr The address.
3475 */
3476DECLINLINE(uint64_t) pgmGstGetLongModePDE(PPGM pPGM, RTGCUINTPTR64 GCPtr)
3477{
3478 const unsigned iPml4e = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
3479
3480 Assert(pPGM->pGstPaePML4HC);
3481 if (pPGM->pGstPaePML4HC->a[iPml4e].n.u1Present)
3482 {
3483 PX86PDPT pPdptTemp;
3484 int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), pPGM->pGstPaePML4HC->a[iPml4e].u & X86_PML4E_PG_MASK, &pPdptTemp);
3485 if (VBOX_FAILURE(rc))
3486 {
3487 AssertFailed();
3488 return 0ULL;
3489 }
3490
3491 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
3492 if (pPdptTemp->a[iPdPt].n.u1Present)
3493 {
3494 PX86PDPAE pPD;
3495
3496 rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), pPdptTemp->a[iPdPt].u & X86_PDPE_PG_MASK, &pPD);
3497 if (VBOX_FAILURE(rc))
3498 {
3499 AssertFailed();
3500 return 0ULL;
3501 }
3502 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
3503 return pPD->a[iPD].u;
3504 }
3505 }
3506 return 0ULL;
3507}
3508
3509/**
3510 * Gets the page directory entry for the specified address.
3511 *
3512 * @returns Pointer to the page directory entry in question.
3513 * @returns NULL if the page directory is not present or on an invalid page.
3514 * @param pPGM Pointer to the PGM instance data.
3515 * @param GCPtr The address.
3516 */
3517DECLINLINE(PX86PDEPAE) pgmGstGetLongModePDEPtr(PPGM pPGM, RTGCUINTPTR64 GCPtr)
3518{
3519 const unsigned iPml4e = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
3520
3521 Assert(pPGM->pGstPaePML4HC);
3522 if (pPGM->pGstPaePML4HC->a[iPml4e].n.u1Present)
3523 {
3524 PX86PDPT pPdptTemp;
3525 int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), pPGM->pGstPaePML4HC->a[iPml4e].u & X86_PML4E_PG_MASK, &pPdptTemp);
3526 if (VBOX_FAILURE(rc))
3527 {
3528 AssertFailed();
3529 return NULL;
3530 }
3531
3532 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
3533 if (pPdptTemp->a[iPdPt].n.u1Present)
3534 {
3535 PX86PDPAE pPD;
3536
3537 rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), pPdptTemp->a[iPdPt].u & X86_PDPE_PG_MASK, &pPD);
3538 if (VBOX_FAILURE(rc))
3539 {
3540 AssertFailed();
3541 return NULL;
3542 }
3543 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
3544 return &pPD->a[iPD];
3545 }
3546 }
3547 return NULL;
3548}
3549
3550
3551/**
3552 * Gets the GUEST page directory pointer for the specified address.
3553 *
3554 * @returns The page directory in question.
3555 * @returns NULL if the page directory is not present or on an invalid page.
3556 * @param pPGM Pointer to the PGM instance data.
3557 * @param GCPtr The address.
3558 * @param ppPml4e Page Map Level-4 Entry (out)
3559 * @param pPdpe Page directory pointer table entry (out)
3560 * @param piPD Receives the index into the returned page directory
3561 */
3562DECLINLINE(PX86PDPAE) pgmGstGetLongModePDPtr(PPGM pPGM, RTGCUINTPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPE pPdpe, unsigned *piPD)
3563{
3564 const unsigned iPml4e = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
3565
3566 Assert(pPGM->pGstPaePML4HC);
3567 *ppPml4e = &pPGM->pGstPaePML4HC->a[iPml4e];
3568 if ((*ppPml4e)->n.u1Present)
3569 {
3570 PX86PDPT pPdptTemp;
3571 int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), (*ppPml4e)->u & X86_PML4E_PG_MASK, &pPdptTemp);
3572 if (VBOX_FAILURE(rc))
3573 {
3574 AssertFailed();
3575 return 0ULL;
3576 }
3577
3578 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
3579 *pPdpe = pPdptTemp->a[iPdPt];
3580 if (pPdpe->n.u1Present)
3581 {
3582 PX86PDPAE pPD;
3583
3584 rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), pPdpe->u & X86_PDPE_PG_MASK, &pPD);
3585 if (VBOX_FAILURE(rc))
3586 {
3587 AssertFailed();
3588 return 0ULL;
3589 }
3590 *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
3591 return pPD;
3592 }
3593 }
3594 return 0ULL;
3595}
3596
3597/**
3598 * Gets the GUEST page directory pointer for the specified address.
3599 *
3600 * @returns The page directory in question.
3601 * @returns NULL if the page directory is not present or on an invalid page.
3602 * @param pPGM Pointer to the PGM instance data.
3603 * @param GCPtr The address.
3604 * @param piPD Receives the index into the returned page directory
3605 */
3606DECLINLINE(PX86PDPAE) pgmGstGetLongModePDPtr(PPGM pPGM, RTGCUINTPTR64 GCPtr, unsigned *piPD)
3607{
3608 PX86PML4E pPml4e;
3609 PX86PDPE pPdpe;
3610 const unsigned iPml4e = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
3611
3612 Assert(pPGM->pGstPaePML4HC);
3613 pPml4e = &pPGM->pGstPaePML4HC->a[iPml4e];
3614 if (pPml4e->n.u1Present)
3615 {
3616 PX86PDPT pPdptTemp;
3617 int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), pPml4e->u & X86_PML4E_PG_MASK, &pPdptTemp);
3618 if (VBOX_FAILURE(rc))
3619 {
3620 AssertFailed();
3621 return 0ULL;
3622 }
3623
3624 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
3625 pPdpe = &pPdptTemp->a[iPdPt];
3626 if (pPdpe->n.u1Present)
3627 {
3628 PX86PDPAE pPD;
3629
3630 rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), pPdpe->u & X86_PDPE_PG_MASK, &pPD);
3631 if (VBOX_FAILURE(rc))
3632 {
3633 AssertFailed();
3634 return 0ULL;
3635 }
3636 *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
3637 return pPD;
3638 }
3639 }
3640 return 0ULL;
3641}
3642
3643#endif /* !IN_GC */
3644
3645/**
3646 * Checks if any of the specified page flags are set for the given page.
3647 *
3648 * @returns true if any of the flags are set.
3649 * @returns false if all the flags are clear.
3650 * @param pPGM PGM handle.
3651 * @param GCPhys The GC physical address.
3652 * @param fFlags The flags to check for.
3653 */
3654DECLINLINE(bool) pgmRamTestFlags(PPGM pPGM, RTGCPHYS GCPhys, uint64_t fFlags)
3655{
3656 PPGMPAGE pPage = pgmPhysGetPage(pPGM, GCPhys);
3657 return pPage
3658 && (pPage->HCPhys & fFlags) != 0; /** @todo PAGE FLAGS */
3659}
3660
3661
3662/**
3663 * Gets the page state for a physical handler.
3664 *
3665 * @returns The physical handler page state.
3666 * @param pCur The physical handler in question.
3667 */
3668DECLINLINE(unsigned) pgmHandlerPhysicalCalcState(PPGMPHYSHANDLER pCur)
3669{
3670 switch (pCur->enmType)
3671 {
3672 case PGMPHYSHANDLERTYPE_PHYSICAL_WRITE:
3673 return PGM_PAGE_HNDL_PHYS_STATE_WRITE;
3674
3675 case PGMPHYSHANDLERTYPE_MMIO:
3676 case PGMPHYSHANDLERTYPE_PHYSICAL_ALL:
3677 return PGM_PAGE_HNDL_PHYS_STATE_ALL;
3678
3679 default:
3680 AssertFatalMsgFailed(("Invalid type %d\n", pCur->enmType));
3681 }
3682}
3683
3684
3685/**
3686 * Gets the page state for a virtual handler.
3687 *
3688 * @returns The virtual handler page state.
3689 * @param pCur The virtual handler in question.
3690 * @remarks This should never be used on a hypervisor access handler.
3691 */
3692DECLINLINE(unsigned) pgmHandlerVirtualCalcState(PPGMVIRTHANDLER pCur)
3693{
3694 switch (pCur->enmType)
3695 {
3696 case PGMVIRTHANDLERTYPE_WRITE:
3697 return PGM_PAGE_HNDL_VIRT_STATE_WRITE;
3698 case PGMVIRTHANDLERTYPE_ALL:
3699 return PGM_PAGE_HNDL_VIRT_STATE_ALL;
3700 default:
3701 AssertFatalMsgFailed(("Invalid type %d\n", pCur->enmType));
3702 }
3703}
3704
3705
/**
 * Clears one physical page of a virtual handler
 *
 * Unlinks the page's phys-to-virt node from the AVL tree (or from its alias
 * chain if it is not the chain head), resets the node, and clears the
 * virtual-handler state on the PGMPAGE.
 *
 * @param   pPGM    Pointer to the PGM instance.
 * @param   pCur    Virtual handler structure
 * @param   iPage   Physical page index
 *
 * @remark  Only used when PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL is being set, so no
 *          need to care about other handlers in the same page.
 */
DECLINLINE(void) pgmHandlerVirtualClearPage(PPGM pPGM, PPGMVIRTHANDLER pCur, unsigned iPage)
{
    const PPGMPHYS2VIRTHANDLER pPhys2Virt = &pCur->aPhysToVirt[iPage];

    /*
     * Remove the node from the tree (it's supposed to be in the tree if we get here!).
     */
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
    AssertReleaseMsg(pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_IN_TREE,
                     ("pPhys2Virt=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
                      pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias));
#endif
    if (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_IS_HEAD)
    {
        /* We're the head of the alias chain. */
        PPGMPHYS2VIRTHANDLER pRemove = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysRemove(&pPGM->CTXSUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key); NOREF(pRemove);
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
        AssertReleaseMsg(pRemove != NULL,
                         ("pPhys2Virt=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
                          pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias));
        AssertReleaseMsg(pRemove == pPhys2Virt,
                         ("wanted: pPhys2Virt=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n"
                          "   got:    pRemove=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
                          pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias,
                          pRemove, pRemove->Core.Key, pRemove->Core.KeyLast, pRemove->offVirtHandler, pRemove->offNextAlias));
#endif
        if (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK)
        {
            /* Insert the next list in the alias chain into the tree. */
            PPGMPHYS2VIRTHANDLER pNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPhys2Virt + (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
            AssertReleaseMsg(pNext->offNextAlias & PGMPHYS2VIRTHANDLER_IN_TREE,
                             ("pNext=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
                              pNext, pNext->Core.Key, pNext->Core.KeyLast, pNext->offVirtHandler, pNext->offNextAlias));
#endif
            pNext->offNextAlias |= PGMPHYS2VIRTHANDLER_IS_HEAD;
            bool fRc = RTAvlroGCPhysInsert(&pPGM->CTXSUFF(pTrees)->PhysToVirtHandlers, &pNext->Core);
            AssertRelease(fRc);
        }
    }
    else
    {
        /* Locate the previous node in the alias chain. */
        PPGMPHYS2VIRTHANDLER pPrev = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGet(&pPGM->CTXSUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key);
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
        AssertReleaseMsg(pPrev != pPhys2Virt,
                         ("pPhys2Virt=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} pPrev=%p\n",
                          pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias, pPrev));
#endif
        /* Walk the chain (offsets are relative to each node) until we find
           our node, then splice it out by adjusting the previous node's offset. */
        for (;;)
        {
            PPGMPHYS2VIRTHANDLER pNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPrev + (pPrev->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
            if (pNext == pPhys2Virt)
            {
                /* unlink. */
                LogFlow(("pgmHandlerVirtualClearPage: removed %p:{.offNextAlias=%#RX32} from alias chain. prev %p:{.offNextAlias=%#RX32} [%VGp-%VGp]\n",
                         pPhys2Virt, pPhys2Virt->offNextAlias, pPrev, pPrev->offNextAlias, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast));
                if (!(pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK))
                    pPrev->offNextAlias &= ~PGMPHYS2VIRTHANDLER_OFF_MASK;
                else
                {
                    PPGMPHYS2VIRTHANDLER pNewNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPhys2Virt + (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
                    pPrev->offNextAlias = ((intptr_t)pNewNext - (intptr_t)pPrev)
                                        | (pPrev->offNextAlias & ~PGMPHYS2VIRTHANDLER_OFF_MASK);
                }
                break;
            }

            /* next */
            if (pNext == pPrev)
            {
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
                AssertReleaseMsg(pNext != pPrev,
                                 ("pPhys2Virt=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} pPrev=%p\n",
                                  pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias, pPrev));
#endif
                break;
            }
            pPrev = pNext;
        }
    }
    Log2(("PHYS2VIRT: Removing %VGp-%VGp %#RX32 %s\n",
          pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias, R3STRING(pCur->pszDesc)));
    pPhys2Virt->offNextAlias = 0;
    pPhys2Virt->Core.KeyLast = NIL_RTGCPHYS; /* require reinsert */

    /*
     * Clear the ram flags for this page.
     */
    PPGMPAGE pPage = pgmPhysGetPage(pPGM, pPhys2Virt->Core.Key);
    AssertReturnVoid(pPage);
    PGM_PAGE_SET_HNDL_VIRT_STATE(pPage, PGM_PAGE_HNDL_VIRT_STATE_NONE);
}
3809
3810
3811/**
3812 * Internal worker for finding a 'in-use' shadow page give by it's physical address.
3813 *
3814 * @returns Pointer to the shadow page structure.
3815 * @param pPool The pool.
3816 * @param HCPhys The HC physical address of the shadow page.
3817 */
3818DECLINLINE(PPGMPOOLPAGE) pgmPoolGetPage(PPGMPOOL pPool, RTHCPHYS HCPhys)
3819{
3820 /*
3821 * Look up the page.
3822 */
3823 PPGMPOOLPAGE pPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, HCPhys & X86_PTE_PAE_PG_MASK);
3824 AssertFatalMsg(pPage && pPage->enmKind != PGMPOOLKIND_FREE, ("HCPhys=%VHp pPage=%p type=%d\n", HCPhys, pPage, (pPage) ? pPage->enmKind : 0));
3825 return pPage;
3826}
3827
3828
3829/**
3830 * Internal worker for finding a 'in-use' shadow page give by it's physical address.
3831 *
3832 * @returns Pointer to the shadow page structure.
3833 * @param pPool The pool.
3834 * @param idx The pool page index.
3835 */
3836DECLINLINE(PPGMPOOLPAGE) pgmPoolGetPageByIdx(PPGMPOOL pPool, unsigned idx)
3837{
3838 AssertFatalMsg(idx >= PGMPOOL_IDX_FIRST && idx < pPool->cCurPages, ("idx=%d\n", idx));
3839 return &pPool->aPages[idx];
3840}
3841
3842
3843#ifdef PGMPOOL_WITH_GCPHYS_TRACKING
/**
 * Clear references to guest physical memory.
 *
 * Drops one shadow-page reference from the physical page tracking data
 * packed into PGMPAGE::HCPhys.
 *
 * @param   pPool       The pool.
 * @param   pPoolPage   The pool page.
 * @param   pPhysPage   The physical guest page tracking structure.
 */
DECLINLINE(void) pgmTrackDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPoolPage, PPGMPAGE pPhysPage)
{
    /*
     * Just deal with the simple case here.
     */
#ifdef LOG_ENABLED
    const RTHCPHYS HCPhysOrg = pPhysPage->HCPhys; /** @todo PAGE FLAGS */
#endif
    const unsigned cRefs = pPhysPage->HCPhys >> MM_RAM_FLAGS_CREFS_SHIFT; /** @todo PAGE FLAGS */
    if (cRefs == 1)
    {
        /* Single reference: it must be this pool page - just clear the tracking bits. */
        Assert(pPoolPage->idx == ((pPhysPage->HCPhys >> MM_RAM_FLAGS_IDX_SHIFT) & MM_RAM_FLAGS_IDX_MASK));
        pPhysPage->HCPhys = pPhysPage->HCPhys & MM_RAM_FLAGS_NO_REFS_MASK;
    }
    else
        /* Multiple references: handled by the physical cross-reference extent code. */
        pgmPoolTrackPhysExtDerefGCPhys(pPool, pPoolPage, pPhysPage);
    LogFlow(("pgmTrackDerefGCPhys: HCPhys=%RHp -> %RHp\n", HCPhysOrg, pPhysPage->HCPhys));
}
3869#endif
3870
3871
3872#ifdef PGMPOOL_WITH_CACHE
/**
 * Moves the page to the head of the age list.
 *
 * This is done when the cached page is used in one way or another.
 * The age list is a doubly linked list threaded through the pool pages by
 * index (iAgePrev/iAgeNext); NIL_PGMPOOL_IDX terminates either end.
 *
 * @param   pPool   The pool.
 * @param   pPage   The cached page.
 * @todo inline in PGMInternal.h!
 */
DECLINLINE(void) pgmPoolCacheUsed(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
{
    /*
     * Move to the head of the age list.
     */
    if (pPage->iAgePrev != NIL_PGMPOOL_IDX)
    {
        /* unlink */
        pPool->aPages[pPage->iAgePrev].iAgeNext = pPage->iAgeNext;
        if (pPage->iAgeNext != NIL_PGMPOOL_IDX)
            pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->iAgePrev;
        else
            pPool->iAgeTail = pPage->iAgePrev;

        /* insert at head */
        pPage->iAgePrev = NIL_PGMPOOL_IDX;
        pPage->iAgeNext = pPool->iAgeHead;
        Assert(pPage->iAgeNext != NIL_PGMPOOL_IDX); /* we would've already been head then */
        pPool->iAgeHead = pPage->idx;
        pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->idx;
    }
}
3904#endif /* PGMPOOL_WITH_CACHE */
3905
/**
 * Tells if mappings are to be put into the shadow page table or not
 *
 * @returns boolean result
 * @param   pPGM    Pointer to the PGM instance data.
 */

DECLINLINE(bool) pgmMapAreMappingsEnabled(PPGM pPGM)
{
#ifdef IN_RING0
    /* There are no mappings in VT-x and AMD-V mode. */
    Assert(pPGM->fDisableMappings);
    return false;
#else
    return !pPGM->fDisableMappings;
#endif
}
3923
3924/** @} */
3925
3926#endif
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette