VirtualBox

source: vbox/trunk/src/VBox/VMM/PGMInternal.h@ 8842

Last change on this file since 8842 was 8659, checked in by vboxsync, 17 years ago

Updates for 64 bits paging.
Removed conditional dirty and accessed bits syncing. Doesn't make sense not to do this.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 149.8 KB
Line 
1/* $Id: PGMInternal.h 8659 2008-05-07 14:39:41Z vboxsync $ */
2/** @file
3 * PGM - Internal header file.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22#ifndef ___PGMInternal_h
23#define ___PGMInternal_h
24
25#include <VBox/cdefs.h>
26#include <VBox/types.h>
27#include <VBox/err.h>
28#include <VBox/stam.h>
29#include <VBox/param.h>
30#include <VBox/vmm.h>
31#include <VBox/mm.h>
32#include <VBox/pdmcritsect.h>
33#include <VBox/pdmapi.h>
34#include <VBox/dis.h>
35#include <VBox/dbgf.h>
36#include <VBox/log.h>
37#include <VBox/gmm.h>
38#include <iprt/avl.h>
39#include <iprt/assert.h>
40#include <iprt/critsect.h>
41
42#if !defined(IN_PGM_R3) && !defined(IN_PGM_R0) && !defined(IN_PGM_GC)
43# error "Not in PGM! This is an internal header!"
44#endif
45
46
47/** @defgroup grp_pgm_int Internals
48 * @ingroup grp_pgm
49 * @internal
50 * @{
51 */
52
53
/** @name PGM Compile Time Config
 * @{
 */

/**
 * Solve page-is-out-of-sync issues inside Guest Context (in PGMGC.cpp).
 * Comment it out if it breaks something.
 */
#define PGM_OUT_OF_SYNC_IN_GC

/**
 * Check and skip global PDEs for non-global flushes.
 */
#define PGM_SKIP_GLOBAL_PAGEDIRS_ON_NONGLOBAL_FLUSH

/**
 * Sync N pages instead of a whole page table.
 */
#define PGM_SYNC_N_PAGES

/**
 * Number of pages to sync during a page fault.
 *
 * When PGMPOOL_WITH_GCPHYS_TRACKING is enabled using high values here
 * causes a lot of unnecessary extents and also is slower than taking more \#PFs.
 */
#define PGM_SYNC_NR_PAGES 8

/**
 * Number of PGMPhysRead/Write cache entries.
 * NOTE(review): original comment said "must be <= sizeof(uint64_t)"; presumably
 * the intent is a power of two no larger than 64 (one bit per entry in a
 * uint64_t) — confirm against the cache implementation.
 */
#define PGM_MAX_PHYSCACHE_ENTRIES 64
#define PGM_MAX_PHYSCACHE_ENTRIES_MASK (PGM_MAX_PHYSCACHE_ENTRIES-1)

/**
 * Enable caching of PGMR3PhysRead/WriteByte/Word/Dword.
 */
#define PGM_PHYSMEMACCESS_CACHING

/** @def PGMPOOL_WITH_CACHE
 * Enable aggressive caching using the page pool.
 *
 * This requires PGMPOOL_WITH_USER_TRACKING and PGMPOOL_WITH_MONITORING.
 */
#define PGMPOOL_WITH_CACHE

/** @def PGMPOOL_WITH_MIXED_PT_CR3
 * When defined, we'll deal with 'uncachable' pages.
 */
#ifdef PGMPOOL_WITH_CACHE
# define PGMPOOL_WITH_MIXED_PT_CR3
#endif

/** @def PGMPOOL_WITH_MONITORING
 * Monitor the guest pages which are shadowed.
 * When this is enabled, PGMPOOL_WITH_CACHE or PGMPOOL_WITH_GCPHYS_TRACKING must
 * be enabled as well.
 * @remark doesn't really work without caching now. (Mixed PT/CR3 change.)
 */
#ifdef PGMPOOL_WITH_CACHE
# define PGMPOOL_WITH_MONITORING
#endif

/** @def PGMPOOL_WITH_GCPHYS_TRACKING
 * Tracking of the shadow pages mapping guest physical pages.
 *
 * This is very expensive; the current cache prototype is trying to figure out
 * whether it will be acceptable with an aggressive caching policy.
 */
#if defined(PGMPOOL_WITH_CACHE) || defined(PGMPOOL_WITH_MONITORING)
# define PGMPOOL_WITH_GCPHYS_TRACKING
#endif

/** @def PGMPOOL_WITH_USER_TRACKING
 * Tracking users of shadow pages. This is required for the linking of shadow page
 * tables and physical guest addresses.
 */
#if defined(PGMPOOL_WITH_GCPHYS_TRACKING) || defined(PGMPOOL_WITH_CACHE) || defined(PGMPOOL_WITH_MONITORING)
# define PGMPOOL_WITH_USER_TRACKING
#endif

/** @def PGMPOOL_CFG_MAX_GROW
 * The maximum number of pages to add to the pool in one go.
 */
#define PGMPOOL_CFG_MAX_GROW (_256K >> PAGE_SHIFT)

/** @def VBOX_STRICT_PGM_HANDLER_VIRTUAL
 * Enables some extra assertions for virtual handlers (mainly phys2virt related).
 */
#ifdef VBOX_STRICT
# define VBOX_STRICT_PGM_HANDLER_VIRTUAL
#endif
/** @} */
147
148
/** @name PDPT and PML4 flags.
 * These are placed in the three bits available for system programs in
 * the PDPT and PML4 entries.
 * @{ */
/** The entry is a permanent one and must always be present.
 * Never free such an entry. */
#define PGM_PLXFLAGS_PERMANENT RT_BIT_64(10)
/** Mapping (hypervisor allocated pagetable). */
#define PGM_PLXFLAGS_MAPPING RT_BIT_64(11)
/** @} */

/** @name Page directory flags.
 * These are placed in the three bits available for system programs in
 * the page directory entries.
 * @{ */
/** Mapping (hypervisor allocated pagetable). */
#define PGM_PDFLAGS_MAPPING RT_BIT_64(10)
/** Made read-only to facilitate dirty bit tracking. */
#define PGM_PDFLAGS_TRACK_DIRTY RT_BIT_64(11)
/** @} */

/** @name Page flags.
 * These are placed in the three bits available for system programs in
 * the page entries.
 * @{ */
/** Made read-only to facilitate dirty bit tracking. */
#define PGM_PTFLAGS_TRACK_DIRTY RT_BIT_64(9)

#ifndef PGM_PTFLAGS_CSAM_VALIDATED
/** Scanned and approved by CSAM (tm).
 * NOTE: Must be identical to the one defined in CSAMInternal.h!!
 * @todo Move PGM_PTFLAGS_* and PGM_PDFLAGS_* to VBox/pgm.h. */
#define PGM_PTFLAGS_CSAM_VALIDATED RT_BIT_64(11)
#endif
/** @} */

/** @name Defines used to indicate the shadow and guest paging mode in the templates.
 * @{ */
#define PGM_TYPE_REAL 1
#define PGM_TYPE_PROT 2
#define PGM_TYPE_32BIT 3
#define PGM_TYPE_PAE 4
#define PGM_TYPE_AMD64 5
/** @} */

/** Macro for checking if the guest is using paging.
 * @param uType PGM_TYPE_*
 * @remark ASSUMES certain order of the PGM_TYPE_* values.
 */
#define PGM_WITH_PAGING(uType) ((uType) >= PGM_TYPE_32BIT)

/** Macro for checking if the guest supports the NX bit.
 * @param uType PGM_TYPE_*
 * @remark ASSUMES certain order of the PGM_TYPE_* values.
 */
#define PGM_WITH_NX(uType) ((uType) >= PGM_TYPE_PAE)
205
206
/** @def PGM_HCPHYS_2_PTR
 * Maps a HC physical page pool address to a virtual address.
 *
 * @returns VBox status code.
 * @param pVM The VM handle.
 * @param HCPhys The HC physical address to map to a virtual one.
 * @param ppv Where to store the virtual address. No need to cast this.
 *
 * @remark In GC this uses PGMGCDynMapHCPage(), so it will consume one of the
 * small page window entries employed by that function. Be careful.
 * @remark There is no need to assert on the result.
 */
#ifdef IN_GC
# define PGM_HCPHYS_2_PTR(pVM, HCPhys, ppv) PGMGCDynMapHCPage(pVM, HCPhys, (void **)(ppv))
#else
# define PGM_HCPHYS_2_PTR(pVM, HCPhys, ppv) MMPagePhys2PageEx(pVM, HCPhys, (void **)(ppv))
#endif

/** @def PGM_GCPHYS_2_PTR
 * Maps a GC physical page address to a virtual address.
 *
 * @returns VBox status code.
 * @param pVM The VM handle.
 * @param GCPhys The GC physical address to map to a virtual one.
 * @param ppv Where to store the virtual address. No need to cast this.
 *
 * @remark In GC this uses PGMGCDynMapGCPage(), so it will consume one of the
 * small page window entries employed by that function. Be careful.
 * @remark There is no need to assert on the result.
 */
#ifdef IN_GC
# define PGM_GCPHYS_2_PTR(pVM, GCPhys, ppv) PGMGCDynMapGCPage(pVM, GCPhys, (void **)(ppv))
#else
# define PGM_GCPHYS_2_PTR(pVM, GCPhys, ppv) PGMPhysGCPhys2HCPtr(pVM, GCPhys, 1 /* one page only */, (void **)(ppv)) /** @todo this isn't asserting, use PGMRamGCPhys2HCPtr! */
#endif

/** @def PGM_GCPHYS_2_PTR_EX
 * Maps an unaligned GC physical page address to a virtual address.
 *
 * @returns VBox status code.
 * @param pVM The VM handle.
 * @param GCPhys The GC physical address to map to a virtual one.
 * @param ppv Where to store the virtual address. No need to cast this.
 *
 * @remark In GC this uses PGMGCDynMapGCPage(), so it will consume one of the
 * small page window entries employed by that function. Be careful.
 * @remark There is no need to assert on the result.
 */
#ifdef IN_GC
# define PGM_GCPHYS_2_PTR_EX(pVM, GCPhys, ppv) PGMGCDynMapGCPageEx(pVM, GCPhys, (void **)(ppv))
#else
# define PGM_GCPHYS_2_PTR_EX(pVM, GCPhys, ppv) PGMPhysGCPhys2HCPtr(pVM, GCPhys, 1 /* one page only */, (void **)(ppv)) /** @todo this isn't asserting, use PGMRamGCPhys2HCPtr! */
#endif

/** @def PGM_INVL_PG
 * Invalidates a page when in GC; does nothing in HC.
 *
 * @param GCVirt The virtual address of the page to invalidate.
 */
#ifdef IN_GC
# define PGM_INVL_PG(GCVirt) ASMInvalidatePage((void *)(GCVirt))
#else
# define PGM_INVL_PG(GCVirt) ((void)0)
#endif

/** @def PGM_INVL_BIG_PG
 * Invalidates a 4MB page directory entry when in GC; does nothing in HC.
 * (Implemented as a full CR3 reload since INVLPG cannot be relied on for
 * large pages here.)
 *
 * @param GCVirt The virtual address within the page directory to invalidate.
 */
#ifdef IN_GC
# define PGM_INVL_BIG_PG(GCVirt) ASMReloadCR3()
#else
# define PGM_INVL_BIG_PG(GCVirt) ((void)0)
#endif

/** @def PGM_INVL_GUEST_TLBS()
 * Invalidates all guest TLBs (via CR3 reload in GC); does nothing in HC.
 */
#ifdef IN_GC
# define PGM_INVL_GUEST_TLBS() ASMReloadCR3()
#else
# define PGM_INVL_GUEST_TLBS() ((void)0)
#endif
291
292
/**
 * Structure for tracking GC Mappings.
 *
 * This structure is used by linked lists in both GC and HC.
 */
typedef struct PGMMAPPING
{
    /** Pointer to next entry (R3). */
    R3PTRTYPE(struct PGMMAPPING *) pNextR3;
    /** Pointer to next entry (R0). */
    R0PTRTYPE(struct PGMMAPPING *) pNextR0;
    /** Pointer to next entry (GC). */
    GCPTRTYPE(struct PGMMAPPING *) pNextGC;
    /** Start virtual address. */
    RTGCUINTPTR GCPtr;
    /** Last virtual address (inclusive). */
    RTGCUINTPTR GCPtrLast;
    /** Range size (bytes). */
    RTGCUINTPTR cb;
    /** Pointer to relocation callback function. */
    R3PTRTYPE(PFNPGMRELOCATE) pfnRelocate;
    /** User argument to the callback. */
    R3PTRTYPE(void *) pvUser;
    /** Mapping description / name. For easing debugging. */
    R3PTRTYPE(const char *) pszDesc;
    /** Number of page tables. */
    RTUINT cPTs;
#if HC_ARCH_BITS != GC_ARCH_BITS
    RTUINT uPadding0; /**< Alignment padding. */
#endif
    /** Array of page table mapping data. Each entry
     * describes one page table. The array can be longer
     * than the declared length.
     */
    struct
    {
        /** The HC physical address of the page table. */
        RTHCPHYS HCPhysPT;
        /** The HC physical address of the first PAE page table. */
        RTHCPHYS HCPhysPaePT0;
        /** The HC physical address of the second PAE page table. */
        RTHCPHYS HCPhysPaePT1;
        /** The HC virtual address of the 32-bit page table. */
        R3PTRTYPE(PX86PT) pPTR3;
        /** The HC virtual address of the two PAE page tables (i.e. 1024 entries instead of 512). */
        R3PTRTYPE(PX86PTPAE) paPaePTsR3;
        /** The GC virtual address of the 32-bit page table. */
        GCPTRTYPE(PX86PT) pPTGC;
        /** The GC virtual address of the two PAE page tables. */
        GCPTRTYPE(PX86PTPAE) paPaePTsGC;
        /** The R0 virtual address of the 32-bit page table.
         * (Original comment said "GC" — copy/paste; the member is R0.) */
        R0PTRTYPE(PX86PT) pPTR0;
        /** The R0 virtual address of the two PAE page tables. */
        R0PTRTYPE(PX86PTPAE) paPaePTsR0;
    } aPTs[1];
} PGMMAPPING;
/** Pointer to structure for tracking GC Mappings. */
typedef struct PGMMAPPING *PPGMMAPPING;
351
352
/**
 * Physical page access handler structure.
 *
 * This is used to keep track of physical address ranges
 * which are being monitored in some kind of way.
 */
typedef struct PGMPHYSHANDLER
{
    /** Core node for the tree based on physical ranges (key: GC physical address). */
    AVLROGCPHYSNODECORE Core;
    /** Access type. */
    PGMPHYSHANDLERTYPE enmType;
    /** Number of pages to update. */
    uint32_t cPages;
    /** Pointer to R3 callback function. */
    R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnHandlerR3;
    /** User argument for R3 handlers. */
    R3PTRTYPE(void *) pvUserR3;
    /** Pointer to R0 callback function. */
    R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnHandlerR0;
    /** User argument for R0 handlers. */
    R0PTRTYPE(void *) pvUserR0;
    /** Pointer to GC callback function. */
    GCPTRTYPE(PFNPGMGCPHYSHANDLER) pfnHandlerGC;
    /** User argument for GC handlers. */
    GCPTRTYPE(void *) pvUserGC;
    /** Description / Name. For easing debugging. */
    R3PTRTYPE(const char *) pszDesc;
#ifdef VBOX_WITH_STATISTICS
    /** Profiling of this handler. */
    STAMPROFILE Stat;
#endif
} PGMPHYSHANDLER;
/** Pointer to a physical page access handler structure. */
typedef PGMPHYSHANDLER *PPGMPHYSHANDLER;
387
388
/**
 * Cache node for the physical addresses covered by a virtual handler.
 */
typedef struct PGMPHYS2VIRTHANDLER
{
    /** Core node for the tree based on physical ranges. */
    AVLROGCPHYSNODECORE Core;
    /** Offset from this struct to the PGMVIRTHANDLER structure. */
    int32_t offVirtHandler;
    /** Offset of the next alias relative to this one.
     * Bit 0 is used for indicating whether we're in the tree.
     * Bit 1 is used for indicating that we're the head node.
     */
    int32_t offNextAlias;
} PGMPHYS2VIRTHANDLER;
/** Pointer to a phys to virtual handler structure. */
typedef PGMPHYS2VIRTHANDLER *PPGMPHYS2VIRTHANDLER;

/** The bit in PGMPHYS2VIRTHANDLER::offNextAlias used to indicate that the
 * node is in the tree. */
#define PGMPHYS2VIRTHANDLER_IN_TREE RT_BIT(0)
/** The bit in PGMPHYS2VIRTHANDLER::offNextAlias used to indicate that the
 * node is the head of an alias chain.
 * The PGMPHYS2VIRTHANDLER_IN_TREE is always set if this bit is set. */
#define PGMPHYS2VIRTHANDLER_IS_HEAD RT_BIT(1)
/** The mask to apply to PGMPHYS2VIRTHANDLER::offNextAlias to get the offset. */
#define PGMPHYS2VIRTHANDLER_OFF_MASK (~(int32_t)3)
416
417
/**
 * Virtual page access handler structure.
 *
 * This is used to keep track of virtual address ranges
 * which are being monitored in some kind of way.
 */
typedef struct PGMVIRTHANDLER
{
    /** Core node for the tree based on virtual ranges. */
    AVLROGCPTRNODECORE Core;
    /** Alignment padding.
     * (Original comment said "Number of cache pages" — copy/paste from cPages below.) */
    uint32_t u32Padding;
    /** Access type. */
    PGMVIRTHANDLERTYPE enmType;
    /** Number of cache pages. */
    uint32_t cPages;

/** @todo The next two members are redundant. It adds some readability though. */
    /** Start of the range. */
    RTGCPTR GCPtr;
    /** End of the range (exclusive). */
    RTGCPTR GCPtrLast;
    /** Size of the range (in bytes). */
    RTGCUINTPTR cb;
    /** Pointer to the GC callback function. */
    GCPTRTYPE(PFNPGMGCVIRTHANDLER) pfnHandlerGC;
    /** Pointer to the HC callback function for invalidation. */
    R3PTRTYPE(PFNPGMHCVIRTINVALIDATE) pfnInvalidateHC;
    /** Pointer to the HC callback function. */
    R3PTRTYPE(PFNPGMHCVIRTHANDLER) pfnHandlerHC;
    /** Description / Name. For easing debugging. */
    R3PTRTYPE(const char *) pszDesc;
#ifdef VBOX_WITH_STATISTICS
    /** Profiling of this handler. */
    STAMPROFILE Stat;
#endif
    /** Array of cached physical addresses for the monitored ranged. */
    PGMPHYS2VIRTHANDLER aPhysToVirt[HC_ARCH_BITS == 32 ? 1 : 2];
} PGMVIRTHANDLER;
/** Pointer to a virtual page access handler structure. */
typedef PGMVIRTHANDLER *PPGMVIRTHANDLER;
459
460
/**
 * Page type.
 * @remarks This enum has to fit in a 3-bit field (see PGMPAGE::u3Type).
 * @remarks The predicates below ASSUME this exact declaration order.
 * @todo convert to \#defines.
 */
typedef enum PGMPAGETYPE
{
    /** The usual invalid zero entry. */
    PGMPAGETYPE_INVALID = 0,
    /** RAM page. (RWX) */
    PGMPAGETYPE_RAM,
    /** MMIO2 page. (RWX) */
    PGMPAGETYPE_MMIO2,
    /** Shadowed ROM. (RWX) */
    PGMPAGETYPE_ROM_SHADOW,
    /** ROM page. (R-X) */
    PGMPAGETYPE_ROM,
    /** MMIO page. (---) */
    PGMPAGETYPE_MMIO,
    /** End of valid entries. */
    PGMPAGETYPE_END
} PGMPAGETYPE;
AssertCompile(PGMPAGETYPE_END < 7);

/** @name Page type predicates.
 * NOTE(review): the <= comparisons also match PGMPAGETYPE_INVALID (0);
 * presumably these are never queried for invalid pages — confirm.
 * @{ */
#define PGMPAGETYPE_IS_READABLE(type) ( (type) <= PGMPAGETYPE_ROM )
#define PGMPAGETYPE_IS_WRITEABLE(type) ( (type) <= PGMPAGETYPE_ROM_SHADOW )
#define PGMPAGETYPE_IS_RWX(type) ( (type) <= PGMPAGETYPE_ROM_SHADOW )
#define PGMPAGETYPE_IS_ROX(type) ( (type) == PGMPAGETYPE_ROM )
#define PGMPAGETYPE_IS_NP(type) ( (type) == PGMPAGETYPE_MMIO )
/** @} */
493
494
/**
 * A Physical Guest Page tracking structure.
 *
 * The format of this structure is complicated because we have to fit a lot
 * of information into as few bits as possible. The format is also subject
 * to change (there is one coming up soon). Which means that we'll be
 * using PGM_PAGE_GET_*, PGM_PAGE_IS_* and PGM_PAGE_SET_* macros for *all*
 * accesses to the structure.
 */
typedef struct PGMPAGE
{
    /** The physical address and a whole lot of other stuff. All bits are used! */
    RTHCPHYS HCPhys;
    /** The page state (PGM_PAGE_STATE_*). */
    uint32_t u2StateX : 2;
    /** Flag indicating that a write monitored page was written to when set. */
    uint32_t fWrittenToX : 1;
    /** For later. */
    uint32_t fSomethingElse : 1;
    /** The Page ID.
     * @todo Merge with HCPhys once we've liberated HCPhys of its stuff.
     * The HCPhys will be 100% static. */
    uint32_t idPageX : 28;
    /** The page type (PGMPAGETYPE). */
    uint32_t u3Type : 3;
    /** The physical handler state (PGM_PAGE_HNDL_PHYS_STATE*) */
    uint32_t u2HandlerPhysStateX : 2;
    /** The virtual handler state (PGM_PAGE_HNDL_VIRT_STATE*) */
    uint32_t u2HandlerVirtStateX : 2;
    /** Unused bits. */
    uint32_t u29B : 25;
} PGMPAGE;
AssertCompileSize(PGMPAGE, 16);
/** Pointer to a physical guest page. */
typedef PGMPAGE *PPGMPAGE;
/** Pointer to a const physical guest page. */
typedef const PGMPAGE *PCPGMPAGE;
/** Pointer to a physical guest page pointer. */
typedef PPGMPAGE *PPPGMPAGE;
533
534
/**
 * Clears the page structure.
 * NOTE(review): the handler-state bitfields (u2HandlerPhysStateX,
 * u2HandlerVirtStateX) are not touched here — confirm this is intentional.
 * @param pPage Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_CLEAR(pPage) \
 do { \
 (pPage)->HCPhys = 0; \
 (pPage)->u2StateX = 0; \
 (pPage)->fWrittenToX = 0; \
 (pPage)->fSomethingElse = 0; \
 (pPage)->idPageX = 0; \
 (pPage)->u3Type = 0; \
 (pPage)->u29B = 0; \
 } while (0)

/**
 * Initializes the page structure.
 * @param pPage Pointer to the physical guest page tracking structure.
 * @param _HCPhys The host physical address of the page.
 * @param _idPage The page ID (NIL_GMM_PAGEID for the ZERO page).
 * @param _uType The page type (PGMPAGETYPE).
 * @param _uState The page state (PGM_PAGE_STATE_*).
 */
#define PGM_PAGE_INIT(pPage, _HCPhys, _idPage, _uType, _uState) \
 do { \
 (pPage)->HCPhys = (_HCPhys); \
 (pPage)->u2StateX = (_uState); \
 (pPage)->fWrittenToX = 0; \
 (pPage)->fSomethingElse = 0; \
 (pPage)->idPageX = (_idPage); \
 /*(pPage)->u3Type = (_uType); - later */ \
 PGM_PAGE_SET_TYPE(pPage, _uType); \
 (pPage)->u29B = 0; \
 } while (0)

/**
 * Initializes the page structure of a ZERO page.
 * @param pPage Pointer to the physical guest page tracking structure.
 * @param pVM The VM handle (source of HCPhysZeroPg in the new phys code).
 * @param _uType The page type (PGMPAGETYPE).
 */
#ifdef VBOX_WITH_NEW_PHYS_CODE
# define PGM_PAGE_INIT_ZERO(pPage, pVM, _uType) \
 PGM_PAGE_INIT(pPage, (pVM)->pgm.s.HCPhysZeroPg, NIL_GMM_PAGEID, (_uType), PGM_PAGE_STATE_ZERO)
#else
# define PGM_PAGE_INIT_ZERO(pPage, pVM, _uType) \
 PGM_PAGE_INIT(pPage, 0, NIL_GMM_PAGEID, (_uType), PGM_PAGE_STATE_ZERO)
#endif
/** Temporary hack. Replaced by PGM_PAGE_INIT_ZERO once the old code is kicked out. */
# define PGM_PAGE_INIT_ZERO_REAL(pPage, pVM, _uType) \
 PGM_PAGE_INIT(pPage, (pVM)->pgm.s.HCPhysZeroPg, NIL_GMM_PAGEID, (_uType), PGM_PAGE_STATE_ZERO)
580
581
/** @name The Page state, PGMPAGE::u2StateX.
 * @{ */
/** The zero page.
 * This is a per-VM page that's never ever mapped writable. */
#define PGM_PAGE_STATE_ZERO 0
/** An allocated page.
 * This is a per-VM page allocated from the page pool (or wherever
 * we get MMIO2 pages from if the type is MMIO2).
 */
#define PGM_PAGE_STATE_ALLOCATED 1
/** An allocated page that's being monitored for writes.
 * The shadow page table mappings are read-only. When a write occurs, the
 * fWrittenTo member is set, the page remapped as read-write and the state
 * moved back to allocated. */
#define PGM_PAGE_STATE_WRITE_MONITORED 2
/** The page is shared, aka. copy-on-write.
 * This is a page that's shared with other VMs. */
#define PGM_PAGE_STATE_SHARED 3
/** @} */
601
602
/**
 * Gets the page state.
 * @returns page state (PGM_PAGE_STATE_*).
 * @param pPage Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_STATE(pPage) ( (pPage)->u2StateX )

/**
 * Sets the page state.
 * @param pPage Pointer to the physical guest page tracking structure.
 * @param _uState The new page state.
 */
#define PGM_PAGE_SET_STATE(pPage, _uState) \
 do { (pPage)->u2StateX = (_uState); } while (0)


/**
 * Gets the host physical address of the guest page.
 * (Masks off the flag bits stored in the low 12 and high 16 bits of HCPhys.)
 * @returns host physical address (RTHCPHYS).
 * @param pPage Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_HCPHYS(pPage) ( (pPage)->HCPhys & UINT64_C(0x0000fffffffff000) )

/**
 * Sets the host physical address of the guest page,
 * preserving the flag bits outside the address mask.
 * @param pPage Pointer to the physical guest page tracking structure.
 * @param _HCPhys The new host physical address.
 */
#define PGM_PAGE_SET_HCPHYS(pPage, _HCPhys) \
 do { (pPage)->HCPhys = (((pPage)->HCPhys) & UINT64_C(0xffff000000000fff)) \
 | ((_HCPhys) & UINT64_C(0x0000fffffffff000)); } while (0)

/**
 * Get the Page ID.
 * @returns The Page ID; NIL_GMM_PAGEID if it's a ZERO page.
 * @param pPage Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_PAGEID(pPage) ( (pPage)->idPageX )
/* later:
#define PGM_PAGE_GET_PAGEID(pPage) ( ((uint32_t)(pPage)->HCPhys >> (48 - 12))
 | ((uint32_t)(pPage)->HCPhys & 0xfff) )
*/
/**
 * Sets the Page ID.
 * @param pPage Pointer to the physical guest page tracking structure.
 * @param _idPage The new page ID.
 */
#define PGM_PAGE_SET_PAGEID(pPage, _idPage) do { (pPage)->idPageX = (_idPage); } while (0)
/* later:
#define PGM_PAGE_SET_PAGEID(pPage, _idPage) do { (pPage)->HCPhys = (((pPage)->HCPhys) & UINT64_C(0x0000fffffffff000)) \
 | ((_idPage) & 0xfff) \
 | (((_idPage) & 0x0ffff000) << (48-12)); } while (0)
*/

/**
 * Get the Chunk ID.
 * @returns The Chunk ID; NIL_GMM_CHUNKID if it's a ZERO page.
 * @param pPage Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_CHUNKID(pPage) ( (pPage)->idPageX >> GMM_CHUNKID_SHIFT )
/* later:
#if GMM_CHUNKID_SHIFT == 12
# define PGM_PAGE_GET_CHUNKID(pPage) ( (uint32_t)((pPage)->HCPhys >> 48) )
#elif GMM_CHUNKID_SHIFT > 12
# define PGM_PAGE_GET_CHUNKID(pPage) ( (uint32_t)((pPage)->HCPhys >> (48 + (GMM_CHUNKID_SHIFT - 12)) )
#elif GMM_CHUNKID_SHIFT < 12
# define PGM_PAGE_GET_CHUNKID(pPage) ( ( (uint32_t)((pPage)->HCPhys >> 48) << (12 - GMM_CHUNKID_SHIFT) ) \
 | ( (uint32_t)((pPage)->HCPhys & 0xfff) >> GMM_CHUNKID_SHIFT ) )
#else
# error "GMM_CHUNKID_SHIFT isn't defined or something."
#endif
*/

/**
 * Get the index of the page within the allocation chunk.
 * @returns The page index.
 * @param pPage Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) ( (pPage)->idPageX & GMM_PAGEID_IDX_MASK )
/* later:
#if GMM_CHUNKID_SHIFT <= 12
# define PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) ( (uint32_t)((pPage)->HCPhys & GMM_PAGEID_IDX_MASK) )
#else
# define PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) ( (uint32_t)((pPage)->HCPhys & 0xfff) \
 | ( (uint32_t)((pPage)->HCPhys >> 48) & (RT_BIT_32(GMM_CHUNKID_SHIFT - 12) - 1) ) )
#endif
*/
689
690
/**
 * Gets the page type.
 * @returns The page type (PGMPAGETYPE).
 * @param pPage Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_TYPE(pPage) (pPage)->u3Type

/**
 * Sets the page type.
 * In the old phys code the MM_RAM_FLAGS_* bits in HCPhys are kept in sync
 * for ROM/ROM_SHADOW/MMIO2 types (set only — never cleared here).
 * @param pPage Pointer to the physical guest page tracking structure.
 * @param _enmType The new page type (PGMPAGETYPE).
 */
#ifdef VBOX_WITH_NEW_PHYS_CODE
#define PGM_PAGE_SET_TYPE(pPage, _enmType) \
 do { (pPage)->u3Type = (_enmType); } while (0)
#else
#define PGM_PAGE_SET_TYPE(pPage, _enmType) \
 do { \
 (pPage)->u3Type = (_enmType); \
 if ((_enmType) == PGMPAGETYPE_ROM) \
 (pPage)->HCPhys |= MM_RAM_FLAGS_ROM; \
 else if ((_enmType) == PGMPAGETYPE_ROM_SHADOW) \
 (pPage)->HCPhys |= MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO2; \
 else if ((_enmType) == PGMPAGETYPE_MMIO2) \
 (pPage)->HCPhys |= MM_RAM_FLAGS_MMIO2; \
 } while (0)
#endif
718
719
/**
 * Checks if the page is 'reserved'.
 * @returns true/false.
 * @param pPage Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_IS_RESERVED(pPage) ( !!((pPage)->HCPhys & MM_RAM_FLAGS_RESERVED) )

/**
 * Checks if the page is marked for MMIO.
 * @returns true/false.
 * @param pPage Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_IS_MMIO(pPage) ( !!((pPage)->HCPhys & MM_RAM_FLAGS_MMIO) )

/**
 * Checks if the page is backed by the ZERO page.
 * @returns true/false.
 * @param pPage Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_IS_ZERO(pPage) ( (pPage)->u2StateX == PGM_PAGE_STATE_ZERO )

/**
 * Checks if the page is backed by a SHARED page.
 * @returns true/false.
 * @param pPage Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_IS_SHARED(pPage) ( (pPage)->u2StateX == PGM_PAGE_STATE_SHARED )


/**
 * Marks the page as written to (for GMM change monitoring).
 * @param pPage Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_SET_WRITTEN_TO(pPage) do { (pPage)->fWrittenToX = 1; } while (0)

/**
 * Clears the written-to indicator.
 * @param pPage Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_CLEAR_WRITTEN_TO(pPage) do { (pPage)->fWrittenToX = 0; } while (0)

/**
 * Checks if the page was marked as written-to.
 * @returns true/false.
 * @param pPage Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_IS_WRITTEN_TO(pPage) ( (pPage)->fWrittenToX )
767
768
/** @name Physical Access Handler State values (PGMPAGE::u2HandlerPhysStateX).
 *
 * @remarks The values are assigned in order of priority, so we can calculate
 * the correct state for a page with different handlers installed.
 * @{ */
/** No handler installed. */
#define PGM_PAGE_HNDL_PHYS_STATE_NONE 0
/** Monitoring is temporarily disabled. */
#define PGM_PAGE_HNDL_PHYS_STATE_DISABLED 1
/** Write access is monitored. */
#define PGM_PAGE_HNDL_PHYS_STATE_WRITE 2
/** All access is monitored. */
#define PGM_PAGE_HNDL_PHYS_STATE_ALL 3
/** @} */

/**
 * Gets the physical access handler state of a page.
 * @returns PGM_PAGE_HNDL_PHYS_STATE_* value.
 * @param pPage Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) ( (pPage)->u2HandlerPhysStateX )

/**
 * Sets the physical access handler state of a page.
 * @param pPage Pointer to the physical guest page tracking structure.
 * @param _uState The new state value.
 */
#define PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, _uState) \
 do { (pPage)->u2HandlerPhysStateX = (_uState); } while (0)

/**
 * Checks if the page has any physical access handlers, including temporarily disabled ones.
 * @returns true/false
 * @param pPage Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_HAS_ANY_PHYSICAL_HANDLERS(pPage) ( (pPage)->u2HandlerPhysStateX != PGM_PAGE_HNDL_PHYS_STATE_NONE )

/**
 * Checks if the page has any active physical access handlers.
 * @returns true/false
 * @param pPage Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage) ( (pPage)->u2HandlerPhysStateX >= PGM_PAGE_HNDL_PHYS_STATE_WRITE )
812
813
/** @name Virtual Access Handler State values (PGMPAGE::u2HandlerVirtStateX).
 *
 * @remarks The values are assigned in order of priority, so we can calculate
 * the correct state for a page with different handlers installed.
 * @{ */
/** No handler installed. */
#define PGM_PAGE_HNDL_VIRT_STATE_NONE 0
/* 1 is reserved so the lineup is identical with the physical ones. */
/** Write access is monitored. */
#define PGM_PAGE_HNDL_VIRT_STATE_WRITE 2
/** All access is monitored. */
#define PGM_PAGE_HNDL_VIRT_STATE_ALL 3
/** @} */

/**
 * Gets the virtual access handler state of a page.
 * @returns PGM_PAGE_HNDL_VIRT_STATE_* value.
 * @param pPage Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) ( (pPage)->u2HandlerVirtStateX )

/**
 * Sets the virtual access handler state of a page.
 * @param pPage Pointer to the physical guest page tracking structure.
 * @param _uState The new state value.
 */
#define PGM_PAGE_SET_HNDL_VIRT_STATE(pPage, _uState) \
 do { (pPage)->u2HandlerVirtStateX = (_uState); } while (0)

/**
 * Checks if the page has any virtual access handlers.
 * @returns true/false
 * @param pPage Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_HAS_ANY_VIRTUAL_HANDLERS(pPage) ( (pPage)->u2HandlerVirtStateX != PGM_PAGE_HNDL_VIRT_STATE_NONE )

/**
 * Same as PGM_PAGE_HAS_ANY_VIRTUAL_HANDLERS - can't disable pages in
 * virtual handlers (there is no DISABLED state for them).
 * @returns true/false
 * @param pPage Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage) PGM_PAGE_HAS_ANY_VIRTUAL_HANDLERS(pPage)
857
858
/**
 * Checks if the page has any access handlers (physical or virtual),
 * including temporarily disabled ones.
 * @returns true/false
 * @param pPage Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_HAS_ANY_HANDLERS(pPage) \
 ( (pPage)->u2HandlerPhysStateX != PGM_PAGE_HNDL_PHYS_STATE_NONE \
 || (pPage)->u2HandlerVirtStateX != PGM_PAGE_HNDL_VIRT_STATE_NONE )

/**
 * Checks if the page has any active access handlers (physical or virtual).
 * @returns true/false
 * @param pPage Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) \
 ( (pPage)->u2HandlerPhysStateX >= PGM_PAGE_HNDL_PHYS_STATE_WRITE \
 || (pPage)->u2HandlerVirtStateX >= PGM_PAGE_HNDL_VIRT_STATE_WRITE )

/**
 * Checks if the page has any active access handlers catching all accesses.
 * @returns true/false
 * @param pPage Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage) \
 ( (pPage)->u2HandlerPhysStateX == PGM_PAGE_HNDL_PHYS_STATE_ALL \
 || (pPage)->u2HandlerVirtStateX == PGM_PAGE_HNDL_VIRT_STATE_ALL )
886
/**
 * Ram range for GC Phys to HC Phys conversion.
 *
 * Can be used for HC Virt to GC Phys and HC Virt to HC Phys
 * conversions too, but we'll let MM handle that for now.
 *
 * This structure is used by linked lists in both GC and HC.
 *
 * @note The member layout (including the alignment padding members) is
 *       significant: instances are shared between contexts, so the layout
 *       must be identical in R3, R0 and GC builds.
 */
typedef struct PGMRAMRANGE
{
    /** Pointer to the next RAM range - for R3. */
    R3PTRTYPE(struct PGMRAMRANGE *)     pNextR3;
    /** Pointer to the next RAM range - for R0. */
    R0PTRTYPE(struct PGMRAMRANGE *)     pNextR0;
    /** Pointer to the next RAM range - for GC. */
    GCPTRTYPE(struct PGMRAMRANGE *)     pNextGC;
#if GC_ARCH_BITS == 32
    /** Pointer alignment. */
    RTGCPTR                             GCPtrAlignment;
#endif
    /** Start of the range. Page aligned. */
    RTGCPHYS                            GCPhys;
    /** Last address in the range (inclusive). Page aligned (-1). */
    RTGCPHYS                            GCPhysLast;
    /** Size of the range. (Page aligned of course). */
    RTGCPHYS                            cb;
    /** MM_RAM_* flags */
    uint32_t                            fFlags;
#ifdef VBOX_WITH_NEW_PHYS_CODE
    uint32_t                            u32Alignment; /**< alignment. */
#else
    /** HC virtual lookup ranges for chunks. Currently only used with MM_RAM_FLAGS_DYNAMIC_ALLOC ranges. */
    GCPTRTYPE(PRTHCPTR)                 pavHCChunkGC;
    /** HC virtual lookup ranges for chunks. Currently only used with MM_RAM_FLAGS_DYNAMIC_ALLOC ranges. */
    R3R0PTRTYPE(PRTHCPTR)               pavHCChunkHC;
#endif
    /** Start of the HC mapping of the range. This is only used for MMIO2. */
    R3PTRTYPE(void *)                   pvHC;
    /** The range description. */
    R3PTRTYPE(const char *)             pszDesc;

    /** Padding to make aPage aligned on sizeof(PGMPAGE). */
#ifdef VBOX_WITH_NEW_PHYS_CODE
    uint32_t                            au32Reserved[2];
#elif HC_ARCH_BITS == 32
    uint32_t                            au32Reserved[1];
#endif

    /** Array of physical guest page tracking structures.
     * Declared with one element; the real length covers the whole range
     * (allocated with the structure). */
    PGMPAGE                             aPages[1];
} PGMRAMRANGE;
/** Pointer to Ram range for GC Phys to HC Phys conversion. */
typedef PGMRAMRANGE *PPGMRAMRANGE;
941
/** Returns the HC (host context) pointer corresponding to a RAM range and a
 * physical byte offset into it.
 *
 * For dynamically allocated ranges (MM_RAM_FLAGS_DYNAMIC_ALLOC) the per-chunk
 * HC lookup table is consulted; otherwise the offset is applied directly to
 * the start of the HC mapping (pvHC).
 *
 * @param   pRam    Pointer to the PGMRAMRANGE. Evaluated more than once!
 * @param   off     Byte offset into the range. Evaluated more than once!
 *
 * @remark  The macro arguments are now parenthesized for macro hygiene.
 *          The trailing semicolon and the unparenthesized ternary are
 *          deliberately kept as-is for compatibility with existing callers
 *          that use this expansion as a complete statement. */
#define PGMRAMRANGE_GETHCPTR(pRam, off) \
    ((pRam)->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC) ? (RTHCPTR)((RTHCUINTPTR)CTXSUFF((pRam)->pavHCChunk)[((off) >> PGM_DYNAMIC_CHUNK_SHIFT)] + ((off) & PGM_DYNAMIC_CHUNK_OFFSET_MASK)) \
                                                  : (RTHCPTR)((RTHCUINTPTR)(pRam)->pvHC + (off));
946
/**
 * Per page tracking structure for ROM image.
 *
 * A ROM image may have a shadow page, in which case we may have
 * two pages backing it. This structure contains the PGMPAGE for
 * both while PGMRAMRANGE have a copy of the active one. It is
 * important that these aren't out of sync in any regard other
 * than page pool tracking data.
 */
typedef struct PGMROMPAGE
{
    /** The page structure for the virgin ROM page. */
    PGMPAGE     Virgin;
    /** The page structure for the shadow RAM page. */
    PGMPAGE     Shadow;
    /** The current protection setting. */
    PGMROMPROT  enmProt;
    /** Pad the structure size to a multiple of 8. */
    uint32_t    u32Padding;
} PGMROMPAGE;
/** Pointer to a ROM page tracking structure. */
typedef PGMROMPAGE *PPGMROMPAGE;
969
970
/**
 * A registered ROM image.
 *
 * This is needed to keep track of ROM image since they generally
 * intrude into a PGMRAMRANGE. It also keeps track of additional
 * info like the two page sets (read-only virgin and read-write shadow),
 * the current state of each page.
 *
 * Because access handlers cannot easily be executed in a different
 * context, the ROM ranges needs to be accessible and in all contexts.
 */
typedef struct PGMROMRANGE
{
    /** Pointer to the next range - R3. */
    R3PTRTYPE(struct PGMROMRANGE *)     pNextR3;
    /** Pointer to the next range - R0. */
    R0PTRTYPE(struct PGMROMRANGE *)     pNextR0;
    /** Pointer to the next range - GC. */
    GCPTRTYPE(struct PGMROMRANGE *)     pNextGC;
#if GC_ARCH_BITS == 32
    RTGCPTR                             GCPtrAlignment; /**< Pointer alignment. */
#endif
    /** Address of the range. */
    RTGCPHYS                            GCPhys;
    /** Address of the last byte in the range. */
    RTGCPHYS                            GCPhysLast;
    /** Size of the range. */
    RTGCPHYS                            cb;
    /** The flags (PGMPHYS_ROM_FLAG_*). */
    uint32_t                            fFlags;
    /** Alignment padding ensuring that aPages is sizeof(PGMROMPAGE) aligned.
     * NOTE(review): the member name contains a typo ('Alignemnt'); kept as-is
     * for source compatibility. */
    uint32_t                            au32Alignemnt[HC_ARCH_BITS == 32 ? 7 : 3];
    /** Pointer to the original bits when PGMPHYS_ROM_FLAG_PERMANENT_BINARY was specified.
     * This is used for strictness checks. */
    R3PTRTYPE(const void *)             pvOriginal;
    /** The ROM description. */
    R3PTRTYPE(const char *)             pszDesc;
    /** The per page tracking structures.
     * Declared with one element; the real length covers the whole range. */
    PGMROMPAGE                          aPages[1];
} PGMROMRANGE;
/** Pointer to a ROM range. */
typedef PGMROMRANGE *PPGMROMRANGE;
1013
1014
/**
 * A registered MMIO2 (= Device RAM) range.
 *
 * There are a few reason why we need to keep track of these
 * registrations. One of them is the deregistration & cleanup
 * stuff, while another is that the PGMRAMRANGE associated with
 * such a region may have to be removed from the ram range list.
 *
 * Overlapping with a RAM range has to be 100% or none at all. The
 * pages in the existing RAM range must not be ROM nor MMIO. A guru
 * meditation will be raised if a partial overlap or an overlap of
 * ROM pages is encountered. On an overlap we will free all the
 * existing RAM pages and put in the ram range pages instead.
 */
typedef struct PGMMMIO2RANGE
{
    /** The owner of the range. (a device) */
    PPDMDEVINSR3                        pDevInsR3;
    /** Pointer to the ring-3 mapping of the allocation. */
    RTR3PTR                             pvR3;
    /** Pointer to the next range - R3. */
    R3PTRTYPE(struct PGMMMIO2RANGE *)   pNextR3;
    /** Whether it's mapped or not. */
    bool                                fMapped;
    /** Whether it's overlapping or not. */
    bool                                fOverlapping;
    /** The PCI region number.
     * @remarks This ASSUMES that nobody will ever really need to have multiple
     *          PCI devices with matching MMIO region numbers on a single device. */
    uint8_t                             iRegion;
    /** Alignment padding for putting the ram range on a PGMPAGE alignment boundary.
     * NOTE(review): member name typo ('Alignemnt') kept for source compatibility. */
    uint8_t                             abAlignemnt[HC_ARCH_BITS == 32 ? 1 : 5];
    /** The associated RAM range. */
    PGMRAMRANGE                         RamRange;
} PGMMMIO2RANGE;
/** Pointer to a MMIO2 range. */
typedef PGMMMIO2RANGE *PPGMMMIO2RANGE;
1052
1053
1054
1055
/** @todo r=bird: fix typename. */
/**
 * PGMPhysRead/Write cache entry
 */
typedef struct PGMPHYSCACHE_ENTRY
{
    /** HC pointer to physical page */
    R3PTRTYPE(uint8_t *)    pbHC;
    /** GC Physical address for cache entry */
    RTGCPHYS                GCPhys;
#if HC_ARCH_BITS == 64 && GC_ARCH_BITS == 32
    /** Alignment padding.
     * NOTE(review): typed RTGCPHYS despite the u32 name prefix — presumably
     * intentional to pad the entry to 16 bytes; confirm before changing. */
    RTGCPHYS                u32Padding0;
#endif
} PGMPHYSCACHE_ENTRY;
1070
/**
 * PGMPhysRead/Write cache to reduce REM memory access overhead
 */
typedef struct PGMPHYSCACHE
{
    /** Bitmap of valid cache entries (one bit per entry, which implies
     * PGM_MAX_PHYSCACHE_ENTRIES must not exceed 64). */
    uint64_t            aEntries;
    /** Cache entries */
    PGMPHYSCACHE_ENTRY  Entry[PGM_MAX_PHYSCACHE_ENTRIES];
} PGMPHYSCACHE;
1081
1082
/** Pointer to an allocation chunk ring-3 mapping. */
typedef struct PGMCHUNKR3MAP *PPGMCHUNKR3MAP;
/** Pointer to an allocation chunk ring-3 mapping pointer. */
typedef PPGMCHUNKR3MAP *PPPGMCHUNKR3MAP;

/**
 * Ring-3 tracking structure for an allocation chunk ring-3 mapping.
 *
 * The primary tree (Core) uses the chunk id as key.
 * The secondary tree (AgeCore) is used for ageing and uses ageing sequence number as key.
 */
typedef struct PGMCHUNKR3MAP
{
    /** The key is the chunk id. */
    AVLU32NODECORE      Core;
    /** The key is the ageing sequence number. */
    AVLLU32NODECORE     AgeCore;
    /** The current age thingy. */
    uint32_t            iAge;
    /** The current reference count. */
    uint32_t volatile   cRefs;
    /** The current permanent reference count. */
    uint32_t volatile   cPermRefs;
    /** The mapping address. */
    void               *pv;
} PGMCHUNKR3MAP;
1109
/**
 * Allocation chunk ring-3 mapping TLB entry.
 */
typedef struct PGMCHUNKR3MAPTLBE
{
    /** The chunk id. */
    uint32_t volatile                   idChunk;
#if HC_ARCH_BITS == 64
    uint32_t                            u32Padding; /**< alignment padding. */
#endif
    /** The chunk map. */
    R3R0PTRTYPE(PPGMCHUNKR3MAP) volatile pChunk;
} PGMCHUNKR3MAPTLBE;
/** Pointer to the an allocation chunk ring-3 mapping TLB entry. */
typedef PGMCHUNKR3MAPTLBE *PPGMCHUNKR3MAPTLBE;

/** The number of TLB entries in PGMCHUNKR3MAPTLB.
 * @remark Must be a power of two value (the index macro masks with
 *         PGM_CHUNKR3MAPTLB_ENTRIES - 1). */
#define PGM_CHUNKR3MAPTLB_ENTRIES   32
1129
/**
 * Allocation chunk ring-3 mapping TLB.
 *
 * @remarks We use a TLB to speed up lookups by avoiding walking the AVL.
 *          At first glance this might look kinda odd since AVL trees are
 *          supposed to give the most optimal lookup times of all trees
 *          due to their balancing. However, take a tree with 1023 nodes
 *          in it, that's 10 levels, meaning that most searches have to go
 *          down 9 levels before they find what they want. This isn't fast
 *          compared to a TLB hit. There is the factor of cache misses,
 *          and of course the problem with trees and branch prediction.
 *          This is why we use TLBs in front of most of the trees.
 *
 * @todo    Generalize this TLB + AVL stuff, shouldn't be all that
 *          difficult when we switch to inlined AVL trees (from kStuff).
 */
typedef struct PGMCHUNKR3MAPTLB
{
    /** The TLB entries. */
    PGMCHUNKR3MAPTLBE   aEntries[PGM_CHUNKR3MAPTLB_ENTRIES];
} PGMCHUNKR3MAPTLB;

/**
 * Calculates the index of a guest page in the Ring-3 Chunk TLB.
 * @returns Chunk TLB index.
 * @param   idChunk     The Chunk ID.
 */
#define PGM_CHUNKR3MAPTLB_IDX(idChunk)  ( (idChunk) & (PGM_CHUNKR3MAPTLB_ENTRIES - 1) )
1158
1159
/**
 * Ring-3 guest page mapping TLB entry.
 * @remarks used in ring-0 as well at the moment.
 */
typedef struct PGMPAGER3MAPTLBE
{
    /** Address of the page. */
    RTGCPHYS volatile                   GCPhys;
    /** The guest page. */
    R3R0PTRTYPE(PPGMPAGE) volatile      pPage;
    /** Pointer to the page mapping tracking structure, PGMCHUNKR3MAP. */
    R3R0PTRTYPE(PPGMCHUNKR3MAP) volatile pMap;
    /** The address */
    R3R0PTRTYPE(void *) volatile        pv;
#if HC_ARCH_BITS == 32
    uint32_t                            u32Padding; /**< alignment padding. */
#endif
} PGMPAGER3MAPTLBE;
/** Pointer to an entry in the HC physical TLB. */
typedef PGMPAGER3MAPTLBE *PPGMPAGER3MAPTLBE;


/** The number of entries in the ring-3 guest page mapping TLB.
 * @remarks The value must be a power of two (the index macro masks with
 *          PGM_PAGER3MAPTLB_ENTRIES - 1). */
#define PGM_PAGER3MAPTLB_ENTRIES 64
1185
/**
 * Ring-3 guest page mapping TLB.
 * @remarks used in ring-0 as well at the moment.
 */
typedef struct PGMPAGER3MAPTLB
{
    /** The TLB entries. */
    PGMPAGER3MAPTLBE    aEntries[PGM_PAGER3MAPTLB_ENTRIES];
} PGMPAGER3MAPTLB;
/** Pointer to the ring-3 guest page mapping TLB. */
typedef PGMPAGER3MAPTLB *PPGMPAGER3MAPTLB;

/**
 * Calculates the index of the TLB entry for the specified guest page.
 * @returns Physical TLB index.
 * @param   GCPhys      The guest physical address.
 */
#define PGM_PAGER3MAPTLB_IDX(GCPhys)    ( ((GCPhys) >> PAGE_SHIFT) & (PGM_PAGER3MAPTLB_ENTRIES - 1) )
1204
1205
/** @name Context neutral page mapper TLB.
 *
 * Hoping to avoid some code and bug duplication parts of the GCxxx->CCPtr
 * code is written in a kind of context neutral way. Time will show whether
 * this actually makes sense or not...
 *
 * @{ */
/** @typedef PPGMPAGEMAPTLB
 * The page mapper TLB pointer type for the current context. */
/** @typedef PPGMPAGEMAPTLBE
 * The page mapper TLB entry pointer type for the current context. */
/** @typedef PPPGMPAGEMAPTLBE
 * The page mapper TLB entry pointer pointer type for the current context. */
/** @def PGMPAGEMAPTLB_ENTRIES
 * The number of TLB entries in the page mapper TLB for the current context. */
/** @def PGM_PAGEMAPTLB_IDX
 * Calculate the TLB index for a guest physical address.
 * @returns The TLB index.
 * @param   GCPhys      The guest physical address. */
/** @typedef PPGMPAGEMAP
 * Pointer to a page mapper unit for current context. */
/** @typedef PPPGMPAGEMAP
 * Pointer to a page mapper unit pointer for current context. */
#ifdef IN_GC
// typedef PPGMPAGEGCMAPTLB               PPGMPAGEMAPTLB;
// typedef PPGMPAGEGCMAPTLBE              PPGMPAGEMAPTLBE;
// typedef PPGMPAGEGCMAPTLBE             *PPPGMPAGEMAPTLBE;
# define PGM_PAGEMAPTLB_ENTRIES           PGM_PAGEGCMAPTLB_ENTRIES
# define PGM_PAGEMAPTLB_IDX(GCPhys)       PGM_PAGEGCMAPTLB_IDX(GCPhys)
 typedef void *                           PPGMPAGEMAP;
 typedef void **                          PPPGMPAGEMAP;
//#elif IN_RING0
// typedef PPGMPAGER0MAPTLB               PPGMPAGEMAPTLB;
// typedef PPGMPAGER0MAPTLBE              PPGMPAGEMAPTLBE;
// typedef PPGMPAGER0MAPTLBE             *PPPGMPAGEMAPTLBE;
//# define PGM_PAGEMAPTLB_ENTRIES          PGM_PAGER0MAPTLB_ENTRIES
//# define PGM_PAGEMAPTLB_IDX(GCPhys)      PGM_PAGER0MAPTLB_IDX(GCPhys)
// typedef PPGMCHUNKR0MAP                 PPGMPAGEMAP;
// typedef PPPGMCHUNKR0MAP                PPPGMPAGEMAP;
#else
 typedef PPGMPAGER3MAPTLB                 PPGMPAGEMAPTLB;
 typedef PPGMPAGER3MAPTLBE                PPGMPAGEMAPTLBE;
 typedef PPGMPAGER3MAPTLBE               *PPPGMPAGEMAPTLBE;
# define PGM_PAGEMAPTLB_ENTRIES           PGM_PAGER3MAPTLB_ENTRIES
# define PGM_PAGEMAPTLB_IDX(GCPhys)       PGM_PAGER3MAPTLB_IDX(GCPhys)
 typedef PPGMCHUNKR3MAP                   PPGMPAGEMAP;
 typedef PPPGMCHUNKR3MAP                  PPPGMPAGEMAP;
#endif
/** @} */
1255
1256
/** @name PGM Pool Indexes.
 * Aka. the unique shadow page identifier.
 * @{ */
/** NIL page pool IDX. */
#define NIL_PGMPOOL_IDX         0
/** The first special index. */
#define PGMPOOL_IDX_FIRST_SPECIAL 1
/** Page directory (32-bit root). */
#define PGMPOOL_IDX_PD          1
/** The extended PAE page directory (2048 entries, works as root currently). */
#define PGMPOOL_IDX_PAE_PD      2
/** PAE Page Directory Table 0. */
#define PGMPOOL_IDX_PAE_PD_0    3
/** PAE Page Directory Table 1. */
#define PGMPOOL_IDX_PAE_PD_1    4
/** PAE Page Directory Table 2. */
#define PGMPOOL_IDX_PAE_PD_2    5
/** PAE Page Directory Table 3. */
#define PGMPOOL_IDX_PAE_PD_3    6
/** Page Directory Pointer Table (PAE root, not currently used). */
#define PGMPOOL_IDX_PDPT        7
/** Page Map Level-4 (64-bit root). */
#define PGMPOOL_IDX_PML4        8
/** The first normal index. */
#define PGMPOOL_IDX_FIRST       9
/** The last valid index. (inclusive, 14 bits) */
#define PGMPOOL_IDX_LAST        0x3fff
/** @} */

/** The NIL index for the parent chain. */
#define NIL_PGMPOOL_USER_INDEX  ((uint16_t)0xffff)
1288
/**
 * Node in the chain linking a shadowed page to it's parent (user).
 * Packed to keep the node at 6 bytes; nodes are addressed by 16-bit indices.
 */
#pragma pack(1)
typedef struct PGMPOOLUSER
{
    /** The index to the next item in the chain. NIL_PGMPOOL_USER_INDEX is no next. */
    uint16_t            iNext;
    /** The user page index. */
    uint16_t            iUser;
    /** Index into the user table. */
    uint16_t            iUserTable;
} PGMPOOLUSER, *PPGMPOOLUSER;
typedef const PGMPOOLUSER *PCPGMPOOLUSER;
#pragma pack()
1304
1305
/** The NIL index for the phys ext chain. */
#define NIL_PGMPOOL_PHYSEXT_INDEX   ((uint16_t)0xffff)

/**
 * Node in the chain of physical cross reference extents.
 * Packed to keep the node at 8 bytes; nodes are addressed by 16-bit indices.
 */
#pragma pack(1)
typedef struct PGMPOOLPHYSEXT
{
    /** The index to the next item in the chain. NIL_PGMPOOL_PHYSEXT_INDEX is no next. */
    uint16_t            iNext;
    /** The user page indices (up to three per node). */
    uint16_t            aidx[3];
} PGMPOOLPHYSEXT, *PPGMPOOLPHYSEXT;
typedef const PGMPOOLPHYSEXT *PCPGMPOOLPHYSEXT;
#pragma pack()
1322
1323
/**
 * The kind of page that's being shadowed.
 * Stored in PGMPOOLPAGE::enmKind as a uint8_t, so new values must fit in 8 bits.
 */
typedef enum PGMPOOLKIND
{
    /** The virtual invalid 0 entry. */
    PGMPOOLKIND_INVALID = 0,
    /** The entry is free (=unused). */
    PGMPOOLKIND_FREE,

    /** Shw: 32-bit page table;     Gst: no paging  */
    PGMPOOLKIND_32BIT_PT_FOR_PHYS,
    /** Shw: 32-bit page table;     Gst: 32-bit page table. */
    PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT,
    /** Shw: 32-bit page table;     Gst: 4MB page. */
    PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB,
    /** Shw: PAE page table;        Gst: no paging  */
    PGMPOOLKIND_PAE_PT_FOR_PHYS,
    /** Shw: PAE page table;        Gst: 32-bit page table. */
    PGMPOOLKIND_PAE_PT_FOR_32BIT_PT,
    /** Shw: PAE page table;        Gst: Half of a 4MB page. */
    PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB,
    /** Shw: PAE page table;        Gst: PAE page table. */
    PGMPOOLKIND_PAE_PT_FOR_PAE_PT,
    /** Shw: PAE page table;        Gst: 2MB page. */
    PGMPOOLKIND_PAE_PT_FOR_PAE_2MB,

    /** Shw: PAE page directory;    Gst: 32-bit page directory. */
    PGMPOOLKIND_PAE_PD_FOR_32BIT_PD,
    /** Shw: PAE page directory;    Gst: PAE page directory. */
    PGMPOOLKIND_PAE_PD_FOR_PAE_PD,

    /** Shw: 64-bit page directory pointer table;   Gst: 64-bit page directory pointer table. */
    PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT,
    /** Shw: 64-bit page directory table;           Gst: 64-bit page directory table. */
    PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD,

    /** Shw: Root 32-bit page directory. */
    PGMPOOLKIND_ROOT_32BIT_PD,
    /** Shw: Root PAE page directory */
    PGMPOOLKIND_ROOT_PAE_PD,
    /** Shw: Root PAE page directory pointer table (legacy, 4 entries). */
    PGMPOOLKIND_ROOT_PDPT,
    /** Shw: Root page map level-4 table. */
    PGMPOOLKIND_ROOT_PML4,

    /** The last valid entry. */
    PGMPOOLKIND_LAST = PGMPOOLKIND_ROOT_PML4
} PGMPOOLKIND;
1373
1374
/**
 * The tracking data for a page in the pool.
 */
typedef struct PGMPOOLPAGE
{
    /** AVL node code with the (HC) physical address of this page. */
    AVLOHCPHYSNODECORE  Core;
    /** Pointer to the HC mapping of the page. */
    R3R0PTRTYPE(void *) pvPageHC;
    /** The guest physical address. */
    RTGCPHYS            GCPhys;
    /** The kind of page we're shadowing. (This is really a PGMPOOLKIND enum.) */
    uint8_t             enmKind;
    /** Explicit padding byte. */
    uint8_t             bPadding;
    /** The index of this page. */
    uint16_t            idx;
    /** The next entry in the list this page currently resides in.
     * It's either in the free list or in the GCPhys hash. */
    uint16_t            iNext;
#ifdef PGMPOOL_WITH_USER_TRACKING
    /** Head of the user chain. NIL_PGMPOOL_USER_INDEX if not currently in use. */
    uint16_t            iUserHead;
    /** The number of present entries. */
    uint16_t            cPresent;
    /** The first entry in the table which is present. */
    uint16_t            iFirstPresent;
#endif
#ifdef PGMPOOL_WITH_MONITORING
    /** The number of modifications to the monitored page. */
    uint16_t            cModifications;
    /** The next modified page. NIL_PGMPOOL_IDX if tail. */
    uint16_t            iModifiedNext;
    /** The previous modified page. NIL_PGMPOOL_IDX if head. */
    uint16_t            iModifiedPrev;
    /** The next page sharing access handler. NIL_PGMPOOL_IDX if tail. */
    uint16_t            iMonitoredNext;
    /** The previous page sharing access handler. NIL_PGMPOOL_IDX if head. */
    uint16_t            iMonitoredPrev;
#endif
#ifdef PGMPOOL_WITH_CACHE
    /** The next page in the age list. */
    uint16_t            iAgeNext;
    /** The previous page in the age list. */
    uint16_t            iAgePrev;
#endif /* PGMPOOL_WITH_CACHE */
    /** Used to indicate that the page is zeroed. */
    bool                fZeroed;
    /** Used to indicate that a PT has non-global entries. */
    bool                fSeenNonGlobal;
    /** Used to indicate that we're monitoring writes to the guest page. */
    bool                fMonitored;
    /** Used to indicate that the page is in the cache (e.g. in the GCPhys hash).
     * (All pages are in the age list.) */
    bool                fCached;
    /** This is used by the R3 access handlers when invoked by an async thread.
     * It's a hack required because of REMR3NotifyHandlerPhysicalDeregister. */
    bool volatile       fReusedFlushPending;
    /** Used to indicate that a page the guest is mapping is also used as a CR3.
     * In these cases the access handler acts differently and will check
     * for mapping conflicts like the normal CR3 handler.
     * @todo When we change the CR3 shadowing to use pool pages, this flag can be
     *       replaced by a list of pages which share access handler.
     */
    bool                fCR3Mix;
} PGMPOOLPAGE, *PPGMPOOLPAGE, **PPPGMPOOLPAGE;
1440
1441
#ifdef PGMPOOL_WITH_CACHE
/** The hash table size. Must be a power of two (the hash function masks with
 * PGMPOOL_HASH_SIZE - 1). */
# define PGMPOOL_HASH_SIZE      0x40
/** The hash function, mapping a guest physical address to a hash bucket. */
# define PGMPOOL_HASH(GCPhys)   ( ((GCPhys) >> PAGE_SHIFT) & (PGMPOOL_HASH_SIZE - 1) )
#endif
1448
1449
/**
 * The shadow page pool instance data.
 *
 * It's all one big allocation made at init time, except for the
 * pages that is. The user nodes follow immediately after the
 * page structures.
 */
typedef struct PGMPOOL
{
    /** The VM handle - HC Ptr. */
    R3R0PTRTYPE(PVM)    pVMHC;
    /** The VM handle - GC Ptr. */
    GCPTRTYPE(PVM)      pVMGC;
    /** The max pool size. This includes the special IDs.  */
    uint16_t            cMaxPages;
    /** The current pool size. */
    uint16_t            cCurPages;
    /** The head of the free page list. */
    uint16_t            iFreeHead;
    /* Padding. */
    uint16_t            u16Padding;
#ifdef PGMPOOL_WITH_USER_TRACKING
    /** Head of the chain of free user nodes. */
    uint16_t            iUserFreeHead;
    /** The number of user nodes we've allocated. */
    uint16_t            cMaxUsers;
    /** The number of present page table entries in the entire pool. */
    uint32_t            cPresent;
    /** Pointer to the array of user nodes - GC pointer. */
    GCPTRTYPE(PPGMPOOLUSER) paUsersGC;
    /** Pointer to the array of user nodes - HC pointer. */
    R3R0PTRTYPE(PPGMPOOLUSER) paUsersHC;
#endif /* PGMPOOL_WITH_USER_TRACKING */
#ifdef PGMPOOL_WITH_GCPHYS_TRACKING
    /** Head of the chain of free phys ext nodes. */
    uint16_t            iPhysExtFreeHead;
    /** The number of user nodes we've allocated. */
    uint16_t            cMaxPhysExts;
    /** Pointer to the array of physical xref extent - GC pointer. */
    GCPTRTYPE(PPGMPOOLPHYSEXT) paPhysExtsGC;
    /** Pointer to the array of physical xref extent nodes - HC pointer. */
    R3R0PTRTYPE(PPGMPOOLPHYSEXT) paPhysExtsHC;
#endif /* PGMPOOL_WITH_GCPHYS_TRACKING */
#ifdef PGMPOOL_WITH_CACHE
    /** Hash table for GCPhys addresses. */
    uint16_t            aiHash[PGMPOOL_HASH_SIZE];
    /** The head of the age list. */
    uint16_t            iAgeHead;
    /** The tail of the age list. */
    uint16_t            iAgeTail;
    /** Set if the cache is enabled. */
    bool                fCacheEnabled;
#endif /* PGMPOOL_WITH_CACHE */
#ifdef PGMPOOL_WITH_MONITORING
    /** Head of the list of modified pages. */
    uint16_t            iModifiedHead;
    /** The current number of modified pages. */
    uint16_t            cModifiedPages;
    /** Access handler, GC. */
    GCPTRTYPE(PFNPGMGCPHYSHANDLER)  pfnAccessHandlerGC;
    /** Access handler, R0. */
    R0PTRTYPE(PFNPGMR0PHYSHANDLER)  pfnAccessHandlerR0;
    /** Access handler, R3. */
    R3PTRTYPE(PFNPGMR3PHYSHANDLER)  pfnAccessHandlerR3;
    /** The access handler description (HC ptr). */
    R3PTRTYPE(const char *)         pszAccessHandler;
#endif /* PGMPOOL_WITH_MONITORING */
    /** The number of pages currently in use. */
    uint16_t            cUsedPages;
#ifdef VBOX_WITH_STATISTICS
    /** The high water mark for cUsedPages. */
    uint16_t            cUsedPagesHigh;
    uint32_t            Alignment1;         /**< Align the next member on a 64-bit boundary. */
    /** Profiling pgmPoolAlloc(). */
    STAMPROFILEADV      StatAlloc;
    /** Profiling pgmPoolClearAll(). */
    STAMPROFILE         StatClearAll;
    /** Profiling pgmPoolFlushAllInt(). */
    STAMPROFILE         StatFlushAllInt;
    /** Profiling pgmPoolFlushPage(). */
    STAMPROFILE         StatFlushPage;
    /** Profiling pgmPoolFree(). */
    STAMPROFILE         StatFree;
    /** Profiling time spent zeroing pages. */
    STAMPROFILE         StatZeroPage;
# ifdef PGMPOOL_WITH_USER_TRACKING
    /** Profiling of pgmPoolTrackDeref. */
    STAMPROFILE         StatTrackDeref;
    /** Profiling pgmTrackFlushGCPhysPT. */
    STAMPROFILE         StatTrackFlushGCPhysPT;
    /** Profiling pgmTrackFlushGCPhysPTs. */
    STAMPROFILE         StatTrackFlushGCPhysPTs;
    /** Profiling pgmTrackFlushGCPhysPTsSlow. */
    STAMPROFILE         StatTrackFlushGCPhysPTsSlow;
    /** Number of times we've been out of user records. */
    STAMCOUNTER         StatTrackFreeUpOneUser;
# endif
# ifdef PGMPOOL_WITH_GCPHYS_TRACKING
    /** Profiling deref activity related tracking GC physical pages. */
    STAMPROFILE         StatTrackDerefGCPhys;
    /** Number of linear searches for a HCPhys in the ram ranges. */
    STAMCOUNTER         StatTrackLinearRamSearches;
    /** The number of failing pgmPoolTrackPhysExtAlloc calls.
     * NOTE(review): member is named 'Stam...' rather than 'Stat...';
     * kept as-is for source compatibility. */
    STAMCOUNTER         StamTrackPhysExtAllocFailures;
# endif
# ifdef PGMPOOL_WITH_MONITORING
    /** Profiling the GC PT access handler. */
    STAMPROFILE         StatMonitorGC;
    /** Times we've failed interpreting the instruction. */
    STAMCOUNTER         StatMonitorGCEmulateInstr;
    /** Profiling the pgmPoolFlushPage calls made from the GC PT access handler. */
    STAMPROFILE         StatMonitorGCFlushPage;
    /** Times we've detected fork(). */
    STAMCOUNTER         StatMonitorGCFork;
    /** Profiling the GC access we've handled (except REP STOSD). */
    STAMPROFILE         StatMonitorGCHandled;
    /** Times we've failed interpreting a patch code instruction. */
    STAMCOUNTER         StatMonitorGCIntrFailPatch1;
    /** Times we've failed interpreting a patch code instruction during flushing. */
    STAMCOUNTER         StatMonitorGCIntrFailPatch2;
    /** The number of times we've seen rep prefixes we can't handle. */
    STAMCOUNTER         StatMonitorGCRepPrefix;
    /** Profiling the REP STOSD cases we've handled. */
    STAMPROFILE         StatMonitorGCRepStosd;

    /** Profiling the HC PT access handler. */
    STAMPROFILE         StatMonitorHC;
    /** Times we've failed interpreting the instruction. */
    STAMCOUNTER         StatMonitorHCEmulateInstr;
    /** Profiling the pgmPoolFlushPage calls made from the HC PT access handler. */
    STAMPROFILE         StatMonitorHCFlushPage;
    /** Times we've detected fork(). */
    STAMCOUNTER         StatMonitorHCFork;
    /** Profiling the HC access we've handled (except REP STOSD). */
    STAMPROFILE         StatMonitorHCHandled;
    /** The number of times we've seen rep prefixes we can't handle. */
    STAMCOUNTER         StatMonitorHCRepPrefix;
    /** Profiling the REP STOSD cases we've handled. */
    STAMPROFILE         StatMonitorHCRepStosd;
    /** The number of times we're called in an async thread an need to flush. */
    STAMCOUNTER         StatMonitorHCAsync;
    /** The high water mark for cModifiedPages. */
    uint16_t            cModifiedPagesHigh;
    uint16_t            Alignment2[3];      /**< Align the next member on a 64-bit boundary. */
# endif
# ifdef PGMPOOL_WITH_CACHE
    /** The number of cache hits. */
    STAMCOUNTER         StatCacheHits;
    /** The number of cache misses. */
    STAMCOUNTER         StatCacheMisses;
    /** The number of times we've got a conflict of 'kind' in the cache. */
    STAMCOUNTER         StatCacheKindMismatches;
    /** Number of times we've been out of pages. */
    STAMCOUNTER         StatCacheFreeUpOne;
    /** The number of cacheable allocations. */
    STAMCOUNTER         StatCacheCacheable;
    /** The number of uncacheable allocations. */
    STAMCOUNTER         StatCacheUncacheable;
# endif
#elif HC_ARCH_BITS == 64 && GC_ARCH_BITS == 32
    uint32_t            Alignment1;         /**< Align the next member on a 64-bit boundary. */
#endif
    /** The AVL tree for looking up a page by its HC physical address. */
    AVLOHCPHYSTREE      HCPhysTree;
    uint32_t            Alignment3;         /**< Align the next member on a 64-bit boundary. */
    /** Array of pages. (cMaxPages in length)
     * The Id is the index into this array.
     */
    PGMPOOLPAGE         aPages[PGMPOOL_IDX_FIRST];
} PGMPOOL, *PPGMPOOL, **PPPGMPOOL;
1620
1621
/** @def PGMPOOL_PAGE_2_PTR
 * Maps a pool page into the current context.
 *
 * @returns Pointer to the (mapped) page.
 * @param   pVM     The VM handle.
 * @param   pPage   The pool page.
 *
 * @remark  In GC this uses PGMGCDynMapHCPage(), so it will consume one of the
 *          small page window slots employed by that function. Be careful.
 * @remark  There is no need to assert on the result.
 */
#ifdef IN_GC
# define PGMPOOL_PAGE_2_PTR(pVM, pPage)  pgmGCPoolMapPage((pVM), (pPage))
#else
# define PGMPOOL_PAGE_2_PTR(pVM, pPage)  ((pPage)->pvPageHC)
#endif
1638
1639
/**
 * Trees are using self relative offsets as pointers.
 * So, all its data, including the root pointer, must be in the heap for HC and GC
 * to have the same layout.
 */
typedef struct PGMTREES
{
    /** Physical access handlers (AVL range+offsetptr tree). */
    AVLROGCPHYSTREE PhysHandlers;
    /** Virtual access handlers (AVL range + GC ptr tree). */
    AVLROGCPTRTREE  VirtHandlers;
    /** Virtual access handlers (Phys range AVL range + offsetptr tree). */
    AVLROGCPHYSTREE PhysToVirtHandlers;
    /** Virtual access handlers for the hypervisor (AVL range + GC ptr tree). */
    AVLROGCPTRTREE  HyperVirtHandlers;
} PGMTREES;
/** Pointer to PGM trees. */
typedef PGMTREES *PPGMTREES;
1658
1659
/** @name Paging mode macros
 *
 * These macros compose context-specific (GC/R0/R3) and mode-specific
 * (guest/shadow/both) function names and name strings, used when wiring up
 * the per-mode function pointer tables.
 * @{ */
#ifdef IN_GC
# define PGM_CTX(a,b)                   a##GC##b
# define PGM_CTX_STR(a,b)               a "GC" b
# define PGM_CTX_DECL(type)             PGMGCDECL(type)
#else
# ifdef IN_RING3
#  define PGM_CTX(a,b)                  a##R3##b
#  define PGM_CTX_STR(a,b)              a "R3" b
#  define PGM_CTX_DECL(type)            DECLCALLBACK(type)
# else
#  define PGM_CTX(a,b)                  a##R0##b
#  define PGM_CTX_STR(a,b)              a "R0" b
#  define PGM_CTX_DECL(type)            PGMDECL(type)
# endif
#endif

/* Guest mode function name composers (per guest paging mode). */
#define PGM_GST_NAME_REAL(name)         PGM_CTX(pgm,GstReal##name)
#define PGM_GST_NAME_GC_REAL_STR(name)  "pgmGCGstReal" #name
#define PGM_GST_NAME_R0_REAL_STR(name)  "pgmR0GstReal" #name
#define PGM_GST_NAME_PROT(name)         PGM_CTX(pgm,GstProt##name)
#define PGM_GST_NAME_GC_PROT_STR(name)  "pgmGCGstProt" #name
#define PGM_GST_NAME_R0_PROT_STR(name)  "pgmR0GstProt" #name
#define PGM_GST_NAME_32BIT(name)        PGM_CTX(pgm,Gst32Bit##name)
#define PGM_GST_NAME_GC_32BIT_STR(name) "pgmGCGst32Bit" #name
#define PGM_GST_NAME_R0_32BIT_STR(name) "pgmR0Gst32Bit" #name
#define PGM_GST_NAME_PAE(name)          PGM_CTX(pgm,GstPAE##name)
#define PGM_GST_NAME_GC_PAE_STR(name)   "pgmGCGstPAE" #name
#define PGM_GST_NAME_R0_PAE_STR(name)   "pgmR0GstPAE" #name
#define PGM_GST_NAME_AMD64(name)        PGM_CTX(pgm,GstAMD64##name)
#define PGM_GST_NAME_GC_AMD64_STR(name) "pgmGCGstAMD64" #name
#define PGM_GST_NAME_R0_AMD64_STR(name) "pgmR0GstAMD64" #name
#define PGM_GST_PFN(name, pVM)          ((pVM)->pgm.s.PGM_CTX(pfn,Gst##name))
#define PGM_GST_DECL(type, name)        PGM_CTX_DECL(type) PGM_GST_NAME(name)

/* Shadow mode function name composers (per shadow paging mode). */
#define PGM_SHW_NAME_32BIT(name)        PGM_CTX(pgm,Shw32Bit##name)
#define PGM_SHW_NAME_GC_32BIT_STR(name) "pgmGCShw32Bit" #name
#define PGM_SHW_NAME_R0_32BIT_STR(name) "pgmR0Shw32Bit" #name
#define PGM_SHW_NAME_PAE(name)          PGM_CTX(pgm,ShwPAE##name)
#define PGM_SHW_NAME_GC_PAE_STR(name)   "pgmGCShwPAE" #name
#define PGM_SHW_NAME_R0_PAE_STR(name)   "pgmR0ShwPAE" #name
#define PGM_SHW_NAME_AMD64(name)        PGM_CTX(pgm,ShwAMD64##name)
#define PGM_SHW_NAME_GC_AMD64_STR(name) "pgmGCShwAMD64" #name
#define PGM_SHW_NAME_R0_AMD64_STR(name) "pgmR0ShwAMD64" #name
#define PGM_SHW_DECL(type, name)        PGM_CTX_DECL(type) PGM_SHW_NAME(name)
#define PGM_SHW_PFN(name, pVM)          ((pVM)->pgm.s.PGM_CTX(pfn,Shw##name))

/* Shw_Gst: combined shadow + guest mode function name composers. */
#define PGM_BTH_NAME_32BIT_REAL(name)   PGM_CTX(pgm,Bth32BitReal##name)
#define PGM_BTH_NAME_32BIT_PROT(name)   PGM_CTX(pgm,Bth32BitProt##name)
#define PGM_BTH_NAME_32BIT_32BIT(name)  PGM_CTX(pgm,Bth32Bit32Bit##name)
#define PGM_BTH_NAME_PAE_REAL(name)     PGM_CTX(pgm,BthPAEReal##name)
#define PGM_BTH_NAME_PAE_PROT(name)     PGM_CTX(pgm,BthPAEProt##name)
#define PGM_BTH_NAME_PAE_32BIT(name)    PGM_CTX(pgm,BthPAE32Bit##name)
#define PGM_BTH_NAME_PAE_PAE(name)      PGM_CTX(pgm,BthPAEPAE##name)
#define PGM_BTH_NAME_AMD64_AMD64(name)  PGM_CTX(pgm,BthAMD64AMD64##name)
#define PGM_BTH_NAME_GC_32BIT_REAL_STR(name)    "pgmGCBth32BitReal" #name
#define PGM_BTH_NAME_GC_32BIT_PROT_STR(name)    "pgmGCBth32BitProt" #name
#define PGM_BTH_NAME_GC_32BIT_32BIT_STR(name)   "pgmGCBth32Bit32Bit" #name
#define PGM_BTH_NAME_GC_PAE_REAL_STR(name)      "pgmGCBthPAEReal" #name
#define PGM_BTH_NAME_GC_PAE_PROT_STR(name)      "pgmGCBthPAEProt" #name
#define PGM_BTH_NAME_GC_PAE_32BIT_STR(name)     "pgmGCBthPAE32Bit" #name
#define PGM_BTH_NAME_GC_PAE_PAE_STR(name)       "pgmGCBthPAEPAE" #name
#define PGM_BTH_NAME_GC_AMD64_AMD64_STR(name)   "pgmGCBthAMD64AMD64" #name
#define PGM_BTH_NAME_R0_32BIT_REAL_STR(name)    "pgmR0Bth32BitReal" #name
#define PGM_BTH_NAME_R0_32BIT_PROT_STR(name)    "pgmR0Bth32BitProt" #name
#define PGM_BTH_NAME_R0_32BIT_32BIT_STR(name)   "pgmR0Bth32Bit32Bit" #name
#define PGM_BTH_NAME_R0_PAE_REAL_STR(name)      "pgmR0BthPAEReal" #name
#define PGM_BTH_NAME_R0_PAE_PROT_STR(name)      "pgmR0BthPAEProt" #name
#define PGM_BTH_NAME_R0_PAE_32BIT_STR(name)     "pgmR0BthPAE32Bit" #name
#define PGM_BTH_NAME_R0_PAE_PAE_STR(name)       "pgmR0BthPAEPAE" #name
#define PGM_BTH_NAME_R0_AMD64_AMD64_STR(name)   "pgmR0BthAMD64AMD64" #name
#define PGM_BTH_DECL(type, name)        PGM_CTX_DECL(type) PGM_BTH_NAME(name)
#define PGM_BTH_PFN(name, pVM)          ((pVM)->pgm.s.PGM_CTX(pfn,Bth##name))
/** @} */
1736
1737/**
1738 * Data for each paging mode.
1739 */
1740typedef struct PGMMODEDATA
1741{
1742 /** The guest mode type. */
1743 uint32_t uGstType;
1744 /** The shadow mode type. */
1745 uint32_t uShwType;
1746
1747 /** @name Function pointers for Shadow paging.
1748 * @{
1749 */
1750 DECLR3CALLBACKMEMBER(int, pfnR3ShwRelocate,(PVM pVM, RTGCUINTPTR offDelta));
1751 DECLR3CALLBACKMEMBER(int, pfnR3ShwExit,(PVM pVM));
1752 DECLR3CALLBACKMEMBER(int, pfnR3ShwGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
1753 DECLR3CALLBACKMEMBER(int, pfnR3ShwModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
1754
1755 DECLGCCALLBACKMEMBER(int, pfnGCShwGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
1756 DECLGCCALLBACKMEMBER(int, pfnGCShwModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
1757
1758 DECLR0CALLBACKMEMBER(int, pfnR0ShwGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
1759 DECLR0CALLBACKMEMBER(int, pfnR0ShwModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
1760 /** @} */
1761
1762 /** @name Function pointers for Guest paging.
1763 * @{
1764 */
1765 DECLR3CALLBACKMEMBER(int, pfnR3GstRelocate,(PVM pVM, RTGCUINTPTR offDelta));
1766 DECLR3CALLBACKMEMBER(int, pfnR3GstExit,(PVM pVM));
1767 DECLR3CALLBACKMEMBER(int, pfnR3GstGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
1768 DECLR3CALLBACKMEMBER(int, pfnR3GstModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
1769 DECLR3CALLBACKMEMBER(int, pfnR3GstGetPDE,(PVM pVM, RTGCUINTPTR GCPtr, PX86PDEPAE pPde));
1770 DECLR3CALLBACKMEMBER(int, pfnR3GstMonitorCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
1771 DECLR3CALLBACKMEMBER(int, pfnR3GstUnmonitorCR3,(PVM pVM));
1772 DECLR3CALLBACKMEMBER(int, pfnR3GstMapCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
1773 DECLR3CALLBACKMEMBER(int, pfnR3GstUnmapCR3,(PVM pVM));
1774 R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnR3GstWriteHandlerCR3;
1775 R3PTRTYPE(const char *) pszR3GstWriteHandlerCR3;
1776 R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnR3GstPAEWriteHandlerCR3;
1777 R3PTRTYPE(const char *) pszR3GstPAEWriteHandlerCR3;
1778
1779 DECLGCCALLBACKMEMBER(int, pfnGCGstGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
1780 DECLGCCALLBACKMEMBER(int, pfnGCGstModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
1781 DECLGCCALLBACKMEMBER(int, pfnGCGstGetPDE,(PVM pVM, RTGCUINTPTR GCPtr, PX86PDEPAE pPde));
1782 DECLGCCALLBACKMEMBER(int, pfnGCGstMonitorCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
1783 DECLGCCALLBACKMEMBER(int, pfnGCGstUnmonitorCR3,(PVM pVM));
1784 DECLGCCALLBACKMEMBER(int, pfnGCGstMapCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
1785 DECLGCCALLBACKMEMBER(int, pfnGCGstUnmapCR3,(PVM pVM));
1786 GCPTRTYPE(PFNPGMGCPHYSHANDLER) pfnGCGstWriteHandlerCR3;
1787 GCPTRTYPE(PFNPGMGCPHYSHANDLER) pfnGCGstPAEWriteHandlerCR3;
1788
1789 DECLR0CALLBACKMEMBER(int, pfnR0GstGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
1790 DECLR0CALLBACKMEMBER(int, pfnR0GstModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
1791 DECLR0CALLBACKMEMBER(int, pfnR0GstGetPDE,(PVM pVM, RTGCUINTPTR GCPtr, PX86PDEPAE pPde));
1792 DECLR0CALLBACKMEMBER(int, pfnR0GstMonitorCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
1793 DECLR0CALLBACKMEMBER(int, pfnR0GstUnmonitorCR3,(PVM pVM));
1794 DECLR0CALLBACKMEMBER(int, pfnR0GstMapCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
1795 DECLR0CALLBACKMEMBER(int, pfnR0GstUnmapCR3,(PVM pVM));
1796 R0PTRTYPE(PFNPGMGCPHYSHANDLER) pfnR0GstWriteHandlerCR3;
1797 R0PTRTYPE(PFNPGMGCPHYSHANDLER) pfnR0GstPAEWriteHandlerCR3;
1798 /** @} */
1799
1800 /** @name Function pointers for Both Shadow and Guest paging.
1801 * @{
1802 */
1803 DECLR3CALLBACKMEMBER(int, pfnR3BthRelocate,(PVM pVM, RTGCUINTPTR offDelta));
1804 DECLR3CALLBACKMEMBER(int, pfnR3BthTrap0eHandler,(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
1805 DECLR3CALLBACKMEMBER(int, pfnR3BthInvalidatePage,(PVM pVM, RTGCPTR GCPtrPage));
1806 DECLR3CALLBACKMEMBER(int, pfnR3BthSyncCR3,(PVM pVM, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
1807 DECLR3CALLBACKMEMBER(int, pfnR3BthSyncPage,(PVM pVM, X86PDE PdeSrc, RTGCUINTPTR GCPtrPage, unsigned cPages, unsigned uError));
1808 DECLR3CALLBACKMEMBER(int, pfnR3BthPrefetchPage,(PVM pVM, RTGCUINTPTR GCPtrPage));
1809 DECLR3CALLBACKMEMBER(int, pfnR3BthVerifyAccessSyncPage,(PVM pVM, RTGCUINTPTR GCPtrPage, unsigned fFlags, unsigned uError));
1810#ifdef VBOX_STRICT
1811 DECLR3CALLBACKMEMBER(unsigned, pfnR3BthAssertCR3,(PVM pVM, uint64_t cr3, uint64_t cr4, RTGCUINTPTR GCPtr, RTGCUINTPTR cb));
1812#endif
1813
1814 DECLGCCALLBACKMEMBER(int, pfnGCBthTrap0eHandler,(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
1815 DECLGCCALLBACKMEMBER(int, pfnGCBthInvalidatePage,(PVM pVM, RTGCPTR GCPtrPage));
1816 DECLGCCALLBACKMEMBER(int, pfnGCBthSyncCR3,(PVM pVM, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
1817 DECLGCCALLBACKMEMBER(int, pfnGCBthSyncPage,(PVM pVM, X86PDE PdeSrc, RTGCUINTPTR GCPtrPage, unsigned cPages, unsigned uError));
1818 DECLGCCALLBACKMEMBER(int, pfnGCBthPrefetchPage,(PVM pVM, RTGCUINTPTR GCPtrPage));
1819 DECLGCCALLBACKMEMBER(int, pfnGCBthVerifyAccessSyncPage,(PVM pVM, RTGCUINTPTR GCPtrPage, unsigned fFlags, unsigned uError));
1820#ifdef VBOX_STRICT
1821 DECLGCCALLBACKMEMBER(unsigned, pfnGCBthAssertCR3,(PVM pVM, uint64_t cr3, uint64_t cr4, RTGCUINTPTR GCPtr, RTGCUINTPTR cb));
1822#endif
1823
1824 DECLR0CALLBACKMEMBER(int, pfnR0BthTrap0eHandler,(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
1825 DECLR0CALLBACKMEMBER(int, pfnR0BthInvalidatePage,(PVM pVM, RTGCPTR GCPtrPage));
1826 DECLR0CALLBACKMEMBER(int, pfnR0BthSyncCR3,(PVM pVM, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
1827 DECLR0CALLBACKMEMBER(int, pfnR0BthSyncPage,(PVM pVM, X86PDE PdeSrc, RTGCUINTPTR GCPtrPage, unsigned cPages, unsigned uError));
1828 DECLR0CALLBACKMEMBER(int, pfnR0BthPrefetchPage,(PVM pVM, RTGCUINTPTR GCPtrPage));
1829 DECLR0CALLBACKMEMBER(int, pfnR0BthVerifyAccessSyncPage,(PVM pVM, RTGCUINTPTR GCPtrPage, unsigned fFlags, unsigned uError));
1830#ifdef VBOX_STRICT
1831 DECLR0CALLBACKMEMBER(unsigned, pfnR0BthAssertCR3,(PVM pVM, uint64_t cr3, uint64_t cr4, RTGCUINTPTR GCPtr, RTGCUINTPTR cb));
1832#endif
1833 /** @} */
1834} PGMMODEDATA, *PPGMMODEDATA;
1835
1836
1837
/**
 * Converts a PGM pointer into a VM pointer.
 *
 * Works by subtracting the self-recorded offset (PGM::offVM) from the PGM
 * pointer, i.e. the inverse of how the VM structure embeds PGM.
 *
 * @returns Pointer to the VM structure the PGM is part of.
 * @param   pPGM   Pointer to PGM instance data.
 */
#define PGM2VM(pPGM)  ( (PVM)((char *)(pPGM) - (pPGM)->offVM) )
1844
1845/**
1846 * PGM Data (part of VM)
1847 */
1848typedef struct PGM
1849{
1850 /** Offset to the VM structure. */
1851 RTINT offVM;
1852
1853 /*
1854 * This will be redefined at least two more times before we're done, I'm sure.
1855 * The current code is only to get on with the coding.
1856 * - 2004-06-10: initial version, bird.
1857 * - 2004-07-02: 1st time, bird.
1858 * - 2004-10-18: 2nd time, bird.
1859 * - 2005-07-xx: 3rd time, bird.
1860 */
1861
1862 /** Pointer to the page table entries for the dynamic page mapping area - GCPtr. */
1863 GCPTRTYPE(PX86PTE) paDynPageMap32BitPTEsGC;
1864 /** Pointer to the page table entries for the dynamic page mapping area - GCPtr. */
1865 GCPTRTYPE(PX86PTEPAE) paDynPageMapPaePTEsGC;
1866
1867 /** The host paging mode. (This is what SUPLib reports.) */
1868 SUPPAGINGMODE enmHostMode;
1869 /** The shadow paging mode. */
1870 PGMMODE enmShadowMode;
1871 /** The guest paging mode. */
1872 PGMMODE enmGuestMode;
1873
 1874 /** The physical address currently represented in the guest CR3 register. */
1875 RTGCPHYS GCPhysCR3;
1876 /** Pointer to the 5 page CR3 content mapping.
1877 * The first page is always the CR3 (in some form) while the 4 other pages
1878 * are used of the PDs in PAE mode. */
1879 RTGCPTR GCPtrCR3Mapping;
1880#if HC_ARCH_BITS == 64
1881 uint32_t u32Alignment;
1882#endif
1883 /** The physical address of the currently monitored guest CR3 page.
1884 * When this value is NIL_RTGCPHYS no page is being monitored. */
1885 RTGCPHYS GCPhysGstCR3Monitored;
1886
1887 /** @name 32-bit Guest Paging.
1888 * @{ */
1889 /** The guest's page directory, HC pointer. */
1890 R3R0PTRTYPE(PX86PD) pGuestPDHC;
1891 /** The guest's page directory, static GC mapping. */
1892 GCPTRTYPE(PX86PD) pGuestPDGC;
1893 /** @} */
1894
1895 /** @name PAE Guest Paging.
1896 * @{ */
1897 /** The guest's page directory pointer table, static GC mapping. */
1898 GCPTRTYPE(PX86PDPT) pGstPaePDPTGC;
1899 /** The guest's page directory pointer table, HC pointer. */
1900 R3R0PTRTYPE(PX86PDPT) pGstPaePDPTHC;
1901 /** The guest's page directories, HC pointers.
 1902 * These are individual pointers and don't have to be adjacent.
1903 * These don't have to be up-to-date - use pgmGstGetPaePD() to access them. */
1904 R3R0PTRTYPE(PX86PDPAE) apGstPaePDsHC[4];
1905 /** The guest's page directories, static GC mapping.
1906 * Unlike the HC array the first entry can be accessed as a 2048 entry PD.
1907 * These don't have to be up-to-date - use pgmGstGetPaePD() to access them. */
1908 GCPTRTYPE(PX86PDPAE) apGstPaePDsGC[4];
1909 /** The physical addresses of the guest page directories (PAE) pointed to by apGstPagePDsHC/GC. */
1910 RTGCPHYS aGCPhysGstPaePDs[4];
1911 /** The physical addresses of the monitored guest page directories (PAE). */
1912 RTGCPHYS aGCPhysGstPaePDsMonitored[4];
1913 /** @} */
1914
1915 /** @name AMD64 Guest Paging.
1916 * @{ */
1917 /** The guest's page directory pointer table, HC pointer. */
1918 R3R0PTRTYPE(PX86PML4) pGstPaePML4HC;
1919 /** @} */
1920
1921 /** @name 32-bit Shadow Paging
1922 * @{ */
1923 /** The 32-Bit PD - HC Ptr. */
1924 R3R0PTRTYPE(PX86PD) pHC32BitPD;
1925 /** The 32-Bit PD - GC Ptr. */
1926 GCPTRTYPE(PX86PD) pGC32BitPD;
1927#if HC_ARCH_BITS == 64 && GC_ARCH_BITS == 32
1928 uint32_t u32Padding1; /**< alignment padding. */
1929#endif
1930 /** The Physical Address (HC) of the 32-Bit PD. */
1931 RTHCPHYS HCPhys32BitPD;
1932 /** @} */
1933
1934 /** @name PAE Shadow Paging
1935 * @{ */
1936 /** The four PDs for the low 4GB - HC Ptr.
1937 * Even though these are 4 pointers, what they point at is a single table.
1938 * Thus, it's possible to walk the 2048 entries starting where apHCPaePDs[0] points. */
1939 R3R0PTRTYPE(PX86PDPAE) apHCPaePDs[4];
1940 /** The four PDs for the low 4GB - GC Ptr.
1941 * Same kind of mapping as apHCPaePDs. */
1942 GCPTRTYPE(PX86PDPAE) apGCPaePDs[4];
1943 /** The Physical Address (HC) of the four PDs for the low 4GB.
1944 * These are *NOT* 4 contiguous pages. */
1945 RTHCPHYS aHCPhysPaePDs[4];
1946 /** The PAE PDP - HC Ptr. */
1947 R3R0PTRTYPE(PX86PDPT) pHCPaePDPT;
1948 /** The Physical Address (HC) of the PAE PDPT. */
1949 RTHCPHYS HCPhysPaePDPT;
1950 /** The PAE PDPT - GC Ptr. */
1951 GCPTRTYPE(PX86PDPT) pGCPaePDPT;
1952 /** @} */
1953
1954 /** @name AMD64 Shadow Paging
1955 * Extends PAE Paging.
1956 * @{ */
1957#if GC_ARCH_BITS == 32 && HC_ARCH_BITS == 64
1958 RTGCPTR alignment5; /**< structure size alignment. */
1959#endif
1960 /** The Page Map Level 4 table - HC Ptr. */
1961 R3R0PTRTYPE(PX86PML4) pHCPaePML4;
1962 /** The Physical Address (HC) of the Page Map Level 4 table. */
1963 RTHCPHYS HCPhysPaePML4;
1964 /** @}*/
1965
1966 /** @name Function pointers for Shadow paging.
1967 * @{
1968 */
1969 DECLR3CALLBACKMEMBER(int, pfnR3ShwRelocate,(PVM pVM, RTGCUINTPTR offDelta));
1970 DECLR3CALLBACKMEMBER(int, pfnR3ShwExit,(PVM pVM));
1971 DECLR3CALLBACKMEMBER(int, pfnR3ShwGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
1972 DECLR3CALLBACKMEMBER(int, pfnR3ShwModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
1973
1974 DECLGCCALLBACKMEMBER(int, pfnGCShwGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
1975 DECLGCCALLBACKMEMBER(int, pfnGCShwModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
1976
1977 DECLR0CALLBACKMEMBER(int, pfnR0ShwGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
1978 DECLR0CALLBACKMEMBER(int, pfnR0ShwModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
1979
1980 /** @} */
1981
1982 /** @name Function pointers for Guest paging.
1983 * @{
1984 */
1985 DECLR3CALLBACKMEMBER(int, pfnR3GstRelocate,(PVM pVM, RTGCUINTPTR offDelta));
1986 DECLR3CALLBACKMEMBER(int, pfnR3GstExit,(PVM pVM));
1987 DECLR3CALLBACKMEMBER(int, pfnR3GstGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
1988 DECLR3CALLBACKMEMBER(int, pfnR3GstModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
1989 DECLR3CALLBACKMEMBER(int, pfnR3GstGetPDE,(PVM pVM, RTGCUINTPTR GCPtr, PX86PDEPAE pPde));
1990 DECLR3CALLBACKMEMBER(int, pfnR3GstMonitorCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
1991 DECLR3CALLBACKMEMBER(int, pfnR3GstUnmonitorCR3,(PVM pVM));
1992 DECLR3CALLBACKMEMBER(int, pfnR3GstMapCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
1993 DECLR3CALLBACKMEMBER(int, pfnR3GstUnmapCR3,(PVM pVM));
1994 R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnR3GstWriteHandlerCR3;
1995 R3PTRTYPE(const char *) pszR3GstWriteHandlerCR3;
1996 R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnR3GstPAEWriteHandlerCR3;
1997 R3PTRTYPE(const char *) pszR3GstPAEWriteHandlerCR3;
1998
1999 DECLGCCALLBACKMEMBER(int, pfnGCGstGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
2000 DECLGCCALLBACKMEMBER(int, pfnGCGstModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
2001 DECLGCCALLBACKMEMBER(int, pfnGCGstGetPDE,(PVM pVM, RTGCUINTPTR GCPtr, PX86PDEPAE pPde));
2002 DECLGCCALLBACKMEMBER(int, pfnGCGstMonitorCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
2003 DECLGCCALLBACKMEMBER(int, pfnGCGstUnmonitorCR3,(PVM pVM));
2004 DECLGCCALLBACKMEMBER(int, pfnGCGstMapCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
2005 DECLGCCALLBACKMEMBER(int, pfnGCGstUnmapCR3,(PVM pVM));
2006 GCPTRTYPE(PFNPGMGCPHYSHANDLER) pfnGCGstWriteHandlerCR3;
2007 GCPTRTYPE(PFNPGMGCPHYSHANDLER) pfnGCGstPAEWriteHandlerCR3;
2008#if GC_ARCH_BITS == 32 && HC_ARCH_BITS == 64
2009 RTGCPTR alignment3; /**< structure size alignment. */
2010#endif
2011
2012 DECLR0CALLBACKMEMBER(int, pfnR0GstGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
2013 DECLR0CALLBACKMEMBER(int, pfnR0GstModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
2014 DECLR0CALLBACKMEMBER(int, pfnR0GstGetPDE,(PVM pVM, RTGCUINTPTR GCPtr, PX86PDEPAE pPde));
2015 DECLR0CALLBACKMEMBER(int, pfnR0GstMonitorCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
2016 DECLR0CALLBACKMEMBER(int, pfnR0GstUnmonitorCR3,(PVM pVM));
2017 DECLR0CALLBACKMEMBER(int, pfnR0GstMapCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
2018 DECLR0CALLBACKMEMBER(int, pfnR0GstUnmapCR3,(PVM pVM));
2019 R0PTRTYPE(PFNPGMGCPHYSHANDLER) pfnR0GstWriteHandlerCR3;
2020 R0PTRTYPE(PFNPGMGCPHYSHANDLER) pfnR0GstPAEWriteHandlerCR3;
2021 /** @} */
2022
2023 /** @name Function pointers for Both Shadow and Guest paging.
2024 * @{
2025 */
2026 DECLR3CALLBACKMEMBER(int, pfnR3BthRelocate,(PVM pVM, RTGCUINTPTR offDelta));
2027 DECLR3CALLBACKMEMBER(int, pfnR3BthTrap0eHandler,(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
2028 DECLR3CALLBACKMEMBER(int, pfnR3BthInvalidatePage,(PVM pVM, RTGCPTR GCPtrPage));
2029 DECLR3CALLBACKMEMBER(int, pfnR3BthSyncCR3,(PVM pVM, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
2030 DECLR3CALLBACKMEMBER(int, pfnR3BthSyncPage,(PVM pVM, X86PDE PdeSrc, RTGCUINTPTR GCPtrPage, unsigned cPages, unsigned uError));
2031 DECLR3CALLBACKMEMBER(int, pfnR3BthPrefetchPage,(PVM pVM, RTGCUINTPTR GCPtrPage));
2032 DECLR3CALLBACKMEMBER(int, pfnR3BthVerifyAccessSyncPage,(PVM pVM, RTGCUINTPTR GCPtrPage, unsigned fFlags, unsigned uError));
2033 DECLR3CALLBACKMEMBER(unsigned, pfnR3BthAssertCR3,(PVM pVM, uint64_t cr3, uint64_t cr4, RTGCUINTPTR GCPtr, RTGCUINTPTR cb));
2034
2035 DECLR0CALLBACKMEMBER(int, pfnR0BthTrap0eHandler,(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
2036 DECLR0CALLBACKMEMBER(int, pfnR0BthInvalidatePage,(PVM pVM, RTGCPTR GCPtrPage));
2037 DECLR0CALLBACKMEMBER(int, pfnR0BthSyncCR3,(PVM pVM, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
2038 DECLR0CALLBACKMEMBER(int, pfnR0BthSyncPage,(PVM pVM, X86PDE PdeSrc, RTGCUINTPTR GCPtrPage, unsigned cPages, unsigned uError));
2039 DECLR0CALLBACKMEMBER(int, pfnR0BthPrefetchPage,(PVM pVM, RTGCUINTPTR GCPtrPage));
2040 DECLR0CALLBACKMEMBER(int, pfnR0BthVerifyAccessSyncPage,(PVM pVM, RTGCUINTPTR GCPtrPage, unsigned fFlags, unsigned uError));
2041 DECLR0CALLBACKMEMBER(unsigned, pfnR0BthAssertCR3,(PVM pVM, uint64_t cr3, uint64_t cr4, RTGCUINTPTR GCPtr, RTGCUINTPTR cb));
2042
2043 DECLGCCALLBACKMEMBER(int, pfnGCBthTrap0eHandler,(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
2044 DECLGCCALLBACKMEMBER(int, pfnGCBthInvalidatePage,(PVM pVM, RTGCPTR GCPtrPage));
2045 DECLGCCALLBACKMEMBER(int, pfnGCBthSyncCR3,(PVM pVM, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
2046 DECLGCCALLBACKMEMBER(int, pfnGCBthSyncPage,(PVM pVM, X86PDE PdeSrc, RTGCUINTPTR GCPtrPage, unsigned cPages, unsigned uError));
2047 DECLGCCALLBACKMEMBER(int, pfnGCBthPrefetchPage,(PVM pVM, RTGCUINTPTR GCPtrPage));
2048 DECLGCCALLBACKMEMBER(int, pfnGCBthVerifyAccessSyncPage,(PVM pVM, RTGCUINTPTR GCPtrPage, unsigned fFlags, unsigned uError));
2049 DECLGCCALLBACKMEMBER(unsigned, pfnGCBthAssertCR3,(PVM pVM, uint64_t cr3, uint64_t cr4, RTGCUINTPTR GCPtr, RTGCUINTPTR cb));
2050#if GC_ARCH_BITS == 32 && HC_ARCH_BITS == 64
2051 RTGCPTR alignment2; /**< structure size alignment. */
2052#endif
2053 /** @} */
2054
2055 /** Pointer to SHW+GST mode data (function pointers).
2056 * The index into this table is made up from */
2057 R3PTRTYPE(PPGMMODEDATA) paModeData;
2058
2059 /** Pointer to the list of RAM ranges (Phys GC -> Phys HC conversion) - for R3.
2060 * This is sorted by physical address and contains no overlapping ranges. */
2061 R3PTRTYPE(PPGMRAMRANGE) pRamRangesR3;
2062 /** R0 pointer corresponding to PGM::pRamRangesR3. */
2063 R0PTRTYPE(PPGMRAMRANGE) pRamRangesR0;
2064 /** GC pointer corresponding to PGM::pRamRangesR3. */
2065 GCPTRTYPE(PPGMRAMRANGE) pRamRangesGC;
2066 /** The configured RAM size. */
2067 RTUINT cbRamSize;
2068
2069 /** Pointer to the list of ROM ranges - for R3.
2070 * This is sorted by physical address and contains no overlapping ranges. */
2071 R3PTRTYPE(PPGMROMRANGE) pRomRangesR3;
2072 /** R0 pointer corresponding to PGM::pRomRangesR3. */
2073 R0PTRTYPE(PPGMRAMRANGE) pRomRangesR0;
2074 /** GC pointer corresponding to PGM::pRomRangesR3. */
2075 GCPTRTYPE(PPGMRAMRANGE) pRomRangesGC;
2076 /** Alignment padding. */
2077 RTGCPTR GCPtrPadding2;
2078
2079 /** Pointer to the list of MMIO2 ranges - for R3.
2080 * Registration order. */
2081 R3PTRTYPE(PPGMMMIO2RANGE) pMmio2RangesR3;
2082
2083 /** PGM offset based trees - HC Ptr. */
2084 R3R0PTRTYPE(PPGMTREES) pTreesHC;
2085 /** PGM offset based trees - GC Ptr. */
2086 GCPTRTYPE(PPGMTREES) pTreesGC;
2087
2088 /** Linked list of GC mappings - for GC.
2089 * The list is sorted ascending on address.
2090 */
2091 GCPTRTYPE(PPGMMAPPING) pMappingsGC;
2092 /** Linked list of GC mappings - for HC.
2093 * The list is sorted ascending on address.
2094 */
2095 R3PTRTYPE(PPGMMAPPING) pMappingsR3;
2096 /** Linked list of GC mappings - for R0.
2097 * The list is sorted ascending on address.
2098 */
2099 R0PTRTYPE(PPGMMAPPING) pMappingsR0;
2100
2101 /** If set no conflict checks are required. (boolean) */
2102 bool fMappingsFixed;
2103 /** If set, then no mappings are put into the shadow page table. (boolean) */
2104 bool fDisableMappings;
2105 /** Size of fixed mapping */
2106 uint32_t cbMappingFixed;
2107 /** Base address (GC) of fixed mapping */
2108 RTGCPTR GCPtrMappingFixed;
2109#if HC_ARCH_BITS == 64 && GC_ARCH_BITS == 32
2110 uint32_t u32Padding0; /**< alignment padding. */
2111#endif
2112
2113
2114 /** @name Intermediate Context
2115 * @{ */
2116 /** Pointer to the intermediate page directory - Normal. */
2117 R3PTRTYPE(PX86PD) pInterPD;
 2118 /** Pointer to the intermediate page tables - Normal.
2119 * There are two page tables, one for the identity mapping and one for
2120 * the host context mapping (of the core code). */
2121 R3PTRTYPE(PX86PT) apInterPTs[2];
 2122 /** Pointer to the intermediate page tables - PAE. */
2123 R3PTRTYPE(PX86PTPAE) apInterPaePTs[2];
 2124 /** Pointer to the intermediate page directory - PAE. */
2125 R3PTRTYPE(PX86PDPAE) apInterPaePDs[4];
 2126 /** Pointer to the intermediate page directory pointer table - PAE. */
2127 R3PTRTYPE(PX86PDPT) pInterPaePDPT;
 2128 /** Pointer to the intermediate page-map level 4 - AMD64. */
2129 R3PTRTYPE(PX86PML4) pInterPaePML4;
 2130 /** Pointer to the intermediate page directory pointer table - AMD64. */
2131 R3PTRTYPE(PX86PDPT) pInterPaePDPT64;
2132 /** The Physical Address (HC) of the intermediate Page Directory - Normal. */
2133 RTHCPHYS HCPhysInterPD;
2134 /** The Physical Address (HC) of the intermediate Page Directory Pointer Table - PAE. */
2135 RTHCPHYS HCPhysInterPaePDPT;
2136 /** The Physical Address (HC) of the intermediate Page Map Level 4 table - AMD64. */
2137 RTHCPHYS HCPhysInterPaePML4;
2138 /** @} */
2139
2140 /** Base address of the dynamic page mapping area.
2141 * The array is MM_HYPER_DYNAMIC_SIZE bytes big.
2142 */
2143 GCPTRTYPE(uint8_t *) pbDynPageMapBaseGC;
2144 /** The index of the last entry used in the dynamic page mapping area. */
2145 RTUINT iDynPageMapLast;
2146 /** Cache containing the last entries in the dynamic page mapping area.
2147 * The cache size is covering half of the mapping area. */
2148 RTHCPHYS aHCPhysDynPageMapCache[MM_HYPER_DYNAMIC_SIZE >> (PAGE_SHIFT + 1)];
2149
2150 /** A20 gate mask.
2151 * Our current approach to A20 emulation is to let REM do it and don't bother
2152 * anywhere else. The interesting Guests will be operating with it enabled anyway.
 2153 * But should the need arise, we'll subject physical addresses to this mask. */
2154 RTGCPHYS GCPhysA20Mask;
2155 /** A20 gate state - boolean! */
2156 RTUINT fA20Enabled;
2157
2158 /** What needs syncing (PGM_SYNC_*).
2159 * This is used to queue operations for PGMSyncCR3, PGMInvalidatePage,
2160 * PGMFlushTLB, and PGMR3Load. */
2161 RTUINT fSyncFlags;
2162
2163 /** PGM critical section.
2164 * This protects the physical & virtual access handlers, ram ranges,
2165 * and the page flag updating (some of it anyway).
2166 */
2167 PDMCRITSECT CritSect;
2168
2169 /** Shadow Page Pool - HC Ptr. */
2170 R3R0PTRTYPE(PPGMPOOL) pPoolHC;
2171 /** Shadow Page Pool - GC Ptr. */
2172 GCPTRTYPE(PPGMPOOL) pPoolGC;
2173
2174 /** We're not in a state which permits writes to guest memory.
2175 * (Only used in strict builds.) */
2176 bool fNoMorePhysWrites;
2177
2178 /** Flush the cache on the next access. */
2179 bool fPhysCacheFlushPending;
2180/** @todo r=bird: Fix member names!*/
2181 /** PGMPhysRead cache */
2182 PGMPHYSCACHE pgmphysreadcache;
2183 /** PGMPhysWrite cache */
2184 PGMPHYSCACHE pgmphyswritecache;
2185
2186 /**
2187 * Data associated with managing the ring-3 mappings of the allocation chunks.
2188 */
2189 struct
2190 {
2191 /** The chunk tree, ordered by chunk id. */
2192 R3R0PTRTYPE(PAVLU32NODECORE) pTree;
2193 /** The chunk mapping TLB. */
2194 PGMCHUNKR3MAPTLB Tlb;
2195 /** The number of mapped chunks. */
2196 uint32_t c;
2197 /** The maximum number of mapped chunks.
2198 * @cfgm PGM/MaxRing3Chunks */
2199 uint32_t cMax;
2200 /** The chunk age tree, ordered by ageing sequence number. */
2201 R3PTRTYPE(PAVLLU32NODECORE) pAgeTree;
2202 /** The current time. */
2203 uint32_t iNow;
2204 /** Number of pgmR3PhysChunkFindUnmapCandidate calls left to the next ageing. */
2205 uint32_t AgeingCountdown;
2206 } ChunkR3Map;
2207
2208 /**
2209 * The page mapping TLB for ring-3 and (for the time being) ring-0.
2210 */
2211 PGMPAGER3MAPTLB PhysTlbHC;
2212
2213 /** @name The zero page.
2214 * @{ */
2215 /** The host physical address of the zero page. */
2216 RTHCPHYS HCPhysZeroPg;
2217 /** The ring-3 mapping of the zero page. */
2218 RTR3PTR pvZeroPgR3;
2219 /** The ring-0 mapping of the zero page. */
2220 RTR0PTR pvZeroPgR0;
2221 /** The GC mapping of the zero page. */
2222 RTGCPTR pvZeroPgGC;
2223#if GC_ARCH_BITS != 32
2224 uint32_t u32ZeroAlignment; /**< Alignment padding. */
2225#endif
2226 /** @}*/
2227
2228 /** The number of handy pages. */
2229 uint32_t cHandyPages;
2230 /**
2231 * Array of handy pages.
2232 *
2233 * This array is used in a two way communication between pgmPhysAllocPage
2234 * and GMMR0AllocateHandyPages, with PGMR3PhysAllocateHandyPages serving as
2235 * an intermediary.
2236 *
2237 * The size of this array is important, see pgmPhysEnsureHandyPage for details.
2238 * (The current size of 32 pages, means 128 KB of handy memory.)
2239 */
2240 GMMPAGEDESC aHandyPages[32];
2241
2242 /** @name Release Statistics
2243 * @{ */
2244 uint32_t cAllPages; /**< The total number of pages. (Should be Private + Shared + Zero.) */
2245 uint32_t cPrivatePages; /**< The number of private pages. */
2246 uint32_t cSharedPages; /**< The number of shared pages. */
2247 uint32_t cZeroPages; /**< The number of zero backed pages. */
2248 /** The number of times the guest has switched mode since last reset or statistics reset. */
2249 STAMCOUNTER cGuestModeChanges;
2250 /** @} */
2251
2252#ifdef VBOX_WITH_STATISTICS
2253 /** GC: Which statistic this \#PF should be attributed to. */
2254 GCPTRTYPE(PSTAMPROFILE) pStatTrap0eAttributionGC;
2255 RTGCPTR padding0;
2256 /** HC: Which statistic this \#PF should be attributed to. */
2257 R3R0PTRTYPE(PSTAMPROFILE) pStatTrap0eAttributionHC;
2258 RTHCPTR padding1;
2259 STAMPROFILE StatGCTrap0e; /**< GC: PGMGCTrap0eHandler() profiling. */
2260 STAMPROFILE StatTrap0eCSAM; /**< Profiling of the Trap0eHandler body when the cause is CSAM. */
2261 STAMPROFILE StatTrap0eDirtyAndAccessedBits; /**< Profiling of the Trap0eHandler body when the cause is dirty and/or accessed bit emulation. */
2262 STAMPROFILE StatTrap0eGuestTrap; /**< Profiling of the Trap0eHandler body when the cause is a guest trap. */
2263 STAMPROFILE StatTrap0eHndPhys; /**< Profiling of the Trap0eHandler body when the cause is a physical handler. */
2264 STAMPROFILE StatTrap0eHndVirt; /**< Profiling of the Trap0eHandler body when the cause is a virtual handler. */
2265 STAMPROFILE StatTrap0eHndUnhandled; /**< Profiling of the Trap0eHandler body when the cause is access outside the monitored areas of a monitored page. */
2266 STAMPROFILE StatTrap0eMisc; /**< Profiling of the Trap0eHandler body when the cause is not known. */
2267 STAMPROFILE StatTrap0eOutOfSync; /**< Profiling of the Trap0eHandler body when the cause is an out-of-sync page. */
2268 STAMPROFILE StatTrap0eOutOfSyncHndPhys; /**< Profiling of the Trap0eHandler body when the cause is an out-of-sync physical handler page. */
2269 STAMPROFILE StatTrap0eOutOfSyncHndVirt; /**< Profiling of the Trap0eHandler body when the cause is an out-of-sync virtual handler page. */
2270 STAMPROFILE StatTrap0eOutOfSyncObsHnd; /**< Profiling of the Trap0eHandler body when the cause is an obsolete handler page. */
2271 STAMPROFILE StatTrap0eSyncPT; /**< Profiling of the Trap0eHandler body when the cause is lazy syncing of a PT. */
2272
2273 STAMCOUNTER StatTrap0eMapHandler; /**< Number of traps due to access handlers in mappings. */
2274 STAMCOUNTER StatGCTrap0eConflicts; /**< GC: The number of times \#PF was caused by an undetected conflict. */
2275
2276 STAMCOUNTER StatGCTrap0eUSNotPresentRead;
2277 STAMCOUNTER StatGCTrap0eUSNotPresentWrite;
2278 STAMCOUNTER StatGCTrap0eUSWrite;
2279 STAMCOUNTER StatGCTrap0eUSReserved;
2280 STAMCOUNTER StatGCTrap0eUSNXE;
2281 STAMCOUNTER StatGCTrap0eUSRead;
2282
2283 STAMCOUNTER StatGCTrap0eSVNotPresentRead;
2284 STAMCOUNTER StatGCTrap0eSVNotPresentWrite;
2285 STAMCOUNTER StatGCTrap0eSVWrite;
2286 STAMCOUNTER StatGCTrap0eSVReserved;
2287 STAMCOUNTER StatGCTrap0eSNXE;
2288
2289 STAMCOUNTER StatTrap0eWPEmulGC;
2290 STAMCOUNTER StatTrap0eWPEmulR3;
2291
2292 STAMCOUNTER StatGCTrap0eUnhandled;
2293 STAMCOUNTER StatGCTrap0eMap;
2294
2295 /** GC: PGMSyncPT() profiling. */
2296 STAMPROFILE StatGCSyncPT;
2297 /** GC: The number of times PGMSyncPT() needed to allocate page tables. */
2298 STAMCOUNTER StatGCSyncPTAlloc;
2299 /** GC: The number of times PGMSyncPT() detected conflicts. */
2300 STAMCOUNTER StatGCSyncPTConflict;
2301 /** GC: The number of times PGMSyncPT() failed. */
2302 STAMCOUNTER StatGCSyncPTFailed;
2303 /** GC: PGMGCInvalidatePage() profiling. */
2304 STAMPROFILE StatGCInvalidatePage;
2305 /** GC: The number of times PGMGCInvalidatePage() was called for a 4KB page. */
2306 STAMCOUNTER StatGCInvalidatePage4KBPages;
2307 /** GC: The number of times PGMGCInvalidatePage() was called for a 4MB page. */
2308 STAMCOUNTER StatGCInvalidatePage4MBPages;
2309 /** GC: The number of times PGMGCInvalidatePage() skipped a 4MB page. */
2310 STAMCOUNTER StatGCInvalidatePage4MBPagesSkip;
2311 /** GC: The number of times PGMGCInvalidatePage() was called for a not accessed page directory. */
2312 STAMCOUNTER StatGCInvalidatePagePDNAs;
2313 /** GC: The number of times PGMGCInvalidatePage() was called for a not present page directory. */
2314 STAMCOUNTER StatGCInvalidatePagePDNPs;
2315 /** GC: The number of times PGMGCInvalidatePage() was called for a page directory containing mappings (no conflict). */
2316 STAMCOUNTER StatGCInvalidatePagePDMappings;
2317 /** GC: The number of times PGMGCInvalidatePage() was called for an out of sync page directory. */
2318 STAMCOUNTER StatGCInvalidatePagePDOutOfSync;
 2319 /** GC: The number of times PGMGCInvalidatePage() was skipped due to a not-present shadow PDE or a pending SyncCR3. */
2320 STAMCOUNTER StatGCInvalidatePageSkipped;
2321 /** GC: The number of times user page is out of sync was detected in GC. */
2322 STAMCOUNTER StatGCPageOutOfSyncUser;
2323 /** GC: The number of times supervisor page is out of sync was detected in GC. */
2324 STAMCOUNTER StatGCPageOutOfSyncSupervisor;
 2325 /** GC: The number of dynamic page mapping cache misses */
2326 STAMCOUNTER StatDynMapCacheMisses;
2327 /** GC: The number of dynamic page mapping cache misses */
2328 STAMCOUNTER StatDynMapCacheHits;
2329 /** GC: The number of times pgmGCGuestPDWriteHandler() was successfully called. */
2330 STAMCOUNTER StatGCGuestCR3WriteHandled;
2331 /** GC: The number of times pgmGCGuestPDWriteHandler() was called and we had to fall back to the recompiler. */
2332 STAMCOUNTER StatGCGuestCR3WriteUnhandled;
2333 /** GC: The number of times pgmGCGuestPDWriteHandler() was called and a conflict was detected. */
2334 STAMCOUNTER StatGCGuestCR3WriteConflict;
2335 /** GC: Number of out-of-sync handled pages. */
2336 STAMCOUNTER StatHandlersOutOfSync;
2337 /** GC: Number of traps due to physical access handlers. */
2338 STAMCOUNTER StatHandlersPhysical;
2339 /** GC: Number of traps due to virtual access handlers. */
2340 STAMCOUNTER StatHandlersVirtual;
2341 /** GC: Number of traps due to virtual access handlers found by physical address. */
2342 STAMCOUNTER StatHandlersVirtualByPhys;
2343 /** GC: Number of traps due to virtual access handlers found by virtual address (without proper physical flags). */
2344 STAMCOUNTER StatHandlersVirtualUnmarked;
2345 /** GC: Number of traps due to access outside range of monitored page(s). */
2346 STAMCOUNTER StatHandlersUnhandled;
2347 /** GC: Number of traps due to access to invalid physical memory. */
2348 STAMCOUNTER StatHandlersInvalid;
2349
2350 /** GC: The number of times pgmGCGuestROMWriteHandler() was successfully called. */
2351 STAMCOUNTER StatGCGuestROMWriteHandled;
2352 /** GC: The number of times pgmGCGuestROMWriteHandler() was called and we had to fall back to the recompiler */
2353 STAMCOUNTER StatGCGuestROMWriteUnhandled;
2354
2355 /** HC: PGMR3InvalidatePage() profiling. */
2356 STAMPROFILE StatHCInvalidatePage;
2357 /** HC: The number of times PGMR3InvalidatePage() was called for a 4KB page. */
2358 STAMCOUNTER StatHCInvalidatePage4KBPages;
2359 /** HC: The number of times PGMR3InvalidatePage() was called for a 4MB page. */
2360 STAMCOUNTER StatHCInvalidatePage4MBPages;
2361 /** HC: The number of times PGMR3InvalidatePage() skipped a 4MB page. */
2362 STAMCOUNTER StatHCInvalidatePage4MBPagesSkip;
2363 /** HC: The number of times PGMR3InvalidatePage() was called for a not accessed page directory. */
2364 STAMCOUNTER StatHCInvalidatePagePDNAs;
2365 /** HC: The number of times PGMR3InvalidatePage() was called for a not present page directory. */
2366 STAMCOUNTER StatHCInvalidatePagePDNPs;
2367 /** HC: The number of times PGMR3InvalidatePage() was called for a page directory containing mappings (no conflict). */
2368 STAMCOUNTER StatHCInvalidatePagePDMappings;
2369 /** HC: The number of times PGMGCInvalidatePage() was called for an out of sync page directory. */
2370 STAMCOUNTER StatHCInvalidatePagePDOutOfSync;
2371 /** HC: The number of times PGMR3InvalidatePage() was skipped due to not present shw or pending pending SyncCR3. */
2372 STAMCOUNTER StatHCInvalidatePageSkipped;
2373 /** HC: PGMR3SyncPT() profiling. */
2374 STAMPROFILE StatHCSyncPT;
2375 /** HC: pgmr3SyncPTResolveConflict() profiling (includes the entire relocation). */
2376 STAMPROFILE StatHCResolveConflict;
2377 /** HC: Number of times PGMR3CheckMappingConflicts() detected a conflict. */
2378 STAMCOUNTER StatHCDetectedConflicts;
2379 /** HC: The total number of times pgmHCGuestPDWriteHandler() was called. */
2380 STAMCOUNTER StatHCGuestPDWrite;
2381 /** HC: The number of times pgmHCGuestPDWriteHandler() detected a conflict */
2382 STAMCOUNTER StatHCGuestPDWriteConflict;
2383
2384 /** HC: The number of pages marked not present for accessed bit emulation. */
2385 STAMCOUNTER StatHCAccessedPage;
2386 /** HC: The number of pages marked read-only for dirty bit tracking. */
2387 STAMCOUNTER StatHCDirtyPage;
2388 /** HC: The number of pages marked read-only for dirty bit tracking. */
2389 STAMCOUNTER StatHCDirtyPageBig;
2390 /** HC: The number of traps generated for dirty bit tracking. */
2391 STAMCOUNTER StatHCDirtyPageTrap;
2392 /** HC: The number of pages already dirty or readonly. */
2393 STAMCOUNTER StatHCDirtyPageSkipped;
2394
2395 /** GC: The number of pages marked not present for accessed bit emulation. */
2396 STAMCOUNTER StatGCAccessedPage;
2397 /** GC: The number of pages marked read-only for dirty bit tracking. */
2398 STAMCOUNTER StatGCDirtyPage;
2399 /** GC: The number of pages marked read-only for dirty bit tracking. */
2400 STAMCOUNTER StatGCDirtyPageBig;
2401 /** GC: The number of traps generated for dirty bit tracking. */
2402 STAMCOUNTER StatGCDirtyPageTrap;
2403 /** GC: The number of pages already dirty or readonly. */
2404 STAMCOUNTER StatGCDirtyPageSkipped;
2405 /** GC: The number of pages marked dirty because of write accesses. */
2406 STAMCOUNTER StatGCDirtiedPage;
2407 /** GC: The number of pages already marked dirty because of write accesses. */
2408 STAMCOUNTER StatGCPageAlreadyDirty;
2409 /** GC: The number of real pages faults during dirty bit tracking. */
2410 STAMCOUNTER StatGCDirtyTrackRealPF;
2411
2412 /** GC: Profiling of the PGMTrackDirtyBit() body */
2413 STAMPROFILE StatGCDirtyBitTracking;
2414 /** HC: Profiling of the PGMTrackDirtyBit() body */
2415 STAMPROFILE StatHCDirtyBitTracking;
2416
2417 /** GC: Profiling of the PGMGstModifyPage() body */
2418 STAMPROFILE StatGCGstModifyPage;
2419 /** HC: Profiling of the PGMGstModifyPage() body */
2420 STAMPROFILE StatHCGstModifyPage;
2421
2422 /** GC: The number of time we've marked a PD not present from SyncPage to virtualize the accessed bit. */
2423 STAMCOUNTER StatGCSyncPagePDNAs;
2424 /** GC: The number of time we've encountered an out-of-sync PD in SyncPage. */
2425 STAMCOUNTER StatGCSyncPagePDOutOfSync;
2426 /** HC: The number of time we've marked a PD not present from SyncPage to virtualize the accessed bit. */
2427 STAMCOUNTER StatHCSyncPagePDNAs;
2428 /** HC: The number of time we've encountered an out-of-sync PD in SyncPage. */
2429 STAMCOUNTER StatHCSyncPagePDOutOfSync;
2430
2431 STAMCOUNTER StatSynPT4kGC;
2432 STAMCOUNTER StatSynPT4kHC;
2433 STAMCOUNTER StatSynPT4MGC;
2434 STAMCOUNTER StatSynPT4MHC;
2435
2436 /** Profiling of the PGMFlushTLB() body. */
2437 STAMPROFILE StatFlushTLB;
2438 /** The number of times PGMFlushTLB was called with a new CR3, non-global. (switch) */
2439 STAMCOUNTER StatFlushTLBNewCR3;
2440 /** The number of times PGMFlushTLB was called with a new CR3, global. (switch) */
2441 STAMCOUNTER StatFlushTLBNewCR3Global;
2442 /** The number of times PGMFlushTLB was called with the same CR3, non-global. (flush) */
2443 STAMCOUNTER StatFlushTLBSameCR3;
2444 /** The number of times PGMFlushTLB was called with the same CR3, global. (flush) */
2445 STAMCOUNTER StatFlushTLBSameCR3Global;
2446
2447 STAMPROFILE StatGCSyncCR3; /**< GC: PGMSyncCR3() profiling. */
2448 STAMPROFILE StatGCSyncCR3Handlers; /**< GC: Profiling of the PGMSyncCR3() update handler section. */
2449 STAMPROFILE StatGCSyncCR3HandlerVirtualReset; /**< GC: Profiling of the virtual handler resets. */
2450 STAMPROFILE StatGCSyncCR3HandlerVirtualUpdate; /**< GC: Profiling of the virtual handler updates. */
2451 STAMCOUNTER StatGCSyncCR3Global; /**< GC: The number of global CR3 syncs. */
2452 STAMCOUNTER StatGCSyncCR3NotGlobal; /**< GC: The number of non-global CR3 syncs. */
2453 STAMCOUNTER StatGCSyncCR3DstFreed; /**< GC: The number of times we've had to free a shadow entry. */
2454 STAMCOUNTER StatGCSyncCR3DstFreedSrcNP; /**< GC: The number of times we've had to free a shadow entry for which the source entry was not present. */
2455 STAMCOUNTER StatGCSyncCR3DstNotPresent; /**< GC: The number of times we've encountered a not present shadow entry for a present guest entry. */
2456 STAMCOUNTER StatGCSyncCR3DstSkippedGlobalPD; /**< GC: The number of times a global page directory wasn't flushed. */
2457 STAMCOUNTER StatGCSyncCR3DstSkippedGlobalPT; /**< GC: The number of times a page table with only global entries wasn't flushed. */
2458 STAMCOUNTER StatGCSyncCR3DstCacheHit; /**< GC: The number of times we got some kind of cache hit on a page table. */
2459
2460 STAMPROFILE StatHCSyncCR3; /**< HC: PGMSyncCR3() profiling. */
2461 STAMPROFILE StatHCSyncCR3Handlers; /**< HC: Profiling of the PGMSyncCR3() update handler section. */
2462 STAMPROFILE StatHCSyncCR3HandlerVirtualReset; /**< HC: Profiling of the virtual handler resets. */
2463 STAMPROFILE StatHCSyncCR3HandlerVirtualUpdate; /**< HC: Profiling of the virtual handler updates. */
2464 STAMCOUNTER StatHCSyncCR3Global; /**< HC: The number of global CR3 syncs. */
2465 STAMCOUNTER StatHCSyncCR3NotGlobal; /**< HC: The number of non-global CR3 syncs. */
2466 STAMCOUNTER StatHCSyncCR3DstFreed; /**< HC: The number of times we've had to free a shadow entry. */
2467 STAMCOUNTER StatHCSyncCR3DstFreedSrcNP; /**< HC: The number of times we've had to free a shadow entry for which the source entry was not present. */
2468 STAMCOUNTER StatHCSyncCR3DstNotPresent; /**< HC: The number of times we've encountered a not present shadow entry for a present guest entry. */
2469 STAMCOUNTER StatHCSyncCR3DstSkippedGlobalPD; /**< HC: The number of times a global page directory wasn't flushed. */
2470 STAMCOUNTER StatHCSyncCR3DstSkippedGlobalPT; /**< HC: The number of times a page table with only global entries wasn't flushed. */
2471 STAMCOUNTER StatHCSyncCR3DstCacheHit; /**< HC: The number of times we got some kind of cache hit on a page table. */
2472
2473 /** GC: Profiling of pgmHandlerVirtualFindByPhysAddr. */
2474 STAMPROFILE StatVirtHandleSearchByPhysGC;
2475 /** HC: Profiling of pgmHandlerVirtualFindByPhysAddr. */
2476 STAMPROFILE StatVirtHandleSearchByPhysHC;
2477 /** HC: The number of times PGMR3HandlerPhysicalReset is called. */
2478 STAMCOUNTER StatHandlePhysicalReset;
2479
2480 STAMPROFILE StatCheckPageFault;
2481 STAMPROFILE StatLazySyncPT;
2482 STAMPROFILE StatMapping;
2483 STAMPROFILE StatOutOfSync;
2484 STAMPROFILE StatHandlers;
2485 STAMPROFILE StatEIPHandlers;
2486 STAMPROFILE StatHCPrefetch;
2487
2488# ifdef PGMPOOL_WITH_GCPHYS_TRACKING
2489 /** The number of first time shadowings. */
2490 STAMCOUNTER StatTrackVirgin;
2491 /** The number of times switching to cRef2, i.e. the page is being shadowed by two PTs. */
2492 STAMCOUNTER StatTrackAliased;
2493 /** The number of times we're tracking using cRef2. */
2494 STAMCOUNTER StatTrackAliasedMany;
2495 /** The number of times we're hitting pages which has overflowed cRef2. */
2496 STAMCOUNTER StatTrackAliasedLots;
2497 /** The number of times the extent list grows to long. */
2498 STAMCOUNTER StatTrackOverflows;
2499 /** Profiling of SyncPageWorkerTrackDeref (expensive). */
2500 STAMPROFILE StatTrackDeref;
2501# endif
2502
2503 /** Ring-3/0 page mapper TLB hits. */
2504 STAMCOUNTER StatPageHCMapTlbHits;
2505 /** Ring-3/0 page mapper TLB misses. */
2506 STAMCOUNTER StatPageHCMapTlbMisses;
2507 /** Ring-3/0 chunk mapper TLB hits. */
2508 STAMCOUNTER StatChunkR3MapTlbHits;
2509 /** Ring-3/0 chunk mapper TLB misses. */
2510 STAMCOUNTER StatChunkR3MapTlbMisses;
2511 /** Times a shared page has been replaced by a private one. */
2512 STAMCOUNTER StatPageReplaceShared;
2513 /** Times the zero page has been replaced by a private one. */
2514 STAMCOUNTER StatPageReplaceZero;
2515 /** The number of times we've executed GMMR3AllocateHandyPages. */
2516 STAMCOUNTER StatPageHandyAllocs;
2517
2518 /** Allocated mbs of guest ram */
2519 STAMCOUNTER StatDynRamTotal;
2520 /** Nr of pgmr3PhysGrowRange calls. */
2521 STAMCOUNTER StatDynRamGrow;
2522
2523 STAMCOUNTER StatGCTrap0ePD[X86_PG_ENTRIES];
2524 STAMCOUNTER StatGCSyncPtPD[X86_PG_ENTRIES];
2525 STAMCOUNTER StatGCSyncPagePD[X86_PG_ENTRIES];
2526#endif
2527} PGM, *PPGM;
2528
2529
/** @name PGM::fSyncFlags Flags
 * @{
 */
/** Updates the virtual access handler state bit in PGMPAGE. */
#define PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL RT_BIT(0)
/** Always sync CR3. */
#define PGM_SYNC_ALWAYS RT_BIT(1)
/** Check monitoring on next CR3 (re)load and invalidate page. */
#define PGM_SYNC_MONITOR_CR3 RT_BIT(2)
/** Clear the page pool (a light weight flush).
 * Note: bits 3 thru 7 are currently unused; this flag deliberately sits at bit 8. */
#define PGM_SYNC_CLEAR_PGM_POOL RT_BIT(8)
/** @} */
2542
2543
__BEGIN_DECLS

/* The PGM lock - serializes page table and physical memory operations. */
int pgmLock(PVM pVM);
void pgmUnlock(PVM pVM);

/* Guest page directory / ROM write handlers and mode switching. */
PGMGCDECL(int) pgmGCGuestPDWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, void *pvFault, RTGCPHYS GCPhysFault, void *pvUser);
PGMDECL(int) pgmPhysRomWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, void *pvFault, RTGCPHYS GCPhysFault, void *pvUser);
int pgmR3ChangeMode(PVM pVM, PGMMODE enmGuestMode);

/* Hypervisor mapping management and mapping-conflict resolution. */
int pgmR3SyncPTResolveConflict(PVM pVM, PPGMMAPPING pMapping, PX86PD pPDSrc, RTGCPTR GCPtrOldMapping);
int pgmR3SyncPTResolveConflictPAE(PVM pVM, PPGMMAPPING pMapping, RTGCPTR GCPtrOldMapping);
PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr);
void pgmR3MapRelocate(PVM pVM, PPGMMAPPING pMapping, RTGCPTR GCPtrOldMapping, RTGCPTR GCPtrNewMapping);
DECLCALLBACK(void) pgmR3MapInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);

/* Physical and virtual access handler internals. */
void pgmR3HandlerPhysicalUpdateAll(PVM pVM);
int pgmHandlerVirtualFindByPhysAddr(PVM pVM, RTGCPHYS GCPhys, PPGMVIRTHANDLER *ppVirt, unsigned *piPage);
DECLCALLBACK(int) pgmHandlerVirtualResetOne(PAVLROGCPTRNODECORE pNode, void *pvUser);
#if defined(VBOX_STRICT) || defined(LOG_ENABLED)
void pgmHandlerVirtualDumpPhysPages(PVM pVM);
#else
# define pgmHandlerVirtualDumpPhysPages(a) do { } while (0)
#endif
DECLCALLBACK(void) pgmR3InfoHandlers(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);


/* Physical memory internals (page freeing, TLB loading, write access, mapping). */
void pgmPhysFreePage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys);
int pgmPhysPageLoadIntoTlb(PPGM pPGM, RTGCPHYS GCPhys);
int pgmPhysPageMakeWritable(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys);
int pgmPhysPageMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv);
#ifdef IN_RING3
/* Ring-3 only: chunk mapping and RAM/ROM reset. */
int pgmR3PhysChunkMap(PVM pVM, uint32_t idChunk, PPPGMCHUNKR3MAP ppChunk);
int pgmR3PhysRamReset(PVM pVM);
int pgmR3PhysRomReset(PVM pVM);
#ifndef VBOX_WITH_NEW_PHYS_CODE
int pgmr3PhysGrowRange(PVM pVM, RTGCPHYS GCPhys);
#endif

/* Ring-3 only: shadow page pool lifecycle. */
int pgmR3PoolInit(PVM pVM);
void pgmR3PoolRelocate(PVM pVM);
void pgmR3PoolReset(PVM pVM);

#endif /* IN_RING3 */
#ifdef IN_GC
void *pgmGCPoolMapPage(PVM pVM, PPGMPOOLPAGE pPage);
#endif
/* Shadow page pool allocation, lookup, freeing and flushing. */
int pgmPoolAlloc(PVM pVM, RTGCPHYS GCPhys, PGMPOOLKIND enmKind, uint16_t iUser, uint16_t iUserTable, PPPGMPOOLPAGE ppPage);
PPGMPOOLPAGE pgmPoolGetPageByHCPhys(PVM pVM, RTHCPHYS HCPhys);
void pgmPoolFree(PVM pVM, RTHCPHYS HCPhys, uint16_t iUser, uint16_t iUserTable);
void pgmPoolFreeByPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage, uint16_t iUser, uint16_t iUserTable);
int pgmPoolFlushPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage);
void pgmPoolFlushAll(PVM pVM);
void pgmPoolClearAll(PVM pVM);
/* Guest physical page tracking (reverse mapping from physical pages to shadow PTs). */
void pgmPoolTrackFlushGCPhysPT(PVM pVM, PPGMPAGE pPhysPage, uint16_t iShw, uint16_t cRefs);
void pgmPoolTrackFlushGCPhysPTs(PVM pVM, PPGMPAGE pPhysPage, uint16_t iPhysExt);
int pgmPoolTrackFlushGCPhysPTsSlow(PVM pVM, PPGMPAGE pPhysPage);
PPGMPOOLPHYSEXT pgmPoolTrackPhysExtAlloc(PVM pVM, uint16_t *piPhysExt);
void pgmPoolTrackPhysExtFree(PVM pVM, uint16_t iPhysExt);
void pgmPoolTrackPhysExtFreeList(PVM pVM, uint16_t iPhysExt);
uint16_t pgmPoolTrackPhysExtAddref(PVM pVM, uint16_t u16, uint16_t iShwPT);
void pgmPoolTrackPhysExtDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPoolPage, PPGMPAGE pPhysPage);
#ifdef PGMPOOL_WITH_MONITORING
/* Pool page write monitoring (the pvAddress type differs between contexts). */
# ifdef IN_RING3
void pgmPoolMonitorChainChanging(PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTGCPHYS GCPhysFault, RTHCPTR pvAddress, PDISCPUSTATE pCpu);
# else
void pgmPoolMonitorChainChanging(PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTGCPHYS GCPhysFault, RTGCPTR pvAddress, PDISCPUSTATE pCpu);
# endif
int pgmPoolMonitorChainFlush(PPGMPOOL pPool, PPGMPOOLPAGE pPage);
void pgmPoolMonitorModifiedInsert(PPGMPOOL pPool, PPGMPOOLPAGE pPage);
void pgmPoolMonitorModifiedClearAll(PVM pVM);
int pgmPoolMonitorMonitorCR3(PPGMPOOL pPool, uint16_t idxRoot, RTGCPHYS GCPhysCR3);
int pgmPoolMonitorUnmonitorCR3(PPGMPOOL pPool, uint16_t idxRoot);
#endif

__END_DECLS
2619
2620
2621/**
2622 * Gets the PGMRAMRANGE structure for a guest page.
2623 *
2624 * @returns Pointer to the RAM range on success.
2625 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
2626 *
2627 * @param pPGM PGM handle.
2628 * @param GCPhys The GC physical address.
2629 */
2630DECLINLINE(PPGMRAMRANGE) pgmPhysGetRange(PPGM pPGM, RTGCPHYS GCPhys)
2631{
2632 /*
2633 * Optimize for the first range.
2634 */
2635 PPGMRAMRANGE pRam = CTXALLSUFF(pPGM->pRamRanges);
2636 RTGCPHYS off = GCPhys - pRam->GCPhys;
2637 if (RT_UNLIKELY(off >= pRam->cb))
2638 {
2639 do
2640 {
2641 pRam = CTXALLSUFF(pRam->pNext);
2642 if (RT_UNLIKELY(!pRam))
2643 break;
2644 off = GCPhys - pRam->GCPhys;
2645 } while (off >= pRam->cb);
2646 }
2647 return pRam;
2648}
2649
2650
2651/**
2652 * Gets the PGMPAGE structure for a guest page.
2653 *
2654 * @returns Pointer to the page on success.
2655 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
2656 *
2657 * @param pPGM PGM handle.
2658 * @param GCPhys The GC physical address.
2659 */
2660DECLINLINE(PPGMPAGE) pgmPhysGetPage(PPGM pPGM, RTGCPHYS GCPhys)
2661{
2662 /*
2663 * Optimize for the first range.
2664 */
2665 PPGMRAMRANGE pRam = CTXALLSUFF(pPGM->pRamRanges);
2666 RTGCPHYS off = GCPhys - pRam->GCPhys;
2667 if (RT_UNLIKELY(off >= pRam->cb))
2668 {
2669 do
2670 {
2671 pRam = CTXALLSUFF(pRam->pNext);
2672 if (RT_UNLIKELY(!pRam))
2673 return NULL;
2674 off = GCPhys - pRam->GCPhys;
2675 } while (off >= pRam->cb);
2676 }
2677 return &pRam->aPages[off >> PAGE_SHIFT];
2678}
2679
2680
2681/**
2682 * Gets the PGMPAGE structure for a guest page.
2683 *
2684 * Old Phys code: Will make sure the page is present.
2685 *
2686 * @returns VBox status code.
2687 * @retval VINF_SUCCESS and a valid *ppPage on success.
2688 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
2689 *
2690 * @param pPGM PGM handle.
2691 * @param GCPhys The GC physical address.
2692 * @param ppPage Where to store the page poitner on success.
2693 */
2694DECLINLINE(int) pgmPhysGetPageEx(PPGM pPGM, RTGCPHYS GCPhys, PPPGMPAGE ppPage)
2695{
2696 /*
2697 * Optimize for the first range.
2698 */
2699 PPGMRAMRANGE pRam = CTXALLSUFF(pPGM->pRamRanges);
2700 RTGCPHYS off = GCPhys - pRam->GCPhys;
2701 if (RT_UNLIKELY(off >= pRam->cb))
2702 {
2703 do
2704 {
2705 pRam = CTXALLSUFF(pRam->pNext);
2706 if (RT_UNLIKELY(!pRam))
2707 {
2708 *ppPage = NULL; /* avoid incorrect and very annoying GCC warnings */
2709 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2710 }
2711 off = GCPhys - pRam->GCPhys;
2712 } while (off >= pRam->cb);
2713 }
2714 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
2715#ifndef VBOX_WITH_NEW_PHYS_CODE
2716
2717 /*
2718 * Make sure it's present.
2719 */
2720 if (RT_UNLIKELY( !PGM_PAGE_GET_HCPHYS(*ppPage)
2721 && (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)))
2722 {
2723#ifdef IN_RING3
2724 int rc = pgmr3PhysGrowRange(PGM2VM(pPGM), GCPhys);
2725#else
2726 int rc = CTXALLMID(VMM, CallHost)(PGM2VM(pPGM), VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
2727#endif
2728 if (VBOX_FAILURE(rc))
2729 {
2730 *ppPage = NULL; /* avoid incorrect and very annoying GCC warnings */
2731 return rc;
2732 }
2733 Assert(rc == VINF_SUCCESS);
2734 }
2735#endif
2736 return VINF_SUCCESS;
2737}
2738
2739
2740
2741
2742/**
2743 * Gets the PGMPAGE structure for a guest page.
2744 *
2745 * Old Phys code: Will make sure the page is present.
2746 *
2747 * @returns VBox status code.
2748 * @retval VINF_SUCCESS and a valid *ppPage on success.
2749 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
2750 *
2751 * @param pPGM PGM handle.
2752 * @param GCPhys The GC physical address.
2753 * @param ppPage Where to store the page poitner on success.
2754 * @param ppRamHint Where to read and store the ram list hint.
2755 * The caller initializes this to NULL before the call.
2756 */
2757DECLINLINE(int) pgmPhysGetPageWithHintEx(PPGM pPGM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRamHint)
2758{
2759 RTGCPHYS off;
2760 PPGMRAMRANGE pRam = *ppRamHint;
2761 if ( !pRam
2762 || RT_UNLIKELY((off = GCPhys - pRam->GCPhys) >= pRam->cb))
2763 {
2764 pRam = CTXALLSUFF(pPGM->pRamRanges);
2765 off = GCPhys - pRam->GCPhys;
2766 if (RT_UNLIKELY(off >= pRam->cb))
2767 {
2768 do
2769 {
2770 pRam = CTXALLSUFF(pRam->pNext);
2771 if (RT_UNLIKELY(!pRam))
2772 {
2773 *ppPage = NULL; /* Kill the incorrect and extremely annoying GCC warnings. */
2774 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2775 }
2776 off = GCPhys - pRam->GCPhys;
2777 } while (off >= pRam->cb);
2778 }
2779 *ppRamHint = pRam;
2780 }
2781 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
2782#ifndef VBOX_WITH_NEW_PHYS_CODE
2783
2784 /*
2785 * Make sure it's present.
2786 */
2787 if (RT_UNLIKELY( !PGM_PAGE_GET_HCPHYS(*ppPage)
2788 && (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)))
2789 {
2790#ifdef IN_RING3
2791 int rc = pgmr3PhysGrowRange(PGM2VM(pPGM), GCPhys);
2792#else
2793 int rc = CTXALLMID(VMM, CallHost)(PGM2VM(pPGM), VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
2794#endif
2795 if (VBOX_FAILURE(rc))
2796 {
2797 *ppPage = NULL; /* Shut up annoying smart ass. */
2798 return rc;
2799 }
2800 Assert(rc == VINF_SUCCESS);
2801 }
2802#endif
2803 return VINF_SUCCESS;
2804}
2805
2806
2807/**
2808 * Gets the PGMPAGE structure for a guest page together with the PGMRAMRANGE.
2809 *
2810 * @returns Pointer to the page on success.
2811 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
2812 *
2813 * @param pPGM PGM handle.
2814 * @param GCPhys The GC physical address.
2815 * @param ppRam Where to store the pointer to the PGMRAMRANGE.
2816 */
2817DECLINLINE(PPGMPAGE) pgmPhysGetPageAndRange(PPGM pPGM, RTGCPHYS GCPhys, PPGMRAMRANGE *ppRam)
2818{
2819 /*
2820 * Optimize for the first range.
2821 */
2822 PPGMRAMRANGE pRam = CTXALLSUFF(pPGM->pRamRanges);
2823 RTGCPHYS off = GCPhys - pRam->GCPhys;
2824 if (RT_UNLIKELY(off >= pRam->cb))
2825 {
2826 do
2827 {
2828 pRam = CTXALLSUFF(pRam->pNext);
2829 if (RT_UNLIKELY(!pRam))
2830 return NULL;
2831 off = GCPhys - pRam->GCPhys;
2832 } while (off >= pRam->cb);
2833 }
2834 *ppRam = pRam;
2835 return &pRam->aPages[off >> PAGE_SHIFT];
2836}
2837
2838
2839
2840
2841/**
2842 * Gets the PGMPAGE structure for a guest page together with the PGMRAMRANGE.
2843 *
2844 * @returns Pointer to the page on success.
2845 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
2846 *
2847 * @param pPGM PGM handle.
2848 * @param GCPhys The GC physical address.
2849 * @param ppPage Where to store the pointer to the PGMPAGE structure.
2850 * @param ppRam Where to store the pointer to the PGMRAMRANGE structure.
2851 */
2852DECLINLINE(int) pgmPhysGetPageAndRangeEx(PPGM pPGM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam)
2853{
2854 /*
2855 * Optimize for the first range.
2856 */
2857 PPGMRAMRANGE pRam = CTXALLSUFF(pPGM->pRamRanges);
2858 RTGCPHYS off = GCPhys - pRam->GCPhys;
2859 if (RT_UNLIKELY(off >= pRam->cb))
2860 {
2861 do
2862 {
2863 pRam = CTXALLSUFF(pRam->pNext);
2864 if (RT_UNLIKELY(!pRam))
2865 {
2866 *ppRam = NULL; /* Shut up silly GCC warnings. */
2867 *ppPage = NULL; /* ditto */
2868 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2869 }
2870 off = GCPhys - pRam->GCPhys;
2871 } while (off >= pRam->cb);
2872 }
2873 *ppRam = pRam;
2874 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
2875#ifndef VBOX_WITH_NEW_PHYS_CODE
2876
2877 /*
2878 * Make sure it's present.
2879 */
2880 if (RT_UNLIKELY( !PGM_PAGE_GET_HCPHYS(*ppPage)
2881 && (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)))
2882 {
2883#ifdef IN_RING3
2884 int rc = pgmr3PhysGrowRange(PGM2VM(pPGM), GCPhys);
2885#else
2886 int rc = CTXALLMID(VMM, CallHost)(PGM2VM(pPGM), VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
2887#endif
2888 if (VBOX_FAILURE(rc))
2889 {
2890 *ppPage = NULL; /* Shut up silly GCC warnings. */
2891 *ppPage = NULL; /* ditto */
2892 return rc;
2893 }
2894 Assert(rc == VINF_SUCCESS);
2895
2896 }
2897#endif
2898 return VINF_SUCCESS;
2899}
2900
2901
2902/**
2903 * Convert GC Phys to HC Phys.
2904 *
2905 * @returns VBox status.
2906 * @param pPGM PGM handle.
2907 * @param GCPhys The GC physical address.
2908 * @param pHCPhys Where to store the corresponding HC physical address.
2909 *
2910 * @deprecated Doesn't deal with zero, shared or write monitored pages.
2911 * Avoid when writing new code!
2912 */
2913DECLINLINE(int) pgmRamGCPhys2HCPhys(PPGM pPGM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
2914{
2915 PPGMPAGE pPage;
2916 int rc = pgmPhysGetPageEx(pPGM, GCPhys, &pPage);
2917 if (VBOX_FAILURE(rc))
2918 return rc;
2919 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
2920 return VINF_SUCCESS;
2921}
2922
2923
#ifndef IN_GC
/**
 * Queries the Physical TLB entry for a physical guest page,
 * attempting to load the TLB entry if necessary.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success
 * @retval  VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
 * @param   pPGM        The PGM instance handle.
 * @param   GCPhys      The address of the guest page.
 * @param   ppTlbe      Where to store the pointer to the TLB entry.
 */

DECLINLINE(int) pgmPhysPageQueryTlbe(PPGM pPGM, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe)
{
    int rc;
    /* Hash the address into the direct-mapped TLB and check for a hit. */
    PPGMPAGEMAPTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
    if (pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK))
    {
        STAM_COUNTER_INC(&pPGM->CTXMID(StatPage,MapTlbHits));
        rc = VINF_SUCCESS;
    }
    else
        /* Miss: load the entry; on success this fills the slot pTlbe points at. */
        rc = pgmPhysPageLoadIntoTlb(pPGM, GCPhys);
    *ppTlbe = pTlbe;
    return rc;
}
#endif /* !IN_GC */
2952
2953
#ifndef VBOX_WITH_NEW_PHYS_CODE
/**
 * Convert GC Phys to HC Virt.
 *
 * @returns VBox status.
 * @param   pPGM        PGM handle.
 * @param   GCPhys      The GC physical address.
 * @param   pHCPtr      Where to store the corresponding HC virtual address.
 *
 * @deprecated  This will be eliminated by PGMPhysGCPhys2CCPtr.
 */
DECLINLINE(int) pgmRamGCPhys2HCPtr(PPGM pPGM, RTGCPHYS GCPhys, PRTHCPTR pHCPtr)
{
    PPGMRAMRANGE pRam;
    PPGMPAGE pPage;
    int rc = pgmPhysGetPageAndRangeEx(pPGM, GCPhys, &pPage, &pRam);
    if (VBOX_FAILURE(rc))
    {
        *pHCPtr = 0; /* Shut up silly GCC warnings. */
        return rc;
    }
    RTGCPHYS off = GCPhys - pRam->GCPhys;

    /* Dynamically allocated ranges are mapped in per-chunk pieces. */
    if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
    {
        unsigned iChunk = off >> PGM_DYNAMIC_CHUNK_SHIFT;
        *pHCPtr = (RTHCPTR)((RTHCUINTPTR)CTXSUFF(pRam->pavHCChunk)[iChunk] + (off & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
        return VINF_SUCCESS;
    }
    /* Contiguously mapped ranges have a single base pointer. */
    if (pRam->pvHC)
    {
        *pHCPtr = (RTHCPTR)((RTHCUINTPTR)pRam->pvHC + off);
        return VINF_SUCCESS;
    }
    /* Neither chunked nor contiguously mapped - no HC mapping available. */
    *pHCPtr = 0; /* Shut up silly GCC warnings. */
    return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
}
#endif /* !VBOX_WITH_NEW_PHYS_CODE */
2992
2993
2994/**
2995 * Convert GC Phys to HC Virt.
2996 *
2997 * @returns VBox status.
2998 * @param PVM VM handle.
2999 * @param pRam Ram range
3000 * @param GCPhys The GC physical address.
3001 * @param pHCPtr Where to store the corresponding HC virtual address.
3002 *
3003 * @deprecated This will be eliminated. Don't use it.
3004 */
3005DECLINLINE(int) pgmRamGCPhys2HCPtrWithRange(PVM pVM, PPGMRAMRANGE pRam, RTGCPHYS GCPhys, PRTHCPTR pHCPtr)
3006{
3007 RTGCPHYS off = GCPhys - pRam->GCPhys;
3008 Assert(off < pRam->cb);
3009
3010 if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
3011 {
3012 unsigned idx = (off >> PGM_DYNAMIC_CHUNK_SHIFT);
3013 /* Physical chunk in dynamically allocated range not present? */
3014 if (RT_UNLIKELY(!CTXSUFF(pRam->pavHCChunk)[idx]))
3015 {
3016#ifdef IN_RING3
3017 int rc = pgmr3PhysGrowRange(pVM, GCPhys);
3018#else
3019 int rc = CTXALLMID(VMM, CallHost)(pVM, VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
3020#endif
3021 if (rc != VINF_SUCCESS)
3022 {
3023 *pHCPtr = 0; /* GCC crap */
3024 return rc;
3025 }
3026 }
3027 *pHCPtr = (RTHCPTR)((RTHCUINTPTR)CTXSUFF(pRam->pavHCChunk)[idx] + (off & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
3028 return VINF_SUCCESS;
3029 }
3030 if (pRam->pvHC)
3031 {
3032 *pHCPtr = (RTHCPTR)((RTHCUINTPTR)pRam->pvHC + off);
3033 return VINF_SUCCESS;
3034 }
3035 *pHCPtr = 0; /* GCC crap */
3036 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
3037}
3038
3039
3040/**
3041 * Convert GC Phys to HC Virt and HC Phys.
3042 *
3043 * @returns VBox status.
3044 * @param pPGM PGM handle.
3045 * @param GCPhys The GC physical address.
3046 * @param pHCPtr Where to store the corresponding HC virtual address.
3047 * @param pHCPhys Where to store the HC Physical address and its flags.
3048 *
3049 * @deprecated Will go away or be changed. Only user is MapCR3. MapCR3 will have to do ring-3
3050 * and ring-0 locking of the CR3 in a lazy fashion I'm fear... or perhaps not. we'll see.
3051 */
3052DECLINLINE(int) pgmRamGCPhys2HCPtrAndHCPhysWithFlags(PPGM pPGM, RTGCPHYS GCPhys, PRTHCPTR pHCPtr, PRTHCPHYS pHCPhys)
3053{
3054 PPGMRAMRANGE pRam;
3055 PPGMPAGE pPage;
3056 int rc = pgmPhysGetPageAndRangeEx(pPGM, GCPhys, &pPage, &pRam);
3057 if (VBOX_FAILURE(rc))
3058 {
3059 *pHCPtr = 0; /* Shut up crappy GCC warnings */
3060 *pHCPhys = 0; /* ditto */
3061 return rc;
3062 }
3063 RTGCPHYS off = GCPhys - pRam->GCPhys;
3064
3065 *pHCPhys = pPage->HCPhys; /** @todo PAGE FLAGS */
3066 if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
3067 {
3068 unsigned idx = (off >> PGM_DYNAMIC_CHUNK_SHIFT);
3069 *pHCPtr = (RTHCPTR)((RTHCUINTPTR)CTXSUFF(pRam->pavHCChunk)[idx] + (off & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
3070 return VINF_SUCCESS;
3071 }
3072 if (pRam->pvHC)
3073 {
3074 *pHCPtr = (RTHCPTR)((RTHCUINTPTR)pRam->pvHC + off);
3075 return VINF_SUCCESS;
3076 }
3077 *pHCPtr = 0;
3078 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
3079}
3080
3081
3082/**
3083 * Clears flags associated with a RAM address.
3084 *
3085 * @returns VBox status code.
3086 * @param pPGM PGM handle.
3087 * @param GCPhys Guest context physical address.
3088 * @param fFlags fFlags to clear. (Bits 0-11.)
3089 */
3090DECLINLINE(int) pgmRamFlagsClearByGCPhys(PPGM pPGM, RTGCPHYS GCPhys, unsigned fFlags)
3091{
3092 PPGMPAGE pPage;
3093 int rc = pgmPhysGetPageEx(pPGM, GCPhys, &pPage);
3094 if (VBOX_FAILURE(rc))
3095 return rc;
3096
3097 fFlags &= ~X86_PTE_PAE_PG_MASK;
3098 pPage->HCPhys &= ~(RTHCPHYS)fFlags; /** @todo PAGE FLAGS */
3099 return VINF_SUCCESS;
3100}
3101
3102
3103/**
3104 * Clears flags associated with a RAM address.
3105 *
3106 * @returns VBox status code.
3107 * @param pPGM PGM handle.
3108 * @param GCPhys Guest context physical address.
3109 * @param fFlags fFlags to clear. (Bits 0-11.)
3110 * @param ppRamHint Where to read and store the ram list hint.
3111 * The caller initializes this to NULL before the call.
3112 */
3113DECLINLINE(int) pgmRamFlagsClearByGCPhysWithHint(PPGM pPGM, RTGCPHYS GCPhys, unsigned fFlags, PPGMRAMRANGE *ppRamHint)
3114{
3115 PPGMPAGE pPage;
3116 int rc = pgmPhysGetPageWithHintEx(pPGM, GCPhys, &pPage, ppRamHint);
3117 if (VBOX_FAILURE(rc))
3118 return rc;
3119
3120 fFlags &= ~X86_PTE_PAE_PG_MASK;
3121 pPage->HCPhys &= ~(RTHCPHYS)fFlags; /** @todo PAGE FLAGS */
3122 return VINF_SUCCESS;
3123}
3124
3125/**
3126 * Sets (bitwise OR) flags associated with a RAM address.
3127 *
3128 * @returns VBox status code.
3129 * @param pPGM PGM handle.
3130 * @param GCPhys Guest context physical address.
3131 * @param fFlags fFlags to set clear. (Bits 0-11.)
3132 */
3133DECLINLINE(int) pgmRamFlagsSetByGCPhys(PPGM pPGM, RTGCPHYS GCPhys, unsigned fFlags)
3134{
3135 PPGMPAGE pPage;
3136 int rc = pgmPhysGetPageEx(pPGM, GCPhys, &pPage);
3137 if (VBOX_FAILURE(rc))
3138 return rc;
3139
3140 fFlags &= ~X86_PTE_PAE_PG_MASK;
3141 pPage->HCPhys |= fFlags; /** @todo PAGE FLAGS */
3142 return VINF_SUCCESS;
3143}
3144
3145
3146/**
3147 * Sets (bitwise OR) flags associated with a RAM address.
3148 *
3149 * @returns VBox status code.
3150 * @param pPGM PGM handle.
3151 * @param GCPhys Guest context physical address.
3152 * @param fFlags fFlags to set clear. (Bits 0-11.)
3153 * @param ppRamHint Where to read and store the ram list hint.
3154 * The caller initializes this to NULL before the call.
3155 */
3156DECLINLINE(int) pgmRamFlagsSetByGCPhysWithHint(PPGM pPGM, RTGCPHYS GCPhys, unsigned fFlags, PPGMRAMRANGE *ppRamHint)
3157{
3158 PPGMPAGE pPage;
3159 int rc = pgmPhysGetPageWithHintEx(pPGM, GCPhys, &pPage, ppRamHint);
3160 if (VBOX_FAILURE(rc))
3161 return rc;
3162
3163 fFlags &= ~X86_PTE_PAE_PG_MASK;
3164 pPage->HCPhys |= fFlags; /** @todo PAGE FLAGS */
3165 return VINF_SUCCESS;
3166}
3167
3168
3169/**
3170 * Gets the page directory for the specified address.
3171 *
3172 * @returns Pointer to the page directory in question.
3173 * @returns NULL if the page directory is not present or on an invalid page.
3174 * @param pPGM Pointer to the PGM instance data.
3175 * @param GCPtr The address.
3176 */
3177DECLINLINE(PX86PDPAE) pgmGstGetPaePD(PPGM pPGM, RTGCUINTPTR GCPtr)
3178{
3179 const unsigned iPdPt = GCPtr >> X86_PDPT_SHIFT;
3180 if (CTXSUFF(pPGM->pGstPaePDPT)->a[iPdPt].n.u1Present)
3181 {
3182 if ((CTXSUFF(pPGM->pGstPaePDPT)->a[iPdPt].u & X86_PDPE_PG_MASK) == pPGM->aGCPhysGstPaePDs[iPdPt])
3183 return CTXSUFF(pPGM->apGstPaePDs)[iPdPt];
3184
3185 /* cache is out-of-sync. */
3186 PX86PDPAE pPD;
3187 int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), CTXSUFF(pPGM->pGstPaePDPT)->a[iPdPt].u & X86_PDPE_PG_MASK, &pPD);
3188 if (VBOX_SUCCESS(rc))
3189 return pPD;
3190 AssertMsgFailed(("Impossible! rc=%d PDPE=%#llx\n", rc, CTXSUFF(pPGM->pGstPaePDPT)->a[iPdPt].u));
3191 /* returning NIL_RTGCPHYS is ok if we assume it's just an invalid page of some kind emulated as all 0s. */
3192 }
3193 return NULL;
3194}
3195
3196
3197/**
3198 * Gets the page directory entry for the specified address.
3199 *
3200 * @returns Pointer to the page directory entry in question.
3201 * @returns NULL if the page directory is not present or on an invalid page.
3202 * @param pPGM Pointer to the PGM instance data.
3203 * @param GCPtr The address.
3204 */
3205DECLINLINE(PX86PDEPAE) pgmGstGetPaePDEPtr(PPGM pPGM, RTGCUINTPTR GCPtr)
3206{
3207 const unsigned iPdPt = GCPtr >> X86_PDPT_SHIFT;
3208 if (CTXSUFF(pPGM->pGstPaePDPT)->a[iPdPt].n.u1Present)
3209 {
3210 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
3211 if ((CTXSUFF(pPGM->pGstPaePDPT)->a[iPdPt].u & X86_PDPE_PG_MASK) == pPGM->aGCPhysGstPaePDs[iPdPt])
3212 return &CTXSUFF(pPGM->apGstPaePDs)[iPdPt]->a[iPD];
3213
3214 /* The cache is out-of-sync. */
3215 PX86PDPAE pPD;
3216 int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), CTXSUFF(pPGM->pGstPaePDPT)->a[iPdPt].u & X86_PDPE_PG_MASK, &pPD);
3217 if (VBOX_SUCCESS(rc))
3218 return &pPD->a[iPD];
3219 AssertMsgFailed(("Impossible! rc=%Vrc PDPE=%RX64\n", rc, CTXSUFF(pPGM->pGstPaePDPT)->a[iPdPt].u));
3220 /* returning NIL_RTGCPHYS is ok if we assume it's just an invalid page or something which we'll emulate as all 0s. */
3221 }
3222 return NULL;
3223}
3224
3225
3226/**
3227 * Gets the page directory entry for the specified address.
3228 *
3229 * @returns The page directory entry in question.
3230 * @returns A non-present entry if the page directory is not present or on an invalid page.
3231 * @param pPGM Pointer to the PGM instance data.
3232 * @param GCPtr The address.
3233 */
3234DECLINLINE(uint64_t) pgmGstGetPaePDE(PPGM pPGM, RTGCUINTPTR GCPtr)
3235{
3236 const unsigned iPdPt = GCPtr >> X86_PDPT_SHIFT;
3237 if (CTXSUFF(pPGM->pGstPaePDPT)->a[iPdPt].n.u1Present)
3238 {
3239 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
3240 if ((CTXSUFF(pPGM->pGstPaePDPT)->a[iPdPt].u & X86_PDPE_PG_MASK) == pPGM->aGCPhysGstPaePDs[iPdPt])
3241 return CTXSUFF(pPGM->apGstPaePDs)[iPdPt]->a[iPD].u;
3242
3243 /* cache is out-of-sync. */
3244 PX86PDPAE pPD;
3245 int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), CTXSUFF(pPGM->pGstPaePDPT)->a[iPdPt].u & X86_PDPE_PG_MASK, &pPD);
3246 if (VBOX_SUCCESS(rc))
3247 return pPD->a[iPD].u;
3248 AssertMsgFailed(("Impossible! rc=%d PDPE=%#llx\n", rc, CTXSUFF(pPGM->pGstPaePDPT)->a[iPdPt].u));
3249 }
3250 return 0ULL;
3251}
3252
3253
3254/**
3255 * Gets the page directory pointer table entry for the specified address
3256 * and returns the index into the page directory
3257 *
3258 * @returns Pointer to the page directory in question.
3259 * @returns NULL if the page directory is not present or on an invalid page.
3260 * @param pPGM Pointer to the PGM instance data.
3261 * @param GCPtr The address.
3262 * @param piPD Receives the index into the returned page directory
3263 */
3264DECLINLINE(PX86PDPAE) pgmGstGetPaePDPtr(PPGM pPGM, RTGCUINTPTR GCPtr, unsigned *piPD)
3265{
3266 const unsigned iPdPt = GCPtr >> X86_PDPT_SHIFT;
3267 if (CTXSUFF(pPGM->pGstPaePDPT)->a[iPdPt].n.u1Present)
3268 {
3269 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
3270 if ((CTXSUFF(pPGM->pGstPaePDPT)->a[iPdPt].u & X86_PDPE_PG_MASK) == pPGM->aGCPhysGstPaePDs[iPdPt])
3271 {
3272 *piPD = iPD;
3273 return CTXSUFF(pPGM->apGstPaePDs)[iPdPt];
3274 }
3275
3276 /* cache is out-of-sync. */
3277 PX86PDPAE pPD;
3278 int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), CTXSUFF(pPGM->pGstPaePDPT)->a[iPdPt].u & X86_PDPE_PG_MASK, &pPD);
3279 if (VBOX_SUCCESS(rc))
3280 {
3281 *piPD = iPD;
3282 return pPD;
3283 }
3284 AssertMsgFailed(("Impossible! rc=%d PDPE=%#llx\n", rc, CTXSUFF(pPGM->pGstPaePDPT)->a[iPdPt].u));
3285 /* returning NIL_RTGCPHYS is ok if we assume it's just an invalid page of some kind emulated as all 0s. */
3286 }
3287 return NULL;
3288}
3289
3290#ifndef IN_GC
3291/**
3292 * Gets the page directory pointer entry for the specified address.
3293 *
3294 * @returns Pointer to the page directory pointer entry in question.
3295 * @returns NULL if the page directory is not present or on an invalid page.
3296 * @param pPGM Pointer to the PGM instance data.
3297 * @param GCPtr The address.
3298 * @param ppPml4e Page Map Level-4 Entry (out)
3299 */
3300DECLINLINE(PX86PDPE) pgmGstGetLongModePDPTPtr(PPGM pPGM, RTGCUINTPTR64 GCPtr, PX86PML4E *ppPml4e)
3301{
3302 const unsigned iPml4e = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
3303
3304 *ppPml4e = &pPGM->pGstPaePML4HC->a[iPml4e];
3305 if ((*ppPml4e)->n.u1Present)
3306 {
3307 PX86PDPT pPdpt;
3308 int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), (*ppPml4e)->u & X86_PML4E_PG_MASK, &pPdpt);
3309 if (VBOX_FAILURE(rc))
3310 {
3311 AssertFailed();
3312 return NULL;
3313 }
3314 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
3315 return &pPdpt->a[iPdPt];
3316 }
3317 return NULL;
3318}
3319
3320/**
3321 * Gets the page directory entry for the specified address.
3322 *
3323 * @returns The page directory entry in question.
3324 * @returns A non-present entry if the page directory is not present or on an invalid page.
3325 * @param pPGM Pointer to the PGM instance data.
3326 * @param GCPtr The address.
3327 * @param ppPml4e Page Map Level-4 Entry (out)
3328 * @param pPdpe Page directory pointer table entry (out)
3329 */
3330DECLINLINE(uint64_t) pgmGstGetLongModePDE(PPGM pPGM, RTGCUINTPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPE pPdpe)
3331{
3332 const unsigned iPml4e = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
3333
3334 *ppPml4e = &pPGM->pGstPaePML4HC->a[iPml4e];
3335 if ((*ppPml4e)->n.u1Present)
3336 {
3337 PX86PDPT pPdptTemp;
3338 int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), (*ppPml4e)->u & X86_PML4E_PG_MASK, &pPdptTemp);
3339 if (VBOX_FAILURE(rc))
3340 {
3341 AssertFailed();
3342 return 0ULL;
3343 }
3344
3345 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
3346 *pPdpe = pPdptTemp->a[iPdPt];
3347 if (pPdpe->n.u1Present)
3348 {
3349 PX86PDPAE pPD;
3350
3351 rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), pPdpe->u & X86_PDPE_PG_MASK, &pPD);
3352 if (VBOX_FAILURE(rc))
3353 {
3354 AssertFailed();
3355 return 0ULL;
3356 }
3357 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
3358 return pPD->a[iPD].u;
3359 }
3360 }
3361 return 0ULL;
3362}
3363
3364/**
3365 * Gets the page directory entry for the specified address.
3366 *
3367 * @returns The page directory entry in question.
3368 * @returns A non-present entry if the page directory is not present or on an invalid page.
3369 * @param pPGM Pointer to the PGM instance data.
3370 * @param GCPtr The address.
3371 */
3372DECLINLINE(uint64_t) pgmGstGetLongModePDE(PPGM pPGM, RTGCUINTPTR64 GCPtr)
3373{
3374 const unsigned iPml4e = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
3375
3376 if (pPGM->pGstPaePML4HC->a[iPml4e].n.u1Present)
3377 {
3378 PX86PDPT pPdptTemp;
3379 int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), pPGM->pGstPaePML4HC->a[iPml4e].u & X86_PML4E_PG_MASK, &pPdptTemp);
3380 if (VBOX_FAILURE(rc))
3381 {
3382 AssertFailed();
3383 return 0ULL;
3384 }
3385
3386 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
3387 if (pPdptTemp->a[iPdPt].n.u1Present)
3388 {
3389 PX86PDPAE pPD;
3390
3391 rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), pPdptTemp->a[iPdPt].u & X86_PDPE_PG_MASK, &pPD);
3392 if (VBOX_FAILURE(rc))
3393 {
3394 AssertFailed();
3395 return 0ULL;
3396 }
3397 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
3398 return pPD->a[iPD].u;
3399 }
3400 }
3401 return 0ULL;
3402}
3403
3404/**
3405 * Gets the page directory entry for the specified address.
3406 *
3407 * @returns Pointer to the page directory entry in question.
3408 * @returns NULL if the page directory is not present or on an invalid page.
3409 * @param pPGM Pointer to the PGM instance data.
3410 * @param GCPtr The address.
3411 */
3412DECLINLINE(PX86PDEPAE) pgmGstGetLongModePDEPtr(PPGM pPGM, RTGCUINTPTR64 GCPtr)
3413{
3414 const unsigned iPml4e = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
3415
3416 if (pPGM->pGstPaePML4HC->a[iPml4e].n.u1Present)
3417 {
3418 PX86PDPT pPdptTemp;
3419 int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), pPGM->pGstPaePML4HC->a[iPml4e].u & X86_PML4E_PG_MASK, &pPdptTemp);
3420 if (VBOX_FAILURE(rc))
3421 {
3422 AssertFailed();
3423 return NULL;
3424 }
3425
3426 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
3427 if (pPdptTemp->a[iPdPt].n.u1Present)
3428 {
3429 PX86PDPAE pPD;
3430
3431 rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), pPdptTemp->a[iPdPt].u & X86_PDPE_PG_MASK, &pPD);
3432 if (VBOX_FAILURE(rc))
3433 {
3434 AssertFailed();
3435 return NULL;
3436 }
3437 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
3438 return &pPD->a[iPD];
3439 }
3440 }
3441 return NULL;
3442}
3443
3444
3445/**
3446 * Gets the GUEST page directory pointer for the specified address.
3447 *
3448 * @returns The page directory in question.
3449 * @returns NULL if the page directory is not present or on an invalid page.
3450 * @param pPGM Pointer to the PGM instance data.
3451 * @param GCPtr The address.
3452 * @param ppPml4e Page Map Level-4 Entry (out)
3453 * @param pPdpe Page directory pointer table entry (out)
3454 * @param piPD Receives the index into the returned page directory
3455 */
3456DECLINLINE(PX86PDPAE) pgmGstGetLongModePDPtr(PPGM pPGM, RTGCUINTPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPE pPdpe, unsigned *piPD)
3457{
3458 const unsigned iPml4e = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
3459
3460 *ppPml4e = &pPGM->pGstPaePML4HC->a[iPml4e];
3461 if ((*ppPml4e)->n.u1Present)
3462 {
3463 PX86PDPT pPdptTemp;
3464 int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), (*ppPml4e)->u & X86_PML4E_PG_MASK, &pPdptTemp);
3465 if (VBOX_FAILURE(rc))
3466 {
3467 AssertFailed();
3468 return 0ULL;
3469 }
3470
3471 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
3472 *pPdpe = pPdptTemp->a[iPdPt];
3473 if (pPdpe->n.u1Present)
3474 {
3475 PX86PDPAE pPD;
3476
3477 rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), pPdpe->u & X86_PDPE_PG_MASK, &pPD);
3478 if (VBOX_FAILURE(rc))
3479 {
3480 AssertFailed();
3481 return 0ULL;
3482 }
3483 *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
3484 return pPD;
3485 }
3486 }
3487 return 0ULL;
3488}
3489#endif /* !IN_GC */
3490
3491/**
3492 * Checks if any of the specified page flags are set for the given page.
3493 *
3494 * @returns true if any of the flags are set.
3495 * @returns false if all the flags are clear.
3496 * @param pPGM PGM handle.
3497 * @param GCPhys The GC physical address.
3498 * @param fFlags The flags to check for.
3499 */
3500DECLINLINE(bool) pgmRamTestFlags(PPGM pPGM, RTGCPHYS GCPhys, uint64_t fFlags)
3501{
3502 PPGMPAGE pPage = pgmPhysGetPage(pPGM, GCPhys);
3503 return pPage
3504 && (pPage->HCPhys & fFlags) != 0; /** @todo PAGE FLAGS */
3505}
3506
3507
3508/**
3509 * Gets the page state for a physical handler.
3510 *
3511 * @returns The physical handler page state.
3512 * @param pCur The physical handler in question.
3513 */
3514DECLINLINE(unsigned) pgmHandlerPhysicalCalcState(PPGMPHYSHANDLER pCur)
3515{
3516 switch (pCur->enmType)
3517 {
3518 case PGMPHYSHANDLERTYPE_PHYSICAL_WRITE:
3519 return PGM_PAGE_HNDL_PHYS_STATE_WRITE;
3520
3521 case PGMPHYSHANDLERTYPE_MMIO:
3522 case PGMPHYSHANDLERTYPE_PHYSICAL_ALL:
3523 return PGM_PAGE_HNDL_PHYS_STATE_ALL;
3524
3525 default:
3526 AssertFatalMsgFailed(("Invalid type %d\n", pCur->enmType));
3527 }
3528}
3529
3530
3531/**
3532 * Gets the page state for a virtual handler.
3533 *
3534 * @returns The virtual handler page state.
3535 * @param pCur The virtual handler in question.
3536 * @remarks This should never be used on a hypervisor access handler.
3537 */
3538DECLINLINE(unsigned) pgmHandlerVirtualCalcState(PPGMVIRTHANDLER pCur)
3539{
3540 switch (pCur->enmType)
3541 {
3542 case PGMVIRTHANDLERTYPE_WRITE:
3543 return PGM_PAGE_HNDL_VIRT_STATE_WRITE;
3544 case PGMVIRTHANDLERTYPE_ALL:
3545 return PGM_PAGE_HNDL_VIRT_STATE_ALL;
3546 default:
3547 AssertFatalMsgFailed(("Invalid type %d\n", pCur->enmType));
3548 }
3549}
3550
3551
3552/**
3553 * Clears one physical page of a virtual handler
3554 *
3555 * @param pPGM Pointer to the PGM instance.
3556 * @param pCur Virtual handler structure
3557 * @param iPage Physical page index
3558 *
3559 * @remark Only used when PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL is being set, so no
3560 * need to care about other handlers in the same page.
3561 */
3562DECLINLINE(void) pgmHandlerVirtualClearPage(PPGM pPGM, PPGMVIRTHANDLER pCur, unsigned iPage)
3563{
3564 const PPGMPHYS2VIRTHANDLER pPhys2Virt = &pCur->aPhysToVirt[iPage];
3565
3566 /*
3567 * Remove the node from the tree (it's supposed to be in the tree if we get here!).
3568 */
3569#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
3570 AssertReleaseMsg(pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_IN_TREE,
3571 ("pPhys2Virt=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
3572 pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias));
3573#endif
3574 if (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_IS_HEAD)
3575 {
3576 /* We're the head of the alias chain. */
3577 PPGMPHYS2VIRTHANDLER pRemove = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysRemove(&pPGM->CTXSUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key); NOREF(pRemove);
3578#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
3579 AssertReleaseMsg(pRemove != NULL,
3580 ("pPhys2Virt=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
3581 pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias));
3582 AssertReleaseMsg(pRemove == pPhys2Virt,
3583 ("wanted: pPhys2Virt=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n"
3584 " got: pRemove=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
3585 pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias,
3586 pRemove, pRemove->Core.Key, pRemove->Core.KeyLast, pRemove->offVirtHandler, pRemove->offNextAlias));
3587#endif
3588 if (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK)
3589 {
3590 /* Insert the next list in the alias chain into the tree. */
3591 PPGMPHYS2VIRTHANDLER pNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPhys2Virt + (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
3592#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
3593 AssertReleaseMsg(pNext->offNextAlias & PGMPHYS2VIRTHANDLER_IN_TREE,
3594 ("pNext=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
3595 pNext, pNext->Core.Key, pNext->Core.KeyLast, pNext->offVirtHandler, pNext->offNextAlias));
3596#endif
3597 pNext->offNextAlias |= PGMPHYS2VIRTHANDLER_IS_HEAD;
3598 bool fRc = RTAvlroGCPhysInsert(&pPGM->CTXSUFF(pTrees)->PhysToVirtHandlers, &pNext->Core);
3599 AssertRelease(fRc);
3600 }
3601 }
3602 else
3603 {
3604 /* Locate the previous node in the alias chain. */
3605 PPGMPHYS2VIRTHANDLER pPrev = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGet(&pPGM->CTXSUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key);
3606#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
3607 AssertReleaseMsg(pPrev != pPhys2Virt,
3608 ("pPhys2Virt=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} pPrev=%p\n",
3609 pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias, pPrev));
3610#endif
3611 for (;;)
3612 {
3613 PPGMPHYS2VIRTHANDLER pNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPrev + (pPrev->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
3614 if (pNext == pPhys2Virt)
3615 {
3616 /* unlink. */
3617 LogFlow(("pgmHandlerVirtualClearPage: removed %p:{.offNextAlias=%#RX32} from alias chain. prev %p:{.offNextAlias=%#RX32} [%VGp-%VGp]\n",
3618 pPhys2Virt, pPhys2Virt->offNextAlias, pPrev, pPrev->offNextAlias, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast));
3619 if (!(pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK))
3620 pPrev->offNextAlias &= ~PGMPHYS2VIRTHANDLER_OFF_MASK;
3621 else
3622 {
3623 PPGMPHYS2VIRTHANDLER pNewNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPhys2Virt + (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
3624 pPrev->offNextAlias = ((intptr_t)pNewNext - (intptr_t)pPrev)
3625 | (pPrev->offNextAlias & ~PGMPHYS2VIRTHANDLER_OFF_MASK);
3626 }
3627 break;
3628 }
3629
3630 /* next */
3631 if (pNext == pPrev)
3632 {
3633#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
3634 AssertReleaseMsg(pNext != pPrev,
3635 ("pPhys2Virt=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} pPrev=%p\n",
3636 pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias, pPrev));
3637#endif
3638 break;
3639 }
3640 pPrev = pNext;
3641 }
3642 }
3643 Log2(("PHYS2VIRT: Removing %VGp-%VGp %#RX32 %s\n",
3644 pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias, HCSTRING(pCur->pszDesc)));
3645 pPhys2Virt->offNextAlias = 0;
3646 pPhys2Virt->Core.KeyLast = NIL_RTGCPHYS; /* require reinsert */
3647
3648 /*
3649 * Clear the ram flags for this page.
3650 */
3651 PPGMPAGE pPage = pgmPhysGetPage(pPGM, pPhys2Virt->Core.Key);
3652 AssertReturnVoid(pPage);
3653 PGM_PAGE_SET_HNDL_VIRT_STATE(pPage, PGM_PAGE_HNDL_VIRT_STATE_NONE);
3654}
3655
3656
3657/**
3658 * Internal worker for finding a 'in-use' shadow page give by it's physical address.
3659 *
3660 * @returns Pointer to the shadow page structure.
3661 * @param pPool The pool.
3662 * @param HCPhys The HC physical address of the shadow page.
3663 */
3664DECLINLINE(PPGMPOOLPAGE) pgmPoolGetPage(PPGMPOOL pPool, RTHCPHYS HCPhys)
3665{
3666 /*
3667 * Look up the page.
3668 */
3669 PPGMPOOLPAGE pPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, HCPhys & X86_PTE_PAE_PG_MASK);
3670 AssertFatalMsg(pPage && pPage->enmKind != PGMPOOLKIND_FREE, ("HCPhys=%VHp pPage=%p type=%d\n", HCPhys, pPage, (pPage) ? pPage->enmKind : 0));
3671 return pPage;
3672}
3673
3674
3675/**
3676 * Internal worker for finding a 'in-use' shadow page give by it's physical address.
3677 *
3678 * @returns Pointer to the shadow page structure.
3679 * @param pPool The pool.
3680 * @param idx The pool page index.
3681 */
3682DECLINLINE(PPGMPOOLPAGE) pgmPoolGetPageByIdx(PPGMPOOL pPool, unsigned idx)
3683{
3684 AssertFatalMsg(idx >= PGMPOOL_IDX_FIRST && idx < pPool->cCurPages, ("idx=%d\n", idx));
3685 return &pPool->aPages[idx];
3686}
3687
3688
#ifdef PGMPOOL_WITH_GCPHYS_TRACKING
/**
 * Clear references to guest physical memory.
 *
 * Drops one reference from pPoolPage on the physical page. The reference
 * count and pool-page index are packed into pPhysPage->HCPhys
 * (MM_RAM_FLAGS_CREFS_* / MM_RAM_FLAGS_IDX_*); the single-reference case is
 * handled inline, everything else is delegated to
 * pgmPoolTrackPhysExtDerefGCPhys.
 *
 * @param   pPool       The pool.
 * @param   pPoolPage   The pool page.
 * @param   pPhysPage   The physical guest page tracking structure.
 */
DECLINLINE(void) pgmTrackDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPoolPage, PPGMPAGE pPhysPage)
{
    /*
     * Just deal with the simple case here.
     */
#ifdef LOG_ENABLED
    const RTHCPHYS HCPhysOrg = pPhysPage->HCPhys; /** @todo PAGE FLAGS */
#endif
    const unsigned cRefs = pPhysPage->HCPhys >> MM_RAM_FLAGS_CREFS_SHIFT; /** @todo PAGE FLAGS */
    if (cRefs == 1)
    {
        /* Last reference: the stored index must be ours; wipe the tracking bits. */
        Assert(pPoolPage->idx == ((pPhysPage->HCPhys >> MM_RAM_FLAGS_IDX_SHIFT) & MM_RAM_FLAGS_IDX_MASK));
        pPhysPage->HCPhys = pPhysPage->HCPhys & MM_RAM_FLAGS_NO_REFS_MASK;
    }
    else
        pgmPoolTrackPhysExtDerefGCPhys(pPool, pPoolPage, pPhysPage);
    /* HCPhysOrg only exists under LOG_ENABLED; LogFlow compiles away otherwise. */
    LogFlow(("pgmTrackDerefGCPhys: HCPhys=%RHp -> %RHp\n", HCPhysOrg, pPhysPage->HCPhys));
}
#endif
3716
3717
#ifdef PGMPOOL_WITH_CACHE
/**
 * Moves the page to the head of the age list.
 *
 * This is done when the cached page is used in one way or another.
 * The age list is a doubly linked list threaded through the pool pages
 * via the iAgePrev/iAgeNext indexes (NIL_PGMPOOL_IDX terminated).
 *
 * @param   pPool   The pool.
 * @param   pPage   The cached page.
 * @todo    inline in PGMInternal.h!
 */
DECLINLINE(void) pgmPoolCacheUsed(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
{
    /*
     * Move to the head of the age list.
     * (iAgePrev == NIL means the page already is the head - nothing to do.)
     */
    if (pPage->iAgePrev != NIL_PGMPOOL_IDX)
    {
        /* unlink */
        pPool->aPages[pPage->iAgePrev].iAgeNext = pPage->iAgeNext;
        if (pPage->iAgeNext != NIL_PGMPOOL_IDX)
            pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->iAgePrev;
        else
            pPool->iAgeTail = pPage->iAgePrev;

        /* insert at head */
        pPage->iAgePrev = NIL_PGMPOOL_IDX;
        pPage->iAgeNext = pPool->iAgeHead;
        Assert(pPage->iAgeNext != NIL_PGMPOOL_IDX); /* we would've already been head then */
        pPool->iAgeHead = pPage->idx;
        pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->idx;
    }
}
#endif /* PGMPOOL_WITH_CACHE */
3751
3752/**
3753 * Tells if mappings are to be put into the shadow page table or not
3754 *
3755 * @returns boolean result
3756 * @param pVM VM handle.
3757 */
3758
3759DECLINLINE(bool) pgmMapAreMappingsEnabled(PPGM pPGM)
3760{
3761#ifdef IN_RING0
3762 /* There are no mappings in VT-x and AMD-V mode. */
3763 Assert(pPGM->fDisableMappings);
3764 return false;
3765#else
3766 return !pPGM->fDisableMappings;
3767#endif
3768}
3769
3770/** @} */
3771
3772#endif
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette