VirtualBox

source: vbox/trunk/src/VBox/VMM/PGMInternal.h@ 2217

Last change on this file since 2217 was 2089, checked in by vboxsync, 18 years ago

Added PGMPOOLKIND_32BIT_PT_FOR_PHYS & PGMPOOLKIND_PAE_PT_FOR_PHYS

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 110.7 KB
Line 
1/* $Id: PGMInternal.h 2089 2007-04-14 15:20:19Z vboxsync $ */
2/** @file
3 * PGM - Internal header file.
4 */
5
6/*
7 * Copyright (C) 2006 InnoTek Systemberatung GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License as published by the Free Software Foundation,
13 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
14 * distribution. VirtualBox OSE is distributed in the hope that it will
15 * be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * If you received this file as part of a commercial VirtualBox
18 * distribution, then only the terms of your commercial VirtualBox
19 * license agreement apply instead of the previous paragraph.
20 */
21
22#ifndef __PGMInternal_h__
23#define __PGMInternal_h__
24
25#include <VBox/cdefs.h>
26#include <VBox/types.h>
27#include <VBox/err.h>
28#include <VBox/stam.h>
29#include <VBox/param.h>
30#include <VBox/vmm.h>
31#include <VBox/mm.h>
32#include <VBox/pdm.h>
33#include <iprt/avl.h>
34#include <iprt/assert.h>
35#include <iprt/critsect.h>
36
/* Sanity: this is an internal header and must only be included by PGM code
 * built for one of the three contexts (ring-3, ring-0 or guest context). */
#if !defined(IN_PGM_R3) && !defined(IN_PGM_R0) && !defined(IN_PGM_GC)
# error "Not in PGM! This is an internal header!"
#endif
40
41
42/** @defgroup grp_pgm_int Internals
43 * @ingroup grp_pgm
44 * @internal
45 * @{
46 */
47
48
/** @name PGM Compile Time Config
 * @{
 */

/**
 * Solve page-is-out-of-sync issues inside Guest Context (in PGMGC.cpp).
 * Comment it out if it breaks something.
 */
#define PGM_OUT_OF_SYNC_IN_GC

/**
 * Virtualize the dirty bit.
 * This also makes a half-hearted attempt at the accessed bit. For full
 * accessed bit virtualization define PGM_SYNC_ACCESSED_BIT.
 */
#define PGM_SYNC_DIRTY_BIT

/**
 * Fully virtualize the accessed bit.
 * @remark This requires PGM_SYNC_DIRTY_BIT to be defined as well
 *         (enforced by the sanity check below)!
 */
#define PGM_SYNC_ACCESSED_BIT

/**
 * Check and skip global PDEs for non-global flushes.
 */
#define PGM_SKIP_GLOBAL_PAGEDIRS_ON_NONGLOBAL_FLUSH

/**
 * Sync N pages instead of a whole page table.
 */
#define PGM_SYNC_N_PAGES

/**
 * Number of pages to sync during a page fault.
 *
 * When PGMPOOL_WITH_GCPHYS_TRACKING is enabled using high values here
 * causes a lot of unnecessary extents and also is slower than taking more \#PFs.
 */
#define PGM_SYNC_NR_PAGES               8

/**
 * Number of PGMPhysRead/Write cache entries.
 * Must be <= 64 so each entry maps to one bit of the uint64_t valid-entry
 * bitmap (PGMPHYSCACHE::aEntries).
 */
#define PGM_MAX_PHYSCACHE_ENTRIES       64
#define PGM_MAX_PHYSCACHE_ENTRIES_MASK  (PGM_MAX_PHYSCACHE_ENTRIES-1)

/**
 * Enable caching of PGMR3PhysRead/WriteByte/Word/Dword.
 */
#define PGM_PHYSMEMACCESS_CACHING

/*
 * Assert Sanity.
 */
#if defined(PGM_SYNC_ACCESSED_BIT) && !defined(PGM_SYNC_DIRTY_BIT)
# error "PGM_SYNC_ACCESSED_BIT requires PGM_SYNC_DIRTY_BIT!"
#endif

/** @def PGMPOOL_WITH_CACHE
 * Enable aggressive caching using the page pool.
 *
 * This requires PGMPOOL_WITH_USER_TRACKING and PGMPOOL_WITH_MONITORING
 * (both are derived from this define below).
 */
#define PGMPOOL_WITH_CACHE

/** @def PGMPOOL_WITH_MIXED_PT_CR3
 * When defined, we'll deal with 'uncachable' pages.
 */
#ifdef PGMPOOL_WITH_CACHE
# define PGMPOOL_WITH_MIXED_PT_CR3
#endif

/** @def PGMPOOL_WITH_MONITORING
 * Monitor the guest pages which are shadowed.
 * When this is enabled, PGMPOOL_WITH_CACHE or PGMPOOL_WITH_GCPHYS_TRACKING must
 * be enabled as well.
 * @remark doesn't really work without caching now. (Mixed PT/CR3 change.)
 */
#ifdef PGMPOOL_WITH_CACHE
# define PGMPOOL_WITH_MONITORING
#endif

/** @def PGMPOOL_WITH_GCPHYS_TRACKING
 * Tracking of which shadow pages map which guest physical pages.
 *
 * This is very expensive, the current cache prototype is trying to figure out
 * whether it will be acceptable with an aggressive caching policy.
 */
#if defined(PGMPOOL_WITH_CACHE) || defined(PGMPOOL_WITH_MONITORING)
# define PGMPOOL_WITH_GCPHYS_TRACKING
#endif

/** @def PGMPOOL_WITH_USER_TRACKING
 * Tracking users of shadow pages. This is required for the linking of shadow page
 * tables and physical guest addresses.
 */
#if defined(PGMPOOL_WITH_GCPHYS_TRACKING) || defined(PGMPOOL_WITH_CACHE) || defined(PGMPOOL_WITH_MONITORING)
# define PGMPOOL_WITH_USER_TRACKING
#endif

/** @def PGMPOOL_CFG_MAX_GROW
 * The maximum number of pages to add to the pool in one go.
 */
#define PGMPOOL_CFG_MAX_GROW            (_256K >> PAGE_SHIFT)

/** @def VBOX_STRICT_PGM_HANDLER_VIRTUAL
 * Enables some extra assertions for virtual handlers (mainly phys2virt related).
 */
#ifdef VBOX_STRICT
# define VBOX_STRICT_PGM_HANDLER_VIRTUAL
#endif
/** @} */
162
163
/** @name PDPTR and PML4 flags.
 * These are placed in the three bits available for system programs in
 * the PDPTR and PML4 entries.
 * @{ */
/** The entry is a permanent one and must always be present.
 * Never free such an entry. */
#define PGM_PLXFLAGS_PERMANENT          BIT64(10)
/** @} */

/** @name Page directory flags.
 * These are placed in the three bits available for system programs in
 * the page directory entries.
 * @{ */
/** Mapping (hypervisor allocated pagetable). */
#define PGM_PDFLAGS_MAPPING             BIT64(10)
/** Made read-only to facilitate dirty bit tracking. */
#define PGM_PDFLAGS_TRACK_DIRTY         BIT64(11)
/** @} */

/** @name Page flags.
 * These are placed in the three bits available for system programs in
 * the page entries.
 * @{ */
/** Made read-only to facilitate dirty bit tracking. */
#define PGM_PTFLAGS_TRACK_DIRTY         BIT64(9)

#ifndef PGM_PTFLAGS_CSAM_VALIDATED
/** Scanned and approved by CSAM (tm).
 * NOTE: Must be identical to the one defined in CSAMInternal.h!!
 * @todo Move PGM_PTFLAGS_* and PGM_PDFLAGS_* to VBox/pgm.h. */
#define PGM_PTFLAGS_CSAM_VALIDATED      BIT64(11)
#endif
/** @} */
197
/** @name Defines used to indicate the shadow and guest paging in the templates.
 * @{ */
#define PGM_TYPE_REAL                   1
#define PGM_TYPE_PROT                   2
#define PGM_TYPE_32BIT                  3
#define PGM_TYPE_PAE                    4
#define PGM_TYPE_AMD64                  5
/** @} */

/** @name Defines used to check if the guest is using paging
 * @{ */
/** Checks whether the given PGM_TYPE_* value implies guest paging is enabled.
 * @param a A PGM_TYPE_* value.
 * @returns true for 32BIT/PAE/AMD64, false for REAL/PROT.
 * @remark (a) is now parenthesized to guard against operator-precedence
 *         surprises; it is still evaluated up to three times, so avoid
 *         arguments with side effects. */
#define PGM_WITH_PAGING(a)  ((a) == PGM_TYPE_32BIT || (a) == PGM_TYPE_PAE || (a) == PGM_TYPE_AMD64)
/** @} */
211
/** @def PGM_HCPHYS_2_PTR
 * Maps a HC physical page pool address to a virtual address.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 * @param   HCPhys  The HC physical address to map to a virtual one.
 * @param   ppv     Where to store the virtual address. No need to cast this.
 *
 * @remark  In GC this uses PGMGCDynMapHCPage(), so it will consume an entry of
 *          the small page window employed by that function. Be careful.
 * @remark  There is no need to assert on the result.
 */
#ifdef IN_GC
# define PGM_HCPHYS_2_PTR(pVM, HCPhys, ppv) PGMGCDynMapHCPage(pVM, HCPhys, (void **)(ppv))
#else
# define PGM_HCPHYS_2_PTR(pVM, HCPhys, ppv) MMPagePhys2PageEx(pVM, HCPhys, (void **)(ppv))
#endif

/** @def PGM_GCPHYS_2_PTR
 * Maps a GC physical page address to a virtual address.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 * @param   GCPhys  The GC physical address to map to a virtual one.
 * @param   ppv     Where to store the virtual address. No need to cast this.
 *
 * @remark  In GC this uses PGMGCDynMapGCPage(), so it will consume an entry of
 *          the small page window employed by that function. Be careful.
 * @remark  There is no need to assert on the result.
 */
#ifdef IN_GC
# define PGM_GCPHYS_2_PTR(pVM, GCPhys, ppv) PGMGCDynMapGCPage(pVM, GCPhys, (void **)(ppv))
#else
# define PGM_GCPHYS_2_PTR(pVM, GCPhys, ppv) PGMPhysGCPhys2HCPtr(pVM, GCPhys, 1 /* one page only */, (void **)(ppv)) /** @todo this isn't asserting, use PGMRamGCPhys2HCPtr! */
#endif

/** @def PGM_GCPHYS_2_PTR_EX
 * Maps an unaligned GC physical page address to a virtual address.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 * @param   GCPhys  The GC physical address to map to a virtual one.
 * @param   ppv     Where to store the virtual address. No need to cast this.
 *
 * @remark  In GC this uses PGMGCDynMapGCPage(), so it will consume an entry of
 *          the small page window employed by that function. Be careful.
 * @remark  There is no need to assert on the result.
 */
#ifdef IN_GC
# define PGM_GCPHYS_2_PTR_EX(pVM, GCPhys, ppv) PGMGCDynMapGCPageEx(pVM, GCPhys, (void **)(ppv))
#else
# define PGM_GCPHYS_2_PTR_EX(pVM, GCPhys, ppv) PGMPhysGCPhys2HCPtr(pVM, GCPhys, 1 /* one page only */, (void **)(ppv)) /** @todo this isn't asserting, use PGMRamGCPhys2HCPtr! */
#endif

/** @def PGM_INVL_PG
 * Invalidates a page when in GC; does nothing in HC.
 *
 * @param   GCVirt  The virtual address of the page to invalidate.
 */
#ifdef IN_GC
# define PGM_INVL_PG(GCVirt)        ASMInvalidatePage((void *)(GCVirt))
#else
# define PGM_INVL_PG(GCVirt)        ((void)0)
#endif

/** @def PGM_INVL_BIG_PG
 * Invalidates a 4MB page directory entry when in GC; does nothing in HC.
 * (Implemented as a full CR3 reload since INVLPG only does one 4KB page.)
 *
 * @param   GCVirt  The virtual address within the page directory to invalidate.
 */
#ifdef IN_GC
# define PGM_INVL_BIG_PG(GCVirt)    ASMReloadCR3()
#else
# define PGM_INVL_BIG_PG(GCVirt)    ((void)0)
#endif

/** @def PGM_INVL_GUEST_TLBS()
 * Invalidates all guest TLBs; does nothing in HC.
 */
#ifdef IN_GC
# define PGM_INVL_GUEST_TLBS()      ASMReloadCR3()
#else
# define PGM_INVL_GUEST_TLBS()      ((void)0)
#endif
296
297
/**
 * Structure for tracking GC Mappings.
 *
 * This structure is used by linked lists in both GC and HC.
 */
typedef struct PGMMAPPING
{
    /** Pointer to next entry - HC pointer. */
    HCPTRTYPE(struct PGMMAPPING *)  pNextHC;
    /** Pointer to next entry - GC pointer. */
    GCPTRTYPE(struct PGMMAPPING *)  pNextGC;
    /** Start Virtual address. */
    RTGCUINTPTR                     GCPtr;
    /** Last Virtual address (inclusive). */
    RTGCUINTPTR                     GCPtrLast;
    /** Range size (bytes). */
    RTGCUINTPTR                     cb;
    /** Pointer to relocation callback function. */
    HCPTRTYPE(PFNPGMRELOCATE)       pfnRelocate;
    /** User argument to the callback. */
    HCPTRTYPE(void *)               pvUser;
    /** Mapping description / name. For easing debugging. */
    HCPTRTYPE(const char *)         pszDesc;
    /** Number of page tables. */
    RTUINT                          cPTs;
#if HC_ARCH_BITS != GC_ARCH_BITS
    RTUINT                          uPadding0; /**< Alignment padding. */
#endif
    /** Array of page table mapping data. Each entry
     * describes one page table. The array can be longer
     * than the declared length (it is allocated with cPTs entries).
     */
    struct
    {
        /** The HC physical address of the page table. */
        RTHCPHYS                    HCPhysPT;
        /** The HC physical address of the first PAE page table. */
        RTHCPHYS                    HCPhysPaePT0;
        /** The HC physical address of the second PAE page table. */
        RTHCPHYS                    HCPhysPaePT1;
        /** The HC virtual address of the 32-bit page table. */
        HCPTRTYPE(PVBOXPT)          pPTHC;
        /** The HC virtual address of the two PAE page tables. (i.e. 1024 entries instead of 512) */
        HCPTRTYPE(PX86PTPAE)        paPaePTsHC;
        /** The GC virtual address of the 32-bit page table. */
        GCPTRTYPE(PVBOXPT)          pPTGC;
        /** The GC virtual address of the two PAE page tables. */
        GCPTRTYPE(PX86PTPAE)        paPaePTsGC;
    } aPTs[1];
} PGMMAPPING;
/** Pointer to structure for tracking GC Mappings. */
typedef struct PGMMAPPING *PPGMMAPPING;
350
351
/**
 * Physical page access handler structure.
 *
 * This is used to keep track of physical address ranges
 * which are being monitored in some kind of way.
 */
typedef struct PGMPHYSHANDLER
{
    /** Core node for the tree based on physical ranges. */
    AVLROGCPHYSNODECORE Core;
    /** Alignment padding. */
    uint32_t            u32Padding;
    /** Access type. */
    PGMPHYSHANDLERTYPE  enmType;
    /** Number of pages to update. */
    uint32_t            cPages;
    /** Pointer to R3 callback function. */
    HCPTRTYPE(PFNPGMR3PHYSHANDLER) pfnHandlerR3;
    /** User argument for R3 handlers. */
    HCPTRTYPE(void *)   pvUserR3;
    /** Pointer to R0 callback function. */
    R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnHandlerR0;
    /** User argument for R0 handlers.
     * NOTE(review): declared HCPTRTYPE, not R0PTRTYPE like pfnHandlerR0 —
     * verify this asymmetry is intentional. */
    HCPTRTYPE(void *)   pvUserR0;
    /** Pointer to GC callback function. */
    GCPTRTYPE(PFNPGMGCPHYSHANDLER) pfnHandlerGC;
    /** User argument for GC handlers. */
    GCPTRTYPE(void *)   pvUserGC;
    /** Description / Name. For easing debugging. */
    HCPTRTYPE(const char *) pszDesc;
#ifdef VBOX_WITH_STATISTICS
    /** Profiling of this handler. */
    STAMPROFILE         Stat;
#endif
} PGMPHYSHANDLER;
/** Pointer to a physical page access handler structure. */
typedef PGMPHYSHANDLER *PPGMPHYSHANDLER;
388
389
/**
 * Cache node for the physical addresses covered by a virtual handler.
 */
typedef struct PGMPHYS2VIRTHANDLER
{
    /** Core node for the tree based on physical ranges. */
    AVLROGCPHYSNODECORE Core;
    /** Offset from this struct to the PGMVIRTHANDLER structure. */
    RTGCINTPTR          offVirtHandler;
    /** Offset of the next alias relative to this one.
     * Bit 0 is used for indicating whether we're in the tree.
     * Bit 1 is used for indicating that we're the head node.
     * (Mask the low two bits off to get the actual offset, see
     * PGMPHYS2VIRTHANDLER_OFF_MASK below.)
     */
    int32_t             offNextAlias;
} PGMPHYS2VIRTHANDLER;
/** Pointer to a phys to virtual handler structure. */
typedef PGMPHYS2VIRTHANDLER *PPGMPHYS2VIRTHANDLER;

/** The bit in PGMPHYS2VIRTHANDLER::offNextAlias used to indicate that the
 * node is in the tree. */
#define PGMPHYS2VIRTHANDLER_IN_TREE     BIT(0)
/** The bit in PGMPHYS2VIRTHANDLER::offNextAlias used to indicate that the
 * node is the head of an alias chain.
 * The PGMPHYS2VIRTHANDLER_IN_TREE is always set if this bit is set. */
#define PGMPHYS2VIRTHANDLER_IS_HEAD     BIT(1)
/** The mask to apply to PGMPHYS2VIRTHANDLER::offNextAlias to get the offset. */
#define PGMPHYS2VIRTHANDLER_OFF_MASK    (~(int32_t)3)
417
418
/**
 * Virtual page access handler structure.
 *
 * This is used to keep track of virtual address ranges
 * which are being monitored in some kind of way.
 */
typedef struct PGMVIRTHANDLER
{
    /** Core node for the tree based on virtual ranges. */
    AVLROGCPTRNODECORE  Core;
    /** Alignment padding. */
    uint32_t            u32Padding;
    /** Access type. */
    PGMVIRTHANDLERTYPE  enmType;
    /** Number of cache pages. */
    uint32_t            cPages;

/** @todo The next two members are redundant. It adds some readability though. */
    /** Start of the range. */
    RTGCPTR             GCPtr;
    /** End of the range (exclusive). */
    RTGCPTR             GCPtrLast;
    /** Size of the range (in bytes). */
    RTGCUINTPTR         cb;
    /** Pointer to the GC callback function. */
    GCPTRTYPE(PFNPGMGCVIRTHANDLER) pfnHandlerGC;
    /** Pointer to the HC callback function for invalidation. */
    HCPTRTYPE(PFNPGMHCVIRTINVALIDATE) pfnInvalidateHC;
    /** Pointer to the HC callback function. */
    HCPTRTYPE(PFNPGMHCVIRTHANDLER) pfnHandlerHC;
    /** Description / Name. For easing debugging. */
    HCPTRTYPE(const char *) pszDesc;
#ifdef VBOX_WITH_STATISTICS
    /** Profiling of this handler. */
    STAMPROFILE         Stat;
#endif
    /** Array of cached physical addresses for the monitored range. */
    PGMPHYS2VIRTHANDLER aPhysToVirt[HC_ARCH_BITS == 32 ? 1 : 2];
} PGMVIRTHANDLER;
/** Pointer to a virtual page access handler structure. */
typedef PGMVIRTHANDLER *PPGMVIRTHANDLER;
460
461
/**
 * Ram range for GC Phys to HC Phys conversion.
 *
 * Can be used for HC Virt to GC Phys and HC Virt to HC Phys
 * conversions too, but we'll let MM handle that for now.
 *
 * This structure is used by linked lists in both GC and HC.
 */
typedef struct PGMRAMRANGE
{
    /** Pointer to the next RAM range - for HC. */
    HCPTRTYPE(struct PGMRAMRANGE *) pNextHC;
    /** Pointer to the next RAM range - for GC. */
    GCPTRTYPE(struct PGMRAMRANGE *) pNextGC;
    /** Start of the range. Page aligned. */
    RTGCPHYS                        GCPhys;
    /** Last address in the range (inclusive). Page aligned (-1). */
    RTGCPHYS                        GCPhysLast;
    /** Size of the range. (Page aligned of course). */
    RTGCPHYS                        cb;
    /** MM_RAM_* flags */
    uint32_t                        fFlags;

    /** HC virtual lookup ranges for chunks. Currently only used with MM_RAM_FLAGS_DYNAMIC_ALLOC ranges. */
    GCPTRTYPE(PRTHCPTR)             pavHCChunkGC;
    /** HC virtual lookup ranges for chunks. Currently only used with MM_RAM_FLAGS_DYNAMIC_ALLOC ranges. */
    HCPTRTYPE(PRTHCPTR)             pavHCChunkHC;

    /** Start of the HC mapping of the range.
     * For pure MMIO and dynamically allocated ranges this is NULL, while for
     * all other ranges this is a valid pointer. */
    HCPTRTYPE(void *)               pvHC;

    /** Array of the flags and HC physical addresses corresponding to the range.
     * The index is the page number in the range. The size is cb >> PAGE_SHIFT.
     *
     * The 12 lower bits of the physical address are flags and must be masked
     * off to get the correct physical address.
     *
     * For pure MMIO ranges only the flags are valid.
     */
    RTHCPHYS                        aHCPhys[1];
} PGMRAMRANGE;
/** Pointer to Ram range for GC Phys to HC Phys conversion. */
typedef PGMRAMRANGE *PPGMRAMRANGE;
506
/** Returns the HC pointer corresponding to a RAM range and a physical offset.
 * @param   pRam    Pointer to the PGMRAMRANGE.
 * @param   off     Byte offset into the range.
 * @remark  Arguments are now parenthesized to guard against operator-precedence
 *          surprises; (off) is still evaluated twice in the dynamic-chunk case,
 *          so avoid side effects in it.
 * @remark  The expansion deliberately ends with a ';' — existing call sites
 *          rely on this, so it must not be removed. */
#define PGMRAMRANGE_GETHCPTR(pRam, off) \
    ((pRam)->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC) ? (RTHCPTR)((RTHCUINTPTR)CTXSUFF((pRam)->pavHCChunk)[((off) >> PGM_DYNAMIC_CHUNK_SHIFT)] + ((off) & PGM_DYNAMIC_CHUNK_OFFSET_MASK)) \
                                                  : (RTHCPTR)((RTHCUINTPTR)(pRam)->pvHC + (off));
511
/** @todo r=bird: fix typename. */
/**
 * PGMPhysRead/Write cache entry.
 */
typedef struct PGMPHYSCACHE_ENTRY
{
    /** HC pointer to physical page. */
    HCPTRTYPE(uint8_t *)    pbHC;
    /** GC Physical address for cache entry. */
    RTGCPHYS                GCPhys;
#if HC_ARCH_BITS == 64 && GC_ARCH_BITS == 32
    RTGCPHYS                u32Padding0; /**< alignment padding. */
#endif
} PGMPHYSCACHE_ENTRY;

/**
 * PGMPhysRead/Write cache to reduce REM memory access overhead.
 */
typedef struct PGMPHYSCACHE
{
    /** Bitmap of valid cache entries (one bit per Entry[] slot). */
    uint64_t                aEntries;
    /** Cache entries. */
    PGMPHYSCACHE_ENTRY      Entry[PGM_MAX_PHYSCACHE_ENTRIES];
} PGMPHYSCACHE;
537
538
/** @name PGM Pool Indexes.
 * Aka. the unique shadow page identifier.
 * @{ */
/** NIL page pool IDX. */
#define NIL_PGMPOOL_IDX         0
/** The first special index (the fixed root pages below). */
#define PGMPOOL_IDX_FIRST_SPECIAL 1
/** Page directory (32-bit root). */
#define PGMPOOL_IDX_PD          1
/** The extended PAE page directory (2048 entries, works as root currently). */
#define PGMPOOL_IDX_PAE_PD      2
/** Page Directory Pointer Table (PAE root, not currently used). */
#define PGMPOOL_IDX_PDPTR       3
/** Page Map Level-4 (64-bit root). */
#define PGMPOOL_IDX_PML4        4
/** The first normal index. */
#define PGMPOOL_IDX_FIRST       5
/** The last valid index. (inclusive, 14 bits) */
#define PGMPOOL_IDX_LAST        0x3fff
/** @} */

/** The NIL index for the parent chain. */
#define NIL_PGMPOOL_USER_INDEX  ((uint16_t)0xffff)
562
/**
 * Node in the chain linking a shadowed page to its parent (user).
 */
#pragma pack(1)
typedef struct PGMPOOLUSER
{
    /** The index to the next item in the chain. NIL_PGMPOOL_USER_INDEX is no next. */
    uint16_t            iNext;
    /** The user page index. */
    uint16_t            iUser;
    /** Index into the user table. */
    uint16_t            iUserTable;
} PGMPOOLUSER, *PPGMPOOLUSER;
typedef const PGMPOOLUSER *PCPGMPOOLUSER;
#pragma pack()
578
579
/** The NIL index for the phys ext chain. */
#define NIL_PGMPOOL_PHYSEXT_INDEX   ((uint16_t)0xffff)

/**
 * Node in the chain of physical cross reference extents.
 */
#pragma pack(1)
typedef struct PGMPOOLPHYSEXT
{
    /** The index to the next item in the chain. NIL_PGMPOOL_PHYSEXT_INDEX is no next. */
    uint16_t            iNext;
    /** The user page indexes (up to three per extent node). */
    uint16_t            aidx[3];
} PGMPOOLPHYSEXT, *PPGMPOOLPHYSEXT;
typedef const PGMPOOLPHYSEXT *PCPGMPOOLPHYSEXT;
#pragma pack()
596
597
/**
 * The kind of page that's being shadowed.
 * ("Shw" = the shadow page this pool page holds, "Gst" = the guest
 * structure it shadows.)
 */
typedef enum PGMPOOLKIND
{
    /** The virtual invalid 0 entry. */
    PGMPOOLKIND_INVALID = 0,
    /** The entry is free (=unused). */
    PGMPOOLKIND_FREE,

    /** Shw: 32-bit page table; Gst: no paging. */
    PGMPOOLKIND_32BIT_PT_FOR_PHYS,
    /** Shw: 32-bit page table; Gst: 32-bit page table. */
    PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT,
    /** Shw: 32-bit page table; Gst: 4MB page. */
    PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB,
    /** Shw: PAE page table; Gst: no paging. */
    PGMPOOLKIND_PAE_PT_FOR_PHYS,
    /** Shw: PAE page table; Gst: 32-bit page table. */
    PGMPOOLKIND_PAE_PT_FOR_32BIT_PT,
    /** Shw: PAE page table; Gst: Half of a 4MB page. */
    PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB,
    /** Shw: PAE page table; Gst: PAE page table. */
    PGMPOOLKIND_PAE_PT_FOR_PAE_PT,
    /** Shw: PAE page table; Gst: 2MB page. */
    PGMPOOLKIND_PAE_PT_FOR_PAE_2MB,

    /** Shw: PAE page directory; Gst: 32-bit page directory. */
    PGMPOOLKIND_PAE_PD_FOR_32BIT_PD,
    /** Shw: PAE page directory; Gst: PAE page directory. */
    PGMPOOLKIND_PAE_PD_FOR_PAE_PD,

    /** Shw: 64-bit page directory pointer table; Gst: 64-bit page directory pointer table. */
    PGMPOOLKIND_64BIT_PDPTR_FOR_64BIT_PDPTR,

    /** Shw: Root 32-bit page directory. */
    PGMPOOLKIND_ROOT_32BIT_PD,
    /** Shw: Root PAE page directory. */
    PGMPOOLKIND_ROOT_PAE_PD,
    /** Shw: Root PAE page directory pointer table (legacy, 4 entries). */
    PGMPOOLKIND_ROOT_PDPTR,
    /** Shw: Root page map level-4 table. */
    PGMPOOLKIND_ROOT_PML4,

    /** The last valid entry. */
    PGMPOOLKIND_LAST = PGMPOOLKIND_ROOT_PML4
} PGMPOOLKIND;
645
646
/**
 * The tracking data for a page in the pool.
 */
typedef struct PGMPOOLPAGE
{
    /** AVL node core with the (HC) physical address of this page. */
    AVLOHCPHYSNODECORE  Core;
    /** Pointer to the HC mapping of the page. */
    HCPTRTYPE(void *)   pvPageHC;
    /** The guest physical address. */
    RTGCPHYS            GCPhys;
    /** The kind of page we're shadowing. (This is really a PGMPOOLKIND enum.) */
    uint8_t             enmKind;
    /** Alignment padding. */
    uint8_t             bPadding;
    /** The index of this page. */
    uint16_t            idx;
    /** The next entry in the list this page currently resides in.
     * It's either in the free list or in the GCPhys hash. */
    uint16_t            iNext;
#ifdef PGMPOOL_WITH_USER_TRACKING
    /** Head of the user chain. NIL_PGMPOOL_USER_INDEX if not currently in use. */
    uint16_t            iUserHead;
    /** The number of present entries. */
    uint16_t            cPresent;
    /** The first entry in the table which is present. */
    uint16_t            iFirstPresent;
#endif
#ifdef PGMPOOL_WITH_MONITORING
    /** The number of modifications to the monitored page. */
    uint16_t            cModifications;
    /** The next modified page. NIL_PGMPOOL_IDX if tail. */
    uint16_t            iModifiedNext;
    /** The previous modified page. NIL_PGMPOOL_IDX if head. */
    uint16_t            iModifiedPrev;
    /** The next page sharing access handler. NIL_PGMPOOL_IDX if tail. */
    uint16_t            iMonitoredNext;
    /** The previous page sharing access handler. NIL_PGMPOOL_IDX if head. */
    uint16_t            iMonitoredPrev;
#endif
#ifdef PGMPOOL_WITH_CACHE
    /** The next page in the age list. */
    uint16_t            iAgeNext;
    /** The previous page in the age list. */
    uint16_t            iAgePrev;
/** @todo add more from PGMCache.h when merging with it. */
#endif /* PGMPOOL_WITH_CACHE */
    /** Used to indicate that the page is zeroed. */
    bool                fZeroed;
    /** Used to indicate that a PT has non-global entries. */
    bool                fSeenNonGlobal;
    /** Used to indicate that we're monitoring writes to the guest page. */
    bool                fMonitored;
    /** Used to indicate that the page is in the cache (e.g. in the GCPhys hash).
     * (All pages are in the age list.) */
    bool                fCached;
    /** This is used by the R3 access handlers when invoked by an async thread.
     * It's a hack required because of REMR3NotifyHandlerPhysicalDeregister. */
    bool volatile       fReusedFlushPending;
    /** Used to indicate that the guest page being mapped is also used as a CR3.
     * In these cases the access handler acts differently and will check
     * for mapping conflicts like the normal CR3 handler.
     * @todo When we change the CR3 shadowing to use pool pages, this flag can be
     *       replaced by a list of pages which share access handler.
     */
    bool                fCR3Mix;
#if HC_ARCH_BITS == 64 || GC_ARCH_BITS == 64
    bool                Alignment[4];   /**< Align the structure size on a 64-bit boundary. */
#endif
} PGMPOOLPAGE, *PPGMPOOLPAGE, **PPPGMPOOLPAGE;
716
717
#ifdef PGMPOOL_WITH_CACHE
/** The hash table size. (Must be a power of two for the mask below.) */
# define PGMPOOL_HASH_SIZE      0x40
/** The hash function: GCPhys page number modulo the table size. */
# define PGMPOOL_HASH(GCPhys)   ( ((GCPhys) >> PAGE_SHIFT) & (PGMPOOL_HASH_SIZE - 1) )
#endif
724
725
726/**
727 * The shadow page pool instance data.
728 *
729 * It's all one big allocation made at init time, except for the
730 * pages that is. The user nodes follows immediatly after the
731 * page structures.
732 */
733typedef struct PGMPOOL
734{
735 /** The VM handle - HC Ptr. */
736 HCPTRTYPE(PVM) pVMHC;
737 /** The VM handle - GC Ptr. */
738 GCPTRTYPE(PVM) pVMGC;
739 /** The max pool size. This includes the special IDs. */
740 uint16_t cMaxPages;
741 /** The current pool size. */
742 uint16_t cCurPages;
743 /** The head of the free page list. */
744 uint16_t iFreeHead;
745 /* Padding. */
746 uint16_t u16Padding;
747#ifdef PGMPOOL_WITH_USER_TRACKING
748 /** Head of the chain of free user nodes. */
749 uint16_t iUserFreeHead;
750 /** The number of user nodes we've allocated. */
751 uint16_t cMaxUsers;
752 /** The number of present page table entries in the entire pool. */
753 uint32_t cPresent;
754 /** Pointer to the array of user nodes - GC pointer. */
755 GCPTRTYPE(PPGMPOOLUSER) paUsersGC;
756 /** Pointer to the array of user nodes - HC pointer. */
757 HCPTRTYPE(PPGMPOOLUSER) paUsersHC;
758#endif /* PGMPOOL_WITH_USER_TRACKING */
759#ifdef PGMPOOL_WITH_GCPHYS_TRACKING
760 /** Head of the chain of free phys ext nodes. */
761 uint16_t iPhysExtFreeHead;
762 /** The number of user nodes we've allocated. */
763 uint16_t cMaxPhysExts;
764 /** Pointer to the array of physical xref extent - GC pointer. */
765 GCPTRTYPE(PPGMPOOLPHYSEXT) paPhysExtsGC;
766 /** Pointer to the array of physical xref extent nodes - HC pointer. */
767 HCPTRTYPE(PPGMPOOLPHYSEXT) paPhysExtsHC;
768#endif /* PGMPOOL_WITH_GCPHYS_TRACKING */
769#ifdef PGMPOOL_WITH_CACHE
770 /** Hash table for GCPhys addresses. */
771 uint16_t aiHash[PGMPOOL_HASH_SIZE];
772 /** The head of the age list. */
773 uint16_t iAgeHead;
774 /** The tail of the age list. */
775 uint16_t iAgeTail;
776 /** Set if the cache is enabled. */
777 bool fCacheEnabled;
778#endif /* PGMPOOL_WITH_CACHE */
779#ifdef PGMPOOL_WITH_MONITORING
780 /** Head of the list of modified pages. */
781 uint16_t iModifiedHead;
782 /** The current number of modified pages. */
783 uint16_t cModifiedPages;
784 /** Access handler, GC. */
785 GCPTRTYPE(PFNPGMGCPHYSHANDLER) pfnAccessHandlerGC;
786 /** Access handler, R0. */
787 R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnAccessHandlerR0;
788 /** Access handler, R3. */
789 HCPTRTYPE(PFNPGMR3PHYSHANDLER) pfnAccessHandlerR3;
790 /** The access handler description (HC ptr). */
791 HCPTRTYPE(const char *) pszAccessHandler;
792#endif /* PGMPOOL_WITH_MONITORING */
793 /** The number of pages currently in use. */
794 uint16_t cUsedPages;
795#ifdef VBOX_WITH_STATISTICS
796 /** The high wather mark for cUsedPages. */
797 uint16_t cUsedPagesHigh;
798 uint32_t Alignment1; /**< Align the next member on a 64-bit boundrary. */
799 /** Profiling pgmPoolAlloc(). */
800 STAMPROFILEADV StatAlloc;
801 /** Profiling pgmPoolClearAll(). */
802 STAMPROFILE StatClearAll;
803 /** Profiling pgmPoolFlushAllInt(). */
804 STAMPROFILE StatFlushAllInt;
805 /** Profiling pgmPoolFlushPage(). */
806 STAMPROFILE StatFlushPage;
807 /** Profiling pgmPoolFree(). */
808 STAMPROFILE StatFree;
809 /** Profiling time spent zeroing pages. */
810 STAMPROFILE StatZeroPage;
811# ifdef PGMPOOL_WITH_USER_TRACKING
812 /** Profiling of pgmPoolTrackDeref. */
813 STAMPROFILE StatTrackDeref;
814 /** Profiling pgmTrackFlushGCPhysPT. */
815 STAMPROFILE StatTrackFlushGCPhysPT;
816 /** Profiling pgmTrackFlushGCPhysPTs. */
817 STAMPROFILE StatTrackFlushGCPhysPTs;
818 /** Profiling pgmTrackFlushGCPhysPTsSlow. */
819 STAMPROFILE StatTrackFlushGCPhysPTsSlow;
820 /** Number of times we've been out of user records. */
821 STAMCOUNTER StatTrackFreeUpOneUser;
822# endif
823# ifdef PGMPOOL_WITH_GCPHYS_TRACKING
824 /** Profiling deref activity related tracking GC physical pages. */
825 STAMPROFILE StatTrackDerefGCPhys;
826 /** Number of linear searches for a HCPhys in the ram ranges. */
827 STAMCOUNTER StatTrackLinearRamSearches;
828 /** The number of failing pgmPoolTrackPhysExtAlloc calls. */
829 STAMCOUNTER StamTrackPhysExtAllocFailures;
830# endif
831# ifdef PGMPOOL_WITH_MONITORING
832 /** Profiling the GC PT access handler. */
833 STAMPROFILE StatMonitorGC;
834 /** Times we've failed interpreting the instruction. */
835 STAMCOUNTER StatMonitorGCEmulateInstr;
836 /** Profiling the pgmPoolFlushPage calls made from the GC PT access handler. */
837 STAMPROFILE StatMonitorGCFlushPage;
838 /** Times we've detected fork(). */
839 STAMCOUNTER StatMonitorGCFork;
840 /** Profiling the GC access we've handled (except REP STOSD). */
841 STAMPROFILE StatMonitorGCHandled;
842 /** Times we've failed interpreting a patch code instruction. */
843 STAMCOUNTER StatMonitorGCIntrFailPatch1;
844 /** Times we've failed interpreting a patch code instruction during flushing. */
845 STAMCOUNTER StatMonitorGCIntrFailPatch2;
846 /** The number of times we've seen rep prefixes we can't handle. */
847 STAMCOUNTER StatMonitorGCRepPrefix;
848 /** Profiling the REP STOSD cases we've handled. */
849 STAMPROFILE StatMonitorGCRepStosd;
850
851 /** Profiling the HC PT access handler. */
852 STAMPROFILE StatMonitorHC;
853 /** Times we've failed interpreting the instruction. */
854 STAMCOUNTER StatMonitorHCEmulateInstr;
855 /** Profiling the pgmPoolFlushPage calls made from the HC PT access handler. */
856 STAMPROFILE StatMonitorHCFlushPage;
857 /** Times we've detected fork(). */
858 STAMCOUNTER StatMonitorHCFork;
859 /** Profiling the HC access we've handled (except REP STOSD). */
860 STAMPROFILE StatMonitorHCHandled;
861 /** The number of times we've seen rep prefixes we can't handle. */
862 STAMCOUNTER StatMonitorHCRepPrefix;
863 /** Profiling the REP STOSD cases we've handled. */
864 STAMPROFILE StatMonitorHCRepStosd;
865 /** The number of times we're called in an async thread an need to flush. */
866 STAMCOUNTER StatMonitorHCAsync;
867 /** The high wather mark for cModifiedPages. */
868 uint16_t cModifiedPagesHigh;
869 uint16_t Alignment2[3]; /**< Align the next member on a 64-bit boundrary. */
870# endif
871# ifdef PGMPOOL_WITH_CACHE
872 /** The number of cache hits. */
873 STAMCOUNTER StatCacheHits;
874 /** The number of cache misses. */
875 STAMCOUNTER StatCacheMisses;
876 /** The number of times we've got a conflict of 'kind' in the cache. */
877 STAMCOUNTER StatCacheKindMismatches;
878 /** Number of times we've been out of pages. */
879 STAMCOUNTER StatCacheFreeUpOne;
880 /** The number of cacheable allocations. */
881 STAMCOUNTER StatCacheCacheable;
882 /** The number of uncacheable allocations. */
883 STAMCOUNTER StatCacheUncacheable;
884# endif
885#elif HC_ARCH_BITS == 64 && GC_ARCH_BITS == 32
886 uint32_t Alignment1; /**< Align the next member on a 64-bit boundrary. */
887#endif
888 /** The AVL tree for looking up a page by its HC physical address. */
889 AVLOHCPHYSTREE HCPhysTree;
890 uint32_t Alignment3; /**< Align the next member on a 64-bit boundrary. */
891 /** Array of pages. (cMaxPages in length)
 * The Id is the index into this array.
893 */
894 PGMPOOLPAGE aPages[PGMPOOL_IDX_FIRST];
895} PGMPOOL, *PPGMPOOL, **PPPGMPOOL;
896
897
/** @def PGMPOOL_PAGE_2_PTR
 * Maps a pool page into the current context.
 *
 * @returns Pointer to the mapped pool page.
 * @param   pVM     The VM handle.
 * @param   pPage   The pool page.
 *
 * @remark  In GC this uses PGMGCDynMapHCPage(), so it will consume one of the
 *          small page windows employed by that function. Be careful.
 * @remark  There is no need to assert on the result.
 */
#ifdef IN_GC
# define PGMPOOL_PAGE_2_PTR(pVM, pPage)  pgmGCPoolMapPage((pVM), (pPage))
#else
# define PGMPOOL_PAGE_2_PTR(pVM, pPage)  ((pPage)->pvPageHC)
#endif
914
915
916/**
917 * Trees are using self relative offsets as pointers.
918 * So, all its data, including the root pointer, must be in the heap for HC and GC
919 * to have the same layout.
920 */
921typedef struct PGMTREES
922{
923 /** Physical access handlers (AVL range+offsetptr tree). */
924 AVLROGCPHYSTREE PhysHandlers;
925 /** Virtual access handlers (AVL range + GC ptr tree). */
926 AVLROGCPTRTREE VirtHandlers;
927 /** Virtual access handlers (Phys range AVL range + offsetptr tree). */
928 AVLROGCPHYSTREE PhysToVirtHandlers;
929 uint32_t auPadding[1];
930} PGMTREES;
931/** Pointer to PGM trees. */
932typedef PGMTREES *PPGMTREES;
933
934
/** @name Paging mode macros
 * @{ */
/*
 * PGM_CTX pastes the current context infix (GC, R3 or R0) into an identifier,
 * PGM_CTX_STR builds the corresponding name string, and PGM_CTX_DECL selects
 * the matching declaration/calling-convention macro.  This lets the same
 * template code be instantiated once per context.
 */
#ifdef IN_GC
# define PGM_CTX(a,b)                   a##GC##b
# define PGM_CTX_STR(a,b)               a "GC" b
# define PGM_CTX_DECL(type)             PGMGCDECL(type)
#else
# ifdef IN_RING3
# define PGM_CTX(a,b)                   a##R3##b
# define PGM_CTX_STR(a,b)               a "R3" b
# define PGM_CTX_DECL(type)             DECLCALLBACK(type)
# else
# define PGM_CTX(a,b)                   a##R0##b
# define PGM_CTX_STR(a,b)               a "R0" b
# define PGM_CTX_DECL(type)             PGMDECL(type)
# endif
#endif
952
/* Guest paging mode function names (identifiers for the current context) and
 * the corresponding GC/R0 symbol-name strings used for resolving them.
 * NOTE(review): PGM_GST_NAME itself is not defined in this chunk; presumably
 * the per-mode template instantiation defines it before use - confirm. */
#define PGM_GST_NAME_REAL(name)         PGM_CTX(pgm,GstReal##name)
#define PGM_GST_NAME_GC_REAL_STR(name)  "pgmGCGstReal" #name
#define PGM_GST_NAME_R0_REAL_STR(name)  "pgmR0GstReal" #name
#define PGM_GST_NAME_PROT(name)         PGM_CTX(pgm,GstProt##name)
#define PGM_GST_NAME_GC_PROT_STR(name)  "pgmGCGstProt" #name
#define PGM_GST_NAME_R0_PROT_STR(name)  "pgmR0GstProt" #name
#define PGM_GST_NAME_32BIT(name)        PGM_CTX(pgm,Gst32Bit##name)
#define PGM_GST_NAME_GC_32BIT_STR(name) "pgmGCGst32Bit" #name
#define PGM_GST_NAME_R0_32BIT_STR(name) "pgmR0Gst32Bit" #name
#define PGM_GST_NAME_PAE(name)          PGM_CTX(pgm,GstPAE##name)
#define PGM_GST_NAME_GC_PAE_STR(name)   "pgmGCGstPAE" #name
#define PGM_GST_NAME_R0_PAE_STR(name)   "pgmR0GstPAE" #name
#define PGM_GST_NAME_AMD64(name)        PGM_CTX(pgm,GstAMD64##name)
#define PGM_GST_NAME_GC_AMD64_STR(name) "pgmGCGstAMD64" #name
#define PGM_GST_NAME_R0_AMD64_STR(name) "pgmR0GstAMD64" #name
/** Looks up the guest mode function pointer 'name' for the current context. */
#define PGM_GST_PFN(name, pVM)          ((pVM)->pgm.s.PGM_CTX(pfn,Gst##name))
/** Declares a guest mode function with the current context's convention. */
#define PGM_GST_DECL(type, name)        PGM_CTX_DECL(type) PGM_GST_NAME(name)
970
/* Shadow paging mode function names and the GC/R0 symbol-name strings.
 * NOTE(review): PGM_SHW_NAME itself is not defined in this chunk; presumably
 * the per-mode template instantiation defines it before use - confirm. */
#define PGM_SHW_NAME_32BIT(name)        PGM_CTX(pgm,Shw32Bit##name)
#define PGM_SHW_NAME_GC_32BIT_STR(name) "pgmGCShw32Bit" #name
#define PGM_SHW_NAME_R0_32BIT_STR(name) "pgmR0Shw32Bit" #name
#define PGM_SHW_NAME_PAE(name)          PGM_CTX(pgm,ShwPAE##name)
#define PGM_SHW_NAME_GC_PAE_STR(name)   "pgmGCShwPAE" #name
#define PGM_SHW_NAME_R0_PAE_STR(name)   "pgmR0ShwPAE" #name
#define PGM_SHW_NAME_AMD64(name)        PGM_CTX(pgm,ShwAMD64##name)
#define PGM_SHW_NAME_GC_AMD64_STR(name) "pgmGCShwAMD64" #name
#define PGM_SHW_NAME_R0_AMD64_STR(name) "pgmR0ShwAMD64" #name
/** Declares a shadow mode function with the current context's convention. */
#define PGM_SHW_DECL(type, name)        PGM_CTX_DECL(type) PGM_SHW_NAME(name)
/** Looks up the shadow mode function pointer 'name' for the current context. */
#define PGM_SHW_PFN(name, pVM)          ((pVM)->pgm.s.PGM_CTX(pfn,Shw##name))
982
/* Shw_Gst: function names for each supported shadow+guest paging mode
 * combination ("both" templates), plus the GC/R0 symbol-name strings used
 * to resolve them in the respective contexts. */
#define PGM_BTH_NAME_32BIT_REAL(name)           PGM_CTX(pgm,Bth32BitReal##name)
#define PGM_BTH_NAME_32BIT_PROT(name)           PGM_CTX(pgm,Bth32BitProt##name)
#define PGM_BTH_NAME_32BIT_32BIT(name)          PGM_CTX(pgm,Bth32Bit32Bit##name)
#define PGM_BTH_NAME_PAE_REAL(name)             PGM_CTX(pgm,BthPAEReal##name)
#define PGM_BTH_NAME_PAE_PROT(name)             PGM_CTX(pgm,BthPAEProt##name)
#define PGM_BTH_NAME_PAE_32BIT(name)            PGM_CTX(pgm,BthPAE32Bit##name)
#define PGM_BTH_NAME_PAE_PAE(name)              PGM_CTX(pgm,BthPAEPAE##name)
#define PGM_BTH_NAME_AMD64_REAL(name)           PGM_CTX(pgm,BthAMD64Real##name)
#define PGM_BTH_NAME_AMD64_PROT(name)           PGM_CTX(pgm,BthAMD64Prot##name)
#define PGM_BTH_NAME_AMD64_AMD64(name)          PGM_CTX(pgm,BthAMD64AMD64##name)
#define PGM_BTH_NAME_GC_32BIT_REAL_STR(name)    "pgmGCBth32BitReal" #name
#define PGM_BTH_NAME_GC_32BIT_PROT_STR(name)    "pgmGCBth32BitProt" #name
#define PGM_BTH_NAME_GC_32BIT_32BIT_STR(name)   "pgmGCBth32Bit32Bit" #name
#define PGM_BTH_NAME_GC_PAE_REAL_STR(name)      "pgmGCBthPAEReal" #name
#define PGM_BTH_NAME_GC_PAE_PROT_STR(name)      "pgmGCBthPAEProt" #name
#define PGM_BTH_NAME_GC_PAE_32BIT_STR(name)     "pgmGCBthPAE32Bit" #name
#define PGM_BTH_NAME_GC_PAE_PAE_STR(name)       "pgmGCBthPAEPAE" #name
#define PGM_BTH_NAME_GC_AMD64_REAL_STR(name)    "pgmGCBthAMD64Real" #name
#define PGM_BTH_NAME_GC_AMD64_PROT_STR(name)    "pgmGCBthAMD64Prot" #name
#define PGM_BTH_NAME_GC_AMD64_AMD64_STR(name)   "pgmGCBthAMD64AMD64" #name
#define PGM_BTH_NAME_R0_32BIT_REAL_STR(name)    "pgmR0Bth32BitReal" #name
#define PGM_BTH_NAME_R0_32BIT_PROT_STR(name)    "pgmR0Bth32BitProt" #name
#define PGM_BTH_NAME_R0_32BIT_32BIT_STR(name)   "pgmR0Bth32Bit32Bit" #name
#define PGM_BTH_NAME_R0_PAE_REAL_STR(name)      "pgmR0BthPAEReal" #name
#define PGM_BTH_NAME_R0_PAE_PROT_STR(name)      "pgmR0BthPAEProt" #name
#define PGM_BTH_NAME_R0_PAE_32BIT_STR(name)     "pgmR0BthPAE32Bit" #name
#define PGM_BTH_NAME_R0_PAE_PAE_STR(name)       "pgmR0BthPAEPAE" #name
#define PGM_BTH_NAME_R0_AMD64_REAL_STR(name)    "pgmR0BthAMD64Real" #name
#define PGM_BTH_NAME_R0_AMD64_PROT_STR(name)    "pgmR0BthAMD64Prot" #name
#define PGM_BTH_NAME_R0_AMD64_AMD64_STR(name)   "pgmR0BthAMD64AMD64" #name
/** Declares a both-mode function with the current context's convention. */
#define PGM_BTH_DECL(type, name)                PGM_CTX_DECL(type) PGM_BTH_NAME(name)
/** Looks up the both-mode function pointer 'name' for the current context. */
#define PGM_BTH_PFN(name, pVM)                  ((pVM)->pgm.s.PGM_CTX(pfn,Bth##name))
/** @} */
1017
1018/**
1019 * Data for each paging mode.
1020 */
1021typedef struct PGMMODEDATA
1022{
1023 /** The guest mode type. */
1024 uint32_t uGstType;
1025 /** The shadow mode type. */
1026 uint32_t uShwType;
1027
1028 /** @name Function pointers for Shadow paging.
1029 * @{
1030 */
1031 DECLR3CALLBACKMEMBER(int, pfnR3ShwRelocate,(PVM pVM, RTGCUINTPTR offDelta));
1032 DECLR3CALLBACKMEMBER(int, pfnR3ShwExit,(PVM pVM));
1033 DECLR3CALLBACKMEMBER(int, pfnR3ShwGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
1034 DECLR3CALLBACKMEMBER(int, pfnR3ShwModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
1035 DECLR3CALLBACKMEMBER(int, pfnR3ShwGetPDEByIndex,(PVM pVM, uint32_t iPD, PX86PDEPAE pPde));
1036 DECLR3CALLBACKMEMBER(int, pfnR3ShwSetPDEByIndex,(PVM pVM, uint32_t iPD, X86PDEPAE Pde));
1037 DECLR3CALLBACKMEMBER(int, pfnR3ShwModifyPDEByIndex,(PVM pVM, uint32_t iPD, uint64_t fFlags, uint64_t fMask));
1038
1039 DECLGCCALLBACKMEMBER(int, pfnGCShwGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
1040 DECLGCCALLBACKMEMBER(int, pfnGCShwModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
1041 DECLGCCALLBACKMEMBER(int, pfnGCShwGetPDEByIndex,(PVM pVM, uint32_t iPD, PX86PDEPAE pPde));
1042 DECLGCCALLBACKMEMBER(int, pfnGCShwSetPDEByIndex,(PVM pVM, uint32_t iPD, X86PDEPAE Pde));
1043 DECLGCCALLBACKMEMBER(int, pfnGCShwModifyPDEByIndex,(PVM pVM, uint32_t iPD, uint64_t fFlags, uint64_t fMask));
1044
1045 DECLR0CALLBACKMEMBER(int, pfnR0ShwGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
1046 DECLR0CALLBACKMEMBER(int, pfnR0ShwModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
1047 DECLR0CALLBACKMEMBER(int, pfnR0ShwGetPDEByIndex,(PVM pVM, uint32_t iPD, PX86PDEPAE pPde));
1048 DECLR0CALLBACKMEMBER(int, pfnR0ShwSetPDEByIndex,(PVM pVM, uint32_t iPD, X86PDEPAE Pde));
1049 DECLR0CALLBACKMEMBER(int, pfnR0ShwModifyPDEByIndex,(PVM pVM, uint32_t iPD, uint64_t fFlags, uint64_t fMask));
1050 /** @} */
1051
1052 /** @name Function pointers for Guest paging.
1053 * @{
1054 */
1055 DECLR3CALLBACKMEMBER(int, pfnR3GstRelocate,(PVM pVM, RTGCUINTPTR offDelta));
1056 DECLR3CALLBACKMEMBER(int, pfnR3GstExit,(PVM pVM));
1057 DECLR3CALLBACKMEMBER(int, pfnR3GstGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
1058 DECLR3CALLBACKMEMBER(int, pfnR3GstModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
1059 DECLR3CALLBACKMEMBER(int, pfnR3GstGetPDE,(PVM pVM, RTGCUINTPTR GCPtr, PX86PDEPAE pPde));
1060 DECLR3CALLBACKMEMBER(int, pfnR3GstMonitorCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
1061 DECLR3CALLBACKMEMBER(int, pfnR3GstUnmonitorCR3,(PVM pVM));
1062 DECLR3CALLBACKMEMBER(int, pfnR3GstMapCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
1063 DECLR3CALLBACKMEMBER(int, pfnR3GstUnmapCR3,(PVM pVM));
1064 HCPTRTYPE(PFNPGMR3PHYSHANDLER) pfnHCGstWriteHandlerCR3;
1065 HCPTRTYPE(const char *) pszHCGstWriteHandlerCR3;
1066
1067 DECLGCCALLBACKMEMBER(int, pfnGCGstGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
1068 DECLGCCALLBACKMEMBER(int, pfnGCGstModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
1069 DECLGCCALLBACKMEMBER(int, pfnGCGstGetPDE,(PVM pVM, RTGCUINTPTR GCPtr, PX86PDEPAE pPde));
1070 DECLGCCALLBACKMEMBER(int, pfnGCGstMonitorCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
1071 DECLGCCALLBACKMEMBER(int, pfnGCGstUnmonitorCR3,(PVM pVM));
1072 DECLGCCALLBACKMEMBER(int, pfnGCGstMapCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
1073 DECLGCCALLBACKMEMBER(int, pfnGCGstUnmapCR3,(PVM pVM));
1074 GCPTRTYPE(PFNPGMGCPHYSHANDLER) pfnGCGstWriteHandlerCR3;
1075
1076 DECLR0CALLBACKMEMBER(int, pfnR0GstGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
1077 DECLR0CALLBACKMEMBER(int, pfnR0GstModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
1078 DECLR0CALLBACKMEMBER(int, pfnR0GstGetPDE,(PVM pVM, RTGCUINTPTR GCPtr, PX86PDEPAE pPde));
1079 DECLR0CALLBACKMEMBER(int, pfnR0GstMonitorCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
1080 DECLR0CALLBACKMEMBER(int, pfnR0GstUnmonitorCR3,(PVM pVM));
1081 DECLR0CALLBACKMEMBER(int, pfnR0GstMapCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
1082 DECLR0CALLBACKMEMBER(int, pfnR0GstUnmapCR3,(PVM pVM));
1083 R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnR0GstWriteHandlerCR3;
1084 /** @} */
1085
1086 /** @name Function pointers for Both Shadow and Guest paging.
1087 * @{
1088 */
1089 DECLR3CALLBACKMEMBER(int, pfnR3BthRelocate,(PVM pVM, RTGCUINTPTR offDelta));
1090 DECLR3CALLBACKMEMBER(int, pfnR3BthTrap0eHandler,(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
1091 DECLR3CALLBACKMEMBER(int, pfnR3BthInvalidatePage,(PVM pVM, RTGCPTR GCPtrPage));
1092 DECLR3CALLBACKMEMBER(int, pfnR3BthSyncCR3,(PVM pVM, uint32_t cr0, uint32_t cr3, uint32_t cr4, bool fGlobal));
1093 DECLR3CALLBACKMEMBER(int, pfnR3BthSyncPage,(PVM pVM, VBOXPDE PdeSrc, RTGCUINTPTR GCPtrPage, unsigned cPages, unsigned uError));
1094 DECLR3CALLBACKMEMBER(int, pfnR3BthPrefetchPage,(PVM pVM, RTGCUINTPTR GCPtrPage));
1095 DECLR3CALLBACKMEMBER(int, pfnR3BthVerifyAccessSyncPage,(PVM pVM, RTGCUINTPTR GCPtrPage, unsigned fFlags, unsigned uError));
1096#ifdef VBOX_STRICT
1097 DECLR3CALLBACKMEMBER(unsigned, pfnR3BthAssertCR3,(PVM pVM, uint32_t cr3, uint32_t cr4, RTGCUINTPTR GCPtr, RTGCUINTPTR cb));
1098#endif
1099
1100 DECLGCCALLBACKMEMBER(int, pfnGCBthTrap0eHandler,(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
1101 DECLGCCALLBACKMEMBER(int, pfnGCBthInvalidatePage,(PVM pVM, RTGCPTR GCPtrPage));
1102 DECLGCCALLBACKMEMBER(int, pfnGCBthSyncCR3,(PVM pVM, uint32_t cr0, uint32_t cr3, uint32_t cr4, bool fGlobal));
1103 DECLGCCALLBACKMEMBER(int, pfnGCBthSyncPage,(PVM pVM, VBOXPDE PdeSrc, RTGCUINTPTR GCPtrPage, unsigned cPages, unsigned uError));
1104 DECLGCCALLBACKMEMBER(int, pfnGCBthPrefetchPage,(PVM pVM, RTGCUINTPTR GCPtrPage));
1105 DECLGCCALLBACKMEMBER(int, pfnGCBthVerifyAccessSyncPage,(PVM pVM, RTGCUINTPTR GCPtrPage, unsigned fFlags, unsigned uError));
1106#ifdef VBOX_STRICT
1107 DECLGCCALLBACKMEMBER(unsigned, pfnGCBthAssertCR3,(PVM pVM, uint32_t cr3, uint32_t cr4, RTGCUINTPTR GCPtr, RTGCUINTPTR cb));
1108#endif
1109
1110 DECLR0CALLBACKMEMBER(int, pfnR0BthTrap0eHandler,(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
1111 DECLR0CALLBACKMEMBER(int, pfnR0BthInvalidatePage,(PVM pVM, RTGCPTR GCPtrPage));
1112 DECLR0CALLBACKMEMBER(int, pfnR0BthSyncCR3,(PVM pVM, uint32_t cr0, uint32_t cr3, uint32_t cr4, bool fGlobal));
1113 DECLR0CALLBACKMEMBER(int, pfnR0BthSyncPage,(PVM pVM, VBOXPDE PdeSrc, RTGCUINTPTR GCPtrPage, unsigned cPages, unsigned uError));
1114 DECLR0CALLBACKMEMBER(int, pfnR0BthPrefetchPage,(PVM pVM, RTGCUINTPTR GCPtrPage));
1115 DECLR0CALLBACKMEMBER(int, pfnR0BthVerifyAccessSyncPage,(PVM pVM, RTGCUINTPTR GCPtrPage, unsigned fFlags, unsigned uError));
1116#ifdef VBOX_STRICT
1117 DECLR0CALLBACKMEMBER(unsigned, pfnR0BthAssertCR3,(PVM pVM, uint32_t cr3, uint32_t cr4, RTGCUINTPTR GCPtr, RTGCUINTPTR cb));
1118#endif
1119 /** @} */
1120} PGMMODEDATA, *PPGMMODEDATA;
1121
1122
1123
1124/**
1125 * Converts a PGM pointer into a VM pointer.
1126 * @returns Pointer to the VM structure the PGM is part of.
1127 * @param pPGM Pointer to PGM instance data.
1128 */
1129#define PGM2VM(pPGM) ( (PVM)((char*)pPGM - pPGM->offVM) )
1130
1131/**
1132 * PGM Data (part of VM)
1133 */
1134typedef struct PGM
1135{
1136 /** Offset to the VM structure. */
1137 RTINT offVM;
1138
1139 /*
1140 * This will be redefined at least two more times before we're done, I'm sure.
1141 * The current code is only to get on with the coding.
1142 * - 2004-06-10: initial version, bird.
1143 * - 2004-07-02: 1st time, bird.
1144 * - 2004-10-18: 2nd time, bird.
1145 * - 2005-07-xx: 3rd time, bird.
1146 */
1147
1148 /** Pointer to the page table entries for the dynamic page mapping area - GCPtr. */
1149 GCPTRTYPE(PX86PTE) paDynPageMap32BitPTEsGC;
1150 /** Pointer to the page table entries for the dynamic page mapping area - GCPtr. */
1151 GCPTRTYPE(PX86PTEPAE) paDynPageMapPaePTEsGC;
1152
1153 /** The host paging mode. (This is what SUPLib reports.) */
1154 SUPPAGINGMODE enmHostMode;
1155 /** The shadow paging mode. */
1156 PGMMODE enmShadowMode;
1157 /** The guest paging mode. */
1158 PGMMODE enmGuestMode;
1159
1160 /** The current physical address representing in the guest CR3 register. */
1161 RTGCPHYS GCPhysCR3;
1162 /** Pointer to the 5 page CR3 content mapping.
1163 * The first page is always the CR3 (in some form) while the 4 other pages
1164 * are used of the PDs in PAE mode. */
1165 RTGCPTR GCPtrCR3Mapping;
1166 /** The physical address of the currently monitored guest CR3 page.
1167 * When this value is NIL_RTGCPHYS no page is being monitored. */
1168 RTGCPHYS GCPhysGstCR3Monitored;
1169#if HC_ARCH_BITS == 64 || GC_ARCH_BITS == 64
1170 RTGCPHYS GCPhysPadding0; /**< alignment padding. */
1171#endif
1172
1173 /** @name 32-bit Guest Paging.
1174 * @{ */
1175 /** The guest's page directory, HC pointer. */
1176 HCPTRTYPE(PVBOXPD) pGuestPDHC;
1177 /** The guest's page directory, static GC mapping. */
1178 GCPTRTYPE(PVBOXPD) pGuestPDGC;
1179 /** @} */
1180
1181 /** @name PAE Guest Paging.
1182 * @{ */
1183 /** The guest's page directory pointer table, static GC mapping. */
1184 GCPTRTYPE(PX86PDPTR) pGstPaePDPTRGC;
1185 /** The guest's page directory pointer table, HC pointer. */
1186 HCPTRTYPE(PX86PDPTR) pGstPaePDPTRHC;
1187 /** The guest's page directories, HC pointers.
 * These are individual pointers and don't have to be adjacent.
 * These don't have to be up to date - use pgmGstGetPaePD() to access them. */
1190 HCPTRTYPE(PX86PDPAE) apGstPaePDsHC[4];
1191 /** The guest's page directories, static GC mapping.
1192 * Unlike the HC array the first entry can be accessed as a 2048 entry PD.
 * These don't have to be up to date - use pgmGstGetPaePD() to access them. */
1194 GCPTRTYPE(PX86PDPAE) apGstPaePDsGC[4];
1195 /** The physical addresses of the guest page directories (PAE) pointed to by apGstPagePDsHC/GC. */
1196 RTGCPHYS aGCPhysGstPaePDs[4];
1197 /** The physical addresses of the monitored guest page directories (PAE). */
1198 RTGCPHYS aGCPhysGstPaePDsMonitored[4];
1199 /** @} */
1200
1201
1202 /** @name 32-bit Shadow Paging
1203 * @{ */
1204 /** The 32-Bit PD - HC Ptr. */
1205 HCPTRTYPE(PX86PD) pHC32BitPD;
1206 /** The 32-Bit PD - GC Ptr. */
1207 GCPTRTYPE(PX86PD) pGC32BitPD;
1208#if HC_ARCH_BITS == 64 && GC_ARCH_BITS == 32
1209 uint32_t u32Padding1; /**< alignment padding. */
1210#endif
1211 /** The Physical Address (HC) of the 32-Bit PD. */
1212 RTHCPHYS HCPhys32BitPD;
1213 /** @} */
1214
1215 /** @name PAE Shadow Paging
1216 * @{ */
1217 /** The four PDs for the low 4GB - HC Ptr.
1218 * Even though these are 4 pointers, what they point at is a single table.
1219 * Thus, it's possible to walk the 2048 entries starting where apHCPaePDs[0] points. */
1220 HCPTRTYPE(PX86PDPAE) apHCPaePDs[4];
1221 /** The four PDs for the low 4GB - GC Ptr.
1222 * Same kind of mapping as apHCPaePDs. */
1223 GCPTRTYPE(PX86PDPAE) apGCPaePDs[4];
1224 /** The Physical Address (HC) of the four PDs for the low 4GB.
1225 * These are *NOT* 4 contiguous pages. */
1226 RTHCPHYS aHCPhysPaePDs[4];
1227 /** The PAE PDPTR - HC Ptr. */
1228 HCPTRTYPE(PX86PDPTR) pHCPaePDPTR;
1229 /** The Physical Address (HC) of the PAE PDPTR. */
1230 RTHCPHYS HCPhysPaePDPTR;
1231 /** The PAE PDPTR - GC Ptr. */
1232 GCPTRTYPE(PX86PDPTR) pGCPaePDPTR;
1233 /** @} */
1234
1235 /** @name AMD64 Shadow Paging
1236 * Extends PAE Paging.
1237 * @{ */
1238 /** The Page Map Level 4 table - HC Ptr. */
1239 GCPTRTYPE(PX86PML4) pGCPaePML4;
1240 /** The Page Map Level 4 table - GC Ptr. */
1241 HCPTRTYPE(PX86PML4) pHCPaePML4;
1242 /** The Physical Address (HC) of the Page Map Level 4 table. */
1243 RTHCPHYS HCPhysPaePML4;
1244 /** @}*/
1245
1246 /** @name Function pointers for Shadow paging.
1247 * @{
1248 */
1249 DECLR3CALLBACKMEMBER(int, pfnR3ShwRelocate,(PVM pVM, RTGCUINTPTR offDelta));
1250 DECLR3CALLBACKMEMBER(int, pfnR3ShwExit,(PVM pVM));
1251 DECLR3CALLBACKMEMBER(int, pfnR3ShwGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
1252 DECLR3CALLBACKMEMBER(int, pfnR3ShwModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
1253 DECLR3CALLBACKMEMBER(int, pfnR3ShwGetPDEByIndex,(PVM pVM, uint32_t iPD, PX86PDEPAE pPde));
1254 DECLR3CALLBACKMEMBER(int, pfnR3ShwSetPDEByIndex,(PVM pVM, uint32_t iPD, X86PDEPAE Pde));
1255 DECLR3CALLBACKMEMBER(int, pfnR3ShwModifyPDEByIndex,(PVM pVM, uint32_t iPD, uint64_t fFlags, uint64_t fMask));
1256
1257 DECLGCCALLBACKMEMBER(int, pfnGCShwGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
1258 DECLGCCALLBACKMEMBER(int, pfnGCShwModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
1259 DECLGCCALLBACKMEMBER(int, pfnGCShwGetPDEByIndex,(PVM pVM, uint32_t iPD, PX86PDEPAE pPde));
1260 DECLGCCALLBACKMEMBER(int, pfnGCShwSetPDEByIndex,(PVM pVM, uint32_t iPD, X86PDEPAE Pde));
1261 DECLGCCALLBACKMEMBER(int, pfnGCShwModifyPDEByIndex,(PVM pVM, uint32_t iPD, uint64_t fFlags, uint64_t fMask));
1262#if GC_ARCH_BITS == 32 && HC_ARCH_BITS == 64
1263 RTGCPTR alignment0; /**< structure size alignment. */
1264#endif
1265
1266 DECLR0CALLBACKMEMBER(int, pfnR0ShwGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
1267 DECLR0CALLBACKMEMBER(int, pfnR0ShwModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
1268 DECLR0CALLBACKMEMBER(int, pfnR0ShwGetPDEByIndex,(PVM pVM, uint32_t iPD, PX86PDEPAE pPde));
1269 DECLR0CALLBACKMEMBER(int, pfnR0ShwSetPDEByIndex,(PVM pVM, uint32_t iPD, X86PDEPAE Pde));
1270 DECLR0CALLBACKMEMBER(int, pfnR0ShwModifyPDEByIndex,(PVM pVM, uint32_t iPD, uint64_t fFlags, uint64_t fMask));
1271
1272 /** @} */
1273
1274 /** @name Function pointers for Guest paging.
1275 * @{
1276 */
1277 DECLR3CALLBACKMEMBER(int, pfnR3GstRelocate,(PVM pVM, RTGCUINTPTR offDelta));
1278 DECLR3CALLBACKMEMBER(int, pfnR3GstExit,(PVM pVM));
1279 DECLR3CALLBACKMEMBER(int, pfnR3GstGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
1280 DECLR3CALLBACKMEMBER(int, pfnR3GstModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
1281 DECLR3CALLBACKMEMBER(int, pfnR3GstGetPDE,(PVM pVM, RTGCUINTPTR GCPtr, PX86PDEPAE pPde));
1282 DECLR3CALLBACKMEMBER(int, pfnR3GstMonitorCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
1283 DECLR3CALLBACKMEMBER(int, pfnR3GstUnmonitorCR3,(PVM pVM));
1284 DECLR3CALLBACKMEMBER(int, pfnR3GstMapCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
1285 DECLR3CALLBACKMEMBER(int, pfnR3GstUnmapCR3,(PVM pVM));
1286 HCPTRTYPE(PFNPGMR3PHYSHANDLER) pfnHCGstWriteHandlerCR3;
1287 HCPTRTYPE(const char *) pszHCGstWriteHandlerCR3;
1288
1289 DECLGCCALLBACKMEMBER(int, pfnGCGstGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
1290 DECLGCCALLBACKMEMBER(int, pfnGCGstModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
1291 DECLGCCALLBACKMEMBER(int, pfnGCGstGetPDE,(PVM pVM, RTGCUINTPTR GCPtr, PX86PDEPAE pPde));
1292 DECLGCCALLBACKMEMBER(int, pfnGCGstMonitorCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
1293 DECLGCCALLBACKMEMBER(int, pfnGCGstUnmonitorCR3,(PVM pVM));
1294 DECLGCCALLBACKMEMBER(int, pfnGCGstMapCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
1295 DECLGCCALLBACKMEMBER(int, pfnGCGstUnmapCR3,(PVM pVM));
1296 GCPTRTYPE(PFNPGMGCPHYSHANDLER) pfnGCGstWriteHandlerCR3;
1297
1298 DECLR0CALLBACKMEMBER(int, pfnR0GstGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
1299 DECLR0CALLBACKMEMBER(int, pfnR0GstModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
1300 DECLR0CALLBACKMEMBER(int, pfnR0GstGetPDE,(PVM pVM, RTGCUINTPTR GCPtr, PX86PDEPAE pPde));
1301 DECLR0CALLBACKMEMBER(int, pfnR0GstMonitorCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
1302 DECLR0CALLBACKMEMBER(int, pfnR0GstUnmonitorCR3,(PVM pVM));
1303 DECLR0CALLBACKMEMBER(int, pfnR0GstMapCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
1304 DECLR0CALLBACKMEMBER(int, pfnR0GstUnmapCR3,(PVM pVM));
1305 R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnR0GstWriteHandlerCR3;
1306 /** @} */
1307
1308 /** @name Function pointers for Both Shadow and Guest paging.
1309 * @{
1310 */
1311 DECLR3CALLBACKMEMBER(int, pfnR3BthRelocate,(PVM pVM, RTGCUINTPTR offDelta));
1312 DECLR3CALLBACKMEMBER(int, pfnR3BthTrap0eHandler,(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
1313 DECLR3CALLBACKMEMBER(int, pfnR3BthInvalidatePage,(PVM pVM, RTGCPTR GCPtrPage));
1314 DECLR3CALLBACKMEMBER(int, pfnR3BthSyncCR3,(PVM pVM, uint32_t cr0, uint32_t cr3, uint32_t cr4, bool fGlobal));
1315 DECLR3CALLBACKMEMBER(int, pfnR3BthSyncPage,(PVM pVM, VBOXPDE PdeSrc, RTGCUINTPTR GCPtrPage, unsigned cPages, unsigned uError));
1316 DECLR3CALLBACKMEMBER(int, pfnR3BthPrefetchPage,(PVM pVM, RTGCUINTPTR GCPtrPage));
1317 DECLR3CALLBACKMEMBER(int, pfnR3BthVerifyAccessSyncPage,(PVM pVM, RTGCUINTPTR GCPtrPage, unsigned fFlags, unsigned uError));
1318 DECLR3CALLBACKMEMBER(unsigned, pfnR3BthAssertCR3,(PVM pVM, uint32_t cr3, uint32_t cr4, RTGCUINTPTR GCPtr, RTGCUINTPTR cb));
1319
1320 DECLR0CALLBACKMEMBER(int, pfnR0BthTrap0eHandler,(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
1321 DECLR0CALLBACKMEMBER(int, pfnR0BthInvalidatePage,(PVM pVM, RTGCPTR GCPtrPage));
1322 DECLR0CALLBACKMEMBER(int, pfnR0BthSyncCR3,(PVM pVM, uint32_t cr0, uint32_t cr3, uint32_t cr4, bool fGlobal));
1323 DECLR0CALLBACKMEMBER(int, pfnR0BthSyncPage,(PVM pVM, VBOXPDE PdeSrc, RTGCUINTPTR GCPtrPage, unsigned cPages, unsigned uError));
1324 DECLR0CALLBACKMEMBER(int, pfnR0BthPrefetchPage,(PVM pVM, RTGCUINTPTR GCPtrPage));
1325 DECLR0CALLBACKMEMBER(int, pfnR0BthVerifyAccessSyncPage,(PVM pVM, RTGCUINTPTR GCPtrPage, unsigned fFlags, unsigned uError));
1326 DECLR0CALLBACKMEMBER(unsigned, pfnR0BthAssertCR3,(PVM pVM, uint32_t cr3, uint32_t cr4, RTGCUINTPTR GCPtr, RTGCUINTPTR cb));
1327
1328 DECLGCCALLBACKMEMBER(int, pfnGCBthTrap0eHandler,(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
1329 DECLGCCALLBACKMEMBER(int, pfnGCBthInvalidatePage,(PVM pVM, RTGCPTR GCPtrPage));
1330 DECLGCCALLBACKMEMBER(int, pfnGCBthSyncCR3,(PVM pVM, uint32_t cr0, uint32_t cr3, uint32_t cr4, bool fGlobal));
1331 DECLGCCALLBACKMEMBER(int, pfnGCBthSyncPage,(PVM pVM, VBOXPDE PdeSrc, RTGCUINTPTR GCPtrPage, unsigned cPages, unsigned uError));
1332 DECLGCCALLBACKMEMBER(int, pfnGCBthPrefetchPage,(PVM pVM, RTGCUINTPTR GCPtrPage));
1333 DECLGCCALLBACKMEMBER(int, pfnGCBthVerifyAccessSyncPage,(PVM pVM, RTGCUINTPTR GCPtrPage, unsigned fFlags, unsigned uError));
1334 DECLGCCALLBACKMEMBER(unsigned, pfnGCBthAssertCR3,(PVM pVM, uint32_t cr3, uint32_t cr4, RTGCUINTPTR GCPtr, RTGCUINTPTR cb));
1335#if GC_ARCH_BITS == 32 && HC_ARCH_BITS == 64
1336 RTGCPTR alignment2; /**< structure size alignment. */
1337#endif
1338 /** @} */
1339
1340 /** Pointer to SHW+GST mode data (function pointers).
1341 * The index into this table is made up from */
1342 R3PTRTYPE(PPGMMODEDATA) paModeData;
1343
1344
1345 /** Pointer to the list of RAM ranges (Phys GC -> Phys HC conversion) - for HC.
1346 * This is sorted by physical address and contains no overlaps.
1347 * The memory locks and other conversions are managed by MM at the moment.
1348 */
1349 HCPTRTYPE(PPGMRAMRANGE) pRamRangesHC;
1350 /** Pointer to the list of RAM ranges (Phys GC -> Phys HC conversion) - for GC.
1351 * This is sorted by physical address and contains no overlaps.
1352 * The memory locks and other conversions are managed by MM at the moment.
1353 */
1354 GCPTRTYPE(PPGMRAMRANGE) pRamRangesGC;
1355 /** The configured RAM size. */
1356 RTUINT cbRamSize;
1357
1358 /** PGM offset based trees - HC Ptr. */
1359 HCPTRTYPE(PPGMTREES) pTreesHC;
1360 /** PGM offset based trees - GC Ptr. */
1361 GCPTRTYPE(PPGMTREES) pTreesGC;
1362
1363 /** Linked list of GC mappings - for GC.
1364 * The list is sorted ascending on address.
1365 */
1366 GCPTRTYPE(PPGMMAPPING) pMappingsGC;
1367 /** Linked list of GC mappings - for HC.
1368 * The list is sorted ascending on address.
1369 */
1370 HCPTRTYPE(PPGMMAPPING) pMappingsHC;
1371
1372 /** If set no conflict checks are required. (boolean) */
1373 bool fMappingsFixed;
1374 /** If set, then no mappings are put into the shadow page table. (boolean) */
1375 bool fDisableMappings;
1376 /** Size of fixed mapping */
1377 uint32_t cbMappingFixed;
1378 /** Base address (GC) of fixed mapping */
1379 RTGCPTR GCPtrMappingFixed;
1380#if HC_ARCH_BITS == 64 && GC_ARCH_BITS == 32
1381 uint32_t u32Padding0; /**< alignment padding. */
1382#endif
1383
1384
1385 /** @name Intermediate Context
1386 * @{ */
1387 /** Pointer to the intermediate page directory - Normal. */
1388 HCPTRTYPE(PX86PD) pInterPD;
1389 /** Pointer to the intermedate page tables - Normal.
1390 * There are two page tables, one for the identity mapping and one for
1391 * the host context mapping (of the core code). */
1392 HCPTRTYPE(PX86PT) apInterPTs[2];
1393 /** Pointer to the intermedate page tables - PAE. */
1394 HCPTRTYPE(PX86PTPAE) apInterPaePTs[2];
1395 /** Pointer to the intermedate page directory - PAE. */
1396 HCPTRTYPE(PX86PDPAE) apInterPaePDs[4];
1397 /** Pointer to the intermedate page directory - PAE. */
1398 HCPTRTYPE(PX86PDPTR) pInterPaePDPTR;
1399 /** Pointer to the intermedate page-map level 4 - AMD64. */
1400 HCPTRTYPE(PX86PML4) pInterPaePML4;
1401 /** Pointer to the intermedate page directory - AMD64. */
1402 HCPTRTYPE(PX86PDPTR) pInterPaePDPTR64;
1403 /** The Physical Address (HC) of the intermediate Page Directory - Normal. */
1404 RTHCPHYS HCPhysInterPD;
1405 /** The Physical Address (HC) of the intermediate Page Directory Pointer Table - PAE. */
1406 RTHCPHYS HCPhysInterPaePDPTR;
1407 /** The Physical Address (HC) of the intermediate Page Map Level 4 table - AMD64. */
1408 RTHCPHYS HCPhysInterPaePML4;
1409 /** @} */
1410
1411 /** Base address of the dynamic page mapping area.
1412 * The array is MM_HYPER_DYNAMIC_SIZE bytes big.
1413 */
1414 GCPTRTYPE(uint8_t *) pbDynPageMapBaseGC;
1415 /** The index of the last entry used in the dynamic page mapping area. */
1416 RTUINT iDynPageMapLast;
1417 /** Cache containing the last entries in the dynamic page mapping area.
1418 * The cache size is covering half of the mapping area. */
1419 RTHCPHYS aHCPhysDynPageMapCache[MM_HYPER_DYNAMIC_SIZE >> (PAGE_SHIFT + 1)];
1420
1421 /** A20 gate mask.
1422 * Our current approach to A20 emulation is to let REM do it and don't bother
1423 * anywhere else. The interesting Guests will be operating with it enabled anyway.
 * But should the need arise, we'll subject physical addresses to this mask. */
1425 RTGCPHYS GCPhysA20Mask;
1426 /** A20 gate state - boolean! */
1427 RTUINT fA20Enabled;
1428
1429 /** What needs syncing (PGM_SYNC_*).
1430 * This is used to queue operations for PGMSyncCR3, PGMInvalidatePage,
1431 * PGMFlushTLB, and PGMR3Load. */
1432 RTUINT fSyncFlags;
1433
1434#if HC_ARCH_BITS == 64 && GC_ARCH_BITS == 32
1435 RTUINT uPadding3; /**< alignment padding. */
1436#endif
1437 /** PGM critical section.
1438 * This protects the physical & virtual access handlers, ram ranges,
1439 * and the page flag updating (some of it anyway).
1440 */
1441 PDMCRITSECT CritSect;
1442
1443 /** Shadow Page Pool - HC Ptr. */
1444 HCPTRTYPE(PPGMPOOL) pPoolHC;
1445 /** Shadow Page Pool - GC Ptr. */
1446 GCPTRTYPE(PPGMPOOL) pPoolGC;
1447
1448 /** Flush the cache on the next access. */
1449 bool fPhysCacheFlushPending;
1450/** @todo r=bird: Fix member names!*/
1451 /** PGMPhysRead cache */
1452 PGMPHYSCACHE pgmphysreadcache;
1453 /** PGMPhysWrite cache */
1454 PGMPHYSCACHE pgmphyswritecache;
1455
1456 /** @name Release Statistics
1457 * @{ */
1458 /** The number of times the guest has switched mode since last reset or statistics reset. */
1459 STAMCOUNTER cGuestModeChanges;
1460 /** @} */
1461
1462#ifdef VBOX_WITH_STATISTICS
1463 /** GC: Which statistic this \#PF should be attributed to. */
1464 GCPTRTYPE(PSTAMPROFILE) pStatTrap0eAttributionGC;
1465 RTGCPTR padding0;
1466 /** HC: Which statistic this \#PF should be attributed to. */
1467 HCPTRTYPE(PSTAMPROFILE) pStatTrap0eAttributionHC;
1468 RTHCPTR padding1;
1469 STAMPROFILE StatGCTrap0e; /**< GC: PGMGCTrap0eHandler() profiling. */
1470 STAMPROFILE StatTrap0eCSAM; /**< Profiling of the Trap0eHandler body when the cause is CSAM. */
1471 STAMPROFILE StatTrap0eDirtyAndAccessedBits; /**< Profiling of the Trap0eHandler body when the cause is dirty and/or accessed bit emulation. */
1472 STAMPROFILE StatTrap0eGuestTrap; /**< Profiling of the Trap0eHandler body when the cause is a guest trap. */
1473 STAMPROFILE StatTrap0eHndPhys; /**< Profiling of the Trap0eHandler body when the cause is a physical handler. */
1474 STAMPROFILE StatTrap0eHndVirt; /**< Profiling of the Trap0eHandler body when the cause is a virtual handler. */
1475 STAMPROFILE StatTrap0eHndUnhandled; /**< Profiling of the Trap0eHandler body when the cause is access outside the monitored areas of a monitored page. */
1476 STAMPROFILE StatTrap0eMisc; /**< Profiling of the Trap0eHandler body when the cause is not known. */
1477 STAMPROFILE StatTrap0eOutOfSync; /**< Profiling of the Trap0eHandler body when the cause is an out-of-sync page. */
1478 STAMPROFILE StatTrap0eOutOfSyncHndPhys; /**< Profiling of the Trap0eHandler body when the cause is an out-of-sync physical handler page. */
1479 STAMPROFILE StatTrap0eOutOfSyncHndVirt; /**< Profiling of the Trap0eHandler body when the cause is an out-of-sync virtual handler page. */
1480 STAMPROFILE StatTrap0eOutOfSyncObsHnd; /**< Profiling of the Trap0eHandler body when the cause is an obsolete handler page. */
1481 STAMPROFILE StatTrap0eSyncPT; /**< Profiling of the Trap0eHandler body when the cause is lazy syncing of a PT. */
1482
1483 STAMCOUNTER StatTrap0eMapHandler; /**< Number of traps due to access handlers in mappings. */
1484 STAMCOUNTER StatGCTrap0eConflicts; /**< GC: The number of times \#PF was caused by an undetected conflict. */
1485
1486 STAMCOUNTER StatGCTrap0eUSNotPresentRead;
1487 STAMCOUNTER StatGCTrap0eUSNotPresentWrite;
1488 STAMCOUNTER StatGCTrap0eUSWrite;
1489 STAMCOUNTER StatGCTrap0eUSReserved;
1490 STAMCOUNTER StatGCTrap0eUSRead;
1491
1492 STAMCOUNTER StatGCTrap0eSVNotPresentRead;
1493 STAMCOUNTER StatGCTrap0eSVNotPresentWrite;
1494 STAMCOUNTER StatGCTrap0eSVWrite;
1495 STAMCOUNTER StatGCTrap0eSVReserved;
1496
1497 STAMCOUNTER StatGCTrap0eUnhandled;
1498 STAMCOUNTER StatGCTrap0eMap;
1499
1500 /** GC: PGMSyncPT() profiling. */
1501 STAMPROFILE StatGCSyncPT;
1502 /** GC: The number of times PGMSyncPT() needed to allocate page tables. */
1503 STAMCOUNTER StatGCSyncPTAlloc;
1504 /** GC: The number of times PGMSyncPT() detected conflicts. */
1505 STAMCOUNTER StatGCSyncPTConflict;
1506 /** GC: The number of times PGMSyncPT() failed. */
1507 STAMCOUNTER StatGCSyncPTFailed;
1508 /** GC: PGMGCInvalidatePage() profiling. */
1509 STAMPROFILE StatGCInvalidatePage;
1510 /** GC: The number of times PGMGCInvalidatePage() was called for a 4KB page. */
1511 STAMCOUNTER StatGCInvalidatePage4KBPages;
1512 /** GC: The number of times PGMGCInvalidatePage() was called for a 4MB page. */
1513 STAMCOUNTER StatGCInvalidatePage4MBPages;
1514 /** GC: The number of times PGMGCInvalidatePage() skipped a 4MB page. */
1515 STAMCOUNTER StatGCInvalidatePage4MBPagesSkip;
1516 /** GC: The number of times PGMGCInvalidatePage() was called for a not accessed page directory. */
1517 STAMCOUNTER StatGCInvalidatePagePDNAs;
1518 /** GC: The number of times PGMGCInvalidatePage() was called for a not present page directory. */
1519 STAMCOUNTER StatGCInvalidatePagePDNPs;
1520 /** GC: The number of times PGMGCInvalidatePage() was called for a page directory containing mappings (no conflict). */
1521 STAMCOUNTER StatGCInvalidatePagePDMappings;
1522 /** GC: The number of times PGMGCInvalidatePage() was called for an out of sync page directory. */
1523 STAMCOUNTER StatGCInvalidatePagePDOutOfSync;
    /** GC: The number of times PGMGCInvalidatePage() was skipped due to not present shw or pending SyncCR3. */
1525 STAMCOUNTER StatGCInvalidatePageSkipped;
1526 /** GC: The number of times user page is out of sync was detected in GC. */
1527 STAMCOUNTER StatGCPageOutOfSyncUser;
1528 /** GC: The number of times supervisor page is out of sync was detected in GC. */
1529 STAMCOUNTER StatGCPageOutOfSyncSupervisor;
    /** GC: The number of dynamic page mapping cache misses */
1531 STAMCOUNTER StatDynMapCacheMisses;
    /** GC: The number of dynamic page mapping cache hits */
1533 STAMCOUNTER StatDynMapCacheHits;
1534 /** GC: The number of times pgmGCGuestPDWriteHandler() was successfully called. */
1535 STAMCOUNTER StatGCGuestCR3WriteHandled;
1536 /** GC: The number of times pgmGCGuestPDWriteHandler() was called and we had to fall back to the recompiler. */
1537 STAMCOUNTER StatGCGuestCR3WriteUnhandled;
1538 /** GC: The number of times pgmGCGuestPDWriteHandler() was called and a conflict was detected. */
1539 STAMCOUNTER StatGCGuestCR3WriteConflict;
1540 /** GC: Number of out-of-sync handled pages. */
1541 STAMCOUNTER StatHandlersOutOfSync;
1542 /** GC: Number of traps due to physical access handlers. */
1543 STAMCOUNTER StatHandlersPhysical;
1544 /** GC: Number of traps due to virtual access handlers. */
1545 STAMCOUNTER StatHandlersVirtual;
1546 /** GC: Number of traps due to virtual access handlers found by physical address. */
1547 STAMCOUNTER StatHandlersVirtualByPhys;
1548 /** GC: Number of traps due to virtual access handlers found by virtual address (without proper physical flags). */
1549 STAMCOUNTER StatHandlersVirtualUnmarked;
1550 /** GC: Number of traps due to access outside range of monitored page(s). */
1551 STAMCOUNTER StatHandlersUnhandled;
1552
1553 /** GC: The number of times pgmGCGuestROMWriteHandler() was successfully called. */
1554 STAMCOUNTER StatGCGuestROMWriteHandled;
1555 /** GC: The number of times pgmGCGuestROMWriteHandler() was called and we had to fall back to the recompiler */
1556 STAMCOUNTER StatGCGuestROMWriteUnhandled;
1557
1558 /** HC: PGMR3InvalidatePage() profiling. */
1559 STAMPROFILE StatHCInvalidatePage;
1560 /** HC: The number of times PGMR3InvalidatePage() was called for a 4KB page. */
1561 STAMCOUNTER StatHCInvalidatePage4KBPages;
1562 /** HC: The number of times PGMR3InvalidatePage() was called for a 4MB page. */
1563 STAMCOUNTER StatHCInvalidatePage4MBPages;
1564 /** HC: The number of times PGMR3InvalidatePage() skipped a 4MB page. */
1565 STAMCOUNTER StatHCInvalidatePage4MBPagesSkip;
1566 /** HC: The number of times PGMR3InvalidatePage() was called for a not accessed page directory. */
1567 STAMCOUNTER StatHCInvalidatePagePDNAs;
1568 /** HC: The number of times PGMR3InvalidatePage() was called for a not present page directory. */
1569 STAMCOUNTER StatHCInvalidatePagePDNPs;
1570 /** HC: The number of times PGMR3InvalidatePage() was called for a page directory containing mappings (no conflict). */
1571 STAMCOUNTER StatHCInvalidatePagePDMappings;
    /** HC: The number of times PGMR3InvalidatePage() was called for an out of sync page directory. */
1573 STAMCOUNTER StatHCInvalidatePagePDOutOfSync;
    /** HC: The number of times PGMR3InvalidatePage() was skipped due to not present shw or pending SyncCR3. */
1575 STAMCOUNTER StatHCInvalidatePageSkipped;
1576 /** HC: PGMR3SyncPT() profiling. */
1577 STAMPROFILE StatHCSyncPT;
1578 /** HC: pgmr3SyncPTResolveConflict() profiling (includes the entire relocation). */
1579 STAMPROFILE StatHCResolveConflict;
1580 /** HC: Number of times PGMR3CheckMappingConflicts() detected a conflict. */
1581 STAMCOUNTER StatHCDetectedConflicts;
1582 /** HC: The total number of times pgmHCGuestPDWriteHandler() was called. */
1583 STAMCOUNTER StatHCGuestPDWrite;
1584 /** HC: The number of times pgmHCGuestPDWriteHandler() detected a conflict */
1585 STAMCOUNTER StatHCGuestPDWriteConflict;
1586
1587 /** HC: The number of pages marked not present for accessed bit emulation. */
1588 STAMCOUNTER StatHCAccessedPage;
1589 /** HC: The number of pages marked read-only for dirty bit tracking. */
1590 STAMCOUNTER StatHCDirtyPage;
1591 /** HC: The number of pages marked read-only for dirty bit tracking. */
1592 STAMCOUNTER StatHCDirtyPageBig;
1593 /** HC: The number of traps generated for dirty bit tracking. */
1594 STAMCOUNTER StatHCDirtyPageTrap;
1595 /** HC: The number of pages already dirty or readonly. */
1596 STAMCOUNTER StatHCDirtyPageSkipped;
1597
1598 /** GC: The number of pages marked not present for accessed bit emulation. */
1599 STAMCOUNTER StatGCAccessedPage;
1600 /** GC: The number of pages marked read-only for dirty bit tracking. */
1601 STAMCOUNTER StatGCDirtyPage;
1602 /** GC: The number of pages marked read-only for dirty bit tracking. */
1603 STAMCOUNTER StatGCDirtyPageBig;
1604 /** GC: The number of traps generated for dirty bit tracking. */
1605 STAMCOUNTER StatGCDirtyPageTrap;
1606 /** GC: The number of pages already dirty or readonly. */
1607 STAMCOUNTER StatGCDirtyPageSkipped;
1608 /** GC: The number of pages marked dirty because of write accesses. */
1609 STAMCOUNTER StatGCDirtiedPage;
1610 /** GC: The number of pages already marked dirty because of write accesses. */
1611 STAMCOUNTER StatGCPageAlreadyDirty;
1612 /** GC: The number of real pages faults during dirty bit tracking. */
1613 STAMCOUNTER StatGCDirtyTrackRealPF;
1614
1615 /** GC: Profiling of the PGMTrackDirtyBit() body */
1616 STAMPROFILE StatGCDirtyBitTracking;
1617 /** HC: Profiling of the PGMTrackDirtyBit() body */
1618 STAMPROFILE StatHCDirtyBitTracking;
1619
1620 /** GC: Profiling of the PGMGstModifyPage() body */
1621 STAMPROFILE StatGCGstModifyPage;
1622 /** HC: Profiling of the PGMGstModifyPage() body */
1623 STAMPROFILE StatHCGstModifyPage;
1624
1625 /** GC: The number of time we've marked a PD not present from SyncPage to virtualize the accessed bit. */
1626 STAMCOUNTER StatGCSyncPagePDNAs;
1627 /** GC: The number of time we've encountered an out-of-sync PD in SyncPage. */
1628 STAMCOUNTER StatGCSyncPagePDOutOfSync;
1629 /** HC: The number of time we've marked a PD not present from SyncPage to virtualize the accessed bit. */
1630 STAMCOUNTER StatHCSyncPagePDNAs;
1631 /** HC: The number of time we've encountered an out-of-sync PD in SyncPage. */
1632 STAMCOUNTER StatHCSyncPagePDOutOfSync;
1633
1634 STAMCOUNTER StatSynPT4kGC;
1635 STAMCOUNTER StatSynPT4kHC;
1636 STAMCOUNTER StatSynPT4MGC;
1637 STAMCOUNTER StatSynPT4MHC;
1638
1639 /** Profiling of the PGMFlushTLB() body. */
1640 STAMPROFILE StatFlushTLB;
1641 /** The number of times PGMFlushTLB was called with a new CR3, non-global. (switch) */
1642 STAMCOUNTER StatFlushTLBNewCR3;
1643 /** The number of times PGMFlushTLB was called with a new CR3, global. (switch) */
1644 STAMCOUNTER StatFlushTLBNewCR3Global;
1645 /** The number of times PGMFlushTLB was called with the same CR3, non-global. (flush) */
1646 STAMCOUNTER StatFlushTLBSameCR3;
1647 /** The number of times PGMFlushTLB was called with the same CR3, global. (flush) */
1648 STAMCOUNTER StatFlushTLBSameCR3Global;
1649
1650 STAMPROFILE StatGCSyncCR3; /**< GC: PGMSyncCR3() profiling. */
1651 STAMPROFILE StatGCSyncCR3Handlers; /**< GC: Profiling of the PGMSyncCR3() update handler section. */
1652 STAMPROFILE StatGCSyncCR3HandlerVirtualReset; /**< GC: Profiling of the virtual handler resets. */
1653 STAMPROFILE StatGCSyncCR3HandlerVirtualUpdate; /**< GC: Profiling of the virtual handler updates. */
1654 STAMCOUNTER StatGCSyncCR3Global; /**< GC: The number of global CR3 syncs. */
1655 STAMCOUNTER StatGCSyncCR3NotGlobal; /**< GC: The number of non-global CR3 syncs. */
1656 STAMCOUNTER StatGCSyncCR3DstFreed; /**< GC: The number of times we've had to free a shadow entry. */
1657 STAMCOUNTER StatGCSyncCR3DstFreedSrcNP; /**< GC: The number of times we've had to free a shadow entry for which the source entry was not present. */
1658 STAMCOUNTER StatGCSyncCR3DstNotPresent; /**< GC: The number of times we've encountered a not present shadow entry for a present guest entry. */
1659 STAMCOUNTER StatGCSyncCR3DstSkippedGlobalPD; /**< GC: The number of times a global page directory wasn't flushed. */
1660 STAMCOUNTER StatGCSyncCR3DstSkippedGlobalPT; /**< GC: The number of times a page table with only global entries wasn't flushed. */
1661 STAMCOUNTER StatGCSyncCR3DstCacheHit; /**< GC: The number of times we got some kind of cache hit on a page table. */
1662
1663 STAMPROFILE StatHCSyncCR3; /**< HC: PGMSyncCR3() profiling. */
1664 STAMPROFILE StatHCSyncCR3Handlers; /**< HC: Profiling of the PGMSyncCR3() update handler section. */
1665 STAMPROFILE StatHCSyncCR3HandlerVirtualReset; /**< HC: Profiling of the virtual handler resets. */
1666 STAMPROFILE StatHCSyncCR3HandlerVirtualUpdate; /**< HC: Profiling of the virtual handler updates. */
1667 STAMCOUNTER StatHCSyncCR3Global; /**< HC: The number of global CR3 syncs. */
1668 STAMCOUNTER StatHCSyncCR3NotGlobal; /**< HC: The number of non-global CR3 syncs. */
1669 STAMCOUNTER StatHCSyncCR3DstFreed; /**< HC: The number of times we've had to free a shadow entry. */
1670 STAMCOUNTER StatHCSyncCR3DstFreedSrcNP; /**< HC: The number of times we've had to free a shadow entry for which the source entry was not present. */
1671 STAMCOUNTER StatHCSyncCR3DstNotPresent; /**< HC: The number of times we've encountered a not present shadow entry for a present guest entry. */
1672 STAMCOUNTER StatHCSyncCR3DstSkippedGlobalPD; /**< HC: The number of times a global page directory wasn't flushed. */
1673 STAMCOUNTER StatHCSyncCR3DstSkippedGlobalPT; /**< HC: The number of times a page table with only global entries wasn't flushed. */
1674 STAMCOUNTER StatHCSyncCR3DstCacheHit; /**< HC: The number of times we got some kind of cache hit on a page table. */
1675
1676 /** GC: Profiling of pgmHandlerVirtualFindByPhysAddr. */
1677 STAMPROFILE StatVirtHandleSearchByPhysGC;
1678 /** HC: Profiling of pgmHandlerVirtualFindByPhysAddr. */
1679 STAMPROFILE StatVirtHandleSearchByPhysHC;
1680 /** HC: The number of times PGMR3HandlerPhysicalReset is called. */
1681 STAMCOUNTER StatHandlePhysicalReset;
1682
1683 STAMPROFILE StatCheckPageFault;
1684 STAMPROFILE StatLazySyncPT;
1685 STAMPROFILE StatMapping;
1686 STAMPROFILE StatOutOfSync;
1687 STAMPROFILE StatHandlers;
1688 STAMPROFILE StatEIPHandlers;
1689 STAMPROFILE StatHCPrefetch;
1690
1691# ifdef PGMPOOL_WITH_GCPHYS_TRACKING
1692 /** The number of first time shadowings. */
1693 STAMCOUNTER StatTrackVirgin;
1694 /** The number of times switching to cRef2, i.e. the page is being shadowed by two PTs. */
1695 STAMCOUNTER StatTrackAliased;
1696 /** The number of times we're tracking using cRef2. */
1697 STAMCOUNTER StatTrackAliasedMany;
1698 /** The number of times we're hitting pages which has overflowed cRef2. */
1699 STAMCOUNTER StatTrackAliasedLots;
    /** The number of times the extent list grows too long. */
1701 STAMCOUNTER StatTrackOverflows;
1702 /** Profiling of SyncPageWorkerTrackDeref (expensive). */
1703 STAMPROFILE StatTrackDeref;
1704# endif
1705
1706 /** Allocated mbs of guest ram */
1707 STAMCOUNTER StatDynRamTotal;
1708 /** Nr of pgmr3PhysGrowRange calls. */
1709 STAMCOUNTER StatDynRamGrow;
1710
1711 STAMCOUNTER StatGCTrap0ePD[X86_PG_ENTRIES];
1712 STAMCOUNTER StatGCSyncPtPD[X86_PG_ENTRIES];
1713 STAMCOUNTER StatGCSyncPagePD[X86_PG_ENTRIES];
1714#endif
1715} PGM, *PPGM;
1716
1717
/** @name PGM::fSyncFlags Flags
 * Flags ORed into PGM::fSyncFlags to request sync related actions.
 * Note: bits 3 thru 7 are currently unused here.
 * @{
 */
/** Updates the MM_RAM_FLAGS_VIRTUAL_HANDLER page bit. */
#define PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL        BIT(0)
/** Always sync CR3. */
#define PGM_SYNC_ALWAYS                         BIT(1)
/** Check monitoring on next CR3 (re)load and invalidate page. */
#define PGM_SYNC_MONITOR_CR3                    BIT(2)
/** Clear the page pool (a light weight flush). */
#define PGM_SYNC_CLEAR_PGM_POOL                 BIT(8)
/** @} */
1730
1731
__BEGIN_DECLS

/* Write access handlers (guest PD, ROM, cached PTs). */
PGMGCDECL(int) pgmGCGuestPDWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, void *pvFault, RTGCPHYS GCPhysFault, void *pvUser);
PGMDECL(int) pgmGuestROMWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, void *pvFault, RTGCPHYS GCPhysFault, void *pvUser);
PGMGCDECL(int) pgmCachePTWriteGC(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser);
/* Mapping conflict resolution, lookup, relocation and guest mode switching. */
int pgmR3SyncPTResolveConflict(PVM pVM, PPGMMAPPING pMapping, PVBOXPD pPDSrc, int iPDOld);
PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr);
void pgmR3MapRelocate(PVM pVM, PPGMMAPPING pMapping, int iPDOld, int iPDNew);
int pgmR3ChangeMode(PVM pVM, PGMMODE enmGuestMode);
/* The PGM lock. */
int pgmLock(PVM pVM);
void pgmUnlock(PVM pVM);

/* Physical / virtual access handler helpers. */
void pgmR3HandlerPhysicalUpdateAll(PVM pVM);
int pgmHandlerVirtualFindByPhysAddr(PVM pVM, RTGCPHYS GCPhys, PPGMVIRTHANDLER *ppVirt, unsigned *piPage);
DECLCALLBACK(int) pgmHandlerVirtualResetOne(PAVLROGCPTRNODECORE pNode, void *pvUser);
#ifdef VBOX_STRICT
void pgmHandlerVirtualDumpPhysPages(PVM pVM);
#else
# define pgmHandlerVirtualDumpPhysPages(a) do { } while (0)
#endif
DECLCALLBACK(void) pgmR3InfoHandlers(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);


#ifdef IN_RING3
/* Ring-3 only: on-demand RAM range growing and pool life cycle. */
int pgmr3PhysGrowRange(PVM pVM, RTGCPHYS GCPhys);

int pgmR3PoolInit(PVM pVM);
void pgmR3PoolRelocate(PVM pVM);
void pgmR3PoolReset(PVM pVM);

#endif
#ifdef IN_GC
/* GC only pool page mapping helper. */
void *pgmGCPoolMapPage(PVM pVM, PPGMPOOLPAGE pPage);
#endif
/* Shadow page pool allocation, freeing and flushing. */
int pgmPoolAlloc(PVM pVM, RTGCPHYS GCPhys, PGMPOOLKIND enmKind, uint16_t iUser, uint16_t iUserTable, PPPGMPOOLPAGE ppPage);
PPGMPOOLPAGE pgmPoolGetPageByHCPhys(PVM pVM, RTHCPHYS HCPhys);
void pgmPoolFree(PVM pVM, RTHCPHYS HCPhys, uint16_t iUser, uint16_t iUserTable);
void pgmPoolFreeByPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage, uint16_t iUser, uint16_t iUserTable);
int pgmPoolFlushPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage);
void pgmPoolFlushAll(PVM pVM);
void pgmPoolClearAll(PVM pVM);
/* GCPhys -> shadow PT reference tracking (see PGMPOOL_WITH_GCPHYS_TRACKING). */
void pgmPoolTrackFlushGCPhysPT(PVM pVM, PRTHCPHYS pHCPhys, uint16_t iShw, uint16_t cRefs);
void pgmPoolTrackFlushGCPhysPTs(PVM pVM, PRTHCPHYS pHCPhys, uint16_t iPhysExt);
int pgmPoolTrackFlushGCPhysPTsSlow(PVM pVM, PRTHCPHYS pHCPhys);
PPGMPOOLPHYSEXT pgmPoolTrackPhysExtAlloc(PVM pVM, uint16_t *piPhysExt);
void pgmPoolTrackPhysExtFree(PVM pVM, uint16_t iPhysExt);
void pgmPoolTrackPhysExtFreeList(PVM pVM, uint16_t iPhysExt);
uint16_t pgmPoolTrackPhysExtAddref(PVM pVM, uint16_t u16, uint16_t iShwPT);
void pgmPoolTrackPhysExtDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PRTHCPHYS pHCPhys);
#ifdef PGMPOOL_WITH_MONITORING
/* Pool page write monitoring. Note: pvAddress is an HC pointer in ring-3, a GC pointer elsewhere. */
# ifdef IN_RING3
void pgmPoolMonitorChainChanging(PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTGCPHYS GCPhysFault, RTHCPTR pvAddress, PDISCPUSTATE pCpu);
# else
void pgmPoolMonitorChainChanging(PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTGCPHYS GCPhysFault, RTGCPTR pvAddress, PDISCPUSTATE pCpu);
# endif
int pgmPoolMonitorChainFlush(PPGMPOOL pPool, PPGMPOOLPAGE pPage);
void pgmPoolMonitorModifiedInsert(PPGMPOOL pPool, PPGMPOOLPAGE pPage);
void pgmPoolMonitorModifiedClearAll(PVM pVM);
int pgmPoolMonitorMonitorCR3(PPGMPOOL pPool, uint16_t idxRoot, RTGCPHYS GCPhysCR3);
int pgmPoolMonitorUnmonitorCR3(PPGMPOOL pPool, uint16_t idxRoot);
#endif

__END_DECLS
1795
1796
1797/**
1798 * Convert GC Phys to HC Phys.
1799 *
1800 * @returns VBox status.
1801 * @param pPGM PGM handle.
1802 * @param GCPhys The GC physical address.
1803 * @param pHCPhys Where to store the corresponding HC physical address.
1804 */
1805DECLINLINE(int) PGMRamGCPhys2HCPhys(PPGM pPGM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
1806{
1807 /*
1808 * Walk range list.
1809 */
1810 PPGMRAMRANGE pRam = CTXSUFF(pPGM->pRamRanges);
1811 while (pRam)
1812 {
1813 RTGCPHYS off = GCPhys - pRam->GCPhys;
1814 if (off < pRam->cb)
1815 {
1816 unsigned iPage = off >> PAGE_SHIFT;
1817 /* Physical chunk in dynamically allocated range not present? */
1818 if (RT_UNLIKELY(!(pRam->aHCPhys[iPage] & X86_PTE_PAE_PG_MASK)))
1819 {
1820#ifdef IN_RING3
1821 int rc = pgmr3PhysGrowRange(PGM2VM(pPGM), GCPhys);
1822#else
1823 int rc = CTXALLMID(VMM, CallHost)(PGM2VM(pPGM), VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
1824#endif
1825 if (rc != VINF_SUCCESS)
1826 return rc;
1827 }
1828 *pHCPhys = (pRam->aHCPhys[iPage] & X86_PTE_PAE_PG_MASK) | (off & PAGE_OFFSET_MASK);
1829 return VINF_SUCCESS;
1830 }
1831
1832 pRam = CTXSUFF(pRam->pNext);
1833 }
1834 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
1835}
1836
1837
/**
 * Convert GC Phys to HC Virt.
 *
 * Walks the RAM range list to find the range containing GCPhys and
 * translates the address into a host context pointer.  For dynamically
 * allocated ranges the backing chunk is grown on demand (directly in
 * ring-3, otherwise via a host call).
 *
 * @returns VBox status.
 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if GCPhys is outside all
 *          RAM ranges, or the containing range has no HC mapping.
 * @param   pPGM    PGM handle.
 * @param   GCPhys  The GC physical address.
 * @param   pHCPtr  Where to store the corresponding HC virtual address.
 */
DECLINLINE(int) PGMRamGCPhys2HCPtr(PPGM pPGM, RTGCPHYS GCPhys, PRTHCPTR pHCPtr)
{
    /*
     * Walk range list.
     */
    PPGMRAMRANGE pRam = CTXSUFF(pPGM->pRamRanges);
    while (pRam)
    {
        RTGCPHYS off = GCPhys - pRam->GCPhys; /* unsigned: wraps to a huge value when GCPhys is below the range. */
        if (off < pRam->cb)
        {
            if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
            {
                unsigned idx = (off >> PGM_DYNAMIC_CHUNK_SHIFT);
                /* Physical chunk in dynamically allocated range not present? */
                if (RT_UNLIKELY(!CTXSUFF(pRam->pavHCChunk)[idx]))
                {
#ifdef IN_RING3
                    int rc = pgmr3PhysGrowRange(PGM2VM(pPGM), GCPhys);
#else
                    int rc = CTXALLMID(VMM, CallHost)(PGM2VM(pPGM), VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
#endif
                    if (rc != VINF_SUCCESS)
                        return rc;
                }
                *pHCPtr = (RTHCPTR)((RTHCUINTPTR)CTXSUFF(pRam->pavHCChunk)[idx] + (off & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
                return VINF_SUCCESS;
            }
            if (pRam->pvHC)
            {
                *pHCPtr = (RTHCPTR)((RTHCUINTPTR)pRam->pvHC + off);
                return VINF_SUCCESS;
            }
            /* Range without a permanent HC mapping. */
            return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
        }

        pRam = CTXSUFF(pRam->pNext);
    }
    return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
}
1886
1887
/**
 * Convert GC Phys to HC Virt.
 *
 * Variant for callers which already know which RAM range contains the
 * address; GCPhys must lie within pRam.
 * NOTE(review): shares the name with the list-walking variant above and
 * relies on C++ overloading (both live outside the extern "C" block).
 *
 * @returns VBox status.
 * @param   pVM     VM handle.
 * @param   pRam    Ram range
 * @param   GCPhys  The GC physical address.
 * @param   pHCPtr  Where to store the corresponding HC virtual address.
 */
DECLINLINE(int) PGMRamGCPhys2HCPtr(PVM pVM, PPGMRAMRANGE pRam, RTGCPHYS GCPhys, PRTHCPTR pHCPtr)
{
    RTGCPHYS off = GCPhys - pRam->GCPhys;
    Assert(off < pRam->cb); /* The caller guarantees GCPhys is inside pRam. */

    if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
    {
        unsigned idx = (off >> PGM_DYNAMIC_CHUNK_SHIFT);
        /* Physical chunk in dynamically allocated range not present? */
        if (RT_UNLIKELY(!CTXSUFF(pRam->pavHCChunk)[idx]))
        {
#ifdef IN_RING3
            int rc = pgmr3PhysGrowRange(pVM, GCPhys);
#else
            int rc = CTXALLMID(VMM, CallHost)(pVM, VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
#endif
            if (rc != VINF_SUCCESS)
                return rc;
        }
        *pHCPtr = (RTHCPTR)((RTHCUINTPTR)CTXSUFF(pRam->pavHCChunk)[idx] + (off & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
        return VINF_SUCCESS;
    }
    if (pRam->pvHC)
    {
        *pHCPtr = (RTHCPTR)((RTHCUINTPTR)pRam->pvHC + off);
        return VINF_SUCCESS;
    }
    /* Range without a permanent HC mapping. */
    return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
}
1926
1927
/**
 * Convert GC Phys to HC Virt and HC Phys.
 *
 * Performs both translations with a single walk of the RAM range list.
 *
 * @returns VBox status.
 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if GCPhys is outside all
 *          RAM ranges or the range has no HC mapping.  Note that in the
 *          latter case *pHCPhys has already been written and *pHCPtr is
 *          set to 0.
 * @param   pPGM     PGM handle.
 * @param   GCPhys   The GC physical address.
 * @param   pHCPtr   Where to store the corresponding HC virtual address.
 * @param   pHCPhys  Where to store the HC Physical address and its flags.
 */
DECLINLINE(int) PGMRamGCPhys2HCPtrAndHCPhysWithFlags(PPGM pPGM, RTGCPHYS GCPhys, PRTHCPTR pHCPtr, PRTHCPHYS pHCPhys)
{
    /*
     * Walk range list.
     */
    PPGMRAMRANGE pRam = CTXSUFF(pPGM->pRamRanges);
    while (pRam)
    {
        RTGCPHYS off = GCPhys - pRam->GCPhys; /* unsigned: wraps when GCPhys is below the range. */
        if (off < pRam->cb)
        {
            unsigned iPage = off >> PAGE_SHIFT;
            /* Physical chunk in dynamically allocated range not present? */
            if (RT_UNLIKELY(!(pRam->aHCPhys[iPage] & X86_PTE_PAE_PG_MASK)))
            {
#ifdef IN_RING3
                int rc = pgmr3PhysGrowRange(PGM2VM(pPGM), GCPhys);
#else
                int rc = CTXALLMID(VMM, CallHost)(PGM2VM(pPGM), VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
#endif
                if (rc != VINF_SUCCESS)
                    return rc;
            }
            *pHCPhys = pRam->aHCPhys[iPage];

            if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
            {
                unsigned idx = (off >> PGM_DYNAMIC_CHUNK_SHIFT);
                *pHCPtr = (RTHCPTR)((RTHCUINTPTR)CTXSUFF(pRam->pavHCChunk)[idx] + (off & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
                return VINF_SUCCESS;
            }
            if (pRam->pvHC)
            {
                *pHCPtr = (RTHCPTR)((RTHCUINTPTR)pRam->pvHC + off);
                return VINF_SUCCESS;
            }
            /* Range without a permanent HC mapping. */
            *pHCPtr = 0;
            return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
        }

        pRam = CTXSUFF(pRam->pNext);
    }
    return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
}
1981
1982
1983/**
1984 * Convert GC Phys page to a page entry pointer.
1985 *
1986 * This is used by code which may have to update the flags.
1987 *
1988 * @returns VBox status.
1989 * @param pPGM PGM handle.
1990 * @param GCPhys The GC physical address.
1991 * @param ppHCPhys Where to store the pointer to the page entry.
1992 */
1993DECLINLINE(int) PGMRamGCPhys2PagePtr(PPGM pPGM, RTGCPHYS GCPhys, PRTHCPHYS *ppHCPhys)
1994{
1995 /*
1996 * Walk range list.
1997 */
1998 PPGMRAMRANGE pRam = CTXSUFF(pPGM->pRamRanges);
1999 while (pRam)
2000 {
2001 RTGCPHYS off = GCPhys - pRam->GCPhys;
2002 if (off < pRam->cb)
2003 {
2004 unsigned iPage = off >> PAGE_SHIFT;
2005 /* Physical chunk in dynamically allocated range not present? */
2006 if (RT_UNLIKELY(!(pRam->aHCPhys[iPage] & X86_PTE_PAE_PG_MASK)))
2007 {
2008#ifdef IN_RING3
2009 int rc = pgmr3PhysGrowRange(PGM2VM(pPGM), GCPhys);
2010#else
2011 int rc = CTXALLMID(VMM, CallHost)(PGM2VM(pPGM), VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
2012#endif
2013 if (rc != VINF_SUCCESS)
2014 return rc;
2015 }
2016 *ppHCPhys = &pRam->aHCPhys[iPage];
2017 return VINF_SUCCESS;
2018 }
2019
2020 pRam = CTXSUFF(pRam->pNext);
2021 }
2022 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2023}
2024
2025
2026/**
2027 * Convert GC Phys page to HC Phys page and flags.
2028 *
2029 * @returns VBox status.
2030 * @param pPGM PGM handle.
2031 * @param GCPhys The GC physical address.
2032 * @param pHCPhys Where to store the corresponding HC physical address of the page
2033 * and the page flags.
2034 */
2035DECLINLINE(int) PGMRamGCPhys2HCPhysWithFlags(PPGM pPGM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
2036{
2037 /*
2038 * Walk range list.
2039 */
2040 PPGMRAMRANGE pRam = CTXSUFF(pPGM->pRamRanges);
2041 while (pRam)
2042 {
2043 RTGCPHYS off = GCPhys - pRam->GCPhys;
2044 if (off < pRam->cb)
2045 {
2046 unsigned iPage = off >> PAGE_SHIFT;
2047 /* Physical chunk in dynamically allocated range not present? */
2048 if (RT_UNLIKELY(!(pRam->aHCPhys[iPage] & X86_PTE_PAE_PG_MASK)))
2049 {
2050#ifdef IN_RING3
2051 int rc = pgmr3PhysGrowRange(PGM2VM(pPGM), GCPhys);
2052#else
2053 int rc = CTXALLMID(VMM, CallHost)(PGM2VM(pPGM), VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
2054#endif
2055 if (rc != VINF_SUCCESS)
2056 return rc;
2057 }
2058 *pHCPhys = pRam->aHCPhys[iPage];
2059 return VINF_SUCCESS;
2060 }
2061
2062 pRam = CTXSUFF(pRam->pNext);
2063 }
2064 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2065}
2066
2067
2068/**
2069 * Clears flags associated with a RAM address.
2070 *
2071 * @returns VBox status code.
2072 * @param pPGM PGM handle.
2073 * @param GCPhys Guest context physical address.
2074 * @param fFlags fFlags to clear. (Bits 0-11.)
2075 */
2076DECLINLINE(int) PGMRamFlagsClearByGCPhys(PPGM pPGM, RTGCPHYS GCPhys, unsigned fFlags)
2077{
2078 /*
2079 * Walk range list.
2080 */
2081 PPGMRAMRANGE pRam = CTXSUFF(pPGM->pRamRanges);
2082 while (pRam)
2083 {
2084 RTGCPHYS off = GCPhys - pRam->GCPhys;
2085 if (off < pRam->cb)
2086 {
2087 unsigned iPage = off >> PAGE_SHIFT;
2088 /* Physical chunk in dynamically allocated range not present? */
2089 if (RT_UNLIKELY(!(pRam->aHCPhys[iPage] & X86_PTE_PAE_PG_MASK)))
2090 {
2091#ifdef IN_RING3
2092 int rc = pgmr3PhysGrowRange(PGM2VM(pPGM), GCPhys);
2093#else
2094 int rc = CTXALLMID(VMM, CallHost)(PGM2VM(pPGM), VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
2095#endif
2096 if (rc != VINF_SUCCESS)
2097 return rc;
2098 }
2099 fFlags &= ~X86_PTE_PAE_PG_MASK;
2100 pRam->aHCPhys[iPage] &= ~(RTHCPHYS)fFlags;
2101 return VINF_SUCCESS;
2102 }
2103
2104 pRam = CTXSUFF(pRam->pNext);
2105 }
2106 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2107}
2108
2109
/**
 * Clears flags associated with a RAM address.
 *
 * Same as PGMRamFlagsClearByGCPhys, but tries a cached RAM range first and
 * updates the cache after a successful list walk.
 *
 * @returns VBox status code.
 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if no RAM range covers GCPhys.
 * @param   pPGM       PGM handle.
 * @param   GCPhys     Guest context physical address.
 * @param   fFlags     fFlags to clear. (Bits 0-11.)
 * @param   ppRamHint  Where to read and store the ram list hint.
 *                     The caller initializes this to NULL before the call.
 */
DECLINLINE(int) PGMRamFlagsClearByGCPhysWithHint(PPGM pPGM, RTGCPHYS GCPhys, unsigned fFlags, PPGMRAMRANGE *ppRamHint)
{
    /*
     * Check the hint.
     */
    PPGMRAMRANGE pRam = *ppRamHint;
    if (pRam)
    {
        RTGCPHYS off = GCPhys - pRam->GCPhys; /* unsigned: wraps when GCPhys is below the range. */
        if (off < pRam->cb)
        {
            unsigned iPage = off >> PAGE_SHIFT;
            /* Physical chunk in dynamically allocated range not present? */
            if (RT_UNLIKELY(!(pRam->aHCPhys[iPage] & X86_PTE_PAE_PG_MASK)))
            {
#ifdef IN_RING3
                int rc = pgmr3PhysGrowRange(PGM2VM(pPGM), GCPhys);
#else
                int rc = CTXALLMID(VMM, CallHost)(PGM2VM(pPGM), VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
#endif
                if (rc != VINF_SUCCESS)
                    return rc;
            }
            /* Widen fFlags before complementing so the address bits stay set in the mask. */
            fFlags &= ~X86_PTE_PAE_PG_MASK;
            pRam->aHCPhys[iPage] &= ~(RTHCPHYS)fFlags;
            return VINF_SUCCESS;
        }
    }

    /*
     * Hint missed - walk range list and update the hint on success.
     */
    pRam = CTXSUFF(pPGM->pRamRanges);
    while (pRam)
    {
        RTGCPHYS off = GCPhys - pRam->GCPhys;
        if (off < pRam->cb)
        {
            unsigned iPage = off >> PAGE_SHIFT;
            /* Physical chunk in dynamically allocated range not present? */
            if (RT_UNLIKELY(!(pRam->aHCPhys[iPage] & X86_PTE_PAE_PG_MASK)))
            {
#ifdef IN_RING3
                int rc = pgmr3PhysGrowRange(PGM2VM(pPGM), GCPhys);
#else
                int rc = CTXALLMID(VMM, CallHost)(PGM2VM(pPGM), VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
#endif
                if (rc != VINF_SUCCESS)
                    return rc;
            }
            fFlags &= ~X86_PTE_PAE_PG_MASK;
            pRam->aHCPhys[iPage] &= ~(RTHCPHYS)fFlags;
            *ppRamHint = pRam;
            return VINF_SUCCESS;
        }

        pRam = CTXSUFF(pRam->pNext);
    }
    return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
}
2180
2181/**
2182 * Sets (bitwise OR) flags associated with a RAM address.
2183 *
2184 * @returns VBox status code.
2185 * @param pPGM PGM handle.
2186 * @param GCPhys Guest context physical address.
2187 * @param fFlags fFlags to set clear. (Bits 0-11.)
2188 */
2189DECLINLINE(int) PGMRamFlagsSetByGCPhys(PPGM pPGM, RTGCPHYS GCPhys, unsigned fFlags)
2190{
2191 /*
2192 * Walk range list.
2193 */
2194 PPGMRAMRANGE pRam = CTXSUFF(pPGM->pRamRanges);
2195 while (pRam)
2196 {
2197 RTGCPHYS off = GCPhys - pRam->GCPhys;
2198 if (off < pRam->cb)
2199 {
2200 unsigned iPage = off >> PAGE_SHIFT;
2201 /* Physical chunk in dynamically allocated range not present? */
2202 if (RT_UNLIKELY(!(pRam->aHCPhys[iPage] & X86_PTE_PAE_PG_MASK)))
2203 {
2204#ifdef IN_RING3
2205 int rc = pgmr3PhysGrowRange(PGM2VM(pPGM), GCPhys);
2206#else
2207 int rc = CTXALLMID(VMM, CallHost)(PGM2VM(pPGM), VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
2208#endif
2209 if (rc != VINF_SUCCESS)
2210 return rc;
2211 }
2212 fFlags &= ~X86_PTE_PAE_PG_MASK;
2213 pRam->aHCPhys[iPage] |= fFlags;
2214 return VINF_SUCCESS;
2215 }
2216
2217 pRam = CTXSUFF(pRam->pNext);
2218 }
2219 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2220}
2221
/**
 * Sets (bitwise OR) flags associated with a RAM address.
 *
 * Same as PGMRamFlagsSetByGCPhys, but tries a cached RAM range first and
 * updates the cache after a successful list walk.
 *
 * @returns VBox status code.
 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if no RAM range covers GCPhys.
 * @param   pPGM       PGM handle.
 * @param   GCPhys     Guest context physical address.
 * @param   fFlags     fFlags to set clear. (Bits 0-11.)
 * @param   ppRamHint  Where to read and store the ram list hint.
 *                     The caller initializes this to NULL before the call.
 */
DECLINLINE(int) PGMRamFlagsSetByGCPhysWithHint(PPGM pPGM, RTGCPHYS GCPhys, unsigned fFlags, PPGMRAMRANGE *ppRamHint)
{
    /*
     * Check the hint.
     */
    PPGMRAMRANGE pRam = *ppRamHint;
    if (pRam)
    {
        RTGCPHYS off = GCPhys - pRam->GCPhys; /* unsigned: wraps when GCPhys is below the range. */
        if (off < pRam->cb)
        {
            unsigned iPage = off >> PAGE_SHIFT;
            /* Physical chunk in dynamically allocated range not present? */
            if (RT_UNLIKELY(!(pRam->aHCPhys[iPage] & X86_PTE_PAE_PG_MASK)))
            {
#ifdef IN_RING3
                int rc = pgmr3PhysGrowRange(PGM2VM(pPGM), GCPhys);
#else
                int rc = CTXALLMID(VMM, CallHost)(PGM2VM(pPGM), VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
#endif
                if (rc != VINF_SUCCESS)
                    return rc;
            }
            /* Strip any address bits so only flag bits are ORed in. */
            fFlags &= ~X86_PTE_PAE_PG_MASK;
            pRam->aHCPhys[iPage] |= fFlags;
            return VINF_SUCCESS;
        }
    }

    /*
     * Hint missed - walk range list and update the hint on success.
     */
    pRam = CTXSUFF(pPGM->pRamRanges);
    while (pRam)
    {
        RTGCPHYS off = GCPhys - pRam->GCPhys;
        if (off < pRam->cb)
        {
            unsigned iPage = off >> PAGE_SHIFT;
            /* Physical chunk in dynamically allocated range not present? */
            if (RT_UNLIKELY(!(pRam->aHCPhys[iPage] & X86_PTE_PAE_PG_MASK)))
            {
#ifdef IN_RING3
                int rc = pgmr3PhysGrowRange(PGM2VM(pPGM), GCPhys);
#else
                int rc = CTXALLMID(VMM, CallHost)(PGM2VM(pPGM), VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
#endif
                if (rc != VINF_SUCCESS)
                    return rc;
            }
            fFlags &= ~X86_PTE_PAE_PG_MASK;
            pRam->aHCPhys[iPage] |= fFlags;
            *ppRamHint = pRam;
            return VINF_SUCCESS;
        }

        pRam = CTXSUFF(pRam->pNext);
    }
    return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
}
2292
2293
2294/**
2295 * Gets the page directory for the specified address.
2296 *
2297 * @returns Pointer to the page directory in question.
2298 * @returns NULL if the page directory is not present or on an invalid page.
2299 * @param pPGM Pointer to the PGM instance data.
2300 * @param GCPtr The address.
2301 */
2302DECLINLINE(PX86PDPAE) pgmGstGetPaePD(PPGM pPGM, RTGCUINTPTR GCPtr)
2303{
2304 const unsigned iPdPtr = GCPtr >> X86_PDPTR_SHIFT;
2305 if (CTXSUFF(pPGM->pGstPaePDPTR)->a[iPdPtr].n.u1Present)
2306 {
2307 if ((CTXSUFF(pPGM->pGstPaePDPTR)->a[iPdPtr].u & X86_PDPE_PG_MASK) == pPGM->aGCPhysGstPaePDs[iPdPtr])
2308 return CTXSUFF(pPGM->apGstPaePDs)[iPdPtr];
2309
2310 /* cache is out-of-sync. */
2311 PX86PDPAE pPD;
2312 int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), CTXSUFF(pPGM->pGstPaePDPTR)->a[iPdPtr].u & X86_PDPE_PG_MASK, &pPD);
2313 if (VBOX_SUCCESS(rc))
2314 return pPD;
2315 AssertMsgFailed(("Impossible! rc=%d PDPE=%#llx\n", rc, CTXSUFF(pPGM->pGstPaePDPTR)->a[iPdPtr].u));
2316 /* returning NIL_RTGCPHYS is ok if we assume it's just an invalid page of some kind emualted as all 0s. */
2317 }
2318 return NULL;
2319}
2320
2321
2322/**
2323 * Gets the page directory entry for the specified address.
2324 *
2325 * @returns Pointer to the page directory entry in question.
2326 * @returns NULL if the page directory is not present or on an invalid page.
2327 * @param pPGM Pointer to the PGM instance data.
2328 * @param GCPtr The address.
2329 */
2330DECLINLINE(PX86PDEPAE) pgmGstGetPaePDEPtr(PPGM pPGM, RTGCUINTPTR GCPtr)
2331{
2332 const unsigned iPdPtr = GCPtr >> X86_PDPTR_SHIFT;
2333 if (CTXSUFF(pPGM->pGstPaePDPTR)->a[iPdPtr].n.u1Present)
2334 {
2335 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
2336 if ((CTXSUFF(pPGM->pGstPaePDPTR)->a[iPdPtr].u & X86_PDPE_PG_MASK) == pPGM->aGCPhysGstPaePDs[iPdPtr])
2337 return &CTXSUFF(pPGM->apGstPaePDs)[iPdPtr]->a[iPD];
2338
2339 /* cache is out-of-sync. */
2340 PX86PDPAE pPD;
2341 int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), CTXSUFF(pPGM->pGstPaePDPTR)->a[iPdPtr].u & X86_PDPE_PG_MASK, &pPD);
2342 if (VBOX_SUCCESS(rc))
2343 return &pPD->a[iPD];
2344 AssertMsgFailed(("Impossible! rc=%d PDPE=%#llx\n", rc, CTXSUFF(pPGM->pGstPaePDPTR)->a[iPdPtr].u));
2345 /* returning NIL_RTGCPHYS is ok if we assume it's just an invalid page or something which we'll emulate as all 0s. */
2346 }
2347 return NULL;
2348}
2349
2350
2351/**
2352 * Gets the page directory entry for the specified address.
2353 *
2354 * @returns The page directory entry in question.
2355 * @returns A non-present entry if the page directory is not present or on an invalid page.
2356 * @param pPGM Pointer to the PGM instance data.
2357 * @param GCPtr The address.
2358 */
2359DECLINLINE(uint64_t) pgmGstGetPaePDE(PPGM pPGM, RTGCUINTPTR GCPtr)
2360{
2361 const unsigned iPdPtr = GCPtr >> X86_PDPTR_SHIFT;
2362 if (CTXSUFF(pPGM->pGstPaePDPTR)->a[iPdPtr].n.u1Present)
2363 {
2364 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
2365 if ((CTXSUFF(pPGM->pGstPaePDPTR)->a[iPdPtr].u & X86_PDPE_PG_MASK) == pPGM->aGCPhysGstPaePDs[iPdPtr])
2366 return CTXSUFF(pPGM->apGstPaePDs)[iPdPtr]->a[iPD].u;
2367
2368 /* cache is out-of-sync. */
2369 PX86PDPAE pPD;
2370 int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), CTXSUFF(pPGM->pGstPaePDPTR)->a[iPdPtr].u & X86_PDPE_PG_MASK, &pPD);
2371 if (VBOX_SUCCESS(rc))
2372 return pPD->a[iPD].u;
2373 AssertMsgFailed(("Impossible! rc=%d PDPE=%#llx\n", rc, CTXSUFF(pPGM->pGstPaePDPTR)->a[iPdPtr].u));
2374 }
2375 return 0;
2376}
2377
2378
2379/**
2380 * Checks if any of the specified page flags are set for the given page.
2381 *
2382 * @returns true if any of the flags are set.
2383 * @returns false if all the flags are clear.
2384 * @param pPGM PGM handle.
2385 * @param GCPhys The GC physical address.
2386 * @param fFlags The flags to check for.
2387 */
2388DECLINLINE(bool) PGMRamTestFlags(PPGM pPGM, RTGCPHYS GCPhys, uint64_t fFlags)
2389{
2390 /*
2391 * Walk range list.
2392 */
2393 for (PPGMRAMRANGE pRam = CTXSUFF(pPGM->pRamRanges);
2394 pRam;
2395 pRam = CTXSUFF(pRam->pNext))
2396 {
2397 RTGCPHYS off = GCPhys - pRam->GCPhys;
2398 if (off < pRam->cb)
2399 return (pRam->aHCPhys[off >> PAGE_SHIFT] & fFlags) != 0;
2400 }
2401 return false;
2402}
2403
2404
2405/**
2406 * Gets the ram flags for a handler.
2407 *
2408 * @returns The ram flags.
2409 * @param pCur The physical handler in question.
2410 */
2411DECLINLINE(unsigned) pgmHandlerPhysicalCalcFlags(PPGMPHYSHANDLER pCur)
2412{
2413 switch (pCur->enmType)
2414 {
2415 case PGMPHYSHANDLERTYPE_PHYSICAL:
2416 return MM_RAM_FLAGS_PHYSICAL_HANDLER;
2417
2418 case PGMPHYSHANDLERTYPE_PHYSICAL_WRITE:
2419 return MM_RAM_FLAGS_PHYSICAL_HANDLER | MM_RAM_FLAGS_PHYSICAL_WRITE;
2420
2421 case PGMPHYSHANDLERTYPE_MMIO:
2422 case PGMPHYSHANDLERTYPE_PHYSICAL_ALL:
2423 return MM_RAM_FLAGS_PHYSICAL_HANDLER | MM_RAM_FLAGS_PHYSICAL_ALL;
2424
2425 default:
2426 AssertFatalMsgFailed(("Invalid type %d\n", pCur->enmType));
2427 }
2428}
2429
2430
2431/**
2432 * Clears one physical page of a virtual handler
2433 *
2434 * @param pPGM Pointer to the PGM instance.
2435 * @param pCur Virtual handler structure
2436 * @param iPage Physical page index
2437 */
2438DECLINLINE(void) pgmHandlerVirtualClearPage(PPGM pPGM, PPGMVIRTHANDLER pCur, unsigned iPage)
2439{
2440 const PPGMPHYS2VIRTHANDLER pPhys2Virt = &pCur->aPhysToVirt[iPage];
2441
2442 /*
2443 * Remove the node from the tree (it's supposed to be in the tree if we get here!).
2444 */
2445#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
2446 AssertReleaseMsg(pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_IN_TREE,
2447 ("pPhys2Virt=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
2448 pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias));
2449#endif
2450 if (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_IS_HEAD)
2451 {
2452 /* We're the head of the alias chain. */
2453 PPGMPHYS2VIRTHANDLER pRemove = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysRemove(&pPGM->CTXSUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key); NOREF(pRemove);
2454#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
2455 AssertReleaseMsg(pRemove != NULL,
2456 ("pPhys2Virt=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
2457 pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias));
2458 AssertReleaseMsg(pRemove == pPhys2Virt,
2459 ("wanted: pPhys2Virt=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n"
2460 " got: pRemove=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
2461 pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias,
2462 pRemove, pRemove->Core.Key, pRemove->Core.KeyLast, pRemove->offVirtHandler, pRemove->offNextAlias));
2463#endif
2464 if (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK)
2465 {
2466 /* Insert the next list in the alias chain into the tree. */
2467 PPGMPHYS2VIRTHANDLER pNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPhys2Virt + (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
2468#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
2469 AssertReleaseMsg(pNext->offNextAlias & PGMPHYS2VIRTHANDLER_IN_TREE,
2470 ("pNext=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
2471 pNext, pNext->Core.Key, pNext->Core.KeyLast, pNext->offVirtHandler, pNext->offNextAlias));
2472#endif
2473 pNext->offNextAlias |= PGMPHYS2VIRTHANDLER_IS_HEAD;
2474 bool fRc = RTAvlroGCPhysInsert(&pPGM->CTXSUFF(pTrees)->PhysToVirtHandlers, &pNext->Core);
2475 AssertRelease(fRc);
2476 }
2477 }
2478 else
2479 {
2480 /* Locate the previous node in the alias chain. */
2481 PPGMPHYS2VIRTHANDLER pPrev = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGet(&pPGM->CTXSUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key);
2482#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
2483 AssertReleaseMsg(pPrev != pPhys2Virt,
2484 ("pPhys2Virt=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} pPrev=%p\n",
2485 pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias, pPrev));
2486#endif
2487 for (;;)
2488 {
2489 PPGMPHYS2VIRTHANDLER pNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPrev + (pPrev->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
2490 if (pNext == pPhys2Virt)
2491 {
2492 /* unlink. */
2493 LogFlow(("pgmHandlerVirtualClearPage: removed %p:{.offNextAlias=%#RX32} from alias chain. prev %p:{.offNextAlias=%#RX32} [%VGp-%VGp]\n",
2494 pPhys2Virt, pPhys2Virt->offNextAlias, pPrev, pPrev->offNextAlias, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast));
2495 if (!(pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK))
2496 pPrev->offNextAlias &= ~PGMPHYS2VIRTHANDLER_OFF_MASK;
2497 else
2498 {
2499 PPGMPHYS2VIRTHANDLER pNewNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPhys2Virt + (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
2500 pPrev->offNextAlias = ((intptr_t)pNewNext - (intptr_t)pPrev)
2501 | (pPrev->offNextAlias & ~PGMPHYS2VIRTHANDLER_OFF_MASK);
2502 }
2503 break;
2504 }
2505
2506 /* next */
2507 if (pNext == pPrev)
2508 {
2509#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
2510 AssertReleaseMsg(pNext != pPrev,
2511 ("pPhys2Virt=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} pPrev=%p\n",
2512 pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias, pPrev));
2513#endif
2514 break;
2515 }
2516 pPrev = pNext;
2517 }
2518 }
2519 Log2(("PHYS2VIRT: Removing %VGp-%VGp %#RX32 %s\n",
2520 pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias, HCSTRING(pCur->pszDesc)));
2521 pPhys2Virt->offNextAlias = 0;
2522 pPhys2Virt->Core.KeyLast = NIL_RTGCPHYS; /* require reinsert */
2523
2524 /*
2525 * Clear the ram flags for this page.
2526 */
2527 int rc = PGMRamFlagsClearByGCPhys(pPGM, pPhys2Virt->Core.Key,
2528 MM_RAM_FLAGS_VIRTUAL_HANDLER | MM_RAM_FLAGS_VIRTUAL_ALL | MM_RAM_FLAGS_VIRTUAL_WRITE);
2529 AssertRC(rc);
2530}
2531
2532
2533/**
2534 * Internal worker for finding a 'in-use' shadow page give by it's physical address.
2535 *
2536 * @returns Pointer to the shadow page structure.
2537 * @param pPool The pool.
2538 * @param HCPhys The HC physical address of the shadow page.
2539 */
2540DECLINLINE(PPGMPOOLPAGE) pgmPoolGetPage(PPGMPOOL pPool, RTHCPHYS HCPhys)
2541{
2542 /*
2543 * Look up the page.
2544 */
2545 PPGMPOOLPAGE pPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, HCPhys & X86_PTE_PAE_PG_MASK);
2546 AssertFatalMsg(pPage && pPage->enmKind != PGMPOOLKIND_FREE, ("HCPhys=%VHp pPage=%p type=%d\n", HCPhys, pPage, (pPage) ? pPage->enmKind : 0));
2547 return pPage;
2548}
2549
2550
2551/**
2552 * Internal worker for finding a 'in-use' shadow page give by it's physical address.
2553 *
2554 * @returns Pointer to the shadow page structure.
2555 * @param pPool The pool.
2556 * @param idx The pool page index.
2557 */
2558DECLINLINE(PPGMPOOLPAGE) pgmPoolGetPageByIdx(PPGMPOOL pPool, unsigned idx)
2559{
2560 AssertFatalMsg(idx >= PGMPOOL_IDX_FIRST && idx < pPool->cCurPages, ("idx=%d\n", idx));
2561 return &pPool->aPages[idx];
2562}
2563
2564
#ifdef PGMPOOL_WITH_GCPHYS_TRACKING
/**
 * Clear references to guest physical memory.
 *
 * Decrements the reference tracking information stored in the high flag bits
 * of the aHCPhys entry; the single-reference case is handled inline here,
 * everything else is delegated to pgmPoolTrackPhysExtDerefGCPhys.
 *
 * @param   pPool   The pool.
 * @param   pPage   The page.
 * @param   pHCPhys Pointer to the aHCPhys entry in the ram range.
 */
DECLINLINE(void) pgmTrackDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PRTHCPHYS pHCPhys)
{
    /*
     * Just deal with the simple case here.
     */
#ifdef LOG_ENABLED
    const RTHCPHYS HCPhysOrg = *pHCPhys;    /* original value, for the log statement only */
#endif
    const unsigned cRefs = *pHCPhys >> MM_RAM_FLAGS_CREFS_SHIFT;
    if (cRefs == 1)
    {
        /* Single reference: it must be ours, so just clear the reference bits. */
        Assert(pPage->idx == ((*pHCPhys >> MM_RAM_FLAGS_IDX_SHIFT) & MM_RAM_FLAGS_IDX_MASK));
        *pHCPhys = *pHCPhys & MM_RAM_FLAGS_NO_REFS_MASK;
    }
    else
        /* More than one reference (or none inline): let the extent worker sort it out. */
        pgmPoolTrackPhysExtDerefGCPhys(pPool, pPage, pHCPhys);
    LogFlow(("pgmTrackDerefGCPhys: *pHCPhys=%RHp -> %RHp\n", HCPhysOrg, *pHCPhys));
}
#endif
2592
2593
#ifdef PGMPOOL_WITH_CACHE
/**
 * Moves the page to the head of the age list.
 *
 * This is done when the cached page is used in one way or another, keeping
 * the list ordered from most to least recently used.
 *
 * @param   pPool   The pool.
 * @param   pPage   The cached page.
 */
DECLINLINE(void) pgmPoolCacheUsed(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
{
    /*
     * Move to the head of the age list.
     */
    if (pPage->iAgePrev != NIL_PGMPOOL_IDX)     /* NIL prev means we're already the head. */
    {
        /* unlink */
        pPool->aPages[pPage->iAgePrev].iAgeNext = pPage->iAgeNext;
        if (pPage->iAgeNext != NIL_PGMPOOL_IDX)
            pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->iAgePrev;
        else
            pPool->iAgeTail = pPage->iAgePrev;  /* we were the tail; our predecessor is now. */

        /* insert at head */
        pPage->iAgePrev = NIL_PGMPOOL_IDX;
        pPage->iAgeNext = pPool->iAgeHead;
        Assert(pPage->iAgeNext != NIL_PGMPOOL_IDX); /* we would've already been head then */
        pPool->iAgeHead = pPage->idx;
        pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->idx;
    }
}
#endif /* PGMPOOL_WITH_CACHE */
2627
2628/**
2629 * Tells if mappings are to be put into the shadow page table or not
2630 *
2631 * @returns boolean result
2632 * @param pVM VM handle.
2633 */
2634
2635DECLINLINE(bool) pgmMapAreMappingsEnabled(PPGM pPGM)
2636{
2637 return !pPGM->fDisableMappings;
2638}
2639
2640/** @} */
2641
2642#endif
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette