VirtualBox

source: vbox/trunk/src/VBox/VMM/PGMInternal.h@ 4675

Last change on this file since 4675 was 4665, checked in by vboxsync, 17 years ago

Moved some of the odd address conversion routines to PGMR3Dbg just to get them out of the way.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 111.2 KB
Line 
1/* $Id: PGMInternal.h 4665 2007-09-10 13:41:18Z vboxsync $ */
2/** @file
3 * PGM - Internal header file.
4 */
5
6/*
7 * Copyright (C) 2006-2007 innotek GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License as published by the Free Software Foundation,
13 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
14 * distribution. VirtualBox OSE is distributed in the hope that it will
15 * be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18#ifndef ___PGMInternal_h
19#define ___PGMInternal_h
20
21#include <VBox/cdefs.h>
22#include <VBox/types.h>
23#include <VBox/err.h>
24#include <VBox/stam.h>
25#include <VBox/param.h>
26#include <VBox/vmm.h>
27#include <VBox/mm.h>
28#include <VBox/pdmcritsect.h>
29#include <VBox/pdmapi.h>
30#include <VBox/dis.h>
31#include <VBox/dbgf.h>
32#include <VBox/log.h>
33#include <iprt/avl.h>
34#include <iprt/assert.h>
35#include <iprt/critsect.h>
36
37#if !defined(IN_PGM_R3) && !defined(IN_PGM_R0) && !defined(IN_PGM_GC)
38# error "Not in PGM! This is an internal header!"
39#endif
40
41
42/** @defgroup grp_pgm_int Internals
43 * @ingroup grp_pgm
44 * @internal
45 * @{
46 */
47
48
/** @name PGM Compile Time Config
 * @{
 */

/**
 * Solve page-is-out-of-sync issues inside Guest Context (in PGMGC.cpp).
 * Comment it out if it will break something.
 */
#define PGM_OUT_OF_SYNC_IN_GC

/**
 * Virtualize the dirty bit.
 * This also makes a half-hearted attempt at the accessed bit. For full
 * accessed bit virtualization define PGM_SYNC_ACCESSED_BIT.
 */
#define PGM_SYNC_DIRTY_BIT

/**
 * Fully virtualize the accessed bit.
 * @remark This requires PGM_SYNC_DIRTY_BIT to be defined as well!
 *         (Enforced by the sanity check below.)
 */
#define PGM_SYNC_ACCESSED_BIT

/**
 * Check and skip global PDEs for non-global flushes.
 */
#define PGM_SKIP_GLOBAL_PAGEDIRS_ON_NONGLOBAL_FLUSH

/**
 * Sync N pages instead of a whole page table.
 */
#define PGM_SYNC_N_PAGES

/**
 * Number of pages to sync during a page fault.
 *
 * When PGMPOOL_WITH_GCPHYS_TRACKING is enabled using high values here
 * causes a lot of unnecessary extents and also is slower than taking more \#PFs.
 */
#define PGM_SYNC_NR_PAGES 8

/**
 * Number of PGMPhysRead/Write cache entries (must be <= sizeof(uint64_t),
 * since a uint64_t bitmap tracks entry validity).
 */
#define PGM_MAX_PHYSCACHE_ENTRIES 64
#define PGM_MAX_PHYSCACHE_ENTRIES_MASK (PGM_MAX_PHYSCACHE_ENTRIES-1)

/**
 * Enable caching of PGMR3PhysRead/WriteByte/Word/Dword.
 */
#define PGM_PHYSMEMACCESS_CACHING

/*
 * Assert Sanity.
 */
#if defined(PGM_SYNC_ACCESSED_BIT) && !defined(PGM_SYNC_DIRTY_BIT)
# error "PGM_SYNC_ACCESSED_BIT requires PGM_SYNC_DIRTY_BIT!"
#endif

/** @def PGMPOOL_WITH_CACHE
 * Enable aggressive caching using the page pool.
 *
 * This requires PGMPOOL_WITH_USER_TRACKING and PGMPOOL_WITH_MONITORING.
 */
#define PGMPOOL_WITH_CACHE

/** @def PGMPOOL_WITH_MIXED_PT_CR3
 * When defined, we'll deal with 'uncachable' pages.
 */
#ifdef PGMPOOL_WITH_CACHE
# define PGMPOOL_WITH_MIXED_PT_CR3
#endif

/** @def PGMPOOL_WITH_MONITORING
 * Monitor the guest pages which are shadowed.
 * When this is enabled, PGMPOOL_WITH_CACHE or PGMPOOL_WITH_GCPHYS_TRACKING must
 * be enabled as well.
 * @remark doesn't really work without caching now. (Mixed PT/CR3 change.)
 */
#ifdef PGMPOOL_WITH_CACHE
# define PGMPOOL_WITH_MONITORING
#endif

/** @def PGMPOOL_WITH_GCPHYS_TRACKING
 * Tracking of the shadow pages mapping guest physical pages.
 *
 * This is very expensive, the current cache prototype is trying to figure out
 * whether it will be acceptable with an aggressive caching policy.
 */
#if defined(PGMPOOL_WITH_CACHE) || defined(PGMPOOL_WITH_MONITORING)
# define PGMPOOL_WITH_GCPHYS_TRACKING
#endif

/** @def PGMPOOL_WITH_USER_TRACKING
 * Tracking users of shadow pages. This is required for the linking of shadow page
 * tables and physical guest addresses.
 */
#if defined(PGMPOOL_WITH_GCPHYS_TRACKING) || defined(PGMPOOL_WITH_CACHE) || defined(PGMPOOL_WITH_MONITORING)
# define PGMPOOL_WITH_USER_TRACKING
#endif

/** @def PGMPOOL_CFG_MAX_GROW
 * The maximum number of pages to add to the pool in one go.
 */
#define PGMPOOL_CFG_MAX_GROW (_256K >> PAGE_SHIFT)

/** @def VBOX_STRICT_PGM_HANDLER_VIRTUAL
 * Enables some extra assertions for virtual handlers (mainly phys2virt related).
 */
#ifdef VBOX_STRICT
# define VBOX_STRICT_PGM_HANDLER_VIRTUAL
#endif
/** @} */
162
163
/** @name PDPTR and PML4 flags.
 * These are placed in the three bits available for system programs in
 * the PDPTR and PML4 entries.
 * @{ */
/** The entry is a permanent one and it must always be present.
 * Never free such an entry. */
#define PGM_PLXFLAGS_PERMANENT BIT64(10)
/** @} */

/** @name Page directory flags.
 * These are placed in the three bits available for system programs in
 * the page directory entries.
 * @{ */
/** Mapping (hypervisor allocated pagetable). */
#define PGM_PDFLAGS_MAPPING BIT64(10)
/** Made read-only to facilitate dirty bit tracking. */
#define PGM_PDFLAGS_TRACK_DIRTY BIT64(11)
/** @} */

/** @name Page flags.
 * These are placed in the three bits available for system programs in
 * the page entries.
 * @{ */
/** Made read-only to facilitate dirty bit tracking. */
#define PGM_PTFLAGS_TRACK_DIRTY BIT64(9)

#ifndef PGM_PTFLAGS_CSAM_VALIDATED
/** Scanned and approved by CSAM (tm).
 * NOTE: Must be identical to the one defined in CSAMInternal.h!!
 * @todo Move PGM_PTFLAGS_* and PGM_PDFLAGS_* to VBox/pgm.h. */
#define PGM_PTFLAGS_CSAM_VALIDATED BIT64(11)
#endif
/** @} */

/** @name Defines used to indicate the shadow and guest paging mode in the templates.
 * @{ */
#define PGM_TYPE_REAL 1
#define PGM_TYPE_PROT 2
#define PGM_TYPE_32BIT 3
#define PGM_TYPE_PAE 4
#define PGM_TYPE_AMD64 5
/** @} */
206
/** Macro for checking if the guest is using paging.
 * @param uType PGM_TYPE_*
 * @remark ASSUMES certain order of the PGM_TYPE_* values
 *         (REAL and PROT sort below 32BIT/PAE/AMD64).
 */
#define PGM_WITH_PAGING(uType) ((uType) >= PGM_TYPE_32BIT)


/** @def PGM_HCPHYS_2_PTR
 * Maps a HC physical page pool address to a virtual address.
 *
 * @returns VBox status code.
 * @param pVM The VM handle.
 * @param HCPhys The HC physical address to map to a virtual one.
 * @param ppv Where to store the virtual address. No need to cast this.
 *
 * @remark In GC this uses PGMGCDynMapHCPage(), so it will consume a slot of the
 * small page window employed by that function. Be careful.
 * @remark There is no need to assert on the result.
 */
#ifdef IN_GC
# define PGM_HCPHYS_2_PTR(pVM, HCPhys, ppv) PGMGCDynMapHCPage(pVM, HCPhys, (void **)(ppv))
#else
# define PGM_HCPHYS_2_PTR(pVM, HCPhys, ppv) MMPagePhys2PageEx(pVM, HCPhys, (void **)(ppv))
#endif

/** @def PGM_GCPHYS_2_PTR
 * Maps a GC physical page address to a virtual address.
 *
 * @returns VBox status code.
 * @param pVM The VM handle.
 * @param GCPhys The GC physical address to map to a virtual one.
 * @param ppv Where to store the virtual address. No need to cast this.
 *
 * @remark In GC this uses PGMGCDynMapGCPage(), so it will consume a slot of the
 * small page window employed by that function. Be careful.
 * @remark There is no need to assert on the result.
 */
#ifdef IN_GC
# define PGM_GCPHYS_2_PTR(pVM, GCPhys, ppv) PGMGCDynMapGCPage(pVM, GCPhys, (void **)(ppv))
#else
# define PGM_GCPHYS_2_PTR(pVM, GCPhys, ppv) PGMPhysGCPhys2HCPtr(pVM, GCPhys, 1 /* one page only */, (void **)(ppv)) /** @todo this isn't asserting, use PGMRamGCPhys2HCPtr! */
#endif

/** @def PGM_GCPHYS_2_PTR_EX
 * Maps an unaligned GC physical page address to a virtual address.
 *
 * @returns VBox status code.
 * @param pVM The VM handle.
 * @param GCPhys The GC physical address to map to a virtual one.
 * @param ppv Where to store the virtual address. No need to cast this.
 *
 * @remark In GC this uses PGMGCDynMapGCPage(), so it will consume a slot of the
 * small page window employed by that function. Be careful.
 * @remark There is no need to assert on the result.
 */
#ifdef IN_GC
# define PGM_GCPHYS_2_PTR_EX(pVM, GCPhys, ppv) PGMGCDynMapGCPageEx(pVM, GCPhys, (void **)(ppv))
#else
# define PGM_GCPHYS_2_PTR_EX(pVM, GCPhys, ppv) PGMPhysGCPhys2HCPtr(pVM, GCPhys, 1 /* one page only */, (void **)(ppv)) /** @todo this isn't asserting, use PGMRamGCPhys2HCPtr! */
#endif

/** @def PGM_INVL_PG
 * Invalidates a page when in GC; does nothing in HC.
 *
 * @param GCVirt The virtual address of the page to invalidate.
 */
#ifdef IN_GC
# define PGM_INVL_PG(GCVirt) ASMInvalidatePage((void *)(GCVirt))
#else
# define PGM_INVL_PG(GCVirt) ((void)0)
#endif

/** @def PGM_INVL_BIG_PG
 * Invalidates a 4MB page directory entry when in GC; does nothing in HC.
 *
 * @param GCVirt The virtual address within the page directory to invalidate.
 */
#ifdef IN_GC
# define PGM_INVL_BIG_PG(GCVirt) ASMReloadCR3()
#else
# define PGM_INVL_BIG_PG(GCVirt) ((void)0)
#endif

/** @def PGM_INVL_GUEST_TLBS()
 * Invalidates all guest TLBs (by reloading CR3 when in GC); does nothing in HC.
 */
#ifdef IN_GC
# define PGM_INVL_GUEST_TLBS() ASMReloadCR3()
#else
# define PGM_INVL_GUEST_TLBS() ((void)0)
#endif
298
299
/**
 * Structure for tracking GC Mappings.
 *
 * This structure is used by linked lists in both GC and HC.
 */
typedef struct PGMMAPPING
{
    /** Pointer to next entry. */
    R3PTRTYPE(struct PGMMAPPING *) pNextR3;
    /** Pointer to next entry. */
    R0PTRTYPE(struct PGMMAPPING *) pNextR0;
    /** Pointer to next entry. */
    GCPTRTYPE(struct PGMMAPPING *) pNextGC;
    /** Start Virtual address. */
    RTGCUINTPTR GCPtr;
    /** Last Virtual address (inclusive). */
    RTGCUINTPTR GCPtrLast;
    /** Range size (bytes). */
    RTGCUINTPTR cb;
    /** Pointer to relocation callback function. */
    R3PTRTYPE(PFNPGMRELOCATE) pfnRelocate;
    /** User argument to the callback. */
    R3PTRTYPE(void *) pvUser;
    /** Mapping description / name. For easing debugging. */
    R3PTRTYPE(const char *) pszDesc;
    /** Number of page tables. */
    RTUINT cPTs;
#if HC_ARCH_BITS != GC_ARCH_BITS
    RTUINT uPadding0; /**< Alignment padding. */
#endif
    /** Array of page table mapping data. Each entry
     * describes one page table. The array can be longer
     * than the declared length.
     */
    struct
    {
        /** The HC physical address of the page table. */
        RTHCPHYS HCPhysPT;
        /** The HC physical address of the first PAE page table. */
        RTHCPHYS HCPhysPaePT0;
        /** The HC physical address of the second PAE page table. */
        RTHCPHYS HCPhysPaePT1;
        /** The HC virtual address of the 32-bit page table. */
        R3PTRTYPE(PVBOXPT) pPTR3;
        /** The HC virtual address of the two PAE page tables. (i.e. 1024 entries instead of 512) */
        R3PTRTYPE(PX86PTPAE) paPaePTsR3;
        /** The GC virtual address of the 32-bit page table. */
        GCPTRTYPE(PVBOXPT) pPTGC;
        /** The GC virtual address of the two PAE page tables. */
        GCPTRTYPE(PX86PTPAE) paPaePTsGC;
        /** The R0 virtual address of the 32-bit page table. */
        R0PTRTYPE(PVBOXPT) pPTR0;
        /** The R0 virtual address of the two PAE page tables. */
        R0PTRTYPE(PX86PTPAE) paPaePTsR0;
    } aPTs[1];
} PGMMAPPING;
/** Pointer to structure for tracking GC Mappings. */
typedef struct PGMMAPPING *PPGMMAPPING;
358
359
/**
 * Physical page access handler structure.
 *
 * This is used to keep track of physical address ranges
 * which are being monitored in some kind of way.
 */
typedef struct PGMPHYSHANDLER
{
    /** Core node for the tree based on physical ranges (the range key). */
    AVLROGCPHYSNODECORE Core;
    /** Alignment padding. */
    uint32_t u32Padding;
    /** Access type. */
    PGMPHYSHANDLERTYPE enmType;
    /** Number of pages to update. */
    uint32_t cPages;
    /** Pointer to R3 callback function. */
    R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnHandlerR3;
    /** User argument for R3 handlers. */
    R3PTRTYPE(void *) pvUserR3;
    /** Pointer to R0 callback function. */
    R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnHandlerR0;
    /** User argument for R0 handlers. */
    R0PTRTYPE(void *) pvUserR0;
    /** Pointer to GC callback function. */
    GCPTRTYPE(PFNPGMGCPHYSHANDLER) pfnHandlerGC;
    /** User argument for GC handlers. */
    GCPTRTYPE(void *) pvUserGC;
    /** Description / Name. For easing debugging. */
    R3PTRTYPE(const char *) pszDesc;
#ifdef VBOX_WITH_STATISTICS
    /** Profiling of this handler. */
    STAMPROFILE Stat;
#endif
} PGMPHYSHANDLER;
/** Pointer to a physical page access handler structure. */
typedef PGMPHYSHANDLER *PPGMPHYSHANDLER;
396
397
/**
 * Cache node for the physical addresses covered by a virtual handler.
 */
typedef struct PGMPHYS2VIRTHANDLER
{
    /** Core node for the tree based on physical ranges. */
    AVLROGCPHYSNODECORE Core;
    /** Offset from this struct to the PGMVIRTHANDLER structure. */
    RTGCINTPTR offVirtHandler;
    /** Offset of the next alias relative to this one.
     * Bit 0 is used for indicating whether we're in the tree.
     * Bit 1 is used for indicating that we're the head node.
     */
    int32_t offNextAlias;
} PGMPHYS2VIRTHANDLER;
/** Pointer to a phys to virtual handler structure. */
typedef PGMPHYS2VIRTHANDLER *PPGMPHYS2VIRTHANDLER;

/** The bit in PGMPHYS2VIRTHANDLER::offNextAlias used to indicate that the
 * node is in the tree. */
#define PGMPHYS2VIRTHANDLER_IN_TREE BIT(0)
/** The bit in PGMPHYS2VIRTHANDLER::offNextAlias used to indicate that the
 * node is the head of an alias chain.
 * The PGMPHYS2VIRTHANDLER_IN_TREE is always set if this bit is set. */
#define PGMPHYS2VIRTHANDLER_IS_HEAD BIT(1)
/** The mask to apply to PGMPHYS2VIRTHANDLER::offNextAlias to get the offset. */
#define PGMPHYS2VIRTHANDLER_OFF_MASK (~(int32_t)3)
425
426
/**
 * Virtual page access handler structure.
 *
 * This is used to keep track of virtual address ranges
 * which are being monitored in some kind of way.
 */
typedef struct PGMVIRTHANDLER
{
    /** Core node for the tree based on virtual ranges. */
    AVLROGCPTRNODECORE Core;
    /** Alignment padding. */
    uint32_t u32Padding;
    /** Access type. */
    PGMVIRTHANDLERTYPE enmType;
    /** Number of cache pages. */
    uint32_t cPages;

/** @todo The next two members are redundant. It adds some readability though. */
    /** Start of the range. */
    RTGCPTR GCPtr;
    /** End of the range (exclusive). */
    RTGCPTR GCPtrLast;
    /** Size of the range (in bytes). */
    RTGCUINTPTR cb;
    /** Pointer to the GC callback function. */
    GCPTRTYPE(PFNPGMGCVIRTHANDLER) pfnHandlerGC;
    /** Pointer to the HC callback function for invalidation. */
    HCPTRTYPE(PFNPGMHCVIRTINVALIDATE) pfnInvalidateHC;
    /** Pointer to the HC callback function. */
    HCPTRTYPE(PFNPGMHCVIRTHANDLER) pfnHandlerHC;
    /** Description / Name. For easing debugging. */
    HCPTRTYPE(const char *) pszDesc;
#ifdef VBOX_WITH_STATISTICS
    /** Profiling of this handler. */
    STAMPROFILE Stat;
#endif
    /** Array of cached physical addresses for the monitored range. */
    PGMPHYS2VIRTHANDLER aPhysToVirt[HC_ARCH_BITS == 32 ? 1 : 2];
} PGMVIRTHANDLER;
/** Pointer to a virtual page access handler structure. */
typedef PGMVIRTHANDLER *PPGMVIRTHANDLER;
468
469
/**
 * A Physical Guest Page tracking structure.
 *
 * The format of this structure is complicated because we have to fit a lot
 * of information into as few bits as possible. The format is also subject
 * to change (there is one coming up soon). Which means that for now we'll be
 * using PGM_PAGE_GET_* and PGM_PAGE_SET_* macros for all accesses to the
 * structure.
 */
typedef struct PGMPAGE
{
    /** The physical address and a whole lot of other stuff. All bits are used! */
    RTHCPHYS HCPhys;
    uint32_t u32A;
    uint32_t u32B;
} PGMPAGE;
/** Pointer to a physical guest page. */
typedef PGMPAGE *PPGMPAGE;
/** Pointer to a const physical guest page. */
typedef const PGMPAGE *PCPGMPAGE;
/** Pointer to a physical guest page pointer. */
typedef PPGMPAGE *PPPGMPAGE;


/**
 * Gets the host physical address of the guest page.
 * @returns host physical address (RTHCPHYS).
 * @param pPage Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_HCPHYS(pPage) ( (pPage)->HCPhys & UINT64_C(0x0000fffffffff000) )

/**
 * Checks if the page is 'reserved'.
 * @returns true/false.
 * @param pPage Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_IS_RESERVED(pPage) ( !!((pPage)->HCPhys & MM_RAM_FLAGS_RESERVED) )
507
508
/**
 * Ram range for GC Phys to HC Phys conversion.
 *
 * Can be used for HC Virt to GC Phys and HC Virt to HC Phys
 * conversions too, but we'll let MM handle that for now.
 *
 * This structure is used by linked lists in both GC and HC.
 */
typedef struct PGMRAMRANGE
{
    /** Pointer to the next RAM range - for HC. */
    HCPTRTYPE(struct PGMRAMRANGE *) pNextHC;
    /** Pointer to the next RAM range - for GC. */
    GCPTRTYPE(struct PGMRAMRANGE *) pNextGC;
    /** Start of the range. Page aligned. */
    RTGCPHYS GCPhys;
    /** Last address in the range (inclusive). Page aligned (-1). */
    RTGCPHYS GCPhysLast;
    /** Size of the range. (Page aligned of course). */
    RTGCPHYS cb;
    /** MM_RAM_* flags */
    uint32_t fFlags;

    /** HC virtual lookup ranges for chunks. Currently only used with MM_RAM_FLAGS_DYNAMIC_ALLOC ranges. */
    GCPTRTYPE(PRTHCPTR) pavHCChunkGC;
    /** HC virtual lookup ranges for chunks. Currently only used with MM_RAM_FLAGS_DYNAMIC_ALLOC ranges. */
    HCPTRTYPE(PRTHCPTR) pavHCChunkHC;

    /** Start of the HC mapping of the range.
     * For pure MMIO and dynamically allocated ranges this is NULL, while for all other
     * ranges this is a valid pointer. */
    HCPTRTYPE(void *) pvHC;

    /** Array of physical guest page tracking structures. */
    PGMPAGE aPages[1];
} PGMRAMRANGE;
/** Pointer to Ram range for GC Phys to HC Phys conversion. */
typedef PGMRAMRANGE *PPGMRAMRANGE;
546
/** @def PGMRAMRANGE_GETHCPTR
 * Returns the HC pointer corresponding to a RAM range and a physical offset into it.
 *
 * @param pRam Pointer to the PGMRAMRANGE. Must be an lvalue/simple expression.
 * @param off  Byte offset into the range.
 *
 * For dynamically allocated ranges the chunk lookup table is consulted;
 * otherwise the offset is simply added to the flat HC mapping.
 *
 * All macro arguments and the whole ternary are parenthesized so that
 * expression arguments (e.g. `base + idx`) expand correctly. The trailing
 * semicolon is kept for compatibility with existing callers.
 */
#define PGMRAMRANGE_GETHCPTR(pRam, off) \
    (((pRam)->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC) \
        ? (RTHCPTR)((RTHCUINTPTR)CTXSUFF((pRam)->pavHCChunk)[(off) >> PGM_DYNAMIC_CHUNK_SHIFT] + ((off) & PGM_DYNAMIC_CHUNK_OFFSET_MASK)) \
        : (RTHCPTR)((RTHCUINTPTR)(pRam)->pvHC + (off)));
551
/** @todo r=bird: fix typename. */
/**
 * PGMPhysRead/Write cache entry.
 */
typedef struct PGMPHYSCACHE_ENTRY
{
    /** HC pointer to physical page */
    R3PTRTYPE(uint8_t *) pbHC;
    /** GC Physical address for cache entry */
    RTGCPHYS GCPhys;
#if HC_ARCH_BITS == 64 && GC_ARCH_BITS == 32
    RTGCPHYS u32Padding0; /**< alignment padding. */
#endif
} PGMPHYSCACHE_ENTRY;

/**
 * PGMPhysRead/Write cache to reduce REM memory access overhead.
 */
typedef struct PGMPHYSCACHE
{
    /** Bitmap of valid cache entries (one bit per Entry[] slot). */
    uint64_t aEntries;
    /** Cache entries */
    PGMPHYSCACHE_ENTRY Entry[PGM_MAX_PHYSCACHE_ENTRIES];
} PGMPHYSCACHE;
577
578
/** @name PGM Pool Indexes.
 * Aka. the unique shadow page identifier.
 * @{ */
/** NIL page pool IDX. */
#define NIL_PGMPOOL_IDX 0
/** The first special index. */
#define PGMPOOL_IDX_FIRST_SPECIAL 1
/** Page directory (32-bit root). */
#define PGMPOOL_IDX_PD 1
/** The extended PAE page directory (2048 entries, works as root currently). */
#define PGMPOOL_IDX_PAE_PD 2
/** Page Directory Pointer Table (PAE root, not currently used). */
#define PGMPOOL_IDX_PDPTR 3
/** Page Map Level-4 (64-bit root). */
#define PGMPOOL_IDX_PML4 4
/** The first normal index. */
#define PGMPOOL_IDX_FIRST 5
/** The last valid index. (inclusive, 14 bits) */
#define PGMPOOL_IDX_LAST 0x3fff
/** @} */

/** The NIL index for the parent chain. */
#define NIL_PGMPOOL_USER_INDEX ((uint16_t)0xffff)
602
/**
 * Node in the chain linking a shadowed page to its parent (user).
 */
#pragma pack(1)
typedef struct PGMPOOLUSER
{
    /** The index to the next item in the chain. NIL_PGMPOOL_USER_INDEX is no next. */
    uint16_t iNext;
    /** The user page index. */
    uint16_t iUser;
    /** Index into the user table. */
    uint16_t iUserTable;
} PGMPOOLUSER, *PPGMPOOLUSER;
typedef const PGMPOOLUSER *PCPGMPOOLUSER;
#pragma pack()
618
619
/** The NIL index for the phys ext chain. */
#define NIL_PGMPOOL_PHYSEXT_INDEX ((uint16_t)0xffff)

/**
 * Node in the chain of physical cross reference extents.
 */
#pragma pack(1)
typedef struct PGMPOOLPHYSEXT
{
    /** The index to the next item in the chain. NIL_PGMPOOL_PHYSEXT_INDEX is no next. */
    uint16_t iNext;
    /** The user page indexes (up to three per extent node). */
    uint16_t aidx[3];
} PGMPOOLPHYSEXT, *PPGMPOOLPHYSEXT;
typedef const PGMPOOLPHYSEXT *PCPGMPOOLPHYSEXT;
#pragma pack()
636
637
/**
 * The kind of page that's being shadowed.
 * (Shw = shadow paging mode; Gst = guest paging mode.)
 */
typedef enum PGMPOOLKIND
{
    /** The virtual invalid 0 entry. */
    PGMPOOLKIND_INVALID = 0,
    /** The entry is free (=unused). */
    PGMPOOLKIND_FREE,

    /** Shw: 32-bit page table; Gst: no paging */
    PGMPOOLKIND_32BIT_PT_FOR_PHYS,
    /** Shw: 32-bit page table; Gst: 32-bit page table. */
    PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT,
    /** Shw: 32-bit page table; Gst: 4MB page. */
    PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB,
    /** Shw: PAE page table; Gst: no paging */
    PGMPOOLKIND_PAE_PT_FOR_PHYS,
    /** Shw: PAE page table; Gst: 32-bit page table. */
    PGMPOOLKIND_PAE_PT_FOR_32BIT_PT,
    /** Shw: PAE page table; Gst: Half of a 4MB page. */
    PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB,
    /** Shw: PAE page table; Gst: PAE page table. */
    PGMPOOLKIND_PAE_PT_FOR_PAE_PT,
    /** Shw: PAE page table; Gst: 2MB page. */
    PGMPOOLKIND_PAE_PT_FOR_PAE_2MB,

    /** Shw: PAE page directory; Gst: 32-bit page directory. */
    PGMPOOLKIND_PAE_PD_FOR_32BIT_PD,
    /** Shw: PAE page directory; Gst: PAE page directory. */
    PGMPOOLKIND_PAE_PD_FOR_PAE_PD,

    /** Shw: 64-bit page directory pointer table; Gst: 64-bit page directory pointer table. */
    PGMPOOLKIND_64BIT_PDPTR_FOR_64BIT_PDPTR,

    /** Shw: Root 32-bit page directory. */
    PGMPOOLKIND_ROOT_32BIT_PD,
    /** Shw: Root PAE page directory */
    PGMPOOLKIND_ROOT_PAE_PD,
    /** Shw: Root PAE page directory pointer table (legacy, 4 entries). */
    PGMPOOLKIND_ROOT_PDPTR,
    /** Shw: Root page map level-4 table. */
    PGMPOOLKIND_ROOT_PML4,

    /** The last valid entry. */
    PGMPOOLKIND_LAST = PGMPOOLKIND_ROOT_PML4
} PGMPOOLKIND;
685
686
/**
 * The tracking data for a page in the pool.
 */
typedef struct PGMPOOLPAGE
{
    /** AVL node core with the (HC) physical address of this page. */
    AVLOHCPHYSNODECORE Core;
    /** Pointer to the HC mapping of the page. */
    HCPTRTYPE(void *) pvPageHC;
    /** The guest physical address. */
    RTGCPHYS GCPhys;
    /** The kind of page we're shadowing. (This is really a PGMPOOLKIND enum.) */
    uint8_t enmKind;
    uint8_t bPadding;
    /** The index of this page. */
    uint16_t idx;
    /** The next entry in the list this page currently resides in.
     * It's either in the free list or in the GCPhys hash. */
    uint16_t iNext;
#ifdef PGMPOOL_WITH_USER_TRACKING
    /** Head of the user chain. NIL_PGMPOOL_USER_INDEX if not currently in use. */
    uint16_t iUserHead;
    /** The number of present entries. */
    uint16_t cPresent;
    /** The first entry in the table which is present. */
    uint16_t iFirstPresent;
#endif
#ifdef PGMPOOL_WITH_MONITORING
    /** The number of modifications to the monitored page. */
    uint16_t cModifications;
    /** The next modified page. NIL_PGMPOOL_IDX if tail. */
    uint16_t iModifiedNext;
    /** The previous modified page. NIL_PGMPOOL_IDX if head. */
    uint16_t iModifiedPrev;
    /** The next page sharing access handler. NIL_PGMPOOL_IDX if tail. */
    uint16_t iMonitoredNext;
    /** The previous page sharing access handler. NIL_PGMPOOL_IDX if head. */
    uint16_t iMonitoredPrev;
#endif
#ifdef PGMPOOL_WITH_CACHE
    /** The next page in the age list. */
    uint16_t iAgeNext;
    /** The previous page in the age list. */
    uint16_t iAgePrev;
#endif /* PGMPOOL_WITH_CACHE */
    /** Used to indicate that the page is zeroed. */
    bool fZeroed;
    /** Used to indicate that a PT has non-global entries. */
    bool fSeenNonGlobal;
    /** Used to indicate that we're monitoring writes to the guest page. */
    bool fMonitored;
    /** Used to indicate that the page is in the cache (e.g. in the GCPhys hash).
     * (All pages are in the age list.) */
    bool fCached;
    /** This is used by the R3 access handlers when invoked by an async thread.
     * It's a hack required because of REMR3NotifyHandlerPhysicalDeregister. */
    bool volatile fReusedFlushPending;
    /** Used to indicate that the guest page being mapped is also used as a CR3.
     * In these cases the access handler acts differently and will check
     * for mapping conflicts like the normal CR3 handler.
     * @todo When we change the CR3 shadowing to use pool pages, this flag can be
     * replaced by a list of pages which share access handler.
     */
    bool fCR3Mix;
#if HC_ARCH_BITS == 64 || GC_ARCH_BITS == 64
    bool Alignment[4]; /**< Align the structure size on a 64-bit boundary. */
#endif
} PGMPOOLPAGE, *PPGMPOOLPAGE, **PPPGMPOOLPAGE;
755
756
#ifdef PGMPOOL_WITH_CACHE
/** The hash table size (must be a power of two for the mask in PGMPOOL_HASH). */
# define PGMPOOL_HASH_SIZE 0x40
/** The hash function (page frame number masked to the table size). */
# define PGMPOOL_HASH(GCPhys) ( ((GCPhys) >> PAGE_SHIFT) & (PGMPOOL_HASH_SIZE - 1) )
#endif
763
764
765/**
766 * The shadow page pool instance data.
767 *
768 * It's all one big allocation made at init time, except for the
769 * pages that is. The user nodes follows immediatly after the
770 * page structures.
771 */
772typedef struct PGMPOOL
773{
774 /** The VM handle - HC Ptr. */
775 HCPTRTYPE(PVM) pVMHC;
776 /** The VM handle - GC Ptr. */
777 GCPTRTYPE(PVM) pVMGC;
778 /** The max pool size. This includes the special IDs. */
779 uint16_t cMaxPages;
780 /** The current pool size. */
781 uint16_t cCurPages;
782 /** The head of the free page list. */
783 uint16_t iFreeHead;
784 /* Padding. */
785 uint16_t u16Padding;
786#ifdef PGMPOOL_WITH_USER_TRACKING
787 /** Head of the chain of free user nodes. */
788 uint16_t iUserFreeHead;
789 /** The number of user nodes we've allocated. */
790 uint16_t cMaxUsers;
791 /** The number of present page table entries in the entire pool. */
792 uint32_t cPresent;
793 /** Pointer to the array of user nodes - GC pointer. */
794 GCPTRTYPE(PPGMPOOLUSER) paUsersGC;
795 /** Pointer to the array of user nodes - HC pointer. */
796 HCPTRTYPE(PPGMPOOLUSER) paUsersHC;
797#endif /* PGMPOOL_WITH_USER_TRACKING */
798#ifdef PGMPOOL_WITH_GCPHYS_TRACKING
799 /** Head of the chain of free phys ext nodes. */
800 uint16_t iPhysExtFreeHead;
801 /** The number of user nodes we've allocated. */
802 uint16_t cMaxPhysExts;
803 /** Pointer to the array of physical xref extent - GC pointer. */
804 GCPTRTYPE(PPGMPOOLPHYSEXT) paPhysExtsGC;
805 /** Pointer to the array of physical xref extent nodes - HC pointer. */
806 HCPTRTYPE(PPGMPOOLPHYSEXT) paPhysExtsHC;
807#endif /* PGMPOOL_WITH_GCPHYS_TRACKING */
808#ifdef PGMPOOL_WITH_CACHE
809 /** Hash table for GCPhys addresses. */
810 uint16_t aiHash[PGMPOOL_HASH_SIZE];
811 /** The head of the age list. */
812 uint16_t iAgeHead;
813 /** The tail of the age list. */
814 uint16_t iAgeTail;
815 /** Set if the cache is enabled. */
816 bool fCacheEnabled;
817#endif /* PGMPOOL_WITH_CACHE */
818#ifdef PGMPOOL_WITH_MONITORING
819 /** Head of the list of modified pages. */
820 uint16_t iModifiedHead;
821 /** The current number of modified pages. */
822 uint16_t cModifiedPages;
823 /** Access handler, GC. */
824 GCPTRTYPE(PFNPGMGCPHYSHANDLER) pfnAccessHandlerGC;
825 /** Access handler, R0. */
826 R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnAccessHandlerR0;
827 /** Access handler, R3. */
828 R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnAccessHandlerR3;
829 /** The access handler description (HC ptr). */
830 R3PTRTYPE(const char *) pszAccessHandler;
831#endif /* PGMPOOL_WITH_MONITORING */
832 /** The number of pages currently in use. */
833 uint16_t cUsedPages;
834#ifdef VBOX_WITH_STATISTICS
835 /** The high wather mark for cUsedPages. */
836 uint16_t cUsedPagesHigh;
837 uint32_t Alignment1; /**< Align the next member on a 64-bit boundrary. */
838 /** Profiling pgmPoolAlloc(). */
839 STAMPROFILEADV StatAlloc;
840 /** Profiling pgmPoolClearAll(). */
841 STAMPROFILE StatClearAll;
842 /** Profiling pgmPoolFlushAllInt(). */
843 STAMPROFILE StatFlushAllInt;
844 /** Profiling pgmPoolFlushPage(). */
845 STAMPROFILE StatFlushPage;
846 /** Profiling pgmPoolFree(). */
847 STAMPROFILE StatFree;
848 /** Profiling time spent zeroing pages. */
849 STAMPROFILE StatZeroPage;
850# ifdef PGMPOOL_WITH_USER_TRACKING
851 /** Profiling of pgmPoolTrackDeref. */
852 STAMPROFILE StatTrackDeref;
853 /** Profiling pgmTrackFlushGCPhysPT. */
854 STAMPROFILE StatTrackFlushGCPhysPT;
855 /** Profiling pgmTrackFlushGCPhysPTs. */
856 STAMPROFILE StatTrackFlushGCPhysPTs;
857 /** Profiling pgmTrackFlushGCPhysPTsSlow. */
858 STAMPROFILE StatTrackFlushGCPhysPTsSlow;
859 /** Number of times we've been out of user records. */
860 STAMCOUNTER StatTrackFreeUpOneUser;
861# endif
862# ifdef PGMPOOL_WITH_GCPHYS_TRACKING
863 /** Profiling deref activity related tracking GC physical pages. */
864 STAMPROFILE StatTrackDerefGCPhys;
865 /** Number of linear searches for a HCPhys in the ram ranges. */
866 STAMCOUNTER StatTrackLinearRamSearches;
867 /** The number of failing pgmPoolTrackPhysExtAlloc calls. */
868 STAMCOUNTER StamTrackPhysExtAllocFailures;
869# endif
870# ifdef PGMPOOL_WITH_MONITORING
871 /** Profiling the GC PT access handler. */
872 STAMPROFILE StatMonitorGC;
873 /** Times we've failed interpreting the instruction. */
874 STAMCOUNTER StatMonitorGCEmulateInstr;
875 /** Profiling the pgmPoolFlushPage calls made from the GC PT access handler. */
876 STAMPROFILE StatMonitorGCFlushPage;
877 /** Times we've detected fork(). */
878 STAMCOUNTER StatMonitorGCFork;
879 /** Profiling the GC access we've handled (except REP STOSD). */
880 STAMPROFILE StatMonitorGCHandled;
881 /** Times we've failed interpreting a patch code instruction. */
882 STAMCOUNTER StatMonitorGCIntrFailPatch1;
883 /** Times we've failed interpreting a patch code instruction during flushing. */
884 STAMCOUNTER StatMonitorGCIntrFailPatch2;
885 /** The number of times we've seen rep prefixes we can't handle. */
886 STAMCOUNTER StatMonitorGCRepPrefix;
887 /** Profiling the REP STOSD cases we've handled. */
888 STAMPROFILE StatMonitorGCRepStosd;
889
890 /** Profiling the HC PT access handler. */
891 STAMPROFILE StatMonitorHC;
892 /** Times we've failed interpreting the instruction. */
893 STAMCOUNTER StatMonitorHCEmulateInstr;
894 /** Profiling the pgmPoolFlushPage calls made from the HC PT access handler. */
895 STAMPROFILE StatMonitorHCFlushPage;
896 /** Times we've detected fork(). */
897 STAMCOUNTER StatMonitorHCFork;
898 /** Profiling the HC access we've handled (except REP STOSD). */
899 STAMPROFILE StatMonitorHCHandled;
900 /** The number of times we've seen rep prefixes we can't handle. */
901 STAMCOUNTER StatMonitorHCRepPrefix;
902 /** Profiling the REP STOSD cases we've handled. */
903 STAMPROFILE StatMonitorHCRepStosd;
    /** The number of times we're called on an async thread and need to flush. */
905 STAMCOUNTER StatMonitorHCAsync;
    /** The high water mark for cModifiedPages. */
907 uint16_t cModifiedPagesHigh;
908 uint16_t Alignment2[3]; /**< Align the next member on a 64-bit boundrary. */
909# endif
910# ifdef PGMPOOL_WITH_CACHE
911 /** The number of cache hits. */
912 STAMCOUNTER StatCacheHits;
913 /** The number of cache misses. */
914 STAMCOUNTER StatCacheMisses;
915 /** The number of times we've got a conflict of 'kind' in the cache. */
916 STAMCOUNTER StatCacheKindMismatches;
917 /** Number of times we've been out of pages. */
918 STAMCOUNTER StatCacheFreeUpOne;
919 /** The number of cacheable allocations. */
920 STAMCOUNTER StatCacheCacheable;
921 /** The number of uncacheable allocations. */
922 STAMCOUNTER StatCacheUncacheable;
923# endif
924#elif HC_ARCH_BITS == 64 && GC_ARCH_BITS == 32
925 uint32_t Alignment1; /**< Align the next member on a 64-bit boundrary. */
926#endif
927 /** The AVL tree for looking up a page by its HC physical address. */
928 AVLOHCPHYSTREE HCPhysTree;
929 uint32_t Alignment3; /**< Align the next member on a 64-bit boundrary. */
930 /** Array of pages. (cMaxPages in length)
     * The Id is the index into this array.
932 */
933 PGMPOOLPAGE aPages[PGMPOOL_IDX_FIRST];
934} PGMPOOL, *PPGMPOOL, **PPPGMPOOL;
935
936
/** @def PGMPOOL_PAGE_2_PTR
 * Maps a pool page into the current context.
 *
 * @returns Pointer to the (mapped) page data; not a status code.
 * @param   pVM     The VM handle.
 * @param   pPage   The pool page.
 *
 * @remark  In GC this uses PGMGCDynMapHCPage(), so it will consume one of the
 *          small page window slots employed by that function. Be careful.
 * @remark  There is no need to assert on the result.
 */
#ifdef IN_GC
# define PGMPOOL_PAGE_2_PTR(pVM, pPage) pgmGCPoolMapPage((pVM), (pPage))
#else
# define PGMPOOL_PAGE_2_PTR(pVM, pPage) ((pPage)->pvPageHC)
#endif
953
954
/**
 * Collection of the AVL trees used for access-handler lookup.
 *
 * The trees use self-relative offsets as pointers, so all the tree data,
 * including the root pointers held here, must live on the (hyper) heap for
 * HC and GC to see the same layout.
 */
typedef struct PGMTREES
{
    /** Physical access handlers (AVL range+offsetptr tree). */
    AVLROGCPHYSTREE PhysHandlers;
    /** Virtual access handlers (AVL range + GC ptr tree). */
    AVLROGCPTRTREE VirtHandlers;
    /** Virtual access handlers (Phys range AVL range + offsetptr tree). */
    AVLROGCPHYSTREE PhysToVirtHandlers;
    /** Structure size alignment padding. */
    uint32_t auPadding[1];
} PGMTREES;
/** Pointer to PGM trees. */
typedef PGMTREES *PPGMTREES;
972
973
/** @name Paging mode macros
 *
 * PGM_CTX pastes the current context infix (GC, R3 or R0) into an identifier,
 * PGM_CTX_STR does the same inside a string literal (for symbol lookup), and
 * PGM_CTX_DECL selects the calling-convention declaration macro matching the
 * context this file is being compiled for.
 * @{ */
#ifdef IN_GC
# define PGM_CTX(a,b) a##GC##b
# define PGM_CTX_STR(a,b) a "GC" b
# define PGM_CTX_DECL(type) PGMGCDECL(type)
#else
# ifdef IN_RING3
# define PGM_CTX(a,b) a##R3##b
# define PGM_CTX_STR(a,b) a "R3" b
# define PGM_CTX_DECL(type) DECLCALLBACK(type)
# else
# define PGM_CTX(a,b) a##R0##b
# define PGM_CTX_STR(a,b) a "R0" b
# define PGM_CTX_DECL(type) PGMDECL(type)
# endif
#endif

/* Guest paging-mode function name builders, one per guest mode, plus the
   explicit GC/R0 string forms used for symbol resolution. */
#define PGM_GST_NAME_REAL(name) PGM_CTX(pgm,GstReal##name)
#define PGM_GST_NAME_GC_REAL_STR(name) "pgmGCGstReal" #name
#define PGM_GST_NAME_R0_REAL_STR(name) "pgmR0GstReal" #name
#define PGM_GST_NAME_PROT(name) PGM_CTX(pgm,GstProt##name)
#define PGM_GST_NAME_GC_PROT_STR(name) "pgmGCGstProt" #name
#define PGM_GST_NAME_R0_PROT_STR(name) "pgmR0GstProt" #name
#define PGM_GST_NAME_32BIT(name) PGM_CTX(pgm,Gst32Bit##name)
#define PGM_GST_NAME_GC_32BIT_STR(name) "pgmGCGst32Bit" #name
#define PGM_GST_NAME_R0_32BIT_STR(name) "pgmR0Gst32Bit" #name
#define PGM_GST_NAME_PAE(name) PGM_CTX(pgm,GstPAE##name)
#define PGM_GST_NAME_GC_PAE_STR(name) "pgmGCGstPAE" #name
#define PGM_GST_NAME_R0_PAE_STR(name) "pgmR0GstPAE" #name
#define PGM_GST_NAME_AMD64(name) PGM_CTX(pgm,GstAMD64##name)
#define PGM_GST_NAME_GC_AMD64_STR(name) "pgmGCGstAMD64" #name
#define PGM_GST_NAME_R0_AMD64_STR(name) "pgmR0GstAMD64" #name
/* NOTE(review): PGM_GST_DECL below expands PGM_GST_NAME (no mode suffix);
   that macro is expected to be #defined to one of the mode-specific builders
   above by the per-mode template includer — not visible in this file. */
#define PGM_GST_PFN(name, pVM) ((pVM)->pgm.s.PGM_CTX(pfn,Gst##name))
#define PGM_GST_DECL(type, name) PGM_CTX_DECL(type) PGM_GST_NAME(name)

/* Shadow paging-mode function name builders; same scheme as the guest ones. */
#define PGM_SHW_NAME_32BIT(name) PGM_CTX(pgm,Shw32Bit##name)
#define PGM_SHW_NAME_GC_32BIT_STR(name) "pgmGCShw32Bit" #name
#define PGM_SHW_NAME_R0_32BIT_STR(name) "pgmR0Shw32Bit" #name
#define PGM_SHW_NAME_PAE(name) PGM_CTX(pgm,ShwPAE##name)
#define PGM_SHW_NAME_GC_PAE_STR(name) "pgmGCShwPAE" #name
#define PGM_SHW_NAME_R0_PAE_STR(name) "pgmR0ShwPAE" #name
#define PGM_SHW_NAME_AMD64(name) PGM_CTX(pgm,ShwAMD64##name)
#define PGM_SHW_NAME_GC_AMD64_STR(name) "pgmGCShwAMD64" #name
#define PGM_SHW_NAME_R0_AMD64_STR(name) "pgmR0ShwAMD64" #name
#define PGM_SHW_DECL(type, name) PGM_CTX_DECL(type) PGM_SHW_NAME(name)
#define PGM_SHW_PFN(name, pVM) ((pVM)->pgm.s.PGM_CTX(pfn,Shw##name))

/* Shw_Gst: combined shadow+guest ("both") function name builders, one per
   valid shadow/guest mode pairing. */
#define PGM_BTH_NAME_32BIT_REAL(name) PGM_CTX(pgm,Bth32BitReal##name)
#define PGM_BTH_NAME_32BIT_PROT(name) PGM_CTX(pgm,Bth32BitProt##name)
#define PGM_BTH_NAME_32BIT_32BIT(name) PGM_CTX(pgm,Bth32Bit32Bit##name)
#define PGM_BTH_NAME_PAE_REAL(name) PGM_CTX(pgm,BthPAEReal##name)
#define PGM_BTH_NAME_PAE_PROT(name) PGM_CTX(pgm,BthPAEProt##name)
#define PGM_BTH_NAME_PAE_32BIT(name) PGM_CTX(pgm,BthPAE32Bit##name)
#define PGM_BTH_NAME_PAE_PAE(name) PGM_CTX(pgm,BthPAEPAE##name)
#define PGM_BTH_NAME_AMD64_REAL(name) PGM_CTX(pgm,BthAMD64Real##name)
#define PGM_BTH_NAME_AMD64_PROT(name) PGM_CTX(pgm,BthAMD64Prot##name)
#define PGM_BTH_NAME_AMD64_AMD64(name) PGM_CTX(pgm,BthAMD64AMD64##name)
#define PGM_BTH_NAME_GC_32BIT_REAL_STR(name) "pgmGCBth32BitReal" #name
#define PGM_BTH_NAME_GC_32BIT_PROT_STR(name) "pgmGCBth32BitProt" #name
#define PGM_BTH_NAME_GC_32BIT_32BIT_STR(name) "pgmGCBth32Bit32Bit" #name
#define PGM_BTH_NAME_GC_PAE_REAL_STR(name) "pgmGCBthPAEReal" #name
#define PGM_BTH_NAME_GC_PAE_PROT_STR(name) "pgmGCBthPAEProt" #name
#define PGM_BTH_NAME_GC_PAE_32BIT_STR(name) "pgmGCBthPAE32Bit" #name
#define PGM_BTH_NAME_GC_PAE_PAE_STR(name) "pgmGCBthPAEPAE" #name
#define PGM_BTH_NAME_GC_AMD64_REAL_STR(name) "pgmGCBthAMD64Real" #name
#define PGM_BTH_NAME_GC_AMD64_PROT_STR(name) "pgmGCBthAMD64Prot" #name
#define PGM_BTH_NAME_GC_AMD64_AMD64_STR(name) "pgmGCBthAMD64AMD64" #name
#define PGM_BTH_NAME_R0_32BIT_REAL_STR(name) "pgmR0Bth32BitReal" #name
#define PGM_BTH_NAME_R0_32BIT_PROT_STR(name) "pgmR0Bth32BitProt" #name
#define PGM_BTH_NAME_R0_32BIT_32BIT_STR(name) "pgmR0Bth32Bit32Bit" #name
#define PGM_BTH_NAME_R0_PAE_REAL_STR(name) "pgmR0BthPAEReal" #name
#define PGM_BTH_NAME_R0_PAE_PROT_STR(name) "pgmR0BthPAEProt" #name
#define PGM_BTH_NAME_R0_PAE_32BIT_STR(name) "pgmR0BthPAE32Bit" #name
#define PGM_BTH_NAME_R0_PAE_PAE_STR(name) "pgmR0BthPAEPAE" #name
#define PGM_BTH_NAME_R0_AMD64_REAL_STR(name) "pgmR0BthAMD64Real" #name
#define PGM_BTH_NAME_R0_AMD64_PROT_STR(name) "pgmR0BthAMD64Prot" #name
#define PGM_BTH_NAME_R0_AMD64_AMD64_STR(name) "pgmR0BthAMD64AMD64" #name
#define PGM_BTH_DECL(type, name) PGM_CTX_DECL(type) PGM_BTH_NAME(name)
#define PGM_BTH_PFN(name, pVM) ((pVM)->pgm.s.PGM_CTX(pfn,Bth##name))
/** @} */
1056
/**
 * Data for each paging mode.
 *
 * Holds the mode-specific function pointers for one shadow/guest paging mode
 * combination. The member groups parallel the function-pointer sections of
 * struct PGM below — presumably the active mode's entries are copied into it
 * on mode switches; confirm against the mode-switch code.
 */
typedef struct PGMMODEDATA
{
    /** The guest mode type. */
    uint32_t uGstType;
    /** The shadow mode type. */
    uint32_t uShwType;

    /** @name Function pointers for Shadow paging.
     * @{
     */
    DECLR3CALLBACKMEMBER(int, pfnR3ShwRelocate,(PVM pVM, RTGCUINTPTR offDelta));
    DECLR3CALLBACKMEMBER(int, pfnR3ShwExit,(PVM pVM));
    DECLR3CALLBACKMEMBER(int, pfnR3ShwGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
    DECLR3CALLBACKMEMBER(int, pfnR3ShwModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
    DECLR3CALLBACKMEMBER(int, pfnR3ShwGetPDEByIndex,(PVM pVM, uint32_t iPD, PX86PDEPAE pPde));
    DECLR3CALLBACKMEMBER(int, pfnR3ShwSetPDEByIndex,(PVM pVM, uint32_t iPD, X86PDEPAE Pde));
    DECLR3CALLBACKMEMBER(int, pfnR3ShwModifyPDEByIndex,(PVM pVM, uint32_t iPD, uint64_t fFlags, uint64_t fMask));

    DECLGCCALLBACKMEMBER(int, pfnGCShwGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
    DECLGCCALLBACKMEMBER(int, pfnGCShwModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
    DECLGCCALLBACKMEMBER(int, pfnGCShwGetPDEByIndex,(PVM pVM, uint32_t iPD, PX86PDEPAE pPde));
    DECLGCCALLBACKMEMBER(int, pfnGCShwSetPDEByIndex,(PVM pVM, uint32_t iPD, X86PDEPAE Pde));
    DECLGCCALLBACKMEMBER(int, pfnGCShwModifyPDEByIndex,(PVM pVM, uint32_t iPD, uint64_t fFlags, uint64_t fMask));

    DECLR0CALLBACKMEMBER(int, pfnR0ShwGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
    DECLR0CALLBACKMEMBER(int, pfnR0ShwModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
    DECLR0CALLBACKMEMBER(int, pfnR0ShwGetPDEByIndex,(PVM pVM, uint32_t iPD, PX86PDEPAE pPde));
    DECLR0CALLBACKMEMBER(int, pfnR0ShwSetPDEByIndex,(PVM pVM, uint32_t iPD, X86PDEPAE Pde));
    DECLR0CALLBACKMEMBER(int, pfnR0ShwModifyPDEByIndex,(PVM pVM, uint32_t iPD, uint64_t fFlags, uint64_t fMask));
    /** @} */

    /** @name Function pointers for Guest paging.
     * @{
     */
    DECLR3CALLBACKMEMBER(int, pfnR3GstRelocate,(PVM pVM, RTGCUINTPTR offDelta));
    DECLR3CALLBACKMEMBER(int, pfnR3GstExit,(PVM pVM));
    DECLR3CALLBACKMEMBER(int, pfnR3GstGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
    DECLR3CALLBACKMEMBER(int, pfnR3GstModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
    DECLR3CALLBACKMEMBER(int, pfnR3GstGetPDE,(PVM pVM, RTGCUINTPTR GCPtr, PX86PDEPAE pPde));
    DECLR3CALLBACKMEMBER(int, pfnR3GstMonitorCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
    DECLR3CALLBACKMEMBER(int, pfnR3GstUnmonitorCR3,(PVM pVM));
    DECLR3CALLBACKMEMBER(int, pfnR3GstMapCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
    DECLR3CALLBACKMEMBER(int, pfnR3GstUnmapCR3,(PVM pVM));
    /** Ring-3 write handler for the guest CR3 page. */
    R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnHCGstWriteHandlerCR3;
    /** Description string for the above handler. */
    R3PTRTYPE(const char *) pszHCGstWriteHandlerCR3;

    DECLGCCALLBACKMEMBER(int, pfnGCGstGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
    DECLGCCALLBACKMEMBER(int, pfnGCGstModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
    DECLGCCALLBACKMEMBER(int, pfnGCGstGetPDE,(PVM pVM, RTGCUINTPTR GCPtr, PX86PDEPAE pPde));
    DECLGCCALLBACKMEMBER(int, pfnGCGstMonitorCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
    DECLGCCALLBACKMEMBER(int, pfnGCGstUnmonitorCR3,(PVM pVM));
    DECLGCCALLBACKMEMBER(int, pfnGCGstMapCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
    DECLGCCALLBACKMEMBER(int, pfnGCGstUnmapCR3,(PVM pVM));
    /** Guest-context write handler for the guest CR3 page. */
    GCPTRTYPE(PFNPGMGCPHYSHANDLER) pfnGCGstWriteHandlerCR3;

    DECLR0CALLBACKMEMBER(int, pfnR0GstGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
    DECLR0CALLBACKMEMBER(int, pfnR0GstModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
    DECLR0CALLBACKMEMBER(int, pfnR0GstGetPDE,(PVM pVM, RTGCUINTPTR GCPtr, PX86PDEPAE pPde));
    DECLR0CALLBACKMEMBER(int, pfnR0GstMonitorCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
    DECLR0CALLBACKMEMBER(int, pfnR0GstUnmonitorCR3,(PVM pVM));
    DECLR0CALLBACKMEMBER(int, pfnR0GstMapCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
    DECLR0CALLBACKMEMBER(int, pfnR0GstUnmapCR3,(PVM pVM));
    /** Ring-0 write handler for the guest CR3 page. */
    R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnR0GstWriteHandlerCR3;
    /** @} */

    /** @name Function pointers for Both Shadow and Guest paging.
     * @{
     */
    DECLR3CALLBACKMEMBER(int, pfnR3BthRelocate,(PVM pVM, RTGCUINTPTR offDelta));
    DECLR3CALLBACKMEMBER(int, pfnR3BthTrap0eHandler,(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
    DECLR3CALLBACKMEMBER(int, pfnR3BthInvalidatePage,(PVM pVM, RTGCPTR GCPtrPage));
    DECLR3CALLBACKMEMBER(int, pfnR3BthSyncCR3,(PVM pVM, uint32_t cr0, uint32_t cr3, uint32_t cr4, bool fGlobal));
    DECLR3CALLBACKMEMBER(int, pfnR3BthSyncPage,(PVM pVM, VBOXPDE PdeSrc, RTGCUINTPTR GCPtrPage, unsigned cPages, unsigned uError));
    DECLR3CALLBACKMEMBER(int, pfnR3BthPrefetchPage,(PVM pVM, RTGCUINTPTR GCPtrPage));
    DECLR3CALLBACKMEMBER(int, pfnR3BthVerifyAccessSyncPage,(PVM pVM, RTGCUINTPTR GCPtrPage, unsigned fFlags, unsigned uError));
/* NOTE(review): the AssertCR3 members are guarded by VBOX_STRICT here but are
   unconditional in struct PGM below — confirm this asymmetry is intentional. */
#ifdef VBOX_STRICT
    DECLR3CALLBACKMEMBER(unsigned, pfnR3BthAssertCR3,(PVM pVM, uint32_t cr3, uint32_t cr4, RTGCUINTPTR GCPtr, RTGCUINTPTR cb));
#endif

    DECLGCCALLBACKMEMBER(int, pfnGCBthTrap0eHandler,(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
    DECLGCCALLBACKMEMBER(int, pfnGCBthInvalidatePage,(PVM pVM, RTGCPTR GCPtrPage));
    DECLGCCALLBACKMEMBER(int, pfnGCBthSyncCR3,(PVM pVM, uint32_t cr0, uint32_t cr3, uint32_t cr4, bool fGlobal));
    DECLGCCALLBACKMEMBER(int, pfnGCBthSyncPage,(PVM pVM, VBOXPDE PdeSrc, RTGCUINTPTR GCPtrPage, unsigned cPages, unsigned uError));
    DECLGCCALLBACKMEMBER(int, pfnGCBthPrefetchPage,(PVM pVM, RTGCUINTPTR GCPtrPage));
    DECLGCCALLBACKMEMBER(int, pfnGCBthVerifyAccessSyncPage,(PVM pVM, RTGCUINTPTR GCPtrPage, unsigned fFlags, unsigned uError));
#ifdef VBOX_STRICT
    DECLGCCALLBACKMEMBER(unsigned, pfnGCBthAssertCR3,(PVM pVM, uint32_t cr3, uint32_t cr4, RTGCUINTPTR GCPtr, RTGCUINTPTR cb));
#endif

    DECLR0CALLBACKMEMBER(int, pfnR0BthTrap0eHandler,(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
    DECLR0CALLBACKMEMBER(int, pfnR0BthInvalidatePage,(PVM pVM, RTGCPTR GCPtrPage));
    DECLR0CALLBACKMEMBER(int, pfnR0BthSyncCR3,(PVM pVM, uint32_t cr0, uint32_t cr3, uint32_t cr4, bool fGlobal));
    DECLR0CALLBACKMEMBER(int, pfnR0BthSyncPage,(PVM pVM, VBOXPDE PdeSrc, RTGCUINTPTR GCPtrPage, unsigned cPages, unsigned uError));
    DECLR0CALLBACKMEMBER(int, pfnR0BthPrefetchPage,(PVM pVM, RTGCUINTPTR GCPtrPage));
    DECLR0CALLBACKMEMBER(int, pfnR0BthVerifyAccessSyncPage,(PVM pVM, RTGCUINTPTR GCPtrPage, unsigned fFlags, unsigned uError));
#ifdef VBOX_STRICT
    DECLR0CALLBACKMEMBER(unsigned, pfnR0BthAssertCR3,(PVM pVM, uint32_t cr3, uint32_t cr4, RTGCUINTPTR GCPtr, RTGCUINTPTR cb));
#endif
    /** @} */
} PGMMODEDATA, *PPGMMODEDATA;
1160
1161
1162
/**
 * Converts a PGM pointer into a VM pointer.
 *
 * Walks back from the embedded PGM instance data to the owning VM structure
 * using the self-describing offVM member.
 *
 * @returns Pointer to the VM structure the PGM is part of.
 * @param   pPGM    Pointer to PGM instance data.
 *
 * The argument is fully parenthesized so that expression arguments
 * (e.g. pointer arithmetic) expand correctly.
 */
#define PGM2VM(pPGM)  ( (PVM)((char *)(pPGM) - (pPGM)->offVM) )
1169
1170/**
1171 * PGM Data (part of VM)
1172 */
1173typedef struct PGM
1174{
1175 /** Offset to the VM structure. */
1176 RTINT offVM;
1177
1178 /*
1179 * This will be redefined at least two more times before we're done, I'm sure.
1180 * The current code is only to get on with the coding.
1181 * - 2004-06-10: initial version, bird.
1182 * - 2004-07-02: 1st time, bird.
1183 * - 2004-10-18: 2nd time, bird.
1184 * - 2005-07-xx: 3rd time, bird.
1185 */
1186
1187 /** Pointer to the page table entries for the dynamic page mapping area - GCPtr. */
1188 GCPTRTYPE(PX86PTE) paDynPageMap32BitPTEsGC;
1189 /** Pointer to the page table entries for the dynamic page mapping area - GCPtr. */
1190 GCPTRTYPE(PX86PTEPAE) paDynPageMapPaePTEsGC;
1191
1192 /** The host paging mode. (This is what SUPLib reports.) */
1193 SUPPAGINGMODE enmHostMode;
1194 /** The shadow paging mode. */
1195 PGMMODE enmShadowMode;
1196 /** The guest paging mode. */
1197 PGMMODE enmGuestMode;
1198
1199 /** The current physical address representing in the guest CR3 register. */
1200 RTGCPHYS GCPhysCR3;
1201 /** Pointer to the 5 page CR3 content mapping.
1202 * The first page is always the CR3 (in some form) while the 4 other pages
1203 * are used of the PDs in PAE mode. */
1204 RTGCPTR GCPtrCR3Mapping;
1205 /** The physical address of the currently monitored guest CR3 page.
1206 * When this value is NIL_RTGCPHYS no page is being monitored. */
1207 RTGCPHYS GCPhysGstCR3Monitored;
1208#if HC_ARCH_BITS == 64 || GC_ARCH_BITS == 64
1209 RTGCPHYS GCPhysPadding0; /**< alignment padding. */
1210#endif
1211
1212 /** @name 32-bit Guest Paging.
1213 * @{ */
1214 /** The guest's page directory, HC pointer. */
1215 HCPTRTYPE(PVBOXPD) pGuestPDHC;
1216 /** The guest's page directory, static GC mapping. */
1217 GCPTRTYPE(PVBOXPD) pGuestPDGC;
1218 /** @} */
1219
1220 /** @name PAE Guest Paging.
1221 * @{ */
1222 /** The guest's page directory pointer table, static GC mapping. */
1223 GCPTRTYPE(PX86PDPTR) pGstPaePDPTRGC;
1224 /** The guest's page directory pointer table, HC pointer. */
1225 HCPTRTYPE(PX86PDPTR) pGstPaePDPTRHC;
1226 /** The guest's page directories, HC pointers.
     * These are individual pointers and don't have to be adjacent.
     * They don't have to be up to date - use pgmGstGetPaePD() to access them. */
1229 HCPTRTYPE(PX86PDPAE) apGstPaePDsHC[4];
1230 /** The guest's page directories, static GC mapping.
1231 * Unlike the HC array the first entry can be accessed as a 2048 entry PD.
     * They don't have to be up to date - use pgmGstGetPaePD() to access them. */
1233 GCPTRTYPE(PX86PDPAE) apGstPaePDsGC[4];
1234 /** The physical addresses of the guest page directories (PAE) pointed to by apGstPagePDsHC/GC. */
1235 RTGCPHYS aGCPhysGstPaePDs[4];
1236 /** The physical addresses of the monitored guest page directories (PAE). */
1237 RTGCPHYS aGCPhysGstPaePDsMonitored[4];
1238 /** @} */
1239
1240
1241 /** @name 32-bit Shadow Paging
1242 * @{ */
1243 /** The 32-Bit PD - HC Ptr. */
1244 HCPTRTYPE(PX86PD) pHC32BitPD;
1245 /** The 32-Bit PD - GC Ptr. */
1246 GCPTRTYPE(PX86PD) pGC32BitPD;
1247#if HC_ARCH_BITS == 64 && GC_ARCH_BITS == 32
1248 uint32_t u32Padding1; /**< alignment padding. */
1249#endif
1250 /** The Physical Address (HC) of the 32-Bit PD. */
1251 RTHCPHYS HCPhys32BitPD;
1252 /** @} */
1253
1254 /** @name PAE Shadow Paging
1255 * @{ */
1256 /** The four PDs for the low 4GB - HC Ptr.
1257 * Even though these are 4 pointers, what they point at is a single table.
1258 * Thus, it's possible to walk the 2048 entries starting where apHCPaePDs[0] points. */
1259 HCPTRTYPE(PX86PDPAE) apHCPaePDs[4];
1260 /** The four PDs for the low 4GB - GC Ptr.
1261 * Same kind of mapping as apHCPaePDs. */
1262 GCPTRTYPE(PX86PDPAE) apGCPaePDs[4];
1263 /** The Physical Address (HC) of the four PDs for the low 4GB.
1264 * These are *NOT* 4 contiguous pages. */
1265 RTHCPHYS aHCPhysPaePDs[4];
1266 /** The PAE PDPTR - HC Ptr. */
1267 HCPTRTYPE(PX86PDPTR) pHCPaePDPTR;
1268 /** The Physical Address (HC) of the PAE PDPTR. */
1269 RTHCPHYS HCPhysPaePDPTR;
1270 /** The PAE PDPTR - GC Ptr. */
1271 GCPTRTYPE(PX86PDPTR) pGCPaePDPTR;
1272 /** @} */
1273
1274 /** @name AMD64 Shadow Paging
1275 * Extends PAE Paging.
1276 * @{ */
    /** The Page Map Level 4 table - GC Ptr. */
1278 GCPTRTYPE(PX86PML4) pGCPaePML4;
    /** The Page Map Level 4 table - HC Ptr. */
1280 HCPTRTYPE(PX86PML4) pHCPaePML4;
1281 /** The Physical Address (HC) of the Page Map Level 4 table. */
1282 RTHCPHYS HCPhysPaePML4;
1283 /** @}*/
1284
1285 /** @name Function pointers for Shadow paging.
1286 * @{
1287 */
1288 DECLR3CALLBACKMEMBER(int, pfnR3ShwRelocate,(PVM pVM, RTGCUINTPTR offDelta));
1289 DECLR3CALLBACKMEMBER(int, pfnR3ShwExit,(PVM pVM));
1290 DECLR3CALLBACKMEMBER(int, pfnR3ShwGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
1291 DECLR3CALLBACKMEMBER(int, pfnR3ShwModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
1292 DECLR3CALLBACKMEMBER(int, pfnR3ShwGetPDEByIndex,(PVM pVM, uint32_t iPD, PX86PDEPAE pPde));
1293 DECLR3CALLBACKMEMBER(int, pfnR3ShwSetPDEByIndex,(PVM pVM, uint32_t iPD, X86PDEPAE Pde));
1294 DECLR3CALLBACKMEMBER(int, pfnR3ShwModifyPDEByIndex,(PVM pVM, uint32_t iPD, uint64_t fFlags, uint64_t fMask));
1295
1296 DECLGCCALLBACKMEMBER(int, pfnGCShwGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
1297 DECLGCCALLBACKMEMBER(int, pfnGCShwModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
1298 DECLGCCALLBACKMEMBER(int, pfnGCShwGetPDEByIndex,(PVM pVM, uint32_t iPD, PX86PDEPAE pPde));
1299 DECLGCCALLBACKMEMBER(int, pfnGCShwSetPDEByIndex,(PVM pVM, uint32_t iPD, X86PDEPAE Pde));
1300 DECLGCCALLBACKMEMBER(int, pfnGCShwModifyPDEByIndex,(PVM pVM, uint32_t iPD, uint64_t fFlags, uint64_t fMask));
1301#if GC_ARCH_BITS == 32 && HC_ARCH_BITS == 64
1302 RTGCPTR alignment0; /**< structure size alignment. */
1303#endif
1304
1305 DECLR0CALLBACKMEMBER(int, pfnR0ShwGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
1306 DECLR0CALLBACKMEMBER(int, pfnR0ShwModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
1307 DECLR0CALLBACKMEMBER(int, pfnR0ShwGetPDEByIndex,(PVM pVM, uint32_t iPD, PX86PDEPAE pPde));
1308 DECLR0CALLBACKMEMBER(int, pfnR0ShwSetPDEByIndex,(PVM pVM, uint32_t iPD, X86PDEPAE Pde));
1309 DECLR0CALLBACKMEMBER(int, pfnR0ShwModifyPDEByIndex,(PVM pVM, uint32_t iPD, uint64_t fFlags, uint64_t fMask));
1310
1311 /** @} */
1312
1313 /** @name Function pointers for Guest paging.
1314 * @{
1315 */
1316 DECLR3CALLBACKMEMBER(int, pfnR3GstRelocate,(PVM pVM, RTGCUINTPTR offDelta));
1317 DECLR3CALLBACKMEMBER(int, pfnR3GstExit,(PVM pVM));
1318 DECLR3CALLBACKMEMBER(int, pfnR3GstGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
1319 DECLR3CALLBACKMEMBER(int, pfnR3GstModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
1320 DECLR3CALLBACKMEMBER(int, pfnR3GstGetPDE,(PVM pVM, RTGCUINTPTR GCPtr, PX86PDEPAE pPde));
1321 DECLR3CALLBACKMEMBER(int, pfnR3GstMonitorCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
1322 DECLR3CALLBACKMEMBER(int, pfnR3GstUnmonitorCR3,(PVM pVM));
1323 DECLR3CALLBACKMEMBER(int, pfnR3GstMapCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
1324 DECLR3CALLBACKMEMBER(int, pfnR3GstUnmapCR3,(PVM pVM));
1325 R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnHCGstWriteHandlerCR3;
1326 R3PTRTYPE(const char *) pszHCGstWriteHandlerCR3;
1327
1328 DECLGCCALLBACKMEMBER(int, pfnGCGstGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
1329 DECLGCCALLBACKMEMBER(int, pfnGCGstModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
1330 DECLGCCALLBACKMEMBER(int, pfnGCGstGetPDE,(PVM pVM, RTGCUINTPTR GCPtr, PX86PDEPAE pPde));
1331 DECLGCCALLBACKMEMBER(int, pfnGCGstMonitorCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
1332 DECLGCCALLBACKMEMBER(int, pfnGCGstUnmonitorCR3,(PVM pVM));
1333 DECLGCCALLBACKMEMBER(int, pfnGCGstMapCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
1334 DECLGCCALLBACKMEMBER(int, pfnGCGstUnmapCR3,(PVM pVM));
1335 GCPTRTYPE(PFNPGMGCPHYSHANDLER) pfnGCGstWriteHandlerCR3;
1336
1337 DECLR0CALLBACKMEMBER(int, pfnR0GstGetPage,(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
1338 DECLR0CALLBACKMEMBER(int, pfnR0GstModifyPage,(PVM pVM, RTGCUINTPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
1339 DECLR0CALLBACKMEMBER(int, pfnR0GstGetPDE,(PVM pVM, RTGCUINTPTR GCPtr, PX86PDEPAE pPde));
1340 DECLR0CALLBACKMEMBER(int, pfnR0GstMonitorCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
1341 DECLR0CALLBACKMEMBER(int, pfnR0GstUnmonitorCR3,(PVM pVM));
1342 DECLR0CALLBACKMEMBER(int, pfnR0GstMapCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
1343 DECLR0CALLBACKMEMBER(int, pfnR0GstUnmapCR3,(PVM pVM));
1344 R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnR0GstWriteHandlerCR3;
1345 /** @} */
1346
1347 /** @name Function pointers for Both Shadow and Guest paging.
1348 * @{
1349 */
1350 DECLR3CALLBACKMEMBER(int, pfnR3BthRelocate,(PVM pVM, RTGCUINTPTR offDelta));
1351 DECLR3CALLBACKMEMBER(int, pfnR3BthTrap0eHandler,(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
1352 DECLR3CALLBACKMEMBER(int, pfnR3BthInvalidatePage,(PVM pVM, RTGCPTR GCPtrPage));
1353 DECLR3CALLBACKMEMBER(int, pfnR3BthSyncCR3,(PVM pVM, uint32_t cr0, uint32_t cr3, uint32_t cr4, bool fGlobal));
1354 DECLR3CALLBACKMEMBER(int, pfnR3BthSyncPage,(PVM pVM, VBOXPDE PdeSrc, RTGCUINTPTR GCPtrPage, unsigned cPages, unsigned uError));
1355 DECLR3CALLBACKMEMBER(int, pfnR3BthPrefetchPage,(PVM pVM, RTGCUINTPTR GCPtrPage));
1356 DECLR3CALLBACKMEMBER(int, pfnR3BthVerifyAccessSyncPage,(PVM pVM, RTGCUINTPTR GCPtrPage, unsigned fFlags, unsigned uError));
1357 DECLR3CALLBACKMEMBER(unsigned, pfnR3BthAssertCR3,(PVM pVM, uint32_t cr3, uint32_t cr4, RTGCUINTPTR GCPtr, RTGCUINTPTR cb));
1358
1359 DECLR0CALLBACKMEMBER(int, pfnR0BthTrap0eHandler,(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
1360 DECLR0CALLBACKMEMBER(int, pfnR0BthInvalidatePage,(PVM pVM, RTGCPTR GCPtrPage));
1361 DECLR0CALLBACKMEMBER(int, pfnR0BthSyncCR3,(PVM pVM, uint32_t cr0, uint32_t cr3, uint32_t cr4, bool fGlobal));
1362 DECLR0CALLBACKMEMBER(int, pfnR0BthSyncPage,(PVM pVM, VBOXPDE PdeSrc, RTGCUINTPTR GCPtrPage, unsigned cPages, unsigned uError));
1363 DECLR0CALLBACKMEMBER(int, pfnR0BthPrefetchPage,(PVM pVM, RTGCUINTPTR GCPtrPage));
1364 DECLR0CALLBACKMEMBER(int, pfnR0BthVerifyAccessSyncPage,(PVM pVM, RTGCUINTPTR GCPtrPage, unsigned fFlags, unsigned uError));
1365 DECLR0CALLBACKMEMBER(unsigned, pfnR0BthAssertCR3,(PVM pVM, uint32_t cr3, uint32_t cr4, RTGCUINTPTR GCPtr, RTGCUINTPTR cb));
1366
1367 DECLGCCALLBACKMEMBER(int, pfnGCBthTrap0eHandler,(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault));
1368 DECLGCCALLBACKMEMBER(int, pfnGCBthInvalidatePage,(PVM pVM, RTGCPTR GCPtrPage));
1369 DECLGCCALLBACKMEMBER(int, pfnGCBthSyncCR3,(PVM pVM, uint32_t cr0, uint32_t cr3, uint32_t cr4, bool fGlobal));
1370 DECLGCCALLBACKMEMBER(int, pfnGCBthSyncPage,(PVM pVM, VBOXPDE PdeSrc, RTGCUINTPTR GCPtrPage, unsigned cPages, unsigned uError));
1371 DECLGCCALLBACKMEMBER(int, pfnGCBthPrefetchPage,(PVM pVM, RTGCUINTPTR GCPtrPage));
1372 DECLGCCALLBACKMEMBER(int, pfnGCBthVerifyAccessSyncPage,(PVM pVM, RTGCUINTPTR GCPtrPage, unsigned fFlags, unsigned uError));
1373 DECLGCCALLBACKMEMBER(unsigned, pfnGCBthAssertCR3,(PVM pVM, uint32_t cr3, uint32_t cr4, RTGCUINTPTR GCPtr, RTGCUINTPTR cb));
1374#if GC_ARCH_BITS == 32 && HC_ARCH_BITS == 64
1375 RTGCPTR alignment2; /**< structure size alignment. */
1376#endif
1377 /** @} */
1378
1379 /** Pointer to SHW+GST mode data (function pointers).
1380 * The index into this table is made up from */
1381 R3PTRTYPE(PPGMMODEDATA) paModeData;
1382
1383
1384 /** Pointer to the list of RAM ranges (Phys GC -> Phys HC conversion) - for HC.
1385 * This is sorted by physical address and contains no overlaps.
1386 * The memory locks and other conversions are managed by MM at the moment.
1387 */
1388 HCPTRTYPE(PPGMRAMRANGE) pRamRangesHC;
1389 /** Pointer to the list of RAM ranges (Phys GC -> Phys HC conversion) - for GC.
1390 * This is sorted by physical address and contains no overlaps.
1391 * The memory locks and other conversions are managed by MM at the moment.
1392 */
1393 GCPTRTYPE(PPGMRAMRANGE) pRamRangesGC;
1394 /** The configured RAM size. */
1395 RTUINT cbRamSize;
1396
1397 /** PGM offset based trees - HC Ptr. */
1398 HCPTRTYPE(PPGMTREES) pTreesHC;
1399 /** PGM offset based trees - GC Ptr. */
1400 GCPTRTYPE(PPGMTREES) pTreesGC;
1401
1402 /** Linked list of GC mappings - for GC.
1403 * The list is sorted ascending on address.
1404 */
1405 GCPTRTYPE(PPGMMAPPING) pMappingsGC;
1406 /** Linked list of GC mappings - for HC.
1407 * The list is sorted ascending on address.
1408 */
1409 R3PTRTYPE(PPGMMAPPING) pMappingsR3;
1410 /** Linked list of GC mappings - for R0.
1411 * The list is sorted ascending on address.
1412 */
1413 R0PTRTYPE(PPGMMAPPING) pMappingsR0;
1414
1415 /** If set no conflict checks are required. (boolean) */
1416 bool fMappingsFixed;
1417 /** If set, then no mappings are put into the shadow page table. (boolean) */
1418 bool fDisableMappings;
1419 /** Size of fixed mapping */
1420 uint32_t cbMappingFixed;
1421 /** Base address (GC) of fixed mapping */
1422 RTGCPTR GCPtrMappingFixed;
1423#if HC_ARCH_BITS == 64 && GC_ARCH_BITS == 32
1424 uint32_t u32Padding0; /**< alignment padding. */
1425#endif
1426
1427
1428 /** @name Intermediate Context
1429 * @{ */
1430 /** Pointer to the intermediate page directory - Normal. */
1431 HCPTRTYPE(PX86PD) pInterPD;
    /** Pointer to the intermediate page tables - Normal.
1433 * There are two page tables, one for the identity mapping and one for
1434 * the host context mapping (of the core code). */
1435 HCPTRTYPE(PX86PT) apInterPTs[2];
    /** Pointer to the intermediate page tables - PAE. */
1437 HCPTRTYPE(PX86PTPAE) apInterPaePTs[2];
    /** Pointer to the intermediate page directory - PAE. */
1439 HCPTRTYPE(PX86PDPAE) apInterPaePDs[4];
    /** Pointer to the intermediate page directory pointer table - PAE. */
1441 HCPTRTYPE(PX86PDPTR) pInterPaePDPTR;
    /** Pointer to the intermediate page-map level 4 - AMD64. */
1443 HCPTRTYPE(PX86PML4) pInterPaePML4;
    /** Pointer to the intermediate page directory pointer table - AMD64. */
1445 HCPTRTYPE(PX86PDPTR) pInterPaePDPTR64;
1446 /** The Physical Address (HC) of the intermediate Page Directory - Normal. */
1447 RTHCPHYS HCPhysInterPD;
1448 /** The Physical Address (HC) of the intermediate Page Directory Pointer Table - PAE. */
1449 RTHCPHYS HCPhysInterPaePDPTR;
1450 /** The Physical Address (HC) of the intermediate Page Map Level 4 table - AMD64. */
1451 RTHCPHYS HCPhysInterPaePML4;
1452 /** @} */
1453
1454 /** Base address of the dynamic page mapping area.
1455 * The array is MM_HYPER_DYNAMIC_SIZE bytes big.
1456 */
1457 GCPTRTYPE(uint8_t *) pbDynPageMapBaseGC;
1458 /** The index of the last entry used in the dynamic page mapping area. */
1459 RTUINT iDynPageMapLast;
1460 /** Cache containing the last entries in the dynamic page mapping area.
1461 * The cache size is covering half of the mapping area. */
1462 RTHCPHYS aHCPhysDynPageMapCache[MM_HYPER_DYNAMIC_SIZE >> (PAGE_SHIFT + 1)];
1463
1464 /** A20 gate mask.
1465 * Our current approach to A20 emulation is to let REM do it and don't bother
1466 * anywhere else. The interesting Guests will be operating with it enabled anyway.
     * But should the need arise, we'll subject physical addresses to this mask. */
1468 RTGCPHYS GCPhysA20Mask;
1469 /** A20 gate state - boolean! */
1470 RTUINT fA20Enabled;
1471
1472 /** What needs syncing (PGM_SYNC_*).
1473 * This is used to queue operations for PGMSyncCR3, PGMInvalidatePage,
1474 * PGMFlushTLB, and PGMR3Load. */
1475 RTUINT fSyncFlags;
1476
1477#if HC_ARCH_BITS == 64 && GC_ARCH_BITS == 32
1478 RTUINT uPadding3; /**< alignment padding. */
1479#endif
1480 /** PGM critical section.
1481 * This protects the physical & virtual access handlers, ram ranges,
1482 * and the page flag updating (some of it anyway).
1483 */
1484 PDMCRITSECT CritSect;
1485
1486 /** Shadow Page Pool - HC Ptr. */
1487 HCPTRTYPE(PPGMPOOL) pPoolHC;
1488 /** Shadow Page Pool - GC Ptr. */
1489 GCPTRTYPE(PPGMPOOL) pPoolGC;
1490
1491 /** We're not in a state which permits writes to guest memory.
1492 * (Only used in strict builds.) */
1493 bool fNoMorePhysWrites;
1494
1495 /** Flush the cache on the next access. */
1496 bool fPhysCacheFlushPending;
1497/** @todo r=bird: Fix member names!*/
1498 /** PGMPhysRead cache */
1499 PGMPHYSCACHE pgmphysreadcache;
1500 /** PGMPhysWrite cache */
1501 PGMPHYSCACHE pgmphyswritecache;
1502
1503 /** @name Release Statistics
1504 * @{ */
1505 /** The number of times the guest has switched mode since last reset or statistics reset. */
1506 STAMCOUNTER cGuestModeChanges;
1507 /** @} */
1508
1509#ifdef VBOX_WITH_STATISTICS
1510 /** GC: Which statistic this \#PF should be attributed to. */
1511 GCPTRTYPE(PSTAMPROFILE) pStatTrap0eAttributionGC;
1512 RTGCPTR padding0;
1513 /** HC: Which statistic this \#PF should be attributed to. */
1514 HCPTRTYPE(PSTAMPROFILE) pStatTrap0eAttributionHC;
1515 RTHCPTR padding1;
1516 STAMPROFILE StatGCTrap0e; /**< GC: PGMGCTrap0eHandler() profiling. */
1517 STAMPROFILE StatTrap0eCSAM; /**< Profiling of the Trap0eHandler body when the cause is CSAM. */
1518 STAMPROFILE StatTrap0eDirtyAndAccessedBits; /**< Profiling of the Trap0eHandler body when the cause is dirty and/or accessed bit emulation. */
1519 STAMPROFILE StatTrap0eGuestTrap; /**< Profiling of the Trap0eHandler body when the cause is a guest trap. */
1520 STAMPROFILE StatTrap0eHndPhys; /**< Profiling of the Trap0eHandler body when the cause is a physical handler. */
1521 STAMPROFILE StatTrap0eHndVirt; /**< Profiling of the Trap0eHandler body when the cause is a virtual handler. */
1522 STAMPROFILE StatTrap0eHndUnhandled; /**< Profiling of the Trap0eHandler body when the cause is access outside the monitored areas of a monitored page. */
1523 STAMPROFILE StatTrap0eMisc; /**< Profiling of the Trap0eHandler body when the cause is not known. */
1524 STAMPROFILE StatTrap0eOutOfSync; /**< Profiling of the Trap0eHandler body when the cause is an out-of-sync page. */
1525 STAMPROFILE StatTrap0eOutOfSyncHndPhys; /**< Profiling of the Trap0eHandler body when the cause is an out-of-sync physical handler page. */
1526 STAMPROFILE StatTrap0eOutOfSyncHndVirt; /**< Profiling of the Trap0eHandler body when the cause is an out-of-sync virtual handler page. */
1527 STAMPROFILE StatTrap0eOutOfSyncObsHnd; /**< Profiling of the Trap0eHandler body when the cause is an obsolete handler page. */
1528 STAMPROFILE StatTrap0eSyncPT; /**< Profiling of the Trap0eHandler body when the cause is lazy syncing of a PT. */
1529
1530 STAMCOUNTER StatTrap0eMapHandler; /**< Number of traps due to access handlers in mappings. */
1531 STAMCOUNTER StatGCTrap0eConflicts; /**< GC: The number of times \#PF was caused by an undetected conflict. */
1532
1533 STAMCOUNTER StatGCTrap0eUSNotPresentRead;
1534 STAMCOUNTER StatGCTrap0eUSNotPresentWrite;
1535 STAMCOUNTER StatGCTrap0eUSWrite;
1536 STAMCOUNTER StatGCTrap0eUSReserved;
1537 STAMCOUNTER StatGCTrap0eUSRead;
1538
1539 STAMCOUNTER StatGCTrap0eSVNotPresentRead;
1540 STAMCOUNTER StatGCTrap0eSVNotPresentWrite;
1541 STAMCOUNTER StatGCTrap0eSVWrite;
1542 STAMCOUNTER StatGCTrap0eSVReserved;
1543
1544 STAMCOUNTER StatGCTrap0eUnhandled;
1545 STAMCOUNTER StatGCTrap0eMap;
1546
1547 /** GC: PGMSyncPT() profiling. */
1548 STAMPROFILE StatGCSyncPT;
1549 /** GC: The number of times PGMSyncPT() needed to allocate page tables. */
1550 STAMCOUNTER StatGCSyncPTAlloc;
1551 /** GC: The number of times PGMSyncPT() detected conflicts. */
1552 STAMCOUNTER StatGCSyncPTConflict;
1553 /** GC: The number of times PGMSyncPT() failed. */
1554 STAMCOUNTER StatGCSyncPTFailed;
1555 /** GC: PGMGCInvalidatePage() profiling. */
1556 STAMPROFILE StatGCInvalidatePage;
1557 /** GC: The number of times PGMGCInvalidatePage() was called for a 4KB page. */
1558 STAMCOUNTER StatGCInvalidatePage4KBPages;
1559 /** GC: The number of times PGMGCInvalidatePage() was called for a 4MB page. */
1560 STAMCOUNTER StatGCInvalidatePage4MBPages;
1561 /** GC: The number of times PGMGCInvalidatePage() skipped a 4MB page. */
1562 STAMCOUNTER StatGCInvalidatePage4MBPagesSkip;
1563 /** GC: The number of times PGMGCInvalidatePage() was called for a not accessed page directory. */
1564 STAMCOUNTER StatGCInvalidatePagePDNAs;
1565 /** GC: The number of times PGMGCInvalidatePage() was called for a not present page directory. */
1566 STAMCOUNTER StatGCInvalidatePagePDNPs;
1567 /** GC: The number of times PGMGCInvalidatePage() was called for a page directory containing mappings (no conflict). */
1568 STAMCOUNTER StatGCInvalidatePagePDMappings;
1569 /** GC: The number of times PGMGCInvalidatePage() was called for an out of sync page directory. */
1570 STAMCOUNTER StatGCInvalidatePagePDOutOfSync;
    /** GC: The number of times PGMGCInvalidatePage() was skipped due to not present shw or pending SyncCR3. */
1572 STAMCOUNTER StatGCInvalidatePageSkipped;
1573 /** GC: The number of times user page is out of sync was detected in GC. */
1574 STAMCOUNTER StatGCPageOutOfSyncUser;
1575 /** GC: The number of times supervisor page is out of sync was detected in GC. */
1576 STAMCOUNTER StatGCPageOutOfSyncSupervisor;
    /** GC: The number of dynamic page mapping cache misses. */
1578 STAMCOUNTER StatDynMapCacheMisses;
    /** GC: The number of dynamic page mapping cache hits. */
1580 STAMCOUNTER StatDynMapCacheHits;
1581 /** GC: The number of times pgmGCGuestPDWriteHandler() was successfully called. */
1582 STAMCOUNTER StatGCGuestCR3WriteHandled;
1583 /** GC: The number of times pgmGCGuestPDWriteHandler() was called and we had to fall back to the recompiler. */
1584 STAMCOUNTER StatGCGuestCR3WriteUnhandled;
1585 /** GC: The number of times pgmGCGuestPDWriteHandler() was called and a conflict was detected. */
1586 STAMCOUNTER StatGCGuestCR3WriteConflict;
1587 /** GC: Number of out-of-sync handled pages. */
1588 STAMCOUNTER StatHandlersOutOfSync;
1589 /** GC: Number of traps due to physical access handlers. */
1590 STAMCOUNTER StatHandlersPhysical;
1591 /** GC: Number of traps due to virtual access handlers. */
1592 STAMCOUNTER StatHandlersVirtual;
1593 /** GC: Number of traps due to virtual access handlers found by physical address. */
1594 STAMCOUNTER StatHandlersVirtualByPhys;
1595 /** GC: Number of traps due to virtual access handlers found by virtual address (without proper physical flags). */
1596 STAMCOUNTER StatHandlersVirtualUnmarked;
1597 /** GC: Number of traps due to access outside range of monitored page(s). */
1598 STAMCOUNTER StatHandlersUnhandled;
1599
1600 /** GC: The number of times pgmGCGuestROMWriteHandler() was successfully called. */
1601 STAMCOUNTER StatGCGuestROMWriteHandled;
1602 /** GC: The number of times pgmGCGuestROMWriteHandler() was called and we had to fall back to the recompiler */
1603 STAMCOUNTER StatGCGuestROMWriteUnhandled;
1604
1605 /** HC: PGMR3InvalidatePage() profiling. */
1606 STAMPROFILE StatHCInvalidatePage;
1607 /** HC: The number of times PGMR3InvalidatePage() was called for a 4KB page. */
1608 STAMCOUNTER StatHCInvalidatePage4KBPages;
1609 /** HC: The number of times PGMR3InvalidatePage() was called for a 4MB page. */
1610 STAMCOUNTER StatHCInvalidatePage4MBPages;
1611 /** HC: The number of times PGMR3InvalidatePage() skipped a 4MB page. */
1612 STAMCOUNTER StatHCInvalidatePage4MBPagesSkip;
1613 /** HC: The number of times PGMR3InvalidatePage() was called for a not accessed page directory. */
1614 STAMCOUNTER StatHCInvalidatePagePDNAs;
1615 /** HC: The number of times PGMR3InvalidatePage() was called for a not present page directory. */
1616 STAMCOUNTER StatHCInvalidatePagePDNPs;
1617 /** HC: The number of times PGMR3InvalidatePage() was called for a page directory containing mappings (no conflict). */
1618 STAMCOUNTER StatHCInvalidatePagePDMappings;
1619 /** HC: The number of times PGMGCInvalidatePage() was called for an out of sync page directory. */
1620 STAMCOUNTER StatHCInvalidatePagePDOutOfSync;
    /** HC: The number of times PGMR3InvalidatePage() was skipped due to not present shw or pending SyncCR3. */
1622 STAMCOUNTER StatHCInvalidatePageSkipped;
1623 /** HC: PGMR3SyncPT() profiling. */
1624 STAMPROFILE StatHCSyncPT;
1625 /** HC: pgmr3SyncPTResolveConflict() profiling (includes the entire relocation). */
1626 STAMPROFILE StatHCResolveConflict;
1627 /** HC: Number of times PGMR3CheckMappingConflicts() detected a conflict. */
1628 STAMCOUNTER StatHCDetectedConflicts;
1629 /** HC: The total number of times pgmHCGuestPDWriteHandler() was called. */
1630 STAMCOUNTER StatHCGuestPDWrite;
1631 /** HC: The number of times pgmHCGuestPDWriteHandler() detected a conflict */
1632 STAMCOUNTER StatHCGuestPDWriteConflict;
1633
1634 /** HC: The number of pages marked not present for accessed bit emulation. */
1635 STAMCOUNTER StatHCAccessedPage;
1636 /** HC: The number of pages marked read-only for dirty bit tracking. */
1637 STAMCOUNTER StatHCDirtyPage;
1638 /** HC: The number of pages marked read-only for dirty bit tracking. */
1639 STAMCOUNTER StatHCDirtyPageBig;
1640 /** HC: The number of traps generated for dirty bit tracking. */
1641 STAMCOUNTER StatHCDirtyPageTrap;
1642 /** HC: The number of pages already dirty or readonly. */
1643 STAMCOUNTER StatHCDirtyPageSkipped;
1644
1645 /** GC: The number of pages marked not present for accessed bit emulation. */
1646 STAMCOUNTER StatGCAccessedPage;
1647 /** GC: The number of pages marked read-only for dirty bit tracking. */
1648 STAMCOUNTER StatGCDirtyPage;
1649 /** GC: The number of pages marked read-only for dirty bit tracking. */
1650 STAMCOUNTER StatGCDirtyPageBig;
1651 /** GC: The number of traps generated for dirty bit tracking. */
1652 STAMCOUNTER StatGCDirtyPageTrap;
1653 /** GC: The number of pages already dirty or readonly. */
1654 STAMCOUNTER StatGCDirtyPageSkipped;
1655 /** GC: The number of pages marked dirty because of write accesses. */
1656 STAMCOUNTER StatGCDirtiedPage;
1657 /** GC: The number of pages already marked dirty because of write accesses. */
1658 STAMCOUNTER StatGCPageAlreadyDirty;
1659 /** GC: The number of real pages faults during dirty bit tracking. */
1660 STAMCOUNTER StatGCDirtyTrackRealPF;
1661
1662 /** GC: Profiling of the PGMTrackDirtyBit() body */
1663 STAMPROFILE StatGCDirtyBitTracking;
1664 /** HC: Profiling of the PGMTrackDirtyBit() body */
1665 STAMPROFILE StatHCDirtyBitTracking;
1666
1667 /** GC: Profiling of the PGMGstModifyPage() body */
1668 STAMPROFILE StatGCGstModifyPage;
1669 /** HC: Profiling of the PGMGstModifyPage() body */
1670 STAMPROFILE StatHCGstModifyPage;
1671
1672 /** GC: The number of time we've marked a PD not present from SyncPage to virtualize the accessed bit. */
1673 STAMCOUNTER StatGCSyncPagePDNAs;
1674 /** GC: The number of time we've encountered an out-of-sync PD in SyncPage. */
1675 STAMCOUNTER StatGCSyncPagePDOutOfSync;
1676 /** HC: The number of time we've marked a PD not present from SyncPage to virtualize the accessed bit. */
1677 STAMCOUNTER StatHCSyncPagePDNAs;
1678 /** HC: The number of time we've encountered an out-of-sync PD in SyncPage. */
1679 STAMCOUNTER StatHCSyncPagePDOutOfSync;
1680
1681 STAMCOUNTER StatSynPT4kGC;
1682 STAMCOUNTER StatSynPT4kHC;
1683 STAMCOUNTER StatSynPT4MGC;
1684 STAMCOUNTER StatSynPT4MHC;
1685
1686 /** Profiling of the PGMFlushTLB() body. */
1687 STAMPROFILE StatFlushTLB;
1688 /** The number of times PGMFlushTLB was called with a new CR3, non-global. (switch) */
1689 STAMCOUNTER StatFlushTLBNewCR3;
1690 /** The number of times PGMFlushTLB was called with a new CR3, global. (switch) */
1691 STAMCOUNTER StatFlushTLBNewCR3Global;
1692 /** The number of times PGMFlushTLB was called with the same CR3, non-global. (flush) */
1693 STAMCOUNTER StatFlushTLBSameCR3;
1694 /** The number of times PGMFlushTLB was called with the same CR3, global. (flush) */
1695 STAMCOUNTER StatFlushTLBSameCR3Global;
1696
1697 STAMPROFILE StatGCSyncCR3; /**< GC: PGMSyncCR3() profiling. */
1698 STAMPROFILE StatGCSyncCR3Handlers; /**< GC: Profiling of the PGMSyncCR3() update handler section. */
1699 STAMPROFILE StatGCSyncCR3HandlerVirtualReset; /**< GC: Profiling of the virtual handler resets. */
1700 STAMPROFILE StatGCSyncCR3HandlerVirtualUpdate; /**< GC: Profiling of the virtual handler updates. */
1701 STAMCOUNTER StatGCSyncCR3Global; /**< GC: The number of global CR3 syncs. */
1702 STAMCOUNTER StatGCSyncCR3NotGlobal; /**< GC: The number of non-global CR3 syncs. */
1703 STAMCOUNTER StatGCSyncCR3DstFreed; /**< GC: The number of times we've had to free a shadow entry. */
1704 STAMCOUNTER StatGCSyncCR3DstFreedSrcNP; /**< GC: The number of times we've had to free a shadow entry for which the source entry was not present. */
1705 STAMCOUNTER StatGCSyncCR3DstNotPresent; /**< GC: The number of times we've encountered a not present shadow entry for a present guest entry. */
1706 STAMCOUNTER StatGCSyncCR3DstSkippedGlobalPD; /**< GC: The number of times a global page directory wasn't flushed. */
1707 STAMCOUNTER StatGCSyncCR3DstSkippedGlobalPT; /**< GC: The number of times a page table with only global entries wasn't flushed. */
1708 STAMCOUNTER StatGCSyncCR3DstCacheHit; /**< GC: The number of times we got some kind of cache hit on a page table. */
1709
1710 STAMPROFILE StatHCSyncCR3; /**< HC: PGMSyncCR3() profiling. */
1711 STAMPROFILE StatHCSyncCR3Handlers; /**< HC: Profiling of the PGMSyncCR3() update handler section. */
1712 STAMPROFILE StatHCSyncCR3HandlerVirtualReset; /**< HC: Profiling of the virtual handler resets. */
1713 STAMPROFILE StatHCSyncCR3HandlerVirtualUpdate; /**< HC: Profiling of the virtual handler updates. */
1714 STAMCOUNTER StatHCSyncCR3Global; /**< HC: The number of global CR3 syncs. */
1715 STAMCOUNTER StatHCSyncCR3NotGlobal; /**< HC: The number of non-global CR3 syncs. */
1716 STAMCOUNTER StatHCSyncCR3DstFreed; /**< HC: The number of times we've had to free a shadow entry. */
1717 STAMCOUNTER StatHCSyncCR3DstFreedSrcNP; /**< HC: The number of times we've had to free a shadow entry for which the source entry was not present. */
1718 STAMCOUNTER StatHCSyncCR3DstNotPresent; /**< HC: The number of times we've encountered a not present shadow entry for a present guest entry. */
1719 STAMCOUNTER StatHCSyncCR3DstSkippedGlobalPD; /**< HC: The number of times a global page directory wasn't flushed. */
1720 STAMCOUNTER StatHCSyncCR3DstSkippedGlobalPT; /**< HC: The number of times a page table with only global entries wasn't flushed. */
1721 STAMCOUNTER StatHCSyncCR3DstCacheHit; /**< HC: The number of times we got some kind of cache hit on a page table. */
1722
1723 /** GC: Profiling of pgmHandlerVirtualFindByPhysAddr. */
1724 STAMPROFILE StatVirtHandleSearchByPhysGC;
1725 /** HC: Profiling of pgmHandlerVirtualFindByPhysAddr. */
1726 STAMPROFILE StatVirtHandleSearchByPhysHC;
1727 /** HC: The number of times PGMR3HandlerPhysicalReset is called. */
1728 STAMCOUNTER StatHandlePhysicalReset;
1729
1730 STAMPROFILE StatCheckPageFault;
1731 STAMPROFILE StatLazySyncPT;
1732 STAMPROFILE StatMapping;
1733 STAMPROFILE StatOutOfSync;
1734 STAMPROFILE StatHandlers;
1735 STAMPROFILE StatEIPHandlers;
1736 STAMPROFILE StatHCPrefetch;
1737
1738# ifdef PGMPOOL_WITH_GCPHYS_TRACKING
1739 /** The number of first time shadowings. */
1740 STAMCOUNTER StatTrackVirgin;
1741 /** The number of times switching to cRef2, i.e. the page is being shadowed by two PTs. */
1742 STAMCOUNTER StatTrackAliased;
1743 /** The number of times we're tracking using cRef2. */
1744 STAMCOUNTER StatTrackAliasedMany;
1745 /** The number of times we're hitting pages which has overflowed cRef2. */
1746 STAMCOUNTER StatTrackAliasedLots;
    /** The number of times the extent list grows too long. */
1748 STAMCOUNTER StatTrackOverflows;
1749 /** Profiling of SyncPageWorkerTrackDeref (expensive). */
1750 STAMPROFILE StatTrackDeref;
1751# endif
1752
1753 /** Allocated mbs of guest ram */
1754 STAMCOUNTER StatDynRamTotal;
1755 /** Nr of pgmr3PhysGrowRange calls. */
1756 STAMCOUNTER StatDynRamGrow;
1757
1758 STAMCOUNTER StatGCTrap0ePD[X86_PG_ENTRIES];
1759 STAMCOUNTER StatGCSyncPtPD[X86_PG_ENTRIES];
1760 STAMCOUNTER StatGCSyncPagePD[X86_PG_ENTRIES];
1761#endif
1762} PGM, *PPGM;
1763
1764
/** @name PGM::fSyncFlags Flags
 * These are queued operation flags consumed by PGMSyncCR3, PGMInvalidatePage,
 * PGMFlushTLB, and PGMR3Load (see PGM::fSyncFlags above).
 * NOTE(review): bits 3-7 are currently unused.
 * @{
 */
/** Updates the MM_RAM_FLAGS_VIRTUAL_HANDLER page bit. */
#define PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL        BIT(0)
/** Always sync CR3. */
#define PGM_SYNC_ALWAYS                         BIT(1)
/** Check monitoring on next CR3 (re)load and invalidate page. */
#define PGM_SYNC_MONITOR_CR3                    BIT(2)
/** Clear the page pool (a light weight flush). */
#define PGM_SYNC_CLEAR_PGM_POOL                 BIT(8)
/** @} */
1777
1778
__BEGIN_DECLS

/* Guest page-directory / ROM / cached-PT write access handlers. */
PGMGCDECL(int) pgmGCGuestPDWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, void *pvFault, RTGCPHYS GCPhysFault, void *pvUser);
PGMDECL(int) pgmGuestROMWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, void *pvFault, RTGCPHYS GCPhysFault, void *pvUser);
PGMGCDECL(int) pgmCachePTWriteGC(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser);
/* Mapping management: conflict resolution, lookup, relocation and mode switching. */
int pgmR3SyncPTResolveConflict(PVM pVM, PPGMMAPPING pMapping, PVBOXPD pPDSrc, int iPDOld);
PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr);
void pgmR3MapRelocate(PVM pVM, PPGMMAPPING pMapping, int iPDOld, int iPDNew);
int pgmR3ChangeMode(PVM pVM, PGMMODE enmGuestMode);
/* PGM lock (wraps PGM::CritSect). */
int pgmLock(PVM pVM);
void pgmUnlock(PVM pVM);

/* Physical and virtual access handler helpers. */
void pgmR3HandlerPhysicalUpdateAll(PVM pVM);
int pgmHandlerVirtualFindByPhysAddr(PVM pVM, RTGCPHYS GCPhys, PPGMVIRTHANDLER *ppVirt, unsigned *piPage);
DECLCALLBACK(int) pgmHandlerVirtualResetOne(PAVLROGCPTRNODECORE pNode, void *pvUser);
#ifdef VBOX_STRICT
void pgmHandlerVirtualDumpPhysPages(PVM pVM);
#else
# define pgmHandlerVirtualDumpPhysPages(a) do { } while (0)
#endif
DECLCALLBACK(void) pgmR3InfoHandlers(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);


#ifdef IN_RING3
/* Ring-3 only: dynamic RAM growth and shadow page pool lifecycle. */
int pgmr3PhysGrowRange(PVM pVM, RTGCPHYS GCPhys);

int pgmR3PoolInit(PVM pVM);
void pgmR3PoolRelocate(PVM pVM);
void pgmR3PoolReset(PVM pVM);

#endif
#ifdef IN_GC
void *pgmGCPoolMapPage(PVM pVM, PPGMPOOLPAGE pPage);
#endif
/* Shadow page pool: allocation, lookup, freeing and flushing. */
int pgmPoolAlloc(PVM pVM, RTGCPHYS GCPhys, PGMPOOLKIND enmKind, uint16_t iUser, uint16_t iUserTable, PPPGMPOOLPAGE ppPage);
PPGMPOOLPAGE pgmPoolGetPageByHCPhys(PVM pVM, RTHCPHYS HCPhys);
void pgmPoolFree(PVM pVM, RTHCPHYS HCPhys, uint16_t iUser, uint16_t iUserTable);
void pgmPoolFreeByPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage, uint16_t iUser, uint16_t iUserTable);
int pgmPoolFlushPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage);
void pgmPoolFlushAll(PVM pVM);
void pgmPoolClearAll(PVM pVM);
/* GCPhys back-reference tracking (cRef/cRef2 extent lists). */
void pgmPoolTrackFlushGCPhysPT(PVM pVM, PPGMPAGE pPhysPage, uint16_t iShw, uint16_t cRefs);
void pgmPoolTrackFlushGCPhysPTs(PVM pVM, PPGMPAGE pPhysPage, uint16_t iPhysExt);
int pgmPoolTrackFlushGCPhysPTsSlow(PVM pVM, PPGMPAGE pPhysPage);
PPGMPOOLPHYSEXT pgmPoolTrackPhysExtAlloc(PVM pVM, uint16_t *piPhysExt);
void pgmPoolTrackPhysExtFree(PVM pVM, uint16_t iPhysExt);
void pgmPoolTrackPhysExtFreeList(PVM pVM, uint16_t iPhysExt);
uint16_t pgmPoolTrackPhysExtAddref(PVM pVM, uint16_t u16, uint16_t iShwPT);
void pgmPoolTrackPhysExtDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPoolPage, PPGMPAGE pPhysPage);
#ifdef PGMPOOL_WITH_MONITORING
/* Write monitoring of shadowed guest page tables; the pvAddress type differs per context. */
# ifdef IN_RING3
void pgmPoolMonitorChainChanging(PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTGCPHYS GCPhysFault, RTHCPTR pvAddress, PDISCPUSTATE pCpu);
# else
void pgmPoolMonitorChainChanging(PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTGCPHYS GCPhysFault, RTGCPTR pvAddress, PDISCPUSTATE pCpu);
# endif
int pgmPoolMonitorChainFlush(PPGMPOOL pPool, PPGMPOOLPAGE pPage);
void pgmPoolMonitorModifiedInsert(PPGMPOOL pPool, PPGMPOOLPAGE pPage);
void pgmPoolMonitorModifiedClearAll(PVM pVM);
int pgmPoolMonitorMonitorCR3(PPGMPOOL pPool, uint16_t idxRoot, RTGCPHYS GCPhysCR3);
int pgmPoolMonitorUnmonitorCR3(PPGMPOOL pPool, uint16_t idxRoot);
#endif

__END_DECLS
1842
1843
1844/**
1845 * Gets the PGMPAGE structure for a guest page.
1846 *
1847 * @returns Pointer to the page on success.
1848 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
1849 *
1850 * @param pPGM PGM handle.
1851 * @param GCPhys The GC physical address.
1852 */
1853DECLINLINE(PPGMPAGE) pgmPhysGetPage(PPGM pPGM, RTGCPHYS GCPhys)
1854{
1855 /*
1856 * Optimize for the first range.
1857 */
1858 PPGMRAMRANGE pRam = CTXSUFF(pPGM->pRamRanges);
1859 RTGCPHYS off = GCPhys - pRam->GCPhys;
1860 if (RT_UNLIKELY(off >= pRam->cb))
1861 {
1862 do
1863 {
1864 pRam = CTXSUFF(pRam->pNext);
1865 if (RT_UNLIKELY(!pRam))
1866 return NULL;
1867 off = GCPhys - pRam->GCPhys;
1868 } while (off >= pRam->cb);
1869 }
1870 return &pRam->aPages[off >> PAGE_SHIFT];
1871}
1872
1873
/**
 * Gets the PGMPAGE structure for a guest page.
 *
 * Old Phys code: Will make sure the page is present.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS and a valid *ppPage on success.
 * @retval  VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
 *
 * @param   pPGM        PGM handle.
 * @param   GCPhys      The GC physical address.
 * @param   ppPage      Where to store the page pointer on success.
 */
DECLINLINE(int) pgmPhysGetPageEx(PPGM pPGM, RTGCPHYS GCPhys, PPPGMPAGE ppPage)
{
    /*
     * Optimize for the first range.
     */
    PPGMRAMRANGE pRam = CTXSUFF(pPGM->pRamRanges);
    RTGCPHYS off = GCPhys - pRam->GCPhys;
    if (RT_UNLIKELY(off >= pRam->cb))
    {
        /* Not in the first range; walk the rest of the list. */
        do
        {
            pRam = CTXSUFF(pRam->pNext);
            if (RT_UNLIKELY(!pRam))
            {
                *ppPage = NULL; /* avoid incorrect and very annoying GCC warnings */
                return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
            }
            off = GCPhys - pRam->GCPhys;
        } while (off >= pRam->cb);
    }
    *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
#ifndef NEW_PHYS_CODE

    /*
     * Make sure it's present.
     * A dynamically allocated range may not have backing yet; grow it directly
     * in ring-3, or via a host call from the other contexts.
     */
    if (RT_UNLIKELY(    !PGM_PAGE_GET_HCPHYS(*ppPage)
                    &&  (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)))
    {
#ifdef IN_RING3
        int rc = pgmr3PhysGrowRange(PGM2VM(pPGM), GCPhys);
#else
        int rc = CTXALLMID(VMM, CallHost)(PGM2VM(pPGM), VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
#endif
        if (VBOX_FAILURE(rc))
        {
            *ppPage = NULL; /* avoid incorrect and very annoying GCC warnings */
            return rc;
        }
        Assert(rc == VINF_SUCCESS);
    }
#endif
    return VINF_SUCCESS;
}
1931
1932
1933
1934
/**
 * Gets the PGMPAGE structure for a guest page.
 *
 * Old Phys code: Will make sure the page is present.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS and a valid *ppPage on success.
 * @retval  VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
 *
 * @param   pPGM        PGM handle.
 * @param   GCPhys      The GC physical address.
 * @param   ppPage      Where to store the page pointer on success.
 * @param   ppRamHint   Where to read and store the ram list hint.
 *                      The caller initializes this to NULL before the call.
 */
DECLINLINE(int) pgmPhysGetPageWithHintEx(PPGM pPGM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRamHint)
{
    RTGCPHYS off;
    PPGMRAMRANGE pRam = *ppRamHint;
    if (    !pRam
        ||  RT_UNLIKELY((off = GCPhys - pRam->GCPhys) >= pRam->cb))
    {
        /* Hint missed; do the full lookup starting at the first range and update the hint. */
        pRam = CTXSUFF(pPGM->pRamRanges);
        off = GCPhys - pRam->GCPhys;
        if (RT_UNLIKELY(off >= pRam->cb))
        {
            do
            {
                pRam = CTXSUFF(pRam->pNext);
                if (RT_UNLIKELY(!pRam))
                {
                    *ppPage = NULL; /* Kill the incorrect and extremely annoying GCC warnings. */
                    return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
                }
                off = GCPhys - pRam->GCPhys;
            } while (off >= pRam->cb);
        }
        *ppRamHint = pRam;
    }
    *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
#ifndef NEW_PHYS_CODE

    /*
     * Make sure it's present.
     * Dynamically allocated ranges may need their backing grown first.
     */
    if (RT_UNLIKELY(    !PGM_PAGE_GET_HCPHYS(*ppPage)
                    &&  (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)))
    {
#ifdef IN_RING3
        int rc = pgmr3PhysGrowRange(PGM2VM(pPGM), GCPhys);
#else
        int rc = CTXALLMID(VMM, CallHost)(PGM2VM(pPGM), VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
#endif
        if (VBOX_FAILURE(rc))
        {
            *ppPage = NULL; /* Shut up annoying smart ass. */
            return rc;
        }
        Assert(rc == VINF_SUCCESS);
    }
#endif
    return VINF_SUCCESS;
}
1998
1999
2000/**
2001 * Gets the PGMPAGE structure for a guest page together with the PGMRAMRANGE.
2002 *
2003 * @returns Pointer to the page on success.
2004 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
2005 *
2006 * @param pPGM PGM handle.
2007 * @param GCPhys The GC physical address.
2008 * @param ppRam Where to store the pointer to the PGMRAMRANGE.
2009 */
2010DECLINLINE(PPGMPAGE) pgmPhysGetPageAndRange(PPGM pPGM, RTGCPHYS GCPhys, PPGMRAMRANGE *ppRam)
2011{
2012 /*
2013 * Optimize for the first range.
2014 */
2015 PPGMRAMRANGE pRam = CTXSUFF(pPGM->pRamRanges);
2016 RTGCPHYS off = GCPhys - pRam->GCPhys;
2017 if (RT_UNLIKELY(off >= pRam->cb))
2018 {
2019 do
2020 {
2021 pRam = CTXSUFF(pRam->pNext);
2022 if (RT_UNLIKELY(!pRam))
2023 return NULL;
2024 off = GCPhys - pRam->GCPhys;
2025 } while (off >= pRam->cb);
2026 }
2027 *ppRam = pRam;
2028 return &pRam->aPages[off >> PAGE_SHIFT];
2029}
2030
2031
2032
2033
2034/**
2035 * Gets the PGMPAGE structure for a guest page together with the PGMRAMRANGE.
2036 *
2037 * @returns Pointer to the page on success.
2038 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
2039 *
2040 * @param pPGM PGM handle.
2041 * @param GCPhys The GC physical address.
2042 * @param ppPage Where to store the pointer to the PGMPAGE structure.
2043 * @param ppRam Where to store the pointer to the PGMRAMRANGE structure.
2044 */
2045DECLINLINE(int) pgmPhysGetPageAndRangeEx(PPGM pPGM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam)
2046{
2047 /*
2048 * Optimize for the first range.
2049 */
2050 PPGMRAMRANGE pRam = CTXSUFF(pPGM->pRamRanges);
2051 RTGCPHYS off = GCPhys - pRam->GCPhys;
2052 if (RT_UNLIKELY(off >= pRam->cb))
2053 {
2054 do
2055 {
2056 pRam = CTXSUFF(pRam->pNext);
2057 if (RT_UNLIKELY(!pRam))
2058 {
2059 *ppRam = NULL; /* Shut up silly GCC warnings. */
2060 *ppPage = NULL; /* ditto */
2061 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2062 }
2063 off = GCPhys - pRam->GCPhys;
2064 } while (off >= pRam->cb);
2065 }
2066 *ppRam = pRam;
2067 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
2068#ifndef NEW_PHYS_CODE
2069
2070 /*
2071 * Make sure it's present.
2072 */
2073 if (RT_UNLIKELY( !PGM_PAGE_GET_HCPHYS(*ppPage)
2074 && (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)))
2075 {
2076#ifdef IN_RING3
2077 int rc = pgmr3PhysGrowRange(PGM2VM(pPGM), GCPhys);
2078#else
2079 int rc = CTXALLMID(VMM, CallHost)(PGM2VM(pPGM), VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
2080#endif
2081 if (VBOX_FAILURE(rc))
2082 {
2083 *ppPage = NULL; /* Shut up silly GCC warnings. */
2084 *ppPage = NULL; /* ditto */
2085 return rc;
2086 }
2087 Assert(rc == VINF_SUCCESS);
2088
2089 }
2090#endif
2091 return VINF_SUCCESS;
2092}
2093
2094
2095/**
2096 * Convert GC Phys to HC Phys.
2097 *
2098 * @returns VBox status.
2099 * @param pPGM PGM handle.
2100 * @param GCPhys The GC physical address.
2101 * @param pHCPhys Where to store the corresponding HC physical address.
2102 *
2103 * @deprecated Doesn't deal with zero, shared or write monitored pages.
2104 * Avoid when writing new code!
2105 */
2106DECLINLINE(int) pgmRamGCPhys2HCPhys(PPGM pPGM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
2107{
2108 PPGMPAGE pPage;
2109 int rc = pgmPhysGetPageEx(pPGM, GCPhys, &pPage);
2110 if (VBOX_FAILURE(rc))
2111 return rc;
2112 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
2113 return VINF_SUCCESS;
2114}
2115
2116
2117#ifndef NEW_PHYS_CODE
2118/**
2119 * Convert GC Phys to HC Virt.
2120 *
2121 * @returns VBox status.
2122 * @param pPGM PGM handle.
2123 * @param GCPhys The GC physical address.
2124 * @param pHCPtr Where to store the corresponding HC virtual address.
2125 *
2126 * @deprecated This will be eliminated by PGMPhysGCPhys2CCPtr.
2127 */
2128DECLINLINE(int) pgmRamGCPhys2HCPtr(PPGM pPGM, RTGCPHYS GCPhys, PRTHCPTR pHCPtr)
2129{
2130 PPGMRAMRANGE pRam;
2131 PPGMPAGE pPage;
2132 int rc = pgmPhysGetPageAndRangeEx(pPGM, GCPhys, &pPage, &pRam);
2133 if (VBOX_FAILURE(rc))
2134 {
2135 *pHCPtr = 0; /* Shut up silly GCC warnings. */
2136 return rc;
2137 }
2138 RTGCPHYS off = GCPhys - pRam->GCPhys;
2139
2140 if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
2141 {
2142 unsigned iChunk = off >> PGM_DYNAMIC_CHUNK_SHIFT;
2143 *pHCPtr = (RTHCPTR)((RTHCUINTPTR)CTXSUFF(pRam->pavHCChunk)[iChunk] + (off & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
2144 return VINF_SUCCESS;
2145 }
2146 if (pRam->pvHC)
2147 {
2148 *pHCPtr = (RTHCPTR)((RTHCUINTPTR)pRam->pvHC + off);
2149 return VINF_SUCCESS;
2150 }
2151 *pHCPtr = 0; /* Shut up silly GCC warnings. */
2152 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2153}
2154#endif /* !NEW_PHYS_CODE */
2155
2156
/**
 * Convert GC Phys to HC Virt, given the RAM range the address belongs to.
 *
 * @returns VBox status.
 * @param   pVM         VM handle.
 * @param   pRam        Ram range the GC physical address is known to fall within.
 * @param   GCPhys      The GC physical address.
 * @param   pHCPtr      Where to store the corresponding HC virtual address.
 *
 * @deprecated  This will be eliminated. Don't use it.
 */
DECLINLINE(int) pgmRamGCPhys2HCPtrWithRange(PVM pVM, PPGMRAMRANGE pRam, RTGCPHYS GCPhys, PRTHCPTR pHCPtr)
{
    RTGCPHYS off = GCPhys - pRam->GCPhys;
    Assert(off < pRam->cb);

    if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
    {
        unsigned idx = (off >> PGM_DYNAMIC_CHUNK_SHIFT);
        /* Physical chunk in dynamically allocated range not present? */
        if (RT_UNLIKELY(!CTXSUFF(pRam->pavHCChunk)[idx]))
        {
            /* Allocate the backing now: directly in ring-3, via a host call elsewhere. */
#ifdef IN_RING3
            int rc = pgmr3PhysGrowRange(pVM, GCPhys);
#else
            int rc = CTXALLMID(VMM, CallHost)(pVM, VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
#endif
            if (rc != VINF_SUCCESS)
            {
                *pHCPtr = 0; /* GCC crap */
                return rc;
            }
        }
        *pHCPtr = (RTHCPTR)((RTHCUINTPTR)CTXSUFF(pRam->pavHCChunk)[idx] + (off & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
        return VINF_SUCCESS;
    }
    if (pRam->pvHC)
    {
        /* Contiguously mapped range. */
        *pHCPtr = (RTHCPTR)((RTHCUINTPTR)pRam->pvHC + off);
        return VINF_SUCCESS;
    }
    *pHCPtr = 0; /* GCC crap */
    return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
}
2201
2202
2203/**
2204 * Convert GC Phys to HC Virt and HC Phys.
2205 *
2206 * @returns VBox status.
2207 * @param pPGM PGM handle.
2208 * @param GCPhys The GC physical address.
2209 * @param pHCPtr Where to store the corresponding HC virtual address.
2210 * @param pHCPhys Where to store the HC Physical address and its flags.
2211 *
2212 * @deprecated Will go away or be changed. Only user is MapCR3. MapCR3 will have to do ring-3
2213 * and ring-0 locking of the CR3 in a lazy fashion I'm fear... or perhaps not. we'll see.
2214 */
2215DECLINLINE(int) pgmRamGCPhys2HCPtrAndHCPhysWithFlags(PPGM pPGM, RTGCPHYS GCPhys, PRTHCPTR pHCPtr, PRTHCPHYS pHCPhys)
2216{
2217 PPGMRAMRANGE pRam;
2218 PPGMPAGE pPage;
2219 int rc = pgmPhysGetPageAndRangeEx(pPGM, GCPhys, &pPage, &pRam);
2220 if (VBOX_FAILURE(rc))
2221 {
2222 *pHCPtr = 0; /* Shut up crappy GCC warnings */
2223 *pHCPhys = 0; /* ditto */
2224 return rc;
2225 }
2226 RTGCPHYS off = GCPhys - pRam->GCPhys;
2227
2228 *pHCPhys = pPage->HCPhys; /** @todo PAGE FLAGS */
2229 if (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
2230 {
2231 unsigned idx = (off >> PGM_DYNAMIC_CHUNK_SHIFT);
2232 *pHCPtr = (RTHCPTR)((RTHCUINTPTR)CTXSUFF(pRam->pavHCChunk)[idx] + (off & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
2233 return VINF_SUCCESS;
2234 }
2235 if (pRam->pvHC)
2236 {
2237 *pHCPtr = (RTHCPTR)((RTHCUINTPTR)pRam->pvHC + off);
2238 return VINF_SUCCESS;
2239 }
2240 *pHCPtr = 0;
2241 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2242}
2243
2244
2245/**
2246 * Clears flags associated with a RAM address.
2247 *
2248 * @returns VBox status code.
2249 * @param pPGM PGM handle.
2250 * @param GCPhys Guest context physical address.
2251 * @param fFlags fFlags to clear. (Bits 0-11.)
2252 */
2253DECLINLINE(int) pgmRamFlagsClearByGCPhys(PPGM pPGM, RTGCPHYS GCPhys, unsigned fFlags)
2254{
2255 PPGMPAGE pPage;
2256 int rc = pgmPhysGetPageEx(pPGM, GCPhys, &pPage);
2257 if (VBOX_FAILURE(rc))
2258 return rc;
2259
2260 fFlags &= ~X86_PTE_PAE_PG_MASK;
2261 pPage->HCPhys &= ~(RTHCPHYS)fFlags; /** @todo PAGE FLAGS */
2262 return VINF_SUCCESS;
2263}
2264
2265
2266/**
2267 * Clears flags associated with a RAM address.
2268 *
2269 * @returns VBox status code.
2270 * @param pPGM PGM handle.
2271 * @param GCPhys Guest context physical address.
2272 * @param fFlags fFlags to clear. (Bits 0-11.)
2273 * @param ppRamHint Where to read and store the ram list hint.
2274 * The caller initializes this to NULL before the call.
2275 */
2276DECLINLINE(int) pgmRamFlagsClearByGCPhysWithHint(PPGM pPGM, RTGCPHYS GCPhys, unsigned fFlags, PPGMRAMRANGE *ppRamHint)
2277{
2278 PPGMPAGE pPage;
2279 int rc = pgmPhysGetPageWithHintEx(pPGM, GCPhys, &pPage, ppRamHint);
2280 if (VBOX_FAILURE(rc))
2281 return rc;
2282
2283 fFlags &= ~X86_PTE_PAE_PG_MASK;
2284 pPage->HCPhys &= ~(RTHCPHYS)fFlags; /** @todo PAGE FLAGS */
2285 return VINF_SUCCESS;
2286}
2287
2288/**
2289 * Sets (bitwise OR) flags associated with a RAM address.
2290 *
2291 * @returns VBox status code.
2292 * @param pPGM PGM handle.
2293 * @param GCPhys Guest context physical address.
2294 * @param fFlags fFlags to set clear. (Bits 0-11.)
2295 */
2296DECLINLINE(int) pgmRamFlagsSetByGCPhys(PPGM pPGM, RTGCPHYS GCPhys, unsigned fFlags)
2297{
2298 PPGMPAGE pPage;
2299 int rc = pgmPhysGetPageEx(pPGM, GCPhys, &pPage);
2300 if (VBOX_FAILURE(rc))
2301 return rc;
2302
2303 fFlags &= ~X86_PTE_PAE_PG_MASK;
2304 pPage->HCPhys |= fFlags; /** @todo PAGE FLAGS */
2305 return VINF_SUCCESS;
2306}
2307
2308
2309/**
2310 * Sets (bitwise OR) flags associated with a RAM address.
2311 *
2312 * @returns VBox status code.
2313 * @param pPGM PGM handle.
2314 * @param GCPhys Guest context physical address.
2315 * @param fFlags fFlags to set clear. (Bits 0-11.)
2316 * @param ppRamHint Where to read and store the ram list hint.
2317 * The caller initializes this to NULL before the call.
2318 */
2319DECLINLINE(int) pgmRamFlagsSetByGCPhysWithHint(PPGM pPGM, RTGCPHYS GCPhys, unsigned fFlags, PPGMRAMRANGE *ppRamHint)
2320{
2321 PPGMPAGE pPage;
2322 int rc = pgmPhysGetPageWithHintEx(pPGM, GCPhys, &pPage, ppRamHint);
2323 if (VBOX_FAILURE(rc))
2324 return rc;
2325
2326 fFlags &= ~X86_PTE_PAE_PG_MASK;
2327 pPage->HCPhys |= fFlags; /** @todo PAGE FLAGS */
2328 return VINF_SUCCESS;
2329}
2330
2331
/**
 * Gets the PAE page directory for the specified address.
 *
 * @returns Pointer to the page directory in question.
 * @returns NULL if the page directory is not present or on an invalid page.
 * @param   pPGM    Pointer to the PGM instance data.
 * @param   GCPtr   The address.
 */
DECLINLINE(PX86PDPAE) pgmGstGetPaePD(PPGM pPGM, RTGCUINTPTR GCPtr)
{
    const unsigned iPdPtr = GCPtr >> X86_PDPTR_SHIFT; /* PDPT index selecting the page directory. */
    if (CTXSUFF(pPGM->pGstPaePDPTR)->a[iPdPtr].n.u1Present)
    {
        /* Fast path: use the cached PD mapping if the PDPTE still points at
           the same physical page directory we recorded in aGCPhysGstPaePDs. */
        if ((CTXSUFF(pPGM->pGstPaePDPTR)->a[iPdPtr].u & X86_PDPE_PG_MASK) == pPGM->aGCPhysGstPaePDs[iPdPtr])
            return CTXSUFF(pPGM->apGstPaePDs)[iPdPtr];

        /* cache is out-of-sync; map the page directory afresh. */
        PX86PDPAE pPD;
        int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), CTXSUFF(pPGM->pGstPaePDPTR)->a[iPdPtr].u & X86_PDPE_PG_MASK, &pPD);
        if (VBOX_SUCCESS(rc))
            return pPD;
        AssertMsgFailed(("Impossible! rc=%d PDPE=%#llx\n", rc, CTXSUFF(pPGM->pGstPaePDPTR)->a[iPdPtr].u));
        /* returning NIL_RTGCPHYS is ok if we assume it's just an invalid page of some kind emulated as all 0s. */
    }
    return NULL;
}
2358
2359
/**
 * Gets the PAE page directory entry for the specified address.
 *
 * @returns Pointer to the page directory entry in question.
 * @returns NULL if the page directory is not present or on an invalid page.
 * @param   pPGM    Pointer to the PGM instance data.
 * @param   GCPtr   The address.
 */
DECLINLINE(PX86PDEPAE) pgmGstGetPaePDEPtr(PPGM pPGM, RTGCUINTPTR GCPtr)
{
    const unsigned iPdPtr = GCPtr >> X86_PDPTR_SHIFT; /* PDPT index selecting the page directory. */
    if (CTXSUFF(pPGM->pGstPaePDPTR)->a[iPdPtr].n.u1Present)
    {
        const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK; /* PDE index within the directory. */
        /* Fast path: use the cached PD mapping if the PDPTE still matches it. */
        if ((CTXSUFF(pPGM->pGstPaePDPTR)->a[iPdPtr].u & X86_PDPE_PG_MASK) == pPGM->aGCPhysGstPaePDs[iPdPtr])
            return &CTXSUFF(pPGM->apGstPaePDs)[iPdPtr]->a[iPD];

        /* The cache is out-of-sync; map the page directory afresh. */
        PX86PDPAE pPD;
        int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), CTXSUFF(pPGM->pGstPaePDPTR)->a[iPdPtr].u & X86_PDPE_PG_MASK, &pPD);
        if (VBOX_SUCCESS(rc))
            return &pPD->a[iPD];
        AssertMsgFailed(("Impossible! rc=%Vrc PDPE=%RX64\n", rc, CTXSUFF(pPGM->pGstPaePDPTR)->a[iPdPtr].u));
        /* returning NIL_RTGCPHYS is ok if we assume it's just an invalid page or something which we'll emulate as all 0s. */
    }
    return NULL;
}
2387
2388
2389/**
2390 * Gets the page directory entry for the specified address.
2391 *
2392 * @returns The page directory entry in question.
2393 * @returns A non-present entry if the page directory is not present or on an invalid page.
2394 * @param pPGM Pointer to the PGM instance data.
2395 * @param GCPtr The address.
2396 */
2397DECLINLINE(uint64_t) pgmGstGetPaePDE(PPGM pPGM, RTGCUINTPTR GCPtr)
2398{
2399 const unsigned iPdPtr = GCPtr >> X86_PDPTR_SHIFT;
2400 if (CTXSUFF(pPGM->pGstPaePDPTR)->a[iPdPtr].n.u1Present)
2401 {
2402 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
2403 if ((CTXSUFF(pPGM->pGstPaePDPTR)->a[iPdPtr].u & X86_PDPE_PG_MASK) == pPGM->aGCPhysGstPaePDs[iPdPtr])
2404 return CTXSUFF(pPGM->apGstPaePDs)[iPdPtr]->a[iPD].u;
2405
2406 /* cache is out-of-sync. */
2407 PX86PDPAE pPD;
2408 int rc = PGM_GCPHYS_2_PTR(PGM2VM(pPGM), CTXSUFF(pPGM->pGstPaePDPTR)->a[iPdPtr].u & X86_PDPE_PG_MASK, &pPD);
2409 if (VBOX_SUCCESS(rc))
2410 return pPD->a[iPD].u;
2411 AssertMsgFailed(("Impossible! rc=%d PDPE=%#llx\n", rc, CTXSUFF(pPGM->pGstPaePDPTR)->a[iPdPtr].u));
2412 }
2413 return 0;
2414}
2415
2416
2417/**
2418 * Checks if any of the specified page flags are set for the given page.
2419 *
2420 * @returns true if any of the flags are set.
2421 * @returns false if all the flags are clear.
2422 * @param pPGM PGM handle.
2423 * @param GCPhys The GC physical address.
2424 * @param fFlags The flags to check for.
2425 */
2426DECLINLINE(bool) pgmRamTestFlags(PPGM pPGM, RTGCPHYS GCPhys, uint64_t fFlags)
2427{
2428 PPGMPAGE pPage = pgmPhysGetPage(pPGM, GCPhys);
2429 return pPage
2430 && (pPage->HCPhys & fFlags) != 0; /** @todo PAGE FLAGS */
2431}
2432
2433
2434/**
2435 * Gets the ram flags for a handler.
2436 *
2437 * @returns The ram flags.
2438 * @param pCur The physical handler in question.
2439 */
2440DECLINLINE(unsigned) pgmHandlerPhysicalCalcFlags(PPGMPHYSHANDLER pCur)
2441{
2442 switch (pCur->enmType)
2443 {
2444 case PGMPHYSHANDLERTYPE_PHYSICAL:
2445 return MM_RAM_FLAGS_PHYSICAL_HANDLER;
2446
2447 case PGMPHYSHANDLERTYPE_PHYSICAL_WRITE:
2448 return MM_RAM_FLAGS_PHYSICAL_HANDLER | MM_RAM_FLAGS_PHYSICAL_WRITE;
2449
2450 case PGMPHYSHANDLERTYPE_MMIO:
2451 case PGMPHYSHANDLERTYPE_PHYSICAL_ALL:
2452 return MM_RAM_FLAGS_PHYSICAL_HANDLER | MM_RAM_FLAGS_PHYSICAL_ALL;
2453
2454 default:
2455 AssertFatalMsgFailed(("Invalid type %d\n", pCur->enmType));
2456 }
2457}
2458
2459
/**
 * Clears one physical page of a virtual handler.
 *
 * Unlinks the page's phys-to-virt node from its alias chain and from the
 * phys-to-virt AVL tree, then clears the virtual handler ram flags for the
 * page. The node is marked as requiring reinsertion afterwards.
 *
 * @param   pPGM    Pointer to the PGM instance.
 * @param   pCur    Virtual handler structure.
 * @param   iPage   Physical page index into pCur->aPhysToVirt.
 */
DECLINLINE(void) pgmHandlerVirtualClearPage(PPGM pPGM, PPGMVIRTHANDLER pCur, unsigned iPage)
{
    const PPGMPHYS2VIRTHANDLER pPhys2Virt = &pCur->aPhysToVirt[iPage];

    /*
     * Remove the node from the tree (it's supposed to be in the tree if we get here!).
     */
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
    AssertReleaseMsg(pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_IN_TREE,
                     ("pPhys2Virt=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
                      pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias));
#endif
    if (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_IS_HEAD)
    {
        /* We're the head of the alias chain, so it's this node that sits in the tree. */
        PPGMPHYS2VIRTHANDLER pRemove = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysRemove(&pPGM->CTXSUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key); NOREF(pRemove);
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
        AssertReleaseMsg(pRemove != NULL,
                         ("pPhys2Virt=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
                          pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias));
        AssertReleaseMsg(pRemove == pPhys2Virt,
                         ("wanted: pPhys2Virt=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n"
                          " got: pRemove=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
                          pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias,
                          pRemove, pRemove->Core.Key, pRemove->Core.KeyLast, pRemove->offVirtHandler, pRemove->offNextAlias));
#endif
        if (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK)
        {
            /* Insert the next list in the alias chain into the tree.
               (Aliases are chained via self-relative byte offsets in offNextAlias.) */
            PPGMPHYS2VIRTHANDLER pNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPhys2Virt + (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
            AssertReleaseMsg(pNext->offNextAlias & PGMPHYS2VIRTHANDLER_IN_TREE,
                             ("pNext=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
                              pNext, pNext->Core.Key, pNext->Core.KeyLast, pNext->offVirtHandler, pNext->offNextAlias));
#endif
            pNext->offNextAlias |= PGMPHYS2VIRTHANDLER_IS_HEAD;
            bool fRc = RTAvlroGCPhysInsert(&pPGM->CTXSUFF(pTrees)->PhysToVirtHandlers, &pNext->Core);
            AssertRelease(fRc);
        }
    }
    else
    {
        /* Not the head; locate the previous node in the alias chain, starting
           from the tree node (the head) and walking the offset links. */
        PPGMPHYS2VIRTHANDLER pPrev = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGet(&pPGM->CTXSUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key);
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
        AssertReleaseMsg(pPrev != pPhys2Virt,
                         ("pPhys2Virt=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} pPrev=%p\n",
                          pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias, pPrev));
#endif
        for (;;)
        {
            PPGMPHYS2VIRTHANDLER pNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPrev + (pPrev->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
            if (pNext == pPhys2Virt)
            {
                /* unlink: rewrite pPrev's forward offset to skip pPhys2Virt. */
                LogFlow(("pgmHandlerVirtualClearPage: removed %p:{.offNextAlias=%#RX32} from alias chain. prev %p:{.offNextAlias=%#RX32} [%VGp-%VGp]\n",
                         pPhys2Virt, pPhys2Virt->offNextAlias, pPrev, pPrev->offNextAlias, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast));
                if (!(pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK))
                    pPrev->offNextAlias &= ~PGMPHYS2VIRTHANDLER_OFF_MASK;   /* we were the last alias. */
                else
                {
                    PPGMPHYS2VIRTHANDLER pNewNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPhys2Virt + (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
                    pPrev->offNextAlias = ((intptr_t)pNewNext - (intptr_t)pPrev)
                                        | (pPrev->offNextAlias & ~PGMPHYS2VIRTHANDLER_OFF_MASK);
                }
                break;
            }

            /* next */
            if (pNext == pPrev)
            {
                /* A zero forward offset means end of chain - we never found ourselves. */
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
                AssertReleaseMsg(pNext != pPrev,
                                 ("pPhys2Virt=%p:{.Core.Key=%VGp, .Core.KeyLast=%VGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} pPrev=%p\n",
                                  pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias, pPrev));
#endif
                break;
            }
            pPrev = pNext;
        }
    }
    Log2(("PHYS2VIRT: Removing %VGp-%VGp %#RX32 %s\n",
          pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias, HCSTRING(pCur->pszDesc)));
    pPhys2Virt->offNextAlias = 0;
    pPhys2Virt->Core.KeyLast = NIL_RTGCPHYS; /* require reinsert */

    /*
     * Clear the ram flags for this page.
     */
    int rc = pgmRamFlagsClearByGCPhys(pPGM, pPhys2Virt->Core.Key,
                                      MM_RAM_FLAGS_VIRTUAL_HANDLER | MM_RAM_FLAGS_VIRTUAL_ALL | MM_RAM_FLAGS_VIRTUAL_WRITE);
    AssertRC(rc);
}
2560
2561
2562/**
2563 * Internal worker for finding a 'in-use' shadow page give by it's physical address.
2564 *
2565 * @returns Pointer to the shadow page structure.
2566 * @param pPool The pool.
2567 * @param HCPhys The HC physical address of the shadow page.
2568 */
2569DECLINLINE(PPGMPOOLPAGE) pgmPoolGetPage(PPGMPOOL pPool, RTHCPHYS HCPhys)
2570{
2571 /*
2572 * Look up the page.
2573 */
2574 PPGMPOOLPAGE pPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, HCPhys & X86_PTE_PAE_PG_MASK);
2575 AssertFatalMsg(pPage && pPage->enmKind != PGMPOOLKIND_FREE, ("HCPhys=%VHp pPage=%p type=%d\n", HCPhys, pPage, (pPage) ? pPage->enmKind : 0));
2576 return pPage;
2577}
2578
2579
2580/**
2581 * Internal worker for finding a 'in-use' shadow page give by it's physical address.
2582 *
2583 * @returns Pointer to the shadow page structure.
2584 * @param pPool The pool.
2585 * @param idx The pool page index.
2586 */
2587DECLINLINE(PPGMPOOLPAGE) pgmPoolGetPageByIdx(PPGMPOOL pPool, unsigned idx)
2588{
2589 AssertFatalMsg(idx >= PGMPOOL_IDX_FIRST && idx < pPool->cCurPages, ("idx=%d\n", idx));
2590 return &pPool->aPages[idx];
2591}
2592
2593
#ifdef PGMPOOL_WITH_GCPHYS_TRACKING
/**
 * Clear references to guest physical memory.
 *
 * Drops one reference held by the given pool page on the physical guest page.
 * The reference count and pool page index are packed into the upper bits of
 * pPhysPage->HCPhys (see MM_RAM_FLAGS_CREFS_SHIFT / MM_RAM_FLAGS_IDX_SHIFT).
 *
 * @param   pPool       The pool.
 * @param   pPoolPage   The pool page.
 * @param   pPhysPage   The physical guest page tracking structure.
 */
DECLINLINE(void) pgmTrackDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPoolPage, PPGMPAGE pPhysPage)
{
    /*
     * Just deal with the simple case here.
     */
#ifdef LOG_ENABLED
    const RTHCPHYS HCPhysOrg = pPhysPage->HCPhys; /** @todo PAGE FLAGS */
#endif
    const unsigned cRefs = pPhysPage->HCPhys >> MM_RAM_FLAGS_CREFS_SHIFT; /** @todo PAGE FLAGS */
    if (cRefs == 1)
    {
        /* Single reference: the asserted index must match this pool page;
           clearing the refs/idx bits leaves just the address and flags. */
        Assert(pPoolPage->idx == ((pPhysPage->HCPhys >> MM_RAM_FLAGS_IDX_SHIFT) & MM_RAM_FLAGS_IDX_MASK));
        pPhysPage->HCPhys = pPhysPage->HCPhys & MM_RAM_FLAGS_NO_REFS_MASK;
    }
    else
        /* Multiple references: hand it to the extent list worker. */
        pgmPoolTrackPhysExtDerefGCPhys(pPool, pPoolPage, pPhysPage);
    LogFlow(("pgmTrackDerefGCPhys: HCPhys=%RHp -> %RHp\n", HCPhysOrg, pPhysPage->HCPhys));
}
#endif
2621
2622
#ifdef PGMPOOL_WITH_CACHE
/**
 * Moves the page to the head of the age list.
 *
 * This is done when the cached page is used in one way or another.
 * The age list is an intrusive doubly linked list using pool page
 * indexes (iAgePrev/iAgeNext) with NIL_PGMPOOL_IDX as terminator.
 *
 * @param   pPool   The pool.
 * @param   pPage   The cached page.
 */
DECLINLINE(void) pgmPoolCacheUsed(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
{
    /*
     * Move to the head of the age list.
     */
    if (pPage->iAgePrev != NIL_PGMPOOL_IDX)  /* NIL prev means we're already the head - nothing to do. */
    {
        /* unlink */
        pPool->aPages[pPage->iAgePrev].iAgeNext = pPage->iAgeNext;
        if (pPage->iAgeNext != NIL_PGMPOOL_IDX)
            pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->iAgePrev;
        else
            pPool->iAgeTail = pPage->iAgePrev;  /* we were the tail. */

        /* insert at head */
        pPage->iAgePrev = NIL_PGMPOOL_IDX;
        pPage->iAgeNext = pPool->iAgeHead;
        Assert(pPage->iAgeNext != NIL_PGMPOOL_IDX); /* we would've already been head then */
        pPool->iAgeHead = pPage->idx;
        pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->idx;
    }
}
#endif /* PGMPOOL_WITH_CACHE */
2656
/**
 * Tells if mappings are to be put into the shadow page table or not.
 *
 * @returns boolean result
 * @param   pPGM    Pointer to the PGM instance data.
 */

DECLINLINE(bool) pgmMapAreMappingsEnabled(PPGM pPGM)
{
    return !pPGM->fDisableMappings;
}
2668
2669/** @} */
2670
2671#endif
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette