VirtualBox

source: vbox/trunk/src/VBox/VMM/PGMInline.h@28112

Last change on this file since 28112 was 26150, checked in by vboxsync, 15 years ago

PGM: Split out the inlined code from PGMInternal.h and into PGMInline.h so we can drop all the &pVM->pgm.s and &pVCpu->pgm.s stuff.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 52.9 KB
/* $Id: PGMInline.h 26150 2010-02-02 15:52:54Z vboxsync $ */
/** @file
 * PGM - Inlined functions.
 */

/*
 * Copyright (C) 2006-2010 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

#ifndef ___PGMInline_h
#define ___PGMInline_h

#include <VBox/cdefs.h>
#include <VBox/types.h>
#include <VBox/err.h>
#include <VBox/stam.h>
#include <VBox/param.h>
#include <VBox/vmm.h>
#include <VBox/mm.h>
#include <VBox/pdmcritsect.h>
#include <VBox/pdmapi.h>
#include <VBox/dis.h>
#include <VBox/dbgf.h>
#include <VBox/log.h>
#include <VBox/gmm.h>
#include <VBox/hwaccm.h>
#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/avl.h>
#include <iprt/critsect.h>
#include <iprt/sha.h>



/** @addtogroup grp_pgm_int Internals
 * @internal
 * @{
 */

/** @todo Split out all the inline stuff into a separate file.  Then we can
 *        include it later when VM and VMCPU are defined and so avoid all that
 *        &pVM->pgm.s and &pVCpu->pgm.s stuff.  It also chops ~1600 lines off
 *        this file and will make it somewhat easier to navigate... */

/**
 * Gets the PGMRAMRANGE structure for a guest page.
 *
 * @returns Pointer to the RAM range on success.
 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
 *
 * @param pPGM PGM handle.
 * @param GCPhys The GC physical address.
 */
DECLINLINE(PPGMRAMRANGE) pgmPhysGetRange(PPGM pPGM, RTGCPHYS GCPhys)
{
    /*
     * Optimize for the first range.
     */
    PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
    RTGCPHYS off = GCPhys - pRam->GCPhys;
    if (RT_UNLIKELY(off >= pRam->cb))
    {
        do
        {
            pRam = pRam->CTX_SUFF(pNext);
            if (RT_UNLIKELY(!pRam))
                break;
            off = GCPhys - pRam->GCPhys;
        } while (off >= pRam->cb);
    }
    return pRam;
}

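/*
 * Minimal usage sketch for the range lookup above (pVM and GCPhys are
 * hypothetical caller variables; any valid VM handle and guest physical
 * address will do):
 *
 *     PPGMRAMRANGE pRam = pgmPhysGetRange(&pVM->pgm.s, GCPhys);
 *     if (pRam)
 *         LogFlow(("GCPhys=%RGp is in the range starting at %RGp\n", GCPhys, pRam->GCPhys));
 *     else
 *         LogFlow(("GCPhys=%RGp -> VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS\n", GCPhys));
 */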

/**
 * Gets the PGMPAGE structure for a guest page.
 *
 * @returns Pointer to the page on success.
 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
 *
 * @param pPGM PGM handle.
 * @param GCPhys The GC physical address.
 */
DECLINLINE(PPGMPAGE) pgmPhysGetPage(PPGM pPGM, RTGCPHYS GCPhys)
{
    /*
     * Optimize for the first range.
     */
    PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
    RTGCPHYS off = GCPhys - pRam->GCPhys;
    if (RT_UNLIKELY(off >= pRam->cb))
    {
        do
        {
            pRam = pRam->CTX_SUFF(pNext);
            if (RT_UNLIKELY(!pRam))
                return NULL;
            off = GCPhys - pRam->GCPhys;
        } while (off >= pRam->cb);
    }
    return &pRam->aPages[off >> PAGE_SHIFT];
}


/**
 * Gets the PGMPAGE structure for a guest page.
 *
 * Old Phys code: Will make sure the page is present.
 *
 * @returns VBox status code.
 * @retval VINF_SUCCESS and a valid *ppPage on success.
 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
 *
 * @param pPGM PGM handle.
 * @param GCPhys The GC physical address.
 * @param ppPage Where to store the page pointer on success.
 */
DECLINLINE(int) pgmPhysGetPageEx(PPGM pPGM, RTGCPHYS GCPhys, PPPGMPAGE ppPage)
{
    /*
     * Optimize for the first range.
     */
    PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
    RTGCPHYS off = GCPhys - pRam->GCPhys;
    if (RT_UNLIKELY(off >= pRam->cb))
    {
        do
        {
            pRam = pRam->CTX_SUFF(pNext);
            if (RT_UNLIKELY(!pRam))
            {
                *ppPage = NULL; /* avoid incorrect and very annoying GCC warnings */
                return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
            }
            off = GCPhys - pRam->GCPhys;
        } while (off >= pRam->cb);
    }
    *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
    return VINF_SUCCESS;
}

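/*
 * Minimal usage sketch for the status-code variant above (pVM and GCPhys
 * are hypothetical caller variables):
 *
 *     PPGMPAGE pPage;
 *     int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
 *     if (RT_SUCCESS(rc))
 *         LogFlow(("GCPhys=%RGp HCPhys=%RHp\n", GCPhys, PGM_PAGE_GET_HCPHYS(pPage)));
 *     else
 *         AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
 */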



/**
 * Gets the PGMPAGE structure for a guest page.
 *
 * Old Phys code: Will make sure the page is present.
 *
 * @returns VBox status code.
 * @retval VINF_SUCCESS and a valid *ppPage on success.
 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
 *
 * @param pPGM PGM handle.
 * @param GCPhys The GC physical address.
 * @param ppPage Where to store the page pointer on success.
 * @param ppRamHint Where to read and store the ram list hint.
 *                  The caller initializes this to NULL before the call.
 */
DECLINLINE(int) pgmPhysGetPageWithHintEx(PPGM pPGM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRamHint)
{
    RTGCPHYS off;
    PPGMRAMRANGE pRam = *ppRamHint;
    if (    !pRam
        ||  RT_UNLIKELY((off = GCPhys - pRam->GCPhys) >= pRam->cb))
    {
        pRam = pPGM->CTX_SUFF(pRamRanges);
        off = GCPhys - pRam->GCPhys;
        if (RT_UNLIKELY(off >= pRam->cb))
        {
            do
            {
                pRam = pRam->CTX_SUFF(pNext);
                if (RT_UNLIKELY(!pRam))
                {
                    *ppPage = NULL; /* Kill the incorrect and extremely annoying GCC warnings. */
                    return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
                }
                off = GCPhys - pRam->GCPhys;
            } while (off >= pRam->cb);
        }
        *ppRamHint = pRam;
    }
    *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
    return VINF_SUCCESS;
}

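/*
 * The ram list hint saves restarting the linked-list walk when a caller
 * touches many pages in the same range. Sketch of the intended pattern
 * (pVM, GCPhysFirst and cPages are hypothetical caller variables):
 *
 *     PPGMRAMRANGE pRamHint = NULL;
 *     for (uint32_t iPage = 0; iPage < cPages; iPage++)
 *     {
 *         PPGMPAGE pPage;
 *         int rc = pgmPhysGetPageWithHintEx(&pVM->pgm.s,
 *                                           GCPhysFirst + ((RTGCPHYS)iPage << PAGE_SHIFT),
 *                                           &pPage, &pRamHint);
 *         if (RT_FAILURE(rc))
 *             break;
 *         ... use pPage ...
 *     }
 */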

/**
 * Gets the PGMPAGE structure for a guest page together with the PGMRAMRANGE.
 *
 * @returns Pointer to the page on success.
 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
 *
 * @param pPGM PGM handle.
 * @param GCPhys The GC physical address.
 * @param ppRam Where to store the pointer to the PGMRAMRANGE.
 */
DECLINLINE(PPGMPAGE) pgmPhysGetPageAndRange(PPGM pPGM, RTGCPHYS GCPhys, PPGMRAMRANGE *ppRam)
{
    /*
     * Optimize for the first range.
     */
    PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
    RTGCPHYS off = GCPhys - pRam->GCPhys;
    if (RT_UNLIKELY(off >= pRam->cb))
    {
        do
        {
            pRam = pRam->CTX_SUFF(pNext);
            if (RT_UNLIKELY(!pRam))
                return NULL;
            off = GCPhys - pRam->GCPhys;
        } while (off >= pRam->cb);
    }
    *ppRam = pRam;
    return &pRam->aPages[off >> PAGE_SHIFT];
}


/**
 * Gets the PGMPAGE structure for a guest page together with the PGMRAMRANGE.
 *
 * @returns Pointer to the page on success.
 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
 *
 * @param pPGM PGM handle.
 * @param GCPhys The GC physical address.
 * @param ppPage Where to store the pointer to the PGMPAGE structure.
 * @param ppRam Where to store the pointer to the PGMRAMRANGE structure.
 */
DECLINLINE(int) pgmPhysGetPageAndRangeEx(PPGM pPGM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam)
{
    /*
     * Optimize for the first range.
     */
    PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
    RTGCPHYS off = GCPhys - pRam->GCPhys;
    if (RT_UNLIKELY(off >= pRam->cb))
    {
        do
        {
            pRam = pRam->CTX_SUFF(pNext);
            if (RT_UNLIKELY(!pRam))
            {
                *ppRam = NULL;  /* Shut up silly GCC warnings. */
                *ppPage = NULL; /* ditto */
                return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
            }
            off = GCPhys - pRam->GCPhys;
        } while (off >= pRam->cb);
    }
    *ppRam = pRam;
    *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
    return VINF_SUCCESS;
}


/**
 * Convert GC Phys to HC Phys.
 *
 * @returns VBox status.
 * @param pPGM PGM handle.
 * @param GCPhys The GC physical address.
 * @param pHCPhys Where to store the corresponding HC physical address.
 *
 * @deprecated Doesn't deal with zero, shared or write monitored pages.
 *             Avoid when writing new code!
 */
DECLINLINE(int) pgmRamGCPhys2HCPhys(PPGM pPGM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
{
    PPGMPAGE pPage;
    int rc = pgmPhysGetPageEx(pPGM, GCPhys, &pPage);
    if (RT_FAILURE(rc))
        return rc;
    *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
    return VINF_SUCCESS;
}

#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0

/**
 * Inlined version of the ring-0 version of PGMDynMapHCPage that
 * optimizes access to pages already in the set.
 *
 * @returns VINF_SUCCESS. Will bail out to ring-3 on failure.
 * @param pPGM Pointer to the PGM instance data.
 * @param HCPhys The physical address of the page.
 * @param ppv Where to store the mapping address.
 */
DECLINLINE(int) pgmR0DynMapHCPageInlined(PPGM pPGM, RTHCPHYS HCPhys, void **ppv)
{
    PVM pVM = PGM2VM(pPGM);
    PPGMCPU pPGMCPU = (PPGMCPU)((uint8_t *)VMMGetCpu(pVM) + pPGM->offVCpuPGM); /* very pretty ;-) */
    PPGMMAPSET pSet = &pPGMCPU->AutoSet;

    STAM_PROFILE_START(&pPGMCPU->StatR0DynMapHCPageInl, a);
    Assert(!(HCPhys & PAGE_OFFSET_MASK));
    Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));

    unsigned iHash = PGMMAPSET_HASH(HCPhys);
    unsigned iEntry = pSet->aiHashTable[iHash];
    if (    iEntry < pSet->cEntries
        &&  pSet->aEntries[iEntry].HCPhys == HCPhys)
    {
        *ppv = pSet->aEntries[iEntry].pvPage;
        STAM_COUNTER_INC(&pPGMCPU->StatR0DynMapHCPageInlHits);
    }
    else
    {
        STAM_COUNTER_INC(&pPGMCPU->StatR0DynMapHCPageInlMisses);
        pgmR0DynMapHCPageCommon(pVM, pSet, HCPhys, ppv);
    }

    STAM_PROFILE_STOP(&pPGMCPU->StatR0DynMapHCPageInl, a);
    return VINF_SUCCESS;
}

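/*
 * Note on the fast path above: PGMMAPSET_HASH only yields a first guess
 * into aiHashTable, so a mismatch does not mean the page is unmapped; the
 * slow path (pgmR0DynMapHCPageCommon) rescans the set, creates the mapping
 * if necessary and refreshes the hash entry. A repeat mapping of the same
 * HCPhys is therefore expected to cost just the single table probe.
 */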

/**
 * Inlined version of the ring-0 version of PGMDynMapGCPage that optimizes
 * access to pages already in the set.
 *
 * @returns See PGMDynMapGCPage.
 * @param pPGM Pointer to the PGM instance data.
 * @param GCPhys The guest physical address of the page.
 * @param ppv Where to store the mapping address.
 */
DECLINLINE(int) pgmR0DynMapGCPageInlined(PPGM pPGM, RTGCPHYS GCPhys, void **ppv)
{
    PVM pVM = PGM2VM(pPGM);
    PPGMCPU pPGMCPU = (PPGMCPU)((uint8_t *)VMMGetCpu(pVM) + pPGM->offVCpuPGM); /* very pretty ;-) */

    STAM_PROFILE_START(&pPGMCPU->StatR0DynMapGCPageInl, a);
    AssertMsg(!(GCPhys & PAGE_OFFSET_MASK), ("%RGp\n", GCPhys));

    /*
     * Get the ram range.
     */
    PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
    RTGCPHYS off = GCPhys - pRam->GCPhys;
    if (RT_UNLIKELY(off >= pRam->cb
                    /** @todo || page state stuff */))
    {
        /* This case is not counted into StatR0DynMapGCPageInl. */
        STAM_COUNTER_INC(&pPGMCPU->StatR0DynMapGCPageInlRamMisses);
        return PGMDynMapGCPage(pVM, GCPhys, ppv);
    }

    RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[off >> PAGE_SHIFT]);
    STAM_COUNTER_INC(&pPGMCPU->StatR0DynMapGCPageInlRamHits);

    /*
     * pgmR0DynMapHCPageInlined without the stats.
     */
    PPGMMAPSET pSet = &pPGMCPU->AutoSet;
    Assert(!(HCPhys & PAGE_OFFSET_MASK));
    Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));

    unsigned iHash = PGMMAPSET_HASH(HCPhys);
    unsigned iEntry = pSet->aiHashTable[iHash];
    if (    iEntry < pSet->cEntries
        &&  pSet->aEntries[iEntry].HCPhys == HCPhys)
    {
        *ppv = pSet->aEntries[iEntry].pvPage;
        STAM_COUNTER_INC(&pPGMCPU->StatR0DynMapGCPageInlHits);
    }
    else
    {
        STAM_COUNTER_INC(&pPGMCPU->StatR0DynMapGCPageInlMisses);
        pgmR0DynMapHCPageCommon(pVM, pSet, HCPhys, ppv);
    }

    STAM_PROFILE_STOP(&pPGMCPU->StatR0DynMapGCPageInl, a);
    return VINF_SUCCESS;
}


/**
 * Inlined version of the ring-0 version of PGMDynMapGCPageOff that optimizes
 * access to pages already in the set.
 *
 * @returns See PGMDynMapGCPageOff.
 * @param pPGM Pointer to the PGM instance data.
 * @param GCPhys The guest physical address of the page.
 * @param ppv Where to store the mapping address.
 */
DECLINLINE(int) pgmR0DynMapGCPageOffInlined(PPGM pPGM, RTGCPHYS GCPhys, void **ppv)
{
    PVM pVM = PGM2VM(pPGM);
    PPGMCPU pPGMCPU = (PPGMCPU)((uint8_t *)VMMGetCpu(pVM) + pPGM->offVCpuPGM); /* very pretty ;-) */

    STAM_PROFILE_START(&pPGMCPU->StatR0DynMapGCPageInl, a);

    /*
     * Get the ram range.
     */
    PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
    RTGCPHYS off = GCPhys - pRam->GCPhys;
    if (RT_UNLIKELY(off >= pRam->cb
                    /** @todo || page state stuff */))
    {
        /* This case is not counted into StatR0DynMapGCPageInl. */
        STAM_COUNTER_INC(&pPGMCPU->StatR0DynMapGCPageInlRamMisses);
        return PGMDynMapGCPageOff(pVM, GCPhys, ppv);
    }

    RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[off >> PAGE_SHIFT]);
    STAM_COUNTER_INC(&pPGMCPU->StatR0DynMapGCPageInlRamHits);

    /*
     * pgmR0DynMapHCPageInlined without the stats.
     */
    PPGMMAPSET pSet = &pPGMCPU->AutoSet;
    Assert(!(HCPhys & PAGE_OFFSET_MASK));
    Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));

    unsigned iHash = PGMMAPSET_HASH(HCPhys);
    unsigned iEntry = pSet->aiHashTable[iHash];
    if (    iEntry < pSet->cEntries
        &&  pSet->aEntries[iEntry].HCPhys == HCPhys)
    {
        *ppv = (void *)((uintptr_t)pSet->aEntries[iEntry].pvPage | (PAGE_OFFSET_MASK & (uintptr_t)GCPhys));
        STAM_COUNTER_INC(&pPGMCPU->StatR0DynMapGCPageInlHits);
    }
    else
    {
        STAM_COUNTER_INC(&pPGMCPU->StatR0DynMapGCPageInlMisses);
        pgmR0DynMapHCPageCommon(pVM, pSet, HCPhys, ppv);
        *ppv = (void *)((uintptr_t)*ppv | (PAGE_OFFSET_MASK & (uintptr_t)GCPhys));
    }

    STAM_PROFILE_STOP(&pPGMCPU->StatR0DynMapGCPageInl, a);
    return VINF_SUCCESS;
}

#endif /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)

/**
 * Maps the page into current context (RC and maybe R0).
 *
 * @returns Pointer to the mapping.
 * @param pPGM Pointer to the PGM instance data.
 * @param pPage The page.
 */
DECLINLINE(void *) pgmPoolMapPageInlined(PPGM pPGM, PPGMPOOLPAGE pPage)
{
    if (pPage->idx >= PGMPOOL_IDX_FIRST)
    {
        Assert(pPage->idx < pPGM->CTX_SUFF(pPool)->cCurPages);
        void *pv;
# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
        pgmR0DynMapHCPageInlined(pPGM, pPage->Core.Key, &pv);
# else
        PGMDynMapHCPage(PGM2VM(pPGM), pPage->Core.Key, &pv);
# endif
        return pv;
    }
    AssertFatalMsgFailed(("pgmPoolMapPageInlined invalid page index %x\n", pPage->idx));
}

/**
 * Temporarily maps one host page specified by HC physical address, returning
 * a pointer within the page.
 *
 * Be WARNED that the dynamic page mapping area is small (8 pages), so the
 * space is reused after 8 mappings (or perhaps a few more if you score with
 * the cache).
 *
 * @returns The address corresponding to HCPhys.
 * @param pPGM Pointer to the PGM instance data.
 * @param HCPhys HC Physical address of the page.
 */
DECLINLINE(void *) pgmDynMapHCPageOff(PPGM pPGM, RTHCPHYS HCPhys)
{
    void *pv;
# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    pgmR0DynMapHCPageInlined(pPGM, HCPhys & ~(RTHCPHYS)PAGE_OFFSET_MASK, &pv);
# else
    PGMDynMapHCPage(PGM2VM(pPGM), HCPhys & ~(RTHCPHYS)PAGE_OFFSET_MASK, &pv);
# endif
    pv = (void *)((uintptr_t)pv | ((uintptr_t)HCPhys & PAGE_OFFSET_MASK));
    return pv;
}

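/*
 * Sketch of the offset handling above, with hypothetical values: for
 * HCPhys = 0x12345678 the page at 0x12345000 gets mapped at some pv and the
 * low 12 bits are OR'ed back in, so an unaligned field can be read directly
 * (pVM and HCPhys are hypothetical caller variables):
 *
 *     uint32_t u32 = *(uint32_t *)pgmDynMapHCPageOff(&pVM->pgm.s, HCPhys);
 */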
#endif /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 || IN_RC */
#ifndef IN_RC

/**
 * Queries the Physical TLB entry for a physical guest page,
 * attempting to load the TLB entry if necessary.
 *
 * @returns VBox status code.
 * @retval VINF_SUCCESS on success
 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
 *
 * @param pPGM The PGM instance handle.
 * @param GCPhys The address of the guest page.
 * @param ppTlbe Where to store the pointer to the TLB entry.
 */
DECLINLINE(int) pgmPhysPageQueryTlbe(PPGM pPGM, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe)
{
    int rc;
    PPGMPAGEMAPTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
    if (pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK))
    {
        STAM_COUNTER_INC(&pPGM->CTX_MID_Z(Stat,PageMapTlbHits));
        rc = VINF_SUCCESS;
    }
    else
        rc = pgmPhysPageLoadIntoTlb(pPGM, GCPhys);
    *ppTlbe = pTlbe;
    return rc;
}

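/*
 * Usage sketch (pVM and GCPhys are hypothetical caller variables; the PGM
 * lock is assumed to be held, as for the rest of the physical TLB code):
 *
 *     PPGMPAGEMAPTLBE pTlbe;
 *     int rc = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
 *     if (RT_SUCCESS(rc))
 *         ... pTlbe->pPage and the cached mapping are now valid ...
 */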

/**
 * Queries the Physical TLB entry for a physical guest page,
 * attempting to load the TLB entry if necessary.
 *
 * @returns VBox status code.
 * @retval VINF_SUCCESS on success
 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
 *
 * @param pPGM The PGM instance handle.
 * @param pPage Pointer to the PGMPAGE structure corresponding to GCPhys.
 * @param GCPhys The address of the guest page.
 * @param ppTlbe Where to store the pointer to the TLB entry.
 */
DECLINLINE(int) pgmPhysPageQueryTlbeWithPage(PPGM pPGM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe)
{
    int rc;
    PPGMPAGEMAPTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
    if (pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK))
    {
        STAM_COUNTER_INC(&pPGM->CTX_MID_Z(Stat,PageMapTlbHits));
        rc = VINF_SUCCESS;
    }
    else
        rc = pgmPhysPageLoadIntoTlbWithPage(pPGM, pPage, GCPhys);
    *ppTlbe = pTlbe;
    return rc;
}

#endif /* !IN_RC */

/**
 * Calculates the guest physical address of the large (4 MB) page in 32-bit paging mode.
 * Takes PSE-36 into account.
 *
 * @returns Guest physical address.
 * @param pPGM Pointer to the PGM instance data.
 * @param Pde The guest PDE.
 */
DECLINLINE(RTGCPHYS) pgmGstGet4MBPhysPage(PPGM pPGM, X86PDE Pde)
{
    RTGCPHYS GCPhys = Pde.u & X86_PDE4M_PG_MASK;
    GCPhys |= (RTGCPHYS)Pde.b.u8PageNoHigh << 32;

    return GCPhys & pPGM->GCPhys4MBPSEMask;
}

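/*
 * Worked example for the PSE-36/PSE-40 handling above: the low mask keeps
 * PDE bits 31:22 as physical address bits 31:22, while u8PageNoHigh (PDE
 * bits 20:13) is shifted into physical bits 39:32. So a PDE whose high page
 * number field is 0x01 places the 4 MB page 4 GiB up:
 *
 *     (0x00400000 & X86_PDE4M_PG_MASK) | ((RTGCPHYS)0x01 << 32)
 *         = 0x100400000
 *
 * GCPhys4MBPSEMask then clips any bits the guest CPU does not report, which
 * yields classic PSE-36 behaviour on CPUs with a 36-bit physical bus.
 */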

/**
 * Gets the page directory entry for the specified address (32-bit paging).
 *
 * @returns The page directory entry in question.
 * @param pPGM Pointer to the PGM instance data.
 * @param GCPtr The address.
 */
DECLINLINE(X86PDE) pgmGstGet32bitPDE(PPGMCPU pPGM, RTGCPTR GCPtr)
{
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    PCX86PD pGuestPD = NULL;
    int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pPGM->GCPhysCR3, (void **)&pGuestPD);
    if (RT_FAILURE(rc))
    {
        X86PDE ZeroPde = {0};
        AssertMsgFailedReturn(("%Rrc\n", rc), ZeroPde);
    }
#else
    PX86PD pGuestPD = pPGM->CTX_SUFF(pGst32BitPd);
# ifdef IN_RING3
    if (!pGuestPD)
        pGuestPD = pgmGstLazyMap32BitPD(pPGM);
# endif
#endif
    return pGuestPD->a[GCPtr >> X86_PD_SHIFT];
}


/**
 * Gets the address of a specific page directory entry (32-bit paging).
 *
 * @returns Pointer to the page directory entry in question.
 * @param pPGM Pointer to the PGM instance data.
 * @param GCPtr The address.
 */
DECLINLINE(PX86PDE) pgmGstGet32bitPDEPtr(PPGMCPU pPGM, RTGCPTR GCPtr)
{
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    PX86PD pGuestPD = NULL;
    int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pPGM->GCPhysCR3, (void **)&pGuestPD);
    AssertRCReturn(rc, NULL);
#else
    PX86PD pGuestPD = pPGM->CTX_SUFF(pGst32BitPd);
# ifdef IN_RING3
    if (!pGuestPD)
        pGuestPD = pgmGstLazyMap32BitPD(pPGM);
# endif
#endif
    return &pGuestPD->a[GCPtr >> X86_PD_SHIFT];
}


/**
 * Gets the address of the guest page directory (32-bit paging).
 *
 * @returns Pointer to the page directory in question.
 * @param pPGM Pointer to the PGM instance data.
 */
DECLINLINE(PX86PD) pgmGstGet32bitPDPtr(PPGMCPU pPGM)
{
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    PX86PD pGuestPD = NULL;
    int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pPGM->GCPhysCR3, (void **)&pGuestPD);
    AssertRCReturn(rc, NULL);
#else
    PX86PD pGuestPD = pPGM->CTX_SUFF(pGst32BitPd);
# ifdef IN_RING3
    if (!pGuestPD)
        pGuestPD = pgmGstLazyMap32BitPD(pPGM);
# endif
#endif
    return pGuestPD;
}


/**
 * Gets the guest page directory pointer table.
 *
 * @returns Pointer to the page directory pointer table in question.
 * @returns NULL if the page directory is not present or on an invalid page.
 * @param pPGM Pointer to the PGM instance data.
 */
DECLINLINE(PX86PDPT) pgmGstGetPaePDPTPtr(PPGMCPU pPGM)
{
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    PX86PDPT pGuestPDPT = NULL;
    int rc = pgmR0DynMapGCPageOffInlined(PGMCPU2PGM(pPGM), pPGM->GCPhysCR3, (void **)&pGuestPDPT);
    AssertRCReturn(rc, NULL);
#else
    PX86PDPT pGuestPDPT = pPGM->CTX_SUFF(pGstPaePdpt);
# ifdef IN_RING3
    if (!pGuestPDPT)
        pGuestPDPT = pgmGstLazyMapPaePDPT(pPGM);
# endif
#endif
    return pGuestPDPT;
}


/**
 * Gets the guest page directory pointer table entry for the specified address.
 *
 * @returns Pointer to the page directory pointer table entry in question.
 * @returns NULL if the page directory is not present or on an invalid page.
 * @param pPGM Pointer to the PGM instance data.
 * @param GCPtr The address.
 */
DECLINLINE(PX86PDPE) pgmGstGetPaePDPEPtr(PPGMCPU pPGM, RTGCPTR GCPtr)
{
    AssertGCPtr32(GCPtr);

#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    PX86PDPT pGuestPDPT = NULL;
    int rc = pgmR0DynMapGCPageOffInlined(PGMCPU2PGM(pPGM), pPGM->GCPhysCR3, (void **)&pGuestPDPT);
    AssertRCReturn(rc, NULL);
#else
    PX86PDPT pGuestPDPT = pPGM->CTX_SUFF(pGstPaePdpt);
# ifdef IN_RING3
    if (!pGuestPDPT)
        pGuestPDPT = pgmGstLazyMapPaePDPT(pPGM);
# endif
#endif
    return &pGuestPDPT->a[(GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE];
}


/**
 * Gets the page directory for the specified address.
 *
 * @returns Pointer to the page directory in question.
 * @returns NULL if the page directory is not present or on an invalid page.
 * @param pPGM Pointer to the PGM instance data.
 * @param GCPtr The address.
 */
DECLINLINE(PX86PDPAE) pgmGstGetPaePD(PPGMCPU pPGM, RTGCPTR GCPtr)
{
    AssertGCPtr32(GCPtr);

    PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pPGM);
    AssertReturn(pGuestPDPT, NULL);
    const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
    if (pGuestPDPT->a[iPdpt].n.u1Present)
    {
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
        PX86PDPAE pGuestPD = NULL;
        int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK, (void **)&pGuestPD);
        AssertRCReturn(rc, NULL);
#else
        PX86PDPAE pGuestPD = pPGM->CTX_SUFF(apGstPaePDs)[iPdpt];
        if (    !pGuestPD
            ||  (pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) != pPGM->aGCPhysGstPaePDs[iPdpt])
            pGuestPD = pgmGstLazyMapPaePD(pPGM, iPdpt);
#endif
        return pGuestPD;
        /* returning NULL is ok if we assume it's just an invalid page of some kind emulated as all 0s. (not quite true) */
    }
    return NULL;
}


/**
 * Gets the page directory entry for the specified address.
 *
 * @returns Pointer to the page directory entry in question.
 * @returns NULL if the page directory is not present or on an invalid page.
 * @param pPGM Pointer to the PGM instance data.
 * @param GCPtr The address.
 */
DECLINLINE(PX86PDEPAE) pgmGstGetPaePDEPtr(PPGMCPU pPGM, RTGCPTR GCPtr)
{
    AssertGCPtr32(GCPtr);

    PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pPGM);
    AssertReturn(pGuestPDPT, NULL);
    const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
    if (pGuestPDPT->a[iPdpt].n.u1Present)
    {
        const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
        PX86PDPAE pGuestPD = NULL;
        int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK, (void **)&pGuestPD);
        AssertRCReturn(rc, NULL);
#else
        PX86PDPAE pGuestPD = pPGM->CTX_SUFF(apGstPaePDs)[iPdpt];
        if (    !pGuestPD
            ||  (pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) != pPGM->aGCPhysGstPaePDs[iPdpt])
            pGuestPD = pgmGstLazyMapPaePD(pPGM, iPdpt);
#endif
        return &pGuestPD->a[iPD];
        /* returning NULL is ok if we assume it's just an invalid page or something which we'll emulate as all 0s. (not quite true) */
    }
    return NULL;
}


/**
 * Gets the page directory entry for the specified address.
 *
 * @returns The page directory entry in question.
 * @returns A non-present entry if the page directory is not present or on an invalid page.
 * @param pPGM Pointer to the PGM instance data.
 * @param GCPtr The address.
 */
DECLINLINE(X86PDEPAE) pgmGstGetPaePDE(PPGMCPU pPGM, RTGCPTR GCPtr)
{
    AssertGCPtr32(GCPtr);
    X86PDEPAE ZeroPde = {0};
    PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pPGM);
    if (RT_LIKELY(pGuestPDPT))
    {
        const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
        if (pGuestPDPT->a[iPdpt].n.u1Present)
        {
            const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
            PX86PDPAE pGuestPD = NULL;
            int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK, (void **)&pGuestPD);
            AssertRCReturn(rc, ZeroPde);
#else
            PX86PDPAE pGuestPD = pPGM->CTX_SUFF(apGstPaePDs)[iPdpt];
            if (    !pGuestPD
                ||  (pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) != pPGM->aGCPhysGstPaePDs[iPdpt])
                pGuestPD = pgmGstLazyMapPaePD(pPGM, iPdpt);
#endif
            return pGuestPD->a[iPD];
        }
    }
    return ZeroPde;
}


/**
 * Gets the page directory pointer table entry for the specified address
 * and returns the index into the page directory.
 *
 * @returns Pointer to the page directory in question.
 * @returns NULL if the page directory is not present or on an invalid page.
 * @param pPGM Pointer to the PGM instance data.
 * @param GCPtr The address.
 * @param piPD Receives the index into the returned page directory.
 * @param pPdpe Receives the page directory pointer entry. Optional.
 */
DECLINLINE(PX86PDPAE) pgmGstGetPaePDPtr(PPGMCPU pPGM, RTGCPTR GCPtr, unsigned *piPD, PX86PDPE pPdpe)
{
    AssertGCPtr32(GCPtr);

    PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pPGM);
    AssertReturn(pGuestPDPT, NULL);
    const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
    if (pPdpe)
        *pPdpe = pGuestPDPT->a[iPdpt];
    if (pGuestPDPT->a[iPdpt].n.u1Present)
    {
        const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
        PX86PDPAE pGuestPD = NULL;
        int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK, (void **)&pGuestPD);
        AssertRCReturn(rc, NULL);
#else
        PX86PDPAE pGuestPD = pPGM->CTX_SUFF(apGstPaePDs)[iPdpt];
        if (    !pGuestPD
            ||  (pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) != pPGM->aGCPhysGstPaePDs[iPdpt])
            pGuestPD = pgmGstLazyMapPaePD(pPGM, iPdpt);
#endif
        *piPD = iPD;
        return pGuestPD;
        /* returning NULL is ok if we assume it's just an invalid page of some kind emulated as all 0s. */
    }
    return NULL;
}

#ifndef IN_RC

/**
 * Gets the page map level-4 pointer for the guest.
 *
 * @returns Pointer to the PML4 page.
 * @param pPGM Pointer to the PGM instance data.
 */
DECLINLINE(PX86PML4) pgmGstGetLongModePML4Ptr(PPGMCPU pPGM)
{
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    PX86PML4 pGuestPml4;
    int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pPGM->GCPhysCR3, (void **)&pGuestPml4);
    AssertRCReturn(rc, NULL);
#else
    PX86PML4 pGuestPml4 = pPGM->CTX_SUFF(pGstAmd64Pml4);
# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R3
    if (!pGuestPml4)
        pGuestPml4 = pgmGstLazyMapPml4(pPGM);
# endif
    Assert(pGuestPml4);
#endif
    return pGuestPml4;
}


/**
 * Gets the pointer to a page map level-4 entry.
 *
 * @returns Pointer to the PML4 entry.
 * @param pPGM Pointer to the PGM instance data.
 * @param iPml4 The index.
 */
DECLINLINE(PX86PML4E) pgmGstGetLongModePML4EPtr(PPGMCPU pPGM, unsigned int iPml4)
{
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    PX86PML4 pGuestPml4;
    int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pPGM->GCPhysCR3, (void **)&pGuestPml4);
    AssertRCReturn(rc, NULL);
#else
    PX86PML4 pGuestPml4 = pPGM->CTX_SUFF(pGstAmd64Pml4);
# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R3
    if (!pGuestPml4)
        pGuestPml4 = pgmGstLazyMapPml4(pPGM);
# endif
    Assert(pGuestPml4);
#endif
    return &pGuestPml4->a[iPml4];
}


/**
 * Gets a page map level-4 entry.
 *
 * @returns The PML4 entry.
 * @param pPGM Pointer to the PGM instance data.
 * @param iPml4 The index.
 */
DECLINLINE(X86PML4E) pgmGstGetLongModePML4E(PPGMCPU pPGM, unsigned int iPml4)
{
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    PX86PML4 pGuestPml4;
    int rc = pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), pPGM->GCPhysCR3, (void **)&pGuestPml4);
    if (RT_FAILURE(rc))
    {
        X86PML4E ZeroPml4e = {0};
        AssertMsgFailedReturn(("%Rrc\n", rc), ZeroPml4e);
    }
#else
    PX86PML4 pGuestPml4 = pPGM->CTX_SUFF(pGstAmd64Pml4);
# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R3
    if (!pGuestPml4)
        pGuestPml4 = pgmGstLazyMapPml4(pPGM);
# endif
    Assert(pGuestPml4);
#endif
    return pGuestPml4->a[iPml4];
}


/**
 * Gets the page directory pointer entry for the specified address.
 *
 * @returns Pointer to the page directory pointer entry in question.
 * @returns NULL if the page directory is not present or on an invalid page.
 * @param pPGM Pointer to the PGM instance data.
 * @param GCPtr The address.
 * @param ppPml4e Page Map Level-4 Entry (out)
 */
DECLINLINE(PX86PDPE) pgmGstGetLongModePDPTPtr(PPGMCPU pPGM, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e)
{
    PX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
    const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    PCX86PML4E pPml4e = *ppPml4e = &pGuestPml4->a[iPml4];
    if (pPml4e->n.u1Present)
    {
        PX86PDPT pPdpt;
        int rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pPml4e->u & X86_PML4E_PG_MASK, &pPdpt);
        AssertRCReturn(rc, NULL);

        const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
        return &pPdpt->a[iPdpt];
    }
    return NULL;
}


/**
 * Gets the page directory entry for the specified address.
 *
 * @returns The page directory entry in question.
 * @returns A non-present entry if the page directory is not present or on an invalid page.
 * @param pPGM Pointer to the PGM instance data.
 * @param GCPtr The address.
 * @param ppPml4e Page Map Level-4 Entry (out)
 * @param pPdpe Page directory pointer table entry (out)
 */
DECLINLINE(X86PDEPAE) pgmGstGetLongModePDEEx(PPGMCPU pPGM, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPE pPdpe)
{
    X86PDEPAE ZeroPde = {0};
    PX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
    const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    PCX86PML4E pPml4e = *ppPml4e = &pGuestPml4->a[iPml4];
    if (pPml4e->n.u1Present)
    {
        PCX86PDPT pPdptTemp;
        int rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pPml4e->u & X86_PML4E_PG_MASK, &pPdptTemp);
        AssertRCReturn(rc, ZeroPde);

        const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
        *pPdpe = pPdptTemp->a[iPdpt];
        if (pPdptTemp->a[iPdpt].n.u1Present)
        {
            PCX86PDPAE pPD;
            rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pPdptTemp->a[iPdpt].u & X86_PDPE_PG_MASK, &pPD);
            AssertRCReturn(rc, ZeroPde);

            const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
            return pPD->a[iPD];
        }
    }

    return ZeroPde;
}


/**
 * Gets the page directory entry for the specified address.
 *
 * @returns The page directory entry in question.
 * @returns A non-present entry if the page directory is not present or on an invalid page.
 * @param pPGM Pointer to the PGM instance data.
 * @param GCPtr The address.
 */
DECLINLINE(X86PDEPAE) pgmGstGetLongModePDE(PPGMCPU pPGM, RTGCPTR64 GCPtr)
{
    X86PDEPAE ZeroPde = {0};
    PCX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
    const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    if (pGuestPml4->a[iPml4].n.u1Present)
    {
        PCX86PDPT pPdptTemp;
        int rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pGuestPml4->a[iPml4].u & X86_PML4E_PG_MASK, &pPdptTemp);
        AssertRCReturn(rc, ZeroPde);

        const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
        if (pPdptTemp->a[iPdpt].n.u1Present)
        {
            PCX86PDPAE pPD;
            rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pPdptTemp->a[iPdpt].u & X86_PDPE_PG_MASK, &pPD);
            AssertRCReturn(rc, ZeroPde);

            const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
            return pPD->a[iPD];
        }
    }
    return ZeroPde;
}

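/*
 * Usage sketch for a guest long-mode walk down to the PDE (pVCpu and GCPtr
 * are hypothetical caller variables; this code is not compiled in RC builds):
 *
 *     X86PDEPAE Pde = pgmGstGetLongModePDE(&pVCpu->pgm.s, GCPtr);
 *     if (Pde.n.u1Present)
 *     {
 *         if (Pde.b.u1Size)
 *             ... 2 MB page at Pde.u & X86_PDE2M_PAE_PG_MASK ...
 *         else
 *             ... page table at Pde.u & X86_PDE_PAE_PG_MASK ...
 *     }
 */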

/**
 * Gets the page directory entry for the specified address.
 *
 * @returns Pointer to the page directory entry in question.
 * @returns NULL if the page directory is not present or on an invalid page.
 * @param pPGM Pointer to the PGM instance data.
 * @param GCPtr The address.
 */
DECLINLINE(PX86PDEPAE) pgmGstGetLongModePDEPtr(PPGMCPU pPGM, RTGCPTR64 GCPtr)
{
    PCX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
    const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    if (pGuestPml4->a[iPml4].n.u1Present)
    {
        PCX86PDPT pPdptTemp;
        int rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pGuestPml4->a[iPml4].u & X86_PML4E_PG_MASK, &pPdptTemp);
        AssertRCReturn(rc, NULL);

        const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
        if (pPdptTemp->a[iPdpt].n.u1Present)
        {
            PX86PDPAE pPD;
            rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pPdptTemp->a[iPdpt].u & X86_PDPE_PG_MASK, &pPD);
            AssertRCReturn(rc, NULL);

            const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
            return &pPD->a[iPD];
        }
    }
    return NULL;
}


/**
 * Gets the GUEST page directory pointer for the specified address.
 *
 * @returns The page directory in question.
 * @returns NULL if the page directory is not present or on an invalid page.
 * @param pPGM Pointer to the PGM instance data.
 * @param GCPtr The address.
 * @param ppPml4e Page Map Level-4 Entry (out)
 * @param pPdpe Page directory pointer table entry (out)
 * @param piPD Receives the index into the returned page directory
 */
DECLINLINE(PX86PDPAE) pgmGstGetLongModePDPtr(PPGMCPU pPGM, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPE pPdpe, unsigned *piPD)
{
    PX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
    const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    PCX86PML4E pPml4e = *ppPml4e = &pGuestPml4->a[iPml4];
    if (pPml4e->n.u1Present)
    {
        PCX86PDPT pPdptTemp;
        int rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pPml4e->u & X86_PML4E_PG_MASK, &pPdptTemp);
        AssertRCReturn(rc, NULL);

        const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
        *pPdpe = pPdptTemp->a[iPdpt];
        if (pPdptTemp->a[iPdpt].n.u1Present)
        {
            PX86PDPAE pPD;
            rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pPdptTemp->a[iPdpt].u & X86_PDPE_PG_MASK, &pPD);
            AssertRCReturn(rc, NULL);

            *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
            return pPD;
        }
    }
    return NULL;
}

#endif /* !IN_RC */

/**
 * Gets the shadow page directory, 32-bit.
 *
 * @returns Pointer to the shadow 32-bit PD.
 * @param pPGM Pointer to the PGM instance data.
 */
DECLINLINE(PX86PD) pgmShwGet32BitPDPtr(PPGMCPU pPGM)
{
    return (PX86PD)PGMPOOL_PAGE_2_PTR_BY_PGMCPU(pPGM, pPGM->CTX_SUFF(pShwPageCR3));
}


/**
 * Gets the shadow page directory entry for the specified address, 32-bit.
 *
 * @returns Shadow 32-bit PDE.
 * @param pPGM Pointer to the PGM instance data.
 * @param GCPtr The address.
 */
DECLINLINE(X86PDE) pgmShwGet32BitPDE(PPGMCPU pPGM, RTGCPTR GCPtr)
{
    const unsigned iPd = (GCPtr >> X86_PD_SHIFT) & X86_PD_MASK;

    PX86PD pShwPde = pgmShwGet32BitPDPtr(pPGM);
    if (!pShwPde)
    {
        X86PDE ZeroPde = {0};
        return ZeroPde;
    }
    return pShwPde->a[iPd];
}


/**
 * Gets the pointer to the shadow page directory entry for the specified
 * address, 32-bit.
 *
 * @returns Pointer to the shadow 32-bit PDE.
 * @param pPGM Pointer to the PGM instance data.
 * @param GCPtr The address.
 */
DECLINLINE(PX86PDE) pgmShwGet32BitPDEPtr(PPGMCPU pPGM, RTGCPTR GCPtr)
{
    const unsigned iPd = (GCPtr >> X86_PD_SHIFT) & X86_PD_MASK;

    PX86PD pPde = pgmShwGet32BitPDPtr(pPGM);
    AssertReturn(pPde, NULL);
    return &pPde->a[iPd];
}


/**
 * Gets the shadow page directory pointer table, PAE.
 *
 * @returns Pointer to the shadow PAE PDPT.
 * @param pPGM Pointer to the PGM instance data.
 */
DECLINLINE(PX86PDPT) pgmShwGetPaePDPTPtr(PPGMCPU pPGM)
{
    return (PX86PDPT)PGMPOOL_PAGE_2_PTR_BY_PGMCPU(pPGM, pPGM->CTX_SUFF(pShwPageCR3));
}


/**
 * Gets the shadow page directory for the specified address, PAE.
 *
 * @returns Pointer to the shadow PD.
 * @param pPGM Pointer to the PGM instance data.
 * @param GCPtr The address.
 */
DECLINLINE(PX86PDPAE) pgmShwGetPaePDPtr(PPGMCPU pPGM, RTGCPTR GCPtr)
{
    const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
    PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pPGM);

    if (!pPdpt->a[iPdpt].n.u1Present)
        return NULL;

    /* Fetch the pgm pool shadow descriptor. */
    PPGMPOOLPAGE pShwPde = pgmPoolGetPage(PGMCPU2PGM(pPGM)->CTX_SUFF(pPool), pPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
    AssertReturn(pShwPde, NULL);

    return (PX86PDPAE)PGMPOOL_PAGE_2_PTR_BY_PGMCPU(pPGM, pShwPde);
}


/**
 * Gets the shadow page directory for the specified address, PAE.
 *
 * @returns Pointer to the shadow PD.
 * @param pPGM Pointer to the PGM instance data.
 * @param pPdpt Pointer to the shadow PAE PDPT to use.
 * @param GCPtr The address.
 */
DECLINLINE(PX86PDPAE) pgmShwGetPaePDPtr(PPGMCPU pPGM, PX86PDPT pPdpt, RTGCPTR GCPtr)
{
    const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;

    if (!pPdpt->a[iPdpt].n.u1Present)
        return NULL;

    /* Fetch the pgm pool shadow descriptor. */
    PPGMPOOLPAGE pShwPde = pgmPoolGetPage(PGMCPU2PGM(pPGM)->CTX_SUFF(pPool), pPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
    AssertReturn(pShwPde, NULL);

    return (PX86PDPAE)PGMPOOL_PAGE_2_PTR_BY_PGMCPU(pPGM, pShwPde);
}


/**
 * Gets the shadow page directory entry, PAE.
 *
 * @returns PDE.
 * @param pPGM Pointer to the PGM instance data.
 * @param GCPtr The address.
 */
DECLINLINE(X86PDEPAE) pgmShwGetPaePDE(PPGMCPU pPGM, RTGCPTR GCPtr)
{
    const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;

    PX86PDPAE pShwPde = pgmShwGetPaePDPtr(pPGM, GCPtr);
    if (!pShwPde)
    {
        X86PDEPAE ZeroPde = {0};
        return ZeroPde;
    }
    return pShwPde->a[iPd];
}


/**
 * Gets the pointer to the shadow page directory entry for an address, PAE.
 *
 * @returns Pointer to the PDE.
 * @param pPGM Pointer to the PGM instance data.
 * @param GCPtr The address.
 */
DECLINLINE(PX86PDEPAE) pgmShwGetPaePDEPtr(PPGMCPU pPGM, RTGCPTR GCPtr)
{
    const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;

    PX86PDPAE pPde = pgmShwGetPaePDPtr(pPGM, GCPtr);
    AssertReturn(pPde, NULL);
    return &pPde->a[iPd];
}

#ifndef IN_RC

/**
 * Gets the shadow page map level-4 pointer.
 *
 * @returns Pointer to the shadow PML4.
 * @param pPGM Pointer to the PGM instance data.
 */
DECLINLINE(PX86PML4) pgmShwGetLongModePML4Ptr(PPGMCPU pPGM)
{
    return (PX86PML4)PGMPOOL_PAGE_2_PTR_BY_PGMCPU(pPGM, pPGM->CTX_SUFF(pShwPageCR3));
}


/**
 * Gets the shadow page map level-4 entry for the specified address.
 *
 * @returns The entry.
 * @param pPGM Pointer to the PGM instance data.
 * @param GCPtr The address.
 */
DECLINLINE(X86PML4E) pgmShwGetLongModePML4E(PPGMCPU pPGM, RTGCPTR GCPtr)
{
    const unsigned iPml4 = ((RTGCUINTPTR64)GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    PX86PML4 pShwPml4 = pgmShwGetLongModePML4Ptr(pPGM);

    if (!pShwPml4)
    {
        X86PML4E ZeroPml4e = {0};
        return ZeroPml4e;
    }
    return pShwPml4->a[iPml4];
}


/**
 * Gets the pointer to the specified shadow page map level-4 entry.
 *
 * @returns Pointer to the entry.
 * @param pPGM Pointer to the PGM instance data.
 * @param iPml4 The PML4 index.
 */
DECLINLINE(PX86PML4E) pgmShwGetLongModePML4EPtr(PPGMCPU pPGM, unsigned int iPml4)
{
    PX86PML4 pShwPml4 = pgmShwGetLongModePML4Ptr(pPGM);
    if (!pShwPml4)
        return NULL;
    return &pShwPml4->a[iPml4];
}


/**
 * Gets the GUEST page directory pointer for the specified address.
 *
 * @returns The page directory in question.
 * @returns NULL if the page directory is not present or on an invalid page.
 * @param pPGM Pointer to the PGM instance data.
 * @param GCPtr The address.
 * @param piPD Receives the index into the returned page directory
 */
DECLINLINE(PX86PDPAE) pgmGstGetLongModePDPtr(PPGMCPU pPGM, RTGCPTR64 GCPtr, unsigned *piPD)
{
    PCX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pPGM);
    const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    if (pGuestPml4->a[iPml4].n.u1Present)
    {
        PCX86PDPT pPdptTemp;
        int rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pGuestPml4->a[iPml4].u & X86_PML4E_PG_MASK, &pPdptTemp);
        AssertRCReturn(rc, NULL);

        const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
        if (pPdptTemp->a[iPdpt].n.u1Present)
        {
            PX86PDPAE pPD;
            rc = PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, pPdptTemp->a[iPdpt].u & X86_PDPE_PG_MASK, &pPD);
            AssertRCReturn(rc, NULL);

            *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
            return pPD;
        }
    }
    return NULL;
}

#endif /* !IN_RC */

/**
 * Gets the page state for a physical handler.
 *
 * @returns The physical handler page state.
 * @param pCur The physical handler in question.
 */
DECLINLINE(unsigned) pgmHandlerPhysicalCalcState(PPGMPHYSHANDLER pCur)
{
    switch (pCur->enmType)
    {
        case PGMPHYSHANDLERTYPE_PHYSICAL_WRITE:
            return PGM_PAGE_HNDL_PHYS_STATE_WRITE;

        case PGMPHYSHANDLERTYPE_MMIO:
        case PGMPHYSHANDLERTYPE_PHYSICAL_ALL:
            return PGM_PAGE_HNDL_PHYS_STATE_ALL;

        default:
            AssertFatalMsgFailed(("Invalid type %d\n", pCur->enmType));
    }
}


/**
 * Gets the page state for a virtual handler.
 *
 * @returns The virtual handler page state.
 * @param pCur The virtual handler in question.
 * @remarks This should never be used on a hypervisor access handler.
 */
DECLINLINE(unsigned) pgmHandlerVirtualCalcState(PPGMVIRTHANDLER pCur)
{
    switch (pCur->enmType)
    {
        case PGMVIRTHANDLERTYPE_WRITE:
            return PGM_PAGE_HNDL_VIRT_STATE_WRITE;
        case PGMVIRTHANDLERTYPE_ALL:
            return PGM_PAGE_HNDL_VIRT_STATE_ALL;
        default:
            AssertFatalMsgFailed(("Invalid type %d\n", pCur->enmType));
    }
}


/**
 * Clears one physical page of a virtual handler
 *
 * @param pPGM Pointer to the PGM instance.
 * @param pCur Virtual handler structure
 * @param iPage Physical page index
 *
 * @remark Only used when PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL is being set, so no
 *         need to care about other handlers in the same page.
 */
DECLINLINE(void) pgmHandlerVirtualClearPage(PPGM pPGM, PPGMVIRTHANDLER pCur, unsigned iPage)
{
    const PPGMPHYS2VIRTHANDLER pPhys2Virt = &pCur->aPhysToVirt[iPage];

    /*
     * Remove the node from the tree (it's supposed to be in the tree if we get here!).
     */
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
    AssertReleaseMsg(pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_IN_TREE,
                     ("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
                      pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias));
#endif
    if (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_IS_HEAD)
    {
        /* We're the head of the alias chain. */
        PPGMPHYS2VIRTHANDLER pRemove = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysRemove(&pPGM->CTX_SUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key); NOREF(pRemove);
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
        AssertReleaseMsg(pRemove != NULL,
                         ("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
                          pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias));
        AssertReleaseMsg(pRemove == pPhys2Virt,
                         ("wanted: pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n"
                          "   got:    pRemove=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
                          pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias,
                          pRemove, pRemove->Core.Key, pRemove->Core.KeyLast, pRemove->offVirtHandler, pRemove->offNextAlias));
#endif
        if (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK)
        {
            /* Insert the next list in the alias chain into the tree. */
            PPGMPHYS2VIRTHANDLER pNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPhys2Virt + (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
            AssertReleaseMsg(pNext->offNextAlias & PGMPHYS2VIRTHANDLER_IN_TREE,
                             ("pNext=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
                              pNext, pNext->Core.Key, pNext->Core.KeyLast, pNext->offVirtHandler, pNext->offNextAlias));
#endif
            pNext->offNextAlias |= PGMPHYS2VIRTHANDLER_IS_HEAD;
            bool fRc = RTAvlroGCPhysInsert(&pPGM->CTX_SUFF(pTrees)->PhysToVirtHandlers, &pNext->Core);
            AssertRelease(fRc);
        }
    }
    else
    {
        /* Locate the previous node in the alias chain. */
        PPGMPHYS2VIRTHANDLER pPrev = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGet(&pPGM->CTX_SUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key);
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
        AssertReleaseMsg(pPrev != pPhys2Virt,
                         ("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} pPrev=%p\n",
                          pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias, pPrev));
#endif
        for (;;)
        {
            PPGMPHYS2VIRTHANDLER pNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPrev + (pPrev->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
            if (pNext == pPhys2Virt)
            {
                /* unlink. */
                LogFlow(("pgmHandlerVirtualClearPage: removed %p:{.offNextAlias=%#RX32} from alias chain. prev %p:{.offNextAlias=%#RX32} [%RGp-%RGp]\n",
                         pPhys2Virt, pPhys2Virt->offNextAlias, pPrev, pPrev->offNextAlias, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast));
                if (!(pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK))
                    pPrev->offNextAlias &= ~PGMPHYS2VIRTHANDLER_OFF_MASK;
                else
                {
                    PPGMPHYS2VIRTHANDLER pNewNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPhys2Virt + (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
                    pPrev->offNextAlias = ((intptr_t)pNewNext - (intptr_t)pPrev)
                                        | (pPrev->offNextAlias & ~PGMPHYS2VIRTHANDLER_OFF_MASK);
                }
                break;
            }

            /* next */
            if (pNext == pPrev)
            {
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
                AssertReleaseMsg(pNext != pPrev,
                                 ("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} pPrev=%p\n",
                                  pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias, pPrev));
#endif
                break;
            }
            pPrev = pNext;
        }
    }
    Log2(("PHYS2VIRT: Removing %RGp-%RGp %#RX32 %s\n",
          pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias, R3STRING(pCur->pszDesc)));
    pPhys2Virt->offNextAlias = 0;
    pPhys2Virt->Core.KeyLast = NIL_RTGCPHYS; /* require reinsert */

    /*
     * Clear the ram flags for this page.
     */
    PPGMPAGE pPage = pgmPhysGetPage(pPGM, pPhys2Virt->Core.Key);
    AssertReturnVoid(pPage);
    PGM_PAGE_SET_HNDL_VIRT_STATE(pPage, PGM_PAGE_HNDL_VIRT_STATE_NONE);
}


/**
 * Internal worker for finding an 'in-use' shadow page given by its pool page index.
 *
 * @returns Pointer to the shadow page structure.
 * @param pPool The pool.
 * @param idx The pool page index.
 */
DECLINLINE(PPGMPOOLPAGE) pgmPoolGetPageByIdx(PPGMPOOL pPool, unsigned idx)
{
    AssertFatalMsg(idx >= PGMPOOL_IDX_FIRST && idx < pPool->cCurPages, ("idx=%d\n", idx));
    return &pPool->aPages[idx];
}


/**
 * Clear references to guest physical memory.
 *
 * @param pPool The pool.
 * @param pPoolPage The pool page.
 * @param pPhysPage The physical guest page tracking structure.
 */
DECLINLINE(void) pgmTrackDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPoolPage, PPGMPAGE pPhysPage)
{
    /*
     * Just deal with the simple case here.
     */
# ifdef LOG_ENABLED
    const unsigned uOrg = PGM_PAGE_GET_TRACKING(pPhysPage);
# endif
    const unsigned cRefs = PGM_PAGE_GET_TD_CREFS(pPhysPage);
    if (cRefs == 1)
    {
        Assert(pPoolPage->idx == PGM_PAGE_GET_TD_IDX(pPhysPage));
        PGM_PAGE_SET_TRACKING(pPhysPage, 0);
    }
    else
        pgmPoolTrackPhysExtDerefGCPhys(pPool, pPoolPage, pPhysPage);
    Log2(("pgmTrackDerefGCPhys: %x -> %x pPhysPage=%R[pgmpage]\n", uOrg, PGM_PAGE_GET_TRACKING(pPhysPage), pPhysPage));
}


/**
 * Moves the page to the head of the age list.
 *
 * This is done when the cached page is used in one way or another.
 *
 * @param pPool The pool.
 * @param pPage The cached page.
 */
DECLINLINE(void) pgmPoolCacheUsed(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
{
    PVM pVM = pPool->CTX_SUFF(pVM);
    pgmLock(pVM);

    /*
     * Move to the head of the age list.
     */
    if (pPage->iAgePrev != NIL_PGMPOOL_IDX)
    {
        /* unlink */
        pPool->aPages[pPage->iAgePrev].iAgeNext = pPage->iAgeNext;
        if (pPage->iAgeNext != NIL_PGMPOOL_IDX)
            pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->iAgePrev;
        else
            pPool->iAgeTail = pPage->iAgePrev;

        /* insert at head */
        pPage->iAgePrev = NIL_PGMPOOL_IDX;
        pPage->iAgeNext = pPool->iAgeHead;
        Assert(pPage->iAgeNext != NIL_PGMPOOL_IDX); /* we would've already been head then */
        pPool->iAgeHead = pPage->idx;
        pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->idx;
    }
    pgmUnlock(pVM);
}

/**
 * Locks a page to prevent flushing (important for CR3 root pages or shadow PAE PD pages).
 *
 * @param pPool The pool.
 * @param pPage The PGM pool page.
 */
DECLINLINE(void) pgmPoolLockPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
{
    Assert(PGMIsLockOwner(pPool->CTX_SUFF(pVM)));
    ASMAtomicIncU32(&pPage->cLocked);
}


/**
 * Unlocks a page to allow flushing again.
 *
 * @param pPool The pool.
 * @param pPage The PGM pool page.
 */
DECLINLINE(void) pgmPoolUnlockPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
{
    Assert(PGMIsLockOwner(pPool->CTX_SUFF(pVM)));
    Assert(pPage->cLocked);
    ASMAtomicDecU32(&pPage->cLocked);
}

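/*
 * The lock count above only pins the page against pool flushing; it is not
 * a mutex. Typical pairing (pPool and pPage come from the caller, with the
 * PGM lock held as the assertions require):
 *
 *     pgmPoolLockPage(pPool, pPage);
 *     ... use the shadow page across code that may kick out pool pages ...
 *     pgmPoolUnlockPage(pPool, pPage);
 */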

/**
 * Checks if the page is locked (e.g. the active CR3 or one of the four PDs of a PAE PDPT).
 *
 * @returns true if the page is locked, false otherwise.
 * @param pPGM Pointer to the PGM instance data (unused here).
 * @param pPage The PGM pool page.
 */
DECLINLINE(bool) pgmPoolIsPageLocked(PPGM pPGM, PPGMPOOLPAGE pPage)
{
    if (pPage->cLocked)
    {
        LogFlow(("pgmPoolIsPageLocked found root page %d\n", pPage->enmKind));
        if (pPage->cModifications)
            pPage->cModifications = 1; /* reset counter (can't use 0, or else it will be reinserted in the modified list) */
        return true;
    }
    return false;
}


/**
 * Tells if mappings are to be put into the shadow page table or not.
 *
 * @returns boolean result
 * @param pPGM Pointer to the PGM instance data.
 */
DECL_FORCE_INLINE(bool) pgmMapAreMappingsEnabled(PPGM pPGM)
{
#ifdef PGM_WITHOUT_MAPPINGS
    /* There are no mappings in VT-x and AMD-V mode. */
    Assert(pPGM->fMappingsDisabled);
    return false;
#else
    return !pPGM->fMappingsDisabled;
#endif
}


/**
 * Checks if the mappings are floating and enabled.
 *
 * @returns true / false.
 * @param pPGM Pointer to the PGM instance data.
 */
DECL_FORCE_INLINE(bool) pgmMapAreMappingsFloating(PPGM pPGM)
{
#ifdef PGM_WITHOUT_MAPPINGS
    /* There are no mappings in VT-x and AMD-V mode. */
    Assert(pPGM->fMappingsDisabled);
    return false;
#else
    return !pPGM->fMappingsDisabled
        && !pPGM->fMappingsFixed;
#endif
}

/** @} */

#endif