VirtualBox

source: vbox/trunk/src/VBox/VMM/include/PGMInline.h @ 78033

Last change on this file since 78033 was 77240, checked in by vboxsync, 6 years ago

PGMPool: Two optimizations to the dirty page code (PGMPOOL_WITH_OPTIMIZED_DIRTY_PT): Inline the first part of pgmPoolIsDirtyPage so we don't waste time on a full-fledged call for nested paging. Split the PGMPOOL::aDirtyPages structure into index and page data so we can scan the indexes without requiring a cache line load for each entry. Also eliminated some double uIdx reads and checks.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 52.4 KB
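
The change description above splits PGMPOOL::aDirtyPages into a small index array and a separate page-data array, so the dirty-page scan only touches the densely packed indexes. The fragment below is a minimal, self-contained sketch of that array-of-structs to struct-of-arrays split; the type and field names (DIRTYPAGETRACKER, aidxDirty, aData, dirtyTrackerContains) and the slot count are illustrative only and do not reproduce the actual PGMPOOL layout.

#include <stdbool.h>
#include <stdint.h>

#define DIRTY_SLOTS 16

/* Before: each scan step loads a whole entry, pulling in page data it rarely needs. */
typedef struct DIRTYENTRYAOS
{
    uint16_t idxPage;           /* pool page index, UINT16_MAX when the slot is free */
    uint64_t aPteCopy[512];     /* saved shadow PTEs, ~4 KB per entry                */
} DIRTYENTRYAOS;

/* After: the indexes live in their own array, so scanning them stays within a cache
   line or two; the bulky page data is only touched once an index matches. */
typedef struct DIRTYPAGETRACKER
{
    uint16_t aidxDirty[DIRTY_SLOTS];
    struct { uint64_t aPteCopy[512]; } aData[DIRTY_SLOTS];
} DIRTYPAGETRACKER;

static bool dirtyTrackerContains(const DIRTYPAGETRACKER *pTracker, uint16_t idxPage)
{
    for (unsigned i = 0; i < DIRTY_SLOTS; i++)
        if (pTracker->aidxDirty[i] == idxPage)   /* cheap scan, no page data loaded */
            return true;
    return false;
}
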
Line 
1/* $Id: PGMInline.h 77240 2019-02-10 16:34:51Z vboxsync $ */
2/** @file
3 * PGM - Inlined functions.
4 */
5
6/*
7 * Copyright (C) 2006-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18#ifndef VMM_INCLUDED_SRC_include_PGMInline_h
19#define VMM_INCLUDED_SRC_include_PGMInline_h
20#ifndef RT_WITHOUT_PRAGMA_ONCE
21# pragma once
22#endif
23
24#include <VBox/cdefs.h>
25#include <VBox/types.h>
26#include <VBox/err.h>
27#include <VBox/vmm/stam.h>
28#include <VBox/param.h>
29#include <VBox/vmm/vmm.h>
30#include <VBox/vmm/mm.h>
31#include <VBox/vmm/pdmcritsect.h>
32#include <VBox/vmm/pdmapi.h>
33#include <VBox/dis.h>
34#include <VBox/vmm/dbgf.h>
35#include <VBox/log.h>
36#include <VBox/vmm/gmm.h>
37#include <VBox/vmm/hm.h>
38#ifndef IN_RC
39# include <VBox/vmm/nem.h>
40#endif
41#include <iprt/asm.h>
42#include <iprt/assert.h>
43#include <iprt/avl.h>
44#include <iprt/critsect.h>
45#include <iprt/sha.h>
46
47
48
49/** @addtogroup grp_pgm_int Internals
50 * @internal
51 * @{
52 */
53
54/**
55 * Gets the PGMRAMRANGE structure for a guest page.
56 *
57 * @returns Pointer to the RAM range on success.
58 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
59 *
60 * @param pVM The cross context VM structure.
61 * @param GCPhys The GC physical address.
62 */
63DECLINLINE(PPGMRAMRANGE) pgmPhysGetRange(PVM pVM, RTGCPHYS GCPhys)
64{
65 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
66 if (!pRam || GCPhys - pRam->GCPhys >= pRam->cb)
67 return pgmPhysGetRangeSlow(pVM, GCPhys);
68 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbHits));
69 return pRam;
70}
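/*
 * Usage sketch (illustrative, not part of the original header): a caller that
 * only has pgmPhysGetRange() available treats a NULL return as
 * VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS and derives the PGMPAGE from the range
 * itself, mirroring what pgmPhysGetPage() does below:
 *
 *     PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
 *     if (!pRam)
 *         return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
 *     PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
 */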
71
72
73/**
74 * Gets the PGMRAMRANGE structure for a guest page; if the page is unassigned, the
75 * RAM range above it is returned.
76 *
77 * @returns Pointer to the RAM range on success.
78 * @returns NULL if the address is located after the last range.
79 *
80 * @param pVM The cross context VM structure.
81 * @param GCPhys The GC physical address.
82 */
83DECLINLINE(PPGMRAMRANGE) pgmPhysGetRangeAtOrAbove(PVM pVM, RTGCPHYS GCPhys)
84{
85 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
86 if ( !pRam
87 || (GCPhys - pRam->GCPhys) >= pRam->cb)
88 return pgmPhysGetRangeAtOrAboveSlow(pVM, GCPhys);
89 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbHits));
90 return pRam;
91}
92
93
94/**
95 * Gets the PGMPAGE structure for a guest page.
96 *
97 * @returns Pointer to the page on success.
98 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
99 *
100 * @param pVM The cross context VM structure.
101 * @param GCPhys The GC physical address.
102 */
103DECLINLINE(PPGMPAGE) pgmPhysGetPage(PVM pVM, RTGCPHYS GCPhys)
104{
105 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
106 RTGCPHYS off;
107 if ( !pRam
108 || (off = GCPhys - pRam->GCPhys) >= pRam->cb)
109 return pgmPhysGetPageSlow(pVM, GCPhys);
110 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbHits));
111 return &pRam->aPages[off >> PAGE_SHIFT];
112}
113
114
115/**
116 * Gets the PGMPAGE structure for a guest page.
117 *
118 * Old Phys code: Will make sure the page is present.
119 *
120 * @returns VBox status code.
121 * @retval VINF_SUCCESS and a valid *ppPage on success.
122 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
123 *
124 * @param pVM The cross context VM structure.
125 * @param GCPhys The GC physical address.
126 * @param ppPage Where to store the page pointer on success.
127 */
128DECLINLINE(int) pgmPhysGetPageEx(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage)
129{
130 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
131 RTGCPHYS off;
132 if ( !pRam
133 || (off = GCPhys - pRam->GCPhys) >= pRam->cb)
134 return pgmPhysGetPageExSlow(pVM, GCPhys, ppPage);
135 *ppPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
136 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbHits));
137 return VINF_SUCCESS;
138}
139
140
141/**
142 * Gets the PGMPAGE structure for a guest page.
143 *
144 * Old Phys code: Will make sure the page is present.
145 *
146 * @returns VBox status code.
147 * @retval VINF_SUCCESS and a valid *ppPage on success.
148 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
149 *
150 * @param pVM The cross context VM structure.
151 * @param GCPhys The GC physical address.
152 * @param ppPage Where to store the page pointer on success.
153 * @param ppRamHint Where to read and store the ram list hint.
154 * The caller initializes this to NULL before the call.
155 */
156DECLINLINE(int) pgmPhysGetPageWithHintEx(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRamHint)
157{
158 RTGCPHYS off;
159 PPGMRAMRANGE pRam = *ppRamHint;
160 if ( !pRam
161 || RT_UNLIKELY((off = GCPhys - pRam->GCPhys) >= pRam->cb))
162 {
163 pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
164 if ( !pRam
165 || (off = GCPhys - pRam->GCPhys) >= pRam->cb)
166 return pgmPhysGetPageAndRangeExSlow(pVM, GCPhys, ppPage, ppRamHint);
167
168 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbHits));
169 *ppRamHint = pRam;
170 }
171 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
172 return VINF_SUCCESS;
173}
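/*
 * Usage sketch (illustrative): the RAM range hint is initialized to NULL once
 * and then threaded through a batch of lookups, so consecutive pages in the
 * same range bypass both the TLB and the slow path. cPages and GCPhysFirst are
 * hypothetical caller variables:
 *
 *     PPGMRAMRANGE pRamHint = NULL;
 *     for (uint32_t iPage = 0; iPage < cPages; iPage++)
 *     {
 *         PPGMPAGE pPage;
 *         int rc = pgmPhysGetPageWithHintEx(pVM, GCPhysFirst + ((RTGCPHYS)iPage << PAGE_SHIFT),
 *                                           &pPage, &pRamHint);
 *         if (RT_FAILURE(rc))
 *             break;
 *         // ... use pPage ...
 *     }
 */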
174
175
176/**
177 * Gets the PGMPAGE structure for a guest page together with the PGMRAMRANGE.
178 *
179 * @returns Pointer to the page on success.
180 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
181 *
182 * @param pVM The cross context VM structure.
183 * @param GCPhys The GC physical address.
184 * @param ppPage Where to store the pointer to the PGMPAGE structure.
185 * @param ppRam Where to store the pointer to the PGMRAMRANGE structure.
186 */
187DECLINLINE(int) pgmPhysGetPageAndRangeEx(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam)
188{
189 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
190 RTGCPHYS off;
191 if ( !pRam
192 || (off = GCPhys - pRam->GCPhys) >= pRam->cb)
193 return pgmPhysGetPageAndRangeExSlow(pVM, GCPhys, ppPage, ppRam);
194
195 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbHits));
196 *ppRam = pRam;
197 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
198 return VINF_SUCCESS;
199}
200
201
202/**
203 * Convert GC Phys to HC Phys.
204 *
205 * @returns VBox status code.
206 * @param pVM The cross context VM structure.
207 * @param GCPhys The GC physical address.
208 * @param pHCPhys Where to store the corresponding HC physical address.
209 *
210 * @deprecated Doesn't deal with zero, shared or write monitored pages.
211 * Avoid when writing new code!
212 */
213DECLINLINE(int) pgmRamGCPhys2HCPhys(PVM pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
214{
215 PPGMPAGE pPage;
216 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
217 if (RT_FAILURE(rc))
218 return rc;
219 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
220 return VINF_SUCCESS;
221}
222
223#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
224
225/**
226 * Inlined version of the ring-0 version of the host page mapping code
227 * that optimizes access to pages already in the set.
228 *
229 * @returns VINF_SUCCESS. Will bail out to ring-3 on failure.
230 * @param pVCpu The cross context virtual CPU structure.
231 * @param HCPhys The physical address of the page.
232 * @param ppv Where to store the mapping address.
233 * @param SRC_POS The source location of the caller.
234 */
235DECLINLINE(int) pgmRZDynMapHCPageInlined(PVMCPU pVCpu, RTHCPHYS HCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
236{
237 PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet;
238
239 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPageInl, a);
240 Assert(!(HCPhys & PAGE_OFFSET_MASK));
241 Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));
242
243 unsigned iHash = PGMMAPSET_HASH(HCPhys);
244 unsigned iEntry = pSet->aiHashTable[iHash];
245 if ( iEntry < pSet->cEntries
246 && pSet->aEntries[iEntry].HCPhys == HCPhys
247 && pSet->aEntries[iEntry].cInlinedRefs < UINT16_MAX - 1)
248 {
249 pSet->aEntries[iEntry].cInlinedRefs++;
250 *ppv = pSet->aEntries[iEntry].pvPage;
251 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPageInlHits);
252 }
253 else
254 {
255 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPageInlMisses);
256 pgmRZDynMapHCPageCommon(pSet, HCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
257 }
258
259 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPageInl, a);
260 return VINF_SUCCESS;
261}
262
263
264/**
265 * Inlined version of the guest page mapping code that optimizes access to pages
266 * already in the set.
267 *
268 * @returns VBox status code, see pgmRZDynMapGCPageCommon for details.
269 * @param pVM The cross context VM structure.
270 * @param pVCpu The cross context virtual CPU structure.
271 * @param GCPhys The guest physical address of the page.
272 * @param ppv Where to store the mapping address.
273 * @param SRC_POS The source location of the caller.
274 */
275DECLINLINE(int) pgmRZDynMapGCPageV2Inlined(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
276{
277 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInl, a);
278 AssertMsg(!(GCPhys & PAGE_OFFSET_MASK), ("%RGp\n", GCPhys));
279
280 /*
281 * Get the ram range.
282 */
283 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
284 RTGCPHYS off;
285 if ( !pRam
286 || (off = GCPhys - pRam->GCPhys) >= pRam->cb
287 /** @todo || page state stuff */
288 )
289 {
290 /* This case is not counted into StatRZDynMapGCPageInl. */
291 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlRamMisses);
292 return pgmRZDynMapGCPageCommon(pVM, pVCpu, GCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
293 }
294
295 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[off >> PAGE_SHIFT]);
296 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlRamHits);
297
298 /*
299 * pgmRZDynMapHCPageInlined without the stats.
300 */
301 PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet;
302 Assert(!(HCPhys & PAGE_OFFSET_MASK));
303 Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));
304
305 unsigned iHash = PGMMAPSET_HASH(HCPhys);
306 unsigned iEntry = pSet->aiHashTable[iHash];
307 if ( iEntry < pSet->cEntries
308 && pSet->aEntries[iEntry].HCPhys == HCPhys
309 && pSet->aEntries[iEntry].cInlinedRefs < UINT16_MAX - 1)
310 {
311 pSet->aEntries[iEntry].cInlinedRefs++;
312 *ppv = pSet->aEntries[iEntry].pvPage;
313 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlHits);
314 }
315 else
316 {
317 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlMisses);
318 pgmRZDynMapHCPageCommon(pSet, HCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
319 }
320
321 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInl, a);
322 return VINF_SUCCESS;
323}
324
325
326/**
327 * Inlined version of the ring-0 version of guest page mapping that optimizes
328 * access to pages already in the set.
329 *
330 * @returns VBox status code, see pgmRZDynMapGCPageCommon for details.
331 * @param pVCpu The cross context virtual CPU structure.
332 * @param GCPhys The guest physical address of the page.
333 * @param ppv Where to store the mapping address.
334 * @param SRC_POS The source location of the caller.
335 */
336DECLINLINE(int) pgmRZDynMapGCPageInlined(PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
337{
338 return pgmRZDynMapGCPageV2Inlined(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
339}
340
341
342/**
343 * Inlined version of the ring-0 version of the guest byte mapping code
344 * that optimizes access to pages already in the set.
345 *
346 * @returns VBox status code, see pgmRZDynMapGCPageCommon for details.
347 * @param pVCpu The cross context virtual CPU structure.
348 * @param GCPhys The guest physical address of the page.
349 * @param ppv Where to store the mapping address. The offset is
350 * preserved.
351 * @param SRC_POS The source location of the caller.
352 */
353DECLINLINE(int) pgmRZDynMapGCPageOffInlined(PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
354{
355 STAM_PROFILE_START(&pVCpu->pgm.s.StatRZDynMapGCPageInl, a);
356
357 /*
358 * Get the ram range.
359 */
360 PVM pVM = pVCpu->CTX_SUFF(pVM);
361 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
362 RTGCPHYS off;
363 if ( !pRam
364 || (off = GCPhys - pRam->GCPhys) >= pRam->cb
365 /** @todo || page state stuff */
366 )
367 {
368 /* This case is not counted into StatRZDynMapGCPageInl. */
369 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlRamMisses);
370 return pgmRZDynMapGCPageCommon(pVM, pVCpu, GCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
371 }
372
373 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[off >> PAGE_SHIFT]);
374 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlRamHits);
375
376 /*
377 * pgmRZDynMapHCPageInlined without the stats.
378 */
379 PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet;
380 Assert(!(HCPhys & PAGE_OFFSET_MASK));
381 Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));
382
383 unsigned iHash = PGMMAPSET_HASH(HCPhys);
384 unsigned iEntry = pSet->aiHashTable[iHash];
385 if ( iEntry < pSet->cEntries
386 && pSet->aEntries[iEntry].HCPhys == HCPhys
387 && pSet->aEntries[iEntry].cInlinedRefs < UINT16_MAX - 1)
388 {
389 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlHits);
390 pSet->aEntries[iEntry].cInlinedRefs++;
391 *ppv = (void *)((uintptr_t)pSet->aEntries[iEntry].pvPage | (PAGE_OFFSET_MASK & (uintptr_t)GCPhys));
392 }
393 else
394 {
395 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlMisses);
396 pgmRZDynMapHCPageCommon(pSet, HCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
397 *ppv = (void *)((uintptr_t)*ppv | (PAGE_OFFSET_MASK & (uintptr_t)GCPhys));
398 }
399
400 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInl, a);
401 return VINF_SUCCESS;
402}
403
404#endif /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 || IN_RC */
405#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
406
407/**
408 * Maps the page into current context (RC and maybe R0).
409 *
410 * @returns pointer to the mapping.
411 * @param pVM The cross context VM structure.
412 * @param pPage The page.
413 * @param SRC_POS The source location of the caller.
414 */
415DECLINLINE(void *) pgmPoolMapPageInlined(PVM pVM, PPGMPOOLPAGE pPage RTLOG_COMMA_SRC_POS_DECL)
416{
417 if (pPage->idx >= PGMPOOL_IDX_FIRST)
418 {
419 Assert(pPage->idx < pVM->pgm.s.CTX_SUFF(pPool)->cCurPages);
420 void *pv;
421 pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), pPage->Core.Key, &pv RTLOG_COMMA_SRC_POS_ARGS);
422 return pv;
423 }
424 AssertFatalMsgFailed(("pgmPoolMapPageInlined invalid page index %x\n", pPage->idx));
425}
426
427
428/**
429 * Maps the page into current context (RC and maybe R0).
430 *
431 * @returns pointer to the mapping.
432 * @param pVM The cross context VM structure.
433 * @param pVCpu The cross context virtual CPU structure.
434 * @param pPage The page.
435 * @param SRC_POS The source location of the caller.
436 */
437DECLINLINE(void *) pgmPoolMapPageV2Inlined(PVM pVM, PVMCPU pVCpu, PPGMPOOLPAGE pPage RTLOG_COMMA_SRC_POS_DECL)
438{
439 if (pPage->idx >= PGMPOOL_IDX_FIRST)
440 {
441 Assert(pPage->idx < pVM->pgm.s.CTX_SUFF(pPool)->cCurPages);
442 void *pv;
443 Assert(pVCpu == VMMGetCpu(pVM)); RT_NOREF_PV(pVM);
444 pgmRZDynMapHCPageInlined(pVCpu, pPage->Core.Key, &pv RTLOG_COMMA_SRC_POS_ARGS);
445 return pv;
446 }
447 AssertFatalMsgFailed(("pgmPoolMapPageV2Inlined invalid page index %x\n", pPage->idx));
448}
449
450#endif /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 || IN_RC */
451#ifndef IN_RC
452
453/**
454 * Queries the Physical TLB entry for a physical guest page,
455 * attempting to load the TLB entry if necessary.
456 *
457 * @returns VBox status code.
458 * @retval VINF_SUCCESS on success
459 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
460 *
461 * @param pVM The cross context VM structure.
462 * @param GCPhys The address of the guest page.
463 * @param ppTlbe Where to store the pointer to the TLB entry.
464 */
465DECLINLINE(int) pgmPhysPageQueryTlbe(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe)
466{
467 int rc;
468 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
469 if (pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK))
470 {
471 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbHits));
472 rc = VINF_SUCCESS;
473 }
474 else
475 rc = pgmPhysPageLoadIntoTlb(pVM, GCPhys);
476 *ppTlbe = pTlbe;
477 return rc;
478}
479
480
481/**
482 * Queries the Physical TLB entry for a physical guest page,
483 * attempting to load the TLB entry if necessary.
484 *
485 * @returns VBox status code.
486 * @retval VINF_SUCCESS on success
487 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
488 *
489 * @param pVM The cross context VM structure.
490 * @param pPage Pointer to the PGMPAGE structure corresponding to
491 * GCPhys.
492 * @param GCPhys The address of the guest page.
493 * @param ppTlbe Where to store the pointer to the TLB entry.
494 */
495DECLINLINE(int) pgmPhysPageQueryTlbeWithPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe)
496{
497 int rc;
498 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
499 if (pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK))
500 {
501 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbHits));
502 rc = VINF_SUCCESS;
503# if 0 //def VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
504# ifdef IN_RING3
505 if (pTlbe->pv == (void *)pVM->pgm.s.pvZeroPgR0)
506# else
507 if (pTlbe->pv == (void *)pVM->pgm.s.pvZeroPgR3)
508# endif
509 pTlbe->pv = pVM->pgm.s.CTX_SUFF(pvZeroPg);
510# endif
511 AssertPtr(pTlbe->pv);
512# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
513 Assert(!pTlbe->pMap || RT_VALID_PTR(pTlbe->pMap->pv));
514# endif
515 }
516 else
517 rc = pgmPhysPageLoadIntoTlbWithPage(pVM, pPage, GCPhys);
518 *ppTlbe = pTlbe;
519 return rc;
520}
521
522
523/**
524 * Calculates NEM page protection flags.
525 */
526DECL_FORCE_INLINE(uint32_t) pgmPhysPageCalcNemProtection(PPGMPAGE pPage, PGMPAGETYPE enmType)
527{
528 /*
529 * Deal with potentially writable pages first.
530 */
531 if (PGMPAGETYPE_IS_RWX(enmType))
532 {
533 if (!PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
534 {
535 if (PGM_PAGE_IS_ALLOCATED(pPage))
536 return NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE | NEM_PAGE_PROT_WRITE;
537 return NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE;
538 }
539 if (!PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
540 return NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE;
541 }
542 /*
543 * Potentially readable & executable pages.
544 */
545 else if ( PGMPAGETYPE_IS_ROX(enmType)
546 && !PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
547 return NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE;
548
549 /*
550 * The rest needs special access handling.
551 */
552 return NEM_PAGE_PROT_NONE;
553}
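/*
 * Summary (informal reading of the function above): RWX page types map to
 * READ|WRITE|EXECUTE when the page is allocated and has no active handlers,
 * and to READ|EXECUTE when it is not yet allocated or only non-all-access
 * handlers are active. ROX page types map to READ|EXECUTE unless an all-access
 * handler is registered. Everything else, including pages with all-access
 * handlers, maps to NONE so NEM has to trap every access.
 */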
554
555#endif /* !IN_RC */
556
557/**
558 * Enables write monitoring for an allocated page.
559 *
560 * The caller is responsible for updating the shadow page tables.
561 *
562 * @param pVM The cross context VM structure.
563 * @param pPage The page to write monitor.
564 * @param GCPhysPage The address of the page.
565 */
566DECLINLINE(void) pgmPhysPageWriteMonitor(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhysPage)
567{
568 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
569 PGM_LOCK_ASSERT_OWNER(pVM);
570
571 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_WRITE_MONITORED);
572 pVM->pgm.s.cMonitoredPages++;
573
574 /* Large pages must be disabled. */
575 if (PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE)
576 {
577 PPGMPAGE pFirstPage = pgmPhysGetPage(pVM, GCPhysPage & X86_PDE2M_PAE_PG_MASK);
578 AssertFatal(pFirstPage);
579 if (PGM_PAGE_GET_PDE_TYPE(pFirstPage) == PGM_PAGE_PDE_TYPE_PDE)
580 {
581 PGM_PAGE_SET_PDE_TYPE(pVM, pFirstPage, PGM_PAGE_PDE_TYPE_PDE_DISABLED);
582 pVM->pgm.s.cLargePagesDisabled++;
583 }
584 else
585 Assert(PGM_PAGE_GET_PDE_TYPE(pFirstPage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED);
586 }
587
588#ifndef IN_RC
589 /* Tell NEM. */
590 if (VM_IS_NEM_ENABLED(pVM))
591 {
592 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
593 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
594 NEMHCNotifyPhysPageProtChanged(pVM, GCPhysPage, PGM_PAGE_GET_HCPHYS(pPage),
595 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
596 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
597 }
598#endif
599}
600
601
602/**
603 * Checks if the no-execute (NX) feature is active (EFER.NXE=1).
604 *
605 * Only used when the guest is in PAE or long mode. This is inlined so that we
606 * can perform consistency checks in debug builds.
607 *
608 * @returns true if it is, false if it isn't.
609 * @param pVCpu The cross context virtual CPU structure.
610 */
611DECL_FORCE_INLINE(bool) pgmGstIsNoExecuteActive(PVMCPU pVCpu)
612{
613 Assert(pVCpu->pgm.s.fNoExecuteEnabled == CPUMIsGuestNXEnabled(pVCpu));
614 Assert(CPUMIsGuestInPAEMode(pVCpu) || CPUMIsGuestInLongMode(pVCpu));
615 return pVCpu->pgm.s.fNoExecuteEnabled;
616}
617
618
619/**
620 * Checks if the page size extension (PSE) is currently enabled (CR4.PSE=1).
621 *
622 * Only used when the guest is in paged 32-bit mode. This is inlined so that
623 * we can perform consistency checks in debug builds.
624 *
625 * @returns true if it is, false if it isn't.
626 * @param pVCpu The cross context virtual CPU structure.
627 */
628DECL_FORCE_INLINE(bool) pgmGst32BitIsPageSizeExtActive(PVMCPU pVCpu)
629{
630 Assert(pVCpu->pgm.s.fGst32BitPageSizeExtension == CPUMIsGuestPageSizeExtEnabled(pVCpu));
631 Assert(!CPUMIsGuestInPAEMode(pVCpu));
632 Assert(!CPUMIsGuestInLongMode(pVCpu));
633 return pVCpu->pgm.s.fGst32BitPageSizeExtension;
634}
635
636
637/**
638 * Calculated the guest physical address of the large (4 MB) page in 32 bits paging mode.
639 * Takes PSE-36 into account.
640 *
641 * @returns guest physical address
642 * @param pVM The cross context VM structure.
643 * @param Pde Guest Pde
644 */
645DECLINLINE(RTGCPHYS) pgmGstGet4MBPhysPage(PVM pVM, X86PDE Pde)
646{
647 RTGCPHYS GCPhys = Pde.u & X86_PDE4M_PG_MASK;
648 GCPhys |= (RTGCPHYS)Pde.b.u8PageNoHigh << 32;
649
650 return GCPhys & pVM->pgm.s.GCPhys4MBPSEMask;
651}
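/*
 * Worked example (illustrative numbers): with PSE-36 a 32-bit PDE supplies
 * physical address bits 31:22 directly and bits 39:32 via the u8PageNoHigh
 * field (PDE bits 20:13). A PDE whose bits 31:22 equal 0x0C0 and whose
 * u8PageNoHigh equals 0x02 therefore yields
 *
 *     GCPhys = 0x30000000 | ((RTGCPHYS)0x02 << 32) = 0x230000000
 *
 * which pgmGstGet4MBPhysPage() then masks with pVM->pgm.s.GCPhys4MBPSEMask.
 */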
652
653
654/**
655 * Gets the address of the guest page directory (32-bit paging).
656 *
657 * @returns VBox status code.
658 * @param pVCpu The cross context virtual CPU structure.
659 * @param ppPd Where to return the mapping. This is always set.
660 */
661DECLINLINE(int) pgmGstGet32bitPDPtrEx(PVMCPU pVCpu, PX86PD *ppPd)
662{
663#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
664 int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)ppPd RTLOG_COMMA_SRC_POS);
665 if (RT_FAILURE(rc))
666 {
667 *ppPd = NULL;
668 return rc;
669 }
670#else
671 *ppPd = pVCpu->pgm.s.CTX_SUFF(pGst32BitPd);
672 if (RT_UNLIKELY(!*ppPd))
673 return pgmGstLazyMap32BitPD(pVCpu, ppPd);
674#endif
675 return VINF_SUCCESS;
676}
677
678
679/**
680 * Gets the address of the guest page directory (32-bit paging).
681 *
682 * @returns Pointer to the page directory in question.
683 * @param pVCpu The cross context virtual CPU structure.
684 */
685DECLINLINE(PX86PD) pgmGstGet32bitPDPtr(PVMCPU pVCpu)
686{
687#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
688 PX86PD pGuestPD = NULL;
689 int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)&pGuestPD RTLOG_COMMA_SRC_POS);
690 if (RT_FAILURE(rc))
691 {
692 AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
693 return NULL;
694 }
695#else
696 PX86PD pGuestPD = pVCpu->pgm.s.CTX_SUFF(pGst32BitPd);
697 if (RT_UNLIKELY(!pGuestPD))
698 {
699 int rc = pgmGstLazyMap32BitPD(pVCpu, &pGuestPD);
700 if (RT_FAILURE(rc))
701 return NULL;
702 }
703#endif
704 return pGuestPD;
705}
706
707
708/**
709 * Gets the guest page directory pointer table.
710 *
711 * @returns VBox status code.
712 * @param pVCpu The cross context virtual CPU structure.
713 * @param ppPdpt Where to return the mapping. This is always set.
714 */
715DECLINLINE(int) pgmGstGetPaePDPTPtrEx(PVMCPU pVCpu, PX86PDPT *ppPdpt)
716{
717#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
718 int rc = pgmRZDynMapGCPageOffInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)ppPdpt RTLOG_COMMA_SRC_POS);
719 if (RT_FAILURE(rc))
720 {
721 *ppPdpt = NULL;
722 return rc;
723 }
724#else
725 *ppPdpt = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
726 if (RT_UNLIKELY(!*ppPdpt))
727 return pgmGstLazyMapPaePDPT(pVCpu, ppPdpt);
728#endif
729 return VINF_SUCCESS;
730}
731
732
733/**
734 * Gets the guest page directory pointer table.
735 *
736 * @returns Pointer to the page directory in question.
737 * @returns NULL if the page directory is not present or on an invalid page.
738 * @param pVCpu The cross context virtual CPU structure.
739 */
740DECLINLINE(PX86PDPT) pgmGstGetPaePDPTPtr(PVMCPU pVCpu)
741{
742 PX86PDPT pGuestPdpt;
743 int rc = pgmGstGetPaePDPTPtrEx(pVCpu, &pGuestPdpt);
744 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc);
745 return pGuestPdpt;
746}
747
748
749/**
750 * Gets the guest page directory pointer table entry for the specified address.
751 *
752 * @returns Pointer to the page directory in question.
753 * @returns NULL if the page directory is not present or on an invalid page.
754 * @param pVCpu The cross context virtual CPU structure.
755 * @param GCPtr The address.
756 */
757DECLINLINE(PX86PDPE) pgmGstGetPaePDPEPtr(PVMCPU pVCpu, RTGCPTR GCPtr)
758{
759 AssertGCPtr32(GCPtr);
760
761#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
762 PX86PDPT pGuestPDPT = NULL;
763 int rc = pgmRZDynMapGCPageOffInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)&pGuestPDPT RTLOG_COMMA_SRC_POS);
764 AssertRCReturn(rc, NULL);
765#else
766 PX86PDPT pGuestPDPT = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
767 if (RT_UNLIKELY(!pGuestPDPT))
768 {
769 int rc = pgmGstLazyMapPaePDPT(pVCpu, &pGuestPDPT);
770 if (RT_FAILURE(rc))
771 return NULL;
772 }
773#endif
774 return &pGuestPDPT->a[(uint32_t)GCPtr >> X86_PDPT_SHIFT];
775}
776
777
778/**
779 * Gets the page directory entry for the specified address.
780 *
781 * @returns The page directory entry in question.
782 * @returns A non-present entry if the page directory is not present or on an invalid page.
783 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
784 * @param GCPtr The address.
785 */
786DECLINLINE(X86PDEPAE) pgmGstGetPaePDE(PVMCPU pVCpu, RTGCPTR GCPtr)
787{
788 AssertGCPtr32(GCPtr);
789 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pVCpu);
790 if (RT_LIKELY(pGuestPDPT))
791 {
792 const unsigned iPdpt = (uint32_t)GCPtr >> X86_PDPT_SHIFT;
793 if ( pGuestPDPT->a[iPdpt].n.u1Present
794 && !(pGuestPDPT->a[iPdpt].u & pVCpu->pgm.s.fGstPaeMbzPdpeMask) )
795 {
796 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
797#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
798 PX86PDPAE pGuestPD = NULL;
799 int rc = pgmRZDynMapGCPageInlined(pVCpu,
800 pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK,
801 (void **)&pGuestPD
802 RTLOG_COMMA_SRC_POS);
803 if (RT_SUCCESS(rc))
804 return pGuestPD->a[iPD];
805 AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
806#else
807 PX86PDPAE pGuestPD = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
808 if ( !pGuestPD
809 || (pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) != pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt])
810 pgmGstLazyMapPaePD(pVCpu, iPdpt, &pGuestPD);
811 if (pGuestPD)
812 return pGuestPD->a[iPD];
813#endif
814 }
815 }
816
817 X86PDEPAE ZeroPde = {0};
818 return ZeroPde;
819}
820
821
822/**
823 * Gets the page directory pointer table entry for the specified address and
824 * returns the index into the page directory.
825 *
826 * @returns Pointer to the page directory in question.
827 * @returns NULL if the page directory is not present or on an invalid page.
828 * @param pVCpu The cross context virtual CPU structure.
829 * @param GCPtr The address.
830 * @param piPD Receives the index into the returned page directory
831 * @param pPdpe Receives the page directory pointer entry. Optional.
832 */
833DECLINLINE(PX86PDPAE) pgmGstGetPaePDPtr(PVMCPU pVCpu, RTGCPTR GCPtr, unsigned *piPD, PX86PDPE pPdpe)
834{
835 AssertGCPtr32(GCPtr);
836
837 /* The PDPE. */
838 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pVCpu);
839 if (RT_UNLIKELY(!pGuestPDPT))
840 return NULL;
841 const unsigned iPdpt = (uint32_t)GCPtr >> X86_PDPT_SHIFT;
842 if (pPdpe)
843 *pPdpe = pGuestPDPT->a[iPdpt];
844 if (!pGuestPDPT->a[iPdpt].n.u1Present)
845 return NULL;
846 if (RT_UNLIKELY(pVCpu->pgm.s.fGstPaeMbzPdpeMask & pGuestPDPT->a[iPdpt].u))
847 return NULL;
848
849 /* The PDE. */
850#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
851 PX86PDPAE pGuestPD = NULL;
852 int rc = pgmRZDynMapGCPageInlined(pVCpu,
853 pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK,
854 (void **)&pGuestPD
855 RTLOG_COMMA_SRC_POS);
856 if (RT_FAILURE(rc))
857 {
858 AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
859 return NULL;
860 }
861#else
862 PX86PDPAE pGuestPD = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
863 if ( !pGuestPD
864 || (pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) != pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt])
865 pgmGstLazyMapPaePD(pVCpu, iPdpt, &pGuestPD);
866#endif
867
868 *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
869 return pGuestPD;
870}
871
872#ifndef IN_RC
873
874/**
875 * Gets the page map level-4 pointer for the guest.
876 *
877 * @returns VBox status code.
878 * @param pVCpu The cross context virtual CPU structure.
879 * @param ppPml4 Where to return the mapping. Always set.
880 */
881DECLINLINE(int) pgmGstGetLongModePML4PtrEx(PVMCPU pVCpu, PX86PML4 *ppPml4)
882{
883#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
884 int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)ppPml4 RTLOG_COMMA_SRC_POS);
885 if (RT_FAILURE(rc))
886 {
887 *ppPml4 = NULL;
888 return rc;
889 }
890#else
891 *ppPml4 = pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4);
892 if (RT_UNLIKELY(!*ppPml4))
893 return pgmGstLazyMapPml4(pVCpu, ppPml4);
894#endif
895 return VINF_SUCCESS;
896}
897
898
899/**
900 * Gets the page map level-4 pointer for the guest.
901 *
902 * @returns Pointer to the PML4 page.
903 * @param pVCpu The cross context virtual CPU structure.
904 */
905DECLINLINE(PX86PML4) pgmGstGetLongModePML4Ptr(PVMCPU pVCpu)
906{
907 PX86PML4 pGuestPml4;
908 int rc = pgmGstGetLongModePML4PtrEx(pVCpu, &pGuestPml4);
909 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc);
910 return pGuestPml4;
911}
912
913
914/**
915 * Gets the pointer to a page map level-4 entry.
916 *
917 * @returns Pointer to the PML4 entry.
918 * @param pVCpu The cross context virtual CPU structure.
919 * @param iPml4 The index.
920 * @remarks Only used by AssertCR3.
921 */
922DECLINLINE(PX86PML4E) pgmGstGetLongModePML4EPtr(PVMCPU pVCpu, unsigned int iPml4)
923{
924#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
925 PX86PML4 pGuestPml4;
926 int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)&pGuestPml4 RTLOG_COMMA_SRC_POS);
927 AssertRCReturn(rc, NULL);
928#else
929 PX86PML4 pGuestPml4 = pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4);
930 if (RT_UNLIKELY(!pGuestPml4))
931 {
932 int rc = pgmGstLazyMapPml4(pVCpu, &pGuestPml4);
933 AssertRCReturn(rc, NULL);
934 }
935#endif
936 return &pGuestPml4->a[iPml4];
937}
938
939
940/**
941 * Gets the page directory entry for the specified address.
942 *
943 * @returns The page directory entry in question.
944 * @returns A non-present entry if the page directory is not present or on an invalid page.
945 * @param pVCpu The cross context virtual CPU structure.
946 * @param GCPtr The address.
947 */
948DECLINLINE(X86PDEPAE) pgmGstGetLongModePDE(PVMCPU pVCpu, RTGCPTR64 GCPtr)
949{
950 /*
951 * Note! To keep things simple, ASSUME invalid physical addresses will
952 * cause X86_TRAP_PF_RSVD. This isn't a problem until we start
953 * supporting 52-bit wide physical guest addresses.
954 */
955 PCX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pVCpu);
956 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
957 if ( RT_LIKELY(pGuestPml4)
958 && pGuestPml4->a[iPml4].n.u1Present
959 && !(pGuestPml4->a[iPml4].u & pVCpu->pgm.s.fGstAmd64MbzPml4eMask) )
960 {
961 PCX86PDPT pPdptTemp;
962 int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, pGuestPml4->a[iPml4].u & X86_PML4E_PG_MASK, &pPdptTemp);
963 if (RT_SUCCESS(rc))
964 {
965 const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
966 if ( pPdptTemp->a[iPdpt].n.u1Present
967 && !(pPdptTemp->a[iPdpt].u & pVCpu->pgm.s.fGstAmd64MbzPdpeMask) )
968 {
969 PCX86PDPAE pPD;
970 rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, pPdptTemp->a[iPdpt].u & X86_PDPE_PG_MASK, &pPD);
971 if (RT_SUCCESS(rc))
972 {
973 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
974 return pPD->a[iPD];
975 }
976 }
977 }
978 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
979 }
980
981 X86PDEPAE ZeroPde = {0};
982 return ZeroPde;
983}
984
985
986/**
987 * Gets the GUEST page directory pointer for the specified address.
988 *
989 * @returns The page directory in question.
990 * @returns NULL if the page directory is not present or on an invalid page.
991 * @param pVCpu The cross context virtual CPU structure.
992 * @param GCPtr The address.
993 * @param ppPml4e Page Map Level-4 Entry (out)
994 * @param pPdpe Page directory pointer table entry (out)
995 * @param piPD Receives the index into the returned page directory
996 */
997DECLINLINE(PX86PDPAE) pgmGstGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPE pPdpe, unsigned *piPD)
998{
999 /* The PML4E. */
1000 PX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pVCpu);
1001 if (RT_UNLIKELY(!pGuestPml4))
1002 return NULL;
1003 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1004 PCX86PML4E pPml4e = *ppPml4e = &pGuestPml4->a[iPml4];
1005 if (!pPml4e->n.u1Present)
1006 return NULL;
1007 if (RT_UNLIKELY(pPml4e->u & pVCpu->pgm.s.fGstAmd64MbzPml4eMask))
1008 return NULL;
1009
1010 /* The PDPE. */
1011 PCX86PDPT pPdptTemp;
1012 int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, pPml4e->u & X86_PML4E_PG_MASK, &pPdptTemp);
1013 if (RT_FAILURE(rc))
1014 {
1015 AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
1016 return NULL;
1017 }
1018 const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1019 *pPdpe = pPdptTemp->a[iPdpt];
1020 if (!pPdpe->n.u1Present)
1021 return NULL;
1022 if (RT_UNLIKELY(pPdpe->u & pVCpu->pgm.s.fGstAmd64MbzPdpeMask))
1023 return NULL;
1024
1025 /* The PDE. */
1026 PX86PDPAE pPD;
1027 rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, pPdptTemp->a[iPdpt].u & X86_PDPE_PG_MASK, &pPD);
1028 if (RT_FAILURE(rc))
1029 {
1030 AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
1031 return NULL;
1032 }
1033
1034 *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
1035 return pPD;
1036}
1037
1038#endif /* !IN_RC */
1039
1040/**
1041 * Gets the shadow page directory, 32-bit.
1042 *
1043 * @returns Pointer to the shadow 32-bit PD.
1044 * @param pVCpu The cross context virtual CPU structure.
1045 */
1046DECLINLINE(PX86PD) pgmShwGet32BitPDPtr(PVMCPU pVCpu)
1047{
1048 return (PX86PD)PGMPOOL_PAGE_2_PTR_V2(pVCpu->CTX_SUFF(pVM), pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1049}
1050
1051
1052/**
1053 * Gets the shadow page directory entry for the specified address, 32-bit.
1054 *
1055 * @returns Shadow 32-bit PDE.
1056 * @param pVCpu The cross context virtual CPU structure.
1057 * @param GCPtr The address.
1058 */
1059DECLINLINE(X86PDE) pgmShwGet32BitPDE(PVMCPU pVCpu, RTGCPTR GCPtr)
1060{
1061 PX86PD pShwPde = pgmShwGet32BitPDPtr(pVCpu);
1062 if (!pShwPde)
1063 {
1064 X86PDE ZeroPde = {0};
1065 return ZeroPde;
1066 }
1067 return pShwPde->a[(uint32_t)GCPtr >> X86_PD_SHIFT];
1068}
1069
1070
1071/**
1072 * Gets the pointer to the shadow page directory entry for the specified
1073 * address, 32-bit.
1074 *
1075 * @returns Pointer to the shadow 32-bit PDE.
1076 * @param pVCpu The cross context virtual CPU structure.
1077 * @param GCPtr The address.
1078 */
1079DECLINLINE(PX86PDE) pgmShwGet32BitPDEPtr(PVMCPU pVCpu, RTGCPTR GCPtr)
1080{
1081 PX86PD pPde = pgmShwGet32BitPDPtr(pVCpu);
1082 AssertReturn(pPde, NULL);
1083 return &pPde->a[(uint32_t)GCPtr >> X86_PD_SHIFT];
1084}
1085
1086
1087/**
1088 * Gets the shadow page directory pointer table, PAE.
1089 *
1090 * @returns Pointer to the shadow PAE PDPT.
1091 * @param pVCpu The cross context virtual CPU structure.
1092 */
1093DECLINLINE(PX86PDPT) pgmShwGetPaePDPTPtr(PVMCPU pVCpu)
1094{
1095 return (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVCpu->CTX_SUFF(pVM), pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1096}
1097
1098
1099/**
1100 * Gets the shadow page directory for the specified address, PAE.
1101 *
1102 * @returns Pointer to the shadow PD.
1103 * @param pVCpu The cross context virtual CPU structure.
1104 * @param GCPtr The address.
1105 */
1106DECLINLINE(PX86PDPAE) pgmShwGetPaePDPtr(PVMCPU pVCpu, RTGCPTR GCPtr)
1107{
1108 const unsigned iPdpt = (uint32_t)GCPtr >> X86_PDPT_SHIFT;
1109 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
1110
1111 if (!pPdpt->a[iPdpt].n.u1Present)
1112 return NULL;
1113
1114 /* Fetch the pgm pool shadow descriptor. */
1115 PVM pVM = pVCpu->CTX_SUFF(pVM);
1116 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), pPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
1117 AssertReturn(pShwPde, NULL);
1118
1119 return (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPde);
1120}
1121
1122
1123/**
1124 * Gets the shadow page directory for the specified address, PAE.
1125 *
1126 * @returns Pointer to the shadow PD.
1127 * @param pVCpu The cross context virtual CPU structure.
1128 * @param pPdpt Pointer to the page directory pointer table.
1129 * @param GCPtr The address.
1130 */
1131DECLINLINE(PX86PDPAE) pgmShwGetPaePDPtr(PVMCPU pVCpu, PX86PDPT pPdpt, RTGCPTR GCPtr)
1132{
1133 const unsigned iPdpt = (uint32_t)GCPtr >> X86_PDPT_SHIFT;
1134
1135 if (!pPdpt->a[iPdpt].n.u1Present)
1136 return NULL;
1137
1138 /* Fetch the pgm pool shadow descriptor. */
1139 PVM pVM = pVCpu->CTX_SUFF(pVM);
1140 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), pPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
1141 AssertReturn(pShwPde, NULL);
1142
1143 return (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPde);
1144}
1145
1146
1147/**
1148 * Gets the shadow page directory entry, PAE.
1149 *
1150 * @returns PDE.
1151 * @param pVCpu The cross context virtual CPU structure.
1152 * @param GCPtr The address.
1153 */
1154DECLINLINE(X86PDEPAE) pgmShwGetPaePDE(PVMCPU pVCpu, RTGCPTR GCPtr)
1155{
1156 const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
1157
1158 PX86PDPAE pShwPde = pgmShwGetPaePDPtr(pVCpu, GCPtr);
1159 if (!pShwPde)
1160 {
1161 X86PDEPAE ZeroPde = {0};
1162 return ZeroPde;
1163 }
1164 return pShwPde->a[iPd];
1165}
1166
1167
1168/**
1169 * Gets the pointer to the shadow page directory entry for an address, PAE.
1170 *
1171 * @returns Pointer to the PDE.
1172 * @param pVCpu The cross context virtual CPU structure.
1173 * @param GCPtr The address.
1174 * @remarks Only used by AssertCR3.
1175 */
1176DECLINLINE(PX86PDEPAE) pgmShwGetPaePDEPtr(PVMCPU pVCpu, RTGCPTR GCPtr)
1177{
1178 const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
1179
1180 PX86PDPAE pPde = pgmShwGetPaePDPtr(pVCpu, GCPtr);
1181 AssertReturn(pPde, NULL);
1182 return &pPde->a[iPd];
1183}
1184
1185#ifndef IN_RC
1186
1187/**
1188 * Gets the shadow page map level-4 pointer.
1189 *
1190 * @returns Pointer to the shadow PML4.
1191 * @param pVCpu The cross context virtual CPU structure.
1192 */
1193DECLINLINE(PX86PML4) pgmShwGetLongModePML4Ptr(PVMCPU pVCpu)
1194{
1195 return (PX86PML4)PGMPOOL_PAGE_2_PTR_V2(pVCpu->CTX_SUFF(pVM), pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1196}
1197
1198
1199/**
1200 * Gets the shadow page map level-4 entry for the specified address.
1201 *
1202 * @returns The entry.
1203 * @param pVCpu The cross context virtual CPU structure.
1204 * @param GCPtr The address.
1205 */
1206DECLINLINE(X86PML4E) pgmShwGetLongModePML4E(PVMCPU pVCpu, RTGCPTR GCPtr)
1207{
1208 const unsigned iPml4 = ((RTGCUINTPTR64)GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1209 PX86PML4 pShwPml4 = pgmShwGetLongModePML4Ptr(pVCpu);
1210
1211 if (!pShwPml4)
1212 {
1213 X86PML4E ZeroPml4e = {0};
1214 return ZeroPml4e;
1215 }
1216 return pShwPml4->a[iPml4];
1217}
1218
1219
1220/**
1221 * Gets the pointer to the specified shadow page map level-4 entry.
1222 *
1223 * @returns The entry.
1224 * @param pVCpu The cross context virtual CPU structure.
1225 * @param iPml4 The PML4 index.
1226 */
1227DECLINLINE(PX86PML4E) pgmShwGetLongModePML4EPtr(PVMCPU pVCpu, unsigned int iPml4)
1228{
1229 PX86PML4 pShwPml4 = pgmShwGetLongModePML4Ptr(pVCpu);
1230 if (!pShwPml4)
1231 return NULL;
1232 return &pShwPml4->a[iPml4];
1233}
1234
1235#endif /* !IN_RC */
1236
1237/**
1238 * Cached physical handler lookup.
1239 *
1240 * @returns Physical handler covering @a GCPhys.
1241 * @param pVM The cross context VM structure.
1242 * @param GCPhys The lookup address.
1243 */
1244DECLINLINE(PPGMPHYSHANDLER) pgmHandlerPhysicalLookup(PVM pVM, RTGCPHYS GCPhys)
1245{
1246 PPGMPHYSHANDLER pHandler = pVM->pgm.s.CTX_SUFF(pLastPhysHandler);
1247 if ( pHandler
1248 && GCPhys >= pHandler->Core.Key
1249 && GCPhys < pHandler->Core.KeyLast)
1250 {
1251 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysHandlerLookupHits));
1252 return pHandler;
1253 }
1254
1255 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysHandlerLookupMisses));
1256 pHandler = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1257 if (pHandler)
1258 pVM->pgm.s.CTX_SUFF(pLastPhysHandler) = pHandler;
1259 return pHandler;
1260}
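/*
 * Usage sketch (illustrative): this is a single-entry lookaside cache in front
 * of the AVL range tree. Call sites simply test the return value; a non-NULL
 * handler covers the address between Core.Key and Core.KeyLast:
 *
 *     PPGMPHYSHANDLER pHandler = pgmHandlerPhysicalLookup(pVM, GCPhysFault);
 *     if (pHandler)
 *     {
 *         // GCPhysFault hits a registered physical handler; dispatch it.
 *     }
 *
 * GCPhysFault is a hypothetical caller variable; holding the PGM lock around
 * the call is assumed, since the cached pLastPhysHandler pointer is shared.
 */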
1261
1262
1263#ifdef VBOX_WITH_RAW_MODE
1264/**
1265 * Clears one physical page of a virtual handler.
1266 *
1267 * @param pVM The cross context VM structure.
1268 * @param pCur Virtual handler structure.
1269 * @param iPage Physical page index.
1270 *
1271 * @remark Only used when PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL is being set, so no
1272 * need to care about other handlers in the same page.
1273 */
1274DECLINLINE(void) pgmHandlerVirtualClearPage(PVM pVM, PPGMVIRTHANDLER pCur, unsigned iPage)
1275{
1276 const PPGMPHYS2VIRTHANDLER pPhys2Virt = &pCur->aPhysToVirt[iPage];
1277
1278 /*
1279 * Remove the node from the tree (it's supposed to be in the tree if we get here!).
1280 */
1281# ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
1282 AssertReleaseMsg(pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_IN_TREE,
1283 ("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
1284 pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias));
1285# endif
1286 if (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_IS_HEAD)
1287 {
1288 /* We're the head of the alias chain. */
1289 PPGMPHYS2VIRTHANDLER pRemove = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key); NOREF(pRemove);
1290# ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
1291 AssertReleaseMsg(pRemove != NULL,
1292 ("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
1293 pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias));
1294 AssertReleaseMsg(pRemove == pPhys2Virt,
1295 ("wanted: pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n"
1296 " got: pRemove=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
1297 pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias,
1298 pRemove, pRemove->Core.Key, pRemove->Core.KeyLast, pRemove->offVirtHandler, pRemove->offNextAlias));
1299# endif
1300 if (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK)
1301 {
1302 /* Insert the next list in the alias chain into the tree. */
1303 PPGMPHYS2VIRTHANDLER pNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPhys2Virt + (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
1304# ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
1305 AssertReleaseMsg(pNext->offNextAlias & PGMPHYS2VIRTHANDLER_IN_TREE,
1306 ("pNext=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
1307 pNext, pNext->Core.Key, pNext->Core.KeyLast, pNext->offVirtHandler, pNext->offNextAlias));
1308# endif
1309 pNext->offNextAlias |= PGMPHYS2VIRTHANDLER_IS_HEAD;
1310 bool fRc = RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers, &pNext->Core);
1311 AssertRelease(fRc);
1312 }
1313 }
1314 else
1315 {
1316 /* Locate the previous node in the alias chain. */
1317 PPGMPHYS2VIRTHANDLER pPrev = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key);
1318# ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
1319 AssertReleaseMsg(pPrev != pPhys2Virt,
1320 ("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} pPrev=%p\n",
1321 pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias, pPrev));
1322# endif
1323 for (;;)
1324 {
1325 PPGMPHYS2VIRTHANDLER pNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPrev + (pPrev->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
1326 if (pNext == pPhys2Virt)
1327 {
1328 /* unlink. */
1329 LogFlow(("pgmHandlerVirtualClearPage: removed %p:{.offNextAlias=%#RX32} from alias chain. prev %p:{.offNextAlias=%#RX32} [%RGp-%RGp]\n",
1330 pPhys2Virt, pPhys2Virt->offNextAlias, pPrev, pPrev->offNextAlias, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast));
1331 if (!(pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK))
1332 pPrev->offNextAlias &= ~PGMPHYS2VIRTHANDLER_OFF_MASK;
1333 else
1334 {
1335 PPGMPHYS2VIRTHANDLER pNewNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPhys2Virt + (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
1336 pPrev->offNextAlias = ((intptr_t)pNewNext - (intptr_t)pPrev)
1337 | (pPrev->offNextAlias & ~PGMPHYS2VIRTHANDLER_OFF_MASK);
1338 }
1339 break;
1340 }
1341
1342 /* next */
1343 if (pNext == pPrev)
1344 {
1345# ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
1346 AssertReleaseMsg(pNext != pPrev,
1347 ("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} pPrev=%p\n",
1348 pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias, pPrev));
1349# endif
1350 break;
1351 }
1352 pPrev = pNext;
1353 }
1354 }
1355 Log2(("PHYS2VIRT: Removing %RGp-%RGp %#RX32 %s\n",
1356 pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias, R3STRING(pCur->pszDesc)));
1357 pPhys2Virt->offNextAlias = 0;
1358 pPhys2Virt->Core.KeyLast = NIL_RTGCPHYS; /* require reinsert */
1359
1360 /*
1361 * Clear the ram flags for this page.
1362 */
1363 PPGMPAGE pPage = pgmPhysGetPage(pVM, pPhys2Virt->Core.Key);
1364 AssertReturnVoid(pPage);
1365 PGM_PAGE_SET_HNDL_VIRT_STATE(pPage, PGM_PAGE_HNDL_VIRT_STATE_NONE);
1366}
1367#endif /* VBOX_WITH_RAW_MODE */
1368
1369
1370/**
1371 * Internal worker for finding an 'in-use' shadow page given by its pool index.
1372 *
1373 * @returns Pointer to the shadow page structure.
1374 * @param pPool The pool.
1375 * @param idx The pool page index.
1376 */
1377DECLINLINE(PPGMPOOLPAGE) pgmPoolGetPageByIdx(PPGMPOOL pPool, unsigned idx)
1378{
1379 AssertFatalMsg(idx >= PGMPOOL_IDX_FIRST && idx < pPool->cCurPages, ("idx=%d\n", idx));
1380 return &pPool->aPages[idx];
1381}
1382
1383
1384/**
1385 * Clear references to guest physical memory.
1386 *
1387 * @param pPool The pool.
1388 * @param pPoolPage The pool page.
1389 * @param pPhysPage The physical guest page tracking structure.
1390 * @param iPte Shadow PTE index
1391 */
1392DECLINLINE(void) pgmTrackDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPoolPage, PPGMPAGE pPhysPage, uint16_t iPte)
1393{
1394 /*
1395 * Just deal with the simple case here.
1396 */
1397# ifdef VBOX_STRICT
1398 PVM pVM = pPool->CTX_SUFF(pVM); NOREF(pVM);
1399# endif
1400# ifdef LOG_ENABLED
1401 const unsigned uOrg = PGM_PAGE_GET_TRACKING(pPhysPage);
1402# endif
1403 const unsigned cRefs = PGM_PAGE_GET_TD_CREFS(pPhysPage);
1404 if (cRefs == 1)
1405 {
1406 Assert(pPoolPage->idx == PGM_PAGE_GET_TD_IDX(pPhysPage));
1407 Assert(iPte == PGM_PAGE_GET_PTE_INDEX(pPhysPage));
1408 /* Invalidate the tracking data. */
1409 PGM_PAGE_SET_TRACKING(pVM, pPhysPage, 0);
1410 }
1411 else
1412 pgmPoolTrackPhysExtDerefGCPhys(pPool, pPoolPage, pPhysPage, iPte);
1413 Log2(("pgmTrackDerefGCPhys: %x -> %x pPhysPage=%R[pgmpage]\n", uOrg, PGM_PAGE_GET_TRACKING(pPhysPage), pPhysPage ));
1414}
1415
1416
1417/**
1418 * Moves the page to the head of the age list.
1419 *
1420 * This is done when the cached page is used in one way or another.
1421 *
1422 * @param pPool The pool.
1423 * @param pPage The cached page.
1424 */
1425DECLINLINE(void) pgmPoolCacheUsed(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
1426{
1427 PGM_LOCK_ASSERT_OWNER(pPool->CTX_SUFF(pVM));
1428
1429 /*
1430 * Move to the head of the age list.
1431 */
1432 if (pPage->iAgePrev != NIL_PGMPOOL_IDX)
1433 {
1434 /* unlink */
1435 pPool->aPages[pPage->iAgePrev].iAgeNext = pPage->iAgeNext;
1436 if (pPage->iAgeNext != NIL_PGMPOOL_IDX)
1437 pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->iAgePrev;
1438 else
1439 pPool->iAgeTail = pPage->iAgePrev;
1440
1441 /* insert at head */
1442 pPage->iAgePrev = NIL_PGMPOOL_IDX;
1443 pPage->iAgeNext = pPool->iAgeHead;
1444 Assert(pPage->iAgeNext != NIL_PGMPOOL_IDX); /* we would've already been head then */
1445 pPool->iAgeHead = pPage->idx;
1446 pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->idx;
1447 }
1448}
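/*
 * Illustration (informal): the age list is an intrusive doubly linked list
 * threaded through the pool pages by index rather than by pointer, with
 * NIL_PGMPOOL_IDX as the end marker:
 *
 *     iAgeHead -> [idx 5] <-> [idx 12] <-> [idx 3] <- iAgeTail
 *
 * pgmPoolCacheUsed() unlinks the touched page and reinserts it at the head, so
 * pages accumulate towards the tail in least-recently-used order, which is
 * presumably where the cache eviction code picks its victims.
 */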
1449
1450
1451/**
1452 * Locks a page to prevent flushing (important for CR3 root pages or shadow PAE PD pages).
1453 *
1454 * @param pPool The pool.
1455 * @param pPage PGM pool page
1456 */
1457DECLINLINE(void) pgmPoolLockPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
1458{
1459 PGM_LOCK_ASSERT_OWNER(pPool->CTX_SUFF(pVM)); NOREF(pPool);
1460 ASMAtomicIncU32(&pPage->cLocked);
1461}
1462
1463
1464/**
1465 * Unlocks a page to allow flushing again.
1466 *
1467 * @param pPool The pool.
1468 * @param pPage PGM pool page
1469 */
1470DECLINLINE(void) pgmPoolUnlockPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
1471{
1472 PGM_LOCK_ASSERT_OWNER(pPool->CTX_SUFF(pVM)); NOREF(pPool);
1473 Assert(pPage->cLocked);
1474 ASMAtomicDecU32(&pPage->cLocked);
1475}
1476
1477
1478/**
1479 * Checks if the page is locked (e.g. the active CR3 or one of the four PDs of a PAE PDPT).
1480 *
1481 * @returns true if the page is locked, false if not.
1482 * @param pPage PGM pool page
1483 */
1484DECLINLINE(bool) pgmPoolIsPageLocked(PPGMPOOLPAGE pPage)
1485{
1486 if (pPage->cLocked)
1487 {
1488 LogFlow(("pgmPoolIsPageLocked found root page %d\n", pPage->enmKind));
1489 if (pPage->cModifications)
1490 pPage->cModifications = 1; /* reset counter (can't use 0, or else it will be reinserted in the modified list) */
1491 return true;
1492 }
1493 return false;
1494}
1495
1496
1497/**
1498 * Checks if the specified page is dirty (not write monitored).
1499 *
1500 * @returns true if dirty, false if not.
1501 * @param pVM The cross context VM structure.
1502 * @param GCPhys Guest physical address.
1503 */
1504DECLINLINE(bool) pgmPoolIsDirtyPage(PVM pVM, RTGCPHYS GCPhys)
1505{
1506 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1507 PGM_LOCK_ASSERT_OWNER(pVM);
1508 if (!pPool->cDirtyPages)
1509 return false;
1510 return pgmPoolIsDirtyPageSlow(pVM, GCPhys);
1511}
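/*
 * Note: per the r77240 change description at the top of this page, the
 * cDirtyPages == 0 early-out above is the "inlined first part" of the dirty
 * page check, so the common nested-paging case never pays for a full call
 * into pgmPoolIsDirtyPageSlow().
 */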
1512
1513
1514/**
1515 * Tells if mappings are to be put into the shadow page table or not.
1516 *
1517 * @returns boolean result
1518 * @param pVM The cross context VM structure.
1519 */
1520DECL_FORCE_INLINE(bool) pgmMapAreMappingsEnabled(PVM pVM)
1521{
1522#ifdef PGM_WITHOUT_MAPPINGS
1523 /* Only raw-mode has mappings. */
1524 Assert(!VM_IS_RAW_MODE_ENABLED(pVM)); NOREF(pVM);
1525 return false;
1526#else
1527 Assert(pVM->cCpus == 1 || !VM_IS_RAW_MODE_ENABLED(pVM));
1528 return VM_IS_RAW_MODE_ENABLED(pVM);
1529#endif
1530}
1531
1532
1533/**
1534 * Checks if the mappings are floating and enabled.
1535 *
1536 * @returns true / false.
1537 * @param pVM The cross context VM structure.
1538 */
1539DECL_FORCE_INLINE(bool) pgmMapAreMappingsFloating(PVM pVM)
1540{
1541#ifdef PGM_WITHOUT_MAPPINGS
1542 /* Only raw-mode has mappings. */
1543 Assert(!VM_IS_RAW_MODE_ENABLED(pVM)); NOREF(pVM);
1544 return false;
1545#else
1546 return !pVM->pgm.s.fMappingsFixed
1547 && pgmMapAreMappingsEnabled(pVM);
1548#endif
1549}
1550
1551/** @} */
1552
1553#endif /* !VMM_INCLUDED_SRC_include_PGMInline_h */
1554