VirtualBox

source: vbox/trunk/src/VBox/VMM/include/PGMInline.h@ 104881

Last change on this file since 104881 was 104881, checked in by vboxsync, 10 months ago

VMM/PGM: Made the apRamRangesTlb users handle stale entries (the TLB may contain such since r163452). bugref:10687 bugref:10093

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 39.7 KB
1/* $Id: PGMInline.h 104881 2024-06-11 09:05:04Z vboxsync $ */
2/** @file
3 * PGM - Inlined functions.
4 */
5
6/*
7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28#ifndef VMM_INCLUDED_SRC_include_PGMInline_h
29#define VMM_INCLUDED_SRC_include_PGMInline_h
30#ifndef RT_WITHOUT_PRAGMA_ONCE
31# pragma once
32#endif
33
34#include <VBox/cdefs.h>
35#include <VBox/types.h>
36#include <VBox/err.h>
37#include <VBox/vmm/stam.h>
38#include <VBox/param.h>
39#include <VBox/vmm/vmm.h>
40#include <VBox/vmm/mm.h>
41#include <VBox/vmm/pdmcritsect.h>
42#include <VBox/vmm/pdmapi.h>
43#include <VBox/dis.h>
44#include <VBox/vmm/dbgf.h>
45#include <VBox/log.h>
46#include <VBox/vmm/gmm.h>
47#include <VBox/vmm/hm.h>
48#include <VBox/vmm/nem.h>
49#include <iprt/asm.h>
50#include <iprt/assert.h>
51#include <iprt/avl.h>
52#include <iprt/critsect.h>
53#include <iprt/sha.h>
54
55
56
57/** @addtogroup grp_pgm_int Internals
58 * @internal
59 * @{
60 */
61
62/**
63 * Gets the PGMRAMRANGE structure for a guest page.
64 *
65 * @returns Pointer to the RAM range on success.
66 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
67 *
68 * @param pVM The cross context VM structure.
69 * @param GCPhys The GC physical address.
70 */
71DECLINLINE(PPGMRAMRANGE) pgmPhysGetRange(PVMCC pVM, RTGCPHYS GCPhys)
72{
73 PPGMRAMRANGE const pRam = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRangesTlb[PGM_RAMRANGE_TLB_IDX(GCPhys)];
74 if (pRam)
75 {
76 RTGCPHYS const GCPhysFirst = pRam->GCPhys;
77 if (GCPhys - GCPhysFirst < pRam->cb && GCPhys >= GCPhysFirst)
78 {
79 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbHits));
80 return pRam;
81 }
82 }
83 return pgmPhysGetRangeSlow(pVM, GCPhys);
84}
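/*
 * Note: The lookup helpers that follow all use the same pattern as above: probe
 * the per-context RAM range TLB slot selected by PGM_RAMRANGE_TLB_IDX(GCPhys),
 * check that GCPhys really lies within the cached range (both comparisons are
 * needed so that a stale TLB entry - see the change note for this revision -
 * can never pass the test), and only fall back to the slow lookup on a miss.
 */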
85
86
87/**
88 * Gets the PGMRAMRANGE structure for a guest page; if the address is unassigned,
89 * returns the RAM range above it.
90 *
91 * @returns Pointer to the RAM range on success.
92 * @returns NULL if the address is located after the last range.
93 *
94 * @param pVM The cross context VM structure.
95 * @param GCPhys The GC physical address.
96 */
97DECLINLINE(PPGMRAMRANGE) pgmPhysGetRangeAtOrAbove(PVMCC pVM, RTGCPHYS GCPhys)
98{
99 PPGMRAMRANGE const pRam = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRangesTlb[PGM_RAMRANGE_TLB_IDX(GCPhys)];
100 if (pRam)
101 {
102 RTGCPHYS const GCPhysFirst = pRam->GCPhys;
103 if (GCPhys - GCPhysFirst < pRam->cb && GCPhys >= GCPhysFirst)
104 {
105 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbHits));
106 return pRam;
107 }
108 }
109 return pgmPhysGetRangeAtOrAboveSlow(pVM, GCPhys);
110}
111
112
113/**
114 * Gets the PGMPAGE structure for a guest page.
115 *
116 * @returns Pointer to the page on success.
117 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
118 *
119 * @param pVM The cross context VM structure.
120 * @param GCPhys The GC physical address.
121 */
122DECLINLINE(PPGMPAGE) pgmPhysGetPage(PVMCC pVM, RTGCPHYS GCPhys)
123{
124 PPGMRAMRANGE const pRam = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRangesTlb[PGM_RAMRANGE_TLB_IDX(GCPhys)];
125 if (pRam)
126 {
127 RTGCPHYS const GCPhysFirst = pRam->GCPhys;
128 RTGCPHYS const off = GCPhys - GCPhysFirst;
129 if (off < pRam->cb && GCPhys >= GCPhysFirst)
130 {
131 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbHits));
132 return &pRam->aPages[off >> GUEST_PAGE_SHIFT];
133 }
134 }
135 return pgmPhysGetPageSlow(pVM, GCPhys);
136}
137
138
139/**
140 * Gets the PGMPAGE structure for a guest page.
141 *
142 * Old Phys code: Will make sure the page is present.
143 *
144 * @returns VBox status code.
145 * @retval VINF_SUCCESS and a valid *ppPage on success.
146 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
147 *
148 * @param pVM The cross context VM structure.
149 * @param GCPhys The GC physical address.
150 * @param ppPage Where to store the page pointer on success.
151 */
152DECLINLINE(int) pgmPhysGetPageEx(PVMCC pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage)
153{
154 PPGMRAMRANGE const pRam = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRangesTlb[PGM_RAMRANGE_TLB_IDX(GCPhys)];
155 if (pRam)
156 {
157 RTGCPHYS const GCPhysFirst = pRam->GCPhys;
158 RTGCPHYS const off = GCPhys - GCPhysFirst;
159 if (off < pRam->cb && GCPhys >= GCPhysFirst)
160 {
161 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbHits));
162 *ppPage = &pRam->aPages[off >> GUEST_PAGE_SHIFT];
163 return VINF_SUCCESS;
164 }
165 }
166 return pgmPhysGetPageExSlow(pVM, GCPhys, ppPage);
167}
168
169
170/**
171 * Gets the PGMPAGE structure for a guest page.
172 *
173 * Old Phys code: Will make sure the page is present.
174 *
175 * @returns VBox status code.
176 * @retval VINF_SUCCESS and a valid *ppPage on success.
177 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
178 *
179 * @param pVM The cross context VM structure.
180 * @param GCPhys The GC physical address.
181 * @param ppPage Where to store the page pointer on success.
182 * @param ppRamHint Where to read and store the ram list hint.
183 * The caller initializes this to NULL before the call.
184 */
185DECLINLINE(int) pgmPhysGetPageWithHintEx(PVMCC pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRamHint)
186{
187 PPGMRAMRANGE pRam = *ppRamHint;
188 RTGCPHYS GCPhysFirst;
189 RTGCPHYS off;
190 if ( !pRam
191 || RT_UNLIKELY( (off = GCPhys - (GCPhysFirst = pRam->GCPhys)) >= pRam->cb
192 || GCPhys < GCPhysFirst) )
193 {
194 pRam = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRangesTlb[PGM_RAMRANGE_TLB_IDX(GCPhys)];
195 if ( !pRam
196 || (off = GCPhys - (GCPhysFirst = pRam->GCPhys)) >= pRam->cb
197 || GCPhys < GCPhysFirst)
198 return pgmPhysGetPageAndRangeExSlow(pVM, GCPhys, ppPage, ppRamHint);
199
200 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbHits));
201 *ppRamHint = pRam;
202 }
203 *ppPage = &pRam->aPages[off >> GUEST_PAGE_SHIFT];
204 return VINF_SUCCESS;
205}
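/*
 * Illustrative use of the ram-range hint (a hypothetical sketch, not code from
 * this file; cPages and GCPhysStart are assumed caller variables):
 *
 *     PPGMRAMRANGE pRamHint = NULL;       // must be initialized to NULL
 *     for (uint32_t i = 0; i < cPages; i++)
 *     {
 *         PPGMPAGE pPage;
 *         int rc = pgmPhysGetPageWithHintEx(pVM, GCPhysStart + ((RTGCPHYS)i << GUEST_PAGE_SHIFT),
 *                                           &pPage, &pRamHint);
 *         if (RT_FAILURE(rc))
 *             break;
 *         // ... use pPage; later iterations typically hit pRamHint directly ...
 *     }
 */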
206
207
208/**
209 * Gets the PGMPAGE structure for a guest page together with the PGMRAMRANGE.
210 *
211 * @returns VBox status code.
212 * @retval VINF_SUCCESS on success, VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
213 *
214 * @param pVM The cross context VM structure.
215 * @param GCPhys The GC physical address.
216 * @param ppPage Where to store the pointer to the PGMPAGE structure.
217 * @param ppRam Where to store the pointer to the PGMRAMRANGE structure.
218 */
219DECLINLINE(int) pgmPhysGetPageAndRangeEx(PVMCC pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam)
220{
221 PPGMRAMRANGE pRam = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRangesTlb[PGM_RAMRANGE_TLB_IDX(GCPhys)];
222 if (pRam)
223 {
224 RTGCPHYS const GCPhysFirst = pRam->GCPhys;
225 RTGCPHYS const off = GCPhys - GCPhysFirst;
226 if (off < pRam->cb && GCPhys >= GCPhysFirst)
227 {
228 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbHits));
229 *ppRam = pRam;
230 *ppPage = &pRam->aPages[off >> GUEST_PAGE_SHIFT];
231 return VINF_SUCCESS;
232 }
233 }
234 return pgmPhysGetPageAndRangeExSlow(pVM, GCPhys, ppPage, ppRam);
235}
236
237
238/**
239 * Convert GC Phys to HC Phys.
240 *
241 * @returns VBox status code.
242 * @param pVM The cross context VM structure.
243 * @param GCPhys The GC physical address.
244 * @param pHCPhys Where to store the corresponding HC physical address.
245 *
246 * @deprecated Doesn't deal with zero, shared or write monitored pages.
247 * Avoid when writing new code!
248 */
249DECLINLINE(int) pgmRamGCPhys2HCPhys(PVMCC pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
250{
251 PPGMPAGE pPage;
252 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
253 if (RT_FAILURE(rc))
254 return rc;
255 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & GUEST_PAGE_OFFSET_MASK);
256 return VINF_SUCCESS;
257}
258
259
260/**
261 * Queries the Physical TLB entry for a physical guest page,
262 * attempting to load the TLB entry if necessary.
263 *
264 * @returns VBox status code.
265 * @retval VINF_SUCCESS on success
266 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
267 *
268 * @param pVM The cross context VM structure.
269 * @param GCPhys The address of the guest page.
270 * @param ppTlbe Where to store the pointer to the TLB entry.
271 */
272DECLINLINE(int) pgmPhysPageQueryTlbe(PVMCC pVM, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe)
273{
274 int rc;
275 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTX_SUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
276 if (pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK))
277 {
278 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PageMapTlbHits));
279 rc = VINF_SUCCESS;
280 }
281 else
282 rc = pgmPhysPageLoadIntoTlb(pVM, GCPhys);
283 *ppTlbe = pTlbe;
284 return rc;
285}
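/*
 * Note: This helper and the ...WithPage variant below tag each TLB entry with
 * the page-aligned guest address (GCPhys & X86_PTE_PAE_PG_MASK).  On a tag
 * mismatch the entry occupying the slot PGM_PAGEMAPTLB_IDX(GCPhys) is reloaded
 * via pgmPhysPageLoadIntoTlb / pgmPhysPageLoadIntoTlbWithPage before *ppTlbe
 * is returned.
 */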
286
287
288/**
289 * Queries the Physical TLB entry for a physical guest page,
290 * attempting to load the TLB entry if necessary.
291 *
292 * @returns VBox status code.
293 * @retval VINF_SUCCESS on success
294 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
295 *
296 * @param pVM The cross context VM structure.
297 * @param pPage Pointer to the PGMPAGE structure corresponding to
298 * GCPhys.
299 * @param GCPhys The address of the guest page.
300 * @param ppTlbe Where to store the pointer to the TLB entry.
301 */
302DECLINLINE(int) pgmPhysPageQueryTlbeWithPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe)
303{
304 int rc;
305 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTX_SUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
306 if (pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK))
307 {
308 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PageMapTlbHits));
309 rc = VINF_SUCCESS;
310 AssertPtr(pTlbe->pv);
311#ifdef IN_RING3
312 Assert(!pTlbe->pMap || RT_VALID_PTR(pTlbe->pMap->pv));
313#endif
314 }
315 else
316 rc = pgmPhysPageLoadIntoTlbWithPage(pVM, pPage, GCPhys);
317 *ppTlbe = pTlbe;
318 return rc;
319}
320
321
322/**
323 * Calculates NEM page protection flags.
324 */
325DECL_FORCE_INLINE(uint32_t) pgmPhysPageCalcNemProtection(PPGMPAGE pPage, PGMPAGETYPE enmType)
326{
327 /*
328 * Deal with potentially writable pages first.
329 */
330 if (PGMPAGETYPE_IS_RWX(enmType))
331 {
332 if (!PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
333 {
334 if (PGM_PAGE_IS_ALLOCATED(pPage))
335 return NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE | NEM_PAGE_PROT_WRITE;
336 return NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE;
337 }
338 if (!PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
339 return NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE;
340 }
341 /*
342 * Potentially readable & executable pages.
343 */
344 else if ( PGMPAGETYPE_IS_ROX(enmType)
345 && !PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
346 return NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE;
347
348 /*
349 * The rest is needs special access handling.
350 */
351 return NEM_PAGE_PROT_NONE;
352}
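/*
 * Summary of the mapping implemented above:
 *   RWX page type, no active handlers, allocated  -> READ | WRITE | EXECUTE
 *   RWX page type, no active handlers, otherwise  -> READ | EXECUTE
 *   RWX page type, write handlers only            -> READ | EXECUTE
 *   ROX page type, no all-access handlers         -> READ | EXECUTE
 *   everything else                               -> NONE (needs access handling)
 */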
353
354
355/**
356 * Enables write monitoring for an allocated page.
357 *
358 * The caller is responsible for updating the shadow page tables.
359 *
360 * @param pVM The cross context VM structure.
361 * @param pPage The page to write monitor.
362 * @param GCPhysPage The address of the page.
363 */
364DECLINLINE(void) pgmPhysPageWriteMonitor(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhysPage)
365{
366 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
367 PGM_LOCK_ASSERT_OWNER(pVM);
368
369 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_WRITE_MONITORED);
370 pVM->pgm.s.cMonitoredPages++;
371
372 /* Large pages must be disabled. */
373 if (PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE)
374 {
375 PPGMPAGE pFirstPage = pgmPhysGetPage(pVM, GCPhysPage & X86_PDE2M_PAE_PG_MASK);
376 AssertFatal(pFirstPage);
377 if (PGM_PAGE_GET_PDE_TYPE(pFirstPage) == PGM_PAGE_PDE_TYPE_PDE)
378 {
379 PGM_PAGE_SET_PDE_TYPE(pVM, pFirstPage, PGM_PAGE_PDE_TYPE_PDE_DISABLED);
380 pVM->pgm.s.cLargePagesDisabled++;
381 }
382 else
383 Assert(PGM_PAGE_GET_PDE_TYPE(pFirstPage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED);
384 }
385
386#ifdef VBOX_WITH_NATIVE_NEM
387 /* Tell NEM. */
388 if (VM_IS_NEM_ENABLED(pVM))
389 {
390 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
391 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
392 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhysPage);
393 NEMHCNotifyPhysPageProtChanged(pVM, GCPhysPage, PGM_PAGE_GET_HCPHYS(pPage),
394 pRam ? PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage) : NULL,
395 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
396 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
397 }
398#endif
399}
400
401#ifndef VBOX_VMM_TARGET_ARMV8
402
403/**
404 * Checks if the no-execute (NX) feature is active (EFER.NXE=1).
405 *
406 * Only used when the guest is in PAE or long mode. This is inlined so that we
407 * can perform consistency checks in debug builds.
408 *
409 * @returns true if it is, false if it isn't.
410 * @param pVCpu The cross context virtual CPU structure.
411 */
412DECL_FORCE_INLINE(bool) pgmGstIsNoExecuteActive(PVMCPUCC pVCpu)
413{
414 Assert(pVCpu->pgm.s.fNoExecuteEnabled == CPUMIsGuestNXEnabled(pVCpu));
415 Assert(CPUMIsGuestInPAEMode(pVCpu) || CPUMIsGuestInLongMode(pVCpu));
416 return pVCpu->pgm.s.fNoExecuteEnabled;
417}
418
419
420/**
421 * Checks if the page size extension (PSE) is currently enabled (CR4.PSE=1).
422 *
423 * Only used when the guest is in paged 32-bit mode. This is inlined so that
424 * we can perform consistency checks in debug builds.
425 *
426 * @returns true if it is, false if it isn't.
427 * @param pVCpu The cross context virtual CPU structure.
428 */
429DECL_FORCE_INLINE(bool) pgmGst32BitIsPageSizeExtActive(PVMCPUCC pVCpu)
430{
431 Assert(pVCpu->pgm.s.fGst32BitPageSizeExtension == CPUMIsGuestPageSizeExtEnabled(pVCpu));
432 Assert(!CPUMIsGuestInPAEMode(pVCpu));
433 Assert(!CPUMIsGuestInLongMode(pVCpu));
434 return pVCpu->pgm.s.fGst32BitPageSizeExtension;
435}
436
437
438/**
439 * Calculates the guest physical address of the large (4 MB) page in 32-bit paging mode.
440 * Takes PSE-36 into account.
441 *
442 * @returns guest physical address
443 * @param pVM The cross context VM structure.
444 * @param Pde Guest Pde
445 */
446DECLINLINE(RTGCPHYS) pgmGstGet4MBPhysPage(PVMCC pVM, X86PDE Pde)
447{
448 RTGCPHYS GCPhys = Pde.u & X86_PDE4M_PG_MASK;
449 GCPhys |= (RTGCPHYS)(Pde.u & X86_PDE4M_PG_HIGH_MASK) << X86_PDE4M_PG_HIGH_SHIFT;
450
451 return GCPhys & pVM->pgm.s.GCPhys4MBPSEMask;
452}
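/*
 * Worked example, assuming the usual PSE-36 layout where PDE bits 31:22 give
 * physical bits 31:22 and PDE bits 20:13 give physical bits 39:32:
 *   Pde.u = 0x400020E3  ->  low part 0x40000000, bit 13 set -> high part 1 << 32,
 *   i.e. the 4 MB page starts at 0x140000000 (subject to GCPhys4MBPSEMask,
 *   which encodes the guest's supported physical address width).
 */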
453
454
455/**
456 * Gets the address of the guest page directory (32-bit paging).
457 *
458 * @returns VBox status code.
459 * @param pVCpu The cross context virtual CPU structure.
460 * @param ppPd Where to return the mapping. This is always set.
461 */
462DECLINLINE(int) pgmGstGet32bitPDPtrEx(PVMCPUCC pVCpu, PX86PD *ppPd)
463{
464 *ppPd = pVCpu->pgm.s.CTX_SUFF(pGst32BitPd);
465 if (RT_UNLIKELY(!*ppPd))
466 return pgmGstLazyMap32BitPD(pVCpu, ppPd);
467 return VINF_SUCCESS;
468}
469
470
471/**
472 * Gets the address of the guest page directory (32-bit paging).
473 *
474 * @returns Pointer to the page directory entry in question.
475 * @param pVCpu The cross context virtual CPU structure.
476 */
477DECLINLINE(PX86PD) pgmGstGet32bitPDPtr(PVMCPUCC pVCpu)
478{
479 PX86PD pGuestPD = pVCpu->pgm.s.CTX_SUFF(pGst32BitPd);
480 if (RT_UNLIKELY(!pGuestPD))
481 {
482 int rc = pgmGstLazyMap32BitPD(pVCpu, &pGuestPD);
483 if (RT_FAILURE(rc))
484 return NULL;
485 }
486 return pGuestPD;
487}
488
489
490/**
491 * Gets the guest page directory pointer table.
492 *
493 * @returns VBox status code.
494 * @param pVCpu The cross context virtual CPU structure.
495 * @param ppPdpt Where to return the mapping. This is always set.
496 */
497DECLINLINE(int) pgmGstGetPaePDPTPtrEx(PVMCPUCC pVCpu, PX86PDPT *ppPdpt)
498{
499 *ppPdpt = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
500 if (RT_UNLIKELY(!*ppPdpt))
501 return pgmGstLazyMapPaePDPT(pVCpu, ppPdpt);
502 return VINF_SUCCESS;
503}
504
505
506/**
507 * Gets the guest page directory pointer table.
508 *
509 * @returns Pointer to the page directory pointer table in question.
510 * @returns NULL if the page directory pointer table is not present or on an invalid page.
511 * @param pVCpu The cross context virtual CPU structure.
512 */
513DECLINLINE(PX86PDPT) pgmGstGetPaePDPTPtr(PVMCPUCC pVCpu)
514{
515 PX86PDPT pGuestPdpt;
516 int rc = pgmGstGetPaePDPTPtrEx(pVCpu, &pGuestPdpt);
517 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc);
518 return pGuestPdpt;
519}
520
521
522/**
523 * Gets the guest page directory pointer table entry for the specified address.
524 *
525 * @returns Pointer to the page directory pointer table entry in question.
526 * @returns NULL if the page directory pointer table is not present or on an invalid page.
527 * @param pVCpu The cross context virtual CPU structure.
528 * @param GCPtr The address.
529 */
530DECLINLINE(PX86PDPE) pgmGstGetPaePDPEPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr)
531{
532 AssertGCPtr32(GCPtr);
533
534 PX86PDPT pGuestPDPT = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
535 if (RT_UNLIKELY(!pGuestPDPT))
536 {
537 int rc = pgmGstLazyMapPaePDPT(pVCpu, &pGuestPDPT);
538 if (RT_FAILURE(rc))
539 return NULL;
540 }
541 return &pGuestPDPT->a[(uint32_t)GCPtr >> X86_PDPT_SHIFT];
542}
543
544
545/**
546 * Gets the page directory entry for the specified address.
547 *
548 * @returns The page directory entry in question.
549 * @returns A non-present entry if the page directory is not present or on an invalid page.
550 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
551 * @param GCPtr The address.
552 */
553DECLINLINE(X86PDEPAE) pgmGstGetPaePDE(PVMCPUCC pVCpu, RTGCPTR GCPtr)
554{
555 AssertGCPtr32(GCPtr);
556 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pVCpu);
557 if (RT_LIKELY(pGuestPDPT))
558 {
559 const unsigned iPdpt = (uint32_t)GCPtr >> X86_PDPT_SHIFT;
560 if ((pGuestPDPT->a[iPdpt].u & (pVCpu->pgm.s.fGstPaeMbzPdpeMask | X86_PDPE_P)) == X86_PDPE_P)
561 {
562 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
563 PX86PDPAE pGuestPD = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
564 if ( !pGuestPD
565 || (pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) != pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt])
566 pgmGstLazyMapPaePD(pVCpu, iPdpt, &pGuestPD);
567 if (pGuestPD)
568 return pGuestPD->a[iPD];
569 }
570 }
571
572 X86PDEPAE ZeroPde = {0};
573 return ZeroPde;
574}
575
576
577/**
578 * Gets the page directory pointer table entry for the specified address
579 * and returns the index into the page directory
580 *
581 * @returns Pointer to the page directory in question.
582 * @returns NULL if the page directory is not present or on an invalid page.
583 * @param pVCpu The cross context virtual CPU structure.
584 * @param GCPtr The address.
585 * @param piPD Receives the index into the returned page directory
586 * @param pPdpe Receives the page directory pointer entry. Optional.
587 */
588DECLINLINE(PX86PDPAE) pgmGstGetPaePDPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr, unsigned *piPD, PX86PDPE pPdpe)
589{
590 AssertGCPtr32(GCPtr);
591
592 /* The PDPE. */
593 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pVCpu);
594 if (pGuestPDPT)
595 {
596 const unsigned iPdpt = (uint32_t)GCPtr >> X86_PDPT_SHIFT;
597 X86PGPAEUINT const uPdpe = pGuestPDPT->a[iPdpt].u;
598 if (pPdpe)
599 pPdpe->u = uPdpe;
600 if ((uPdpe & (pVCpu->pgm.s.fGstPaeMbzPdpeMask | X86_PDPE_P)) == X86_PDPE_P)
601 {
602
603 /* The PDE. */
604 PX86PDPAE pGuestPD = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
605 if ( !pGuestPD
606 || (uPdpe & X86_PDPE_PG_MASK) != pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt])
607 pgmGstLazyMapPaePD(pVCpu, iPdpt, &pGuestPD);
608 *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
609 return pGuestPD;
610 }
611 }
612 return NULL;
613}
614
615
616/**
617 * Gets the page map level-4 pointer for the guest.
618 *
619 * @returns VBox status code.
620 * @param pVCpu The cross context virtual CPU structure.
621 * @param ppPml4 Where to return the mapping. Always set.
622 */
623DECLINLINE(int) pgmGstGetLongModePML4PtrEx(PVMCPUCC pVCpu, PX86PML4 *ppPml4)
624{
625 *ppPml4 = pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4);
626 if (RT_UNLIKELY(!*ppPml4))
627 return pgmGstLazyMapPml4(pVCpu, ppPml4);
628 return VINF_SUCCESS;
629}
630
631
632/**
633 * Gets the page map level-4 pointer for the guest.
634 *
635 * @returns Pointer to the PML4 page.
636 * @param pVCpu The cross context virtual CPU structure.
637 */
638DECLINLINE(PX86PML4) pgmGstGetLongModePML4Ptr(PVMCPUCC pVCpu)
639{
640 PX86PML4 pGuestPml4;
641 int rc = pgmGstGetLongModePML4PtrEx(pVCpu, &pGuestPml4);
642 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc);
643 return pGuestPml4;
644}
645
646
647/**
648 * Gets the pointer to a page map level-4 entry.
649 *
650 * @returns Pointer to the PML4 entry.
651 * @param pVCpu The cross context virtual CPU structure.
652 * @param iPml4 The index.
653 * @remarks Only used by AssertCR3.
654 */
655DECLINLINE(PX86PML4E) pgmGstGetLongModePML4EPtr(PVMCPUCC pVCpu, unsigned int iPml4)
656{
657 PX86PML4 pGuestPml4 = pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4);
658 if (pGuestPml4)
659 { /* likely */ }
660 else
661 {
662 int rc = pgmGstLazyMapPml4(pVCpu, &pGuestPml4);
663 AssertRCReturn(rc, NULL);
664 }
665 return &pGuestPml4->a[iPml4];
666}
667
668
669/**
670 * Gets the page directory entry for the specified address.
671 *
672 * @returns The page directory entry in question.
673 * @returns A non-present entry if the page directory is not present or on an invalid page.
674 * @param pVCpu The cross context virtual CPU structure.
675 * @param GCPtr The address.
676 */
677DECLINLINE(X86PDEPAE) pgmGstGetLongModePDE(PVMCPUCC pVCpu, RTGCPTR64 GCPtr)
678{
679 /*
680 * Note! To keep things simple, ASSUME invalid physical addresses will
681 * cause X86_TRAP_PF_RSVD. This isn't a problem until we start
682 * supporting 52-bit wide physical guest addresses.
683 */
684 PCX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pVCpu);
685 if (RT_LIKELY(pGuestPml4))
686 {
687 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
688 X86PGPAEUINT const uPml4e = pGuestPml4->a[iPml4].u;
689 if ((uPml4e & (pVCpu->pgm.s.fGstAmd64MbzPml4eMask | X86_PML4E_P)) == X86_PML4E_P)
690 {
691 PCX86PDPT pPdptTemp;
692 int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, uPml4e & X86_PML4E_PG_MASK, &pPdptTemp);
693 if (RT_SUCCESS(rc))
694 {
695 const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
696 X86PGPAEUINT const uPdpte = pPdptTemp->a[iPdpt].u;
697 if ((uPdpte & (pVCpu->pgm.s.fGstAmd64MbzPdpeMask | X86_PDPE_P)) == X86_PDPE_P)
698 {
699 PCX86PDPAE pPD;
700 rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, uPdpte & X86_PDPE_PG_MASK, &pPD);
701 if (RT_SUCCESS(rc))
702 {
703 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
704 return pPD->a[iPD];
705 }
706 }
707 }
708 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
709 }
710 }
711
712 X86PDEPAE ZeroPde = {0};
713 return ZeroPde;
714}
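/*
 * The walk above follows the standard AMD64 4-level layout: GCPtr bits 47:39
 * select the PML4 entry, bits 38:30 the PDPT entry and bits 29:21 the PDE that
 * is returned (a zero PDE is returned on any failure along the way).
 */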
715
716
717/**
718 * Gets the GUEST page directory pointer for the specified address.
719 *
720 * @returns The page directory in question.
721 * @returns NULL if the page directory is not present or on an invalid page.
722 * @param pVCpu The cross context virtual CPU structure.
723 * @param GCPtr The address.
724 * @param ppPml4e Page Map Level-4 Entry (out)
725 * @param pPdpe Page directory pointer table entry (out)
726 * @param piPD Receives the index into the returned page directory
727 */
728DECLINLINE(PX86PDPAE) pgmGstGetLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPE pPdpe, unsigned *piPD)
729{
730 /* The PML4E. */
731 PX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pVCpu);
732 if (pGuestPml4)
733 {
734 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
735 *ppPml4e = &pGuestPml4->a[iPml4];
736 X86PGPAEUINT const uPml4e = pGuestPml4->a[iPml4].u;
737 if ((uPml4e & (pVCpu->pgm.s.fGstAmd64MbzPml4eMask | X86_PML4E_P)) == X86_PML4E_P)
738 {
739 /* The PDPE. */
740 PCX86PDPT pPdptTemp;
741 int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, uPml4e & X86_PML4E_PG_MASK, &pPdptTemp);
742 if (RT_SUCCESS(rc))
743 {
744 const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
745 X86PGPAEUINT const uPdpe = pPdptTemp->a[iPdpt].u;
746 pPdpe->u = uPdpe;
747 if ((uPdpe & (pVCpu->pgm.s.fGstAmd64MbzPdpeMask | X86_PDPE_P)) == X86_PDPE_P)
748 {
749 /* The PDE. */
750 PX86PDPAE pPD;
751 rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, uPdpe & X86_PDPE_PG_MASK, &pPD);
752 if (RT_SUCCESS(rc))
753 {
754 *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
755 return pPD;
756 }
757 AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
758 }
759 }
760 else
761 AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
762 }
763 }
764 return NULL;
765}
766
767
768#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
769# if 0
770/**
771 * Gets the pointer to a page map level-4 entry when the guest using EPT paging.
772 *
773 * @returns Pointer to the PML4 entry.
774 * @param pVCpu The cross context virtual CPU structure.
775 * @param iPml4 The index.
776 * @remarks Only used by AssertCR3.
777 */
778DECLINLINE(PEPTPML4E) pgmGstGetEptPML4EPtr(PVMCPUCC pVCpu, unsigned int iPml4)
779{
780 PEPTPML4 pEptPml4 = pVCpu->pgm.s.CTX_SUFF(pGstEptPml4);
781 if (pEptPml4)
782 { /* likely */ }
783 else
784 {
785 int const rc = pgmGstLazyMapEptPml4(pVCpu, &pEptPml4);
786 AssertRCReturn(rc, NULL);
787 }
788 return &pEptPml4->a[iPml4];
789}
790# endif
791
792
793/**
794 * Gets the page map level-4 pointer for the guest when the guest is using EPT
795 * paging.
796 *
797 * @returns VBox status code.
798 * @param pVCpu The cross context virtual CPU structure.
799 * @param ppEptPml4 Where to return the mapping. Always set.
800 */
801DECLINLINE(int) pgmGstGetEptPML4PtrEx(PVMCPUCC pVCpu, PEPTPML4 *ppEptPml4)
802{
803 /* Shadow CR3 might not have been mapped at this point, see PGMHCChangeMode. */
804 *ppEptPml4 = pVCpu->pgm.s.CTX_SUFF(pGstEptPml4);
805 if (!*ppEptPml4)
806 return pgmGstLazyMapEptPml4(pVCpu, ppEptPml4);
807 return VINF_SUCCESS;
808}
809
810
811# if 0
812/**
813 * Gets the page map level-4 pointer for the guest when the guest is using EPT
814 * paging.
815 *
816 * @returns Pointer to the EPT PML4 page.
817 * @param pVCpu The cross context virtual CPU structure.
818 */
819DECLINLINE(PEPTPML4) pgmGstGetEptPML4Ptr(PVMCPUCC pVCpu)
820{
821 PEPTPML4 pEptPml4;
822 int rc = pgmGstGetEptPML4PtrEx(pVCpu, &pEptPml4);
823 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc);
824 return pEptPml4;
825}
826# endif
827#endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
828
829
830/**
831 * Gets the shadow page directory, 32-bit.
832 *
833 * @returns Pointer to the shadow 32-bit PD.
834 * @param pVCpu The cross context virtual CPU structure.
835 */
836DECLINLINE(PX86PD) pgmShwGet32BitPDPtr(PVMCPUCC pVCpu)
837{
838 return (PX86PD)PGMPOOL_PAGE_2_PTR_V2(pVCpu->CTX_SUFF(pVM), pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
839}
840
841
842/**
843 * Gets the shadow page directory entry for the specified address, 32-bit.
844 *
845 * @returns Shadow 32-bit PDE.
846 * @param pVCpu The cross context virtual CPU structure.
847 * @param GCPtr The address.
848 */
849DECLINLINE(X86PDE) pgmShwGet32BitPDE(PVMCPUCC pVCpu, RTGCPTR GCPtr)
850{
851 PX86PD pShwPde = pgmShwGet32BitPDPtr(pVCpu);
852 if (!pShwPde)
853 {
854 X86PDE ZeroPde = {0};
855 return ZeroPde;
856 }
857 return pShwPde->a[(uint32_t)GCPtr >> X86_PD_SHIFT];
858}
859
860
861/**
862 * Gets the pointer to the shadow page directory entry for the specified
863 * address, 32-bit.
864 *
865 * @returns Pointer to the shadow 32-bit PDE.
866 * @param pVCpu The cross context virtual CPU structure.
867 * @param GCPtr The address.
868 */
869DECLINLINE(PX86PDE) pgmShwGet32BitPDEPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr)
870{
871 PX86PD pPde = pgmShwGet32BitPDPtr(pVCpu);
872 AssertReturn(pPde, NULL);
873 return &pPde->a[(uint32_t)GCPtr >> X86_PD_SHIFT];
874}
875
876
877/**
878 * Gets the shadow page directory pointer table, PAE.
879 *
880 * @returns Pointer to the shadow PAE PDPT.
881 * @param pVCpu The cross context virtual CPU structure.
882 */
883DECLINLINE(PX86PDPT) pgmShwGetPaePDPTPtr(PVMCPUCC pVCpu)
884{
885 return (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVCpu->CTX_SUFF(pVM), pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
886}
887
888
889/**
890 * Gets the shadow page directory for the specified address, PAE.
891 *
892 * @returns Pointer to the shadow PD.
893 * @param pVCpu The cross context virtual CPU structure.
894 * @param pPdpt Pointer to the page directory pointer table.
895 * @param GCPtr The address.
896 */
897DECLINLINE(PX86PDPAE) pgmShwGetPaePDPtr(PVMCPUCC pVCpu, PX86PDPT pPdpt, RTGCPTR GCPtr)
898{
899 const unsigned iPdpt = (uint32_t)GCPtr >> X86_PDPT_SHIFT;
900 if (pPdpt->a[iPdpt].u & X86_PDPE_P)
901 {
902 /* Fetch the pgm pool shadow descriptor. */
903 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
904 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), pPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
905 AssertReturn(pShwPde, NULL);
906
907 return (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPde);
908 }
909 return NULL;
910}
911
912
913/**
914 * Gets the shadow page directory for the specified address, PAE.
915 *
916 * @returns Pointer to the shadow PD.
917 * @param pVCpu The cross context virtual CPU structure.
918 * @param GCPtr The address.
919 */
920DECLINLINE(PX86PDPAE) pgmShwGetPaePDPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr)
921{
922 return pgmShwGetPaePDPtr(pVCpu, pgmShwGetPaePDPTPtr(pVCpu), GCPtr);
923}
924
925
926/**
927 * Gets the shadow page directory entry, PAE.
928 *
929 * @returns PDE.
930 * @param pVCpu The cross context virtual CPU structure.
931 * @param GCPtr The address.
932 */
933DECLINLINE(X86PDEPAE) pgmShwGetPaePDE(PVMCPUCC pVCpu, RTGCPTR GCPtr)
934{
935 const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
936 PX86PDPAE pShwPde = pgmShwGetPaePDPtr(pVCpu, GCPtr);
937 if (pShwPde)
938 return pShwPde->a[iPd];
939
940 X86PDEPAE ZeroPde = {0};
941 return ZeroPde;
942}
943
944
945/**
946 * Gets the pointer to the shadow page directory entry for an address, PAE.
947 *
948 * @returns Pointer to the PDE.
949 * @param pVCpu The cross context virtual CPU structure.
950 * @param GCPtr The address.
951 * @remarks Only used by AssertCR3.
952 */
953DECLINLINE(PX86PDEPAE) pgmShwGetPaePDEPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr)
954{
955 const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
956 PX86PDPAE pShwPde = pgmShwGetPaePDPtr(pVCpu, GCPtr);
957 AssertReturn(pShwPde, NULL);
958 return &pShwPde->a[iPd];
959}
960
961
962/**
963 * Gets the shadow page map level-4 pointer.
964 *
965 * @returns Pointer to the shadow PML4.
966 * @param pVCpu The cross context virtual CPU structure.
967 */
968DECLINLINE(PX86PML4) pgmShwGetLongModePML4Ptr(PVMCPUCC pVCpu)
969{
970 return (PX86PML4)PGMPOOL_PAGE_2_PTR_V2(pVCpu->CTX_SUFF(pVM), pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
971}
972
973
974/**
975 * Gets the shadow page map level-4 entry for the specified address.
976 *
977 * @returns The entry.
978 * @param pVCpu The cross context virtual CPU structure.
979 * @param GCPtr The address.
980 */
981DECLINLINE(X86PML4E) pgmShwGetLongModePML4E(PVMCPUCC pVCpu, RTGCPTR GCPtr)
982{
983 const unsigned iPml4 = ((RTGCUINTPTR64)GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
984 PX86PML4 pShwPml4 = pgmShwGetLongModePML4Ptr(pVCpu);
985 if (pShwPml4)
986 return pShwPml4->a[iPml4];
987
988 X86PML4E ZeroPml4e = {0};
989 return ZeroPml4e;
990}
991
992
993/**
994 * Gets the pointer to the specified shadow page map level-4 entry.
995 *
996 * @returns Pointer to the entry, or NULL if the shadow PML4 is not mapped.
997 * @param pVCpu The cross context virtual CPU structure.
998 * @param iPml4 The PML4 index.
999 */
1000DECLINLINE(PX86PML4E) pgmShwGetLongModePML4EPtr(PVMCPUCC pVCpu, unsigned int iPml4)
1001{
1002 PX86PML4 pShwPml4 = pgmShwGetLongModePML4Ptr(pVCpu);
1003 if (pShwPml4)
1004 return &pShwPml4->a[iPml4];
1005 return NULL;
1006}
1007
1008#endif /* !VBOX_VMM_TARGET_ARMV8 */
1009
1010/**
1011 * Cached physical handler lookup.
1012 *
1013 * @returns VBox status code.
1014 * @retval VERR_NOT_FOUND if no handler.
1015 * @param pVM The cross context VM structure.
1016 * @param GCPhys The lookup address.
1017 * @param ppHandler Where to return the handler pointer.
1018 */
1019DECLINLINE(int) pgmHandlerPhysicalLookup(PVMCC pVM, RTGCPHYS GCPhys, PPGMPHYSHANDLER *ppHandler)
1020{
1021 PPGMPHYSHANDLER pHandler = pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator.ptrFromInt(pVM->pgm.s.idxLastPhysHandler);
1022 if ( pHandler
1023 && pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator.isPtrRetOkay(pHandler)
1024 && GCPhys >= pHandler->Key
1025 && GCPhys < pHandler->KeyLast
1026 && pHandler->hType != NIL_PGMPHYSHANDLERTYPE
1027 && pHandler->hType != 0)
1028
1029 {
1030 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysHandlerLookupHits));
1031 *ppHandler = pHandler;
1032 return VINF_SUCCESS;
1033 }
1034
1035 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysHandlerLookupMisses));
1036 AssertPtrReturn(pVM->VMCC_CTX(pgm).s.pPhysHandlerTree, VERR_PGM_HANDLER_IPE_1);
1037 int rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pHandler);
1038 if (RT_SUCCESS(rc))
1039 {
1040 *ppHandler = pHandler;
1041 pVM->pgm.s.idxLastPhysHandler = pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator.ptrToInt(pHandler);
1042 return VINF_SUCCESS;
1043 }
1044 *ppHandler = NULL;
1045 return rc;
1046}
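/*
 * Illustrative caller pattern (a hypothetical sketch, not code from this file):
 *
 *     PPGMPHYSHANDLER pHandler;
 *     int rc = pgmHandlerPhysicalLookup(pVM, GCPhys, &pHandler);
 *     if (RT_SUCCESS(rc))
 *     {
 *         PCPGMPHYSHANDLERTYPEINT pType = pgmHandlerPhysicalTypeHandleToPtr2(pVM, pHandler->hType);
 *         // ... consult pType for the handler kind and callbacks ...
 *     }
 *     else
 *     {
 *         // typically VERR_NOT_FOUND: no handler covers GCPhys
 *     }
 */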
1047
1048
1049/**
1050 * Converts a handle to a pointer.
1051 *
1052 * @returns Pointer on success, NULL on failure (asserted).
1053 * @param pVM The cross context VM structure.
1054 * @param hType Physical access handler type handle.
1055 */
1056DECLINLINE(PCPGMPHYSHANDLERTYPEINT) pgmHandlerPhysicalTypeHandleToPtr(PVMCC pVM, PGMPHYSHANDLERTYPE hType)
1057{
1058#ifdef IN_RING0
1059 PPGMPHYSHANDLERTYPEINT pType = &pVM->pgmr0.s.aPhysHandlerTypes[hType & PGMPHYSHANDLERTYPE_IDX_MASK];
1060#elif defined(IN_RING3)
1061 PPGMPHYSHANDLERTYPEINT pType = &pVM->pgm.s.aPhysHandlerTypes[hType & PGMPHYSHANDLERTYPE_IDX_MASK];
1062#else
1063# error "Invalid context"
1064#endif
1065 AssertReturn(pType->hType == hType, NULL);
1066 return pType;
1067}
1068
1069
1070/**
1071 * Converts a handle to a pointer, never returns NULL.
1072 *
1073 * @returns Pointer on success, dummy on failure (asserted).
1074 * @param pVM The cross context VM structure.
1075 * @param hType Physical access handler type handle.
1076 */
1077DECLINLINE(PCPGMPHYSHANDLERTYPEINT) pgmHandlerPhysicalTypeHandleToPtr2(PVMCC pVM, PGMPHYSHANDLERTYPE hType)
1078{
1079#ifdef IN_RING0
1080 PPGMPHYSHANDLERTYPEINT pType = &pVM->pgmr0.s.aPhysHandlerTypes[hType & PGMPHYSHANDLERTYPE_IDX_MASK];
1081#elif defined(IN_RING3)
1082 PPGMPHYSHANDLERTYPEINT pType = &pVM->pgm.s.aPhysHandlerTypes[hType & PGMPHYSHANDLERTYPE_IDX_MASK];
1083#else
1084# error "Invalid context"
1085#endif
1086 AssertReturn(pType->hType == hType, &g_pgmHandlerPhysicalDummyType);
1087 return pType;
1088}
1089
1090
1091/**
1092 * Internal worker for finding an 'in-use' shadow page given by its pool page index.
1093 *
1094 * @returns Pointer to the shadow page structure.
1095 * @param pPool The pool.
1096 * @param idx The pool page index.
1097 */
1098DECLINLINE(PPGMPOOLPAGE) pgmPoolGetPageByIdx(PPGMPOOL pPool, unsigned idx)
1099{
1100 AssertFatalMsg(idx >= PGMPOOL_IDX_FIRST && idx < pPool->cCurPages, ("idx=%d\n", idx));
1101 return &pPool->aPages[idx];
1102}
1103
1104
1105/**
1106 * Clear references to guest physical memory.
1107 *
1108 * @param pPool The pool.
1109 * @param pPoolPage The pool page.
1110 * @param pPhysPage The physical guest page tracking structure.
1111 * @param iPte Shadow PTE index
1112 */
1113DECLINLINE(void) pgmTrackDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPoolPage, PPGMPAGE pPhysPage, uint16_t iPte)
1114{
1115 /*
1116 * Just deal with the simple case here.
1117 */
1118#ifdef VBOX_STRICT
1119 PVMCC pVM = pPool->CTX_SUFF(pVM); NOREF(pVM);
1120#endif
1121#ifdef LOG_ENABLED
1122 const unsigned uOrg = PGM_PAGE_GET_TRACKING(pPhysPage);
1123#endif
1124 const unsigned cRefs = PGM_PAGE_GET_TD_CREFS(pPhysPage);
1125 if (cRefs == 1)
1126 {
1127#if 0 /* for more debug info */
1128 AssertMsg( pPoolPage->idx == PGM_PAGE_GET_TD_IDX(pPhysPage)
1129 && iPte == PGM_PAGE_GET_PTE_INDEX(pPhysPage),
1130 ("idx=%#x iPte=%#x enmKind=%d vs pPhysPage=%R[pgmpage] idx=%#x iPte=%#x enmKind=%d [iPte]=%#RX64\n",
1131 pPoolPage->idx, iPte, pPoolPage->enmKind,
1132 pPhysPage, PGM_PAGE_GET_TD_IDX(pPhysPage), PGM_PAGE_GET_PTE_INDEX(pPhysPage),
1133 pPool->aPages[PGM_PAGE_GET_TD_IDX(pPhysPage)].enmKind,
1134 ((uint64_t *)pPoolPage->CTX_SUFF(pvPage))[iPte]));
1135#else
1136 Assert(pPoolPage->idx == PGM_PAGE_GET_TD_IDX(pPhysPage));
1137 Assert(iPte == PGM_PAGE_GET_PTE_INDEX(pPhysPage));
1138#endif
1139 /* Invalidate the tracking data. */
1140 PGM_PAGE_SET_TRACKING(pVM, pPhysPage, 0);
1141 }
1142 else
1143 pgmPoolTrackPhysExtDerefGCPhys(pPool, pPoolPage, pPhysPage, iPte);
1144 Log2(("pgmTrackDerefGCPhys: %x -> %x pPhysPage=%R[pgmpage]\n", uOrg, PGM_PAGE_GET_TRACKING(pPhysPage), pPhysPage ));
1145}
1146
1147
1148/**
1149 * Moves the page to the head of the age list.
1150 *
1151 * This is done when the cached page is used in one way or another.
1152 *
1153 * @param pPool The pool.
1154 * @param pPage The cached page.
1155 */
1156DECLINLINE(void) pgmPoolCacheUsed(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
1157{
1158 PGM_LOCK_ASSERT_OWNER(pPool->CTX_SUFF(pVM));
1159
1160 /*
1161 * Move to the head of the age list.
1162 */
1163 if (pPage->iAgePrev != NIL_PGMPOOL_IDX)
1164 {
1165 /* unlink */
1166 pPool->aPages[pPage->iAgePrev].iAgeNext = pPage->iAgeNext;
1167 if (pPage->iAgeNext != NIL_PGMPOOL_IDX)
1168 pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->iAgePrev;
1169 else
1170 pPool->iAgeTail = pPage->iAgePrev;
1171
1172 /* insert at head */
1173 pPage->iAgePrev = NIL_PGMPOOL_IDX;
1174 pPage->iAgeNext = pPool->iAgeHead;
1175 Assert(pPage->iAgeNext != NIL_PGMPOOL_IDX); /* we would've already been head then */
1176 pPool->iAgeHead = pPage->idx;
1177 pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->idx;
1178 }
1179}
1180
1181
1182/**
1183 * Locks a page to prevent flushing (important for CR3 root pages or shadow PAE PD pages).
1184 *
1185 * @param pPool The pool.
1186 * @param pPage PGM pool page
1187 */
1188DECLINLINE(void) pgmPoolLockPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
1189{
1190 PGM_LOCK_ASSERT_OWNER(pPool->CTX_SUFF(pVM)); NOREF(pPool);
1191 ASMAtomicIncU32(&pPage->cLocked);
1192}
1193
1194
1195/**
1196 * Unlocks a page to allow flushing again.
1197 *
1198 * @param pPool The pool.
1199 * @param pPage PGM pool page
1200 */
1201DECLINLINE(void) pgmPoolUnlockPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
1202{
1203 PGM_LOCK_ASSERT_OWNER(pPool->CTX_SUFF(pVM)); NOREF(pPool);
1204 Assert(pPage->cLocked);
1205 ASMAtomicDecU32(&pPage->cLocked);
1206}
1207
1208
1209/**
1210 * Checks if the page is locked (e.g. the active CR3 or one of the four PDs of a PAE PDPT).
1211 *
1212 * @returns true if the page is locked, false if not.
1213 * @param pPage PGM pool page
1214 */
1215DECLINLINE(bool) pgmPoolIsPageLocked(PPGMPOOLPAGE pPage)
1216{
1217 if (pPage->cLocked)
1218 {
1219 LogFlow(("pgmPoolIsPageLocked found root page %d\n", pPage->enmKind));
1220 if (pPage->cModifications)
1221 pPage->cModifications = 1; /* reset counter (can't use 0, or else it will be reinserted in the modified list) */
1222 return true;
1223 }
1224 return false;
1225}
1226
1227
1228/**
1229 * Checks if the specified page is dirty (not write monitored).
1230 *
1231 * @returns true if dirty, false if not.
1232 * @param pVM The cross context VM structure.
1233 * @param GCPhys Guest physical address
1234 */
1235DECLINLINE(bool) pgmPoolIsDirtyPage(PVMCC pVM, RTGCPHYS GCPhys)
1236{
1237 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1238 PGM_LOCK_ASSERT_OWNER(pVM);
1239 if (!pPool->cDirtyPages)
1240 return false;
1241 return pgmPoolIsDirtyPageSlow(pVM, GCPhys);
1242}
1243
1244
1245/** @} */
1246
1247#endif /* !VMM_INCLUDED_SRC_include_PGMInline_h */
1248