VirtualBox

source: vbox/trunk/src/VBox/VMM/include/PGMInline.h@99051

Last change on this file: revision 99051, checked in by vboxsync, 21 months ago

VMM: More ARMv8 x86/amd64 separation work, VBoxVMMArm compiles and links now, bugref:10385

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 38.3 KB
1/* $Id: PGMInline.h 99051 2023-03-19 16:40:06Z vboxsync $ */
2/** @file
3 * PGM - Inlined functions.
4 */
5
6/*
7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28#ifndef VMM_INCLUDED_SRC_include_PGMInline_h
29#define VMM_INCLUDED_SRC_include_PGMInline_h
30#ifndef RT_WITHOUT_PRAGMA_ONCE
31# pragma once
32#endif
33
34#include <VBox/cdefs.h>
35#include <VBox/types.h>
36#include <VBox/err.h>
37#include <VBox/vmm/stam.h>
38#include <VBox/param.h>
39#include <VBox/vmm/vmm.h>
40#include <VBox/vmm/mm.h>
41#include <VBox/vmm/pdmcritsect.h>
42#include <VBox/vmm/pdmapi.h>
43#include <VBox/dis.h>
44#include <VBox/vmm/dbgf.h>
45#include <VBox/log.h>
46#include <VBox/vmm/gmm.h>
47#include <VBox/vmm/hm.h>
48#include <VBox/vmm/nem.h>
49#include <iprt/asm.h>
50#include <iprt/assert.h>
51#include <iprt/avl.h>
52#include <iprt/critsect.h>
53#include <iprt/sha.h>
54
55
56
57/** @addtogroup grp_pgm_int Internals
58 * @internal
59 * @{
60 */
61
62/**
63 * Gets the PGMRAMRANGE structure for a guest page.
64 *
65 * @returns Pointer to the RAM range on success.
66 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
67 *
68 * @param pVM The cross context VM structure.
69 * @param GCPhys The GC physical address.
70 */
71DECLINLINE(PPGMRAMRANGE) pgmPhysGetRange(PVMCC pVM, RTGCPHYS GCPhys)
72{
73 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
74 if (!pRam || GCPhys - pRam->GCPhys >= pRam->cb)
75 return pgmPhysGetRangeSlow(pVM, GCPhys);
76 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbHits));
77 return pRam;
78}
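
/*
 * Usage sketch (illustrative only; assumes the PGM lock is held so the range
 * cannot be reconfigured while we look at it).  The per-index TLB above is a
 * pure cache: a miss falls back to pgmPhysGetRangeSlow and the range list.
 *
 *      PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
 *      if (pRam)
 *          Log(("%RGp is backed by RAM range %RGp..%RGp\n",
 *               GCPhys, pRam->GCPhys, pRam->GCPhys + pRam->cb - 1));
 *      else
 *          Log(("%RGp: no RAM range (VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS case)\n", GCPhys));
 */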
79
80
81/**
82 * Gets the PGMRAMRANGE structure for a guest page; if the page is unassigned,
83 * gets the RAM range above it.
84 *
85 * @returns Pointer to the RAM range on success.
86 * @returns NULL if the address is located after the last range.
87 *
88 * @param pVM The cross context VM structure.
89 * @param GCPhys The GC physical address.
90 */
91DECLINLINE(PPGMRAMRANGE) pgmPhysGetRangeAtOrAbove(PVMCC pVM, RTGCPHYS GCPhys)
92{
93 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
94 if ( !pRam
95 || (GCPhys - pRam->GCPhys) >= pRam->cb)
96 return pgmPhysGetRangeAtOrAboveSlow(pVM, GCPhys);
97 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbHits));
98 return pRam;
99}
100
101
102/**
103 * Gets the PGMPAGE structure for a guest page.
104 *
105 * @returns Pointer to the page on success.
106 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
107 *
108 * @param pVM The cross context VM structure.
109 * @param GCPhys The GC physical address.
110 */
111DECLINLINE(PPGMPAGE) pgmPhysGetPage(PVMCC pVM, RTGCPHYS GCPhys)
112{
113 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
114 RTGCPHYS off;
115 if ( pRam
116 && (off = GCPhys - pRam->GCPhys) < pRam->cb)
117 {
118 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbHits));
119 return &pRam->aPages[off >> GUEST_PAGE_SHIFT];
120 }
121 return pgmPhysGetPageSlow(pVM, GCPhys);
122}
123
124
125/**
126 * Gets the PGMPAGE structure for a guest page.
127 *
128 * Old Phys code: Will make sure the page is present.
129 *
130 * @returns VBox status code.
131 * @retval VINF_SUCCESS and a valid *ppPage on success.
132 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
133 *
134 * @param pVM The cross context VM structure.
135 * @param GCPhys The GC physical address.
136 * @param ppPage Where to store the page pointer on success.
137 */
138DECLINLINE(int) pgmPhysGetPageEx(PVMCC pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage)
139{
140 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
141 RTGCPHYS off;
142 if ( !pRam
143 || (off = GCPhys - pRam->GCPhys) >= pRam->cb)
144 return pgmPhysGetPageExSlow(pVM, GCPhys, ppPage);
145 *ppPage = &pRam->aPages[off >> GUEST_PAGE_SHIFT];
146 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbHits));
147 return VINF_SUCCESS;
148}
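
/*
 * Usage sketch (illustrative): the status-code variant is the one to use when
 * the caller must distinguish a missing page from a present one, e.g. before
 * reading the page state.
 *
 *      PPGMPAGE pPage;
 *      int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
 *      if (RT_SUCCESS(rc))
 *      {
 *          if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED)
 *          {
 *              ...
 *          }
 *      }
 *      else
 *          AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
 */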
149
150
151/**
152 * Gets the PGMPAGE structure for a guest page.
153 *
154 * Old Phys code: Will make sure the page is present.
155 *
156 * @returns VBox status code.
157 * @retval VINF_SUCCESS and a valid *ppPage on success.
158 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
159 *
160 * @param pVM The cross context VM structure.
161 * @param GCPhys The GC physical address.
162 * @param ppPage Where to store the page pointer on success.
163 * @param ppRamHint Where to read and store the ram list hint.
164 * The caller initializes this to NULL before the call.
165 */
166DECLINLINE(int) pgmPhysGetPageWithHintEx(PVMCC pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRamHint)
167{
168 RTGCPHYS off;
169 PPGMRAMRANGE pRam = *ppRamHint;
170 if ( !pRam
171 || RT_UNLIKELY((off = GCPhys - pRam->GCPhys) >= pRam->cb))
172 {
173 pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
174 if ( !pRam
175 || (off = GCPhys - pRam->GCPhys) >= pRam->cb)
176 return pgmPhysGetPageAndRangeExSlow(pVM, GCPhys, ppPage, ppRamHint);
177
178 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbHits));
179 *ppRamHint = pRam;
180 }
181 *ppPage = &pRam->aPages[off >> GUEST_PAGE_SHIFT];
182 return VINF_SUCCESS;
183}
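
/*
 * Usage sketch (illustrative): the hint variant is intended for loops over
 * consecutive guest pages, so only the first lookup (or a range crossing)
 * takes the TLB/slow path; the hint must start out as NULL.
 *
 *      PPGMRAMRANGE pRamHint = NULL;
 *      for (uint32_t iPage = 0; iPage < cPages; iPage++)
 *      {
 *          PPGMPAGE pPage;
 *          int rc = pgmPhysGetPageWithHintEx(pVM, GCPhys + ((RTGCPHYS)iPage << GUEST_PAGE_SHIFT),
 *                                            &pPage, &pRamHint);
 *          if (RT_FAILURE(rc))
 *              break;
 *          ...
 *      }
 */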
184
185
186/**
187 * Gets the PGMPAGE structure for a guest page together with the PGMRAMRANGE.
188 *
189 * @returns VBox status code.
190 * @retval VINF_SUCCESS and valid *ppPage / *ppRam on success; VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS otherwise.
191 *
192 * @param pVM The cross context VM structure.
193 * @param GCPhys The GC physical address.
194 * @param ppPage Where to store the pointer to the PGMPAGE structure.
195 * @param ppRam Where to store the pointer to the PGMRAMRANGE structure.
196 */
197DECLINLINE(int) pgmPhysGetPageAndRangeEx(PVMCC pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam)
198{
199 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
200 RTGCPHYS off;
201 if ( !pRam
202 || (off = GCPhys - pRam->GCPhys) >= pRam->cb)
203 return pgmPhysGetPageAndRangeExSlow(pVM, GCPhys, ppPage, ppRam);
204
205 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbHits));
206 *ppRam = pRam;
207 *ppPage = &pRam->aPages[off >> GUEST_PAGE_SHIFT];
208 return VINF_SUCCESS;
209}
210
211
212/**
213 * Convert GC Phys to HC Phys.
214 *
215 * @returns VBox status code.
216 * @param pVM The cross context VM structure.
217 * @param GCPhys The GC physical address.
218 * @param pHCPhys Where to store the corresponding HC physical address.
219 *
220 * @deprecated Doesn't deal with zero, shared or write monitored pages.
221 * Avoid when writing new code!
222 */
223DECLINLINE(int) pgmRamGCPhys2HCPhys(PVMCC pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
224{
225 PPGMPAGE pPage;
226 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
227 if (RT_FAILURE(rc))
228 return rc;
229 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & GUEST_PAGE_OFFSET_MASK);
230 return VINF_SUCCESS;
231}
232
233
234/**
235 * Queries the Physical TLB entry for a physical guest page,
236 * attempting to load the TLB entry if necessary.
237 *
238 * @returns VBox status code.
239 * @retval VINF_SUCCESS on success
240 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
241 *
242 * @param pVM The cross context VM structure.
243 * @param GCPhys The address of the guest page.
244 * @param ppTlbe Where to store the pointer to the TLB entry.
245 */
246DECLINLINE(int) pgmPhysPageQueryTlbe(PVMCC pVM, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe)
247{
248 int rc;
249 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTX_SUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
250 if (pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK))
251 {
252 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PageMapTlbHits));
253 rc = VINF_SUCCESS;
254 }
255 else
256 rc = pgmPhysPageLoadIntoTlb(pVM, GCPhys);
257 *ppTlbe = pTlbe;
258 return rc;
259}
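
/*
 * Usage sketch (illustrative): on success the TLB entry's pv member points at
 * the start of the mapped guest page, so a byte inside the page is reached by
 * adding the page offset.  Whether writing through it is permissible still
 * depends on the page state and any access handlers.
 *
 *      PPGMPAGEMAPTLBE pTlbe;
 *      int rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
 *      if (RT_SUCCESS(rc))
 *      {
 *          void const *pv = (uint8_t const *)pTlbe->pv + (GCPhys & GUEST_PAGE_OFFSET_MASK);
 *          ...
 *      }
 */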
260
261
262/**
263 * Queries the Physical TLB entry for a physical guest page,
264 * attempting to load the TLB entry if necessary.
265 *
266 * @returns VBox status code.
267 * @retval VINF_SUCCESS on success
268 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
269 *
270 * @param pVM The cross context VM structure.
271 * @param pPage Pointer to the PGMPAGE structure corresponding to
272 * GCPhys.
273 * @param GCPhys The address of the guest page.
274 * @param ppTlbe Where to store the pointer to the TLB entry.
275 */
276DECLINLINE(int) pgmPhysPageQueryTlbeWithPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe)
277{
278 int rc;
279 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTX_SUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
280 if (pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK))
281 {
282 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PageMapTlbHits));
283 rc = VINF_SUCCESS;
284 AssertPtr(pTlbe->pv);
285#ifdef IN_RING3
286 Assert(!pTlbe->pMap || RT_VALID_PTR(pTlbe->pMap->pv));
287#endif
288 }
289 else
290 rc = pgmPhysPageLoadIntoTlbWithPage(pVM, pPage, GCPhys);
291 *ppTlbe = pTlbe;
292 return rc;
293}
294
295
296/**
297 * Calculates NEM page protection flags.
298 */
299DECL_FORCE_INLINE(uint32_t) pgmPhysPageCalcNemProtection(PPGMPAGE pPage, PGMPAGETYPE enmType)
300{
301 /*
302 * Deal with potentially writable pages first.
303 */
304 if (PGMPAGETYPE_IS_RWX(enmType))
305 {
306 if (!PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
307 {
308 if (PGM_PAGE_IS_ALLOCATED(pPage))
309 return NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE | NEM_PAGE_PROT_WRITE;
310 return NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE;
311 }
312 if (!PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
313 return NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE;
314 }
315 /*
316 * Potentially readable & executable pages.
317 */
318 else if ( PGMPAGETYPE_IS_ROX(enmType)
319 && !PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
320 return NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE;
321
322 /*
323 * The rest needs special access handling.
324 */
325 return NEM_PAGE_PROT_NONE;
326}
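
/*
 * Quick reference for the branches above (NEM_PAGE_PROT_* outcomes):
 *
 *      RWX page type, no active handlers, allocated:               READ | WRITE | EXECUTE
 *      RWX page type, no active handlers, not allocated:           READ | EXECUTE
 *      RWX page type, active handlers but none of the 'all' kind:  READ | EXECUTE
 *      ROX page type, no active 'all' handler:                     READ | EXECUTE
 *      anything else (e.g. MMIO or an active 'all' handler):       NONE
 */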
327
328
329/**
330 * Enables write monitoring for an allocated page.
331 *
332 * The caller is responsible for updating the shadow page tables.
333 *
334 * @param pVM The cross context VM structure.
335 * @param pPage The page to write monitor.
336 * @param GCPhysPage The address of the page.
337 */
338DECLINLINE(void) pgmPhysPageWriteMonitor(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhysPage)
339{
340 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
341 PGM_LOCK_ASSERT_OWNER(pVM);
342
343 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_WRITE_MONITORED);
344 pVM->pgm.s.cMonitoredPages++;
345
346 /* Large pages must be disabled. */
347 if (PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE)
348 {
349 PPGMPAGE pFirstPage = pgmPhysGetPage(pVM, GCPhysPage & X86_PDE2M_PAE_PG_MASK);
350 AssertFatal(pFirstPage);
351 if (PGM_PAGE_GET_PDE_TYPE(pFirstPage) == PGM_PAGE_PDE_TYPE_PDE)
352 {
353 PGM_PAGE_SET_PDE_TYPE(pVM, pFirstPage, PGM_PAGE_PDE_TYPE_PDE_DISABLED);
354 pVM->pgm.s.cLargePagesDisabled++;
355 }
356 else
357 Assert(PGM_PAGE_GET_PDE_TYPE(pFirstPage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED);
358 }
359
360#ifdef VBOX_WITH_NATIVE_NEM
361 /* Tell NEM. */
362 if (VM_IS_NEM_ENABLED(pVM))
363 {
364 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
365 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
366 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhysPage);
367 NEMHCNotifyPhysPageProtChanged(pVM, GCPhysPage, PGM_PAGE_GET_HCPHYS(pPage),
368 pRam ? PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage) : NULL,
369 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
370 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
371 }
372#endif
373}
374
375
376#ifndef VBOX_VMM_TARGET_ARMV8
377/**
378 * Checks if the no-execute (NX) feature is active (EFER.NXE=1).
379 *
380 * Only used when the guest is in PAE or long mode. This is inlined so that we
381 * can perform consistency checks in debug builds.
382 *
383 * @returns true if it is, false if it isn't.
384 * @param pVCpu The cross context virtual CPU structure.
385 */
386DECL_FORCE_INLINE(bool) pgmGstIsNoExecuteActive(PVMCPUCC pVCpu)
387{
388 Assert(pVCpu->pgm.s.fNoExecuteEnabled == CPUMIsGuestNXEnabled(pVCpu));
389 Assert(CPUMIsGuestInPAEMode(pVCpu) || CPUMIsGuestInLongMode(pVCpu));
390 return pVCpu->pgm.s.fNoExecuteEnabled;
391}
392
393
394/**
395 * Checks if the page size extension (PSE) is currently enabled (CR4.PSE=1).
396 *
397 * Only used when the guest is in paged 32-bit mode. This is inlined so that
398 * we can perform consistency checks in debug builds.
399 *
400 * @returns true if it is, false if it isn't.
401 * @param pVCpu The cross context virtual CPU structure.
402 */
403DECL_FORCE_INLINE(bool) pgmGst32BitIsPageSizeExtActive(PVMCPUCC pVCpu)
404{
405 Assert(pVCpu->pgm.s.fGst32BitPageSizeExtension == CPUMIsGuestPageSizeExtEnabled(pVCpu));
406 Assert(!CPUMIsGuestInPAEMode(pVCpu));
407 Assert(!CPUMIsGuestInLongMode(pVCpu));
408 return pVCpu->pgm.s.fGst32BitPageSizeExtension;
409}
410
411
412/**
413 * Calculates the guest physical address of the large (4 MB) page in 32-bit paging mode.
414 * Takes PSE-36 into account.
415 *
416 * @returns guest physical address
417 * @param pVM The cross context VM structure.
418 * @param Pde Guest Pde
419 */
420DECLINLINE(RTGCPHYS) pgmGstGet4MBPhysPage(PVMCC pVM, X86PDE Pde)
421{
422 RTGCPHYS GCPhys = Pde.u & X86_PDE4M_PG_MASK;
423 GCPhys |= (RTGCPHYS)(Pde.u & X86_PDE4M_PG_HIGH_MASK) << X86_PDE4M_PG_HIGH_SHIFT;
424
425 return GCPhys & pVM->pgm.s.GCPhys4MBPSEMask;
426}
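
/*
 * Worked example (illustrative; assumes the usual IA-32 PSE-36 encoding where
 * PDE bits 31:22 give physical bits 31:22 and the PDE bits from bit 13 up are
 * shifted to physical bit 32 and up):
 *
 *      Pde.u                                   = 0x23402083  (P, RW, PS, bit 13 set)
 *      Pde.u & X86_PDE4M_PG_MASK               = 0x23400000
 *      (Pde.u & X86_PDE4M_PG_HIGH_MASK)
 *          << X86_PDE4M_PG_HIGH_SHIFT          = 0x100000000
 *      4 MB page base before masking           = 0x123400000
 *
 * GCPhys4MBPSEMask then clips the result to the physical address width the
 * guest CPU profile actually supports.
 */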
427
428
429/**
430 * Gets the address of the guest page directory (32-bit paging).
431 *
432 * @returns VBox status code.
433 * @param pVCpu The cross context virtual CPU structure.
434 * @param ppPd Where to return the mapping. This is always set.
435 */
436DECLINLINE(int) pgmGstGet32bitPDPtrEx(PVMCPUCC pVCpu, PX86PD *ppPd)
437{
438 *ppPd = pVCpu->pgm.s.CTX_SUFF(pGst32BitPd);
439 if (RT_UNLIKELY(!*ppPd))
440 return pgmGstLazyMap32BitPD(pVCpu, ppPd);
441 return VINF_SUCCESS;
442}
443
444
445/**
446 * Gets the address of the guest page directory (32-bit paging).
447 *
448 * @returns Pointer to the page directory in question.
449 * @param pVCpu The cross context virtual CPU structure.
450 */
451DECLINLINE(PX86PD) pgmGstGet32bitPDPtr(PVMCPUCC pVCpu)
452{
453 PX86PD pGuestPD = pVCpu->pgm.s.CTX_SUFF(pGst32BitPd);
454 if (RT_UNLIKELY(!pGuestPD))
455 {
456 int rc = pgmGstLazyMap32BitPD(pVCpu, &pGuestPD);
457 if (RT_FAILURE(rc))
458 return NULL;
459 }
460 return pGuestPD;
461}
462
463
464/**
465 * Gets the guest page directory pointer table.
466 *
467 * @returns VBox status code.
468 * @param pVCpu The cross context virtual CPU structure.
469 * @param ppPdpt Where to return the mapping. This is always set.
470 */
471DECLINLINE(int) pgmGstGetPaePDPTPtrEx(PVMCPUCC pVCpu, PX86PDPT *ppPdpt)
472{
473 *ppPdpt = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
474 if (RT_UNLIKELY(!*ppPdpt))
475 return pgmGstLazyMapPaePDPT(pVCpu, ppPdpt);
476 return VINF_SUCCESS;
477}
478
479
480/**
481 * Gets the guest page directory pointer table.
482 *
483 * @returns Pointer to the page directory pointer table in question.
484 * @returns NULL if the page directory pointer table is not present or on an invalid page.
485 * @param pVCpu The cross context virtual CPU structure.
486 */
487DECLINLINE(PX86PDPT) pgmGstGetPaePDPTPtr(PVMCPUCC pVCpu)
488{
489 PX86PDPT pGuestPdpt;
490 int rc = pgmGstGetPaePDPTPtrEx(pVCpu, &pGuestPdpt);
491 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc);
492 return pGuestPdpt;
493}
494
495
496/**
497 * Gets the guest page directory pointer table entry for the specified address.
498 *
499 * @returns Pointer to the page directory pointer table entry in question.
500 * @returns NULL if the page directory pointer table is not present or on an invalid page.
501 * @param pVCpu The cross context virtual CPU structure.
502 * @param GCPtr The address.
503 */
504DECLINLINE(PX86PDPE) pgmGstGetPaePDPEPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr)
505{
506 AssertGCPtr32(GCPtr);
507
508 PX86PDPT pGuestPDPT = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
509 if (RT_UNLIKELY(!pGuestPDPT))
510 {
511 int rc = pgmGstLazyMapPaePDPT(pVCpu, &pGuestPDPT);
512 if (RT_FAILURE(rc))
513 return NULL;
514 }
515 return &pGuestPDPT->a[(uint32_t)GCPtr >> X86_PDPT_SHIFT];
516}
517
518
519/**
520 * Gets the page directory entry for the specified address.
521 *
522 * @returns The page directory entry in question.
523 * @returns A non-present entry if the page directory is not present or on an invalid page.
524 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
525 * @param GCPtr The address.
526 */
527DECLINLINE(X86PDEPAE) pgmGstGetPaePDE(PVMCPUCC pVCpu, RTGCPTR GCPtr)
528{
529 AssertGCPtr32(GCPtr);
530 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pVCpu);
531 if (RT_LIKELY(pGuestPDPT))
532 {
533 const unsigned iPdpt = (uint32_t)GCPtr >> X86_PDPT_SHIFT;
534 if ((pGuestPDPT->a[iPdpt].u & (pVCpu->pgm.s.fGstPaeMbzPdpeMask | X86_PDPE_P)) == X86_PDPE_P)
535 {
536 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
537 PX86PDPAE pGuestPD = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
538 if ( !pGuestPD
539 || (pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) != pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt])
540 pgmGstLazyMapPaePD(pVCpu, iPdpt, &pGuestPD);
541 if (pGuestPD)
542 return pGuestPD->a[iPD];
543 }
544 }
545
546 X86PDEPAE ZeroPde = {0};
547 return ZeroPde;
548}
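
/*
 * Index derivation example (illustrative) for the PAE walk above, taking
 * GCPtr = 0xC0ABC123 and the standard PAE layout (2-bit PDPT index, 9-bit PD
 * index):
 *
 *      iPdpt = 0xC0ABC123 >> X86_PDPT_SHIFT                       = 3   (bits 31:30)
 *      iPD   = (0xC0ABC123 >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK = 5   (bits 29:21)
 *
 * So, provided PDPTE 3 is present and clears the MBZ mask, the function
 * returns apGstPaePDs[3]->a[5].
 */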
549
550
551/**
552 * Gets the page directory pointer table entry for the specified address
553 * and returns the index into the page directory.
554 *
555 * @returns Pointer to the page directory in question.
556 * @returns NULL if the page directory is not present or on an invalid page.
557 * @param pVCpu The cross context virtual CPU structure.
558 * @param GCPtr The address.
559 * @param piPD Receives the index into the returned page directory
560 * @param pPdpe Receives the page directory pointer entry. Optional.
561 */
562DECLINLINE(PX86PDPAE) pgmGstGetPaePDPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr, unsigned *piPD, PX86PDPE pPdpe)
563{
564 AssertGCPtr32(GCPtr);
565
566 /* The PDPE. */
567 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pVCpu);
568 if (pGuestPDPT)
569 {
570 const unsigned iPdpt = (uint32_t)GCPtr >> X86_PDPT_SHIFT;
571 X86PGPAEUINT const uPdpe = pGuestPDPT->a[iPdpt].u;
572 if (pPdpe)
573 pPdpe->u = uPdpe;
574 if ((uPdpe & (pVCpu->pgm.s.fGstPaeMbzPdpeMask | X86_PDPE_P)) == X86_PDPE_P)
575 {
576
577 /* The PDE. */
578 PX86PDPAE pGuestPD = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
579 if ( !pGuestPD
580 || (uPdpe & X86_PDPE_PG_MASK) != pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt])
581 pgmGstLazyMapPaePD(pVCpu, iPdpt, &pGuestPD);
582 *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
583 return pGuestPD;
584 }
585 }
586 return NULL;
587}
588
589
590/**
591 * Gets the page map level-4 pointer for the guest.
592 *
593 * @returns VBox status code.
594 * @param pVCpu The cross context virtual CPU structure.
595 * @param ppPml4 Where to return the mapping. Always set.
596 */
597DECLINLINE(int) pgmGstGetLongModePML4PtrEx(PVMCPUCC pVCpu, PX86PML4 *ppPml4)
598{
599 *ppPml4 = pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4);
600 if (RT_UNLIKELY(!*ppPml4))
601 return pgmGstLazyMapPml4(pVCpu, ppPml4);
602 return VINF_SUCCESS;
603}
604
605
606/**
607 * Gets the page map level-4 pointer for the guest.
608 *
609 * @returns Pointer to the PML4 page.
610 * @param pVCpu The cross context virtual CPU structure.
611 */
612DECLINLINE(PX86PML4) pgmGstGetLongModePML4Ptr(PVMCPUCC pVCpu)
613{
614 PX86PML4 pGuestPml4;
615 int rc = pgmGstGetLongModePML4PtrEx(pVCpu, &pGuestPml4);
616 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc);
617 return pGuestPml4;
618}
619
620
621/**
622 * Gets the pointer to a page map level-4 entry.
623 *
624 * @returns Pointer to the PML4 entry.
625 * @param pVCpu The cross context virtual CPU structure.
626 * @param iPml4 The index.
627 * @remarks Only used by AssertCR3.
628 */
629DECLINLINE(PX86PML4E) pgmGstGetLongModePML4EPtr(PVMCPUCC pVCpu, unsigned int iPml4)
630{
631 PX86PML4 pGuestPml4 = pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4);
632 if (pGuestPml4)
633 { /* likely */ }
634 else
635 {
636 int rc = pgmGstLazyMapPml4(pVCpu, &pGuestPml4);
637 AssertRCReturn(rc, NULL);
638 }
639 return &pGuestPml4->a[iPml4];
640}
641
642
643/**
644 * Gets the page directory entry for the specified address.
645 *
646 * @returns The page directory entry in question.
647 * @returns A non-present entry if the page directory is not present or on an invalid page.
648 * @param pVCpu The cross context virtual CPU structure.
649 * @param GCPtr The address.
650 */
651DECLINLINE(X86PDEPAE) pgmGstGetLongModePDE(PVMCPUCC pVCpu, RTGCPTR64 GCPtr)
652{
653 /*
654 * Note! To keep things simple, ASSUME invalid physical addresses will
655 * cause X86_TRAP_PF_RSVD. This isn't a problem until we start
656 * supporting 52-bit wide physical guest addresses.
657 */
658 PCX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pVCpu);
659 if (RT_LIKELY(pGuestPml4))
660 {
661 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
662 X86PGPAEUINT const uPml4e = pGuestPml4->a[iPml4].u;
663 if ((uPml4e & (pVCpu->pgm.s.fGstAmd64MbzPml4eMask | X86_PML4E_P)) == X86_PML4E_P)
664 {
665 PCX86PDPT pPdptTemp;
666 int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, uPml4e & X86_PML4E_PG_MASK, &pPdptTemp);
667 if (RT_SUCCESS(rc))
668 {
669 const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
670 X86PGPAEUINT const uPdpte = pPdptTemp->a[iPdpt].u;
671 if ((uPdpte & (pVCpu->pgm.s.fGstAmd64MbzPdpeMask | X86_PDPE_P)) == X86_PDPE_P)
672 {
673 PCX86PDPAE pPD;
674 rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, uPdpte & X86_PDPE_PG_MASK, &pPD);
675 if (RT_SUCCESS(rc))
676 {
677 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
678 return pPD->a[iPD];
679 }
680 }
681 }
682 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
683 }
684 }
685
686 X86PDEPAE ZeroPde = {0};
687 return ZeroPde;
688}
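
/*
 * Index derivation example (illustrative) for the long-mode walk above, using
 * GCPtr = 0x00007f1234567890 and the standard 4-level layout (9 bits per
 * level):
 *
 *      iPml4 = (GCPtr >> 39) & 0x1ff = 0x0fe   (bits 47:39)
 *      iPdpt = (GCPtr >> 30) & 0x1ff = 0x048   (bits 38:30)
 *      iPD   = (GCPtr >> 21) & 0x1ff = 0x1a2   (bits 29:21)
 *
 * Each level is mapped on demand via PGM_GCPHYS_2_PTR_BY_VMCPU, and the MBZ
 * masks reject reserved-bit violations before the next level is touched.
 */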
689
690
691/**
692 * Gets the GUEST page directory pointer for the specified address.
693 *
694 * @returns The page directory in question.
695 * @returns NULL if the page directory is not present or on an invalid page.
696 * @param pVCpu The cross context virtual CPU structure.
697 * @param GCPtr The address.
698 * @param ppPml4e Page Map Level-4 Entry (out)
699 * @param pPdpe Page directory pointer table entry (out)
700 * @param piPD Receives the index into the returned page directory
701 */
702DECLINLINE(PX86PDPAE) pgmGstGetLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPE pPdpe, unsigned *piPD)
703{
704 /* The PML4E. */
705 PX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pVCpu);
706 if (pGuestPml4)
707 {
708 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
709 *ppPml4e = &pGuestPml4->a[iPml4];
710 X86PGPAEUINT const uPml4e = pGuestPml4->a[iPml4].u;
711 if ((uPml4e & (pVCpu->pgm.s.fGstAmd64MbzPml4eMask | X86_PML4E_P)) == X86_PML4E_P)
712 {
713 /* The PDPE. */
714 PCX86PDPT pPdptTemp;
715 int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, uPml4e & X86_PML4E_PG_MASK, &pPdptTemp);
716 if (RT_SUCCESS(rc))
717 {
718 const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
719 X86PGPAEUINT const uPdpe = pPdptTemp->a[iPdpt].u;
720 pPdpe->u = uPdpe;
721 if ((uPdpe & (pVCpu->pgm.s.fGstAmd64MbzPdpeMask | X86_PDPE_P)) == X86_PDPE_P)
722 {
723 /* The PDE. */
724 PX86PDPAE pPD;
725 rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, uPdpe & X86_PDPE_PG_MASK, &pPD);
726 if (RT_SUCCESS(rc))
727 {
728 *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
729 return pPD;
730 }
731 AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
732 }
733 }
734 else
735 AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
736 }
737 }
738 return NULL;
739}
740
741
742#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
743# if 0
744/**
745 * Gets the pointer to a page map level-4 entry when the guest using EPT paging.
746 *
747 * @returns Pointer to the PML4 entry.
748 * @param pVCpu The cross context virtual CPU structure.
749 * @param iPml4 The index.
750 * @remarks Only used by AssertCR3.
751 */
752DECLINLINE(PEPTPML4E) pgmGstGetEptPML4EPtr(PVMCPUCC pVCpu, unsigned int iPml4)
753{
754 PEPTPML4 pEptPml4 = pVCpu->pgm.s.CTX_SUFF(pGstEptPml4);
755 if (pEptPml4)
756 { /* likely */ }
757 else
758 {
759 int const rc = pgmGstLazyMapEptPml4(pVCpu, &pEptPml4);
760 AssertRCReturn(rc, NULL);
761 }
762 return &pEptPml4->a[iPml4];
763}
764# endif
765
766
767/**
768 * Gets the page map level-4 pointer for the guest when the guest is using EPT
769 * paging.
770 *
771 * @returns VBox status code.
772 * @param pVCpu The cross context virtual CPU structure.
773 * @param ppEptPml4 Where to return the mapping. Always set.
774 */
775DECLINLINE(int) pgmGstGetEptPML4PtrEx(PVMCPUCC pVCpu, PEPTPML4 *ppEptPml4)
776{
777 /* Shadow CR3 might not have been mapped at this point, see PGMHCChangeMode. */
778 *ppEptPml4 = pVCpu->pgm.s.CTX_SUFF(pGstEptPml4);
779 if (!*ppEptPml4)
780 return pgmGstLazyMapEptPml4(pVCpu, ppEptPml4);
781 return VINF_SUCCESS;
782}
783
784
785# if 0
786/**
787 * Gets the page map level-4 pointer for the guest when the guest is using EPT
788 * paging.
789 *
790 * @returns Pointer to the EPT PML4 page.
791 * @param pVCpu The cross context virtual CPU structure.
792 */
793DECLINLINE(PEPTPML4) pgmGstGetEptPML4Ptr(PVMCPUCC pVCpu)
794{
795 PEPTPML4 pEptPml4;
796 int rc = pgmGstGetEptPML4PtrEx(pVCpu, &pEptPml4);
797 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc);
798 return pEptPml4;
799}
800# endif
801#endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
802
803
804/**
805 * Gets the shadow page directory, 32-bit.
806 *
807 * @returns Pointer to the shadow 32-bit PD.
808 * @param pVCpu The cross context virtual CPU structure.
809 */
810DECLINLINE(PX86PD) pgmShwGet32BitPDPtr(PVMCPUCC pVCpu)
811{
812 return (PX86PD)PGMPOOL_PAGE_2_PTR_V2(pVCpu->CTX_SUFF(pVM), pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
813}
814
815
816/**
817 * Gets the shadow page directory entry for the specified address, 32-bit.
818 *
819 * @returns Shadow 32-bit PDE.
820 * @param pVCpu The cross context virtual CPU structure.
821 * @param GCPtr The address.
822 */
823DECLINLINE(X86PDE) pgmShwGet32BitPDE(PVMCPUCC pVCpu, RTGCPTR GCPtr)
824{
825 PX86PD pShwPde = pgmShwGet32BitPDPtr(pVCpu);
826 if (!pShwPde)
827 {
828 X86PDE ZeroPde = {0};
829 return ZeroPde;
830 }
831 return pShwPde->a[(uint32_t)GCPtr >> X86_PD_SHIFT];
832}
833
834
835/**
836 * Gets the pointer to the shadow page directory entry for the specified
837 * address, 32-bit.
838 *
839 * @returns Pointer to the shadow 32-bit PDE.
840 * @param pVCpu The cross context virtual CPU structure.
841 * @param GCPtr The address.
842 */
843DECLINLINE(PX86PDE) pgmShwGet32BitPDEPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr)
844{
845 PX86PD pPde = pgmShwGet32BitPDPtr(pVCpu);
846 AssertReturn(pPde, NULL);
847 return &pPde->a[(uint32_t)GCPtr >> X86_PD_SHIFT];
848}
849
850
851/**
852 * Gets the shadow page directory pointer table, PAE.
853 *
854 * @returns Pointer to the shadow PAE PDPT.
855 * @param pVCpu The cross context virtual CPU structure.
856 */
857DECLINLINE(PX86PDPT) pgmShwGetPaePDPTPtr(PVMCPUCC pVCpu)
858{
859 return (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVCpu->CTX_SUFF(pVM), pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
860}
861
862
863/**
864 * Gets the shadow page directory for the specified address, PAE.
865 *
866 * @returns Pointer to the shadow PD.
867 * @param pVCpu The cross context virtual CPU structure.
868 * @param pPdpt Pointer to the page directory pointer table.
869 * @param GCPtr The address.
870 */
871DECLINLINE(PX86PDPAE) pgmShwGetPaePDPtr(PVMCPUCC pVCpu, PX86PDPT pPdpt, RTGCPTR GCPtr)
872{
873 const unsigned iPdpt = (uint32_t)GCPtr >> X86_PDPT_SHIFT;
874 if (pPdpt->a[iPdpt].u & X86_PDPE_P)
875 {
876 /* Fetch the pgm pool shadow descriptor. */
877 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
878 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), pPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
879 AssertReturn(pShwPde, NULL);
880
881 return (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPde);
882 }
883 return NULL;
884}
885
886
887/**
888 * Gets the shadow page directory for the specified address, PAE.
889 *
890 * @returns Pointer to the shadow PD.
891 * @param pVCpu The cross context virtual CPU structure.
892 * @param GCPtr The address.
893 */
894DECLINLINE(PX86PDPAE) pgmShwGetPaePDPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr)
895{
896 return pgmShwGetPaePDPtr(pVCpu, pgmShwGetPaePDPTPtr(pVCpu), GCPtr);
897}
898
899
900/**
901 * Gets the shadow page directory entry, PAE.
902 *
903 * @returns PDE.
904 * @param pVCpu The cross context virtual CPU structure.
905 * @param GCPtr The address.
906 */
907DECLINLINE(X86PDEPAE) pgmShwGetPaePDE(PVMCPUCC pVCpu, RTGCPTR GCPtr)
908{
909 const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
910 PX86PDPAE pShwPde = pgmShwGetPaePDPtr(pVCpu, GCPtr);
911 if (pShwPde)
912 return pShwPde->a[iPd];
913
914 X86PDEPAE ZeroPde = {0};
915 return ZeroPde;
916}
917
918
919/**
920 * Gets the pointer to the shadow page directory entry for an address, PAE.
921 *
922 * @returns Pointer to the PDE.
923 * @param pVCpu The cross context virtual CPU structure.
924 * @param GCPtr The address.
925 * @remarks Only used by AssertCR3.
926 */
927DECLINLINE(PX86PDEPAE) pgmShwGetPaePDEPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr)
928{
929 const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
930 PX86PDPAE pShwPde = pgmShwGetPaePDPtr(pVCpu, GCPtr);
931 AssertReturn(pShwPde, NULL);
932 return &pShwPde->a[iPd];
933}
934
935
936/**
937 * Gets the shadow page map level-4 pointer.
938 *
939 * @returns Pointer to the shadow PML4.
940 * @param pVCpu The cross context virtual CPU structure.
941 */
942DECLINLINE(PX86PML4) pgmShwGetLongModePML4Ptr(PVMCPUCC pVCpu)
943{
944 return (PX86PML4)PGMPOOL_PAGE_2_PTR_V2(pVCpu->CTX_SUFF(pVM), pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
945}
946
947
948/**
949 * Gets the shadow page map level-4 entry for the specified address.
950 *
951 * @returns The entry.
952 * @param pVCpu The cross context virtual CPU structure.
953 * @param GCPtr The address.
954 */
955DECLINLINE(X86PML4E) pgmShwGetLongModePML4E(PVMCPUCC pVCpu, RTGCPTR GCPtr)
956{
957 const unsigned iPml4 = ((RTGCUINTPTR64)GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
958 PX86PML4 pShwPml4 = pgmShwGetLongModePML4Ptr(pVCpu);
959 if (pShwPml4)
960 return pShwPml4->a[iPml4];
961
962 X86PML4E ZeroPml4e = {0};
963 return ZeroPml4e;
964}
965
966
967/**
968 * Gets the pointer to the specified shadow page map level-4 entry.
969 *
970 * @returns Pointer to the entry, NULL if the PML4 is not mapped.
971 * @param pVCpu The cross context virtual CPU structure.
972 * @param iPml4 The PML4 index.
973 */
974DECLINLINE(PX86PML4E) pgmShwGetLongModePML4EPtr(PVMCPUCC pVCpu, unsigned int iPml4)
975{
976 PX86PML4 pShwPml4 = pgmShwGetLongModePML4Ptr(pVCpu);
977 if (pShwPml4)
978 return &pShwPml4->a[iPml4];
979 return NULL;
980}
981#endif /* !VBOX_VMM_TARGET_ARMV8 */
982
983
984/**
985 * Cached physical handler lookup.
986 *
987 * @returns VBox status code.
988 * @retval VERR_NOT_FOUND if no handler.
989 * @param pVM The cross context VM structure.
990 * @param GCPhys The lookup address.
991 * @param ppHandler Where to return the handler pointer.
992 */
993DECLINLINE(int) pgmHandlerPhysicalLookup(PVMCC pVM, RTGCPHYS GCPhys, PPGMPHYSHANDLER *ppHandler)
994{
995 PPGMPHYSHANDLER pHandler = pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator.ptrFromInt(pVM->pgm.s.idxLastPhysHandler);
996 if ( pHandler
997 && pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator.isPtrRetOkay(pHandler)
998 && GCPhys >= pHandler->Key
999 && GCPhys < pHandler->KeyLast
1000 && pHandler->hType != NIL_PGMPHYSHANDLERTYPE
1001 && pHandler->hType != 0)
1002
1003 {
1004 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysHandlerLookupHits));
1005 *ppHandler = pHandler;
1006 return VINF_SUCCESS;
1007 }
1008
1009 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysHandlerLookupMisses));
1010 AssertPtrReturn(pVM->VMCC_CTX(pgm).s.pPhysHandlerTree, VERR_PGM_HANDLER_IPE_1);
1011 int rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pHandler);
1012 if (RT_SUCCESS(rc))
1013 {
1014 *ppHandler = pHandler;
1015 pVM->pgm.s.idxLastPhysHandler = pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator.ptrToInt(pHandler);
1016 return VINF_SUCCESS;
1017 }
1018 *ppHandler = NULL;
1019 return rc;
1020}
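
/*
 * Usage sketch (illustrative): callers only need to distinguish 'covered by a
 * handler' from VERR_NOT_FOUND; any other status means the handler tree or
 * allocator is in trouble.
 *
 *      PPGMPHYSHANDLER pHandler;
 *      int rc = pgmHandlerPhysicalLookup(pVM, GCPhys, &pHandler);
 *      if (RT_SUCCESS(rc))
 *      {
 *          ... GCPhys lies within the handler's Key..KeyLast range ...
 *      }
 *      else if (rc == VERR_NOT_FOUND)
 *      {
 *          ... no physical access handler registered here ...
 *      }
 *      else
 *          AssertMsgFailed(("%Rrc\n", rc));
 */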
1021
1022
1023/**
1024 * Converts a handle to a pointer.
1025 *
1026 * @returns Pointer on success, NULL on failure (asserted).
1027 * @param pVM The cross context VM structure.
1028 * @param hType Physical access handler type handle.
1029 */
1030DECLINLINE(PCPGMPHYSHANDLERTYPEINT) pgmHandlerPhysicalTypeHandleToPtr(PVMCC pVM, PGMPHYSHANDLERTYPE hType)
1031{
1032#ifdef IN_RING0
1033 PPGMPHYSHANDLERTYPEINT pType = &pVM->pgmr0.s.aPhysHandlerTypes[hType & PGMPHYSHANDLERTYPE_IDX_MASK];
1034#elif defined(IN_RING3)
1035 PPGMPHYSHANDLERTYPEINT pType = &pVM->pgm.s.aPhysHandlerTypes[hType & PGMPHYSHANDLERTYPE_IDX_MASK];
1036#else
1037# error "Invalid context"
1038#endif
1039 AssertReturn(pType->hType == hType, NULL);
1040 return pType;
1041}
1042
1043
1044/**
1045 * Converts a handle to a pointer, never returns NULL.
1046 *
1047 * @returns Pointer on success, dummy on failure (asserted).
1048 * @param pVM The cross context VM structure.
1049 * @param hType Physical access handler type handle.
1050 */
1051DECLINLINE(PCPGMPHYSHANDLERTYPEINT) pgmHandlerPhysicalTypeHandleToPtr2(PVMCC pVM, PGMPHYSHANDLERTYPE hType)
1052{
1053#ifdef IN_RING0
1054 PPGMPHYSHANDLERTYPEINT pType = &pVM->pgmr0.s.aPhysHandlerTypes[hType & PGMPHYSHANDLERTYPE_IDX_MASK];
1055#elif defined(IN_RING3)
1056 PPGMPHYSHANDLERTYPEINT pType = &pVM->pgm.s.aPhysHandlerTypes[hType & PGMPHYSHANDLERTYPE_IDX_MASK];
1057#else
1058# error "Invalid context"
1059#endif
1060 AssertReturn(pType->hType == hType, &g_pgmHandlerPhysicalDummyType);
1061 return pType;
1062}
1063
1064
1065/**
1066 * Internal worker for finding an 'in-use' shadow page given by its index.
1067 *
1068 * @returns Pointer to the shadow page structure.
1069 * @param pPool The pool.
1070 * @param idx The pool page index.
1071 */
1072DECLINLINE(PPGMPOOLPAGE) pgmPoolGetPageByIdx(PPGMPOOL pPool, unsigned idx)
1073{
1074 AssertFatalMsg(idx >= PGMPOOL_IDX_FIRST && idx < pPool->cCurPages, ("idx=%d\n", idx));
1075 return &pPool->aPages[idx];
1076}
1077
1078
1079/**
1080 * Clear references to guest physical memory.
1081 *
1082 * @param pPool The pool.
1083 * @param pPoolPage The pool page.
1084 * @param pPhysPage The physical guest page tracking structure.
1085 * @param iPte Shadow PTE index
1086 */
1087DECLINLINE(void) pgmTrackDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPoolPage, PPGMPAGE pPhysPage, uint16_t iPte)
1088{
1089 /*
1090 * Just deal with the simple case here.
1091 */
1092#ifdef VBOX_STRICT
1093 PVMCC pVM = pPool->CTX_SUFF(pVM); NOREF(pVM);
1094#endif
1095#ifdef LOG_ENABLED
1096 const unsigned uOrg = PGM_PAGE_GET_TRACKING(pPhysPage);
1097#endif
1098 const unsigned cRefs = PGM_PAGE_GET_TD_CREFS(pPhysPage);
1099 if (cRefs == 1)
1100 {
1101 Assert(pPoolPage->idx == PGM_PAGE_GET_TD_IDX(pPhysPage));
1102 Assert(iPte == PGM_PAGE_GET_PTE_INDEX(pPhysPage));
1103 /* Invalidate the tracking data. */
1104 PGM_PAGE_SET_TRACKING(pVM, pPhysPage, 0);
1105 }
1106 else
1107 pgmPoolTrackPhysExtDerefGCPhys(pPool, pPoolPage, pPhysPage, iPte);
1108 Log2(("pgmTrackDerefGCPhys: %x -> %x pPhysPage=%R[pgmpage]\n", uOrg, PGM_PAGE_GET_TRACKING(pPhysPage), pPhysPage ));
1109}
1110
1111
1112/**
1113 * Moves the page to the head of the age list.
1114 *
1115 * This is done when the cached page is used in one way or another.
1116 *
1117 * @param pPool The pool.
1118 * @param pPage The cached page.
1119 */
1120DECLINLINE(void) pgmPoolCacheUsed(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
1121{
1122 PGM_LOCK_ASSERT_OWNER(pPool->CTX_SUFF(pVM));
1123
1124 /*
1125 * Move to the head of the age list.
1126 */
1127 if (pPage->iAgePrev != NIL_PGMPOOL_IDX)
1128 {
1129 /* unlink */
1130 pPool->aPages[pPage->iAgePrev].iAgeNext = pPage->iAgeNext;
1131 if (pPage->iAgeNext != NIL_PGMPOOL_IDX)
1132 pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->iAgePrev;
1133 else
1134 pPool->iAgeTail = pPage->iAgePrev;
1135
1136 /* insert at head */
1137 pPage->iAgePrev = NIL_PGMPOOL_IDX;
1138 pPage->iAgeNext = pPool->iAgeHead;
1139 Assert(pPage->iAgeNext != NIL_PGMPOOL_IDX); /* we would've already been head then */
1140 pPool->iAgeHead = pPage->idx;
1141 pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->idx;
1142 }
1143}
1144
1145
1146/**
1147 * Locks a page to prevent flushing (important for cr3 root pages or shadow pae pd pages).
1148 *
1149 * @param pPool The pool.
1150 * @param pPage PGM pool page
1151 */
1152DECLINLINE(void) pgmPoolLockPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
1153{
1154 PGM_LOCK_ASSERT_OWNER(pPool->CTX_SUFF(pVM)); NOREF(pPool);
1155 ASMAtomicIncU32(&pPage->cLocked);
1156}
1157
1158
1159/**
1160 * Unlocks a page to allow flushing again.
1161 *
1162 * @param pPool The pool.
1163 * @param pPage PGM pool page
1164 */
1165DECLINLINE(void) pgmPoolUnlockPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
1166{
1167 PGM_LOCK_ASSERT_OWNER(pPool->CTX_SUFF(pVM)); NOREF(pPool);
1168 Assert(pPage->cLocked);
1169 ASMAtomicDecU32(&pPage->cLocked);
1170}
1171
1172
1173/**
1174 * Checks if the page is locked (e.g. the active CR3 or one of the four PDs of a PAE PDPT).
1175 *
1176 * @returns true if the page is locked, false if not.
1177 * @param pPage PGM pool page
1178 */
1179DECLINLINE(bool) pgmPoolIsPageLocked(PPGMPOOLPAGE pPage)
1180{
1181 if (pPage->cLocked)
1182 {
1183 LogFlow(("pgmPoolIsPageLocked found root page %d\n", pPage->enmKind));
1184 if (pPage->cModifications)
1185 pPage->cModifications = 1; /* reset counter (can't use 0, or else it will be reinserted in the modified list) */
1186 return true;
1187 }
1188 return false;
1189}
1190
1191
1192/**
1193 * Checks if the specified page is dirty (not write monitored).
1194 *
1195 * @returns true if dirty, false if not.
1196 * @param pVM The cross context VM structure.
1197 * @param GCPhys Guest physical address
1198 */
1199DECLINLINE(bool) pgmPoolIsDirtyPage(PVMCC pVM, RTGCPHYS GCPhys)
1200{
1201 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1202 PGM_LOCK_ASSERT_OWNER(pVM);
1203 if (!pPool->cDirtyPages)
1204 return false;
1205 return pgmPoolIsDirtyPageSlow(pVM, GCPhys);
1206}
1207
1208
1209/** @} */
1210
1211#endif /* !VMM_INCLUDED_SRC_include_PGMInline_h */
1212