VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp@17534

Last change on this file since 17534 was 17509, checked in by vboxsync, 16 years ago

PGM: Moved the page pool PT flushing code in the access handler bits to where it belongs and called it pgmPoolTrackFlushGCPhys. Fixed a status code corruption bug in PGMR3PhysTlbGCPhys2Ptr (new phys). Made the lazy zero page replacement code work in the new code; it's disabled by default because it frequently requires flushing the shadow page pool, since the tracking code assumes the HCPhys of a PGMPAGE is unique and never shared.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 82.3 KB
1/* $Id: PGMAll.cpp 17509 2009-03-07 01:30:23Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor - All context code.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#define LOG_GROUP LOG_GROUP_PGM
26#include <VBox/pgm.h>
27#include <VBox/cpum.h>
28#include <VBox/selm.h>
29#include <VBox/iom.h>
30#include <VBox/sup.h>
31#include <VBox/mm.h>
32#include <VBox/stam.h>
33#include <VBox/csam.h>
34#include <VBox/patm.h>
35#include <VBox/trpm.h>
36#include <VBox/rem.h>
37#include <VBox/em.h>
38#include <VBox/hwaccm.h>
39#include <VBox/hwacc_vmx.h>
40#include "PGMInternal.h"
41#include <VBox/vm.h>
42#include <iprt/assert.h>
43#include <iprt/asm.h>
44#include <iprt/string.h>
45#include <VBox/log.h>
46#include <VBox/param.h>
47#include <VBox/err.h>
48
49
50/*******************************************************************************
51* Structures and Typedefs *
52*******************************************************************************/
53/**
54 * State structure for PGM_GST_NAME(HandlerVirtualUpdate) that's
55 * passed to PGM_GST_NAME(VirtHandlerUpdateOne) during enumeration.
56 */
57typedef struct PGMHVUSTATE
58{
59 /** The VM handle. */
60 PVM pVM;
61 /** The todo flags. */
62 RTUINT fTodo;
63 /** The CR4 register value. */
64 uint32_t cr4;
65} PGMHVUSTATE, *PPGMHVUSTATE;
66
67
68/*******************************************************************************
69* Internal Functions *
70*******************************************************************************/
71DECLINLINE(int) pgmShwGetLongModePDPtr(PVM pVM, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
72DECLINLINE(int) pgmShwGetPAEPDPtr(PVM pVM, RTGCPTR GCPtr, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
73#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
74DECLINLINE(int) pgmShwGetPaePoolPagePD(PPGM pPGM, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde);
75#endif
76
77/*
78 * Shadow - 32-bit mode
79 */
80#define PGM_SHW_TYPE PGM_TYPE_32BIT
81#define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
82#include "PGMAllShw.h"
83
84/* Guest - real mode */
85#define PGM_GST_TYPE PGM_TYPE_REAL
86#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
87#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
88#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
89#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
90#include "PGMGstDefs.h"
91#include "PGMAllGst.h"
92#include "PGMAllBth.h"
93#undef BTH_PGMPOOLKIND_PT_FOR_PT
94#undef BTH_PGMPOOLKIND_ROOT
95#undef PGM_BTH_NAME
96#undef PGM_GST_TYPE
97#undef PGM_GST_NAME
98
99/* Guest - protected mode */
100#define PGM_GST_TYPE PGM_TYPE_PROT
101#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
102#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
103#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
104#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
105#include "PGMGstDefs.h"
106#include "PGMAllGst.h"
107#include "PGMAllBth.h"
108#undef BTH_PGMPOOLKIND_PT_FOR_PT
109#undef BTH_PGMPOOLKIND_ROOT
110#undef PGM_BTH_NAME
111#undef PGM_GST_TYPE
112#undef PGM_GST_NAME
113
114/* Guest - 32-bit mode */
115#define PGM_GST_TYPE PGM_TYPE_32BIT
116#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
117#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
118#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
119#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
120#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD
121#include "PGMGstDefs.h"
122#include "PGMAllGst.h"
123#include "PGMAllBth.h"
124#undef BTH_PGMPOOLKIND_PT_FOR_BIG
125#undef BTH_PGMPOOLKIND_PT_FOR_PT
126#undef BTH_PGMPOOLKIND_ROOT
127#undef PGM_BTH_NAME
128#undef PGM_GST_TYPE
129#undef PGM_GST_NAME
130
131#undef PGM_SHW_TYPE
132#undef PGM_SHW_NAME
133
134
135/*
136 * Shadow - PAE mode
137 */
138#define PGM_SHW_TYPE PGM_TYPE_PAE
139#define PGM_SHW_NAME(name) PGM_SHW_NAME_PAE(name)
140#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
141#include "PGMAllShw.h"
142
143/* Guest - real mode */
144#define PGM_GST_TYPE PGM_TYPE_REAL
145#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
146#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
147#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
148#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
149#include "PGMGstDefs.h"
150#include "PGMAllBth.h"
151#undef BTH_PGMPOOLKIND_PT_FOR_PT
152#undef BTH_PGMPOOLKIND_ROOT
153#undef PGM_BTH_NAME
154#undef PGM_GST_TYPE
155#undef PGM_GST_NAME
156
157/* Guest - protected mode */
158#define PGM_GST_TYPE PGM_TYPE_PROT
159#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
160#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
161#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
162#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
163#include "PGMGstDefs.h"
164#include "PGMAllBth.h"
165#undef BTH_PGMPOOLKIND_PT_FOR_PT
166#undef BTH_PGMPOOLKIND_ROOT
167#undef PGM_BTH_NAME
168#undef PGM_GST_TYPE
169#undef PGM_GST_NAME
170
171/* Guest - 32-bit mode */
172#define PGM_GST_TYPE PGM_TYPE_32BIT
173#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
174#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
175#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
176#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
177#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_FOR_32BIT
178#include "PGMGstDefs.h"
179#include "PGMAllBth.h"
180#undef BTH_PGMPOOLKIND_PT_FOR_BIG
181#undef BTH_PGMPOOLKIND_PT_FOR_PT
182#undef BTH_PGMPOOLKIND_ROOT
183#undef PGM_BTH_NAME
184#undef PGM_GST_TYPE
185#undef PGM_GST_NAME
186
187
188/* Guest - PAE mode */
189#define PGM_GST_TYPE PGM_TYPE_PAE
190#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
191#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
192#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
193#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
194#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT
195#include "PGMGstDefs.h"
196#include "PGMAllGst.h"
197#include "PGMAllBth.h"
198#undef BTH_PGMPOOLKIND_PT_FOR_BIG
199#undef BTH_PGMPOOLKIND_PT_FOR_PT
200#undef BTH_PGMPOOLKIND_ROOT
201#undef PGM_BTH_NAME
202#undef PGM_GST_TYPE
203#undef PGM_GST_NAME
204
205#undef PGM_SHW_TYPE
206#undef PGM_SHW_NAME
207
208
209#ifndef IN_RC /* AMD64 implies VT-x/AMD-V */
210/*
211 * Shadow - AMD64 mode
212 */
213# define PGM_SHW_TYPE PGM_TYPE_AMD64
214# define PGM_SHW_NAME(name) PGM_SHW_NAME_AMD64(name)
215# include "PGMAllShw.h"
216
217/* Guest - protected mode (only used for AMD-V nested paging in 64-bit mode) */
218# define PGM_GST_TYPE PGM_TYPE_PROT
219# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
220# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
221# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
222# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PD_PHYS
223# include "PGMGstDefs.h"
224# include "PGMAllBth.h"
225# undef BTH_PGMPOOLKIND_PT_FOR_PT
226# undef BTH_PGMPOOLKIND_ROOT
227# undef PGM_BTH_NAME
228# undef PGM_GST_TYPE
229# undef PGM_GST_NAME
230
231# ifdef VBOX_WITH_64_BITS_GUESTS
232/* Guest - AMD64 mode */
233# define PGM_GST_TYPE PGM_TYPE_AMD64
234# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
235# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
236# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
237# define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
238# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_64BIT_PML4
239# include "PGMGstDefs.h"
240# include "PGMAllGst.h"
241# include "PGMAllBth.h"
242# undef BTH_PGMPOOLKIND_PT_FOR_BIG
243# undef BTH_PGMPOOLKIND_PT_FOR_PT
244# undef BTH_PGMPOOLKIND_ROOT
245# undef PGM_BTH_NAME
246# undef PGM_GST_TYPE
247# undef PGM_GST_NAME
248# endif /* VBOX_WITH_64_BITS_GUESTS */
249
250# undef PGM_SHW_TYPE
251# undef PGM_SHW_NAME
252
253
254/*
255 * Shadow - Nested paging mode
256 */
257# define PGM_SHW_TYPE PGM_TYPE_NESTED
258# define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED(name)
259# include "PGMAllShw.h"
260
261/* Guest - real mode */
262# define PGM_GST_TYPE PGM_TYPE_REAL
263# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
264# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_REAL(name)
265# include "PGMGstDefs.h"
266# include "PGMAllBth.h"
267# undef PGM_BTH_NAME
268# undef PGM_GST_TYPE
269# undef PGM_GST_NAME
270
271/* Guest - protected mode */
272# define PGM_GST_TYPE PGM_TYPE_PROT
273# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
274# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PROT(name)
275# include "PGMGstDefs.h"
276# include "PGMAllBth.h"
277# undef PGM_BTH_NAME
278# undef PGM_GST_TYPE
279# undef PGM_GST_NAME
280
281/* Guest - 32-bit mode */
282# define PGM_GST_TYPE PGM_TYPE_32BIT
283# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
284# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT(name)
285# include "PGMGstDefs.h"
286# include "PGMAllBth.h"
287# undef PGM_BTH_NAME
288# undef PGM_GST_TYPE
289# undef PGM_GST_NAME
290
291/* Guest - PAE mode */
292# define PGM_GST_TYPE PGM_TYPE_PAE
293# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
294# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE(name)
295# include "PGMGstDefs.h"
296# include "PGMAllBth.h"
297# undef PGM_BTH_NAME
298# undef PGM_GST_TYPE
299# undef PGM_GST_NAME
300
301# ifdef VBOX_WITH_64_BITS_GUESTS
302/* Guest - AMD64 mode */
303# define PGM_GST_TYPE PGM_TYPE_AMD64
304# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
305# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64(name)
306# include "PGMGstDefs.h"
307# include "PGMAllBth.h"
308# undef PGM_BTH_NAME
309# undef PGM_GST_TYPE
310# undef PGM_GST_NAME
311# endif /* VBOX_WITH_64_BITS_GUESTS */
312
313# undef PGM_SHW_TYPE
314# undef PGM_SHW_NAME
315
316
317/*
318 * Shadow - EPT
319 */
320# define PGM_SHW_TYPE PGM_TYPE_EPT
321# define PGM_SHW_NAME(name) PGM_SHW_NAME_EPT(name)
322# include "PGMAllShw.h"
323
324/* Guest - real mode */
325# define PGM_GST_TYPE PGM_TYPE_REAL
326# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
327# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_REAL(name)
328# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
329# include "PGMGstDefs.h"
330# include "PGMAllBth.h"
331# undef BTH_PGMPOOLKIND_PT_FOR_PT
332# undef PGM_BTH_NAME
333# undef PGM_GST_TYPE
334# undef PGM_GST_NAME
335
336/* Guest - protected mode */
337# define PGM_GST_TYPE PGM_TYPE_PROT
338# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
339# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
340# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
341# include "PGMGstDefs.h"
342# include "PGMAllBth.h"
343# undef BTH_PGMPOOLKIND_PT_FOR_PT
344# undef PGM_BTH_NAME
345# undef PGM_GST_TYPE
346# undef PGM_GST_NAME
347
348/* Guest - 32-bit mode */
349# define PGM_GST_TYPE PGM_TYPE_32BIT
350# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
351# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_32BIT(name)
352# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
353# include "PGMGstDefs.h"
354# include "PGMAllBth.h"
355# undef BTH_PGMPOOLKIND_PT_FOR_PT
356# undef PGM_BTH_NAME
357# undef PGM_GST_TYPE
358# undef PGM_GST_NAME
359
360/* Guest - PAE mode */
361# define PGM_GST_TYPE PGM_TYPE_PAE
362# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
363# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PAE(name)
364# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
365# include "PGMGstDefs.h"
366# include "PGMAllBth.h"
367# undef BTH_PGMPOOLKIND_PT_FOR_PT
368# undef PGM_BTH_NAME
369# undef PGM_GST_TYPE
370# undef PGM_GST_NAME
371
372# ifdef VBOX_WITH_64_BITS_GUESTS
373/* Guest - AMD64 mode */
374# define PGM_GST_TYPE PGM_TYPE_AMD64
375# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
376# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_AMD64(name)
377# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
378# include "PGMGstDefs.h"
379# include "PGMAllBth.h"
380# undef BTH_PGMPOOLKIND_PT_FOR_PT
381# undef PGM_BTH_NAME
382# undef PGM_GST_TYPE
383# undef PGM_GST_NAME
384# endif /* VBOX_WITH_64_BITS_GUESTS */
385
386# undef PGM_SHW_TYPE
387# undef PGM_SHW_NAME
388
389#endif /* !IN_RC */
390
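/*
 * A short note on the block above (a sketch with hypothetical names): the
 * shadow x guest mode combinations are instantiated by repeatedly #including
 * the template headers with different PGM_SHW_NAME / PGM_GST_NAME /
 * PGM_BTH_NAME definitions, so a single template compiles into one function
 * set per mode pairing. The same idiom in miniature:
 *
 *     // template.h (hypothetical)
 *     int TMPL_NAME(SyncPage)(void) { return TMPL_SHW_TYPE; }
 *
 *     // instantiation site
 *     #define TMPL_SHW_TYPE   32
 *     #define TMPL_NAME(name) Shw32Bit##name
 *     #include "template.h"             // emits Shw32BitSyncPage()
 *     #undef  TMPL_NAME
 *     #undef  TMPL_SHW_TYPE
 *
 * The per-mode entry points are reached at runtime through dispatch macros
 * such as PGM_BTH_PFN(), as used in PGMTrap0eHandler() below.
 */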
391
392#ifndef IN_RING3
393/**
394 * #PF Handler.
395 *
396 * @returns VBox status code (appropriate for trap handling and GC return).
397 * @param pVM VM Handle.
398 * @param uErr The trap error code.
399 * @param pRegFrame Trap register frame.
400 * @param pvFault The fault address.
401 */
402VMMDECL(int) PGMTrap0eHandler(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
403{
404 LogFlow(("PGMTrap0eHandler: uErr=%RGu pvFault=%RGv eip=%RGv\n", uErr, pvFault, (RTGCPTR)pRegFrame->rip));
405 STAM_PROFILE_START(&pVM->pgm.s.StatRZTrap0e, a);
406 STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );
407
408
409#ifdef VBOX_WITH_STATISTICS
410 /*
411 * Error code stats.
412 */
413 if (uErr & X86_TRAP_PF_US)
414 {
415 if (!(uErr & X86_TRAP_PF_P))
416 {
417 if (uErr & X86_TRAP_PF_RW)
418 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSNotPresentWrite);
419 else
420 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSNotPresentRead);
421 }
422 else if (uErr & X86_TRAP_PF_RW)
423 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSWrite);
424 else if (uErr & X86_TRAP_PF_RSVD)
425 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSReserved);
426 else if (uErr & X86_TRAP_PF_ID)
427 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSNXE);
428 else
429 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSRead);
430 }
431 else
432 { /* Supervisor */
433 if (!(uErr & X86_TRAP_PF_P))
434 {
435 if (uErr & X86_TRAP_PF_RW)
436 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eSVNotPresentWrite);
437 else
438 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eSVNotPresentRead);
439 }
440 else if (uErr & X86_TRAP_PF_RW)
441 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eSVWrite);
442 else if (uErr & X86_TRAP_PF_ID)
443 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eSNXE);
444 else if (uErr & X86_TRAP_PF_RSVD)
445 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eSVReserved);
446 }
447#endif /* VBOX_WITH_STATISTICS */
448
449 /*
450 * Call the worker.
451 */
452 int rc = PGM_BTH_PFN(Trap0eHandler, pVM)(pVM, uErr, pRegFrame, pvFault);
453 if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
454 rc = VINF_SUCCESS;
455 STAM_STATS({ if (rc == VINF_EM_RAW_GUEST_TRAP) STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eGuestPF); });
456 STAM_STATS({ if (!pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution))
457 pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatRZTrap0eTime2Misc; });
458 STAM_PROFILE_STOP_EX(&pVM->pgm.s.StatRZTrap0e, pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution), a);
459 return rc;
460}
461#endif /* !IN_RING3 */
462
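/*
 * Quick reference for the X86_TRAP_PF_* bits decoded in the statistics block
 * above (the architectural #PF error code):
 *   X86_TRAP_PF_P    - protection violation on a present page (clear means a
 *                      page or paging structure entry was not present).
 *   X86_TRAP_PF_RW   - the faulting access was a write.
 *   X86_TRAP_PF_US   - the access originated in user mode (CPL 3).
 *   X86_TRAP_PF_RSVD - a reserved bit was set in a paging structure entry.
 *   X86_TRAP_PF_ID   - the fault was caused by an instruction fetch (NX/XD).
 */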
463
464/**
465 * Prefetch a page
466 *
467 * Typically used to sync commonly used pages before entering raw mode
468 * after a CR3 reload.
469 *
470 * @returns VBox status code suitable for scheduling.
471 * @retval VINF_SUCCESS on success.
472 * @retval VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
473 * @param pVM VM handle.
474 * @param GCPtrPage Page to invalidate.
475 */
476VMMDECL(int) PGMPrefetchPage(PVM pVM, RTGCPTR GCPtrPage)
477{
478 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,Prefetch), a);
479 int rc = PGM_BTH_PFN(PrefetchPage, pVM)(pVM, GCPtrPage);
480 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,Prefetch), a);
481 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
482 return rc;
483}
484
485
486/**
487 * Gets the mapping corresponding to the specified address (if any).
488 *
489 * @returns Pointer to the mapping.
490 * @returns NULL if not found.
491 *
492 * @param pVM The virtual machine.
493 * @param GCPtr The guest context pointer.
494 */
495PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr)
496{
497 PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
498 while (pMapping)
499 {
500 if ((uintptr_t)GCPtr < (uintptr_t)pMapping->GCPtr)
501 break;
502 if ((uintptr_t)GCPtr - (uintptr_t)pMapping->GCPtr < pMapping->cb)
503 return pMapping;
504 pMapping = pMapping->CTX_SUFF(pNext);
505 }
506 return NULL;
507}
508
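/*
 * Note on pgmGetMapping() above: the walk relies on the mapping list being
 * sorted by ascending GCPtr, which is why it can stop as soon as the lookup
 * address falls below the current mapping's base; containment is then tested
 * with the unsigned difference (GCPtr - base) < cb, covering the mapping's
 * whole [base, base + cb) range.
 */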
509
510/**
511 * Verifies a range of pages for read or write access
512 *
513 * Only checks the guest's page tables
514 *
515 * @returns VBox status code.
516 * @param pVM VM handle.
517 * @param Addr Guest virtual address to check
518 * @param cbSize Access size
519 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
520 * @remarks Currently not in use.
521 */
522VMMDECL(int) PGMIsValidAccess(PVM pVM, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
523{
524 /*
525 * Validate input.
526 */
527 if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
528 {
529 AssertMsgFailed(("PGMIsValidAccess: invalid access type %08x\n", fAccess));
530 return VERR_INVALID_PARAMETER;
531 }
532
533 uint64_t fPage;
534 int rc = PGMGstGetPage(pVM, (RTGCPTR)Addr, &fPage, NULL);
535 if (RT_FAILURE(rc))
536 {
537 Log(("PGMIsValidAccess: access violation for %RGv rc=%d\n", Addr, rc));
538 return VINF_EM_RAW_GUEST_TRAP;
539 }
540
541 /*
542 * Check if the access would cause a page fault
543 *
544 * Note that hypervisor page directories are not present in the guest's tables, so this check
545 * is sufficient.
546 */
547 bool fWrite = !!(fAccess & X86_PTE_RW);
548 bool fUser = !!(fAccess & X86_PTE_US);
549 if ( !(fPage & X86_PTE_P)
550 || (fWrite && !(fPage & X86_PTE_RW))
551 || (fUser && !(fPage & X86_PTE_US)) )
552 {
553 Log(("PGMIsValidAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPage, fWrite, fUser));
554 return VINF_EM_RAW_GUEST_TRAP;
555 }
556 if ( RT_SUCCESS(rc)
557 && PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize))
558 return PGMIsValidAccess(pVM, Addr + PAGE_SIZE, (cbSize > PAGE_SIZE) ? cbSize - PAGE_SIZE : 1, fAccess);
559 return rc;
560}
561
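/*
 * Example (hypothetical caller and names): checking whether the guest could
 * read and write a user-mode buffer without faulting, using only the guest
 * page tables:
 *
 *     int rc2 = PGMIsValidAccess(pVM, GCPtrBuf, cbBuf, X86_PTE_RW | X86_PTE_US);
 *     if (rc2 == VINF_EM_RAW_GUEST_TRAP)
 *         Log(("buffer at %RGv would fault in the guest\n", GCPtrBuf));
 *
 * The fAccess bits are restricted to X86_PTE_RW and X86_PTE_US by the
 * parameter validation at the top of the function.
 */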
562
563/**
564 * Verifies a range of pages for read or write access
565 *
566 * Supports handling of pages marked for dirty bit tracking and CSAM
567 *
568 * @returns VBox status code.
569 * @param pVM VM handle.
570 * @param Addr Guest virtual address to check
571 * @param cbSize Access size
572 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
573 */
574VMMDECL(int) PGMVerifyAccess(PVM pVM, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
575{
576 AssertMsg(!(fAccess & ~(X86_PTE_US | X86_PTE_RW)), ("PGMVerifyAccess: invalid access type %08x\n", fAccess));
577
578 /*
579 * Get going.
580 */
581 uint64_t fPageGst;
582 int rc = PGMGstGetPage(pVM, (RTGCPTR)Addr, &fPageGst, NULL);
583 if (RT_FAILURE(rc))
584 {
585 Log(("PGMVerifyAccess: access violation for %RGv rc=%d\n", Addr, rc));
586 return VINF_EM_RAW_GUEST_TRAP;
587 }
588
589 /*
590 * Check if the access would cause a page fault
591 *
592 * Note that hypervisor page directories are not present in the guest's tables, so this check
593 * is sufficient.
594 */
595 const bool fWrite = !!(fAccess & X86_PTE_RW);
596 const bool fUser = !!(fAccess & X86_PTE_US);
597 if ( !(fPageGst & X86_PTE_P)
598 || (fWrite && !(fPageGst & X86_PTE_RW))
599 || (fUser && !(fPageGst & X86_PTE_US)) )
600 {
601 Log(("PGMVerifyAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPageGst, fWrite, fUser));
602 return VINF_EM_RAW_GUEST_TRAP;
603 }
604
605 if (!HWACCMIsNestedPagingActive(pVM))
606 {
607 /*
608 * Next step is to verify if we protected this page for dirty bit tracking or for CSAM scanning
609 */
610 rc = PGMShwGetPage(pVM, (RTGCPTR)Addr, NULL, NULL);
611 if ( rc == VERR_PAGE_NOT_PRESENT
612 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
613 {
614 /*
615 * Page is not present in our page tables.
616 * Try to sync it!
617 */
618 Assert(X86_TRAP_PF_RW == X86_PTE_RW && X86_TRAP_PF_US == X86_PTE_US);
619 uint32_t uErr = fAccess & (X86_TRAP_PF_RW | X86_TRAP_PF_US);
620 rc = PGM_BTH_PFN(VerifyAccessSyncPage, pVM)(pVM, Addr, fPageGst, uErr);
621 if (rc != VINF_SUCCESS)
622 return rc;
623 }
624 else
625 AssertMsg(rc == VINF_SUCCESS, ("PGMShwGetPage %RGv failed with %Rrc\n", Addr, rc));
626 }
627
628#if 0 /* def VBOX_STRICT; triggers too often now */
629 /*
630 * This check is a bit paranoid, but useful.
631 */
632 /** @note this will assert when writing to monitored pages (a bit annoying actually) */
633 uint64_t fPageShw;
634 rc = PGMShwGetPage(pVM, (RTGCPTR)Addr, &fPageShw, NULL);
635 if ( (rc == VERR_PAGE_NOT_PRESENT || RT_FAILURE(rc))
636 || (fWrite && !(fPageShw & X86_PTE_RW))
637 || (fUser && !(fPageShw & X86_PTE_US)) )
638 {
639 AssertMsgFailed(("Unexpected access violation for %RGv! rc=%Rrc write=%d user=%d\n",
640 Addr, rc, fWrite && !(fPageShw & X86_PTE_RW), fUser && !(fPageShw & X86_PTE_US)));
641 return VINF_EM_RAW_GUEST_TRAP;
642 }
643#endif
644
645 if ( RT_SUCCESS(rc)
646 && ( PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize - 1)
647 || Addr + cbSize < Addr))
648 {
649 /* Don't recursively call PGMVerifyAccess as we might run out of stack. */
650 for (;;)
651 {
652 Addr += PAGE_SIZE;
653 if (cbSize > PAGE_SIZE)
654 cbSize -= PAGE_SIZE;
655 else
656 cbSize = 1;
657 rc = PGMVerifyAccess(pVM, Addr, 1, fAccess);
658 if (rc != VINF_SUCCESS)
659 break;
660 if (PAGE_ADDRESS(Addr) == PAGE_ADDRESS(Addr + cbSize - 1))
661 break;
662 }
663 }
664 return rc;
665}
666
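/*
 * Unlike PGMIsValidAccess() above, PGMVerifyAccess() also consults the shadow
 * tables and, via VerifyAccessSyncPage, syncs in pages that were deliberately
 * kept not-present or read-only for dirty-bit tracking or CSAM scanning. A
 * hypothetical caller preparing a guest write would therefore use:
 *
 *     rc = PGMVerifyAccess(pVM, GCPtrDst, cbWrite, X86_PTE_RW | X86_PTE_US);
 *
 * so the subsequent access should not hit an avoidable #PF in the shadow
 * paging code (GCPtrDst/cbWrite are illustrative names).
 */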
667
668/**
669 * Emulation of the invlpg instruction (HC only actually).
670 *
671 * @returns VBox status code, special care required.
672 * @retval VINF_PGM_SYNC_CR3 - handled.
673 * @retval VINF_EM_RAW_EMULATE_INSTR - not handled (RC only).
674 * @retval VERR_REM_FLUSHED_PAGES_OVERFLOW - not handled.
675 *
676 * @param pVM VM handle.
677 * @param GCPtrPage Page to invalidate.
678 *
679 * @remark ASSUMES the page table entry or page directory is valid. Fairly
680 * safe, but there could be edge cases!
681 *
682 * @todo Flush page or page directory only if necessary!
683 */
684VMMDECL(int) PGMInvalidatePage(PVM pVM, RTGCPTR GCPtrPage)
685{
686 int rc;
687 Log3(("PGMInvalidatePage: GCPtrPage=%RGv\n", GCPtrPage));
688
689#ifndef IN_RING3
690 /*
691 * Notify the recompiler so it can record this instruction.
692 * Failure happens when it's out of space. We'll return to HC in that case.
693 */
694 rc = REMNotifyInvalidatePage(pVM, GCPtrPage);
695 if (rc != VINF_SUCCESS)
696 return rc;
697#endif /* !IN_RING3 */
698
699
700#ifdef IN_RC
701 /*
702 * Check for conflicts and pending CR3 monitoring updates.
703 */
704 if (!pVM->pgm.s.fMappingsFixed)
705 {
706 if ( pgmGetMapping(pVM, GCPtrPage)
707 && PGMGstGetPage(pVM, GCPtrPage, NULL, NULL) != VERR_PAGE_TABLE_NOT_PRESENT)
708 {
709 LogFlow(("PGMGCInvalidatePage: Conflict!\n"));
710 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
711 STAM_COUNTER_INC(&pVM->pgm.s.StatRCInvlPgConflict);
712 return VINF_PGM_SYNC_CR3;
713 }
714
715 if (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
716 {
717 LogFlow(("PGMGCInvalidatePage: PGM_SYNC_MONITOR_CR3 -> reinterpret instruction in R3\n"));
718 STAM_COUNTER_INC(&pVM->pgm.s.StatRCInvlPgSyncMonCR3);
719 return VINF_EM_RAW_EMULATE_INSTR;
720 }
721 }
722#endif /* IN_RC */
723
724 /*
725 * Call paging mode specific worker.
726 */
727 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePage), a);
728 rc = PGM_BTH_PFN(InvalidatePage, pVM)(pVM, GCPtrPage);
729 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePage), a);
730
731#ifdef IN_RING3
732 /*
733 * Check if we have a pending update of the CR3 monitoring.
734 */
735 if ( RT_SUCCESS(rc)
736 && (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3))
737 {
738 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
739 Assert(!pVM->pgm.s.fMappingsFixed);
740#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
741 Assert(pVM->pgm.s.GCPhysCR3 == pVM->pgm.s.GCPhysGstCR3Monitored);
742 rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, pVM->pgm.s.GCPhysCR3);
743#endif
744 }
745
746 /*
747 * Inform CSAM about the flush
748 *
749 * Note: This is to check if monitored pages have been changed; when we implement
750 * callbacks for virtual handlers, this is no longer required.
751 */
752 CSAMR3FlushPage(pVM, GCPtrPage);
753#endif /* IN_RING3 */
754 return rc;
755}
756
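/*
 * Note on PGMInvalidatePage() above: VINF_PGM_SYNC_CR3 is a normal, expected
 * outcome when a mapping conflict or pending CR3 monitoring update is
 * detected with floating mappings; the caller lets the VM_FF_PGM_SYNC_CR3
 * force action flag trigger a full resync rather than treating it as an
 * error. The ring-3 CSAMR3FlushPage() call keeps the code-scanning state in
 * step with the invalidated page until virtual handler callbacks exist.
 */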
757
758/**
759 * Executes an instruction using the interpreter.
760 *
761 * @returns VBox status code (appropriate for trap handling and GC return).
762 * @param pVM VM handle.
763 * @param pRegFrame Register frame.
764 * @param pvFault Fault address.
765 */
766VMMDECL(int) PGMInterpretInstruction(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
767{
768 uint32_t cb;
769 int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);
770 if (rc == VERR_EM_INTERPRETER)
771 rc = VINF_EM_RAW_EMULATE_INSTR;
772 if (rc != VINF_SUCCESS)
773 Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%RGv)\n", rc, pvFault));
774 return rc;
775}
776
777
778/**
779 * Gets effective page information (from the VMM page directory).
780 *
781 * @returns VBox status.
782 * @param pVM VM Handle.
783 * @param GCPtr Guest Context virtual address of the page.
784 * @param pfFlags Where to store the flags. These are X86_PTE_*.
785 * @param pHCPhys Where to store the HC physical address of the page.
786 * This is page aligned.
787 * @remark You should use PGMMapGetPage() for pages in a mapping.
788 */
789VMMDECL(int) PGMShwGetPage(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
790{
791 return PGM_SHW_PFN(GetPage,pVM)(pVM, GCPtr, pfFlags, pHCPhys);
792}
793
794
795/**
796 * Sets (replaces) the page flags for a range of pages in the shadow context.
797 *
798 * @returns VBox status.
799 * @param pVM VM handle.
800 * @param GCPtr The address of the first page.
801 * @param cb The size of the range in bytes.
802 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
803 * @remark You must use PGMMapSetPage() for pages in a mapping.
804 */
805VMMDECL(int) PGMShwSetPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
806{
807 return PGMShwModifyPage(pVM, GCPtr, cb, fFlags, 0);
808}
809
810
811/**
812 * Modify page flags for a range of pages in the shadow context.
813 *
814 * The existing flags are ANDed with the fMask and ORed with the fFlags.
815 *
816 * @returns VBox status code.
817 * @param pVM VM handle.
818 * @param GCPtr Virtual address of the first page in the range.
819 * @param cb Size (in bytes) of the range to apply the modification to.
820 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
821 * @param fMask The AND mask - page flags X86_PTE_*.
822 * Be very CAREFUL when ~'ing constants which could be 32-bit!
823 * @remark You must use PGMMapModifyPage() for pages in a mapping.
824 */
825VMMDECL(int) PGMShwModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
826{
827 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
828 Assert(cb);
829
830 /*
831 * Align the input.
832 */
833 cb += GCPtr & PAGE_OFFSET_MASK;
834 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
835 GCPtr = (GCPtr & PAGE_BASE_GC_MASK); /** @todo this ain't necessary, right... */
836
837 /*
838 * Call worker.
839 */
840 return PGM_SHW_PFN(ModifyPage, pVM)(pVM, GCPtr, cb, fFlags, fMask);
841}
842
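/*
 * The fFlags/fMask convention used by PGMShwSetPage()/PGMShwModifyPage(): the
 * new entry value is (old & fMask) | fFlags. Example (hypothetical caller):
 * write-protecting a range in the shadow tables without disturbing any other
 * attribute bits:
 *
 *     rc = PGMShwModifyPage(pVM, GCPtr, cb, 0, ~(uint64_t)X86_PTE_RW);
 *
 * Note the 64-bit cast before the ~, as warned about in the doc comment.
 */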
843
844/**
845 * Gets the SHADOW page directory pointer for the specified address.
846 *
847 * @returns VBox status.
848 * @param pVM VM handle.
849 * @param GCPtr The address.
850 * @param ppPdpt Receives address of pdpt
851 * @param ppPD Receives address of page directory
852 * @remarks Unused.
853 */
854DECLINLINE(int) pgmShwGetPAEPDPtr(PVM pVM, RTGCPTR GCPtr, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
855{
856 PPGM pPGM = &pVM->pgm.s;
857 PPGMPOOL pPool = pPGM->CTX_SUFF(pPool);
858 PPGMPOOLPAGE pShwPage;
859
860 Assert(!HWACCMIsNestedPagingActive(pVM));
861
862 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
863 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
864 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
865
866 *ppPdpt = pPdpt;
867 if (!pPdpe->n.u1Present)
868 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
869
870 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
871 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
872
873 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
874 return VINF_SUCCESS;
875}
876
877#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
878
879/**
880 * Gets the shadow page directory for the specified address, PAE.
881 *
882 * @returns VBox status code; the shadow PD is returned via *ppPD.
883 * @param pVM VM handle.
884 * @param GCPtr The address.
885 * @param pGstPdpe Guest PDPT entry
886 * @param ppPD Receives address of page directory
887 */
888int pgmShwSyncPaePDPtr(PVM pVM, RTGCPTR GCPtr, PX86PDPE pGstPdpe, PX86PDPAE *ppPD)
889{
890 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
891 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
892 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
893 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
894 PPGMPOOLPAGE pShwPage;
895 int rc;
896
897 /* Allocate page directory if not present. */
898 if ( !pPdpe->n.u1Present
899 && !(pPdpe->u & X86_PDPE_PG_MASK))
900 {
901 bool fNestedPaging = HWACCMIsNestedPagingActive(pVM);
902 bool fPaging = !!(CPUMGetGuestCR0(pVM) & X86_CR0_PG);
903 RTGCPTR64 GCPdPt;
904 PGMPOOLKIND enmKind;
905
906# if defined(IN_RC) && defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
907 /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
908 PGMDynLockHCPage(pVM, (uint8_t *)pPdpe);
909# endif
910
911 if (fNestedPaging || !fPaging)
912 {
913 /* AMD-V nested paging or real/protected mode without paging */
914 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
915 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
916 }
917 else
918 {
919 Assert(pGstPdpe);
920
921 if (CPUMGetGuestCR4(pVM) & X86_CR4_PAE)
922 {
923 if (!pGstPdpe->n.u1Present)
924 {
925 /* PD not present; guest must reload CR3 to change it.
926 * No need to monitor anything in this case.
927 */
928 Assert(!HWACCMIsEnabled(pVM));
929
930 GCPdPt = pGstPdpe->u & X86_PDPE_PG_MASK;
931 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
932 pGstPdpe->n.u1Present = 1;
933 }
934 else
935 {
936 GCPdPt = pGstPdpe->u & X86_PDPE_PG_MASK;
937 enmKind = PGMPOOLKIND_PAE_PD_FOR_PAE_PD;
938 }
939 }
940 else
941 {
942 GCPdPt = CPUMGetGuestCR3(pVM);
943 enmKind = (PGMPOOLKIND)(PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD + iPdPt);
944 }
945 }
946
947 /* Create a reference back to the PDPT by using the index in its shadow page. */
948 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPdPt, &pShwPage);
949 if (rc == VERR_PGM_POOL_FLUSHED)
950 {
951 Log(("pgmShwSyncPaePDPtr: PGM pool flushed -> signal sync cr3\n"));
952 Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
953 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
954# if defined(IN_RC) && defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
955 PGMDynUnlockHCPage(pVM, (uint8_t *)pPdpe);
956# endif
957 return VINF_PGM_SYNC_CR3;
958 }
959 AssertRCReturn(rc, rc);
960
961 /* The PD was cached or created; hook it up now. */
962 pPdpe->u |= pShwPage->Core.Key
963 | (pGstPdpe->u & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));
964
965# if defined(IN_RC) && defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
966 /* In 32-bit PAE mode we *must* invalidate the TLB when changing a PDPT entry; the CPU fetches them only during cr3 load, so any
967 * non-present PDPT will continue to cause page faults.
968 */
969 ASMReloadCR3();
970 PGMDynUnlockHCPage(pVM, (uint8_t *)pPdpe);
971# endif
972 }
973 else
974 {
975 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
976 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
977
978 Assert((pPdpe->u & X86_PDPE_PG_MASK) == pShwPage->Core.Key);
979 }
980 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
981 return VINF_SUCCESS;
982}
983
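/*
 * The VERR_PGM_POOL_FLUSHED handling above is the recurring pattern in this
 * file: when pgmPoolAlloc() had to flush the shadow page pool to make room,
 * the sync flags already include PGM_SYNC_CLEAR_PGM_POOL, so the function
 * raises VM_FF_PGM_SYNC_CR3 and returns VINF_PGM_SYNC_CR3, letting the caller
 * unwind and have the full CR3 resync rebuild the shadow structures.
 */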
984
985/**
986 * Gets the pointer to the shadow page directory entry for an address, PAE.
987 *
988 * @returns VBox status code; the pool page of the shadow PD is returned via *ppShwPde.
989 * @param pPGM Pointer to the PGM instance data.
990 * @param GCPtr The address.
991 * @param ppShwPde Receives the address of the pgm pool page for the shadow page directory
992 */
993DECLINLINE(int) pgmShwGetPaePoolPagePD(PPGM pPGM, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde)
994{
995 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
996 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pPGM);
997 AssertReturn(pPdpt, VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT); /* can't happen */
998 if (!pPdpt->a[iPdPt].n.u1Present)
999 {
1000 LogFlow(("pgmShwGetPaePoolPagePD: PD %d not present (%RX64)\n", iPdPt, pPdpt->a[iPdPt].u));
1001 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1002 }
1003
1004 /* Fetch the pgm pool shadow descriptor. */
1005 PPGMPOOLPAGE pShwPde = pgmPoolGetPageByHCPhys(PGM2VM(pPGM), pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
1006 AssertReturn(pShwPde, VERR_INTERNAL_ERROR);
1007
1008 *ppShwPde = pShwPde;
1009 return VINF_SUCCESS;
1010}
1011
1012#endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY */
1013#ifndef IN_RC
1014
1015/**
1016 * Syncs the SHADOW page directory pointer for the specified address.
1017 *
1018 * Allocates backing pages in case the PDPT or PML4 entry is missing.
1019 *
1020 * The caller is responsible for making sure the guest has a valid PD before
1021 * calling this function.
1022 *
1023 * @returns VBox status.
1024 * @param pVM VM handle.
1025 * @param GCPtr The address.
1026 * @param pGstPml4e Guest PML4 entry
1027 * @param pGstPdpe Guest PDPT entry
1028 * @param ppPD Receives address of page directory
1029 */
1030int pgmShwSyncLongModePDPtr(PVM pVM, RTGCPTR64 GCPtr, PX86PML4E pGstPml4e, PX86PDPE pGstPdpe, PX86PDPAE *ppPD)
1031{
1032 PPGM pPGM = &pVM->pgm.s;
1033 PPGMPOOL pPool = pPGM->CTX_SUFF(pPool);
1034 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1035 PX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pPGM, iPml4);
1036 bool fNestedPaging = HWACCMIsNestedPagingActive(pVM);
1037#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
1038 bool fPaging = !!(CPUMGetGuestCR0(pVM) & X86_CR0_PG);
1039#endif
1040 PPGMPOOLPAGE pShwPage;
1041 int rc;
1042
1043 /* Allocate page directory pointer table if not present. */
1044 if ( !pPml4e->n.u1Present
1045 && !(pPml4e->u & X86_PML4E_PG_MASK))
1046 {
1047#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
1048 RTGCPTR64 GCPml4;
1049 PGMPOOLKIND enmKind;
1050
1051 Assert(pVM->pgm.s.CTX_SUFF(pShwPageCR3));
1052
1053 if (fNestedPaging || !fPaging)
1054 {
1055 /* AMD-V nested paging or real/protected mode without paging */
1056 GCPml4 = (RTGCPTR64)iPml4 << X86_PML4_SHIFT;
1057 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_PHYS;
1058 }
1059 else
1060 {
1061 Assert(pGstPml4e && pGstPdpe);
1062
1063 GCPml4 = pGstPml4e->u & X86_PML4E_PG_MASK;
1064 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT;
1065 }
1066
1067 /* Create a reference back to the PDPT by using the index in its shadow page. */
1068 rc = pgmPoolAlloc(pVM, GCPml4, enmKind, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, &pShwPage);
1069#else
1070 if (!fNestedPaging)
1071 {
1072 Assert(pGstPml4e && pGstPdpe);
1073 Assert(pVM->pgm.s.CTX_SUFF(pShwPageCR3));
1074
1075 rc = pgmPoolAlloc(pVM, pGstPml4e->u & X86_PML4E_PG_MASK,
1076 PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, &pShwPage);
1077 }
1078 else
1079 {
1080 /* AMD-V nested paging. (Intel EPT never comes here) */
1081 RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT;
1082 rc = pgmPoolAlloc(pVM, GCPml4 + RT_BIT_64(63) /* hack: make the address unique */,
1083 PGMPOOLKIND_64BIT_PDPT_FOR_PHYS, PGMPOOL_IDX_NESTED_ROOT, iPml4, &pShwPage);
1084 }
1085#endif
1086 if (rc == VERR_PGM_POOL_FLUSHED)
1087 {
1088 Log(("PGMShwSyncLongModePDPtr: PGM pool flushed (1) -> signal sync cr3\n"));
1089 Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
1090 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
1091 return VINF_PGM_SYNC_CR3;
1092 }
1093 AssertRCReturn(rc, rc);
1094 }
1095 else
1096 {
1097 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
1098 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1099 }
1100 /* The PDPT was cached or created; hook it up now. */
1101 pPml4e->u |= pShwPage->Core.Key
1102 | (pGstPml4e->u & ~(X86_PML4E_PG_MASK | X86_PML4E_AVL_MASK | X86_PML4E_PCD | X86_PML4E_PWT));
1103
1104 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1105 PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1106 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1107
1108 /* Allocate page directory if not present. */
1109 if ( !pPdpe->n.u1Present
1110 && !(pPdpe->u & X86_PDPE_PG_MASK))
1111 {
1112#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
1113 RTGCPTR64 GCPdPt;
1114 PGMPOOLKIND enmKind;
1115
1116 if (fNestedPaging || !fPaging)
1117 {
1118 /* AMD-V nested paging or real/protected mode without paging */
1119 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
1120 enmKind = PGMPOOLKIND_64BIT_PD_FOR_PHYS;
1121 }
1122 else
1123 {
1124 Assert(pGstPdpe);
1125
1126 GCPdPt = pGstPdpe->u & X86_PDPE_PG_MASK;
1127 enmKind = PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD;
1128 }
1129
1130 /* Create a reference back to the PDPT by using the index in its shadow page. */
1131 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, pShwPage->idx, iPdPt, &pShwPage);
1132#else
1133 if (!fNestedPaging)
1134 {
1135 Assert(pGstPml4e && pGstPdpe);
1136 Assert(!(pPdpe->u & X86_PDPE_PG_MASK));
1137 /* Create a reference back to the PDPT by using the index in its shadow page. */
1138 rc = pgmPoolAlloc(pVM, pGstPdpe->u & X86_PDPE_PG_MASK, PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD, pShwPage->idx, iPdPt, &pShwPage);
1139 }
1140 else
1141 {
1142 /* AMD-V nested paging. (Intel EPT never comes here) */
1143 RTGCPTR64 GCPdPt = (RTGCPTR64)iPdPt << EPT_PDPT_SHIFT;
1144
1145 rc = pgmPoolAlloc(pVM, GCPdPt + RT_BIT_64(62) /* hack: make the address unique */, PGMPOOLKIND_64BIT_PD_FOR_PHYS, pShwPage->idx, iPdPt, &pShwPage);
1146 }
1147#endif
1148 if (rc == VERR_PGM_POOL_FLUSHED)
1149 {
1150 Log(("PGMShwSyncLongModePDPtr: PGM pool flushed (2) -> signal sync cr3\n"));
1151 Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
1152 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
1153 return VINF_PGM_SYNC_CR3;
1154 }
1155 AssertRCReturn(rc, rc);
1156 }
1157 else
1158 {
1159 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
1160 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1161 }
1162 /* The PD was cached or created; hook it up now. */
1163 pPdpe->u |= pShwPage->Core.Key
1164 | (pGstPdpe->u & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));
1165
1166 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1167 return VINF_SUCCESS;
1168}
1169
1170
1171/**
1172 * Gets the SHADOW page directory pointer for the specified address (long mode).
1173 *
1174 * @returns VBox status.
1175 * @param pVM VM handle.
1176 * @param GCPtr The address.
1177 * @param ppPdpt Receives address of pdpt
1178 * @param ppPD Receives address of page directory
1179 */
1180DECLINLINE(int) pgmShwGetLongModePDPtr(PVM pVM, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
1181{
1182 PPGM pPGM = &pVM->pgm.s;
1183 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1184 PCX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pPGM, iPml4);
1185 AssertReturn(pPml4e, VERR_INTERNAL_ERROR);
1186 if (ppPml4e)
1187 *ppPml4e = (PX86PML4E)pPml4e;
1188 if (!pPml4e->n.u1Present)
1189 return VERR_PAGE_MAP_LEVEL4_NOT_PRESENT;
1190
1191 PPGMPOOL pPool = pPGM->CTX_SUFF(pPool);
1192 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
1193 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1194
1195 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1196 PCX86PDPT pPdpt = *ppPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1197 if (!pPdpt->a[iPdPt].n.u1Present)
1198 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1199
1200 pShwPage = pgmPoolGetPage(pPool, pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
1201 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1202
1203 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1204 return VINF_SUCCESS;
1205}
1206
1207
1208/**
1209 * Syncs the SHADOW EPT page directory pointer for the specified address. Allocates
1210 * backing pages in case the PDPT or PML4 entry is missing.
1211 *
1212 * @returns VBox status.
1213 * @param pVM VM handle.
1214 * @param GCPtr The address.
1215 * @param ppPdpt Receives address of pdpt
1216 * @param ppPD Receives address of page directory
1217 */
1218int pgmShwGetEPTPDPtr(PVM pVM, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD)
1219{
1220 PPGM pPGM = &pVM->pgm.s;
1221 const unsigned iPml4 = (GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
1222 PPGMPOOL pPool = pPGM->CTX_SUFF(pPool);
1223 PEPTPML4 pPml4;
1224 PEPTPML4E pPml4e;
1225 PPGMPOOLPAGE pShwPage;
1226 int rc;
1227
1228 Assert(HWACCMIsNestedPagingActive(pVM));
1229
1230#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
1231 pPml4 = (PEPTPML4)PGMPOOL_PAGE_2_PTR_BY_PGM(pPGM, pPGM->CTX_SUFF(pShwPageCR3));
1232#else
1233# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1234 rc = PGM_HCPHYS_2_PTR(pVM, pPGM->HCPhysShwNestedRoot, &pPml4);
1235 AssertRCReturn(rc, rc);
1236# else
1237 pPml4 = (PEPTPML4)pPGM->CTX_SUFF(pShwNestedRoot);
1238# endif
1239#endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY */
1240 Assert(pPml4);
1241
1242 /* Allocate page directory pointer table if not present. */
1243 pPml4e = &pPml4->a[iPml4];
1244 if ( !pPml4e->n.u1Present
1245 && !(pPml4e->u & EPT_PML4E_PG_MASK))
1246 {
1247 Assert(!(pPml4e->u & EPT_PML4E_PG_MASK));
1248 RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT;
1249
1250#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
1251 rc = pgmPoolAlloc(pVM, GCPml4, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOL_IDX_NESTED_ROOT, iPml4, &pShwPage);
1252#else
1253 rc = pgmPoolAlloc(pVM, GCPml4 + RT_BIT_64(63) /* hack: make the address unique */, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOL_IDX_NESTED_ROOT, iPml4, &pShwPage);
1254#endif
1255 if (rc == VERR_PGM_POOL_FLUSHED)
1256 {
1257 Log(("PGMShwSyncEPTPDPtr: PGM pool flushed (1) -> signal sync cr3\n"));
1258 Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
1259 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
1260 return VINF_PGM_SYNC_CR3;
1261 }
1262 AssertRCReturn(rc, rc);
1263 }
1264 else
1265 {
1266 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
1267 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1268 }
1269 /* The PDPT was cached or created; hook it up now and fill with the default value. */
1270 pPml4e->u = pShwPage->Core.Key;
1271 pPml4e->n.u1Present = 1;
1272 pPml4e->n.u1Write = 1;
1273 pPml4e->n.u1Execute = 1;
1274
1275 const unsigned iPdPt = (GCPtr >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
1276 PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1277 PEPTPDPTE pPdpe = &pPdpt->a[iPdPt];
1278
1279 if (ppPdpt)
1280 *ppPdpt = pPdpt;
1281
1282 /* Allocate page directory if not present. */
1283 if ( !pPdpe->n.u1Present
1284 && !(pPdpe->u & EPT_PDPTE_PG_MASK))
1285 {
1286 RTGCPTR64 GCPdPt = (RTGCPTR64)iPdPt << EPT_PDPT_SHIFT;
1287
1288#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
1289 rc = pgmPoolAlloc(pVM, GCPdPt, PGMPOOLKIND_64BIT_PD_FOR_PHYS, pShwPage->idx, iPdPt, &pShwPage);
1290#else
1291 rc = pgmPoolAlloc(pVM, GCPdPt + RT_BIT_64(62) /* hack: make the address unique */, PGMPOOLKIND_64BIT_PD_FOR_PHYS, pShwPage->idx, iPdPt, &pShwPage);
1292#endif
1293 if (rc == VERR_PGM_POOL_FLUSHED)
1294 {
1295 Log(("PGMShwSyncEPTPDPtr: PGM pool flushed (2) -> signal sync cr3\n"));
1296 Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
1297 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
1298 return VINF_PGM_SYNC_CR3;
1299 }
1300 AssertRCReturn(rc, rc);
1301 }
1302 else
1303 {
1304 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & EPT_PDPTE_PG_MASK);
1305 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1306 }
1307 /* The PD was cached or created; hook it up now and fill with the default value. */
1308 pPdpe->u = pShwPage->Core.Key;
1309 pPdpe->n.u1Present = 1;
1310 pPdpe->n.u1Write = 1;
1311 pPdpe->n.u1Execute = 1;
1312
1313 *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1314 return VINF_SUCCESS;
1315}
1316
1317#endif /* !IN_RC */
1318
1319/**
1320 * Gets effective Guest OS page information.
1321 *
1322 * When GCPtr is in a big page, the function will return as if it was a normal
1323 * 4KB page. If the need for distinguishing between big and normal page becomes
1324 * necessary at a later point, a PGMGstGetPage() will be created for that
1325 * purpose.
1326 *
1327 * @returns VBox status.
1328 * @param pVM VM Handle.
1329 * @param GCPtr Guest Context virtual address of the page.
1330 * @param pfFlags Where to store the flags. These are X86_PTE_*, even for big pages.
1331 * @param pGCPhys Where to store the GC physical address of the page.
1332 * This is page aligned.
1333 */
1334VMMDECL(int) PGMGstGetPage(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
1335{
1336 return PGM_GST_PFN(GetPage,pVM)(pVM, GCPtr, pfFlags, pGCPhys);
1337}
1338
1339
1340/**
1341 * Checks if the page is present.
1342 *
1343 * @returns true if the page is present.
1344 * @returns false if the page is not present.
1345 * @param pVM The VM handle.
1346 * @param GCPtr Address within the page.
1347 */
1348VMMDECL(bool) PGMGstIsPagePresent(PVM pVM, RTGCPTR GCPtr)
1349{
1350 int rc = PGMGstGetPage(pVM, GCPtr, NULL, NULL);
1351 return RT_SUCCESS(rc);
1352}
1353
1354
1355/**
1356 * Sets (replaces) the page flags for a range of pages in the guest's tables.
1357 *
1358 * @returns VBox status.
1359 * @param pVM VM handle.
1360 * @param GCPtr The address of the first page.
1361 * @param cb The size of the range in bytes.
1362 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
1363 */
1364VMMDECL(int) PGMGstSetPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
1365{
1366 return PGMGstModifyPage(pVM, GCPtr, cb, fFlags, 0);
1367}
1368
1369
1370/**
1371 * Modify page flags for a range of pages in the guest's tables
1372 *
1373 * The existing flags are ANDed with the fMask and ORed with the fFlags.
1374 *
1375 * @returns VBox status code.
1376 * @param pVM VM handle.
1377 * @param GCPtr Virtual address of the first page in the range.
1378 * @param cb Size (in bytes) of the range to apply the modification to.
1379 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
1380 * @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
1381 * Be very CAREFUL when ~'ing constants which could be 32-bit!
1382 */
1383VMMDECL(int) PGMGstModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
1384{
1385 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,GstModifyPage), a);
1386
1387 /*
1388 * Validate input.
1389 */
1390 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
1391 Assert(cb);
1392
1393 LogFlow(("PGMGstModifyPage %RGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));
1394
1395 /*
1396 * Adjust input.
1397 */
1398 cb += GCPtr & PAGE_OFFSET_MASK;
1399 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
1400 GCPtr = (GCPtr & PAGE_BASE_GC_MASK);
1401
1402 /*
1403 * Call worker.
1404 */
1405 int rc = PGM_GST_PFN(ModifyPage, pVM)(pVM, GCPtr, cb, fFlags, fMask);
1406
1407 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,GstModifyPage), a);
1408 return rc;
1409}
1410
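/*
 * PGMGstSetPage()/PGMGstModifyPage() use the same (old & fMask) | fFlags
 * convention as the shadow variants above, but applied to the guest's own
 * page tables. Example (hypothetical caller): clearing the dirty bit over a
 * range while leaving all other attributes intact:
 *
 *     rc = PGMGstModifyPage(pVM, GCPtr, cb, 0, ~(uint64_t)X86_PTE_D);
 *
 * X86_PTE_D is the architectural dirty bit; keep the mask 64-bit when
 * inverting constants, as for the shadow calls.
 */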
1411
1412/**
1413 * Gets the specified page directory pointer table entry.
1414 *
1415 * @returns PDP entry
1416 * @param pVM The VM handle.
1417 * @param iPdpt PDPT index
1418 */
1419VMMDECL(X86PDPE) PGMGstGetPaePDPtr(PVM pVM, unsigned iPdpt)
1420{
1421 Assert(iPdpt <= 3);
1422 return pgmGstGetPaePDPTPtr(&pVM->pgm.s)->a[iPdpt & 3];
1423}
1424
1425
1426/**
1427 * Gets the current CR3 register value for the shadow memory context.
1428 * @returns CR3 value.
1429 * @param pVM The VM handle.
1430 */
1431VMMDECL(RTHCPHYS) PGMGetHyperCR3(PVM pVM)
1432{
1433#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
1434 Assert(pVM->pgm.s.CTX_SUFF(pShwPageCR3));
1435 return pVM->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key;
1436#else
1437 PGMMODE enmShadowMode = pVM->pgm.s.enmShadowMode;
1438 switch (enmShadowMode)
1439 {
1440 case PGMMODE_32_BIT:
1441 return pVM->pgm.s.HCPhysShw32BitPD;
1442
1443 case PGMMODE_PAE:
1444 case PGMMODE_PAE_NX:
1445 return pVM->pgm.s.HCPhysShwPaePdpt;
1446
1447 case PGMMODE_AMD64:
1448 case PGMMODE_AMD64_NX:
1449 return pVM->pgm.s.HCPhysShwCR3;
1450
1451 case PGMMODE_EPT:
1452 return pVM->pgm.s.HCPhysShwNestedRoot;
1453
1454 case PGMMODE_NESTED:
1455 return PGMGetNestedCR3(pVM, PGMGetHostMode(pVM));
1456
1457 default:
1458 AssertMsgFailed(("enmShadowMode=%d\n", enmShadowMode));
1459 return ~0;
1460 }
1461#endif
1462}
1463
1464
1465/**
1466 * Gets the current CR3 register value for the nested memory context.
1467 * @returns CR3 value.
1468 * @param pVM The VM handle.
1469 */
1470VMMDECL(RTHCPHYS) PGMGetNestedCR3(PVM pVM, PGMMODE enmShadowMode)
1471{
1472#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
1473 Assert(pVM->pgm.s.CTX_SUFF(pShwPageCR3));
1474 return pVM->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key;
1475#else
1476 switch (enmShadowMode)
1477 {
1478 case PGMMODE_32_BIT:
1479 return pVM->pgm.s.HCPhysShw32BitPD;
1480
1481 case PGMMODE_PAE:
1482 case PGMMODE_PAE_NX:
1483 return pVM->pgm.s.HCPhysShwPaePdpt;
1484
1485 case PGMMODE_AMD64:
1486 case PGMMODE_AMD64_NX:
1487 return pVM->pgm.s.HCPhysShwCR3;
1488
1489 default:
1490 AssertMsgFailed(("enmShadowMode=%d\n", enmShadowMode));
1491 return ~0;
1492 }
1493#endif
1494}
1495
1496
1497/**
1498 * Gets the CR3 register value for the 32-Bit shadow memory context.
1499 * @returns CR3 value.
1500 * @param pVM The VM handle.
1501 */
1502VMMDECL(RTHCPHYS) PGMGetHyper32BitCR3(PVM pVM)
1503{
1504#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
1505 Assert(pVM->pgm.s.CTX_SUFF(pShwPageCR3));
1506 return pVM->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key;
1507#else
1508 return pVM->pgm.s.HCPhysShw32BitPD;
1509#endif
1510}
1511
1512
1513/**
1514 * Gets the CR3 register value for the PAE shadow memory context.
1515 * @returns CR3 value.
1516 * @param pVM The VM handle.
1517 */
1518VMMDECL(RTHCPHYS) PGMGetHyperPaeCR3(PVM pVM)
1519{
1520#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
1521 Assert(pVM->pgm.s.CTX_SUFF(pShwPageCR3));
1522 return pVM->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key;
1523#else
1524 return pVM->pgm.s.HCPhysShwPaePdpt;
1525#endif
1526}
1527
1528
1529/**
1530 * Gets the CR3 register value for the AMD64 shadow memory context.
1531 * @returns CR3 value.
1532 * @param pVM The VM handle.
1533 */
1534VMMDECL(RTHCPHYS) PGMGetHyperAmd64CR3(PVM pVM)
1535{
1536#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
1537 Assert(pVM->pgm.s.CTX_SUFF(pShwPageCR3));
1538 return pVM->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key;
1539#else
1540 return pVM->pgm.s.HCPhysShwCR3;
1541#endif
1542}
1543
1544
1545/**
1546 * Gets the current CR3 register value for the HC intermediate memory context.
1547 * @returns CR3 value.
1548 * @param pVM The VM handle.
1549 */
1550VMMDECL(RTHCPHYS) PGMGetInterHCCR3(PVM pVM)
1551{
1552 switch (pVM->pgm.s.enmHostMode)
1553 {
1554 case SUPPAGINGMODE_32_BIT:
1555 case SUPPAGINGMODE_32_BIT_GLOBAL:
1556 return pVM->pgm.s.HCPhysInterPD;
1557
1558 case SUPPAGINGMODE_PAE:
1559 case SUPPAGINGMODE_PAE_GLOBAL:
1560 case SUPPAGINGMODE_PAE_NX:
1561 case SUPPAGINGMODE_PAE_GLOBAL_NX:
1562 return pVM->pgm.s.HCPhysInterPaePDPT;
1563
1564 case SUPPAGINGMODE_AMD64:
1565 case SUPPAGINGMODE_AMD64_GLOBAL:
1566 case SUPPAGINGMODE_AMD64_NX:
1567 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
1568 return pVM->pgm.s.HCPhysInterPaePDPT;
1569
1570 default:
1571 AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode));
1572 return ~0;
1573 }
1574}
1575
1576
1577/**
1578 * Gets the current CR3 register value for the RC intermediate memory context.
1579 * @returns CR3 value.
1580 * @param pVM The VM handle.
1581 */
1582VMMDECL(RTHCPHYS) PGMGetInterRCCR3(PVM pVM)
1583{
1584 switch (pVM->pgm.s.enmShadowMode)
1585 {
1586 case PGMMODE_32_BIT:
1587 return pVM->pgm.s.HCPhysInterPD;
1588
1589 case PGMMODE_PAE:
1590 case PGMMODE_PAE_NX:
1591 return pVM->pgm.s.HCPhysInterPaePDPT;
1592
1593 case PGMMODE_AMD64:
1594 case PGMMODE_AMD64_NX:
1595 return pVM->pgm.s.HCPhysInterPaePML4;
1596
1597 case PGMMODE_EPT:
1598 case PGMMODE_NESTED:
1599 return 0; /* not relevant */
1600
1601 default:
1602 AssertMsgFailed(("enmShadowMode=%d\n", pVM->pgm.s.enmShadowMode));
1603 return ~0;
1604 }
1605}
1606
1607
1608/**
1609 * Gets the CR3 register value for the 32-Bit intermediate memory context.
1610 * @returns CR3 value.
1611 * @param pVM The VM handle.
1612 */
1613VMMDECL(RTHCPHYS) PGMGetInter32BitCR3(PVM pVM)
1614{
1615 return pVM->pgm.s.HCPhysInterPD;
1616}
1617
1618
1619/**
1620 * Gets the CR3 register value for the PAE intermediate memory context.
1621 * @returns CR3 value.
1622 * @param pVM The VM handle.
1623 */
1624VMMDECL(RTHCPHYS) PGMGetInterPaeCR3(PVM pVM)
1625{
1626 return pVM->pgm.s.HCPhysInterPaePDPT;
1627}
1628
1629
1630/**
1631 * Gets the CR3 register value for the AMD64 intermediate memory context.
1632 * @returns CR3 value.
1633 * @param pVM The VM handle.
1634 */
1635VMMDECL(RTHCPHYS) PGMGetInterAmd64CR3(PVM pVM)
1636{
1637 return pVM->pgm.s.HCPhysInterPaePML4;
1638}
1639
1640
1641/**
1642 * Performs and schedules necessary updates following a CR3 load or reload.
1643 *
1644 * This will normally involve mapping the guest PD or nPDPT
1645 *
1646 * @returns VBox status code.
1647 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
1648 * safely be ignored and overridden since the FF will be set too then.
1649 * @param pVM VM handle.
1650 * @param cr3 The new cr3.
1651 * @param fGlobal Indicates whether this is a global flush or not.
1652 */
1653VMMDECL(int) PGMFlushTLB(PVM pVM, uint64_t cr3, bool fGlobal)
1654{
1655 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLB), a);
1656
1657 /*
1658 * Always flag the necessary updates; necessary for hardware acceleration
1659 */
1660 /** @todo optimize this, it shouldn't always be necessary. */
1661 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
1662 if (fGlobal)
1663 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
1664 LogFlow(("PGMFlushTLB: cr3=%RX64 OldCr3=%RX64 fGlobal=%d\n", cr3, pVM->pgm.s.GCPhysCR3, fGlobal));
1665
1666 /*
1667 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
1668 */
1669 int rc = VINF_SUCCESS;
1670 RTGCPHYS GCPhysCR3;
1671 switch (pVM->pgm.s.enmGuestMode)
1672 {
1673 case PGMMODE_PAE:
1674 case PGMMODE_PAE_NX:
1675 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1676 break;
1677 case PGMMODE_AMD64:
1678 case PGMMODE_AMD64_NX:
1679 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
1680 break;
1681 default:
1682 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1683 break;
1684 }
1685
1686 if (pVM->pgm.s.GCPhysCR3 != GCPhysCR3)
1687 {
1688 RTGCPHYS GCPhysOldCR3 = pVM->pgm.s.GCPhysCR3;
1689 pVM->pgm.s.GCPhysCR3 = GCPhysCR3;
1690 rc = PGM_BTH_PFN(MapCR3, pVM)(pVM, GCPhysCR3);
1691 if (RT_LIKELY(rc == VINF_SUCCESS))
1692 {
1693 if (!pVM->pgm.s.fMappingsFixed)
1694 {
1695 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1696#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
1697 rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, GCPhysCR3);
1698#endif
1699 }
1700 }
1701 else
1702 {
1703 AssertMsg(rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
1704 Assert(VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL | VM_FF_PGM_SYNC_CR3));
1705 pVM->pgm.s.GCPhysCR3 = GCPhysOldCR3;
1706 pVM->pgm.s.fSyncFlags |= PGM_SYNC_MAP_CR3;
1707 if (!pVM->pgm.s.fMappingsFixed)
1708 pVM->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;
1709 }
1710
1711 if (fGlobal)
1712 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLBNewCR3Global));
1713 else
1714 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLBNewCR3));
1715 }
1716 else
1717 {
1718 /*
1719 * Check if we have a pending update of the CR3 monitoring.
1720 */
1721 if (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
1722 {
1723 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1724 Assert(!pVM->pgm.s.fMappingsFixed);
1725#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
1726 rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, GCPhysCR3);
1727#endif
1728 }
1729 if (fGlobal)
1730 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLBSameCR3Global));
1731 else
1732 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLBSameCR3));
1733 }
1734
1735 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLB), a);
1736 return rc;
1737}
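
/* Example (illustrative sketch only, not part of the original file): a MOV CR3
 * emulation path would typically pass the new value straight to PGMFlushTLB and
 * treat VINF_PGM_SYNC_CR3 as ignorable, since the corresponding force-action
 * flag is set as well. The variable names and the fGlobal choice below are
 * assumptions made for the sake of the example.
 *
 *     uint64_t uNewCr3 = uValue;                  // value the guest wrote to CR3
 *     int rc = PGMFlushTLB(pVM, uNewCr3, false);  // non-global flush
 *     if (rc == VINF_PGM_SYNC_CR3)
 *         rc = VINF_SUCCESS;                      // VM_FF_PGM_SYNC_CR3* is pending anyway
 *     AssertRC(rc);
 */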
1738
1739
1740/**
1741 * Performs and schedules necessary updates following a CR3 load or reload when
1742 * using nested or extended paging.
1743 *
1744 * This API is an alternative to PGMFlushTLB that avoids actually flushing the
1745 * TLB and triggering a SyncCR3.
1746 *
1747 * This will normally involve mapping the guest PD or nPDPT.
1748 *
1749 * @returns VBox status code.
1750 * @retval VINF_SUCCESS.
1751 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync (only when this is
1752 *         applied outside nested mode). This can safely be ignored and overridden
1753 *         since the FF will be set too then.
1754 * @param pVM VM handle.
1755 * @param cr3 The new cr3.
1756 */
1757VMMDECL(int) PGMUpdateCR3(PVM pVM, uint64_t cr3)
1758{
1759 LogFlow(("PGMUpdateCR3: cr3=%RX64 OldCr3=%RX64\n", cr3, pVM->pgm.s.GCPhysCR3));
1760
1761 /* We assume we're only called in nested paging mode. */
1762 Assert(pVM->pgm.s.fMappingsFixed);
1763 Assert(!(pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3));
1764 Assert(pVM->pgm.s.enmShadowMode == PGMMODE_NESTED || pVM->pgm.s.enmShadowMode == PGMMODE_EPT);
1765
1766 /*
1767 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
1768 */
1769 int rc = VINF_SUCCESS;
1770 RTGCPHYS GCPhysCR3;
1771 switch (pVM->pgm.s.enmGuestMode)
1772 {
1773 case PGMMODE_PAE:
1774 case PGMMODE_PAE_NX:
1775 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1776 break;
1777 case PGMMODE_AMD64:
1778 case PGMMODE_AMD64_NX:
1779 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
1780 break;
1781 default:
1782 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1783 break;
1784 }
1785 if (pVM->pgm.s.GCPhysCR3 != GCPhysCR3)
1786 {
1787 pVM->pgm.s.GCPhysCR3 = GCPhysCR3;
1788 rc = PGM_BTH_PFN(MapCR3, pVM)(pVM, GCPhysCR3);
1789 AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */ /** @todo this isn't true for the mac, but we need hw to test/fix this. */
1790 }
1791 return rc;
1792}
1793
1794
1795/**
1796 * Synchronize the paging structures.
1797 *
1798 * This function is called in response to the VM_FF_PGM_SYNC_CR3 and
1799 * VM_FF_PGM_SYNC_CR3_NON_GLOBAL. Those two force-action flags are set
1800 * in several places, most importantly whenever the CR3 is loaded.
1801 *
1802 * @returns VBox status code.
1803 * @param pVM The virtual machine.
1804 * @param cr0 Guest context CR0 register
1805 * @param cr3 Guest context CR3 register
1806 * @param cr4 Guest context CR4 register
1807 * @param fGlobal Including global page directories or not
1808 */
1809VMMDECL(int) PGMSyncCR3(PVM pVM, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
1810{
1811 int rc;
1812
1813 /*
1814 * We might be called when we shouldn't.
1815 *
1816 * The mode switching will ensure that the PD is resynced
1817 * after every mode switch. So, if we find ourselves here
1818 * when in protected or real mode we can safely disable the
1819 * FF and return immediately.
1820 */
1821 if (pVM->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
1822 {
1823 Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
1824 VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3);
1825 VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
1826 return VINF_SUCCESS;
1827 }
1828
1829 /* If global pages are not supported, then all flushes are global. */
1830 if (!(cr4 & X86_CR4_PGE))
1831 fGlobal = true;
1832 LogFlow(("PGMSyncCR3: cr0=%RX64 cr3=%RX64 cr4=%RX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
1833 VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3), VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL)));
1834
1835#ifdef PGMPOOL_WITH_MONITORING
1836 /*
1837 * The pool may have pending stuff and even require a return to ring-3 to
1838 * clear the whole thing.
1839 */
1840 rc = pgmPoolSyncCR3(pVM);
1841 if (rc != VINF_SUCCESS)
1842 return rc;
1843#endif
1844
1845 /*
1846 * Check if we need to finish an aborted MapCR3 call (see PGMFlushTLB).
1847 * This should be done before SyncCR3.
1848 */
1849 if (pVM->pgm.s.fSyncFlags & PGM_SYNC_MAP_CR3)
1850 {
1851 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MAP_CR3;
1852
1853 RTGCPHYS GCPhysCR3Old = pVM->pgm.s.GCPhysCR3;
1854 RTGCPHYS GCPhysCR3;
1855 switch (pVM->pgm.s.enmGuestMode)
1856 {
1857 case PGMMODE_PAE:
1858 case PGMMODE_PAE_NX:
1859 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1860 break;
1861 case PGMMODE_AMD64:
1862 case PGMMODE_AMD64_NX:
1863 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
1864 break;
1865 default:
1866 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1867 break;
1868 }
1869
1870#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
1871 if (pVM->pgm.s.GCPhysCR3 != GCPhysCR3)
1872 {
1873 pVM->pgm.s.GCPhysCR3 = GCPhysCR3;
1874 rc = PGM_BTH_PFN(MapCR3, pVM)(pVM, GCPhysCR3);
1875 }
1876#else
1877 pVM->pgm.s.GCPhysCR3 = GCPhysCR3;
1878 rc = PGM_BTH_PFN(MapCR3, pVM)(pVM, GCPhysCR3);
1879#endif
1880#ifdef IN_RING3
1881 if (rc == VINF_PGM_SYNC_CR3)
1882 rc = pgmPoolSyncCR3(pVM);
1883#else
1884 if (rc == VINF_PGM_SYNC_CR3)
1885 {
1886 pVM->pgm.s.GCPhysCR3 = GCPhysCR3Old;
1887 return rc;
1888 }
1889#endif
1890 AssertRCReturn(rc, rc);
1891 AssertRCSuccessReturn(rc, VERR_INTERNAL_ERROR);
1892 }
1893
1894 /*
1895 * Let the 'Bth' function do the work and we'll just keep track of the flags.
1896 */
1897 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
1898 rc = PGM_BTH_PFN(SyncCR3, pVM)(pVM, cr0, cr3, cr4, fGlobal);
1899 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
1900 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
1901 if (rc == VINF_SUCCESS)
1902 {
1903 if (!(pVM->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
1904 {
1905 VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3);
1906 VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
1907 }
1908
1909 /*
1910 * Check if we have a pending update of the CR3 monitoring.
1911 */
1912 if (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
1913 {
1914 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1915 Assert(!pVM->pgm.s.fMappingsFixed);
1916#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
1917 Assert(pVM->pgm.s.GCPhysCR3 == pVM->pgm.s.GCPhysGstCR3Monitored);
1918 rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, pVM->pgm.s.GCPhysCR3);
1919#endif
1920 }
1921 }
1922
1923 /*
1924 * Now flush the CR3 (guest context).
1925 */
1926 if (rc == VINF_SUCCESS)
1927 PGM_INVL_GUEST_TLBS();
1928 return rc;
1929}
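
/* Example (hedged sketch, not part of the original file): the usual consumer of
 * the VM_FF_PGM_SYNC_CR3* force-action flags is the execution loop, which clears
 * them by calling PGMSyncCR3 with the current guest control registers. The
 * surrounding loop logic is an assumption for illustration only.
 *
 *     if (VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL))
 *     {
 *         int rc = PGMSyncCR3(pVM, CPUMGetGuestCR0(pVM), CPUMGetGuestCR3(pVM),
 *                             CPUMGetGuestCR4(pVM),
 *                             VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
 *         if (RT_FAILURE(rc))
 *             return rc;
 *     }
 */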
1930
1931
1932/**
1933 * Called whenever CR0 or CR4 changes in a way which may change
1934 * the paging mode.
1935 *
1936 * @returns VBox status code fit for scheduling in GC and R0.
1937 * @retval VINF_SUCCESS if there was no change, or it was successfully dealt with.
1938 * @retval VINF_PGM_CHANGE_MODE if we're in GC or R0 and the mode changes.
1939 * @param pVM VM handle.
1940 * @param cr0 The new cr0.
1941 * @param cr4 The new cr4.
1942 * @param efer The new extended feature enable register.
1943 */
1944VMMDECL(int) PGMChangeMode(PVM pVM, uint64_t cr0, uint64_t cr4, uint64_t efer)
1945{
1946 PGMMODE enmGuestMode;
1947
1948 /*
1949 * Calc the new guest mode.
1950 */
1951 if (!(cr0 & X86_CR0_PE))
1952 enmGuestMode = PGMMODE_REAL;
1953 else if (!(cr0 & X86_CR0_PG))
1954 enmGuestMode = PGMMODE_PROTECTED;
1955 else if (!(cr4 & X86_CR4_PAE))
1956 enmGuestMode = PGMMODE_32_BIT;
1957 else if (!(efer & MSR_K6_EFER_LME))
1958 {
1959 if (!(efer & MSR_K6_EFER_NXE))
1960 enmGuestMode = PGMMODE_PAE;
1961 else
1962 enmGuestMode = PGMMODE_PAE_NX;
1963 }
1964 else
1965 {
1966 if (!(efer & MSR_K6_EFER_NXE))
1967 enmGuestMode = PGMMODE_AMD64;
1968 else
1969 enmGuestMode = PGMMODE_AMD64_NX;
1970 }
1971
1972 /*
1973 * Did it change?
1974 */
1975 if (pVM->pgm.s.enmGuestMode == enmGuestMode)
1976 return VINF_SUCCESS;
1977
1978 /* Flush the TLB */
1979 PGM_INVL_GUEST_TLBS();
1980
1981#ifdef IN_RING3
1982 return PGMR3ChangeMode(pVM, enmGuestMode);
1983#else
1984 LogFlow(("PGMChangeMode: returns VINF_PGM_CHANGE_MODE.\n"));
1985 return VINF_PGM_CHANGE_MODE;
1986#endif
1987}
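
/* Example (sketch only; the call site is an assumption): after emulating a write
 * to CR0, CR4 or EFER, an interpreter would hand the new values to PGMChangeMode
 * and reschedule to ring-3 when an actual mode switch is required.
 *
 *     int rc = PGMChangeMode(pVM, uNewCr0, uNewCr4, uNewEfer);
 *     if (rc == VINF_PGM_CHANGE_MODE)
 *         return rc;      // let ring-3 perform PGMR3ChangeMode
 *     AssertRC(rc);
 */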
1988
1989
1990/**
1991 * Gets the current guest paging mode.
1992 *
1993 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
1994 *
1995 * @returns The current paging mode.
1996 * @param pVM The VM handle.
1997 */
1998VMMDECL(PGMMODE) PGMGetGuestMode(PVM pVM)
1999{
2000 return pVM->pgm.s.enmGuestMode;
2001}
2002
2003
2004/**
2005 * Gets the current shadow paging mode.
2006 *
2007 * @returns The current paging mode.
2008 * @param pVM The VM handle.
2009 */
2010VMMDECL(PGMMODE) PGMGetShadowMode(PVM pVM)
2011{
2012 return pVM->pgm.s.enmShadowMode;
2013}
2014
2015/**
2016 * Gets the current host paging mode.
2017 *
2018 * @returns The current paging mode.
2019 * @param pVM The VM handle.
2020 */
2021VMMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
2022{
2023 switch (pVM->pgm.s.enmHostMode)
2024 {
2025 case SUPPAGINGMODE_32_BIT:
2026 case SUPPAGINGMODE_32_BIT_GLOBAL:
2027 return PGMMODE_32_BIT;
2028
2029 case SUPPAGINGMODE_PAE:
2030 case SUPPAGINGMODE_PAE_GLOBAL:
2031 return PGMMODE_PAE;
2032
2033 case SUPPAGINGMODE_PAE_NX:
2034 case SUPPAGINGMODE_PAE_GLOBAL_NX:
2035 return PGMMODE_PAE_NX;
2036
2037 case SUPPAGINGMODE_AMD64:
2038 case SUPPAGINGMODE_AMD64_GLOBAL:
2039 return PGMMODE_AMD64;
2040
2041 case SUPPAGINGMODE_AMD64_NX:
2042 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
2043 return PGMMODE_AMD64_NX;
2044
2045 default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
2046 }
2047
2048 return PGMMODE_INVALID;
2049}
2050
2051
2052/**
2053 * Get mode name.
2054 *
2055 * @returns read-only name string.
2056 * @param enmMode The mode whose name is desired.
2057 */
2058VMMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
2059{
2060 switch (enmMode)
2061 {
2062 case PGMMODE_REAL: return "Real";
2063 case PGMMODE_PROTECTED: return "Protected";
2064 case PGMMODE_32_BIT: return "32-bit";
2065 case PGMMODE_PAE: return "PAE";
2066 case PGMMODE_PAE_NX: return "PAE+NX";
2067 case PGMMODE_AMD64: return "AMD64";
2068 case PGMMODE_AMD64_NX: return "AMD64+NX";
2069 case PGMMODE_NESTED: return "Nested";
2070 case PGMMODE_EPT: return "EPT";
2071 default: return "unknown mode value";
2072 }
2073}
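
/* Example (sketch only): the mode getters and PGMGetModeName combine naturally
 * for logging; pVM here is whatever VM handle the caller holds.
 *
 *     Log(("Guest paging mode: %s, shadow: %s\n",
 *          PGMGetModeName(PGMGetGuestMode(pVM)),
 *          PGMGetModeName(PGMGetShadowMode(pVM))));
 */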
2074
2075
2076/**
2077 * Acquire the PGM lock.
2078 *
2079 * @returns VBox status code
2080 * @param pVM The VM to operate on.
2081 */
2082int pgmLock(PVM pVM)
2083{
2084 int rc = PDMCritSectEnter(&pVM->pgm.s.CritSect, VERR_SEM_BUSY);
2085#ifdef IN_RC
2086 if (rc == VERR_SEM_BUSY)
2087 rc = VMMGCCallHost(pVM, VMMCALLHOST_PGM_LOCK, 0);
2088#elif defined(IN_RING0)
2089 if (rc == VERR_SEM_BUSY)
2090 rc = VMMR0CallHost(pVM, VMMCALLHOST_PGM_LOCK, 0);
2091#endif
2092 AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc));
2093 return rc;
2094}
2095
2096
2097/**
2098 * Release the PGM lock.
2099 *
2101 * @param pVM The VM to operate on.
2102 */
2103void pgmUnlock(PVM pVM)
2104{
2105 PDMCritSectLeave(&pVM->pgm.s.CritSect);
2106}
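
/* Example (sketch only): internal PGM code brackets access to shared state with
 * the lock pair above; in RC and R0 a contended pgmLock is forwarded to ring-3
 * via the call-host path before it returns.
 *
 *     int rc = pgmLock(pVM);
 *     AssertRCReturn(rc, rc);
 *     // ... access pVM->pgm.s structures ...
 *     pgmUnlock(pVM);
 */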
2107
2108#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
2109
2110/**
2111 * Temporarily maps one guest page specified by GC physical address.
2112 * These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
2113 *
2114 * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
2115 * reused after 8 mappings (or perhaps a few more if you score with the cache).
2116 *
2117 * @returns VBox status.
2118 * @param pVM VM handle.
2119 * @param GCPhys GC Physical address of the page.
2120 * @param ppv Where to store the address of the mapping.
2121 */
2122VMMDECL(int) PGMDynMapGCPage(PVM pVM, RTGCPHYS GCPhys, void **ppv)
2123{
2124 AssertMsg(!(GCPhys & PAGE_OFFSET_MASK), ("GCPhys=%RGp\n", GCPhys));
2125
2126 /*
2127 * Get the ram range.
2128 */
2129 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
2130 while (pRam && GCPhys - pRam->GCPhys >= pRam->cb)
2131 pRam = pRam->CTX_SUFF(pNext);
2132 if (!pRam)
2133 {
2134 AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
2135 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2136 }
2137
2138 /*
2139 * Pass it on to PGMDynMapHCPage.
2140 */
2141 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT]);
2142 //Log(("PGMDynMapGCPage: GCPhys=%RGp HCPhys=%RHp\n", GCPhys, HCPhys));
2143#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
2144 pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
2145#else
2146 PGMDynMapHCPage(pVM, HCPhys, ppv);
2147#endif
2148 return VINF_SUCCESS;
2149}
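
/* Example (hedged sketch; abBuf, cbToRead and GCPhys are assumptions): reading a
 * few bytes from a guest physical page through the dynamic mapping area. The
 * mapping is only valid until the small ring of mapping slots wraps, so the data
 * is copied out immediately and the pointer is never cached. cbToRead must not
 * cross the page boundary.
 *
 *     void *pvPage;
 *     int rc = PGMDynMapGCPage(pVM, GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK, &pvPage);
 *     if (RT_SUCCESS(rc))
 *         memcpy(abBuf, (uint8_t *)pvPage + (GCPhys & PAGE_OFFSET_MASK), cbToRead);
 */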
2150
2151
2152/**
2153 * Temporarily maps one guest page specified by unaligned GC physical address.
2154 * These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
2155 *
2156 * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
2157 * reused after 8 mappings (or perhaps a few more if you score with the cache).
2158 *
2159 * The caller is aware that only the specified page is mapped and that really bad
2160 * things will happen if it writes beyond the page!
2161 *
2162 * @returns VBox status.
2163 * @param pVM VM handle.
2164 * @param GCPhys GC Physical address within the page to be mapped.
2165 * @param ppv Where to store the address of the mapping address corresponding to GCPhys.
2166 */
2167VMMDECL(int) PGMDynMapGCPageOff(PVM pVM, RTGCPHYS GCPhys, void **ppv)
2168{
2169 /*
2170 * Get the ram range.
2171 */
2172 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
2173 while (pRam && GCPhys - pRam->GCPhys >= pRam->cb)
2174 pRam = pRam->CTX_SUFF(pNext);
2175 if (!pRam)
2176 {
2177 AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
2178 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2179 }
2180
2181 /*
2182 * Pass it on to PGMDynMapHCPage.
2183 */
2184 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT]);
2185#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
2186 pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
2187#else
2188 PGMDynMapHCPage(pVM, HCPhys, ppv);
2189#endif
2190 *ppv = (void *)((uintptr_t)*ppv | (GCPhys & PAGE_OFFSET_MASK));
2191 return VINF_SUCCESS;
2192}
2193
2194# ifdef IN_RC
2195
2196/**
2197 * Temporarily maps one host page specified by HC physical address.
2198 *
2199 * Be WARNED that the dynamic page mapping area is small, 16 pages, thus the space is
2200 * reused after 16 mappings (or perhaps a few more if you score with the cache).
2201 *
2202 * @returns VINF_SUCCESS, will bail out to ring-3 on failure.
2203 * @param pVM VM handle.
2204 * @param HCPhys HC Physical address of the page.
2205 * @param ppv Where to store the address of the mapping. This is the
2206 * address of the PAGE not the exact address corresponding
2207 * to HCPhys. Use PGMDynMapHCPageOff if you care for the
2208 * page offset.
2209 */
2210VMMDECL(int) PGMDynMapHCPage(PVM pVM, RTHCPHYS HCPhys, void **ppv)
2211{
2212 AssertMsg(!(HCPhys & PAGE_OFFSET_MASK), ("HCPhys=%RHp\n", HCPhys));
2213
2214 /*
2215 * Check the cache.
2216 */
2217 register unsigned iCache;
2218 for (iCache = 0;iCache < RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache);iCache++)
2219 {
2220 static const uint8_t au8Trans[MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT][RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache)] =
2221 {
2222 { 0, 9, 10, 11, 12, 13, 14, 15},
2223 { 0, 1, 10, 11, 12, 13, 14, 15},
2224 { 0, 1, 2, 11, 12, 13, 14, 15},
2225 { 0, 1, 2, 3, 12, 13, 14, 15},
2226 { 0, 1, 2, 3, 4, 13, 14, 15},
2227 { 0, 1, 2, 3, 4, 5, 14, 15},
2228 { 0, 1, 2, 3, 4, 5, 6, 15},
2229 { 0, 1, 2, 3, 4, 5, 6, 7},
2230 { 8, 1, 2, 3, 4, 5, 6, 7},
2231 { 8, 9, 2, 3, 4, 5, 6, 7},
2232 { 8, 9, 10, 3, 4, 5, 6, 7},
2233 { 8, 9, 10, 11, 4, 5, 6, 7},
2234 { 8, 9, 10, 11, 12, 5, 6, 7},
2235 { 8, 9, 10, 11, 12, 13, 6, 7},
2236 { 8, 9, 10, 11, 12, 13, 14, 7},
2237 { 8, 9, 10, 11, 12, 13, 14, 15},
2238 };
2239 AssertCompile(RT_ELEMENTS(au8Trans) == 16);
2240 AssertCompile(RT_ELEMENTS(au8Trans[0]) == 8);
2241
2242 if (pVM->pgm.s.aHCPhysDynPageMapCache[iCache] == HCPhys)
2243 {
2244 int iPage = au8Trans[pVM->pgm.s.iDynPageMapLast][iCache];
2245
2246 /* The cache can get out of sync with locked entries. (10 locked, 2 overwrites its cache position, last = 11, lookup 2 -> page 10 instead of 2) */
2247 if ((pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage].u & X86_PTE_PG_MASK) == HCPhys)
2248 {
2249 void *pv = pVM->pgm.s.pbDynPageMapBaseGC + (iPage << PAGE_SHIFT);
2250 *ppv = pv;
2251 STAM_COUNTER_INC(&pVM->pgm.s.StatRCDynMapCacheHits);
2252 Log4(("PGMGCDynMapHCPage: HCPhys=%RHp pv=%p iPage=%d iCache=%d\n", HCPhys, pv, iPage, iCache));
2253 return VINF_SUCCESS;
2254 }
2255 }
2256 }
2257 AssertCompile(RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) == 8);
2258 AssertCompile((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) == 16);
2259 STAM_COUNTER_INC(&pVM->pgm.s.StatRCDynMapCacheMisses);
2260
2261 /*
2262 * Update the page tables.
2263 */
2264 register unsigned iPage = pVM->pgm.s.iDynPageMapLast;
2265# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
2266 unsigned i;
2267 for (i=0;i<(MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT);i++)
2268 {
2269 pVM->pgm.s.iDynPageMapLast = iPage = (iPage + 1) & ((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) - 1);
2270 if (!pVM->pgm.s.aLockedDynPageMapCache[iPage])
2271 break;
2272 iPage++;
2273 }
2274 AssertRelease(i != (MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT));
2275# else
2276 pVM->pgm.s.iDynPageMapLast = iPage = (iPage + 1) & ((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) - 1);
2277# endif
2278
2279 pVM->pgm.s.aHCPhysDynPageMapCache[iPage & (RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) - 1)] = HCPhys;
2280 pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage].u = (uint32_t)HCPhys | X86_PTE_P | X86_PTE_A | X86_PTE_D;
2281 pVM->pgm.s.paDynPageMapPaePTEsGC[iPage].u = HCPhys | X86_PTE_P | X86_PTE_A | X86_PTE_D;
2282 pVM->pgm.s.aLockedDynPageMapCache[iPage] = 0;
2283
2284 void *pv = pVM->pgm.s.pbDynPageMapBaseGC + (iPage << PAGE_SHIFT);
2285 *ppv = pv;
2286 ASMInvalidatePage(pv);
2287 Log4(("PGMGCDynMapHCPage: HCPhys=%RHp pv=%p iPage=%d\n", HCPhys, pv, iPage));
2288 return VINF_SUCCESS;
2289}
2290
2291
2292/**
2293 * Temporarily lock a dynamic page to prevent it from being reused.
2294 *
2295 * @param pVM VM handle.
2296 * @param GCPage GC address of page
2297 */
2298VMMDECL(void) PGMDynLockHCPage(PVM pVM, RCPTRTYPE(uint8_t *) GCPage)
2299{
2300 unsigned iPage;
2301
2302 Assert(GCPage >= pVM->pgm.s.pbDynPageMapBaseGC && GCPage < (pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE));
2303 iPage = ((uintptr_t)(GCPage - pVM->pgm.s.pbDynPageMapBaseGC)) >> PAGE_SHIFT;
2304 ASMAtomicIncU32(&pVM->pgm.s.aLockedDynPageMapCache[iPage]);
2305}
2306
2307
2308/**
2309 * Unlock a dynamic page
2310 *
2311 * @param pVM VM handle.
2312 * @param GCPage GC address of page
2313 */
2314VMMDECL(void) PGMDynUnlockHCPage(PVM pVM, RCPTRTYPE(uint8_t *) GCPage)
2315{
2316 unsigned iPage;
2317
2318 AssertCompile(RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) == RT_ELEMENTS(pVM->pgm.s.aLockedDynPageMapCache));
2319
2320 Assert(GCPage >= pVM->pgm.s.pbDynPageMapBaseGC && GCPage < (pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE));
2321 iPage = ((uintptr_t)(GCPage - pVM->pgm.s.pbDynPageMapBaseGC)) >> PAGE_SHIFT;
2322 Assert(pVM->pgm.s.aLockedDynPageMapCache[iPage]);
2323 ASMAtomicDecU32(&pVM->pgm.s.aLockedDynPageMapCache[iPage]);
2324}
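
/* Example (sketch only, RC context; variable names are assumptions): when a
 * dynamic mapping must stay valid across further PGMDynMapHCPage calls, it is
 * pinned with the lock/unlock pair above so its slot cannot be recycled in
 * between.
 *
 *     void *pv;
 *     int rc = PGMDynMapHCPage(pVM, HCPhys, &pv);
 *     if (RT_SUCCESS(rc))
 *     {
 *         PGMDynLockHCPage(pVM, (uint8_t *)pv);
 *         // ... code that may itself map more pages ...
 *         PGMDynUnlockHCPage(pVM, (uint8_t *)pv);
 *     }
 */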
2325
2326
2327# ifdef VBOX_STRICT
2328/**
2329 * Check for lock leaks.
2330 *
2331 * @param pVM VM handle.
2332 */
2333VMMDECL(void) PGMDynCheckLocks(PVM pVM)
2334{
2335 for (unsigned i=0;i<RT_ELEMENTS(pVM->pgm.s.aLockedDynPageMapCache);i++)
2336 Assert(!pVM->pgm.s.aLockedDynPageMapCache[i]);
2337}
2338# endif /* VBOX_STRICT */
2339
2340# endif /* IN_RC */
2341#endif /* IN_RC || VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
2342
2343#if !defined(IN_R0) || defined(LOG_ENABLED)
2344
2345/** Format handler for PGMPAGE.
2346 * @copydoc FNRTSTRFORMATTYPE */
2347static DECLCALLBACK(size_t) pgmFormatTypeHandlerPage(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
2348 const char *pszType, void const *pvValue,
2349 int cchWidth, int cchPrecision, unsigned fFlags,
2350 void *pvUser)
2351{
2352 size_t cch;
2353 PCPGMPAGE pPage = (PCPGMPAGE)pvValue;
2354 if (VALID_PTR(pPage))
2355 {
2356 char szTmp[64+80];
2357
2358 cch = 0;
2359
2360 /* The single char state stuff. */
2361 static const char s_achPageStates[4] = { 'Z', 'A', 'W', 'S' };
2362 szTmp[cch++] = s_achPageStates[PGM_PAGE_GET_STATE(pPage)];
2363
2364#define IS_PART_INCLUDED(lvl) ( !(fFlags & RTSTR_F_PRECISION) || cchPrecision == (lvl) || cchPrecision >= (lvl)+10 )
2365 if (IS_PART_INCLUDED(5))
2366 {
2367 static const char s_achHandlerStates[4] = { '-', 't', 'w', 'a' };
2368 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)];
2369 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_VIRT_STATE(pPage)];
2370 }
2371
2372 /* The type. */
2373 if (IS_PART_INCLUDED(4))
2374 {
2375 szTmp[cch++] = ':';
2376 static const char s_achPageTypes[8][4] = { "INV", "RAM", "MI2", "M2A", "SHA", "ROM", "MIO", "BAD" };
2377 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE(pPage)][0];
2378 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE(pPage)][1];
2379 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE(pPage)][2];
2380 }
2381
2382 /* The numbers. */
2383 if (IS_PART_INCLUDED(3))
2384 {
2385 szTmp[cch++] = ':';
2386 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_HCPHYS(pPage), 16, 12, 0, RTSTR_F_ZEROPAD | RTSTR_F_64BIT);
2387 }
2388
2389 if (IS_PART_INCLUDED(2))
2390 {
2391 szTmp[cch++] = ':';
2392 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_PAGEID(pPage), 16, 7, 0, RTSTR_F_ZEROPAD | RTSTR_F_32BIT);
2393 }
2394
2395 if (IS_PART_INCLUDED(6))
2396 {
2397 szTmp[cch++] = ':';
2398 static const char s_achRefs[4] = { '-', 'U', '!', 'L' };
2399 szTmp[cch++] = s_achRefs[PGM_PAGE_GET_TD_CREFS(pPage)];
2400 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_TD_IDX(pPage), 16, 4, 0, RTSTR_F_ZEROPAD | RTSTR_F_16BIT);
2401 }
2402#undef IS_PART_INCLUDED
2403
2404 cch = pfnOutput(pvArgOutput, szTmp, cch);
2405 }
2406 else
2407 cch = pfnOutput(pvArgOutput, "<bad-pgmpage-ptr>", sizeof("<bad-pgmpage-ptr>") - 1);
2408 return cch;
2409}
2410
2411
2412/** Format handler for PGMRAMRANGE.
2413 * @copydoc FNRTSTRFORMATTYPE */
2414static DECLCALLBACK(size_t) pgmFormatTypeHandlerRamRange(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
2415 const char *pszType, void const *pvValue,
2416 int cchWidth, int cchPrecision, unsigned fFlags,
2417 void *pvUser)
2418{
2419 size_t cch;
2420 PGMRAMRANGE const *pRam = (PGMRAMRANGE const *)pvValue;
2421 if (VALID_PTR(pRam))
2422 {
2423 char szTmp[80];
2424 cch = RTStrPrintf(szTmp, sizeof(szTmp), "%RGp-%RGp", pRam->GCPhys, pRam->GCPhysLast);
2425 cch = pfnOutput(pvArgOutput, szTmp, cch);
2426 }
2427 else
2428 cch = pfnOutput(pvArgOutput, "<bad-pgmramrange-ptr>", sizeof("<bad-pgmramrange-ptr>") - 1);
2429 return cch;
2430}
2431
2432/** Format type handlers to be registered/deregistered. */
2433static const struct
2434{
2435 char szType[24];
2436 PFNRTSTRFORMATTYPE pfnHandler;
2437} g_aPgmFormatTypes[] =
2438{
2439 { "pgmpage", pgmFormatTypeHandlerPage },
2440 { "pgmramrange", pgmFormatTypeHandlerRamRange }
2441};
2442
2443#endif /* !IN_R0 || LOG_ENABLED */
2444
2445
2446/**
2447 * Registers the global string format types.
2448 *
2449 * This should be called at module load time or in some other manner that ensures
2450 * it's called exactly once.
2451 *
2452 * @returns IPRT status code on RTStrFormatTypeRegister failure.
2453 */
2454VMMDECL(int) PGMRegisterStringFormatTypes(void)
2455{
2456#if !defined(IN_R0) || defined(LOG_ENABLED)
2457 int rc = VINF_SUCCESS;
2458 unsigned i;
2459 for (i = 0; RT_SUCCESS(rc) && i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
2460 {
2461 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
2462# ifdef IN_RING0
2463 if (rc == VERR_ALREADY_EXISTS)
2464 {
2465 /* in case of cleanup failure in ring-0 */
2466 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2467 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
2468 }
2469# endif
2470 }
2471 if (RT_FAILURE(rc))
2472 while (i-- > 0)
2473 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2474
2475 return rc;
2476#else
2477 return VINF_SUCCESS;
2478#endif
2479}
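
/* Example (sketch only; pPage and pRam are assumptions): once registered, the
 * custom types are consumed through the IPRT %R[...] format extension in logging
 * and assertion messages.
 *
 *     Log(("%R[pgmpage] in range %R[pgmramrange]\n", pPage, pRam));
 */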
2480
2481
2482/**
2483 * Deregisters the global string format types.
2484 *
2485 * This should be called at module unload time or in some other manner that
2486 * ensures it's called exactly once.
2487 */
2488VMMDECL(void) PGMDeregisterStringFormatTypes(void)
2489{
2490#if !defined(IN_R0) || defined(LOG_ENABLED)
2491 for (unsigned i = 0; i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
2492 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2493#endif
2494}
2495
2496#ifdef VBOX_STRICT
2497
2498/**
2499 * Asserts that there are no mapping conflicts.
2500 *
2501 * @returns Number of conflicts.
2502 * @param pVM The VM Handle.
2503 */
2504VMMDECL(unsigned) PGMAssertNoMappingConflicts(PVM pVM)
2505{
2506 unsigned cErrors = 0;
2507
2508 /*
2509 * Check for mapping conflicts.
2510 */
2511 for (PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
2512 pMapping;
2513 pMapping = pMapping->CTX_SUFF(pNext))
2514 {
2515 /** @todo This is slow and should be optimized, but since it's just assertions I don't care now. */
2516 for (RTGCPTR GCPtr = pMapping->GCPtr;
2517 GCPtr <= pMapping->GCPtrLast;
2518 GCPtr += PAGE_SIZE)
2519 {
2520 int rc = PGMGstGetPage(pVM, (RTGCPTR)GCPtr, NULL, NULL);
2521 if (rc != VERR_PAGE_TABLE_NOT_PRESENT)
2522 {
2523 AssertMsgFailed(("Conflict at %RGv with %s\n", GCPtr, R3STRING(pMapping->pszDesc)));
2524 cErrors++;
2525 break;
2526 }
2527 }
2528 }
2529
2530 return cErrors;
2531}
2532
2533
2534/**
2535 * Asserts that everything related to the guest CR3 is correctly shadowed.
2536 *
2537 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
2538 * and assert the correctness of the guest CR3 mapping before asserting that the
2539 * shadow page tables is in sync with the guest page tables.
2540 *
2541 * @returns Number of conflicts.
2542 * @param pVM The VM Handle.
2543 * @param cr3 The current guest CR3 register value.
2544 * @param cr4 The current guest CR4 register value.
2545 */
2546VMMDECL(unsigned) PGMAssertCR3(PVM pVM, uint64_t cr3, uint64_t cr4)
2547{
2548 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
2549 unsigned cErrors = PGM_BTH_PFN(AssertCR3, pVM)(pVM, cr3, cr4, 0, ~(RTGCPTR)0);
2550 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
2551 return cErrors;
2552}
2553
2554#endif /* VBOX_STRICT */