VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp@17445

Last change on this file since 17445 was 17433, checked in by vboxsync, 16 years ago

PGMAll.cpp: type reg build fix.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 82.2 KB
/* $Id: PGMAll.cpp 17433 2009-03-06 02:18:34Z vboxsync $ */
/** @file
 * PGM - Page Manager and Monitor - All context code.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

/*******************************************************************************
*   Header Files                                                              *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#include <VBox/pgm.h>
#include <VBox/cpum.h>
#include <VBox/selm.h>
#include <VBox/iom.h>
#include <VBox/sup.h>
#include <VBox/mm.h>
#include <VBox/stam.h>
#include <VBox/csam.h>
#include <VBox/patm.h>
#include <VBox/trpm.h>
#include <VBox/rem.h>
#include <VBox/em.h>
#include <VBox/hwaccm.h>
#include <VBox/hwacc_vmx.h>
#include "PGMInternal.h"
#include <VBox/vm.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/string.h>
#include <VBox/log.h>
#include <VBox/param.h>
#include <VBox/err.h>


/*******************************************************************************
*   Structures and Typedefs                                                   *
*******************************************************************************/
/**
 * State structure for PGM_GST_NAME(HandlerVirtualUpdate) that's
 * passed to PGM_GST_NAME(VirtHandlerUpdateOne) during enumeration.
 */
typedef struct PGMHVUSTATE
{
    /** The VM handle. */
    PVM         pVM;
    /** The todo flags. */
    RTUINT      fTodo;
    /** The CR4 register value. */
    uint32_t    cr4;
} PGMHVUSTATE, *PPGMHVUSTATE;


/*******************************************************************************
*   Internal Functions                                                        *
*******************************************************************************/
DECLINLINE(int) pgmShwGetLongModePDPtr(PVM pVM, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
DECLINLINE(int) pgmShwGetPAEPDPtr(PVM pVM, RTGCPTR GCPtr, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
DECLINLINE(int) pgmShwGetPaePoolPagePD(PPGM pPGM, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde);
#endif

/*
 * Shadow - 32-bit mode
 */
#define PGM_SHW_TYPE PGM_TYPE_32BIT
#define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE PGM_TYPE_REAL
#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
#include "PGMGstDefs.h"
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE PGM_TYPE_PROT
#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
#include "PGMGstDefs.h"
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode */
#define PGM_GST_TYPE PGM_TYPE_32BIT
#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD
#include "PGMGstDefs.h"
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME


/*
 * Shadow - PAE mode
 */
#define PGM_SHW_TYPE PGM_TYPE_PAE
#define PGM_SHW_NAME(name) PGM_SHW_NAME_PAE(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE PGM_TYPE_REAL
#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE PGM_TYPE_PROT
#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode */
#define PGM_GST_TYPE PGM_TYPE_32BIT
#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_FOR_32BIT
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME


/* Guest - PAE mode */
#define PGM_GST_TYPE PGM_TYPE_PAE
#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT
#include "PGMGstDefs.h"
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME


#ifndef IN_RC /* AMD64 implies VT-x/AMD-V */
/*
 * Shadow - AMD64 mode
 */
# define PGM_SHW_TYPE PGM_TYPE_AMD64
# define PGM_SHW_NAME(name) PGM_SHW_NAME_AMD64(name)
# include "PGMAllShw.h"

/* Guest - protected mode (only used for AMD-V nested paging in 64-bit mode) */
# define PGM_GST_TYPE PGM_TYPE_PROT
# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PD_PHYS
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
# undef BTH_PGMPOOLKIND_PT_FOR_PT
# undef BTH_PGMPOOLKIND_ROOT
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME

# ifdef VBOX_WITH_64_BITS_GUESTS
/* Guest - AMD64 mode */
# define PGM_GST_TYPE PGM_TYPE_AMD64
# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
# define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_64BIT_PML4
# include "PGMGstDefs.h"
# include "PGMAllGst.h"
# include "PGMAllBth.h"
# undef BTH_PGMPOOLKIND_PT_FOR_BIG
# undef BTH_PGMPOOLKIND_PT_FOR_PT
# undef BTH_PGMPOOLKIND_ROOT
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME
# endif /* VBOX_WITH_64_BITS_GUESTS */

# undef PGM_SHW_TYPE
# undef PGM_SHW_NAME


/*
 * Shadow - Nested paging mode
 */
# define PGM_SHW_TYPE PGM_TYPE_NESTED
# define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED(name)
# include "PGMAllShw.h"

/* Guest - real mode */
# define PGM_GST_TYPE PGM_TYPE_REAL
# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_REAL(name)
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME

/* Guest - protected mode */
# define PGM_GST_TYPE PGM_TYPE_PROT
# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PROT(name)
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME

/* Guest - 32-bit mode */
# define PGM_GST_TYPE PGM_TYPE_32BIT
# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT(name)
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME

/* Guest - PAE mode */
# define PGM_GST_TYPE PGM_TYPE_PAE
# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE(name)
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME

# ifdef VBOX_WITH_64_BITS_GUESTS
/* Guest - AMD64 mode */
# define PGM_GST_TYPE PGM_TYPE_AMD64
# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64(name)
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME
# endif /* VBOX_WITH_64_BITS_GUESTS */

# undef PGM_SHW_TYPE
# undef PGM_SHW_NAME


/*
 * Shadow - EPT
 */
# define PGM_SHW_TYPE PGM_TYPE_EPT
# define PGM_SHW_NAME(name) PGM_SHW_NAME_EPT(name)
# include "PGMAllShw.h"

/* Guest - real mode */
# define PGM_GST_TYPE PGM_TYPE_REAL
# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_REAL(name)
# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
# undef BTH_PGMPOOLKIND_PT_FOR_PT
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME

/* Guest - protected mode */
# define PGM_GST_TYPE PGM_TYPE_PROT
# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
# undef BTH_PGMPOOLKIND_PT_FOR_PT
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME

/* Guest - 32-bit mode */
# define PGM_GST_TYPE PGM_TYPE_32BIT
# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_32BIT(name)
# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
# undef BTH_PGMPOOLKIND_PT_FOR_PT
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME

/* Guest - PAE mode */
# define PGM_GST_TYPE PGM_TYPE_PAE
# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PAE(name)
# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
# undef BTH_PGMPOOLKIND_PT_FOR_PT
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME

# ifdef VBOX_WITH_64_BITS_GUESTS
/* Guest - AMD64 mode */
# define PGM_GST_TYPE PGM_TYPE_AMD64
# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_AMD64(name)
# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
# undef BTH_PGMPOOLKIND_PT_FOR_PT
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME
# endif /* VBOX_WITH_64_BITS_GUESTS */

# undef PGM_SHW_TYPE
# undef PGM_SHW_NAME

#endif /* !IN_RC */


#ifndef IN_RING3
/**
 * #PF Handler.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         VM Handle.
 * @param   uErr        The trap error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address.
 */
VMMDECL(int) PGMTrap0eHandler(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
{
    LogFlow(("PGMTrap0eHandler: uErr=%RGu pvFault=%RGv eip=%RGv\n", uErr, pvFault, (RTGCPTR)pRegFrame->rip));
    STAM_PROFILE_START(&pVM->pgm.s.StatRZTrap0e, a);
    STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );


#ifdef VBOX_WITH_STATISTICS
    /*
     * Error code stats.
     */
    if (uErr & X86_TRAP_PF_US)
    {
        if (!(uErr & X86_TRAP_PF_P))
        {
            if (uErr & X86_TRAP_PF_RW)
                STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSNotPresentWrite);
            else
                STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSNotPresentRead);
        }
        else if (uErr & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSWrite);
        else if (uErr & X86_TRAP_PF_RSVD)
            STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSReserved);
        else if (uErr & X86_TRAP_PF_ID)
            STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSNXE);
        else
            STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSRead);
    }
    else
    {   /* Supervisor */
        if (!(uErr & X86_TRAP_PF_P))
        {
            if (uErr & X86_TRAP_PF_RW)
                STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eSVNotPresentWrite);
            else
                STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eSVNotPresentRead);
        }
        else if (uErr & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eSVWrite);
        else if (uErr & X86_TRAP_PF_ID)
            STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eSNXE);
        else if (uErr & X86_TRAP_PF_RSVD)
            STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eSVReserved);
    }
#endif /* VBOX_WITH_STATISTICS */

    /*
     * Call the worker.
     */
    int rc = PGM_BTH_PFN(Trap0eHandler, pVM)(pVM, uErr, pRegFrame, pvFault);
    if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
        rc = VINF_SUCCESS;
    STAM_STATS({ if (rc == VINF_EM_RAW_GUEST_TRAP) STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eGuestPF); });
    STAM_STATS({ if (!pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution))
                    pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatRZTrap0eTime2Misc; });
    STAM_PROFILE_STOP_EX(&pVM->pgm.s.StatRZTrap0e, pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution), a);
    return rc;
}
#endif /* !IN_RING3 */
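
/*
 * Illustrative note (not part of the original file): the uErr bits tallied
 * above are the standard x86 #PF error code bits.  A hedged decoding sketch:
 *
 *     bool fPresent = !!(uErr & X86_TRAP_PF_P);    - 0 = page not present
 *     bool fWrite   = !!(uErr & X86_TRAP_PF_RW);   - 1 = write access
 *     bool fUser    = !!(uErr & X86_TRAP_PF_US);   - 1 = user mode access
 *
 * So uErr == 6 decodes as a user-mode write to a not-present page, which the
 * statistics block above counts as StatRZTrap0eUSNotPresentWrite.
 */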


/**
 * Prefetch a page
 *
 * Typically used to sync commonly used pages before entering raw mode
 * after a CR3 reload.
 *
 * @returns VBox status code suitable for scheduling.
 * @retval  VINF_SUCCESS on success.
 * @retval  VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
 * @param   pVM         VM handle.
 * @param   GCPtrPage   Page to invalidate.
 */
VMMDECL(int) PGMPrefetchPage(PVM pVM, RTGCPTR GCPtrPage)
{
    STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,Prefetch), a);
    int rc = PGM_BTH_PFN(PrefetchPage, pVM)(pVM, GCPtrPage);
    STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,Prefetch), a);
    AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
    return rc;
}


/**
 * Gets the mapping corresponding to the specified address (if any).
 *
 * @returns Pointer to the mapping.
 * @returns NULL if not found.
 *
 * @param   pVM         The virtual machine.
 * @param   GCPtr       The guest context pointer.
 */
PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr)
{
    PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
    while (pMapping)
    {
        if ((uintptr_t)GCPtr < (uintptr_t)pMapping->GCPtr)
            break;
        if ((uintptr_t)GCPtr - (uintptr_t)pMapping->GCPtr < pMapping->cb)
            return pMapping;
        pMapping = pMapping->CTX_SUFF(pNext);
    }
    return NULL;
}
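
/*
 * Usage sketch (illustrative only, not from the original file): the mapping
 * list is sorted by GCPtr, so the loop above can stop at the first mapping
 * that starts beyond the address.
 *
 *     PPGMMAPPING pMap = pgmGetMapping(pVM, GCPtrPage);
 *     if (pMap)
 *         LogFlow(("%RGv falls within mapping %RGv LB %#x\n",
 *                  GCPtrPage, pMap->GCPtr, pMap->cb));
 */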


/**
 * Verifies a range of pages for read or write access
 *
 * Only checks the guest's page tables
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   Addr        Guest virtual address to check
 * @param   cbSize      Access size
 * @param   fAccess     Access type (r/w, user/supervisor (X86_PTE_*))
 * @remarks Currently not in use.
 */
VMMDECL(int) PGMIsValidAccess(PVM pVM, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
{
    /*
     * Validate input.
     */
    if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
    {
        AssertMsgFailed(("PGMIsValidAccess: invalid access type %08x\n", fAccess));
        return VERR_INVALID_PARAMETER;
    }

    uint64_t fPage;
    int rc = PGMGstGetPage(pVM, (RTGCPTR)Addr, &fPage, NULL);
    if (RT_FAILURE(rc))
    {
        Log(("PGMIsValidAccess: access violation for %RGv rc=%d\n", Addr, rc));
        return VINF_EM_RAW_GUEST_TRAP;
    }

    /*
     * Check if the access would cause a page fault
     *
     * Note that hypervisor page directories are not present in the guest's tables, so this check
     * is sufficient.
     */
    bool fWrite = !!(fAccess & X86_PTE_RW);
    bool fUser  = !!(fAccess & X86_PTE_US);
    if (   !(fPage & X86_PTE_P)
        || (fWrite && !(fPage & X86_PTE_RW))
        || (fUser  && !(fPage & X86_PTE_US)) )
    {
        Log(("PGMIsValidAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPage, fWrite, fUser));
        return VINF_EM_RAW_GUEST_TRAP;
    }
    if (   RT_SUCCESS(rc)
        && PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize))
        return PGMIsValidAccess(pVM, Addr + PAGE_SIZE, (cbSize > PAGE_SIZE) ? cbSize - PAGE_SIZE : 1, fAccess);
    return rc;
}
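
/*
 * Usage sketch (illustrative only): checking up front whether the guest may
 * write a buffer from user mode before touching it.
 *
 *     int rc = PGMIsValidAccess(pVM, GCPtrBuf, cbBuf, X86_PTE_RW | X86_PTE_US);
 *     if (rc == VINF_EM_RAW_GUEST_TRAP)
 *         return rc;   - let the guest raise the page fault instead
 */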


/**
 * Verifies a range of pages for read or write access
 *
 * Supports handling of pages marked for dirty bit tracking and CSAM
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   Addr        Guest virtual address to check
 * @param   cbSize      Access size
 * @param   fAccess     Access type (r/w, user/supervisor (X86_PTE_*))
 */
VMMDECL(int) PGMVerifyAccess(PVM pVM, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
{
    AssertMsg(!(fAccess & ~(X86_PTE_US | X86_PTE_RW)), ("PGMVerifyAccess: invalid access type %08x\n", fAccess));

    /*
     * Get going.
     */
    uint64_t fPageGst;
    int rc = PGMGstGetPage(pVM, (RTGCPTR)Addr, &fPageGst, NULL);
    if (RT_FAILURE(rc))
    {
        Log(("PGMVerifyAccess: access violation for %RGv rc=%d\n", Addr, rc));
        return VINF_EM_RAW_GUEST_TRAP;
    }

    /*
     * Check if the access would cause a page fault
     *
     * Note that hypervisor page directories are not present in the guest's tables, so this check
     * is sufficient.
     */
    const bool fWrite = !!(fAccess & X86_PTE_RW);
    const bool fUser  = !!(fAccess & X86_PTE_US);
    if (   !(fPageGst & X86_PTE_P)
        || (fWrite && !(fPageGst & X86_PTE_RW))
        || (fUser  && !(fPageGst & X86_PTE_US)) )
    {
        Log(("PGMVerifyAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPageGst, fWrite, fUser));
        return VINF_EM_RAW_GUEST_TRAP;
    }

    if (!HWACCMIsNestedPagingActive(pVM))
    {
        /*
         * Next step is to verify if we protected this page for dirty bit tracking or for CSAM scanning
         */
        rc = PGMShwGetPage(pVM, (RTGCPTR)Addr, NULL, NULL);
        if (   rc == VERR_PAGE_NOT_PRESENT
            || rc == VERR_PAGE_TABLE_NOT_PRESENT)
        {
            /*
             * Page is not present in our page tables.
             * Try to sync it!
             */
            Assert(X86_TRAP_PF_RW == X86_PTE_RW && X86_TRAP_PF_US == X86_PTE_US);
            uint32_t uErr = fAccess & (X86_TRAP_PF_RW | X86_TRAP_PF_US);
            rc = PGM_BTH_PFN(VerifyAccessSyncPage, pVM)(pVM, Addr, fPageGst, uErr);
            if (rc != VINF_SUCCESS)
                return rc;
        }
        else
            AssertMsg(rc == VINF_SUCCESS, ("PGMShwGetPage %RGv failed with %Rrc\n", Addr, rc));
    }

#if 0 /* def VBOX_STRICT; triggers too often now */
    /*
     * This check is a bit paranoid, but useful.
     */
    /** @note this will assert when writing to monitored pages (a bit annoying actually) */
    uint64_t fPageShw;
    rc = PGMShwGetPage(pVM, (RTGCPTR)Addr, &fPageShw, NULL);
    if (   (rc == VERR_PAGE_NOT_PRESENT || RT_FAILURE(rc))
        || (fWrite && !(fPageShw & X86_PTE_RW))
        || (fUser  && !(fPageShw & X86_PTE_US)) )
    {
        AssertMsgFailed(("Unexpected access violation for %RGv! rc=%Rrc write=%d user=%d\n",
                         Addr, rc, fWrite && !(fPageShw & X86_PTE_RW), fUser && !(fPageShw & X86_PTE_US)));
        return VINF_EM_RAW_GUEST_TRAP;
    }
#endif

    if (   RT_SUCCESS(rc)
        && (   PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize - 1)
            || Addr + cbSize < Addr))
    {
        /* Don't recursively call PGMVerifyAccess as we might run out of stack. */
        for (;;)
        {
            Addr += PAGE_SIZE;
            if (cbSize > PAGE_SIZE)
                cbSize -= PAGE_SIZE;
            else
                cbSize = 1;
            rc = PGMVerifyAccess(pVM, Addr, 1, fAccess);
            if (rc != VINF_SUCCESS)
                break;
            if (PAGE_ADDRESS(Addr) == PAGE_ADDRESS(Addr + cbSize - 1))
                break;
        }
    }
    return rc;
}


/**
 * Emulation of the invlpg instruction (HC only actually).
 *
 * @returns VBox status code, special care required.
 * @retval  VINF_PGM_SYNC_CR3 - handled.
 * @retval  VINF_EM_RAW_EMULATE_INSTR - not handled (RC only).
 * @retval  VERR_REM_FLUSHED_PAGES_OVERFLOW - not handled.
 *
 * @param   pVM         VM handle.
 * @param   GCPtrPage   Page to invalidate.
 *
 * @remark  ASSUMES the page table entry or page directory is valid. Fairly
 *          safe, but there could be edge cases!
 *
 * @todo    Flush page or page directory only if necessary!
 */
VMMDECL(int) PGMInvalidatePage(PVM pVM, RTGCPTR GCPtrPage)
{
    int rc;
    Log3(("PGMInvalidatePage: GCPtrPage=%RGv\n", GCPtrPage));

#ifndef IN_RING3
    /*
     * Notify the recompiler so it can record this instruction.
     * Failure happens when it's out of space. We'll return to HC in that case.
     */
    rc = REMNotifyInvalidatePage(pVM, GCPtrPage);
    if (rc != VINF_SUCCESS)
        return rc;
#endif /* !IN_RING3 */


#ifdef IN_RC
    /*
     * Check for conflicts and pending CR3 monitoring updates.
     */
    if (!pVM->pgm.s.fMappingsFixed)
    {
        if (    pgmGetMapping(pVM, GCPtrPage)
            &&  PGMGstGetPage(pVM, GCPtrPage, NULL, NULL) != VERR_PAGE_TABLE_NOT_PRESENT)
        {
            LogFlow(("PGMGCInvalidatePage: Conflict!\n"));
            VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
            STAM_COUNTER_INC(&pVM->pgm.s.StatRCInvlPgConflict);
            return VINF_PGM_SYNC_CR3;
        }

        if (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
        {
            LogFlow(("PGMGCInvalidatePage: PGM_SYNC_MONITOR_CR3 -> reinterpret instruction in R3\n"));
            STAM_COUNTER_INC(&pVM->pgm.s.StatRCInvlPgSyncMonCR3);
            return VINF_EM_RAW_EMULATE_INSTR;
        }
    }
#endif /* IN_RC */

    /*
     * Call paging mode specific worker.
     */
    STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePage), a);
    rc = PGM_BTH_PFN(InvalidatePage, pVM)(pVM, GCPtrPage);
    STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePage), a);

#ifdef IN_RING3
    /*
     * Check if we have a pending update of the CR3 monitoring.
     */
    if (    RT_SUCCESS(rc)
        &&  (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3))
    {
        pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
        Assert(!pVM->pgm.s.fMappingsFixed);
#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
        Assert(pVM->pgm.s.GCPhysCR3 == pVM->pgm.s.GCPhysGstCR3Monitored);
        rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, pVM->pgm.s.GCPhysCR3);
#endif
    }

    /*
     * Inform CSAM about the flush
     *
     * Note: This is to check if monitored pages have been changed; when we implement
     *       callbacks for virtual handlers, this is no longer required.
     */
    CSAMR3FlushPage(pVM, GCPtrPage);
#endif /* IN_RING3 */
    return rc;
}


/**
 * Executes an instruction using the interpreter.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         VM handle.
 * @param   pRegFrame   Register frame.
 * @param   pvFault     Fault address.
 */
VMMDECL(int) PGMInterpretInstruction(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
{
    uint32_t cb;
    int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);
    if (rc == VERR_EM_INTERPRETER)
        rc = VINF_EM_RAW_EMULATE_INSTR;
    if (rc != VINF_SUCCESS)
        Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%RGv)\n", rc, pvFault));
    return rc;
}


/**
 * Gets effective page information (from the VMM page directory).
 *
 * @returns VBox status.
 * @param   pVM         VM Handle.
 * @param   GCPtr       Guest Context virtual address of the page.
 * @param   pfFlags     Where to store the flags. These are X86_PTE_*.
 * @param   pHCPhys     Where to store the HC physical address of the page.
 *                      This is page aligned.
 * @remark  You should use PGMMapGetPage() for pages in a mapping.
 */
VMMDECL(int) PGMShwGetPage(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
{
    return PGM_SHW_PFN(GetPage,pVM)(pVM, GCPtr, pfFlags, pHCPhys);
}


/**
 * Sets (replaces) the page flags for a range of pages in the shadow context.
 *
 * @returns VBox status.
 * @param   pVM         VM handle.
 * @param   GCPtr       The address of the first page.
 * @param   cb          The size of the range in bytes.
 * @param   fFlags      Page flags X86_PTE_*, excluding the page mask of course.
 * @remark  You must use PGMMapSetPage() for pages in a mapping.
 */
VMMDECL(int) PGMShwSetPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
{
    return PGMShwModifyPage(pVM, GCPtr, cb, fFlags, 0);
}


/**
 * Modify page flags for a range of pages in the shadow context.
 *
 * The existing flags are ANDed with the fMask and ORed with the fFlags.
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   GCPtr       Virtual address of the first page in the range.
 * @param   cb          Size (in bytes) of the range to apply the modification to.
 * @param   fFlags      The OR mask - page flags X86_PTE_*, excluding the page mask of course.
 * @param   fMask       The AND mask - page flags X86_PTE_*.
 *                      Be very CAREFUL when ~'ing constants which could be 32-bit!
 * @remark  You must use PGMMapModifyPage() for pages in a mapping.
 */
VMMDECL(int) PGMShwModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
{
    AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
    Assert(cb);

    /*
     * Align the input.
     */
    cb += GCPtr & PAGE_OFFSET_MASK;
    cb = RT_ALIGN_Z(cb, PAGE_SIZE);
    GCPtr = (GCPtr & PAGE_BASE_GC_MASK); /** @todo this ain't necessary, right... */

    /*
     * Call worker.
     */
    return PGM_SHW_PFN(ModifyPage, pVM)(pVM, GCPtr, cb, fFlags, fMask);
}
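
/*
 * Illustrative sketch of the fMask pitfall warned about above: X86_PTE_RW is
 * a 32-bit constant, so ~X86_PTE_RW would zero the upper half of a 64-bit
 * PAE/long mode entry mask (losing the NX bit, among others).  Cast first:
 *
 *     rc = PGMShwModifyPage(pVM, GCPtr, cb, 0, ~(uint64_t)X86_PTE_RW);
 *
 * This write-protects the range while leaving all other flags untouched.
 */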


/**
 * Gets the SHADOW page directory pointer for the specified address.
 *
 * @returns VBox status.
 * @param   pVM         VM handle.
 * @param   GCPtr       The address.
 * @param   ppPdpt      Receives address of pdpt
 * @param   ppPD        Receives address of page directory
 * @remarks Unused.
 */
DECLINLINE(int) pgmShwGetPAEPDPtr(PVM pVM, RTGCPTR GCPtr, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
{
    PPGM pPGM = &pVM->pgm.s;
    PPGMPOOL pPool = pPGM->CTX_SUFF(pPool);
    PPGMPOOLPAGE pShwPage;

    Assert(!HWACCMIsNestedPagingActive(pVM));

    const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
    PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
    PX86PDPE pPdpe = &pPdpt->a[iPdPt];

    *ppPdpt = pPdpt;
    if (!pPdpe->n.u1Present)
        return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;

    pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
    AssertReturn(pShwPage, VERR_INTERNAL_ERROR);

    *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
    return VINF_SUCCESS;
}
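
/*
 * Worked example (illustrative): with X86_PDPT_SHIFT = 30 and
 * X86_PDPT_MASK_PAE = 3, a guest address of 0xC0100000 selects PDPT entry
 * (0xC0100000 >> 30) & 3 = 3, i.e. the fourth and last 1GB page directory
 * of the PAE address space.
 */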

#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY

/**
 * Gets the shadow page directory for the specified address, PAE.
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   GCPtr       The address.
 * @param   pGstPdpe    Guest PDPT entry
 * @param   ppPD        Receives address of page directory
 */
int pgmShwSyncPaePDPtr(PVM pVM, RTGCPTR GCPtr, PX86PDPE pGstPdpe, PX86PDPAE *ppPD)
{
    const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
    PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
    PX86PDPE pPdpe = &pPdpt->a[iPdPt];
    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
    PPGMPOOLPAGE pShwPage;
    int rc;

    /* Allocate page directory if not present. */
    if (    !pPdpe->n.u1Present
        &&  !(pPdpe->u & X86_PDPE_PG_MASK))
    {
        bool fNestedPaging = HWACCMIsNestedPagingActive(pVM);
        bool fPaging = !!(CPUMGetGuestCR0(pVM) & X86_CR0_PG);
        RTGCPTR64 GCPdPt;
        PGMPOOLKIND enmKind;

# if defined(IN_RC) && defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
        /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
        PGMDynLockHCPage(pVM, (uint8_t *)pPdpe);
# endif

        if (fNestedPaging || !fPaging)
        {
            /* AMD-V nested paging or real/protected mode without paging */
            GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
            enmKind = PGMPOOLKIND_PAE_PD_PHYS;
        }
        else
        {
            Assert(pGstPdpe);

            if (CPUMGetGuestCR4(pVM) & X86_CR4_PAE)
            {
                if (!pGstPdpe->n.u1Present)
                {
                    /* PD not present; guest must reload CR3 to change it.
                     * No need to monitor anything in this case.
                     */
                    Assert(!HWACCMIsEnabled(pVM));

                    GCPdPt = pGstPdpe->u & X86_PDPE_PG_MASK;
                    enmKind = PGMPOOLKIND_PAE_PD_PHYS;
                    pGstPdpe->n.u1Present = 1;
                }
                else
                {
                    GCPdPt = pGstPdpe->u & X86_PDPE_PG_MASK;
                    enmKind = PGMPOOLKIND_PAE_PD_FOR_PAE_PD;
                }
            }
            else
            {
                GCPdPt = CPUMGetGuestCR3(pVM);
                enmKind = (PGMPOOLKIND)(PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD + iPdPt);
            }
        }

        /* Create a reference back to the PDPT by using the index in its shadow page. */
        rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPdPt, &pShwPage);
        if (rc == VERR_PGM_POOL_FLUSHED)
        {
            Log(("pgmShwSyncPaePDPtr: PGM pool flushed -> signal sync cr3\n"));
            Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
            VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
# if defined(IN_RC) && defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
            PGMDynUnlockHCPage(pVM, (uint8_t *)pPdpe);
# endif
            return VINF_PGM_SYNC_CR3;
        }
        AssertRCReturn(rc, rc);

        /* The PD was cached or created; hook it up now. */
        pPdpe->u |= pShwPage->Core.Key
                 |  (pGstPdpe->u & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));

# if defined(IN_RC) && defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
        /* In 32-bit PAE mode we *must* invalidate the TLB when changing a
         * PDPT entry; the CPU fetches them only during cr3 load, so any
         * non-present PDPT will continue to cause page faults.
         */
        ASMReloadCR3();
        PGMDynUnlockHCPage(pVM, (uint8_t *)pPdpe);
# endif
    }
    else
    {
        pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
        AssertReturn(pShwPage, VERR_INTERNAL_ERROR);

        Assert((pPdpe->u & X86_PDPE_PG_MASK) == pShwPage->Core.Key);
    }
    *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
    return VINF_SUCCESS;
}


/**
 * Gets the pointer to the shadow page directory entry for an address, PAE.
 *
 * @returns VBox status code.
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPtr       The address.
 * @param   ppShwPde    Receives the address of the pgm pool page for the shadow page directory
 */
DECLINLINE(int) pgmShwGetPaePoolPagePD(PPGM pPGM, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde)
{
    const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
    PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pPGM);
    AssertReturn(pPdpt, VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT); /* can't happen */
    if (!pPdpt->a[iPdPt].n.u1Present)
    {
        LogFlow(("pgmShwGetPaePoolPagePD: PD %d not present (%RX64)\n", iPdPt, pPdpt->a[iPdPt].u));
        return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
    }

    /* Fetch the pgm pool shadow descriptor. */
    PPGMPOOLPAGE pShwPde = pgmPoolGetPageByHCPhys(PGM2VM(pPGM), pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
    AssertReturn(pShwPde, VERR_INTERNAL_ERROR);

    *ppShwPde = pShwPde;
    return VINF_SUCCESS;
}

#endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY */
#ifndef IN_RC

/**
 * Syncs the SHADOW page directory pointer for the specified address.
 *
 * Allocates backing pages in case the PDPT or PML4 entry is missing.
 *
 * The caller is responsible for making sure the guest has a valid PD before
 * calling this function.
 *
 * @returns VBox status.
 * @param   pVM         VM handle.
 * @param   GCPtr       The address.
 * @param   pGstPml4e   Guest PML4 entry
 * @param   pGstPdpe    Guest PDPT entry
 * @param   ppPD        Receives address of page directory
 */
int pgmShwSyncLongModePDPtr(PVM pVM, RTGCPTR64 GCPtr, PX86PML4E pGstPml4e, PX86PDPE pGstPdpe, PX86PDPAE *ppPD)
{
    PPGM pPGM = &pVM->pgm.s;
    PPGMPOOL pPool = pPGM->CTX_SUFF(pPool);
    const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    PX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pPGM, iPml4);
    bool fNestedPaging = HWACCMIsNestedPagingActive(pVM);
#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    bool fPaging = !!(CPUMGetGuestCR0(pVM) & X86_CR0_PG);
#endif
    PPGMPOOLPAGE pShwPage;
    int rc;

    /* Allocate page directory pointer table if not present. */
    if (    !pPml4e->n.u1Present
        &&  !(pPml4e->u & X86_PML4E_PG_MASK))
    {
#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
        RTGCPTR64 GCPml4;
        PGMPOOLKIND enmKind;

        Assert(pVM->pgm.s.CTX_SUFF(pShwPageCR3));

        if (fNestedPaging || !fPaging)
        {
            /* AMD-V nested paging or real/protected mode without paging */
            GCPml4 = (RTGCPTR64)iPml4 << X86_PML4_SHIFT;
            enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_PHYS;
        }
        else
        {
            Assert(pGstPml4e && pGstPdpe);

            GCPml4 = pGstPml4e->u & X86_PML4E_PG_MASK;
            enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT;
        }

        /* Create a reference back to the PDPT by using the index in its shadow page. */
        rc = pgmPoolAlloc(pVM, GCPml4, enmKind, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, &pShwPage);
#else
        if (!fNestedPaging)
        {
            Assert(pGstPml4e && pGstPdpe);
            Assert(pVM->pgm.s.CTX_SUFF(pShwPageCR3));

            rc = pgmPoolAlloc(pVM, pGstPml4e->u & X86_PML4E_PG_MASK,
                              PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, &pShwPage);
        }
        else
        {
            /* AMD-V nested paging. (Intel EPT never comes here) */
            RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT;
            rc = pgmPoolAlloc(pVM, GCPml4 + RT_BIT_64(63) /* hack: make the address unique */,
                              PGMPOOLKIND_64BIT_PDPT_FOR_PHYS, PGMPOOL_IDX_NESTED_ROOT, iPml4, &pShwPage);
        }
#endif
        if (rc == VERR_PGM_POOL_FLUSHED)
        {
            Log(("PGMShwSyncLongModePDPtr: PGM pool flushed (1) -> signal sync cr3\n"));
            Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
            VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
            return VINF_PGM_SYNC_CR3;
        }
        AssertRCReturn(rc, rc);
    }
    else
    {
        pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
        AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
    }
    /* The PDPT was cached or created; hook it up now. */
    pPml4e->u |= pShwPage->Core.Key
              |  (pGstPml4e->u & ~(X86_PML4E_PG_MASK | X86_PML4E_AVL_MASK | X86_PML4E_PCD | X86_PML4E_PWT));

    const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
    PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
    PX86PDPE pPdpe = &pPdpt->a[iPdPt];

    /* Allocate page directory if not present. */
    if (    !pPdpe->n.u1Present
        &&  !(pPdpe->u & X86_PDPE_PG_MASK))
    {
#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
        RTGCPTR64 GCPdPt;
        PGMPOOLKIND enmKind;

        if (fNestedPaging || !fPaging)
        {
            /* AMD-V nested paging or real/protected mode without paging */
            GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
            enmKind = PGMPOOLKIND_64BIT_PD_FOR_PHYS;
        }
        else
        {
            Assert(pGstPdpe);

            GCPdPt = pGstPdpe->u & X86_PDPE_PG_MASK;
            enmKind = PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD;
        }

        /* Create a reference back to the PDPT by using the index in its shadow page. */
        rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, pShwPage->idx, iPdPt, &pShwPage);
#else
        if (!fNestedPaging)
        {
            Assert(pGstPml4e && pGstPdpe);
            Assert(!(pPdpe->u & X86_PDPE_PG_MASK));
            /* Create a reference back to the PDPT by using the index in its shadow page. */
            rc = pgmPoolAlloc(pVM, pGstPdpe->u & X86_PDPE_PG_MASK, PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD, pShwPage->idx, iPdPt, &pShwPage);
        }
        else
        {
            /* AMD-V nested paging. (Intel EPT never comes here) */
            RTGCPTR64 GCPdPt = (RTGCPTR64)iPdPt << EPT_PDPT_SHIFT;

            rc = pgmPoolAlloc(pVM, GCPdPt + RT_BIT_64(62) /* hack: make the address unique */, PGMPOOLKIND_64BIT_PD_FOR_PHYS, pShwPage->idx, iPdPt, &pShwPage);
        }
#endif
        if (rc == VERR_PGM_POOL_FLUSHED)
        {
            Log(("PGMShwSyncLongModePDPtr: PGM pool flushed (2) -> signal sync cr3\n"));
            Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
            VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
            return VINF_PGM_SYNC_CR3;
        }
        AssertRCReturn(rc, rc);
    }
    else
    {
        pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
        AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
    }
    /* The PD was cached or created; hook it up now. */
    pPdpe->u |= pShwPage->Core.Key
             |  (pGstPdpe->u & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));

    *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
    return VINF_SUCCESS;
}


/**
 * Gets the SHADOW page directory pointer for the specified address (long mode).
 *
 * @returns VBox status.
 * @param   pVM         VM handle.
 * @param   GCPtr       The address.
 * @param   ppPml4e     Receives address of the PML4 entry (optional)
 * @param   ppPdpt      Receives address of pdpt
 * @param   ppPD        Receives address of page directory
 */
DECLINLINE(int) pgmShwGetLongModePDPtr(PVM pVM, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
{
    PPGM pPGM = &pVM->pgm.s;
    const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    PCX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pPGM, iPml4);
    AssertReturn(pPml4e, VERR_INTERNAL_ERROR);
    if (ppPml4e)
        *ppPml4e = (PX86PML4E)pPml4e;
    if (!pPml4e->n.u1Present)
        return VERR_PAGE_MAP_LEVEL4_NOT_PRESENT;

    PPGMPOOL pPool = pPGM->CTX_SUFF(pPool);
    PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
    AssertReturn(pShwPage, VERR_INTERNAL_ERROR);

    const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
    PCX86PDPT pPdpt = *ppPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
    if (!pPdpt->a[iPdPt].n.u1Present)
        return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;

    pShwPage = pgmPoolGetPage(pPool, pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
    AssertReturn(pShwPage, VERR_INTERNAL_ERROR);

    *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
    return VINF_SUCCESS;
}
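
/*
 * Worked example (illustrative): the long mode walk above uses
 * X86_PML4_SHIFT = 39, X86_PDPT_SHIFT = 30 and 9-bit index masks (0x1ff).
 * For GCPtr = 0x00007FFFFFE00000 this gives PML4 index
 * (GCPtr >> 39) & 0x1ff = 255 and PDPT index (GCPtr >> 30) & 0x1ff = 511.
 */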


/**
 * Syncs the SHADOW EPT page directory pointer for the specified address. Allocates
 * backing pages in case the PDPT or PML4 entry is missing.
 *
 * @returns VBox status.
 * @param   pVM         VM handle.
 * @param   GCPtr       The address.
 * @param   ppPdpt      Receives address of pdpt
 * @param   ppPD        Receives address of page directory
 */
int pgmShwGetEPTPDPtr(PVM pVM, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD)
{
    PPGM pPGM = &pVM->pgm.s;
    const unsigned iPml4 = (GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
    PPGMPOOL pPool = pPGM->CTX_SUFF(pPool);
    PEPTPML4 pPml4;
    PEPTPML4E pPml4e;
    PPGMPOOLPAGE pShwPage;
    int rc;

    Assert(HWACCMIsNestedPagingActive(pVM));

# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    rc = PGM_HCPHYS_2_PTR(pVM, pPGM->HCPhysShwNestedRoot, &pPml4);
    AssertRCReturn(rc, rc);
# else
    pPml4 = (PEPTPML4)pPGM->CTX_SUFF(pShwNestedRoot);
# endif
    Assert(pPml4);

    /* Allocate page directory pointer table if not present. */
    pPml4e = &pPml4->a[iPml4];
    if (    !pPml4e->n.u1Present
        &&  !(pPml4e->u & EPT_PML4E_PG_MASK))
    {
        Assert(!(pPml4e->u & EPT_PML4E_PG_MASK));
        RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT;

#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
        rc = pgmPoolAlloc(pVM, GCPml4, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOL_IDX_NESTED_ROOT, iPml4, &pShwPage);
#else
        rc = pgmPoolAlloc(pVM, GCPml4 + RT_BIT_64(63) /* hack: make the address unique */, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOL_IDX_NESTED_ROOT, iPml4, &pShwPage);
#endif
        if (rc == VERR_PGM_POOL_FLUSHED)
        {
            Log(("PGMShwSyncEPTPDPtr: PGM pool flushed (1) -> signal sync cr3\n"));
            Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
            VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
            return VINF_PGM_SYNC_CR3;
        }
        AssertRCReturn(rc, rc);
    }
    else
    {
        pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
        AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
    }
    /* The PDPT was cached or created; hook it up now and fill with the default value. */
    pPml4e->u = pShwPage->Core.Key;
    pPml4e->n.u1Present = 1;
    pPml4e->n.u1Write = 1;
    pPml4e->n.u1Execute = 1;

    const unsigned iPdPt = (GCPtr >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
    PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
    PEPTPDPTE pPdpe = &pPdpt->a[iPdPt];

    if (ppPdpt)
        *ppPdpt = pPdpt;

    /* Allocate page directory if not present. */
    if (    !pPdpe->n.u1Present
        &&  !(pPdpe->u & EPT_PDPTE_PG_MASK))
    {
        RTGCPTR64 GCPdPt = (RTGCPTR64)iPdPt << EPT_PDPT_SHIFT;

#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
        rc = pgmPoolAlloc(pVM, GCPdPt, PGMPOOLKIND_64BIT_PD_FOR_PHYS, pShwPage->idx, iPdPt, &pShwPage);
#else
        rc = pgmPoolAlloc(pVM, GCPdPt + RT_BIT_64(62) /* hack: make the address unique */, PGMPOOLKIND_64BIT_PD_FOR_PHYS, pShwPage->idx, iPdPt, &pShwPage);
#endif
        if (rc == VERR_PGM_POOL_FLUSHED)
        {
            Log(("PGMShwSyncEPTPDPtr: PGM pool flushed (2) -> signal sync cr3\n"));
            Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
            VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
            return VINF_PGM_SYNC_CR3;
        }
        AssertRCReturn(rc, rc);
    }
    else
    {
        pShwPage = pgmPoolGetPage(pPool, pPdpe->u & EPT_PDPTE_PG_MASK);
        AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
    }
    /* The PD was cached or created; hook it up now and fill with the default value. */
    pPdpe->u = pShwPage->Core.Key;
    pPdpe->n.u1Present = 1;
    pPdpe->n.u1Write = 1;
    pPdpe->n.u1Execute = 1;

    *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
    return VINF_SUCCESS;
}

#endif /* !IN_RC */

/**
 * Gets effective Guest OS page information.
 *
 * When GCPtr is in a big page, the function will return as if it was a normal
 * 4KB page. If the need for distinguishing between big and normal page becomes
 * necessary at a later point, a PGMGstGetPage() will be created for that
 * purpose.
 *
 * @returns VBox status.
 * @param   pVM         VM Handle.
 * @param   GCPtr       Guest Context virtual address of the page.
 * @param   pfFlags     Where to store the flags. These are X86_PTE_*, even for big pages.
 * @param   pGCPhys     Where to store the GC physical address of the page.
 *                      This is page aligned.
 */
VMMDECL(int) PGMGstGetPage(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
{
    return PGM_GST_PFN(GetPage,pVM)(pVM, GCPtr, pfFlags, pGCPhys);
}
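
/*
 * Usage sketch (illustrative only): translating a guest linear address and
 * checking that the guest may write to it.
 *
 *     uint64_t fFlags;
 *     RTGCPHYS GCPhys;
 *     int rc = PGMGstGetPage(pVM, GCPtr, &fFlags, &GCPhys);
 *     if (RT_SUCCESS(rc) && (fFlags & X86_PTE_RW))
 *         ... GCPhys is the page aligned guest physical address ...
 */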


/**
 * Checks if the page is present.
 *
 * @returns true if the page is present.
 * @returns false if the page is not present.
 * @param   pVM         The VM handle.
 * @param   GCPtr       Address within the page.
 */
VMMDECL(bool) PGMGstIsPagePresent(PVM pVM, RTGCPTR GCPtr)
{
    int rc = PGMGstGetPage(pVM, GCPtr, NULL, NULL);
    return RT_SUCCESS(rc);
}


/**
 * Sets (replaces) the page flags for a range of pages in the guest's tables.
 *
 * @returns VBox status.
 * @param   pVM         VM handle.
 * @param   GCPtr       The address of the first page.
 * @param   cb          The size of the range in bytes.
 * @param   fFlags      Page flags X86_PTE_*, excluding the page mask of course.
 */
VMMDECL(int) PGMGstSetPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
{
    return PGMGstModifyPage(pVM, GCPtr, cb, fFlags, 0);
}


/**
 * Modify page flags for a range of pages in the guest's tables
 *
 * The existing flags are ANDed with the fMask and ORed with the fFlags.
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   GCPtr       Virtual address of the first page in the range.
 * @param   cb          Size (in bytes) of the range to apply the modification to.
 * @param   fFlags      The OR mask - page flags X86_PTE_*, excluding the page mask of course.
 * @param   fMask       The AND mask - page flags X86_PTE_*, excluding the page mask of course.
 *                      Be very CAREFUL when ~'ing constants which could be 32-bit!
 */
VMMDECL(int) PGMGstModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
{
    STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,GstModifyPage), a);

    /*
     * Validate input.
     */
    AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
    Assert(cb);

    LogFlow(("PGMGstModifyPage %RGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));

    /*
     * Adjust input.
     */
    cb += GCPtr & PAGE_OFFSET_MASK;
    cb = RT_ALIGN_Z(cb, PAGE_SIZE);
    GCPtr = (GCPtr & PAGE_BASE_GC_MASK);

    /*
     * Call worker.
     */
    int rc = PGM_GST_PFN(ModifyPage, pVM)(pVM, GCPtr, cb, fFlags, fMask);

    STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,GstModifyPage), a);
    return rc;
}
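
/*
 * Usage sketch (illustrative only): marking a guest range accessed and dirty
 * without disturbing any other flags - an fMask of ~0 keeps everything, while
 * fFlags ORs in the A and D bits.
 *
 *     rc = PGMGstModifyPage(pVM, GCPtr, cb, X86_PTE_A | X86_PTE_D, ~(uint64_t)0);
 */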


/**
 * Gets the specified page directory pointer table entry.
 *
 * @returns PDP entry
 * @param   pVM         The VM handle.
 * @param   iPdpt       PDPT index
 */
VMMDECL(X86PDPE) PGMGstGetPaePDPtr(PVM pVM, unsigned iPdpt)
{
    Assert(iPdpt <= 3);
    return pgmGstGetPaePDPTPtr(&pVM->pgm.s)->a[iPdpt & 3];
}


/**
 * Gets the current CR3 register value for the shadow memory context.
 * @returns CR3 value.
 * @param   pVM         The VM handle.
 */
VMMDECL(RTHCPHYS) PGMGetHyperCR3(PVM pVM)
{
#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    PGMMODE enmShadowMode = pVM->pgm.s.enmShadowMode;
    switch (enmShadowMode)
    {
        case PGMMODE_EPT:
            return pVM->pgm.s.HCPhysShwNestedRoot;

        default:
            return pVM->pgm.s.HCPhysShwCR3;
    }
#else
    PGMMODE enmShadowMode = pVM->pgm.s.enmShadowMode;
    switch (enmShadowMode)
    {
        case PGMMODE_32_BIT:
            return pVM->pgm.s.HCPhysShw32BitPD;

        case PGMMODE_PAE:
        case PGMMODE_PAE_NX:
            return pVM->pgm.s.HCPhysShwPaePdpt;

        case PGMMODE_AMD64:
        case PGMMODE_AMD64_NX:
            return pVM->pgm.s.HCPhysShwCR3;

        case PGMMODE_EPT:
            return pVM->pgm.s.HCPhysShwNestedRoot;

        case PGMMODE_NESTED:
            return PGMGetNestedCR3(pVM, PGMGetHostMode(pVM));

        default:
            AssertMsgFailed(("enmShadowMode=%d\n", enmShadowMode));
            return ~0;
    }
#endif
}


/**
 * Gets the current CR3 register value for the nested memory context.
 * @returns CR3 value.
 * @param   pVM         The VM handle.
 * @param   enmShadowMode   The shadow paging mode.
 */
VMMDECL(RTHCPHYS) PGMGetNestedCR3(PVM pVM, PGMMODE enmShadowMode)
{
#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    return pVM->pgm.s.HCPhysShwCR3;
#else
    switch (enmShadowMode)
    {
        case PGMMODE_32_BIT:
            return pVM->pgm.s.HCPhysShw32BitPD;

        case PGMMODE_PAE:
        case PGMMODE_PAE_NX:
            return pVM->pgm.s.HCPhysShwPaePdpt;

        case PGMMODE_AMD64:
        case PGMMODE_AMD64_NX:
            return pVM->pgm.s.HCPhysShwCR3;

        default:
            AssertMsgFailed(("enmShadowMode=%d\n", enmShadowMode));
            return ~0;
    }
#endif
}


/**
 * Gets the current CR3 register value for the EPT paging memory context.
 * @returns CR3 value.
 * @param   pVM         The VM handle.
 */
VMMDECL(RTHCPHYS) PGMGetEPTCR3(PVM pVM)
{
    return pVM->pgm.s.HCPhysShwNestedRoot;
}


/**
 * Gets the CR3 register value for the 32-Bit shadow memory context.
 * @returns CR3 value.
 * @param   pVM         The VM handle.
 */
VMMDECL(RTHCPHYS) PGMGetHyper32BitCR3(PVM pVM)
{
#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    return pVM->pgm.s.HCPhysShwCR3;
#else
    return pVM->pgm.s.HCPhysShw32BitPD;
#endif
}


/**
 * Gets the CR3 register value for the PAE shadow memory context.
 * @returns CR3 value.
 * @param   pVM         The VM handle.
 */
VMMDECL(RTHCPHYS) PGMGetHyperPaeCR3(PVM pVM)
{
#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    return pVM->pgm.s.HCPhysShwCR3;
#else
    return pVM->pgm.s.HCPhysShwPaePdpt;
#endif
}


/**
 * Gets the CR3 register value for the AMD64 shadow memory context.
 * @returns CR3 value.
 * @param   pVM         The VM handle.
 */
VMMDECL(RTHCPHYS) PGMGetHyperAmd64CR3(PVM pVM)
{
    return pVM->pgm.s.HCPhysShwCR3;
}


/**
 * Gets the current CR3 register value for the HC intermediate memory context.
 * @returns CR3 value.
 * @param   pVM         The VM handle.
 */
VMMDECL(RTHCPHYS) PGMGetInterHCCR3(PVM pVM)
{
    switch (pVM->pgm.s.enmHostMode)
    {
        case SUPPAGINGMODE_32_BIT:
        case SUPPAGINGMODE_32_BIT_GLOBAL:
            return pVM->pgm.s.HCPhysInterPD;

        case SUPPAGINGMODE_PAE:
        case SUPPAGINGMODE_PAE_GLOBAL:
        case SUPPAGINGMODE_PAE_NX:
        case SUPPAGINGMODE_PAE_GLOBAL_NX:
            return pVM->pgm.s.HCPhysInterPaePDPT;

        case SUPPAGINGMODE_AMD64:
        case SUPPAGINGMODE_AMD64_GLOBAL:
        case SUPPAGINGMODE_AMD64_NX:
        case SUPPAGINGMODE_AMD64_GLOBAL_NX:
            return pVM->pgm.s.HCPhysInterPaePDPT;

        default:
            AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode));
            return ~0;
    }
}


/**
 * Gets the current CR3 register value for the RC intermediate memory context.
 * @returns CR3 value.
 * @param   pVM         The VM handle.
 */
VMMDECL(RTHCPHYS) PGMGetInterRCCR3(PVM pVM)
{
    switch (pVM->pgm.s.enmShadowMode)
    {
        case PGMMODE_32_BIT:
            return pVM->pgm.s.HCPhysInterPD;

        case PGMMODE_PAE:
        case PGMMODE_PAE_NX:
            return pVM->pgm.s.HCPhysInterPaePDPT;

        case PGMMODE_AMD64:
        case PGMMODE_AMD64_NX:
            return pVM->pgm.s.HCPhysInterPaePML4;

        case PGMMODE_EPT:
        case PGMMODE_NESTED:
            return 0; /* not relevant */

        default:
            AssertMsgFailed(("enmShadowMode=%d\n", pVM->pgm.s.enmShadowMode));
            return ~0;
    }
}


/**
 * Gets the CR3 register value for the 32-Bit intermediate memory context.
 * @returns CR3 value.
 * @param   pVM         The VM handle.
 */
VMMDECL(RTHCPHYS) PGMGetInter32BitCR3(PVM pVM)
{
    return pVM->pgm.s.HCPhysInterPD;
}


/**
 * Gets the CR3 register value for the PAE intermediate memory context.
 * @returns CR3 value.
 * @param   pVM         The VM handle.
 */
VMMDECL(RTHCPHYS) PGMGetInterPaeCR3(PVM pVM)
{
    return pVM->pgm.s.HCPhysInterPaePDPT;
}


/**
 * Gets the CR3 register value for the AMD64 intermediate memory context.
 * @returns CR3 value.
 * @param   pVM         The VM handle.
 */
VMMDECL(RTHCPHYS) PGMGetInterAmd64CR3(PVM pVM)
{
    return pVM->pgm.s.HCPhysInterPaePML4;
}


/**
 * Performs and schedules necessary updates following a CR3 load or reload.
 *
 * This will normally involve mapping the guest PD or nPDPT
 *
 * @returns VBox status code.
 * @retval  VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
 *          safely be ignored and overridden since the FF will be set too then.
 * @param   pVM         VM handle.
 * @param   cr3         The new cr3.
 * @param   fGlobal     Indicates whether this is a global flush or not.
 */
VMMDECL(int) PGMFlushTLB(PVM pVM, uint64_t cr3, bool fGlobal)
{
    STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLB), a);

    /*
     * Always flag the necessary updates; this is required for hardware acceleration
     */
    /** @todo optimize this, it shouldn't always be necessary. */
    VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
    if (fGlobal)
        VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
    LogFlow(("PGMFlushTLB: cr3=%RX64 OldCr3=%RX64 fGlobal=%d\n", cr3, pVM->pgm.s.GCPhysCR3, fGlobal));

    /*
     * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
     */
    int rc = VINF_SUCCESS;
    RTGCPHYS GCPhysCR3;
    switch (pVM->pgm.s.enmGuestMode)
    {
        case PGMMODE_PAE:
        case PGMMODE_PAE_NX:
            GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
            break;
        case PGMMODE_AMD64:
        case PGMMODE_AMD64_NX:
            GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
            break;
        default:
            GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
            break;
    }

    if (pVM->pgm.s.GCPhysCR3 != GCPhysCR3)
    {
        RTGCPHYS GCPhysOldCR3 = pVM->pgm.s.GCPhysCR3;
        pVM->pgm.s.GCPhysCR3 = GCPhysCR3;
        rc = PGM_BTH_PFN(MapCR3, pVM)(pVM, GCPhysCR3);
        if (RT_LIKELY(rc == VINF_SUCCESS))
        {
            if (!pVM->pgm.s.fMappingsFixed)
            {
                pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
                rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, GCPhysCR3);
#endif
            }
        }
        else
        {
            AssertMsg(rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
            Assert(VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL | VM_FF_PGM_SYNC_CR3));
            pVM->pgm.s.GCPhysCR3 = GCPhysOldCR3;
            pVM->pgm.s.fSyncFlags |= PGM_SYNC_MAP_CR3;
            if (!pVM->pgm.s.fMappingsFixed)
                pVM->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;
        }

        if (fGlobal)
            STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLBNewCR3Global));
        else
            STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLBNewCR3));
    }
    else
    {
        /*
         * Check if we have a pending update of the CR3 monitoring.
         */
        if (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
        {
            pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
            Assert(!pVM->pgm.s.fMappingsFixed);
#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
            rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, GCPhysCR3);
#endif
        }
        if (fGlobal)
            STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLBSameCR3Global));
        else
            STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLBSameCR3));
    }

    STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLB), a);
    return rc;
}
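
/*
 * Illustrative note: the CR3 masking above follows the architecture.  In PAE
 * mode CR3 only needs 32-byte alignment (X86_CR3_PAE_PAGE_MASK keeps bits 5
 * and up), so e.g. cr3 = 0x3fe0 yields GCPhysCR3 = 0x3fe0, whereas the
 * 32-bit and long mode masks are page aligned.
 */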


/**
 * Performs and schedules necessary updates following a CR3 load or reload when
 * using nested or extended paging.
 *
 * This API is an alternative to PGMFlushTLB that avoids actually flushing the
1751 * TLB and triggering a SyncCR3.
1752 *
1753 * This will normally involve mapping the guest PD or nPDPT
1754 *
1755 * @returns VBox status code.
1756 * @retval VINF_SUCCESS.
1757 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync (only possible
1758 *         when not actually in nested mode). This can safely be ignored and
1759 *         overridden since the FF will be set as well.
1760 * @param pVM VM handle.
1761 * @param cr3 The new cr3.
1762 */
1763VMMDECL(int) PGMUpdateCR3(PVM pVM, uint64_t cr3)
1764{
1765 LogFlow(("PGMUpdateCR3: cr3=%RX64 OldCr3=%RX64\n", cr3, pVM->pgm.s.GCPhysCR3));
1766
1767 /* We assume we're only called in nested paging mode. */
1768 Assert(pVM->pgm.s.fMappingsFixed);
1769 Assert(!(pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3));
1770 Assert(pVM->pgm.s.enmShadowMode == PGMMODE_NESTED || pVM->pgm.s.enmShadowMode == PGMMODE_EPT);
1771
1772 /*
1773 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
1774 */
1775 int rc = VINF_SUCCESS;
1776 RTGCPHYS GCPhysCR3;
1777 switch (pVM->pgm.s.enmGuestMode)
1778 {
1779 case PGMMODE_PAE:
1780 case PGMMODE_PAE_NX:
1781 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1782 break;
1783 case PGMMODE_AMD64:
1784 case PGMMODE_AMD64_NX:
1785 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
1786 break;
1787 default:
1788 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1789 break;
1790 }
1791 if (pVM->pgm.s.GCPhysCR3 != GCPhysCR3)
1792 {
1793 pVM->pgm.s.GCPhysCR3 = GCPhysCR3;
1794 rc = PGM_BTH_PFN(MapCR3, pVM)(pVM, GCPhysCR3);
1795 AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */
1796 }
1797 return rc;
1798}
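
/*
 * Editorial example (assumption, not original source): how a caller might
 * choose between PGMUpdateCR3 and PGMFlushTLB depending on the shadow mode;
 * the helper name is hypothetical.
 */
#if 0 /* illustrative sketch, not compiled */
static int exampleSetGuestCR3(PVM pVM, uint64_t uNewCr3, bool fGlobal)
{
    /* With nested paging or EPT the shadow structures aren't derived from
       the guest CR3, so no shadow TLB flush or CR3 sync is needed. */
    if (   PGMGetShadowMode(pVM) == PGMMODE_NESTED
        || PGMGetShadowMode(pVM) == PGMMODE_EPT)
        return PGMUpdateCR3(pVM, uNewCr3);
    return PGMFlushTLB(pVM, uNewCr3, fGlobal);
}
#endif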
1799
1800
1801/**
1802 * Synchronize the paging structures.
1803 *
1804 * This function is called in response to the VM_FF_PGM_SYNC_CR3 and
1805 * VM_FF_PGM_SYNC_CR3_NON_GLOBAL. Those two force action flags are set
1806 * in several places, most importantly whenever the CR3 is loaded.
1807 *
1808 * @returns VBox status code.
1809 * @param pVM The virtual machine.
1810 * @param cr0 Guest context CR0 register
1811 * @param cr3 Guest context CR3 register
1812 * @param cr4 Guest context CR4 register
1813 * @param fGlobal Including global page directories or not
1814 */
1815VMMDECL(int) PGMSyncCR3(PVM pVM, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
1816{
1817 int rc;
1818
1819 /*
1820 * We might be called when we shouldn't.
1821 *
1822 * The mode switching will ensure that the PD is resynced
1823 * after every mode switch. So, if we find ourselves here
1824 * when in protected or real mode we can safely disable the
1825 * FF and return immediately.
1826 */
1827 if (pVM->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
1828 {
1829 Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
1830 VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3);
1831 VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
1832 return VINF_SUCCESS;
1833 }
1834
1835 /* If global pages are not supported, then all flushes are global. */
1836 if (!(cr4 & X86_CR4_PGE))
1837 fGlobal = true;
1838 LogFlow(("PGMSyncCR3: cr0=%RX64 cr3=%RX64 cr4=%RX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
1839 VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3), VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL)));
1840
1841#ifdef PGMPOOL_WITH_MONITORING
1842 /*
1843 * The pool may have pending stuff and even require a return to ring-3 to
1844 * clear the whole thing.
1845 */
1846 rc = pgmPoolSyncCR3(pVM);
1847 if (rc != VINF_SUCCESS)
1848 return rc;
1849#endif
1850
1851 /*
1852 * Check if we need to finish an aborted MapCR3 call (see PGMFlushTLB).
1853 * This should be done before SyncCR3.
1854 */
1855 if (pVM->pgm.s.fSyncFlags & PGM_SYNC_MAP_CR3)
1856 {
1857 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MAP_CR3;
1858
1859 RTGCPHYS GCPhysCR3Old = pVM->pgm.s.GCPhysCR3;
1860 RTGCPHYS GCPhysCR3;
1861 switch (pVM->pgm.s.enmGuestMode)
1862 {
1863 case PGMMODE_PAE:
1864 case PGMMODE_PAE_NX:
1865 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1866 break;
1867 case PGMMODE_AMD64:
1868 case PGMMODE_AMD64_NX:
1869 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
1870 break;
1871 default:
1872 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1873 break;
1874 }
1875
1876#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
1877 if (pVM->pgm.s.GCPhysCR3 != GCPhysCR3)
1878 {
1879 pVM->pgm.s.GCPhysCR3 = GCPhysCR3;
1880 rc = PGM_BTH_PFN(MapCR3, pVM)(pVM, GCPhysCR3);
1881 }
1882#else
1883 pVM->pgm.s.GCPhysCR3 = GCPhysCR3;
1884 rc = PGM_BTH_PFN(MapCR3, pVM)(pVM, GCPhysCR3);
1885#endif
1886#ifdef IN_RING3
1887 if (rc == VINF_PGM_SYNC_CR3)
1888 rc = pgmPoolSyncCR3(pVM);
1889#else
1890 if (rc == VINF_PGM_SYNC_CR3)
1891 {
1892 pVM->pgm.s.GCPhysCR3 = GCPhysCR3Old;
1893 return rc;
1894 }
1895#endif
1896 AssertRCReturn(rc, rc);
1897 AssertRCSuccessReturn(rc, VERR_INTERNAL_ERROR);
1898 }
1899
1900 /*
1901 * Let the 'Bth' function do the work and we'll just keep track of the flags.
1902 */
1903 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
1904 rc = PGM_BTH_PFN(SyncCR3, pVM)(pVM, cr0, cr3, cr4, fGlobal);
1905 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
1906 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
1907 if (rc == VINF_SUCCESS)
1908 {
1909 if (!(pVM->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
1910 {
1911 VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3);
1912 VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
1913 }
1914
1915 /*
1916 * Check if we have a pending update of the CR3 monitoring.
1917 */
1918 if (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
1919 {
1920 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1921 Assert(!pVM->pgm.s.fMappingsFixed);
1922#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
1923 Assert(pVM->pgm.s.GCPhysCR3 == pVM->pgm.s.GCPhysGstCR3Monitored);
1924 rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, pVM->pgm.s.GCPhysCR3);
1925#endif
1926 }
1927 }
1928
1929 /*
1930 * Now flush the CR3 (guest context).
1931 */
1932 if (rc == VINF_SUCCESS)
1933 PGM_INVL_GUEST_TLBS();
1934 return rc;
1935}
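
/*
 * Editorial example (assumption, not original source): the rough shape of
 * the force-action check that ends up calling PGMSyncCR3 before resuming
 * guest execution; the real EM loops differ in detail.
 */
#if 0 /* illustrative sketch, not compiled */
    if (VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL))
    {
        int rc2 = PGMSyncCR3(pVM, CPUMGetGuestCR0(pVM), CPUMGetGuestCR3(pVM),
                             CPUMGetGuestCR4(pVM), VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
        if (RT_FAILURE(rc2))
            return rc2;
    }
#endif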
1936
1937
1938/**
1939 * Called whenever CR0 or CR4 changes in a way which may change
1940 * the paging mode.
1941 *
1942 * @returns VBox status code fit for scheduling in GC and R0.
1943 * @retval VINF_SUCCESS if there was no change, or it was successfully dealt with.
1944 * @retval VINF_PGM_CHANGE_MODE if we're in GC or R0 and the mode changes.
1945 * @param pVM VM handle.
1946 * @param cr0 The new cr0.
1947 * @param cr4 The new cr4.
1948 * @param efer The new extended feature enable register.
1949 */
1950VMMDECL(int) PGMChangeMode(PVM pVM, uint64_t cr0, uint64_t cr4, uint64_t efer)
1951{
1952 PGMMODE enmGuestMode;
1953
1954 /*
1955 * Calc the new guest mode.
1956 */
1957 if (!(cr0 & X86_CR0_PE))
1958 enmGuestMode = PGMMODE_REAL;
1959 else if (!(cr0 & X86_CR0_PG))
1960 enmGuestMode = PGMMODE_PROTECTED;
1961 else if (!(cr4 & X86_CR4_PAE))
1962 enmGuestMode = PGMMODE_32_BIT;
1963 else if (!(efer & MSR_K6_EFER_LME))
1964 {
1965 if (!(efer & MSR_K6_EFER_NXE))
1966 enmGuestMode = PGMMODE_PAE;
1967 else
1968 enmGuestMode = PGMMODE_PAE_NX;
1969 }
1970 else
1971 {
1972 if (!(efer & MSR_K6_EFER_NXE))
1973 enmGuestMode = PGMMODE_AMD64;
1974 else
1975 enmGuestMode = PGMMODE_AMD64_NX;
1976 }
1977
1978 /*
1979 * Did it change?
1980 */
1981 if (pVM->pgm.s.enmGuestMode == enmGuestMode)
1982 return VINF_SUCCESS;
1983
1984 /* Flush the TLB */
1985 PGM_INVL_GUEST_TLBS();
1986
1987#ifdef IN_RING3
1988 return PGMR3ChangeMode(pVM, enmGuestMode);
1989#else
1990 LogFlow(("PGMChangeMode: returns VINF_PGM_CHANGE_MODE.\n"));
1991 return VINF_PGM_CHANGE_MODE;
1992#endif
1993}
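
/*
 * Summary of the mode calculation above (editorial comment derived directly
 * from the code; 'x' means don't care):
 *
 *  CR0.PE  CR0.PG  CR4.PAE  EFER.LME  EFER.NXE  ->  guest mode
 *    0       x       x        x         x           PGMMODE_REAL
 *    1       0       x        x         x           PGMMODE_PROTECTED
 *    1       1       0        x         x           PGMMODE_32_BIT
 *    1       1       1        0         0           PGMMODE_PAE
 *    1       1       1        0         1           PGMMODE_PAE_NX
 *    1       1       1        1         0           PGMMODE_AMD64
 *    1       1       1        1         1           PGMMODE_AMD64_NX
 */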
1994
1995
1996/**
1997 * Gets the current guest paging mode.
1998 *
1999 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
2000 *
2001 * @returns The current paging mode.
2002 * @param pVM The VM handle.
2003 */
2004VMMDECL(PGMMODE) PGMGetGuestMode(PVM pVM)
2005{
2006 return pVM->pgm.s.enmGuestMode;
2007}
2008
2009
2010/**
2011 * Gets the current shadow paging mode.
2012 *
2013 * @returns The current paging mode.
2014 * @param pVM The VM handle.
2015 */
2016VMMDECL(PGMMODE) PGMGetShadowMode(PVM pVM)
2017{
2018 return pVM->pgm.s.enmShadowMode;
2019}
2020
2021/**
2022 * Gets the current host paging mode.
2023 *
2024 * @returns The current paging mode.
2025 * @param pVM The VM handle.
2026 */
2027VMMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
2028{
2029 switch (pVM->pgm.s.enmHostMode)
2030 {
2031 case SUPPAGINGMODE_32_BIT:
2032 case SUPPAGINGMODE_32_BIT_GLOBAL:
2033 return PGMMODE_32_BIT;
2034
2035 case SUPPAGINGMODE_PAE:
2036 case SUPPAGINGMODE_PAE_GLOBAL:
2037 return PGMMODE_PAE;
2038
2039 case SUPPAGINGMODE_PAE_NX:
2040 case SUPPAGINGMODE_PAE_GLOBAL_NX:
2041 return PGMMODE_PAE_NX;
2042
2043 case SUPPAGINGMODE_AMD64:
2044 case SUPPAGINGMODE_AMD64_GLOBAL:
2045 return PGMMODE_AMD64;
2046
2047 case SUPPAGINGMODE_AMD64_NX:
2048 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
2049 return PGMMODE_AMD64_NX;
2050
2051 default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
2052 }
2053
2054 return PGMMODE_INVALID;
2055}
2056
2057
2058/**
2059 * Get mode name.
2060 *
2061 * @returns read-only name string.
2062 * @param enmMode The mode which name is desired.
2063 */
2064VMMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
2065{
2066 switch (enmMode)
2067 {
2068 case PGMMODE_REAL: return "Real";
2069 case PGMMODE_PROTECTED: return "Protected";
2070 case PGMMODE_32_BIT: return "32-bit";
2071 case PGMMODE_PAE: return "PAE";
2072 case PGMMODE_PAE_NX: return "PAE+NX";
2073 case PGMMODE_AMD64: return "AMD64";
2074 case PGMMODE_AMD64_NX: return "AMD64+NX";
2075 case PGMMODE_NESTED: return "Nested";
2076 case PGMMODE_EPT: return "EPT";
2077 default: return "unknown mode value";
2078 }
2079}
2080
2081
2082/**
2083 * Acquire the PGM lock.
2084 *
2085 * @returns VBox status code
2086 * @param pVM The VM to operate on.
2087 */
2088int pgmLock(PVM pVM)
2089{
2090 int rc = PDMCritSectEnter(&pVM->pgm.s.CritSect, VERR_SEM_BUSY);
2091#ifdef IN_RC
2092 if (rc == VERR_SEM_BUSY)
2093 rc = VMMGCCallHost(pVM, VMMCALLHOST_PGM_LOCK, 0);
2094#elif defined(IN_RING0)
2095 if (rc == VERR_SEM_BUSY)
2096 rc = VMMR0CallHost(pVM, VMMCALLHOST_PGM_LOCK, 0);
2097#endif
2098 AssertRC(rc);
2099 return rc;
2100}
2101
2102
2103/**
2104 * Release the PGM lock.
2105 *
2107 * @param pVM The VM to operate on.
2108 */
2109void pgmUnlock(PVM pVM)
2110{
2111 PDMCritSectLeave(&pVM->pgm.s.CritSect);
2112}
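
/*
 * Editorial example (assumption, not original source): the typical pairing
 * of the PGM lock around internal structure access. In RC and R0 a busy
 * lock escalates to ring-3 inside pgmLock itself via the call-host path.
 */
#if 0 /* illustrative sketch, not compiled */
    int rc = pgmLock(pVM);
    AssertRCReturn(rc, rc);
    /* ... walk or modify PGM structures here ... */
    pgmUnlock(pVM);
#endif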
2113
2114#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
2115
2116/**
2117 * Temporarily maps one guest page specified by GC physical address.
2118 * These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
2119 *
2120 * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
2121 * reused after 8 mappings (or perhaps a few more if you score with the cache).
2122 *
2123 * @returns VBox status code.
2124 * @param pVM VM handle.
2125 * @param GCPhys GC Physical address of the page.
2126 * @param ppv Where to store the address of the mapping.
2127 */
2128VMMDECL(int) PGMDynMapGCPage(PVM pVM, RTGCPHYS GCPhys, void **ppv)
2129{
2130 AssertMsg(!(GCPhys & PAGE_OFFSET_MASK), ("GCPhys=%RGp\n", GCPhys));
2131
2132 /*
2133 * Get the ram range.
2134 */
2135 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
2136 while (pRam && GCPhys - pRam->GCPhys >= pRam->cb)
2137 pRam = pRam->CTX_SUFF(pNext);
2138 if (!pRam)
2139 {
2140 AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
2141 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2142 }
2143
2144 /*
2145 * Pass it on to PGMDynMapHCPage.
2146 */
2147 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT]);
2148 //Log(("PGMDynMapGCPage: GCPhys=%RGp HCPhys=%RHp\n", GCPhys, HCPhys));
2149#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
2150 pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
2151#else
2152 PGMDynMapHCPage(pVM, HCPhys, ppv);
2153#endif
2154 return VINF_SUCCESS;
2155}
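
/*
 * Editorial example (assumption, not original source): mapping a guest page
 * and consuming it immediately, since the mapping may be recycled by
 * subsequent dynamic mapping calls.
 */
#if 0 /* illustrative sketch, not compiled */
    void *pv;
    int   rc = PGMDynMapGCPage(pVM, GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK, &pv);
    if (RT_SUCCESS(rc))
    {
        uint64_t u64Entry = *(uint64_t *)pv; /* use before making further mapping calls */
        NOREF(u64Entry);
    }
#endif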
2156
2157
2158/**
2159 * Temporarily maps one guest page specified by unaligned GC physical address.
2160 * These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
2161 *
2162 * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
2163 * reused after 8 mappings (or perhaps a few more if you score with the cache).
2164 *
2165 * The caller is aware that only the specified page is mapped and that really bad things
2166 * will happen if writing beyond the page!
2167 *
2168 * @returns VBox status code.
2169 * @param pVM VM handle.
2170 * @param GCPhys GC Physical address within the page to be mapped.
2171 * @param ppv         Where to store the mapping address corresponding to GCPhys.
2172 */
2173VMMDECL(int) PGMDynMapGCPageOff(PVM pVM, RTGCPHYS GCPhys, void **ppv)
2174{
2175 /*
2176 * Get the ram range.
2177 */
2178 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
2179 while (pRam && GCPhys - pRam->GCPhys >= pRam->cb)
2180 pRam = pRam->CTX_SUFF(pNext);
2181 if (!pRam)
2182 {
2183 AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
2184 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2185 }
2186
2187 /*
2188 * Pass it on to PGMDynMapHCPage.
2189 */
2190 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT]);
2191#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
2192 pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
2193#else
2194 PGMDynMapHCPage(pVM, HCPhys, ppv);
2195#endif
2196 *ppv = (void *)((uintptr_t)*ppv | (GCPhys & PAGE_OFFSET_MASK));
2197 return VINF_SUCCESS;
2198}
2199
2200# ifdef IN_RC
2201
2202/**
2203 * Temporarily maps one host page specified by HC physical address.
2204 *
2205 * Be WARNED that the dynamic page mapping area is small, 16 pages, thus the space is
2206 * reused after 16 mappings (or perhaps a few more if you score with the cache).
2207 *
2208 * @returns VINF_SUCCESS, will bail out to ring-3 on failure.
2209 * @param pVM VM handle.
2210 * @param HCPhys HC Physical address of the page.
2211 * @param ppv Where to store the address of the mapping. This is the
2212 * address of the PAGE not the exact address corresponding
2213 * to HCPhys. Use PGMDynMapHCPageOff if you care for the
2214 * page offset.
2215 */
2216VMMDECL(int) PGMDynMapHCPage(PVM pVM, RTHCPHYS HCPhys, void **ppv)
2217{
2218 AssertMsg(!(HCPhys & PAGE_OFFSET_MASK), ("HCPhys=%RHp\n", HCPhys));
2219
2220 /*
2221 * Check the cache.
2222 */
2223 register unsigned iCache;
2224 for (iCache = 0;iCache < RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache);iCache++)
2225 {
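        /* Editorial note (assumed reading of the table below): each cache
           slot i can hold either dynamic page i or page i+8. Indexed by the
           most recently allocated page, au8Trans picks whichever of the two
           candidates for a slot was installed more recently. */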
2226 static const uint8_t au8Trans[MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT][RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache)] =
2227 {
2228 { 0, 9, 10, 11, 12, 13, 14, 15},
2229 { 0, 1, 10, 11, 12, 13, 14, 15},
2230 { 0, 1, 2, 11, 12, 13, 14, 15},
2231 { 0, 1, 2, 3, 12, 13, 14, 15},
2232 { 0, 1, 2, 3, 4, 13, 14, 15},
2233 { 0, 1, 2, 3, 4, 5, 14, 15},
2234 { 0, 1, 2, 3, 4, 5, 6, 15},
2235 { 0, 1, 2, 3, 4, 5, 6, 7},
2236 { 8, 1, 2, 3, 4, 5, 6, 7},
2237 { 8, 9, 2, 3, 4, 5, 6, 7},
2238 { 8, 9, 10, 3, 4, 5, 6, 7},
2239 { 8, 9, 10, 11, 4, 5, 6, 7},
2240 { 8, 9, 10, 11, 12, 5, 6, 7},
2241 { 8, 9, 10, 11, 12, 13, 6, 7},
2242 { 8, 9, 10, 11, 12, 13, 14, 7},
2243 { 8, 9, 10, 11, 12, 13, 14, 15},
2244 };
2245 AssertCompile(RT_ELEMENTS(au8Trans) == 16);
2246 AssertCompile(RT_ELEMENTS(au8Trans[0]) == 8);
2247
2248 if (pVM->pgm.s.aHCPhysDynPageMapCache[iCache] == HCPhys)
2249 {
2250 int iPage = au8Trans[pVM->pgm.s.iDynPageMapLast][iCache];
2251
2252 /* The cache can get out of sync with locked entries. (10 locked, 2 overwrites its cache position, last = 11, lookup 2 -> page 10 instead of 2) */
2253 if ((pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage].u & X86_PTE_PG_MASK) == HCPhys)
2254 {
2255 void *pv = pVM->pgm.s.pbDynPageMapBaseGC + (iPage << PAGE_SHIFT);
2256 *ppv = pv;
2257 STAM_COUNTER_INC(&pVM->pgm.s.StatRCDynMapCacheHits);
2258 Log4(("PGMGCDynMapHCPage: HCPhys=%RHp pv=%p iPage=%d iCache=%d\n", HCPhys, pv, iPage, iCache));
2259 return VINF_SUCCESS;
2260 }
2261 }
2262 }
2263 AssertCompile(RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) == 8);
2264 AssertCompile((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) == 16);
2265 STAM_COUNTER_INC(&pVM->pgm.s.StatRCDynMapCacheMisses);
2266
2267 /*
2268 * Update the page tables.
2269 */
2270 register unsigned iPage = pVM->pgm.s.iDynPageMapLast;
2271# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
2272 unsigned i;
2273 for (i=0;i<(MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT);i++)
2274 {
2275 pVM->pgm.s.iDynPageMapLast = iPage = (iPage + 1) & ((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) - 1);
2276 if (!(pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage].u & PGM_PTFLAGS_DYN_LOCKED))
2277 break;
2278 iPage++;
2279 }
2280 AssertRelease(i != (MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT));
2281# else
2282 pVM->pgm.s.iDynPageMapLast = iPage = (iPage + 1) & ((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) - 1);
2283# endif
2284
2285 pVM->pgm.s.aHCPhysDynPageMapCache[iPage & (RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) - 1)] = HCPhys;
2286 pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage].u = (uint32_t)HCPhys | X86_PTE_P | X86_PTE_A | X86_PTE_D;
2287 pVM->pgm.s.paDynPageMapPaePTEsGC[iPage].u = HCPhys | X86_PTE_P | X86_PTE_A | X86_PTE_D;
2288
2289 void *pv = pVM->pgm.s.pbDynPageMapBaseGC + (iPage << PAGE_SHIFT);
2290 *ppv = pv;
2291 ASMInvalidatePage(pv);
2292 Log4(("PGMGCDynMapHCPage: HCPhys=%RHp pv=%p iPage=%d\n", HCPhys, pv, iPage));
2293 return VINF_SUCCESS;
2294}
2295
2296
2297/**
2298 * Temporarily lock a dynamic page to prevent it from being reused.
2299 *
2300 * @returns VINF_SUCCESS, will bail out to ring-3 on failure.
2301 * @param pVM VM handle.
2302 * @param GCPage GC address of page
2303 */
2304VMMDECL(int) PGMDynLockHCPage(PVM pVM, RCPTRTYPE(uint8_t *) GCPage)
2305{
2306 unsigned iPage;
2307
2308 Assert(GCPage >= pVM->pgm.s.pbDynPageMapBaseGC && GCPage < (pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE));
2309 iPage = ((uintptr_t)(GCPage - pVM->pgm.s.pbDynPageMapBaseGC)) >> PAGE_SHIFT;
2310 Assert(!(pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage].u & PGM_PTFLAGS_DYN_LOCKED));
2311 pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage].u |= PGM_PTFLAGS_DYN_LOCKED;
2312 return VINF_SUCCESS;
2313}
2314
2315
2316/**
2317 * Unlock a dynamic page
2318 *
2319 * @returns VINF_SUCCESS, will bail out to ring-3 on failure.
2320 * @param pVM VM handle.
2321 * @param GCPage GC address of page
2322 */
2323VMMDECL(int) PGMDynUnlockHCPage(PVM pVM, RCPTRTYPE(uint8_t *) GCPage)
2324{
2325 unsigned iPage;
2326
2327 Assert(GCPage >= pVM->pgm.s.pbDynPageMapBaseGC && GCPage < (pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE));
2328 iPage = ((uintptr_t)(GCPage - pVM->pgm.s.pbDynPageMapBaseGC)) >> PAGE_SHIFT;
2329 Assert(pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage].u & PGM_PTFLAGS_DYN_LOCKED);
2330 pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage].u &= ~PGM_PTFLAGS_DYN_LOCKED;
2331 return VINF_SUCCESS;
2332}
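
/*
 * Editorial example (assumption, not original source): locking a dynamic
 * mapping so that nested mapping activity cannot recycle it before use.
 */
#if 0 /* illustrative sketch, not compiled */
    uint8_t *pbPage;
    int rc = PGMDynMapHCPage(pVM, HCPhys, (void **)&pbPage);
    if (RT_SUCCESS(rc))
    {
        PGMDynLockHCPage(pVM, pbPage);
        /* ... code that may itself call PGMDynMapHCPage ... */
        PGMDynUnlockHCPage(pVM, pbPage);
    }
#endif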
2333
2334
2335# ifdef VBOX_STRICT
2336/**
2337 * Check for lock leaks.
2338 *
2339 * @param pVM VM handle.
2340 */
2341VMMDECL(void) PGMDynCheckLocks(PVM pVM)
2342{
2343 for (unsigned i=0;i<(MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT);i++)
2344 Assert(!(pVM->pgm.s.paDynPageMap32BitPTEsGC[i].u & PGM_PTFLAGS_DYN_LOCKED));
2345}
2346# endif /* VBOX_STRICT */
2347
2348# endif /* IN_RC */
2349#endif /* IN_RC || VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
2350
2351#if !defined(IN_R0) || defined(LOG_ENABLED)
2352
2353/** Format handler for PGMPAGE.
2354 * @copydoc FNRTSTRFORMATTYPE */
2355static DECLCALLBACK(size_t) pgmFormatTypeHandlerPage(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
2356 const char *pszType, void const *pvValue,
2357 int cchWidth, int cchPrecision, unsigned fFlags,
2358 void *pvUser)
2359{
2360 size_t cch;
2361 PCPGMPAGE pPage = (PCPGMPAGE)pvValue;
2362 if (VALID_PTR(pPage))
2363 {
2364 char szTmp[64+80];
2365
2366 cch = 0;
2367
2368 /* The single char state stuff. */
2369 static const char s_achPageStates[4] = { 'Z', 'A', 'W', 'S' };
2370 szTmp[cch++] = s_achPageStates[PGM_PAGE_GET_STATE(pPage)];
2371
2372#define IS_PART_INCLUDED(lvl) ( !(fFlags & RTSTR_F_PRECISION) || cchPrecision == (lvl) || cchPrecision >= (lvl)+10 )
2373 if (IS_PART_INCLUDED(5))
2374 {
2375 static const char s_achHandlerStates[4] = { '-', 't', 'w', 'a' };
2376 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)];
2377 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_VIRT_STATE(pPage)];
2378 }
2379
2380 /* The type. */
2381 if (IS_PART_INCLUDED(4))
2382 {
2383 szTmp[cch++] = ':';
2384 static const char s_achPageTypes[8][4] = { "RAM", "MI2", "M2A", "SHA", "ROM", "MIO", "BAD" };
2385 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE(pPage)][0];
2386 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE(pPage)][1];
2387 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE(pPage)][2];
2388 }
2389
2390 /* The numbers. */
2391 if (IS_PART_INCLUDED(3))
2392 {
2393 szTmp[cch++] = ':';
2394 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_HCPHYS(pPage), 16, 12, 0, RTSTR_F_ZEROPAD | RTSTR_F_64BIT);
2395 }
2396
2397 if (IS_PART_INCLUDED(2))
2398 {
2399 szTmp[cch++] = ':';
2400 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_PAGEID(pPage), 16, 7, 0, RTSTR_F_ZEROPAD | RTSTR_F_32BIT);
2401 }
2402
2403 if (IS_PART_INCLUDED(6))
2404 {
2405 szTmp[cch++] = ':';
2406 static const char s_achRefs[4] = { '-', 'U', '!', 'L' };
2407 szTmp[cch++] = s_achRefs[PGM_PAGE_GET_TD_CREFS(pPage)];
2408 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_TD_IDX(pPage), 16, 4, 0, RTSTR_F_ZEROPAD | RTSTR_F_16BIT);
2409 }
2410#undef IS_PART_INCLUDED
2411
2412 cch = pfnOutput(pvArgOutput, szTmp, cch);
2413 }
2414 else
2415 cch = pfnOutput(pvArgOutput, "<bad-pgmpage-ptr>", sizeof("<bad-pgmpage-ptr>") - 1);
2416 return cch;
2417}
2418
2419
2420/** Format handler for PGMRAMRANGE.
2421 * @copydoc FNRTSTRFORMATTYPE */
2422static DECLCALLBACK(size_t) pgmFormatTypeHandlerRamRange(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
2423 const char *pszType, void const *pvValue,
2424 int cchWidth, int cchPrecision, unsigned fFlags,
2425 void *pvUser)
2426{
2427 size_t cch;
2428 PGMRAMRANGE const *pRam = (PGMRAMRANGE const *)pvValue;
2429 if (VALID_PTR(pRam))
2430 {
2431 char szTmp[80];
2432 cch = RTStrPrintf(szTmp, sizeof(szTmp), "%RGp-%RGp", pRam->GCPhys, pRam->GCPhysLast);
2433 cch = pfnOutput(pvArgOutput, szTmp, cch);
2434 }
2435 else
2436 cch = pfnOutput(pvArgOutput, "<bad-pgmramrange-ptr>", sizeof("<bad-pgmramrange-ptr>") - 1);
2437 return cch;
2438}
2439
2440/** Format type handlers to be registered/deregistered. */
2441static const struct
2442{
2443 char szType[24];
2444 PFNRTSTRFORMATTYPE pfnHandler;
2445} g_aPgmFormatTypes[] =
2446{
2447 { "pgmpage", pgmFormatTypeHandlerPage },
2448 { "pgmramrange", pgmFormatTypeHandlerRamRange }
2449};
2450
2451#endif /* !IN_R0 || LOG_ENABLED */
2452
2453
2454/**
2455 * Registers the global string format types.
2456 *
2457 * This should be called at module load time or in some other manner that ensures
2458 * that it's called exactly one time.
2459 *
2460 * @returns IPRT status code on RTStrFormatTypeRegister failure.
2461 */
2462VMMDECL(int) PGMRegisterStringFormatTypes(void)
2463{
2464#if !defined(IN_R0) || defined(LOG_ENABLED)
2465 int rc = VINF_SUCCESS;
2466 unsigned i;
2467 for (i = 0; RT_SUCCESS(rc) && i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
2468 {
2469 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
2470# ifdef IN_RING0
2471 if (rc == VERR_ALREADY_EXISTS)
2472 {
2473 /* in case of cleanup failure in ring-0 */
2474 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2475 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
2476 }
2477# endif
2478 }
2479 if (RT_FAILURE(rc))
2480 while (i-- > 0)
2481 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2482
2483 return rc;
2484#else
2485 return VINF_SUCCESS;
2486#endif
2487}
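
/*
 * Editorial example (assumption, not original source): once registered, the
 * custom types are consumed through the IPRT %R[type] format extension,
 * typically from log statements.
 */
#if 0 /* illustrative sketch, not compiled */
    Log(("%R[pgmpage] within %R[pgmramrange]\n", pPage, pRam));
#endif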
2488
2489
2490/**
2491 * Deregisters the global string format types.
2492 *
2493 * This should be called at module unload time or in some other manner that
2494 * ensures that it's called exactly one time.
2495 */
2496VMMDECL(void) PGMDeregisterStringFormatTypes(void)
2497{
2498#if !defined(IN_R0) || defined(LOG_ENABLED)
2499 for (unsigned i = 0; i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
2500 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2501#endif
2502}
2503
2504#ifdef VBOX_STRICT
2505
2506/**
2507 * Asserts that there are no mapping conflicts.
2508 *
2509 * @returns Number of conflicts.
2510 * @param pVM The VM Handle.
2511 */
2512VMMDECL(unsigned) PGMAssertNoMappingConflicts(PVM pVM)
2513{
2514 unsigned cErrors = 0;
2515
2516 /*
2517 * Check for mapping conflicts.
2518 */
2519 for (PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
2520 pMapping;
2521 pMapping = pMapping->CTX_SUFF(pNext))
2522 {
2523 /** @todo This is slow and should be optimized, but since it's just assertions I don't care now. */
2524 for (RTGCPTR GCPtr = pMapping->GCPtr;
2525 GCPtr <= pMapping->GCPtrLast;
2526 GCPtr += PAGE_SIZE)
2527 {
2528 int rc = PGMGstGetPage(pVM, (RTGCPTR)GCPtr, NULL, NULL);
2529 if (rc != VERR_PAGE_TABLE_NOT_PRESENT)
2530 {
2531 AssertMsgFailed(("Conflict at %RGv with %s\n", GCPtr, R3STRING(pMapping->pszDesc)));
2532 cErrors++;
2533 break;
2534 }
2535 }
2536 }
2537
2538 return cErrors;
2539}
2540
2541
2542/**
2543 * Asserts that everything related to the guest CR3 is correctly shadowed.
2544 *
2545 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
2546 * and assert the correctness of the guest CR3 mapping before asserting that the
2547 * shadow page tables are in sync with the guest page tables.
2548 *
2549 * @returns Number of conflicts.
2550 * @param pVM The VM Handle.
2551 * @param cr3 The current guest CR3 register value.
2552 * @param cr4 The current guest CR4 register value.
2553 */
2554VMMDECL(unsigned) PGMAssertCR3(PVM pVM, uint64_t cr3, uint64_t cr4)
2555{
2556 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
2557 unsigned cErrors = PGM_BTH_PFN(AssertCR3, pVM)(pVM, cr3, cr4, 0, ~(RTGCPTR)0);
2558 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
2559 return cErrors;
2560}
2561
2562#endif /* VBOX_STRICT */