source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp@26202

Last change on this file since 26202 was 26202, checked in by vboxsync, 15 years ago

Broke up guest page fault and dirty page checking to avoid taking the big pgm lock. (risky change)

1/* $Id: PGMAll.cpp 26202 2010-02-03 15:19:36Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor - All context code.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#define LOG_GROUP LOG_GROUP_PGM
26#include <VBox/pgm.h>
27#include <VBox/cpum.h>
28#include <VBox/selm.h>
29#include <VBox/iom.h>
30#include <VBox/sup.h>
31#include <VBox/mm.h>
32#include <VBox/stam.h>
33#include <VBox/csam.h>
34#include <VBox/patm.h>
35#include <VBox/trpm.h>
36#include <VBox/rem.h>
37#include <VBox/em.h>
38#include <VBox/hwaccm.h>
39#include <VBox/hwacc_vmx.h>
40#include "../PGMInternal.h"
41#include <VBox/vm.h>
42#include "../PGMInline.h"
43#include <iprt/assert.h>
44#include <iprt/asm.h>
45#include <iprt/string.h>
46#include <VBox/log.h>
47#include <VBox/param.h>
48#include <VBox/err.h>
49
50
51/*******************************************************************************
52* Structures and Typedefs *
53*******************************************************************************/
54/**
55 * State structure for PGM_GST_NAME(HandlerVirtualUpdate) that's
56 * passed to PGM_GST_NAME(VirtHandlerUpdateOne) during enumeration.
57 */
58typedef struct PGMHVUSTATE
59{
60 /** The VM handle. */
61 PVM pVM;
62 /** The VMCPU handle. */
63 PVMCPU pVCpu;
64 /** The todo flags. */
65 RTUINT fTodo;
66 /** The CR4 register value. */
67 uint32_t cr4;
68} PGMHVUSTATE, *PPGMHVUSTATE;
69
70
71/*******************************************************************************
72* Internal Functions *
73*******************************************************************************/
74DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
75DECLINLINE(int) pgmShwGetPaePoolPagePD(PPGMCPU pPGM, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde);
76
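/*
 * Note (added for clarity; not part of the original file): the blocks below use
 * the preprocessor as a template mechanism.  PGMAllShw.h, PGMAllGst.h and
 * PGMAllBth.h are included once per shadow/guest paging-mode combination with
 * PGM_SHW_NAME, PGM_GST_NAME and PGM_BTH_NAME redefined each time, so every
 * inclusion emits a separate, mode-specific copy of the shadow, guest and
 * "both" worker functions.  Those copies are later reached through the mode
 * function-pointer tables used by macros such as PGM_BTH_PFN() further down.
 */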
77/*
78 * Shadow - 32-bit mode
79 */
80#define PGM_SHW_TYPE PGM_TYPE_32BIT
81#define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
82#include "PGMAllShw.h"
83
84/* Guest - real mode */
85#define PGM_GST_TYPE PGM_TYPE_REAL
86#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
87#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
88#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
89#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
90#include "PGMGstDefs.h"
91#include "PGMAllGst.h"
92#include "PGMAllBth.h"
93#undef BTH_PGMPOOLKIND_PT_FOR_PT
94#undef BTH_PGMPOOLKIND_ROOT
95#undef PGM_BTH_NAME
96#undef PGM_GST_TYPE
97#undef PGM_GST_NAME
98
99/* Guest - protected mode */
100#define PGM_GST_TYPE PGM_TYPE_PROT
101#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
102#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
103#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
104#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
105#include "PGMGstDefs.h"
106#include "PGMAllGst.h"
107#include "PGMAllBth.h"
108#undef BTH_PGMPOOLKIND_PT_FOR_PT
109#undef BTH_PGMPOOLKIND_ROOT
110#undef PGM_BTH_NAME
111#undef PGM_GST_TYPE
112#undef PGM_GST_NAME
113
114/* Guest - 32-bit mode */
115#define PGM_GST_TYPE PGM_TYPE_32BIT
116#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
117#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
118#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
119#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
120#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD
121#include "PGMGstDefs.h"
122#include "PGMAllGst.h"
123#include "PGMAllBth.h"
124#undef BTH_PGMPOOLKIND_PT_FOR_BIG
125#undef BTH_PGMPOOLKIND_PT_FOR_PT
126#undef BTH_PGMPOOLKIND_ROOT
127#undef PGM_BTH_NAME
128#undef PGM_GST_TYPE
129#undef PGM_GST_NAME
130
131#undef PGM_SHW_TYPE
132#undef PGM_SHW_NAME
133
134
135/*
136 * Shadow - PAE mode
137 */
138#define PGM_SHW_TYPE PGM_TYPE_PAE
139#define PGM_SHW_NAME(name) PGM_SHW_NAME_PAE(name)
140#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
141#include "PGMAllShw.h"
142
143/* Guest - real mode */
144#define PGM_GST_TYPE PGM_TYPE_REAL
145#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
146#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
147#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
148#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
149#include "PGMGstDefs.h"
150#include "PGMAllBth.h"
151#undef BTH_PGMPOOLKIND_PT_FOR_PT
152#undef BTH_PGMPOOLKIND_ROOT
153#undef PGM_BTH_NAME
154#undef PGM_GST_TYPE
155#undef PGM_GST_NAME
156
157/* Guest - protected mode */
158#define PGM_GST_TYPE PGM_TYPE_PROT
159#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
160#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
161#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
162#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
163#include "PGMGstDefs.h"
164#include "PGMAllBth.h"
165#undef BTH_PGMPOOLKIND_PT_FOR_PT
166#undef BTH_PGMPOOLKIND_ROOT
167#undef PGM_BTH_NAME
168#undef PGM_GST_TYPE
169#undef PGM_GST_NAME
170
171/* Guest - 32-bit mode */
172#define PGM_GST_TYPE PGM_TYPE_32BIT
173#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
174#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
175#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
176#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
177#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_FOR_32BIT
178#include "PGMGstDefs.h"
179#include "PGMAllBth.h"
180#undef BTH_PGMPOOLKIND_PT_FOR_BIG
181#undef BTH_PGMPOOLKIND_PT_FOR_PT
182#undef BTH_PGMPOOLKIND_ROOT
183#undef PGM_BTH_NAME
184#undef PGM_GST_TYPE
185#undef PGM_GST_NAME
186
187
188/* Guest - PAE mode */
189#define PGM_GST_TYPE PGM_TYPE_PAE
190#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
191#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
192#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
193#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
194#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT
195#include "PGMGstDefs.h"
196#include "PGMAllGst.h"
197#include "PGMAllBth.h"
198#undef BTH_PGMPOOLKIND_PT_FOR_BIG
199#undef BTH_PGMPOOLKIND_PT_FOR_PT
200#undef BTH_PGMPOOLKIND_ROOT
201#undef PGM_BTH_NAME
202#undef PGM_GST_TYPE
203#undef PGM_GST_NAME
204
205#undef PGM_SHW_TYPE
206#undef PGM_SHW_NAME
207
208
209#ifndef IN_RC /* AMD64 implies VT-x/AMD-V */
210/*
211 * Shadow - AMD64 mode
212 */
213# define PGM_SHW_TYPE PGM_TYPE_AMD64
214# define PGM_SHW_NAME(name) PGM_SHW_NAME_AMD64(name)
215# include "PGMAllShw.h"
216
217/* Guest - protected mode (only used for AMD-V nested paging in 64-bit mode) */
218# define PGM_GST_TYPE PGM_TYPE_PROT
219# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
220# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
221# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
222# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PD_PHYS
223# include "PGMGstDefs.h"
224# include "PGMAllBth.h"
225# undef BTH_PGMPOOLKIND_PT_FOR_PT
226# undef BTH_PGMPOOLKIND_ROOT
227# undef PGM_BTH_NAME
228# undef PGM_GST_TYPE
229# undef PGM_GST_NAME
230
231# ifdef VBOX_WITH_64_BITS_GUESTS
232/* Guest - AMD64 mode */
233# define PGM_GST_TYPE PGM_TYPE_AMD64
234# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
235# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
236# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
237# define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
238# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_64BIT_PML4
239# include "PGMGstDefs.h"
240# include "PGMAllGst.h"
241# include "PGMAllBth.h"
242# undef BTH_PGMPOOLKIND_PT_FOR_BIG
243# undef BTH_PGMPOOLKIND_PT_FOR_PT
244# undef BTH_PGMPOOLKIND_ROOT
245# undef PGM_BTH_NAME
246# undef PGM_GST_TYPE
247# undef PGM_GST_NAME
248# endif /* VBOX_WITH_64_BITS_GUESTS */
249
250# undef PGM_SHW_TYPE
251# undef PGM_SHW_NAME
252
253
254/*
255 * Shadow - Nested paging mode
256 */
257# define PGM_SHW_TYPE PGM_TYPE_NESTED
258# define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED(name)
259# include "PGMAllShw.h"
260
261/* Guest - real mode */
262# define PGM_GST_TYPE PGM_TYPE_REAL
263# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
264# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_REAL(name)
265# include "PGMGstDefs.h"
266# include "PGMAllBth.h"
267# undef PGM_BTH_NAME
268# undef PGM_GST_TYPE
269# undef PGM_GST_NAME
270
271/* Guest - protected mode */
272# define PGM_GST_TYPE PGM_TYPE_PROT
273# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
274# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PROT(name)
275# include "PGMGstDefs.h"
276# include "PGMAllBth.h"
277# undef PGM_BTH_NAME
278# undef PGM_GST_TYPE
279# undef PGM_GST_NAME
280
281/* Guest - 32-bit mode */
282# define PGM_GST_TYPE PGM_TYPE_32BIT
283# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
284# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT(name)
285# include "PGMGstDefs.h"
286# include "PGMAllBth.h"
287# undef PGM_BTH_NAME
288# undef PGM_GST_TYPE
289# undef PGM_GST_NAME
290
291/* Guest - PAE mode */
292# define PGM_GST_TYPE PGM_TYPE_PAE
293# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
294# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE(name)
295# include "PGMGstDefs.h"
296# include "PGMAllBth.h"
297# undef PGM_BTH_NAME
298# undef PGM_GST_TYPE
299# undef PGM_GST_NAME
300
301# ifdef VBOX_WITH_64_BITS_GUESTS
302/* Guest - AMD64 mode */
303# define PGM_GST_TYPE PGM_TYPE_AMD64
304# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
305# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64(name)
306# include "PGMGstDefs.h"
307# include "PGMAllBth.h"
308# undef PGM_BTH_NAME
309# undef PGM_GST_TYPE
310# undef PGM_GST_NAME
311# endif /* VBOX_WITH_64_BITS_GUESTS */
312
313# undef PGM_SHW_TYPE
314# undef PGM_SHW_NAME
315
316
317/*
318 * Shadow - EPT
319 */
320# define PGM_SHW_TYPE PGM_TYPE_EPT
321# define PGM_SHW_NAME(name) PGM_SHW_NAME_EPT(name)
322# include "PGMAllShw.h"
323
324/* Guest - real mode */
325# define PGM_GST_TYPE PGM_TYPE_REAL
326# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
327# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_REAL(name)
328# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
329# include "PGMGstDefs.h"
330# include "PGMAllBth.h"
331# undef BTH_PGMPOOLKIND_PT_FOR_PT
332# undef PGM_BTH_NAME
333# undef PGM_GST_TYPE
334# undef PGM_GST_NAME
335
336/* Guest - protected mode */
337# define PGM_GST_TYPE PGM_TYPE_PROT
338# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
339# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
340# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
341# include "PGMGstDefs.h"
342# include "PGMAllBth.h"
343# undef BTH_PGMPOOLKIND_PT_FOR_PT
344# undef PGM_BTH_NAME
345# undef PGM_GST_TYPE
346# undef PGM_GST_NAME
347
348/* Guest - 32-bit mode */
349# define PGM_GST_TYPE PGM_TYPE_32BIT
350# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
351# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_32BIT(name)
352# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
353# include "PGMGstDefs.h"
354# include "PGMAllBth.h"
355# undef BTH_PGMPOOLKIND_PT_FOR_PT
356# undef PGM_BTH_NAME
357# undef PGM_GST_TYPE
358# undef PGM_GST_NAME
359
360/* Guest - PAE mode */
361# define PGM_GST_TYPE PGM_TYPE_PAE
362# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
363# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PAE(name)
364# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
365# include "PGMGstDefs.h"
366# include "PGMAllBth.h"
367# undef BTH_PGMPOOLKIND_PT_FOR_PT
368# undef PGM_BTH_NAME
369# undef PGM_GST_TYPE
370# undef PGM_GST_NAME
371
372# ifdef VBOX_WITH_64_BITS_GUESTS
373/* Guest - AMD64 mode */
374# define PGM_GST_TYPE PGM_TYPE_AMD64
375# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
376# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_AMD64(name)
377# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
378# include "PGMGstDefs.h"
379# include "PGMAllBth.h"
380# undef BTH_PGMPOOLKIND_PT_FOR_PT
381# undef PGM_BTH_NAME
382# undef PGM_GST_TYPE
383# undef PGM_GST_NAME
384# endif /* VBOX_WITH_64_BITS_GUESTS */
385
386# undef PGM_SHW_TYPE
387# undef PGM_SHW_NAME
388
389#endif /* !IN_RC */
390
391
392#ifndef IN_RING3
393/**
394 * #PF Handler.
395 *
396 * @returns VBox status code (appropriate for trap handling and GC return).
397 * @param pVCpu VMCPU handle.
398 * @param uErr The trap error code.
399 * @param pRegFrame Trap register frame.
400 * @param pvFault The fault address.
401 */
402VMMDECL(int) PGMTrap0eHandler(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
403{
404 PVM pVM = pVCpu->CTX_SUFF(pVM);
405
406 Log(("PGMTrap0eHandler: uErr=%RGu pvFault=%RGv eip=%04x:%RGv\n", uErr, pvFault, pRegFrame->cs, (RTGCPTR)pRegFrame->rip));
407 STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0e, a);
408 STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );
409
410
411#ifdef VBOX_WITH_STATISTICS
412 /*
413 * Error code stats.
414 */
415 if (uErr & X86_TRAP_PF_US)
416 {
417 if (!(uErr & X86_TRAP_PF_P))
418 {
419 if (uErr & X86_TRAP_PF_RW)
420 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSNotPresentWrite);
421 else
422 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSNotPresentRead);
423 }
424 else if (uErr & X86_TRAP_PF_RW)
425 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSWrite);
426 else if (uErr & X86_TRAP_PF_RSVD)
427 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSReserved);
428 else if (uErr & X86_TRAP_PF_ID)
429 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSNXE);
430 else
431 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSRead);
432 }
433 else
434 { /* Supervisor */
435 if (!(uErr & X86_TRAP_PF_P))
436 {
437 if (uErr & X86_TRAP_PF_RW)
438 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eSVNotPresentWrite);
439 else
440 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eSVNotPresentRead);
441 }
442 else if (uErr & X86_TRAP_PF_RW)
443 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eSVWrite);
444 else if (uErr & X86_TRAP_PF_ID)
445 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eSNXE);
446 else if (uErr & X86_TRAP_PF_RSVD)
447 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eSVReserved);
448 }
449#endif /* VBOX_WITH_STATISTICS */
450
451 /*
452 * Call the worker.
453 */
454 bool fLockTaken = false;
455 int rc = PGM_BTH_PFN(Trap0eHandler, pVCpu)(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken);
456 if (fLockTaken)
457 {
458 Assert(PGMIsLockOwner(pVM));
459 pgmUnlock(pVM);
460 }
461 if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
462 rc = VINF_SUCCESS;
463
464# ifdef IN_RING0
465 /* Note: hack alert for difficult to reproduce problem. */
466 if ( rc == VERR_PAGE_NOT_PRESENT /* SMP only ; disassembly might fail. */
467 || rc == VERR_PAGE_TABLE_NOT_PRESENT /* seen with UNI & SMP */
468 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT /* seen with SMP */
469 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT) /* precaution */
470 {
471 Log(("WARNING: Unexpected VERR_PAGE_TABLE_NOT_PRESENT (%d) for page fault at %RGv error code %x (rip=%RGv)\n", rc, pvFault, uErr, pRegFrame->rip));
472 /* Some kind of inconsistency in the SMP case; it's safe to just execute the instruction again; not sure about single VCPU VMs though. */
473 rc = VINF_SUCCESS;
474 }
475# endif
476
477 STAM_STATS({ if (rc == VINF_EM_RAW_GUEST_TRAP) STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eGuestPF); });
478 STAM_STATS({ if (!pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution))
479 pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2Misc; });
480 STAM_PROFILE_STOP_EX(&pVCpu->pgm.s.StatRZTrap0e, pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution), a);
481 return rc;
482}
483#endif /* !IN_RING3 */
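/*
 * Illustrative sketch (added, not part of the original file): a hardware-assisted
 * #PF exit handler would typically forward the fault to PGMTrap0eHandler() roughly
 * like this, where uErrorCode, pCtx and GCPtrFaultAddress stand for values taken
 * from the VT-x/AMD-V exit information:
 *
 *     rc = PGMTrap0eHandler(pVCpu, uErrorCode, CPUMCTX2CORE(pCtx), GCPtrFaultAddress);
 *
 * VINF_SUCCESS means the shadow tables were fixed up and the guest can simply be
 * resumed; any other status is handed back to the VMM for emulation or a return
 * to ring-3.
 */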
484
485
486/**
487 * Prefetch a page
488 *
489 * Typically used to sync commonly used pages before entering raw mode
490 * after a CR3 reload.
491 *
492 * @returns VBox status code suitable for scheduling.
493 * @retval VINF_SUCCESS on success.
494 * @retval VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
495 * @param pVCpu VMCPU handle.
496 * @param GCPtrPage Page to invalidate.
497 */
498VMMDECL(int) PGMPrefetchPage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
499{
500 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,Prefetch), a);
501 int rc = PGM_BTH_PFN(PrefetchPage, pVCpu)(pVCpu, GCPtrPage);
502 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,Prefetch), a);
503 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
504 return rc;
505}
506
507
508/**
509 * Gets the mapping corresponding to the specified address (if any).
510 *
511 * @returns Pointer to the mapping.
512 * @returns NULL if no mapping was found.
513 *
514 * @param pVM The virtual machine.
515 * @param GCPtr The guest context pointer.
516 */
517PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr)
518{
519 PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
520 while (pMapping)
521 {
522 if ((uintptr_t)GCPtr < (uintptr_t)pMapping->GCPtr)
523 break;
524 if ((uintptr_t)GCPtr - (uintptr_t)pMapping->GCPtr < pMapping->cb)
525 return pMapping;
526 pMapping = pMapping->CTX_SUFF(pNext);
527 }
528 return NULL;
529}
530
531
532/**
533 * Verifies a range of pages for read or write access
534 *
535 * Only checks the guest's page tables
536 *
537 * @returns VBox status code.
538 * @param pVCpu VMCPU handle.
539 * @param Addr Guest virtual address to check
540 * @param cbSize Access size
541 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
542 * @remarks Currently not in use.
543 */
544VMMDECL(int) PGMIsValidAccess(PVMCPU pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
545{
546 /*
547 * Validate input.
548 */
549 if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
550 {
551 AssertMsgFailed(("PGMIsValidAccess: invalid access type %08x\n", fAccess));
552 return VERR_INVALID_PARAMETER;
553 }
554
555 uint64_t fPage;
556 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)Addr, &fPage, NULL);
557 if (RT_FAILURE(rc))
558 {
559 Log(("PGMIsValidAccess: access violation for %RGv rc=%d\n", Addr, rc));
560 return VINF_EM_RAW_GUEST_TRAP;
561 }
562
563 /*
564 * Check if the access would cause a page fault
565 *
566 * Note that hypervisor page directories are not present in the guest's tables, so this check
567 * is sufficient.
568 */
569 bool fWrite = !!(fAccess & X86_PTE_RW);
570 bool fUser = !!(fAccess & X86_PTE_US);
571 if ( !(fPage & X86_PTE_P)
572 || (fWrite && !(fPage & X86_PTE_RW))
573 || (fUser && !(fPage & X86_PTE_US)) )
574 {
575 Log(("PGMIsValidAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPage, fWrite, fUser));
576 return VINF_EM_RAW_GUEST_TRAP;
577 }
578 if ( RT_SUCCESS(rc)
579 && PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize))
580 return PGMIsValidAccess(pVCpu, Addr + PAGE_SIZE, (cbSize > PAGE_SIZE) ? cbSize - PAGE_SIZE : 1, fAccess);
581 return rc;
582}
583
584
585/**
586 * Verifies a range of pages for read or write access
587 *
588 * Supports handling of pages marked for dirty bit tracking and CSAM
589 *
590 * @returns VBox status code.
591 * @param pVCpu VMCPU handle.
592 * @param Addr Guest virtual address to check
593 * @param cbSize Access size
594 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
595 */
596VMMDECL(int) PGMVerifyAccess(PVMCPU pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
597{
598 PVM pVM = pVCpu->CTX_SUFF(pVM);
599
600 AssertMsg(!(fAccess & ~(X86_PTE_US | X86_PTE_RW)), ("PGMVerifyAccess: invalid access type %08x\n", fAccess));
601
602 /*
603 * Get going.
604 */
605 uint64_t fPageGst;
606 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)Addr, &fPageGst, NULL);
607 if (RT_FAILURE(rc))
608 {
609 Log(("PGMVerifyAccess: access violation for %RGv rc=%d\n", Addr, rc));
610 return VINF_EM_RAW_GUEST_TRAP;
611 }
612
613 /*
614 * Check if the access would cause a page fault
615 *
616 * Note that hypervisor page directories are not present in the guest's tables, so this check
617 * is sufficient.
618 */
619 const bool fWrite = !!(fAccess & X86_PTE_RW);
620 const bool fUser = !!(fAccess & X86_PTE_US);
621 if ( !(fPageGst & X86_PTE_P)
622 || (fWrite && !(fPageGst & X86_PTE_RW))
623 || (fUser && !(fPageGst & X86_PTE_US)) )
624 {
625 Log(("PGMVerifyAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPageGst, fWrite, fUser));
626 return VINF_EM_RAW_GUEST_TRAP;
627 }
628
629 if (!HWACCMIsNestedPagingActive(pVM))
630 {
631 /*
632 * Next step is to verify if we protected this page for dirty bit tracking or for CSAM scanning
633 */
634 rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, NULL, NULL);
635 if ( rc == VERR_PAGE_NOT_PRESENT
636 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
637 {
638 /*
639 * Page is not present in our page tables.
640 * Try to sync it!
641 */
642 Assert(X86_TRAP_PF_RW == X86_PTE_RW && X86_TRAP_PF_US == X86_PTE_US);
643 uint32_t uErr = fAccess & (X86_TRAP_PF_RW | X86_TRAP_PF_US);
644 rc = PGM_BTH_PFN(VerifyAccessSyncPage, pVCpu)(pVCpu, Addr, fPageGst, uErr);
645 if (rc != VINF_SUCCESS)
646 return rc;
647 }
648 else
649 AssertMsg(rc == VINF_SUCCESS, ("PGMShwGetPage %RGv failed with %Rrc\n", Addr, rc));
650 }
651
652#if 0 /* def VBOX_STRICT; triggers too often now */
653 /*
654 * This check is a bit paranoid, but useful.
655 */
656 /** @note this will assert when writing to monitored pages (a bit annoying actually) */
657 uint64_t fPageShw;
658 rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, &fPageShw, NULL);
659 if ( (rc == VERR_PAGE_NOT_PRESENT || RT_FAILURE(rc))
660 || (fWrite && !(fPageShw & X86_PTE_RW))
661 || (fUser && !(fPageShw & X86_PTE_US)) )
662 {
663 AssertMsgFailed(("Unexpected access violation for %RGv! rc=%Rrc write=%d user=%d\n",
664 Addr, rc, fWrite && !(fPageShw & X86_PTE_RW), fUser && !(fPageShw & X86_PTE_US)));
665 return VINF_EM_RAW_GUEST_TRAP;
666 }
667#endif
668
669 if ( RT_SUCCESS(rc)
670 && ( PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize - 1)
671 || Addr + cbSize < Addr))
672 {
673 /* Don't recursively call PGMVerifyAccess as we might run out of stack. */
674 for (;;)
675 {
676 Addr += PAGE_SIZE;
677 if (cbSize > PAGE_SIZE)
678 cbSize -= PAGE_SIZE;
679 else
680 cbSize = 1;
681 rc = PGMVerifyAccess(pVCpu, Addr, 1, fAccess);
682 if (rc != VINF_SUCCESS)
683 break;
684 if (PAGE_ADDRESS(Addr) == PAGE_ADDRESS(Addr + cbSize - 1))
685 break;
686 }
687 }
688 return rc;
689}
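/*
 * Illustrative sketch (added, not part of the original file): a caller that must
 * ensure a guest user-mode buffer can be written without faulting could check it
 * like this, GCPtrBuf and cbBuf being assumed caller-supplied values:
 *
 *     rc = PGMVerifyAccess(pVCpu, GCPtrBuf, cbBuf, X86_PTE_RW | X86_PTE_US);
 *     if (rc == VINF_EM_RAW_GUEST_TRAP)
 *         ... the access would #PF in the guest, so raise the trap instead ...
 *
 * Unlike PGMIsValidAccess() above, this variant also syncs pages that were only
 * write-protected for dirty-bit tracking or CSAM monitoring.
 */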
690
691
692/**
693 * Emulation of the invlpg instruction (HC only actually).
694 *
695 * @returns VBox status code, special care required.
696 * @retval VINF_PGM_SYNC_CR3 - handled.
697 * @retval VINF_EM_RAW_EMULATE_INSTR - not handled (RC only).
698 * @retval VERR_REM_FLUSHED_PAGES_OVERFLOW - not handled.
699 *
700 * @param pVCpu VMCPU handle.
701 * @param GCPtrPage Page to invalidate.
702 *
703 * @remark ASSUMES the page table entry or page directory is valid. Fairly
704 * safe, but there could be edge cases!
705 *
706 * @todo Flush page or page directory only if necessary!
707 */
708VMMDECL(int) PGMInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
709{
710 PVM pVM = pVCpu->CTX_SUFF(pVM);
711 int rc;
712 Log3(("PGMInvalidatePage: GCPtrPage=%RGv\n", GCPtrPage));
713
714#ifndef IN_RING3
715 /*
716 * Notify the recompiler so it can record this instruction.
717 */
718 REMNotifyInvalidatePage(pVM, GCPtrPage);
719#endif /* !IN_RING3 */
720
721
722#ifdef IN_RC
723 /*
724 * Check for conflicts and pending CR3 monitoring updates.
725 */
726 if (pgmMapAreMappingsFloating(&pVM->pgm.s))
727 {
728 if ( pgmGetMapping(pVM, GCPtrPage)
729 && PGMGstGetPage(pVCpu, GCPtrPage, NULL, NULL) != VERR_PAGE_TABLE_NOT_PRESENT)
730 {
731 LogFlow(("PGMGCInvalidatePage: Conflict!\n"));
732 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
733 STAM_COUNTER_INC(&pVM->pgm.s.StatRCInvlPgConflict);
734 return VINF_PGM_SYNC_CR3;
735 }
736
737 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
738 {
739 LogFlow(("PGMGCInvalidatePage: PGM_SYNC_MONITOR_CR3 -> reinterpret instruction in R3\n"));
740 STAM_COUNTER_INC(&pVM->pgm.s.StatRCInvlPgSyncMonCR3);
741 return VINF_EM_RAW_EMULATE_INSTR;
742 }
743 }
744#endif /* IN_RC */
745
746 /*
747 * Call paging mode specific worker.
748 */
749 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePage), a);
750 pgmLock(pVM);
751 rc = PGM_BTH_PFN(InvalidatePage, pVCpu)(pVCpu, GCPtrPage);
752 pgmUnlock(pVM);
753 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePage), a);
754
755 /* Invalidate the TLB entry; might already be done by InvalidatePage (@todo) */
756 PGM_INVL_PG(pVCpu, GCPtrPage);
757
758#ifdef IN_RING3
759 /*
760 * Check if we have a pending update of the CR3 monitoring.
761 */
762 if ( RT_SUCCESS(rc)
763 && (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3))
764 {
765 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
766 Assert(!pVM->pgm.s.fMappingsFixed); Assert(!pVM->pgm.s.fMappingsDisabled);
767 }
768
769 /*
770 * Inform CSAM about the flush
771 *
772 * Note: This is to check if monitored pages have been changed; when we implement
773 * callbacks for virtual handlers, this is no longer required.
774 */
775 CSAMR3FlushPage(pVM, GCPtrPage);
776#endif /* IN_RING3 */
777
778 /* Ignore all irrelevant error codes. */
779 if ( rc == VERR_PAGE_NOT_PRESENT
780 || rc == VERR_PAGE_TABLE_NOT_PRESENT
781 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT
782 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)
783 rc = VINF_SUCCESS;
784
785 return rc;
786}
787
788
789/**
790 * Executes an instruction using the interpreter.
791 *
792 * @returns VBox status code (appropriate for trap handling and GC return).
793 * @param pVM VM handle.
794 * @param pVCpu VMCPU handle.
795 * @param pRegFrame Register frame.
796 * @param pvFault Fault address.
797 */
798VMMDECL(int) PGMInterpretInstruction(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
799{
800 uint32_t cb;
801 int rc = EMInterpretInstruction(pVM, pVCpu, pRegFrame, pvFault, &cb);
802 if (rc == VERR_EM_INTERPRETER)
803 rc = VINF_EM_RAW_EMULATE_INSTR;
804 if (rc != VINF_SUCCESS)
805 Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%RGv)\n", rc, pvFault));
806 return rc;
807}
808
809
810/**
811 * Gets effective page information (from the VMM page directory).
812 *
813 * @returns VBox status.
814 * @param pVCpu VMCPU handle.
815 * @param GCPtr Guest Context virtual address of the page.
816 * @param pfFlags Where to store the flags. These are X86_PTE_*.
817 * @param pHCPhys Where to store the HC physical address of the page.
818 * This is page aligned.
819 * @remark You should use PGMMapGetPage() for pages in a mapping.
820 */
821VMMDECL(int) PGMShwGetPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
822{
823 pgmLock(pVCpu->CTX_SUFF(pVM));
824 int rc = PGM_SHW_PFN(GetPage, pVCpu)(pVCpu, GCPtr, pfFlags, pHCPhys);
825 pgmUnlock(pVCpu->CTX_SUFF(pVM));
826 return rc;
827}
828
829
830/**
831 * Sets (replaces) the page flags for a range of pages in the shadow context.
832 *
833 * @returns VBox status.
834 * @param pVCpu VMCPU handle.
835 * @param GCPtr The address of the first page.
836 * @param cb The size of the range in bytes.
837 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
838 * @remark You must use PGMMapSetPage() for pages in a mapping.
839 */
840VMMDECL(int) PGMShwSetPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
841{
842 return PGMShwModifyPage(pVCpu, GCPtr, cb, fFlags, 0);
843}
844
845
846/**
847 * Modify page flags for a range of pages in the shadow context.
848 *
849 * The existing flags are ANDed with the fMask and ORed with the fFlags.
850 *
851 * @returns VBox status code.
852 * @param pVCpu VMCPU handle.
853 * @param GCPtr Virtual address of the first page in the range.
854 * @param cb Size (in bytes) of the range to apply the modification to.
855 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
856 * @param fMask The AND mask - page flags X86_PTE_*.
857 * Be very CAREFUL when ~'ing constants which could be 32-bit!
858 * @remark You must use PGMMapModifyPage() for pages in a mapping.
859 */
860VMMDECL(int) PGMShwModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
861{
862 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
863 Assert(cb);
864
865 /*
866 * Align the input.
867 */
868 cb += GCPtr & PAGE_OFFSET_MASK;
869 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
870 GCPtr = (GCPtr & PAGE_BASE_GC_MASK); /** @todo this ain't necessary, right... */
871
872 /*
873 * Call worker.
874 */
875 PVM pVM = pVCpu->CTX_SUFF(pVM);
876 pgmLock(pVM);
877 int rc = PGM_SHW_PFN(ModifyPage, pVCpu)(pVCpu, GCPtr, cb, fFlags, fMask);
878 pgmUnlock(pVM);
879 return rc;
880}
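/*
 * Illustrative sketch (added, not part of the original file): because the existing
 * flags are ANDed with fMask, clearing a bit needs a full 64-bit inverse mask.
 * Write-protecting one shadow page and later making it writable again could look
 * like this:
 *
 *     rc = PGMShwModifyPage(pVCpu, GCPtr, PAGE_SIZE, 0, ~(uint64_t)X86_PTE_RW);
 *     ...
 *     rc = PGMShwModifyPage(pVCpu, GCPtr, PAGE_SIZE, X86_PTE_RW, ~(uint64_t)0);
 *
 * Note the explicit uint64_t cast before ~: a plain ~X86_PTE_RW is a 32-bit value
 * and would also wipe the upper flag bits (e.g. the NX bit) of a PAE/long-mode entry.
 */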
881
882/**
883 * Syncs the shadow page directory for the specified address (PAE); allocates one if not present.
884 *
885 * @returns VBox status code.
886 * @param pVCpu The VMCPU handle.
887 * @param GCPtr The address.
888 * @param pGstPdpe Guest PDPT entry
889 * @param ppPD Receives address of page directory
890 */
891int pgmShwSyncPaePDPtr(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDPE pGstPdpe, PX86PDPAE *ppPD)
892{
893 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
894 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(&pVCpu->pgm.s);
895 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
896 PVM pVM = pVCpu->CTX_SUFF(pVM);
897 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
898 PPGMPOOLPAGE pShwPage;
899 int rc;
900
901 Assert(PGMIsLockOwner(pVM));
902
903 /* Allocate page directory if not present. */
904 if ( !pPdpe->n.u1Present
905 && !(pPdpe->u & X86_PDPE_PG_MASK))
906 {
907 RTGCPTR64 GCPdPt;
908 PGMPOOLKIND enmKind;
909
910# if defined(IN_RC)
911 /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
912 PGMDynLockHCPage(pVM, (uint8_t *)pPdpe);
913# endif
914
915 if (HWACCMIsNestedPagingActive(pVM) || !CPUMIsGuestPagingEnabled(pVCpu))
916 {
917 /* AMD-V nested paging or real/protected mode without paging */
918 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
919 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
920 }
921 else
922 {
923 Assert(pGstPdpe);
924
925 if (CPUMGetGuestCR4(pVCpu) & X86_CR4_PAE)
926 {
927 if (!pGstPdpe->n.u1Present)
928 {
929 /* PD not present; guest must reload CR3 to change it.
930 * No need to monitor anything in this case.
931 */
932 Assert(!HWACCMIsEnabled(pVM));
933
934 GCPdPt = pGstPdpe->u & X86_PDPE_PG_MASK;
935 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
936 pGstPdpe->n.u1Present = 1;
937 }
938 else
939 {
940 GCPdPt = pGstPdpe->u & X86_PDPE_PG_MASK;
941 enmKind = PGMPOOLKIND_PAE_PD_FOR_PAE_PD;
942 }
943 }
944 else
945 {
946 GCPdPt = CPUMGetGuestCR3(pVCpu);
947 enmKind = (PGMPOOLKIND)(PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD + iPdPt);
948 }
949 }
950
951 /* Create a reference back to the PDPT by using the index in its shadow page. */
952 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPdPt, &pShwPage);
953 AssertRCReturn(rc, rc);
954
955 /* The PD was cached or created; hook it up now. */
956 pPdpe->u |= pShwPage->Core.Key
957 | (pGstPdpe->u & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));
958
959# if defined(IN_RC)
960 /* In 32 bits PAE mode we *must* invalidate the TLB when changing a PDPT entry; the CPU fetches them only during cr3 load, so any
961 * non-present PDPT will continue to cause page faults.
962 */
963 ASMReloadCR3();
964 PGMDynUnlockHCPage(pVM, (uint8_t *)pPdpe);
965# endif
966 }
967 else
968 {
969 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
970 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
971 Assert((pPdpe->u & X86_PDPE_PG_MASK) == pShwPage->Core.Key);
972
973 pgmPoolCacheUsed(pPool, pShwPage);
974 }
975 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
976 return VINF_SUCCESS;
977}
978
979
980/**
981 * Gets the pointer to the shadow page directory entry for an address, PAE.
982 *
983 * @returns VBox status code.
984 * @param pPGM Pointer to the PGMCPU instance data.
985 * @param GCPtr The address.
986 * @param ppShwPde Receives the address of the pgm pool page for the shadow page directory
987 */
988DECLINLINE(int) pgmShwGetPaePoolPagePD(PPGMCPU pPGM, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde)
989{
990 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
991 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pPGM);
992
993 Assert(PGMIsLockOwner(PGMCPU2VM(pPGM)));
994
995 AssertReturn(pPdpt, VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT); /* can't happen */
996 if (!pPdpt->a[iPdPt].n.u1Present)
997 {
998 LogFlow(("pgmShwGetPaePoolPagePD: PD %d not present (%RX64)\n", iPdPt, pPdpt->a[iPdPt].u));
999 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1000 }
1001 AssertMsg(pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK, ("GCPtr=%RGv\n", GCPtr));
1002
1003 /* Fetch the pgm pool shadow descriptor. */
1004 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(PGMCPU2PGM(pPGM)->CTX_SUFF(pPool), pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
1005 AssertReturn(pShwPde, VERR_INTERNAL_ERROR);
1006
1007 *ppShwPde = pShwPde;
1008 return VINF_SUCCESS;
1009}
1010
1011#ifndef IN_RC
1012
1013/**
1014 * Syncs the SHADOW page directory pointer for the specified address.
1015 *
1016 * Allocates backing pages in case the PDPT or PML4 entry is missing.
1017 *
1018 * The caller is responsible for making sure the guest has a valid PD before
1019 * calling this function.
1020 *
1021 * @returns VBox status.
1022 * @param pVCpu VMCPU handle.
1023 * @param GCPtr The address.
1024 * @param pGstPml4e Guest PML4 entry
1025 * @param pGstPdpe Guest PDPT entry
1026 * @param ppPD Receives address of page directory
1027 */
1028int pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E pGstPml4e, PX86PDPE pGstPdpe, PX86PDPAE *ppPD)
1029{
1030 PPGMCPU pPGM = &pVCpu->pgm.s;
1031 PVM pVM = pVCpu->CTX_SUFF(pVM);
1032 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1033 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1034 PX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pPGM, iPml4);
1035 bool fNestedPagingOrNoGstPaging = HWACCMIsNestedPagingActive(pVM) || !CPUMIsGuestPagingEnabled(pVCpu);
1036 PPGMPOOLPAGE pShwPage;
1037 int rc;
1038
1039 Assert(PGMIsLockOwner(pVM));
1040
1041 /* Allocate page directory pointer table if not present. */
1042 if ( !pPml4e->n.u1Present
1043 && !(pPml4e->u & X86_PML4E_PG_MASK))
1044 {
1045 RTGCPTR64 GCPml4;
1046 PGMPOOLKIND enmKind;
1047
1048 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1049
1050 if (fNestedPagingOrNoGstPaging)
1051 {
1052 /* AMD-V nested paging or real/protected mode without paging */
1053 GCPml4 = (RTGCPTR64)iPml4 << X86_PML4_SHIFT;
1054 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_PHYS;
1055 }
1056 else
1057 {
1058 Assert(pGstPml4e && pGstPdpe);
1059
1060 GCPml4 = pGstPml4e->u & X86_PML4E_PG_MASK;
1061 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT;
1062 }
1063
1064 /* Create a reference back to the PDPT by using the index in its shadow page. */
1065 rc = pgmPoolAlloc(pVM, GCPml4, enmKind, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, &pShwPage);
1066 AssertRCReturn(rc, rc);
1067 }
1068 else
1069 {
1070 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
1071 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1072
1073 pgmPoolCacheUsed(pPool, pShwPage);
1074 }
1075 /* The PDPT was cached or created; hook it up now. */
1076 pPml4e->u |= pShwPage->Core.Key
1077 | (pGstPml4e->u & ~(X86_PML4E_PG_MASK | X86_PML4E_AVL_MASK | X86_PML4E_PCD | X86_PML4E_PWT));
1078
1079 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1080 PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1081 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1082
1083 /* Allocate page directory if not present. */
1084 if ( !pPdpe->n.u1Present
1085 && !(pPdpe->u & X86_PDPE_PG_MASK))
1086 {
1087 RTGCPTR64 GCPdPt;
1088 PGMPOOLKIND enmKind;
1089
1090 if (fNestedPagingOrNoGstPaging)
1091 {
1092 /* AMD-V nested paging or real/protected mode without paging */
1093 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
1094 enmKind = PGMPOOLKIND_64BIT_PD_FOR_PHYS;
1095 }
1096 else
1097 {
1098 Assert(pGstPdpe);
1099
1100 GCPdPt = pGstPdpe->u & X86_PDPE_PG_MASK;
1101 enmKind = PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD;
1102 }
1103
1104 /* Create a reference back to the PDPT by using the index in its shadow page. */
1105 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, pShwPage->idx, iPdPt, &pShwPage);
1106 AssertRCReturn(rc, rc);
1107 }
1108 else
1109 {
1110 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
1111 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1112
1113 pgmPoolCacheUsed(pPool, pShwPage);
1114 }
1115 /* The PD was cached or created; hook it up now. */
1116 pPdpe->u |= pShwPage->Core.Key
1117 | (pGstPdpe->u & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));
1118
1119 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1120 return VINF_SUCCESS;
1121}
1122
1123
1124/**
1125 * Gets the SHADOW page directory pointer for the specified address (long mode).
1126 *
1127 * @returns VBox status.
1128 * @param pVCpu VMCPU handle.
1129 * @param GCPtr The address.
1130 * @param ppPdpt Receives address of pdpt
1131 * @param ppPD Receives address of page directory
1132 */
1133DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
1134{
1135 PPGMCPU pPGM = &pVCpu->pgm.s;
1136 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1137 PCX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pPGM, iPml4);
1138
1139 Assert(PGMIsLockOwner(PGMCPU2VM(pPGM)));
1140
1141 AssertReturn(pPml4e, VERR_INTERNAL_ERROR);
1142 if (ppPml4e)
1143 *ppPml4e = (PX86PML4E)pPml4e;
1144
1145 Log4(("pgmShwGetLongModePDPtr %RGv (%RHv) %RX64\n", GCPtr, pPml4e, pPml4e->u));
1146
1147 if (!pPml4e->n.u1Present)
1148 return VERR_PAGE_MAP_LEVEL4_NOT_PRESENT;
1149
1150 PVM pVM = pVCpu->CTX_SUFF(pVM);
1151 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1152 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
1153 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1154
1155 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1156 PCX86PDPT pPdpt = *ppPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1157 if (!pPdpt->a[iPdPt].n.u1Present)
1158 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1159
1160 pShwPage = pgmPoolGetPage(pPool, pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
1161 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1162
1163 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1164 return VINF_SUCCESS;
1165}
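/*
 * Worked example (added, not part of the original file): for a canonical long-mode
 * address such as GCPtr = 0x00007FFF12345678 the indexes used above come out as
 *
 *     iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK        = (GCPtr >> 39) & 0x1ff = 0x0ff
 *     iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64  = (GCPtr >> 30) & 0x1ff = 0x1fc
 *
 * i.e. each level consumes 9 bits of the address, and the shadow pool page referenced
 * by the PML4E respectively PDPE is mapped to reach the next level.
 */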
1166
1167
1168/**
1169 * Syncs the SHADOW EPT page directory pointer for the specified address. Allocates
1170 * backing pages in case the PDPT or PML4 entry is missing.
1171 *
1172 * @returns VBox status.
1173 * @param pVCpu VMCPU handle.
1174 * @param GCPtr The address.
1175 * @param ppPdpt Receives address of pdpt
1176 * @param ppPD Receives address of page directory
1177 */
1178int pgmShwGetEPTPDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD)
1179{
1180 PPGMCPU pPGM = &pVCpu->pgm.s;
1181 PVM pVM = pVCpu->CTX_SUFF(pVM);
1182 const unsigned iPml4 = (GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
1183 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1184 PEPTPML4 pPml4;
1185 PEPTPML4E pPml4e;
1186 PPGMPOOLPAGE pShwPage;
1187 int rc;
1188
1189 Assert(HWACCMIsNestedPagingActive(pVM));
1190 Assert(PGMIsLockOwner(pVM));
1191
1192 pPml4 = (PEPTPML4)PGMPOOL_PAGE_2_PTR_BY_PGMCPU(pPGM, pPGM->CTX_SUFF(pShwPageCR3));
1193 Assert(pPml4);
1194
1195 /* Allocate page directory pointer table if not present. */
1196 pPml4e = &pPml4->a[iPml4];
1197 if ( !pPml4e->n.u1Present
1198 && !(pPml4e->u & EPT_PML4E_PG_MASK))
1199 {
1200 Assert(!(pPml4e->u & EPT_PML4E_PG_MASK));
1201 RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT;
1202
1203 rc = pgmPoolAlloc(pVM, GCPml4, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOL_IDX_NESTED_ROOT, iPml4, &pShwPage);
1204 AssertRCReturn(rc, rc);
1205 }
1206 else
1207 {
1208 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
1209 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1210
1211 pgmPoolCacheUsed(pPool, pShwPage);
1212 }
1213 /* The PDPT was cached or created; hook it up now and fill with the default value. */
1214 pPml4e->u = pShwPage->Core.Key;
1215 pPml4e->n.u1Present = 1;
1216 pPml4e->n.u1Write = 1;
1217 pPml4e->n.u1Execute = 1;
1218
1219 const unsigned iPdPt = (GCPtr >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
1220 PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1221 PEPTPDPTE pPdpe = &pPdpt->a[iPdPt];
1222
1223 if (ppPdpt)
1224 *ppPdpt = pPdpt;
1225
1226 /* Allocate page directory if not present. */
1227 if ( !pPdpe->n.u1Present
1228 && !(pPdpe->u & EPT_PDPTE_PG_MASK))
1229 {
1230 RTGCPTR64 GCPdPt = (RTGCPTR64)iPdPt << EPT_PDPT_SHIFT;
1231
1232 rc = pgmPoolAlloc(pVM, GCPdPt, PGMPOOLKIND_64BIT_PD_FOR_PHYS, pShwPage->idx, iPdPt, &pShwPage);
1233 AssertRCReturn(rc, rc);
1234 }
1235 else
1236 {
1237 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & EPT_PDPTE_PG_MASK);
1238 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1239
1240 pgmPoolCacheUsed(pPool, pShwPage);
1241 }
1242 /* The PD was cached or created; hook it up now and fill with the default value. */
1243 pPdpe->u = pShwPage->Core.Key;
1244 pPdpe->n.u1Present = 1;
1245 pPdpe->n.u1Write = 1;
1246 pPdpe->n.u1Execute = 1;
1247
1248 *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1249 return VINF_SUCCESS;
1250}
1251
1252#endif /* !IN_RC */
1253
1254/**
1255 * Gets effective Guest OS page information.
1256 *
1257 * When GCPtr is in a big page, the function will return as if it was a normal
1258 * 4KB page. If the need for distinguishing between big and normal page becomes
1259 * necessary at a later point, a PGMGstGetPage() will be created for that
1260 * purpose.
1261 *
1262 * @returns VBox status.
1263 * @param pVCpu VMCPU handle.
1264 * @param GCPtr Guest Context virtual address of the page.
1265 * @param pfFlags Where to store the flags. These are X86_PTE_*, even for big pages.
1266 * @param pGCPhys Where to store the GC physical address of the page.
1267 * This is page aligned.
1268 */
1269VMMDECL(int) PGMGstGetPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
1270{
1271 return PGM_GST_PFN(GetPage, pVCpu)(pVCpu, GCPtr, pfFlags, pGCPhys);
1272}
1273
1274
1275/**
1276 * Checks if the page is present.
1277 *
1278 * @returns true if the page is present.
1279 * @returns false if the page is not present.
1280 * @param pVCpu VMCPU handle.
1281 * @param GCPtr Address within the page.
1282 */
1283VMMDECL(bool) PGMGstIsPagePresent(PVMCPU pVCpu, RTGCPTR GCPtr)
1284{
1285 int rc = PGMGstGetPage(pVCpu, GCPtr, NULL, NULL);
1286 return RT_SUCCESS(rc);
1287}
1288
1289
1290/**
1291 * Sets (replaces) the page flags for a range of pages in the guest's tables.
1292 *
1293 * @returns VBox status.
1294 * @param pVCpu VMCPU handle.
1295 * @param GCPtr The address of the first page.
1296 * @param cb The size of the range in bytes.
1297 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
1298 */
1299VMMDECL(int) PGMGstSetPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
1300{
1301 return PGMGstModifyPage(pVCpu, GCPtr, cb, fFlags, 0);
1302}
1303
1304
1305/**
1306 * Modify page flags for a range of pages in the guest's tables
1307 *
1308 * The existing flags are ANDed with the fMask and ORed with the fFlags.
1309 *
1310 * @returns VBox status code.
1311 * @param pVCpu VMCPU handle.
1312 * @param GCPtr Virtual address of the first page in the range.
1313 * @param cb Size (in bytes) of the range to apply the modification to.
1314 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
1315 * @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
1316 * Be very CAREFUL when ~'ing constants which could be 32-bit!
1317 */
1318VMMDECL(int) PGMGstModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
1319{
1320 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,GstModifyPage), a);
1321
1322 /*
1323 * Validate input.
1324 */
1325 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
1326 Assert(cb);
1327
1328 LogFlow(("PGMGstModifyPage %RGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));
1329
1330 /*
1331 * Adjust input.
1332 */
1333 cb += GCPtr & PAGE_OFFSET_MASK;
1334 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
1335 GCPtr = (GCPtr & PAGE_BASE_GC_MASK);
1336
1337 /*
1338 * Call worker.
1339 */
1340 int rc = PGM_GST_PFN(ModifyPage, pVCpu)(pVCpu, GCPtr, cb, fFlags, fMask);
1341
1342 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,GstModifyPage), a);
1343 return rc;
1344}
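/*
 * Illustrative sketch (added, not part of the original file): the OR/AND semantics
 * match PGMShwModifyPage() above.  Setting the accessed and dirty bits of the guest
 * PTE mapping GCPtr (an assumed caller-supplied address) would look like this; a cb
 * of 1 still covers the whole page after the alignment done above:
 *
 *     rc = PGMGstModifyPage(pVCpu, GCPtr, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)0);
 *
 * PGMGstSetPage() above is simply this call with an AND mask of 0, i.e. a full
 * replacement of the flag bits.
 */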
1345
1346#ifdef IN_RING3
1347
1348/**
1349 * Performs the lazy mapping of the 32-bit guest PD.
1350 *
1351 * @returns Pointer to the mapping.
1352 * @param pPGM The PGM instance data.
1353 */
1354PX86PD pgmGstLazyMap32BitPD(PPGMCPU pPGM)
1355{
1356 Assert(!pPGM->CTX_SUFF(pGst32BitPd));
1357 PVM pVM = PGMCPU2VM(pPGM);
1358 pgmLock(pVM);
1359
1360 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, pPGM->GCPhysCR3);
1361 AssertReturn(pPage, NULL);
1362
1363 RTHCPTR HCPtrGuestCR3;
1364 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pPGM->GCPhysCR3 & X86_CR3_PAGE_MASK, (void **)&HCPtrGuestCR3);
1365 AssertRCReturn(rc, NULL);
1366
1367 pPGM->pGst32BitPdR3 = (R3PTRTYPE(PX86PD))HCPtrGuestCR3;
1368# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1369 pPGM->pGst32BitPdR0 = (R0PTRTYPE(PX86PD))HCPtrGuestCR3;
1370# endif
1371
1372 pgmUnlock(pVM);
1373 return pPGM->CTX_SUFF(pGst32BitPd);
1374}
1375
1376
1377/**
1378 * Performs the lazy mapping of the PAE guest PDPT.
1379 *
1380 * @returns Pointer to the mapping.
1381 * @param pPGM The PGM instance data.
1382 */
1383PX86PDPT pgmGstLazyMapPaePDPT(PPGMCPU pPGM)
1384{
1385 Assert(!pPGM->CTX_SUFF(pGstPaePdpt));
1386 PVM pVM = PGMCPU2VM(pPGM);
1387 pgmLock(pVM);
1388
1389 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, pPGM->GCPhysCR3);
1390 AssertReturn(pPage, NULL);
1391
1392 RTHCPTR HCPtrGuestCR3;
1393 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pPGM->GCPhysCR3 & X86_CR3_PAE_PAGE_MASK, (void **)&HCPtrGuestCR3); /** @todo r=bird: This GCPhysR3 masking isn't necessary. */
1394 AssertRCReturn(rc, NULL);
1395
1396 pPGM->pGstPaePdptR3 = (R3PTRTYPE(PX86PDPT))HCPtrGuestCR3;
1397# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1398 pPGM->pGstPaePdptR0 = (R0PTRTYPE(PX86PDPT))HCPtrGuestCR3;
1399# endif
1400
1401 pgmUnlock(pVM);
1402 return pPGM->CTX_SUFF(pGstPaePdpt);
1403}
1404
1405#endif /* IN_RING3 */
1406
1407#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1408/**
1409 * Performs the lazy mapping / updating of a PAE guest PD.
1410 *
1411 * @returns Pointer to the mapping.
1412 * @param pPGM The PGM instance data.
1413 * @param iPdpt Which PD entry to map (0..3).
1414 */
1415PX86PDPAE pgmGstLazyMapPaePD(PPGMCPU pPGM, uint32_t iPdpt)
1416{
1417 PVM pVM = PGMCPU2VM(pPGM);
1418 pgmLock(pVM);
1419
1420 PX86PDPT pGuestPDPT = pPGM->CTX_SUFF(pGstPaePdpt);
1421 Assert(pGuestPDPT);
1422 Assert(pGuestPDPT->a[iPdpt].n.u1Present);
1423 RTGCPHYS GCPhys = pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK;
1424 bool const fChanged = pPGM->aGCPhysGstPaePDs[iPdpt] != GCPhys;
1425
1426 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
1427 if (RT_LIKELY(pPage))
1428 {
1429 int rc = VINF_SUCCESS;
1430 RTRCPTR RCPtr = NIL_RTRCPTR;
1431 RTHCPTR HCPtr = NIL_RTHCPTR;
1432#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1433 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &HCPtr);
1434 AssertRC(rc);
1435#endif
1436 if (RT_SUCCESS(rc) && fChanged)
1437 {
1438 RCPtr = (RTRCPTR)(RTRCUINTPTR)(pVM->pgm.s.GCPtrCR3Mapping + (1 + iPdpt) * PAGE_SIZE);
1439 rc = PGMMap(pVM, (RTRCUINTPTR)RCPtr, PGM_PAGE_GET_HCPHYS(pPage), PAGE_SIZE, 0);
1440 }
1441 if (RT_SUCCESS(rc))
1442 {
1443 pPGM->apGstPaePDsR3[iPdpt] = (R3PTRTYPE(PX86PDPAE))HCPtr;
1444# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1445 pPGM->apGstPaePDsR0[iPdpt] = (R0PTRTYPE(PX86PDPAE))HCPtr;
1446# endif
1447 if (fChanged)
1448 {
1449 pPGM->aGCPhysGstPaePDs[iPdpt] = GCPhys;
1450 pPGM->apGstPaePDsRC[iPdpt] = (RCPTRTYPE(PX86PDPAE))RCPtr;
1451 }
1452
1453 pgmUnlock(pVM);
1454 return pPGM->CTX_SUFF(apGstPaePDs)[iPdpt];
1455 }
1456 }
1457
1458 /* Invalid page or some failure, invalidate the entry. */
1459 pPGM->aGCPhysGstPaePDs[iPdpt] = NIL_RTGCPHYS;
1460 pPGM->apGstPaePDsR3[iPdpt] = 0;
1461# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1462 pPGM->apGstPaePDsR0[iPdpt] = 0;
1463# endif
1464 pPGM->apGstPaePDsRC[iPdpt] = 0;
1465
1466 pgmUnlock(pVM);
1467 return NULL;
1468}
1469#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1470
1471
1472#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R3
1473/**
1474 * Performs the lazy mapping of the 32-bit guest PD.
1475 *
1476 * @returns Pointer to the mapping.
1477 * @param pPGM The PGM instance data.
1478 */
1479PX86PML4 pgmGstLazyMapPml4(PPGMCPU pPGM)
1480{
1481 Assert(!pPGM->CTX_SUFF(pGstAmd64Pml4));
1482 PVM pVM = PGMCPU2VM(pPGM);
1483 pgmLock(pVM);
1484
1485 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, pPGM->GCPhysCR3);
1486 AssertReturn(pPage, NULL);
1487
1488 RTHCPTR HCPtrGuestCR3;
1489 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pPGM->GCPhysCR3 & X86_CR3_AMD64_PAGE_MASK, (void **)&HCPtrGuestCR3); /** @todo r=bird: This GCPhysCR3 masking isn't necessary. */
1490 AssertRCReturn(rc, NULL);
1491
1492 pPGM->pGstAmd64Pml4R3 = (R3PTRTYPE(PX86PML4))HCPtrGuestCR3;
1493# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1494 pPGM->pGstAmd64Pml4R0 = (R0PTRTYPE(PX86PML4))HCPtrGuestCR3;
1495# endif
1496
1497 pgmUnlock(pVM);
1498 return pPGM->CTX_SUFF(pGstAmd64Pml4);
1499}
1500#endif /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R3 */
1501
1502
1503/**
1504 * Gets the specified page directory pointer table entry.
1505 *
1506 * @returns PDP entry
1507 * @param pVCpu VMCPU handle.
1508 * @param iPdpt PDPT index
1509 */
1510VMMDECL(X86PDPE) PGMGstGetPaePDPtr(PVMCPU pVCpu, unsigned iPdpt)
1511{
1512 Assert(iPdpt <= 3);
1513 return pgmGstGetPaePDPTPtr(&pVCpu->pgm.s)->a[iPdpt & 3];
1514}
1515
1516
1517/**
1518 * Gets the current CR3 register value for the shadow memory context.
1519 * @returns CR3 value.
1520 * @param pVCpu VMCPU handle.
1521 */
1522VMMDECL(RTHCPHYS) PGMGetHyperCR3(PVMCPU pVCpu)
1523{
1524 PPGMPOOLPAGE pPoolPage = pVCpu->pgm.s.CTX_SUFF(pShwPageCR3);
1525 AssertPtrReturn(pPoolPage, 0);
1526 return pPoolPage->Core.Key;
1527}
1528
1529
1530/**
1531 * Gets the current CR3 register value for the nested memory context.
1532 * @returns CR3 value.
1533 * @param pVCpu VMCPU handle.
1534 */
1535VMMDECL(RTHCPHYS) PGMGetNestedCR3(PVMCPU pVCpu, PGMMODE enmShadowMode)
1536{
1537 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1538 return pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key;
1539}
1540
1541
1542/**
1543 * Gets the current CR3 register value for the HC intermediate memory context.
1544 * @returns CR3 value.
1545 * @param pVM The VM handle.
1546 */
1547VMMDECL(RTHCPHYS) PGMGetInterHCCR3(PVM pVM)
1548{
1549 switch (pVM->pgm.s.enmHostMode)
1550 {
1551 case SUPPAGINGMODE_32_BIT:
1552 case SUPPAGINGMODE_32_BIT_GLOBAL:
1553 return pVM->pgm.s.HCPhysInterPD;
1554
1555 case SUPPAGINGMODE_PAE:
1556 case SUPPAGINGMODE_PAE_GLOBAL:
1557 case SUPPAGINGMODE_PAE_NX:
1558 case SUPPAGINGMODE_PAE_GLOBAL_NX:
1559 return pVM->pgm.s.HCPhysInterPaePDPT;
1560
1561 case SUPPAGINGMODE_AMD64:
1562 case SUPPAGINGMODE_AMD64_GLOBAL:
1563 case SUPPAGINGMODE_AMD64_NX:
1564 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
1565 return pVM->pgm.s.HCPhysInterPaePDPT;
1566
1567 default:
1568 AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode));
1569 return ~0;
1570 }
1571}
1572
1573
1574/**
1575 * Gets the current CR3 register value for the RC intermediate memory context.
1576 * @returns CR3 value.
1577 * @param pVM The VM handle.
1578 * @param pVCpu VMCPU handle.
1579 */
1580VMMDECL(RTHCPHYS) PGMGetInterRCCR3(PVM pVM, PVMCPU pVCpu)
1581{
1582 switch (pVCpu->pgm.s.enmShadowMode)
1583 {
1584 case PGMMODE_32_BIT:
1585 return pVM->pgm.s.HCPhysInterPD;
1586
1587 case PGMMODE_PAE:
1588 case PGMMODE_PAE_NX:
1589 return pVM->pgm.s.HCPhysInterPaePDPT;
1590
1591 case PGMMODE_AMD64:
1592 case PGMMODE_AMD64_NX:
1593 return pVM->pgm.s.HCPhysInterPaePML4;
1594
1595 case PGMMODE_EPT:
1596 case PGMMODE_NESTED:
1597 return 0; /* not relevant */
1598
1599 default:
1600 AssertMsgFailed(("enmShadowMode=%d\n", pVCpu->pgm.s.enmShadowMode));
1601 return ~0;
1602 }
1603}
1604
1605
1606/**
1607 * Gets the CR3 register value for the 32-Bit intermediate memory context.
1608 * @returns CR3 value.
1609 * @param pVM The VM handle.
1610 */
1611VMMDECL(RTHCPHYS) PGMGetInter32BitCR3(PVM pVM)
1612{
1613 return pVM->pgm.s.HCPhysInterPD;
1614}
1615
1616
1617/**
1618 * Gets the CR3 register value for the PAE intermediate memory context.
1619 * @returns CR3 value.
1620 * @param pVM The VM handle.
1621 */
1622VMMDECL(RTHCPHYS) PGMGetInterPaeCR3(PVM pVM)
1623{
1624 return pVM->pgm.s.HCPhysInterPaePDPT;
1625}
1626
1627
1628/**
1629 * Gets the CR3 register value for the AMD64 intermediate memory context.
1630 * @returns CR3 value.
1631 * @param pVM The VM handle.
1632 */
1633VMMDECL(RTHCPHYS) PGMGetInterAmd64CR3(PVM pVM)
1634{
1635 return pVM->pgm.s.HCPhysInterPaePML4;
1636}
1637
1638
1639/**
1640 * Performs and schedules necessary updates following a CR3 load or reload.
1641 *
1642 * This will normally involve mapping the guest PD or nPDPT
1643 *
1644 * @returns VBox status code.
1645 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
1646 * safely be ignored and overridden since the FF will be set too then.
1647 * @param pVCpu VMCPU handle.
1648 * @param cr3 The new cr3.
1649 * @param fGlobal Indicates whether this is a global flush or not.
1650 */
1651VMMDECL(int) PGMFlushTLB(PVMCPU pVCpu, uint64_t cr3, bool fGlobal)
1652{
1653 PVM pVM = pVCpu->CTX_SUFF(pVM);
1654
1655 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLB), a);
1656
1657 /*
1658 * Always flag the necessary updates; necessary for hardware acceleration
1659 */
1660 /** @todo optimize this, it shouldn't always be necessary. */
1661 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
1662 if (fGlobal)
1663 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1664 LogFlow(("PGMFlushTLB: cr3=%RX64 OldCr3=%RX64 fGlobal=%d\n", cr3, pVCpu->pgm.s.GCPhysCR3, fGlobal));
1665
1666 /*
1667 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
1668 */
1669 int rc = VINF_SUCCESS;
1670 RTGCPHYS GCPhysCR3;
1671 switch (pVCpu->pgm.s.enmGuestMode)
1672 {
1673 case PGMMODE_PAE:
1674 case PGMMODE_PAE_NX:
1675 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1676 break;
1677 case PGMMODE_AMD64:
1678 case PGMMODE_AMD64_NX:
1679 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
1680 break;
1681 default:
1682 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1683 break;
1684 }
1685
1686 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
1687 {
1688 RTGCPHYS GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
1689 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
1690 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
1691 if (RT_LIKELY(rc == VINF_SUCCESS))
1692 {
1693 if (pgmMapAreMappingsFloating(&pVM->pgm.s))
1694 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1695 }
1696 else
1697 {
1698 AssertMsg(rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
1699 Assert(VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_PGM_SYNC_CR3));
1700 pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3;
1701 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MAP_CR3;
1702 if (pgmMapAreMappingsFloating(&pVM->pgm.s))
1703 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;
1704 }
1705
1706 if (fGlobal)
1707 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLBNewCR3Global));
1708 else
1709 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLBNewCR3));
1710 }
1711 else
1712 {
1713# ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
1714 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1715 if (pPool->cDirtyPages)
1716 {
1717 pgmLock(pVM);
1718 pgmPoolResetDirtyPages(pVM);
1719 pgmUnlock(pVM);
1720 }
1721# endif
1722 /*
1723 * Check if we have a pending update of the CR3 monitoring.
1724 */
1725 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
1726 {
1727 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1728 Assert(!pVM->pgm.s.fMappingsFixed); Assert(!pVM->pgm.s.fMappingsDisabled);
1729 }
1730 if (fGlobal)
1731 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLBSameCR3Global));
1732 else
1733 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLBSameCR3));
1734 }
1735
1736 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLB), a);
1737 return rc;
1738}
1739
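/*
 * Illustrative sketch, not part of this revision: one way a caller that has just
 * emulated a guest MOV CR3 might invoke PGMFlushTLB.  The helper name and the
 * CPUM accessor used for the PGE check are assumptions for the example.
 *
 *      static int examplePgmOnMovCR3(PVMCPU pVCpu, uint64_t uNewCr3)
 *      {
 *          // Without CR4.PGE every flush is effectively global.
 *          bool const fGlobal = !(CPUMGetGuestCR4(pVCpu) & X86_CR4_PGE);   // assumed accessor
 *          int rc = PGMFlushTLB(pVCpu, uNewCr3, fGlobal);
 *          if (rc == VINF_PGM_SYNC_CR3)
 *              rc = VINF_SUCCESS;  // safe to ignore; the sync FF is pending and serviced later
 *          return rc;
 *      }
 */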
1740
1741/**
1742 * Performs and schedules necessary updates following a CR3 load or reload when
1743 * using nested or extended paging.
1744 *
1745 * This API is an alternative to PGMFlushTLB that avoids actually flushing the
1746 * TLB and triggering a SyncCR3.
1747 *
1748 * This will normally involve mapping the guest PD or nPDPT.
1749 *
1750 * @returns VBox status code.
1751 * @retval VINF_SUCCESS.
1752 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync (only applies when
1753 * not in nested paging mode). This can safely be ignored and overridden
1754 * since the FF will be set as well.
1755 * @param pVCpu VMCPU handle.
1756 * @param cr3 The new cr3.
1757 */
1758VMMDECL(int) PGMUpdateCR3(PVMCPU pVCpu, uint64_t cr3)
1759{
1760 PVM pVM = pVCpu->CTX_SUFF(pVM);
1761
1762 LogFlow(("PGMUpdateCR3: cr3=%RX64 OldCr3=%RX64\n", cr3, pVCpu->pgm.s.GCPhysCR3));
1763
1764 /* We assume we're only called in nested paging mode. */
1765 Assert(HWACCMIsNestedPagingActive(pVM) || pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
1766 Assert(pVM->pgm.s.fMappingsDisabled);
1767 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3));
1768
1769 /*
1770 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
1771 */
1772 int rc = VINF_SUCCESS;
1773 RTGCPHYS GCPhysCR3;
1774 switch (pVCpu->pgm.s.enmGuestMode)
1775 {
1776 case PGMMODE_PAE:
1777 case PGMMODE_PAE_NX:
1778 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1779 break;
1780 case PGMMODE_AMD64:
1781 case PGMMODE_AMD64_NX:
1782 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
1783 break;
1784 default:
1785 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1786 break;
1787 }
1788 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
1789 {
1790 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
1791 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
1792 AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */ /** @todo this isn't true for the mac, but we need hw to test/fix this. */
1793 }
1794 return rc;
1795}
1796
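/*
 * Illustrative sketch, not part of this revision: with nested paging active the
 * CR3 write path can use PGMUpdateCR3 instead of PGMFlushTLB, since the hardware
 * walks the guest tables directly.  The helper name is an assumption.
 *
 *      static int examplePgmOnMovCR3Nested(PVM pVM, PVMCPU pVCpu, uint64_t uNewCr3)
 *      {
 *          if (HWACCMIsNestedPagingActive(pVM))
 *              return PGMUpdateCR3(pVCpu, uNewCr3);        // no TLB flush / SyncCR3 scheduling
 *          return PGMFlushTLB(pVCpu, uNewCr3, false);      // real callers key fGlobal off CR4.PGE
 *      }
 */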
1797
1798/**
1799 * Synchronize the paging structures.
1800 *
1801 * This function is called in response to the VMCPU_FF_PGM_SYNC_CR3 and
1802 * VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL flags. Those two force action flags are set
1803 * in several places, most importantly whenever the CR3 is loaded.
1804 *
1805 * @returns VBox status code.
1806 * @param pVCpu VMCPU handle.
1807 * @param cr0 Guest context CR0 register
1808 * @param cr3 Guest context CR3 register
1809 * @param cr4 Guest context CR4 register
1810 * @param fGlobal Including global page directories or not
1811 */
1812VMMDECL(int) PGMSyncCR3(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
1813{
1814 PVM pVM = pVCpu->CTX_SUFF(pVM);
1815 int rc;
1816
1817 /*
1818 * The pool may have pending stuff and even require a return to ring-3 to
1819 * clear the whole thing.
1820 */
1821 rc = pgmPoolSyncCR3(pVCpu);
1822 if (rc != VINF_SUCCESS)
1823 return rc;
1824
1825 /*
1826 * We might be called when we shouldn't.
1827 *
1828 * The mode switching will ensure that the PD is resynced
1829 * after every mode switch. So, if we find ourselves here
1830 * when in protected or real mode we can safely disable the
1831 * FF and return immediately.
1832 */
1833 if (pVCpu->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
1834 {
1835 Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
1836 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1837 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
1838 return VINF_SUCCESS;
1839 }
1840
1841 /* If global pages are not supported, then all flushes are global. */
1842 if (!(cr4 & X86_CR4_PGE))
1843 fGlobal = true;
1844 LogFlow(("PGMSyncCR3: cr0=%RX64 cr3=%RX64 cr4=%RX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
1845 VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)));
1846
1847 /*
1848 * Check if we need to finish an aborted MapCR3 call (see PGMFlushTLB).
1849 * This should be done before SyncCR3.
1850 */
1851 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MAP_CR3)
1852 {
1853 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MAP_CR3;
1854
1855 RTGCPHYS GCPhysCR3Old = pVCpu->pgm.s.GCPhysCR3;
1856 RTGCPHYS GCPhysCR3;
1857 switch (pVCpu->pgm.s.enmGuestMode)
1858 {
1859 case PGMMODE_PAE:
1860 case PGMMODE_PAE_NX:
1861 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1862 break;
1863 case PGMMODE_AMD64:
1864 case PGMMODE_AMD64_NX:
1865 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
1866 break;
1867 default:
1868 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1869 break;
1870 }
1871
1872 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
1873 {
1874 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
1875 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
1876 }
1877#ifdef IN_RING3
1878 if (rc == VINF_PGM_SYNC_CR3)
1879 rc = pgmPoolSyncCR3(pVCpu);
1880#else
1881 if (rc == VINF_PGM_SYNC_CR3)
1882 {
1883 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3Old;
1884 return rc;
1885 }
1886#endif
1887 AssertRCReturn(rc, rc);
1888 AssertRCSuccessReturn(rc, VERR_INTERNAL_ERROR);
1889 }
1890
1891 /*
1892 * Let the 'Bth' function do the work and we'll just keep track of the flags.
1893 */
1894 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
1895 rc = PGM_BTH_PFN(SyncCR3, pVCpu)(pVCpu, cr0, cr3, cr4, fGlobal);
1896 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
1897 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
1898 if (rc == VINF_SUCCESS)
1899 {
1900 if (!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
1901 {
1902 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1903 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
1904 }
1905
1906 /*
1907 * Check if we have a pending update of the CR3 monitoring.
1908 */
1909 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
1910 {
1911 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1912 Assert(!pVM->pgm.s.fMappingsFixed); Assert(!pVM->pgm.s.fMappingsDisabled);
1913 }
1914 }
1915
1916 /*
1917 * Now flush the CR3 (guest context).
1918 */
1919 if (rc == VINF_SUCCESS)
1920 PGM_INVL_VCPU_TLBS(pVCpu);
1921 return rc;
1922}
1923
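/*
 * Illustrative sketch, not part of this revision: servicing the sync force action
 * flags from an execution loop.  The CPUM accessors are assumptions; the flag
 * checks mirror the ones used in this file.
 *
 *      if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
 *      {
 *          // fGlobal is true when the global sync flag (not just the non-global one) is pending.
 *          int rc = PGMSyncCR3(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR3(pVCpu),
 *                              CPUMGetGuestCR4(pVCpu), VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
 *          if (RT_FAILURE(rc))
 *              return rc;
 *      }
 */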
1924
1925/**
1926 * Called whenever CR0 or CR4 is changed in a way which may affect
1927 * the paging mode.
1928 *
1929 * @returns VBox status code, with the following informational codes for
1930 * VM scheduling.
1931 * @retval VINF_SUCCESS if there was no change, or it was successfully dealt with.
1932 * @retval VINF_PGM_CHANGE_MODE if we're in RC or R0 and the mode changes.
1933 * (I.e. not in R3.)
1934 * @retval VINF_EM_SUSPEND or VINF_EM_OFF on a fatal runtime error. (R3 only)
1935 *
1936 * @param pVCpu VMCPU handle.
1937 * @param cr0 The new cr0.
1938 * @param cr4 The new cr4.
1939 * @param efer The new extended feature enable register.
1940 */
1941VMMDECL(int) PGMChangeMode(PVMCPU pVCpu, uint64_t cr0, uint64_t cr4, uint64_t efer)
1942{
1943 PVM pVM = pVCpu->CTX_SUFF(pVM);
1944 PGMMODE enmGuestMode;
1945
1946 /*
1947 * Calc the new guest mode.
1948 */
1949 if (!(cr0 & X86_CR0_PE))
1950 enmGuestMode = PGMMODE_REAL;
1951 else if (!(cr0 & X86_CR0_PG))
1952 enmGuestMode = PGMMODE_PROTECTED;
1953 else if (!(cr4 & X86_CR4_PAE))
1954 enmGuestMode = PGMMODE_32_BIT;
1955 else if (!(efer & MSR_K6_EFER_LME))
1956 {
1957 if (!(efer & MSR_K6_EFER_NXE))
1958 enmGuestMode = PGMMODE_PAE;
1959 else
1960 enmGuestMode = PGMMODE_PAE_NX;
1961 }
1962 else
1963 {
1964 if (!(efer & MSR_K6_EFER_NXE))
1965 enmGuestMode = PGMMODE_AMD64;
1966 else
1967 enmGuestMode = PGMMODE_AMD64_NX;
1968 }
1969
1970 /*
1971 * Did it change?
1972 */
1973 if (pVCpu->pgm.s.enmGuestMode == enmGuestMode)
1974 return VINF_SUCCESS;
1975
1976 /* Flush the TLB */
1977 PGM_INVL_VCPU_TLBS(pVCpu);
1978
1979#ifdef IN_RING3
1980 return PGMR3ChangeMode(pVM, pVCpu, enmGuestMode);
1981#else
1982 LogFlow(("PGMChangeMode: returns VINF_PGM_CHANGE_MODE.\n"));
1983 return VINF_PGM_CHANGE_MODE;
1984#endif
1985}
1986
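/*
 * Illustrative sketch, not part of this revision: reacting to a control register
 * write that may switch the paging mode.  In RC/R0 the VINF_PGM_CHANGE_MODE status
 * must be propagated so ring-3 can perform the actual mode switch.
 *
 *      int rc = PGMChangeMode(pVCpu, uNewCr0, uNewCr4, uNewEfer);   // uNew* are assumed locals
 *      if (rc == VINF_PGM_CHANGE_MODE)
 *          return rc;      // bail out; PGMR3ChangeMode does the work in ring-3
 *      AssertRC(rc);
 */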
1987
1988/**
1989 * Gets the current guest paging mode.
1990 *
1991 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
1992 *
1993 * @returns The current paging mode.
1994 * @param pVCpu VMCPU handle.
1995 */
1996VMMDECL(PGMMODE) PGMGetGuestMode(PVMCPU pVCpu)
1997{
1998 return pVCpu->pgm.s.enmGuestMode;
1999}
2000
2001
2002/**
2003 * Gets the current shadow paging mode.
2004 *
2005 * @returns The current paging mode.
2006 * @param pVCpu VMCPU handle.
2007 */
2008VMMDECL(PGMMODE) PGMGetShadowMode(PVMCPU pVCpu)
2009{
2010 return pVCpu->pgm.s.enmShadowMode;
2011}
2012
2013/**
2014 * Gets the current host paging mode.
2015 *
2016 * @returns The current paging mode.
2017 * @param pVM The VM handle.
2018 */
2019VMMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
2020{
2021 switch (pVM->pgm.s.enmHostMode)
2022 {
2023 case SUPPAGINGMODE_32_BIT:
2024 case SUPPAGINGMODE_32_BIT_GLOBAL:
2025 return PGMMODE_32_BIT;
2026
2027 case SUPPAGINGMODE_PAE:
2028 case SUPPAGINGMODE_PAE_GLOBAL:
2029 return PGMMODE_PAE;
2030
2031 case SUPPAGINGMODE_PAE_NX:
2032 case SUPPAGINGMODE_PAE_GLOBAL_NX:
2033 return PGMMODE_PAE_NX;
2034
2035 case SUPPAGINGMODE_AMD64:
2036 case SUPPAGINGMODE_AMD64_GLOBAL:
2037 return PGMMODE_AMD64;
2038
2039 case SUPPAGINGMODE_AMD64_NX:
2040 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
2041 return PGMMODE_AMD64_NX;
2042
2043 default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
2044 }
2045
2046 return PGMMODE_INVALID;
2047}
2048
2049
2050/**
2051 * Get mode name.
2052 *
2053 * @returns read-only name string.
2054 * @param enmMode The mode whose name is desired.
2055 */
2056VMMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
2057{
2058 switch (enmMode)
2059 {
2060 case PGMMODE_REAL: return "Real";
2061 case PGMMODE_PROTECTED: return "Protected";
2062 case PGMMODE_32_BIT: return "32-bit";
2063 case PGMMODE_PAE: return "PAE";
2064 case PGMMODE_PAE_NX: return "PAE+NX";
2065 case PGMMODE_AMD64: return "AMD64";
2066 case PGMMODE_AMD64_NX: return "AMD64+NX";
2067 case PGMMODE_NESTED: return "Nested";
2068 case PGMMODE_EPT: return "EPT";
2069 default: return "unknown mode value";
2070 }
2071}
2072
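/*
 * Illustrative sketch, not part of this revision: the mode getters combine
 * naturally with PGMGetModeName for logging (pVM and pVCpu assumed in scope).
 *
 *      Log(("PGM: guest=%s shadow=%s host=%s\n",
 *           PGMGetModeName(PGMGetGuestMode(pVCpu)),
 *           PGMGetModeName(PGMGetShadowMode(pVCpu)),
 *           PGMGetModeName(PGMGetHostMode(pVM))));
 */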
2073
2074/**
2075 * Check if any pgm pool pages are marked dirty (not monitored).
2076 *
2077 * @returns true if there are dirty pages, false if not.
2078 * @param pVM The VM to operate on.
2079 */
2080VMMDECL(bool) PGMHasDirtyPages(PVM pVM)
2081{
2082 return pVM->pgm.s.CTX_SUFF(pPool)->cDirtyPages != 0;
2083}
2084
2085/**
2086 * Check if the PGM lock is currently taken.
2087 *
2088 * @returns bool locked/not locked
2089 * @param pVM The VM to operate on.
2090 */
2091VMMDECL(bool) PGMIsLocked(PVM pVM)
2092{
2093 return PDMCritSectIsOwned(&pVM->pgm.s.CritSect);
2094}
2095
2096
2097/**
2098 * Check if this VCPU currently owns the PGM lock.
2099 *
2100 * @returns bool owner/not owner
2101 * @param pVM The VM to operate on.
2102 */
2103VMMDECL(bool) PGMIsLockOwner(PVM pVM)
2104{
2105 return PDMCritSectIsOwner(&pVM->pgm.s.CritSect);
2106}
2107
2108
2109/**
2110 * Acquire the PGM lock.
2111 *
2112 * @returns VBox status code
2113 * @param pVM The VM to operate on.
2114 */
2115int pgmLock(PVM pVM)
2116{
2117 int rc = PDMCritSectEnter(&pVM->pgm.s.CritSect, VERR_SEM_BUSY);
2118#if defined(IN_RC) || defined(IN_RING0)
2119 if (rc == VERR_SEM_BUSY)
2120 rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_LOCK, 0);
2121#endif
2122 AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc));
2123 return rc;
2124}
2125
2126
2127/**
2128 * Release the PGM lock.
2129 *
2131 * @param pVM The VM to operate on.
2132 */
2133void pgmUnlock(PVM pVM)
2134{
2135 PDMCritSectLeave(&pVM->pgm.s.CritSect);
2136}
2137
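/*
 * Illustrative sketch, not part of this revision: the usual bracket pattern for
 * the PGM lock, as used for the dirty page reset further up in this file.
 *
 *      int rc = pgmLock(pVM);
 *      AssertRCReturn(rc, rc);
 *      // ... touch PGM state that requires the lock ...
 *      pgmUnlock(pVM);
 */
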
2138#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
2139
2140/**
2141 * Temporarily maps one guest page specified by GC physical address.
2142 * These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
2143 *
2144 * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
2145 * reused after 8 mappings (or perhaps a few more if you score with the cache).
2146 *
2147 * @returns VBox status.
2148 * @param pVM VM handle.
2149 * @param GCPhys GC Physical address of the page.
2150 * @param ppv Where to store the address of the mapping.
2151 */
2152VMMDECL(int) PGMDynMapGCPage(PVM pVM, RTGCPHYS GCPhys, void **ppv)
2153{
2154 AssertMsg(!(GCPhys & PAGE_OFFSET_MASK), ("GCPhys=%RGp\n", GCPhys));
2155
2156 /*
2157 * Get the ram range.
2158 */
2159 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
2160 while (pRam && GCPhys - pRam->GCPhys >= pRam->cb)
2161 pRam = pRam->CTX_SUFF(pNext);
2162 if (!pRam)
2163 {
2164 AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
2165 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2166 }
2167
2168 /*
2169 * Pass it on to PGMDynMapHCPage.
2170 */
2171 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT]);
2172 //Log(("PGMDynMapGCPage: GCPhys=%RGp HCPhys=%RHp\n", GCPhys, HCPhys));
2173#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
2174 pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
2175#else
2176 PGMDynMapHCPage(pVM, HCPhys, ppv);
2177#endif
2178 return VINF_SUCCESS;
2179}
2180
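/*
 * Illustrative sketch, not part of this revision: mapping a page-aligned guest
 * physical page and peeking at its first 64-bit entry.  The mapping slots are
 * recycled quickly, so keep the usage short.
 *
 *      void *pv;
 *      int rc = PGMDynMapGCPage(pVM, GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK, &pv);
 *      if (RT_SUCCESS(rc))
 *      {
 *          uint64_t const u64First = *(uint64_t const *)pv;    // e.g. the first PAE table entry
 *          NOREF(u64First);
 *      }
 */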
2181
2182/**
2183 * Temporarily maps one guest page specified by unaligned GC physical address.
2184 * These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
2185 *
2186 * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
2187 * reused after 8 mappings (or perhaps a few more if you score with the cache).
2188 *
2189 * The caller must be aware that only the specified page is mapped and that really bad
2190 * things will happen when writing beyond the page!
2191 *
2192 * @returns VBox status.
2193 * @param pVM VM handle.
2194 * @param GCPhys GC Physical address within the page to be mapped.
2195 * @param ppv Where to store the mapping address corresponding to GCPhys.
2196 */
2197VMMDECL(int) PGMDynMapGCPageOff(PVM pVM, RTGCPHYS GCPhys, void **ppv)
2198{
2199 /*
2200 * Get the ram range.
2201 */
2202 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
2203 while (pRam && GCPhys - pRam->GCPhys >= pRam->cb)
2204 pRam = pRam->CTX_SUFF(pNext);
2205 if (!pRam)
2206 {
2207 AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
2208 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2209 }
2210
2211 /*
2212 * Pass it on to PGMDynMapHCPage.
2213 */
2214 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT]);
2215#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
2216 pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
2217#else
2218 PGMDynMapHCPage(pVM, HCPhys, ppv);
2219#endif
2220 *ppv = (void *)((uintptr_t)*ppv | (GCPhys & PAGE_OFFSET_MASK));
2221 return VINF_SUCCESS;
2222}
2223
2224# ifdef IN_RC
2225
2226/**
2227 * Temporarily maps one host page specified by HC physical address.
2228 *
2229 * Be WARNED that the dynamic page mapping area is small, 16 pages, thus the space is
2230 * reused after 16 mappings (or perhaps a few more if you score with the cache).
2231 *
2232 * @returns VINF_SUCCESS, will bail out to ring-3 on failure.
2233 * @param pVM VM handle.
2234 * @param HCPhys HC Physical address of the page.
2235 * @param ppv Where to store the address of the mapping. This is the
2236 * address of the PAGE, not the exact address corresponding
2237 * to HCPhys. Use PGMDynMapHCPageOff if you care for the
2238 * page offset.
2239 */
2240VMMDECL(int) PGMDynMapHCPage(PVM pVM, RTHCPHYS HCPhys, void **ppv)
2241{
2242 AssertMsg(!(HCPhys & PAGE_OFFSET_MASK), ("HCPhys=%RHp\n", HCPhys));
2243
2244 /*
2245 * Check the cache.
2246 */
2247 register unsigned iCache;
2248 for (iCache = 0; iCache < RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache); iCache++)
2249 {
2250 static const uint8_t au8Trans[MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT][RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache)] =
2251 {
2252 { 0, 9, 10, 11, 12, 13, 14, 15},
2253 { 0, 1, 10, 11, 12, 13, 14, 15},
2254 { 0, 1, 2, 11, 12, 13, 14, 15},
2255 { 0, 1, 2, 3, 12, 13, 14, 15},
2256 { 0, 1, 2, 3, 4, 13, 14, 15},
2257 { 0, 1, 2, 3, 4, 5, 14, 15},
2258 { 0, 1, 2, 3, 4, 5, 6, 15},
2259 { 0, 1, 2, 3, 4, 5, 6, 7},
2260 { 8, 1, 2, 3, 4, 5, 6, 7},
2261 { 8, 9, 2, 3, 4, 5, 6, 7},
2262 { 8, 9, 10, 3, 4, 5, 6, 7},
2263 { 8, 9, 10, 11, 4, 5, 6, 7},
2264 { 8, 9, 10, 11, 12, 5, 6, 7},
2265 { 8, 9, 10, 11, 12, 13, 6, 7},
2266 { 8, 9, 10, 11, 12, 13, 14, 7},
2267 { 8, 9, 10, 11, 12, 13, 14, 15},
2268 };
2269 AssertCompile(RT_ELEMENTS(au8Trans) == 16);
2270 AssertCompile(RT_ELEMENTS(au8Trans[0]) == 8);
2271
2272 if (pVM->pgm.s.aHCPhysDynPageMapCache[iCache] == HCPhys)
2273 {
2274 int iPage = au8Trans[pVM->pgm.s.iDynPageMapLast][iCache];
2275
2276 /* The cache can get out of sync with locked entries. (10 locked, 2 overwrites its cache position, last = 11, lookup 2 -> page 10 instead of 2) */
2277 if ((pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage].u & X86_PTE_PG_MASK) == HCPhys)
2278 {
2279 void *pv = pVM->pgm.s.pbDynPageMapBaseGC + (iPage << PAGE_SHIFT);
2280 *ppv = pv;
2281 STAM_COUNTER_INC(&pVM->pgm.s.StatRCDynMapCacheHits);
2282 Log4(("PGMGCDynMapHCPage: HCPhys=%RHp pv=%p iPage=%d iCache=%d\n", HCPhys, pv, iPage, iCache));
2283 return VINF_SUCCESS;
2284 }
2285 LogFlow(("Out of sync entry %d\n", iPage));
2286 }
2287 }
2288 AssertCompile(RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) == 8);
2289 AssertCompile((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) == 16);
2290 STAM_COUNTER_INC(&pVM->pgm.s.StatRCDynMapCacheMisses);
2291
2292 /*
2293 * Update the page tables.
2294 */
2295 unsigned iPage = pVM->pgm.s.iDynPageMapLast;
2296 unsigned i;
2297 for (i = 0; i < (MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT); i++)
2298 {
2299 pVM->pgm.s.iDynPageMapLast = iPage = (iPage + 1) & ((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) - 1);
2300 if (!pVM->pgm.s.aLockedDynPageMapCache[iPage])
2301 break;
2302 iPage++;
2303 }
2304 AssertRelease(i != (MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT));
2305
2306 pVM->pgm.s.aHCPhysDynPageMapCache[iPage & (RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) - 1)] = HCPhys;
2307 pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage].u = (uint32_t)HCPhys | X86_PTE_P | X86_PTE_A | X86_PTE_D;
2308 pVM->pgm.s.paDynPageMapPaePTEsGC[iPage].u = HCPhys | X86_PTE_P | X86_PTE_A | X86_PTE_D;
2309 pVM->pgm.s.aLockedDynPageMapCache[iPage] = 0;
2310
2311 void *pv = pVM->pgm.s.pbDynPageMapBaseGC + (iPage << PAGE_SHIFT);
2312 *ppv = pv;
2313 ASMInvalidatePage(pv);
2314 Log4(("PGMGCDynMapHCPage: HCPhys=%RHp pv=%p iPage=%d\n", HCPhys, pv, iPage));
2315 return VINF_SUCCESS;
2316}
2317
2318
2319/**
2320 * Temporarily lock a dynamic page to prevent it from being reused.
2321 *
2322 * @param pVM VM handle.
2323 * @param GCPage GC address of page
2324 */
2325VMMDECL(void) PGMDynLockHCPage(PVM pVM, RCPTRTYPE(uint8_t *) GCPage)
2326{
2327 unsigned iPage;
2328
2329 Assert(GCPage >= pVM->pgm.s.pbDynPageMapBaseGC && GCPage < (pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE));
2330 iPage = ((uintptr_t)(GCPage - pVM->pgm.s.pbDynPageMapBaseGC)) >> PAGE_SHIFT;
2331 ASMAtomicIncU32(&pVM->pgm.s.aLockedDynPageMapCache[iPage]);
2332 Log4(("PGMDynLockHCPage %RRv iPage=%d\n", GCPage, iPage));
2333}
2334
2335
2336/**
2337 * Unlock a dynamic page
2338 *
2339 * @param pVM VM handle.
2340 * @param GCPage GC address of page
2341 */
2342VMMDECL(void) PGMDynUnlockHCPage(PVM pVM, RCPTRTYPE(uint8_t *) GCPage)
2343{
2344 unsigned iPage;
2345
2346 AssertCompile(RT_ELEMENTS(pVM->pgm.s.aLockedDynPageMapCache) == 2* RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache));
2347 AssertCompileMemberSize(VM, pgm.s.aLockedDynPageMapCache, sizeof(uint32_t) * (MM_HYPER_DYNAMIC_SIZE >> (PAGE_SHIFT)));
2348
2349 Assert(GCPage >= pVM->pgm.s.pbDynPageMapBaseGC && GCPage < (pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE));
2350 iPage = ((uintptr_t)(GCPage - pVM->pgm.s.pbDynPageMapBaseGC)) >> PAGE_SHIFT;
2351 Assert(pVM->pgm.s.aLockedDynPageMapCache[iPage]);
2352 ASMAtomicDecU32(&pVM->pgm.s.aLockedDynPageMapCache[iPage]);
2353 Log4(("PGMDynUnlockHCPage %RRv iPage=%d\n", GCPage, iPage));
2354}
2355
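/*
 * Illustrative sketch, not part of this revision: locking a dynamic mapping so
 * the slot is not recycled while in use, then releasing it again (RC context,
 * where RCPTRTYPE(uint8_t *) is a plain pointer).
 *
 *      void *pv;
 *      int rc = PGMDynMapHCPage(pVM, HCPhys, &pv);
 *      if (RT_SUCCESS(rc))
 *      {
 *          PGMDynLockHCPage(pVM, (uint8_t *)pv);
 *          // ... use the mapping across code that may create more mappings ...
 *          PGMDynUnlockHCPage(pVM, (uint8_t *)pv);
 *      }
 */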
2356
2357# ifdef VBOX_STRICT
2358/**
2359 * Check for lock leaks.
2360 *
2361 * @param pVM VM handle.
2362 */
2363VMMDECL(void) PGMDynCheckLocks(PVM pVM)
2364{
2365 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.aLockedDynPageMapCache); i++)
2366 Assert(!pVM->pgm.s.aLockedDynPageMapCache[i]);
2367}
2368# endif /* VBOX_STRICT */
2369
2370# endif /* IN_RC */
2371#endif /* IN_RC || VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
2372
2373#if !defined(IN_R0) || defined(LOG_ENABLED)
2374
2375/** Format handler for PGMPAGE.
2376 * @copydoc FNRTSTRFORMATTYPE */
2377static DECLCALLBACK(size_t) pgmFormatTypeHandlerPage(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
2378 const char *pszType, void const *pvValue,
2379 int cchWidth, int cchPrecision, unsigned fFlags,
2380 void *pvUser)
2381{
2382 size_t cch;
2383 PCPGMPAGE pPage = (PCPGMPAGE)pvValue;
2384 if (VALID_PTR(pPage))
2385 {
2386 char szTmp[64+80];
2387
2388 cch = 0;
2389
2390 /* The single char state stuff. */
2391 static const char s_achPageStates[4] = { 'Z', 'A', 'W', 'S' };
2392 szTmp[cch++] = s_achPageStates[PGM_PAGE_GET_STATE(pPage)];
2393
2394#define IS_PART_INCLUDED(lvl) ( !(fFlags & RTSTR_F_PRECISION) || cchPrecision == (lvl) || cchPrecision >= (lvl)+10 )
2395 if (IS_PART_INCLUDED(5))
2396 {
2397 static const char s_achHandlerStates[4] = { '-', 't', 'w', 'a' };
2398 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)];
2399 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_VIRT_STATE(pPage)];
2400 }
2401
2402 /* The type. */
2403 if (IS_PART_INCLUDED(4))
2404 {
2405 szTmp[cch++] = ':';
2406 static const char s_achPageTypes[8][4] = { "INV", "RAM", "MI2", "M2A", "SHA", "ROM", "MIO", "BAD" };
2407 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE(pPage)][0];
2408 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE(pPage)][1];
2409 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE(pPage)][2];
2410 }
2411
2412 /* The numbers. */
2413 if (IS_PART_INCLUDED(3))
2414 {
2415 szTmp[cch++] = ':';
2416 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_HCPHYS(pPage), 16, 12, 0, RTSTR_F_ZEROPAD | RTSTR_F_64BIT);
2417 }
2418
2419 if (IS_PART_INCLUDED(2))
2420 {
2421 szTmp[cch++] = ':';
2422 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_PAGEID(pPage), 16, 7, 0, RTSTR_F_ZEROPAD | RTSTR_F_32BIT);
2423 }
2424
2425 if (IS_PART_INCLUDED(6))
2426 {
2427 szTmp[cch++] = ':';
2428 static const char s_achRefs[4] = { '-', 'U', '!', 'L' };
2429 szTmp[cch++] = s_achRefs[PGM_PAGE_GET_TD_CREFS(pPage)];
2430 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_TD_IDX(pPage), 16, 4, 0, RTSTR_F_ZEROPAD | RTSTR_F_16BIT);
2431 }
2432#undef IS_PART_INCLUDED
2433
2434 cch = pfnOutput(pvArgOutput, szTmp, cch);
2435 }
2436 else
2437 cch = pfnOutput(pvArgOutput, "<bad-pgmpage-ptr>", sizeof("<bad-pgmpage-ptr>") - 1);
2438 return cch;
2439}
2440
2441
2442/** Format handler for PGMRAMRANGE.
2443 * @copydoc FNRTSTRFORMATTYPE */
2444static DECLCALLBACK(size_t) pgmFormatTypeHandlerRamRange(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
2445 const char *pszType, void const *pvValue,
2446 int cchWidth, int cchPrecision, unsigned fFlags,
2447 void *pvUser)
2448{
2449 size_t cch;
2450 PGMRAMRANGE const *pRam = (PGMRAMRANGE const *)pvValue;
2451 if (VALID_PTR(pRam))
2452 {
2453 char szTmp[80];
2454 cch = RTStrPrintf(szTmp, sizeof(szTmp), "%RGp-%RGp", pRam->GCPhys, pRam->GCPhysLast);
2455 cch = pfnOutput(pvArgOutput, szTmp, cch);
2456 }
2457 else
2458 cch = pfnOutput(pvArgOutput, "<bad-pgmramrange-ptr>", sizeof("<bad-pgmramrange-ptr>") - 1);
2459 return cch;
2460}
2461
2462/** Format type handlers to be registered/deregistered. */
2463static const struct
2464{
2465 char szType[24];
2466 PFNRTSTRFORMATTYPE pfnHandler;
2467} g_aPgmFormatTypes[] =
2468{
2469 { "pgmpage", pgmFormatTypeHandlerPage },
2470 { "pgmramrange", pgmFormatTypeHandlerRamRange }
2471};
2472
2473#endif /* !IN_R0 || LOG_ENABLED */
2474
2475
2476/**
2477 * Registers the global string format types.
2478 *
2479 * This should be called at module load time or in some other manner that ensures
2480 * that it's called exactly one time.
2481 *
2482 * @returns IPRT status code on RTStrFormatTypeRegister failure.
2483 */
2484VMMDECL(int) PGMRegisterStringFormatTypes(void)
2485{
2486#if !defined(IN_R0) || defined(LOG_ENABLED)
2487 int rc = VINF_SUCCESS;
2488 unsigned i;
2489 for (i = 0; RT_SUCCESS(rc) && i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
2490 {
2491 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
2492# ifdef IN_RING0
2493 if (rc == VERR_ALREADY_EXISTS)
2494 {
2495 /* in case of cleanup failure in ring-0 */
2496 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2497 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
2498 }
2499# endif
2500 }
2501 if (RT_FAILURE(rc))
2502 while (i-- > 0)
2503 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2504
2505 return rc;
2506#else
2507 return VINF_SUCCESS;
2508#endif
2509}
2510
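/*
 * Illustrative sketch, not part of this revision: once registered, the custom
 * types are referenced through the IPRT %R[type] specifier in log statements
 * (specifier usage assumed from the IPRT string formatter conventions).
 *
 *      PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
 *      if (pRam)
 *          Log(("ram range %R[pgmramrange], first page %R[pgmpage]\n", pRam, &pRam->aPages[0]));
 */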
2511
2512/**
2513 * Deregisters the global string format types.
2514 *
2515 * This should be called at module unload time or in some other manner that
2516 * ensures that it's called exactly one time.
2517 */
2518VMMDECL(void) PGMDeregisterStringFormatTypes(void)
2519{
2520#if !defined(IN_R0) || defined(LOG_ENABLED)
2521 for (unsigned i = 0; i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
2522 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2523#endif
2524}
2525
2526#ifdef VBOX_STRICT
2527
2528/**
2529 * Asserts that there are no mapping conflicts.
2530 *
2531 * @returns Number of conflicts.
2532 * @param pVM The VM Handle.
2533 */
2534VMMDECL(unsigned) PGMAssertNoMappingConflicts(PVM pVM)
2535{
2536 unsigned cErrors = 0;
2537
2538 /* Only applies to raw mode -> 1 VCPU */
2539 Assert(pVM->cCpus == 1);
2540 PVMCPU pVCpu = &pVM->aCpus[0];
2541
2542 /*
2543 * Check for mapping conflicts.
2544 */
2545 for (PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
2546 pMapping;
2547 pMapping = pMapping->CTX_SUFF(pNext))
2548 {
2549 /** @todo This is slow and should be optimized, but since it's just assertions I don't care now. */
2550 for (RTGCPTR GCPtr = pMapping->GCPtr;
2551 GCPtr <= pMapping->GCPtrLast;
2552 GCPtr += PAGE_SIZE)
2553 {
2554 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)GCPtr, NULL, NULL);
2555 if (rc != VERR_PAGE_TABLE_NOT_PRESENT)
2556 {
2557 AssertMsgFailed(("Conflict at %RGv with %s\n", GCPtr, R3STRING(pMapping->pszDesc)));
2558 cErrors++;
2559 break;
2560 }
2561 }
2562 }
2563
2564 return cErrors;
2565}
2566
2567
2568/**
2569 * Asserts that everything related to the guest CR3 is correctly shadowed.
2570 *
2571 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
2572 * and assert the correctness of the guest CR3 mapping before asserting that the
2573 * shadow page tables are in sync with the guest page tables.
2574 *
2575 * @returns Number of conflicts.
2576 * @param pVM The VM Handle.
2577 * @param pVCpu VMCPU handle.
2578 * @param cr3 The current guest CR3 register value.
2579 * @param cr4 The current guest CR4 register value.
2580 */
2581VMMDECL(unsigned) PGMAssertCR3(PVM pVM, PVMCPU pVCpu, uint64_t cr3, uint64_t cr4)
2582{
2583 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
2584 pgmLock(pVM);
2585 unsigned cErrors = PGM_BTH_PFN(AssertCR3, pVCpu)(pVCpu, cr3, cr4, 0, ~(RTGCPTR)0);
2586 pgmUnlock(pVM);
2587 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
2588 return cErrors;
2589}
2590
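/*
 * Illustrative sketch, not part of this revision: a strict-build consistency
 * check after a CR3 sync.  The CPUM accessors are assumptions for the example.
 *
 *      unsigned cErrors = PGMAssertNoMappingConflicts(pVM);
 *      cErrors += PGMAssertCR3(pVM, pVCpu, CPUMGetGuestCR3(pVCpu), CPUMGetGuestCR4(pVCpu));
 *      Assert(!cErrors);
 */
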
2591#endif /* VBOX_STRICT */