VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp@41451

Last change on this file since 41451 was 41391, checked in by vboxsync on 2012-05-22

PGM: A quick stab at correct A20 gate masking (new code is disabled).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 86.6 KB
1/* $Id: PGMAll.cpp 41391 2012-05-22 14:06:53Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor - All context code.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_PGM
22#include <VBox/vmm/pgm.h>
23#include <VBox/vmm/cpum.h>
24#include <VBox/vmm/selm.h>
25#include <VBox/vmm/iom.h>
26#include <VBox/sup.h>
27#include <VBox/vmm/mm.h>
28#include <VBox/vmm/stam.h>
29#include <VBox/vmm/csam.h>
30#include <VBox/vmm/patm.h>
31#include <VBox/vmm/trpm.h>
32#ifdef VBOX_WITH_REM
33# include <VBox/vmm/rem.h>
34#endif
35#include <VBox/vmm/em.h>
36#include <VBox/vmm/hwaccm.h>
37#include <VBox/vmm/hwacc_vmx.h>
38#include "PGMInternal.h"
39#include <VBox/vmm/vm.h>
40#include "PGMInline.h"
41#include <iprt/assert.h>
42#include <iprt/asm-amd64-x86.h>
43#include <iprt/string.h>
44#include <VBox/log.h>
45#include <VBox/param.h>
46#include <VBox/err.h>
47
48
49/*******************************************************************************
50* Structures and Typedefs *
51*******************************************************************************/
52/**
53 * State structure for PGM_GST_NAME(HandlerVirtualUpdate) that's
54 * passed to PGM_GST_NAME(VirtHandlerUpdateOne) during enumeration.
55 */
56typedef struct PGMHVUSTATE
57{
58 /** The VM handle. */
59 PVM pVM;
60 /** The VMCPU handle. */
61 PVMCPU pVCpu;
62 /** The todo flags. */
63 RTUINT fTodo;
64 /** The CR4 register value. */
65 uint32_t cr4;
66} PGMHVUSTATE, *PPGMHVUSTATE;
67
68
69/*******************************************************************************
70* Internal Functions *
71*******************************************************************************/
72DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
73DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPU pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde);
74#ifndef IN_RC
75static int pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD);
76static int pgmShwGetEPTPDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD);
77#endif
78
79
80/*
81 * Shadow - 32-bit mode
82 */
83#define PGM_SHW_TYPE PGM_TYPE_32BIT
84#define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
85#include "PGMAllShw.h"
86
87/* Guest - real mode */
88#define PGM_GST_TYPE PGM_TYPE_REAL
89#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
90#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
91#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
92#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
93#include "PGMGstDefs.h"
94#include "PGMAllGst.h"
95#include "PGMAllBth.h"
96#undef BTH_PGMPOOLKIND_PT_FOR_PT
97#undef BTH_PGMPOOLKIND_ROOT
98#undef PGM_BTH_NAME
99#undef PGM_GST_TYPE
100#undef PGM_GST_NAME
101
102/* Guest - protected mode */
103#define PGM_GST_TYPE PGM_TYPE_PROT
104#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
105#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
106#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
107#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
108#include "PGMGstDefs.h"
109#include "PGMAllGst.h"
110#include "PGMAllBth.h"
111#undef BTH_PGMPOOLKIND_PT_FOR_PT
112#undef BTH_PGMPOOLKIND_ROOT
113#undef PGM_BTH_NAME
114#undef PGM_GST_TYPE
115#undef PGM_GST_NAME
116
117/* Guest - 32-bit mode */
118#define PGM_GST_TYPE PGM_TYPE_32BIT
119#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
120#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
121#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
122#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
123#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD
124#include "PGMGstDefs.h"
125#include "PGMAllGst.h"
126#include "PGMAllBth.h"
127#undef BTH_PGMPOOLKIND_PT_FOR_BIG
128#undef BTH_PGMPOOLKIND_PT_FOR_PT
129#undef BTH_PGMPOOLKIND_ROOT
130#undef PGM_BTH_NAME
131#undef PGM_GST_TYPE
132#undef PGM_GST_NAME
133
134#undef PGM_SHW_TYPE
135#undef PGM_SHW_NAME
136
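/*
 * Illustrative sketch (not from PGMAll.cpp): the instantiation blocks above
 * and below implement a "poor man's template" in C.  The same body headers
 * (PGMAllShw.h, PGMAllGst.h, PGMAllBth.h) are included once per shadow/guest
 * mode pair with PGM_SHW_NAME/PGM_GST_NAME/PGM_BTH_NAME redefined each time,
 * so one source body yields a distinct set of functions per paging mode.
 * The standalone demo below shows the same idiom with hypothetical names
 * (TMPL_BODY, NAME, tmpl32Bit_*, tmplPae_*); it is not VirtualBox code.
 */
#if 0 /* illustrative only */
#include <stdio.h>

/* The "body" that a real template header would contain, parameterized by NAME(). */
#define TMPL_BODY() \
    static int NAME(getLevels)(void) { return TMPL_LEVELS; }

/* First instantiation: a hypothetical 32-bit mode with 2 paging levels. */
#define NAME(name)  tmpl32Bit_##name
#define TMPL_LEVELS 2
TMPL_BODY()
#undef TMPL_LEVELS
#undef NAME

/* Second instantiation: a hypothetical PAE mode with 3 paging levels. */
#define NAME(name)  tmplPae_##name
#define TMPL_LEVELS 3
TMPL_BODY()
#undef TMPL_LEVELS
#undef NAME

int main(void)
{
    /* Each expansion produced an independently named function. */
    printf("32-bit: %d levels, PAE: %d levels\n",
           tmpl32Bit_getLevels(), tmplPae_getLevels());
    return 0;
}
#endif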
137
138/*
139 * Shadow - PAE mode
140 */
141#define PGM_SHW_TYPE PGM_TYPE_PAE
142#define PGM_SHW_NAME(name) PGM_SHW_NAME_PAE(name)
143#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
144#include "PGMAllShw.h"
145
146/* Guest - real mode */
147#define PGM_GST_TYPE PGM_TYPE_REAL
148#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
149#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
150#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
151#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
152#include "PGMGstDefs.h"
153#include "PGMAllBth.h"
154#undef BTH_PGMPOOLKIND_PT_FOR_PT
155#undef BTH_PGMPOOLKIND_ROOT
156#undef PGM_BTH_NAME
157#undef PGM_GST_TYPE
158#undef PGM_GST_NAME
159
160/* Guest - protected mode */
161#define PGM_GST_TYPE PGM_TYPE_PROT
162#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
163#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
164#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
165#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
166#include "PGMGstDefs.h"
167#include "PGMAllBth.h"
168#undef BTH_PGMPOOLKIND_PT_FOR_PT
169#undef BTH_PGMPOOLKIND_ROOT
170#undef PGM_BTH_NAME
171#undef PGM_GST_TYPE
172#undef PGM_GST_NAME
173
174/* Guest - 32-bit mode */
175#define PGM_GST_TYPE PGM_TYPE_32BIT
176#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
177#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
178#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
179#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
180#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_FOR_32BIT
181#include "PGMGstDefs.h"
182#include "PGMAllBth.h"
183#undef BTH_PGMPOOLKIND_PT_FOR_BIG
184#undef BTH_PGMPOOLKIND_PT_FOR_PT
185#undef BTH_PGMPOOLKIND_ROOT
186#undef PGM_BTH_NAME
187#undef PGM_GST_TYPE
188#undef PGM_GST_NAME
189
190
191/* Guest - PAE mode */
192#define PGM_GST_TYPE PGM_TYPE_PAE
193#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
194#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
195#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
196#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
197#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT
198#include "PGMGstDefs.h"
199#include "PGMAllGst.h"
200#include "PGMAllBth.h"
201#undef BTH_PGMPOOLKIND_PT_FOR_BIG
202#undef BTH_PGMPOOLKIND_PT_FOR_PT
203#undef BTH_PGMPOOLKIND_ROOT
204#undef PGM_BTH_NAME
205#undef PGM_GST_TYPE
206#undef PGM_GST_NAME
207
208#undef PGM_SHW_TYPE
209#undef PGM_SHW_NAME
210
211
212#ifndef IN_RC /* AMD64 implies VT-x/AMD-V */
213/*
214 * Shadow - AMD64 mode
215 */
216# define PGM_SHW_TYPE PGM_TYPE_AMD64
217# define PGM_SHW_NAME(name) PGM_SHW_NAME_AMD64(name)
218# include "PGMAllShw.h"
219
220/* Guest - protected mode (only used for AMD-V nested paging in 64-bit mode) */
221# define PGM_GST_TYPE PGM_TYPE_PROT
222# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
223# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
224# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
225# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PD_PHYS
226# include "PGMGstDefs.h"
227# include "PGMAllBth.h"
228# undef BTH_PGMPOOLKIND_PT_FOR_PT
229# undef BTH_PGMPOOLKIND_ROOT
230# undef PGM_BTH_NAME
231# undef PGM_GST_TYPE
232# undef PGM_GST_NAME
233
234# ifdef VBOX_WITH_64_BITS_GUESTS
235/* Guest - AMD64 mode */
236# define PGM_GST_TYPE PGM_TYPE_AMD64
237# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
238# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
239# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
240# define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
241# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_64BIT_PML4
242# include "PGMGstDefs.h"
243# include "PGMAllGst.h"
244# include "PGMAllBth.h"
245# undef BTH_PGMPOOLKIND_PT_FOR_BIG
246# undef BTH_PGMPOOLKIND_PT_FOR_PT
247# undef BTH_PGMPOOLKIND_ROOT
248# undef PGM_BTH_NAME
249# undef PGM_GST_TYPE
250# undef PGM_GST_NAME
251# endif /* VBOX_WITH_64_BITS_GUESTS */
252
253# undef PGM_SHW_TYPE
254# undef PGM_SHW_NAME
255
256
257/*
258 * Shadow - Nested paging mode
259 */
260# define PGM_SHW_TYPE PGM_TYPE_NESTED
261# define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED(name)
262# include "PGMAllShw.h"
263
264/* Guest - real mode */
265# define PGM_GST_TYPE PGM_TYPE_REAL
266# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
267# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_REAL(name)
268# include "PGMGstDefs.h"
269# include "PGMAllBth.h"
270# undef PGM_BTH_NAME
271# undef PGM_GST_TYPE
272# undef PGM_GST_NAME
273
274/* Guest - protected mode */
275# define PGM_GST_TYPE PGM_TYPE_PROT
276# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
277# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PROT(name)
278# include "PGMGstDefs.h"
279# include "PGMAllBth.h"
280# undef PGM_BTH_NAME
281# undef PGM_GST_TYPE
282# undef PGM_GST_NAME
283
284/* Guest - 32-bit mode */
285# define PGM_GST_TYPE PGM_TYPE_32BIT
286# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
287# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT(name)
288# include "PGMGstDefs.h"
289# include "PGMAllBth.h"
290# undef PGM_BTH_NAME
291# undef PGM_GST_TYPE
292# undef PGM_GST_NAME
293
294/* Guest - PAE mode */
295# define PGM_GST_TYPE PGM_TYPE_PAE
296# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
297# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE(name)
298# include "PGMGstDefs.h"
299# include "PGMAllBth.h"
300# undef PGM_BTH_NAME
301# undef PGM_GST_TYPE
302# undef PGM_GST_NAME
303
304# ifdef VBOX_WITH_64_BITS_GUESTS
305/* Guest - AMD64 mode */
306# define PGM_GST_TYPE PGM_TYPE_AMD64
307# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
308# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64(name)
309# include "PGMGstDefs.h"
310# include "PGMAllBth.h"
311# undef PGM_BTH_NAME
312# undef PGM_GST_TYPE
313# undef PGM_GST_NAME
314# endif /* VBOX_WITH_64_BITS_GUESTS */
315
316# undef PGM_SHW_TYPE
317# undef PGM_SHW_NAME
318
319
320/*
321 * Shadow - EPT
322 */
323# define PGM_SHW_TYPE PGM_TYPE_EPT
324# define PGM_SHW_NAME(name) PGM_SHW_NAME_EPT(name)
325# include "PGMAllShw.h"
326
327/* Guest - real mode */
328# define PGM_GST_TYPE PGM_TYPE_REAL
329# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
330# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_REAL(name)
331# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
332# include "PGMGstDefs.h"
333# include "PGMAllBth.h"
334# undef BTH_PGMPOOLKIND_PT_FOR_PT
335# undef PGM_BTH_NAME
336# undef PGM_GST_TYPE
337# undef PGM_GST_NAME
338
339/* Guest - protected mode */
340# define PGM_GST_TYPE PGM_TYPE_PROT
341# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
342# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
343# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
344# include "PGMGstDefs.h"
345# include "PGMAllBth.h"
346# undef BTH_PGMPOOLKIND_PT_FOR_PT
347# undef PGM_BTH_NAME
348# undef PGM_GST_TYPE
349# undef PGM_GST_NAME
350
351/* Guest - 32-bit mode */
352# define PGM_GST_TYPE PGM_TYPE_32BIT
353# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
354# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_32BIT(name)
355# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
356# include "PGMGstDefs.h"
357# include "PGMAllBth.h"
358# undef BTH_PGMPOOLKIND_PT_FOR_PT
359# undef PGM_BTH_NAME
360# undef PGM_GST_TYPE
361# undef PGM_GST_NAME
362
363/* Guest - PAE mode */
364# define PGM_GST_TYPE PGM_TYPE_PAE
365# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
366# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PAE(name)
367# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
368# include "PGMGstDefs.h"
369# include "PGMAllBth.h"
370# undef BTH_PGMPOOLKIND_PT_FOR_PT
371# undef PGM_BTH_NAME
372# undef PGM_GST_TYPE
373# undef PGM_GST_NAME
374
375# ifdef VBOX_WITH_64_BITS_GUESTS
376/* Guest - AMD64 mode */
377# define PGM_GST_TYPE PGM_TYPE_AMD64
378# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
379# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_AMD64(name)
380# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
381# include "PGMGstDefs.h"
382# include "PGMAllBth.h"
383# undef BTH_PGMPOOLKIND_PT_FOR_PT
384# undef PGM_BTH_NAME
385# undef PGM_GST_TYPE
386# undef PGM_GST_NAME
387# endif /* VBOX_WITH_64_BITS_GUESTS */
388
389# undef PGM_SHW_TYPE
390# undef PGM_SHW_NAME
391
392#endif /* !IN_RC */
393
394
395#ifndef IN_RING3
396/**
397 * #PF Handler.
398 *
399 * @returns VBox status code (appropriate for trap handling and GC return).
400 * @param pVCpu VMCPU handle.
401 * @param uErr The trap error code.
402 * @param pRegFrame Trap register frame.
403 * @param pvFault The fault address.
404 */
405VMMDECL(int) PGMTrap0eHandler(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
406{
407 PVM pVM = pVCpu->CTX_SUFF(pVM);
408
409 Log(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv eip=%04x:%RGv cr3=%RGp\n", uErr, pvFault, pRegFrame->cs, (RTGCPTR)pRegFrame->rip, (RTGCPHYS)CPUMGetGuestCR3(pVCpu)));
410 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0e, a);
411 STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );
412
413
414#ifdef VBOX_WITH_STATISTICS
415 /*
416 * Error code stats.
417 */
418 if (uErr & X86_TRAP_PF_US)
419 {
420 if (!(uErr & X86_TRAP_PF_P))
421 {
422 if (uErr & X86_TRAP_PF_RW)
423 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentWrite);
424 else
425 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentRead);
426 }
427 else if (uErr & X86_TRAP_PF_RW)
428 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSWrite);
429 else if (uErr & X86_TRAP_PF_RSVD)
430 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSReserved);
431 else if (uErr & X86_TRAP_PF_ID)
432 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNXE);
433 else
434 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSRead);
435 }
436 else
437 { /* Supervisor */
438 if (!(uErr & X86_TRAP_PF_P))
439 {
440 if (uErr & X86_TRAP_PF_RW)
441 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentWrite);
442 else
443 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentRead);
444 }
445 else if (uErr & X86_TRAP_PF_RW)
446 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVWrite);
447 else if (uErr & X86_TRAP_PF_ID)
448 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSNXE);
449 else if (uErr & X86_TRAP_PF_RSVD)
450 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVReserved);
451 }
452#endif /* VBOX_WITH_STATISTICS */
453
454 /*
455 * Call the worker.
456 */
457 bool fLockTaken = false;
458 int rc = PGM_BTH_PFN(Trap0eHandler, pVCpu)(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken);
459 if (fLockTaken)
460 {
461 PGM_LOCK_ASSERT_OWNER(pVM);
462 pgmUnlock(pVM);
463 }
464 LogFlow(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv rc=%Rrc\n", uErr, pvFault, rc));
465
466 /*
467 * Return code tweaks.
468 */
469 if (rc != VINF_SUCCESS)
470 {
471 if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
472 rc = VINF_SUCCESS;
473
474# ifdef IN_RING0
475 /* Note: hack alert for difficult to reproduce problem. */
476 if ( rc == VERR_PAGE_NOT_PRESENT /* SMP only ; disassembly might fail. */
477 || rc == VERR_PAGE_TABLE_NOT_PRESENT /* seen with UNI & SMP */
478 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT /* seen with SMP */
479 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT) /* precaution */
480 {
481 Log(("WARNING: Unexpected VERR_PAGE_TABLE_NOT_PRESENT (%d) for page fault at %RGv error code %x (rip=%RGv)\n", rc, pvFault, uErr, pRegFrame->rip));
482 /* Some kind of inconsistency in the SMP case; it's safe to just execute the instruction again; not sure about single VCPU VMs though. */
483 rc = VINF_SUCCESS;
484 }
485# endif
486 }
487
488 STAM_STATS({ if (rc == VINF_EM_RAW_GUEST_TRAP) STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eGuestPF); });
489 STAM_STATS({ if (!pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution))
490 pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2Misc; });
491 STAM_PROFILE_STOP_EX(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0e, pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution), a);
492 return rc;
493}
494#endif /* !IN_RING3 */
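/*
 * Illustrative sketch (not from PGMAll.cpp): the statistics block in
 * PGMTrap0eHandler classifies faults by the architectural x86 #PF error-code
 * bits (P, W/R, U/S, RSVD, I/D), which is what the X86_TRAP_PF_* masks test.
 * The standalone helper below decodes the same bits; the function name and
 * the direct hex masks are hypothetical, the bit layout is the standard one.
 */
#if 0 /* illustrative only */
#include <stdint.h>
#include <stdio.h>

static void demoDescribePageFault(uint32_t uErr)
{
    printf("#PF: %s-mode %s, page %s%s%s\n",
           (uErr & UINT32_C(0x04)) ? "user"  : "supervisor",        /* U/S  */
           (uErr & UINT32_C(0x02)) ? "write" : "read",              /* W/R  */
           (uErr & UINT32_C(0x01)) ? "present (protection)" : "not present",
           (uErr & UINT32_C(0x08)) ? ", reserved bit set"   : "",   /* RSVD */
           (uErr & UINT32_C(0x10)) ? ", instruction fetch"  : "");  /* I/D  */
}

int main(void)
{
    demoDescribePageFault(0x06); /* user-mode write to a not-present page */
    return 0;
}
#endif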
495
496
497/**
498 * Prefetch a page
499 *
500 * Typically used to sync commonly used pages before entering raw mode
501 * after a CR3 reload.
502 *
503 * @returns VBox status code suitable for scheduling.
504 * @retval VINF_SUCCESS on success.
505 * @retval VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
506 * @param pVCpu VMCPU handle.
507 * @param GCPtrPage Page to invalidate.
508 */
509VMMDECL(int) PGMPrefetchPage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
510{
511 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Prefetch), a);
512 int rc = PGM_BTH_PFN(PrefetchPage, pVCpu)(pVCpu, GCPtrPage);
513 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Prefetch), a);
514 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
515 return rc;
516}
517
518
519/**
520 * Gets the mapping corresponding to the specified address (if any).
521 *
522 * @returns Pointer to the mapping.
523 * @returns NULL if not found.
524 *
525 * @param pVM The VM handle.
526 * @param GCPtr The guest context pointer.
527 */
528PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr)
529{
530 PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
531 while (pMapping)
532 {
533 if ((uintptr_t)GCPtr < (uintptr_t)pMapping->GCPtr)
534 break;
535 if ((uintptr_t)GCPtr - (uintptr_t)pMapping->GCPtr < pMapping->cb)
536 return pMapping;
537 pMapping = pMapping->CTX_SUFF(pNext);
538 }
539 return NULL;
540}
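/*
 * Illustrative sketch (not from PGMAll.cpp): pgmGetMapping relies on two
 * idioms, a list kept sorted by start address (so the walk can stop at the
 * first mapping starting beyond GCPtr) and a containment test done with one
 * unsigned subtraction, "addr - start < size", which holds exactly when
 * start <= addr < start + size.  The types and names below are hypothetical.
 */
#if 0 /* illustrative only */
#include <stddef.h>
#include <stdint.h>

typedef struct DEMOMAPPING
{
    uintptr_t           uStart;  /* first address covered by the mapping */
    size_t              cb;      /* size of the mapping in bytes */
    struct DEMOMAPPING *pNext;   /* next mapping, sorted ascending by uStart */
} DEMOMAPPING;

static DEMOMAPPING *demoGetMapping(DEMOMAPPING *pHead, uintptr_t uAddr)
{
    for (DEMOMAPPING *pCur = pHead; pCur; pCur = pCur->pNext)
    {
        if (uAddr < pCur->uStart)               /* sorted: no later entry can match */
            break;
        if (uAddr - pCur->uStart < pCur->cb)    /* start <= uAddr < start + cb */
            return pCur;
    }
    return NULL;
}
#endif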
541
542
543/**
544 * Verifies a range of pages for read or write access
545 *
546 * Only checks the guest's page tables
547 *
548 * @returns VBox status code.
549 * @param pVCpu VMCPU handle.
550 * @param Addr Guest virtual address to check
551 * @param cbSize Access size
552 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
553 * @remarks Currently not in use.
554 */
555VMMDECL(int) PGMIsValidAccess(PVMCPU pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
556{
557 /*
558 * Validate input.
559 */
560 if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
561 {
562 AssertMsgFailed(("PGMIsValidAccess: invalid access type %08x\n", fAccess));
563 return VERR_INVALID_PARAMETER;
564 }
565
566 uint64_t fPage;
567 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)Addr, &fPage, NULL);
568 if (RT_FAILURE(rc))
569 {
570 Log(("PGMIsValidAccess: access violation for %RGv rc=%d\n", Addr, rc));
571 return VINF_EM_RAW_GUEST_TRAP;
572 }
573
574 /*
575 * Check if the access would cause a page fault
576 *
577 * Note that hypervisor page directories are not present in the guest's tables, so this check
578 * is sufficient.
579 */
580 bool fWrite = !!(fAccess & X86_PTE_RW);
581 bool fUser = !!(fAccess & X86_PTE_US);
582 if ( !(fPage & X86_PTE_P)
583 || (fWrite && !(fPage & X86_PTE_RW))
584 || (fUser && !(fPage & X86_PTE_US)) )
585 {
586 Log(("PGMIsValidAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPage, fWrite, fUser));
587 return VINF_EM_RAW_GUEST_TRAP;
588 }
589 if ( RT_SUCCESS(rc)
590 && PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize))
591 return PGMIsValidAccess(pVCpu, Addr + PAGE_SIZE, (cbSize > PAGE_SIZE) ? cbSize - PAGE_SIZE : 1, fAccess);
592 return rc;
593}
594
595
596/**
597 * Verifies a range of pages for read or write access
598 *
599 * Supports handling of pages marked for dirty bit tracking and CSAM
600 *
601 * @returns VBox status code.
602 * @param pVCpu VMCPU handle.
603 * @param Addr Guest virtual address to check
604 * @param cbSize Access size
605 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
606 */
607VMMDECL(int) PGMVerifyAccess(PVMCPU pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
608{
609 PVM pVM = pVCpu->CTX_SUFF(pVM);
610
611 AssertMsg(!(fAccess & ~(X86_PTE_US | X86_PTE_RW)), ("PGMVerifyAccess: invalid access type %08x\n", fAccess));
612
613 /*
614 * Get going.
615 */
616 uint64_t fPageGst;
617 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)Addr, &fPageGst, NULL);
618 if (RT_FAILURE(rc))
619 {
620 Log(("PGMVerifyAccess: access violation for %RGv rc=%d\n", Addr, rc));
621 return VINF_EM_RAW_GUEST_TRAP;
622 }
623
624 /*
625 * Check if the access would cause a page fault
626 *
627 * Note that hypervisor page directories are not present in the guest's tables, so this check
628 * is sufficient.
629 */
630 const bool fWrite = !!(fAccess & X86_PTE_RW);
631 const bool fUser = !!(fAccess & X86_PTE_US);
632 if ( !(fPageGst & X86_PTE_P)
633 || (fWrite && !(fPageGst & X86_PTE_RW))
634 || (fUser && !(fPageGst & X86_PTE_US)) )
635 {
636 Log(("PGMVerifyAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPageGst, fWrite, fUser));
637 return VINF_EM_RAW_GUEST_TRAP;
638 }
639
640 if (!pVM->pgm.s.fNestedPaging)
641 {
642 /*
643 * Next step is to verify if we protected this page for dirty bit tracking or for CSAM scanning
644 */
645 rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, NULL, NULL);
646 if ( rc == VERR_PAGE_NOT_PRESENT
647 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
648 {
649 /*
650 * Page is not present in our page tables.
651 * Try to sync it!
652 */
653 Assert(X86_TRAP_PF_RW == X86_PTE_RW && X86_TRAP_PF_US == X86_PTE_US);
654 uint32_t uErr = fAccess & (X86_TRAP_PF_RW | X86_TRAP_PF_US);
655 rc = PGM_BTH_PFN(VerifyAccessSyncPage, pVCpu)(pVCpu, Addr, fPageGst, uErr);
656 if (rc != VINF_SUCCESS)
657 return rc;
658 }
659 else
660 AssertMsg(rc == VINF_SUCCESS, ("PGMShwGetPage %RGv failed with %Rrc\n", Addr, rc));
661 }
662
663#if 0 /* def VBOX_STRICT; triggers too often now */
664 /*
665 * This check is a bit paranoid, but useful.
666 */
667 /* Note! This will assert when writing to monitored pages (a bit annoying actually). */
668 uint64_t fPageShw;
669 rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, &fPageShw, NULL);
670 if ( (rc == VERR_PAGE_NOT_PRESENT || RT_FAILURE(rc))
671 || (fWrite && !(fPageShw & X86_PTE_RW))
672 || (fUser && !(fPageShw & X86_PTE_US)) )
673 {
674 AssertMsgFailed(("Unexpected access violation for %RGv! rc=%Rrc write=%d user=%d\n",
675 Addr, rc, fWrite && !(fPageShw & X86_PTE_RW), fUser && !(fPageShw & X86_PTE_US)));
676 return VINF_EM_RAW_GUEST_TRAP;
677 }
678#endif
679
680 if ( RT_SUCCESS(rc)
681 && ( PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize - 1)
682 || Addr + cbSize < Addr))
683 {
684 /* Don't recursively call PGMVerifyAccess as we might run out of stack. */
685 for (;;)
686 {
687 Addr += PAGE_SIZE;
688 if (cbSize > PAGE_SIZE)
689 cbSize -= PAGE_SIZE;
690 else
691 cbSize = 1;
692 rc = PGMVerifyAccess(pVCpu, Addr, 1, fAccess);
693 if (rc != VINF_SUCCESS)
694 break;
695 if (PAGE_ADDRESS(Addr) == PAGE_ADDRESS(Addr + cbSize - 1))
696 break;
697 }
698 }
699 return rc;
700}
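/*
 * Illustrative sketch (not from PGMAll.cpp): PGMVerifyAccess deliberately
 * loops instead of recursing when the range crosses a page boundary, so a
 * large cbSize cannot exhaust the (small ring-0/RC) stack.  The hypothetical
 * helper below shows the same page-by-page walk over a byte range using a
 * fixed 4 KiB page size.
 */
#if 0 /* illustrative only */
#include <stdint.h>

#define DEMO_PAGE_SIZE       UINT64_C(0x1000)
#define DEMO_PAGE_ADDRESS(a) ((a) & ~(DEMO_PAGE_SIZE - 1))

/* Invokes pfnCheck once per page touched by [GCPtr, GCPtr + cb); cb >= 1. */
static int demoForEachPage(uint64_t GCPtr, uint64_t cb,
                           int (*pfnCheck)(uint64_t GCPtrPage))
{
    uint64_t const GCPtrLast = GCPtr + cb - 1;
    for (;;)
    {
        int rc = pfnCheck(DEMO_PAGE_ADDRESS(GCPtr));
        if (rc != 0)
            return rc;                                          /* stop on first failure */
        if (DEMO_PAGE_ADDRESS(GCPtr) == DEMO_PAGE_ADDRESS(GCPtrLast))
            return 0;                                           /* last page reached */
        GCPtr = DEMO_PAGE_ADDRESS(GCPtr) + DEMO_PAGE_SIZE;      /* advance to next page */
    }
}
#endif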
701
702
703/**
704 * Emulation of the invlpg instruction (HC only actually).
705 *
706 * @returns Strict VBox status code, special care required.
707 * @retval VINF_PGM_SYNC_CR3 - handled.
708 * @retval VINF_EM_RAW_EMULATE_INSTR - not handled (RC only).
709 * @retval VERR_REM_FLUSHED_PAGES_OVERFLOW - not handled.
710 *
711 * @param pVCpu VMCPU handle.
712 * @param GCPtrPage Page to invalidate.
713 *
714 * @remark ASSUMES the page table entry or page directory is valid. Fairly
715 * safe, but there could be edge cases!
716 *
717 * @todo Flush page or page directory only if necessary!
718 * @todo VBOXSTRICTRC
719 */
720VMMDECL(int) PGMInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
721{
722 PVM pVM = pVCpu->CTX_SUFF(pVM);
723 int rc;
724 Log3(("PGMInvalidatePage: GCPtrPage=%RGv\n", GCPtrPage));
725
726#if !defined(IN_RING3) && defined(VBOX_WITH_REM)
727 /*
728 * Notify the recompiler so it can record this instruction.
729 */
730 REMNotifyInvalidatePage(pVM, GCPtrPage);
731#endif /* !IN_RING3 */
732
733
734#ifdef IN_RC
735 /*
736 * Check for conflicts and pending CR3 monitoring updates.
737 */
738 if (pgmMapAreMappingsFloating(pVM))
739 {
740 if ( pgmGetMapping(pVM, GCPtrPage)
741 && PGMGstGetPage(pVCpu, GCPtrPage, NULL, NULL) != VERR_PAGE_TABLE_NOT_PRESENT)
742 {
743 LogFlow(("PGMGCInvalidatePage: Conflict!\n"));
744 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
745 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRCInvlPgConflict);
746 return VINF_PGM_SYNC_CR3;
747 }
748
749 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
750 {
751 LogFlow(("PGMGCInvalidatePage: PGM_SYNC_MONITOR_CR3 -> reinterpret instruction in R3\n"));
752 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRCInvlPgSyncMonCR3);
753 return VINF_EM_RAW_EMULATE_INSTR;
754 }
755 }
756#endif /* IN_RC */
757
758 /*
759 * Call paging mode specific worker.
760 */
761 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InvalidatePage), a);
762 pgmLock(pVM);
763 rc = PGM_BTH_PFN(InvalidatePage, pVCpu)(pVCpu, GCPtrPage);
764 pgmUnlock(pVM);
765 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InvalidatePage), a);
766
767#ifdef IN_RING3
768 /*
769 * Check if we have a pending update of the CR3 monitoring.
770 */
771 if ( RT_SUCCESS(rc)
772 && (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3))
773 {
774 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
775 Assert(!pVM->pgm.s.fMappingsFixed); Assert(!pVM->pgm.s.fMappingsDisabled);
776 }
777
778 /*
779 * Inform CSAM about the flush
780 *
781 * Note: This is to check if monitored pages have been changed; when we implement
782 * callbacks for virtual handlers, this will no longer be required.
783 */
784 CSAMR3FlushPage(pVM, GCPtrPage);
785#endif /* IN_RING3 */
786
787 /* Ignore all irrelevant error codes. */
788 if ( rc == VERR_PAGE_NOT_PRESENT
789 || rc == VERR_PAGE_TABLE_NOT_PRESENT
790 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT
791 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)
792 rc = VINF_SUCCESS;
793
794 return rc;
795}
796
797
798/**
799 * Executes an instruction using the interpreter.
800 *
801 * @returns VBox status code (appropriate for trap handling and GC return).
802 * @param pVM The VM handle.
803 * @param pVCpu VMCPU handle.
804 * @param pRegFrame Register frame.
805 * @param pvFault Fault address.
806 */
807VMMDECL(VBOXSTRICTRC) PGMInterpretInstruction(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
808{
809 VBOXSTRICTRC rc = EMInterpretInstruction(pVCpu, pRegFrame, pvFault);
810 if (rc == VERR_EM_INTERPRETER)
811 rc = VINF_EM_RAW_EMULATE_INSTR;
812 if (rc != VINF_SUCCESS)
813 Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%RGv)\n", VBOXSTRICTRC_VAL(rc), pvFault));
814 return rc;
815}
816
817
818/**
819 * Gets effective page information (from the VMM page directory).
820 *
821 * @returns VBox status.
822 * @param pVCpu VMCPU handle.
823 * @param GCPtr Guest Context virtual address of the page.
824 * @param pfFlags Where to store the flags. These are X86_PTE_*.
825 * @param pHCPhys Where to store the HC physical address of the page.
826 * This is page aligned.
827 * @remark You should use PGMMapGetPage() for pages in a mapping.
828 */
829VMMDECL(int) PGMShwGetPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
830{
831 pgmLock(pVCpu->CTX_SUFF(pVM));
832 int rc = PGM_SHW_PFN(GetPage, pVCpu)(pVCpu, GCPtr, pfFlags, pHCPhys);
833 pgmUnlock(pVCpu->CTX_SUFF(pVM));
834 return rc;
835}
836
837
838/**
839 * Modify page flags for a range of pages in the shadow context.
840 *
841 * The existing flags are ANDed with the fMask and ORed with the fFlags.
842 *
843 * @returns VBox status code.
844 * @param pVCpu VMCPU handle.
845 * @param GCPtr Virtual address of the first page in the range.
846 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
847 * @param fMask The AND mask - page flags X86_PTE_*.
848 * Be very CAREFUL when ~'ing constants which could be 32-bit!
849 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
850 * @remark You must use PGMMapModifyPage() for pages in a mapping.
851 */
852DECLINLINE(int) pdmShwModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags)
853{
854 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
855 Assert(!(fOpFlags & ~(PGM_MK_PG_IS_MMIO2 | PGM_MK_PG_IS_WRITE_FAULT)));
856
857 GCPtr &= PAGE_BASE_GC_MASK; /** @todo this ain't necessary, right... */
858
859 PVM pVM = pVCpu->CTX_SUFF(pVM);
860 pgmLock(pVM);
861 int rc = PGM_SHW_PFN(ModifyPage, pVCpu)(pVCpu, GCPtr, PAGE_SIZE, fFlags, fMask, fOpFlags);
862 pgmUnlock(pVM);
863 return rc;
864}
865
866
867/**
868 * Changes the page flags for a single page in the shadow page tables so as to
869 * make it read-only.
870 *
871 * @returns VBox status code.
872 * @param pVCpu VMCPU handle.
873 * @param GCPtr Virtual address of the first page in the range.
874 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
875 */
876VMMDECL(int) PGMShwMakePageReadonly(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
877{
878 return pdmShwModifyPage(pVCpu, GCPtr, 0, ~(uint64_t)X86_PTE_RW, fOpFlags);
879}
880
881
882/**
883 * Changes the page flags for a single page in the shadow page tables so as to
884 * make it writable.
885 *
886 * The caller must know with 101% certainty that the guest page tables map this
887 * page as writable too. This function will deal with shared, zero and write
888 * monitored pages.
889 *
890 * @returns VBox status code.
891 * @param pVCpu VMCPU handle.
892 * @param GCPtr Virtual address of the first page in the range.
893 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags; pass
894 * PGM_MK_PG_IS_MMIO2 if it is an MMIO2 page.
895 */
896VMMDECL(int) PGMShwMakePageWritable(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
897{
898 return pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)0, fOpFlags);
899}
900
901
902/**
903 * Changes the page flags for a single page in the shadow page tables so as to
904 * make it not present.
905 *
906 * @returns VBox status code.
907 * @param pVCpu VMCPU handle.
908 * @param GCPtr Virtual address of the first page in the range.
909 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
910 */
911VMMDECL(int) PGMShwMakePageNotPresent(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
912{
913 return pdmShwModifyPage(pVCpu, GCPtr, 0, 0, fOpFlags);
914}
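/*
 * Illustrative sketch (not from PGMAll.cpp): the three wrappers above all
 * funnel into pdmShwModifyPage, which applies the classic AND-then-OR update
 * new = (old & fMask) | fFlags.  The hypothetical demo below shows how the
 * read-only, writable and not-present cases fall out of that one primitive;
 * DEMO_PTE_P and DEMO_PTE_RW mirror the standard x86 PTE bits 0 and 1.
 */
#if 0 /* illustrative only */
#include <stdint.h>
#include <stdio.h>

#define DEMO_PTE_P  UINT64_C(0x01)
#define DEMO_PTE_RW UINT64_C(0x02)

static uint64_t demoModifyPte(uint64_t uPte, uint64_t fFlags, uint64_t fMask)
{
    return (uPte & fMask) | fFlags;     /* AND with the mask, then OR in the flags */
}

int main(void)
{
    uint64_t const uPte = UINT64_C(0x0000000012345000) | DEMO_PTE_P | DEMO_PTE_RW;

    /* read-only:   clear RW, set nothing     (fFlags = 0,  fMask = ~RW) */
    printf("ro: %#llx\n", (unsigned long long)demoModifyPte(uPte, 0, ~DEMO_PTE_RW));
    /* writable:    set RW, keep everything   (fFlags = RW, fMask = ~0)  */
    printf("rw: %#llx\n", (unsigned long long)demoModifyPte(uPte, DEMO_PTE_RW, ~UINT64_C(0)));
    /* not present: clear everything          (fFlags = 0,  fMask = 0)   */
    printf("np: %#llx\n", (unsigned long long)demoModifyPte(uPte, 0, 0));
    return 0;
}
#endif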
915
916
917/**
918 * Syncs the shadow page directory for the specified address (PAE), allocating it if not present.
919 *
920 * @returns VBox status code.
921 * @param pVCpu The VMCPU handle.
922 * @param GCPtr The address.
923 * @param uGstPdpe Guest PDPT entry. Valid.
924 * @param ppPD Receives address of page directory
925 */
926int pgmShwSyncPaePDPtr(PVMCPU pVCpu, RTGCPTR GCPtr, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
927{
928 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
929 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
930 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
931 PVM pVM = pVCpu->CTX_SUFF(pVM);
932 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
933 PPGMPOOLPAGE pShwPage;
934 int rc;
935
936 PGM_LOCK_ASSERT_OWNER(pVM);
937
938 /* Allocate page directory if not present. */
939 if ( !pPdpe->n.u1Present
940 && !(pPdpe->u & X86_PDPE_PG_MASK))
941 {
942 RTGCPTR64 GCPdPt;
943 PGMPOOLKIND enmKind;
944
945 if (pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu))
946 {
947 /* AMD-V nested paging or real/protected mode without paging. */
948 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
949 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
950 }
951 else
952 {
953 if (CPUMGetGuestCR4(pVCpu) & X86_CR4_PAE)
954 {
955 if (!(uGstPdpe & X86_PDPE_P))
956 {
957 /* PD not present; guest must reload CR3 to change it.
958 * No need to monitor anything in this case.
959 */
960 Assert(!HWACCMIsEnabled(pVM));
961
962 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
963 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
964 uGstPdpe |= X86_PDPE_P;
965 }
966 else
967 {
968 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
969 enmKind = PGMPOOLKIND_PAE_PD_FOR_PAE_PD;
970 }
971 }
972 else
973 {
974 GCPdPt = CPUMGetGuestCR3(pVCpu);
975 enmKind = (PGMPOOLKIND)(PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD + iPdPt);
976 }
977 }
978
979 /* Create a reference back to the PDPT by using the index in its shadow page. */
980 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPdPt, &pShwPage);
981 AssertRCReturn(rc, rc);
982
983 /* The PD was cached or created; hook it up now. */
984 pPdpe->u |= pShwPage->Core.Key | (uGstPdpe & (X86_PDPE_P | X86_PDPE_A));
985
986# if defined(IN_RC)
987 /*
988 * In 32-bit PAE mode we *must* invalidate the TLB when changing a
989 * PDPT entry; the CPU fetches them only during cr3 load, so any
990 * non-present PDPT will continue to cause page faults.
991 */
992 ASMReloadCR3();
993# endif
994 PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdpe);
995 }
996 else
997 {
998 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
999 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1000 Assert((pPdpe->u & X86_PDPE_PG_MASK) == pShwPage->Core.Key);
1001
1002 pgmPoolCacheUsed(pPool, pShwPage);
1003 }
1004 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1005 return VINF_SUCCESS;
1006}
1007
1008
1009/**
1010 * Gets the pointer to the shadow page directory entry for an address, PAE.
1011 *
1012 * @returns VBox status code.
1013 * @param pVCpu The current CPU.
1014 * @param GCPtr The address.
1015 * @param ppShwPde Receives the address of the pgm pool page for the shadow page directory
1016 */
1017DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPU pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde)
1018{
1019 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
1020 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
1021 PVM pVM = pVCpu->CTX_SUFF(pVM);
1022
1023 PGM_LOCK_ASSERT_OWNER(pVM);
1024
1025 AssertReturn(pPdpt, VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT); /* can't happen */
1026 if (!pPdpt->a[iPdPt].n.u1Present)
1027 {
1028 LogFlow(("pgmShwGetPaePoolPagePD: PD %d not present (%RX64)\n", iPdPt, pPdpt->a[iPdPt].u));
1029 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1030 }
1031 AssertMsg(pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK, ("GCPtr=%RGv\n", GCPtr));
1032
1033 /* Fetch the pgm pool shadow descriptor. */
1034 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
1035 AssertReturn(pShwPde, VERR_PGM_POOL_GET_PAGE_FAILED);
1036
1037 *ppShwPde = pShwPde;
1038 return VINF_SUCCESS;
1039}
1040
1041#ifndef IN_RC
1042
1043/**
1044 * Syncs the SHADOW page directory pointer for the specified address.
1045 *
1046 * Allocates backing pages in case the PDPT or PML4 entry is missing.
1047 *
1048 * The caller is responsible for making sure the guest has a valid PD before
1049 * calling this function.
1050 *
1051 * @returns VBox status.
1052 * @param pVCpu VMCPU handle.
1053 * @param GCPtr The address.
1054 * @param uGstPml4e Guest PML4 entry (valid).
1055 * @param uGstPdpe Guest PDPT entry (valid).
1056 * @param ppPD Receives address of page directory
1057 */
1058static int pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
1059{
1060 PVM pVM = pVCpu->CTX_SUFF(pVM);
1061 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1062 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1063 PX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
1064 bool fNestedPagingOrNoGstPaging = pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu);
1065 PPGMPOOLPAGE pShwPage;
1066 int rc;
1067
1068 PGM_LOCK_ASSERT_OWNER(pVM);
1069
1070 /* Allocate page directory pointer table if not present. */
1071 if ( !pPml4e->n.u1Present
1072 && !(pPml4e->u & X86_PML4E_PG_MASK))
1073 {
1074 RTGCPTR64 GCPml4;
1075 PGMPOOLKIND enmKind;
1076
1077 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1078
1079 if (fNestedPagingOrNoGstPaging)
1080 {
1081 /* AMD-V nested paging or real/protected mode without paging */
1082 GCPml4 = (RTGCPTR64)iPml4 << X86_PML4_SHIFT;
1083 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_PHYS;
1084 }
1085 else
1086 {
1087 GCPml4 = uGstPml4e & X86_PML4E_PG_MASK;
1088 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT;
1089 }
1090
1091 /* Create a reference back to the PDPT by using the index in its shadow page. */
1092 rc = pgmPoolAlloc(pVM, GCPml4, enmKind, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, &pShwPage);
1093 AssertRCReturn(rc, rc);
1094 }
1095 else
1096 {
1097 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
1098 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1099
1100 pgmPoolCacheUsed(pPool, pShwPage);
1101 }
1102 /* The PDPT was cached or created; hook it up now. */
1103 pPml4e->u |= pShwPage->Core.Key | (uGstPml4e & pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask);
1104
1105 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1106 PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1107 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1108
1109 /* Allocate page directory if not present. */
1110 if ( !pPdpe->n.u1Present
1111 && !(pPdpe->u & X86_PDPE_PG_MASK))
1112 {
1113 RTGCPTR64 GCPdPt;
1114 PGMPOOLKIND enmKind;
1115
1116 if (fNestedPagingOrNoGstPaging)
1117 {
1118 /* AMD-V nested paging or real/protected mode without paging */
1119 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
1120 enmKind = PGMPOOLKIND_64BIT_PD_FOR_PHYS;
1121 }
1122 else
1123 {
1124 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1125 enmKind = PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD;
1126 }
1127
1128 /* Create a reference back to the PDPT by using the index in its shadow page. */
1129 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, pShwPage->idx, iPdPt, &pShwPage);
1130 AssertRCReturn(rc, rc);
1131 }
1132 else
1133 {
1134 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
1135 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1136
1137 pgmPoolCacheUsed(pPool, pShwPage);
1138 }
1139 /* The PD was cached or created; hook it up now. */
1140 pPdpe->u |= pShwPage->Core.Key | (uGstPdpe & pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask);
1141
1142 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1143 return VINF_SUCCESS;
1144}
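/*
 * Illustrative sketch (not from PGMAll.cpp): the walk above indexes each
 * paging level by shifting and masking the address, as in
 * (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK.  For reference, the standard
 * long-mode 4-level split (512 entries, i.e. 9 index bits, per table) is
 * reproduced below with hypothetical names and hard-coded shifts.
 */
#if 0 /* illustrative only */
#include <stdint.h>
#include <stdio.h>

static void demoSplitLongModeAddress(uint64_t GCPtr)
{
    unsigned const iPml4 = (unsigned)((GCPtr >> 39) & 0x1ff);   /* PML4E index */
    unsigned const iPdpt = (unsigned)((GCPtr >> 30) & 0x1ff);   /* PDPTE index */
    unsigned const iPd   = (unsigned)((GCPtr >> 21) & 0x1ff);   /* PDE index   */
    unsigned const iPt   = (unsigned)((GCPtr >> 12) & 0x1ff);   /* PTE index   */
    printf("%#llx -> PML4[%u] PDPT[%u] PD[%u] PT[%u] offset %#llx\n",
           (unsigned long long)GCPtr, iPml4, iPdpt, iPd, iPt,
           (unsigned long long)(GCPtr & 0xfff));
}

int main(void)
{
    demoSplitLongModeAddress(UINT64_C(0x00007fffdeadb000));
    return 0;
}
#endif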
1145
1146
1147/**
1148 * Gets the SHADOW page directory pointer for the specified address (long mode).
1149 *
1150 * @returns VBox status.
1151 * @param pVCpu VMCPU handle.
1152 * @param GCPtr The address.
1153 * @param ppPdpt Receives address of pdpt
1154 * @param ppPD Receives address of page directory
1155 */
1156DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
1157{
1158 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1159 PCX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
1160
1161 PGM_LOCK_ASSERT_OWNER(pVCpu->CTX_SUFF(pVM));
1162
1163 AssertReturn(pPml4e, VERR_PGM_PML4_MAPPING);
1164 if (ppPml4e)
1165 *ppPml4e = (PX86PML4E)pPml4e;
1166
1167 Log4(("pgmShwGetLongModePDPtr %RGv (%RHv) %RX64\n", GCPtr, pPml4e, pPml4e->u));
1168
1169 if (!pPml4e->n.u1Present)
1170 return VERR_PAGE_MAP_LEVEL4_NOT_PRESENT;
1171
1172 PVM pVM = pVCpu->CTX_SUFF(pVM);
1173 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1174 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
1175 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1176
1177 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1178 PCX86PDPT pPdpt = *ppPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1179 if (!pPdpt->a[iPdPt].n.u1Present)
1180 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1181
1182 pShwPage = pgmPoolGetPage(pPool, pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
1183 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1184
1185 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1186 Log4(("pgmShwGetLongModePDPtr %RGv -> *ppPD=%p PDE=%p/%RX64\n", GCPtr, *ppPD, &(*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK], (*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK].u));
1187 return VINF_SUCCESS;
1188}
1189
1190
1191/**
1192 * Syncs the SHADOW EPT page directory pointer for the specified address. Allocates
1193 * backing pages in case the PDPT or PML4 entry is missing.
1194 *
1195 * @returns VBox status.
1196 * @param pVCpu VMCPU handle.
1197 * @param GCPtr The address.
1198 * @param ppPdpt Receives address of pdpt
1199 * @param ppPD Receives address of page directory
1200 */
1201static int pgmShwGetEPTPDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD)
1202{
1203 PVM pVM = pVCpu->CTX_SUFF(pVM);
1204 const unsigned iPml4 = (GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
1205 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1206 PEPTPML4 pPml4;
1207 PEPTPML4E pPml4e;
1208 PPGMPOOLPAGE pShwPage;
1209 int rc;
1210
1211 Assert(pVM->pgm.s.fNestedPaging);
1212 PGM_LOCK_ASSERT_OWNER(pVM);
1213
1214 pPml4 = (PEPTPML4)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1215 Assert(pPml4);
1216
1217 /* Allocate page directory pointer table if not present. */
1218 pPml4e = &pPml4->a[iPml4];
1219 if ( !pPml4e->n.u1Present
1220 && !(pPml4e->u & EPT_PML4E_PG_MASK))
1221 {
1222 Assert(!(pPml4e->u & EPT_PML4E_PG_MASK));
1223 RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT;
1224
1225 rc = pgmPoolAlloc(pVM, GCPml4, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOL_IDX_NESTED_ROOT, iPml4, &pShwPage);
1226 AssertRCReturn(rc, rc);
1227 }
1228 else
1229 {
1230 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
1231 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1232
1233 pgmPoolCacheUsed(pPool, pShwPage);
1234 }
1235 /* The PDPT was cached or created; hook it up now and fill with the default value. */
1236 pPml4e->u = pShwPage->Core.Key;
1237 pPml4e->n.u1Present = 1;
1238 pPml4e->n.u1Write = 1;
1239 pPml4e->n.u1Execute = 1;
1240
1241 const unsigned iPdPt = (GCPtr >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
1242 PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1243 PEPTPDPTE pPdpe = &pPdpt->a[iPdPt];
1244
1245 if (ppPdpt)
1246 *ppPdpt = pPdpt;
1247
1248 /* Allocate page directory if not present. */
1249 if ( !pPdpe->n.u1Present
1250 && !(pPdpe->u & EPT_PDPTE_PG_MASK))
1251 {
1252 RTGCPTR64 GCPdPt = (RTGCPTR64)iPdPt << EPT_PDPT_SHIFT;
1253
1254 rc = pgmPoolAlloc(pVM, GCPdPt, PGMPOOLKIND_EPT_PD_FOR_PHYS, pShwPage->idx, iPdPt, &pShwPage);
1255 AssertRCReturn(rc, rc);
1256 }
1257 else
1258 {
1259 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & EPT_PDPTE_PG_MASK);
1260 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1261
1262 pgmPoolCacheUsed(pPool, pShwPage);
1263 }
1264 /* The PD was cached or created; hook it up now and fill with the default value. */
1265 pPdpe->u = pShwPage->Core.Key;
1266 pPdpe->n.u1Present = 1;
1267 pPdpe->n.u1Write = 1;
1268 pPdpe->n.u1Execute = 1;
1269
1270 *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1271 return VINF_SUCCESS;
1272}
1273
1274#endif /* !IN_RC */
1275
1276#ifdef IN_RING0
1277/**
1278 * Synchronizes a range of nested page table entries.
1279 *
1280 * The caller must own the PGM lock.
1281 *
1282 * @param pVCpu The current CPU.
1283 * @param GCPhysFault Where to start.
1284 * @param cPages The number of pages whose entries should be synced.
1285 * @param enmShwPagingMode The shadow paging mode (PGMMODE_EPT for VT-x,
1286 * host paging mode for AMD-V).
1287 */
1288int pgmShwSyncNestedPageLocked(PVMCPU pVCpu, RTGCPHYS GCPhysFault, uint32_t cPages, PGMMODE enmShwPagingMode)
1289{
1290 PGM_LOCK_ASSERT_OWNER(pVCpu->CTX_SUFF(pVM));
1291
1292 int rc;
1293 switch (enmShwPagingMode)
1294 {
1295 case PGMMODE_32_BIT:
1296 {
1297 X86PDE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1298 rc = PGM_BTH_NAME_32BIT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhysFault, cPages, ~0U /*uErr*/);
1299 break;
1300 }
1301
1302 case PGMMODE_PAE:
1303 case PGMMODE_PAE_NX:
1304 {
1305 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1306 rc = PGM_BTH_NAME_PAE_PROT(SyncPage)(pVCpu, PdeDummy, GCPhysFault, cPages, ~0U /*uErr*/);
1307 break;
1308 }
1309
1310 case PGMMODE_AMD64:
1311 case PGMMODE_AMD64_NX:
1312 {
1313 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1314 rc = PGM_BTH_NAME_AMD64_PROT(SyncPage)(pVCpu, PdeDummy, GCPhysFault, cPages, ~0U /*uErr*/);
1315 break;
1316 }
1317
1318 case PGMMODE_EPT:
1319 {
1320 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1321 rc = PGM_BTH_NAME_EPT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhysFault, cPages, ~0U /*uErr*/);
1322 break;
1323 }
1324
1325 default:
1326 AssertMsgFailedReturn(("%d\n", enmShwPagingMode), VERR_IPE_NOT_REACHED_DEFAULT_CASE);
1327 }
1328 return rc;
1329}
1330#endif /* IN_RING0 */
1331
1332
1333/**
1334 * Gets effective Guest OS page information.
1335 *
1336 * When GCPtr is in a big page, the function will return as if it was a normal
1337 * 4KB page. If the need for distinguishing between big and normal page becomes
1338 * necessary at a later point, a dedicated API will be created for that
1339 * purpose.
1340 *
1341 * @returns VBox status.
1342 * @param pVCpu The current CPU.
1343 * @param GCPtr Guest Context virtual address of the page.
1344 * @param pfFlags Where to store the flags. These are X86_PTE_*, even for big pages.
1345 * @param pGCPhys Where to store the GC physical address of the page.
1346 * This is page aligned.
1347 */
1348VMMDECL(int) PGMGstGetPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
1349{
1350 VMCPU_ASSERT_EMT(pVCpu);
1351 return PGM_GST_PFN(GetPage, pVCpu)(pVCpu, GCPtr, pfFlags, pGCPhys);
1352}
1353
1354
1355/**
1356 * Checks if the page is present.
1357 *
1358 * @returns true if the page is present.
1359 * @returns false if the page is not present.
1360 * @param pVCpu VMCPU handle.
1361 * @param GCPtr Address within the page.
1362 */
1363VMMDECL(bool) PGMGstIsPagePresent(PVMCPU pVCpu, RTGCPTR GCPtr)
1364{
1365 VMCPU_ASSERT_EMT(pVCpu);
1366 int rc = PGMGstGetPage(pVCpu, GCPtr, NULL, NULL);
1367 return RT_SUCCESS(rc);
1368}
1369
1370
1371/**
1372 * Sets (replaces) the page flags for a range of pages in the guest's tables.
1373 *
1374 * @returns VBox status.
1375 * @param pVCpu VMCPU handle.
1376 * @param GCPtr The address of the first page.
1377 * @param cb The size of the range in bytes.
1378 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
1379 */
1380VMMDECL(int) PGMGstSetPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
1381{
1382 VMCPU_ASSERT_EMT(pVCpu);
1383 return PGMGstModifyPage(pVCpu, GCPtr, cb, fFlags, 0);
1384}
1385
1386
1387/**
1388 * Modify page flags for a range of pages in the guest's tables
1389 *
1390 * The existing flags are ANDed with the fMask and ORed with the fFlags.
1391 *
1392 * @returns VBox status code.
1393 * @param pVCpu VMCPU handle.
1394 * @param GCPtr Virtual address of the first page in the range.
1395 * @param cb Size (in bytes) of the range to apply the modification to.
1396 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
1397 * @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
1398 * Be very CAREFUL when ~'ing constants which could be 32-bit!
1399 */
1400VMMDECL(int) PGMGstModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
1401{
1402 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,GstModifyPage), a);
1403 VMCPU_ASSERT_EMT(pVCpu);
1404
1405 /*
1406 * Validate input.
1407 */
1408 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
1409 Assert(cb);
1410
1411 LogFlow(("PGMGstModifyPage %RGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));
1412
1413 /*
1414 * Adjust input.
1415 */
1416 cb += GCPtr & PAGE_OFFSET_MASK;
1417 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
1418 GCPtr = (GCPtr & PAGE_BASE_GC_MASK);
1419
1420 /*
1421 * Call worker.
1422 */
1423 int rc = PGM_GST_PFN(ModifyPage, pVCpu)(pVCpu, GCPtr, cb, fFlags, fMask);
1424
1425 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,GstModifyPage), a);
1426 return rc;
1427}
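/*
 * Illustrative sketch (not from PGMAll.cpp): the "Adjust input" step in
 * PGMGstModifyPage grows cb by the in-page offset, rounds it up to whole
 * pages and aligns GCPtr down, so the mode-specific worker always sees
 * page-aligned, page-sized work.  The hypothetical standalone helper below
 * repeats that arithmetic with a fixed 4 KiB page size.
 */
#if 0 /* illustrative only */
#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SIZE        UINT64_C(0x1000)
#define DEMO_PAGE_OFFSET_MASK (DEMO_PAGE_SIZE - 1)

static void demoAlignRange(uint64_t GCPtr, uint64_t cb,
                           uint64_t *pGCPtrAligned, uint64_t *pcbAligned)
{
    cb += GCPtr & DEMO_PAGE_OFFSET_MASK;                          /* include the lead-in offset */
    cb  = (cb + DEMO_PAGE_OFFSET_MASK) & ~DEMO_PAGE_OFFSET_MASK;  /* round up to whole pages */
    *pGCPtrAligned = GCPtr & ~DEMO_PAGE_OFFSET_MASK;              /* align the start down */
    *pcbAligned    = cb;
}

int main(void)
{
    uint64_t GCPtr, cb;
    demoAlignRange(UINT64_C(0x10345), UINT64_C(0x2000), &GCPtr, &cb);
    printf("start=%#llx size=%#llx\n",                            /* 0x10000 / 0x3000 */
           (unsigned long long)GCPtr, (unsigned long long)cb);
    return 0;
}
#endif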
1428
1429
1430#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1431
1432/**
1433 * Performs the lazy mapping of the 32-bit guest PD.
1434 *
1435 * @returns VBox status code.
1436 * @param pVCpu The current CPU.
1437 * @param ppPd Where to return the pointer to the mapping. This is
1438 * always set.
1439 */
1440int pgmGstLazyMap32BitPD(PVMCPU pVCpu, PX86PD *ppPd)
1441{
1442 PVM pVM = pVCpu->CTX_SUFF(pVM);
1443 pgmLock(pVM);
1444
1445 Assert(!pVCpu->pgm.s.CTX_SUFF(pGst32BitPd));
1446
1447 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAGE_MASK;
1448 PPGMPAGE pPage;
1449 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
1450 if (RT_SUCCESS(rc))
1451 {
1452 RTHCPTR HCPtrGuestCR3;
1453 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
1454 if (RT_SUCCESS(rc))
1455 {
1456 pVCpu->pgm.s.pGst32BitPdR3 = (R3PTRTYPE(PX86PD))HCPtrGuestCR3;
1457# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1458 pVCpu->pgm.s.pGst32BitPdR0 = (R0PTRTYPE(PX86PD))HCPtrGuestCR3;
1459# endif
1460 *ppPd = (PX86PD)HCPtrGuestCR3;
1461
1462 pgmUnlock(pVM);
1463 return VINF_SUCCESS;
1464 }
1465
1466 AssertRC(rc);
1467 }
1468 pgmUnlock(pVM);
1469
1470 *ppPd = NULL;
1471 return rc;
1472}
1473
1474
1475/**
1476 * Performs the lazy mapping of the PAE guest PDPT.
1477 *
1478 * @returns VBox status code.
1479 * @param pVCpu The current CPU.
1480 * @param ppPdpt Where to return the pointer to the mapping. This is
1481 * always set.
1482 */
1483int pgmGstLazyMapPaePDPT(PVMCPU pVCpu, PX86PDPT *ppPdpt)
1484{
1485 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt));
1486 PVM pVM = pVCpu->CTX_SUFF(pVM);
1487 pgmLock(pVM);
1488
1489 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAE_PAGE_MASK;
1490 PPGMPAGE pPage;
1491 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
1492 if (RT_SUCCESS(rc))
1493 {
1494 RTHCPTR HCPtrGuestCR3;
1495 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
1496 if (RT_SUCCESS(rc))
1497 {
1498 pVCpu->pgm.s.pGstPaePdptR3 = (R3PTRTYPE(PX86PDPT))HCPtrGuestCR3;
1499# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1500 pVCpu->pgm.s.pGstPaePdptR0 = (R0PTRTYPE(PX86PDPT))HCPtrGuestCR3;
1501# endif
1502 *ppPdpt = (PX86PDPT)HCPtrGuestCR3;
1503
1504 pgmUnlock(pVM);
1505 return VINF_SUCCESS;
1506 }
1507
1508 AssertRC(rc);
1509 }
1510
1511 pgmUnlock(pVM);
1512 *ppPdpt = NULL;
1513 return rc;
1514}
1515
1516
1517/**
1518 * Performs the lazy mapping / updating of a PAE guest PD.
1519 *
1520 * @returns VBox status code.
1521 *
1522 * @param pVCpu The current CPU.
1523 * @param iPdpt Which PD entry to map (0..3).
1524 * @param ppPd Where to return the pointer to the mapping. This is
1525 * always set.
1526 */
1527int pgmGstLazyMapPaePD(PVMCPU pVCpu, uint32_t iPdpt, PX86PDPAE *ppPd)
1528{
1529 PVM pVM = pVCpu->CTX_SUFF(pVM);
1530 pgmLock(pVM);
1531
1532 PX86PDPT pGuestPDPT = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
1533 Assert(pGuestPDPT);
1534 Assert(pGuestPDPT->a[iPdpt].n.u1Present);
1535 RTGCPHYS GCPhys = pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK;
1536 bool const fChanged = pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] != GCPhys;
1537
1538 PPGMPAGE pPage;
1539 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
1540 if (RT_SUCCESS(rc))
1541 {
1542 RTRCPTR RCPtr = NIL_RTRCPTR;
1543 RTHCPTR HCPtr = NIL_RTHCPTR;
1544#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1545 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, &HCPtr);
1546 AssertRC(rc);
1547#endif
1548 if (RT_SUCCESS(rc) && fChanged)
1549 {
1550 RCPtr = (RTRCPTR)(RTRCUINTPTR)(pVM->pgm.s.GCPtrCR3Mapping + (1 + iPdpt) * PAGE_SIZE);
1551 rc = PGMMap(pVM, (RTRCUINTPTR)RCPtr, PGM_PAGE_GET_HCPHYS(pPage), PAGE_SIZE, 0);
1552 }
1553 if (RT_SUCCESS(rc))
1554 {
1555 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = (R3PTRTYPE(PX86PDPAE))HCPtr;
1556# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1557 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = (R0PTRTYPE(PX86PDPAE))HCPtr;
1558# endif
1559 if (fChanged)
1560 {
1561 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = GCPhys;
1562 pVCpu->pgm.s.apGstPaePDsRC[iPdpt] = (RCPTRTYPE(PX86PDPAE))RCPtr;
1563 }
1564
1565 *ppPd = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
1566 pgmUnlock(pVM);
1567 return VINF_SUCCESS;
1568 }
1569 }
1570
1571 /* Invalid page or some failure, invalidate the entry. */
1572 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = NIL_RTGCPHYS;
1573 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = 0;
1574# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1575 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = 0;
1576# endif
1577 pVCpu->pgm.s.apGstPaePDsRC[iPdpt] = 0;
1578
1579 pgmUnlock(pVM);
1580 return rc;
1581}
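/*
 * Illustrative sketch (not from PGMAll.cpp): pgmGstLazyMapPaePD caches the
 * guest-physical address of each PAE PD (aGCPhysGstPaePDs) and only redoes
 * the mapping work when that address changes; on failure the cache slot is
 * invalidated so the next access retries.  A hypothetical single-slot
 * version of the same lazy-map-with-change-detection pattern:
 */
#if 0 /* illustrative only */
#include <stddef.h>
#include <stdint.h>

#define DEMO_NIL_PHYS UINT64_C(0xffffffffffffffff)

typedef struct DEMOLAZYSLOT
{
    uint64_t GCPhysCached;  /* backing physical address, DEMO_NIL_PHYS if none */
    void    *pvMapping;     /* host mapping of that page, NULL if none */
} DEMOLAZYSLOT;

/* pfnMapPhys stands in for whatever actually maps a guest page into the host. */
static void *demoLazyMap(DEMOLAZYSLOT *pSlot, uint64_t GCPhys,
                         void *(*pfnMapPhys)(uint64_t GCPhys))
{
    if (pSlot->GCPhysCached == GCPhys && pSlot->pvMapping)
        return pSlot->pvMapping;                /* unchanged: reuse the cached mapping */

    void *pv = pfnMapPhys(GCPhys);              /* changed (or first use): remap */
    if (!pv)
    {
        pSlot->GCPhysCached = DEMO_NIL_PHYS;    /* invalidate so the next call retries */
        pSlot->pvMapping    = NULL;
        return NULL;
    }
    pSlot->GCPhysCached = GCPhys;
    pSlot->pvMapping    = pv;
    return pv;
}
#endif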
1582
1583#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1584#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1585/**
1586 * Performs the lazy mapping of the AMD64 guest PML4 table.
1587 *
1588 * @returns VBox status code.
1589 * @param pVCpu The current CPU.
1590 * @param ppPml4 Where to return the pointer to the mapping. This will
1591 * always be set.
1592 */
1593int pgmGstLazyMapPml4(PVMCPU pVCpu, PX86PML4 *ppPml4)
1594{
1595 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4));
1596 PVM pVM = pVCpu->CTX_SUFF(pVM);
1597 pgmLock(pVM);
1598
1599 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_AMD64_PAGE_MASK;
1600 PPGMPAGE pPage;
1601 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
1602 if (RT_SUCCESS(rc))
1603 {
1604 RTHCPTR HCPtrGuestCR3;
1605 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
1606 if (RT_SUCCESS(rc))
1607 {
1608 pVCpu->pgm.s.pGstAmd64Pml4R3 = (R3PTRTYPE(PX86PML4))HCPtrGuestCR3;
1609# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1610 pVCpu->pgm.s.pGstAmd64Pml4R0 = (R0PTRTYPE(PX86PML4))HCPtrGuestCR3;
1611# endif
1612 *ppPml4 = (PX86PML4)HCPtrGuestCR3;
1613
1614 pgmUnlock(pVM);
1615 return VINF_SUCCESS;
1616 }
1617 }
1618
1619 pgmUnlock(pVM);
1620 *ppPml4 = NULL;
1621 return rc;
1622}
1623#endif
1624
1625
1626/**
1627 * Gets the PAE PDPEs values cached by the CPU.
1628 *
1629 * @returns VBox status code.
1630 * @param pVCpu The virtual CPU.
1631 * @param paPdpes Where to return the four PDPEs. The array
1632 * pointed to must have 4 entries.
1633 */
1634VMM_INT_DECL(int) PGMGstGetPaePdpes(PVMCPU pVCpu, PX86PDPE paPdpes)
1635{
1636 Assert(pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
1637
1638 paPdpes[0] = pVCpu->pgm.s.aGstPaePdpeRegs[0];
1639 paPdpes[1] = pVCpu->pgm.s.aGstPaePdpeRegs[1];
1640 paPdpes[2] = pVCpu->pgm.s.aGstPaePdpeRegs[2];
1641 paPdpes[3] = pVCpu->pgm.s.aGstPaePdpeRegs[3];
1642 return VINF_SUCCESS;
1643}
1644
1645
1646/**
1647 * Sets the PAE PDPEs values cached by the CPU.
1648 *
1649 * @remarks This must be called *AFTER* PGMUpdateCR3.
1650 *
1651 * @returns VBox status code.
1652 * @param pVCpu The virtual CPU.
1653 * @param paPdpes The four PDPE values. The array pointed to
1654 * must have exactly 4 entries.
1655 */
1656VMM_INT_DECL(int) PGMGstUpdatePaePdpes(PVMCPU pVCpu, PCX86PDPE paPdpes)
1657{
1658 Assert(pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
1659
1660 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->pgm.s.aGstPaePdpeRegs); i++)
1661 {
1662 if (pVCpu->pgm.s.aGstPaePdpeRegs[i].u != paPdpes[i].u)
1663 {
1664 pVCpu->pgm.s.aGstPaePdpeRegs[i] = paPdpes[i];
1665
1666 /* Force lazy remapping if it changed in any way. */
1667 pVCpu->pgm.s.apGstPaePDsR3[i] = 0;
1668# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1669 pVCpu->pgm.s.apGstPaePDsR0[i] = 0;
1670# endif
1671 pVCpu->pgm.s.apGstPaePDsRC[i] = 0;
1672 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
1673 }
1674 }
1675 return VINF_SUCCESS;
1676}
1677
1678
1679/**
1680 * Gets the current CR3 register value for the shadow memory context.
1681 * @returns CR3 value.
1682 * @param pVCpu VMCPU handle.
1683 */
1684VMMDECL(RTHCPHYS) PGMGetHyperCR3(PVMCPU pVCpu)
1685{
1686 PPGMPOOLPAGE pPoolPage = pVCpu->pgm.s.CTX_SUFF(pShwPageCR3);
1687 AssertPtrReturn(pPoolPage, 0);
1688 return pPoolPage->Core.Key;
1689}
1690
1691
1692/**
1693 * Gets the current CR3 register value for the nested memory context.
1694 * @returns CR3 value.
1695 * @param pVCpu VMCPU handle.
1696 */
1697VMMDECL(RTHCPHYS) PGMGetNestedCR3(PVMCPU pVCpu, PGMMODE enmShadowMode)
1698{
1699 NOREF(enmShadowMode);
1700 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1701 return pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key;
1702}
1703
1704
1705/**
1706 * Gets the current CR3 register value for the HC intermediate memory context.
1707 * @returns CR3 value.
1708 * @param pVM The VM handle.
1709 */
1710VMMDECL(RTHCPHYS) PGMGetInterHCCR3(PVM pVM)
1711{
1712 switch (pVM->pgm.s.enmHostMode)
1713 {
1714 case SUPPAGINGMODE_32_BIT:
1715 case SUPPAGINGMODE_32_BIT_GLOBAL:
1716 return pVM->pgm.s.HCPhysInterPD;
1717
1718 case SUPPAGINGMODE_PAE:
1719 case SUPPAGINGMODE_PAE_GLOBAL:
1720 case SUPPAGINGMODE_PAE_NX:
1721 case SUPPAGINGMODE_PAE_GLOBAL_NX:
1722 return pVM->pgm.s.HCPhysInterPaePDPT;
1723
1724 case SUPPAGINGMODE_AMD64:
1725 case SUPPAGINGMODE_AMD64_GLOBAL:
1726 case SUPPAGINGMODE_AMD64_NX:
1727 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
1728 return pVM->pgm.s.HCPhysInterPaePDPT;
1729
1730 default:
1731 AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode));
1732 return NIL_RTHCPHYS;
1733 }
1734}
1735
1736
1737/**
1738 * Gets the current CR3 register value for the RC intermediate memory context.
1739 * @returns CR3 value.
1740 * @param pVM The VM handle.
1741 * @param pVCpu VMCPU handle.
1742 */
1743VMMDECL(RTHCPHYS) PGMGetInterRCCR3(PVM pVM, PVMCPU pVCpu)
1744{
1745 switch (pVCpu->pgm.s.enmShadowMode)
1746 {
1747 case PGMMODE_32_BIT:
1748 return pVM->pgm.s.HCPhysInterPD;
1749
1750 case PGMMODE_PAE:
1751 case PGMMODE_PAE_NX:
1752 return pVM->pgm.s.HCPhysInterPaePDPT;
1753
1754 case PGMMODE_AMD64:
1755 case PGMMODE_AMD64_NX:
1756 return pVM->pgm.s.HCPhysInterPaePML4;
1757
1758 case PGMMODE_EPT:
1759 case PGMMODE_NESTED:
1760 return 0; /* not relevant */
1761
1762 default:
1763 AssertMsgFailed(("enmShadowMode=%d\n", pVCpu->pgm.s.enmShadowMode));
1764 return NIL_RTHCPHYS;
1765 }
1766}
1767
1768
1769/**
1770 * Gets the CR3 register value for the 32-Bit intermediate memory context.
1771 * @returns CR3 value.
1772 * @param pVM The VM handle.
1773 */
1774VMMDECL(RTHCPHYS) PGMGetInter32BitCR3(PVM pVM)
1775{
1776 return pVM->pgm.s.HCPhysInterPD;
1777}
1778
1779
1780/**
1781 * Gets the CR3 register value for the PAE intermediate memory context.
1782 * @returns CR3 value.
1783 * @param pVM The VM handle.
1784 */
1785VMMDECL(RTHCPHYS) PGMGetInterPaeCR3(PVM pVM)
1786{
1787 return pVM->pgm.s.HCPhysInterPaePDPT;
1788}
1789
1790
1791/**
1792 * Gets the CR3 register value for the AMD64 intermediate memory context.
1793 * @returns CR3 value.
1794 * @param pVM The VM handle.
1795 */
1796VMMDECL(RTHCPHYS) PGMGetInterAmd64CR3(PVM pVM)
1797{
1798 return pVM->pgm.s.HCPhysInterPaePML4;
1799}
1800
1801
1802/**
1803 * Performs and schedules necessary updates following a CR3 load or reload.
1804 *
1805 * This will normally involve mapping the guest PD or nPDPT.
1806 *
1807 * @returns VBox status code.
1808 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
1809 * safely be ignored and overridden since the force-action flag (FF) will be set as well.
1810 * @param pVCpu VMCPU handle.
1811 * @param cr3 The new cr3.
1812 * @param fGlobal Indicates whether this is a global flush or not.
1813 */
1814VMMDECL(int) PGMFlushTLB(PVMCPU pVCpu, uint64_t cr3, bool fGlobal)
1815{
1816 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLB), a);
1817 PVM pVM = pVCpu->CTX_SUFF(pVM);
1818
1819 VMCPU_ASSERT_EMT(pVCpu);
1820
1821 /*
1822 * Always flag the necessary updates; this is required for hardware acceleration.
1823 */
1824 /** @todo optimize this, it shouldn't always be necessary. */
1825 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
1826 if (fGlobal)
1827 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1828 LogFlow(("PGMFlushTLB: cr3=%RX64 OldCr3=%RX64 fGlobal=%d\n", cr3, pVCpu->pgm.s.GCPhysCR3, fGlobal));
1829
1830 /*
1831 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
1832 */
1833 int rc = VINF_SUCCESS;
1834 RTGCPHYS GCPhysCR3;
1835 switch (pVCpu->pgm.s.enmGuestMode)
1836 {
1837 case PGMMODE_PAE:
1838 case PGMMODE_PAE_NX:
1839 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1840 break;
1841 case PGMMODE_AMD64:
1842 case PGMMODE_AMD64_NX:
1843 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
1844 break;
1845 default:
1846 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1847 break;
1848 }
1849 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
1850
1851 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
1852 {
1853 RTGCPHYS GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
1854 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
1855 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
1856 if (RT_LIKELY(rc == VINF_SUCCESS))
1857 {
1858 if (pgmMapAreMappingsFloating(pVM))
1859 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1860 }
1861 else
1862 {
1863 AssertMsg(rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
1864 Assert(VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_PGM_SYNC_CR3));
1865 pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3;
1866 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MAP_CR3;
1867 if (pgmMapAreMappingsFloating(pVM))
1868 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;
1869 }
1870
1871 if (fGlobal)
1872 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBNewCR3Global));
1873 else
1874 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBNewCR3));
1875 }
1876 else
1877 {
1878# ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
1879 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1880 if (pPool->cDirtyPages)
1881 {
1882 pgmLock(pVM);
1883 pgmPoolResetDirtyPages(pVM);
1884 pgmUnlock(pVM);
1885 }
1886# endif
1887 /*
1888 * Check if we have a pending update of the CR3 monitoring.
1889 */
1890 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
1891 {
1892 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1893 Assert(!pVM->pgm.s.fMappingsFixed); Assert(!pVM->pgm.s.fMappingsDisabled);
1894 }
1895 if (fGlobal)
1896 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBSameCR3Global));
1897 else
1898 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBSameCR3));
1899 }
1900
1901 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLB), a);
1902 return rc;
1903}
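/*
 * Usage sketch (illustrative only): a MOV CR3 emulation path would look roughly
 * like this; uNewCr3 and the surrounding emulation context are assumed here.
 * The key point is that VINF_PGM_SYNC_CR3 may be ignored, because the
 * force-action flag has been set and will be serviced before resuming the guest.
 *
 *      bool const fGlobal = !(CPUMGetGuestCR4(pVCpu) & X86_CR4_PGE);
 *      int rc = PGMFlushTLB(pVCpu, uNewCr3, fGlobal);
 *      if (rc == VINF_PGM_SYNC_CR3)
 *          rc = VINF_SUCCESS;      // VMCPU_FF_PGM_SYNC_CR3 is pending
 *      AssertRC(rc);
 */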
1904
1905
1906/**
1907 * Performs and schedules necessary updates following a CR3 load or reload when
1908 * using nested or extended paging.
1909 *
1910 * This API is an alternative to PGMFlushTLB that avoids actually flushing the
1911 * TLB and triggering a SyncCR3.
1912 *
1913 * This will normally involve mapping the guest PD or nPDPT.
1914 *
1915 * @returns VBox status code.
1916 * @retval VINF_SUCCESS.
1917 * @retval VINF_PGM_SYNC_CR3 if called when not in nested mode and monitoring
1918 * requires a CR3 sync. This can safely be ignored and overridden since
1919 * the force-action flag (FF) will be set as well.
1920 * @param pVCpu VMCPU handle.
1921 * @param cr3 The new cr3.
1922 */
1923VMMDECL(int) PGMUpdateCR3(PVMCPU pVCpu, uint64_t cr3)
1924{
1925 VMCPU_ASSERT_EMT(pVCpu);
1926 LogFlow(("PGMUpdateCR3: cr3=%RX64 OldCr3=%RX64\n", cr3, pVCpu->pgm.s.GCPhysCR3));
1927
1928 /* We assume we're only called in nested paging mode. */
1929 Assert(pVCpu->CTX_SUFF(pVM)->pgm.s.fNestedPaging || pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
1930 Assert(pVCpu->CTX_SUFF(pVM)->pgm.s.fMappingsDisabled);
1931 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3));
1932
1933 /*
1934 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
1935 */
1936 int rc = VINF_SUCCESS;
1937 RTGCPHYS GCPhysCR3;
1938 switch (pVCpu->pgm.s.enmGuestMode)
1939 {
1940 case PGMMODE_PAE:
1941 case PGMMODE_PAE_NX:
1942 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1943 break;
1944 case PGMMODE_AMD64:
1945 case PGMMODE_AMD64_NX:
1946 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
1947 break;
1948 default:
1949 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1950 break;
1951 }
1952 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
1953
1954 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
1955 {
1956 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
1957 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
1958 AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */ /** @todo this isn't true for the mac, but we need hw to test/fix this. */
1959 }
1960 return rc;
1961}
1962
1963
1964/**
1965 * Synchronize the paging structures.
1966 *
1967 * This function is called in response to the VM_FF_PGM_SYNC_CR3 and
1968 * VM_FF_PGM_SYNC_CR3_NONGLOBAL. Those two force action flags are set
1969 * in several places, most importantly whenever the CR3 is loaded.
1970 *
1971 * @returns VBox status code.
1972 * @param pVCpu VMCPU handle.
1973 * @param cr0 Guest context CR0 register
1974 * @param cr3 Guest context CR3 register
1975 * @param cr4 Guest context CR4 register
1976 * @param fGlobal Including global page directories or not
1977 */
1978VMMDECL(int) PGMSyncCR3(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
1979{
1980 int rc;
1981
1982 VMCPU_ASSERT_EMT(pVCpu);
1983
1984 /*
1985 * The pool may have pending stuff and even require a return to ring-3 to
1986 * clear the whole thing.
1987 */
1988 rc = pgmPoolSyncCR3(pVCpu);
1989 if (rc != VINF_SUCCESS)
1990 return rc;
1991
1992 /*
1993 * We might be called when we shouldn't.
1994 *
1995 * The mode switching will ensure that the PD is resynced
1996 * after every mode switch. So, if we find ourselves here
1997 * when in protected or real mode we can safely disable the
1998 * FF and return immediately.
1999 */
2000 if (pVCpu->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
2001 {
2002 Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
2003 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2004 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2005 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2006 return VINF_SUCCESS;
2007 }
2008
2009 /* If global pages are not supported, then all flushes are global. */
2010 if (!(cr4 & X86_CR4_PGE))
2011 fGlobal = true;
2012 LogFlow(("PGMSyncCR3: cr0=%RX64 cr3=%RX64 cr4=%RX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
2013 VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)));
2014
2015 /*
2016 * Check if we need to finish an aborted MapCR3 call (see PGMFlushTLB).
2017 * This should be done before SyncCR3.
2018 */
2019 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MAP_CR3)
2020 {
2021 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MAP_CR3;
2022
2023 RTGCPHYS GCPhysCR3Old = pVCpu->pgm.s.GCPhysCR3; NOREF(GCPhysCR3Old);
2024 RTGCPHYS GCPhysCR3;
2025 switch (pVCpu->pgm.s.enmGuestMode)
2026 {
2027 case PGMMODE_PAE:
2028 case PGMMODE_PAE_NX:
2029 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
2030 break;
2031 case PGMMODE_AMD64:
2032 case PGMMODE_AMD64_NX:
2033 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
2034 break;
2035 default:
2036 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
2037 break;
2038 }
2039 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
2040
2041 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
2042 {
2043 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2044 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
2045 }
2046
2047 /* Make sure we check for pending pgm pool syncs as we clear VMCPU_FF_PGM_SYNC_CR3 later on! */
2048 if ( rc == VINF_PGM_SYNC_CR3
2049 || (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL))
2050 {
2051 Log(("PGMSyncCR3: pending pgm pool sync after MapCR3!\n"));
2052#ifdef IN_RING3
2053 rc = pgmPoolSyncCR3(pVCpu);
2054#else
2055 if (rc == VINF_PGM_SYNC_CR3)
2056 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3Old;
2057 return VINF_PGM_SYNC_CR3;
2058#endif
2059 }
2060 AssertRCReturn(rc, rc);
2061 AssertRCSuccessReturn(rc, VERR_IPE_UNEXPECTED_INFO_STATUS);
2062 }
2063
2064 /*
2065 * Let the 'Bth' function do the work and we'll just keep track of the flags.
2066 */
2067 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
2068 rc = PGM_BTH_PFN(SyncCR3, pVCpu)(pVCpu, cr0, cr3, cr4, fGlobal);
2069 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
2070 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
2071 if (rc == VINF_SUCCESS)
2072 {
2073 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)
2074 {
2075 /* Go back to ring 3 if a pgm pool sync is again pending. */
2076 return VINF_PGM_SYNC_CR3;
2077 }
2078
2079 if (!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
2080 {
2081 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2082 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2083 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2084 }
2085
2086 /*
2087 * Check if we have a pending update of the CR3 monitoring.
2088 */
2089 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
2090 {
2091 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
2092 Assert(!pVCpu->CTX_SUFF(pVM)->pgm.s.fMappingsFixed);
2093 Assert(!pVCpu->CTX_SUFF(pVM)->pgm.s.fMappingsDisabled);
2094 }
2095 }
2096
2097 /*
2098 * Now invalidate the guest TLBs (guest context).
2099 */
2100 if (rc == VINF_SUCCESS)
2101 PGM_INVL_VCPU_TLBS(pVCpu);
2102 return rc;
2103}
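/*
 * Usage sketch (illustrative only): how an execution loop might service the
 * sync force-action flags before resuming the guest.  The surrounding loop and
 * the exact rc handling are assumptions for illustration.
 *
 *      if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
 *      {
 *          int rc = PGMSyncCR3(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR3(pVCpu),
 *                              CPUMGetGuestCR4(pVCpu),
 *                              VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
 *          if (rc == VINF_PGM_SYNC_CR3)
 *              return rc;          // not in ring-3: go there and retry
 *          AssertRC(rc);
 *      }
 */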
2104
2105
2106/**
2107 * Called whenever CR0 or CR4 changes in a way which may affect the paging mode.
2108 *
2109 * @returns VBox status code, with the following informational code for
2110 * VM scheduling.
2111 * @retval VINF_SUCCESS if there was no change, or it was successfully dealt with.
2112 * @retval VINF_PGM_CHANGE_MODE if we're in RC or R0 and the mode changes.
2113 * (I.e. not in R3.)
2114 * @retval VINF_EM_SUSPEND or VINF_EM_OFF on a fatal runtime error. (R3 only)
2115 *
2116 * @param pVCpu VMCPU handle.
2117 * @param cr0 The new cr0.
2118 * @param cr4 The new cr4.
2119 * @param efer The new extended feature enable register.
2120 */
2121VMMDECL(int) PGMChangeMode(PVMCPU pVCpu, uint64_t cr0, uint64_t cr4, uint64_t efer)
2122{
2123 PGMMODE enmGuestMode;
2124
2125 VMCPU_ASSERT_EMT(pVCpu);
2126
2127 /*
2128 * Calc the new guest mode.
2129 */
2130 if (!(cr0 & X86_CR0_PE))
2131 enmGuestMode = PGMMODE_REAL;
2132 else if (!(cr0 & X86_CR0_PG))
2133 enmGuestMode = PGMMODE_PROTECTED;
2134 else if (!(cr4 & X86_CR4_PAE))
2135 {
2136 bool const fPse = !!(cr4 & X86_CR4_PSE);
2137 if (pVCpu->pgm.s.fGst32BitPageSizeExtension != fPse)
2138 Log(("PGMChangeMode: CR4.PSE %d -> %d\n", pVCpu->pgm.s.fGst32BitPageSizeExtension, fPse));
2139 pVCpu->pgm.s.fGst32BitPageSizeExtension = fPse;
2140 enmGuestMode = PGMMODE_32_BIT;
2141 }
2142 else if (!(efer & MSR_K6_EFER_LME))
2143 {
2144 if (!(efer & MSR_K6_EFER_NXE))
2145 enmGuestMode = PGMMODE_PAE;
2146 else
2147 enmGuestMode = PGMMODE_PAE_NX;
2148 }
2149 else
2150 {
2151 if (!(efer & MSR_K6_EFER_NXE))
2152 enmGuestMode = PGMMODE_AMD64;
2153 else
2154 enmGuestMode = PGMMODE_AMD64_NX;
2155 }
2156
2157 /*
2158 * Did it change?
2159 */
2160 if (pVCpu->pgm.s.enmGuestMode == enmGuestMode)
2161 return VINF_SUCCESS;
2162
2163 /* Flush the TLB */
2164 PGM_INVL_VCPU_TLBS(pVCpu);
2165
2166#ifdef IN_RING3
2167 return PGMR3ChangeMode(pVCpu->CTX_SUFF(pVM), pVCpu, enmGuestMode);
2168#else
2169 LogFlow(("PGMChangeMode: returns VINF_PGM_CHANGE_MODE.\n"));
2170 return VINF_PGM_CHANGE_MODE;
2171#endif
2172}
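/*
 * Usage sketch (illustrative only): a CR0/CR4/EFER write path would typically
 * forward the new values like this; uNewCr0, uNewCr4 and uNewEfer are assumed
 * to come from the caller.
 *
 *      int rc = PGMChangeMode(pVCpu, uNewCr0, uNewCr4, uNewEfer);
 *      if (rc == VINF_PGM_CHANGE_MODE)
 *          return rc;  // in RC/R0: reschedule to ring-3 so PGMR3ChangeMode can do the switch
 *      AssertRC(rc);
 */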
2173
2174
2175/**
2176 * Gets the current guest paging mode.
2177 *
2178 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
2179 *
2180 * @returns The current paging mode.
2181 * @param pVCpu VMCPU handle.
2182 */
2183VMMDECL(PGMMODE) PGMGetGuestMode(PVMCPU pVCpu)
2184{
2185 return pVCpu->pgm.s.enmGuestMode;
2186}
2187
2188
2189/**
2190 * Gets the current shadow paging mode.
2191 *
2192 * @returns The current paging mode.
2193 * @param pVCpu VMCPU handle.
2194 */
2195VMMDECL(PGMMODE) PGMGetShadowMode(PVMCPU pVCpu)
2196{
2197 return pVCpu->pgm.s.enmShadowMode;
2198}
2199
2200
2201/**
2202 * Gets the current host paging mode.
2203 *
2204 * @returns The current paging mode.
2205 * @param pVM The VM handle.
2206 */
2207VMMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
2208{
2209 switch (pVM->pgm.s.enmHostMode)
2210 {
2211 case SUPPAGINGMODE_32_BIT:
2212 case SUPPAGINGMODE_32_BIT_GLOBAL:
2213 return PGMMODE_32_BIT;
2214
2215 case SUPPAGINGMODE_PAE:
2216 case SUPPAGINGMODE_PAE_GLOBAL:
2217 return PGMMODE_PAE;
2218
2219 case SUPPAGINGMODE_PAE_NX:
2220 case SUPPAGINGMODE_PAE_GLOBAL_NX:
2221 return PGMMODE_PAE_NX;
2222
2223 case SUPPAGINGMODE_AMD64:
2224 case SUPPAGINGMODE_AMD64_GLOBAL:
2225 return PGMMODE_AMD64;
2226
2227 case SUPPAGINGMODE_AMD64_NX:
2228 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
2229 return PGMMODE_AMD64_NX;
2230
2231 default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
2232 }
2233
2234 return PGMMODE_INVALID;
2235}
2236
2237
2238/**
2239 * Gets the mode name.
2240 *
2241 * @returns read-only name string.
2242 * @param enmMode The mode whose name is desired.
2243 */
2244VMMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
2245{
2246 switch (enmMode)
2247 {
2248 case PGMMODE_REAL: return "Real";
2249 case PGMMODE_PROTECTED: return "Protected";
2250 case PGMMODE_32_BIT: return "32-bit";
2251 case PGMMODE_PAE: return "PAE";
2252 case PGMMODE_PAE_NX: return "PAE+NX";
2253 case PGMMODE_AMD64: return "AMD64";
2254 case PGMMODE_AMD64_NX: return "AMD64+NX";
2255 case PGMMODE_NESTED: return "Nested";
2256 case PGMMODE_EPT: return "EPT";
2257 default: return "unknown mode value";
2258 }
2259}
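/*
 * Usage sketch (illustrative only): the mode name is handy when logging mode
 * transitions; enmNewMode is an assumed local here.
 *
 *      Log(("PGM: guest mode %s -> %s\n",
 *           PGMGetModeName(PGMGetGuestMode(pVCpu)), PGMGetModeName(enmNewMode)));
 */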
2260
2261
2262
2263/**
2264 * Notification from CPUM that the EFER.NXE bit has changed.
2265 *
2266 * @param pVCpu The virtual CPU for which EFER changed.
2267 * @param fNxe The new NXE state.
2268 */
2269VMM_INT_DECL(void) PGMNotifyNxeChanged(PVMCPU pVCpu, bool fNxe)
2270{
2271/** @todo VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu); */
2272 Log(("PGMNotifyNxeChanged: fNxe=%RTbool\n", fNxe));
2273
2274 pVCpu->pgm.s.fNoExecuteEnabled = fNxe;
2275 if (fNxe)
2276 {
2277 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
2278 pVCpu->pgm.s.fGstPaeMbzPteMask &= ~X86_PTE_PAE_NX;
2279 pVCpu->pgm.s.fGstPaeMbzPdeMask &= ~X86_PDE_PAE_NX;
2280 pVCpu->pgm.s.fGstPaeMbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
2281 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask - N/A */
2282 pVCpu->pgm.s.fGstAmd64MbzPteMask &= ~X86_PTE_PAE_NX;
2283 pVCpu->pgm.s.fGstAmd64MbzPdeMask &= ~X86_PDE_PAE_NX;
2284 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
2285 pVCpu->pgm.s.fGstAmd64MbzPdpeMask &= ~X86_PDPE_LM_NX;
2286 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask &= ~X86_PDPE_LM_NX;
2287 pVCpu->pgm.s.fGstAmd64MbzPml4eMask &= ~X86_PML4E_NX;
2288
2289 pVCpu->pgm.s.fGst64ShadowedPteMask |= X86_PTE_PAE_NX;
2290 pVCpu->pgm.s.fGst64ShadowedPdeMask |= X86_PDE_PAE_NX;
2291 pVCpu->pgm.s.fGst64ShadowedBigPdeMask |= X86_PDE2M_PAE_NX;
2292 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask |= X86_PDE2M_PAE_NX;
2293 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask |= X86_PDPE_LM_NX;
2294 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask |= X86_PML4E_NX;
2295 }
2296 else
2297 {
2298 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
2299 pVCpu->pgm.s.fGstPaeMbzPteMask |= X86_PTE_PAE_NX;
2300 pVCpu->pgm.s.fGstPaeMbzPdeMask |= X86_PDE_PAE_NX;
2301 pVCpu->pgm.s.fGstPaeMbzBigPdeMask |= X86_PDE2M_PAE_NX;
2302 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask - N/A */
2303 pVCpu->pgm.s.fGstAmd64MbzPteMask |= X86_PTE_PAE_NX;
2304 pVCpu->pgm.s.fGstAmd64MbzPdeMask |= X86_PDE_PAE_NX;
2305 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask |= X86_PDE2M_PAE_NX;
2306 pVCpu->pgm.s.fGstAmd64MbzPdpeMask |= X86_PDPE_LM_NX;
2307 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask |= X86_PDPE_LM_NX;
2308 pVCpu->pgm.s.fGstAmd64MbzPml4eMask |= X86_PML4E_NX;
2309
2310 pVCpu->pgm.s.fGst64ShadowedPteMask &= ~X86_PTE_PAE_NX;
2311 pVCpu->pgm.s.fGst64ShadowedPdeMask &= ~X86_PDE_PAE_NX;
2312 pVCpu->pgm.s.fGst64ShadowedBigPdeMask &= ~X86_PDE2M_PAE_NX;
2313 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask &= ~X86_PDE2M_PAE_NX;
2314 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask &= ~X86_PDPE_LM_NX;
2315 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask &= ~X86_PML4E_NX;
2316 }
2317}
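/*
 * Usage sketch (illustrative only): the EFER write path would typically only
 * notify PGM when the NXE bit actually changes; uOldEfer and uNewEfer are
 * assumed locals.
 *
 *      if ((uOldEfer ^ uNewEfer) & MSR_K6_EFER_NXE)
 *          PGMNotifyNxeChanged(pVCpu, RT_BOOL(uNewEfer & MSR_K6_EFER_NXE));
 */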
2318
2319
2320/**
2321 * Check if any pgm pool pages are marked dirty (not monitored).
2322 *
2323 * @returns true if there are dirty pages, false if not.
2324 * @param pVM The VM handle.
2325 */
2326VMMDECL(bool) PGMHasDirtyPages(PVM pVM)
2327{
2328 return pVM->pgm.s.CTX_SUFF(pPool)->cDirtyPages != 0;
2329}
2330
2331
2332/**
2333 * Check if this VCPU currently owns the PGM lock.
2334 *
2335 * @returns true if owner, false if not.
2336 * @param pVM The VM handle.
2337 */
2338VMMDECL(bool) PGMIsLockOwner(PVM pVM)
2339{
2340 return PDMCritSectIsOwner(&pVM->pgm.s.CritSectX);
2341}
2342
2343
2344/**
2345 * Enable or disable large page usage.
2346 *
2347 * @returns VBox status code.
2348 * @param pVM The VM handle.
2349 * @param fUseLargePages Whether to use large pages.
2350 */
2351VMMDECL(int) PGMSetLargePageUsage(PVM pVM, bool fUseLargePages)
2352{
2353 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2354
2355 pVM->fUseLargePages = fUseLargePages;
2356 return VINF_SUCCESS;
2357}
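/*
 * Usage sketch (illustrative only): ring-3 VM construction code could wire a
 * configuration flag through to PGM like this; fCfgUseLargePages is an assumed
 * value read from the VM configuration.
 *
 *      int rc = PGMSetLargePageUsage(pVM, fCfgUseLargePages);
 *      AssertRCReturn(rc, rc);
 */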
2358
2359
2360/**
2361 * Acquire the PGM lock.
2362 *
2363 * @returns VBox status code
2364 * @param pVM The VM handle.
2365 */
2366int pgmLock(PVM pVM)
2367{
2368 int rc = PDMCritSectEnter(&pVM->pgm.s.CritSectX, VERR_SEM_BUSY);
2369#if defined(IN_RC) || defined(IN_RING0)
2370 if (rc == VERR_SEM_BUSY)
2371 rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_LOCK, 0);
2372#endif
2373 AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc));
2374 return rc;
2375}
2376
2377
2378/**
2379 * Release the PGM lock.
2380 *
2381 * @returns VBox status code
2382 * @param pVM The VM handle.
2383 */
2384void pgmUnlock(PVM pVM)
2385{
2386 uint32_t cDeprecatedPageLocks = pVM->pgm.s.cDeprecatedPageLocks;
2387 pVM->pgm.s.cDeprecatedPageLocks = 0;
2388 int rc = PDMCritSectLeave(&pVM->pgm.s.CritSectX);
2389 if (rc == VINF_SEM_NESTED)
2390 pVM->pgm.s.cDeprecatedPageLocks = cDeprecatedPageLocks;
2391}
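/*
 * Usage sketch (illustrative only): the canonical internal locking pattern, as
 * seen throughout this file.  Code that merely needs to assert ownership can
 * use PGMIsLockOwner() instead of taking the lock.
 *
 *      pgmLock(pVM);
 *      Assert(PGMIsLockOwner(pVM));
 *      // ... access or update paging structures ...
 *      pgmUnlock(pVM);
 */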
2392
2393#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
2394
2395/**
2396 * Common worker for pgmRZDynMapGCPageOffInlined and pgmRZDynMapGCPageV2Inlined.
2397 *
2398 * @returns VBox status code.
2399 * @param pVM The VM handle.
2400 * @param pVCpu The current CPU.
2401 * @param GCPhys The guest physical address of the page to map. The
2402 * offset bits are not ignored.
2403 * @param ppv Where to return the address corresponding to @a GCPhys.
2404 */
2405int pgmRZDynMapGCPageCommon(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
2406{
2407 pgmLock(pVM);
2408
2409 /*
2410 * Convert it to a writable page and pass it on to the dynamic mapper.
2411 */
2412 int rc;
2413 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
2414 if (RT_LIKELY(pPage))
2415 {
2416 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
2417 if (RT_SUCCESS(rc))
2418 {
2419 void *pv;
2420 rc = pgmRZDynMapHCPageInlined(pVCpu, PGM_PAGE_GET_HCPHYS(pPage), &pv RTLOG_COMMA_SRC_POS_ARGS);
2421 if (RT_SUCCESS(rc))
2422 *ppv = (void *)((uintptr_t)pv | ((uintptr_t)GCPhys & PAGE_OFFSET_MASK));
2423 }
2424 else
2425 AssertRC(rc);
2426 }
2427 else
2428 {
2429 AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
2430 rc = VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2431 }
2432
2433 pgmUnlock(pVM);
2434 return rc;
2435}
2436
2437#endif /* IN_RC || VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
2438#if !defined(IN_R0) || defined(LOG_ENABLED)
2439
2440/** Format handler for PGMPAGE.
2441 * @copydoc FNRTSTRFORMATTYPE */
2442static DECLCALLBACK(size_t) pgmFormatTypeHandlerPage(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
2443 const char *pszType, void const *pvValue,
2444 int cchWidth, int cchPrecision, unsigned fFlags,
2445 void *pvUser)
2446{
2447 size_t cch;
2448 PCPGMPAGE pPage = (PCPGMPAGE)pvValue;
2449 if (RT_VALID_PTR(pPage))
2450 {
2451 char szTmp[64+80];
2452
2453 cch = 0;
2454
2455 /* The single char state stuff. */
2456 static const char s_achPageStates[4] = { 'Z', 'A', 'W', 'S' };
2457 szTmp[cch++] = s_achPageStates[PGM_PAGE_GET_STATE_NA(pPage)];
2458
2459#define IS_PART_INCLUDED(lvl) ( !(fFlags & RTSTR_F_PRECISION) || cchPrecision == (lvl) || cchPrecision >= (lvl)+10 )
2460 if (IS_PART_INCLUDED(5))
2461 {
2462 static const char s_achHandlerStates[4] = { '-', 't', 'w', 'a' };
2463 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)];
2464 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_VIRT_STATE(pPage)];
2465 }
2466
2467 /* The type. */
2468 if (IS_PART_INCLUDED(4))
2469 {
2470 szTmp[cch++] = ':';
2471 static const char s_achPageTypes[8][4] = { "INV", "RAM", "MI2", "M2A", "SHA", "ROM", "MIO", "BAD" };
2472 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][0];
2473 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][1];
2474 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][2];
2475 }
2476
2477 /* The numbers. */
2478 if (IS_PART_INCLUDED(3))
2479 {
2480 szTmp[cch++] = ':';
2481 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_HCPHYS_NA(pPage), 16, 12, 0, RTSTR_F_ZEROPAD | RTSTR_F_64BIT);
2482 }
2483
2484 if (IS_PART_INCLUDED(2))
2485 {
2486 szTmp[cch++] = ':';
2487 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_PAGEID(pPage), 16, 7, 0, RTSTR_F_ZEROPAD | RTSTR_F_32BIT);
2488 }
2489
2490 if (IS_PART_INCLUDED(6))
2491 {
2492 szTmp[cch++] = ':';
2493 static const char s_achRefs[4] = { '-', 'U', '!', 'L' };
2494 szTmp[cch++] = s_achRefs[PGM_PAGE_GET_TD_CREFS_NA(pPage)];
2495 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_TD_IDX_NA(pPage), 16, 4, 0, RTSTR_F_ZEROPAD | RTSTR_F_16BIT);
2496 }
2497#undef IS_PART_INCLUDED
2498
2499 cch = pfnOutput(pvArgOutput, szTmp, cch);
2500 }
2501 else
2502 cch = pfnOutput(pvArgOutput, "<bad-pgmpage-ptr>", sizeof("<bad-pgmpage-ptr>") - 1);
2503 NOREF(pszType); NOREF(cchWidth); NOREF(pvUser);
2504 return cch;
2505}
2506
2507
2508/** Format handler for PGMRAMRANGE.
2509 * @copydoc FNRTSTRFORMATTYPE */
2510static DECLCALLBACK(size_t) pgmFormatTypeHandlerRamRange(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
2511 const char *pszType, void const *pvValue,
2512 int cchWidth, int cchPrecision, unsigned fFlags,
2513 void *pvUser)
2514{
2515 size_t cch;
2516 PGMRAMRANGE const *pRam = (PGMRAMRANGE const *)pvValue;
2517 if (VALID_PTR(pRam))
2518 {
2519 char szTmp[80];
2520 cch = RTStrPrintf(szTmp, sizeof(szTmp), "%RGp-%RGp", pRam->GCPhys, pRam->GCPhysLast);
2521 cch = pfnOutput(pvArgOutput, szTmp, cch);
2522 }
2523 else
2524 cch = pfnOutput(pvArgOutput, "<bad-pgmramrange-ptr>", sizeof("<bad-pgmramrange-ptr>") - 1);
2525 NOREF(pszType); NOREF(cchWidth); NOREF(cchPrecision); NOREF(pvUser); NOREF(fFlags);
2526 return cch;
2527}
2528
2529/** Format type handlers to be registered/deregistered. */
2530static const struct
2531{
2532 char szType[24];
2533 PFNRTSTRFORMATTYPE pfnHandler;
2534} g_aPgmFormatTypes[] =
2535{
2536 { "pgmpage", pgmFormatTypeHandlerPage },
2537 { "pgmramrange", pgmFormatTypeHandlerRamRange }
2538};
2539
2540#endif /* !IN_R0 || LOG_ENABLED */
2541
2542/**
2543 * Registers the global string format types.
2544 *
2545 * This should be called at module load time or in some other manner that ensures
2546 * that it's called exactly once.
2547 *
2548 * @returns VINF_SUCCESS on success, IPRT status code on RTStrFormatTypeRegister failure.
2549 */
2550VMMDECL(int) PGMRegisterStringFormatTypes(void)
2551{
2552#if !defined(IN_R0) || defined(LOG_ENABLED)
2553 int rc = VINF_SUCCESS;
2554 unsigned i;
2555 for (i = 0; RT_SUCCESS(rc) && i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
2556 {
2557 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
2558# ifdef IN_RING0
2559 if (rc == VERR_ALREADY_EXISTS)
2560 {
2561 /* in case of cleanup failure in ring-0 */
2562 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2563 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
2564 }
2565# endif
2566 }
2567 if (RT_FAILURE(rc))
2568 while (i-- > 0)
2569 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2570
2571 return rc;
2572#else
2573 return VINF_SUCCESS;
2574#endif
2575}
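/*
 * Usage sketch (illustrative only): once registered, the custom types are used
 * through the IPRT %R[type] format extension, e.g. with a pPage/pRam in scope:
 *
 *      Log(("GCPhys=%RGp %R[pgmpage]\n", GCPhys, pPage));
 *      Log(("ram range %R[pgmramrange]\n", pRam));
 */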
2576
2577
2578/**
2579 * Deregisters the global string format types.
2580 *
2581 * This should be called at module unload time or in some other manner that
2582 * ensures that it's called exactly once.
2583 */
2584VMMDECL(void) PGMDeregisterStringFormatTypes(void)
2585{
2586#if !defined(IN_R0) || defined(LOG_ENABLED)
2587 for (unsigned i = 0; i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
2588 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2589#endif
2590}
2591
2592#ifdef VBOX_STRICT
2593
2594/**
2595 * Asserts that there are no mapping conflicts.
2596 *
2597 * @returns Number of conflicts.
2598 * @param pVM The VM handle.
2599 */
2600VMMDECL(unsigned) PGMAssertNoMappingConflicts(PVM pVM)
2601{
2602 unsigned cErrors = 0;
2603
2604 /* Only applies to raw mode -> 1 VCPU */
2605 Assert(pVM->cCpus == 1);
2606 PVMCPU pVCpu = &pVM->aCpus[0];
2607
2608 /*
2609 * Check for mapping conflicts.
2610 */
2611 for (PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
2612 pMapping;
2613 pMapping = pMapping->CTX_SUFF(pNext))
2614 {
2615 /** @todo This is slow and should be optimized, but since it's just assertions I don't care now. */
2616 for (RTGCPTR GCPtr = pMapping->GCPtr;
2617 GCPtr <= pMapping->GCPtrLast;
2618 GCPtr += PAGE_SIZE)
2619 {
2620 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)GCPtr, NULL, NULL);
2621 if (rc != VERR_PAGE_TABLE_NOT_PRESENT)
2622 {
2623 AssertMsgFailed(("Conflict at %RGv with %s\n", GCPtr, R3STRING(pMapping->pszDesc)));
2624 cErrors++;
2625 break;
2626 }
2627 }
2628 }
2629
2630 return cErrors;
2631}
2632
2633
2634/**
2635 * Asserts that everything related to the guest CR3 is correctly shadowed.
2636 *
2637 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
2638 * and assert the correctness of the guest CR3 mapping before asserting that the
2639 * shadow page tables are in sync with the guest page tables.
2640 *
2641 * @returns Number of conflicts.
2642 * @param pVM The VM handle.
2643 * @param pVCpu The VMCPU handle.
2644 * @param cr3 The current guest CR3 register value.
2645 * @param cr4 The current guest CR4 register value.
2646 */
2647VMMDECL(unsigned) PGMAssertCR3(PVM pVM, PVMCPU pVCpu, uint64_t cr3, uint64_t cr4)
2648{
2649 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
2650 pgmLock(pVM);
2651 unsigned cErrors = PGM_BTH_PFN(AssertCR3, pVCpu)(pVCpu, cr3, cr4, 0, ~(RTGCPTR)0);
2652 pgmUnlock(pVM);
2653 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
2654 return cErrors;
2655}
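/*
 * Usage sketch (illustrative only): strict builds can use this after returning
 * from guest execution to catch shadow paging bugs early.
 *
 *      unsigned cErrors = PGMAssertCR3(pVM, pVCpu, CPUMGetGuestCR3(pVCpu), CPUMGetGuestCR4(pVCpu));
 *      Assert(cErrors == 0);
 */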
2656
2657#endif /* VBOX_STRICT */