VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp@31768

Last change on this file since 31768 was 31636, checked in by vboxsync, 14 years ago

EM: Addressed VBOXSTRICTRC todo from r64673. Decided to only use VBOXSTRICTRC on the methods that would actually return VINF_EM_* stuff.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 83.4 KB
1/* $Id: PGMAll.cpp 31636 2010-08-13 12:03:15Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor - All context code.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_PGM
22#include <VBox/pgm.h>
23#include <VBox/cpum.h>
24#include <VBox/selm.h>
25#include <VBox/iom.h>
26#include <VBox/sup.h>
27#include <VBox/mm.h>
28#include <VBox/stam.h>
29#include <VBox/csam.h>
30#include <VBox/patm.h>
31#include <VBox/trpm.h>
32#include <VBox/rem.h>
33#include <VBox/em.h>
34#include <VBox/hwaccm.h>
35#include <VBox/hwacc_vmx.h>
36#include "../PGMInternal.h"
37#include <VBox/vm.h>
38#include "../PGMInline.h"
39#include <iprt/assert.h>
40#include <iprt/asm-amd64-x86.h>
41#include <iprt/string.h>
42#include <VBox/log.h>
43#include <VBox/param.h>
44#include <VBox/err.h>
45
46
47/*******************************************************************************
48* Structures and Typedefs *
49*******************************************************************************/
50/**
51 * State structure for PGM_GST_NAME(HandlerVirtualUpdate) that's
52 * passed to PGM_GST_NAME(VirtHandlerUpdateOne) during enumeration.
53 */
54typedef struct PGMHVUSTATE
55{
56 /** The VM handle. */
57 PVM pVM;
58 /** The VMCPU handle. */
59 PVMCPU pVCpu;
60 /** The todo flags. */
61 RTUINT fTodo;
62 /** The CR4 register value. */
63 uint32_t cr4;
64} PGMHVUSTATE, *PPGMHVUSTATE;
65
66
67/*******************************************************************************
68* Internal Functions *
69*******************************************************************************/
70DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
71DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPU pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde);
72#ifndef IN_RC
73static int pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD);
74static int pgmShwGetEPTPDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD);
75#endif
76
77
78/*
79 * Shadow - 32-bit mode
80 */
81#define PGM_SHW_TYPE PGM_TYPE_32BIT
82#define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
83#include "PGMAllShw.h"
84
85/* Guest - real mode */
86#define PGM_GST_TYPE PGM_TYPE_REAL
87#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
88#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
89#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
90#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
91#include "PGMGstDefs.h"
92#include "PGMAllGst.h"
93#include "PGMAllBth.h"
94#undef BTH_PGMPOOLKIND_PT_FOR_PT
95#undef BTH_PGMPOOLKIND_ROOT
96#undef PGM_BTH_NAME
97#undef PGM_GST_TYPE
98#undef PGM_GST_NAME
99
100/* Guest - protected mode */
101#define PGM_GST_TYPE PGM_TYPE_PROT
102#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
103#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
104#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
105#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
106#include "PGMGstDefs.h"
107#include "PGMAllGst.h"
108#include "PGMAllBth.h"
109#undef BTH_PGMPOOLKIND_PT_FOR_PT
110#undef BTH_PGMPOOLKIND_ROOT
111#undef PGM_BTH_NAME
112#undef PGM_GST_TYPE
113#undef PGM_GST_NAME
114
115/* Guest - 32-bit mode */
116#define PGM_GST_TYPE PGM_TYPE_32BIT
117#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
118#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
119#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
120#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
121#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD
122#include "PGMGstDefs.h"
123#include "PGMAllGst.h"
124#include "PGMAllBth.h"
125#undef BTH_PGMPOOLKIND_PT_FOR_BIG
126#undef BTH_PGMPOOLKIND_PT_FOR_PT
127#undef BTH_PGMPOOLKIND_ROOT
128#undef PGM_BTH_NAME
129#undef PGM_GST_TYPE
130#undef PGM_GST_NAME
131
132#undef PGM_SHW_TYPE
133#undef PGM_SHW_NAME
134
135
136/*
137 * Shadow - PAE mode
138 */
139#define PGM_SHW_TYPE PGM_TYPE_PAE
140#define PGM_SHW_NAME(name) PGM_SHW_NAME_PAE(name)
141#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
142#include "PGMAllShw.h"
143
144/* Guest - real mode */
145#define PGM_GST_TYPE PGM_TYPE_REAL
146#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
147#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
148#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
149#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
150#include "PGMGstDefs.h"
151#include "PGMAllBth.h"
152#undef BTH_PGMPOOLKIND_PT_FOR_PT
153#undef BTH_PGMPOOLKIND_ROOT
154#undef PGM_BTH_NAME
155#undef PGM_GST_TYPE
156#undef PGM_GST_NAME
157
158/* Guest - protected mode */
159#define PGM_GST_TYPE PGM_TYPE_PROT
160#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
161#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
162#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
163#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
164#include "PGMGstDefs.h"
165#include "PGMAllBth.h"
166#undef BTH_PGMPOOLKIND_PT_FOR_PT
167#undef BTH_PGMPOOLKIND_ROOT
168#undef PGM_BTH_NAME
169#undef PGM_GST_TYPE
170#undef PGM_GST_NAME
171
172/* Guest - 32-bit mode */
173#define PGM_GST_TYPE PGM_TYPE_32BIT
174#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
175#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
176#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
177#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
178#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_FOR_32BIT
179#include "PGMGstDefs.h"
180#include "PGMAllBth.h"
181#undef BTH_PGMPOOLKIND_PT_FOR_BIG
182#undef BTH_PGMPOOLKIND_PT_FOR_PT
183#undef BTH_PGMPOOLKIND_ROOT
184#undef PGM_BTH_NAME
185#undef PGM_GST_TYPE
186#undef PGM_GST_NAME
187
188
189/* Guest - PAE mode */
190#define PGM_GST_TYPE PGM_TYPE_PAE
191#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
192#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
193#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
194#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
195#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT
196#include "PGMGstDefs.h"
197#include "PGMAllGst.h"
198#include "PGMAllBth.h"
199#undef BTH_PGMPOOLKIND_PT_FOR_BIG
200#undef BTH_PGMPOOLKIND_PT_FOR_PT
201#undef BTH_PGMPOOLKIND_ROOT
202#undef PGM_BTH_NAME
203#undef PGM_GST_TYPE
204#undef PGM_GST_NAME
205
206#undef PGM_SHW_TYPE
207#undef PGM_SHW_NAME
208
209
210#ifndef IN_RC /* AMD64 implies VT-x/AMD-V */
211/*
212 * Shadow - AMD64 mode
213 */
214# define PGM_SHW_TYPE PGM_TYPE_AMD64
215# define PGM_SHW_NAME(name) PGM_SHW_NAME_AMD64(name)
216# include "PGMAllShw.h"
217
218/* Guest - protected mode (only used for AMD-V nested paging in 64 bits mode) */
219# define PGM_GST_TYPE PGM_TYPE_PROT
220# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
221# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
222# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
223# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PD_PHYS
224# include "PGMGstDefs.h"
225# include "PGMAllBth.h"
226# undef BTH_PGMPOOLKIND_PT_FOR_PT
227# undef BTH_PGMPOOLKIND_ROOT
228# undef PGM_BTH_NAME
229# undef PGM_GST_TYPE
230# undef PGM_GST_NAME
231
232# ifdef VBOX_WITH_64_BITS_GUESTS
233/* Guest - AMD64 mode */
234# define PGM_GST_TYPE PGM_TYPE_AMD64
235# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
236# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
237# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
238# define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
239# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_64BIT_PML4
240# include "PGMGstDefs.h"
241# include "PGMAllGst.h"
242# include "PGMAllBth.h"
243# undef BTH_PGMPOOLKIND_PT_FOR_BIG
244# undef BTH_PGMPOOLKIND_PT_FOR_PT
245# undef BTH_PGMPOOLKIND_ROOT
246# undef PGM_BTH_NAME
247# undef PGM_GST_TYPE
248# undef PGM_GST_NAME
249# endif /* VBOX_WITH_64_BITS_GUESTS */
250
251# undef PGM_SHW_TYPE
252# undef PGM_SHW_NAME
253
254
255/*
256 * Shadow - Nested paging mode
257 */
258# define PGM_SHW_TYPE PGM_TYPE_NESTED
259# define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED(name)
260# include "PGMAllShw.h"
261
262/* Guest - real mode */
263# define PGM_GST_TYPE PGM_TYPE_REAL
264# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
265# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_REAL(name)
266# include "PGMGstDefs.h"
267# include "PGMAllBth.h"
268# undef PGM_BTH_NAME
269# undef PGM_GST_TYPE
270# undef PGM_GST_NAME
271
272/* Guest - protected mode */
273# define PGM_GST_TYPE PGM_TYPE_PROT
274# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
275# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PROT(name)
276# include "PGMGstDefs.h"
277# include "PGMAllBth.h"
278# undef PGM_BTH_NAME
279# undef PGM_GST_TYPE
280# undef PGM_GST_NAME
281
282/* Guest - 32-bit mode */
283# define PGM_GST_TYPE PGM_TYPE_32BIT
284# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
285# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT(name)
286# include "PGMGstDefs.h"
287# include "PGMAllBth.h"
288# undef PGM_BTH_NAME
289# undef PGM_GST_TYPE
290# undef PGM_GST_NAME
291
292/* Guest - PAE mode */
293# define PGM_GST_TYPE PGM_TYPE_PAE
294# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
295# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE(name)
296# include "PGMGstDefs.h"
297# include "PGMAllBth.h"
298# undef PGM_BTH_NAME
299# undef PGM_GST_TYPE
300# undef PGM_GST_NAME
301
302# ifdef VBOX_WITH_64_BITS_GUESTS
303/* Guest - AMD64 mode */
304# define PGM_GST_TYPE PGM_TYPE_AMD64
305# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
306# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64(name)
307# include "PGMGstDefs.h"
308# include "PGMAllBth.h"
309# undef PGM_BTH_NAME
310# undef PGM_GST_TYPE
311# undef PGM_GST_NAME
312# endif /* VBOX_WITH_64_BITS_GUESTS */
313
314# undef PGM_SHW_TYPE
315# undef PGM_SHW_NAME
316
317
318/*
319 * Shadow - EPT
320 */
321# define PGM_SHW_TYPE PGM_TYPE_EPT
322# define PGM_SHW_NAME(name) PGM_SHW_NAME_EPT(name)
323# include "PGMAllShw.h"
324
325/* Guest - real mode */
326# define PGM_GST_TYPE PGM_TYPE_REAL
327# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
328# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_REAL(name)
329# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
330# include "PGMGstDefs.h"
331# include "PGMAllBth.h"
332# undef BTH_PGMPOOLKIND_PT_FOR_PT
333# undef PGM_BTH_NAME
334# undef PGM_GST_TYPE
335# undef PGM_GST_NAME
336
337/* Guest - protected mode */
338# define PGM_GST_TYPE PGM_TYPE_PROT
339# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
340# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
341# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
342# include "PGMGstDefs.h"
343# include "PGMAllBth.h"
344# undef BTH_PGMPOOLKIND_PT_FOR_PT
345# undef PGM_BTH_NAME
346# undef PGM_GST_TYPE
347# undef PGM_GST_NAME
348
349/* Guest - 32-bit mode */
350# define PGM_GST_TYPE PGM_TYPE_32BIT
351# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
352# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_32BIT(name)
353# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
354# include "PGMGstDefs.h"
355# include "PGMAllBth.h"
356# undef BTH_PGMPOOLKIND_PT_FOR_PT
357# undef PGM_BTH_NAME
358# undef PGM_GST_TYPE
359# undef PGM_GST_NAME
360
361/* Guest - PAE mode */
362# define PGM_GST_TYPE PGM_TYPE_PAE
363# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
364# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PAE(name)
365# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
366# include "PGMGstDefs.h"
367# include "PGMAllBth.h"
368# undef BTH_PGMPOOLKIND_PT_FOR_PT
369# undef PGM_BTH_NAME
370# undef PGM_GST_TYPE
371# undef PGM_GST_NAME
372
373# ifdef VBOX_WITH_64_BITS_GUESTS
374/* Guest - AMD64 mode */
375# define PGM_GST_TYPE PGM_TYPE_AMD64
376# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
377# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_AMD64(name)
378# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
379# include "PGMGstDefs.h"
380# include "PGMAllBth.h"
381# undef BTH_PGMPOOLKIND_PT_FOR_PT
382# undef PGM_BTH_NAME
383# undef PGM_GST_TYPE
384# undef PGM_GST_NAME
385# endif /* VBOX_WITH_64_BITS_GUESTS */
386
387# undef PGM_SHW_TYPE
388# undef PGM_SHW_NAME
389
390#endif /* !IN_RC */
391
392
393#ifndef IN_RING3
394/**
395 * #PF Handler.
396 *
397 * @returns VBox status code (appropriate for trap handling and GC return).
398 * @param pVCpu VMCPU handle.
399 * @param uErr The trap error code.
400 * @param pRegFrame Trap register frame.
401 * @param pvFault The fault address.
402 */
403VMMDECL(int) PGMTrap0eHandler(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
404{
405 PVM pVM = pVCpu->CTX_SUFF(pVM);
406
407 Log(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv eip=%04x:%RGv\n", uErr, pvFault, pRegFrame->cs, (RTGCPTR)pRegFrame->rip));
408 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0e, a);
409 STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );
410
411
412#ifdef VBOX_WITH_STATISTICS
413 /*
414 * Error code stats.
415 */
416 if (uErr & X86_TRAP_PF_US)
417 {
418 if (!(uErr & X86_TRAP_PF_P))
419 {
420 if (uErr & X86_TRAP_PF_RW)
421 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentWrite);
422 else
423 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentRead);
424 }
425 else if (uErr & X86_TRAP_PF_RW)
426 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSWrite);
427 else if (uErr & X86_TRAP_PF_RSVD)
428 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSReserved);
429 else if (uErr & X86_TRAP_PF_ID)
430 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNXE);
431 else
432 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSRead);
433 }
434 else
435 { /* Supervisor */
436 if (!(uErr & X86_TRAP_PF_P))
437 {
438 if (uErr & X86_TRAP_PF_RW)
439 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentWrite);
440 else
441 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentRead);
442 }
443 else if (uErr & X86_TRAP_PF_RW)
444 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVWrite);
445 else if (uErr & X86_TRAP_PF_ID)
446 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSNXE);
447 else if (uErr & X86_TRAP_PF_RSVD)
448 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVReserved);
449 }
450#endif /* VBOX_WITH_STATISTICS */
451
452 /*
453 * Call the worker.
454 */
455 bool fLockTaken = false;
456 int rc = PGM_BTH_PFN(Trap0eHandler, pVCpu)(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken);
457 if (fLockTaken)
458 {
459 Assert(PGMIsLockOwner(pVM));
460 pgmUnlock(pVM);
461 }
462 if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
463 rc = VINF_SUCCESS;
464
465# ifdef IN_RING0
466 /* Note: hack alert for difficult to reproduce problem. */
467 if ( rc == VERR_PAGE_NOT_PRESENT /* SMP only ; disassembly might fail. */
468 || rc == VERR_PAGE_TABLE_NOT_PRESENT /* seen with UNI & SMP */
469 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT /* seen with SMP */
470 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT) /* precaution */
471 {
472 Log(("WARNING: Unexpected VERR_PAGE_TABLE_NOT_PRESENT (%d) for page fault at %RGv error code %x (rip=%RGv)\n", rc, pvFault, uErr, pRegFrame->rip));
473 /* Some kind of inconsistency in the SMP case; it's safe to just execute the instruction again; not sure about single VCPU VMs though. */
474 rc = VINF_SUCCESS;
475 }
476# endif
477
478 STAM_STATS({ if (rc == VINF_EM_RAW_GUEST_TRAP) STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eGuestPF); });
479 STAM_STATS({ if (!pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution))
480 pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2Misc; });
481 STAM_PROFILE_STOP_EX(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0e, pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution), a);
482 return rc;
483}
484#endif /* !IN_RING3 */
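/*
 * Illustrative sketch (hypothetical ring-0 caller, kept as a comment): how a #PF
 * VM-exit path could hand the fault to PGMTrap0eHandler above and act on the most
 * common return codes. The error code, register frame and fault address are
 * assumed to have been pulled out of the VM-exit information by the caller.
 *
 * @code
 *    static int exampleHandleGuestPageFault(PVMCPU pVCpu, RTGCUINT uErrCode,
 *                                           PCPUMCTXCORE pRegFrame, RTGCPTR GCPtrFault)
 *    {
 *        int rc = PGMTrap0eHandler(pVCpu, uErrCode, pRegFrame, GCPtrFault);
 *        if (rc == VINF_SUCCESS)
 *            return VINF_SUCCESS;            // shadow tables synced; resume the guest
 *        if (rc == VINF_EM_RAW_GUEST_TRAP)
 *            return rc;                      // genuine guest fault; reflect the #PF
 *        return rc;                          // anything else goes back to EM for scheduling
 *    }
 * @endcode
 */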
485
486
487/**
488 * Prefetches a page.
489 *
490 * Typically used to sync commonly used pages before entering raw mode
491 * after a CR3 reload.
492 *
493 * @returns VBox status code suitable for scheduling.
494 * @retval VINF_SUCCESS on success.
495 * @retval VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
496 * @param pVCpu VMCPU handle.
497 * @param GCPtrPage Page to invalidate.
498 */
499VMMDECL(int) PGMPrefetchPage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
500{
501 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Prefetch), a);
502 int rc = PGM_BTH_PFN(PrefetchPage, pVCpu)(pVCpu, GCPtrPage);
503 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Prefetch), a);
504 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
505 return rc;
506}
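/*
 * Illustrative sketch (hypothetical helper): prefetching a small set of commonly
 * used guest pages after a CR3 reload, as described above. If the shadow pool runs
 * dry the call returns VINF_PGM_SYNC_CR3, in which case a full resync is forced
 * instead of prefetching further pages.
 *
 * @code
 *    static int examplePrefetchHotPages(PVMCPU pVCpu, RTGCPTR const *paPages, unsigned cPages)
 *    {
 *        for (unsigned i = 0; i < cPages; i++)
 *        {
 *            int rc = PGMPrefetchPage(pVCpu, paPages[i]);
 *            if (rc == VINF_PGM_SYNC_CR3)
 *            {
 *                VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);   // out of shadow pages; resync instead
 *                return rc;
 *            }
 *            if (RT_FAILURE(rc))
 *                return rc;
 *        }
 *        return VINF_SUCCESS;
 *    }
 * @endcode
 */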
507
508
509/**
510 * Gets the mapping corresponding to the specified address (if any).
511 *
512 * @returns Pointer to the mapping.
513 * @returns NULL if no mapping was found.
514 *
515 * @param pVM The virtual machine.
516 * @param GCPtr The guest context pointer.
517 */
518PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr)
519{
520 PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
521 while (pMapping)
522 {
523 if ((uintptr_t)GCPtr < (uintptr_t)pMapping->GCPtr)
524 break;
525 if ((uintptr_t)GCPtr - (uintptr_t)pMapping->GCPtr < pMapping->cb)
526 return pMapping;
527 pMapping = pMapping->CTX_SUFF(pNext);
528 }
529 return NULL;
530}
531
532
533/**
534 * Verifies a range of pages for read or write access
535 *
536 * Only checks the guest's page tables
537 *
538 * @returns VBox status code.
539 * @param pVCpu VMCPU handle.
540 * @param Addr Guest virtual address to check
541 * @param cbSize Access size
542 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
543 * @remarks Currently not in use.
544 */
545VMMDECL(int) PGMIsValidAccess(PVMCPU pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
546{
547 /*
548 * Validate input.
549 */
550 if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
551 {
552 AssertMsgFailed(("PGMIsValidAccess: invalid access type %08x\n", fAccess));
553 return VERR_INVALID_PARAMETER;
554 }
555
556 uint64_t fPage;
557 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)Addr, &fPage, NULL);
558 if (RT_FAILURE(rc))
559 {
560 Log(("PGMIsValidAccess: access violation for %RGv rc=%d\n", Addr, rc));
561 return VINF_EM_RAW_GUEST_TRAP;
562 }
563
564 /*
565 * Check if the access would cause a page fault
566 *
567 * Note that hypervisor page directories are not present in the guest's tables, so this check
568 * is sufficient.
569 */
570 bool fWrite = !!(fAccess & X86_PTE_RW);
571 bool fUser = !!(fAccess & X86_PTE_US);
572 if ( !(fPage & X86_PTE_P)
573 || (fWrite && !(fPage & X86_PTE_RW))
574 || (fUser && !(fPage & X86_PTE_US)) )
575 {
576 Log(("PGMIsValidAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPage, fWrite, fUser));
577 return VINF_EM_RAW_GUEST_TRAP;
578 }
579 if ( RT_SUCCESS(rc)
580 && PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize))
581 return PGMIsValidAccess(pVCpu, Addr + PAGE_SIZE, (cbSize > PAGE_SIZE) ? cbSize - PAGE_SIZE : 1, fAccess);
582 return rc;
583}
584
585
586/**
587 * Verifies a range of pages for read or write access
588 *
589 * Supports handling of pages marked for dirty bit tracking and CSAM
590 *
591 * @returns VBox status code.
592 * @param pVCpu VMCPU handle.
593 * @param Addr Guest virtual address to check
594 * @param cbSize Access size
595 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
596 */
597VMMDECL(int) PGMVerifyAccess(PVMCPU pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
598{
599 PVM pVM = pVCpu->CTX_SUFF(pVM);
600
601 AssertMsg(!(fAccess & ~(X86_PTE_US | X86_PTE_RW)), ("PGMVerifyAccess: invalid access type %08x\n", fAccess));
602
603 /*
604 * Get going.
605 */
606 uint64_t fPageGst;
607 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)Addr, &fPageGst, NULL);
608 if (RT_FAILURE(rc))
609 {
610 Log(("PGMVerifyAccess: access violation for %RGv rc=%d\n", Addr, rc));
611 return VINF_EM_RAW_GUEST_TRAP;
612 }
613
614 /*
615 * Check if the access would cause a page fault
616 *
617 * Note that hypervisor page directories are not present in the guest's tables, so this check
618 * is sufficient.
619 */
620 const bool fWrite = !!(fAccess & X86_PTE_RW);
621 const bool fUser = !!(fAccess & X86_PTE_US);
622 if ( !(fPageGst & X86_PTE_P)
623 || (fWrite && !(fPageGst & X86_PTE_RW))
624 || (fUser && !(fPageGst & X86_PTE_US)) )
625 {
626 Log(("PGMVerifyAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPageGst, fWrite, fUser));
627 return VINF_EM_RAW_GUEST_TRAP;
628 }
629
630 if (!pVM->pgm.s.fNestedPaging)
631 {
632 /*
633 * Next step is to verify if we protected this page for dirty bit tracking or for CSAM scanning
634 */
635 rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, NULL, NULL);
636 if ( rc == VERR_PAGE_NOT_PRESENT
637 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
638 {
639 /*
640 * Page is not present in our page tables.
641 * Try to sync it!
642 */
643 Assert(X86_TRAP_PF_RW == X86_PTE_RW && X86_TRAP_PF_US == X86_PTE_US);
644 uint32_t uErr = fAccess & (X86_TRAP_PF_RW | X86_TRAP_PF_US);
645 rc = PGM_BTH_PFN(VerifyAccessSyncPage, pVCpu)(pVCpu, Addr, fPageGst, uErr);
646 if (rc != VINF_SUCCESS)
647 return rc;
648 }
649 else
650 AssertMsg(rc == VINF_SUCCESS, ("PGMShwGetPage %RGv failed with %Rrc\n", Addr, rc));
651 }
652
653#if 0 /* def VBOX_STRICT; triggers too often now */
654 /*
655 * This check is a bit paranoid, but useful.
656 */
657 /* Note! This will assert when writing to monitored pages (a bit annoying actually). */
658 uint64_t fPageShw;
659 rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, &fPageShw, NULL);
660 if ( (rc == VERR_PAGE_NOT_PRESENT || RT_FAILURE(rc))
661 || (fWrite && !(fPageShw & X86_PTE_RW))
662 || (fUser && !(fPageShw & X86_PTE_US)) )
663 {
664 AssertMsgFailed(("Unexpected access violation for %RGv! rc=%Rrc write=%d user=%d\n",
665 Addr, rc, fWrite && !(fPageShw & X86_PTE_RW), fUser && !(fPageShw & X86_PTE_US)));
666 return VINF_EM_RAW_GUEST_TRAP;
667 }
668#endif
669
670 if ( RT_SUCCESS(rc)
671 && ( PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize - 1)
672 || Addr + cbSize < Addr))
673 {
674 /* Don't recursively call PGMVerifyAccess as we might run out of stack. */
675 for (;;)
676 {
677 Addr += PAGE_SIZE;
678 if (cbSize > PAGE_SIZE)
679 cbSize -= PAGE_SIZE;
680 else
681 cbSize = 1;
682 rc = PGMVerifyAccess(pVCpu, Addr, 1, fAccess);
683 if (rc != VINF_SUCCESS)
684 break;
685 if (PAGE_ADDRESS(Addr) == PAGE_ADDRESS(Addr + cbSize - 1))
686 break;
687 }
688 }
689 return rc;
690}
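/*
 * Illustrative sketch (hypothetical caller): verifying that a guest buffer is
 * user-writable before emulating a store into it. A VINF_EM_RAW_GUEST_TRAP return
 * means the access would fault in the guest and should be reflected rather than
 * emulated.
 *
 * @code
 *    static bool exampleIsGuestBufferWritable(PVMCPU pVCpu, RTGCPTR GCPtrDst, uint32_t cbWrite)
 *    {
 *        int rc = PGMVerifyAccess(pVCpu, GCPtrDst, cbWrite, X86_PTE_RW | X86_PTE_US);
 *        return rc == VINF_SUCCESS;      // VINF_EM_RAW_GUEST_TRAP -> reflect the #PF instead
 *    }
 * @endcode
 */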
691
692
693/**
694 * Emulation of the invlpg instruction (HC only actually).
695 *
696 * @returns Strict VBox status code, special care required.
697 * @retval VINF_PGM_SYNC_CR3 - handled.
698 * @retval VINF_EM_RAW_EMULATE_INSTR - not handled (RC only).
699 * @retval VERR_REM_FLUSHED_PAGES_OVERFLOW - not handled.
700 *
701 * @param pVCpu VMCPU handle.
702 * @param GCPtrPage Page to invalidate.
703 *
704 * @remark ASSUMES the page table entry or page directory is valid. Fairly
705 * safe, but there could be edge cases!
706 *
707 * @todo Flush page or page directory only if necessary!
708 * @todo VBOXSTRICTRC
709 */
710VMMDECL(int) PGMInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
711{
712 PVM pVM = pVCpu->CTX_SUFF(pVM);
713 int rc;
714 Log3(("PGMInvalidatePage: GCPtrPage=%RGv\n", GCPtrPage));
715
716#ifndef IN_RING3
717 /*
718 * Notify the recompiler so it can record this instruction.
719 */
720 REMNotifyInvalidatePage(pVM, GCPtrPage);
721#endif /* !IN_RING3 */
722
723
724#ifdef IN_RC
725 /*
726 * Check for conflicts and pending CR3 monitoring updates.
727 */
728 if (pgmMapAreMappingsFloating(&pVM->pgm.s))
729 {
730 if ( pgmGetMapping(pVM, GCPtrPage)
731 && PGMGstGetPage(pVCpu, GCPtrPage, NULL, NULL) != VERR_PAGE_TABLE_NOT_PRESENT)
732 {
733 LogFlow(("PGMGCInvalidatePage: Conflict!\n"));
734 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
735 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRCInvlPgConflict);
736 return VINF_PGM_SYNC_CR3;
737 }
738
739 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
740 {
741 LogFlow(("PGMGCInvalidatePage: PGM_SYNC_MONITOR_CR3 -> reinterpret instruction in R3\n"));
742 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRCInvlPgSyncMonCR3);
743 return VINF_EM_RAW_EMULATE_INSTR;
744 }
745 }
746#endif /* IN_RC */
747
748 /*
749 * Call paging mode specific worker.
750 */
751 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InvalidatePage), a);
752 pgmLock(pVM);
753 rc = PGM_BTH_PFN(InvalidatePage, pVCpu)(pVCpu, GCPtrPage);
754 pgmUnlock(pVM);
755 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InvalidatePage), a);
756
757 /* Invalidate the TLB entry; might already be done by InvalidatePage (@todo) */
758 PGM_INVL_PG(pVCpu, GCPtrPage);
759
760#ifdef IN_RING3
761 /*
762 * Check if we have a pending update of the CR3 monitoring.
763 */
764 if ( RT_SUCCESS(rc)
765 && (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3))
766 {
767 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
768 Assert(!pVM->pgm.s.fMappingsFixed); Assert(!pVM->pgm.s.fMappingsDisabled);
769 }
770
771 /*
772 * Inform CSAM about the flush
773 *
774 * Note: This is to check if monitored pages have been changed; when we implement
775 * callbacks for virtual handlers, this is no longer required.
776 */
777 CSAMR3FlushPage(pVM, GCPtrPage);
778#endif /* IN_RING3 */
779
780 /* Ignore all irrelevant error codes. */
781 if ( rc == VERR_PAGE_NOT_PRESENT
782 || rc == VERR_PAGE_TABLE_NOT_PRESENT
783 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT
784 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)
785 rc = VINF_SUCCESS;
786
787 return rc;
788}
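/*
 * Illustrative sketch (hypothetical caller): how an INVLPG emulation path could
 * use PGMInvalidatePage above, treating VINF_PGM_SYNC_CR3 as "handled, a full
 * sync is already scheduled" and passing other informational statuses back for
 * rescheduling.
 *
 * @code
 *    static int exampleEmulateInvlpg(PVMCPU pVCpu, RTGCPTR GCPtrPage)
 *    {
 *        int rc = PGMInvalidatePage(pVCpu, GCPtrPage);
 *        if (rc == VINF_PGM_SYNC_CR3)
 *            return VINF_SUCCESS;        // the CR3 sync FF is set; nothing more to do here
 *        return rc;                      // VINF_SUCCESS, VINF_EM_RAW_EMULATE_INSTR, ...
 *    }
 * @endcode
 */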
789
790
791/**
792 * Executes an instruction using the interpreter.
793 *
794 * @returns VBox status code (appropriate for trap handling and GC return).
795 * @param pVM VM handle.
796 * @param pVCpu VMCPU handle.
797 * @param pRegFrame Register frame.
798 * @param pvFault Fault address.
799 */
800VMMDECL(VBOXSTRICTRC) PGMInterpretInstruction(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
801{
802 uint32_t cb;
803 VBOXSTRICTRC rc = EMInterpretInstruction(pVM, pVCpu, pRegFrame, pvFault, &cb);
804 if (rc == VERR_EM_INTERPRETER)
805 rc = VINF_EM_RAW_EMULATE_INSTR;
806 if (rc != VINF_SUCCESS)
807 Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%RGv)\n", VBOXSTRICTRC_VAL(rc), pvFault));
808 return rc;
809}
810
811
812/**
813 * Gets effective page information (from the VMM page directory).
814 *
815 * @returns VBox status.
816 * @param pVCpu VMCPU handle.
817 * @param GCPtr Guest Context virtual address of the page.
818 * @param pfFlags Where to store the flags. These are X86_PTE_*.
819 * @param pHCPhys Where to store the HC physical address of the page.
820 * This is page aligned.
821 * @remark You should use PGMMapGetPage() for pages in a mapping.
822 */
823VMMDECL(int) PGMShwGetPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
824{
825 pgmLock(pVCpu->CTX_SUFF(pVM));
826 int rc = PGM_SHW_PFN(GetPage, pVCpu)(pVCpu, GCPtr, pfFlags, pHCPhys);
827 pgmUnlock(pVCpu->CTX_SUFF(pVM));
828 return rc;
829}
830
831
832/**
833 * Modify page flags for a range of pages in the shadow context.
834 *
835 * The existing flags are ANDed with the fMask and ORed with the fFlags.
836 *
837 * @returns VBox status code.
838 * @param pVCpu VMCPU handle.
839 * @param GCPtr Virtual address of the first page in the range.
840 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
841 * @param fMask The AND mask - page flags X86_PTE_*.
842 * Be very CAREFUL when ~'ing constants which could be 32-bit!
843 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
844 * @remark You must use PGMMapModifyPage() for pages in a mapping.
845 */
846DECLINLINE(int) pdmShwModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags)
847{
848 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
849 Assert(!(fOpFlags & ~(PGM_MK_PG_IS_MMIO2 | PGM_MK_PG_IS_WRITE_FAULT)));
850
851 GCPtr &= PAGE_BASE_GC_MASK; /** @todo this ain't necessary, right... */
852
853 PVM pVM = pVCpu->CTX_SUFF(pVM);
854 pgmLock(pVM);
855 int rc = PGM_SHW_PFN(ModifyPage, pVCpu)(pVCpu, GCPtr, PAGE_SIZE, fFlags, fMask, fOpFlags);
856 pgmUnlock(pVM);
857 return rc;
858}
859
860
861/**
862 * Changes the page flags for a single page in the shadow page tables so as to
863 * make it read-only.
864 *
865 * @returns VBox status code.
866 * @param pVCpu VMCPU handle.
867 * @param GCPtr Virtual address of the first page in the range.
868 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
869 */
870VMMDECL(int) PGMShwMakePageReadonly(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
871{
872 return pdmShwModifyPage(pVCpu, GCPtr, 0, ~(uint64_t)X86_PTE_RW, fOpFlags);
873}
874
875
876/**
877 * Changes the page flags for a single page in the shadow page tables so as to
878 * make it writable.
879 *
880 * The caller must know with 101% certainty that the guest page tables map this
881 * page as writable too. This function will deal with shared, zero and write
882 * monitored pages.
883 *
884 * @returns VBox status code.
885 * @param pVCpu VMCPU handle.
886 * @param GCPtr Virtual address of the first page in the range.
888 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
889 */
890VMMDECL(int) PGMShwMakePageWritable(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
891{
892 return pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)0, fOpFlags);
893}
894
895
896/**
897 * Changes the page flags for a single page in the shadow page tables so as to
898 * make it not present.
899 *
900 * @returns VBox status code.
901 * @param pVCpu VMCPU handle.
902 * @param GCPtr Virtual address of the first page in the range.
903 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
904 */
905VMMDECL(int) PGMShwMakePageNotPresent(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
906{
907 return pdmShwModifyPage(pVCpu, GCPtr, 0, 0, fOpFlags);
908}
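/*
 * Illustrative sketch (hypothetical helpers): using the wrappers above to
 * temporarily write-protect a shadow mapping (e.g. for dirty-bit tracking) and to
 * lift the protection again on the first write fault, assuming the guest itself
 * maps the page as writable.
 *
 * @code
 *    static int exampleWriteProtectShadowPage(PVMCPU pVCpu, RTGCPTR GCPtr)
 *    {
 *        return PGMShwMakePageReadonly(pVCpu, GCPtr, 0);     // no PGM_MK_PG_XXX flags needed here
 *    }
 *
 *    static int exampleHandleFirstWrite(PVMCPU pVCpu, RTGCPTR GCPtr)
 *    {
 *        return PGMShwMakePageWritable(pVCpu, GCPtr, PGM_MK_PG_IS_WRITE_FAULT);
 *    }
 * @endcode
 */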
909
910
911/**
912 * Gets the shadow page directory for the specified address, PAE.
913 *
914 * @returns VBox status code.
915 * @param pVCpu The VMCPU handle.
916 * @param GCPtr The address.
917 * @param uGstPdpe Guest PDPT entry.
918 * @param ppPD Receives address of page directory
919 */
920int pgmShwSyncPaePDPtr(PVMCPU pVCpu, RTGCPTR GCPtr, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
921{
922 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
923 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
924 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
925 PVM pVM = pVCpu->CTX_SUFF(pVM);
926 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
927 PPGMPOOLPAGE pShwPage;
928 int rc;
929
930 Assert(PGMIsLockOwner(pVM));
931
932 /* Allocate page directory if not present. */
933 if ( !pPdpe->n.u1Present
934 && !(pPdpe->u & X86_PDPE_PG_MASK))
935 {
936 RTGCPTR64 GCPdPt;
937 PGMPOOLKIND enmKind;
938
939 if (pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu))
940 {
941 /* AMD-V nested paging or real/protected mode without paging. */
942 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
943 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
944 }
945 else
946 {
947 if (CPUMGetGuestCR4(pVCpu) & X86_CR4_PAE)
948 {
949 if (!(uGstPdpe & X86_PDPE_P))
950 {
951 /* PD not present; guest must reload CR3 to change it.
952 * No need to monitor anything in this case.
953 */
954 Assert(!HWACCMIsEnabled(pVM));
955
956 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
957 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
958 uGstPdpe |= X86_PDPE_P;
959 }
960 else
961 {
962 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
963 enmKind = PGMPOOLKIND_PAE_PD_FOR_PAE_PD;
964 }
965 }
966 else
967 {
968 GCPdPt = CPUMGetGuestCR3(pVCpu);
969 enmKind = (PGMPOOLKIND)(PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD + iPdPt);
970 }
971 }
972
973 /* Create a reference back to the PDPT by using the index in its shadow page. */
974 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPdPt, &pShwPage);
975 AssertRCReturn(rc, rc);
976
977 /* The PD was cached or created; hook it up now. */
978 pPdpe->u |= pShwPage->Core.Key
979 | (uGstPdpe & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));
980
981# if defined(IN_RC)
982 /*
983 * In 32-bit PAE mode we *must* invalidate the TLB when changing a
984 * PDPT entry; the CPU fetches them only during cr3 load, so any
985 * non-present PDPT will continue to cause page faults.
986 */
987 ASMReloadCR3();
988# endif
989 PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdpe);
990 }
991 else
992 {
993 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
994 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
995 Assert((pPdpe->u & X86_PDPE_PG_MASK) == pShwPage->Core.Key);
996
997 pgmPoolCacheUsed(pPool, pShwPage);
998 }
999 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1000 return VINF_SUCCESS;
1001}
1002
1003
1004/**
1005 * Gets the pointer to the shadow page directory entry for an address, PAE.
1006 *
1007 * @returns VBox status code.
1008 * @param pVCpu The current CPU.
1009 * @param GCPtr The address.
1010 * @param ppShwPde Receives the address of the pgm pool page for the shadow page directory
1011 */
1012DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPU pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde)
1013{
1014 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
1015 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
1016 PVM pVM = pVCpu->CTX_SUFF(pVM);
1017
1018 Assert(PGMIsLockOwner(pVM));
1019
1020 AssertReturn(pPdpt, VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT); /* can't happen */
1021 if (!pPdpt->a[iPdPt].n.u1Present)
1022 {
1023 LogFlow(("pgmShwGetPaePoolPagePD: PD %d not present (%RX64)\n", iPdPt, pPdpt->a[iPdPt].u));
1024 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1025 }
1026 AssertMsg(pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK, ("GCPtr=%RGv\n", GCPtr));
1027
1028 /* Fetch the pgm pool shadow descriptor. */
1029 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
1030 AssertReturn(pShwPde, VERR_INTERNAL_ERROR);
1031
1032 *ppShwPde = pShwPde;
1033 return VINF_SUCCESS;
1034}
1035
1036#ifndef IN_RC
1037
1038/**
1039 * Syncs the SHADOW page directory pointer for the specified address.
1040 *
1041 * Allocates backing pages in case the PDPT or PML4 entry is missing.
1042 *
1043 * The caller is responsible for making sure the guest has a valid PD before
1044 * calling this function.
1045 *
1046 * @returns VBox status.
1047 * @param pVCpu VMCPU handle.
1048 * @param GCPtr The address.
1049 * @param uGstPml4e Guest PML4 entry
1050 * @param uGstPdpe Guest PDPT entry
1051 * @param ppPD Receives address of page directory
1052 */
1053static int pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
1054{
1055 PPGMCPU pPGM = &pVCpu->pgm.s;
1056 PVM pVM = pVCpu->CTX_SUFF(pVM);
1057 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1058 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1059 PX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
1060 bool fNestedPagingOrNoGstPaging = pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu);
1061 PPGMPOOLPAGE pShwPage;
1062 int rc;
1063
1064 Assert(PGMIsLockOwner(pVM));
1065
1066 /* Allocate page directory pointer table if not present. */
1067 if ( !pPml4e->n.u1Present
1068 && !(pPml4e->u & X86_PML4E_PG_MASK))
1069 {
1070 RTGCPTR64 GCPml4;
1071 PGMPOOLKIND enmKind;
1072
1073 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1074
1075 if (fNestedPagingOrNoGstPaging)
1076 {
1077 /* AMD-V nested paging or real/protected mode without paging */
1078 GCPml4 = (RTGCPTR64)iPml4 << X86_PML4_SHIFT;
1079 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_PHYS;
1080 }
1081 else
1082 {
1083 GCPml4 = uGstPml4e & X86_PML4E_PG_MASK;
1084 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT;
1085 }
1086
1087 /* Create a reference back to the PDPT by using the index in its shadow page. */
1088 rc = pgmPoolAlloc(pVM, GCPml4, enmKind, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, &pShwPage);
1089 AssertRCReturn(rc, rc);
1090 }
1091 else
1092 {
1093 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
1094 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1095
1096 pgmPoolCacheUsed(pPool, pShwPage);
1097 }
1098 /* The PDPT was cached or created; hook it up now. */
1099 pPml4e->u |= pShwPage->Core.Key
1100 | (uGstPml4e & ~(X86_PML4E_PG_MASK | X86_PML4E_AVL_MASK | X86_PML4E_PCD | X86_PML4E_PWT));
1101
1102 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1103 PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1104 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1105
1106 /* Allocate page directory if not present. */
1107 if ( !pPdpe->n.u1Present
1108 && !(pPdpe->u & X86_PDPE_PG_MASK))
1109 {
1110 RTGCPTR64 GCPdPt;
1111 PGMPOOLKIND enmKind;
1112
1113 if (fNestedPagingOrNoGstPaging)
1114 {
1115 /* AMD-V nested paging or real/protected mode without paging */
1116 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
1117 enmKind = PGMPOOLKIND_64BIT_PD_FOR_PHYS;
1118 }
1119 else
1120 {
1121 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1122 enmKind = PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD;
1123 }
1124
1125 /* Create a reference back to the PDPT by using the index in its shadow page. */
1126 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, pShwPage->idx, iPdPt, &pShwPage);
1127 AssertRCReturn(rc, rc);
1128 }
1129 else
1130 {
1131 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
1132 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1133
1134 pgmPoolCacheUsed(pPool, pShwPage);
1135 }
1136 /* The PD was cached or created; hook it up now. */
1137 pPdpe->u |= pShwPage->Core.Key
1138 | (uGstPdpe & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));
1139
1140 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1141 return VINF_SUCCESS;
1142}
1143
1144
1145/**
1146 * Gets the SHADOW page directory pointer for the specified address (long mode).
1147 *
1148 * @returns VBox status.
1149 * @param pVCpu VMCPU handle.
1150 * @param GCPtr The address.
1151 * @param ppPdpt Receives address of pdpt
1152 * @param ppPD Receives address of page directory
1153 */
1154DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
1155{
1156 PPGMCPU pPGM = &pVCpu->pgm.s;
1157 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1158 PCX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
1159
1160 Assert(PGMIsLockOwner(PGMCPU2VM(pPGM)));
1161
1162 AssertReturn(pPml4e, VERR_INTERNAL_ERROR);
1163 if (ppPml4e)
1164 *ppPml4e = (PX86PML4E)pPml4e;
1165
1166 Log4(("pgmShwGetLongModePDPtr %RGv (%RHv) %RX64\n", GCPtr, pPml4e, pPml4e->u));
1167
1168 if (!pPml4e->n.u1Present)
1169 return VERR_PAGE_MAP_LEVEL4_NOT_PRESENT;
1170
1171 PVM pVM = pVCpu->CTX_SUFF(pVM);
1172 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1173 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
1174 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1175
1176 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1177 PCX86PDPT pPdpt = *ppPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1178 if (!pPdpt->a[iPdPt].n.u1Present)
1179 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1180
1181 pShwPage = pgmPoolGetPage(pPool, pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
1182 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1183
1184 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1185 return VINF_SUCCESS;
1186}
1187
1188
1189/**
1190 * Syncs the SHADOW EPT page directory pointer for the specified address. Allocates
1191 * backing pages in case the PDPT or PML4 entry is missing.
1192 *
1193 * @returns VBox status.
1194 * @param pVCpu VMCPU handle.
1195 * @param GCPtr The address.
1196 * @param ppPdpt Receives address of pdpt
1197 * @param ppPD Receives address of page directory
1198 */
1199static int pgmShwGetEPTPDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD)
1200{
1201 PVM pVM = pVCpu->CTX_SUFF(pVM);
1202 const unsigned iPml4 = (GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
1203 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1204 PEPTPML4 pPml4;
1205 PEPTPML4E pPml4e;
1206 PPGMPOOLPAGE pShwPage;
1207 int rc;
1208
1209 Assert(pVM->pgm.s.fNestedPaging);
1210 Assert(PGMIsLockOwner(pVM));
1211
1212 pPml4 = (PEPTPML4)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1213 Assert(pPml4);
1214
1215 /* Allocate page directory pointer table if not present. */
1216 pPml4e = &pPml4->a[iPml4];
1217 if ( !pPml4e->n.u1Present
1218 && !(pPml4e->u & EPT_PML4E_PG_MASK))
1219 {
1220 Assert(!(pPml4e->u & EPT_PML4E_PG_MASK));
1221 RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT;
1222
1223 rc = pgmPoolAlloc(pVM, GCPml4, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOL_IDX_NESTED_ROOT, iPml4, &pShwPage);
1224 AssertRCReturn(rc, rc);
1225 }
1226 else
1227 {
1228 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
1229 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1230
1231 pgmPoolCacheUsed(pPool, pShwPage);
1232 }
1233 /* The PDPT was cached or created; hook it up now and fill with the default value. */
1234 pPml4e->u = pShwPage->Core.Key;
1235 pPml4e->n.u1Present = 1;
1236 pPml4e->n.u1Write = 1;
1237 pPml4e->n.u1Execute = 1;
1238
1239 const unsigned iPdPt = (GCPtr >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
1240 PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1241 PEPTPDPTE pPdpe = &pPdpt->a[iPdPt];
1242
1243 if (ppPdpt)
1244 *ppPdpt = pPdpt;
1245
1246 /* Allocate page directory if not present. */
1247 if ( !pPdpe->n.u1Present
1248 && !(pPdpe->u & EPT_PDPTE_PG_MASK))
1249 {
1250 RTGCPTR64 GCPdPt = (RTGCPTR64)iPdPt << EPT_PDPT_SHIFT;
1251
1252 rc = pgmPoolAlloc(pVM, GCPdPt, PGMPOOLKIND_64BIT_PD_FOR_PHYS, pShwPage->idx, iPdPt, &pShwPage);
1253 AssertRCReturn(rc, rc);
1254 }
1255 else
1256 {
1257 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & EPT_PDPTE_PG_MASK);
1258 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1259
1260 pgmPoolCacheUsed(pPool, pShwPage);
1261 }
1262 /* The PD was cached or created; hook it up now and fill with the default value. */
1263 pPdpe->u = pShwPage->Core.Key;
1264 pPdpe->n.u1Present = 1;
1265 pPdpe->n.u1Write = 1;
1266 pPdpe->n.u1Execute = 1;
1267
1268 *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1269 return VINF_SUCCESS;
1270}
1271
1272#endif /* IN_RC */
1273
1274#ifdef IN_RING0
1275/**
1276 * Synchronizes a range of nested page table entries.
1277 *
1278 * The caller must own the PGM lock.
1279 *
1280 * @param pVCpu The current CPU.
1281 * @param GCPhysFault Where to start.
1282 * @param cPages The number of pages whose entries should be synced.
1283 * @param enmShwPagingMode The shadow paging mode (PGMMODE_EPT for VT-x,
1284 * host paging mode for AMD-V).
1285 */
1286int pgmShwSyncNestedPageLocked(PVMCPU pVCpu, RTGCPHYS GCPhysFault, uint32_t cPages, PGMMODE enmShwPagingMode)
1287{
1288 Assert(PGMIsLockOwner(pVCpu->CTX_SUFF(pVM)));
1289
1290 int rc;
1291 switch (enmShwPagingMode)
1292 {
1293 case PGMMODE_32_BIT:
1294 {
1295 X86PDE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1296 rc = PGM_BTH_NAME_32BIT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhysFault, cPages, ~0U /*uErr*/);
1297 break;
1298 }
1299
1300 case PGMMODE_PAE:
1301 case PGMMODE_PAE_NX:
1302 {
1303 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1304 rc = PGM_BTH_NAME_PAE_PROT(SyncPage)(pVCpu, PdeDummy, GCPhysFault, cPages, ~0U /*uErr*/);
1305 break;
1306 }
1307
1308 case PGMMODE_AMD64:
1309 case PGMMODE_AMD64_NX:
1310 {
1311 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1312 rc = PGM_BTH_NAME_AMD64_PROT(SyncPage)(pVCpu, PdeDummy, GCPhysFault, cPages, ~0U /*uErr*/);
1313 break;
1314 }
1315
1316 case PGMMODE_EPT:
1317 {
1318 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1319 rc = PGM_BTH_NAME_EPT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhysFault, cPages, ~0U /*uErr*/);
1320 break;
1321 }
1322
1323 default:
1324 AssertMsgFailedReturn(("%d\n", enmShwPagingMode), VERR_INTERNAL_ERROR_5);
1325 }
1326 return rc;
1327}
1328#endif /* IN_RING0 */
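/*
 * Illustrative sketch (hypothetical ring-0 caller): the "Locked" suffix means the
 * PGM lock must be held, so a caller that does not already own it takes it around
 * the sync, e.g. when handling a nested-paging fault for a single page.
 *
 * @code
 *    static int exampleSyncNestedPage(PVMCPU pVCpu, RTGCPHYS GCPhysFault, PGMMODE enmShwPagingMode)
 *    {
 *        PVM pVM = pVCpu->CTX_SUFF(pVM);
 *        pgmLock(pVM);
 *        int rc = pgmShwSyncNestedPageLocked(pVCpu, GCPhysFault, 1, enmShwPagingMode);  // sync one page
 *        pgmUnlock(pVM);
 *        return rc;
 *    }
 * @endcode
 */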
1329
1330
1331/**
1332 * Gets effective Guest OS page information.
1333 *
1334 * When GCPtr is in a big page, the function will return as if it was a normal
1335 * 4KB page. If the need for distinguishing between big and normal page becomes
1336 * necessary at a later point, a PGMGstGetPage() will be created for that
1337 * purpose.
1338 *
1339 * @returns VBox status.
1340 * @param pVCpu VMCPU handle.
1341 * @param GCPtr Guest Context virtual address of the page.
1342 * @param pfFlags Where to store the flags. These are X86_PTE_*, even for big pages.
1343 * @param pGCPhys Where to store the GC physical address of the page.
1344 * This is page aligned.
1345 */
1346VMMDECL(int) PGMGstGetPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
1347{
1348 return PGM_GST_PFN(GetPage, pVCpu)(pVCpu, GCPtr, pfFlags, pGCPhys);
1349}
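/*
 * Illustrative sketch (hypothetical helper): translating a guest linear address
 * and checking the returned X86_PTE_* flags, e.g. before emulating a user-mode
 * write to that address.
 *
 * @code
 *    static int exampleQueryGuestPage(PVMCPU pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
 *    {
 *        uint64_t fFlags;
 *        int rc = PGMGstGetPage(pVCpu, GCPtr, &fFlags, pGCPhys);
 *        if (RT_FAILURE(rc))
 *            return rc;                                      // not present at all
 *        if (!(fFlags & X86_PTE_RW) || !(fFlags & X86_PTE_US))
 *            return VERR_ACCESS_DENIED;                      // present, but not user-writable
 *        return VINF_SUCCESS;                                // *pGCPhys holds the page-aligned address
 *    }
 * @endcode
 */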
1350
1351
1352/**
1353 * Checks if the page is present.
1354 *
1355 * @returns true if the page is present.
1356 * @returns false if the page is not present.
1357 * @param pVCpu VMCPU handle.
1358 * @param GCPtr Address within the page.
1359 */
1360VMMDECL(bool) PGMGstIsPagePresent(PVMCPU pVCpu, RTGCPTR GCPtr)
1361{
1362 int rc = PGMGstGetPage(pVCpu, GCPtr, NULL, NULL);
1363 return RT_SUCCESS(rc);
1364}
1365
1366
1367/**
1368 * Sets (replaces) the page flags for a range of pages in the guest's tables.
1369 *
1370 * @returns VBox status.
1371 * @param pVCpu VMCPU handle.
1372 * @param GCPtr The address of the first page.
1373 * @param cb The size of the range in bytes.
1374 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
1375 */
1376VMMDECL(int) PGMGstSetPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
1377{
1378 return PGMGstModifyPage(pVCpu, GCPtr, cb, fFlags, 0);
1379}
1380
1381
1382/**
1383 * Modify page flags for a range of pages in the guest's tables
1384 *
1385 * The existing flags are ANDed with the fMask and ORed with the fFlags.
1386 *
1387 * @returns VBox status code.
1388 * @param pVCpu VMCPU handle.
1389 * @param GCPtr Virtual address of the first page in the range.
1390 * @param cb Size (in bytes) of the range to apply the modification to.
1391 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
1392 * @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
1393 * Be very CAREFUL when ~'ing constants which could be 32-bit!
1394 */
1395VMMDECL(int) PGMGstModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
1396{
1397 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,GstModifyPage), a);
1398
1399 /*
1400 * Validate input.
1401 */
1402 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
1403 Assert(cb);
1404
1405 LogFlow(("PGMGstModifyPage %RGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));
1406
1407 /*
1408 * Adjust input.
1409 */
1410 cb += GCPtr & PAGE_OFFSET_MASK;
1411 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
1412 GCPtr = (GCPtr & PAGE_BASE_GC_MASK);
1413
1414 /*
1415 * Call worker.
1416 */
1417 int rc = PGM_GST_PFN(ModifyPage, pVCpu)(pVCpu, GCPtr, cb, fFlags, fMask);
1418
1419 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,GstModifyPage), a);
1420 return rc;
1421}
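/*
 * Illustrative sketch (hypothetical helpers): the fFlags/fMask convention of
 * PGMGstModifyPage above, used to clear and to set the write bit on a range of
 * guest pages. Note the 64-bit cast when building the AND mask.
 *
 * @code
 *    static int exampleGstWriteProtect(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb)
 *    {
 *        return PGMGstModifyPage(pVCpu, GCPtr, cb, 0, ~(uint64_t)X86_PTE_RW);    // AND out R/W
 *    }
 *
 *    static int exampleGstMakeWritable(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb)
 *    {
 *        return PGMGstModifyPage(pVCpu, GCPtr, cb, X86_PTE_RW, ~(uint64_t)0);    // OR in R/W
 *    }
 * @endcode
 */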
1422
1423
1424#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1425
1426/**
1427 * Performs the lazy mapping of the 32-bit guest PD.
1428 *
1429 * @returns VBox status code.
1430 * @param pVCpu The current CPU.
1431 * @param ppPd Where to return the pointer to the mapping. This is
1432 * always set.
1433 */
1434int pgmGstLazyMap32BitPD(PVMCPU pVCpu, PX86PD *ppPd)
1435{
1436 PVM pVM = pVCpu->CTX_SUFF(pVM);
1437 pgmLock(pVM);
1438
1439 Assert(!pVCpu->pgm.s.CTX_SUFF(pGst32BitPd));
1440
1441 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAGE_MASK;
1442 PPGMPAGE pPage;
1443 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysCR3, &pPage);
1444 if (RT_SUCCESS(rc))
1445 {
1446 RTHCPTR HCPtrGuestCR3;
1447 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
1448 if (RT_SUCCESS(rc))
1449 {
1450 pVCpu->pgm.s.pGst32BitPdR3 = (R3PTRTYPE(PX86PD))HCPtrGuestCR3;
1451# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1452 pVCpu->pgm.s.pGst32BitPdR0 = (R0PTRTYPE(PX86PD))HCPtrGuestCR3;
1453# endif
1454 *ppPd = (PX86PD)HCPtrGuestCR3;
1455
1456 pgmUnlock(pVM);
1457 return VINF_SUCCESS;
1458 }
1459
1460 AssertRC(rc);
1461 }
1462 pgmUnlock(pVM);
1463
1464 *ppPd = NULL;
1465 return rc;
1466}
1467
1468
1469/**
1470 * Performs the lazy mapping of the PAE guest PDPT.
1471 *
1472 * @returns VBox status code.
1473 * @param pVCpu The current CPU.
1474 * @param ppPdpt Where to return the pointer to the mapping. This is
1475 * always set.
1476 */
1477int pgmGstLazyMapPaePDPT(PVMCPU pVCpu, PX86PDPT *ppPdpt)
1478{
1479 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt));
1480 PVM pVM = pVCpu->CTX_SUFF(pVM);
1481 pgmLock(pVM);
1482
1483 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAE_PAGE_MASK;
1484 PPGMPAGE pPage;
1485 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysCR3, &pPage);
1486 if (RT_SUCCESS(rc))
1487 {
1488 RTHCPTR HCPtrGuestCR3;
1489 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
1490 if (RT_SUCCESS(rc))
1491 {
1492 pVCpu->pgm.s.pGstPaePdptR3 = (R3PTRTYPE(PX86PDPT))HCPtrGuestCR3;
1493# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1494 pVCpu->pgm.s.pGstPaePdptR0 = (R0PTRTYPE(PX86PDPT))HCPtrGuestCR3;
1495# endif
1496 *ppPdpt = (PX86PDPT)HCPtrGuestCR3;
1497
1498 pgmUnlock(pVM);
1499 return VINF_SUCCESS;
1500 }
1501
1502 AssertRC(rc);
1503 }
1504
1505 pgmUnlock(pVM);
1506 *ppPdpt = NULL;
1507 return rc;
1508}
1509
1510
1511/**
1512 * Performs the lazy mapping / updating of a PAE guest PD.
1513 *
1514 * @returns Pointer to the mapping.
1515 * @returns VBox status code.
1516 * @param pVCpu The current CPU.
1517 * @param iPdpt Which PD entry to map (0..3).
1518 * @param ppPd Where to return the pointer to the mapping. This is
1519 * always set.
1520 */
1521int pgmGstLazyMapPaePD(PVMCPU pVCpu, uint32_t iPdpt, PX86PDPAE *ppPd)
1522{
1523 PVM pVM = pVCpu->CTX_SUFF(pVM);
1524 pgmLock(pVM);
1525
1526 PX86PDPT pGuestPDPT = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
1527 Assert(pGuestPDPT);
1528 Assert(pGuestPDPT->a[iPdpt].n.u1Present);
1529 RTGCPHYS GCPhys = pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK_FULL;
1530 bool const fChanged = pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] != GCPhys;
1531
1532 PPGMPAGE pPage;
1533 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
1534 if (RT_SUCCESS(rc))
1535 {
1536 RTRCPTR RCPtr = NIL_RTRCPTR;
1537 RTHCPTR HCPtr = NIL_RTHCPTR;
1538#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1539 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &HCPtr);
1540 AssertRC(rc);
1541#endif
1542 if (RT_SUCCESS(rc) && fChanged)
1543 {
1544 RCPtr = (RTRCPTR)(RTRCUINTPTR)(pVM->pgm.s.GCPtrCR3Mapping + (1 + iPdpt) * PAGE_SIZE);
1545 rc = PGMMap(pVM, (RTRCUINTPTR)RCPtr, PGM_PAGE_GET_HCPHYS(pPage), PAGE_SIZE, 0);
1546 }
1547 if (RT_SUCCESS(rc))
1548 {
1549 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = (R3PTRTYPE(PX86PDPAE))HCPtr;
1550# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1551 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = (R0PTRTYPE(PX86PDPAE))HCPtr;
1552# endif
1553 if (fChanged)
1554 {
1555 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = GCPhys;
1556 pVCpu->pgm.s.apGstPaePDsRC[iPdpt] = (RCPTRTYPE(PX86PDPAE))RCPtr;
1557 }
1558
1559 *ppPd = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
1560 pgmUnlock(pVM);
1561 return VINF_SUCCESS;
1562 }
1563 }
1564
1565 /* Invalid page or some failure, invalidate the entry. */
1566 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = NIL_RTGCPHYS;
1567 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = 0;
1568# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1569 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = 0;
1570# endif
1571 pVCpu->pgm.s.apGstPaePDsRC[iPdpt] = 0;
1572
1573 pgmUnlock(pVM);
1574 return rc;
1575}
1576
1577#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1578#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1579/**
1580 * Performs the lazy mapping of the AMD64 guest PML4.
1581 *
1582 * @returns VBox status code.
1583 * @param pVCpu The current CPU.
1584 * @param ppPml4 Where to return the pointer to the mapping. This will
1585 * always be set.
1586 */
1587int pgmGstLazyMapPml4(PVMCPU pVCpu, PX86PML4 *ppPml4)
1588{
1589 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4));
1590 PVM pVM = pVCpu->CTX_SUFF(pVM);
1591 pgmLock(pVM);
1592
1593 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_AMD64_PAGE_MASK;
1594 PPGMPAGE pPage;
1595 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysCR3, &pPage);
1596 if (RT_SUCCESS(rc))
1597 {
1598 RTHCPTR HCPtrGuestCR3;
1599 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
1600 if (RT_SUCCESS(rc))
1601 {
1602 pVCpu->pgm.s.pGstAmd64Pml4R3 = (R3PTRTYPE(PX86PML4))HCPtrGuestCR3;
1603# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1604 pVCpu->pgm.s.pGstAmd64Pml4R0 = (R0PTRTYPE(PX86PML4))HCPtrGuestCR3;
1605# endif
1606 *ppPml4 = (PX86PML4)HCPtrGuestCR3;
1607
1608 pgmUnlock(pVM);
1609 return VINF_SUCCESS;
1610 }
1611 }
1612
1613 pgmUnlock(pVM);
1614 *ppPml4 = NULL;
1615 return rc;
1616}
1617#endif
1618
1619/**
1620 * Gets the specified page directory pointer table entry.
1621 *
1622 * @returns PDP entry
1623 * @param pVCpu VMCPU handle.
1624 * @param iPdpt PDPT index
1625 */
1626VMMDECL(X86PDPE) PGMGstGetPaePDPtr(PVMCPU pVCpu, unsigned iPdpt)
1627{
1628 Assert(iPdpt <= 3);
1629 return pgmGstGetPaePDPTPtr(pVCpu)->a[iPdpt & 3];
1630}
1631
1632
1633/**
1634 * Gets the current CR3 register value for the shadow memory context.
1635 * @returns CR3 value.
1636 * @param pVCpu VMCPU handle.
1637 */
1638VMMDECL(RTHCPHYS) PGMGetHyperCR3(PVMCPU pVCpu)
1639{
1640 PPGMPOOLPAGE pPoolPage = pVCpu->pgm.s.CTX_SUFF(pShwPageCR3);
1641 AssertPtrReturn(pPoolPage, 0);
1642 return pPoolPage->Core.Key;
1643}
1644
1645
1646/**
1647 * Gets the current CR3 register value for the nested memory context.
1648 * @returns CR3 value.
1649 * @param pVCpu VMCPU handle.
1650 */
1651VMMDECL(RTHCPHYS) PGMGetNestedCR3(PVMCPU pVCpu, PGMMODE enmShadowMode)
1652{
1653 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1654 return pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key;
1655}
1656
1657
1658/**
1659 * Gets the current CR3 register value for the HC intermediate memory context.
1660 * @returns CR3 value.
1661 * @param pVM The VM handle.
1662 */
1663VMMDECL(RTHCPHYS) PGMGetInterHCCR3(PVM pVM)
1664{
1665 switch (pVM->pgm.s.enmHostMode)
1666 {
1667 case SUPPAGINGMODE_32_BIT:
1668 case SUPPAGINGMODE_32_BIT_GLOBAL:
1669 return pVM->pgm.s.HCPhysInterPD;
1670
1671 case SUPPAGINGMODE_PAE:
1672 case SUPPAGINGMODE_PAE_GLOBAL:
1673 case SUPPAGINGMODE_PAE_NX:
1674 case SUPPAGINGMODE_PAE_GLOBAL_NX:
1675 return pVM->pgm.s.HCPhysInterPaePDPT;
1676
1677 case SUPPAGINGMODE_AMD64:
1678 case SUPPAGINGMODE_AMD64_GLOBAL:
1679 case SUPPAGINGMODE_AMD64_NX:
1680 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
1681 return pVM->pgm.s.HCPhysInterPaePDPT;
1682
1683 default:
1684 AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode));
1685 return ~0;
1686 }
1687}
1688
1689
1690/**
1691 * Gets the current CR3 register value for the RC intermediate memory context.
1692 * @returns CR3 value.
1693 * @param pVM The VM handle.
1694 * @param pVCpu VMCPU handle.
1695 */
1696VMMDECL(RTHCPHYS) PGMGetInterRCCR3(PVM pVM, PVMCPU pVCpu)
1697{
1698 switch (pVCpu->pgm.s.enmShadowMode)
1699 {
1700 case PGMMODE_32_BIT:
1701 return pVM->pgm.s.HCPhysInterPD;
1702
1703 case PGMMODE_PAE:
1704 case PGMMODE_PAE_NX:
1705 return pVM->pgm.s.HCPhysInterPaePDPT;
1706
1707 case PGMMODE_AMD64:
1708 case PGMMODE_AMD64_NX:
1709 return pVM->pgm.s.HCPhysInterPaePML4;
1710
1711 case PGMMODE_EPT:
1712 case PGMMODE_NESTED:
1713 return 0; /* not relevant */
1714
1715 default:
1716 AssertMsgFailed(("enmShadowMode=%d\n", pVCpu->pgm.s.enmShadowMode));
1717 return ~0;
1718 }
1719}
1720
1721
1722/**
1723 * Gets the CR3 register value for the 32-Bit intermediate memory context.
1724 * @returns CR3 value.
1725 * @param pVM The VM handle.
1726 */
1727VMMDECL(RTHCPHYS) PGMGetInter32BitCR3(PVM pVM)
1728{
1729 return pVM->pgm.s.HCPhysInterPD;
1730}
1731
1732
1733/**
1734 * Gets the CR3 register value for the PAE intermediate memory context.
1735 * @returns CR3 value.
1736 * @param pVM The VM handle.
1737 */
1738VMMDECL(RTHCPHYS) PGMGetInterPaeCR3(PVM pVM)
1739{
1740 return pVM->pgm.s.HCPhysInterPaePDPT;
1741}
1742
1743
1744/**
1745 * Gets the CR3 register value for the AMD64 intermediate memory context.
1746 * @returns CR3 value.
1747 * @param pVM The VM handle.
1748 */
1749VMMDECL(RTHCPHYS) PGMGetInterAmd64CR3(PVM pVM)
1750{
1751 return pVM->pgm.s.HCPhysInterPaePML4;
1752}
1753
1754
1755/**
1756 * Performs and schedules necessary updates following a CR3 load or reload.
1757 *
1758 * This will normally involve mapping the guest PD or nPDPT
1759 *
1760 * @returns VBox status code.
1761 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
1762 * safely be ignored and overridden since the FF will be set too then.
1763 * @param pVCpu VMCPU handle.
1764 * @param cr3 The new cr3.
1765 * @param fGlobal Indicates whether this is a global flush or not.
1766 */
1767VMMDECL(int) PGMFlushTLB(PVMCPU pVCpu, uint64_t cr3, bool fGlobal)
1768{
1769 PVM pVM = pVCpu->CTX_SUFF(pVM);
1770
1771 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLB), a);
1772
1773 /*
1774 * Always flag the necessary updates; this is required for hardware acceleration
1775 */
1776 /** @todo optimize this, it shouldn't always be necessary. */
1777 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
1778 if (fGlobal)
1779 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1780 LogFlow(("PGMFlushTLB: cr3=%RX64 OldCr3=%RX64 fGlobal=%d\n", cr3, pVCpu->pgm.s.GCPhysCR3, fGlobal));
1781
1782 /*
1783 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
1784 */
1785 int rc = VINF_SUCCESS;
1786 RTGCPHYS GCPhysCR3;
1787 switch (pVCpu->pgm.s.enmGuestMode)
1788 {
1789 case PGMMODE_PAE:
1790 case PGMMODE_PAE_NX:
1791 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1792 break;
1793 case PGMMODE_AMD64:
1794 case PGMMODE_AMD64_NX:
1795 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
1796 break;
1797 default:
1798 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1799 break;
1800 }
1801
1802 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
1803 {
1804 RTGCPHYS GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
1805 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
1806 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
1807 if (RT_LIKELY(rc == VINF_SUCCESS))
1808 {
1809 if (pgmMapAreMappingsFloating(&pVM->pgm.s))
1810 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1811 }
1812 else
1813 {
1814 AssertMsg(rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
1815 Assert(VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_PGM_SYNC_CR3));
1816 pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3;
1817 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MAP_CR3;
1818 if (pgmMapAreMappingsFloating(&pVM->pgm.s))
1819 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;
1820 }
1821
1822 if (fGlobal)
1823 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBNewCR3Global));
1824 else
1825 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBNewCR3));
1826 }
1827 else
1828 {
1829# ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
1830 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1831 if (pPool->cDirtyPages)
1832 {
1833 pgmLock(pVM);
1834 pgmPoolResetDirtyPages(pVM);
1835 pgmUnlock(pVM);
1836 }
1837# endif
1838 /*
1839 * Check if we have a pending update of the CR3 monitoring.
1840 */
1841 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
1842 {
1843 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1844 Assert(!pVM->pgm.s.fMappingsFixed); Assert(!pVM->pgm.s.fMappingsDisabled);
1845 }
1846 if (fGlobal)
1847 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBSameCR3Global));
1848 else
1849 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBSameCR3));
1850 }
1851
1852 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLB), a);
1853 return rc;
1854}
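/*
 * Illustrative sketch of a typical call site: when emulating a guest write to
 * CR3, the interpreter hands the new value to PGMFlushTLB and treats
 * VINF_PGM_SYNC_CR3 as success, because the force action flag set above
 * guarantees a SyncCR3 before guest execution resumes.  The locals uNewCr3
 * and cr4 are assumed here, not taken from any specific caller:
 *
 *      int rc = PGMFlushTLB(pVCpu, uNewCr3, !(cr4 & X86_CR4_PGE)); // global flush unless PGE is set
 *      if (rc == VINF_PGM_SYNC_CR3)
 *          rc = VINF_SUCCESS;  // VMCPU_FF_PGM_SYNC_CR3 is pending and will be serviced.
 *      AssertRC(rc);
 */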
1855
1856
1857/**
1858 * Performs and schedules necessary updates following a CR3 load or reload when
1859 * using nested or extended paging.
1860 *
1861 * This API is an alternative to PGMFlushTLB that avoids actually flushing the
1862 * TLB and triggering a SyncCR3.
1863 *
1864 * This will normally involve mapping the guest PD or nPDPT
1865 *
1866 * @returns VBox status code.
1867 * @retval VINF_SUCCESS.
1868 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync (only applicable
1869 * when not running in nested paging mode). This can safely be ignored and
1870 * overridden since the FF will be set then as well.
1871 * @param pVCpu VMCPU handle.
1872 * @param cr3 The new cr3.
1873 */
1874VMMDECL(int) PGMUpdateCR3(PVMCPU pVCpu, uint64_t cr3)
1875{
1876 PVM pVM = pVCpu->CTX_SUFF(pVM);
1877
1878 LogFlow(("PGMUpdateCR3: cr3=%RX64 OldCr3=%RX64\n", cr3, pVCpu->pgm.s.GCPhysCR3));
1879
1880 /* We assume we're only called in nested paging mode. */
1881 Assert(pVM->pgm.s.fNestedPaging || pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
1882 Assert(pVM->pgm.s.fMappingsDisabled);
1883 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3));
1884
1885 /*
1886 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
1887 */
1888 int rc = VINF_SUCCESS;
1889 RTGCPHYS GCPhysCR3;
1890 switch (pVCpu->pgm.s.enmGuestMode)
1891 {
1892 case PGMMODE_PAE:
1893 case PGMMODE_PAE_NX:
1894 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1895 break;
1896 case PGMMODE_AMD64:
1897 case PGMMODE_AMD64_NX:
1898 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
1899 break;
1900 default:
1901 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1902 break;
1903 }
1904 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
1905 {
1906 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
1907 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
1908 AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */ /** @todo this isn't true for the mac, but we need hw to test/fix this. */
1909 }
1910 return rc;
1911}
1912
1913
1914/**
1915 * Synchronize the paging structures.
1916 *
1917 * This function is called in response to the VMCPU_FF_PGM_SYNC_CR3 and
1918 * VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL. Those two force action flags are set
1919 * in several places, most importantly whenever the CR3 is loaded.
1920 *
1921 * @returns VBox status code.
1922 * @param pVCpu VMCPU handle.
1923 * @param cr0 Guest context CR0 register
1924 * @param cr3 Guest context CR3 register
1925 * @param cr4 Guest context CR4 register
1926 * @param fGlobal Including global page directories or not
1927 */
1928VMMDECL(int) PGMSyncCR3(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
1929{
1930 PVM pVM = pVCpu->CTX_SUFF(pVM);
1931 int rc;
1932
1933 /*
1934 * The pool may have pending stuff and even require a return to ring-3 to
1935 * clear the whole thing.
1936 */
1937 rc = pgmPoolSyncCR3(pVCpu);
1938 if (rc != VINF_SUCCESS)
1939 return rc;
1940
1941 /*
1942 * We might be called when we shouldn't.
1943 *
1944 * The mode switching will ensure that the PD is resynced
1945 * after every mode switch. So, if we find ourselves here
1946 * when in protected or real mode we can safely disable the
1947 * FF and return immediately.
1948 */
1949 if (pVCpu->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
1950 {
1951 Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
1952 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
1953 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1954 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
1955 return VINF_SUCCESS;
1956 }
1957
1958 /* If global pages are not supported, then all flushes are global. */
1959 if (!(cr4 & X86_CR4_PGE))
1960 fGlobal = true;
1961 LogFlow(("PGMSyncCR3: cr0=%RX64 cr3=%RX64 cr4=%RX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
1962 VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)));
1963
1964 /*
1965 * Check if we need to finish an aborted MapCR3 call (see PGMFlushTLB).
1966 * This should be done before SyncCR3.
1967 */
1968 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MAP_CR3)
1969 {
1970 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MAP_CR3;
1971
1972 RTGCPHYS GCPhysCR3Old = pVCpu->pgm.s.GCPhysCR3;
1973 RTGCPHYS GCPhysCR3;
1974 switch (pVCpu->pgm.s.enmGuestMode)
1975 {
1976 case PGMMODE_PAE:
1977 case PGMMODE_PAE_NX:
1978 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1979 break;
1980 case PGMMODE_AMD64:
1981 case PGMMODE_AMD64_NX:
1982 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
1983 break;
1984 default:
1985 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1986 break;
1987 }
1988
1989 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
1990 {
1991 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
1992 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
1993 }
1994 /* Make sure we check for pending pgm pool syncs as we clear VMCPU_FF_PGM_SYNC_CR3 later on! */
1995 if ( rc == VINF_PGM_SYNC_CR3
1996 || (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL))
1997 {
1998 Log(("PGMSyncCR3: pending pgm pool sync after MapCR3!\n"));
1999#ifdef IN_RING3
2000 rc = pgmPoolSyncCR3(pVCpu);
2001#else
2002 if (rc == VINF_PGM_SYNC_CR3)
2003 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3Old;
2004 return VINF_PGM_SYNC_CR3;
2005#endif
2006 }
2007 AssertRCReturn(rc, rc);
2008 AssertRCSuccessReturn(rc, VERR_INTERNAL_ERROR);
2009 }
2010
2011 /*
2012 * Let the 'Bth' function do the work and we'll just keep track of the flags.
2013 */
2014 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
2015 rc = PGM_BTH_PFN(SyncCR3, pVCpu)(pVCpu, cr0, cr3, cr4, fGlobal);
2016 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
2017 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
2018 if (rc == VINF_SUCCESS)
2019 {
2020 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)
2021 {
2022 /* Go back to ring 3 if a pgm pool sync is again pending. */
2023 return VINF_PGM_SYNC_CR3;
2024 }
2025
2026 if (!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
2027 {
2028 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2029 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2030 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2031 }
2032
2033 /*
2034 * Check if we have a pending update of the CR3 monitoring.
2035 */
2036 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
2037 {
2038 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
2039 Assert(!pVM->pgm.s.fMappingsFixed); Assert(!pVM->pgm.s.fMappingsDisabled);
2040 }
2041 }
2042
2043 /*
2044 * Now flush the CR3 (guest context).
2045 */
2046 if (rc == VINF_SUCCESS)
2047 PGM_INVL_VCPU_TLBS(pVCpu);
2048 return rc;
2049}
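/*
 * Illustrative sketch of the usual call site: before resuming guest execution
 * the two force action flags are checked and PGMSyncCR3 is fed the current
 * guest control registers.  The exact surrounding loop is caller specific;
 * this only shows the shape of the call:
 *
 *      if (   VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3)
 *          || VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
 *      {
 *          int rc = PGMSyncCR3(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR3(pVCpu),
 *                              CPUMGetGuestCR4(pVCpu),
 *                              VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3)); // fGlobal
 *          if (rc != VINF_SUCCESS)
 *              return rc; // e.g. VINF_PGM_SYNC_CR3 forces a round trip to ring-3.
 *      }
 */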
2050
2051
2052/**
2053 * Called whenever CR0 or CR4 changes in a way which may affect the paging mode.
2054 *
2055 * @returns VBox status code, with the following informational code for
2056 * VM scheduling.
2057 * @retval VINF_SUCCESS if there was no change, or it was successfully dealt with.
2058 * @retval VINF_PGM_CHANGE_MODE if we're in RC or R0 and the mode changes.
2059 * (I.e. not in R3.)
2060 * @retval VINF_EM_SUSPEND or VINF_EM_OFF on a fatal runtime error. (R3 only)
2061 *
2062 * @param pVCpu VMCPU handle.
2063 * @param cr0 The new cr0.
2064 * @param cr4 The new cr4.
2065 * @param efer The new extended feature enable register.
2066 */
2067VMMDECL(int) PGMChangeMode(PVMCPU pVCpu, uint64_t cr0, uint64_t cr4, uint64_t efer)
2068{
2069 PVM pVM = pVCpu->CTX_SUFF(pVM);
2070 PGMMODE enmGuestMode;
2071
2072 /*
2073 * Calc the new guest mode.
2074 */
2075 if (!(cr0 & X86_CR0_PE))
2076 enmGuestMode = PGMMODE_REAL;
2077 else if (!(cr0 & X86_CR0_PG))
2078 enmGuestMode = PGMMODE_PROTECTED;
2079 else if (!(cr4 & X86_CR4_PAE))
2080 {
2081 bool const fPse = !!(cr4 & X86_CR4_PSE);
2082 if (pVCpu->pgm.s.fGst32BitPageSizeExtension != fPse)
2083 Log(("PGMChangeMode: CR4.PSE %d -> %d\n", pVCpu->pgm.s.fGst32BitPageSizeExtension, fPse));
2084 pVCpu->pgm.s.fGst32BitPageSizeExtension = fPse;
2085 enmGuestMode = PGMMODE_32_BIT;
2086 }
2087 else if (!(efer & MSR_K6_EFER_LME))
2088 {
2089 if (!(efer & MSR_K6_EFER_NXE))
2090 enmGuestMode = PGMMODE_PAE;
2091 else
2092 enmGuestMode = PGMMODE_PAE_NX;
2093 }
2094 else
2095 {
2096 if (!(efer & MSR_K6_EFER_NXE))
2097 enmGuestMode = PGMMODE_AMD64;
2098 else
2099 enmGuestMode = PGMMODE_AMD64_NX;
2100 }
2101
2102 /*
2103 * Did it change?
2104 */
2105 if (pVCpu->pgm.s.enmGuestMode == enmGuestMode)
2106 return VINF_SUCCESS;
2107
2108 /* Flush the TLB */
2109 PGM_INVL_VCPU_TLBS(pVCpu);
2110
2111#ifdef IN_RING3
2112 return PGMR3ChangeMode(pVM, pVCpu, enmGuestMode);
2113#else
2114 LogFlow(("PGMChangeMode: returns VINF_PGM_CHANGE_MODE.\n"));
2115 return VINF_PGM_CHANGE_MODE;
2116#endif
2117}
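/*
 * Illustrative sketch: after emulating a write to CR0, CR4 or EFER the caller
 * re-evaluates the paging mode.  In RC and R0 the informational
 * VINF_PGM_CHANGE_MODE status must be propagated so that ring-3 can perform
 * the actual switch via PGMR3ChangeMode.  The surrounding caller is
 * hypothetical:
 *
 *      int rc = PGMChangeMode(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR4(pVCpu),
 *                             CPUMGetGuestEFER(pVCpu));
 *      if (rc == VINF_PGM_CHANGE_MODE)
 *          return rc;  // reschedule to ring-3 for the mode switch.
 *      AssertRC(rc);
 */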
2118
2119
2120/**
2121 * Gets the current guest paging mode.
2122 *
2123 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
2124 *
2125 * @returns The current paging mode.
2126 * @param pVCpu VMCPU handle.
2127 */
2128VMMDECL(PGMMODE) PGMGetGuestMode(PVMCPU pVCpu)
2129{
2130 return pVCpu->pgm.s.enmGuestMode;
2131}
2132
2133
2134/**
2135 * Gets the current shadow paging mode.
2136 *
2137 * @returns The current paging mode.
2138 * @param pVCpu VMCPU handle.
2139 */
2140VMMDECL(PGMMODE) PGMGetShadowMode(PVMCPU pVCpu)
2141{
2142 return pVCpu->pgm.s.enmShadowMode;
2143}
2144
2145/**
2146 * Gets the current host paging mode.
2147 *
2148 * @returns The current paging mode.
2149 * @param pVM The VM handle.
2150 */
2151VMMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
2152{
2153 switch (pVM->pgm.s.enmHostMode)
2154 {
2155 case SUPPAGINGMODE_32_BIT:
2156 case SUPPAGINGMODE_32_BIT_GLOBAL:
2157 return PGMMODE_32_BIT;
2158
2159 case SUPPAGINGMODE_PAE:
2160 case SUPPAGINGMODE_PAE_GLOBAL:
2161 return PGMMODE_PAE;
2162
2163 case SUPPAGINGMODE_PAE_NX:
2164 case SUPPAGINGMODE_PAE_GLOBAL_NX:
2165 return PGMMODE_PAE_NX;
2166
2167 case SUPPAGINGMODE_AMD64:
2168 case SUPPAGINGMODE_AMD64_GLOBAL:
2169 return PGMMODE_AMD64;
2170
2171 case SUPPAGINGMODE_AMD64_NX:
2172 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
2173 return PGMMODE_AMD64_NX;
2174
2175 default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
2176 }
2177
2178 return PGMMODE_INVALID;
2179}
2180
2181
2182/**
2183 * Get mode name.
2184 *
2185 * @returns read-only name string.
2186 * @param enmMode The mode which name is desired.
2187 */
2188VMMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
2189{
2190 switch (enmMode)
2191 {
2192 case PGMMODE_REAL: return "Real";
2193 case PGMMODE_PROTECTED: return "Protected";
2194 case PGMMODE_32_BIT: return "32-bit";
2195 case PGMMODE_PAE: return "PAE";
2196 case PGMMODE_PAE_NX: return "PAE+NX";
2197 case PGMMODE_AMD64: return "AMD64";
2198 case PGMMODE_AMD64_NX: return "AMD64+NX";
2199 case PGMMODE_NESTED: return "Nested";
2200 case PGMMODE_EPT: return "EPT";
2201 default: return "unknown mode value";
2202 }
2203}
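/*
 * Illustrative use: the mode name is handy for logging the paging setup, e.g.
 * together with the getters above (pVCpu and pVM assumed to be in scope):
 *
 *      LogFlow(("Paging: guest=%s shadow=%s host=%s\n",
 *               PGMGetModeName(PGMGetGuestMode(pVCpu)),
 *               PGMGetModeName(PGMGetShadowMode(pVCpu)),
 *               PGMGetModeName(PGMGetHostMode(pVM))));
 */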
2204
2205
2206
2207/**
2208 * Notification from CPUM that the EFER.NXE bit has changed.
2209 *
2210 * @param pVCpu The virtual CPU for which EFER changed.
2211 * @param fNxe The new NXE state.
2212 */
2213VMM_INT_DECL(void) PGMNotifyNxeChanged(PVMCPU pVCpu, bool fNxe)
2214{
2215 Log(("PGMNotifyNxeChanged: fNxe=%RTbool\n", fNxe));
2216 pVCpu->pgm.s.fNoExecuteEnabled = fNxe;
2217 if (fNxe)
2218 {
2219 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
2220 pVCpu->pgm.s.fGstPaeMbzPteMask &= ~X86_PTE_PAE_NX;
2221 pVCpu->pgm.s.fGstPaeMbzPdeMask &= ~X86_PDE_PAE_NX;
2222 pVCpu->pgm.s.fGstPaeMbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
2223 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask - N/A */
2224 pVCpu->pgm.s.fGstAmd64MbzPteMask &= ~X86_PTE_PAE_NX;
2225 pVCpu->pgm.s.fGstAmd64MbzPdeMask &= ~X86_PDE_PAE_NX;
2226 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
2227 pVCpu->pgm.s.fGstAmd64MbzPdpeMask &= ~X86_PDPE_LM_NX;
2228 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask &= ~X86_PDPE_LM_NX;
2229 pVCpu->pgm.s.fGstAmd64MbzPml4eMask &= ~X86_PML4E_NX;
2230 }
2231 else
2232 {
2233 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
2234 pVCpu->pgm.s.fGstPaeMbzPteMask |= X86_PTE_PAE_NX;
2235 pVCpu->pgm.s.fGstPaeMbzPdeMask |= X86_PDE_PAE_NX;
2236 pVCpu->pgm.s.fGstPaeMbzBigPdeMask |= X86_PDE2M_PAE_NX;
2237 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask - N/A */
2238 pVCpu->pgm.s.fGstAmd64MbzPteMask |= X86_PTE_PAE_NX;
2239 pVCpu->pgm.s.fGstAmd64MbzPdeMask |= X86_PDE_PAE_NX;
2240 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask |= X86_PDE2M_PAE_NX;
2241 pVCpu->pgm.s.fGstAmd64MbzPdpeMask |= X86_PDPE_LM_NX;
2242 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask |= X86_PDPE_LM_NX;
2243 pVCpu->pgm.s.fGstAmd64MbzPml4eMask |= X86_PML4E_NX;
2244 }
2245}
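/*
 * Illustrative sketch of the notifying side: when emulation of an EFER write
 * detects that the NXE bit toggled, it tells PGM so the masks above can be
 * adjusted.  fNewEfer is an assumed local holding the value being written;
 * the actual caller lives in CPUM:
 *
 *      uint64_t const fOldEfer = CPUMGetGuestEFER(pVCpu);
 *      if ((fOldEfer ^ fNewEfer) & MSR_K6_EFER_NXE)
 *          PGMNotifyNxeChanged(pVCpu, RT_BOOL(fNewEfer & MSR_K6_EFER_NXE));
 */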
2246
2247
2248/**
2249 * Check if any pgm pool pages are marked dirty (not monitored)
2250 *
2251 * @returns true if there are dirty pages, false otherwise.
2252 * @param pVM The VM to operate on.
2253 */
2254VMMDECL(bool) PGMHasDirtyPages(PVM pVM)
2255{
2256 return pVM->pgm.s.CTX_SUFF(pPool)->cDirtyPages != 0;
2257}
2258
2259/**
2260 * Check if the PGM lock is currently taken.
2261 *
2262 * @returns bool locked/not locked
2263 * @param pVM The VM to operate on.
2264 */
2265VMMDECL(bool) PGMIsLocked(PVM pVM)
2266{
2267 return PDMCritSectIsOwned(&pVM->pgm.s.CritSect);
2268}
2269
2270
2271/**
2272 * Check if this VCPU currently owns the PGM lock.
2273 *
2274 * @returns bool owner/not owner
2275 * @param pVM The VM to operate on.
2276 */
2277VMMDECL(bool) PGMIsLockOwner(PVM pVM)
2278{
2279 return PDMCritSectIsOwner(&pVM->pgm.s.CritSect);
2280}
2281
2282
2283/**
2284 * Enable or disable large page usage
2285 *
2286 * @param pVM The VM to operate on.
2287 * @param fUseLargePages Use/not use large pages
2288 */
2289VMMDECL(void) PGMSetLargePageUsage(PVM pVM, bool fUseLargePages)
2290{
2291 pVM->fUseLargePages = fUseLargePages;
2292}
2293
2294/**
2295 * Acquire the PGM lock.
2296 *
2297 * @returns VBox status code
2298 * @param pVM The VM to operate on.
2299 */
2300int pgmLock(PVM pVM)
2301{
2302 int rc = PDMCritSectEnter(&pVM->pgm.s.CritSect, VERR_SEM_BUSY);
2303#if defined(IN_RC) || defined(IN_RING0)
2304 if (rc == VERR_SEM_BUSY)
2305 rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_LOCK, 0);
2306#endif
2307 AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc));
2308 return rc;
2309}
2310
2311
2312/**
2313 * Release the PGM lock.
2314 *
2316 * @param pVM The VM to operate on.
2317 */
2318void pgmUnlock(PVM pVM)
2319{
2320 PDMCritSectLeave(&pVM->pgm.s.CritSect);
2321}
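/*
 * Illustrative sketch of the locking pattern used throughout this file:
 * bracket any access to shared PGM state with pgmLock/pgmUnlock.  GCPhys is
 * assumed to be supplied by the caller:
 *
 *      pgmLock(pVM);
 *      PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
 *      if (pPage)
 *      {
 *          // inspect or update the page while holding the lock
 *      }
 *      pgmUnlock(pVM);
 */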
2322
2323#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
2324
2325/**
2326 * Common worker for pgmRZDynMapGCPageOffInlined and pgmRZDynMapGCPageV2Inlined.
2327 *
2328 * @returns VBox status code.
2329 * @param pVM The VM handle.
2330 * @param pVCpu The current CPU.
2331 * @param GCPhys The guest physical address of the page to map. The
2332 * offset bits are not ignored.
2333 * @param ppv Where to return the address corresponding to @a GCPhys.
2334 */
2335int pgmRZDynMapGCPageCommon(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
2336{
2337 pgmLock(pVM);
2338
2339 /*
2340 * Convert it to a writable page and pass it on to the dynamic mapper.
2341 */
2342 int rc;
2343 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
2344 if (RT_LIKELY(pPage))
2345 {
2346 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
2347 if (RT_SUCCESS(rc))
2348 {
2349 void *pv;
2350 rc = pgmRZDynMapHCPageInlined(pVCpu, PGM_PAGE_GET_HCPHYS(pPage), &pv RTLOG_COMMA_SRC_POS_ARGS);
2351 if (RT_SUCCESS(rc))
2352 *ppv = (void *)((uintptr_t)pv | ((uintptr_t)GCPhys & PAGE_OFFSET_MASK));
2353 }
2354 else
2355 AssertRC(rc);
2356 }
2357 else
2358 {
2359 AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
2360 rc = VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2361 }
2362
2363 pgmUnlock(pVM);
2364 return rc;
2365}
2366
2367#endif /* IN_RC || VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
2368#if !defined(IN_R0) || defined(LOG_ENABLED)
2369
2370/** Format handler for PGMPAGE.
2371 * @copydoc FNRTSTRFORMATTYPE */
2372static DECLCALLBACK(size_t) pgmFormatTypeHandlerPage(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
2373 const char *pszType, void const *pvValue,
2374 int cchWidth, int cchPrecision, unsigned fFlags,
2375 void *pvUser)
2376{
2377 size_t cch;
2378 PCPGMPAGE pPage = (PCPGMPAGE)pvValue;
2379 if (VALID_PTR(pPage))
2380 {
2381 char szTmp[64+80];
2382
2383 cch = 0;
2384
2385 /* The single char state stuff. */
2386 static const char s_achPageStates[4] = { 'Z', 'A', 'W', 'S' };
2387 szTmp[cch++] = s_achPageStates[PGM_PAGE_GET_STATE(pPage)];
2388
2389#define IS_PART_INCLUDED(lvl) ( !(fFlags & RTSTR_F_PRECISION) || cchPrecision == (lvl) || cchPrecision >= (lvl)+10 )
2390 if (IS_PART_INCLUDED(5))
2391 {
2392 static const char s_achHandlerStates[4] = { '-', 't', 'w', 'a' };
2393 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)];
2394 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_VIRT_STATE(pPage)];
2395 }
2396
2397 /* The type. */
2398 if (IS_PART_INCLUDED(4))
2399 {
2400 szTmp[cch++] = ':';
2401 static const char s_achPageTypes[8][4] = { "INV", "RAM", "MI2", "M2A", "SHA", "ROM", "MIO", "BAD" };
2402 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE(pPage)][0];
2403 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE(pPage)][1];
2404 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE(pPage)][2];
2405 }
2406
2407 /* The numbers. */
2408 if (IS_PART_INCLUDED(3))
2409 {
2410 szTmp[cch++] = ':';
2411 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_HCPHYS(pPage), 16, 12, 0, RTSTR_F_ZEROPAD | RTSTR_F_64BIT);
2412 }
2413
2414 if (IS_PART_INCLUDED(2))
2415 {
2416 szTmp[cch++] = ':';
2417 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_PAGEID(pPage), 16, 7, 0, RTSTR_F_ZEROPAD | RTSTR_F_32BIT);
2418 }
2419
2420 if (IS_PART_INCLUDED(6))
2421 {
2422 szTmp[cch++] = ':';
2423 static const char s_achRefs[4] = { '-', 'U', '!', 'L' };
2424 szTmp[cch++] = s_achRefs[PGM_PAGE_GET_TD_CREFS(pPage)];
2425 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_TD_IDX(pPage), 16, 4, 0, RTSTR_F_ZEROPAD | RTSTR_F_16BIT);
2426 }
2427#undef IS_PART_INCLUDED
2428
2429 cch = pfnOutput(pvArgOutput, szTmp, cch);
2430 }
2431 else
2432 cch = pfnOutput(pvArgOutput, "<bad-pgmpage-ptr>", sizeof("<bad-pgmpage-ptr>") - 1);
2433 return cch;
2434}
2435
2436
2437/** Format handler for PGMRAMRANGE.
2438 * @copydoc FNRTSTRFORMATTYPE */
2439static DECLCALLBACK(size_t) pgmFormatTypeHandlerRamRange(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
2440 const char *pszType, void const *pvValue,
2441 int cchWidth, int cchPrecision, unsigned fFlags,
2442 void *pvUser)
2443{
2444 size_t cch;
2445 PGMRAMRANGE const *pRam = (PGMRAMRANGE const *)pvValue;
2446 if (VALID_PTR(pRam))
2447 {
2448 char szTmp[80];
2449 cch = RTStrPrintf(szTmp, sizeof(szTmp), "%RGp-%RGp", pRam->GCPhys, pRam->GCPhysLast);
2450 cch = pfnOutput(pvArgOutput, szTmp, cch);
2451 }
2452 else
2453 cch = pfnOutput(pvArgOutput, "<bad-pgmramrange-ptr>", sizeof("<bad-pgmramrange-ptr>") - 1);
2454 return cch;
2455}
2456
2457/** Format type handlers to be registered/deregistered. */
2458static const struct
2459{
2460 char szType[24];
2461 PFNRTSTRFORMATTYPE pfnHandler;
2462} g_aPgmFormatTypes[] =
2463{
2464 { "pgmpage", pgmFormatTypeHandlerPage },
2465 { "pgmramrange", pgmFormatTypeHandlerRamRange }
2466};
2467
2468#endif /* !IN_R0 || LOG_ENABLED */
2469
2470/**
2471 * Registers the global string format types.
2472 *
2473 * This should be called at module load time or in some other manner that ensures
2474 * that it's called exactly one time.
2475 *
2476 * @returns IPRT status code; VINF_SUCCESS on success, or the RTStrFormatTypeRegister failure code.
2477 */
2478VMMDECL(int) PGMRegisterStringFormatTypes(void)
2479{
2480#if !defined(IN_R0) || defined(LOG_ENABLED)
2481 int rc = VINF_SUCCESS;
2482 unsigned i;
2483 for (i = 0; RT_SUCCESS(rc) && i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
2484 {
2485 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
2486# ifdef IN_RING0
2487 if (rc == VERR_ALREADY_EXISTS)
2488 {
2489 /* in case of cleanup failure in ring-0 */
2490 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2491 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
2492 }
2493# endif
2494 }
2495 if (RT_FAILURE(rc))
2496 while (i-- > 0)
2497 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2498
2499 return rc;
2500#else
2501 return VINF_SUCCESS;
2502#endif
2503}
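/*
 * Illustrative use: once registered, the types are consumed through IPRT's
 * %R[...] format extension, e.g. in log statements (pPage and pRam assumed to
 * be valid pointers in the caller's scope):
 *
 *      Log(("%R[pgmpage] in range %R[pgmramrange]\n", pPage, pRam));
 */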
2504
2505
2506/**
2507 * Deregisters the global string format types.
2508 *
2509 * This should be called at module unload time or in some other manner that
2510 * ensures that it's called exactly one time.
2511 */
2512VMMDECL(void) PGMDeregisterStringFormatTypes(void)
2513{
2514#if !defined(IN_R0) || defined(LOG_ENABLED)
2515 for (unsigned i = 0; i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
2516 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2517#endif
2518}
2519
2520#ifdef VBOX_STRICT
2521
2522/**
2523 * Asserts that there are no mapping conflicts.
2524 *
2525 * @returns Number of conflicts.
2526 * @param pVM The VM Handle.
2527 */
2528VMMDECL(unsigned) PGMAssertNoMappingConflicts(PVM pVM)
2529{
2530 unsigned cErrors = 0;
2531
2532 /* Only applies to raw mode -> 1 VCPU */
2533 Assert(pVM->cCpus == 1);
2534 PVMCPU pVCpu = &pVM->aCpus[0];
2535
2536 /*
2537 * Check for mapping conflicts.
2538 */
2539 for (PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
2540 pMapping;
2541 pMapping = pMapping->CTX_SUFF(pNext))
2542 {
2543 /** @todo This is slow and should be optimized, but since it's just assertions I don't care now. */
2544 for (RTGCPTR GCPtr = pMapping->GCPtr;
2545 GCPtr <= pMapping->GCPtrLast;
2546 GCPtr += PAGE_SIZE)
2547 {
2548 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)GCPtr, NULL, NULL);
2549 if (rc != VERR_PAGE_TABLE_NOT_PRESENT)
2550 {
2551 AssertMsgFailed(("Conflict at %RGv with %s\n", GCPtr, R3STRING(pMapping->pszDesc)));
2552 cErrors++;
2553 break;
2554 }
2555 }
2556 }
2557
2558 return cErrors;
2559}
2560
2561
2562/**
2563 * Asserts that everything related to the guest CR3 is correctly shadowed.
2564 *
2565 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
2566 * and assert the correctness of the guest CR3 mapping before asserting that the
2567 * shadow page tables are in sync with the guest page tables.
2568 *
2569 * @returns Number of conflicts.
2570 * @param pVM The VM Handle.
2571 * @param pVCpu VMCPU handle.
2572 * @param cr3 The current guest CR3 register value.
2573 * @param cr4 The current guest CR4 register value.
2574 */
2575VMMDECL(unsigned) PGMAssertCR3(PVM pVM, PVMCPU pVCpu, uint64_t cr3, uint64_t cr4)
2576{
2577 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
2578 pgmLock(pVM);
2579 unsigned cErrors = PGM_BTH_PFN(AssertCR3, pVCpu)(pVCpu, cr3, cr4, 0, ~(RTGCPTR)0);
2580 pgmUnlock(pVM);
2581 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
2582 return cErrors;
2583}
2584
2585#endif /* VBOX_STRICT */