VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp@31123

Last change on this file since 31123 was 31123, checked in by vboxsync, 14 years ago

PGM: Moved the PGM stats out of the VM structure. Reduces the size of the PGMCPU structure quite a bit.

1/* $Id: PGMAll.cpp 31123 2010-07-26 17:46:41Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor - All context code.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_PGM
22#include <VBox/pgm.h>
23#include <VBox/cpum.h>
24#include <VBox/selm.h>
25#include <VBox/iom.h>
26#include <VBox/sup.h>
27#include <VBox/mm.h>
28#include <VBox/stam.h>
29#include <VBox/csam.h>
30#include <VBox/patm.h>
31#include <VBox/trpm.h>
32#include <VBox/rem.h>
33#include <VBox/em.h>
34#include <VBox/hwaccm.h>
35#include <VBox/hwacc_vmx.h>
36#include "../PGMInternal.h"
37#include <VBox/vm.h>
38#include "../PGMInline.h"
39#include <iprt/assert.h>
40#include <iprt/asm-amd64-x86.h>
41#include <iprt/string.h>
42#include <VBox/log.h>
43#include <VBox/param.h>
44#include <VBox/err.h>
45
46
47/*******************************************************************************
48* Structures and Typedefs *
49*******************************************************************************/
50/**
51 * State structure for PGM_GST_NAME(HandlerVirtualUpdate) that's
52 * passed to PGM_GST_NAME(VirtHandlerUpdateOne) during enumeration.
53 */
54typedef struct PGMHVUSTATE
55{
56 /** The VM handle. */
57 PVM pVM;
58 /** The VMCPU handle. */
59 PVMCPU pVCpu;
60 /** The todo flags. */
61 RTUINT fTodo;
62 /** The CR4 register value. */
63 uint32_t cr4;
64} PGMHVUSTATE, *PPGMHVUSTATE;
65
66
67/*******************************************************************************
68* Internal Functions *
69*******************************************************************************/
70DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
71DECLINLINE(int) pgmShwGetPaePoolPagePD(PPGMCPU pPGM, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde);
72#ifndef IN_RC
73static int pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD);
74static int pgmShwGetEPTPDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD);
75#endif
76
77
78/*
79 * Shadow - 32-bit mode
80 */
81#define PGM_SHW_TYPE PGM_TYPE_32BIT
82#define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
83#include "PGMAllShw.h"
84
85/* Guest - real mode */
86#define PGM_GST_TYPE PGM_TYPE_REAL
87#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
88#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
89#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
90#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
91#include "PGMGstDefs.h"
92#include "PGMAllGst.h"
93#include "PGMAllBth.h"
94#undef BTH_PGMPOOLKIND_PT_FOR_PT
95#undef BTH_PGMPOOLKIND_ROOT
96#undef PGM_BTH_NAME
97#undef PGM_GST_TYPE
98#undef PGM_GST_NAME
99
100/* Guest - protected mode */
101#define PGM_GST_TYPE PGM_TYPE_PROT
102#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
103#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
104#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
105#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
106#include "PGMGstDefs.h"
107#include "PGMAllGst.h"
108#include "PGMAllBth.h"
109#undef BTH_PGMPOOLKIND_PT_FOR_PT
110#undef BTH_PGMPOOLKIND_ROOT
111#undef PGM_BTH_NAME
112#undef PGM_GST_TYPE
113#undef PGM_GST_NAME
114
115/* Guest - 32-bit mode */
116#define PGM_GST_TYPE PGM_TYPE_32BIT
117#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
118#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
119#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
120#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
121#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD
122#include "PGMGstDefs.h"
123#include "PGMAllGst.h"
124#include "PGMAllBth.h"
125#undef BTH_PGMPOOLKIND_PT_FOR_BIG
126#undef BTH_PGMPOOLKIND_PT_FOR_PT
127#undef BTH_PGMPOOLKIND_ROOT
128#undef PGM_BTH_NAME
129#undef PGM_GST_TYPE
130#undef PGM_GST_NAME
131
132#undef PGM_SHW_TYPE
133#undef PGM_SHW_NAME
134
135
136/*
137 * Shadow - PAE mode
138 */
139#define PGM_SHW_TYPE PGM_TYPE_PAE
140#define PGM_SHW_NAME(name) PGM_SHW_NAME_PAE(name)
141#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
142#include "PGMAllShw.h"
143
144/* Guest - real mode */
145#define PGM_GST_TYPE PGM_TYPE_REAL
146#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
147#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
148#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
149#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
150#include "PGMGstDefs.h"
151#include "PGMAllBth.h"
152#undef BTH_PGMPOOLKIND_PT_FOR_PT
153#undef BTH_PGMPOOLKIND_ROOT
154#undef PGM_BTH_NAME
155#undef PGM_GST_TYPE
156#undef PGM_GST_NAME
157
158/* Guest - protected mode */
159#define PGM_GST_TYPE PGM_TYPE_PROT
160#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
161#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
162#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
163#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
164#include "PGMGstDefs.h"
165#include "PGMAllBth.h"
166#undef BTH_PGMPOOLKIND_PT_FOR_PT
167#undef BTH_PGMPOOLKIND_ROOT
168#undef PGM_BTH_NAME
169#undef PGM_GST_TYPE
170#undef PGM_GST_NAME
171
172/* Guest - 32-bit mode */
173#define PGM_GST_TYPE PGM_TYPE_32BIT
174#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
175#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
176#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
177#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
178#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_FOR_32BIT
179#include "PGMGstDefs.h"
180#include "PGMAllBth.h"
181#undef BTH_PGMPOOLKIND_PT_FOR_BIG
182#undef BTH_PGMPOOLKIND_PT_FOR_PT
183#undef BTH_PGMPOOLKIND_ROOT
184#undef PGM_BTH_NAME
185#undef PGM_GST_TYPE
186#undef PGM_GST_NAME
187
188
189/* Guest - PAE mode */
190#define PGM_GST_TYPE PGM_TYPE_PAE
191#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
192#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
193#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
194#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
195#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT
196#include "PGMGstDefs.h"
197#include "PGMAllGst.h"
198#include "PGMAllBth.h"
199#undef BTH_PGMPOOLKIND_PT_FOR_BIG
200#undef BTH_PGMPOOLKIND_PT_FOR_PT
201#undef BTH_PGMPOOLKIND_ROOT
202#undef PGM_BTH_NAME
203#undef PGM_GST_TYPE
204#undef PGM_GST_NAME
205
206#undef PGM_SHW_TYPE
207#undef PGM_SHW_NAME
208
209
210#ifndef IN_RC /* AMD64 implies VT-x/AMD-V */
211/*
212 * Shadow - AMD64 mode
213 */
214# define PGM_SHW_TYPE PGM_TYPE_AMD64
215# define PGM_SHW_NAME(name) PGM_SHW_NAME_AMD64(name)
216# include "PGMAllShw.h"
217
218/* Guest - protected mode (only used for AMD-V nested paging in 64 bits mode) */
219# define PGM_GST_TYPE PGM_TYPE_PROT
220# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
221# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
222# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
223# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PD_PHYS
224# include "PGMGstDefs.h"
225# include "PGMAllBth.h"
226# undef BTH_PGMPOOLKIND_PT_FOR_PT
227# undef BTH_PGMPOOLKIND_ROOT
228# undef PGM_BTH_NAME
229# undef PGM_GST_TYPE
230# undef PGM_GST_NAME
231
232# ifdef VBOX_WITH_64_BITS_GUESTS
233/* Guest - AMD64 mode */
234# define PGM_GST_TYPE PGM_TYPE_AMD64
235# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
236# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
237# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
238# define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
239# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_64BIT_PML4
240# include "PGMGstDefs.h"
241# include "PGMAllGst.h"
242# include "PGMAllBth.h"
243# undef BTH_PGMPOOLKIND_PT_FOR_BIG
244# undef BTH_PGMPOOLKIND_PT_FOR_PT
245# undef BTH_PGMPOOLKIND_ROOT
246# undef PGM_BTH_NAME
247# undef PGM_GST_TYPE
248# undef PGM_GST_NAME
249# endif /* VBOX_WITH_64_BITS_GUESTS */
250
251# undef PGM_SHW_TYPE
252# undef PGM_SHW_NAME
253
254
255/*
256 * Shadow - Nested paging mode
257 */
258# define PGM_SHW_TYPE PGM_TYPE_NESTED
259# define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED(name)
260# include "PGMAllShw.h"
261
262/* Guest - real mode */
263# define PGM_GST_TYPE PGM_TYPE_REAL
264# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
265# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_REAL(name)
266# include "PGMGstDefs.h"
267# include "PGMAllBth.h"
268# undef PGM_BTH_NAME
269# undef PGM_GST_TYPE
270# undef PGM_GST_NAME
271
272/* Guest - protected mode */
273# define PGM_GST_TYPE PGM_TYPE_PROT
274# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
275# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PROT(name)
276# include "PGMGstDefs.h"
277# include "PGMAllBth.h"
278# undef PGM_BTH_NAME
279# undef PGM_GST_TYPE
280# undef PGM_GST_NAME
281
282/* Guest - 32-bit mode */
283# define PGM_GST_TYPE PGM_TYPE_32BIT
284# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
285# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT(name)
286# include "PGMGstDefs.h"
287# include "PGMAllBth.h"
288# undef PGM_BTH_NAME
289# undef PGM_GST_TYPE
290# undef PGM_GST_NAME
291
292/* Guest - PAE mode */
293# define PGM_GST_TYPE PGM_TYPE_PAE
294# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
295# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE(name)
296# include "PGMGstDefs.h"
297# include "PGMAllBth.h"
298# undef PGM_BTH_NAME
299# undef PGM_GST_TYPE
300# undef PGM_GST_NAME
301
302# ifdef VBOX_WITH_64_BITS_GUESTS
303/* Guest - AMD64 mode */
304# define PGM_GST_TYPE PGM_TYPE_AMD64
305# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
306# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64(name)
307# include "PGMGstDefs.h"
308# include "PGMAllBth.h"
309# undef PGM_BTH_NAME
310# undef PGM_GST_TYPE
311# undef PGM_GST_NAME
312# endif /* VBOX_WITH_64_BITS_GUESTS */
313
314# undef PGM_SHW_TYPE
315# undef PGM_SHW_NAME
316
317
318/*
319 * Shadow - EPT
320 */
321# define PGM_SHW_TYPE PGM_TYPE_EPT
322# define PGM_SHW_NAME(name) PGM_SHW_NAME_EPT(name)
323# include "PGMAllShw.h"
324
325/* Guest - real mode */
326# define PGM_GST_TYPE PGM_TYPE_REAL
327# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
328# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_REAL(name)
329# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
330# include "PGMGstDefs.h"
331# include "PGMAllBth.h"
332# undef BTH_PGMPOOLKIND_PT_FOR_PT
333# undef PGM_BTH_NAME
334# undef PGM_GST_TYPE
335# undef PGM_GST_NAME
336
337/* Guest - protected mode */
338# define PGM_GST_TYPE PGM_TYPE_PROT
339# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
340# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
341# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
342# include "PGMGstDefs.h"
343# include "PGMAllBth.h"
344# undef BTH_PGMPOOLKIND_PT_FOR_PT
345# undef PGM_BTH_NAME
346# undef PGM_GST_TYPE
347# undef PGM_GST_NAME
348
349/* Guest - 32-bit mode */
350# define PGM_GST_TYPE PGM_TYPE_32BIT
351# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
352# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_32BIT(name)
353# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
354# include "PGMGstDefs.h"
355# include "PGMAllBth.h"
356# undef BTH_PGMPOOLKIND_PT_FOR_PT
357# undef PGM_BTH_NAME
358# undef PGM_GST_TYPE
359# undef PGM_GST_NAME
360
361/* Guest - PAE mode */
362# define PGM_GST_TYPE PGM_TYPE_PAE
363# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
364# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PAE(name)
365# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
366# include "PGMGstDefs.h"
367# include "PGMAllBth.h"
368# undef BTH_PGMPOOLKIND_PT_FOR_PT
369# undef PGM_BTH_NAME
370# undef PGM_GST_TYPE
371# undef PGM_GST_NAME
372
373# ifdef VBOX_WITH_64_BITS_GUESTS
374/* Guest - AMD64 mode */
375# define PGM_GST_TYPE PGM_TYPE_AMD64
376# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
377# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_AMD64(name)
378# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
379# include "PGMGstDefs.h"
380# include "PGMAllBth.h"
381# undef BTH_PGMPOOLKIND_PT_FOR_PT
382# undef PGM_BTH_NAME
383# undef PGM_GST_TYPE
384# undef PGM_GST_NAME
385# endif /* VBOX_WITH_64_BITS_GUESTS */
386
387# undef PGM_SHW_TYPE
388# undef PGM_SHW_NAME
389
390#endif /* !IN_RC */
391
392
393#ifndef IN_RING3
394/**
395 * #PF Handler.
396 *
397 * @returns VBox status code (appropriate for trap handling and GC return).
398 * @param pVCpu VMCPU handle.
399 * @param uErr The trap error code.
400 * @param pRegFrame Trap register frame.
401 * @param pvFault The fault address.
402 */
403VMMDECL(int) PGMTrap0eHandler(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
404{
405 PVM pVM = pVCpu->CTX_SUFF(pVM);
406
407 Log(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv eip=%04x:%RGv\n", uErr, pvFault, pRegFrame->cs, (RTGCPTR)pRegFrame->rip));
408 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0e, a);
409 STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );
410
411
412#ifdef VBOX_WITH_STATISTICS
413 /*
414 * Error code stats.
415 */
416 if (uErr & X86_TRAP_PF_US)
417 {
418 if (!(uErr & X86_TRAP_PF_P))
419 {
420 if (uErr & X86_TRAP_PF_RW)
421 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentWrite);
422 else
423 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentRead);
424 }
425 else if (uErr & X86_TRAP_PF_RW)
426 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSWrite);
427 else if (uErr & X86_TRAP_PF_RSVD)
428 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSReserved);
429 else if (uErr & X86_TRAP_PF_ID)
430 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNXE);
431 else
432 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSRead);
433 }
434 else
435 { /* Supervisor */
436 if (!(uErr & X86_TRAP_PF_P))
437 {
438 if (uErr & X86_TRAP_PF_RW)
439 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentWrite);
440 else
441 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentRead);
442 }
443 else if (uErr & X86_TRAP_PF_RW)
444 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVWrite);
445 else if (uErr & X86_TRAP_PF_ID)
446 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSNXE);
447 else if (uErr & X86_TRAP_PF_RSVD)
448 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVReserved);
449 }
450#endif /* VBOX_WITH_STATISTICS */
451
452 /*
453 * Call the worker.
454 */
455 bool fLockTaken = false;
456 int rc = PGM_BTH_PFN(Trap0eHandler, pVCpu)(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken);
457 if (fLockTaken)
458 {
459 Assert(PGMIsLockOwner(pVM));
460 pgmUnlock(pVM);
461 }
462 if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
463 rc = VINF_SUCCESS;
464
465# ifdef IN_RING0
466 /* Note: hack alert for difficult to reproduce problem. */
467 if ( rc == VERR_PAGE_NOT_PRESENT /* SMP only ; disassembly might fail. */
468 || rc == VERR_PAGE_TABLE_NOT_PRESENT /* seen with UNI & SMP */
469 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT /* seen with SMP */
470 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT) /* precaution */
471 {
472 Log(("WARNING: Unexpected VERR_PAGE_TABLE_NOT_PRESENT (%d) for page fault at %RGv error code %x (rip=%RGv)\n", rc, pvFault, uErr, pRegFrame->rip));
473 /* Some kind of inconsistency in the SMP case; it's safe to just execute the instruction again; not sure about single VCPU VMs though. */
474 rc = VINF_SUCCESS;
475 }
476# endif
477
478 STAM_STATS({ if (rc == VINF_EM_RAW_GUEST_TRAP) STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eGuestPF); });
479 STAM_STATS({ if (!pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution))
480 pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2Misc; });
481 STAM_PROFILE_STOP_EX(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0e, pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution), a);
482 return rc;
483}
484#endif /* !IN_RING3 */
485
486
487/**
488 * Prefetch a page
489 *
490 * Typically used to sync commonly used pages before entering raw mode
491 * after a CR3 reload.
492 *
493 * @returns VBox status code suitable for scheduling.
494 * @retval VINF_SUCCESS on success.
495 * @retval VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
496 * @param pVCpu VMCPU handle.
497 * @param GCPtrPage Page to invalidate.
498 */
499VMMDECL(int) PGMPrefetchPage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
500{
501 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Prefetch), a);
502 int rc = PGM_BTH_PFN(PrefetchPage, pVCpu)(pVCpu, GCPtrPage);
503 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Prefetch), a);
504 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
505 return rc;
506}
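
/*
 * Illustrative usage sketch, not part of the original file: a caller could
 * warm the shadow page tables for a few hot guest pages right after a CR3
 * reload.  The helper name and the page list are made up for illustration.
 */
#if 0 /* example only */
static void pgmExamplePrefetchHotPages(PVMCPU pVCpu, RTGCPTR const *paPages, unsigned cPages)
{
    for (unsigned i = 0; i < cPages; i++)
    {
        int rc = PGMPrefetchPage(pVCpu, paPages[i]);
        if (rc == VINF_PGM_SYNC_CR3)
            break;          /* Out of shadow pages; a full sync is pending anyway. */
        AssertRC(rc);
    }
}
#endif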
507
508
509/**
510 * Gets the mapping corresponding to the specified address (if any).
511 *
512 * @returns Pointer to the mapping.
513 * @returns NULL if no mapping was found.
514 *
515 * @param pVM The virtual machine.
516 * @param GCPtr The guest context pointer.
517 */
518PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr)
519{
520 PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
521 while (pMapping)
522 {
523 if ((uintptr_t)GCPtr < (uintptr_t)pMapping->GCPtr)
524 break;
525 if ((uintptr_t)GCPtr - (uintptr_t)pMapping->GCPtr < pMapping->cb)
526 return pMapping;
527 pMapping = pMapping->CTX_SUFF(pNext);
528 }
529 return NULL;
530}
531
532
533/**
534 * Verifies a range of pages for read or write access
535 *
536 * Only checks the guest's page tables
537 *
538 * @returns VBox status code.
539 * @param pVCpu VMCPU handle.
540 * @param Addr Guest virtual address to check
541 * @param cbSize Access size
542 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
543 * @remarks Currently not in use.
544 */
545VMMDECL(int) PGMIsValidAccess(PVMCPU pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
546{
547 /*
548 * Validate input.
549 */
550 if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
551 {
552 AssertMsgFailed(("PGMIsValidAccess: invalid access type %08x\n", fAccess));
553 return VERR_INVALID_PARAMETER;
554 }
555
556 uint64_t fPage;
557 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)Addr, &fPage, NULL);
558 if (RT_FAILURE(rc))
559 {
560 Log(("PGMIsValidAccess: access violation for %RGv rc=%d\n", Addr, rc));
561 return VINF_EM_RAW_GUEST_TRAP;
562 }
563
564 /*
565 * Check if the access would cause a page fault
566 *
567 * Note that hypervisor page directories are not present in the guest's tables, so this check
568 * is sufficient.
569 */
570 bool fWrite = !!(fAccess & X86_PTE_RW);
571 bool fUser = !!(fAccess & X86_PTE_US);
572 if ( !(fPage & X86_PTE_P)
573 || (fWrite && !(fPage & X86_PTE_RW))
574 || (fUser && !(fPage & X86_PTE_US)) )
575 {
576 Log(("PGMIsValidAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPage, fWrite, fUser));
577 return VINF_EM_RAW_GUEST_TRAP;
578 }
579 if ( RT_SUCCESS(rc)
580 && PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize))
581 return PGMIsValidAccess(pVCpu, Addr + PAGE_SIZE, (cbSize > PAGE_SIZE) ? cbSize - PAGE_SIZE : 1, fAccess);
582 return rc;
583}
584
585
586/**
587 * Verifies a range of pages for read or write access
588 *
589 * Supports handling of pages marked for dirty bit tracking and CSAM
590 *
591 * @returns VBox status code.
592 * @param pVCpu VMCPU handle.
593 * @param Addr Guest virtual address to check
594 * @param cbSize Access size
595 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
596 */
597VMMDECL(int) PGMVerifyAccess(PVMCPU pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
598{
599 PVM pVM = pVCpu->CTX_SUFF(pVM);
600
601 AssertMsg(!(fAccess & ~(X86_PTE_US | X86_PTE_RW)), ("PGMVerifyAccess: invalid access type %08x\n", fAccess));
602
603 /*
604 * Get going.
605 */
606 uint64_t fPageGst;
607 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)Addr, &fPageGst, NULL);
608 if (RT_FAILURE(rc))
609 {
610 Log(("PGMVerifyAccess: access violation for %RGv rc=%d\n", Addr, rc));
611 return VINF_EM_RAW_GUEST_TRAP;
612 }
613
614 /*
615 * Check if the access would cause a page fault
616 *
617 * Note that hypervisor page directories are not present in the guest's tables, so this check
618 * is sufficient.
619 */
620 const bool fWrite = !!(fAccess & X86_PTE_RW);
621 const bool fUser = !!(fAccess & X86_PTE_US);
622 if ( !(fPageGst & X86_PTE_P)
623 || (fWrite && !(fPageGst & X86_PTE_RW))
624 || (fUser && !(fPageGst & X86_PTE_US)) )
625 {
626 Log(("PGMVerifyAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPageGst, fWrite, fUser));
627 return VINF_EM_RAW_GUEST_TRAP;
628 }
629
630 if (!pVM->pgm.s.fNestedPaging)
631 {
632 /*
633 * Next step is to verify if we protected this page for dirty bit tracking or for CSAM scanning
634 */
635 rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, NULL, NULL);
636 if ( rc == VERR_PAGE_NOT_PRESENT
637 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
638 {
639 /*
640 * Page is not present in our page tables.
641 * Try to sync it!
642 */
643 Assert(X86_TRAP_PF_RW == X86_PTE_RW && X86_TRAP_PF_US == X86_PTE_US);
644 uint32_t uErr = fAccess & (X86_TRAP_PF_RW | X86_TRAP_PF_US);
645 rc = PGM_BTH_PFN(VerifyAccessSyncPage, pVCpu)(pVCpu, Addr, fPageGst, uErr);
646 if (rc != VINF_SUCCESS)
647 return rc;
648 }
649 else
650 AssertMsg(rc == VINF_SUCCESS, ("PGMShwGetPage %RGv failed with %Rrc\n", Addr, rc));
651 }
652
653#if 0 /* def VBOX_STRICT; triggers too often now */
654 /*
655 * This check is a bit paranoid, but useful.
656 */
657 /* Note! This will assert when writing to monitored pages (a bit annoying actually). */
658 uint64_t fPageShw;
659 rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, &fPageShw, NULL);
660 if ( (rc == VERR_PAGE_NOT_PRESENT || RT_FAILURE(rc))
661 || (fWrite && !(fPageShw & X86_PTE_RW))
662 || (fUser && !(fPageShw & X86_PTE_US)) )
663 {
664 AssertMsgFailed(("Unexpected access violation for %RGv! rc=%Rrc write=%d user=%d\n",
665 Addr, rc, fWrite && !(fPageShw & X86_PTE_RW), fUser && !(fPageShw & X86_PTE_US)));
666 return VINF_EM_RAW_GUEST_TRAP;
667 }
668#endif
669
670 if ( RT_SUCCESS(rc)
671 && ( PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize - 1)
672 || Addr + cbSize < Addr))
673 {
674 /* Don't recursively call PGMVerifyAccess as we might run out of stack. */
675 for (;;)
676 {
677 Addr += PAGE_SIZE;
678 if (cbSize > PAGE_SIZE)
679 cbSize -= PAGE_SIZE;
680 else
681 cbSize = 1;
682 rc = PGMVerifyAccess(pVCpu, Addr, 1, fAccess);
683 if (rc != VINF_SUCCESS)
684 break;
685 if (PAGE_ADDRESS(Addr) == PAGE_ADDRESS(Addr + cbSize - 1))
686 break;
687 }
688 }
689 return rc;
690}
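
/*
 * Illustrative usage sketch, not from the original source: an emulation path
 * might validate a guest user-mode buffer before writing to it.  A return of
 * VINF_EM_RAW_GUEST_TRAP means the access would fault in the guest, so the
 * caller should let the guest take the page fault instead of emulating.
 */
#if 0 /* example only */
static int pgmExampleCheckUserWrite(PVMCPU pVCpu, RTGCPTR GCPtrDst, uint32_t cbWrite)
{
    int rc = PGMVerifyAccess(pVCpu, GCPtrDst, cbWrite, X86_PTE_RW | X86_PTE_US);
    if (rc == VINF_EM_RAW_GUEST_TRAP)
        Log(("Write to %RGv (%u bytes) would fault in the guest\n", GCPtrDst, cbWrite));
    return rc;
}
#endif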
691
692
693/**
694 * Emulation of the invlpg instruction (HC only actually).
695 *
696 * @returns VBox status code, special care required.
697 * @retval VINF_PGM_SYNC_CR3 - handled.
698 * @retval VINF_EM_RAW_EMULATE_INSTR - not handled (RC only).
699 * @retval VERR_REM_FLUSHED_PAGES_OVERFLOW - not handled.
700 *
701 * @param pVCpu VMCPU handle.
702 * @param GCPtrPage Page to invalidate.
703 *
704 * @remark ASSUMES the page table entry or page directory is valid. Fairly
705 * safe, but there could be edge cases!
706 *
707 * @todo Flush page or page directory only if necessary!
708 */
709VMMDECL(int) PGMInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
710{
711 PVM pVM = pVCpu->CTX_SUFF(pVM);
712 int rc;
713 Log3(("PGMInvalidatePage: GCPtrPage=%RGv\n", GCPtrPage));
714
715#ifndef IN_RING3
716 /*
717 * Notify the recompiler so it can record this instruction.
718 */
719 REMNotifyInvalidatePage(pVM, GCPtrPage);
720#endif /* !IN_RING3 */
721
722
723#ifdef IN_RC
724 /*
725 * Check for conflicts and pending CR3 monitoring updates.
726 */
727 if (pgmMapAreMappingsFloating(&pVM->pgm.s))
728 {
729 if ( pgmGetMapping(pVM, GCPtrPage)
730 && PGMGstGetPage(pVCpu, GCPtrPage, NULL, NULL) != VERR_PAGE_TABLE_NOT_PRESENT)
731 {
732 LogFlow(("PGMGCInvalidatePage: Conflict!\n"));
733 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
734 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRCInvlPgConflict);
735 return VINF_PGM_SYNC_CR3;
736 }
737
738 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
739 {
740 LogFlow(("PGMGCInvalidatePage: PGM_SYNC_MONITOR_CR3 -> reinterpret instruction in R3\n"));
741 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRCInvlPgSyncMonCR3);
742 return VINF_EM_RAW_EMULATE_INSTR;
743 }
744 }
745#endif /* IN_RC */
746
747 /*
748 * Call paging mode specific worker.
749 */
750 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InvalidatePage), a);
751 pgmLock(pVM);
752 rc = PGM_BTH_PFN(InvalidatePage, pVCpu)(pVCpu, GCPtrPage);
753 pgmUnlock(pVM);
754 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InvalidatePage), a);
755
756 /* Invalidate the TLB entry; might already be done by InvalidatePage (@todo) */
757 PGM_INVL_PG(pVCpu, GCPtrPage);
758
759#ifdef IN_RING3
760 /*
761 * Check if we have a pending update of the CR3 monitoring.
762 */
763 if ( RT_SUCCESS(rc)
764 && (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3))
765 {
766 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
767 Assert(!pVM->pgm.s.fMappingsFixed); Assert(!pVM->pgm.s.fMappingsDisabled);
768 }
769
770 /*
771 * Inform CSAM about the flush
772 *
773 * Note: This is to check if monitored pages have been changed; when we implement
774 * callbacks for virtual handlers, this is no longer required.
775 */
776 CSAMR3FlushPage(pVM, GCPtrPage);
777#endif /* IN_RING3 */
778
779 /* Ignore all irrelevant error codes. */
780 if ( rc == VERR_PAGE_NOT_PRESENT
781 || rc == VERR_PAGE_TABLE_NOT_PRESENT
782 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT
783 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)
784 rc = VINF_SUCCESS;
785
786 return rc;
787}
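
/*
 * Illustrative usage sketch (assumption): an INVLPG emulation path forwards
 * the address and lets the returned status drive rescheduling.  The listed
 * status codes are the ones documented for PGMInvalidatePage() above.
 */
#if 0 /* example only */
static int pgmExampleEmulateInvlpg(PVMCPU pVCpu, RTGCPTR GCPtrPage)
{
    int rc = PGMInvalidatePage(pVCpu, GCPtrPage);
    AssertMsg(   rc == VINF_SUCCESS
              || rc == VINF_PGM_SYNC_CR3
              || rc == VINF_EM_RAW_EMULATE_INSTR
              || rc == VERR_REM_FLUSHED_PAGES_OVERFLOW,
              ("rc=%Rrc\n", rc));
    return rc;
}
#endif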
788
789
790/**
791 * Executes an instruction using the interpreter.
792 *
793 * @returns VBox status code (appropriate for trap handling and GC return).
794 * @param pVM VM handle.
795 * @param pVCpu VMCPU handle.
796 * @param pRegFrame Register frame.
797 * @param pvFault Fault address.
798 */
799VMMDECL(int) PGMInterpretInstruction(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
800{
801 uint32_t cb;
802 int rc = EMInterpretInstruction(pVM, pVCpu, pRegFrame, pvFault, &cb);
803 if (rc == VERR_EM_INTERPRETER)
804 rc = VINF_EM_RAW_EMULATE_INSTR;
805 if (rc != VINF_SUCCESS)
806 Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%RGv)\n", rc, pvFault));
807 return rc;
808}
809
810
811/**
812 * Gets effective page information (from the VMM page directory).
813 *
814 * @returns VBox status.
815 * @param pVCpu VMCPU handle.
816 * @param GCPtr Guest Context virtual address of the page.
817 * @param pfFlags Where to store the flags. These are X86_PTE_*.
818 * @param pHCPhys Where to store the HC physical address of the page.
819 * This is page aligned.
820 * @remark You should use PGMMapGetPage() for pages in a mapping.
821 */
822VMMDECL(int) PGMShwGetPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
823{
824 pgmLock(pVCpu->CTX_SUFF(pVM));
825 int rc = PGM_SHW_PFN(GetPage, pVCpu)(pVCpu, GCPtr, pfFlags, pHCPhys);
826 pgmUnlock(pVCpu->CTX_SUFF(pVM));
827 return rc;
828}
829
830
831/**
832 * Modify page flags for a range of pages in the shadow context.
833 *
834 * The existing flags are ANDed with the fMask and ORed with the fFlags.
835 *
836 * @returns VBox status code.
837 * @param pVCpu VMCPU handle.
838 * @param GCPtr Virtual address of the first page in the range.
839 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
840 * @param fMask The AND mask - page flags X86_PTE_*.
841 * Be very CAREFUL when ~'ing constants which could be 32-bit!
842 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
843 * @remark You must use PGMMapModifyPage() for pages in a mapping.
844 */
845DECLINLINE(int) pdmShwModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags)
846{
847 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
848 Assert(!(fOpFlags & ~(PGM_MK_PG_IS_MMIO2 | PGM_MK_PG_IS_WRITE_FAULT)));
849
850 GCPtr &= PAGE_BASE_GC_MASK; /** @todo this ain't necessary, right... */
851
852 PVM pVM = pVCpu->CTX_SUFF(pVM);
853 pgmLock(pVM);
854 int rc = PGM_SHW_PFN(ModifyPage, pVCpu)(pVCpu, GCPtr, PAGE_SIZE, fFlags, fMask, fOpFlags);
855 pgmUnlock(pVM);
856 return rc;
857}
858
859
860/**
861 * Changes the page flags for a single page in the shadow page tables so as to
862 * make it read-only.
863 *
864 * @returns VBox status code.
865 * @param pVCpu VMCPU handle.
866 * @param GCPtr Virtual address of the first page in the range.
867 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
868 */
869VMMDECL(int) PGMShwMakePageReadonly(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
870{
871 return pdmShwModifyPage(pVCpu, GCPtr, 0, ~(uint64_t)X86_PTE_RW, fOpFlags);
872}
873
874
875/**
876 * Changes the page flags for a single page in the shadow page tables so as to
877 * make it writable.
878 *
879 * The caller must know with 101% certainty that the guest page tables map this
880 * page as writable too. This function will deal with shared, zero and write
881 * monitored pages.
882 *
883 * @returns VBox status code.
884 * @param pVCpu VMCPU handle.
885 * @param GCPtr Virtual address of the first page in the range.
886 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags (e.g.
887 * PGM_MK_PG_IS_MMIO2 for MMIO2 pages).
888 */
889VMMDECL(int) PGMShwMakePageWritable(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
890{
891 return pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)0, fOpFlags);
892}
893
894
895/**
896 * Changes the page flags for a single page in the shadow page tables so as to
897 * make it not present.
898 *
899 * @returns VBox status code.
900 * @param pVCpu VMCPU handle.
901 * @param GCPtr Virtual address of the first page in the range.
902 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
903 */
904VMMDECL(int) PGMShwMakePageNotPresent(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
905{
906 return pdmShwModifyPage(pVCpu, GCPtr, 0, 0, fOpFlags);
907}
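
/*
 * Illustrative sketch (assumption) of the fFlags/fMask convention used by
 * pdmShwModifyPage() above: the shadow PTE is ANDed with fMask and then ORed
 * with fFlags.  The three wrappers are fixed combinations of that; e.g.
 * temporarily write protecting a page and restoring it could look like this.
 */
#if 0 /* example only */
static void pgmExampleWriteProtectAndRestore(PVMCPU pVCpu, RTGCPTR GCPtrPage)
{
    /* fFlags=0, fMask=~X86_PTE_RW: clear the write bit. */
    int rc = PGMShwMakePageReadonly(pVCpu, GCPtrPage, 0 /*fOpFlags*/);
    AssertRC(rc);

    /* ... the next guest write to GCPtrPage faults and can be recorded ... */

    /* fFlags=X86_PTE_RW, fMask=~0: set the write bit again. */
    rc = PGMShwMakePageWritable(pVCpu, GCPtrPage, 0 /*fOpFlags*/);
    AssertRC(rc);
}
#endif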
908
909
910/**
911 * Gets the shadow page directory for the specified address, PAE.
912 *
913 * @returns VBox status code.
914 * @param pVCpu The VMCPU handle.
915 * @param GCPtr The address.
916 * @param uGstPdpe Guest PDPT entry.
917 * @param ppPD Receives address of page directory
918 */
919int pgmShwSyncPaePDPtr(PVMCPU pVCpu, RTGCPTR GCPtr, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
920{
921 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
922 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(&pVCpu->pgm.s);
923 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
924 PVM pVM = pVCpu->CTX_SUFF(pVM);
925 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
926 PPGMPOOLPAGE pShwPage;
927 int rc;
928
929 Assert(PGMIsLockOwner(pVM));
930
931 /* Allocate page directory if not present. */
932 if ( !pPdpe->n.u1Present
933 && !(pPdpe->u & X86_PDPE_PG_MASK))
934 {
935 RTGCPTR64 GCPdPt;
936 PGMPOOLKIND enmKind;
937
938# if defined(IN_RC)
939 /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
940 PGMDynLockHCPage(pVM, (uint8_t *)pPdpe);
941# endif
942
943 if (pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu))
944 {
945 /* AMD-V nested paging or real/protected mode without paging. */
946 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
947 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
948 }
949 else
950 {
951 if (CPUMGetGuestCR4(pVCpu) & X86_CR4_PAE)
952 {
953 if (!(uGstPdpe & X86_PDPE_P))
954 {
955 /* PD not present; guest must reload CR3 to change it.
956 * No need to monitor anything in this case.
957 */
958 Assert(!HWACCMIsEnabled(pVM));
959
960 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
961 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
962 uGstPdpe |= X86_PDPE_P;
963 }
964 else
965 {
966 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
967 enmKind = PGMPOOLKIND_PAE_PD_FOR_PAE_PD;
968 }
969 }
970 else
971 {
972 GCPdPt = CPUMGetGuestCR3(pVCpu);
973 enmKind = (PGMPOOLKIND)(PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD + iPdPt);
974 }
975 }
976
977 /* Create a reference back to the PDPT by using the index in its shadow page. */
978 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPdPt, &pShwPage);
979 AssertRCReturn(rc, rc);
980
981 /* The PD was cached or created; hook it up now. */
982 pPdpe->u |= pShwPage->Core.Key
983 | (uGstPdpe & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));
984
985# if defined(IN_RC)
986 /*
987 * In 32-bit PAE mode we *must* invalidate the TLB when changing a
988 * PDPT entry; the CPU fetches them only during cr3 load, so any
989 * non-present PDPT will continue to cause page faults.
990 */
991 ASMReloadCR3();
992 PGMDynUnlockHCPage(pVM, (uint8_t *)pPdpe);
993# endif
994 }
995 else
996 {
997 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
998 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
999 Assert((pPdpe->u & X86_PDPE_PG_MASK) == pShwPage->Core.Key);
1000
1001 pgmPoolCacheUsed(pPool, pShwPage);
1002 }
1003 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1004 return VINF_SUCCESS;
1005}
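
/*
 * Illustrative sketch (assumption): callers of pgmShwSyncPaePDPtr() normally
 * go on to index the returned page directory.  X86_PD_PAE_SHIFT and
 * X86_PD_PAE_MASK are assumed to be the usual PAE PD index constants from
 * x86.h; the PGM lock must be held, as asserted by the function itself.
 */
#if 0 /* example only */
static int pgmExampleGetShwPaePde(PVMCPU pVCpu, RTGCPTR GCPtr, X86PGPAEUINT uGstPdpe, PX86PDEPAE *ppPde)
{
    PX86PDPAE pPD;
    int rc = pgmShwSyncPaePDPtr(pVCpu, GCPtr, uGstPdpe, &pPD);
    if (RT_SUCCESS(rc))
        *ppPde = &pPD->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK];
    return rc;
}
#endif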
1006
1007
1008/**
1009 * Gets the PGM pool page of the shadow page directory mapping the given address, PAE.
1010 *
1011 * @returns VBox status code.
1012 * @param pPGM Pointer to the PGMCPU instance data.
1013 * @param GCPtr The address.
1014 * @param ppShwPde Receives the address of the pgm pool page for the shadow page directory
1015 */
1016DECLINLINE(int) pgmShwGetPaePoolPagePD(PPGMCPU pPGM, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde)
1017{
1018 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
1019 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pPGM);
1020
1021 Assert(PGMIsLockOwner(PGMCPU2VM(pPGM)));
1022
1023 AssertReturn(pPdpt, VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT); /* can't happen */
1024 if (!pPdpt->a[iPdPt].n.u1Present)
1025 {
1026 LogFlow(("pgmShwGetPaePoolPagePD: PD %d not present (%RX64)\n", iPdPt, pPdpt->a[iPdPt].u));
1027 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1028 }
1029 AssertMsg(pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK, ("GCPtr=%RGv\n", GCPtr));
1030
1031 /* Fetch the pgm pool shadow descriptor. */
1032 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(PGMCPU2PGM(pPGM)->CTX_SUFF(pPool), pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
1033 AssertReturn(pShwPde, VERR_INTERNAL_ERROR);
1034
1035 *ppShwPde = pShwPde;
1036 return VINF_SUCCESS;
1037}
1038
1039#ifndef IN_RC
1040
1041/**
1042 * Syncs the SHADOW page directory pointer for the specified address.
1043 *
1044 * Allocates backing pages in case the PDPT or PML4 entry is missing.
1045 *
1046 * The caller is responsible for making sure the guest has a valid PD before
1047 * calling this function.
1048 *
1049 * @returns VBox status.
1050 * @param pVCpu VMCPU handle.
1051 * @param GCPtr The address.
1052 * @param uGstPml4e Guest PML4 entry
1053 * @param uGstPdpe Guest PDPT entry
1054 * @param ppPD Receives address of page directory
1055 */
1056static int pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
1057{
1058 PPGMCPU pPGM = &pVCpu->pgm.s;
1059 PVM pVM = pVCpu->CTX_SUFF(pVM);
1060 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1061 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1062 PX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pPGM, iPml4);
1063 bool fNestedPagingOrNoGstPaging = pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu);
1064 PPGMPOOLPAGE pShwPage;
1065 int rc;
1066
1067 Assert(PGMIsLockOwner(pVM));
1068
1069 /* Allocate page directory pointer table if not present. */
1070 if ( !pPml4e->n.u1Present
1071 && !(pPml4e->u & X86_PML4E_PG_MASK))
1072 {
1073 RTGCPTR64 GCPml4;
1074 PGMPOOLKIND enmKind;
1075
1076 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1077
1078 if (fNestedPagingOrNoGstPaging)
1079 {
1080 /* AMD-V nested paging or real/protected mode without paging */
1081 GCPml4 = (RTGCPTR64)iPml4 << X86_PML4_SHIFT;
1082 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_PHYS;
1083 }
1084 else
1085 {
1086 GCPml4 = uGstPml4e & X86_PML4E_PG_MASK;
1087 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT;
1088 }
1089
1090 /* Create a reference back to the PDPT by using the index in its shadow page. */
1091 rc = pgmPoolAlloc(pVM, GCPml4, enmKind, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, &pShwPage);
1092 AssertRCReturn(rc, rc);
1093 }
1094 else
1095 {
1096 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
1097 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1098
1099 pgmPoolCacheUsed(pPool, pShwPage);
1100 }
1101 /* The PDPT was cached or created; hook it up now. */
1102 pPml4e->u |= pShwPage->Core.Key
1103 | (uGstPml4e & ~(X86_PML4E_PG_MASK | X86_PML4E_AVL_MASK | X86_PML4E_PCD | X86_PML4E_PWT));
1104
1105 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1106 PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1107 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1108
1109 /* Allocate page directory if not present. */
1110 if ( !pPdpe->n.u1Present
1111 && !(pPdpe->u & X86_PDPE_PG_MASK))
1112 {
1113 RTGCPTR64 GCPdPt;
1114 PGMPOOLKIND enmKind;
1115
1116 if (fNestedPagingOrNoGstPaging)
1117 {
1118 /* AMD-V nested paging or real/protected mode without paging */
1119 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
1120 enmKind = PGMPOOLKIND_64BIT_PD_FOR_PHYS;
1121 }
1122 else
1123 {
1124 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1125 enmKind = PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD;
1126 }
1127
1128 /* Create a reference back to the PDPT by using the index in its shadow page. */
1129 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, pShwPage->idx, iPdPt, &pShwPage);
1130 AssertRCReturn(rc, rc);
1131 }
1132 else
1133 {
1134 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
1135 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1136
1137 pgmPoolCacheUsed(pPool, pShwPage);
1138 }
1139 /* The PD was cached or created; hook it up now. */
1140 pPdpe->u |= pShwPage->Core.Key
1141 | (uGstPdpe & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));
1142
1143 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1144 return VINF_SUCCESS;
1145}
1146
1147
1148/**
1149 * Gets the SHADOW page directory pointer for the specified address (long mode).
1150 *
1151 * @returns VBox status.
1152 * @param pVCpu VMCPU handle.
1153 * @param GCPtr The address.
1154 * @param ppPdpt Receives address of pdpt
1155 * @param ppPD Receives address of page directory
1156 */
1157DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
1158{
1159 PPGMCPU pPGM = &pVCpu->pgm.s;
1160 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1161 PCX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pPGM, iPml4);
1162
1163 Assert(PGMIsLockOwner(PGMCPU2VM(pPGM)));
1164
1165 AssertReturn(pPml4e, VERR_INTERNAL_ERROR);
1166 if (ppPml4e)
1167 *ppPml4e = (PX86PML4E)pPml4e;
1168
1169 Log4(("pgmShwGetLongModePDPtr %RGv (%RHv) %RX64\n", GCPtr, pPml4e, pPml4e->u));
1170
1171 if (!pPml4e->n.u1Present)
1172 return VERR_PAGE_MAP_LEVEL4_NOT_PRESENT;
1173
1174 PVM pVM = pVCpu->CTX_SUFF(pVM);
1175 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1176 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
1177 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1178
1179 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1180 PCX86PDPT pPdpt = *ppPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1181 if (!pPdpt->a[iPdPt].n.u1Present)
1182 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1183
1184 pShwPage = pgmPoolGetPage(pPool, pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
1185 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1186
1187 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1188 return VINF_SUCCESS;
1189}
1190
1191
1192/**
1193 * Syncs the SHADOW EPT page directory pointer for the specified address. Allocates
1194 * backing pages in case the PDPT or PML4 entry is missing.
1195 *
1196 * @returns VBox status.
1197 * @param pVCpu VMCPU handle.
1198 * @param GCPtr The address.
1199 * @param ppPdpt Receives address of pdpt
1200 * @param ppPD Receives address of page directory
1201 */
1202static int pgmShwGetEPTPDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD)
1203{
1204 PPGMCPU pPGM = &pVCpu->pgm.s;
1205 PVM pVM = pVCpu->CTX_SUFF(pVM);
1206 const unsigned iPml4 = (GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
1207 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1208 PEPTPML4 pPml4;
1209 PEPTPML4E pPml4e;
1210 PPGMPOOLPAGE pShwPage;
1211 int rc;
1212
1213 Assert(pVM->pgm.s.fNestedPaging);
1214 Assert(PGMIsLockOwner(pVM));
1215
1216 pPml4 = (PEPTPML4)PGMPOOL_PAGE_2_PTR_BY_PGMCPU(pPGM, pPGM->CTX_SUFF(pShwPageCR3));
1217 Assert(pPml4);
1218
1219 /* Allocate page directory pointer table if not present. */
1220 pPml4e = &pPml4->a[iPml4];
1221 if ( !pPml4e->n.u1Present
1222 && !(pPml4e->u & EPT_PML4E_PG_MASK))
1223 {
1224 Assert(!(pPml4e->u & EPT_PML4E_PG_MASK));
1225 RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT;
1226
1227 rc = pgmPoolAlloc(pVM, GCPml4, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOL_IDX_NESTED_ROOT, iPml4, &pShwPage);
1228 AssertRCReturn(rc, rc);
1229 }
1230 else
1231 {
1232 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
1233 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1234
1235 pgmPoolCacheUsed(pPool, pShwPage);
1236 }
1237 /* The PDPT was cached or created; hook it up now and fill with the default value. */
1238 pPml4e->u = pShwPage->Core.Key;
1239 pPml4e->n.u1Present = 1;
1240 pPml4e->n.u1Write = 1;
1241 pPml4e->n.u1Execute = 1;
1242
1243 const unsigned iPdPt = (GCPtr >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
1244 PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1245 PEPTPDPTE pPdpe = &pPdpt->a[iPdPt];
1246
1247 if (ppPdpt)
1248 *ppPdpt = pPdpt;
1249
1250 /* Allocate page directory if not present. */
1251 if ( !pPdpe->n.u1Present
1252 && !(pPdpe->u & EPT_PDPTE_PG_MASK))
1253 {
1254 RTGCPTR64 GCPdPt = (RTGCPTR64)iPdPt << EPT_PDPT_SHIFT;
1255
1256 rc = pgmPoolAlloc(pVM, GCPdPt, PGMPOOLKIND_64BIT_PD_FOR_PHYS, pShwPage->idx, iPdPt, &pShwPage);
1257 AssertRCReturn(rc, rc);
1258 }
1259 else
1260 {
1261 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & EPT_PDPTE_PG_MASK);
1262 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1263
1264 pgmPoolCacheUsed(pPool, pShwPage);
1265 }
1266 /* The PD was cached or created; hook it up now and fill with the default value. */
1267 pPdpe->u = pShwPage->Core.Key;
1268 pPdpe->n.u1Present = 1;
1269 pPdpe->n.u1Write = 1;
1270 pPdpe->n.u1Execute = 1;
1271
1272 *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1273 return VINF_SUCCESS;
1274}
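
/*
 * Illustrative usage sketch (assumption): with nested paging the EPT #PF path
 * can use pgmShwGetEPTPDPtr() to make sure the PML4E/PDPTE chain down to the
 * page directory exists before filling in a PDE.  ppPdpt is optional and may
 * be NULL when only the page directory is needed.
 */
#if 0 /* example only */
static int pgmExampleEnsureEptPD(PVMCPU pVCpu, RTGCPTR64 GCPhysNested, PEPTPD *ppPD)
{
    Assert(PGMIsLockOwner(pVCpu->CTX_SUFF(pVM)));
    return pgmShwGetEPTPDPtr(pVCpu, GCPhysNested, NULL /*ppPdpt*/, ppPD);
}
#endif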
1275
1276#endif /* !IN_RC */
1277
1278/**
1279 * Gets effective Guest OS page information.
1280 *
1281 * When GCPtr is in a big page, the function will return as if it was a normal
1282 * 4KB page. If distinguishing between big and normal pages becomes necessary
1283 * at a later point, a separate API will be created for that
1284 * purpose.
1285 *
1286 * @returns VBox status.
1287 * @param pVCpu VMCPU handle.
1288 * @param GCPtr Guest Context virtual address of the page.
1289 * @param pfFlags Where to store the flags. These are X86_PTE_*, even for big pages.
1290 * @param pGCPhys Where to store the GC physical address of the page.
1291 * This is page aligned.
1292 */
1293VMMDECL(int) PGMGstGetPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
1294{
1295 return PGM_GST_PFN(GetPage, pVCpu)(pVCpu, GCPtr, pfFlags, pGCPhys);
1296}
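
/*
 * Illustrative usage sketch (assumption): translating a guest linear address
 * to a guest physical address while checking that the guest mapping allows a
 * write.  The page offset is added back because PGMGstGetPage() returns a
 * page aligned address; VERR_ACCESS_DENIED is just a convenient status here.
 */
#if 0 /* example only */
static int pgmExampleGstQueryWritable(PVMCPU pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
{
    uint64_t fFlags;
    RTGCPHYS GCPhys;
    int rc = PGMGstGetPage(pVCpu, GCPtr, &fFlags, &GCPhys);
    if (RT_FAILURE(rc))
        return rc;                                  /* not present / not mapped */
    if (!(fFlags & X86_PTE_RW))
        return VERR_ACCESS_DENIED;                  /* read-only in the guest */
    *pGCPhys = GCPhys | (GCPtr & PAGE_OFFSET_MASK);
    return VINF_SUCCESS;
}
#endif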
1297
1298
1299/**
1300 * Checks if the page is present.
1301 *
1302 * @returns true if the page is present.
1303 * @returns false if the page is not present.
1304 * @param pVCpu VMCPU handle.
1305 * @param GCPtr Address within the page.
1306 */
1307VMMDECL(bool) PGMGstIsPagePresent(PVMCPU pVCpu, RTGCPTR GCPtr)
1308{
1309 int rc = PGMGstGetPage(pVCpu, GCPtr, NULL, NULL);
1310 return RT_SUCCESS(rc);
1311}
1312
1313
1314/**
1315 * Sets (replaces) the page flags for a range of pages in the guest's tables.
1316 *
1317 * @returns VBox status.
1318 * @param pVCpu VMCPU handle.
1319 * @param GCPtr The address of the first page.
1320 * @param cb The size of the range in bytes.
1321 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
1322 */
1323VMMDECL(int) PGMGstSetPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
1324{
1325 return PGMGstModifyPage(pVCpu, GCPtr, cb, fFlags, 0);
1326}
1327
1328
1329/**
1330 * Modify page flags for a range of pages in the guest's tables
1331 *
1332 * The existing flags are ANDed with the fMask and ORed with the fFlags.
1333 *
1334 * @returns VBox status code.
1335 * @param pVCpu VMCPU handle.
1336 * @param GCPtr Virtual address of the first page in the range.
1337 * @param cb Size (in bytes) of the range to apply the modification to.
1338 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
1339 * @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
1340 * Be very CAREFUL when ~'ing constants which could be 32-bit!
1341 */
1342VMMDECL(int) PGMGstModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
1343{
1344 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,GstModifyPage), a);
1345
1346 /*
1347 * Validate input.
1348 */
1349 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
1350 Assert(cb);
1351
1352 LogFlow(("PGMGstModifyPage %RGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));
1353
1354 /*
1355 * Adjust input.
1356 */
1357 cb += GCPtr & PAGE_OFFSET_MASK;
1358 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
1359 GCPtr = (GCPtr & PAGE_BASE_GC_MASK);
1360
1361 /*
1362 * Call worker.
1363 */
1364 int rc = PGM_GST_PFN(ModifyPage, pVCpu)(pVCpu, GCPtr, cb, fFlags, fMask);
1365
1366 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,GstModifyPage), a);
1367 return rc;
1368}
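
/*
 * Illustrative sketch (assumption) of the fFlags/fMask convention: the guest
 * PTEs covering the range are ANDed with fMask and then ORed with fFlags.
 * Marking a range accessed and dirty, or stripping its write bit, therefore
 * looks like this (X86_PTE_A/X86_PTE_D are the usual x86.h PTE bits).
 */
#if 0 /* example only */
static void pgmExampleGstTweakRange(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb)
{
    /* Set A and D without touching anything else. */
    int rc = PGMGstModifyPage(pVCpu, GCPtr, cb, X86_PTE_A | X86_PTE_D, UINT64_MAX);
    AssertRC(rc);

    /* Clear the write bit for the whole range. */
    rc = PGMGstModifyPage(pVCpu, GCPtr, cb, 0, ~(uint64_t)X86_PTE_RW);
    AssertRC(rc);
}
#endif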
1369
1370
1371#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1372
1373/**
1374 * Performs the lazy mapping of the 32-bit guest PD.
1375 *
1376 * @returns VBox status code.
1377 * @param pVCpu The current CPU.
1378 * @param ppPd Where to return the pointer to the mapping. This is
1379 * always set.
1380 */
1381int pgmGstLazyMap32BitPD(PVMCPU pVCpu, PX86PD *ppPd)
1382{
1383 PVM pVM = pVCpu->CTX_SUFF(pVM);
1384 pgmLock(pVM);
1385
1386 Assert(!pVCpu->pgm.s.CTX_SUFF(pGst32BitPd));
1387
1388 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAGE_MASK;
1389 PPGMPAGE pPage;
1390 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysCR3, &pPage);
1391 if (RT_SUCCESS(rc))
1392 {
1393 RTHCPTR HCPtrGuestCR3;
1394 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
1395 if (RT_SUCCESS(rc))
1396 {
1397 pVCpu->pgm.s.pGst32BitPdR3 = (R3PTRTYPE(PX86PD))HCPtrGuestCR3;
1398# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1399 pVCpu->pgm.s.pGst32BitPdR0 = (R0PTRTYPE(PX86PD))HCPtrGuestCR3;
1400# endif
1401 *ppPd = (PX86PD)HCPtrGuestCR3;
1402
1403 pgmUnlock(pVM);
1404 return VINF_SUCCESS;
1405 }
1406
1407 AssertRC(rc);
1408 }
1409 pgmUnlock(pVM);
1410
1411 *ppPd = NULL;
1412 return rc;
1413}
1414
1415
1416/**
1417 * Performs the lazy mapping of the PAE guest PDPT.
1418 *
1419 * @returns VBox status code.
1420 * @param pVCpu The current CPU.
1421 * @param ppPdpt Where to return the pointer to the mapping. This is
1422 * always set.
1423 */
1424int pgmGstLazyMapPaePDPT(PVMCPU pVCpu, PX86PDPT *ppPdpt)
1425{
1426 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt));
1427 PVM pVM = pVCpu->CTX_SUFF(pVM);
1428 pgmLock(pVM);
1429
1430 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAE_PAGE_MASK;
1431 PPGMPAGE pPage;
1432 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysCR3, &pPage);
1433 if (RT_SUCCESS(rc))
1434 {
1435 RTHCPTR HCPtrGuestCR3;
1436 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
1437 if (RT_SUCCESS(rc))
1438 {
1439 pVCpu->pgm.s.pGstPaePdptR3 = (R3PTRTYPE(PX86PDPT))HCPtrGuestCR3;
1440# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1441 pVCpu->pgm.s.pGstPaePdptR0 = (R0PTRTYPE(PX86PDPT))HCPtrGuestCR3;
1442# endif
1443 *ppPdpt = (PX86PDPT)HCPtrGuestCR3;
1444
1445 pgmUnlock(pVM);
1446 return VINF_SUCCESS;
1447 }
1448
1449 AssertRC(rc);
1450 }
1451
1452 pgmUnlock(pVM);
1453 *ppPdpt = NULL;
1454 return rc;
1455}
1456
1457
1458/**
1459 * Performs the lazy mapping / updating of a PAE guest PD.
1460 *
1461 * @returns VBox status code.
1462 *
1463 * @param pVCpu The current CPU.
1464 * @param iPdpt Which PD entry to map (0..3).
1465 * @param ppPd Where to return the pointer to the mapping. This is
1466 * always set.
1467 */
1468int pgmGstLazyMapPaePD(PVMCPU pVCpu, uint32_t iPdpt, PX86PDPAE *ppPd)
1469{
1470 PVM pVM = pVCpu->CTX_SUFF(pVM);
1471 pgmLock(pVM);
1472
1473 PX86PDPT pGuestPDPT = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
1474 Assert(pGuestPDPT);
1475 Assert(pGuestPDPT->a[iPdpt].n.u1Present);
1476 RTGCPHYS GCPhys = pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK_FULL;
1477 bool const fChanged = pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] != GCPhys;
1478
1479 PPGMPAGE pPage;
1480 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
1481 if (RT_SUCCESS(rc))
1482 {
1483 RTRCPTR RCPtr = NIL_RTRCPTR;
1484 RTHCPTR HCPtr = NIL_RTHCPTR;
1485#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1486 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &HCPtr);
1487 AssertRC(rc);
1488#endif
1489 if (RT_SUCCESS(rc) && fChanged)
1490 {
1491 RCPtr = (RTRCPTR)(RTRCUINTPTR)(pVM->pgm.s.GCPtrCR3Mapping + (1 + iPdpt) * PAGE_SIZE);
1492 rc = PGMMap(pVM, (RTRCUINTPTR)RCPtr, PGM_PAGE_GET_HCPHYS(pPage), PAGE_SIZE, 0);
1493 }
1494 if (RT_SUCCESS(rc))
1495 {
1496 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = (R3PTRTYPE(PX86PDPAE))HCPtr;
1497# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1498 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = (R0PTRTYPE(PX86PDPAE))HCPtr;
1499# endif
1500 if (fChanged)
1501 {
1502 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = GCPhys;
1503 pVCpu->pgm.s.apGstPaePDsRC[iPdpt] = (RCPTRTYPE(PX86PDPAE))RCPtr;
1504 }
1505
1506 *ppPd = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
1507 pgmUnlock(pVM);
1508 return VINF_SUCCESS;
1509 }
1510 }
1511
1512 /* Invalid page or some failure, invalidate the entry. */
1513 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = NIL_RTGCPHYS;
1514 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = 0;
1515# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1516 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = 0;
1517# endif
1518 pVCpu->pgm.s.apGstPaePDsRC[iPdpt] = 0;
1519
1520 pgmUnlock(pVM);
1521 return rc;
1522}
1523
1524#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1525#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1526
1527/**
1528 * Performs the lazy mapping of the AMD64 guest PML4 table.
1529 *
1530 * @returns VBox status code.
1531 * @param pVCpu The current CPU.
1532 * @param ppPml4 Where to return the pointer to the mapping. This will
1533 * always be set.
1534 */
1535int pgmGstLazyMapPml4(PVMCPU pVCpu, PX86PML4 *ppPml4)
1536{
1537 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4));
1538 PVM pVM = pVCpu->CTX_SUFF(pVM);
1539 pgmLock(pVM);
1540
1541 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_AMD64_PAGE_MASK;
1542 PPGMPAGE pPage;
1543 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysCR3, &pPage);
1544 if (RT_SUCCESS(rc))
1545 {
1546 RTHCPTR HCPtrGuestCR3;
1547 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
1548 if (RT_SUCCESS(rc))
1549 {
1550 pVCpu->pgm.s.pGstAmd64Pml4R3 = (R3PTRTYPE(PX86PML4))HCPtrGuestCR3;
1551# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1552 pVCpu->pgm.s.pGstAmd64Pml4R0 = (R0PTRTYPE(PX86PML4))HCPtrGuestCR3;
1553# endif
1554 *ppPml4 = (PX86PML4)HCPtrGuestCR3;
1555
1556 pgmUnlock(pVM);
1557 return VINF_SUCCESS;
1558 }
1559 }
1560
1561 pgmUnlock(pVM);
1562 *ppPml4 = NULL;
1563 return rc;
1564}
1565
1566#endif
1567
1568/**
1569 * Gets the specified page directory pointer table entry.
1570 *
1571 * @returns PDP entry
1572 * @param pVCpu VMCPU handle.
1573 * @param iPdpt PDPT index
1574 */
1575VMMDECL(X86PDPE) PGMGstGetPaePDPtr(PVMCPU pVCpu, unsigned iPdpt)
1576{
1577 Assert(iPdpt <= 3);
1578 return pgmGstGetPaePDPTPtr(pVCpu)->a[iPdpt & 3];
1579}
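
/*
 * Illustrative usage sketch (assumption): dumping the four guest PAE PDPT
 * entries.  This only makes sense while the guest is actually in PAE mode.
 */
#if 0 /* example only */
static void pgmExampleDumpGstPaePdpes(PVMCPU pVCpu)
{
    for (unsigned iPdpt = 0; iPdpt < 4; iPdpt++)
    {
        X86PDPE Pdpe = PGMGstGetPaePDPtr(pVCpu, iPdpt);
        Log(("PDPTE[%u]=%RX64 present=%d\n", iPdpt, Pdpe.u, Pdpe.n.u1Present));
    }
}
#endif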
1580
1581
1582/**
1583 * Gets the current CR3 register value for the shadow memory context.
1584 * @returns CR3 value.
1585 * @param pVCpu VMCPU handle.
1586 */
1587VMMDECL(RTHCPHYS) PGMGetHyperCR3(PVMCPU pVCpu)
1588{
1589 PPGMPOOLPAGE pPoolPage = pVCpu->pgm.s.CTX_SUFF(pShwPageCR3);
1590 AssertPtrReturn(pPoolPage, 0);
1591 return pPoolPage->Core.Key;
1592}
1593
1594
1595/**
1596 * Gets the current CR3 register value for the nested memory context.
1597 * @returns CR3 value.
1598 * @param pVCpu VMCPU handle.
1599 */
1600VMMDECL(RTHCPHYS) PGMGetNestedCR3(PVMCPU pVCpu, PGMMODE enmShadowMode)
1601{
1602 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1603 return pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key;
1604}
1605
1606
1607/**
1608 * Gets the current CR3 register value for the HC intermediate memory context.
1609 * @returns CR3 value.
1610 * @param pVM The VM handle.
1611 */
1612VMMDECL(RTHCPHYS) PGMGetInterHCCR3(PVM pVM)
1613{
1614 switch (pVM->pgm.s.enmHostMode)
1615 {
1616 case SUPPAGINGMODE_32_BIT:
1617 case SUPPAGINGMODE_32_BIT_GLOBAL:
1618 return pVM->pgm.s.HCPhysInterPD;
1619
1620 case SUPPAGINGMODE_PAE:
1621 case SUPPAGINGMODE_PAE_GLOBAL:
1622 case SUPPAGINGMODE_PAE_NX:
1623 case SUPPAGINGMODE_PAE_GLOBAL_NX:
1624 return pVM->pgm.s.HCPhysInterPaePDPT;
1625
1626 case SUPPAGINGMODE_AMD64:
1627 case SUPPAGINGMODE_AMD64_GLOBAL:
1628 case SUPPAGINGMODE_AMD64_NX:
1629 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
1630 return pVM->pgm.s.HCPhysInterPaePDPT;
1631
1632 default:
1633 AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode));
1634 return ~0;
1635 }
1636}
1637
1638
1639/**
1640 * Gets the current CR3 register value for the RC intermediate memory context.
1641 * @returns CR3 value.
1642 * @param pVM The VM handle.
1643 * @param pVCpu VMCPU handle.
1644 */
1645VMMDECL(RTHCPHYS) PGMGetInterRCCR3(PVM pVM, PVMCPU pVCpu)
1646{
1647 switch (pVCpu->pgm.s.enmShadowMode)
1648 {
1649 case PGMMODE_32_BIT:
1650 return pVM->pgm.s.HCPhysInterPD;
1651
1652 case PGMMODE_PAE:
1653 case PGMMODE_PAE_NX:
1654 return pVM->pgm.s.HCPhysInterPaePDPT;
1655
1656 case PGMMODE_AMD64:
1657 case PGMMODE_AMD64_NX:
1658 return pVM->pgm.s.HCPhysInterPaePML4;
1659
1660 case PGMMODE_EPT:
1661 case PGMMODE_NESTED:
1662 return 0; /* not relevant */
1663
1664 default:
1665 AssertMsgFailed(("enmShadowMode=%d\n", pVCpu->pgm.s.enmShadowMode));
1666 return ~0;
1667 }
1668}
1669
1670
1671/**
1672 * Gets the CR3 register value for the 32-Bit intermediate memory context.
1673 * @returns CR3 value.
1674 * @param pVM The VM handle.
1675 */
1676VMMDECL(RTHCPHYS) PGMGetInter32BitCR3(PVM pVM)
1677{
1678 return pVM->pgm.s.HCPhysInterPD;
1679}
1680
1681
1682/**
1683 * Gets the CR3 register value for the PAE intermediate memory context.
1684 * @returns CR3 value.
1685 * @param pVM The VM handle.
1686 */
1687VMMDECL(RTHCPHYS) PGMGetInterPaeCR3(PVM pVM)
1688{
1689 return pVM->pgm.s.HCPhysInterPaePDPT;
1690}
1691
1692
1693/**
1694 * Gets the CR3 register value for the AMD64 intermediate memory context.
1695 * @returns CR3 value.
1696 * @param pVM The VM handle.
1697 */
1698VMMDECL(RTHCPHYS) PGMGetInterAmd64CR3(PVM pVM)
1699{
1700 return pVM->pgm.s.HCPhysInterPaePML4;
1701}
1702
1703
1704/**
1705 * Performs and schedules necessary updates following a CR3 load or reload.
1706 *
1707 * This will normally involve mapping the guest PD or nPDPT
1708 *
1709 * @returns VBox status code.
1710 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
1711 * safely be ignored and overridden since the FF will be set too then.
1712 * @param pVCpu VMCPU handle.
1713 * @param cr3 The new cr3.
1714 * @param fGlobal Indicates whether this is a global flush or not.
1715 */
1716VMMDECL(int) PGMFlushTLB(PVMCPU pVCpu, uint64_t cr3, bool fGlobal)
1717{
1718 PVM pVM = pVCpu->CTX_SUFF(pVM);
1719
1720 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLB), a);
1721
1722 /*
1723 * Always flag the necessary updates; necessary for hardware acceleration
1724 */
1725 /** @todo optimize this, it shouldn't always be necessary. */
1726 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
1727 if (fGlobal)
1728 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1729 LogFlow(("PGMFlushTLB: cr3=%RX64 OldCr3=%RX64 fGlobal=%d\n", cr3, pVCpu->pgm.s.GCPhysCR3, fGlobal));
1730
1731 /*
1732 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
1733 */
1734 int rc = VINF_SUCCESS;
1735 RTGCPHYS GCPhysCR3;
1736 switch (pVCpu->pgm.s.enmGuestMode)
1737 {
1738 case PGMMODE_PAE:
1739 case PGMMODE_PAE_NX:
1740 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1741 break;
1742 case PGMMODE_AMD64:
1743 case PGMMODE_AMD64_NX:
1744 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
1745 break;
1746 default:
1747 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1748 break;
1749 }
1750
1751 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
1752 {
1753 RTGCPHYS GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
1754 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
1755 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
1756 if (RT_LIKELY(rc == VINF_SUCCESS))
1757 {
1758 if (pgmMapAreMappingsFloating(&pVM->pgm.s))
1759 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1760 }
1761 else
1762 {
1763 AssertMsg(rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
1764 Assert(VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_PGM_SYNC_CR3));
1765 pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3;
1766 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MAP_CR3;
1767 if (pgmMapAreMappingsFloating(&pVM->pgm.s))
1768 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;
1769 }
1770
1771 if (fGlobal)
1772 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBNewCR3Global));
1773 else
1774 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBNewCR3));
1775 }
1776 else
1777 {
1778# ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
1779 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1780 if (pPool->cDirtyPages)
1781 {
1782 pgmLock(pVM);
1783 pgmPoolResetDirtyPages(pVM);
1784 pgmUnlock(pVM);
1785 }
1786# endif
1787 /*
1788 * Check if we have a pending update of the CR3 monitoring.
1789 */
1790 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
1791 {
1792 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1793 Assert(!pVM->pgm.s.fMappingsFixed); Assert(!pVM->pgm.s.fMappingsDisabled);
1794 }
1795 if (fGlobal)
1796 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBSameCR3Global));
1797 else
1798 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBSameCR3));
1799 }
1800
1801 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLB), a);
1802 return rc;
1803}
1804
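/*
 * Illustrative sketch (not compiled): the switch above reduces the raw guest
 * CR3 value to the physical base of the top-level paging structure.  The
 * literal masks below are assumed to match the X86_CR3_*_PAGE_MASK
 * definitions (PAE CR3 addresses a 32-byte aligned PDPT, the other modes a
 * 4 KB aligned PD/PML4), and the helper name is made up.
 */
#if 0 /* example only */
static uint64_t pgmExampleGuestCr3ToPhys(PGMMODE enmGuestMode, uint64_t cr3)
{
    switch (enmGuestMode)
    {
        case PGMMODE_PAE:
        case PGMMODE_PAE_NX:
            return cr3 & UINT64_C(0x00000000ffffffe0); /* assumed X86_CR3_PAE_PAGE_MASK */
        case PGMMODE_AMD64:
        case PGMMODE_AMD64_NX:
            return cr3 & UINT64_C(0x000ffffffffff000); /* assumed X86_CR3_AMD64_PAGE_MASK */
        default:
            return cr3 & UINT64_C(0x00000000fffff000); /* assumed X86_CR3_PAGE_MASK */
    }
}
#endif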
1805
1806/**
1807 * Performs and schedules necessary updates following a CR3 load or reload when
1808 * using nested or extended paging.
1809 *
1810 * This API is an alternative to PGMFlushTLB that avoids actually flushing the
1811 * TLB and triggering a SyncCR3.
1812 *
1813 * This will normally involve mapping the guest PD or nPDPT.
1814 *
1815 * @returns VBox status code.
1816 * @retval VINF_SUCCESS.
1817 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync (only possible
1818 * when not in nested paging mode). This can safely be ignored and
1819 * overridden since the FF will be set too then.
1820 * @param pVCpu VMCPU handle.
1821 * @param cr3 The new cr3.
1822 */
1823VMMDECL(int) PGMUpdateCR3(PVMCPU pVCpu, uint64_t cr3)
1824{
1825 PVM pVM = pVCpu->CTX_SUFF(pVM);
1826
1827 LogFlow(("PGMUpdateCR3: cr3=%RX64 OldCr3=%RX64\n", cr3, pVCpu->pgm.s.GCPhysCR3));
1828
1829 /* We assume we're only called in nested paging mode. */
1830 Assert(pVM->pgm.s.fNestedPaging || pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
1831 Assert(pVM->pgm.s.fMappingsDisabled);
1832 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3));
1833
1834 /*
1835 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
1836 */
1837 int rc = VINF_SUCCESS;
1838 RTGCPHYS GCPhysCR3;
1839 switch (pVCpu->pgm.s.enmGuestMode)
1840 {
1841 case PGMMODE_PAE:
1842 case PGMMODE_PAE_NX:
1843 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1844 break;
1845 case PGMMODE_AMD64:
1846 case PGMMODE_AMD64_NX:
1847 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
1848 break;
1849 default:
1850 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1851 break;
1852 }
1853 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
1854 {
1855 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
1856 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
1857 AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */ /** @todo this isn't true for the mac, but we need hw to test/fix this. */
1858 }
1859 return rc;
1860}
1861
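/*
 * Illustrative sketch (not compiled): a caller reacting to a guest MOV CR3
 * would normally pick between PGMFlushTLB and PGMUpdateCR3 depending on
 * whether nested/EPT paging is active.  This is a simplified assumption of
 * the dispatching done elsewhere (CPUM/HWACCM), with fGlobal hardwired to
 * false and a made-up helper name.
 */
#if 0 /* example only */
static int pgmExampleGuestMovCr3(PVMCPU pVCpu, uint64_t cr3)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    if (pVM->pgm.s.fNestedPaging || pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT)
        return PGMUpdateCR3(pVCpu, cr3);               /* no shadow TLB flush / SyncCR3 scheduling */
    return PGMFlushTLB(pVCpu, cr3, false /*fGlobal*/); /* may set VMCPU_FF_PGM_SYNC_CR3* */
}
#endif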
1862
1863/**
1864 * Synchronize the paging structures.
1865 *
1866 * This function is called in response to the VM_FF_PGM_SYNC_CR3 and
1867 * VM_FF_PGM_SYNC_CR3_NONGLOBAL. Those two force action flags are set
1868 * in several places, most importantly whenever the CR3 is loaded.
1869 *
1870 * @returns VBox status code.
1871 * @param pVCpu VMCPU handle.
1872 * @param cr0 Guest context CR0 register
1873 * @param cr3 Guest context CR3 register
1874 * @param cr4 Guest context CR4 register
1875 * @param fGlobal Including global page directories or not
1876 */
1877VMMDECL(int) PGMSyncCR3(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
1878{
1879 PVM pVM = pVCpu->CTX_SUFF(pVM);
1880 int rc;
1881
1882 /*
1883 * The pool may have pending stuff and even require a return to ring-3 to
1884 * clear the whole thing.
1885 */
1886 rc = pgmPoolSyncCR3(pVCpu);
1887 if (rc != VINF_SUCCESS)
1888 return rc;
1889
1890 /*
1891 * We might be called when we shouldn't.
1892 *
1893 * The mode switching will ensure that the PD is resynced
1894 * after every mode switch. So, if we find ourselves here
1895 * when in protected or real mode we can safely disable the
1896 * FF and return immediately.
1897 */
1898 if (pVCpu->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
1899 {
1900 Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
1901 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
1902 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1903 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
1904 return VINF_SUCCESS;
1905 }
1906
1907 /* If global pages are not supported, then all flushes are global. */
1908 if (!(cr4 & X86_CR4_PGE))
1909 fGlobal = true;
1910 LogFlow(("PGMSyncCR3: cr0=%RX64 cr3=%RX64 cr4=%RX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
1911 VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)));
1912
1913 /*
1914 * Check if we need to finish an aborted MapCR3 call (see PGMFlushTLB).
1915 * This should be done before SyncCR3.
1916 */
1917 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MAP_CR3)
1918 {
1919 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MAP_CR3;
1920
1921 RTGCPHYS GCPhysCR3Old = pVCpu->pgm.s.GCPhysCR3;
1922 RTGCPHYS GCPhysCR3;
1923 switch (pVCpu->pgm.s.enmGuestMode)
1924 {
1925 case PGMMODE_PAE:
1926 case PGMMODE_PAE_NX:
1927 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1928 break;
1929 case PGMMODE_AMD64:
1930 case PGMMODE_AMD64_NX:
1931 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
1932 break;
1933 default:
1934 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1935 break;
1936 }
1937
1938 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
1939 {
1940 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
1941 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
1942 }
1943 /* Make sure we check for pending pgm pool syncs as we clear VMCPU_FF_PGM_SYNC_CR3 later on! */
1944 if ( rc == VINF_PGM_SYNC_CR3
1945 || (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL))
1946 {
1947 Log(("PGMSyncCR3: pending pgm pool sync after MapCR3!\n"));
1948#ifdef IN_RING3
1949 rc = pgmPoolSyncCR3(pVCpu);
1950#else
1951 if (rc == VINF_PGM_SYNC_CR3)
1952 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3Old;
1953 return VINF_PGM_SYNC_CR3;
1954#endif
1955 }
1956 AssertRCReturn(rc, rc);
1957 AssertRCSuccessReturn(rc, VERR_INTERNAL_ERROR);
1958 }
1959
1960 /*
1961 * Let the 'Bth' function do the work and we'll just keep track of the flags.
1962 */
1963 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
1964 rc = PGM_BTH_PFN(SyncCR3, pVCpu)(pVCpu, cr0, cr3, cr4, fGlobal);
1965 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
1966 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
1967 if (rc == VINF_SUCCESS)
1968 {
1969 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)
1970 {
1971 /* Go back to ring 3 if a pgm pool sync is again pending. */
1972 return VINF_PGM_SYNC_CR3;
1973 }
1974
1975 if (!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
1976 {
1977 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
1978 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1979 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
1980 }
1981
1982 /*
1983 * Check if we have a pending update of the CR3 monitoring.
1984 */
1985 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
1986 {
1987 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1988 Assert(!pVM->pgm.s.fMappingsFixed); Assert(!pVM->pgm.s.fMappingsDisabled);
1989 }
1990 }
1991
1992 /*
1993 * Now flush the CR3 (guest context).
1994 */
1995 if (rc == VINF_SUCCESS)
1996 PGM_INVL_VCPU_TLBS(pVCpu);
1997 return rc;
1998}
1999
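/*
 * Illustrative sketch (not compiled): how an execution loop might service the
 * force action flags that PGMFlushTLB sets and PGMSyncCR3 consumes.  The
 * CPUMGetGuestCRx accessors and the helper name are assumptions here; the
 * real handling lives in EM/VMM.
 */
#if 0 /* example only */
static int pgmExampleServiceSyncCr3(PVMCPU pVCpu)
{
    if (!VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
        return VINF_SUCCESS;
    bool const fGlobal = VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    int rc = PGMSyncCR3(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR3(pVCpu),
                        CPUMGetGuestCR4(pVCpu), fGlobal);
    /* VINF_PGM_SYNC_CR3 indicates a pgm pool sync that must be completed in ring-3. */
    return rc;
}
#endif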
2000
2001/**
2002 * Called whenever CR0 or CR4 changes in a way which may affect the paging mode.
2003 *
2004 * @returns VBox status code, with the following informational codes for
2005 * VM scheduling.
2006 * @retval VINF_SUCCESS if there was no change, or it was successfully dealt with.
2007 * @retval VINF_PGM_CHANGE_MODE if we're in RC or R0 and the mode changes.
2008 * (I.e. not in R3.)
2009 * @retval VINF_EM_SUSPEND or VINF_EM_OFF on a fatal runtime error. (R3 only)
2010 *
2011 * @param pVCpu VMCPU handle.
2012 * @param cr0 The new cr0.
2013 * @param cr4 The new cr4.
2014 * @param efer The new extended feature enable register.
2015 */
2016VMMDECL(int) PGMChangeMode(PVMCPU pVCpu, uint64_t cr0, uint64_t cr4, uint64_t efer)
2017{
2018 PVM pVM = pVCpu->CTX_SUFF(pVM);
2019 PGMMODE enmGuestMode;
2020
2021 /*
2022 * Calc the new guest mode.
2023 */
2024 if (!(cr0 & X86_CR0_PE))
2025 enmGuestMode = PGMMODE_REAL;
2026 else if (!(cr0 & X86_CR0_PG))
2027 enmGuestMode = PGMMODE_PROTECTED;
2028 else if (!(cr4 & X86_CR4_PAE))
2029 {
2030 bool const fPse = !!(cr4 & X86_CR4_PSE);
2031 if (pVCpu->pgm.s.fGst32BitPageSizeExtension != fPse)
2032 Log(("PGMChangeMode: CR4.PSE %d -> %d\n", pVCpu->pgm.s.fGst32BitPageSizeExtension, fPse));
2033 pVCpu->pgm.s.fGst32BitPageSizeExtension = fPse;
2034 enmGuestMode = PGMMODE_32_BIT;
2035 }
2036 else if (!(efer & MSR_K6_EFER_LME))
2037 {
2038 if (!(efer & MSR_K6_EFER_NXE))
2039 enmGuestMode = PGMMODE_PAE;
2040 else
2041 enmGuestMode = PGMMODE_PAE_NX;
2042 }
2043 else
2044 {
2045 if (!(efer & MSR_K6_EFER_NXE))
2046 enmGuestMode = PGMMODE_AMD64;
2047 else
2048 enmGuestMode = PGMMODE_AMD64_NX;
2049 }
2050
2051 /*
2052 * Did it change?
2053 */
2054 if (pVCpu->pgm.s.enmGuestMode == enmGuestMode)
2055 return VINF_SUCCESS;
2056
2057 /* Flush the TLB */
2058 PGM_INVL_VCPU_TLBS(pVCpu);
2059
2060#ifdef IN_RING3
2061 return PGMR3ChangeMode(pVM, pVCpu, enmGuestMode);
2062#else
2063 LogFlow(("PGMChangeMode: returns VINF_PGM_CHANGE_MODE.\n"));
2064 return VINF_PGM_CHANGE_MODE;
2065#endif
2066}
2067
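/*
 * Summary of the guest mode calculation in PGMChangeMode above (X = don't care):
 *
 *   CR0.PE  CR0.PG  CR4.PAE  EFER.LME  EFER.NXE  ->  PGMMODE
 *     0       X        X        X         X          REAL
 *     1       0        X        X         X          PROTECTED
 *     1       1        0        X         X          32_BIT
 *     1       1        1        0         0          PAE
 *     1       1        1        0         1          PAE_NX
 *     1       1        1        1         0          AMD64
 *     1       1        1        1         1          AMD64_NX
 */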
2068
2069/**
2070 * Gets the current guest paging mode.
2071 *
2072 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
2073 *
2074 * @returns The current paging mode.
2075 * @param pVCpu VMCPU handle.
2076 */
2077VMMDECL(PGMMODE) PGMGetGuestMode(PVMCPU pVCpu)
2078{
2079 return pVCpu->pgm.s.enmGuestMode;
2080}
2081
2082
2083/**
2084 * Gets the current shadow paging mode.
2085 *
2086 * @returns The current paging mode.
2087 * @param pVCpu VMCPU handle.
2088 */
2089VMMDECL(PGMMODE) PGMGetShadowMode(PVMCPU pVCpu)
2090{
2091 return pVCpu->pgm.s.enmShadowMode;
2092}
2093
2094/**
2095 * Gets the current host paging mode.
2096 *
2097 * @returns The current paging mode.
2098 * @param pVM The VM handle.
2099 */
2100VMMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
2101{
2102 switch (pVM->pgm.s.enmHostMode)
2103 {
2104 case SUPPAGINGMODE_32_BIT:
2105 case SUPPAGINGMODE_32_BIT_GLOBAL:
2106 return PGMMODE_32_BIT;
2107
2108 case SUPPAGINGMODE_PAE:
2109 case SUPPAGINGMODE_PAE_GLOBAL:
2110 return PGMMODE_PAE;
2111
2112 case SUPPAGINGMODE_PAE_NX:
2113 case SUPPAGINGMODE_PAE_GLOBAL_NX:
2114 return PGMMODE_PAE_NX;
2115
2116 case SUPPAGINGMODE_AMD64:
2117 case SUPPAGINGMODE_AMD64_GLOBAL:
2118 return PGMMODE_AMD64;
2119
2120 case SUPPAGINGMODE_AMD64_NX:
2121 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
2122 return PGMMODE_AMD64_NX;
2123
2124 default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
2125 }
2126
2127 return PGMMODE_INVALID;
2128}
2129
2130
2131/**
2132 * Get mode name.
2133 *
2134 * @returns read-only name string.
2135 * @param enmMode The mode which name is desired.
2136 */
2137VMMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
2138{
2139 switch (enmMode)
2140 {
2141 case PGMMODE_REAL: return "Real";
2142 case PGMMODE_PROTECTED: return "Protected";
2143 case PGMMODE_32_BIT: return "32-bit";
2144 case PGMMODE_PAE: return "PAE";
2145 case PGMMODE_PAE_NX: return "PAE+NX";
2146 case PGMMODE_AMD64: return "AMD64";
2147 case PGMMODE_AMD64_NX: return "AMD64+NX";
2148 case PGMMODE_NESTED: return "Nested";
2149 case PGMMODE_EPT: return "EPT";
2150 default: return "unknown mode value";
2151 }
2152}
2153
2154
2155
2156/**
2157 * Notification from CPUM that the EFER.NXE bit has changed.
2158 *
2159 * @param pVCpu The virtual CPU for which EFER changed.
2160 * @param fNxe The new NXE state.
2161 */
2162VMM_INT_DECL(void) PGMNotifyNxeChanged(PVMCPU pVCpu, bool fNxe)
2163{
2164 Log(("PGMNotifyNxeChanged: fNxe=%RTbool\n", fNxe));
2165 pVCpu->pgm.s.fNoExecuteEnabled = fNxe;
2166 if (fNxe)
2167 {
2168 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
2169 pVCpu->pgm.s.fGstPaeMbzPteMask &= ~X86_PTE_PAE_NX;
2170 pVCpu->pgm.s.fGstPaeMbzPdeMask &= ~X86_PDE_PAE_NX;
2171 pVCpu->pgm.s.fGstPaeMbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
2172 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask - N/A */
2173 pVCpu->pgm.s.fGstAmd64MbzPteMask &= ~X86_PTE_PAE_NX;
2174 pVCpu->pgm.s.fGstAmd64MbzPdeMask &= ~X86_PDE_PAE_NX;
2175 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
2176 pVCpu->pgm.s.fGstAmd64MbzPdpeMask &= ~X86_PDPE_LM_NX;
2177 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask &= ~X86_PDPE_LM_NX;
2178 pVCpu->pgm.s.fGstAmd64MbzPml4eMask &= ~X86_PML4E_NX;
2179 }
2180 else
2181 {
2182 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
2183 pVCpu->pgm.s.fGstPaeMbzPteMask |= X86_PTE_PAE_NX;
2184 pVCpu->pgm.s.fGstPaeMbzPdeMask |= X86_PDE_PAE_NX;
2185 pVCpu->pgm.s.fGstPaeMbzBigPdeMask |= X86_PDE2M_PAE_NX;
2186 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask - N/A */
2187 pVCpu->pgm.s.fGstAmd64MbzPteMask |= X86_PTE_PAE_NX;
2188 pVCpu->pgm.s.fGstAmd64MbzPdeMask |= X86_PDE_PAE_NX;
2189 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask |= X86_PDE2M_PAE_NX;
2190 pVCpu->pgm.s.fGstAmd64MbzPdpeMask |= X86_PDPE_LM_NX;
2191 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask |= X86_PDPE_LM_NX;
2192 pVCpu->pgm.s.fGstAmd64MbzPml4eMask |= X86_PML4E_NX;
2193 }
2194}
2195
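/*
 * Illustrative sketch (not compiled): the must-be-zero masks maintained above
 * are meant to be ANDed with guest paging entries during a walk; a present
 * entry with any MBZ bit set is a reserved-bit violation.  The helper name is
 * made up; the real checks live in the guest/both-mode template code.
 */
#if 0 /* example only */
static bool pgmExamplePaePteHasRsvdBits(PVMCPU pVCpu, X86PTEPAE Pte)
{
    /* NX is only part of the mask while EFER.NXE is clear (see above). */
    return Pte.n.u1Present
        && (Pte.u & pVCpu->pgm.s.fGstPaeMbzPteMask);
}
#endif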
2196
2197/**
2198 * Check if any pgm pool pages are marked dirty (not monitored).
2199 *
2200 * @returns bool - true if dirty pages exist, false if not.
2201 * @param pVM The VM to operate on.
2202 */
2203VMMDECL(bool) PGMHasDirtyPages(PVM pVM)
2204{
2205 return pVM->pgm.s.CTX_SUFF(pPool)->cDirtyPages != 0;
2206}
2207
2208/**
2209 * Check if the PGM lock is currently taken.
2210 *
2211 * @returns bool locked/not locked
2212 * @param pVM The VM to operate on.
2213 */
2214VMMDECL(bool) PGMIsLocked(PVM pVM)
2215{
2216 return PDMCritSectIsOwned(&pVM->pgm.s.CritSect);
2217}
2218
2219
2220/**
2221 * Check if this VCPU currently owns the PGM lock.
2222 *
2223 * @returns bool owner/not owner
2224 * @param pVM The VM to operate on.
2225 */
2226VMMDECL(bool) PGMIsLockOwner(PVM pVM)
2227{
2228 return PDMCritSectIsOwner(&pVM->pgm.s.CritSect);
2229}
2230
2231
2232/**
2233 * Enable or disable large page usage
2234 *
2235 * @param pVM The VM to operate on.
2236 * @param fUseLargePages Use/not use large pages
2237 */
2238VMMDECL(void) PGMSetLargePageUsage(PVM pVM, bool fUseLargePages)
2239{
2240 pVM->fUseLargePages = fUseLargePages;
2241}
2242
2243/**
2244 * Acquire the PGM lock.
2245 *
2246 * @returns VBox status code
2247 * @param pVM The VM to operate on.
2248 */
2249int pgmLock(PVM pVM)
2250{
2251 int rc = PDMCritSectEnter(&pVM->pgm.s.CritSect, VERR_SEM_BUSY);
2252#if defined(IN_RC) || defined(IN_RING0)
2253 if (rc == VERR_SEM_BUSY)
2254 rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_LOCK, 0);
2255#endif
2256 AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc));
2257 return rc;
2258}
2259
2260
2261/**
2262 * Release the PGM lock.
2263 *
2264 * @returns VBox status code
2265 * @param pVM The VM to operate on.
2266 */
2267void pgmUnlock(PVM pVM)
2268{
2269 PDMCritSectLeave(&pVM->pgm.s.CritSect);
2270}
2271
2272#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
2273
2274/** Common worker for PGMDynMapGCPage and PGMDynMapGCPageOff. */
2275DECLINLINE(int) pgmDynMapGCPageInternal(PVM pVM, RTGCPHYS GCPhys, void **ppv)
2276{
2277 pgmLock(pVM);
2278
2279 /*
2280 * Convert it to a writable page and pass it on to PGMDynMapHCPage.
2281 */
2282 int rc;
2283 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
2284 if (RT_LIKELY(pPage))
2285 {
2286 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
2287 if (RT_SUCCESS(rc))
2288 {
2289 //Log(("PGMDynMapGCPage: GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, pPage));
2290#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
2291 rc = pgmR0DynMapHCPageInlined(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage), ppv);
2292#else
2293 rc = PGMDynMapHCPage(pVM, PGM_PAGE_GET_HCPHYS(pPage), ppv);
2294#endif
2295 }
2296 else
2297 AssertRC(rc);
2298 }
2299 else
2300 {
2301 AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
2302 rc = VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2303 }
2304
2305 pgmUnlock(pVM);
2306 return rc;
2307}
2308
2309/**
2310 * Temporarily maps one guest page specified by GC physical address.
2311 * These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
2312 *
2313 * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
2314 * reused after 8 mappings (or perhaps a few more if you score with the cache).
2315 *
2316 * @returns VBox status.
2317 * @param pVM VM handle.
2318 * @param GCPhys GC Physical address of the page.
2319 * @param ppv Where to store the address of the mapping.
2320 */
2321VMMDECL(int) PGMDynMapGCPage(PVM pVM, RTGCPHYS GCPhys, void **ppv)
2322{
2323 AssertMsg(!(GCPhys & PAGE_OFFSET_MASK), ("GCPhys=%RGp\n", GCPhys));
2324 return pgmDynMapGCPageInternal(pVM, GCPhys, ppv);
2325}
2326
2327
2328/**
2329 * Temporarily maps one guest page specified by unaligned GC physical address.
2330 * These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
2331 *
2332 * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
2333 * reused after 8 mappings (or perhaps a few more if you score with the cache).
2334 *
2335 * The caller is aware that only the specified page is mapped and that really bad things
2336 * will happen if writing beyond the page!
2337 *
2338 * @returns VBox status.
2339 * @param pVM VM handle.
2340 * @param GCPhys GC Physical address within the page to be mapped.
2341 * @param ppv Where to store the address of the mapping address corresponding to GCPhys.
2342 */
2343VMMDECL(int) PGMDynMapGCPageOff(PVM pVM, RTGCPHYS GCPhys, void **ppv)
2344{
2345 void *pv;
2346 int rc = pgmDynMapGCPageInternal(pVM, GCPhys, &pv);
2347 if (RT_SUCCESS(rc))
2348 {
2349 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
2350 return VINF_SUCCESS;
2351 }
2352 return rc;
2353}
2354
2355# ifdef IN_RC
2356
2357/**
2358 * Temporarily maps one host page specified by HC physical address.
2359 *
2360 * Be WARNED that the dynamic page mapping area is small, 16 pages, thus the space is
2361 * reused after 16 mappings (or perhaps a few more if you score with the cache).
2362 *
2363 * @returns VINF_SUCCESS, will bail out to ring-3 on failure.
2364 * @param pVM VM handle.
2365 * @param HCPhys HC Physical address of the page.
2366 * @param ppv Where to store the address of the mapping. This is the
2367 * address of the PAGE not the exact address corresponding
2368 * to HCPhys. Use PGMDynMapHCPageOff if you care for the
2369 * page offset.
2370 */
2371VMMDECL(int) PGMDynMapHCPage(PVM pVM, RTHCPHYS HCPhys, void **ppv)
2372{
2373 AssertMsg(!(HCPhys & PAGE_OFFSET_MASK), ("HCPhys=%RHp\n", HCPhys));
2374
2375 /*
2376 * Check the cache.
2377 */
2378 register unsigned iCache;
2379 for (iCache = 0;iCache < RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache);iCache++)
2380 {
2381 static const uint8_t au8Trans[MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT][RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache)] =
2382 {
2383 { 0, 9, 10, 11, 12, 13, 14, 15},
2384 { 0, 1, 10, 11, 12, 13, 14, 15},
2385 { 0, 1, 2, 11, 12, 13, 14, 15},
2386 { 0, 1, 2, 3, 12, 13, 14, 15},
2387 { 0, 1, 2, 3, 4, 13, 14, 15},
2388 { 0, 1, 2, 3, 4, 5, 14, 15},
2389 { 0, 1, 2, 3, 4, 5, 6, 15},
2390 { 0, 1, 2, 3, 4, 5, 6, 7},
2391 { 8, 1, 2, 3, 4, 5, 6, 7},
2392 { 8, 9, 2, 3, 4, 5, 6, 7},
2393 { 8, 9, 10, 3, 4, 5, 6, 7},
2394 { 8, 9, 10, 11, 4, 5, 6, 7},
2395 { 8, 9, 10, 11, 12, 5, 6, 7},
2396 { 8, 9, 10, 11, 12, 13, 6, 7},
2397 { 8, 9, 10, 11, 12, 13, 14, 7},
2398 { 8, 9, 10, 11, 12, 13, 14, 15},
2399 };
2400 AssertCompile(RT_ELEMENTS(au8Trans) == 16);
2401 AssertCompile(RT_ELEMENTS(au8Trans[0]) == 8);
2402
2403 if (pVM->pgm.s.aHCPhysDynPageMapCache[iCache] == HCPhys)
2404 {
2405 int iPage = au8Trans[pVM->pgm.s.iDynPageMapLast][iCache];
2406
2407 /* The cache can get out of sync with locked entries. (10 locked, 2 overwrites its cache position, last = 11, lookup 2 -> page 10 instead of 2) */
2408 if ((pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage].u & X86_PTE_PG_MASK) == HCPhys)
2409 {
2410 void *pv = pVM->pgm.s.pbDynPageMapBaseGC + (iPage << PAGE_SHIFT);
2411 *ppv = pv;
2412 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRCDynMapCacheHits);
2413 Log4(("PGMGCDynMapHCPage: HCPhys=%RHp pv=%p iPage=%d iCache=%d\n", HCPhys, pv, iPage, iCache));
2414 return VINF_SUCCESS;
2415 }
2416 LogFlow(("Out of sync entry %d\n", iPage));
2417 }
2418 }
2419 AssertCompile(RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) == 8);
2420 AssertCompile((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) == 16);
2421 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRCDynMapCacheMisses);
2422
2423 /*
2424 * Update the page tables.
2425 */
2426 unsigned iPage = pVM->pgm.s.iDynPageMapLast;
2427 unsigned i;
2428 for (i = 0; i < (MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT); i++)
2429 {
2430 pVM->pgm.s.iDynPageMapLast = iPage = (iPage + 1) & ((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) - 1);
2431 if (!pVM->pgm.s.aLockedDynPageMapCache[iPage])
2432 break;
2433 iPage++;
2434 }
2435 AssertRelease(i != (MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT));
2436
2437 pVM->pgm.s.aHCPhysDynPageMapCache[iPage & (RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) - 1)] = HCPhys;
2438 pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage].u = (uint32_t)HCPhys | X86_PTE_P | X86_PTE_A | X86_PTE_D;
2439 pVM->pgm.s.paDynPageMapPaePTEsGC[iPage].u = HCPhys | X86_PTE_P | X86_PTE_A | X86_PTE_D;
2440 pVM->pgm.s.aLockedDynPageMapCache[iPage] = 0;
2441
2442 void *pv = pVM->pgm.s.pbDynPageMapBaseGC + (iPage << PAGE_SHIFT);
2443 *ppv = pv;
2444 ASMInvalidatePage(pv);
2445 Log4(("PGMGCDynMapHCPage: HCPhys=%RHp pv=%p iPage=%d\n", HCPhys, pv, iPage));
2446 return VINF_SUCCESS;
2447}
2448
2449
2450/**
2451 * Temporarily lock a dynamic page to prevent it from being reused.
2452 *
2453 * @param pVM VM handle.
2454 * @param GCPage GC address of page
2455 */
2456VMMDECL(void) PGMDynLockHCPage(PVM pVM, RCPTRTYPE(uint8_t *) GCPage)
2457{
2458 unsigned iPage;
2459
2460 Assert(GCPage >= pVM->pgm.s.pbDynPageMapBaseGC && GCPage < (pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE));
2461 iPage = ((uintptr_t)(GCPage - pVM->pgm.s.pbDynPageMapBaseGC)) >> PAGE_SHIFT;
2462 ASMAtomicIncU32(&pVM->pgm.s.aLockedDynPageMapCache[iPage]);
2463 Log4(("PGMDynLockHCPage %RRv iPage=%d\n", GCPage, iPage));
2464}
2465
2466
2467/**
2468 * Unlock a dynamic page
2469 *
2470 * @param pVM VM handle.
2471 * @param GCPage GC address of page
2472 */
2473VMMDECL(void) PGMDynUnlockHCPage(PVM pVM, RCPTRTYPE(uint8_t *) GCPage)
2474{
2475 unsigned iPage;
2476
2477 AssertCompile(RT_ELEMENTS(pVM->pgm.s.aLockedDynPageMapCache) == 2* RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache));
2478 AssertCompileMemberSize(VM, pgm.s.aLockedDynPageMapCache, sizeof(uint32_t) * (MM_HYPER_DYNAMIC_SIZE >> (PAGE_SHIFT)));
2479
2480 Assert(GCPage >= pVM->pgm.s.pbDynPageMapBaseGC && GCPage < (pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE));
2481 iPage = ((uintptr_t)(GCPage - pVM->pgm.s.pbDynPageMapBaseGC)) >> PAGE_SHIFT;
2482 Assert(pVM->pgm.s.aLockedDynPageMapCache[iPage]);
2483 ASMAtomicDecU32(&pVM->pgm.s.aLockedDynPageMapCache[iPage]);
2484 Log4(("PGMDynUnlockHCPage %RRv iPage=%d\n", GCPage, iPage));
2485}
2486
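/*
 * Illustrative sketch (not compiled): the intended usage pattern for the RC
 * dynamic mapping cache is map, lock while the pointer must survive further
 * mapping calls, then unlock.  HCPhys, the access in the middle and the
 * helper name are placeholders.
 */
#if 0 /* example only */
static int pgmExampleDynMapUse(PVM pVM, RTHCPHYS HCPhys)
{
    void *pv;
    int rc = PGMDynMapHCPage(pVM, HCPhys, &pv);
    if (RT_SUCCESS(rc))
    {
        PGMDynLockHCPage(pVM, (uint8_t *)pv);   /* pin the cache slot */
        /* ... access at most one page at pv ... */
        PGMDynUnlockHCPage(pVM, (uint8_t *)pv); /* release it again */
    }
    return rc;
}
#endif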
2487
2488# ifdef VBOX_STRICT
2489/**
2490 * Check for lock leaks.
2491 *
2492 * @param pVM VM handle.
2493 */
2494VMMDECL(void) PGMDynCheckLocks(PVM pVM)
2495{
2496 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.aLockedDynPageMapCache); i++)
2497 Assert(!pVM->pgm.s.aLockedDynPageMapCache[i]);
2498}
2499# endif /* VBOX_STRICT */
2500
2501# endif /* IN_RC */
2502#endif /* IN_RC || VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
2503
2504#if !defined(IN_R0) || defined(LOG_ENABLED)
2505
2506/** Format handler for PGMPAGE.
2507 * @copydoc FNRTSTRFORMATTYPE */
2508static DECLCALLBACK(size_t) pgmFormatTypeHandlerPage(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
2509 const char *pszType, void const *pvValue,
2510 int cchWidth, int cchPrecision, unsigned fFlags,
2511 void *pvUser)
2512{
2513 size_t cch;
2514 PCPGMPAGE pPage = (PCPGMPAGE)pvValue;
2515 if (VALID_PTR(pPage))
2516 {
2517 char szTmp[64+80];
2518
2519 cch = 0;
2520
2521 /* The single char state stuff. */
2522 static const char s_achPageStates[4] = { 'Z', 'A', 'W', 'S' };
2523 szTmp[cch++] = s_achPageStates[PGM_PAGE_GET_STATE(pPage)];
2524
2525#define IS_PART_INCLUDED(lvl) ( !(fFlags & RTSTR_F_PRECISION) || cchPrecision == (lvl) || cchPrecision >= (lvl)+10 )
2526 if (IS_PART_INCLUDED(5))
2527 {
2528 static const char s_achHandlerStates[4] = { '-', 't', 'w', 'a' };
2529 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)];
2530 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_VIRT_STATE(pPage)];
2531 }
2532
2533 /* The type. */
2534 if (IS_PART_INCLUDED(4))
2535 {
2536 szTmp[cch++] = ':';
2537 static const char s_achPageTypes[8][4] = { "INV", "RAM", "MI2", "M2A", "SHA", "ROM", "MIO", "BAD" };
2538 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE(pPage)][0];
2539 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE(pPage)][1];
2540 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE(pPage)][2];
2541 }
2542
2543 /* The numbers. */
2544 if (IS_PART_INCLUDED(3))
2545 {
2546 szTmp[cch++] = ':';
2547 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_HCPHYS(pPage), 16, 12, 0, RTSTR_F_ZEROPAD | RTSTR_F_64BIT);
2548 }
2549
2550 if (IS_PART_INCLUDED(2))
2551 {
2552 szTmp[cch++] = ':';
2553 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_PAGEID(pPage), 16, 7, 0, RTSTR_F_ZEROPAD | RTSTR_F_32BIT);
2554 }
2555
2556 if (IS_PART_INCLUDED(6))
2557 {
2558 szTmp[cch++] = ':';
2559 static const char s_achRefs[4] = { '-', 'U', '!', 'L' };
2560 szTmp[cch++] = s_achRefs[PGM_PAGE_GET_TD_CREFS(pPage)];
2561 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_TD_IDX(pPage), 16, 4, 0, RTSTR_F_ZEROPAD | RTSTR_F_16BIT);
2562 }
2563#undef IS_PART_INCLUDED
2564
2565 cch = pfnOutput(pvArgOutput, szTmp, cch);
2566 }
2567 else
2568 cch = pfnOutput(pvArgOutput, "<bad-pgmpage-ptr>", sizeof("<bad-pgmpage-ptr>") - 1);
2569 return cch;
2570}
2571
2572
2573/** Format handler for PGMRAMRANGE.
2574 * @copydoc FNRTSTRFORMATTYPE */
2575static DECLCALLBACK(size_t) pgmFormatTypeHandlerRamRange(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
2576 const char *pszType, void const *pvValue,
2577 int cchWidth, int cchPrecision, unsigned fFlags,
2578 void *pvUser)
2579{
2580 size_t cch;
2581 PGMRAMRANGE const *pRam = (PGMRAMRANGE const *)pvValue;
2582 if (VALID_PTR(pRam))
2583 {
2584 char szTmp[80];
2585 cch = RTStrPrintf(szTmp, sizeof(szTmp), "%RGp-%RGp", pRam->GCPhys, pRam->GCPhysLast);
2586 cch = pfnOutput(pvArgOutput, szTmp, cch);
2587 }
2588 else
2589 cch = pfnOutput(pvArgOutput, "<bad-pgmramrange-ptr>", sizeof("<bad-pgmramrange-ptr>") - 1);
2590 return cch;
2591}
2592
2593/** Format type handlers to be registered/deregistered. */
2594static const struct
2595{
2596 char szType[24];
2597 PFNRTSTRFORMATTYPE pfnHandler;
2598} g_aPgmFormatTypes[] =
2599{
2600 { "pgmpage", pgmFormatTypeHandlerPage },
2601 { "pgmramrange", pgmFormatTypeHandlerRamRange }
2602};
2603
2604#endif /* !IN_R0 || LOG_ENABLED */
2605
2606/**
2607 * Registers the global string format types.
2608 *
2609 * This should be called at module load time or in some other manner that ensures
2610 * that it's called exactly one time.
2611 *
2612 * @returns IPRT status code on RTStrFormatTypeRegister failure.
2613 */
2614VMMDECL(int) PGMRegisterStringFormatTypes(void)
2615{
2616#if !defined(IN_R0) || defined(LOG_ENABLED)
2617 int rc = VINF_SUCCESS;
2618 unsigned i;
2619 for (i = 0; RT_SUCCESS(rc) && i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
2620 {
2621 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
2622# ifdef IN_RING0
2623 if (rc == VERR_ALREADY_EXISTS)
2624 {
2625 /* in case of cleanup failure in ring-0 */
2626 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2627 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
2628 }
2629# endif
2630 }
2631 if (RT_FAILURE(rc))
2632 while (i-- > 0)
2633 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2634
2635 return rc;
2636#else
2637 return VINF_SUCCESS;
2638#endif
2639}
2640
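/*
 * Illustrative sketch (not compiled): with the format types above registered,
 * PGM logging can use the custom IPRT format specifiers directly (compare the
 * commented-out statement in pgmDynMapGCPageInternal).  The variable names
 * here are placeholders.
 */
#if 0 /* example only */
Log(("pgm: GCPhys=%RGp page=%R[pgmpage] range=%R[pgmramrange]\n", GCPhys, pPage, pRam));
#endif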
2641
2642/**
2643 * Deregisters the global string format types.
2644 *
2645 * This should be called at module unload time or in some other manner that
2646 * ensures that it's called exactly one time.
2647 */
2648VMMDECL(void) PGMDeregisterStringFormatTypes(void)
2649{
2650#if !defined(IN_R0) || defined(LOG_ENABLED)
2651 for (unsigned i = 0; i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
2652 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2653#endif
2654}
2655
2656#ifdef VBOX_STRICT
2657
2658/**
2659 * Asserts that there are no mapping conflicts.
2660 *
2661 * @returns Number of conflicts.
2662 * @param pVM The VM Handle.
2663 */
2664VMMDECL(unsigned) PGMAssertNoMappingConflicts(PVM pVM)
2665{
2666 unsigned cErrors = 0;
2667
2668 /* Only applies to raw mode -> 1 VCPU */
2669 Assert(pVM->cCpus == 1);
2670 PVMCPU pVCpu = &pVM->aCpus[0];
2671
2672 /*
2673 * Check for mapping conflicts.
2674 */
2675 for (PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
2676 pMapping;
2677 pMapping = pMapping->CTX_SUFF(pNext))
2678 {
2679 /** @todo This is slow and should be optimized, but since it's just assertions I don't care now. */
2680 for (RTGCPTR GCPtr = pMapping->GCPtr;
2681 GCPtr <= pMapping->GCPtrLast;
2682 GCPtr += PAGE_SIZE)
2683 {
2684 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)GCPtr, NULL, NULL);
2685 if (rc != VERR_PAGE_TABLE_NOT_PRESENT)
2686 {
2687 AssertMsgFailed(("Conflict at %RGv with %s\n", GCPtr, R3STRING(pMapping->pszDesc)));
2688 cErrors++;
2689 break;
2690 }
2691 }
2692 }
2693
2694 return cErrors;
2695}
2696
2697
2698/**
2699 * Asserts that everything related to the guest CR3 is correctly shadowed.
2700 *
2701 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
2702 * and assert the correctness of the guest CR3 mapping before asserting that the
2703 * shadow page tables is in sync with the guest page tables.
2704 *
2705 * @returns Number of conflicts.
2706 * @param pVM The VM Handle.
2707 * @param pVCpu VMCPU handle.
2708 * @param cr3 The current guest CR3 register value.
2709 * @param cr4 The current guest CR4 register value.
2710 */
2711VMMDECL(unsigned) PGMAssertCR3(PVM pVM, PVMCPU pVCpu, uint64_t cr3, uint64_t cr4)
2712{
2713 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
2714 pgmLock(pVM);
2715 unsigned cErrors = PGM_BTH_PFN(AssertCR3, pVCpu)(pVCpu, cr3, cr4, 0, ~(RTGCPTR)0);
2716 pgmUnlock(pVM);
2717 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
2718 return cErrors;
2719}
2720
2721#endif /* VBOX_STRICT */