VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp@30684

Last change on this file since 30684 was 30326, checked in by vboxsync, 15 years ago

PGM: PGMShwModifyPage/PGMShwSetPage -> PGMShwMakePageWritable, PGMShwMakePageReadonly & PGMShwMakePageNotPresent and made the low level worker make the page writable before setting the X86_PTE_RW bit. PGMR3PhysTlbGCPhys2Ptr should make write monitored pages writable (?). PGMDynMapGCPage and PGMDynMapGCPageOff must make the pages writable and take the PGM lock.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 84.8 KB
1/* $Id: PGMAll.cpp 30326 2010-06-21 12:35:33Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor - All context code.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_PGM
22#include <VBox/pgm.h>
23#include <VBox/cpum.h>
24#include <VBox/selm.h>
25#include <VBox/iom.h>
26#include <VBox/sup.h>
27#include <VBox/mm.h>
28#include <VBox/stam.h>
29#include <VBox/csam.h>
30#include <VBox/patm.h>
31#include <VBox/trpm.h>
32#include <VBox/rem.h>
33#include <VBox/em.h>
34#include <VBox/hwaccm.h>
35#include <VBox/hwacc_vmx.h>
36#include "../PGMInternal.h"
37#include <VBox/vm.h>
38#include "../PGMInline.h"
39#include <iprt/assert.h>
40#include <iprt/asm-amd64-x86.h>
41#include <iprt/string.h>
42#include <VBox/log.h>
43#include <VBox/param.h>
44#include <VBox/err.h>
45
46
47/*******************************************************************************
48* Structures and Typedefs *
49*******************************************************************************/
50/**
51 * State structure for PGM_GST_NAME(HandlerVirtualUpdate) that's
52 * passed to PGM_GST_NAME(VirtHandlerUpdateOne) during enumeration.
53 */
54typedef struct PGMHVUSTATE
55{
56 /** The VM handle. */
57 PVM pVM;
58 /** The VMCPU handle. */
59 PVMCPU pVCpu;
60 /** The todo flags. */
61 RTUINT fTodo;
62 /** The CR4 register value. */
63 uint32_t cr4;
64} PGMHVUSTATE, *PPGMHVUSTATE;
65
66
67/*******************************************************************************
68* Internal Functions *
69*******************************************************************************/
70DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
71DECLINLINE(int) pgmShwGetPaePoolPagePD(PPGMCPU pPGM, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde);
72
73/*
74 * Shadow - 32-bit mode
75 */
76#define PGM_SHW_TYPE PGM_TYPE_32BIT
77#define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
78#include "PGMAllShw.h"
79
80/* Guest - real mode */
81#define PGM_GST_TYPE PGM_TYPE_REAL
82#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
83#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
84#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
85#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
86#include "PGMGstDefs.h"
87#include "PGMAllGst.h"
88#include "PGMAllBth.h"
89#undef BTH_PGMPOOLKIND_PT_FOR_PT
90#undef BTH_PGMPOOLKIND_ROOT
91#undef PGM_BTH_NAME
92#undef PGM_GST_TYPE
93#undef PGM_GST_NAME
94
95/* Guest - protected mode */
96#define PGM_GST_TYPE PGM_TYPE_PROT
97#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
98#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
99#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
100#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
101#include "PGMGstDefs.h"
102#include "PGMAllGst.h"
103#include "PGMAllBth.h"
104#undef BTH_PGMPOOLKIND_PT_FOR_PT
105#undef BTH_PGMPOOLKIND_ROOT
106#undef PGM_BTH_NAME
107#undef PGM_GST_TYPE
108#undef PGM_GST_NAME
109
110/* Guest - 32-bit mode */
111#define PGM_GST_TYPE PGM_TYPE_32BIT
112#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
113#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
114#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
115#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
116#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD
117#include "PGMGstDefs.h"
118#include "PGMAllGst.h"
119#include "PGMAllBth.h"
120#undef BTH_PGMPOOLKIND_PT_FOR_BIG
121#undef BTH_PGMPOOLKIND_PT_FOR_PT
122#undef BTH_PGMPOOLKIND_ROOT
123#undef PGM_BTH_NAME
124#undef PGM_GST_TYPE
125#undef PGM_GST_NAME
126
127#undef PGM_SHW_TYPE
128#undef PGM_SHW_NAME
129
130
131/*
132 * Shadow - PAE mode
133 */
134#define PGM_SHW_TYPE PGM_TYPE_PAE
135#define PGM_SHW_NAME(name) PGM_SHW_NAME_PAE(name)
136#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
137#include "PGMAllShw.h"
138
139/* Guest - real mode */
140#define PGM_GST_TYPE PGM_TYPE_REAL
141#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
142#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
143#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
144#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
145#include "PGMGstDefs.h"
146#include "PGMAllBth.h"
147#undef BTH_PGMPOOLKIND_PT_FOR_PT
148#undef BTH_PGMPOOLKIND_ROOT
149#undef PGM_BTH_NAME
150#undef PGM_GST_TYPE
151#undef PGM_GST_NAME
152
153/* Guest - protected mode */
154#define PGM_GST_TYPE PGM_TYPE_PROT
155#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
156#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
157#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
158#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
159#include "PGMGstDefs.h"
160#include "PGMAllBth.h"
161#undef BTH_PGMPOOLKIND_PT_FOR_PT
162#undef BTH_PGMPOOLKIND_ROOT
163#undef PGM_BTH_NAME
164#undef PGM_GST_TYPE
165#undef PGM_GST_NAME
166
167/* Guest - 32-bit mode */
168#define PGM_GST_TYPE PGM_TYPE_32BIT
169#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
170#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
171#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
172#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
173#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_FOR_32BIT
174#include "PGMGstDefs.h"
175#include "PGMAllBth.h"
176#undef BTH_PGMPOOLKIND_PT_FOR_BIG
177#undef BTH_PGMPOOLKIND_PT_FOR_PT
178#undef BTH_PGMPOOLKIND_ROOT
179#undef PGM_BTH_NAME
180#undef PGM_GST_TYPE
181#undef PGM_GST_NAME
182
183
184/* Guest - PAE mode */
185#define PGM_GST_TYPE PGM_TYPE_PAE
186#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
187#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
188#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
189#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
190#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT
191#include "PGMGstDefs.h"
192#include "PGMAllGst.h"
193#include "PGMAllBth.h"
194#undef BTH_PGMPOOLKIND_PT_FOR_BIG
195#undef BTH_PGMPOOLKIND_PT_FOR_PT
196#undef BTH_PGMPOOLKIND_ROOT
197#undef PGM_BTH_NAME
198#undef PGM_GST_TYPE
199#undef PGM_GST_NAME
200
201#undef PGM_SHW_TYPE
202#undef PGM_SHW_NAME
203
204
205#ifndef IN_RC /* AMD64 implies VT-x/AMD-V */
206/*
207 * Shadow - AMD64 mode
208 */
209# define PGM_SHW_TYPE PGM_TYPE_AMD64
210# define PGM_SHW_NAME(name) PGM_SHW_NAME_AMD64(name)
211# include "PGMAllShw.h"
212
213/* Guest - protected mode (only used for AMD-V nested paging in 64 bits mode) */
214# define PGM_GST_TYPE PGM_TYPE_PROT
215# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
216# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
217# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
218# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PD_PHYS
219# include "PGMGstDefs.h"
220# include "PGMAllBth.h"
221# undef BTH_PGMPOOLKIND_PT_FOR_PT
222# undef BTH_PGMPOOLKIND_ROOT
223# undef PGM_BTH_NAME
224# undef PGM_GST_TYPE
225# undef PGM_GST_NAME
226
227# ifdef VBOX_WITH_64_BITS_GUESTS
228/* Guest - AMD64 mode */
229# define PGM_GST_TYPE PGM_TYPE_AMD64
230# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
231# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
232# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
233# define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
234# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_64BIT_PML4
235# include "PGMGstDefs.h"
236# include "PGMAllGst.h"
237# include "PGMAllBth.h"
238# undef BTH_PGMPOOLKIND_PT_FOR_BIG
239# undef BTH_PGMPOOLKIND_PT_FOR_PT
240# undef BTH_PGMPOOLKIND_ROOT
241# undef PGM_BTH_NAME
242# undef PGM_GST_TYPE
243# undef PGM_GST_NAME
244# endif /* VBOX_WITH_64_BITS_GUESTS */
245
246# undef PGM_SHW_TYPE
247# undef PGM_SHW_NAME
248
249
250/*
251 * Shadow - Nested paging mode
252 */
253# define PGM_SHW_TYPE PGM_TYPE_NESTED
254# define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED(name)
255# include "PGMAllShw.h"
256
257/* Guest - real mode */
258# define PGM_GST_TYPE PGM_TYPE_REAL
259# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
260# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_REAL(name)
261# include "PGMGstDefs.h"
262# include "PGMAllBth.h"
263# undef PGM_BTH_NAME
264# undef PGM_GST_TYPE
265# undef PGM_GST_NAME
266
267/* Guest - protected mode */
268# define PGM_GST_TYPE PGM_TYPE_PROT
269# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
270# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PROT(name)
271# include "PGMGstDefs.h"
272# include "PGMAllBth.h"
273# undef PGM_BTH_NAME
274# undef PGM_GST_TYPE
275# undef PGM_GST_NAME
276
277/* Guest - 32-bit mode */
278# define PGM_GST_TYPE PGM_TYPE_32BIT
279# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
280# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT(name)
281# include "PGMGstDefs.h"
282# include "PGMAllBth.h"
283# undef PGM_BTH_NAME
284# undef PGM_GST_TYPE
285# undef PGM_GST_NAME
286
287/* Guest - PAE mode */
288# define PGM_GST_TYPE PGM_TYPE_PAE
289# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
290# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE(name)
291# include "PGMGstDefs.h"
292# include "PGMAllBth.h"
293# undef PGM_BTH_NAME
294# undef PGM_GST_TYPE
295# undef PGM_GST_NAME
296
297# ifdef VBOX_WITH_64_BITS_GUESTS
298/* Guest - AMD64 mode */
299# define PGM_GST_TYPE PGM_TYPE_AMD64
300# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
301# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64(name)
302# include "PGMGstDefs.h"
303# include "PGMAllBth.h"
304# undef PGM_BTH_NAME
305# undef PGM_GST_TYPE
306# undef PGM_GST_NAME
307# endif /* VBOX_WITH_64_BITS_GUESTS */
308
309# undef PGM_SHW_TYPE
310# undef PGM_SHW_NAME
311
312
313/*
314 * Shadow - EPT
315 */
316# define PGM_SHW_TYPE PGM_TYPE_EPT
317# define PGM_SHW_NAME(name) PGM_SHW_NAME_EPT(name)
318# include "PGMAllShw.h"
319
320/* Guest - real mode */
321# define PGM_GST_TYPE PGM_TYPE_REAL
322# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
323# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_REAL(name)
324# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
325# include "PGMGstDefs.h"
326# include "PGMAllBth.h"
327# undef BTH_PGMPOOLKIND_PT_FOR_PT
328# undef PGM_BTH_NAME
329# undef PGM_GST_TYPE
330# undef PGM_GST_NAME
331
332/* Guest - protected mode */
333# define PGM_GST_TYPE PGM_TYPE_PROT
334# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
335# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
336# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
337# include "PGMGstDefs.h"
338# include "PGMAllBth.h"
339# undef BTH_PGMPOOLKIND_PT_FOR_PT
340# undef PGM_BTH_NAME
341# undef PGM_GST_TYPE
342# undef PGM_GST_NAME
343
344/* Guest - 32-bit mode */
345# define PGM_GST_TYPE PGM_TYPE_32BIT
346# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
347# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_32BIT(name)
348# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
349# include "PGMGstDefs.h"
350# include "PGMAllBth.h"
351# undef BTH_PGMPOOLKIND_PT_FOR_PT
352# undef PGM_BTH_NAME
353# undef PGM_GST_TYPE
354# undef PGM_GST_NAME
355
356/* Guest - PAE mode */
357# define PGM_GST_TYPE PGM_TYPE_PAE
358# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
359# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PAE(name)
360# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
361# include "PGMGstDefs.h"
362# include "PGMAllBth.h"
363# undef BTH_PGMPOOLKIND_PT_FOR_PT
364# undef PGM_BTH_NAME
365# undef PGM_GST_TYPE
366# undef PGM_GST_NAME
367
368# ifdef VBOX_WITH_64_BITS_GUESTS
369/* Guest - AMD64 mode */
370# define PGM_GST_TYPE PGM_TYPE_AMD64
371# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
372# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_AMD64(name)
373# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
374# include "PGMGstDefs.h"
375# include "PGMAllBth.h"
376# undef BTH_PGMPOOLKIND_PT_FOR_PT
377# undef PGM_BTH_NAME
378# undef PGM_GST_TYPE
379# undef PGM_GST_NAME
380# endif /* VBOX_WITH_64_BITS_GUESTS */
381
382# undef PGM_SHW_TYPE
383# undef PGM_SHW_NAME
384
385#endif /* !IN_RC */
386
387
388#ifndef IN_RING3
389/**
390 * #PF Handler.
391 *
392 * @returns VBox status code (appropriate for trap handling and GC return).
393 * @param pVCpu VMCPU handle.
394 * @param uErr The trap error code.
395 * @param pRegFrame Trap register frame.
396 * @param pvFault The fault address.
397 */
398VMMDECL(int) PGMTrap0eHandler(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
399{
400 PVM pVM = pVCpu->CTX_SUFF(pVM);
401
402 Log(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv eip=%04x:%RGv\n", uErr, pvFault, pRegFrame->cs, (RTGCPTR)pRegFrame->rip));
403 STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0e, a);
404 STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );
405
406
407#ifdef VBOX_WITH_STATISTICS
408 /*
409 * Error code stats.
410 */
411 if (uErr & X86_TRAP_PF_US)
412 {
413 if (!(uErr & X86_TRAP_PF_P))
414 {
415 if (uErr & X86_TRAP_PF_RW)
416 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSNotPresentWrite);
417 else
418 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSNotPresentRead);
419 }
420 else if (uErr & X86_TRAP_PF_RW)
421 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSWrite);
422 else if (uErr & X86_TRAP_PF_RSVD)
423 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSReserved);
424 else if (uErr & X86_TRAP_PF_ID)
425 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSNXE);
426 else
427 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSRead);
428 }
429 else
430 { /* Supervisor */
431 if (!(uErr & X86_TRAP_PF_P))
432 {
433 if (uErr & X86_TRAP_PF_RW)
434 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eSVNotPresentWrite);
435 else
436 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eSVNotPresentRead);
437 }
438 else if (uErr & X86_TRAP_PF_RW)
439 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eSVWrite);
440 else if (uErr & X86_TRAP_PF_ID)
441 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eSNXE);
442 else if (uErr & X86_TRAP_PF_RSVD)
443 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eSVReserved);
444 }
445#endif /* VBOX_WITH_STATISTICS */
446
447 /*
448 * Call the worker.
449 */
450 bool fLockTaken = false;
451 int rc = PGM_BTH_PFN(Trap0eHandler, pVCpu)(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken);
452 if (fLockTaken)
453 {
454 Assert(PGMIsLockOwner(pVM));
455 pgmUnlock(pVM);
456 }
457 if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
458 rc = VINF_SUCCESS;
459
460# ifdef IN_RING0
461 /* Note: hack alert for difficult to reproduce problem. */
462 if ( rc == VERR_PAGE_NOT_PRESENT /* SMP only ; disassembly might fail. */
463 || rc == VERR_PAGE_TABLE_NOT_PRESENT /* seen with UNI & SMP */
464 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT /* seen with SMP */
465 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT) /* precaution */
466 {
467 Log(("WARNING: Unexpected VERR_PAGE_TABLE_NOT_PRESENT (%d) for page fault at %RGv error code %x (rip=%RGv)\n", rc, pvFault, uErr, pRegFrame->rip));
468 /* Some kind of inconsistency in the SMP case; it's safe to just execute the instruction again; not sure about single VCPU VMs though. */
469 rc = VINF_SUCCESS;
470 }
471# endif
472
473 STAM_STATS({ if (rc == VINF_EM_RAW_GUEST_TRAP) STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eGuestPF); });
474 STAM_STATS({ if (!pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution))
475 pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2Misc; });
476 STAM_PROFILE_STOP_EX(&pVCpu->pgm.s.StatRZTrap0e, pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution), a);
477 return rc;
478}
479#endif /* !IN_RING3 */
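
/*
 * Illustrative sketch: one way a caller could decode the #PF error code bits
 * that the statistics block in PGMTrap0eHandler keys on.  The bit layout is
 * architectural: P (bit 0), RW (bit 1), US (bit 2), RSVD (bit 3), ID (bit 4).
 * pgmTrap0eDescribe is a hypothetical helper name, not an existing API.
 */
#if 0 /* example only */
static const char *pgmTrap0eDescribe(RTGCUINT uErr)
{
    if (!(uErr & X86_TRAP_PF_P))
        return (uErr & X86_TRAP_PF_RW) ? "not-present write" : "not-present read";
    if (uErr & X86_TRAP_PF_RSVD)
        return "reserved bit violation";
    if (uErr & X86_TRAP_PF_ID)
        return "instruction fetch (NX)";
    return (uErr & X86_TRAP_PF_RW) ? "protection write" : "protection read";
}
#endif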
480
481
482/**
483 * Prefetch a page
484 *
485 * Typically used to sync commonly used pages before entering raw mode
486 * after a CR3 reload.
487 *
488 * @returns VBox status code suitable for scheduling.
489 * @retval VINF_SUCCESS on success.
490 * @retval VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
491 * @param pVCpu VMCPU handle.
493 * @param GCPtrPage Page to prefetch.
493 */
494VMMDECL(int) PGMPrefetchPage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
495{
496 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,Prefetch), a);
497 int rc = PGM_BTH_PFN(PrefetchPage, pVCpu)(pVCpu, GCPtrPage);
498 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,Prefetch), a);
499 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
500 return rc;
501}
502
503
504/**
505 * Gets the mapping corresponding to the specified address (if any).
506 *
507 * @returns Pointer to the mapping.
508 * @returns NULL if not found.
509 *
510 * @param pVM The virtual machine.
511 * @param GCPtr The guest context pointer.
512 */
513PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr)
514{
515 PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
516 while (pMapping)
517 {
518 if ((uintptr_t)GCPtr < (uintptr_t)pMapping->GCPtr)
519 break;
520 if ((uintptr_t)GCPtr - (uintptr_t)pMapping->GCPtr < pMapping->cb)
521 return pMapping;
522 pMapping = pMapping->CTX_SUFF(pNext);
523 }
524 return NULL;
525}
526
527
528/**
529 * Verifies a range of pages for read or write access
530 *
531 * Only checks the guest's page tables
532 *
533 * @returns VBox status code.
534 * @param pVCpu VMCPU handle.
535 * @param Addr Guest virtual address to check
536 * @param cbSize Access size
537 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
538 * @remarks Currently not in use.
539 */
540VMMDECL(int) PGMIsValidAccess(PVMCPU pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
541{
542 /*
543 * Validate input.
544 */
545 if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
546 {
547 AssertMsgFailed(("PGMIsValidAccess: invalid access type %08x\n", fAccess));
548 return VERR_INVALID_PARAMETER;
549 }
550
551 uint64_t fPage;
552 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)Addr, &fPage, NULL);
553 if (RT_FAILURE(rc))
554 {
555 Log(("PGMIsValidAccess: access violation for %RGv rc=%d\n", Addr, rc));
556 return VINF_EM_RAW_GUEST_TRAP;
557 }
558
559 /*
560 * Check if the access would cause a page fault
561 *
562 * Note that hypervisor page directories are not present in the guest's tables, so this check
563 * is sufficient.
564 */
565 bool fWrite = !!(fAccess & X86_PTE_RW);
566 bool fUser = !!(fAccess & X86_PTE_US);
567 if ( !(fPage & X86_PTE_P)
568 || (fWrite && !(fPage & X86_PTE_RW))
569 || (fUser && !(fPage & X86_PTE_US)) )
570 {
571 Log(("PGMIsValidAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPage, fWrite, fUser));
572 return VINF_EM_RAW_GUEST_TRAP;
573 }
574 if ( RT_SUCCESS(rc)
575 && PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize))
576 return PGMIsValidAccess(pVCpu, Addr + PAGE_SIZE, (cbSize > PAGE_SIZE) ? cbSize - PAGE_SIZE : 1, fAccess);
577 return rc;
578}
579
580
581/**
582 * Verifies a range of pages for read or write access
583 *
584 * Supports handling of pages marked for dirty bit tracking and CSAM
585 *
586 * @returns VBox status code.
587 * @param pVCpu VMCPU handle.
588 * @param Addr Guest virtual address to check
589 * @param cbSize Access size
590 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
591 */
592VMMDECL(int) PGMVerifyAccess(PVMCPU pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
593{
594 PVM pVM = pVCpu->CTX_SUFF(pVM);
595
596 AssertMsg(!(fAccess & ~(X86_PTE_US | X86_PTE_RW)), ("PGMVerifyAccess: invalid access type %08x\n", fAccess));
597
598 /*
599 * Get going.
600 */
601 uint64_t fPageGst;
602 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)Addr, &fPageGst, NULL);
603 if (RT_FAILURE(rc))
604 {
605 Log(("PGMVerifyAccess: access violation for %RGv rc=%d\n", Addr, rc));
606 return VINF_EM_RAW_GUEST_TRAP;
607 }
608
609 /*
610 * Check if the access would cause a page fault
611 *
612 * Note that hypervisor page directories are not present in the guest's tables, so this check
613 * is sufficient.
614 */
615 const bool fWrite = !!(fAccess & X86_PTE_RW);
616 const bool fUser = !!(fAccess & X86_PTE_US);
617 if ( !(fPageGst & X86_PTE_P)
618 || (fWrite && !(fPageGst & X86_PTE_RW))
619 || (fUser && !(fPageGst & X86_PTE_US)) )
620 {
621 Log(("PGMVerifyAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPageGst, fWrite, fUser));
622 return VINF_EM_RAW_GUEST_TRAP;
623 }
624
625 if (!HWACCMIsNestedPagingActive(pVM))
626 {
627 /*
628 * Next step is to verify if we protected this page for dirty bit tracking or for CSAM scanning
629 */
630 rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, NULL, NULL);
631 if ( rc == VERR_PAGE_NOT_PRESENT
632 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
633 {
634 /*
635 * Page is not present in our page tables.
636 * Try to sync it!
637 */
638 Assert(X86_TRAP_PF_RW == X86_PTE_RW && X86_TRAP_PF_US == X86_PTE_US);
639 uint32_t uErr = fAccess & (X86_TRAP_PF_RW | X86_TRAP_PF_US);
640 rc = PGM_BTH_PFN(VerifyAccessSyncPage, pVCpu)(pVCpu, Addr, fPageGst, uErr);
641 if (rc != VINF_SUCCESS)
642 return rc;
643 }
644 else
645 AssertMsg(rc == VINF_SUCCESS, ("PGMShwGetPage %RGv failed with %Rrc\n", Addr, rc));
646 }
647
648#if 0 /* def VBOX_STRICT; triggers too often now */
649 /*
650 * This check is a bit paranoid, but useful.
651 */
652 /** @note this will assert when writing to monitored pages (a bit annoying actually) */
653 uint64_t fPageShw;
654 rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, &fPageShw, NULL);
655 if ( (rc == VERR_PAGE_NOT_PRESENT || RT_FAILURE(rc))
656 || (fWrite && !(fPageShw & X86_PTE_RW))
657 || (fUser && !(fPageShw & X86_PTE_US)) )
658 {
659 AssertMsgFailed(("Unexpected access violation for %RGv! rc=%Rrc write=%d user=%d\n",
660 Addr, rc, fWrite && !(fPageShw & X86_PTE_RW), fUser && !(fPageShw & X86_PTE_US)));
661 return VINF_EM_RAW_GUEST_TRAP;
662 }
663#endif
664
665 if ( RT_SUCCESS(rc)
666 && ( PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize - 1)
667 || Addr + cbSize < Addr))
668 {
669 /* Don't recursively call PGMVerifyAccess as we might run out of stack. */
670 for (;;)
671 {
672 Addr += PAGE_SIZE;
673 if (cbSize > PAGE_SIZE)
674 cbSize -= PAGE_SIZE;
675 else
676 cbSize = 1;
677 rc = PGMVerifyAccess(pVCpu, Addr, 1, fAccess);
678 if (rc != VINF_SUCCESS)
679 break;
680 if (PAGE_ADDRESS(Addr) == PAGE_ADDRESS(Addr + cbSize - 1))
681 break;
682 }
683 }
684 return rc;
685}
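
/*
 * Usage sketch (GCPtrDst and cbWrite are hypothetical caller variables):
 * verifying a user-mode write that may cross a page boundary before touching
 * guest memory.  PGMVerifyAccess splits the range internally, so one call
 * covers the whole access.
 */
#if 0 /* example only */
int rcAccess = PGMVerifyAccess(pVCpu, GCPtrDst, cbWrite, X86_PTE_RW | X86_PTE_US);
if (rcAccess != VINF_SUCCESS)
    return rcAccess; /* e.g. VINF_EM_RAW_GUEST_TRAP when the guest PTE forbids the access */
#endif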
686
687
688/**
689 * Emulation of the invlpg instruction (HC only actually).
690 *
691 * @returns VBox status code, special care required.
692 * @retval VINF_PGM_SYNC_CR3 - handled.
693 * @retval VINF_EM_RAW_EMULATE_INSTR - not handled (RC only).
694 * @retval VERR_REM_FLUSHED_PAGES_OVERFLOW - not handled.
695 *
696 * @param pVCpu VMCPU handle.
697 * @param GCPtrPage Page to invalidate.
698 *
699 * @remark ASSUMES the page table entry or page directory is valid. Fairly
700 * safe, but there could be edge cases!
701 *
702 * @todo Flush page or page directory only if necessary!
703 */
704VMMDECL(int) PGMInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
705{
706 PVM pVM = pVCpu->CTX_SUFF(pVM);
707 int rc;
708 Log3(("PGMInvalidatePage: GCPtrPage=%RGv\n", GCPtrPage));
709
710#ifndef IN_RING3
711 /*
712 * Notify the recompiler so it can record this instruction.
713 */
714 REMNotifyInvalidatePage(pVM, GCPtrPage);
715#endif /* !IN_RING3 */
716
717
718#ifdef IN_RC
719 /*
720 * Check for conflicts and pending CR3 monitoring updates.
721 */
722 if (pgmMapAreMappingsFloating(&pVM->pgm.s))
723 {
724 if ( pgmGetMapping(pVM, GCPtrPage)
725 && PGMGstGetPage(pVCpu, GCPtrPage, NULL, NULL) != VERR_PAGE_TABLE_NOT_PRESENT)
726 {
727 LogFlow(("PGMGCInvalidatePage: Conflict!\n"));
728 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
729 STAM_COUNTER_INC(&pVM->pgm.s.StatRCInvlPgConflict);
730 return VINF_PGM_SYNC_CR3;
731 }
732
733 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
734 {
735 LogFlow(("PGMGCInvalidatePage: PGM_SYNC_MONITOR_CR3 -> reinterpret instruction in R3\n"));
736 STAM_COUNTER_INC(&pVM->pgm.s.StatRCInvlPgSyncMonCR3);
737 return VINF_EM_RAW_EMULATE_INSTR;
738 }
739 }
740#endif /* IN_RC */
741
742 /*
743 * Call paging mode specific worker.
744 */
745 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePage), a);
746 pgmLock(pVM);
747 rc = PGM_BTH_PFN(InvalidatePage, pVCpu)(pVCpu, GCPtrPage);
748 pgmUnlock(pVM);
749 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePage), a);
750
751 /* Invalidate the TLB entry; might already be done by InvalidatePage (@todo) */
752 PGM_INVL_PG(pVCpu, GCPtrPage);
753
754#ifdef IN_RING3
755 /*
756 * Check if we have a pending update of the CR3 monitoring.
757 */
758 if ( RT_SUCCESS(rc)
759 && (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3))
760 {
761 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
762 Assert(!pVM->pgm.s.fMappingsFixed); Assert(!pVM->pgm.s.fMappingsDisabled);
763 }
764
765 /*
766 * Inform CSAM about the flush
767 *
768 * Note: This is to check if monitored pages have been changed; when we implement
769 * callbacks for virtual handlers, this is no longer required.
770 */
771 CSAMR3FlushPage(pVM, GCPtrPage);
772#endif /* IN_RING3 */
773
774 /* Ignore all irrelevant error codes. */
775 if ( rc == VERR_PAGE_NOT_PRESENT
776 || rc == VERR_PAGE_TABLE_NOT_PRESENT
777 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT
778 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)
779 rc = VINF_SUCCESS;
780
781 return rc;
782}
783
784
785/**
786 * Executes an instruction using the interpreter.
787 *
788 * @returns VBox status code (appropriate for trap handling and GC return).
789 * @param pVM VM handle.
790 * @param pVCpu VMCPU handle.
791 * @param pRegFrame Register frame.
792 * @param pvFault Fault address.
793 */
794VMMDECL(int) PGMInterpretInstruction(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
795{
796 uint32_t cb;
797 int rc = EMInterpretInstruction(pVM, pVCpu, pRegFrame, pvFault, &cb);
798 if (rc == VERR_EM_INTERPRETER)
799 rc = VINF_EM_RAW_EMULATE_INSTR;
800 if (rc != VINF_SUCCESS)
801 Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%RGv)\n", rc, pvFault));
802 return rc;
803}
804
805
806/**
807 * Gets effective page information (from the VMM page directory).
808 *
809 * @returns VBox status.
810 * @param pVCpu VMCPU handle.
811 * @param GCPtr Guest Context virtual address of the page.
812 * @param pfFlags Where to store the flags. These are X86_PTE_*.
813 * @param pHCPhys Where to store the HC physical address of the page.
814 * This is page aligned.
815 * @remark You should use PGMMapGetPage() for pages in a mapping.
816 */
817VMMDECL(int) PGMShwGetPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
818{
819 pgmLock(pVCpu->CTX_SUFF(pVM));
820 int rc = PGM_SHW_PFN(GetPage, pVCpu)(pVCpu, GCPtr, pfFlags, pHCPhys);
821 pgmUnlock(pVCpu->CTX_SUFF(pVM));
822 return rc;
823}
824
825
826/**
827 * Modify page flags for a single page in the shadow context.
828 *
829 * The existing flags are ANDed with the fMask and ORed with the fFlags.
830 *
831 * @returns VBox status code.
832 * @param pVCpu VMCPU handle.
833 * @param GCPtr Virtual address of the first page in the range.
834 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
835 * @param fMask The AND mask - page flags X86_PTE_*.
836 * Be very CAREFUL when ~'ing constants which could be 32-bit!
837 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
838 * @remark You must use PGMMapModifyPage() for pages in a mapping.
839 */
840DECLINLINE(int) pdmShwModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags)
841{
842 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
843 Assert(!(fOpFlags & ~(PGM_MK_PG_IS_MMIO2 | PGM_MK_PG_IS_WRITE_FAULT)));
844
845 GCPtr &= PAGE_BASE_GC_MASK; /** @todo this ain't necessary, right... */
846
847 PVM pVM = pVCpu->CTX_SUFF(pVM);
848 pgmLock(pVM);
849 int rc = PGM_SHW_PFN(ModifyPage, pVCpu)(pVCpu, GCPtr, PAGE_SIZE, fFlags, fMask, fOpFlags);
850 pgmUnlock(pVM);
851 return rc;
852}
853
854
855/**
856 * Changes the page flags for a single page in the shadow page tables so as to
857 * make it read-only.
858 *
859 * @returns VBox status code.
860 * @param pVCpu VMCPU handle.
861 * @param GCPtr Virtual address of the first page in the range.
862 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
863 */
864VMMDECL(int) PGMShwMakePageReadonly(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
865{
866 return pdmShwModifyPage(pVCpu, GCPtr, 0, ~(uint64_t)X86_PTE_RW, fOpFlags);
867}
868
869
870/**
871 * Changes the page flags for a single page in the shadow page tables so as to
872 * make it writable.
873 *
874 * The caller must know with 101% certainty that the guest page tables map this
875 * as writable too. This function will deal with shared, zero and write monitored
876 * pages.
877 *
878 * @returns VBox status code.
879 * @param pVCpu VMCPU handle.
880 * @param GCPtr Virtual address of the first page in the range.
882 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
883 */
884VMMDECL(int) PGMShwMakePageWritable(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
885{
886 return pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)0, fOpFlags);
887}
888
889
890/**
891 * Changes the page flags for a single page in the shadow page tables so as to
892 * make it not present.
893 *
894 * @returns VBox status code.
895 * @param pVCpu VMCPU handle.
896 * @param GCPtr Virtual address of the first page in the range.
897 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
898 */
899VMMDECL(int) PGMShwMakePageNotPresent(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
900{
901 return pdmShwModifyPage(pVCpu, GCPtr, 0, 0, fOpFlags);
902}
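
/*
 * Summary of how the three wrappers above drive pdmShwModifyPage (the existing
 * PTE bits are first ANDed with fMask and then ORed with fFlags):
 *
 *   PGMShwMakePageReadonly:   fFlags = 0,          fMask = ~X86_PTE_RW  -> clears RW
 *   PGMShwMakePageWritable:   fFlags = X86_PTE_RW, fMask = ~0           -> sets RW
 *   PGMShwMakePageNotPresent: fFlags = 0,          fMask = 0            -> clears all bits, including P
 *
 * Usage sketch (GCPtrPage is a hypothetical guest address):
 */
#if 0 /* example only */
int rc = PGMShwMakePageReadonly(pVCpu, GCPtrPage, 0 /* fOpFlags */);
AssertRC(rc);
#endif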
903
904
905/**
906 * Gets the shadow page directory for the specified address, PAE.
907 *
908 * @returns VBox status code.
909 * @param pVCpu The VMCPU handle.
910 * @param GCPtr The address.
911 * @param pGstPdpe Guest PDPT entry
912 * @param ppPD Receives address of page directory
913 */
914int pgmShwSyncPaePDPtr(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDPE pGstPdpe, PX86PDPAE *ppPD)
915{
916 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
917 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(&pVCpu->pgm.s);
918 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
919 PVM pVM = pVCpu->CTX_SUFF(pVM);
920 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
921 PPGMPOOLPAGE pShwPage;
922 int rc;
923
924 Assert(PGMIsLockOwner(pVM));
925
926 /* Allocate page directory if not present. */
927 if ( !pPdpe->n.u1Present
928 && !(pPdpe->u & X86_PDPE_PG_MASK))
929 {
930 RTGCPTR64 GCPdPt;
931 PGMPOOLKIND enmKind;
932
933# if defined(IN_RC)
934 /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
935 PGMDynLockHCPage(pVM, (uint8_t *)pPdpe);
936# endif
937
938 if (HWACCMIsNestedPagingActive(pVM) || !CPUMIsGuestPagingEnabled(pVCpu))
939 {
940 /* AMD-V nested paging or real/protected mode without paging */
941 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
942 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
943 }
944 else
945 {
946 Assert(pGstPdpe);
947
948 if (CPUMGetGuestCR4(pVCpu) & X86_CR4_PAE)
949 {
950 if (!pGstPdpe->n.u1Present)
951 {
952 /* PD not present; guest must reload CR3 to change it.
953 * No need to monitor anything in this case.
954 */
955 Assert(!HWACCMIsEnabled(pVM));
956
957 GCPdPt = pGstPdpe->u & X86_PDPE_PG_MASK;
958 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
959 pGstPdpe->n.u1Present = 1;
960 }
961 else
962 {
963 GCPdPt = pGstPdpe->u & X86_PDPE_PG_MASK;
964 enmKind = PGMPOOLKIND_PAE_PD_FOR_PAE_PD;
965 }
966 }
967 else
968 {
969 GCPdPt = CPUMGetGuestCR3(pVCpu);
970 enmKind = (PGMPOOLKIND)(PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD + iPdPt);
971 }
972 }
973
974 /* Create a reference back to the PDPT by using the index in its shadow page. */
975 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPdPt, &pShwPage);
976 AssertRCReturn(rc, rc);
977
978 /* The PD was cached or created; hook it up now. */
979 pPdpe->u |= pShwPage->Core.Key
980 | (pGstPdpe->u & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));
981
982# if defined(IN_RC)
983 /* In 32-bit PAE mode we *must* invalidate the TLB when changing a PDPT entry; the CPU fetches them only during cr3 load, so any
984 * non-present PDPT will continue to cause page faults.
985 */
986 ASMReloadCR3();
987 PGMDynUnlockHCPage(pVM, (uint8_t *)pPdpe);
988# endif
989 }
990 else
991 {
992 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
993 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
994 Assert((pPdpe->u & X86_PDPE_PG_MASK) == pShwPage->Core.Key);
995
996 pgmPoolCacheUsed(pPool, pShwPage);
997 }
998 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
999 return VINF_SUCCESS;
1000}
1001
1002
1003/**
1004 * Gets the pointer to the shadow page directory entry for an address, PAE.
1005 *
1006 * @returns VBox status code.
1007 * @param pPGM Pointer to the PGMCPU instance data.
1008 * @param GCPtr The address.
1009 * @param ppShwPde Receives the address of the pgm pool page for the shadow page directory
1010 */
1011DECLINLINE(int) pgmShwGetPaePoolPagePD(PPGMCPU pPGM, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde)
1012{
1013 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
1014 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pPGM);
1015
1016 Assert(PGMIsLockOwner(PGMCPU2VM(pPGM)));
1017
1018 AssertReturn(pPdpt, VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT); /* can't happen */
1019 if (!pPdpt->a[iPdPt].n.u1Present)
1020 {
1021 LogFlow(("pgmShwGetPaePoolPagePD: PD %d not present (%RX64)\n", iPdPt, pPdpt->a[iPdPt].u));
1022 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1023 }
1024 AssertMsg(pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK, ("GCPtr=%RGv\n", GCPtr));
1025
1026 /* Fetch the pgm pool shadow descriptor. */
1027 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(PGMCPU2PGM(pPGM)->CTX_SUFF(pPool), pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
1028 AssertReturn(pShwPde, VERR_INTERNAL_ERROR);
1029
1030 *ppShwPde = pShwPde;
1031 return VINF_SUCCESS;
1032}
1033
1034#ifndef IN_RC
1035
1036/**
1037 * Syncs the SHADOW page directory pointer for the specified address.
1038 *
1039 * Allocates backing pages in case the PDPT or PML4 entry is missing.
1040 *
1041 * The caller is responsible for making sure the guest has a valid PD before
1042 * calling this function.
1043 *
1044 * @returns VBox status.
1045 * @param pVCpu VMCPU handle.
1046 * @param GCPtr The address.
1047 * @param pGstPml4e Guest PML4 entry
1048 * @param pGstPdpe Guest PDPT entry
1049 * @param ppPD Receives address of page directory
1050 */
1051int pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E pGstPml4e, PX86PDPE pGstPdpe, PX86PDPAE *ppPD)
1052{
1053 PPGMCPU pPGM = &pVCpu->pgm.s;
1054 PVM pVM = pVCpu->CTX_SUFF(pVM);
1055 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1056 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1057 PX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pPGM, iPml4);
1058 bool fNestedPagingOrNoGstPaging = HWACCMIsNestedPagingActive(pVM) || !CPUMIsGuestPagingEnabled(pVCpu);
1059 PPGMPOOLPAGE pShwPage;
1060 int rc;
1061
1062 Assert(PGMIsLockOwner(pVM));
1063
1064 /* Allocate page directory pointer table if not present. */
1065 if ( !pPml4e->n.u1Present
1066 && !(pPml4e->u & X86_PML4E_PG_MASK))
1067 {
1068 RTGCPTR64 GCPml4;
1069 PGMPOOLKIND enmKind;
1070
1071 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1072
1073 if (fNestedPagingOrNoGstPaging)
1074 {
1075 /* AMD-V nested paging or real/protected mode without paging */
1076 GCPml4 = (RTGCPTR64)iPml4 << X86_PML4_SHIFT;
1077 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_PHYS;
1078 }
1079 else
1080 {
1081 Assert(pGstPml4e && pGstPdpe);
1082
1083 GCPml4 = pGstPml4e->u & X86_PML4E_PG_MASK;
1084 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT;
1085 }
1086
1087 /* Create a reference back to the PDPT by using the index in its shadow page. */
1088 rc = pgmPoolAlloc(pVM, GCPml4, enmKind, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, &pShwPage);
1089 AssertRCReturn(rc, rc);
1090 }
1091 else
1092 {
1093 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
1094 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1095
1096 pgmPoolCacheUsed(pPool, pShwPage);
1097 }
1098 /* The PDPT was cached or created; hook it up now. */
1099 pPml4e->u |= pShwPage->Core.Key
1100 | (pGstPml4e->u & ~(X86_PML4E_PG_MASK | X86_PML4E_AVL_MASK | X86_PML4E_PCD | X86_PML4E_PWT));
1101
1102 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1103 PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1104 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1105
1106 /* Allocate page directory if not present. */
1107 if ( !pPdpe->n.u1Present
1108 && !(pPdpe->u & X86_PDPE_PG_MASK))
1109 {
1110 RTGCPTR64 GCPdPt;
1111 PGMPOOLKIND enmKind;
1112
1113 if (fNestedPagingOrNoGstPaging)
1114 {
1115 /* AMD-V nested paging or real/protected mode without paging */
1116 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
1117 enmKind = PGMPOOLKIND_64BIT_PD_FOR_PHYS;
1118 }
1119 else
1120 {
1121 Assert(pGstPdpe);
1122
1123 GCPdPt = pGstPdpe->u & X86_PDPE_PG_MASK;
1124 enmKind = PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD;
1125 }
1126
1127 /* Create a reference back to the PDPT by using the index in its shadow page. */
1128 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, pShwPage->idx, iPdPt, &pShwPage);
1129 AssertRCReturn(rc, rc);
1130 }
1131 else
1132 {
1133 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
1134 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1135
1136 pgmPoolCacheUsed(pPool, pShwPage);
1137 }
1138 /* The PD was cached or created; hook it up now. */
1139 pPdpe->u |= pShwPage->Core.Key
1140 | (pGstPdpe->u & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));
1141
1142 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1143 return VINF_SUCCESS;
1144}
1145
1146
1147/**
1148 * Gets the SHADOW page directory pointer for the specified address (long mode).
1149 *
1150 * @returns VBox status.
1151 * @param pVCpu VMCPU handle.
1152 * @param GCPtr The address.
1153 * @param ppPdpt Receives address of pdpt
1154 * @param ppPD Receives address of page directory
1155 */
1156DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
1157{
1158 PPGMCPU pPGM = &pVCpu->pgm.s;
1159 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1160 PCX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pPGM, iPml4);
1161
1162 Assert(PGMIsLockOwner(PGMCPU2VM(pPGM)));
1163
1164 AssertReturn(pPml4e, VERR_INTERNAL_ERROR);
1165 if (ppPml4e)
1166 *ppPml4e = (PX86PML4E)pPml4e;
1167
1168 Log4(("pgmShwGetLongModePDPtr %RGv (%RHv) %RX64\n", GCPtr, pPml4e, pPml4e->u));
1169
1170 if (!pPml4e->n.u1Present)
1171 return VERR_PAGE_MAP_LEVEL4_NOT_PRESENT;
1172
1173 PVM pVM = pVCpu->CTX_SUFF(pVM);
1174 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1175 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
1176 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1177
1178 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1179 PCX86PDPT pPdpt = *ppPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1180 if (!pPdpt->a[iPdPt].n.u1Present)
1181 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1182
1183 pShwPage = pgmPoolGetPage(pPool, pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
1184 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1185
1186 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1187 return VINF_SUCCESS;
1188}
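
/*
 * Worked example (made-up address): the walk above indexes the long-mode
 * hierarchy with iPml4 = (GCPtr >> 39) & 0x1ff (address bits 47:39) and
 * iPdPt = (GCPtr >> 30) & 0x1ff (address bits 38:30).  For GCPtr = 0x40000000
 * (1 GiB) this yields iPml4 = 0 and iPdPt = 1, i.e. PML4 entry 0 and the
 * second entry of that PDPT.
 */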
1189
1190
1191/**
1192 * Syncs the SHADOW EPT page directory pointer for the specified address. Allocates
1193 * backing pages in case the PDPT or PML4 entry is missing.
1194 *
1195 * @returns VBox status.
1196 * @param pVCpu VMCPU handle.
1197 * @param GCPtr The address.
1198 * @param ppPdpt Receives address of pdpt
1199 * @param ppPD Receives address of page directory
1200 */
1201int pgmShwGetEPTPDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD)
1202{
1203 PPGMCPU pPGM = &pVCpu->pgm.s;
1204 PVM pVM = pVCpu->CTX_SUFF(pVM);
1205 const unsigned iPml4 = (GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
1206 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1207 PEPTPML4 pPml4;
1208 PEPTPML4E pPml4e;
1209 PPGMPOOLPAGE pShwPage;
1210 int rc;
1211
1212 Assert(HWACCMIsNestedPagingActive(pVM));
1213 Assert(PGMIsLockOwner(pVM));
1214
1215 pPml4 = (PEPTPML4)PGMPOOL_PAGE_2_PTR_BY_PGMCPU(pPGM, pPGM->CTX_SUFF(pShwPageCR3));
1216 Assert(pPml4);
1217
1218 /* Allocate page directory pointer table if not present. */
1219 pPml4e = &pPml4->a[iPml4];
1220 if ( !pPml4e->n.u1Present
1221 && !(pPml4e->u & EPT_PML4E_PG_MASK))
1222 {
1223 Assert(!(pPml4e->u & EPT_PML4E_PG_MASK));
1224 RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT;
1225
1226 rc = pgmPoolAlloc(pVM, GCPml4, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOL_IDX_NESTED_ROOT, iPml4, &pShwPage);
1227 AssertRCReturn(rc, rc);
1228 }
1229 else
1230 {
1231 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
1232 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1233
1234 pgmPoolCacheUsed(pPool, pShwPage);
1235 }
1236 /* The PDPT was cached or created; hook it up now and fill with the default value. */
1237 pPml4e->u = pShwPage->Core.Key;
1238 pPml4e->n.u1Present = 1;
1239 pPml4e->n.u1Write = 1;
1240 pPml4e->n.u1Execute = 1;
1241
1242 const unsigned iPdPt = (GCPtr >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
1243 PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1244 PEPTPDPTE pPdpe = &pPdpt->a[iPdPt];
1245
1246 if (ppPdpt)
1247 *ppPdpt = pPdpt;
1248
1249 /* Allocate page directory if not present. */
1250 if ( !pPdpe->n.u1Present
1251 && !(pPdpe->u & EPT_PDPTE_PG_MASK))
1252 {
1253 RTGCPTR64 GCPdPt = (RTGCPTR64)iPdPt << EPT_PDPT_SHIFT;
1254
1255 rc = pgmPoolAlloc(pVM, GCPdPt, PGMPOOLKIND_64BIT_PD_FOR_PHYS, pShwPage->idx, iPdPt, &pShwPage);
1256 AssertRCReturn(rc, rc);
1257 }
1258 else
1259 {
1260 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & EPT_PDPTE_PG_MASK);
1261 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1262
1263 pgmPoolCacheUsed(pPool, pShwPage);
1264 }
1265 /* The PD was cached or created; hook it up now and fill with the default value. */
1266 pPdpe->u = pShwPage->Core.Key;
1267 pPdpe->n.u1Present = 1;
1268 pPdpe->n.u1Write = 1;
1269 pPdpe->n.u1Execute = 1;
1270
1271 *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1272 return VINF_SUCCESS;
1273}
1274
1275#endif /* !IN_RC */
1276
1277/**
1278 * Gets effective Guest OS page information.
1279 *
1280 * When GCPtr is in a big page, the function will return as if it was a normal
1281 * 4KB page. If the need for distinguishing between big and normal page becomes
1282 * necessary at a later point, a PGMGstGetPage() will be created for that
1283 * purpose.
1284 *
1285 * @returns VBox status.
1286 * @param pVCpu VMCPU handle.
1287 * @param GCPtr Guest Context virtual address of the page.
1288 * @param pfFlags Where to store the flags. These are X86_PTE_*, even for big pages.
1289 * @param pGCPhys Where to store the GC physical address of the page.
1290 * This is page aligned.
1291 */
1292VMMDECL(int) PGMGstGetPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
1293{
1294 return PGM_GST_PFN(GetPage, pVCpu)(pVCpu, GCPtr, pfFlags, pGCPhys);
1295}
1296
1297
1298/**
1299 * Checks if the page is present.
1300 *
1301 * @returns true if the page is present.
1302 * @returns false if the page is not present.
1303 * @param pVCpu VMCPU handle.
1304 * @param GCPtr Address within the page.
1305 */
1306VMMDECL(bool) PGMGstIsPagePresent(PVMCPU pVCpu, RTGCPTR GCPtr)
1307{
1308 int rc = PGMGstGetPage(pVCpu, GCPtr, NULL, NULL);
1309 return RT_SUCCESS(rc);
1310}
1311
1312
1313/**
1314 * Sets (replaces) the page flags for a range of pages in the guest's tables.
1315 *
1316 * @returns VBox status.
1317 * @param pVCpu VMCPU handle.
1318 * @param GCPtr The address of the first page.
1319 * @param cb The size of the range in bytes.
1320 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
1321 */
1322VMMDECL(int) PGMGstSetPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
1323{
1324 return PGMGstModifyPage(pVCpu, GCPtr, cb, fFlags, 0);
1325}
1326
1327
1328/**
1329 * Modify page flags for a range of pages in the guest's tables
1330 *
1331 * The existing flags are ANDed with the fMask and ORed with the fFlags.
1332 *
1333 * @returns VBox status code.
1334 * @param pVCpu VMCPU handle.
1335 * @param GCPtr Virtual address of the first page in the range.
1336 * @param cb Size (in bytes) of the range to apply the modification to.
1337 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
1338 * @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
1339 * Be very CAREFUL when ~'ing constants which could be 32-bit!
1340 */
1341VMMDECL(int) PGMGstModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
1342{
1343 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,GstModifyPage), a);
1344
1345 /*
1346 * Validate input.
1347 */
1348 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
1349 Assert(cb);
1350
1351 LogFlow(("PGMGstModifyPage %RGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));
1352
1353 /*
1354 * Adjust input.
1355 */
1356 cb += GCPtr & PAGE_OFFSET_MASK;
1357 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
1358 GCPtr = (GCPtr & PAGE_BASE_GC_MASK);
1359
1360 /*
1361 * Call worker.
1362 */
1363 int rc = PGM_GST_PFN(ModifyPage, pVCpu)(pVCpu, GCPtr, cb, fFlags, fMask);
1364
1365 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,GstModifyPage), a);
1366 return rc;
1367}
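
/*
 * Worked example of the input adjustment above (made-up values): for
 * GCPtr = 0x1234 and cb = 0x10, the page offset 0x234 is added (cb = 0x244),
 * cb is then rounded up to 0x1000 and GCPtr is masked down to 0x1000, so the
 * worker sees exactly one whole page covering the original byte range.
 */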
1368
1369#ifdef IN_RING3
1370
1371/**
1372 * Performs the lazy mapping of the 32-bit guest PD.
1373 *
1374 * @returns Pointer to the mapping.
1375 * @param pPGM The PGM instance data.
1376 */
1377PX86PD pgmGstLazyMap32BitPD(PPGMCPU pPGM)
1378{
1379 Assert(!pPGM->CTX_SUFF(pGst32BitPd));
1380 PVM pVM = PGMCPU2VM(pPGM);
1381 pgmLock(pVM);
1382
1383 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, pPGM->GCPhysCR3);
1384 AssertReturn(pPage, NULL);
1385
1386 RTHCPTR HCPtrGuestCR3;
1387 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pPGM->GCPhysCR3 & X86_CR3_PAGE_MASK, (void **)&HCPtrGuestCR3);
1388 AssertRCReturn(rc, NULL);
1389
1390 pPGM->pGst32BitPdR3 = (R3PTRTYPE(PX86PD))HCPtrGuestCR3;
1391# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1392 pPGM->pGst32BitPdR0 = (R0PTRTYPE(PX86PD))HCPtrGuestCR3;
1393# endif
1394
1395 pgmUnlock(pVM);
1396 return pPGM->CTX_SUFF(pGst32BitPd);
1397}
1398
1399
1400/**
1401 * Performs the lazy mapping of the PAE guest PDPT.
1402 *
1403 * @returns Pointer to the mapping.
1404 * @param pPGM The PGM instance data.
1405 */
1406PX86PDPT pgmGstLazyMapPaePDPT(PPGMCPU pPGM)
1407{
1408 Assert(!pPGM->CTX_SUFF(pGstPaePdpt));
1409 PVM pVM = PGMCPU2VM(pPGM);
1410 pgmLock(pVM);
1411
1412 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, pPGM->GCPhysCR3);
1413 AssertReturn(pPage, NULL);
1414
1415 RTHCPTR HCPtrGuestCR3;
1416 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pPGM->GCPhysCR3 & X86_CR3_PAE_PAGE_MASK, (void **)&HCPtrGuestCR3); /** @todo r=bird: This GCPhysR3 masking isn't necessary. */
1417 AssertRCReturn(rc, NULL);
1418
1419 pPGM->pGstPaePdptR3 = (R3PTRTYPE(PX86PDPT))HCPtrGuestCR3;
1420# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1421 pPGM->pGstPaePdptR0 = (R0PTRTYPE(PX86PDPT))HCPtrGuestCR3;
1422# endif
1423
1424 pgmUnlock(pVM);
1425 return pPGM->CTX_SUFF(pGstPaePdpt);
1426}
1427
1428#endif /* IN_RING3 */
1429
1430#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1431/**
1432 * Performs the lazy mapping / updating of a PAE guest PD.
1433 *
1434 * @returns Pointer to the mapping.
1435 * @param pPGM The PGM instance data.
1436 * @param iPdpt Which PD entry to map (0..3).
1437 */
1438PX86PDPAE pgmGstLazyMapPaePD(PPGMCPU pPGM, uint32_t iPdpt)
1439{
1440 PVM pVM = PGMCPU2VM(pPGM);
1441 pgmLock(pVM);
1442
1443 PX86PDPT pGuestPDPT = pPGM->CTX_SUFF(pGstPaePdpt);
1444 Assert(pGuestPDPT);
1445 Assert(pGuestPDPT->a[iPdpt].n.u1Present);
1446 RTGCPHYS GCPhys = pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK;
1447 bool const fChanged = pPGM->aGCPhysGstPaePDs[iPdpt] != GCPhys;
1448
1449 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
1450 if (RT_LIKELY(pPage))
1451 {
1452 int rc = VINF_SUCCESS;
1453 RTRCPTR RCPtr = NIL_RTRCPTR;
1454 RTHCPTR HCPtr = NIL_RTHCPTR;
1455#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1456 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &HCPtr);
1457 AssertRC(rc);
1458#endif
1459 if (RT_SUCCESS(rc) && fChanged)
1460 {
1461 RCPtr = (RTRCPTR)(RTRCUINTPTR)(pVM->pgm.s.GCPtrCR3Mapping + (1 + iPdpt) * PAGE_SIZE);
1462 rc = PGMMap(pVM, (RTRCUINTPTR)RCPtr, PGM_PAGE_GET_HCPHYS(pPage), PAGE_SIZE, 0);
1463 }
1464 if (RT_SUCCESS(rc))
1465 {
1466 pPGM->apGstPaePDsR3[iPdpt] = (R3PTRTYPE(PX86PDPAE))HCPtr;
1467# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1468 pPGM->apGstPaePDsR0[iPdpt] = (R0PTRTYPE(PX86PDPAE))HCPtr;
1469# endif
1470 if (fChanged)
1471 {
1472 pPGM->aGCPhysGstPaePDs[iPdpt] = GCPhys;
1473 pPGM->apGstPaePDsRC[iPdpt] = (RCPTRTYPE(PX86PDPAE))RCPtr;
1474 }
1475
1476 pgmUnlock(pVM);
1477 return pPGM->CTX_SUFF(apGstPaePDs)[iPdpt];
1478 }
1479 }
1480
1481 /* Invalid page or some failure, invalidate the entry. */
1482 pPGM->aGCPhysGstPaePDs[iPdpt] = NIL_RTGCPHYS;
1483 pPGM->apGstPaePDsR3[iPdpt] = 0;
1484# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1485 pPGM->apGstPaePDsR0[iPdpt] = 0;
1486# endif
1487 pPGM->apGstPaePDsRC[iPdpt] = 0;
1488
1489 pgmUnlock(pVM);
1490 return NULL;
1491}
1492#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1493
1494
1495#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R3
1496/**
1497 * Performs the lazy mapping of the 32-bit guest PD.
1498 *
1499 * @returns Pointer to the mapping.
1500 * @param pPGM The PGM instance data.
1501 */
1502PX86PML4 pgmGstLazyMapPml4(PPGMCPU pPGM)
1503{
1504 Assert(!pPGM->CTX_SUFF(pGstAmd64Pml4));
1505 PVM pVM = PGMCPU2VM(pPGM);
1506 pgmLock(pVM);
1507
1508 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, pPGM->GCPhysCR3);
1509 AssertReturn(pPage, NULL);
1510
1511 RTHCPTR HCPtrGuestCR3;
1512 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pPGM->GCPhysCR3 & X86_CR3_AMD64_PAGE_MASK, (void **)&HCPtrGuestCR3); /** @todo r=bird: This GCPhysCR3 masking isn't necessary. */
1513 AssertRCReturn(rc, NULL);
1514
1515 pPGM->pGstAmd64Pml4R3 = (R3PTRTYPE(PX86PML4))HCPtrGuestCR3;
1516# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1517 pPGM->pGstAmd64Pml4R0 = (R0PTRTYPE(PX86PML4))HCPtrGuestCR3;
1518# endif
1519
1520 pgmUnlock(pVM);
1521 return pPGM->CTX_SUFF(pGstAmd64Pml4);
1522}
1523#endif /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R3 */
1524
1525
1526/**
1527 * Gets the specified page directory pointer table entry.
1528 *
1529 * @returns PDP entry
1530 * @param pVCpu VMCPU handle.
1531 * @param iPdpt PDPT index
1532 */
1533VMMDECL(X86PDPE) PGMGstGetPaePDPtr(PVMCPU pVCpu, unsigned iPdpt)
1534{
1535 Assert(iPdpt <= 3);
1536 return pgmGstGetPaePDPTPtr(&pVCpu->pgm.s)->a[iPdpt & 3];
1537}
1538
1539
1540/**
1541 * Gets the current CR3 register value for the shadow memory context.
1542 * @returns CR3 value.
1543 * @param pVCpu VMCPU handle.
1544 */
1545VMMDECL(RTHCPHYS) PGMGetHyperCR3(PVMCPU pVCpu)
1546{
1547 PPGMPOOLPAGE pPoolPage = pVCpu->pgm.s.CTX_SUFF(pShwPageCR3);
1548 AssertPtrReturn(pPoolPage, 0);
1549 return pPoolPage->Core.Key;
1550}
1551
1552
1553/**
1554 * Gets the current CR3 register value for the nested memory context.
1555 * @returns CR3 value.
1556 * @param pVCpu VMCPU handle.
1557 */
1558VMMDECL(RTHCPHYS) PGMGetNestedCR3(PVMCPU pVCpu, PGMMODE enmShadowMode)
1559{
1560 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1561 return pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key;
1562}
1563
1564
1565/**
1566 * Gets the current CR3 register value for the HC intermediate memory context.
1567 * @returns CR3 value.
1568 * @param pVM The VM handle.
1569 */
1570VMMDECL(RTHCPHYS) PGMGetInterHCCR3(PVM pVM)
1571{
1572 switch (pVM->pgm.s.enmHostMode)
1573 {
1574 case SUPPAGINGMODE_32_BIT:
1575 case SUPPAGINGMODE_32_BIT_GLOBAL:
1576 return pVM->pgm.s.HCPhysInterPD;
1577
1578 case SUPPAGINGMODE_PAE:
1579 case SUPPAGINGMODE_PAE_GLOBAL:
1580 case SUPPAGINGMODE_PAE_NX:
1581 case SUPPAGINGMODE_PAE_GLOBAL_NX:
1582 return pVM->pgm.s.HCPhysInterPaePDPT;
1583
1584 case SUPPAGINGMODE_AMD64:
1585 case SUPPAGINGMODE_AMD64_GLOBAL:
1586 case SUPPAGINGMODE_AMD64_NX:
1587 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
1588 return pVM->pgm.s.HCPhysInterPaePDPT;
1589
1590 default:
1591 AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode));
1592 return ~0;
1593 }
1594}
1595
1596
1597/**
1598 * Gets the current CR3 register value for the RC intermediate memory context.
1599 * @returns CR3 value.
1600 * @param pVM The VM handle.
1601 * @param pVCpu VMCPU handle.
1602 */
1603VMMDECL(RTHCPHYS) PGMGetInterRCCR3(PVM pVM, PVMCPU pVCpu)
1604{
1605 switch (pVCpu->pgm.s.enmShadowMode)
1606 {
1607 case PGMMODE_32_BIT:
1608 return pVM->pgm.s.HCPhysInterPD;
1609
1610 case PGMMODE_PAE:
1611 case PGMMODE_PAE_NX:
1612 return pVM->pgm.s.HCPhysInterPaePDPT;
1613
1614 case PGMMODE_AMD64:
1615 case PGMMODE_AMD64_NX:
1616 return pVM->pgm.s.HCPhysInterPaePML4;
1617
1618 case PGMMODE_EPT:
1619 case PGMMODE_NESTED:
1620 return 0; /* not relevant */
1621
1622 default:
1623 AssertMsgFailed(("enmShadowMode=%d\n", pVCpu->pgm.s.enmShadowMode));
1624 return ~0;
1625 }
1626}
1627
1628
1629/**
1630 * Gets the CR3 register value for the 32-Bit intermediate memory context.
1631 * @returns CR3 value.
1632 * @param pVM The VM handle.
1633 */
1634VMMDECL(RTHCPHYS) PGMGetInter32BitCR3(PVM pVM)
1635{
1636 return pVM->pgm.s.HCPhysInterPD;
1637}
1638
1639
1640/**
1641 * Gets the CR3 register value for the PAE intermediate memory context.
1642 * @returns CR3 value.
1643 * @param pVM The VM handle.
1644 */
1645VMMDECL(RTHCPHYS) PGMGetInterPaeCR3(PVM pVM)
1646{
1647 return pVM->pgm.s.HCPhysInterPaePDPT;
1648}
1649
1650
1651/**
1652 * Gets the CR3 register value for the AMD64 intermediate memory context.
1653 * @returns CR3 value.
1654 * @param pVM The VM handle.
1655 */
1656VMMDECL(RTHCPHYS) PGMGetInterAmd64CR3(PVM pVM)
1657{
1658 return pVM->pgm.s.HCPhysInterPaePML4;
1659}
1660
1661
1662/**
1663 * Performs and schedules necessary updates following a CR3 load or reload.
1664 *
1665 * This will normally involve mapping the guest PD or nPDPT
1666 *
1667 * @returns VBox status code.
1668 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
1669 * safely be ignored and overridden since the FF will be set too then.
1670 * @param pVCpu VMCPU handle.
1671 * @param cr3 The new cr3.
1672 * @param fGlobal Indicates whether this is a global flush or not.
1673 */
1674VMMDECL(int) PGMFlushTLB(PVMCPU pVCpu, uint64_t cr3, bool fGlobal)
1675{
1676 PVM pVM = pVCpu->CTX_SUFF(pVM);
1677
1678 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLB), a);
1679
1680 /*
1681 * Always flag the necessary updates; necessary for hardware acceleration
1682 */
1683 /** @todo optimize this, it shouldn't always be necessary. */
1684 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
1685 if (fGlobal)
1686 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1687 LogFlow(("PGMFlushTLB: cr3=%RX64 OldCr3=%RX64 fGlobal=%d\n", cr3, pVCpu->pgm.s.GCPhysCR3, fGlobal));
1688
1689 /*
1690 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
1691 */
1692 int rc = VINF_SUCCESS;
1693 RTGCPHYS GCPhysCR3;
1694 switch (pVCpu->pgm.s.enmGuestMode)
1695 {
1696 case PGMMODE_PAE:
1697 case PGMMODE_PAE_NX:
1698 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1699 break;
1700 case PGMMODE_AMD64:
1701 case PGMMODE_AMD64_NX:
1702 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
1703 break;
1704 default:
1705 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1706 break;
1707 }
1708
1709 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
1710 {
1711 RTGCPHYS GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
1712 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
1713 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
1714 if (RT_LIKELY(rc == VINF_SUCCESS))
1715 {
1716 if (pgmMapAreMappingsFloating(&pVM->pgm.s))
1717 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1718 }
1719 else
1720 {
1721 AssertMsg(rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
1722 Assert(VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_PGM_SYNC_CR3));
1723 pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3;
1724 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MAP_CR3;
1725 if (pgmMapAreMappingsFloating(&pVM->pgm.s))
1726 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;
1727 }
1728
1729 if (fGlobal)
1730 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLBNewCR3Global));
1731 else
1732 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLBNewCR3));
1733 }
1734 else
1735 {
1736# ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
1737 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1738 if (pPool->cDirtyPages)
1739 {
1740 pgmLock(pVM);
1741 pgmPoolResetDirtyPages(pVM);
1742 pgmUnlock(pVM);
1743 }
1744# endif
1745 /*
1746 * Check if we have a pending update of the CR3 monitoring.
1747 */
1748 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
1749 {
1750 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1751 Assert(!pVM->pgm.s.fMappingsFixed); Assert(!pVM->pgm.s.fMappingsDisabled);
1752 }
1753 if (fGlobal)
1754 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLBSameCR3Global));
1755 else
1756 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLBSameCR3));
1757 }
1758
1759 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLB), a);
1760 return rc;
1761}
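/*
 * Illustrative caller sketch (disabled): how a MOV CR3 handler might hand the
 * new value to PGMFlushTLB and simply pass VINF_PGM_SYNC_CR3 up, relying on the
 * force-action flags set above.  Assumes CPUMGetGuestCR3 already reflects the
 * freshly written value.
 */
#if 0
static int examplePgmFlushTlbAfterMovCr3(PVMCPU pVCpu)
{
    uint64_t const cr3 = CPUMGetGuestCR3(pVCpu);    /* assumed to hold the new value */
    /* Non-global flush; PGMFlushTLB sets VMCPU_FF_PGM_SYNC_CR3* as needed. */
    int rc = PGMFlushTLB(pVCpu, cr3, false /*fGlobal*/);
    /* VINF_PGM_SYNC_CR3 can be returned as-is; the FF triggers PGMSyncCR3 later. */
    return rc;
}
#endif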
1762
1763
1764/**
1765 * Performs and schedules necessary updates following a CR3 load or reload when
1766 * using nested or extended paging.
1767 *
1768 * This API is an alternative to PGMFlushTLB that avoids actually flushing the
1769 * TLB and triggering a SyncCR3.
1770 *
1771 * This will normally involve mapping the guest PD or nPDPT.
1772 *
1773 * @returns VBox status code.
1774 * @retval VINF_SUCCESS.
1775 * @retval (If called when not in nested mode: VINF_PGM_SYNC_CR3 if monitoring
1776 * requires a CR3 sync. This can safely be ignored and overridden since
1777 * the FF will be set too then.)
1778 * @param pVCpu VMCPU handle.
1779 * @param cr3 The new cr3.
1780 */
1781VMMDECL(int) PGMUpdateCR3(PVMCPU pVCpu, uint64_t cr3)
1782{
1783 PVM pVM = pVCpu->CTX_SUFF(pVM);
1784
1785 LogFlow(("PGMUpdateCR3: cr3=%RX64 OldCr3=%RX64\n", cr3, pVCpu->pgm.s.GCPhysCR3));
1786
1787 /* We assume we're only called in nested paging mode. */
1788 Assert(HWACCMIsNestedPagingActive(pVM) || pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
1789 Assert(pVM->pgm.s.fMappingsDisabled);
1790 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3));
1791
1792 /*
1793 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
1794 */
1795 int rc = VINF_SUCCESS;
1796 RTGCPHYS GCPhysCR3;
1797 switch (pVCpu->pgm.s.enmGuestMode)
1798 {
1799 case PGMMODE_PAE:
1800 case PGMMODE_PAE_NX:
1801 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1802 break;
1803 case PGMMODE_AMD64:
1804 case PGMMODE_AMD64_NX:
1805 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
1806 break;
1807 default:
1808 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1809 break;
1810 }
1811 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
1812 {
1813 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
1814 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
1815 AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */ /** @todo this isn't true for the mac, but we need hw to test/fix this. */
1816 }
1817 return rc;
1818}
1819
1820
1821/**
1822 * Synchronize the paging structures.
1823 *
1824 * This function is called in response to the VM_FF_PGM_SYNC_CR3 and
1825 * VM_FF_PGM_SYNC_CR3_NONGLOBAL. Those two force action flags are set
1826 * in several places, most importantly whenever the CR3 is loaded.
1827 *
1828 * @returns VBox status code.
1829 * @param pVCpu VMCPU handle.
1830 * @param cr0 Guest context CR0 register
1831 * @param cr3 Guest context CR3 register
1832 * @param cr4 Guest context CR4 register
1833 * @param fGlobal Including global page directories or not
1834 */
1835VMMDECL(int) PGMSyncCR3(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
1836{
1837 PVM pVM = pVCpu->CTX_SUFF(pVM);
1838 int rc;
1839
1840 /*
1841 * The pool may have pending stuff and even require a return to ring-3 to
1842 * clear the whole thing.
1843 */
1844 rc = pgmPoolSyncCR3(pVCpu);
1845 if (rc != VINF_SUCCESS)
1846 return rc;
1847
1848 /*
1849 * We might be called when we shouldn't.
1850 *
1851 * The mode switching will ensure that the PD is resynced
1852 * after every mode switch. So, if we find ourselves here
1853 * when in protected or real mode we can safely disable the
1854 * FF and return immediately.
1855 */
1856 if (pVCpu->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
1857 {
1858 Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
1859 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
1860 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1861 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
1862 return VINF_SUCCESS;
1863 }
1864
1865 /* If global pages are not supported, then all flushes are global. */
1866 if (!(cr4 & X86_CR4_PGE))
1867 fGlobal = true;
1868 LogFlow(("PGMSyncCR3: cr0=%RX64 cr3=%RX64 cr4=%RX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
1869 VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)));
1870
1871 /*
1872 * Check if we need to finish an aborted MapCR3 call (see PGMFlushTLB).
1873 * This should be done before SyncCR3.
1874 */
1875 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MAP_CR3)
1876 {
1877 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MAP_CR3;
1878
1879 RTGCPHYS GCPhysCR3Old = pVCpu->pgm.s.GCPhysCR3;
1880 RTGCPHYS GCPhysCR3;
1881 switch (pVCpu->pgm.s.enmGuestMode)
1882 {
1883 case PGMMODE_PAE:
1884 case PGMMODE_PAE_NX:
1885 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1886 break;
1887 case PGMMODE_AMD64:
1888 case PGMMODE_AMD64_NX:
1889 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
1890 break;
1891 default:
1892 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1893 break;
1894 }
1895
1896 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
1897 {
1898 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
1899 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
1900 }
1901 /* Make sure we check for pending pgm pool syncs as we clear VMCPU_FF_PGM_SYNC_CR3 later on! */
1902 if ( rc == VINF_PGM_SYNC_CR3
1903 || (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL))
1904 {
1905 Log(("PGMSyncCR3: pending pgm pool sync after MapCR3!\n"));
1906#ifdef IN_RING3
1907 rc = pgmPoolSyncCR3(pVCpu);
1908#else
1909 if (rc == VINF_PGM_SYNC_CR3)
1910 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3Old;
1911 return VINF_PGM_SYNC_CR3;
1912#endif
1913 }
1914 AssertRCReturn(rc, rc);
1915 AssertRCSuccessReturn(rc, VERR_INTERNAL_ERROR);
1916 }
1917
1918 /*
1919 * Let the 'Bth' function do the work and we'll just keep track of the flags.
1920 */
1921 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
1922 rc = PGM_BTH_PFN(SyncCR3, pVCpu)(pVCpu, cr0, cr3, cr4, fGlobal);
1923 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
1924 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
1925 if (rc == VINF_SUCCESS)
1926 {
1927 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)
1928 {
1929 /* Go back to ring 3 if a pgm pool sync is again pending. */
1930 return VINF_PGM_SYNC_CR3;
1931 }
1932
1933 if (!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
1934 {
1935 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
1936 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1937 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
1938 }
1939
1940 /*
1941 * Check if we have a pending update of the CR3 monitoring.
1942 */
1943 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
1944 {
1945 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1946 Assert(!pVM->pgm.s.fMappingsFixed); Assert(!pVM->pgm.s.fMappingsDisabled);
1947 }
1948 }
1949
1950 /*
1951 * Now flush the CR3 (guest context).
1952 */
1953 if (rc == VINF_SUCCESS)
1954 PGM_INVL_VCPU_TLBS(pVCpu);
1955 return rc;
1956}
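/*
 * Illustrative sketch (disabled): the typical force-action driven invocation,
 * feeding the current guest control registers to PGMSyncCR3.  Assumes the
 * CPUMGetGuestCR0/CR3/CR4 getters return the up-to-date guest values.
 */
#if 0
static int examplePgmSyncCr3IfPending(PVMCPU pVCpu)
{
    if (   VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3)
        || VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
    {
        bool const fGlobal = VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
        return PGMSyncCR3(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR3(pVCpu),
                          CPUMGetGuestCR4(pVCpu), fGlobal);
    }
    return VINF_SUCCESS;
}
#endif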
1957
1958
1959/**
1960 * Called whenever CR0 or CR4 changes in a way which may affect
1961 * the paging mode.
1962 *
1963 * @returns VBox status code, with the following informational code for
1964 * VM scheduling.
1965 * @retval VINF_SUCCESS if there was no change, or it was successfully dealt with.
1966 * @retval VINF_PGM_CHANGE_MODE if we're in RC or R0 and the mode changes.
1967 * (I.e. not in R3.)
1968 * @retval VINF_EM_SUSPEND or VINF_EM_OFF on a fatal runtime error. (R3 only)
1969 *
1970 * @param pVCpu VMCPU handle.
1971 * @param cr0 The new cr0.
1972 * @param cr4 The new cr4.
1973 * @param efer The new extended feature enable register.
1974 */
1975VMMDECL(int) PGMChangeMode(PVMCPU pVCpu, uint64_t cr0, uint64_t cr4, uint64_t efer)
1976{
1977 PVM pVM = pVCpu->CTX_SUFF(pVM);
1978 PGMMODE enmGuestMode;
1979
1980 /*
1981 * Calc the new guest mode.
1982 */
1983 if (!(cr0 & X86_CR0_PE))
1984 enmGuestMode = PGMMODE_REAL;
1985 else if (!(cr0 & X86_CR0_PG))
1986 enmGuestMode = PGMMODE_PROTECTED;
1987 else if (!(cr4 & X86_CR4_PAE))
1988 enmGuestMode = PGMMODE_32_BIT;
1989 else if (!(efer & MSR_K6_EFER_LME))
1990 {
1991 if (!(efer & MSR_K6_EFER_NXE))
1992 enmGuestMode = PGMMODE_PAE;
1993 else
1994 enmGuestMode = PGMMODE_PAE_NX;
1995 }
1996 else
1997 {
1998 if (!(efer & MSR_K6_EFER_NXE))
1999 enmGuestMode = PGMMODE_AMD64;
2000 else
2001 enmGuestMode = PGMMODE_AMD64_NX;
2002 }
2003
2004 /*
2005 * Did it change?
2006 */
2007 if (pVCpu->pgm.s.enmGuestMode == enmGuestMode)
2008 return VINF_SUCCESS;
2009
2010 /* Flush the TLB */
2011 PGM_INVL_VCPU_TLBS(pVCpu);
2012
2013#ifdef IN_RING3
2014 return PGMR3ChangeMode(pVM, pVCpu, enmGuestMode);
2015#else
2016 LogFlow(("PGMChangeMode: returns VINF_PGM_CHANGE_MODE.\n"));
2017 return VINF_PGM_CHANGE_MODE;
2018#endif
2019}
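/*
 * Illustrative sketch (disabled): reacting to a control register write which may
 * alter the paging mode.  Outside ring-3 the VINF_PGM_CHANGE_MODE status must be
 * passed up so ring-3 can perform the actual switch.  Assumes the CPUM getters
 * reflect the freshly written values.
 */
#if 0
static int examplePgmChangeModeAfterCrWrite(PVMCPU pVCpu)
{
    int rc = PGMChangeMode(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR4(pVCpu),
                           CPUMGetGuestEFER(pVCpu));
# ifndef IN_RING3
    if (rc == VINF_PGM_CHANGE_MODE)
        return rc; /* let the caller bounce to ring-3 for the actual mode switch */
# endif
    return rc;
}
#endif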
2020
2021
2022/**
2023 * Gets the current guest paging mode.
2024 *
2025 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
2026 *
2027 * @returns The current paging mode.
2028 * @param pVCpu VMCPU handle.
2029 */
2030VMMDECL(PGMMODE) PGMGetGuestMode(PVMCPU pVCpu)
2031{
2032 return pVCpu->pgm.s.enmGuestMode;
2033}
2034
2035
2036/**
2037 * Gets the current shadow paging mode.
2038 *
2039 * @returns The current paging mode.
2040 * @param pVCpu VMCPU handle.
2041 */
2042VMMDECL(PGMMODE) PGMGetShadowMode(PVMCPU pVCpu)
2043{
2044 return pVCpu->pgm.s.enmShadowMode;
2045}
2046
2047/**
2048 * Gets the current host paging mode.
2049 *
2050 * @returns The current paging mode.
2051 * @param pVM The VM handle.
2052 */
2053VMMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
2054{
2055 switch (pVM->pgm.s.enmHostMode)
2056 {
2057 case SUPPAGINGMODE_32_BIT:
2058 case SUPPAGINGMODE_32_BIT_GLOBAL:
2059 return PGMMODE_32_BIT;
2060
2061 case SUPPAGINGMODE_PAE:
2062 case SUPPAGINGMODE_PAE_GLOBAL:
2063 return PGMMODE_PAE;
2064
2065 case SUPPAGINGMODE_PAE_NX:
2066 case SUPPAGINGMODE_PAE_GLOBAL_NX:
2067 return PGMMODE_PAE_NX;
2068
2069 case SUPPAGINGMODE_AMD64:
2070 case SUPPAGINGMODE_AMD64_GLOBAL:
2071 return PGMMODE_AMD64;
2072
2073 case SUPPAGINGMODE_AMD64_NX:
2074 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
2075 return PGMMODE_AMD64_NX;
2076
2077 default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
2078 }
2079
2080 return PGMMODE_INVALID;
2081}
2082
2083
2084/**
2085 * Get mode name.
2086 *
2087 * @returns read-only name string.
2088 * @param enmMode The mode whose name is desired.
2089 */
2090VMMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
2091{
2092 switch (enmMode)
2093 {
2094 case PGMMODE_REAL: return "Real";
2095 case PGMMODE_PROTECTED: return "Protected";
2096 case PGMMODE_32_BIT: return "32-bit";
2097 case PGMMODE_PAE: return "PAE";
2098 case PGMMODE_PAE_NX: return "PAE+NX";
2099 case PGMMODE_AMD64: return "AMD64";
2100 case PGMMODE_AMD64_NX: return "AMD64+NX";
2101 case PGMMODE_NESTED: return "Nested";
2102 case PGMMODE_EPT: return "EPT";
2103 default: return "unknown mode value";
2104 }
2105}
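/*
 * Illustrative sketch (disabled): combining the mode getters above with
 * PGMGetModeName for logging, e.g. when diagnosing an unexpected mode switch.
 */
#if 0
static void exampleLogPagingModes(PVM pVM, PVMCPU pVCpu)
{
    Log(("PGM modes: guest=%s shadow=%s host=%s\n",
         PGMGetModeName(PGMGetGuestMode(pVCpu)),
         PGMGetModeName(PGMGetShadowMode(pVCpu)),
         PGMGetModeName(PGMGetHostMode(pVM))));
}
#endif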
2106
2107
2108/**
2109 * Check if any pgm pool pages are marked dirty (not monitored).
2110 *
2111 * @returns bool dirty/not dirty
2112 * @param pVM The VM to operate on.
2113 */
2114VMMDECL(bool) PGMHasDirtyPages(PVM pVM)
2115{
2116 return pVM->pgm.s.CTX_SUFF(pPool)->cDirtyPages != 0;
2117}
2118
2119/**
2120 * Check if the PGM lock is currently taken.
2121 *
2122 * @returns bool locked/not locked
2123 * @param pVM The VM to operate on.
2124 */
2125VMMDECL(bool) PGMIsLocked(PVM pVM)
2126{
2127 return PDMCritSectIsOwned(&pVM->pgm.s.CritSect);
2128}
2129
2130
2131/**
2132 * Check if this VCPU currently owns the PGM lock.
2133 *
2134 * @returns bool owner/not owner
2135 * @param pVM The VM to operate on.
2136 */
2137VMMDECL(bool) PGMIsLockOwner(PVM pVM)
2138{
2139 return PDMCritSectIsOwner(&pVM->pgm.s.CritSect);
2140}
2141
2142
2143/**
2144 * Enable or disable large page usage
2145 *
2146 * @param pVM The VM to operate on.
2147 * @param fUseLargePages Use/not use large pages
2148 */
2149VMMDECL(void) PGMSetLargePageUsage(PVM pVM, bool fUseLargePages)
2150{
2151 pVM->fUseLargePages = fUseLargePages;
2152}
2153
2154/**
2155 * Acquire the PGM lock.
2156 *
2157 * @returns VBox status code
2158 * @param pVM The VM to operate on.
2159 */
2160int pgmLock(PVM pVM)
2161{
2162 int rc = PDMCritSectEnter(&pVM->pgm.s.CritSect, VERR_SEM_BUSY);
2163#if defined(IN_RC) || defined(IN_RING0)
2164 if (rc == VERR_SEM_BUSY)
2165 rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_LOCK, 0);
2166#endif
2167 AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc));
2168 return rc;
2169}
2170
2171
2172/**
2173 * Release the PGM lock.
2174 *
2175 * @returns VBox status code
2176 * @param pVM The VM to operate on.
2177 */
2178void pgmUnlock(PVM pVM)
2179{
2180 PDMCritSectLeave(&pVM->pgm.s.CritSect);
2181}
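/*
 * Illustrative sketch (disabled): the canonical lock pattern used throughout
 * this file.  pgmSomePageWork() is a placeholder for whatever is done while
 * owning the lock; it is not a real function.
 */
#if 0
static int examplePgmLockUsage(PVM pVM)
{
    pgmLock(pVM);                   /* may trampoline to ring-3 in RC/R0 on contention */
    Assert(PGMIsLockOwner(pVM));
    int rc = pgmSomePageWork(pVM);  /* placeholder */
    pgmUnlock(pVM);
    return rc;
}
#endif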
2182
2183#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
2184
2185/** Common worker for PGMDynMapGCPage and PGMDynMapGCPageOff. */
2186DECLINLINE(int) pgmDynMapGCPageInternal(PVM pVM, RTGCPHYS GCPhys, void **ppv)
2187{
2188 pgmLock(pVM);
2189
2190 /*
2191 * Convert it to a writable page and pass it on to PGMDynMapHCPage.
2192 */
2193 int rc;
2194 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
2195 if (RT_LIKELY(pPage))
2196 {
2197 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
2198 if (RT_SUCCESS(rc))
2199 {
2200 //Log(("PGMDynMapGCPage: GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, pPage));
2201#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
2202 rc = pgmR0DynMapHCPageInlined(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage), ppv);
2203#else
2204 rc = PGMDynMapHCPage(pVM, PGM_PAGE_GET_HCPHYS(pPage), ppv);
2205#endif
2206 }
2207 else
2208 AssertRC(rc);
2209 }
2210 else
2211 {
2212 AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
2213 rc = VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2214 }
2215
2216 pgmUnlock(pVM);
2217 return rc;
2218}
2219
2220/**
2221 * Temporarily maps one guest page specified by GC physical address.
2222 * These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
2223 *
2224 * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
2225 * reused after 8 mappings (or perhaps a few more if you score with the cache).
2226 *
2227 * @returns VBox status.
2228 * @param pVM VM handle.
2229 * @param GCPhys GC Physical address of the page.
2230 * @param ppv Where to store the address of the mapping.
2231 */
2232VMMDECL(int) PGMDynMapGCPage(PVM pVM, RTGCPHYS GCPhys, void **ppv)
2233{
2234 AssertMsg(!(GCPhys & PAGE_OFFSET_MASK), ("GCPhys=%RGp\n", GCPhys));
2235 return pgmDynMapGCPageInternal(pVM, GCPhys, ppv);
2236}
2237
2238
2239/**
2240 * Temporarily maps one guest page specified by unaligned GC physical address.
2241 * These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
2242 *
2243 * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
2244 * reused after 8 mappings (or perhaps a few more if you score with the cache).
2245 *
2246 * The caller is aware that only the specified page is mapped and that really bad things
2247 * will happen if writing beyond the page!
2248 *
2249 * @returns VBox status.
2250 * @param pVM VM handle.
2251 * @param GCPhys GC Physical address within the page to be mapped.
2252 * @param ppv Where to store the address of the mapping address corresponding to GCPhys.
2253 */
2254VMMDECL(int) PGMDynMapGCPageOff(PVM pVM, RTGCPHYS GCPhys, void **ppv)
2255{
2256 void *pv;
2257 int rc = pgmDynMapGCPageInternal(pVM, GCPhys, &pv);
2258 if (RT_SUCCESS(rc))
2259 {
2260 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
2261 return VINF_SUCCESS;
2262 }
2263 return rc;
2264}
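/*
 * Illustrative sketch (disabled): temporarily mapping a page-aligned guest
 * physical address and peeking at the first dword.  The mapping is short-lived
 * (small ring of pages) and must not be cached by the caller.
 */
#if 0
static int examplePeekGuestPage(PVM pVM, RTGCPHYS GCPhys)
{
    void *pv;
    int rc = PGMDynMapGCPage(pVM, GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK, &pv);
    if (RT_SUCCESS(rc))
        Log(("First dword of %RGp: %#x\n", GCPhys, *(uint32_t *)pv));
    return rc;
}
#endif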
2265
2266# ifdef IN_RC
2267
2268/**
2269 * Temporarily maps one host page specified by HC physical address.
2270 *
2271 * Be WARNED that the dynamic page mapping area is small, 16 pages, thus the space is
2272 * reused after 16 mappings (or perhaps a few more if you score with the cache).
2273 *
2274 * @returns VINF_SUCCESS, will bail out to ring-3 on failure.
2275 * @param pVM VM handle.
2276 * @param HCPhys HC Physical address of the page.
2277 * @param ppv Where to store the address of the mapping. This is the
2278 * address of the PAGE not the exact address corresponding
2279 * to HCPhys. Use PGMDynMapHCPageOff if you care for the
2280 * page offset.
2281 */
2282VMMDECL(int) PGMDynMapHCPage(PVM pVM, RTHCPHYS HCPhys, void **ppv)
2283{
2284 AssertMsg(!(HCPhys & PAGE_OFFSET_MASK), ("HCPhys=%RHp\n", HCPhys));
2285
2286 /*
2287 * Check the cache.
2288 */
2289 register unsigned iCache;
2290 for (iCache = 0;iCache < RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache);iCache++)
2291 {
2292 static const uint8_t au8Trans[MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT][RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache)] =
2293 {
2294 { 0, 9, 10, 11, 12, 13, 14, 15},
2295 { 0, 1, 10, 11, 12, 13, 14, 15},
2296 { 0, 1, 2, 11, 12, 13, 14, 15},
2297 { 0, 1, 2, 3, 12, 13, 14, 15},
2298 { 0, 1, 2, 3, 4, 13, 14, 15},
2299 { 0, 1, 2, 3, 4, 5, 14, 15},
2300 { 0, 1, 2, 3, 4, 5, 6, 15},
2301 { 0, 1, 2, 3, 4, 5, 6, 7},
2302 { 8, 1, 2, 3, 4, 5, 6, 7},
2303 { 8, 9, 2, 3, 4, 5, 6, 7},
2304 { 8, 9, 10, 3, 4, 5, 6, 7},
2305 { 8, 9, 10, 11, 4, 5, 6, 7},
2306 { 8, 9, 10, 11, 12, 5, 6, 7},
2307 { 8, 9, 10, 11, 12, 13, 6, 7},
2308 { 8, 9, 10, 11, 12, 13, 14, 7},
2309 { 8, 9, 10, 11, 12, 13, 14, 15},
2310 };
2311 AssertCompile(RT_ELEMENTS(au8Trans) == 16);
2312 AssertCompile(RT_ELEMENTS(au8Trans[0]) == 8);
2313
2314 if (pVM->pgm.s.aHCPhysDynPageMapCache[iCache] == HCPhys)
2315 {
2316 int iPage = au8Trans[pVM->pgm.s.iDynPageMapLast][iCache];
2317
2318 /* The cache can get out of sync with locked entries. (10 locked, 2 overwrites its cache position, last = 11, lookup 2 -> page 10 instead of 2) */
2319 if ((pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage].u & X86_PTE_PG_MASK) == HCPhys)
2320 {
2321 void *pv = pVM->pgm.s.pbDynPageMapBaseGC + (iPage << PAGE_SHIFT);
2322 *ppv = pv;
2323 STAM_COUNTER_INC(&pVM->pgm.s.StatRCDynMapCacheHits);
2324 Log4(("PGMGCDynMapHCPage: HCPhys=%RHp pv=%p iPage=%d iCache=%d\n", HCPhys, pv, iPage, iCache));
2325 return VINF_SUCCESS;
2326 }
2327 LogFlow(("Out of sync entry %d\n", iPage));
2328 }
2329 }
2330 AssertCompile(RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) == 8);
2331 AssertCompile((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) == 16);
2332 STAM_COUNTER_INC(&pVM->pgm.s.StatRCDynMapCacheMisses);
2333
2334 /*
2335 * Update the page tables.
2336 */
2337 unsigned iPage = pVM->pgm.s.iDynPageMapLast;
2338 unsigned i;
2339 for (i = 0; i < (MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT); i++)
2340 {
2341 pVM->pgm.s.iDynPageMapLast = iPage = (iPage + 1) & ((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) - 1);
2342 if (!pVM->pgm.s.aLockedDynPageMapCache[iPage])
2343 break;
2344 iPage++;
2345 }
2346 AssertRelease(i != (MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT));
2347
2348 pVM->pgm.s.aHCPhysDynPageMapCache[iPage & (RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) - 1)] = HCPhys;
2349 pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage].u = (uint32_t)HCPhys | X86_PTE_P | X86_PTE_A | X86_PTE_D;
2350 pVM->pgm.s.paDynPageMapPaePTEsGC[iPage].u = HCPhys | X86_PTE_P | X86_PTE_A | X86_PTE_D;
2351 pVM->pgm.s.aLockedDynPageMapCache[iPage] = 0;
2352
2353 void *pv = pVM->pgm.s.pbDynPageMapBaseGC + (iPage << PAGE_SHIFT);
2354 *ppv = pv;
2355 ASMInvalidatePage(pv);
2356 Log4(("PGMGCDynMapHCPage: HCPhys=%RHp pv=%p iPage=%d\n", HCPhys, pv, iPage));
2357 return VINF_SUCCESS;
2358}
2359
2360
2361/**
2362 * Temporarily lock a dynamic page to prevent it from being reused.
2363 *
2364 * @param pVM VM handle.
2365 * @param GCPage GC address of page
2366 */
2367VMMDECL(void) PGMDynLockHCPage(PVM pVM, RCPTRTYPE(uint8_t *) GCPage)
2368{
2369 unsigned iPage;
2370
2371 Assert(GCPage >= pVM->pgm.s.pbDynPageMapBaseGC && GCPage < (pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE));
2372 iPage = ((uintptr_t)(GCPage - pVM->pgm.s.pbDynPageMapBaseGC)) >> PAGE_SHIFT;
2373 ASMAtomicIncU32(&pVM->pgm.s.aLockedDynPageMapCache[iPage]);
2374 Log4(("PGMDynLockHCPage %RRv iPage=%d\n", GCPage, iPage));
2375}
2376
2377
2378/**
2379 * Unlock a dynamic page
2380 *
2381 * @param pVM VM handle.
2382 * @param GCPage GC address of page
2383 */
2384VMMDECL(void) PGMDynUnlockHCPage(PVM pVM, RCPTRTYPE(uint8_t *) GCPage)
2385{
2386 unsigned iPage;
2387
2388 AssertCompile(RT_ELEMENTS(pVM->pgm.s.aLockedDynPageMapCache) == 2* RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache));
2389 AssertCompileMemberSize(VM, pgm.s.aLockedDynPageMapCache, sizeof(uint32_t) * (MM_HYPER_DYNAMIC_SIZE >> (PAGE_SHIFT)));
2390
2391 Assert(GCPage >= pVM->pgm.s.pbDynPageMapBaseGC && GCPage < (pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE));
2392 iPage = ((uintptr_t)(GCPage - pVM->pgm.s.pbDynPageMapBaseGC)) >> PAGE_SHIFT;
2393 Assert(pVM->pgm.s.aLockedDynPageMapCache[iPage]);
2394 ASMAtomicDecU32(&pVM->pgm.s.aLockedDynPageMapCache[iPage]);
2395 Log4(("PGMDynUnlockHCPage %RRv iPage=%d\n", GCPage, iPage));
2396}
2397
2398
2399# ifdef VBOX_STRICT
2400/**
2401 * Check for lock leaks.
2402 *
2403 * @param pVM VM handle.
2404 */
2405VMMDECL(void) PGMDynCheckLocks(PVM pVM)
2406{
2407 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.aLockedDynPageMapCache); i++)
2408 Assert(!pVM->pgm.s.aLockedDynPageMapCache[i]);
2409}
2410# endif /* VBOX_STRICT */
2411
2412# endif /* IN_RC */
2413#endif /* IN_RC || VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
2414
2415#if !defined(IN_R0) || defined(LOG_ENABLED)
2416
2417/** Format handler for PGMPAGE.
2418 * @copydoc FNRTSTRFORMATTYPE */
2419static DECLCALLBACK(size_t) pgmFormatTypeHandlerPage(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
2420 const char *pszType, void const *pvValue,
2421 int cchWidth, int cchPrecision, unsigned fFlags,
2422 void *pvUser)
2423{
2424 size_t cch;
2425 PCPGMPAGE pPage = (PCPGMPAGE)pvValue;
2426 if (VALID_PTR(pPage))
2427 {
2428 char szTmp[64+80];
2429
2430 cch = 0;
2431
2432 /* The single char state stuff. */
2433 static const char s_achPageStates[4] = { 'Z', 'A', 'W', 'S' };
2434 szTmp[cch++] = s_achPageStates[PGM_PAGE_GET_STATE(pPage)];
2435
2436#define IS_PART_INCLUDED(lvl) ( !(fFlags & RTSTR_F_PRECISION) || cchPrecision == (lvl) || cchPrecision >= (lvl)+10 )
2437 if (IS_PART_INCLUDED(5))
2438 {
2439 static const char s_achHandlerStates[4] = { '-', 't', 'w', 'a' };
2440 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)];
2441 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_VIRT_STATE(pPage)];
2442 }
2443
2444 /* The type. */
2445 if (IS_PART_INCLUDED(4))
2446 {
2447 szTmp[cch++] = ':';
2448 static const char s_achPageTypes[8][4] = { "INV", "RAM", "MI2", "M2A", "SHA", "ROM", "MIO", "BAD" };
2449 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE(pPage)][0];
2450 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE(pPage)][1];
2451 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE(pPage)][2];
2452 }
2453
2454 /* The numbers. */
2455 if (IS_PART_INCLUDED(3))
2456 {
2457 szTmp[cch++] = ':';
2458 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_HCPHYS(pPage), 16, 12, 0, RTSTR_F_ZEROPAD | RTSTR_F_64BIT);
2459 }
2460
2461 if (IS_PART_INCLUDED(2))
2462 {
2463 szTmp[cch++] = ':';
2464 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_PAGEID(pPage), 16, 7, 0, RTSTR_F_ZEROPAD | RTSTR_F_32BIT);
2465 }
2466
2467 if (IS_PART_INCLUDED(6))
2468 {
2469 szTmp[cch++] = ':';
2470 static const char s_achRefs[4] = { '-', 'U', '!', 'L' };
2471 szTmp[cch++] = s_achRefs[PGM_PAGE_GET_TD_CREFS(pPage)];
2472 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_TD_IDX(pPage), 16, 4, 0, RTSTR_F_ZEROPAD | RTSTR_F_16BIT);
2473 }
2474#undef IS_PART_INCLUDED
2475
2476 cch = pfnOutput(pvArgOutput, szTmp, cch);
2477 }
2478 else
2479 cch = pfnOutput(pvArgOutput, "<bad-pgmpage-ptr>", sizeof("<bad-pgmpage-ptr>") - 1);
2480 return cch;
2481}
2482
2483
2484/** Format handler for PGMRAMRANGE.
2485 * @copydoc FNRTSTRFORMATTYPE */
2486static DECLCALLBACK(size_t) pgmFormatTypeHandlerRamRange(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
2487 const char *pszType, void const *pvValue,
2488 int cchWidth, int cchPrecision, unsigned fFlags,
2489 void *pvUser)
2490{
2491 size_t cch;
2492 PGMRAMRANGE const *pRam = (PGMRAMRANGE const *)pvValue;
2493 if (VALID_PTR(pRam))
2494 {
2495 char szTmp[80];
2496 cch = RTStrPrintf(szTmp, sizeof(szTmp), "%RGp-%RGp", pRam->GCPhys, pRam->GCPhysLast);
2497 cch = pfnOutput(pvArgOutput, szTmp, cch);
2498 }
2499 else
2500 cch = pfnOutput(pvArgOutput, "<bad-pgmramrange-ptr>", sizeof("<bad-pgmramrange-ptr>") - 1);
2501 return cch;
2502}
2503
2504/** Format type handlers to be registered/deregistered. */
2505static const struct
2506{
2507 char szType[24];
2508 PFNRTSTRFORMATTYPE pfnHandler;
2509} g_aPgmFormatTypes[] =
2510{
2511 { "pgmpage", pgmFormatTypeHandlerPage },
2512 { "pgmramrange", pgmFormatTypeHandlerRamRange }
2513};
2514
2515#endif /* !IN_R0 || LOG_ENABLED */
2516
2517/**
2518 * Registers the global string format types.
2519 *
2520 * This should be called at module load time or in some other manner that ensures
2521 * that it's called exactly one time.
2522 *
2523 * @returns IPRT status code on RTStrFormatTypeRegister failure.
2524 */
2525VMMDECL(int) PGMRegisterStringFormatTypes(void)
2526{
2527#if !defined(IN_R0) || defined(LOG_ENABLED)
2528 int rc = VINF_SUCCESS;
2529 unsigned i;
2530 for (i = 0; RT_SUCCESS(rc) && i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
2531 {
2532 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
2533# ifdef IN_RING0
2534 if (rc == VERR_ALREADY_EXISTS)
2535 {
2536 /* in case of cleanup failure in ring-0 */
2537 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2538 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
2539 }
2540# endif
2541 }
2542 if (RT_FAILURE(rc))
2543 while (i-- > 0)
2544 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2545
2546 return rc;
2547#else
2548 return VINF_SUCCESS;
2549#endif
2550}
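/*
 * Illustrative sketch (disabled): once the types above are registered, PGMPAGE
 * and PGMRAMRANGE pointers can be logged via the %R[pgmpage] and %R[pgmramrange]
 * format types (compare the commented-out Log in pgmDynMapGCPageInternal).
 */
#if 0
static void exampleLogPgmPage(PVM pVM, RTGCPHYS GCPhys)
{
    PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
    if (pPage)
        Log(("GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, pPage));
}
#endif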
2551
2552
2553/**
2554 * Deregisters the global string format types.
2555 *
2556 * This should be called at module unload time or in some other manner that
2557 * ensures that it's called exactly one time.
2558 */
2559VMMDECL(void) PGMDeregisterStringFormatTypes(void)
2560{
2561#if !defined(IN_R0) || defined(LOG_ENABLED)
2562 for (unsigned i = 0; i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
2563 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2564#endif
2565}
2566
2567#ifdef VBOX_STRICT
2568
2569/**
2570 * Asserts that there are no mapping conflicts.
2571 *
2572 * @returns Number of conflicts.
2573 * @param pVM The VM Handle.
2574 */
2575VMMDECL(unsigned) PGMAssertNoMappingConflicts(PVM pVM)
2576{
2577 unsigned cErrors = 0;
2578
2579    /* Only applies to raw mode -> 1 VCPU */
2580 Assert(pVM->cCpus == 1);
2581 PVMCPU pVCpu = &pVM->aCpus[0];
2582
2583 /*
2584 * Check for mapping conflicts.
2585 */
2586 for (PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
2587 pMapping;
2588 pMapping = pMapping->CTX_SUFF(pNext))
2589 {
2590 /** @todo This is slow and should be optimized, but since it's just assertions I don't care now. */
2591 for (RTGCPTR GCPtr = pMapping->GCPtr;
2592 GCPtr <= pMapping->GCPtrLast;
2593 GCPtr += PAGE_SIZE)
2594 {
2595 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)GCPtr, NULL, NULL);
2596 if (rc != VERR_PAGE_TABLE_NOT_PRESENT)
2597 {
2598 AssertMsgFailed(("Conflict at %RGv with %s\n", GCPtr, R3STRING(pMapping->pszDesc)));
2599 cErrors++;
2600 break;
2601 }
2602 }
2603 }
2604
2605 return cErrors;
2606}
2607
2608
2609/**
2610 * Asserts that everything related to the guest CR3 is correctly shadowed.
2611 *
2612 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
2613 * and assert the correctness of the guest CR3 mapping before asserting that the
2614 * shadow page tables are in sync with the guest page tables.
2615 *
2616 * @returns Number of conflicts.
2617 * @param pVM The VM Handle.
2618 * @param pVCpu VMCPU handle.
2619 * @param cr3 The current guest CR3 register value.
2620 * @param cr4 The current guest CR4 register value.
2621 */
2622VMMDECL(unsigned) PGMAssertCR3(PVM pVM, PVMCPU pVCpu, uint64_t cr3, uint64_t cr4)
2623{
2624 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
2625 pgmLock(pVM);
2626 unsigned cErrors = PGM_BTH_PFN(AssertCR3, pVCpu)(pVCpu, cr3, cr4, 0, ~(RTGCPTR)0);
2627 pgmUnlock(pVM);
2628 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
2629 return cErrors;
2630}
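/*
 * Illustrative sketch (disabled): a strict-build sanity sweep combining the two
 * assertion helpers above.  Assumes the CPUM getters return the current guest
 * control register values.
 */
#if 0
static void exampleStrictCr3Check(PVM pVM, PVMCPU pVCpu)
{
    unsigned cErrors = PGMAssertNoMappingConflicts(pVM);
    cErrors += PGMAssertCR3(pVM, pVCpu, CPUMGetGuestCR3(pVCpu), CPUMGetGuestCR4(pVCpu));
    AssertMsg(!cErrors, ("cErrors=%u\n", cErrors));
}
#endif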
2631
2632#endif /* VBOX_STRICT */