VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp @ 38953

Last change on this file since 38953 was 38953, checked in by vboxsync, 13 years ago

PGM: Attempt at fixing the VERR_MAP_FAILED during state save problem on 32-bit hosts when assigning lots of memory to the guest. PGM should lock down guest RAM pages before use and release them afterwards like everyone else. Still quite some stuff left to do there, so I've devised a little hack for tracking unlocked mappings and using this as input when deciding to do async or sync chunk unmapping at save/load time. See xtracker #5912 and public ticket 7929.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
/* $Id: PGMAll.cpp 38953 2011-10-06 08:49:36Z vboxsync $ */
/** @file
 * PGM - Page Manager and Monitor - All context code.
 */

/*
 * Copyright (C) 2006-2007 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/selm.h>
#include <VBox/vmm/iom.h>
#include <VBox/sup.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/stam.h>
#include <VBox/vmm/csam.h>
#include <VBox/vmm/patm.h>
#include <VBox/vmm/trpm.h>
#include <VBox/vmm/rem.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/hwaccm.h>
#include <VBox/vmm/hwacc_vmx.h>
#include "PGMInternal.h"
#include <VBox/vmm/vm.h>
#include "PGMInline.h"
#include <iprt/assert.h>
#include <iprt/asm-amd64-x86.h>
#include <iprt/string.h>
#include <VBox/log.h>
#include <VBox/param.h>
#include <VBox/err.h>


/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
/**
 * State structure for PGM_GST_NAME(HandlerVirtualUpdate) that's
 * passed to PGM_GST_NAME(VirtHandlerUpdateOne) during enumeration.
 */
typedef struct PGMHVUSTATE
{
    /** The VM handle. */
    PVM         pVM;
    /** The VMCPU handle. */
    PVMCPU      pVCpu;
    /** The todo flags. */
    RTUINT      fTodo;
    /** The CR4 register value. */
    uint32_t    cr4;
} PGMHVUSTATE,  *PPGMHVUSTATE;


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPU pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde);
#ifndef IN_RC
static int pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD);
static int pgmShwGetEPTPDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD);
#endif


/*
 * Shadow - 32-bit mode
 */
#define PGM_SHW_TYPE                PGM_TYPE_32BIT
#define PGM_SHW_NAME(name)          PGM_SHW_NAME_32BIT(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE                PGM_TYPE_REAL
#define PGM_GST_NAME(name)          PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_32BIT_REAL(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_32BIT_PT_FOR_PHYS
#define BTH_PGMPOOLKIND_ROOT        PGMPOOLKIND_32BIT_PD_PHYS
#include "PGMGstDefs.h"
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE                PGM_TYPE_PROT
#define PGM_GST_NAME(name)          PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_32BIT_PROT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_32BIT_PT_FOR_PHYS
#define BTH_PGMPOOLKIND_ROOT        PGMPOOLKIND_32BIT_PD_PHYS
#include "PGMGstDefs.h"
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode */
#define PGM_GST_TYPE                PGM_TYPE_32BIT
#define PGM_GST_NAME(name)          PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_32BIT_32BIT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG  PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
#define BTH_PGMPOOLKIND_ROOT        PGMPOOLKIND_32BIT_PD
#include "PGMGstDefs.h"
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME
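
/* Each of the mode sections in this file instantiates the same template
 * headers for one shadow/guest paging combination.  As a rough sketch of the
 * mechanics (the actual name macros live in PGMInternal.h): with
 *
 *      #define PGM_SHW_NAME(name)  PGM_SHW_NAME_32BIT(name)
 *
 * in effect, every PGM_SHW_NAME(GetPage) inside PGMAllShw.h expands to the
 * 32-bit shadow variant (roughly pgmShw32BitGetPage, plus a context suffix),
 * while the PAE instantiation of the very same header below produces the PAE
 * variant.  The #undef blocks reset the template parameters before the next
 * instantiation.
 */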


/*
 * Shadow - PAE mode
 */
#define PGM_SHW_TYPE                PGM_TYPE_PAE
#define PGM_SHW_NAME(name)          PGM_SHW_NAME_PAE(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_PAE_REAL(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE                PGM_TYPE_REAL
#define PGM_GST_NAME(name)          PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_PAE_REAL(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_PHYS
#define BTH_PGMPOOLKIND_ROOT        PGMPOOLKIND_PAE_PDPT_PHYS
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE                PGM_TYPE_PROT
#define PGM_GST_NAME(name)          PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_PAE_PROT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_PHYS
#define BTH_PGMPOOLKIND_ROOT        PGMPOOLKIND_PAE_PDPT_PHYS
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode */
#define PGM_GST_TYPE                PGM_TYPE_32BIT
#define PGM_GST_NAME(name)          PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_PAE_32BIT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG  PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
#define BTH_PGMPOOLKIND_ROOT        PGMPOOLKIND_PAE_PDPT_FOR_32BIT
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME


/* Guest - PAE mode */
#define PGM_GST_TYPE                PGM_TYPE_PAE
#define PGM_GST_NAME(name)          PGM_GST_NAME_PAE(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_PAE_PAE(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_PAE_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG  PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
#define BTH_PGMPOOLKIND_ROOT        PGMPOOLKIND_PAE_PDPT
#include "PGMGstDefs.h"
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME


#ifndef IN_RC /* AMD64 implies VT-x/AMD-V */
/*
 * Shadow - AMD64 mode
 */
# define PGM_SHW_TYPE               PGM_TYPE_AMD64
# define PGM_SHW_NAME(name)         PGM_SHW_NAME_AMD64(name)
# include "PGMAllShw.h"

/* Guest - protected mode (only used for AMD-V nested paging in 64 bits mode) */
# define PGM_GST_TYPE               PGM_TYPE_PROT
# define PGM_GST_NAME(name)         PGM_GST_NAME_PROT(name)
# define PGM_BTH_NAME(name)         PGM_BTH_NAME_AMD64_PROT(name)
# define BTH_PGMPOOLKIND_PT_FOR_PT  PGMPOOLKIND_PAE_PT_FOR_PHYS
# define BTH_PGMPOOLKIND_ROOT       PGMPOOLKIND_PAE_PD_PHYS
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
# undef BTH_PGMPOOLKIND_PT_FOR_PT
# undef BTH_PGMPOOLKIND_ROOT
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME

# ifdef VBOX_WITH_64_BITS_GUESTS
/* Guest - AMD64 mode */
#  define PGM_GST_TYPE              PGM_TYPE_AMD64
#  define PGM_GST_NAME(name)        PGM_GST_NAME_AMD64(name)
#  define PGM_BTH_NAME(name)        PGM_BTH_NAME_AMD64_AMD64(name)
#  define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
#  define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
#  define BTH_PGMPOOLKIND_ROOT      PGMPOOLKIND_64BIT_PML4
#  include "PGMGstDefs.h"
#  include "PGMAllGst.h"
#  include "PGMAllBth.h"
#  undef BTH_PGMPOOLKIND_PT_FOR_BIG
#  undef BTH_PGMPOOLKIND_PT_FOR_PT
#  undef BTH_PGMPOOLKIND_ROOT
#  undef PGM_BTH_NAME
#  undef PGM_GST_TYPE
#  undef PGM_GST_NAME
# endif /* VBOX_WITH_64_BITS_GUESTS */

# undef PGM_SHW_TYPE
# undef PGM_SHW_NAME


/*
 * Shadow - Nested paging mode
 */
# define PGM_SHW_TYPE               PGM_TYPE_NESTED
# define PGM_SHW_NAME(name)         PGM_SHW_NAME_NESTED(name)
# include "PGMAllShw.h"

/* Guest - real mode */
# define PGM_GST_TYPE               PGM_TYPE_REAL
# define PGM_GST_NAME(name)         PGM_GST_NAME_REAL(name)
# define PGM_BTH_NAME(name)         PGM_BTH_NAME_NESTED_REAL(name)
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME

/* Guest - protected mode */
# define PGM_GST_TYPE               PGM_TYPE_PROT
# define PGM_GST_NAME(name)         PGM_GST_NAME_PROT(name)
# define PGM_BTH_NAME(name)         PGM_BTH_NAME_NESTED_PROT(name)
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME

/* Guest - 32-bit mode */
# define PGM_GST_TYPE               PGM_TYPE_32BIT
# define PGM_GST_NAME(name)         PGM_GST_NAME_32BIT(name)
# define PGM_BTH_NAME(name)         PGM_BTH_NAME_NESTED_32BIT(name)
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME

/* Guest - PAE mode */
# define PGM_GST_TYPE               PGM_TYPE_PAE
# define PGM_GST_NAME(name)         PGM_GST_NAME_PAE(name)
# define PGM_BTH_NAME(name)         PGM_BTH_NAME_NESTED_PAE(name)
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME

# ifdef VBOX_WITH_64_BITS_GUESTS
/* Guest - AMD64 mode */
#  define PGM_GST_TYPE              PGM_TYPE_AMD64
#  define PGM_GST_NAME(name)        PGM_GST_NAME_AMD64(name)
#  define PGM_BTH_NAME(name)        PGM_BTH_NAME_NESTED_AMD64(name)
#  include "PGMGstDefs.h"
#  include "PGMAllBth.h"
#  undef PGM_BTH_NAME
#  undef PGM_GST_TYPE
#  undef PGM_GST_NAME
# endif /* VBOX_WITH_64_BITS_GUESTS */

# undef PGM_SHW_TYPE
# undef PGM_SHW_NAME


/*
 * Shadow - EPT
 */
# define PGM_SHW_TYPE               PGM_TYPE_EPT
# define PGM_SHW_NAME(name)         PGM_SHW_NAME_EPT(name)
# include "PGMAllShw.h"

/* Guest - real mode */
# define PGM_GST_TYPE               PGM_TYPE_REAL
# define PGM_GST_NAME(name)         PGM_GST_NAME_REAL(name)
# define PGM_BTH_NAME(name)         PGM_BTH_NAME_EPT_REAL(name)
# define BTH_PGMPOOLKIND_PT_FOR_PT  PGMPOOLKIND_EPT_PT_FOR_PHYS
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
# undef BTH_PGMPOOLKIND_PT_FOR_PT
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME

/* Guest - protected mode */
# define PGM_GST_TYPE               PGM_TYPE_PROT
# define PGM_GST_NAME(name)         PGM_GST_NAME_PROT(name)
# define PGM_BTH_NAME(name)         PGM_BTH_NAME_EPT_PROT(name)
# define BTH_PGMPOOLKIND_PT_FOR_PT  PGMPOOLKIND_EPT_PT_FOR_PHYS
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
# undef BTH_PGMPOOLKIND_PT_FOR_PT
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME

/* Guest - 32-bit mode */
# define PGM_GST_TYPE               PGM_TYPE_32BIT
# define PGM_GST_NAME(name)         PGM_GST_NAME_32BIT(name)
# define PGM_BTH_NAME(name)         PGM_BTH_NAME_EPT_32BIT(name)
# define BTH_PGMPOOLKIND_PT_FOR_PT  PGMPOOLKIND_EPT_PT_FOR_PHYS
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
# undef BTH_PGMPOOLKIND_PT_FOR_PT
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME

/* Guest - PAE mode */
# define PGM_GST_TYPE               PGM_TYPE_PAE
# define PGM_GST_NAME(name)         PGM_GST_NAME_PAE(name)
# define PGM_BTH_NAME(name)         PGM_BTH_NAME_EPT_PAE(name)
# define BTH_PGMPOOLKIND_PT_FOR_PT  PGMPOOLKIND_EPT_PT_FOR_PHYS
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
# undef BTH_PGMPOOLKIND_PT_FOR_PT
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME

# ifdef VBOX_WITH_64_BITS_GUESTS
/* Guest - AMD64 mode */
#  define PGM_GST_TYPE              PGM_TYPE_AMD64
#  define PGM_GST_NAME(name)        PGM_GST_NAME_AMD64(name)
#  define PGM_BTH_NAME(name)        PGM_BTH_NAME_EPT_AMD64(name)
#  define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
#  include "PGMGstDefs.h"
#  include "PGMAllBth.h"
#  undef BTH_PGMPOOLKIND_PT_FOR_PT
#  undef PGM_BTH_NAME
#  undef PGM_GST_TYPE
#  undef PGM_GST_NAME
# endif /* VBOX_WITH_64_BITS_GUESTS */

# undef PGM_SHW_TYPE
# undef PGM_SHW_NAME

#endif /* !IN_RC */


#ifndef IN_RING3
/**
 * #PF Handler.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVCpu       VMCPU handle.
 * @param   uErr        The trap error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address.
 */
VMMDECL(int) PGMTrap0eHandler(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    Log(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv eip=%04x:%RGv cr3=%RGp\n", uErr, pvFault, pRegFrame->cs, (RTGCPTR)pRegFrame->rip, (RTGCPHYS)CPUMGetGuestCR3(pVCpu)));
    STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0e, a);
    STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );


#ifdef VBOX_WITH_STATISTICS
    /*
     * Error code stats.
     */
    if (uErr & X86_TRAP_PF_US)
    {
        if (!(uErr & X86_TRAP_PF_P))
        {
            if (uErr & X86_TRAP_PF_RW)
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentWrite);
            else
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentRead);
        }
        else if (uErr & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSWrite);
        else if (uErr & X86_TRAP_PF_RSVD)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSReserved);
        else if (uErr & X86_TRAP_PF_ID)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNXE);
        else
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSRead);
    }
    else
    {   /* Supervisor */
        if (!(uErr & X86_TRAP_PF_P))
        {
            if (uErr & X86_TRAP_PF_RW)
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentWrite);
            else
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentRead);
        }
        else if (uErr & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVWrite);
        else if (uErr & X86_TRAP_PF_ID)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSNXE);
        else if (uErr & X86_TRAP_PF_RSVD)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVReserved);
    }
#endif /* VBOX_WITH_STATISTICS */

    /*
     * Call the worker.
     */
    bool fLockTaken = false;
    int rc = PGM_BTH_PFN(Trap0eHandler, pVCpu)(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken);
    if (fLockTaken)
    {
        PGM_LOCK_ASSERT_OWNER(pVM);
        pgmUnlock(pVM);
    }
    LogFlow(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv rc=%Rrc\n", uErr, pvFault, rc));

    /*
     * Return code tweaks.
     */
    if (rc != VINF_SUCCESS)
    {
        if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
            rc = VINF_SUCCESS;

# ifdef IN_RING0
        /* Note: hack alert for difficult to reproduce problem. */
        if (    rc == VERR_PAGE_NOT_PRESENT                 /* SMP only ; disassembly might fail. */
            ||  rc == VERR_PAGE_TABLE_NOT_PRESENT           /* seen with UNI & SMP */
            ||  rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT   /* seen with SMP */
            ||  rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)     /* precaution */
        {
            Log(("WARNING: Unexpected VERR_PAGE_TABLE_NOT_PRESENT (%d) for page fault at %RGv error code %x (rip=%RGv)\n", rc, pvFault, uErr, pRegFrame->rip));
            /* Some kind of inconsistency in the SMP case; it's safe to just execute the instruction again; not sure about single VCPU VMs though. */
            rc = VINF_SUCCESS;
        }
# endif
    }

    STAM_STATS({ if (rc == VINF_EM_RAW_GUEST_TRAP) STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eGuestPF); });
    STAM_STATS({ if (!pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution))
                    pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2Misc; });
    STAM_PROFILE_STOP_EX(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0e, pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution), a);
    return rc;
}
#endif /* !IN_RING3 */
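
/* For orientation, a minimal sketch of how a ring-0 #PF exit path might feed
 * into PGMTrap0eHandler (hypothetical call site; the real VT-x/AMD-V exit
 * handlers live in HWACCM, and uErrCd/GCPtrFault come from the VMCS/VMCB):
 *
 *      PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
 *      int rc = PGMTrap0eHandler(pVCpu, uErrCd, CPUMCTX2CORE(pCtx), GCPtrFault);
 *      // VINF_SUCCESS resumes guest execution; VINF_EM_RAW_GUEST_TRAP means
 *      // the fault must be reflected to the guest; other codes are
 *      // scheduling hints for EM.
 */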


/**
 * Prefetch a page
 *
 * Typically used to sync commonly used pages before entering raw mode
 * after a CR3 reload.
 *
 * @returns VBox status code suitable for scheduling.
 * @retval  VINF_SUCCESS on success.
 * @retval  VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
 * @param   pVCpu       VMCPU handle.
 * @param   GCPtrPage   Page to invalidate.
 */
VMMDECL(int) PGMPrefetchPage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
{
    STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Prefetch), a);
    int rc = PGM_BTH_PFN(PrefetchPage, pVCpu)(pVCpu, GCPtrPage);
    STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Prefetch), a);
    AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
    return rc;
}


/**
 * Gets the mapping corresponding to the specified address (if any).
 *
 * @returns Pointer to the mapping.
 * @returns NULL if not found.
 *
 * @param   pVM         The virtual machine.
 * @param   GCPtr       The guest context pointer.
 */
PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr)
{
    PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
    while (pMapping)
    {
        if ((uintptr_t)GCPtr < (uintptr_t)pMapping->GCPtr)
            break;
        if ((uintptr_t)GCPtr - (uintptr_t)pMapping->GCPtr < pMapping->cb)
            return pMapping;
        pMapping = pMapping->CTX_SUFF(pNext);
    }
    return NULL;
}
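
/* The early break above relies on the mapping list being sorted by GCPtr in
 * ascending order.  Illustration (addresses made up): with mappings at
 * 0xa0000000 (cb=0x1000) and 0xe0000000, looking up 0xa0000800 returns the
 * first node, while looking up 0xb0000000 breaks out as soon as it reaches
 * the second node and returns NULL.
 */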


/**
 * Verifies a range of pages for read or write access
 *
 * Only checks the guest's page tables
 *
 * @returns VBox status code.
 * @param   pVCpu       VMCPU handle.
 * @param   Addr        Guest virtual address to check
 * @param   cbSize      Access size
 * @param   fAccess     Access type (r/w, user/supervisor (X86_PTE_*))
 * @remarks Currently not in use.
 */
VMMDECL(int) PGMIsValidAccess(PVMCPU pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
{
    /*
     * Validate input.
     */
    if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
    {
        AssertMsgFailed(("PGMIsValidAccess: invalid access type %08x\n", fAccess));
        return VERR_INVALID_PARAMETER;
    }

    uint64_t fPage;
    int rc = PGMGstGetPage(pVCpu, (RTGCPTR)Addr, &fPage, NULL);
    if (RT_FAILURE(rc))
    {
        Log(("PGMIsValidAccess: access violation for %RGv rc=%d\n", Addr, rc));
        return VINF_EM_RAW_GUEST_TRAP;
    }

    /*
     * Check if the access would cause a page fault
     *
     * Note that hypervisor page directories are not present in the guest's tables, so this check
     * is sufficient.
     */
    bool fWrite = !!(fAccess & X86_PTE_RW);
    bool fUser  = !!(fAccess & X86_PTE_US);
    if (   !(fPage & X86_PTE_P)
        || (fWrite && !(fPage & X86_PTE_RW))
        || (fUser  && !(fPage & X86_PTE_US)) )
    {
        Log(("PGMIsValidAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPage, fWrite, fUser));
        return VINF_EM_RAW_GUEST_TRAP;
    }
    if (   RT_SUCCESS(rc)
        && PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize))
        return PGMIsValidAccess(pVCpu, Addr + PAGE_SIZE, (cbSize > PAGE_SIZE) ? cbSize - PAGE_SIZE : 1, fAccess);
    return rc;
}


/**
 * Verifies a range of pages for read or write access
 *
 * Supports handling of pages marked for dirty bit tracking and CSAM
 *
 * @returns VBox status code.
 * @param   pVCpu       VMCPU handle.
 * @param   Addr        Guest virtual address to check
 * @param   cbSize      Access size
 * @param   fAccess     Access type (r/w, user/supervisor (X86_PTE_*))
 */
VMMDECL(int) PGMVerifyAccess(PVMCPU pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    AssertMsg(!(fAccess & ~(X86_PTE_US | X86_PTE_RW)), ("PGMVerifyAccess: invalid access type %08x\n", fAccess));

    /*
     * Get going.
     */
    uint64_t fPageGst;
    int rc = PGMGstGetPage(pVCpu, (RTGCPTR)Addr, &fPageGst, NULL);
    if (RT_FAILURE(rc))
    {
        Log(("PGMVerifyAccess: access violation for %RGv rc=%d\n", Addr, rc));
        return VINF_EM_RAW_GUEST_TRAP;
    }

    /*
     * Check if the access would cause a page fault
     *
     * Note that hypervisor page directories are not present in the guest's tables, so this check
     * is sufficient.
     */
    const bool fWrite = !!(fAccess & X86_PTE_RW);
    const bool fUser  = !!(fAccess & X86_PTE_US);
    if (   !(fPageGst & X86_PTE_P)
        || (fWrite && !(fPageGst & X86_PTE_RW))
        || (fUser  && !(fPageGst & X86_PTE_US)) )
    {
        Log(("PGMVerifyAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPageGst, fWrite, fUser));
        return VINF_EM_RAW_GUEST_TRAP;
    }

    if (!pVM->pgm.s.fNestedPaging)
    {
        /*
         * Next step is to verify if we protected this page for dirty bit tracking or for CSAM scanning
         */
        rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, NULL, NULL);
        if (    rc == VERR_PAGE_NOT_PRESENT
            ||  rc == VERR_PAGE_TABLE_NOT_PRESENT)
        {
            /*
             * Page is not present in our page tables.
             * Try to sync it!
             */
            Assert(X86_TRAP_PF_RW == X86_PTE_RW && X86_TRAP_PF_US == X86_PTE_US);
            uint32_t uErr = fAccess & (X86_TRAP_PF_RW | X86_TRAP_PF_US);
            rc = PGM_BTH_PFN(VerifyAccessSyncPage, pVCpu)(pVCpu, Addr, fPageGst, uErr);
            if (rc != VINF_SUCCESS)
                return rc;
        }
        else
            AssertMsg(rc == VINF_SUCCESS, ("PGMShwGetPage %RGv failed with %Rrc\n", Addr, rc));
    }

#if 0 /* def VBOX_STRICT; triggers too often now */
    /*
     * This check is a bit paranoid, but useful.
     */
    /* Note! This will assert when writing to monitored pages (a bit annoying actually). */
    uint64_t fPageShw;
    rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, &fPageShw, NULL);
    if (    (rc == VERR_PAGE_NOT_PRESENT || RT_FAILURE(rc))
        ||  (fWrite && !(fPageShw & X86_PTE_RW))
        ||  (fUser  && !(fPageShw & X86_PTE_US)) )
    {
        AssertMsgFailed(("Unexpected access violation for %RGv! rc=%Rrc write=%d user=%d\n",
                         Addr, rc, fWrite && !(fPageShw & X86_PTE_RW), fUser && !(fPageShw & X86_PTE_US)));
        return VINF_EM_RAW_GUEST_TRAP;
    }
#endif

    if (    RT_SUCCESS(rc)
        &&  (   PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize - 1)
             || Addr + cbSize < Addr))
    {
        /* Don't recursively call PGMVerifyAccess as we might run out of stack. */
        for (;;)
        {
            Addr += PAGE_SIZE;
            if (cbSize > PAGE_SIZE)
                cbSize -= PAGE_SIZE;
            else
                cbSize = 1;
            rc = PGMVerifyAccess(pVCpu, Addr, 1, fAccess);
            if (rc != VINF_SUCCESS)
                break;
            if (PAGE_ADDRESS(Addr) == PAGE_ADDRESS(Addr + cbSize - 1))
                break;
        }
    }
    return rc;
}


/**
 * Emulation of the invlpg instruction (HC only actually).
 *
 * @returns Strict VBox status code, special care required.
 * @retval  VINF_PGM_SYNC_CR3 - handled.
 * @retval  VINF_EM_RAW_EMULATE_INSTR - not handled (RC only).
 * @retval  VERR_REM_FLUSHED_PAGES_OVERFLOW - not handled.
 *
 * @param   pVCpu       VMCPU handle.
 * @param   GCPtrPage   Page to invalidate.
 *
 * @remark  ASSUMES the page table entry or page directory is valid. Fairly
 *          safe, but there could be edge cases!
 *
 * @todo    Flush page or page directory only if necessary!
 * @todo    VBOXSTRICTRC
 */
VMMDECL(int) PGMInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    int rc;
    Log3(("PGMInvalidatePage: GCPtrPage=%RGv\n", GCPtrPage));

#ifndef IN_RING3
    /*
     * Notify the recompiler so it can record this instruction.
     */
    REMNotifyInvalidatePage(pVM, GCPtrPage);
#endif /* !IN_RING3 */


#ifdef IN_RC
    /*
     * Check for conflicts and pending CR3 monitoring updates.
     */
    if (pgmMapAreMappingsFloating(pVM))
    {
        if (    pgmGetMapping(pVM, GCPtrPage)
            &&  PGMGstGetPage(pVCpu, GCPtrPage, NULL, NULL) != VERR_PAGE_TABLE_NOT_PRESENT)
        {
            LogFlow(("PGMGCInvalidatePage: Conflict!\n"));
            VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
            STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRCInvlPgConflict);
            return VINF_PGM_SYNC_CR3;
        }

        if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
        {
            LogFlow(("PGMGCInvalidatePage: PGM_SYNC_MONITOR_CR3 -> reinterpret instruction in R3\n"));
            STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRCInvlPgSyncMonCR3);
            return VINF_EM_RAW_EMULATE_INSTR;
        }
    }
#endif /* IN_RC */

    /*
     * Call paging mode specific worker.
     */
    STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InvalidatePage), a);
    pgmLock(pVM);
    rc = PGM_BTH_PFN(InvalidatePage, pVCpu)(pVCpu, GCPtrPage);
    pgmUnlock(pVM);
    STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InvalidatePage), a);

#ifdef IN_RING3
    /*
     * Check if we have a pending update of the CR3 monitoring.
     */
    if (    RT_SUCCESS(rc)
        &&  (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3))
    {
        pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
        Assert(!pVM->pgm.s.fMappingsFixed); Assert(!pVM->pgm.s.fMappingsDisabled);
    }

    /*
     * Inform CSAM about the flush
     *
     * Note: This is to check if monitored pages have been changed; when we implement
     *       callbacks for virtual handlers, this is no longer required.
     */
    CSAMR3FlushPage(pVM, GCPtrPage);
#endif /* IN_RING3 */

    /* Ignore all irrelevant error codes. */
    if (    rc == VERR_PAGE_NOT_PRESENT
        ||  rc == VERR_PAGE_TABLE_NOT_PRESENT
        ||  rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT
        ||  rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)
        rc = VINF_SUCCESS;

    return rc;
}
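
/* Sketch of a typical caller, e.g. an INVLPG interpretation path
 * (hypothetical; the real call sites are in EM/TRPM and the BTH workers):
 *
 *      int rc = PGMInvalidatePage(pVCpu, GCPtrPage);
 *      // VINF_PGM_SYNC_CR3 means a full shadow resync was scheduled via the
 *      // VMCPU_FF_PGM_SYNC_CR3 force-action flag; VINF_EM_RAW_EMULATE_INSTR
 *      // (RC only) asks the caller to fall back to instruction emulation.
 */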


/**
 * Executes an instruction using the interpreter.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         VM handle.
 * @param   pVCpu       VMCPU handle.
 * @param   pRegFrame   Register frame.
 * @param   pvFault     Fault address.
 */
VMMDECL(VBOXSTRICTRC) PGMInterpretInstruction(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
{
    uint32_t cb;
    VBOXSTRICTRC rc = EMInterpretInstruction(pVM, pVCpu, pRegFrame, pvFault, &cb);
    if (rc == VERR_EM_INTERPRETER)
        rc = VINF_EM_RAW_EMULATE_INSTR;
    if (rc != VINF_SUCCESS)
        Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%RGv)\n", VBOXSTRICTRC_VAL(rc), pvFault));
    return rc;
}


/**
 * Gets effective page information (from the VMM page directory).
 *
 * @returns VBox status.
 * @param   pVCpu       VMCPU handle.
 * @param   GCPtr       Guest Context virtual address of the page.
 * @param   pfFlags     Where to store the flags. These are X86_PTE_*.
 * @param   pHCPhys     Where to store the HC physical address of the page.
 *                      This is page aligned.
 * @remark  You should use PGMMapGetPage() for pages in a mapping.
 */
VMMDECL(int) PGMShwGetPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
{
    pgmLock(pVCpu->CTX_SUFF(pVM));
    int rc = PGM_SHW_PFN(GetPage, pVCpu)(pVCpu, GCPtr, pfFlags, pHCPhys);
    pgmUnlock(pVCpu->CTX_SUFF(pVM));
    return rc;
}


/**
 * Modify page flags for a range of pages in the shadow context.
 *
 * The existing flags are ANDed with the fMask and ORed with the fFlags.
 *
 * @returns VBox status code.
 * @param   pVCpu       VMCPU handle.
 * @param   GCPtr       Virtual address of the first page in the range.
 * @param   fFlags      The OR  mask - page flags X86_PTE_*, excluding the page mask of course.
 * @param   fMask       The AND mask - page flags X86_PTE_*.
 *                      Be very CAREFUL when ~'ing constants which could be 32-bit!
 * @param   fOpFlags    A combination of the PGM_MK_PG_XXX flags.
 * @remark  You must use PGMMapModifyPage() for pages in a mapping.
 */
DECLINLINE(int) pdmShwModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags)
{
    AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
    Assert(!(fOpFlags & ~(PGM_MK_PG_IS_MMIO2 | PGM_MK_PG_IS_WRITE_FAULT)));

    GCPtr &= PAGE_BASE_GC_MASK; /** @todo this ain't necessary, right... */

    PVM pVM = pVCpu->CTX_SUFF(pVM);
    pgmLock(pVM);
    int rc = PGM_SHW_PFN(ModifyPage, pVCpu)(pVCpu, GCPtr, PAGE_SIZE, fFlags, fMask, fOpFlags);
    pgmUnlock(pVM);
    return rc;
}
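
/* The three wrappers below all funnel into pdmShwModifyPage; the operation
 * is encoded entirely in the fFlags (OR) / fMask (AND) pair:
 *
 *      read-only:    fFlags = 0,           fMask = ~X86_PTE_RW   (clear RW)
 *      writable:     fFlags = X86_PTE_RW,  fMask = ~0            (set RW)
 *      not present:  fFlags = 0,           fMask = 0             (clear all, incl. P)
 */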


/**
 * Changing the page flags for a single page in the shadow page tables so as to
 * make it read-only.
 *
 * @returns VBox status code.
 * @param   pVCpu       VMCPU handle.
 * @param   GCPtr       Virtual address of the first page in the range.
 * @param   fOpFlags    A combination of the PGM_MK_PG_XXX flags.
 */
VMMDECL(int) PGMShwMakePageReadonly(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
{
    return pdmShwModifyPage(pVCpu, GCPtr, 0, ~(uint64_t)X86_PTE_RW, fOpFlags);
}


/**
 * Changing the page flags for a single page in the shadow page tables so as to
 * make it writable.
 *
 * The caller must know with 101% certainty that the guest page tables map this
 * as writable too. This function will deal with shared, zero and write
 * monitored pages.
 *
 * @returns VBox status code.
 * @param   pVCpu       VMCPU handle.
 * @param   GCPtr       Virtual address of the first page in the range.
 * @param   fOpFlags    A combination of the PGM_MK_PG_XXX flags.
 */
VMMDECL(int) PGMShwMakePageWritable(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
{
    return pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)0, fOpFlags);
}


/**
 * Changing the page flags for a single page in the shadow page tables so as to
 * make it not present.
 *
 * @returns VBox status code.
 * @param   pVCpu       VMCPU handle.
 * @param   GCPtr       Virtual address of the first page in the range.
 * @param   fOpFlags    A combination of the PGM_MK_PG_XXX flags.
 */
VMMDECL(int) PGMShwMakePageNotPresent(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
{
    return pdmShwModifyPage(pVCpu, GCPtr, 0, 0, fOpFlags);
}


/**
 * Gets the shadow page directory for the specified address, PAE, allocating
 * it if necessary.
 *
 * @returns VBox status code.
 * @param   pVCpu       The VMCPU handle.
 * @param   GCPtr       The address.
 * @param   uGstPdpe    Guest PDPT entry. Valid.
 * @param   ppPD        Receives address of page directory
 */
int pgmShwSyncPaePDPtr(PVMCPU pVCpu, RTGCPTR GCPtr, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
{
    const unsigned iPdPt    = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
    PX86PDPT       pPdpt    = pgmShwGetPaePDPTPtr(pVCpu);
    PX86PDPE       pPdpe    = &pPdpt->a[iPdPt];
    PVM            pVM      = pVCpu->CTX_SUFF(pVM);
    PPGMPOOL       pPool    = pVM->pgm.s.CTX_SUFF(pPool);
    PPGMPOOLPAGE   pShwPage;
    int            rc;

    PGM_LOCK_ASSERT_OWNER(pVM);

    /* Allocate page directory if not present. */
    if (    !pPdpe->n.u1Present
        &&  !(pPdpe->u & X86_PDPE_PG_MASK))
    {
        RTGCPTR64   GCPdPt;
        PGMPOOLKIND enmKind;

        if (pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu))
        {
            /* AMD-V nested paging or real/protected mode without paging. */
            GCPdPt  = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
            enmKind = PGMPOOLKIND_PAE_PD_PHYS;
        }
        else
        {
            if (CPUMGetGuestCR4(pVCpu) & X86_CR4_PAE)
            {
                if (!(uGstPdpe & X86_PDPE_P))
                {
                    /* PD not present; guest must reload CR3 to change it.
                     * No need to monitor anything in this case.
                     */
                    Assert(!HWACCMIsEnabled(pVM));

                    GCPdPt  = uGstPdpe & X86_PDPE_PG_MASK;
                    enmKind = PGMPOOLKIND_PAE_PD_PHYS;
                    uGstPdpe |= X86_PDPE_P;
                }
                else
                {
                    GCPdPt  = uGstPdpe & X86_PDPE_PG_MASK;
                    enmKind = PGMPOOLKIND_PAE_PD_FOR_PAE_PD;
                }
            }
            else
            {
                GCPdPt  = CPUMGetGuestCR3(pVCpu);
                enmKind = (PGMPOOLKIND)(PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD + iPdPt);
            }
        }

        /* Create a reference back to the PDPT by using the index in its shadow page. */
        rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPdPt, &pShwPage);
        AssertRCReturn(rc, rc);

        /* The PD was cached or created; hook it up now. */
        pPdpe->u |= pShwPage->Core.Key | (uGstPdpe & (X86_PDPE_P | X86_PDPE_A));

# if defined(IN_RC)
        /*
         * In 32 bits PAE mode we *must* invalidate the TLB when changing a
         * PDPT entry; the CPU fetches them only during cr3 load, so any
         * non-present PDPT will continue to cause page faults.
         */
        ASMReloadCR3();
# endif
        PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdpe);
    }
    else
    {
        pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
        AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
        Assert((pPdpe->u & X86_PDPE_PG_MASK) == pShwPage->Core.Key);

        pgmPoolCacheUsed(pPool, pShwPage);
    }
    *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
    return VINF_SUCCESS;
}
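
/* The function above is one instance of the allocate-or-reuse pattern shared
 * by the pgmShwSyncXxx and pgmShwGetXxx helpers in this file: when the table
 * entry is empty, pgmPoolAlloc() produces (or finds in the pool cache) a
 * shadow page keyed by guest physical address and pool kind, and the entry
 * is hooked up via pShwPage->Core.Key; when the entry is already present,
 * pgmPoolGetPage() resolves the existing shadow page and pgmPoolCacheUsed()
 * refreshes its cache ranking.  The same shape recurs below for long mode
 * and EPT.
 */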


/**
 * Gets the pointer to the shadow page directory entry for an address, PAE.
 *
 * @returns VBox status code.
 * @param   pVCpu       The current CPU.
 * @param   GCPtr       The address.
 * @param   ppShwPde    Receives the address of the pgm pool page for the shadow page directory
 */
DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPU pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde)
{
    const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
    PX86PDPT       pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
    PVM            pVM   = pVCpu->CTX_SUFF(pVM);

    PGM_LOCK_ASSERT_OWNER(pVM);

    AssertReturn(pPdpt, VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT);    /* can't happen */
    if (!pPdpt->a[iPdPt].n.u1Present)
    {
        LogFlow(("pgmShwGetPaePoolPagePD: PD %d not present (%RX64)\n", iPdPt, pPdpt->a[iPdPt].u));
        return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
    }
    AssertMsg(pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK, ("GCPtr=%RGv\n", GCPtr));

    /* Fetch the pgm pool shadow descriptor. */
    PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
    AssertReturn(pShwPde, VERR_INTERNAL_ERROR);

    *ppShwPde = pShwPde;
    return VINF_SUCCESS;
}

#ifndef IN_RC

/**
 * Syncs the SHADOW page directory pointer for the specified address.
 *
 * Allocates backing pages in case the PDPT or PML4 entry is missing.
 *
 * The caller is responsible for making sure the guest has a valid PD before
 * calling this function.
 *
 * @returns VBox status.
 * @param   pVCpu       VMCPU handle.
 * @param   GCPtr       The address.
 * @param   uGstPml4e   Guest PML4 entry (valid).
 * @param   uGstPdpe    Guest PDPT entry (valid).
 * @param   ppPD        Receives address of page directory
 */
static int pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
{
    PPGMCPU        pPGM   = &pVCpu->pgm.s;
    PVM            pVM    = pVCpu->CTX_SUFF(pVM);
    PPGMPOOL       pPool  = pVM->pgm.s.CTX_SUFF(pPool);
    const unsigned iPml4  = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    PX86PML4E      pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
    bool           fNestedPagingOrNoGstPaging = pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu);
    PPGMPOOLPAGE   pShwPage;
    int            rc;

    PGM_LOCK_ASSERT_OWNER(pVM);

    /* Allocate page directory pointer table if not present. */
    if (    !pPml4e->n.u1Present
        &&  !(pPml4e->u & X86_PML4E_PG_MASK))
    {
        RTGCPTR64   GCPml4;
        PGMPOOLKIND enmKind;

        Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));

        if (fNestedPagingOrNoGstPaging)
        {
            /* AMD-V nested paging or real/protected mode without paging */
            GCPml4  = (RTGCPTR64)iPml4 << X86_PML4_SHIFT;
            enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_PHYS;
        }
        else
        {
            GCPml4  = uGstPml4e & X86_PML4E_PG_MASK;
            enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT;
        }

        /* Create a reference back to the PDPT by using the index in its shadow page. */
        rc = pgmPoolAlloc(pVM, GCPml4, enmKind, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, &pShwPage);
        AssertRCReturn(rc, rc);
    }
    else
    {
        pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
        AssertReturn(pShwPage, VERR_INTERNAL_ERROR);

        pgmPoolCacheUsed(pPool, pShwPage);
    }
    /* The PDPT was cached or created; hook it up now. */
    pPml4e->u |= pShwPage->Core.Key | (uGstPml4e & pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask);

    const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
    PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
    PX86PDPE pPdpe = &pPdpt->a[iPdPt];

    /* Allocate page directory if not present. */
    if (    !pPdpe->n.u1Present
        &&  !(pPdpe->u & X86_PDPE_PG_MASK))
    {
        RTGCPTR64   GCPdPt;
        PGMPOOLKIND enmKind;

        if (fNestedPagingOrNoGstPaging)
        {
            /* AMD-V nested paging or real/protected mode without paging */
            GCPdPt  = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
            enmKind = PGMPOOLKIND_64BIT_PD_FOR_PHYS;
        }
        else
        {
            GCPdPt  = uGstPdpe & X86_PDPE_PG_MASK;
            enmKind = PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD;
        }

        /* Create a reference back to the PDPT by using the index in its shadow page. */
        rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, pShwPage->idx, iPdPt, &pShwPage);
        AssertRCReturn(rc, rc);
    }
    else
    {
        pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
        AssertReturn(pShwPage, VERR_INTERNAL_ERROR);

        pgmPoolCacheUsed(pPool, pShwPage);
    }
    /* The PD was cached or created; hook it up now. */
    pPdpe->u |= pShwPage->Core.Key | (uGstPdpe & pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask);

    *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
    return VINF_SUCCESS;
}


/**
 * Gets the SHADOW page directory pointer for the specified address (long mode).
 *
 * @returns VBox status.
 * @param   pVCpu       VMCPU handle.
 * @param   GCPtr       The address.
 * @param   ppPml4e     Receives address of the PML4 entry. Optional.
 * @param   ppPdpt      Receives address of pdpt
 * @param   ppPD        Receives address of page directory
 */
DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
{
    PPGMCPU        pPGM   = &pVCpu->pgm.s;
    const unsigned iPml4  = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    PCX86PML4E     pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);

    PGM_LOCK_ASSERT_OWNER(PGMCPU2VM(pPGM));

    AssertReturn(pPml4e, VERR_INTERNAL_ERROR);
    if (ppPml4e)
        *ppPml4e = (PX86PML4E)pPml4e;

    Log4(("pgmShwGetLongModePDPtr %RGv (%RHv) %RX64\n", GCPtr, pPml4e, pPml4e->u));

    if (!pPml4e->n.u1Present)
        return VERR_PAGE_MAP_LEVEL4_NOT_PRESENT;

    PVM             pVM      = pVCpu->CTX_SUFF(pVM);
    PPGMPOOL        pPool    = pVM->pgm.s.CTX_SUFF(pPool);
    PPGMPOOLPAGE    pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
    AssertReturn(pShwPage, VERR_INTERNAL_ERROR);

    const unsigned  iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
    PCX86PDPT       pPdpt = *ppPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
    if (!pPdpt->a[iPdPt].n.u1Present)
        return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;

    pShwPage = pgmPoolGetPage(pPool, pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
    AssertReturn(pShwPage, VERR_INTERNAL_ERROR);

    *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
    Log4(("pgmShwGetLongModePDPtr %RGv -> *ppPD=%p PDE=%p/%RX64\n", GCPtr, *ppPD, &(*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK], (*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK].u));
    return VINF_SUCCESS;
}


/**
 * Syncs the SHADOW EPT page directory pointer for the specified address. Allocates
 * backing pages in case the PDPT or PML4 entry is missing.
 *
 * @returns VBox status.
 * @param   pVCpu       VMCPU handle.
 * @param   GCPtr       The address.
 * @param   ppPdpt      Receives address of pdpt
 * @param   ppPD        Receives address of page directory
 */
static int pgmShwGetEPTPDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD)
{
    PVM            pVM   = pVCpu->CTX_SUFF(pVM);
    const unsigned iPml4 = (GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
    PPGMPOOL       pPool = pVM->pgm.s.CTX_SUFF(pPool);
    PEPTPML4       pPml4;
    PEPTPML4E      pPml4e;
    PPGMPOOLPAGE   pShwPage;
    int            rc;

    Assert(pVM->pgm.s.fNestedPaging);
    PGM_LOCK_ASSERT_OWNER(pVM);

    pPml4 = (PEPTPML4)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
    Assert(pPml4);

    /* Allocate page directory pointer table if not present. */
    pPml4e = &pPml4->a[iPml4];
    if (    !pPml4e->n.u1Present
        &&  !(pPml4e->u & EPT_PML4E_PG_MASK))
    {
        Assert(!(pPml4e->u & EPT_PML4E_PG_MASK));
        RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT;

        rc = pgmPoolAlloc(pVM, GCPml4, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOL_IDX_NESTED_ROOT, iPml4, &pShwPage);
        AssertRCReturn(rc, rc);
    }
    else
    {
        pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
        AssertReturn(pShwPage, VERR_INTERNAL_ERROR);

        pgmPoolCacheUsed(pPool, pShwPage);
    }
    /* The PDPT was cached or created; hook it up now and fill with the default value. */
    pPml4e->u           = pShwPage->Core.Key;
    pPml4e->n.u1Present = 1;
    pPml4e->n.u1Write   = 1;
    pPml4e->n.u1Execute = 1;

    const unsigned iPdPt = (GCPtr >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
    PEPTPDPT  pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
    PEPTPDPTE pPdpe = &pPdpt->a[iPdPt];

    if (ppPdpt)
        *ppPdpt = pPdpt;

    /* Allocate page directory if not present. */
    if (    !pPdpe->n.u1Present
        &&  !(pPdpe->u & EPT_PDPTE_PG_MASK))
    {
        RTGCPTR64 GCPdPt = (RTGCPTR64)iPdPt << EPT_PDPT_SHIFT;

        rc = pgmPoolAlloc(pVM, GCPdPt, PGMPOOLKIND_EPT_PD_FOR_PHYS, pShwPage->idx, iPdPt, &pShwPage);
        AssertRCReturn(rc, rc);
    }
    else
    {
        pShwPage = pgmPoolGetPage(pPool, pPdpe->u & EPT_PDPTE_PG_MASK);
        AssertReturn(pShwPage, VERR_INTERNAL_ERROR);

        pgmPoolCacheUsed(pPool, pShwPage);
    }
    /* The PD was cached or created; hook it up now and fill with the default value. */
    pPdpe->u            = pShwPage->Core.Key;
    pPdpe->n.u1Present  = 1;
    pPdpe->n.u1Write    = 1;
    pPdpe->n.u1Execute  = 1;

    *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
    return VINF_SUCCESS;
}

#endif /* !IN_RC */

#ifdef IN_RING0
/**
 * Synchronizes a range of nested page table entries.
 *
 * The caller must own the PGM lock.
 *
 * @param   pVCpu               The current CPU.
 * @param   GCPhys              Where to start.
 * @param   cPages              How many pages which entries should be synced.
 * @param   enmShwPagingMode    The shadow paging mode (PGMMODE_EPT for VT-x,
 *                              host paging mode for AMD-V).
 */
int pgmShwSyncNestedPageLocked(PVMCPU pVCpu, RTGCPHYS GCPhysFault, uint32_t cPages, PGMMODE enmShwPagingMode)
{
    PGM_LOCK_ASSERT_OWNER(pVCpu->CTX_SUFF(pVM));

    int rc;
    switch (enmShwPagingMode)
    {
        case PGMMODE_32_BIT:
        {
            X86PDE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
            rc = PGM_BTH_NAME_32BIT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhysFault, cPages, ~0U /*uErr*/);
            break;
        }

        case PGMMODE_PAE:
        case PGMMODE_PAE_NX:
        {
            X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
            rc = PGM_BTH_NAME_PAE_PROT(SyncPage)(pVCpu, PdeDummy, GCPhysFault, cPages, ~0U /*uErr*/);
            break;
        }

        case PGMMODE_AMD64:
        case PGMMODE_AMD64_NX:
        {
            X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
            rc = PGM_BTH_NAME_AMD64_PROT(SyncPage)(pVCpu, PdeDummy, GCPhysFault, cPages, ~0U /*uErr*/);
            break;
        }

        case PGMMODE_EPT:
        {
            X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
            rc = PGM_BTH_NAME_EPT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhysFault, cPages, ~0U /*uErr*/);
            break;
        }

        default:
            AssertMsgFailedReturn(("%d\n", enmShwPagingMode), VERR_INTERNAL_ERROR_5);
    }
    return rc;
}
#endif /* IN_RING0 */
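
/* A minimal sketch of how the ring-0 nested-paging #PF path might drive the
 * helper above (hypothetical call site; GCPhysFault would come from the exit
 * information):
 *
 *      pgmLock(pVM);
 *      rc = pgmShwSyncNestedPageLocked(pVCpu, GCPhysFault, 1, PGMMODE_EPT);
 *      pgmUnlock(pVM);
 */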


/**
 * Gets effective Guest OS page information.
 *
 * When GCPtr is in a big page, the function will return as if it was a normal
 * 4KB page. If the need for distinguishing between big and normal page becomes
 * necessary at a later point, a PGMGstGetPage() will be created for that
 * purpose.
 *
 * @returns VBox status.
 * @param   pVCpu       The current CPU.
 * @param   GCPtr       Guest Context virtual address of the page.
 * @param   pfFlags     Where to store the flags. These are X86_PTE_*, even for big pages.
 * @param   pGCPhys     Where to store the GC physical address of the page.
 *                      This is page aligned.
 */
VMMDECL(int) PGMGstGetPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
{
    VMCPU_ASSERT_EMT(pVCpu);
    return PGM_GST_PFN(GetPage, pVCpu)(pVCpu, GCPtr, pfFlags, pGCPhys);
}


/**
 * Checks if the page is present.
 *
 * @returns true if the page is present.
 * @returns false if the page is not present.
 * @param   pVCpu       VMCPU handle.
 * @param   GCPtr       Address within the page.
 */
VMMDECL(bool) PGMGstIsPagePresent(PVMCPU pVCpu, RTGCPTR GCPtr)
{
    VMCPU_ASSERT_EMT(pVCpu);
    int rc = PGMGstGetPage(pVCpu, GCPtr, NULL, NULL);
    return RT_SUCCESS(rc);
}


/**
 * Sets (replaces) the page flags for a range of pages in the guest's tables.
 *
 * @returns VBox status.
 * @param   pVCpu       VMCPU handle.
 * @param   GCPtr       The address of the first page.
 * @param   cb          The size of the range in bytes.
 * @param   fFlags      Page flags X86_PTE_*, excluding the page mask of course.
 */
VMMDECL(int) PGMGstSetPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
{
    VMCPU_ASSERT_EMT(pVCpu);
    return PGMGstModifyPage(pVCpu, GCPtr, cb, fFlags, 0);
}


/**
 * Modify page flags for a range of pages in the guest's tables
 *
 * The existing flags are ANDed with the fMask and ORed with the fFlags.
 *
 * @returns VBox status code.
 * @param   pVCpu       VMCPU handle.
 * @param   GCPtr       Virtual address of the first page in the range.
 * @param   cb          Size (in bytes) of the range to apply the modification to.
 * @param   fFlags      The OR  mask - page flags X86_PTE_*, excluding the page mask of course.
 * @param   fMask       The AND mask - page flags X86_PTE_*, excluding the page mask of course.
 *                      Be very CAREFUL when ~'ing constants which could be 32-bit!
 */
VMMDECL(int) PGMGstModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
{
    STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,GstModifyPage), a);
    VMCPU_ASSERT_EMT(pVCpu);

    /*
     * Validate input.
     */
    AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
    Assert(cb);

    LogFlow(("PGMGstModifyPage %RGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));

    /*
     * Adjust input.
     */
    cb += GCPtr & PAGE_OFFSET_MASK;
    cb = RT_ALIGN_Z(cb, PAGE_SIZE);
    GCPtr = (GCPtr & PAGE_BASE_GC_MASK);

    /*
     * Call worker.
     */
    int rc = PGM_GST_PFN(ModifyPage, pVCpu)(pVCpu, GCPtr, cb, fFlags, fMask);

    STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,GstModifyPage), a);
    return rc;
}
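
/* A worked example of the input adjustment in PGMGstModifyPage: for
 * GCPtr=0x10c80 and cb=0x500, the in-page offset 0xc80 is first added to cb
 * (-> 0x1180), cb is then rounded up to whole pages (-> 0x2000), and GCPtr
 * is masked down to the page base (0x10000).  The worker thus covers both
 * pages touched by the original byte range.
 */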


#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0

/**
 * Performs the lazy mapping of the 32-bit guest PD.
 *
 * @returns VBox status code.
 * @param   pVCpu       The current CPU.
 * @param   ppPd        Where to return the pointer to the mapping.  This is
 *                      always set.
 */
int pgmGstLazyMap32BitPD(PVMCPU pVCpu, PX86PD *ppPd)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    pgmLock(pVM);

    Assert(!pVCpu->pgm.s.CTX_SUFF(pGst32BitPd));

    RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAGE_MASK;
    PPGMPAGE pPage;
    int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
    if (RT_SUCCESS(rc))
    {
        RTHCPTR HCPtrGuestCR3;
        rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
        if (RT_SUCCESS(rc))
        {
            pVCpu->pgm.s.pGst32BitPdR3 = (R3PTRTYPE(PX86PD))HCPtrGuestCR3;
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
            pVCpu->pgm.s.pGst32BitPdR0 = (R0PTRTYPE(PX86PD))HCPtrGuestCR3;
# endif
            *ppPd = (PX86PD)HCPtrGuestCR3;

            pgmUnlock(pVM);
            return VINF_SUCCESS;
        }

        AssertRC(rc);
    }
    pgmUnlock(pVM);

    *ppPd = NULL;
    return rc;
}


/**
 * Performs the lazy mapping of the PAE guest PDPT.
 *
 * @returns VBox status code.
 * @param   pVCpu       The current CPU.
 * @param   ppPdpt      Where to return the pointer to the mapping.  This is
 *                      always set.
 */
int pgmGstLazyMapPaePDPT(PVMCPU pVCpu, PX86PDPT *ppPdpt)
{
    Assert(!pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt));
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    pgmLock(pVM);

    RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAE_PAGE_MASK;
    PPGMPAGE pPage;
    int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
    if (RT_SUCCESS(rc))
    {
        RTHCPTR HCPtrGuestCR3;
        rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
        if (RT_SUCCESS(rc))
        {
            pVCpu->pgm.s.pGstPaePdptR3 = (R3PTRTYPE(PX86PDPT))HCPtrGuestCR3;
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
            pVCpu->pgm.s.pGstPaePdptR0 = (R0PTRTYPE(PX86PDPT))HCPtrGuestCR3;
# endif
            *ppPdpt = (PX86PDPT)HCPtrGuestCR3;

            pgmUnlock(pVM);
            return VINF_SUCCESS;
        }

        AssertRC(rc);
    }

    pgmUnlock(pVM);
    *ppPdpt = NULL;
    return rc;
}


/**
 * Performs the lazy mapping / updating of a PAE guest PD.
 *
 * @returns Pointer to the mapping.
 * @returns VBox status code.
 * @param   pVCpu       The current CPU.
 * @param   iPdpt       Which PD entry to map (0..3).
 * @param   ppPd        Where to return the pointer to the mapping.  This is
 *                      always set.
 */
int pgmGstLazyMapPaePD(PVMCPU pVCpu, uint32_t iPdpt, PX86PDPAE *ppPd)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    pgmLock(pVM);

    PX86PDPT pGuestPDPT = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
    Assert(pGuestPDPT);
    Assert(pGuestPDPT->a[iPdpt].n.u1Present);
    RTGCPHYS GCPhys = pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK;
    bool const fChanged = pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] != GCPhys;

    PPGMPAGE pPage;
    int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
    if (RT_SUCCESS(rc))
    {
        RTRCPTR RCPtr = NIL_RTRCPTR;
        RTHCPTR HCPtr = NIL_RTHCPTR;
#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
        rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, &HCPtr);
        AssertRC(rc);
#endif
        if (RT_SUCCESS(rc) && fChanged)
        {
            RCPtr = (RTRCPTR)(RTRCUINTPTR)(pVM->pgm.s.GCPtrCR3Mapping + (1 + iPdpt) * PAGE_SIZE);
            rc = PGMMap(pVM, (RTRCUINTPTR)RCPtr, PGM_PAGE_GET_HCPHYS(pPage), PAGE_SIZE, 0);
        }
        if (RT_SUCCESS(rc))
        {
            pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = (R3PTRTYPE(PX86PDPAE))HCPtr;
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
            pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = (R0PTRTYPE(PX86PDPAE))HCPtr;
# endif
            if (fChanged)
            {
                pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = GCPhys;
                pVCpu->pgm.s.apGstPaePDsRC[iPdpt]    = (RCPTRTYPE(PX86PDPAE))RCPtr;
            }

            *ppPd = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
            pgmUnlock(pVM);
            return VINF_SUCCESS;
        }
    }

    /* Invalid page or some failure, invalidate the entry. */
    pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = NIL_RTGCPHYS;
    pVCpu->pgm.s.apGstPaePDsR3[iPdpt]    = 0;
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    pVCpu->pgm.s.apGstPaePDsR0[iPdpt]    = 0;
# endif
    pVCpu->pgm.s.apGstPaePDsRC[iPdpt]    = 0;

    pgmUnlock(pVM);
    return rc;
}

#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
/**
 * Performs the lazy mapping of the AMD64 guest PML4 table.
 *
 * @returns VBox status code.
 * @param   pVCpu       The current CPU.
 * @param   ppPml4      Where to return the pointer to the mapping.  This will
 *                      always be set.
 */
int pgmGstLazyMapPml4(PVMCPU pVCpu, PX86PML4 *ppPml4)
{
    Assert(!pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4));
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    pgmLock(pVM);

    RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_AMD64_PAGE_MASK;
    PPGMPAGE pPage;
    int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
    if (RT_SUCCESS(rc))
    {
        RTHCPTR HCPtrGuestCR3;
        rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
        if (RT_SUCCESS(rc))
        {
            pVCpu->pgm.s.pGstAmd64Pml4R3 = (R3PTRTYPE(PX86PML4))HCPtrGuestCR3;
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
            pVCpu->pgm.s.pGstAmd64Pml4R0 = (R0PTRTYPE(PX86PML4))HCPtrGuestCR3;
# endif
            *ppPml4 = (PX86PML4)HCPtrGuestCR3;

            pgmUnlock(pVM);
            return VINF_SUCCESS;
        }
    }

    pgmUnlock(pVM);
    *ppPml4 = NULL;
    return rc;
}
#endif


/**
 * Gets the PAE PDPEs values cached by the CPU.
 *
 * @returns VBox status code.
 * @param   pVCpu       The virtual CPU.
 * @param   paPdpes     Where to return the four PDPEs.  The array
 *                      pointed to must have 4 entries.
 */
VMM_INT_DECL(int) PGMGstGetPaePdpes(PVMCPU pVCpu, PX86PDPE paPdpes)
{
    Assert(pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);

    paPdpes[0] = pVCpu->pgm.s.aGstPaePdpeRegs[0];
    paPdpes[1] = pVCpu->pgm.s.aGstPaePdpeRegs[1];
    paPdpes[2] = pVCpu->pgm.s.aGstPaePdpeRegs[2];
    paPdpes[3] = pVCpu->pgm.s.aGstPaePdpeRegs[3];
    return VINF_SUCCESS;
}


/**
 * Sets the PAE PDPEs values cached by the CPU.
 *
 * @remarks This must be called *AFTER* PGMUpdateCR3.
 *
 * @returns VBox status code.
 * @param   pVCpu       The virtual CPU.
 * @param   paPdpes     The four PDPE values.  The array pointed to
 *                      must have exactly 4 entries.
 */
VMM_INT_DECL(int) PGMGstUpdatePaePdpes(PVMCPU pVCpu, PCX86PDPE paPdpes)
{
    Assert(pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);

    for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->pgm.s.aGstPaePdpeRegs); i++)
    {
        if (pVCpu->pgm.s.aGstPaePdpeRegs[i].u != paPdpes[i].u)
        {
            pVCpu->pgm.s.aGstPaePdpeRegs[i] = paPdpes[i];

            /* Force lazy remapping if it changed in any way. */
            pVCpu->pgm.s.apGstPaePDsR3[i]   = 0;
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
            pVCpu->pgm.s.apGstPaePDsR0[i]   = 0;
# endif
            pVCpu->pgm.s.apGstPaePDsRC[i]   = 0;
            pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
        }
    }
    return VINF_SUCCESS;
}


/**
 * Gets the current CR3 register value for the shadow memory context.
 * @returns CR3 value.
 * @param   pVCpu       VMCPU handle.
 */
VMMDECL(RTHCPHYS) PGMGetHyperCR3(PVMCPU pVCpu)
{
    PPGMPOOLPAGE pPoolPage = pVCpu->pgm.s.CTX_SUFF(pShwPageCR3);
    AssertPtrReturn(pPoolPage, 0);
    return pPoolPage->Core.Key;
}


1693/**
1694 * Gets the current CR3 register value for the nested memory context.
1695 * @returns CR3 value.
1696 * @param pVCpu VMCPU handle.
 * @param enmShadowMode The shadow paging mode.
1697 */
1698VMMDECL(RTHCPHYS) PGMGetNestedCR3(PVMCPU pVCpu, PGMMODE enmShadowMode)
1699{
1700 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1701 return pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key;
1702}
1703
1704
1705/**
1706 * Gets the current CR3 register value for the HC intermediate memory context.
1707 * @returns CR3 value.
1708 * @param pVM The VM handle.
1709 */
1710VMMDECL(RTHCPHYS) PGMGetInterHCCR3(PVM pVM)
1711{
1712 switch (pVM->pgm.s.enmHostMode)
1713 {
1714 case SUPPAGINGMODE_32_BIT:
1715 case SUPPAGINGMODE_32_BIT_GLOBAL:
1716 return pVM->pgm.s.HCPhysInterPD;
1717
1718 case SUPPAGINGMODE_PAE:
1719 case SUPPAGINGMODE_PAE_GLOBAL:
1720 case SUPPAGINGMODE_PAE_NX:
1721 case SUPPAGINGMODE_PAE_GLOBAL_NX:
1722 return pVM->pgm.s.HCPhysInterPaePDPT;
1723
1724 case SUPPAGINGMODE_AMD64:
1725 case SUPPAGINGMODE_AMD64_GLOBAL:
1726 case SUPPAGINGMODE_AMD64_NX:
1727 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
1728 return pVM->pgm.s.HCPhysInterPaePML4;
1729
1730 default:
1731 AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode));
1732 return ~0;
1733 }
1734}
1735
1736
1737/**
1738 * Gets the current CR3 register value for the RC intermediate memory context.
1739 * @returns CR3 value.
1740 * @param pVM The VM handle.
1741 * @param pVCpu VMCPU handle.
1742 */
1743VMMDECL(RTHCPHYS) PGMGetInterRCCR3(PVM pVM, PVMCPU pVCpu)
1744{
1745 switch (pVCpu->pgm.s.enmShadowMode)
1746 {
1747 case PGMMODE_32_BIT:
1748 return pVM->pgm.s.HCPhysInterPD;
1749
1750 case PGMMODE_PAE:
1751 case PGMMODE_PAE_NX:
1752 return pVM->pgm.s.HCPhysInterPaePDPT;
1753
1754 case PGMMODE_AMD64:
1755 case PGMMODE_AMD64_NX:
1756 return pVM->pgm.s.HCPhysInterPaePML4;
1757
1758 case PGMMODE_EPT:
1759 case PGMMODE_NESTED:
1760 return 0; /* not relevant */
1761
1762 default:
1763 AssertMsgFailed(("enmShadowMode=%d\n", pVCpu->pgm.s.enmShadowMode));
1764 return ~0;
1765 }
1766}
1767
1768
1769/**
1770 * Gets the CR3 register value for the 32-Bit intermediate memory context.
1771 * @returns CR3 value.
1772 * @param pVM The VM handle.
1773 */
1774VMMDECL(RTHCPHYS) PGMGetInter32BitCR3(PVM pVM)
1775{
1776 return pVM->pgm.s.HCPhysInterPD;
1777}
1778
1779
1780/**
1781 * Gets the CR3 register value for the PAE intermediate memory context.
1782 * @returns CR3 value.
1783 * @param pVM The VM handle.
1784 */
1785VMMDECL(RTHCPHYS) PGMGetInterPaeCR3(PVM pVM)
1786{
1787 return pVM->pgm.s.HCPhysInterPaePDPT;
1788}
1789
1790
1791/**
1792 * Gets the CR3 register value for the AMD64 intermediate memory context.
1793 * @returns CR3 value.
1794 * @param pVM The VM handle.
1795 */
1796VMMDECL(RTHCPHYS) PGMGetInterAmd64CR3(PVM pVM)
1797{
1798 return pVM->pgm.s.HCPhysInterPaePML4;
1799}
1800
1801
1802/**
1803 * Performs and schedules necessary updates following a CR3 load or reload.
1804 *
1805 * This will normally involve mapping the guest PD or nPDPT.
1806 *
1807 * @returns VBox status code.
1808 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
1809 * safely be ignored and overridden since the FF will be set too then.
1810 * @param pVCpu VMCPU handle.
1811 * @param cr3 The new cr3.
1812 * @param fGlobal Indicates whether this is a global flush or not.
1813 */
1814VMMDECL(int) PGMFlushTLB(PVMCPU pVCpu, uint64_t cr3, bool fGlobal)
1815{
1816 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLB), a);
1817 PVM pVM = pVCpu->CTX_SUFF(pVM);
1818
1819 VMCPU_ASSERT_EMT(pVCpu);
1820
1821 /*
1822 * Always flag the necessary updates; this is required for hardware acceleration
1823 */
1824 /** @todo optimize this, it shouldn't always be necessary. */
1825 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
1826 if (fGlobal)
1827 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1828 LogFlow(("PGMFlushTLB: cr3=%RX64 OldCr3=%RX64 fGlobal=%d\n", cr3, pVCpu->pgm.s.GCPhysCR3, fGlobal));
1829
1830 /*
1831 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
1832 */
1833 int rc = VINF_SUCCESS;
1834 RTGCPHYS GCPhysCR3;
1835 switch (pVCpu->pgm.s.enmGuestMode)
1836 {
1837 case PGMMODE_PAE:
1838 case PGMMODE_PAE_NX:
1839 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1840 break;
1841 case PGMMODE_AMD64:
1842 case PGMMODE_AMD64_NX:
1843 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
1844 break;
1845 default:
1846 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1847 break;
1848 }
1849
1850 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
1851 {
1852 RTGCPHYS GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
1853 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
1854 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
1855 if (RT_LIKELY(rc == VINF_SUCCESS))
1856 {
1857 if (pgmMapAreMappingsFloating(pVM))
1858 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1859 }
1860 else
1861 {
1862 AssertMsg(rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
1863 Assert(VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_PGM_SYNC_CR3));
1864 pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3;
1865 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MAP_CR3;
1866 if (pgmMapAreMappingsFloating(pVM))
1867 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;
1868 }
1869
1870 if (fGlobal)
1871 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBNewCR3Global));
1872 else
1873 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBNewCR3));
1874 }
1875 else
1876 {
1877# ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
1878 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1879 if (pPool->cDirtyPages)
1880 {
1881 pgmLock(pVM);
1882 pgmPoolResetDirtyPages(pVM);
1883 pgmUnlock(pVM);
1884 }
1885# endif
1886 /*
1887 * Check if we have a pending update of the CR3 monitoring.
1888 */
1889 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
1890 {
1891 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1892 Assert(!pVM->pgm.s.fMappingsFixed); Assert(!pVM->pgm.s.fMappingsDisabled);
1893 }
1894 if (fGlobal)
1895 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBSameCR3Global));
1896 else
1897 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBSameCR3));
1898 }
1899
1900 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLB), a);
1901 return rc;
1902}
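
/*
 * A usage sketch (an assumption, not from the original sources): a MOV-to-CR3
 * emulation path built on PGMFlushTLB. The wrapper is hypothetical; note how
 * VINF_PGM_SYNC_CR3 can be ignored because the force action flag set above
 * guarantees a PGMSyncCR3 before the guest resumes.
 */
#if 0 /* illustrative sketch only */
static int pgmExampleEmulateMovToCr3(PVMCPU pVCpu, uint64_t uNewCr3, uint64_t uCr4)
{
    bool const fGlobal = !(uCr4 & X86_CR4_PGE); /* no PGE -> all flushes are global */
    int rc = PGMFlushTLB(pVCpu, uNewCr3, fGlobal);
    if (rc == VINF_PGM_SYNC_CR3)
        rc = VINF_SUCCESS;                      /* the pending FF will take care of it */
    return rc;
}
#endif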
1903
1904
1905/**
1906 * Performs and schedules necessary updates following a CR3 load or reload when
1907 * using nested or extended paging.
1908 *
1909 * This API is an alternative to PGMFlushTLB that avoids actually flushing the
1910 * TLB and triggering a SyncCR3.
1911 *
1912 * This will normally involve mapping the guest PD or nPDPT.
1913 *
1914 * @returns VBox status code.
1915 * @retval VINF_SUCCESS.
1916 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync (only possible
1917 * when not in nested mode). This can safely be ignored and overridden
1918 * since the FF will be set too then.
1919 * @param pVCpu VMCPU handle.
1920 * @param cr3 The new cr3.
1921 */
1922VMMDECL(int) PGMUpdateCR3(PVMCPU pVCpu, uint64_t cr3)
1923{
1924 PVM pVM = pVCpu->CTX_SUFF(pVM);
1925
1926 VMCPU_ASSERT_EMT(pVCpu);
1927 LogFlow(("PGMUpdateCR3: cr3=%RX64 OldCr3=%RX64\n", cr3, pVCpu->pgm.s.GCPhysCR3));
1928
1929 /* We assume we're only called in nested paging mode. */
1930 Assert(pVM->pgm.s.fNestedPaging || pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
1931 Assert(pVM->pgm.s.fMappingsDisabled);
1932 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3));
1933
1934 /*
1935 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
1936 */
1937 int rc = VINF_SUCCESS;
1938 RTGCPHYS GCPhysCR3;
1939 switch (pVCpu->pgm.s.enmGuestMode)
1940 {
1941 case PGMMODE_PAE:
1942 case PGMMODE_PAE_NX:
1943 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1944 break;
1945 case PGMMODE_AMD64:
1946 case PGMMODE_AMD64_NX:
1947 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
1948 break;
1949 default:
1950 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1951 break;
1952 }
1953 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
1954 {
1955 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
1956 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
1957 AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */ /** @todo this isn't true for the mac, but we need hw to test/fix this. */
1958 }
1959 return rc;
1960}
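
/*
 * A usage sketch (an assumption, not from the original sources): with nested
 * or EPT paging a guest CR3 reload needs no monitoring or TLB flushing, so a
 * caller can simply forward the new value and expect plain success, matching
 * the AssertRCSuccess above.
 */
#if 0 /* illustrative sketch only */
static void pgmExampleNestedCr3Reload(PVMCPU pVCpu, uint64_t uNewCr3)
{
    int rc = PGMUpdateCR3(pVCpu, uNewCr3);
    AssertRC(rc); /* VINF_PGM_SYNC_CR3 is not expected with nested paging */
}
#endif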
1961
1962
1963/**
1964 * Synchronize the paging structures.
1965 *
1966 * This function is called in response to the VM_FF_PGM_SYNC_CR3 and
1967 * VM_FF_PGM_SYNC_CR3_NONGLOBAL. Those two force action flags are set
1968 * in several places, most importantly whenever the CR3 is loaded.
1969 *
1970 * @returns VBox status code.
1971 * @param pVCpu VMCPU handle.
1972 * @param cr0 Guest context CR0 register
1973 * @param cr3 Guest context CR3 register
1974 * @param cr4 Guest context CR4 register
1975 * @param fGlobal Including global page directories or not
1976 */
1977VMMDECL(int) PGMSyncCR3(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
1978{
1979 PVM pVM = pVCpu->CTX_SUFF(pVM);
1980 int rc;
1981
1982 VMCPU_ASSERT_EMT(pVCpu);
1983
1984 /*
1985 * The pool may have pending stuff and even require a return to ring-3 to
1986 * clear the whole thing.
1987 */
1988 rc = pgmPoolSyncCR3(pVCpu);
1989 if (rc != VINF_SUCCESS)
1990 return rc;
1991
1992 /*
1993 * We might be called when we shouldn't.
1994 *
1995 * The mode switching will ensure that the PD is resynced
1996 * after every mode switch. So, if we find ourselves here
1997 * when in protected or real mode we can safely disable the
1998 * FF and return immediately.
1999 */
2000 if (pVCpu->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
2001 {
2002 Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
2003 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2004 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2005 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2006 return VINF_SUCCESS;
2007 }
2008
2009 /* If global pages are not supported, then all flushes are global. */
2010 if (!(cr4 & X86_CR4_PGE))
2011 fGlobal = true;
2012 LogFlow(("PGMSyncCR3: cr0=%RX64 cr3=%RX64 cr4=%RX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
2013 VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)));
2014
2015 /*
2016 * Check if we need to finish an aborted MapCR3 call (see PGMFlushTLB).
2017 * This should be done before SyncCR3.
2018 */
2019 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MAP_CR3)
2020 {
2021 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MAP_CR3;
2022
2023 RTGCPHYS GCPhysCR3Old = pVCpu->pgm.s.GCPhysCR3;
2024 RTGCPHYS GCPhysCR3;
2025 switch (pVCpu->pgm.s.enmGuestMode)
2026 {
2027 case PGMMODE_PAE:
2028 case PGMMODE_PAE_NX:
2029 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
2030 break;
2031 case PGMMODE_AMD64:
2032 case PGMMODE_AMD64_NX:
2033 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
2034 break;
2035 default:
2036 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
2037 break;
2038 }
2039
2040 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
2041 {
2042 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2043 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
2044 }
2045
2046 /* Make sure we check for pending pgm pool syncs as we clear VMCPU_FF_PGM_SYNC_CR3 later on! */
2047 if ( rc == VINF_PGM_SYNC_CR3
2048 || (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL))
2049 {
2050 Log(("PGMSyncCR3: pending pgm pool sync after MapCR3!\n"));
2051#ifdef IN_RING3
2052 rc = pgmPoolSyncCR3(pVCpu);
2053#else
2054 if (rc == VINF_PGM_SYNC_CR3)
2055 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3Old;
2056 return VINF_PGM_SYNC_CR3;
2057#endif
2058 }
2059 AssertRCReturn(rc, rc);
2060 AssertRCSuccessReturn(rc, VERR_INTERNAL_ERROR);
2061 }
2062
2063 /*
2064 * Let the 'Bth' function do the work and we'll just keep track of the flags.
2065 */
2066 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
2067 rc = PGM_BTH_PFN(SyncCR3, pVCpu)(pVCpu, cr0, cr3, cr4, fGlobal);
2068 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
2069 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
2070 if (rc == VINF_SUCCESS)
2071 {
2072 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)
2073 {
2074 /* Go back to ring 3 if a pgm pool sync is again pending. */
2075 return VINF_PGM_SYNC_CR3;
2076 }
2077
2078 if (!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
2079 {
2080 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2081 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2082 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2083 }
2084
2085 /*
2086 * Check if we have a pending update of the CR3 monitoring.
2087 */
2088 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
2089 {
2090 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
2091 Assert(!pVM->pgm.s.fMappingsFixed); Assert(!pVM->pgm.s.fMappingsDisabled);
2092 }
2093 }
2094
2095 /*
2096 * Now flush the CR3 (guest context).
2097 */
2098 if (rc == VINF_SUCCESS)
2099 PGM_INVL_VCPU_TLBS(pVCpu);
2100 return rc;
2101}
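
/*
 * A usage sketch (an assumption, not from the original sources): the shape of
 * the force-flag service code that ends up calling PGMSyncCR3 before resuming
 * guest execution. The helper is hypothetical; the FF macros and CPUM
 * accessors are real APIs.
 */
#if 0 /* illustrative sketch only */
static int pgmExampleServiceSyncCr3Ff(PVMCPU pVCpu)
{
    if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
    {
        bool const fGlobal = VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
        /* A VINF_PGM_SYNC_CR3 result here means: return to ring-3 and retry. */
        return PGMSyncCR3(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR3(pVCpu),
                          CPUMGetGuestCR4(pVCpu), fGlobal);
    }
    return VINF_SUCCESS;
}
#endif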
2102
2103
2104/**
2105 * Called whenever CR0 or CR4 changes in a way which may affect the paging mode.
2106 *
2107 * @returns VBox status code, with the following informational code for
2108 * VM scheduling.
2109 * @retval VINF_SUCCESS if there was no change, or it was successfully dealt with.
2110 * @retval VINF_PGM_CHANGE_MODE if we're in RC or R0 and the mode changes.
2111 * (I.e. not in R3.)
2112 * @retval VINF_EM_SUSPEND or VINF_EM_OFF on a fatal runtime error. (R3 only)
2113 *
2114 * @param pVCpu VMCPU handle.
2115 * @param cr0 The new cr0.
2116 * @param cr4 The new cr4.
2117 * @param efer The new extended feature enable register.
2118 */
2119VMMDECL(int) PGMChangeMode(PVMCPU pVCpu, uint64_t cr0, uint64_t cr4, uint64_t efer)
2120{
2121 PVM pVM = pVCpu->CTX_SUFF(pVM);
2122 PGMMODE enmGuestMode;
2123
2124 VMCPU_ASSERT_EMT(pVCpu);
2125
2126 /*
2127 * Calc the new guest mode.
2128 */
2129 if (!(cr0 & X86_CR0_PE))
2130 enmGuestMode = PGMMODE_REAL;
2131 else if (!(cr0 & X86_CR0_PG))
2132 enmGuestMode = PGMMODE_PROTECTED;
2133 else if (!(cr4 & X86_CR4_PAE))
2134 {
2135 bool const fPse = !!(cr4 & X86_CR4_PSE);
2136 if (pVCpu->pgm.s.fGst32BitPageSizeExtension != fPse)
2137 Log(("PGMChangeMode: CR4.PSE %d -> %d\n", pVCpu->pgm.s.fGst32BitPageSizeExtension, fPse));
2138 pVCpu->pgm.s.fGst32BitPageSizeExtension = fPse;
2139 enmGuestMode = PGMMODE_32_BIT;
2140 }
2141 else if (!(efer & MSR_K6_EFER_LME))
2142 {
2143 if (!(efer & MSR_K6_EFER_NXE))
2144 enmGuestMode = PGMMODE_PAE;
2145 else
2146 enmGuestMode = PGMMODE_PAE_NX;
2147 }
2148 else
2149 {
2150 if (!(efer & MSR_K6_EFER_NXE))
2151 enmGuestMode = PGMMODE_AMD64;
2152 else
2153 enmGuestMode = PGMMODE_AMD64_NX;
2154 }
2155
2156 /*
2157 * Did it change?
2158 */
2159 if (pVCpu->pgm.s.enmGuestMode == enmGuestMode)
2160 return VINF_SUCCESS;
2161
2162 /* Flush the TLB */
2163 PGM_INVL_VCPU_TLBS(pVCpu);
2164
2165#ifdef IN_RING3
2166 return PGMR3ChangeMode(pVM, pVCpu, enmGuestMode);
2167#else
2168 LogFlow(("PGMChangeMode: returns VINF_PGM_CHANGE_MODE.\n"));
2169 return VINF_PGM_CHANGE_MODE;
2170#endif
2171}
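
/*
 * A usage sketch (an assumption, not from the original sources): propagating
 * the informational status after a control register write. In RC/R0 the mode
 * switch is deferred and VINF_PGM_CHANGE_MODE must travel up to ring-3, where
 * PGMR3ChangeMode performs the actual switch.
 */
#if 0 /* illustrative sketch only */
static int pgmExampleAfterCr0Write(PVMCPU pVCpu, uint64_t uNewCr0)
{
    return PGMChangeMode(pVCpu, uNewCr0, CPUMGetGuestCR4(pVCpu), CPUMGetGuestEFER(pVCpu));
}
#endif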
2172
2173
2174/**
2175 * Gets the current guest paging mode.
2176 *
2177 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
2178 *
2179 * @returns The current paging mode.
2180 * @param pVCpu VMCPU handle.
2181 */
2182VMMDECL(PGMMODE) PGMGetGuestMode(PVMCPU pVCpu)
2183{
2184 return pVCpu->pgm.s.enmGuestMode;
2185}
2186
2187
2188/**
2189 * Gets the current shadow paging mode.
2190 *
2191 * @returns The current paging mode.
2192 * @param pVCpu VMCPU handle.
2193 */
2194VMMDECL(PGMMODE) PGMGetShadowMode(PVMCPU pVCpu)
2195{
2196 return pVCpu->pgm.s.enmShadowMode;
2197}
2198
2199
2200/**
2201 * Gets the current host paging mode.
2202 *
2203 * @returns The current paging mode.
2204 * @param pVM The VM handle.
2205 */
2206VMMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
2207{
2208 switch (pVM->pgm.s.enmHostMode)
2209 {
2210 case SUPPAGINGMODE_32_BIT:
2211 case SUPPAGINGMODE_32_BIT_GLOBAL:
2212 return PGMMODE_32_BIT;
2213
2214 case SUPPAGINGMODE_PAE:
2215 case SUPPAGINGMODE_PAE_GLOBAL:
2216 return PGMMODE_PAE;
2217
2218 case SUPPAGINGMODE_PAE_NX:
2219 case SUPPAGINGMODE_PAE_GLOBAL_NX:
2220 return PGMMODE_PAE_NX;
2221
2222 case SUPPAGINGMODE_AMD64:
2223 case SUPPAGINGMODE_AMD64_GLOBAL:
2224 return PGMMODE_AMD64;
2225
2226 case SUPPAGINGMODE_AMD64_NX:
2227 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
2228 return PGMMODE_AMD64_NX;
2229
2230 default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
2231 }
2232
2233 return PGMMODE_INVALID;
2234}
2235
2236
2237/**
2238 * Get mode name.
2239 *
2240 * @returns read-only name string.
2241 * @param enmMode The mode which name is desired.
2242 */
2243VMMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
2244{
2245 switch (enmMode)
2246 {
2247 case PGMMODE_REAL: return "Real";
2248 case PGMMODE_PROTECTED: return "Protected";
2249 case PGMMODE_32_BIT: return "32-bit";
2250 case PGMMODE_PAE: return "PAE";
2251 case PGMMODE_PAE_NX: return "PAE+NX";
2252 case PGMMODE_AMD64: return "AMD64";
2253 case PGMMODE_AMD64_NX: return "AMD64+NX";
2254 case PGMMODE_NESTED: return "Nested";
2255 case PGMMODE_EPT: return "EPT";
2256 default: return "unknown mode value";
2257 }
2258}
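
/*
 * Example (an assumption, not from the original sources): the name is handy
 * for logging, e.g.:
 *     Log(("Guest mode: %s\n", PGMGetModeName(PGMGetGuestMode(pVCpu))));
 */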
2259
2260
2261
2262/**
2263 * Notification from CPUM that the EFER.NXE bit has changed.
2264 *
2265 * @param pVCpu The virtual CPU for which EFER changed.
2266 * @param fNxe The new NXE state.
2267 */
2268VMM_INT_DECL(void) PGMNotifyNxeChanged(PVMCPU pVCpu, bool fNxe)
2269{
2270/** @todo VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu); */
2271 Log(("PGMNotifyNxeChanged: fNxe=%RTbool\n", fNxe));
2272
2273 pVCpu->pgm.s.fNoExecuteEnabled = fNxe;
2274 if (fNxe)
2275 {
2276 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
2277 pVCpu->pgm.s.fGstPaeMbzPteMask &= ~X86_PTE_PAE_NX;
2278 pVCpu->pgm.s.fGstPaeMbzPdeMask &= ~X86_PDE_PAE_NX;
2279 pVCpu->pgm.s.fGstPaeMbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
2280 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask - N/A */
2281 pVCpu->pgm.s.fGstAmd64MbzPteMask &= ~X86_PTE_PAE_NX;
2282 pVCpu->pgm.s.fGstAmd64MbzPdeMask &= ~X86_PDE_PAE_NX;
2283 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
2284 pVCpu->pgm.s.fGstAmd64MbzPdpeMask &= ~X86_PDPE_LM_NX;
2285 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask &= ~X86_PDPE_LM_NX;
2286 pVCpu->pgm.s.fGstAmd64MbzPml4eMask &= ~X86_PML4E_NX;
2287
2288 pVCpu->pgm.s.fGst64ShadowedPteMask |= X86_PTE_PAE_NX;
2289 pVCpu->pgm.s.fGst64ShadowedPdeMask |= X86_PDE_PAE_NX;
2290 pVCpu->pgm.s.fGst64ShadowedBigPdeMask |= X86_PDE2M_PAE_NX;
2291 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask |= X86_PDE2M_PAE_NX;
2292 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask |= X86_PDPE_LM_NX;
2293 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask |= X86_PML4E_NX;
2294 }
2295 else
2296 {
2297 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
2298 pVCpu->pgm.s.fGstPaeMbzPteMask |= X86_PTE_PAE_NX;
2299 pVCpu->pgm.s.fGstPaeMbzPdeMask |= X86_PDE_PAE_NX;
2300 pVCpu->pgm.s.fGstPaeMbzBigPdeMask |= X86_PDE2M_PAE_NX;
2301 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask - N/A */
2302 pVCpu->pgm.s.fGstAmd64MbzPteMask |= X86_PTE_PAE_NX;
2303 pVCpu->pgm.s.fGstAmd64MbzPdeMask |= X86_PDE_PAE_NX;
2304 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask |= X86_PDE2M_PAE_NX;
2305 pVCpu->pgm.s.fGstAmd64MbzPdpeMask |= X86_PDPE_LM_NX;
2306 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask |= X86_PDPE_LM_NX;
2307 pVCpu->pgm.s.fGstAmd64MbzPml4eMask |= X86_PML4E_NX;
2308
2309 pVCpu->pgm.s.fGst64ShadowedPteMask &= ~X86_PTE_PAE_NX;
2310 pVCpu->pgm.s.fGst64ShadowedPdeMask &= ~X86_PDE_PAE_NX;
2311 pVCpu->pgm.s.fGst64ShadowedBigPdeMask &= ~X86_PDE2M_PAE_NX;
2312 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask &= ~X86_PDE2M_PAE_NX;
2313 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask &= ~X86_PDPE_LM_NX;
2314 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask &= ~X86_PML4E_NX;
2315 }
2316}
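
/*
 * A sketch of what the MBZ masks adjusted above are used for (an assumption,
 * not from the original sources): a guest PTE with any must-be-zero bit set
 * is rejected as a reserved-bit fault, and while EFER.NXE is clear the NX bit
 * is part of that mask.
 */
#if 0 /* illustrative sketch only */
static bool pgmExampleIsPaePteValid(PVMCPU pVCpu, X86PTEPAE Pte)
{
    return !(Pte.u & pVCpu->pgm.s.fGstPaeMbzPteMask);
}
#endif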
2317
2318
2319/**
2320 * Check if any pgm pool pages are marked dirty (not monitored).
2321 *
2322 * @returns true if dirty pages are pending, false otherwise.
2323 * @param pVM The VM to operate on.
2324 */
2325VMMDECL(bool) PGMHasDirtyPages(PVM pVM)
2326{
2327 return pVM->pgm.s.CTX_SUFF(pPool)->cDirtyPages != 0;
2328}
2329
2330
2331/**
2332 * Check if this VCPU currently owns the PGM lock.
2333 *
2334 * @returns bool owner/not owner
2335 * @param pVM The VM to operate on.
2336 */
2337VMMDECL(bool) PGMIsLockOwner(PVM pVM)
2338{
2339 return PDMCritSectIsOwner(&pVM->pgm.s.CritSectX);
2340}
2341
2342
2343/**
2344 * Enable or disable large page usage.
2345 *
2346 * @returns VBox status code.
2347 * @param pVM The VM to operate on.
2348 * @param fUseLargePages Whether to use large pages or not.
2349 */
2350VMMDECL(int) PGMSetLargePageUsage(PVM pVM, bool fUseLargePages)
2351{
2352 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2353
2354 pVM->fUseLargePages = fUseLargePages;
2355 return VINF_SUCCESS;
2356}
2357
2358
2359/**
2360 * Acquire the PGM lock.
2361 *
2362 * @returns VBox status code
2363 * @param pVM The VM to operate on.
2364 */
2365int pgmLock(PVM pVM)
2366{
2367 int rc = PDMCritSectEnter(&pVM->pgm.s.CritSectX, VERR_SEM_BUSY);
2368#if defined(IN_RC) || defined(IN_RING0)
2369 if (rc == VERR_SEM_BUSY)
2370 rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_LOCK, 0);
2371#endif
2372 AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc));
2373 return rc;
2374}
2375
2376
2377/**
2378 * Release the PGM lock.
2379 *
2381 * @param pVM The VM to operate on.
2382 */
2383void pgmUnlock(PVM pVM)
2384{
2385 uint32_t cDeprecatedPageLocks = pVM->pgm.s.cDeprecatedPageLocks;
2386 pVM->pgm.s.cDeprecatedPageLocks = 0;
2387 int rc = PDMCritSectLeave(&pVM->pgm.s.CritSectX);
2388 if (rc == VINF_SEM_NESTED)
2389 pVM->pgm.s.cDeprecatedPageLocks = cDeprecatedPageLocks;
2390}
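
/*
 * A usage sketch (an assumption, not from the original sources): the
 * canonical locking pattern seen throughout this file; pgmPhysGetPage
 * requires the PGM lock to be held by the caller.
 */
#if 0 /* illustrative sketch only */
static int pgmExampleQueryPageType(PVM pVM, RTGCPHYS GCPhys, PGMPAGETYPE *penmType)
{
    pgmLock(pVM);
    PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
    int rc = VINF_SUCCESS;
    if (pPage)
        *penmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
    else
        rc = VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
    pgmUnlock(pVM);
    return rc;
}
#endif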
2391
2392#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
2393
2394/**
2395 * Common worker for pgmRZDynMapGCPageOffInlined and pgmRZDynMapGCPageV2Inlined.
2396 *
2397 * @returns VBox status code.
2398 * @param pVM The VM handle.
2399 * @param pVCpu The current CPU.
2400 * @param GCPhys The guest physical address of the page to map. The
2401 * offset bits are not ignored.
2402 * @param ppv Where to return the address corresponding to @a GCPhys.
2403 */
2404int pgmRZDynMapGCPageCommon(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
2405{
2406 pgmLock(pVM);
2407
2408 /*
2409 * Convert it to a writable page and pass it on to the dynamic mapper.
2410 */
2411 int rc;
2412 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
2413 if (RT_LIKELY(pPage))
2414 {
2415 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
2416 if (RT_SUCCESS(rc))
2417 {
2418 void *pv;
2419 rc = pgmRZDynMapHCPageInlined(pVCpu, PGM_PAGE_GET_HCPHYS(pPage), &pv RTLOG_COMMA_SRC_POS_ARGS);
2420 if (RT_SUCCESS(rc))
2421 *ppv = (void *)((uintptr_t)pv | ((uintptr_t)GCPhys & PAGE_OFFSET_MASK));
2422 }
2423 else
2424 AssertRC(rc);
2425 }
2426 else
2427 {
2428 AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
2429 rc = VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2430 }
2431
2432 pgmUnlock(pVM);
2433 return rc;
2434}
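
/*
 * A usage fragment (an assumption, not from the original sources): because
 * the offset bits of GCPhys are preserved, the returned pointer addresses the
 * exact byte asked for. pVM, pVCpu and GCPhys are assumed to be in scope.
 */
#if 0 /* illustrative sketch only */
    void    *pv;
    uint32_t u32 = 0;
    int rc = pgmRZDynMapGCPageCommon(pVM, pVCpu, GCPhys, &pv RTLOG_COMMA_SRC_POS);
    if (RT_SUCCESS(rc))
        u32 = *(uint32_t *)pv; /* reads at GCPhys, page offset included */
#endif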
2435
2436#endif /* IN_RC || VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
2437#if !defined(IN_R0) || defined(LOG_ENABLED)
2438
2439/** Format handler for PGMPAGE.
2440 * @copydoc FNRTSTRFORMATTYPE */
2441static DECLCALLBACK(size_t) pgmFormatTypeHandlerPage(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
2442 const char *pszType, void const *pvValue,
2443 int cchWidth, int cchPrecision, unsigned fFlags,
2444 void *pvUser)
2445{
2446 size_t cch;
2447 PCPGMPAGE pPage = (PCPGMPAGE)pvValue;
2448 if (VALID_PTR(pPage))
2449 {
2450 char szTmp[64+80];
2451
2452 cch = 0;
2453
2454 /* The single char state stuff. */
2455 static const char s_achPageStates[4] = { 'Z', 'A', 'W', 'S' };
2456 szTmp[cch++] = s_achPageStates[PGM_PAGE_GET_STATE_NA(pPage)];
2457
2458#define IS_PART_INCLUDED(lvl) ( !(fFlags & RTSTR_F_PRECISION) || cchPrecision == (lvl) || cchPrecision >= (lvl)+10 )
2459 if (IS_PART_INCLUDED(5))
2460 {
2461 static const char s_achHandlerStates[4] = { '-', 't', 'w', 'a' };
2462 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)];
2463 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_VIRT_STATE(pPage)];
2464 }
2465
2466 /* The type. */
2467 if (IS_PART_INCLUDED(4))
2468 {
2469 szTmp[cch++] = ':';
2470 static const char s_achPageTypes[8][4] = { "INV", "RAM", "MI2", "M2A", "SHA", "ROM", "MIO", "BAD" };
2471 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][0];
2472 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][1];
2473 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][2];
2474 }
2475
2476 /* The numbers. */
2477 if (IS_PART_INCLUDED(3))
2478 {
2479 szTmp[cch++] = ':';
2480 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_HCPHYS_NA(pPage), 16, 12, 0, RTSTR_F_ZEROPAD | RTSTR_F_64BIT);
2481 }
2482
2483 if (IS_PART_INCLUDED(2))
2484 {
2485 szTmp[cch++] = ':';
2486 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_PAGEID(pPage), 16, 7, 0, RTSTR_F_ZEROPAD | RTSTR_F_32BIT);
2487 }
2488
2489 if (IS_PART_INCLUDED(6))
2490 {
2491 szTmp[cch++] = ':';
2492 static const char s_achRefs[4] = { '-', 'U', '!', 'L' };
2493 szTmp[cch++] = s_achRefs[PGM_PAGE_GET_TD_CREFS_NA(pPage)];
2494 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_TD_IDX_NA(pPage), 16, 4, 0, RTSTR_F_ZEROPAD | RTSTR_F_16BIT);
2495 }
2496#undef IS_PART_INCLUDED
2497
2498 cch = pfnOutput(pvArgOutput, szTmp, cch);
2499 }
2500 else
2501 cch = pfnOutput(pvArgOutput, "<bad-pgmpage-ptr>", sizeof("<bad-pgmpage-ptr>") - 1);
2502 return cch;
2503}
2504
2505
2506/** Format handler for PGMRAMRANGE.
2507 * @copydoc FNRTSTRFORMATTYPE */
2508static DECLCALLBACK(size_t) pgmFormatTypeHandlerRamRange(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
2509 const char *pszType, void const *pvValue,
2510 int cchWidth, int cchPrecision, unsigned fFlags,
2511 void *pvUser)
2512{
2513 size_t cch;
2514 PGMRAMRANGE const *pRam = (PGMRAMRANGE const *)pvValue;
2515 if (VALID_PTR(pRam))
2516 {
2517 char szTmp[80];
2518 cch = RTStrPrintf(szTmp, sizeof(szTmp), "%RGp-%RGp", pRam->GCPhys, pRam->GCPhysLast);
2519 cch = pfnOutput(pvArgOutput, szTmp, cch);
2520 }
2521 else
2522 cch = pfnOutput(pvArgOutput, "<bad-pgmramrange-ptr>", sizeof("<bad-pgmramrange-ptr>") - 1);
2523 return cch;
2524}
2525
2526/** Format type handlers to be registered/deregistered. */
2527static const struct
2528{
2529 char szType[24];
2530 PFNRTSTRFORMATTYPE pfnHandler;
2531} g_aPgmFormatTypes[] =
2532{
2533 { "pgmpage", pgmFormatTypeHandlerPage },
2534 { "pgmramrange", pgmFormatTypeHandlerRamRange }
2535};
2536
2537#endif /* !IN_R0 || LOG_ENABLED */
2538
2539/**
2540 * Registers the global string format types.
2541 *
2542 * This should be called at module load time or in some other manner that
2543 * ensures that it's called exactly once.
2544 *
2545 * @returns IPRT status code on RTStrFormatTypeRegister failure.
2546 */
2547VMMDECL(int) PGMRegisterStringFormatTypes(void)
2548{
2549#if !defined(IN_R0) || defined(LOG_ENABLED)
2550 int rc = VINF_SUCCESS;
2551 unsigned i;
2552 for (i = 0; RT_SUCCESS(rc) && i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
2553 {
2554 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
2555# ifdef IN_RING0
2556 if (rc == VERR_ALREADY_EXISTS)
2557 {
2558 /* in case of cleanup failure in ring-0 */
2559 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2560 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
2561 }
2562# endif
2563 }
2564 if (RT_FAILURE(rc))
2565 while (i-- > 0)
2566 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2567
2568 return rc;
2569#else
2570 return VINF_SUCCESS;
2571#endif
2572}
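
/*
 * Example (an assumption, not from the original sources): once registered,
 * the types are used like any other IPRT format extension, e.g.:
 *     Log(("%RGp -> %R[pgmpage] in %R[pgmramrange]\n", GCPhys, pPage, pRam));
 */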
2573
2574
2575/**
2576 * Deregisters the global string format types.
2577 *
2578 * This should be called at module unload time or in some other manner that
2579 * ensures that it's called exactly once.
2580 */
2581VMMDECL(void) PGMDeregisterStringFormatTypes(void)
2582{
2583#if !defined(IN_R0) || defined(LOG_ENABLED)
2584 for (unsigned i = 0; i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
2585 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2586#endif
2587}
2588
2589#ifdef VBOX_STRICT
2590
2591/**
2592 * Asserts that there are no mapping conflicts.
2593 *
2594 * @returns Number of conflicts.
2595 * @param pVM The VM Handle.
2596 */
2597VMMDECL(unsigned) PGMAssertNoMappingConflicts(PVM pVM)
2598{
2599 unsigned cErrors = 0;
2600
2601 /* Only applies to raw mode -> 1 VCPU */
2602 Assert(pVM->cCpus == 1);
2603 PVMCPU pVCpu = &pVM->aCpus[0];
2604
2605 /*
2606 * Check for mapping conflicts.
2607 */
2608 for (PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
2609 pMapping;
2610 pMapping = pMapping->CTX_SUFF(pNext))
2611 {
2612 /** @todo This is slow and should be optimized, but since it's just assertions I don't care now. */
2613 for (RTGCPTR GCPtr = pMapping->GCPtr;
2614 GCPtr <= pMapping->GCPtrLast;
2615 GCPtr += PAGE_SIZE)
2616 {
2617 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)GCPtr, NULL, NULL);
2618 if (rc != VERR_PAGE_TABLE_NOT_PRESENT)
2619 {
2620 AssertMsgFailed(("Conflict at %RGv with %s\n", GCPtr, R3STRING(pMapping->pszDesc)));
2621 cErrors++;
2622 break;
2623 }
2624 }
2625 }
2626
2627 return cErrors;
2628}
2629
2630
2631/**
2632 * Asserts that everything related to the guest CR3 is correctly shadowed.
2633 *
2634 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
2635 * and assert the correctness of the guest CR3 mapping before asserting that the
2636 * shadow page tables are in sync with the guest page tables.
2637 *
2638 * @returns Number of conflicts.
2639 * @param pVM The VM Handle.
2640 * @param pVCpu VMCPU handle.
2641 * @param cr3 The current guest CR3 register value.
2642 * @param cr4 The current guest CR4 register value.
2643 */
2644VMMDECL(unsigned) PGMAssertCR3(PVM pVM, PVMCPU pVCpu, uint64_t cr3, uint64_t cr4)
2645{
2646 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
2647 pgmLock(pVM);
2648 unsigned cErrors = PGM_BTH_PFN(AssertCR3, pVCpu)(pVCpu, cr3, cr4, 0, ~(RTGCPTR)0);
2649 pgmUnlock(pVM);
2650 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
2651 return cErrors;
2652}
2653
2654#endif /* VBOX_STRICT */