VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp @ 39402

Last change on this file since 39402 was 39402, checked in by vboxsync, 13 years ago

VMM: don't use generic IPE status codes, use specific ones. Part 1.

1/* $Id: PGMAll.cpp 39402 2011-11-23 16:25:04Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor - All context code.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_PGM
22#include <VBox/vmm/pgm.h>
23#include <VBox/vmm/cpum.h>
24#include <VBox/vmm/selm.h>
25#include <VBox/vmm/iom.h>
26#include <VBox/sup.h>
27#include <VBox/vmm/mm.h>
28#include <VBox/vmm/stam.h>
29#include <VBox/vmm/csam.h>
30#include <VBox/vmm/patm.h>
31#include <VBox/vmm/trpm.h>
32#include <VBox/vmm/rem.h>
33#include <VBox/vmm/em.h>
34#include <VBox/vmm/hwaccm.h>
35#include <VBox/vmm/hwacc_vmx.h>
36#include "PGMInternal.h"
37#include <VBox/vmm/vm.h>
38#include "PGMInline.h"
39#include <iprt/assert.h>
40#include <iprt/asm-amd64-x86.h>
41#include <iprt/string.h>
42#include <VBox/log.h>
43#include <VBox/param.h>
44#include <VBox/err.h>
45
46
47/*******************************************************************************
48* Structures and Typedefs *
49*******************************************************************************/
50/**
51 * State structure for PGM_GST_NAME(HandlerVirtualUpdate) that's
52 * passed to PGM_GST_NAME(VirtHandlerUpdateOne) during enumeration.
53 */
54typedef struct PGMHVUSTATE
55{
56 /** The VM handle. */
57 PVM pVM;
58 /** The VMCPU handle. */
59 PVMCPU pVCpu;
60 /** The todo flags. */
61 RTUINT fTodo;
62 /** The CR4 register value. */
63 uint32_t cr4;
64} PGMHVUSTATE, *PPGMHVUSTATE;
65
66
67/*******************************************************************************
68* Internal Functions *
69*******************************************************************************/
70DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
71DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPU pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde);
72#ifndef IN_RC
73static int pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD);
74static int pgmShwGetEPTPDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD);
75#endif
76
77
78/*
79 * Shadow - 32-bit mode
80 */
81#define PGM_SHW_TYPE PGM_TYPE_32BIT
82#define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
83#include "PGMAllShw.h"
84
85/* Guest - real mode */
86#define PGM_GST_TYPE PGM_TYPE_REAL
87#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
88#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
89#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
90#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
91#include "PGMGstDefs.h"
92#include "PGMAllGst.h"
93#include "PGMAllBth.h"
94#undef BTH_PGMPOOLKIND_PT_FOR_PT
95#undef BTH_PGMPOOLKIND_ROOT
96#undef PGM_BTH_NAME
97#undef PGM_GST_TYPE
98#undef PGM_GST_NAME
99
100/* Guest - protected mode */
101#define PGM_GST_TYPE PGM_TYPE_PROT
102#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
103#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
104#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
105#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
106#include "PGMGstDefs.h"
107#include "PGMAllGst.h"
108#include "PGMAllBth.h"
109#undef BTH_PGMPOOLKIND_PT_FOR_PT
110#undef BTH_PGMPOOLKIND_ROOT
111#undef PGM_BTH_NAME
112#undef PGM_GST_TYPE
113#undef PGM_GST_NAME
114
115/* Guest - 32-bit mode */
116#define PGM_GST_TYPE PGM_TYPE_32BIT
117#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
118#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
119#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
120#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
121#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD
122#include "PGMGstDefs.h"
123#include "PGMAllGst.h"
124#include "PGMAllBth.h"
125#undef BTH_PGMPOOLKIND_PT_FOR_BIG
126#undef BTH_PGMPOOLKIND_PT_FOR_PT
127#undef BTH_PGMPOOLKIND_ROOT
128#undef PGM_BTH_NAME
129#undef PGM_GST_TYPE
130#undef PGM_GST_NAME
131
132#undef PGM_SHW_TYPE
133#undef PGM_SHW_NAME
134
135
136/*
137 * Shadow - PAE mode
138 */
139#define PGM_SHW_TYPE PGM_TYPE_PAE
140#define PGM_SHW_NAME(name) PGM_SHW_NAME_PAE(name)
141#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
142#include "PGMAllShw.h"
143
144/* Guest - real mode */
145#define PGM_GST_TYPE PGM_TYPE_REAL
146#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
147#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
148#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
149#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
150#include "PGMGstDefs.h"
151#include "PGMAllBth.h"
152#undef BTH_PGMPOOLKIND_PT_FOR_PT
153#undef BTH_PGMPOOLKIND_ROOT
154#undef PGM_BTH_NAME
155#undef PGM_GST_TYPE
156#undef PGM_GST_NAME
157
158/* Guest - protected mode */
159#define PGM_GST_TYPE PGM_TYPE_PROT
160#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
161#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
162#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
163#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
164#include "PGMGstDefs.h"
165#include "PGMAllBth.h"
166#undef BTH_PGMPOOLKIND_PT_FOR_PT
167#undef BTH_PGMPOOLKIND_ROOT
168#undef PGM_BTH_NAME
169#undef PGM_GST_TYPE
170#undef PGM_GST_NAME
171
172/* Guest - 32-bit mode */
173#define PGM_GST_TYPE PGM_TYPE_32BIT
174#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
175#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
176#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
177#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
178#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_FOR_32BIT
179#include "PGMGstDefs.h"
180#include "PGMAllBth.h"
181#undef BTH_PGMPOOLKIND_PT_FOR_BIG
182#undef BTH_PGMPOOLKIND_PT_FOR_PT
183#undef BTH_PGMPOOLKIND_ROOT
184#undef PGM_BTH_NAME
185#undef PGM_GST_TYPE
186#undef PGM_GST_NAME
187
188
189/* Guest - PAE mode */
190#define PGM_GST_TYPE PGM_TYPE_PAE
191#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
192#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
193#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
194#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
195#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT
196#include "PGMGstDefs.h"
197#include "PGMAllGst.h"
198#include "PGMAllBth.h"
199#undef BTH_PGMPOOLKIND_PT_FOR_BIG
200#undef BTH_PGMPOOLKIND_PT_FOR_PT
201#undef BTH_PGMPOOLKIND_ROOT
202#undef PGM_BTH_NAME
203#undef PGM_GST_TYPE
204#undef PGM_GST_NAME
205
206#undef PGM_SHW_TYPE
207#undef PGM_SHW_NAME
208
209
210#ifndef IN_RC /* AMD64 implies VT-x/AMD-V */
211/*
212 * Shadow - AMD64 mode
213 */
214# define PGM_SHW_TYPE PGM_TYPE_AMD64
215# define PGM_SHW_NAME(name) PGM_SHW_NAME_AMD64(name)
216# include "PGMAllShw.h"
217
218/* Guest - protected mode (only used for AMD-V nested paging in 64-bit mode) */
219# define PGM_GST_TYPE PGM_TYPE_PROT
220# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
221# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
222# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
223# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PD_PHYS
224# include "PGMGstDefs.h"
225# include "PGMAllBth.h"
226# undef BTH_PGMPOOLKIND_PT_FOR_PT
227# undef BTH_PGMPOOLKIND_ROOT
228# undef PGM_BTH_NAME
229# undef PGM_GST_TYPE
230# undef PGM_GST_NAME
231
232# ifdef VBOX_WITH_64_BITS_GUESTS
233/* Guest - AMD64 mode */
234# define PGM_GST_TYPE PGM_TYPE_AMD64
235# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
236# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
237# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
238# define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
239# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_64BIT_PML4
240# include "PGMGstDefs.h"
241# include "PGMAllGst.h"
242# include "PGMAllBth.h"
243# undef BTH_PGMPOOLKIND_PT_FOR_BIG
244# undef BTH_PGMPOOLKIND_PT_FOR_PT
245# undef BTH_PGMPOOLKIND_ROOT
246# undef PGM_BTH_NAME
247# undef PGM_GST_TYPE
248# undef PGM_GST_NAME
249# endif /* VBOX_WITH_64_BITS_GUESTS */
250
251# undef PGM_SHW_TYPE
252# undef PGM_SHW_NAME
253
254
255/*
256 * Shadow - Nested paging mode
257 */
258# define PGM_SHW_TYPE PGM_TYPE_NESTED
259# define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED(name)
260# include "PGMAllShw.h"
261
262/* Guest - real mode */
263# define PGM_GST_TYPE PGM_TYPE_REAL
264# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
265# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_REAL(name)
266# include "PGMGstDefs.h"
267# include "PGMAllBth.h"
268# undef PGM_BTH_NAME
269# undef PGM_GST_TYPE
270# undef PGM_GST_NAME
271
272/* Guest - protected mode */
273# define PGM_GST_TYPE PGM_TYPE_PROT
274# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
275# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PROT(name)
276# include "PGMGstDefs.h"
277# include "PGMAllBth.h"
278# undef PGM_BTH_NAME
279# undef PGM_GST_TYPE
280# undef PGM_GST_NAME
281
282/* Guest - 32-bit mode */
283# define PGM_GST_TYPE PGM_TYPE_32BIT
284# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
285# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT(name)
286# include "PGMGstDefs.h"
287# include "PGMAllBth.h"
288# undef PGM_BTH_NAME
289# undef PGM_GST_TYPE
290# undef PGM_GST_NAME
291
292/* Guest - PAE mode */
293# define PGM_GST_TYPE PGM_TYPE_PAE
294# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
295# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE(name)
296# include "PGMGstDefs.h"
297# include "PGMAllBth.h"
298# undef PGM_BTH_NAME
299# undef PGM_GST_TYPE
300# undef PGM_GST_NAME
301
302# ifdef VBOX_WITH_64_BITS_GUESTS
303/* Guest - AMD64 mode */
304# define PGM_GST_TYPE PGM_TYPE_AMD64
305# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
306# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64(name)
307# include "PGMGstDefs.h"
308# include "PGMAllBth.h"
309# undef PGM_BTH_NAME
310# undef PGM_GST_TYPE
311# undef PGM_GST_NAME
312# endif /* VBOX_WITH_64_BITS_GUESTS */
313
314# undef PGM_SHW_TYPE
315# undef PGM_SHW_NAME
316
317
318/*
319 * Shadow - EPT
320 */
321# define PGM_SHW_TYPE PGM_TYPE_EPT
322# define PGM_SHW_NAME(name) PGM_SHW_NAME_EPT(name)
323# include "PGMAllShw.h"
324
325/* Guest - real mode */
326# define PGM_GST_TYPE PGM_TYPE_REAL
327# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
328# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_REAL(name)
329# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
330# include "PGMGstDefs.h"
331# include "PGMAllBth.h"
332# undef BTH_PGMPOOLKIND_PT_FOR_PT
333# undef PGM_BTH_NAME
334# undef PGM_GST_TYPE
335# undef PGM_GST_NAME
336
337/* Guest - protected mode */
338# define PGM_GST_TYPE PGM_TYPE_PROT
339# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
340# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
341# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
342# include "PGMGstDefs.h"
343# include "PGMAllBth.h"
344# undef BTH_PGMPOOLKIND_PT_FOR_PT
345# undef PGM_BTH_NAME
346# undef PGM_GST_TYPE
347# undef PGM_GST_NAME
348
349/* Guest - 32-bit mode */
350# define PGM_GST_TYPE PGM_TYPE_32BIT
351# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
352# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_32BIT(name)
353# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
354# include "PGMGstDefs.h"
355# include "PGMAllBth.h"
356# undef BTH_PGMPOOLKIND_PT_FOR_PT
357# undef PGM_BTH_NAME
358# undef PGM_GST_TYPE
359# undef PGM_GST_NAME
360
361/* Guest - PAE mode */
362# define PGM_GST_TYPE PGM_TYPE_PAE
363# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
364# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PAE(name)
365# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
366# include "PGMGstDefs.h"
367# include "PGMAllBth.h"
368# undef BTH_PGMPOOLKIND_PT_FOR_PT
369# undef PGM_BTH_NAME
370# undef PGM_GST_TYPE
371# undef PGM_GST_NAME
372
373# ifdef VBOX_WITH_64_BITS_GUESTS
374/* Guest - AMD64 mode */
375# define PGM_GST_TYPE PGM_TYPE_AMD64
376# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
377# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_AMD64(name)
378# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
379# include "PGMGstDefs.h"
380# include "PGMAllBth.h"
381# undef BTH_PGMPOOLKIND_PT_FOR_PT
382# undef PGM_BTH_NAME
383# undef PGM_GST_TYPE
384# undef PGM_GST_NAME
385# endif /* VBOX_WITH_64_BITS_GUESTS */
386
387# undef PGM_SHW_TYPE
388# undef PGM_SHW_NAME
389
390#endif /* !IN_RC */
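
/*
 * Note (illustrative editorial addition, not part of the VirtualBox sources):
 * each include block above instantiates the template headers PGMAllShw.h,
 * PGMAllGst.h and PGMAllBth.h for one shadow/guest paging mode combination.
 * For example, with PGM_SHW_TYPE set to PGM_TYPE_PAE and PGM_GST_TYPE set to
 * PGM_TYPE_32BIT, PGM_BTH_NAME(Trap0eHandler) expands via
 * PGM_BTH_NAME_PAE_32BIT(Trap0eHandler), i.e. the PAE-on-32-bit variant of the
 * both-modes worker that PGM_BTH_PFN() later dispatches to at run time.
 */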
391
392
393#ifndef IN_RING3
394/**
395 * #PF Handler.
396 *
397 * @returns VBox status code (appropriate for trap handling and GC return).
398 * @param pVCpu VMCPU handle.
399 * @param uErr The trap error code.
400 * @param pRegFrame Trap register frame.
401 * @param pvFault The fault address.
402 */
403VMMDECL(int) PGMTrap0eHandler(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
404{
405 PVM pVM = pVCpu->CTX_SUFF(pVM);
406
407 Log(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv eip=%04x:%RGv cr3=%RGp\n", uErr, pvFault, pRegFrame->cs, (RTGCPTR)pRegFrame->rip, (RTGCPHYS)CPUMGetGuestCR3(pVCpu)));
408 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0e, a);
409 STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );
410
411
412#ifdef VBOX_WITH_STATISTICS
413 /*
414 * Error code stats.
415 */
416 if (uErr & X86_TRAP_PF_US)
417 {
418 if (!(uErr & X86_TRAP_PF_P))
419 {
420 if (uErr & X86_TRAP_PF_RW)
421 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentWrite);
422 else
423 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentRead);
424 }
425 else if (uErr & X86_TRAP_PF_RW)
426 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSWrite);
427 else if (uErr & X86_TRAP_PF_RSVD)
428 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSReserved);
429 else if (uErr & X86_TRAP_PF_ID)
430 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNXE);
431 else
432 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSRead);
433 }
434 else
435 { /* Supervisor */
436 if (!(uErr & X86_TRAP_PF_P))
437 {
438 if (uErr & X86_TRAP_PF_RW)
439 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentWrite);
440 else
441 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentRead);
442 }
443 else if (uErr & X86_TRAP_PF_RW)
444 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVWrite);
445 else if (uErr & X86_TRAP_PF_ID)
446 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSNXE);
447 else if (uErr & X86_TRAP_PF_RSVD)
448 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVReserved);
449 }
450#endif /* VBOX_WITH_STATISTICS */
451
452 /*
453 * Call the worker.
454 */
455 bool fLockTaken = false;
456 int rc = PGM_BTH_PFN(Trap0eHandler, pVCpu)(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken);
457 if (fLockTaken)
458 {
459 PGM_LOCK_ASSERT_OWNER(pVM);
460 pgmUnlock(pVM);
461 }
462 LogFlow(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv rc=%Rrc\n", uErr, pvFault, rc));
463
464 /*
465 * Return code tweaks.
466 */
467 if (rc != VINF_SUCCESS)
468 {
469 if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
470 rc = VINF_SUCCESS;
471
472# ifdef IN_RING0
473 /* Note: hack alert for difficult to reproduce problem. */
474 if ( rc == VERR_PAGE_NOT_PRESENT /* SMP only ; disassembly might fail. */
475 || rc == VERR_PAGE_TABLE_NOT_PRESENT /* seen with UNI & SMP */
476 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT /* seen with SMP */
477 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT) /* precaution */
478 {
479 Log(("WARNING: Unexpected VERR_PAGE_TABLE_NOT_PRESENT (%d) for page fault at %RGv error code %x (rip=%RGv)\n", rc, pvFault, uErr, pRegFrame->rip));
480 /* Some kind of inconsistency in the SMP case; it's safe to just execute the instruction again; not sure about single VCPU VMs though. */
481 rc = VINF_SUCCESS;
482 }
483# endif
484 }
485
486 STAM_STATS({ if (rc == VINF_EM_RAW_GUEST_TRAP) STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eGuestPF); });
487 STAM_STATS({ if (!pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution))
488 pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2Misc; });
489 STAM_PROFILE_STOP_EX(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0e, pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution), a);
490 return rc;
491}
492#endif /* !IN_RING3 */
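
/*
 * Illustrative sketch (hypothetical helper, not part of the VirtualBox sources):
 * how the X86_TRAP_PF_* error-code bits counted by the statistics block in
 * PGMTrap0eHandler map onto page-fault causes.
 */
#if 0
static const char *pgmTrap0eErrDesc(RTGCUINT uErr)
{
    if (!(uErr & X86_TRAP_PF_P))
        return uErr & X86_TRAP_PF_RW ? "not-present write" : "not-present read";
    if (uErr & X86_TRAP_PF_RSVD)
        return "reserved bit violation";
    if (uErr & X86_TRAP_PF_ID)
        return "instruction fetch (NX)";
    return uErr & X86_TRAP_PF_RW ? "protection write" : "protection read";
}
#endif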
493
494
495/**
496 * Prefetch a page
497 *
498 * Typically used to sync commonly used pages before entering raw mode
499 * after a CR3 reload.
500 *
501 * @returns VBox status code suitable for scheduling.
502 * @retval VINF_SUCCESS on success.
503 * @retval VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
504 * @param pVCpu VMCPU handle.
505 * @param GCPtrPage The page to prefetch.
506 */
507VMMDECL(int) PGMPrefetchPage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
508{
509 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Prefetch), a);
510 int rc = PGM_BTH_PFN(PrefetchPage, pVCpu)(pVCpu, GCPtrPage);
511 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Prefetch), a);
512 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
513 return rc;
514}
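
/*
 * Usage sketch (fragment from a hypothetical caller, not part of the VirtualBox
 * sources): prefetching a few hot guest pages after a CR3 reload, as described
 * above.  The page list is made up.
 */
#if 0
    static const RTGCPTR s_aHotPages[] = { 0xc0000000, 0xc0001000, 0xc0002000 };
    for (unsigned i = 0; i < RT_ELEMENTS(s_aHotPages); i++)
    {
        int rc2 = PGMPrefetchPage(pVCpu, s_aHotPages[i]);
        if (rc2 == VINF_PGM_SYNC_CR3)   /* out of shadow pages; a full sync is pending */
            break;
        AssertRC(rc2);
    }
#endif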
515
516
517/**
518 * Gets the mapping corresponding to the specified address (if any).
519 *
520 * @returns Pointer to the mapping.
521 * @returns NULL if no mapping covers the address.
522 *
523 * @param pVM The virtual machine.
524 * @param GCPtr The guest context pointer.
525 */
526PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr)
527{
528 PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
529 while (pMapping)
530 {
531 if ((uintptr_t)GCPtr < (uintptr_t)pMapping->GCPtr)
532 break;
533 if ((uintptr_t)GCPtr - (uintptr_t)pMapping->GCPtr < pMapping->cb)
534 return pMapping;
535 pMapping = pMapping->CTX_SUFF(pNext);
536 }
537 return NULL;
538}
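
/*
 * Usage sketch (fragment from a hypothetical caller, not part of the VirtualBox
 * sources): checking whether a guest address falls inside one of the hypervisor
 * mappings.
 */
#if 0
    PPGMMAPPING pMapping = pgmGetMapping(pVM, GCPtr);
    if (pMapping)
        Log(("%RGv is inside the mapping at %RGv (size %#x)\n", GCPtr, pMapping->GCPtr, pMapping->cb));
#endif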
539
540
541/**
542 * Verifies a range of pages for read or write access
543 *
544 * Only checks the guest's page tables
545 *
546 * @returns VBox status code.
547 * @param pVCpu VMCPU handle.
548 * @param Addr Guest virtual address to check
549 * @param cbSize Access size
550 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
551 * @remarks Currently not in use.
552 */
553VMMDECL(int) PGMIsValidAccess(PVMCPU pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
554{
555 /*
556 * Validate input.
557 */
558 if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
559 {
560 AssertMsgFailed(("PGMIsValidAccess: invalid access type %08x\n", fAccess));
561 return VERR_INVALID_PARAMETER;
562 }
563
564 uint64_t fPage;
565 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)Addr, &fPage, NULL);
566 if (RT_FAILURE(rc))
567 {
568 Log(("PGMIsValidAccess: access violation for %RGv rc=%d\n", Addr, rc));
569 return VINF_EM_RAW_GUEST_TRAP;
570 }
571
572 /*
573 * Check if the access would cause a page fault
574 *
575 * Note that hypervisor page directories are not present in the guest's tables, so this check
576 * is sufficient.
577 */
578 bool fWrite = !!(fAccess & X86_PTE_RW);
579 bool fUser = !!(fAccess & X86_PTE_US);
580 if ( !(fPage & X86_PTE_P)
581 || (fWrite && !(fPage & X86_PTE_RW))
582 || (fUser && !(fPage & X86_PTE_US)) )
583 {
584 Log(("PGMIsValidAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPage, fWrite, fUser));
585 return VINF_EM_RAW_GUEST_TRAP;
586 }
587 if ( RT_SUCCESS(rc)
588 && PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize))
589 return PGMIsValidAccess(pVCpu, Addr + PAGE_SIZE, (cbSize > PAGE_SIZE) ? cbSize - PAGE_SIZE : 1, fAccess);
590 return rc;
591}
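
/*
 * Usage sketch (fragment from a hypothetical caller, not part of the VirtualBox
 * sources): verifying that the guest may write a buffer from user mode before
 * the VMM touches it.  GCPtrBuf and cbBuf are made-up names.
 */
#if 0
    int rc2 = PGMIsValidAccess(pVCpu, GCPtrBuf, cbBuf, X86_PTE_RW | X86_PTE_US);
    if (rc2 == VINF_EM_RAW_GUEST_TRAP)
    {
        /* The access would fault; let the guest take the #PF instead. */
    }
    else
        AssertRC(rc2);
#endif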
592
593
594/**
595 * Verifies a range of pages for read or write access
596 *
597 * Supports handling of pages marked for dirty bit tracking and CSAM
598 *
599 * @returns VBox status code.
600 * @param pVCpu VMCPU handle.
601 * @param Addr Guest virtual address to check
602 * @param cbSize Access size
603 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
604 */
605VMMDECL(int) PGMVerifyAccess(PVMCPU pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
606{
607 PVM pVM = pVCpu->CTX_SUFF(pVM);
608
609 AssertMsg(!(fAccess & ~(X86_PTE_US | X86_PTE_RW)), ("PGMVerifyAccess: invalid access type %08x\n", fAccess));
610
611 /*
612 * Get going.
613 */
614 uint64_t fPageGst;
615 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)Addr, &fPageGst, NULL);
616 if (RT_FAILURE(rc))
617 {
618 Log(("PGMVerifyAccess: access violation for %RGv rc=%d\n", Addr, rc));
619 return VINF_EM_RAW_GUEST_TRAP;
620 }
621
622 /*
623 * Check if the access would cause a page fault
624 *
625 * Note that hypervisor page directories are not present in the guest's tables, so this check
626 * is sufficient.
627 */
628 const bool fWrite = !!(fAccess & X86_PTE_RW);
629 const bool fUser = !!(fAccess & X86_PTE_US);
630 if ( !(fPageGst & X86_PTE_P)
631 || (fWrite && !(fPageGst & X86_PTE_RW))
632 || (fUser && !(fPageGst & X86_PTE_US)) )
633 {
634 Log(("PGMVerifyAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPageGst, fWrite, fUser));
635 return VINF_EM_RAW_GUEST_TRAP;
636 }
637
638 if (!pVM->pgm.s.fNestedPaging)
639 {
640 /*
641 * The next step is to verify whether we protected this page for dirty bit tracking or for CSAM scanning
642 */
643 rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, NULL, NULL);
644 if ( rc == VERR_PAGE_NOT_PRESENT
645 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
646 {
647 /*
648 * Page is not present in our page tables.
649 * Try to sync it!
650 */
651 Assert(X86_TRAP_PF_RW == X86_PTE_RW && X86_TRAP_PF_US == X86_PTE_US);
652 uint32_t uErr = fAccess & (X86_TRAP_PF_RW | X86_TRAP_PF_US);
653 rc = PGM_BTH_PFN(VerifyAccessSyncPage, pVCpu)(pVCpu, Addr, fPageGst, uErr);
654 if (rc != VINF_SUCCESS)
655 return rc;
656 }
657 else
658 AssertMsg(rc == VINF_SUCCESS, ("PGMShwGetPage %RGv failed with %Rrc\n", Addr, rc));
659 }
660
661#if 0 /* def VBOX_STRICT; triggers too often now */
662 /*
663 * This check is a bit paranoid, but useful.
664 */
665 /* Note! This will assert when writing to monitored pages (a bit annoying actually). */
666 uint64_t fPageShw;
667 rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, &fPageShw, NULL);
668 if ( (rc == VERR_PAGE_NOT_PRESENT || RT_FAILURE(rc))
669 || (fWrite && !(fPageShw & X86_PTE_RW))
670 || (fUser && !(fPageShw & X86_PTE_US)) )
671 {
672 AssertMsgFailed(("Unexpected access violation for %RGv! rc=%Rrc write=%d user=%d\n",
673 Addr, rc, fWrite && !(fPageShw & X86_PTE_RW), fUser && !(fPageShw & X86_PTE_US)));
674 return VINF_EM_RAW_GUEST_TRAP;
675 }
676#endif
677
678 if ( RT_SUCCESS(rc)
679 && ( PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize - 1)
680 || Addr + cbSize < Addr))
681 {
682 /* Don't recursively call PGMVerifyAccess as we might run out of stack. */
683 for (;;)
684 {
685 Addr += PAGE_SIZE;
686 if (cbSize > PAGE_SIZE)
687 cbSize -= PAGE_SIZE;
688 else
689 cbSize = 1;
690 rc = PGMVerifyAccess(pVCpu, Addr, 1, fAccess);
691 if (rc != VINF_SUCCESS)
692 break;
693 if (PAGE_ADDRESS(Addr) == PAGE_ADDRESS(Addr + cbSize - 1))
694 break;
695 }
696 }
697 return rc;
698}
699
700
701/**
702 * Emulation of the invlpg instruction (HC only actually).
703 *
704 * @returns Strict VBox status code, special care required.
705 * @retval VINF_PGM_SYNC_CR3 - handled.
706 * @retval VINF_EM_RAW_EMULATE_INSTR - not handled (RC only).
707 * @retval VERR_REM_FLUSHED_PAGES_OVERFLOW - not handled.
708 *
709 * @param pVCpu VMCPU handle.
710 * @param GCPtrPage Page to invalidate.
711 *
712 * @remark ASSUMES the page table entry or page directory is valid. Fairly
713 * safe, but there could be edge cases!
714 *
715 * @todo Flush page or page directory only if necessary!
716 * @todo VBOXSTRICTRC
717 */
718VMMDECL(int) PGMInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
719{
720 PVM pVM = pVCpu->CTX_SUFF(pVM);
721 int rc;
722 Log3(("PGMInvalidatePage: GCPtrPage=%RGv\n", GCPtrPage));
723
724#ifndef IN_RING3
725 /*
726 * Notify the recompiler so it can record this instruction.
727 */
728 REMNotifyInvalidatePage(pVM, GCPtrPage);
729#endif /* !IN_RING3 */
730
731
732#ifdef IN_RC
733 /*
734 * Check for conflicts and pending CR3 monitoring updates.
735 */
736 if (pgmMapAreMappingsFloating(pVM))
737 {
738 if ( pgmGetMapping(pVM, GCPtrPage)
739 && PGMGstGetPage(pVCpu, GCPtrPage, NULL, NULL) != VERR_PAGE_TABLE_NOT_PRESENT)
740 {
741 LogFlow(("PGMGCInvalidatePage: Conflict!\n"));
742 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
743 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRCInvlPgConflict);
744 return VINF_PGM_SYNC_CR3;
745 }
746
747 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
748 {
749 LogFlow(("PGMGCInvalidatePage: PGM_SYNC_MONITOR_CR3 -> reinterpret instruction in R3\n"));
750 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRCInvlPgSyncMonCR3);
751 return VINF_EM_RAW_EMULATE_INSTR;
752 }
753 }
754#endif /* IN_RC */
755
756 /*
757 * Call paging mode specific worker.
758 */
759 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InvalidatePage), a);
760 pgmLock(pVM);
761 rc = PGM_BTH_PFN(InvalidatePage, pVCpu)(pVCpu, GCPtrPage);
762 pgmUnlock(pVM);
763 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InvalidatePage), a);
764
765#ifdef IN_RING3
766 /*
767 * Check if we have a pending update of the CR3 monitoring.
768 */
769 if ( RT_SUCCESS(rc)
770 && (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3))
771 {
772 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
773 Assert(!pVM->pgm.s.fMappingsFixed); Assert(!pVM->pgm.s.fMappingsDisabled);
774 }
775
776 /*
777 * Inform CSAM about the flush
778 *
779 * Note: This is to check if monitored pages have been changed; when we implement
780 * callbacks for virtual handlers, this will no longer be required.
781 */
782 CSAMR3FlushPage(pVM, GCPtrPage);
783#endif /* IN_RING3 */
784
785 /* Ignore all irrelevant error codes. */
786 if ( rc == VERR_PAGE_NOT_PRESENT
787 || rc == VERR_PAGE_TABLE_NOT_PRESENT
788 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT
789 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)
790 rc = VINF_SUCCESS;
791
792 return rc;
793}
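
/*
 * Usage sketch (fragment from a hypothetical caller, not part of the VirtualBox
 * sources): how an INVLPG emulation path might call PGMInvalidatePage, following
 * the return-code contract documented above.
 */
#if 0
    int rc2 = PGMInvalidatePage(pVCpu, GCPtrPage);
    if (rc2 == VINF_PGM_SYNC_CR3)
    {
        /* A full shadow sync is pending; VMCPU_FF_PGM_SYNC_CR3 is already set,
           so the invalidation counts as handled. */
        rc2 = VINF_SUCCESS;
    }
#endif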
794
795
796/**
797 * Executes an instruction using the interpreter.
798 *
799 * @returns VBox status code (appropriate for trap handling and GC return).
800 * @param pVM VM handle.
801 * @param pVCpu VMCPU handle.
802 * @param pRegFrame Register frame.
803 * @param pvFault Fault address.
804 */
805VMMDECL(VBOXSTRICTRC) PGMInterpretInstruction(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
806{
807 uint32_t cb;
808 VBOXSTRICTRC rc = EMInterpretInstruction(pVM, pVCpu, pRegFrame, pvFault, &cb);
809 if (rc == VERR_EM_INTERPRETER)
810 rc = VINF_EM_RAW_EMULATE_INSTR;
811 if (rc != VINF_SUCCESS)
812 Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%RGv)\n", VBOXSTRICTRC_VAL(rc), pvFault));
813 return rc;
814}
815
816
817/**
818 * Gets effective page information (from the VMM page directory).
819 *
820 * @returns VBox status.
821 * @param pVCpu VMCPU handle.
822 * @param GCPtr Guest Context virtual address of the page.
823 * @param pfFlags Where to store the flags. These are X86_PTE_*.
824 * @param pHCPhys Where to store the HC physical address of the page.
825 * This is page aligned.
826 * @remark You should use PGMMapGetPage() for pages in a mapping.
827 */
828VMMDECL(int) PGMShwGetPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
829{
830 pgmLock(pVCpu->CTX_SUFF(pVM));
831 int rc = PGM_SHW_PFN(GetPage, pVCpu)(pVCpu, GCPtr, pfFlags, pHCPhys);
832 pgmUnlock(pVCpu->CTX_SUFF(pVM));
833 return rc;
834}
835
836
837/**
838 * Modify page flags for a range of pages in the shadow context.
839 *
840 * The existing flags are ANDed with the fMask and ORed with the fFlags.
841 *
842 * @returns VBox status code.
843 * @param pVCpu VMCPU handle.
844 * @param GCPtr Virtual address of the first page in the range.
845 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
846 * @param fMask The AND mask - page flags X86_PTE_*.
847 * Be very CAREFUL when ~'ing constants which could be 32-bit!
848 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
849 * @remark You must use PGMMapModifyPage() for pages in a mapping.
850 */
851DECLINLINE(int) pdmShwModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags)
852{
853 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
854 Assert(!(fOpFlags & ~(PGM_MK_PG_IS_MMIO2 | PGM_MK_PG_IS_WRITE_FAULT)));
855
856 GCPtr &= PAGE_BASE_GC_MASK; /** @todo this ain't necessary, right... */
857
858 PVM pVM = pVCpu->CTX_SUFF(pVM);
859 pgmLock(pVM);
860 int rc = PGM_SHW_PFN(ModifyPage, pVCpu)(pVCpu, GCPtr, PAGE_SIZE, fFlags, fMask, fOpFlags);
861 pgmUnlock(pVM);
862 return rc;
863}
864
865
866/**
867 * Changes the page flags for a single page in the shadow page tables so as to
868 * make it read-only.
869 *
870 * @returns VBox status code.
871 * @param pVCpu VMCPU handle.
872 * @param GCPtr Virtual address of the first page in the range.
873 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
874 */
875VMMDECL(int) PGMShwMakePageReadonly(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
876{
877 return pdmShwModifyPage(pVCpu, GCPtr, 0, ~(uint64_t)X86_PTE_RW, fOpFlags);
878}
879
880
881/**
882 * Changes the page flags for a single page in the shadow page tables so as to
883 * make it writable.
884 *
885 * The caller must know with 101% certainty that the guest page tables map this
886 * page as writable too. This function will deal with shared, zero and write
887 * monitored pages.
888 *
889 * @returns VBox status code.
890 * @param pVCpu VMCPU handle.
891 * @param GCPtr Virtual address of the first page in the range.
892 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags; include
893 * PGM_MK_PG_IS_MMIO2 when the page is an MMIO2 page.
894 */
895VMMDECL(int) PGMShwMakePageWritable(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
896{
897 return pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)0, fOpFlags);
898}
899
900
901/**
902 * Changes the page flags for a single page in the shadow page tables so as to
903 * make it not present.
904 *
905 * @returns VBox status code.
906 * @param pVCpu VMCPU handle.
907 * @param GCPtr Virtual address of the first page in the range.
908 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
909 */
910VMMDECL(int) PGMShwMakePageNotPresent(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
911{
912 return pdmShwModifyPage(pVCpu, GCPtr, 0, 0, fOpFlags);
913}
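
/*
 * Worked example (illustrative editorial addition, not part of the VirtualBox
 * sources) of the fFlags/fMask convention used by pdmShwModifyPage above:
 * new = (old & fMask) | fFlags.
 *
 *   read-only:    fFlags = 0,           fMask = ~X86_PTE_RW  -> clears the write bit
 *   writable:     fFlags = X86_PTE_RW,  fMask = ~0           -> sets the write bit
 *   not present:  fFlags = 0,           fMask = 0            -> clears everything, incl. X86_PTE_P
 */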
914
915
916/**
917 * Gets the shadow page directory for the specified address, PAE.
918 *
919 * @returns VBox status code.
920 * @param pVCpu The VMCPU handle.
921 * @param GCPtr The address.
922 * @param uGstPdpe Guest PDPT entry. Valid.
923 * @param ppPD Receives address of page directory
924 */
925int pgmShwSyncPaePDPtr(PVMCPU pVCpu, RTGCPTR GCPtr, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
926{
927 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
928 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
929 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
930 PVM pVM = pVCpu->CTX_SUFF(pVM);
931 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
932 PPGMPOOLPAGE pShwPage;
933 int rc;
934
935 PGM_LOCK_ASSERT_OWNER(pVM);
936
937 /* Allocate page directory if not present. */
938 if ( !pPdpe->n.u1Present
939 && !(pPdpe->u & X86_PDPE_PG_MASK))
940 {
941 RTGCPTR64 GCPdPt;
942 PGMPOOLKIND enmKind;
943
944 if (pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu))
945 {
946 /* AMD-V nested paging or real/protected mode without paging. */
947 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
948 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
949 }
950 else
951 {
952 if (CPUMGetGuestCR4(pVCpu) & X86_CR4_PAE)
953 {
954 if (!(uGstPdpe & X86_PDPE_P))
955 {
956 /* PD not present; guest must reload CR3 to change it.
957 * No need to monitor anything in this case.
958 */
959 Assert(!HWACCMIsEnabled(pVM));
960
961 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
962 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
963 uGstPdpe |= X86_PDPE_P;
964 }
965 else
966 {
967 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
968 enmKind = PGMPOOLKIND_PAE_PD_FOR_PAE_PD;
969 }
970 }
971 else
972 {
973 GCPdPt = CPUMGetGuestCR3(pVCpu);
974 enmKind = (PGMPOOLKIND)(PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD + iPdPt);
975 }
976 }
977
978 /* Create a reference back to the PDPT by using the index in its shadow page. */
979 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPdPt, &pShwPage);
980 AssertRCReturn(rc, rc);
981
982 /* The PD was cached or created; hook it up now. */
983 pPdpe->u |= pShwPage->Core.Key | (uGstPdpe & (X86_PDPE_P | X86_PDPE_A));
984
985# if defined(IN_RC)
986 /*
987 * In 32-bit PAE mode we *must* invalidate the TLB when changing a
988 * PDPT entry; the CPU fetches them only during cr3 load, so any
989 * non-present PDPT will continue to cause page faults.
990 */
991 ASMReloadCR3();
992# endif
993 PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdpe);
994 }
995 else
996 {
997 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
998 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
999 Assert((pPdpe->u & X86_PDPE_PG_MASK) == pShwPage->Core.Key);
1000
1001 pgmPoolCacheUsed(pPool, pShwPage);
1002 }
1003 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1004 return VINF_SUCCESS;
1005}
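
/*
 * Summary (illustrative editorial addition, not part of the VirtualBox sources)
 * of how the pool page kind is chosen in pgmShwSyncPaePDPtr above:
 *
 *   nested paging / guest paging disabled -> PGMPOOLKIND_PAE_PD_PHYS (GCPdPt is the slot address)
 *   PAE guest, PDPE present               -> PGMPOOLKIND_PAE_PD_FOR_PAE_PD (shadows the guest PAE PD)
 *   PAE guest, PDPE not present           -> PGMPOOLKIND_PAE_PD_PHYS (guest must reload CR3 to change it)
 *   32-bit guest                          -> PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD + iPdPt
 */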
1006
1007
1008/**
1009 * Gets the pointer to the shadow page directory entry for an address, PAE.
1010 *
1011 * @returns VBox status code.
1012 * @param pVCpu The current CPU.
1013 * @param GCPtr The address.
1014 * @param ppShwPde Receives the address of the pgm pool page for the shadow page directory
1015 */
1016DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPU pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde)
1017{
1018 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
1019 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
1020 PVM pVM = pVCpu->CTX_SUFF(pVM);
1021
1022 PGM_LOCK_ASSERT_OWNER(pVM);
1023
1024 AssertReturn(pPdpt, VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT); /* can't happen */
1025 if (!pPdpt->a[iPdPt].n.u1Present)
1026 {
1027 LogFlow(("pgmShwGetPaePoolPagePD: PD %d not present (%RX64)\n", iPdPt, pPdpt->a[iPdPt].u));
1028 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1029 }
1030 AssertMsg(pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK, ("GCPtr=%RGv\n", GCPtr));
1031
1032 /* Fetch the pgm pool shadow descriptor. */
1033 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
1034 AssertReturn(pShwPde, VERR_PGM_POOL_GET_PAGE_FAILED);
1035
1036 *ppShwPde = pShwPde;
1037 return VINF_SUCCESS;
1038}
1039
1040#ifndef IN_RC
1041
1042/**
1043 * Syncs the SHADOW page directory pointer for the specified address.
1044 *
1045 * Allocates backing pages in case the PDPT or PML4 entry is missing.
1046 *
1047 * The caller is responsible for making sure the guest has a valid PD before
1048 * calling this function.
1049 *
1050 * @returns VBox status.
1051 * @param pVCpu VMCPU handle.
1052 * @param GCPtr The address.
1053 * @param uGstPml4e Guest PML4 entry (valid).
1054 * @param uGstPdpe Guest PDPT entry (valid).
1055 * @param ppPD Receives address of page directory
1056 */
1057static int pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
1058{
1059 PVM pVM = pVCpu->CTX_SUFF(pVM);
1060 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1061 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1062 PX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
1063 bool fNestedPagingOrNoGstPaging = pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu);
1064 PPGMPOOLPAGE pShwPage;
1065 int rc;
1066
1067 PGM_LOCK_ASSERT_OWNER(pVM);
1068
1069 /* Allocate page directory pointer table if not present. */
1070 if ( !pPml4e->n.u1Present
1071 && !(pPml4e->u & X86_PML4E_PG_MASK))
1072 {
1073 RTGCPTR64 GCPml4;
1074 PGMPOOLKIND enmKind;
1075
1076 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1077
1078 if (fNestedPagingOrNoGstPaging)
1079 {
1080 /* AMD-V nested paging or real/protected mode without paging */
1081 GCPml4 = (RTGCPTR64)iPml4 << X86_PML4_SHIFT;
1082 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_PHYS;
1083 }
1084 else
1085 {
1086 GCPml4 = uGstPml4e & X86_PML4E_PG_MASK;
1087 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT;
1088 }
1089
1090 /* Create a reference back to the PDPT by using the index in its shadow page. */
1091 rc = pgmPoolAlloc(pVM, GCPml4, enmKind, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, &pShwPage);
1092 AssertRCReturn(rc, rc);
1093 }
1094 else
1095 {
1096 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
1097 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1098
1099 pgmPoolCacheUsed(pPool, pShwPage);
1100 }
1101 /* The PDPT was cached or created; hook it up now. */
1102 pPml4e->u |= pShwPage->Core.Key | (uGstPml4e & pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask);
1103
1104 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1105 PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1106 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1107
1108 /* Allocate page directory if not present. */
1109 if ( !pPdpe->n.u1Present
1110 && !(pPdpe->u & X86_PDPE_PG_MASK))
1111 {
1112 RTGCPTR64 GCPdPt;
1113 PGMPOOLKIND enmKind;
1114
1115 if (fNestedPagingOrNoGstPaging)
1116 {
1117 /* AMD-V nested paging or real/protected mode without paging */
1118 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
1119 enmKind = PGMPOOLKIND_64BIT_PD_FOR_PHYS;
1120 }
1121 else
1122 {
1123 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1124 enmKind = PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD;
1125 }
1126
1127 /* Create a reference back to the PDPT by using the index in its shadow page. */
1128 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, pShwPage->idx, iPdPt, &pShwPage);
1129 AssertRCReturn(rc, rc);
1130 }
1131 else
1132 {
1133 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
1134 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1135
1136 pgmPoolCacheUsed(pPool, pShwPage);
1137 }
1138 /* The PD was cached or created; hook it up now. */
1139 pPdpe->u |= pShwPage->Core.Key | (uGstPdpe & pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask);
1140
1141 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1142 return VINF_SUCCESS;
1143}
1144
1145
1146/**
1147 * Gets the SHADOW page directory pointer for the specified address (long mode).
1148 *
1149 * @returns VBox status.
1150 * @param pVCpu VMCPU handle.
1151 * @param GCPtr The address.
1152 * @param ppPdpt Receives address of pdpt
1153 * @param ppPD Receives address of page directory
1154 */
1155DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
1156{
1157 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1158 PCX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
1159
1160 PGM_LOCK_ASSERT_OWNER(pVCpu->CTX_SUFF(pVM));
1161
1162 AssertReturn(pPml4e, VERR_PGM_PML4_MAPPING);
1163 if (ppPml4e)
1164 *ppPml4e = (PX86PML4E)pPml4e;
1165
1166 Log4(("pgmShwGetLongModePDPtr %RGv (%RHv) %RX64\n", GCPtr, pPml4e, pPml4e->u));
1167
1168 if (!pPml4e->n.u1Present)
1169 return VERR_PAGE_MAP_LEVEL4_NOT_PRESENT;
1170
1171 PVM pVM = pVCpu->CTX_SUFF(pVM);
1172 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1173 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
1174 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1175
1176 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1177 PCX86PDPT pPdpt = *ppPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1178 if (!pPdpt->a[iPdPt].n.u1Present)
1179 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1180
1181 pShwPage = pgmPoolGetPage(pPool, pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
1182 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1183
1184 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1185 Log4(("pgmShwGetLongModePDPtr %RGv -> *ppPD=%p PDE=%p/%RX64\n", GCPtr, *ppPD, &(*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK], (*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK].u));
1186 return VINF_SUCCESS;
1187}
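
/*
 * Worked example (illustrative editorial addition, not part of the VirtualBox
 * sources) of the index extraction used by the long-mode walkers above.  For
 * GCPtr = 0x00007fff12345000:
 *
 *   iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK        = bits 47:39 = 0x0ff (255)
 *   iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64  = bits 38:30 = 0x1fc (508)
 *
 * The PML4 entry selects the shadow PDPT pool page and the PDPT entry selects
 * the shadow PD pool page returned via *ppPD.
 */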
1188
1189
1190/**
1191 * Syncs the SHADOW EPT page directory pointer for the specified address. Allocates
1192 * backing pages in case the PDPT or PML4 entry is missing.
1193 *
1194 * @returns VBox status.
1195 * @param pVCpu VMCPU handle.
1196 * @param GCPtr The address.
1197 * @param ppPdpt Receives address of pdpt
1198 * @param ppPD Receives address of page directory
1199 */
1200static int pgmShwGetEPTPDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD)
1201{
1202 PVM pVM = pVCpu->CTX_SUFF(pVM);
1203 const unsigned iPml4 = (GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
1204 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1205 PEPTPML4 pPml4;
1206 PEPTPML4E pPml4e;
1207 PPGMPOOLPAGE pShwPage;
1208 int rc;
1209
1210 Assert(pVM->pgm.s.fNestedPaging);
1211 PGM_LOCK_ASSERT_OWNER(pVM);
1212
1213 pPml4 = (PEPTPML4)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1214 Assert(pPml4);
1215
1216 /* Allocate page directory pointer table if not present. */
1217 pPml4e = &pPml4->a[iPml4];
1218 if ( !pPml4e->n.u1Present
1219 && !(pPml4e->u & EPT_PML4E_PG_MASK))
1220 {
1221 Assert(!(pPml4e->u & EPT_PML4E_PG_MASK));
1222 RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT;
1223
1224 rc = pgmPoolAlloc(pVM, GCPml4, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOL_IDX_NESTED_ROOT, iPml4, &pShwPage);
1225 AssertRCReturn(rc, rc);
1226 }
1227 else
1228 {
1229 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
1230 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1231
1232 pgmPoolCacheUsed(pPool, pShwPage);
1233 }
1234 /* The PDPT was cached or created; hook it up now and fill with the default value. */
1235 pPml4e->u = pShwPage->Core.Key;
1236 pPml4e->n.u1Present = 1;
1237 pPml4e->n.u1Write = 1;
1238 pPml4e->n.u1Execute = 1;
1239
1240 const unsigned iPdPt = (GCPtr >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
1241 PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1242 PEPTPDPTE pPdpe = &pPdpt->a[iPdPt];
1243
1244 if (ppPdpt)
1245 *ppPdpt = pPdpt;
1246
1247 /* Allocate page directory if not present. */
1248 if ( !pPdpe->n.u1Present
1249 && !(pPdpe->u & EPT_PDPTE_PG_MASK))
1250 {
1251 RTGCPTR64 GCPdPt = (RTGCPTR64)iPdPt << EPT_PDPT_SHIFT;
1252
1253 rc = pgmPoolAlloc(pVM, GCPdPt, PGMPOOLKIND_EPT_PD_FOR_PHYS, pShwPage->idx, iPdPt, &pShwPage);
1254 AssertRCReturn(rc, rc);
1255 }
1256 else
1257 {
1258 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & EPT_PDPTE_PG_MASK);
1259 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1260
1261 pgmPoolCacheUsed(pPool, pShwPage);
1262 }
1263 /* The PD was cached or created; hook it up now and fill with the default value. */
1264 pPdpe->u = pShwPage->Core.Key;
1265 pPdpe->n.u1Present = 1;
1266 pPdpe->n.u1Write = 1;
1267 pPdpe->n.u1Execute = 1;
1268
1269 *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1270 return VINF_SUCCESS;
1271}
1272
1273#endif /* !IN_RC */
1274
1275#ifdef IN_RING0
1276/**
1277 * Synchronizes a range of nested page table entries.
1278 *
1279 * The caller must own the PGM lock.
1280 *
1281 * @param pVCpu The current CPU.
1282 * @param GCPhys Where to start.
1283 * @param cPages How many pages which entries should be synced.
1284 * @param enmShwPagingMode The shadow paging mode (PGMMODE_EPT for VT-x,
1285 * host paging mode for AMD-V).
1286 */
1287int pgmShwSyncNestedPageLocked(PVMCPU pVCpu, RTGCPHYS GCPhysFault, uint32_t cPages, PGMMODE enmShwPagingMode)
1288{
1289 PGM_LOCK_ASSERT_OWNER(pVCpu->CTX_SUFF(pVM));
1290
1291 int rc;
1292 switch (enmShwPagingMode)
1293 {
1294 case PGMMODE_32_BIT:
1295 {
1296 X86PDE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1297 rc = PGM_BTH_NAME_32BIT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhysFault, cPages, ~0U /*uErr*/);
1298 break;
1299 }
1300
1301 case PGMMODE_PAE:
1302 case PGMMODE_PAE_NX:
1303 {
1304 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1305 rc = PGM_BTH_NAME_PAE_PROT(SyncPage)(pVCpu, PdeDummy, GCPhysFault, cPages, ~0U /*uErr*/);
1306 break;
1307 }
1308
1309 case PGMMODE_AMD64:
1310 case PGMMODE_AMD64_NX:
1311 {
1312 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1313 rc = PGM_BTH_NAME_AMD64_PROT(SyncPage)(pVCpu, PdeDummy, GCPhysFault, cPages, ~0U /*uErr*/);
1314 break;
1315 }
1316
1317 case PGMMODE_EPT:
1318 {
1319 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1320 rc = PGM_BTH_NAME_EPT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhysFault, cPages, ~0U /*uErr*/);
1321 break;
1322 }
1323
1324 default:
1325 AssertMsgFailedReturn(("%d\n", enmShwPagingMode), VERR_IPE_NOT_REACHED_DEFAULT_CASE);
1326 }
1327 return rc;
1328}
1329#endif /* IN_RING0 */
1330
1331
1332/**
1333 * Gets effective Guest OS page information.
1334 *
1335 * When GCPtr is in a big page, the function will return as if it were a normal
1336 * 4KB page. If distinguishing between big and normal pages becomes necessary at
1337 * a later point, a separate variant of PGMGstGetPage() can be created for that
1338 * purpose.
1339 *
1340 * @returns VBox status.
1341 * @param pVCpu The current CPU.
1342 * @param GCPtr Guest Context virtual address of the page.
1343 * @param pfFlags Where to store the flags. These are X86_PTE_*, even for big pages.
1344 * @param pGCPhys Where to store the GC physical address of the page.
1345 * This is page aligned.
1346 */
1347VMMDECL(int) PGMGstGetPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
1348{
1349 VMCPU_ASSERT_EMT(pVCpu);
1350 return PGM_GST_PFN(GetPage, pVCpu)(pVCpu, GCPtr, pfFlags, pGCPhys);
1351}
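
/*
 * Usage sketch (fragment from a hypothetical caller, not part of the VirtualBox
 * sources): translating a guest virtual address and checking the returned
 * X86_PTE_* flags.
 */
#if 0
    uint64_t fFlags;
    RTGCPHYS GCPhys;
    int rc2 = PGMGstGetPage(pVCpu, GCPtr, &fFlags, &GCPhys);
    if (RT_SUCCESS(rc2) && (fFlags & X86_PTE_RW))
        Log(("%RGv -> %RGp (writable)\n", GCPtr, GCPhys));
#endif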
1352
1353
1354/**
1355 * Checks if the page is present.
1356 *
1357 * @returns true if the page is present.
1358 * @returns false if the page is not present.
1359 * @param pVCpu VMCPU handle.
1360 * @param GCPtr Address within the page.
1361 */
1362VMMDECL(bool) PGMGstIsPagePresent(PVMCPU pVCpu, RTGCPTR GCPtr)
1363{
1364 VMCPU_ASSERT_EMT(pVCpu);
1365 int rc = PGMGstGetPage(pVCpu, GCPtr, NULL, NULL);
1366 return RT_SUCCESS(rc);
1367}
1368
1369
1370/**
1371 * Sets (replaces) the page flags for a range of pages in the guest's tables.
1372 *
1373 * @returns VBox status.
1374 * @param pVCpu VMCPU handle.
1375 * @param GCPtr The address of the first page.
1376 * @param cb The size of the range in bytes.
1377 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
1378 */
1379VMMDECL(int) PGMGstSetPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
1380{
1381 VMCPU_ASSERT_EMT(pVCpu);
1382 return PGMGstModifyPage(pVCpu, GCPtr, cb, fFlags, 0);
1383}
1384
1385
1386/**
1387 * Modify page flags for a range of pages in the guest's tables
1388 *
1389 * The existing flags are ANDed with the fMask and ORed with the fFlags.
1390 *
1391 * @returns VBox status code.
1392 * @param pVCpu VMCPU handle.
1393 * @param GCPtr Virtual address of the first page in the range.
1394 * @param cb Size (in bytes) of the range to apply the modification to.
1395 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
1396 * @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
1397 * Be very CAREFUL when ~'ing constants which could be 32-bit!
1398 */
1399VMMDECL(int) PGMGstModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
1400{
1401 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,GstModifyPage), a);
1402 VMCPU_ASSERT_EMT(pVCpu);
1403
1404 /*
1405 * Validate input.
1406 */
1407 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
1408 Assert(cb);
1409
1410 LogFlow(("PGMGstModifyPage %RGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));
1411
1412 /*
1413 * Adjust input.
1414 */
1415 cb += GCPtr & PAGE_OFFSET_MASK;
1416 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
1417 GCPtr = (GCPtr & PAGE_BASE_GC_MASK);
1418
1419 /*
1420 * Call worker.
1421 */
1422 int rc = PGM_GST_PFN(ModifyPage, pVCpu)(pVCpu, GCPtr, cb, fFlags, fMask);
1423
1424 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,GstModifyPage), a);
1425 return rc;
1426}
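
/*
 * Worked example (illustrative editorial addition, not part of the VirtualBox
 * sources) of the input adjustment performed above: a 2-byte range starting at
 * the last byte of a page, GCPtr = 0x80000fff and cb = 2, becomes
 * GCPtr = 0x80000000 and cb = 2 * PAGE_SIZE, so both touched pages are modified.
 */
#if 0
    /* Clear the read/write bit on the whole (adjusted) range. */
    int rc2 = PGMGstModifyPage(pVCpu, GCPtr, cb, 0 /*fFlags*/, ~(uint64_t)X86_PTE_RW /*fMask*/);
    AssertRC(rc2);
#endif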
1427
1428
1429#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1430
1431/**
1432 * Performs the lazy mapping of the 32-bit guest PD.
1433 *
1434 * @returns VBox status code.
1435 * @param pVCpu The current CPU.
1436 * @param ppPd Where to return the pointer to the mapping. This is
1437 * always set.
1438 */
1439int pgmGstLazyMap32BitPD(PVMCPU pVCpu, PX86PD *ppPd)
1440{
1441 PVM pVM = pVCpu->CTX_SUFF(pVM);
1442 pgmLock(pVM);
1443
1444 Assert(!pVCpu->pgm.s.CTX_SUFF(pGst32BitPd));
1445
1446 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAGE_MASK;
1447 PPGMPAGE pPage;
1448 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
1449 if (RT_SUCCESS(rc))
1450 {
1451 RTHCPTR HCPtrGuestCR3;
1452 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
1453 if (RT_SUCCESS(rc))
1454 {
1455 pVCpu->pgm.s.pGst32BitPdR3 = (R3PTRTYPE(PX86PD))HCPtrGuestCR3;
1456# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1457 pVCpu->pgm.s.pGst32BitPdR0 = (R0PTRTYPE(PX86PD))HCPtrGuestCR3;
1458# endif
1459 *ppPd = (PX86PD)HCPtrGuestCR3;
1460
1461 pgmUnlock(pVM);
1462 return VINF_SUCCESS;
1463 }
1464
1465 AssertRC(rc);
1466 }
1467 pgmUnlock(pVM);
1468
1469 *ppPd = NULL;
1470 return rc;
1471}
1472
1473
1474/**
1475 * Performs the lazy mapping of the PAE guest PDPT.
1476 *
1477 * @returns VBox status code.
1478 * @param pVCpu The current CPU.
1479 * @param ppPdpt Where to return the pointer to the mapping. This is
1480 * always set.
1481 */
1482int pgmGstLazyMapPaePDPT(PVMCPU pVCpu, PX86PDPT *ppPdpt)
1483{
1484 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt));
1485 PVM pVM = pVCpu->CTX_SUFF(pVM);
1486 pgmLock(pVM);
1487
1488 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAE_PAGE_MASK;
1489 PPGMPAGE pPage;
1490 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
1491 if (RT_SUCCESS(rc))
1492 {
1493 RTHCPTR HCPtrGuestCR3;
1494 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
1495 if (RT_SUCCESS(rc))
1496 {
1497 pVCpu->pgm.s.pGstPaePdptR3 = (R3PTRTYPE(PX86PDPT))HCPtrGuestCR3;
1498# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1499 pVCpu->pgm.s.pGstPaePdptR0 = (R0PTRTYPE(PX86PDPT))HCPtrGuestCR3;
1500# endif
1501 *ppPdpt = (PX86PDPT)HCPtrGuestCR3;
1502
1503 pgmUnlock(pVM);
1504 return VINF_SUCCESS;
1505 }
1506
1507 AssertRC(rc);
1508 }
1509
1510 pgmUnlock(pVM);
1511 *ppPdpt = NULL;
1512 return rc;
1513}
1514
1515
1516/**
1517 * Performs the lazy mapping / updating of a PAE guest PD.
1518 *
1519 * @returns VBox status code.
1521 * @param pVCpu The current CPU.
1522 * @param iPdpt Which PD entry to map (0..3).
1523 * @param ppPd Where to return the pointer to the mapping. This is
1524 * always set.
1525 */
1526int pgmGstLazyMapPaePD(PVMCPU pVCpu, uint32_t iPdpt, PX86PDPAE *ppPd)
1527{
1528 PVM pVM = pVCpu->CTX_SUFF(pVM);
1529 pgmLock(pVM);
1530
1531 PX86PDPT pGuestPDPT = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
1532 Assert(pGuestPDPT);
1533 Assert(pGuestPDPT->a[iPdpt].n.u1Present);
1534 RTGCPHYS GCPhys = pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK;
1535 bool const fChanged = pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] != GCPhys;
1536
1537 PPGMPAGE pPage;
1538 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
1539 if (RT_SUCCESS(rc))
1540 {
1541 RTRCPTR RCPtr = NIL_RTRCPTR;
1542 RTHCPTR HCPtr = NIL_RTHCPTR;
1543#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1544 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, &HCPtr);
1545 AssertRC(rc);
1546#endif
1547 if (RT_SUCCESS(rc) && fChanged)
1548 {
1549 RCPtr = (RTRCPTR)(RTRCUINTPTR)(pVM->pgm.s.GCPtrCR3Mapping + (1 + iPdpt) * PAGE_SIZE);
1550 rc = PGMMap(pVM, (RTRCUINTPTR)RCPtr, PGM_PAGE_GET_HCPHYS(pPage), PAGE_SIZE, 0);
1551 }
1552 if (RT_SUCCESS(rc))
1553 {
1554 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = (R3PTRTYPE(PX86PDPAE))HCPtr;
1555# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1556 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = (R0PTRTYPE(PX86PDPAE))HCPtr;
1557# endif
1558 if (fChanged)
1559 {
1560 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = GCPhys;
1561 pVCpu->pgm.s.apGstPaePDsRC[iPdpt] = (RCPTRTYPE(PX86PDPAE))RCPtr;
1562 }
1563
1564 *ppPd = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
1565 pgmUnlock(pVM);
1566 return VINF_SUCCESS;
1567 }
1568 }
1569
1570 /* Invalid page or some failure, invalidate the entry. */
1571 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = NIL_RTGCPHYS;
1572 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = 0;
1573# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1574 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = 0;
1575# endif
1576 pVCpu->pgm.s.apGstPaePDsRC[iPdpt] = 0;
1577
1578 pgmUnlock(pVM);
1579 return rc;
1580}
1581
1582#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1583#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1584/**
1585 * Performs the lazy mapping of the AMD64 guest PML4.
1586 *
1587 * @returns VBox status code.
1588 * @param pVCpu The current CPU.
1589 * @param ppPml4 Where to return the pointer to the mapping. This will
1590 * always be set.
1591 */
1592int pgmGstLazyMapPml4(PVMCPU pVCpu, PX86PML4 *ppPml4)
1593{
1594 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4));
1595 PVM pVM = pVCpu->CTX_SUFF(pVM);
1596 pgmLock(pVM);
1597
1598 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_AMD64_PAGE_MASK;
1599 PPGMPAGE pPage;
1600 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
1601 if (RT_SUCCESS(rc))
1602 {
1603 RTHCPTR HCPtrGuestCR3;
1604 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
1605 if (RT_SUCCESS(rc))
1606 {
1607 pVCpu->pgm.s.pGstAmd64Pml4R3 = (R3PTRTYPE(PX86PML4))HCPtrGuestCR3;
1608# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1609 pVCpu->pgm.s.pGstAmd64Pml4R0 = (R0PTRTYPE(PX86PML4))HCPtrGuestCR3;
1610# endif
1611 *ppPml4 = (PX86PML4)HCPtrGuestCR3;
1612
1613 pgmUnlock(pVM);
1614 return VINF_SUCCESS;
1615 }
1616 }
1617
1618 pgmUnlock(pVM);
1619 *ppPml4 = NULL;
1620 return rc;
1621}
1622#endif
1623
1624
1625/**
1626 * Gets the PAE PDPE values cached by the CPU.
1627 *
1628 * @returns VBox status code.
1629 * @param pVCpu The virtual CPU.
1630 * @param paPdpes Where to return the four PDPEs. The array
1631 * pointed to must have 4 entries.
1632 */
1633VMM_INT_DECL(int) PGMGstGetPaePdpes(PVMCPU pVCpu, PX86PDPE paPdpes)
1634{
1635 Assert(pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
1636
1637 paPdpes[0] = pVCpu->pgm.s.aGstPaePdpeRegs[0];
1638 paPdpes[1] = pVCpu->pgm.s.aGstPaePdpeRegs[1];
1639 paPdpes[2] = pVCpu->pgm.s.aGstPaePdpeRegs[2];
1640 paPdpes[3] = pVCpu->pgm.s.aGstPaePdpeRegs[3];
1641 return VINF_SUCCESS;
1642}
1643
1644
1645/**
1646 * Sets the PAE PDPE values cached by the CPU.
1647 *
1648 * @remarks This must be called *AFTER* PGMUpdateCR3.
1649 *
1650 * @returns VBox status code.
1651 * @param pVCpu The virtual CPU.
1652 * @param paPdpes The four PDPE values. The array pointed to
1653 * must have exactly 4 entries.
1654 */
1655VMM_INT_DECL(int) PGMGstUpdatePaePdpes(PVMCPU pVCpu, PCX86PDPE paPdpes)
1656{
1657 Assert(pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
1658
1659 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->pgm.s.aGstPaePdpeRegs); i++)
1660 {
1661 if (pVCpu->pgm.s.aGstPaePdpeRegs[i].u != paPdpes[i].u)
1662 {
1663 pVCpu->pgm.s.aGstPaePdpeRegs[i] = paPdpes[i];
1664
1665 /* Force lazy remapping if it changed in any way. */
1666 pVCpu->pgm.s.apGstPaePDsR3[i] = 0;
1667# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1668 pVCpu->pgm.s.apGstPaePDsR0[i] = 0;
1669# endif
1670 pVCpu->pgm.s.apGstPaePDsRC[i] = 0;
1671 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
1672 }
1673 }
1674 return VINF_SUCCESS;
1675}
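
/*
 * Usage sketch (fragment from a hypothetical caller, not part of the VirtualBox
 * sources): pushing PDPE values captured by the virtualization backend back into
 * PGM after PGMUpdateCR3, as required by the remark above.
 */
#if 0
    X86PDPE aPdpes[4];
    /* ... aPdpes[0..3] filled in by the caller ... */
    int rc2 = PGMGstUpdatePaePdpes(pVCpu, &aPdpes[0]);
    AssertRC(rc2);
#endif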
1676
1677
1678/**
1679 * Gets the current CR3 register value for the shadow memory context.
1680 * @returns CR3 value.
1681 * @param pVCpu VMCPU handle.
1682 */
1683VMMDECL(RTHCPHYS) PGMGetHyperCR3(PVMCPU pVCpu)
1684{
1685 PPGMPOOLPAGE pPoolPage = pVCpu->pgm.s.CTX_SUFF(pShwPageCR3);
1686 AssertPtrReturn(pPoolPage, 0);
1687 return pPoolPage->Core.Key;
1688}
1689
1690
1691/**
1692 * Gets the current CR3 register value for the nested memory context.
1693 * @returns CR3 value.
1694 * @param pVCpu VMCPU handle.
1695 */
1696VMMDECL(RTHCPHYS) PGMGetNestedCR3(PVMCPU pVCpu, PGMMODE enmShadowMode)
1697{
1698 NOREF(enmShadowMode);
1699 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1700 return pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key;
1701}
1702
1703
1704/**
1705 * Gets the current CR3 register value for the HC intermediate memory context.
1706 * @returns CR3 value.
1707 * @param pVM The VM handle.
1708 */
1709VMMDECL(RTHCPHYS) PGMGetInterHCCR3(PVM pVM)
1710{
1711 switch (pVM->pgm.s.enmHostMode)
1712 {
1713 case SUPPAGINGMODE_32_BIT:
1714 case SUPPAGINGMODE_32_BIT_GLOBAL:
1715 return pVM->pgm.s.HCPhysInterPD;
1716
1717 case SUPPAGINGMODE_PAE:
1718 case SUPPAGINGMODE_PAE_GLOBAL:
1719 case SUPPAGINGMODE_PAE_NX:
1720 case SUPPAGINGMODE_PAE_GLOBAL_NX:
1721 return pVM->pgm.s.HCPhysInterPaePDPT;
1722
1723 case SUPPAGINGMODE_AMD64:
1724 case SUPPAGINGMODE_AMD64_GLOBAL:
1725 case SUPPAGINGMODE_AMD64_NX:
1726 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
1727 return pVM->pgm.s.HCPhysInterPaePDPT;
1728
1729 default:
1730 AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode));
1731 return NIL_RTHCPHYS;
1732 }
1733}
1734
1735
1736/**
1737 * Gets the current CR3 register value for the RC intermediate memory context.
1738 * @returns CR3 value.
1739 * @param pVM The VM handle.
1740 * @param pVCpu VMCPU handle.
1741 */
1742VMMDECL(RTHCPHYS) PGMGetInterRCCR3(PVM pVM, PVMCPU pVCpu)
1743{
1744 switch (pVCpu->pgm.s.enmShadowMode)
1745 {
1746 case PGMMODE_32_BIT:
1747 return pVM->pgm.s.HCPhysInterPD;
1748
1749 case PGMMODE_PAE:
1750 case PGMMODE_PAE_NX:
1751 return pVM->pgm.s.HCPhysInterPaePDPT;
1752
1753 case PGMMODE_AMD64:
1754 case PGMMODE_AMD64_NX:
1755 return pVM->pgm.s.HCPhysInterPaePML4;
1756
1757 case PGMMODE_EPT:
1758 case PGMMODE_NESTED:
1759 return 0; /* not relevant */
1760
1761 default:
1762 AssertMsgFailed(("enmShadowMode=%d\n", pVCpu->pgm.s.enmShadowMode));
1763 return NIL_RTHCPHYS;
1764 }
1765}
1766
1767
1768/**
1769 * Gets the CR3 register value for the 32-Bit intermediate memory context.
1770 * @returns CR3 value.
1771 * @param pVM The VM handle.
1772 */
1773VMMDECL(RTHCPHYS) PGMGetInter32BitCR3(PVM pVM)
1774{
1775 return pVM->pgm.s.HCPhysInterPD;
1776}
1777
1778
1779/**
1780 * Gets the CR3 register value for the PAE intermediate memory context.
1781 * @returns CR3 value.
1782 * @param pVM The VM handle.
1783 */
1784VMMDECL(RTHCPHYS) PGMGetInterPaeCR3(PVM pVM)
1785{
1786 return pVM->pgm.s.HCPhysInterPaePDPT;
1787}
1788
1789
1790/**
1791 * Gets the CR3 register value for the AMD64 intermediate memory context.
1792 * @returns CR3 value.
1793 * @param pVM The VM handle.
1794 */
1795VMMDECL(RTHCPHYS) PGMGetInterAmd64CR3(PVM pVM)
1796{
1797 return pVM->pgm.s.HCPhysInterPaePML4;
1798}
1799
1800
1801/**
1802 * Performs and schedules necessary updates following a CR3 load or reload.
1803 *
1804 * This will normally involve mapping the guest PD or nPDPT
1805 *
1806 * @returns VBox status code.
1807 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
1808 * safely be ignored and overridden since the FF will be set too then.
1809 * @param pVCpu VMCPU handle.
1810 * @param cr3 The new cr3.
1811 * @param fGlobal Indicates whether this is a global flush or not.
1812 */
1813VMMDECL(int) PGMFlushTLB(PVMCPU pVCpu, uint64_t cr3, bool fGlobal)
1814{
1815 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLB), a);
1816 PVM pVM = pVCpu->CTX_SUFF(pVM);
1817
1818 VMCPU_ASSERT_EMT(pVCpu);
1819
1820 /*
1821 * Always flag the necessary updates; necessary for hardware acceleration
1822 */
1823 /** @todo optimize this, it shouldn't always be necessary. */
1824 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
1825 if (fGlobal)
1826 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1827 LogFlow(("PGMFlushTLB: cr3=%RX64 OldCr3=%RX64 fGlobal=%d\n", cr3, pVCpu->pgm.s.GCPhysCR3, fGlobal));
1828
1829 /*
1830 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
1831 */
1832 int rc = VINF_SUCCESS;
1833 RTGCPHYS GCPhysCR3;
1834 switch (pVCpu->pgm.s.enmGuestMode)
1835 {
1836 case PGMMODE_PAE:
1837 case PGMMODE_PAE_NX:
1838 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1839 break;
1840 case PGMMODE_AMD64:
1841 case PGMMODE_AMD64_NX:
1842 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
1843 break;
1844 default:
1845 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1846 break;
1847 }
1848
1849 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
1850 {
1851 RTGCPHYS GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
1852 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
1853 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
1854 if (RT_LIKELY(rc == VINF_SUCCESS))
1855 {
1856 if (pgmMapAreMappingsFloating(pVM))
1857 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1858 }
1859 else
1860 {
1861 AssertMsg(rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
1862 Assert(VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_PGM_SYNC_CR3));
1863 pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3;
1864 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MAP_CR3;
1865 if (pgmMapAreMappingsFloating(pVM))
1866 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;
1867 }
1868
1869 if (fGlobal)
1870 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBNewCR3Global));
1871 else
1872 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBNewCR3));
1873 }
1874 else
1875 {
1876# ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
1877 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1878 if (pPool->cDirtyPages)
1879 {
1880 pgmLock(pVM);
1881 pgmPoolResetDirtyPages(pVM);
1882 pgmUnlock(pVM);
1883 }
1884# endif
1885 /*
1886 * Check if we have a pending update of the CR3 monitoring.
1887 */
1888 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
1889 {
1890 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1891 Assert(!pVM->pgm.s.fMappingsFixed); Assert(!pVM->pgm.s.fMappingsDisabled);
1892 }
1893 if (fGlobal)
1894 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBSameCR3Global));
1895 else
1896 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBSameCR3));
1897 }
1898
1899 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLB), a);
1900 return rc;
1901}
1902
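/*
 * Illustrative sketch (not part of the original file): a typical caller of
 * PGMFlushTLB is the emulation of a MOV CR3 instruction.  This helper is a
 * simplified, hypothetical example and not the actual interpreter code.
 *
 * @code
 *     static int emulateMovToCr3(PVMCPU pVCpu, uint64_t uNewCr3)
 *     {
 *         bool const fGlobal = !(CPUMGetGuestCR4(pVCpu) & X86_CR4_PGE); // no PGE -> global flush
 *         CPUMSetGuestCR3(pVCpu, uNewCr3);
 *         int rc = PGMFlushTLB(pVCpu, uNewCr3, fGlobal);
 *         // VINF_PGM_SYNC_CR3 can be returned here; since the SYNC_CR3 force
 *         // flag is set as well, the caller may treat it as success.
 *         return rc;
 *     }
 * @endcode
 */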
1903
1904/**
1905 * Performs and schedules necessary updates following a CR3 load or reload when
1906 * using nested or extended paging.
1907 *
1908 * This API is an alternative to PGMFlushTLB that avoids actually flushing the
1909 * TLB and triggering a SyncCR3.
1910 *
1911 * This will normally involve mapping the guest PD or nPDPT
1912 *
1913 * @returns VBox status code.
1914 * @retval VINF_SUCCESS.
1915 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync (only applicable
1916 * when not using nested paging). This can safely be ignored and
1917 * overridden since the FF will be set too then.
1918 * @param pVCpu VMCPU handle.
1919 * @param cr3 The new cr3.
1920 */
1921VMMDECL(int) PGMUpdateCR3(PVMCPU pVCpu, uint64_t cr3)
1922{
1923 VMCPU_ASSERT_EMT(pVCpu);
1924 LogFlow(("PGMUpdateCR3: cr3=%RX64 OldCr3=%RX64\n", cr3, pVCpu->pgm.s.GCPhysCR3));
1925
1926 /* We assume we're only called in nested paging mode. */
1927 Assert(pVCpu->CTX_SUFF(pVM)->pgm.s.fNestedPaging || pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
1928 Assert(pVCpu->CTX_SUFF(pVM)->pgm.s.fMappingsDisabled);
1929 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3));
1930
1931 /*
1932 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
1933 */
1934 int rc = VINF_SUCCESS;
1935 RTGCPHYS GCPhysCR3;
1936 switch (pVCpu->pgm.s.enmGuestMode)
1937 {
1938 case PGMMODE_PAE:
1939 case PGMMODE_PAE_NX:
1940 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1941 break;
1942 case PGMMODE_AMD64:
1943 case PGMMODE_AMD64_NX:
1944 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
1945 break;
1946 default:
1947 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1948 break;
1949 }
1950 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
1951 {
1952 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
1953 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
1954 AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */ /** @todo this isn't true for the mac, but we need hw to test/fix this. */
1955 }
1956 return rc;
1957}
1958
1959
1960/**
1961 * Synchronize the paging structures.
1962 *
1963 * This function is called in response to the VM_FF_PGM_SYNC_CR3 and
1964 * VM_FF_PGM_SYNC_CR3_NONGLOBAL. Those two force action flags are set
1965 * in several places, most importantly whenever the CR3 is loaded.
1966 *
1967 * @returns VBox status code.
1968 * @param pVCpu VMCPU handle.
1969 * @param cr0 Guest context CR0 register
1970 * @param cr3 Guest context CR3 register
1971 * @param cr4 Guest context CR4 register
1972 * @param fGlobal Including global page directories or not
1973 */
1974VMMDECL(int) PGMSyncCR3(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
1975{
1976 int rc;
1977
1978 VMCPU_ASSERT_EMT(pVCpu);
1979
1980 /*
1981 * The pool may have pending stuff and even require a return to ring-3 to
1982 * clear the whole thing.
1983 */
1984 rc = pgmPoolSyncCR3(pVCpu);
1985 if (rc != VINF_SUCCESS)
1986 return rc;
1987
1988 /*
1989 * We might be called when we shouldn't.
1990 *
1991 * The mode switching will ensure that the PD is resynced
1992 * after every mode switch. So, if we find ourselves here
1993 * when in protected or real mode we can safely disable the
1994 * FF and return immediately.
1995 */
1996 if (pVCpu->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
1997 {
1998 Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
1999 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2000 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2001 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2002 return VINF_SUCCESS;
2003 }
2004
2005 /* If global pages are not supported, then all flushes are global. */
2006 if (!(cr4 & X86_CR4_PGE))
2007 fGlobal = true;
2008 LogFlow(("PGMSyncCR3: cr0=%RX64 cr3=%RX64 cr4=%RX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
2009 VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)));
2010
2011 /*
2012 * Check if we need to finish an aborted MapCR3 call (see PGMFlushTLB).
2013 * This should be done before SyncCR3.
2014 */
2015 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MAP_CR3)
2016 {
2017 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MAP_CR3;
2018
2019 RTGCPHYS GCPhysCR3Old = pVCpu->pgm.s.GCPhysCR3; NOREF(GCPhysCR3Old);
2020 RTGCPHYS GCPhysCR3;
2021 switch (pVCpu->pgm.s.enmGuestMode)
2022 {
2023 case PGMMODE_PAE:
2024 case PGMMODE_PAE_NX:
2025 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
2026 break;
2027 case PGMMODE_AMD64:
2028 case PGMMODE_AMD64_NX:
2029 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
2030 break;
2031 default:
2032 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
2033 break;
2034 }
2035
2036 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
2037 {
2038 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2039 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
2040 }
2041
2042 /* Make sure we check for pending pgm pool syncs as we clear VMCPU_FF_PGM_SYNC_CR3 later on! */
2043 if ( rc == VINF_PGM_SYNC_CR3
2044 || (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL))
2045 {
2046 Log(("PGMSyncCR3: pending pgm pool sync after MapCR3!\n"));
2047#ifdef IN_RING3
2048 rc = pgmPoolSyncCR3(pVCpu);
2049#else
2050 if (rc == VINF_PGM_SYNC_CR3)
2051 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3Old;
2052 return VINF_PGM_SYNC_CR3;
2053#endif
2054 }
2055 AssertRCReturn(rc, rc);
2056 AssertRCSuccessReturn(rc, VERR_IPE_UNEXPECTED_INFO_STATUS);
2057 }
2058
2059 /*
2060 * Let the 'Bth' function do the work and we'll just keep track of the flags.
2061 */
2062 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
2063 rc = PGM_BTH_PFN(SyncCR3, pVCpu)(pVCpu, cr0, cr3, cr4, fGlobal);
2064 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
2065 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
2066 if (rc == VINF_SUCCESS)
2067 {
2068 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)
2069 {
2070 /* Go back to ring 3 if a pgm pool sync is again pending. */
2071 return VINF_PGM_SYNC_CR3;
2072 }
2073
2074 if (!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
2075 {
2076 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2077 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2078 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2079 }
2080
2081 /*
2082 * Check if we have a pending update of the CR3 monitoring.
2083 */
2084 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
2085 {
2086 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
2087 Assert(!pVCpu->CTX_SUFF(pVM)->pgm.s.fMappingsFixed);
2088 Assert(!pVCpu->CTX_SUFF(pVM)->pgm.s.fMappingsDisabled);
2089 }
2090 }
2091
2092 /*
2093 * Now flush the CR3 (guest context).
2094 */
2095 if (rc == VINF_SUCCESS)
2096 PGM_INVL_VCPU_TLBS(pVCpu);
2097 return rc;
2098}
2099
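/*
 * Illustrative sketch (not part of the original file): the force-action
 * processing in EM typically reacts to the two SYNC_CR3 force flags by calling
 * PGMSyncCR3 with the current guest control registers.  Simplified outline only.
 *
 * @code
 *     if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
 *     {
 *         int rc = PGMSyncCR3(pVCpu,
 *                             CPUMGetGuestCR0(pVCpu),
 *                             CPUMGetGuestCR3(pVCpu),
 *                             CPUMGetGuestCR4(pVCpu),
 *                             VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3)); // fGlobal
 *         if (RT_FAILURE(rc))
 *             return rc;
 *     }
 * @endcode
 */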
2100
2101/**
2102 * Called whenever CR0 or CR4 changes in a way which may affect the paging mode.
2103 *
2104 * @returns VBox status code, with the following informational code for
2105 * VM scheduling.
2106 * @retval VINF_SUCCESS if there was no change, or it was successfully dealt with.
2107 * @retval VINF_PGM_CHANGE_MODE if we're in RC or R0 and the mode changes.
2108 * (I.e. not in R3.)
2109 * @retval VINF_EM_SUSPEND or VINF_EM_OFF on a fatal runtime error. (R3 only)
2110 *
2111 * @param pVCpu VMCPU handle.
2112 * @param cr0 The new cr0.
2113 * @param cr4 The new cr4.
2114 * @param efer The new extended feature enable register.
2115 */
2116VMMDECL(int) PGMChangeMode(PVMCPU pVCpu, uint64_t cr0, uint64_t cr4, uint64_t efer)
2117{
2118 PGMMODE enmGuestMode;
2119
2120 VMCPU_ASSERT_EMT(pVCpu);
2121
2122 /*
2123 * Calc the new guest mode.
2124 */
2125 if (!(cr0 & X86_CR0_PE))
2126 enmGuestMode = PGMMODE_REAL;
2127 else if (!(cr0 & X86_CR0_PG))
2128 enmGuestMode = PGMMODE_PROTECTED;
2129 else if (!(cr4 & X86_CR4_PAE))
2130 {
2131 bool const fPse = !!(cr4 & X86_CR4_PSE);
2132 if (pVCpu->pgm.s.fGst32BitPageSizeExtension != fPse)
2133 Log(("PGMChangeMode: CR4.PSE %d -> %d\n", pVCpu->pgm.s.fGst32BitPageSizeExtension, fPse));
2134 pVCpu->pgm.s.fGst32BitPageSizeExtension = fPse;
2135 enmGuestMode = PGMMODE_32_BIT;
2136 }
2137 else if (!(efer & MSR_K6_EFER_LME))
2138 {
2139 if (!(efer & MSR_K6_EFER_NXE))
2140 enmGuestMode = PGMMODE_PAE;
2141 else
2142 enmGuestMode = PGMMODE_PAE_NX;
2143 }
2144 else
2145 {
2146 if (!(efer & MSR_K6_EFER_NXE))
2147 enmGuestMode = PGMMODE_AMD64;
2148 else
2149 enmGuestMode = PGMMODE_AMD64_NX;
2150 }
2151
2152 /*
2153 * Did it change?
2154 */
2155 if (pVCpu->pgm.s.enmGuestMode == enmGuestMode)
2156 return VINF_SUCCESS;
2157
2158 /* Flush the TLB */
2159 PGM_INVL_VCPU_TLBS(pVCpu);
2160
2161#ifdef IN_RING3
2162 return PGMR3ChangeMode(pVCpu->CTX_SUFF(pVM), pVCpu, enmGuestMode);
2163#else
2164 LogFlow(("PGMChangeMode: returns VINF_PGM_CHANGE_MODE.\n"));
2165 return VINF_PGM_CHANGE_MODE;
2166#endif
2167}
2168
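/*
 * Illustrative sketch (not part of the original file): after emulating a write
 * to CR0, CR4 or EFER, a caller would re-evaluate the paging mode as shown
 * below.  In RC and R0 the VINF_PGM_CHANGE_MODE status must be passed up so
 * that ring-3 can perform the actual switch.  Simplified outline only.
 *
 * @code
 *     int rc = PGMChangeMode(pVCpu,
 *                            CPUMGetGuestCR0(pVCpu),
 *                            CPUMGetGuestCR4(pVCpu),
 *                            CPUMGetGuestEFER(pVCpu));
 *     if (rc == VINF_PGM_CHANGE_MODE)
 *         return rc; // go to ring-3; PGMR3ChangeMode does the real work there
 * @endcode
 */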
2169
2170/**
2171 * Gets the current guest paging mode.
2172 *
2173 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
2174 *
2175 * @returns The current paging mode.
2176 * @param pVCpu VMCPU handle.
2177 */
2178VMMDECL(PGMMODE) PGMGetGuestMode(PVMCPU pVCpu)
2179{
2180 return pVCpu->pgm.s.enmGuestMode;
2181}
2182
2183
2184/**
2185 * Gets the current shadow paging mode.
2186 *
2187 * @returns The current paging mode.
2188 * @param pVCpu VMCPU handle.
2189 */
2190VMMDECL(PGMMODE) PGMGetShadowMode(PVMCPU pVCpu)
2191{
2192 return pVCpu->pgm.s.enmShadowMode;
2193}
2194
2195
2196/**
2197 * Gets the current host paging mode.
2198 *
2199 * @returns The current paging mode.
2200 * @param pVM The VM handle.
2201 */
2202VMMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
2203{
2204 switch (pVM->pgm.s.enmHostMode)
2205 {
2206 case SUPPAGINGMODE_32_BIT:
2207 case SUPPAGINGMODE_32_BIT_GLOBAL:
2208 return PGMMODE_32_BIT;
2209
2210 case SUPPAGINGMODE_PAE:
2211 case SUPPAGINGMODE_PAE_GLOBAL:
2212 return PGMMODE_PAE;
2213
2214 case SUPPAGINGMODE_PAE_NX:
2215 case SUPPAGINGMODE_PAE_GLOBAL_NX:
2216 return PGMMODE_PAE_NX;
2217
2218 case SUPPAGINGMODE_AMD64:
2219 case SUPPAGINGMODE_AMD64_GLOBAL:
2220 return PGMMODE_AMD64;
2221
2222 case SUPPAGINGMODE_AMD64_NX:
2223 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
2224 return PGMMODE_AMD64_NX;
2225
2226 default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
2227 }
2228
2229 return PGMMODE_INVALID;
2230}
2231
2232
2233/**
2234 * Get mode name.
2235 *
2236 * @returns read-only name string.
2237 * @param enmMode The mode whose name is desired.
2238 */
2239VMMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
2240{
2241 switch (enmMode)
2242 {
2243 case PGMMODE_REAL: return "Real";
2244 case PGMMODE_PROTECTED: return "Protected";
2245 case PGMMODE_32_BIT: return "32-bit";
2246 case PGMMODE_PAE: return "PAE";
2247 case PGMMODE_PAE_NX: return "PAE+NX";
2248 case PGMMODE_AMD64: return "AMD64";
2249 case PGMMODE_AMD64_NX: return "AMD64+NX";
2250 case PGMMODE_NESTED: return "Nested";
2251 case PGMMODE_EPT: return "EPT";
2252 default: return "unknown mode value";
2253 }
2254}
2255
2256
2257
2258/**
2259 * Notification from CPUM that the EFER.NXE bit has changed.
2260 *
2261 * @param pVCpu The virtual CPU for which EFER changed.
2262 * @param fNxe The new NXE state.
2263 */
2264VMM_INT_DECL(void) PGMNotifyNxeChanged(PVMCPU pVCpu, bool fNxe)
2265{
2266/** @todo VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu); */
2267 Log(("PGMNotifyNxeChanged: fNxe=%RTbool\n", fNxe));
2268
2269 pVCpu->pgm.s.fNoExecuteEnabled = fNxe;
2270 if (fNxe)
2271 {
2272 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
2273 pVCpu->pgm.s.fGstPaeMbzPteMask &= ~X86_PTE_PAE_NX;
2274 pVCpu->pgm.s.fGstPaeMbzPdeMask &= ~X86_PDE_PAE_NX;
2275 pVCpu->pgm.s.fGstPaeMbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
2276 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask - N/A */
2277 pVCpu->pgm.s.fGstAmd64MbzPteMask &= ~X86_PTE_PAE_NX;
2278 pVCpu->pgm.s.fGstAmd64MbzPdeMask &= ~X86_PDE_PAE_NX;
2279 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
2280 pVCpu->pgm.s.fGstAmd64MbzPdpeMask &= ~X86_PDPE_LM_NX;
2281 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask &= ~X86_PDPE_LM_NX;
2282 pVCpu->pgm.s.fGstAmd64MbzPml4eMask &= ~X86_PML4E_NX;
2283
2284 pVCpu->pgm.s.fGst64ShadowedPteMask |= X86_PTE_PAE_NX;
2285 pVCpu->pgm.s.fGst64ShadowedPdeMask |= X86_PDE_PAE_NX;
2286 pVCpu->pgm.s.fGst64ShadowedBigPdeMask |= X86_PDE2M_PAE_NX;
2287 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask |= X86_PDE2M_PAE_NX;
2288 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask |= X86_PDPE_LM_NX;
2289 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask |= X86_PML4E_NX;
2290 }
2291 else
2292 {
2293 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
2294 pVCpu->pgm.s.fGstPaeMbzPteMask |= X86_PTE_PAE_NX;
2295 pVCpu->pgm.s.fGstPaeMbzPdeMask |= X86_PDE_PAE_NX;
2296 pVCpu->pgm.s.fGstPaeMbzBigPdeMask |= X86_PDE2M_PAE_NX;
2297 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask -N/A */
2298 pVCpu->pgm.s.fGstAmd64MbzPteMask |= X86_PTE_PAE_NX;
2299 pVCpu->pgm.s.fGstAmd64MbzPdeMask |= X86_PDE_PAE_NX;
2300 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask |= X86_PDE2M_PAE_NX;
2301 pVCpu->pgm.s.fGstAmd64MbzPdpeMask |= X86_PDPE_LM_NX;
2302 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask |= X86_PDPE_LM_NX;
2303 pVCpu->pgm.s.fGstAmd64MbzPml4eMask |= X86_PML4E_NX;
2304
2305 pVCpu->pgm.s.fGst64ShadowedPteMask &= ~X86_PTE_PAE_NX;
2306 pVCpu->pgm.s.fGst64ShadowedPdeMask &= ~X86_PDE_PAE_NX;
2307 pVCpu->pgm.s.fGst64ShadowedBigPdeMask &= ~X86_PDE2M_PAE_NX;
2308 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask &= ~X86_PDE2M_PAE_NX;
2309 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask &= ~X86_PDPE_LM_NX;
2310 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask &= ~X86_PML4E_NX;
2311 }
2312}
2313
2314
2315/**
2316 * Check if any pgm pool pages are marked dirty (not monitored).
2317 *
2318 * @returns true if dirty pages are present, false otherwise.
2319 * @param pVM The VM to operate on.
2320 */
2321VMMDECL(bool) PGMHasDirtyPages(PVM pVM)
2322{
2323 return pVM->pgm.s.CTX_SUFF(pPool)->cDirtyPages != 0;
2324}
2325
2326
2327/**
2328 * Check if this VCPU currently owns the PGM lock.
2329 *
2330 * @returns bool owner/not owner
2331 * @param pVM The VM to operate on.
2332 */
2333VMMDECL(bool) PGMIsLockOwner(PVM pVM)
2334{
2335 return PDMCritSectIsOwner(&pVM->pgm.s.CritSectX);
2336}
2337
2338
2339/**
2340 * Enable or disable large page usage
2341 *
2342 * @returns VBox status code.
2343 * @param pVM The VM to operate on.
2344 * @param fUseLargePages Use/not use large pages
2345 */
2346VMMDECL(int) PGMSetLargePageUsage(PVM pVM, bool fUseLargePages)
2347{
2348 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2349
2350 pVM->fUseLargePages = fUseLargePages;
2351 return VINF_SUCCESS;
2352}
2353
2354
2355/**
2356 * Acquire the PGM lock.
2357 *
2358 * @returns VBox status code
2359 * @param pVM The VM to operate on.
2360 */
2361int pgmLock(PVM pVM)
2362{
2363 int rc = PDMCritSectEnter(&pVM->pgm.s.CritSectX, VERR_SEM_BUSY);
2364#if defined(IN_RC) || defined(IN_RING0)
2365 if (rc == VERR_SEM_BUSY)
2366 rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_LOCK, 0);
2367#endif
2368 AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc));
2369 return rc;
2370}
2371
2372
2373/**
2374 * Release the PGM lock.
2375 *
2376 * @returns VBox status code
2377 * @param pVM The VM to operate on.
2378 */
2379void pgmUnlock(PVM pVM)
2380{
2381 uint32_t cDeprecatedPageLocks = pVM->pgm.s.cDeprecatedPageLocks;
2382 pVM->pgm.s.cDeprecatedPageLocks = 0;
2383 int rc = PDMCritSectLeave(&pVM->pgm.s.CritSectX);
2384 if (rc == VINF_SEM_NESTED)
2385 pVM->pgm.s.cDeprecatedPageLocks = cDeprecatedPageLocks;
2386}
2387
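/*
 * Illustrative sketch (not part of the original file): the canonical usage
 * pattern for the lock pair above inside PGM - take the lock, do the page or
 * pool work, and release it on every exit path.
 *
 * @code
 *     pgmLock(pVM);
 *     PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
 *     int rc = pPage ? VINF_SUCCESS : VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
 *     // ... operate on pPage while holding the lock ...
 *     pgmUnlock(pVM);
 *     return rc;
 * @endcode
 */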
2388#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
2389
2390/**
2391 * Common worker for pgmRZDynMapGCPageOffInlined and pgmRZDynMapGCPageV2Inlined.
2392 *
2393 * @returns VBox status code.
2394 * @param pVM The VM handle.
2395 * @param pVCpu The current CPU.
2396 * @param GCPhys The guest physical address of the page to map. The
2397 * offset bits are not ignored.
2398 * @param ppv Where to return the address corresponding to @a GCPhys.
2399 */
2400int pgmRZDynMapGCPageCommon(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
2401{
2402 pgmLock(pVM);
2403
2404 /*
2405 * Convert it to a writable page and pass it on to the dynamic mapper.
2406 */
2407 int rc;
2408 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
2409 if (RT_LIKELY(pPage))
2410 {
2411 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
2412 if (RT_SUCCESS(rc))
2413 {
2414 void *pv;
2415 rc = pgmRZDynMapHCPageInlined(pVCpu, PGM_PAGE_GET_HCPHYS(pPage), &pv RTLOG_COMMA_SRC_POS_ARGS);
2416 if (RT_SUCCESS(rc))
2417 *ppv = (void *)((uintptr_t)pv | ((uintptr_t)GCPhys & PAGE_OFFSET_MASK));
2418 }
2419 else
2420 AssertRC(rc);
2421 }
2422 else
2423 {
2424 AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
2425 rc = VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2426 }
2427
2428 pgmUnlock(pVM);
2429 return rc;
2430}
2431
2432#endif /* IN_RC || VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
2433#if !defined(IN_R0) || defined(LOG_ENABLED)
2434
2435/** Format handler for PGMPAGE.
2436 * @copydoc FNRTSTRFORMATTYPE */
2437static DECLCALLBACK(size_t) pgmFormatTypeHandlerPage(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
2438 const char *pszType, void const *pvValue,
2439 int cchWidth, int cchPrecision, unsigned fFlags,
2440 void *pvUser)
2441{
2442 size_t cch;
2443 PCPGMPAGE pPage = (PCPGMPAGE)pvValue;
2444 if (RT_VALID_PTR(pPage))
2445 {
2446 char szTmp[64+80];
2447
2448 cch = 0;
2449
2450 /* The single char state stuff. */
2451 static const char s_achPageStates[4] = { 'Z', 'A', 'W', 'S' };
2452 szTmp[cch++] = s_achPageStates[PGM_PAGE_GET_STATE_NA(pPage)];
2453
2454#define IS_PART_INCLUDED(lvl) ( !(fFlags & RTSTR_F_PRECISION) || cchPrecision == (lvl) || cchPrecision >= (lvl)+10 )
2455 if (IS_PART_INCLUDED(5))
2456 {
2457 static const char s_achHandlerStates[4] = { '-', 't', 'w', 'a' };
2458 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)];
2459 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_VIRT_STATE(pPage)];
2460 }
2461
2462 /* The type. */
2463 if (IS_PART_INCLUDED(4))
2464 {
2465 szTmp[cch++] = ':';
2466 static const char s_achPageTypes[8][4] = { "INV", "RAM", "MI2", "M2A", "SHA", "ROM", "MIO", "BAD" };
2467 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][0];
2468 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][1];
2469 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][2];
2470 }
2471
2472 /* The numbers. */
2473 if (IS_PART_INCLUDED(3))
2474 {
2475 szTmp[cch++] = ':';
2476 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_HCPHYS_NA(pPage), 16, 12, 0, RTSTR_F_ZEROPAD | RTSTR_F_64BIT);
2477 }
2478
2479 if (IS_PART_INCLUDED(2))
2480 {
2481 szTmp[cch++] = ':';
2482 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_PAGEID(pPage), 16, 7, 0, RTSTR_F_ZEROPAD | RTSTR_F_32BIT);
2483 }
2484
2485 if (IS_PART_INCLUDED(6))
2486 {
2487 szTmp[cch++] = ':';
2488 static const char s_achRefs[4] = { '-', 'U', '!', 'L' };
2489 szTmp[cch++] = s_achRefs[PGM_PAGE_GET_TD_CREFS_NA(pPage)];
2490 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_TD_IDX_NA(pPage), 16, 4, 0, RTSTR_F_ZEROPAD | RTSTR_F_16BIT);
2491 }
2492#undef IS_PART_INCLUDED
2493
2494 cch = pfnOutput(pvArgOutput, szTmp, cch);
2495 }
2496 else
2497 cch = pfnOutput(pvArgOutput, "<bad-pgmpage-ptr>", sizeof("<bad-pgmpage-ptr>") - 1);
2498 NOREF(pszType); NOREF(cchWidth); NOREF(pvUser);
2499 return cch;
2500}
2501
2502
2503/** Format handler for PGMRAMRANGE.
2504 * @copydoc FNRTSTRFORMATTYPE */
2505static DECLCALLBACK(size_t) pgmFormatTypeHandlerRamRange(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
2506 const char *pszType, void const *pvValue,
2507 int cchWidth, int cchPrecision, unsigned fFlags,
2508 void *pvUser)
2509{
2510 size_t cch;
2511 PGMRAMRANGE const *pRam = (PGMRAMRANGE const *)pvValue;
2512 if (VALID_PTR(pRam))
2513 {
2514 char szTmp[80];
2515 cch = RTStrPrintf(szTmp, sizeof(szTmp), "%RGp-%RGp", pRam->GCPhys, pRam->GCPhysLast);
2516 cch = pfnOutput(pvArgOutput, szTmp, cch);
2517 }
2518 else
2519 cch = pfnOutput(pvArgOutput, "<bad-pgmramrange-ptr>", sizeof("<bad-pgmramrange-ptr>") - 1);
2520 NOREF(pszType); NOREF(cchWidth); NOREF(cchPrecision); NOREF(pvUser); NOREF(fFlags);
2521 return cch;
2522}
2523
2524/** Format type handlers to be registered/deregistered. */
2525static const struct
2526{
2527 char szType[24];
2528 PFNRTSTRFORMATTYPE pfnHandler;
2529} g_aPgmFormatTypes[] =
2530{
2531 { "pgmpage", pgmFormatTypeHandlerPage },
2532 { "pgmramrange", pgmFormatTypeHandlerRamRange }
2533};
2534
2535#endif /* !IN_R0 || LOG_ENABLED */
2536
2537/**
2538 * Registers the global string format types.
2539 *
2540 * This should be called at module load time or in some other manner that ensures
2541 * that it's called exactly one time.
2542 *
2543 * @returns IPRT status code on RTStrFormatTypeRegister failure.
2544 */
2545VMMDECL(int) PGMRegisterStringFormatTypes(void)
2546{
2547#if !defined(IN_R0) || defined(LOG_ENABLED)
2548 int rc = VINF_SUCCESS;
2549 unsigned i;
2550 for (i = 0; RT_SUCCESS(rc) && i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
2551 {
2552 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
2553# ifdef IN_RING0
2554 if (rc == VERR_ALREADY_EXISTS)
2555 {
2556 /* in case of cleanup failure in ring-0 */
2557 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2558 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
2559 }
2560# endif
2561 }
2562 if (RT_FAILURE(rc))
2563 while (i-- > 0)
2564 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2565
2566 return rc;
2567#else
2568 return VINF_SUCCESS;
2569#endif
2570}
2571
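/*
 * Illustrative sketch (not part of the original file): once registered, the
 * custom format types above can be used directly in logging statements; the
 * log text here is an example only.
 *
 * @code
 *     Log(("pgmPhysPageMakeWritable: %R[pgmpage] in %R[pgmramrange]\n", pPage, pRam));
 * @endcode
 */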
2572
2573/**
2574 * Deregisters the global string format types.
2575 *
2576 * This should be called at module unload time or in some other manner that
2577 * ensures that it's called exactly one time.
2578 */
2579VMMDECL(void) PGMDeregisterStringFormatTypes(void)
2580{
2581#if !defined(IN_R0) || defined(LOG_ENABLED)
2582 for (unsigned i = 0; i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
2583 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2584#endif
2585}
2586
2587#ifdef VBOX_STRICT
2588
2589/**
2590 * Asserts that there are no mapping conflicts.
2591 *
2592 * @returns Number of conflicts.
2593 * @param pVM The VM Handle.
2594 */
2595VMMDECL(unsigned) PGMAssertNoMappingConflicts(PVM pVM)
2596{
2597 unsigned cErrors = 0;
2598
2599 /* Only applies to raw mode -> 1 VCPU */
2600 Assert(pVM->cCpus == 1);
2601 PVMCPU pVCpu = &pVM->aCpus[0];
2602
2603 /*
2604 * Check for mapping conflicts.
2605 */
2606 for (PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
2607 pMapping;
2608 pMapping = pMapping->CTX_SUFF(pNext))
2609 {
2610 /** @todo This is slow and should be optimized, but since it's just assertions I don't care now. */
2611 for (RTGCPTR GCPtr = pMapping->GCPtr;
2612 GCPtr <= pMapping->GCPtrLast;
2613 GCPtr += PAGE_SIZE)
2614 {
2615 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)GCPtr, NULL, NULL);
2616 if (rc != VERR_PAGE_TABLE_NOT_PRESENT)
2617 {
2618 AssertMsgFailed(("Conflict at %RGv with %s\n", GCPtr, R3STRING(pMapping->pszDesc)));
2619 cErrors++;
2620 break;
2621 }
2622 }
2623 }
2624
2625 return cErrors;
2626}
2627
2628
2629/**
2630 * Asserts that everything related to the guest CR3 is correctly shadowed.
2631 *
2632 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
2633 * and assert the correctness of the guest CR3 mapping before asserting that the
2634 * shadow page tables are in sync with the guest page tables.
2635 *
2636 * @returns Number of conflicts.
2637 * @param pVM The VM Handle.
2638 * @param pVCpu VMCPU handle.
2639 * @param cr3 The current guest CR3 register value.
2640 * @param cr4 The current guest CR4 register value.
2641 */
2642VMMDECL(unsigned) PGMAssertCR3(PVM pVM, PVMCPU pVCpu, uint64_t cr3, uint64_t cr4)
2643{
2644 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
2645 pgmLock(pVM);
2646 unsigned cErrors = PGM_BTH_PFN(AssertCR3, pVCpu)(pVCpu, cr3, cr4, 0, ~(RTGCPTR)0);
2647 pgmUnlock(pVM);
2648 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
2649 return cErrors;
2650}
2651
2652#endif /* VBOX_STRICT */