VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp@31080

Last change on this file since 31080 was 31080, checked in by vboxsync, 14 years ago

PGM: Micro optimizations.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 88.3 KB
1/* $Id: PGMAll.cpp 31080 2010-07-24 17:25:32Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor - All context code.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_PGM
22#include <VBox/pgm.h>
23#include <VBox/cpum.h>
24#include <VBox/selm.h>
25#include <VBox/iom.h>
26#include <VBox/sup.h>
27#include <VBox/mm.h>
28#include <VBox/stam.h>
29#include <VBox/csam.h>
30#include <VBox/patm.h>
31#include <VBox/trpm.h>
32#include <VBox/rem.h>
33#include <VBox/em.h>
34#include <VBox/hwaccm.h>
35#include <VBox/hwacc_vmx.h>
36#include "../PGMInternal.h"
37#include <VBox/vm.h>
38#include "../PGMInline.h"
39#include <iprt/assert.h>
40#include <iprt/asm-amd64-x86.h>
41#include <iprt/string.h>
42#include <VBox/log.h>
43#include <VBox/param.h>
44#include <VBox/err.h>
45
46
47/*******************************************************************************
48* Structures and Typedefs *
49*******************************************************************************/
50/**
51 * State structure for PGM_GST_NAME(HandlerVirtualUpdate) that's
52 * passed to PGM_GST_NAME(VirtHandlerUpdateOne) during enumeration.
53 */
54typedef struct PGMHVUSTATE
55{
56 /** The VM handle. */
57 PVM pVM;
58 /** The VMCPU handle. */
59 PVMCPU pVCpu;
60 /** The todo flags. */
61 RTUINT fTodo;
62 /** The CR4 register value. */
63 uint32_t cr4;
64} PGMHVUSTATE, *PPGMHVUSTATE;
65
66
67/*******************************************************************************
68* Internal Functions *
69*******************************************************************************/
70DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
71DECLINLINE(int) pgmShwGetPaePoolPagePD(PPGMCPU pPGM, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde);
72#ifndef IN_RC
73static int pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PCX86PML4E pGstPml4e, PCX86PDPE pGstPdpe, PX86PDPAE *ppPD);
74static int pgmShwGetEPTPDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD);
75#endif
76
77
78/*
79 * Shadow - 32-bit mode
80 */
81#define PGM_SHW_TYPE PGM_TYPE_32BIT
82#define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
83#include "PGMAllShw.h"
84
85/* Guest - real mode */
86#define PGM_GST_TYPE PGM_TYPE_REAL
87#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
88#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
89#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
90#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
91#include "PGMGstDefs.h"
92#include "PGMAllGst.h"
93#include "PGMAllBth.h"
94#undef BTH_PGMPOOLKIND_PT_FOR_PT
95#undef BTH_PGMPOOLKIND_ROOT
96#undef PGM_BTH_NAME
97#undef PGM_GST_TYPE
98#undef PGM_GST_NAME
99
100/* Guest - protected mode */
101#define PGM_GST_TYPE PGM_TYPE_PROT
102#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
103#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
104#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
105#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
106#include "PGMGstDefs.h"
107#include "PGMAllGst.h"
108#include "PGMAllBth.h"
109#undef BTH_PGMPOOLKIND_PT_FOR_PT
110#undef BTH_PGMPOOLKIND_ROOT
111#undef PGM_BTH_NAME
112#undef PGM_GST_TYPE
113#undef PGM_GST_NAME
114
115/* Guest - 32-bit mode */
116#define PGM_GST_TYPE PGM_TYPE_32BIT
117#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
118#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
119#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
120#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
121#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD
122#include "PGMGstDefs.h"
123#include "PGMAllGst.h"
124#include "PGMAllBth.h"
125#undef BTH_PGMPOOLKIND_PT_FOR_BIG
126#undef BTH_PGMPOOLKIND_PT_FOR_PT
127#undef BTH_PGMPOOLKIND_ROOT
128#undef PGM_BTH_NAME
129#undef PGM_GST_TYPE
130#undef PGM_GST_NAME
131
132#undef PGM_SHW_TYPE
133#undef PGM_SHW_NAME
134
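/*
 * Illustration only (a sketch of the instantiation pattern used above, not
 * part of the original file): each shadow/guest mode pair re-includes the
 * template headers with a different PGM_SHW_NAME/PGM_GST_NAME/PGM_BTH_NAME
 * mapping, so every template function gets one concrete instance per mode
 * pair; the name macros themselves come from the internal headers included
 * above.
 */
#if 0
# define PGM_SHW_TYPE       PGM_TYPE_32BIT
# define PGM_GST_TYPE       PGM_TYPE_REAL
# define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
# include "PGMAllBth.h"     /* instantiates Trap0eHandler, InvalidatePage, ... for this pair */
/* ... and the active mode later selects the instance via PGM_BTH_PFN(Trap0eHandler, pVCpu). */
#endif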
135
136/*
137 * Shadow - PAE mode
138 */
139#define PGM_SHW_TYPE PGM_TYPE_PAE
140#define PGM_SHW_NAME(name) PGM_SHW_NAME_PAE(name)
141#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
142#include "PGMAllShw.h"
143
144/* Guest - real mode */
145#define PGM_GST_TYPE PGM_TYPE_REAL
146#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
147#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
148#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
149#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
150#include "PGMGstDefs.h"
151#include "PGMAllBth.h"
152#undef BTH_PGMPOOLKIND_PT_FOR_PT
153#undef BTH_PGMPOOLKIND_ROOT
154#undef PGM_BTH_NAME
155#undef PGM_GST_TYPE
156#undef PGM_GST_NAME
157
158/* Guest - protected mode */
159#define PGM_GST_TYPE PGM_TYPE_PROT
160#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
161#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
162#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
163#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
164#include "PGMGstDefs.h"
165#include "PGMAllBth.h"
166#undef BTH_PGMPOOLKIND_PT_FOR_PT
167#undef BTH_PGMPOOLKIND_ROOT
168#undef PGM_BTH_NAME
169#undef PGM_GST_TYPE
170#undef PGM_GST_NAME
171
172/* Guest - 32-bit mode */
173#define PGM_GST_TYPE PGM_TYPE_32BIT
174#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
175#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
176#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
177#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
178#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_FOR_32BIT
179#include "PGMGstDefs.h"
180#include "PGMAllBth.h"
181#undef BTH_PGMPOOLKIND_PT_FOR_BIG
182#undef BTH_PGMPOOLKIND_PT_FOR_PT
183#undef BTH_PGMPOOLKIND_ROOT
184#undef PGM_BTH_NAME
185#undef PGM_GST_TYPE
186#undef PGM_GST_NAME
187
188
189/* Guest - PAE mode */
190#define PGM_GST_TYPE PGM_TYPE_PAE
191#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
192#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
193#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
194#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
195#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT
196#include "PGMGstDefs.h"
197#include "PGMAllGst.h"
198#include "PGMAllBth.h"
199#undef BTH_PGMPOOLKIND_PT_FOR_BIG
200#undef BTH_PGMPOOLKIND_PT_FOR_PT
201#undef BTH_PGMPOOLKIND_ROOT
202#undef PGM_BTH_NAME
203#undef PGM_GST_TYPE
204#undef PGM_GST_NAME
205
206#undef PGM_SHW_TYPE
207#undef PGM_SHW_NAME
208
209
210#ifndef IN_RC /* AMD64 implies VT-x/AMD-V */
211/*
212 * Shadow - AMD64 mode
213 */
214# define PGM_SHW_TYPE PGM_TYPE_AMD64
215# define PGM_SHW_NAME(name) PGM_SHW_NAME_AMD64(name)
216# include "PGMAllShw.h"
217
218/* Guest - protected mode (only used for AMD-V nested paging in 64-bit mode) */
219# define PGM_GST_TYPE PGM_TYPE_PROT
220# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
221# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
222# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
223# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PD_PHYS
224# include "PGMGstDefs.h"
225# include "PGMAllBth.h"
226# undef BTH_PGMPOOLKIND_PT_FOR_PT
227# undef BTH_PGMPOOLKIND_ROOT
228# undef PGM_BTH_NAME
229# undef PGM_GST_TYPE
230# undef PGM_GST_NAME
231
232# ifdef VBOX_WITH_64_BITS_GUESTS
233/* Guest - AMD64 mode */
234# define PGM_GST_TYPE PGM_TYPE_AMD64
235# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
236# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
237# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
238# define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
239# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_64BIT_PML4
240# include "PGMGstDefs.h"
241# include "PGMAllGst.h"
242# include "PGMAllBth.h"
243# undef BTH_PGMPOOLKIND_PT_FOR_BIG
244# undef BTH_PGMPOOLKIND_PT_FOR_PT
245# undef BTH_PGMPOOLKIND_ROOT
246# undef PGM_BTH_NAME
247# undef PGM_GST_TYPE
248# undef PGM_GST_NAME
249# endif /* VBOX_WITH_64_BITS_GUESTS */
250
251# undef PGM_SHW_TYPE
252# undef PGM_SHW_NAME
253
254
255/*
256 * Shadow - Nested paging mode
257 */
258# define PGM_SHW_TYPE PGM_TYPE_NESTED
259# define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED(name)
260# include "PGMAllShw.h"
261
262/* Guest - real mode */
263# define PGM_GST_TYPE PGM_TYPE_REAL
264# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
265# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_REAL(name)
266# include "PGMGstDefs.h"
267# include "PGMAllBth.h"
268# undef PGM_BTH_NAME
269# undef PGM_GST_TYPE
270# undef PGM_GST_NAME
271
272/* Guest - protected mode */
273# define PGM_GST_TYPE PGM_TYPE_PROT
274# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
275# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PROT(name)
276# include "PGMGstDefs.h"
277# include "PGMAllBth.h"
278# undef PGM_BTH_NAME
279# undef PGM_GST_TYPE
280# undef PGM_GST_NAME
281
282/* Guest - 32-bit mode */
283# define PGM_GST_TYPE PGM_TYPE_32BIT
284# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
285# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT(name)
286# include "PGMGstDefs.h"
287# include "PGMAllBth.h"
288# undef PGM_BTH_NAME
289# undef PGM_GST_TYPE
290# undef PGM_GST_NAME
291
292/* Guest - PAE mode */
293# define PGM_GST_TYPE PGM_TYPE_PAE
294# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
295# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE(name)
296# include "PGMGstDefs.h"
297# include "PGMAllBth.h"
298# undef PGM_BTH_NAME
299# undef PGM_GST_TYPE
300# undef PGM_GST_NAME
301
302# ifdef VBOX_WITH_64_BITS_GUESTS
303/* Guest - AMD64 mode */
304# define PGM_GST_TYPE PGM_TYPE_AMD64
305# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
306# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64(name)
307# include "PGMGstDefs.h"
308# include "PGMAllBth.h"
309# undef PGM_BTH_NAME
310# undef PGM_GST_TYPE
311# undef PGM_GST_NAME
312# endif /* VBOX_WITH_64_BITS_GUESTS */
313
314# undef PGM_SHW_TYPE
315# undef PGM_SHW_NAME
316
317
318/*
319 * Shadow - EPT
320 */
321# define PGM_SHW_TYPE PGM_TYPE_EPT
322# define PGM_SHW_NAME(name) PGM_SHW_NAME_EPT(name)
323# include "PGMAllShw.h"
324
325/* Guest - real mode */
326# define PGM_GST_TYPE PGM_TYPE_REAL
327# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
328# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_REAL(name)
329# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
330# include "PGMGstDefs.h"
331# include "PGMAllBth.h"
332# undef BTH_PGMPOOLKIND_PT_FOR_PT
333# undef PGM_BTH_NAME
334# undef PGM_GST_TYPE
335# undef PGM_GST_NAME
336
337/* Guest - protected mode */
338# define PGM_GST_TYPE PGM_TYPE_PROT
339# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
340# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
341# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
342# include "PGMGstDefs.h"
343# include "PGMAllBth.h"
344# undef BTH_PGMPOOLKIND_PT_FOR_PT
345# undef PGM_BTH_NAME
346# undef PGM_GST_TYPE
347# undef PGM_GST_NAME
348
349/* Guest - 32-bit mode */
350# define PGM_GST_TYPE PGM_TYPE_32BIT
351# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
352# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_32BIT(name)
353# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
354# include "PGMGstDefs.h"
355# include "PGMAllBth.h"
356# undef BTH_PGMPOOLKIND_PT_FOR_PT
357# undef PGM_BTH_NAME
358# undef PGM_GST_TYPE
359# undef PGM_GST_NAME
360
361/* Guest - PAE mode */
362# define PGM_GST_TYPE PGM_TYPE_PAE
363# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
364# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PAE(name)
365# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
366# include "PGMGstDefs.h"
367# include "PGMAllBth.h"
368# undef BTH_PGMPOOLKIND_PT_FOR_PT
369# undef PGM_BTH_NAME
370# undef PGM_GST_TYPE
371# undef PGM_GST_NAME
372
373# ifdef VBOX_WITH_64_BITS_GUESTS
374/* Guest - AMD64 mode */
375# define PGM_GST_TYPE PGM_TYPE_AMD64
376# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
377# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_AMD64(name)
378# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
379# include "PGMGstDefs.h"
380# include "PGMAllBth.h"
381# undef BTH_PGMPOOLKIND_PT_FOR_PT
382# undef PGM_BTH_NAME
383# undef PGM_GST_TYPE
384# undef PGM_GST_NAME
385# endif /* VBOX_WITH_64_BITS_GUESTS */
386
387# undef PGM_SHW_TYPE
388# undef PGM_SHW_NAME
389
390#endif /* !IN_RC */
391
392
393#ifndef IN_RING3
394/**
395 * #PF Handler.
396 *
397 * @returns VBox status code (appropriate for trap handling and GC return).
398 * @param pVCpu VMCPU handle.
399 * @param uErr The trap error code.
400 * @param pRegFrame Trap register frame.
401 * @param pvFault The fault address.
402 */
403VMMDECL(int) PGMTrap0eHandler(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
404{
405 PVM pVM = pVCpu->CTX_SUFF(pVM);
406
407 Log(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv eip=%04x:%RGv\n", uErr, pvFault, pRegFrame->cs, (RTGCPTR)pRegFrame->rip));
408 STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0e, a);
409 STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );
410
411
412#ifdef VBOX_WITH_STATISTICS
413 /*
414 * Error code stats.
415 */
416 if (uErr & X86_TRAP_PF_US)
417 {
418 if (!(uErr & X86_TRAP_PF_P))
419 {
420 if (uErr & X86_TRAP_PF_RW)
421 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSNotPresentWrite);
422 else
423 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSNotPresentRead);
424 }
425 else if (uErr & X86_TRAP_PF_RW)
426 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSWrite);
427 else if (uErr & X86_TRAP_PF_RSVD)
428 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSReserved);
429 else if (uErr & X86_TRAP_PF_ID)
430 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSNXE);
431 else
432 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSRead);
433 }
434 else
435 { /* Supervisor */
436 if (!(uErr & X86_TRAP_PF_P))
437 {
438 if (uErr & X86_TRAP_PF_RW)
439 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eSVNotPresentWrite);
440 else
441 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eSVNotPresentRead);
442 }
443 else if (uErr & X86_TRAP_PF_RW)
444 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eSVWrite);
445 else if (uErr & X86_TRAP_PF_ID)
446 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eSNXE);
447 else if (uErr & X86_TRAP_PF_RSVD)
448 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eSVReserved);
449 }
450#endif /* VBOX_WITH_STATISTICS */
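/*
 * For reference, the X86_TRAP_PF_* bits tested above follow the architectural
 * #PF error-code layout (summary only; see the x86 header for the
 * authoritative definitions):
 *   X86_TRAP_PF_P    - set: protection violation; clear: page not present.
 *   X86_TRAP_PF_RW   - set: caused by a write; clear: by a read.
 *   X86_TRAP_PF_US   - set: the access was made in user mode.
 *   X86_TRAP_PF_RSVD - set: a reserved bit was set in a paging entry.
 *   X86_TRAP_PF_ID   - set: caused by an instruction fetch (NX).
 */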
451
452 /*
453 * Call the worker.
454 */
455 bool fLockTaken = false;
456 int rc = PGM_BTH_PFN(Trap0eHandler, pVCpu)(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken);
457 if (fLockTaken)
458 {
459 Assert(PGMIsLockOwner(pVM));
460 pgmUnlock(pVM);
461 }
462 if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
463 rc = VINF_SUCCESS;
464
465# ifdef IN_RING0
466 /* Note: hack alert for difficult to reproduce problem. */
467 if ( rc == VERR_PAGE_NOT_PRESENT /* SMP only ; disassembly might fail. */
468 || rc == VERR_PAGE_TABLE_NOT_PRESENT /* seen with UNI & SMP */
469 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT /* seen with SMP */
470 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT) /* precaution */
471 {
472 Log(("WARNING: Unexpected VERR_PAGE_TABLE_NOT_PRESENT (%d) for page fault at %RGv error code %x (rip=%RGv)\n", rc, pvFault, uErr, pRegFrame->rip));
473 /* Some kind of inconsistency in the SMP case; it's safe to just execute the instruction again; not sure about single VCPU VMs though. */
474 rc = VINF_SUCCESS;
475 }
476# endif
477
478 STAM_STATS({ if (rc == VINF_EM_RAW_GUEST_TRAP) STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eGuestPF); });
479 STAM_STATS({ if (!pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution))
480 pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2Misc; });
481 STAM_PROFILE_STOP_EX(&pVCpu->pgm.s.StatRZTrap0e, pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution), a);
482 return rc;
483}
484#endif /* !IN_RING3 */
485
486
487/**
488 * Prefetch a page
489 *
490 * Typically used to sync commonly used pages before entering raw mode
491 * after a CR3 reload.
492 *
493 * @returns VBox status code suitable for scheduling.
494 * @retval VINF_SUCCESS on success.
495 * @retval VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
496 * @param pVCpu VMCPU handle.
497 * @param GCPtrPage Page to prefetch.
498 */
499VMMDECL(int) PGMPrefetchPage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
500{
501 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,Prefetch), a);
502 int rc = PGM_BTH_PFN(PrefetchPage, pVCpu)(pVCpu, GCPtrPage);
503 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,Prefetch), a);
504 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
505 return rc;
506}
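/*
 * Example only (sketch; the register frame name is illustrative): a typical
 * caller prefetches the page at the current RIP right after a CR3 reload,
 * before re-entering raw mode.
 */
#if 0
    int rc2 = PGMPrefetchPage(pVCpu, (RTGCPTR)pRegFrame->rip);
    AssertMsg(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_SYNC_CR3, ("rc2=%Rrc\n", rc2));
#endif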
507
508
509/**
510 * Gets the mapping corresponding to the specified address (if any).
511 *
512 * @returns Pointer to the mapping.
513 * @returns NULL if not found.
514 *
515 * @param pVM The virtual machine.
516 * @param GCPtr The guest context pointer.
517 */
518PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr)
519{
520 PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
521 while (pMapping)
522 {
523 if ((uintptr_t)GCPtr < (uintptr_t)pMapping->GCPtr)
524 break;
525 if ((uintptr_t)GCPtr - (uintptr_t)pMapping->GCPtr < pMapping->cb)
526 return pMapping;
527 pMapping = pMapping->CTX_SUFF(pNext);
528 }
529 return NULL;
530}
531
532
533/**
534 * Verifies a range of pages for read or write access
535 *
536 * Only checks the guest's page tables
537 *
538 * @returns VBox status code.
539 * @param pVCpu VMCPU handle.
540 * @param Addr Guest virtual address to check
541 * @param cbSize Access size
542 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
543 * @remarks Currently not in use.
544 */
545VMMDECL(int) PGMIsValidAccess(PVMCPU pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
546{
547 /*
548 * Validate input.
549 */
550 if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
551 {
552 AssertMsgFailed(("PGMIsValidAccess: invalid access type %08x\n", fAccess));
553 return VERR_INVALID_PARAMETER;
554 }
555
556 uint64_t fPage;
557 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)Addr, &fPage, NULL);
558 if (RT_FAILURE(rc))
559 {
560 Log(("PGMIsValidAccess: access violation for %RGv rc=%d\n", Addr, rc));
561 return VINF_EM_RAW_GUEST_TRAP;
562 }
563
564 /*
565 * Check if the access would cause a page fault
566 *
567 * Note that hypervisor page directories are not present in the guest's tables, so this check
568 * is sufficient.
569 */
570 bool fWrite = !!(fAccess & X86_PTE_RW);
571 bool fUser = !!(fAccess & X86_PTE_US);
572 if ( !(fPage & X86_PTE_P)
573 || (fWrite && !(fPage & X86_PTE_RW))
574 || (fUser && !(fPage & X86_PTE_US)) )
575 {
576 Log(("PGMIsValidAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPage, fWrite, fUser));
577 return VINF_EM_RAW_GUEST_TRAP;
578 }
579 if ( RT_SUCCESS(rc)
580 && PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize))
581 return PGMIsValidAccess(pVCpu, Addr + PAGE_SIZE, (cbSize > PAGE_SIZE) ? cbSize - PAGE_SIZE : 1, fAccess);
582 return rc;
583}
584
585
586/**
587 * Verifies a range of pages for read or write access
588 *
589 * Supports handling of pages marked for dirty bit tracking and CSAM
590 *
591 * @returns VBox status code.
592 * @param pVCpu VMCPU handle.
593 * @param Addr Guest virtual address to check
594 * @param cbSize Access size
595 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
596 */
597VMMDECL(int) PGMVerifyAccess(PVMCPU pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
598{
599 PVM pVM = pVCpu->CTX_SUFF(pVM);
600
601 AssertMsg(!(fAccess & ~(X86_PTE_US | X86_PTE_RW)), ("PGMVerifyAccess: invalid access type %08x\n", fAccess));
602
603 /*
604 * Get going.
605 */
606 uint64_t fPageGst;
607 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)Addr, &fPageGst, NULL);
608 if (RT_FAILURE(rc))
609 {
610 Log(("PGMVerifyAccess: access violation for %RGv rc=%d\n", Addr, rc));
611 return VINF_EM_RAW_GUEST_TRAP;
612 }
613
614 /*
615 * Check if the access would cause a page fault
616 *
617 * Note that hypervisor page directories are not present in the guest's tables, so this check
618 * is sufficient.
619 */
620 const bool fWrite = !!(fAccess & X86_PTE_RW);
621 const bool fUser = !!(fAccess & X86_PTE_US);
622 if ( !(fPageGst & X86_PTE_P)
623 || (fWrite && !(fPageGst & X86_PTE_RW))
624 || (fUser && !(fPageGst & X86_PTE_US)) )
625 {
626 Log(("PGMVerifyAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPageGst, fWrite, fUser));
627 return VINF_EM_RAW_GUEST_TRAP;
628 }
629
630 if (!pVM->pgm.s.fNestedPaging)
631 {
632 /*
633 * Next step is to verify if we protected this page for dirty bit tracking or for CSAM scanning
634 */
635 rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, NULL, NULL);
636 if ( rc == VERR_PAGE_NOT_PRESENT
637 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
638 {
639 /*
640 * Page is not present in our page tables.
641 * Try to sync it!
642 */
643 Assert(X86_TRAP_PF_RW == X86_PTE_RW && X86_TRAP_PF_US == X86_PTE_US);
644 uint32_t uErr = fAccess & (X86_TRAP_PF_RW | X86_TRAP_PF_US);
645 rc = PGM_BTH_PFN(VerifyAccessSyncPage, pVCpu)(pVCpu, Addr, fPageGst, uErr);
646 if (rc != VINF_SUCCESS)
647 return rc;
648 }
649 else
650 AssertMsg(rc == VINF_SUCCESS, ("PGMShwGetPage %RGv failed with %Rrc\n", Addr, rc));
651 }
652
653#if 0 /* def VBOX_STRICT; triggers too often now */
654 /*
655 * This check is a bit paranoid, but useful.
656 */
657 /* Note! This will assert when writing to monitored pages (a bit annoying actually). */
658 uint64_t fPageShw;
659 rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, &fPageShw, NULL);
660 if ( (rc == VERR_PAGE_NOT_PRESENT || RT_FAILURE(rc))
661 || (fWrite && !(fPageShw & X86_PTE_RW))
662 || (fUser && !(fPageShw & X86_PTE_US)) )
663 {
664 AssertMsgFailed(("Unexpected access violation for %RGv! rc=%Rrc write=%d user=%d\n",
665 Addr, rc, fWrite && !(fPageShw & X86_PTE_RW), fUser && !(fPageShw & X86_PTE_US)));
666 return VINF_EM_RAW_GUEST_TRAP;
667 }
668#endif
669
670 if ( RT_SUCCESS(rc)
671 && ( PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize - 1)
672 || Addr + cbSize < Addr))
673 {
674 /* Don't recursively call PGMVerifyAccess as we might run out of stack. */
675 for (;;)
676 {
677 Addr += PAGE_SIZE;
678 if (cbSize > PAGE_SIZE)
679 cbSize -= PAGE_SIZE;
680 else
681 cbSize = 1;
682 rc = PGMVerifyAccess(pVCpu, Addr, 1, fAccess);
683 if (rc != VINF_SUCCESS)
684 break;
685 if (PAGE_ADDRESS(Addr) == PAGE_ADDRESS(Addr + cbSize - 1))
686 break;
687 }
688 }
689 return rc;
690}
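/*
 * Example only (illustrative caller; GCPtrDst and cbWrite are placeholder
 * names): checking that the guest may write a buffer before emulating a
 * store.  On VINF_EM_RAW_GUEST_TRAP a caller would normally raise the #PF in
 * the guest instead of continuing.
 */
#if 0
    int rc2 = PGMVerifyAccess(pVCpu, GCPtrDst, cbWrite, X86_PTE_RW | X86_PTE_US);
    if (rc2 == VINF_EM_RAW_GUEST_TRAP)
        return VINF_EM_RAW_GUEST_TRAP;  /* let the guest take the page fault */
    AssertRCReturn(rc2, rc2);
#endif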
691
692
693/**
694 * Emulation of the invlpg instruction (HC only actually).
695 *
696 * @returns VBox status code, special care required.
697 * @retval VINF_PGM_SYNC_CR3 - handled.
698 * @retval VINF_EM_RAW_EMULATE_INSTR - not handled (RC only).
699 * @retval VERR_REM_FLUSHED_PAGES_OVERFLOW - not handled.
700 *
701 * @param pVCpu VMCPU handle.
702 * @param GCPtrPage Page to invalidate.
703 *
704 * @remark ASSUMES the page table entry or page directory is valid. Fairly
705 * safe, but there could be edge cases!
706 *
707 * @todo Flush page or page directory only if necessary!
708 */
709VMMDECL(int) PGMInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
710{
711 PVM pVM = pVCpu->CTX_SUFF(pVM);
712 int rc;
713 Log3(("PGMInvalidatePage: GCPtrPage=%RGv\n", GCPtrPage));
714
715#ifndef IN_RING3
716 /*
717 * Notify the recompiler so it can record this instruction.
718 */
719 REMNotifyInvalidatePage(pVM, GCPtrPage);
720#endif /* !IN_RING3 */
721
722
723#ifdef IN_RC
724 /*
725 * Check for conflicts and pending CR3 monitoring updates.
726 */
727 if (pgmMapAreMappingsFloating(&pVM->pgm.s))
728 {
729 if ( pgmGetMapping(pVM, GCPtrPage)
730 && PGMGstGetPage(pVCpu, GCPtrPage, NULL, NULL) != VERR_PAGE_TABLE_NOT_PRESENT)
731 {
732 LogFlow(("PGMGCInvalidatePage: Conflict!\n"));
733 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
734 STAM_COUNTER_INC(&pVM->pgm.s.StatRCInvlPgConflict);
735 return VINF_PGM_SYNC_CR3;
736 }
737
738 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
739 {
740 LogFlow(("PGMGCInvalidatePage: PGM_SYNC_MONITOR_CR3 -> reinterpret instruction in R3\n"));
741 STAM_COUNTER_INC(&pVM->pgm.s.StatRCInvlPgSyncMonCR3);
742 return VINF_EM_RAW_EMULATE_INSTR;
743 }
744 }
745#endif /* IN_RC */
746
747 /*
748 * Call paging mode specific worker.
749 */
750 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePage), a);
751 pgmLock(pVM);
752 rc = PGM_BTH_PFN(InvalidatePage, pVCpu)(pVCpu, GCPtrPage);
753 pgmUnlock(pVM);
754 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePage), a);
755
756 /* Invalidate the TLB entry; might already be done by InvalidatePage (@todo) */
757 PGM_INVL_PG(pVCpu, GCPtrPage);
758
759#ifdef IN_RING3
760 /*
761 * Check if we have a pending update of the CR3 monitoring.
762 */
763 if ( RT_SUCCESS(rc)
764 && (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3))
765 {
766 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
767 Assert(!pVM->pgm.s.fMappingsFixed); Assert(!pVM->pgm.s.fMappingsDisabled);
768 }
769
770 /*
771 * Inform CSAM about the flush
772 *
773 * Note: This is to check if monitored pages have been changed; when we implement
774 * callbacks for virtual handlers, this is no longer required.
775 */
776 CSAMR3FlushPage(pVM, GCPtrPage);
777#endif /* IN_RING3 */
778
779 /* Ignore all irrelevant error codes. */
780 if ( rc == VERR_PAGE_NOT_PRESENT
781 || rc == VERR_PAGE_TABLE_NOT_PRESENT
782 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT
783 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)
784 rc = VINF_SUCCESS;
785
786 return rc;
787}
788
789
790/**
791 * Executes an instruction using the interpreter.
792 *
793 * @returns VBox status code (appropriate for trap handling and GC return).
794 * @param pVM VM handle.
795 * @param pVCpu VMCPU handle.
796 * @param pRegFrame Register frame.
797 * @param pvFault Fault address.
798 */
799VMMDECL(int) PGMInterpretInstruction(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
800{
801 uint32_t cb;
802 int rc = EMInterpretInstruction(pVM, pVCpu, pRegFrame, pvFault, &cb);
803 if (rc == VERR_EM_INTERPRETER)
804 rc = VINF_EM_RAW_EMULATE_INSTR;
805 if (rc != VINF_SUCCESS)
806 Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%RGv)\n", rc, pvFault));
807 return rc;
808}
809
810
811/**
812 * Gets effective page information (from the VMM page directory).
813 *
814 * @returns VBox status.
815 * @param pVCpu VMCPU handle.
816 * @param GCPtr Guest Context virtual address of the page.
817 * @param pfFlags Where to store the flags. These are X86_PTE_*.
818 * @param pHCPhys Where to store the HC physical address of the page.
819 * This is page aligned.
820 * @remark You should use PGMMapGetPage() for pages in a mapping.
821 */
822VMMDECL(int) PGMShwGetPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
823{
824 pgmLock(pVCpu->CTX_SUFF(pVM));
825 int rc = PGM_SHW_PFN(GetPage, pVCpu)(pVCpu, GCPtr, pfFlags, pHCPhys);
826 pgmUnlock(pVCpu->CTX_SUFF(pVM));
827 return rc;
828}
829
830
831/**
832 * Modify page flags for a range of pages in the shadow context.
833 *
834 * The existing flags are ANDed with the fMask and ORed with the fFlags.
835 *
836 * @returns VBox status code.
837 * @param pVCpu VMCPU handle.
838 * @param GCPtr Virtual address of the first page in the range.
839 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
840 * @param fMask The AND mask - page flags X86_PTE_*.
841 * Be very CAREFUL when ~'ing constants which could be 32-bit!
842 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
843 * @remark You must use PGMMapModifyPage() for pages in a mapping.
844 */
845DECLINLINE(int) pdmShwModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags)
846{
847 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
848 Assert(!(fOpFlags & ~(PGM_MK_PG_IS_MMIO2 | PGM_MK_PG_IS_WRITE_FAULT)));
849
850 GCPtr &= PAGE_BASE_GC_MASK; /** @todo this ain't necessary, right... */
851
852 PVM pVM = pVCpu->CTX_SUFF(pVM);
853 pgmLock(pVM);
854 int rc = PGM_SHW_PFN(ModifyPage, pVCpu)(pVCpu, GCPtr, PAGE_SIZE, fFlags, fMask, fOpFlags);
855 pgmUnlock(pVM);
856 return rc;
857}
858
859
860/**
861 * Changes the page flags for a single page in the shadow page tables so as to
862 * make it read-only.
863 *
864 * @returns VBox status code.
865 * @param pVCpu VMCPU handle.
866 * @param GCPtr Virtual address of the first page in the range.
867 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
868 */
869VMMDECL(int) PGMShwMakePageReadonly(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
870{
871 return pdmShwModifyPage(pVCpu, GCPtr, 0, ~(uint64_t)X86_PTE_RW, fOpFlags);
872}
873
874
875/**
876 * Changes the page flags for a single page in the shadow page tables so as to
877 * make it writable.
878 *
879 * The caller must know with 101% certainty that the guest page tables map this
880 * as writable too. This function will deal with shared, zero and write monitored
881 * pages.
882 *
883 * @returns VBox status code.
884 * @param pVCpu VMCPU handle.
885 * @param GCPtr Virtual address of the first page in the range.
886 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags (e.g.
887 * PGM_MK_PG_IS_MMIO2 for MMIO2 pages).
888 */
889VMMDECL(int) PGMShwMakePageWritable(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
890{
891 return pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)0, fOpFlags);
892}
893
894
895/**
896 * Changes the page flags for a single page in the shadow page tables so as to
897 * make it not present.
898 *
899 * @returns VBox status code.
900 * @param pVCpu VMCPU handle.
901 * @param GCPtr Virtual address of the first page in the range.
902 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
903 */
904VMMDECL(int) PGMShwMakePageNotPresent(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
905{
906 return pdmShwModifyPage(pVCpu, GCPtr, 0, 0, fOpFlags);
907}
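/*
 * Note on the fFlags/fMask pair used by the three wrappers above: the worker
 * computes the new flags as (old & fMask) | fFlags, so:
 *   read-only:    fFlags = 0,          fMask = ~(uint64_t)X86_PTE_RW - clears RW, keeps the rest.
 *   writable:     fFlags = X86_PTE_RW, fMask = ~(uint64_t)0          - sets RW, keeps the rest.
 *   not present:  fFlags = 0,          fMask = 0                     - clears everything, incl. X86_PTE_P.
 */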
908
909
910/**
911 * Gets the shadow page directory for the specified address, PAE.
912 *
913 * @returns VBox status code.
914 * @param pVCpu The VMCPU handle.
915 * @param GCPtr The address.
916 * @param pGstPdpe Guest PDPT entry
917 * @param ppPD Receives address of page directory
918 */
919int pgmShwSyncPaePDPtr(PVMCPU pVCpu, RTGCPTR GCPtr, PCX86PDPE pGstPdpe, PX86PDPAE *ppPD)
920{
921 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
922 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(&pVCpu->pgm.s);
923 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
924 PVM pVM = pVCpu->CTX_SUFF(pVM);
925 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
926 PPGMPOOLPAGE pShwPage;
927 int rc;
928
929 Assert(PGMIsLockOwner(pVM));
930
931 /* Allocate page directory if not present. */
932 if ( !pPdpe->n.u1Present
933 && !(pPdpe->u & X86_PDPE_PG_MASK))
934 {
935 RTGCPTR64 GCPdPt;
936 PGMPOOLKIND enmKind;
937 Assert(pGstPdpe);
938 X86PDPE GstPdpe = *pGstPdpe;
939
940# if defined(IN_RC)
941 /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
942 PGMDynLockHCPage(pVM, (uint8_t *)pPdpe);
943# endif
944
945 if (pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu))
946 {
947 /* AMD-V nested paging or real/protected mode without paging. */
948 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
949 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
950 }
951 else
952 {
953 if (CPUMGetGuestCR4(pVCpu) & X86_CR4_PAE)
954 {
955 if (!GstPdpe.n.u1Present)
956 {
957 /* PD not present; guest must reload CR3 to change it.
958 * No need to monitor anything in this case.
959 */
960 Assert(!HWACCMIsEnabled(pVM));
961
962 GCPdPt = GstPdpe.u & X86_PDPE_PG_MASK;
963 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
964 GstPdpe.n.u1Present = 1;
965 }
966 else
967 {
968 GCPdPt = GstPdpe.u & X86_PDPE_PG_MASK;
969 enmKind = PGMPOOLKIND_PAE_PD_FOR_PAE_PD;
970 }
971 }
972 else
973 {
974 GCPdPt = CPUMGetGuestCR3(pVCpu);
975 enmKind = (PGMPOOLKIND)(PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD + iPdPt);
976 }
977 }
978
979 /* Create a reference back to the PDPT by using the index in its shadow page. */
980 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPdPt, &pShwPage);
981 AssertRCReturn(rc, rc);
982
983 /* The PD was cached or created; hook it up now. */
984 pPdpe->u |= pShwPage->Core.Key
985 | (GstPdpe.u & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));
986
987# if defined(IN_RC)
988 /*
989 * In 32-bit PAE mode we *must* invalidate the TLB when changing a
990 * PDPT entry; the CPU fetches them only during cr3 load, so any
991 * non-present PDPT will continue to cause page faults.
992 */
993 ASMReloadCR3();
994 PGMDynUnlockHCPage(pVM, (uint8_t *)pPdpe);
995# endif
996 }
997 else
998 {
999 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
1000 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1001 Assert((pPdpe->u & X86_PDPE_PG_MASK) == pShwPage->Core.Key);
1002
1003 pgmPoolCacheUsed(pPool, pShwPage);
1004 }
1005 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1006 return VINF_SUCCESS;
1007}
1008
1009
1010/**
1011 * Gets the pointer to the shadow page directory entry for an address, PAE.
1012 *
1013 * @returns VBox status code.
1014 * @param pPGM Pointer to the PGMCPU instance data.
1015 * @param GCPtr The address.
1016 * @param ppShwPde Receives the address of the pgm pool page for the shadow page directory
1017 */
1018DECLINLINE(int) pgmShwGetPaePoolPagePD(PPGMCPU pPGM, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde)
1019{
1020 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
1021 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pPGM);
1022
1023 Assert(PGMIsLockOwner(PGMCPU2VM(pPGM)));
1024
1025 AssertReturn(pPdpt, VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT); /* can't happen */
1026 if (!pPdpt->a[iPdPt].n.u1Present)
1027 {
1028 LogFlow(("pgmShwGetPaePoolPagePD: PD %d not present (%RX64)\n", iPdPt, pPdpt->a[iPdPt].u));
1029 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1030 }
1031 AssertMsg(pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK, ("GCPtr=%RGv\n", GCPtr));
1032
1033 /* Fetch the pgm pool shadow descriptor. */
1034 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(PGMCPU2PGM(pPGM)->CTX_SUFF(pPool), pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
1035 AssertReturn(pShwPde, VERR_INTERNAL_ERROR);
1036
1037 *ppShwPde = pShwPde;
1038 return VINF_SUCCESS;
1039}
1040
1041#ifndef IN_RC
1042
1043/**
1044 * Syncs the SHADOW page directory pointer for the specified address.
1045 *
1046 * Allocates backing pages in case the PDPT or PML4 entry is missing.
1047 *
1048 * The caller is responsible for making sure the guest has a valid PD before
1049 * calling this function.
1050 *
1051 * @returns VBox status.
1052 * @param pVCpu VMCPU handle.
1053 * @param GCPtr The address.
1054 * @param pGstPml4e Guest PML4 entry
1055 * @param pGstPdpe Guest PDPT entry
1056 * @param ppPD Receives address of page directory
1057 */
1058static int pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PCX86PML4E pGstPml4e, PCX86PDPE pGstPdpe, PX86PDPAE *ppPD)
1059{
1060 PPGMCPU pPGM = &pVCpu->pgm.s;
1061 PVM pVM = pVCpu->CTX_SUFF(pVM);
1062 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1063 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1064 PX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pPGM, iPml4);
1065 bool fNestedPagingOrNoGstPaging = pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu);
1066 PPGMPOOLPAGE pShwPage;
1067 int rc;
1068
1069 Assert(PGMIsLockOwner(pVM));
1070
1071 /* Allocate page directory pointer table if not present. */
1072 if ( !pPml4e->n.u1Present
1073 && !(pPml4e->u & X86_PML4E_PG_MASK))
1074 {
1075 RTGCPTR64 GCPml4;
1076 PGMPOOLKIND enmKind;
1077
1078 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1079
1080 if (fNestedPagingOrNoGstPaging)
1081 {
1082 /* AMD-V nested paging or real/protected mode without paging */
1083 GCPml4 = (RTGCPTR64)iPml4 << X86_PML4_SHIFT;
1084 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_PHYS;
1085 }
1086 else
1087 {
1088 Assert(pGstPml4e && pGstPdpe);
1089
1090 GCPml4 = pGstPml4e->u & X86_PML4E_PG_MASK;
1091 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT;
1092 }
1093
1094 /* Create a reference back to the PDPT by using the index in its shadow page. */
1095 rc = pgmPoolAlloc(pVM, GCPml4, enmKind, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, &pShwPage);
1096 AssertRCReturn(rc, rc);
1097 }
1098 else
1099 {
1100 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
1101 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1102
1103 pgmPoolCacheUsed(pPool, pShwPage);
1104 }
1105 /* The PDPT was cached or created; hook it up now. */
1106 pPml4e->u |= pShwPage->Core.Key
1107 | (pGstPml4e->u & ~(X86_PML4E_PG_MASK | X86_PML4E_AVL_MASK | X86_PML4E_PCD | X86_PML4E_PWT));
1108
1109 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1110 PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1111 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1112
1113 /* Allocate page directory if not present. */
1114 if ( !pPdpe->n.u1Present
1115 && !(pPdpe->u & X86_PDPE_PG_MASK))
1116 {
1117 RTGCPTR64 GCPdPt;
1118 PGMPOOLKIND enmKind;
1119
1120 if (fNestedPagingOrNoGstPaging)
1121 {
1122 /* AMD-V nested paging or real/protected mode without paging */
1123 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
1124 enmKind = PGMPOOLKIND_64BIT_PD_FOR_PHYS;
1125 }
1126 else
1127 {
1128 Assert(pGstPdpe);
1129
1130 GCPdPt = pGstPdpe->u & X86_PDPE_PG_MASK;
1131 enmKind = PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD;
1132 }
1133
1134 /* Create a reference back to the PDPT by using the index in its shadow page. */
1135 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, pShwPage->idx, iPdPt, &pShwPage);
1136 AssertRCReturn(rc, rc);
1137 }
1138 else
1139 {
1140 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
1141 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1142
1143 pgmPoolCacheUsed(pPool, pShwPage);
1144 }
1145 /* The PD was cached or created; hook it up now. */
1146 pPdpe->u |= pShwPage->Core.Key
1147 | (pGstPdpe->u & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));
1148
1149 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1150 return VINF_SUCCESS;
1151}
1152
1153
1154/**
1155 * Gets the SHADOW page directory pointer for the specified address (long mode).
1156 *
1157 * @returns VBox status.
1158 * @param pVCpu VMCPU handle.
1159 * @param GCPtr The address.
1160 * @param ppPdpt Receives address of pdpt
1161 * @param ppPD Receives address of page directory
1162 */
1163DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
1164{
1165 PPGMCPU pPGM = &pVCpu->pgm.s;
1166 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1167 PCX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pPGM, iPml4);
1168
1169 Assert(PGMIsLockOwner(PGMCPU2VM(pPGM)));
1170
1171 AssertReturn(pPml4e, VERR_INTERNAL_ERROR);
1172 if (ppPml4e)
1173 *ppPml4e = (PX86PML4E)pPml4e;
1174
1175 Log4(("pgmShwGetLongModePDPtr %RGv (%RHv) %RX64\n", GCPtr, pPml4e, pPml4e->u));
1176
1177 if (!pPml4e->n.u1Present)
1178 return VERR_PAGE_MAP_LEVEL4_NOT_PRESENT;
1179
1180 PVM pVM = pVCpu->CTX_SUFF(pVM);
1181 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1182 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
1183 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1184
1185 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1186 PCX86PDPT pPdpt = *ppPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1187 if (!pPdpt->a[iPdPt].n.u1Present)
1188 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1189
1190 pShwPage = pgmPoolGetPage(pPool, pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
1191 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1192
1193 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1194 return VINF_SUCCESS;
1195}
1196
1197
1198/**
1199 * Syncs the SHADOW EPT page directory pointer for the specified address. Allocates
1200 * backing pages in case the PDPT or PML4 entry is missing.
1201 *
1202 * @returns VBox status.
1203 * @param pVCpu VMCPU handle.
1204 * @param GCPtr The address.
1205 * @param ppPdpt Receives address of pdpt
1206 * @param ppPD Receives address of page directory
1207 */
1208static int pgmShwGetEPTPDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD)
1209{
1210 PPGMCPU pPGM = &pVCpu->pgm.s;
1211 PVM pVM = pVCpu->CTX_SUFF(pVM);
1212 const unsigned iPml4 = (GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
1213 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1214 PEPTPML4 pPml4;
1215 PEPTPML4E pPml4e;
1216 PPGMPOOLPAGE pShwPage;
1217 int rc;
1218
1219 Assert(pVM->pgm.s.fNestedPaging);
1220 Assert(PGMIsLockOwner(pVM));
1221
1222 pPml4 = (PEPTPML4)PGMPOOL_PAGE_2_PTR_BY_PGMCPU(pPGM, pPGM->CTX_SUFF(pShwPageCR3));
1223 Assert(pPml4);
1224
1225 /* Allocate page directory pointer table if not present. */
1226 pPml4e = &pPml4->a[iPml4];
1227 if ( !pPml4e->n.u1Present
1228 && !(pPml4e->u & EPT_PML4E_PG_MASK))
1229 {
1230 Assert(!(pPml4e->u & EPT_PML4E_PG_MASK));
1231 RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT;
1232
1233 rc = pgmPoolAlloc(pVM, GCPml4, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOL_IDX_NESTED_ROOT, iPml4, &pShwPage);
1234 AssertRCReturn(rc, rc);
1235 }
1236 else
1237 {
1238 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
1239 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1240
1241 pgmPoolCacheUsed(pPool, pShwPage);
1242 }
1243 /* The PDPT was cached or created; hook it up now and fill with the default value. */
1244 pPml4e->u = pShwPage->Core.Key;
1245 pPml4e->n.u1Present = 1;
1246 pPml4e->n.u1Write = 1;
1247 pPml4e->n.u1Execute = 1;
1248
1249 const unsigned iPdPt = (GCPtr >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
1250 PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1251 PEPTPDPTE pPdpe = &pPdpt->a[iPdPt];
1252
1253 if (ppPdpt)
1254 *ppPdpt = pPdpt;
1255
1256 /* Allocate page directory if not present. */
1257 if ( !pPdpe->n.u1Present
1258 && !(pPdpe->u & EPT_PDPTE_PG_MASK))
1259 {
1260 RTGCPTR64 GCPdPt = (RTGCPTR64)iPdPt << EPT_PDPT_SHIFT;
1261
1262 rc = pgmPoolAlloc(pVM, GCPdPt, PGMPOOLKIND_64BIT_PD_FOR_PHYS, pShwPage->idx, iPdPt, &pShwPage);
1263 AssertRCReturn(rc, rc);
1264 }
1265 else
1266 {
1267 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & EPT_PDPTE_PG_MASK);
1268 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1269
1270 pgmPoolCacheUsed(pPool, pShwPage);
1271 }
1272 /* The PD was cached or created; hook it up now and fill with the default value. */
1273 pPdpe->u = pShwPage->Core.Key;
1274 pPdpe->n.u1Present = 1;
1275 pPdpe->n.u1Write = 1;
1276 pPdpe->n.u1Execute = 1;
1277
1278 *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1279 return VINF_SUCCESS;
1280}
1281
1282#endif /* !IN_RC */
1283
1284/**
1285 * Gets effective Guest OS page information.
1286 *
1287 * When GCPtr is in a big page, the function will return as if it was a normal
1288 * 4KB page. If distinguishing between big and normal pages becomes
1289 * necessary at a later point, a separate API will be created for that
1290 * purpose.
1291 *
1292 * @returns VBox status.
1293 * @param pVCpu VMCPU handle.
1294 * @param GCPtr Guest Context virtual address of the page.
1295 * @param pfFlags Where to store the flags. These are X86_PTE_*, even for big pages.
1296 * @param pGCPhys Where to store the GC physical address of the page.
1297 * This is page aligned.
1298 */
1299VMMDECL(int) PGMGstGetPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
1300{
1301 return PGM_GST_PFN(GetPage, pVCpu)(pVCpu, GCPtr, pfFlags, pGCPhys);
1302}
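/*
 * Example only (sketch; GCPtr is a placeholder): translating a guest virtual
 * address and checking that the guest maps it writable.
 */
#if 0
    uint64_t fFlags;
    RTGCPHYS GCPhys;
    int rc2 = PGMGstGetPage(pVCpu, GCPtr, &fFlags, &GCPhys);
    if (RT_SUCCESS(rc2) && (fFlags & X86_PTE_RW))
        Log(("%RGv -> %RGp, writable in the guest tables\n", GCPtr, GCPhys));
#endif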
1303
1304
1305/**
1306 * Checks if the page is present.
1307 *
1308 * @returns true if the page is present.
1309 * @returns false if the page is not present.
1310 * @param pVCpu VMCPU handle.
1311 * @param GCPtr Address within the page.
1312 */
1313VMMDECL(bool) PGMGstIsPagePresent(PVMCPU pVCpu, RTGCPTR GCPtr)
1314{
1315 int rc = PGMGstGetPage(pVCpu, GCPtr, NULL, NULL);
1316 return RT_SUCCESS(rc);
1317}
1318
1319
1320/**
1321 * Sets (replaces) the page flags for a range of pages in the guest's tables.
1322 *
1323 * @returns VBox status.
1324 * @param pVCpu VMCPU handle.
1325 * @param GCPtr The address of the first page.
1326 * @param cb The size of the range in bytes.
1327 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
1328 */
1329VMMDECL(int) PGMGstSetPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
1330{
1331 return PGMGstModifyPage(pVCpu, GCPtr, cb, fFlags, 0);
1332}
1333
1334
1335/**
1336 * Modify page flags for a range of pages in the guest's tables
1337 *
1338 * The existing flags are ANDed with the fMask and ORed with the fFlags.
1339 *
1340 * @returns VBox status code.
1341 * @param pVCpu VMCPU handle.
1342 * @param GCPtr Virtual address of the first page in the range.
1343 * @param cb Size (in bytes) of the range to apply the modification to.
1344 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
1345 * @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
1346 * Be very CAREFUL when ~'ing constants which could be 32-bit!
1347 */
1348VMMDECL(int) PGMGstModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
1349{
1350 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,GstModifyPage), a);
1351
1352 /*
1353 * Validate input.
1354 */
1355 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
1356 Assert(cb);
1357
1358 LogFlow(("PGMGstModifyPage %RGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));
1359
1360 /*
1361 * Adjust input.
1362 */
1363 cb += GCPtr & PAGE_OFFSET_MASK;
1364 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
1365 GCPtr = (GCPtr & PAGE_BASE_GC_MASK);
1366
1367 /*
1368 * Call worker.
1369 */
1370 int rc = PGM_GST_PFN(ModifyPage, pVCpu)(pVCpu, GCPtr, cb, fFlags, fMask);
1371
1372 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,GstModifyPage), a);
1373 return rc;
1374}
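/*
 * Worked example for the range adjustment above: with GCPtr=0x10ffc and
 * cb=0x10, cb becomes 0x10 + (0x10ffc & PAGE_OFFSET_MASK) = 0x100c, which is
 * aligned up to 0x2000, and GCPtr is masked down to 0x10000; the flags are
 * thus modified for the two pages at 0x10000 and 0x11000.
 */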
1375
1376
1377#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1378
1379/**
1380 * Performs the lazy mapping of the 32-bit guest PD.
1381 *
1382 * @returns VBox status code.
1383 * @param pVCpu The current CPU.
1384 * @param ppPd Where to return the pointer to the mapping. This is
1385 * always set.
1386 */
1387int pgmGstLazyMap32BitPD(PVMCPU pVCpu, PX86PD *ppPd)
1388{
1389 PVM pVM = pVCpu->CTX_SUFF(pVM);
1390 pgmLock(pVM);
1391
1392 Assert(!pVCpu->pgm.s.CTX_SUFF(pGst32BitPd));
1393
1394 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAGE_MASK;
1395 PPGMPAGE pPage;
1396 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysCR3, &pPage);
1397 if (RT_SUCCESS(rc))
1398 {
1399 RTHCPTR HCPtrGuestCR3;
1400 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
1401 if (RT_SUCCESS(rc))
1402 {
1403 pVCpu->pgm.s.pGst32BitPdR3 = (R3PTRTYPE(PX86PD))HCPtrGuestCR3;
1404# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1405 pVCpu->pgm.s.pGst32BitPdR0 = (R0PTRTYPE(PX86PD))HCPtrGuestCR3;
1406# endif
1407 *ppPd = (PX86PD)HCPtrGuestCR3;
1408
1409 pgmUnlock(pVM);
1410 return VINF_SUCCESS;
1411 }
1412
1413 AssertRC(rc);
1414 }
1415 pgmUnlock(pVM);
1416
1417 *ppPd = NULL;
1418 return rc;
1419}
1420
1421
1422/**
1423 * Performs the lazy mapping of the PAE guest PDPT.
1424 *
1425 * @returns VBox status code.
1426 * @param pVCpu The current CPU.
1427 * @param ppPdpt Where to return the pointer to the mapping. This is
1428 * always set.
1429 */
1430int pgmGstLazyMapPaePDPT(PVMCPU pVCpu, PX86PDPT *ppPdpt)
1431{
1432 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt));
1433 PVM pVM = pVCpu->CTX_SUFF(pVM);
1434 pgmLock(pVM);
1435
1436 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAE_PAGE_MASK;
1437 PPGMPAGE pPage;
1438 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysCR3, &pPage);
1439 if (RT_SUCCESS(rc))
1440 {
1441 RTHCPTR HCPtrGuestCR3;
1442 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
1443 if (RT_SUCCESS(rc))
1444 {
1445 pVCpu->pgm.s.pGstPaePdptR3 = (R3PTRTYPE(PX86PDPT))HCPtrGuestCR3;
1446# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1447 pVCpu->pgm.s.pGstPaePdptR0 = (R0PTRTYPE(PX86PDPT))HCPtrGuestCR3;
1448# endif
1449 *ppPdpt = (PX86PDPT)HCPtrGuestCR3;
1450
1451 pgmUnlock(pVM);
1452 return VINF_SUCCESS;
1453 }
1454
1455 AssertRC(rc);
1456 }
1457
1458 pgmUnlock(pVM);
1459 *ppPdpt = NULL;
1460 return rc;
1461}
1462
1463
1464/**
1465 * Performs the lazy mapping / updating of a PAE guest PD.
1466 *
1467 * @returns Pointer to the mapping.
1468 * @returns VBox status code.
1469 * @param pVCpu The current CPU.
1470 * @param iPdpt Which PD entry to map (0..3).
1471 * @param ppPd Where to return the pointer to the mapping. This is
1472 * always set.
1473 */
1474int pgmGstLazyMapPaePD(PVMCPU pVCpu, uint32_t iPdpt, PX86PDPAE *ppPd)
1475{
1476 PVM pVM = pVCpu->CTX_SUFF(pVM);
1477 pgmLock(pVM);
1478
1479 PX86PDPT pGuestPDPT = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
1480 Assert(pGuestPDPT);
1481 Assert(pGuestPDPT->a[iPdpt].n.u1Present);
1482 RTGCPHYS GCPhys = pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK_FULL;
1483 bool const fChanged = pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] != GCPhys;
1484
1485 PPGMPAGE pPage;
1486 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
1487 if (RT_SUCCESS(rc))
1488 {
1489 RTRCPTR RCPtr = NIL_RTRCPTR;
1490 RTHCPTR HCPtr = NIL_RTHCPTR;
1491#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1492 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &HCPtr);
1493 AssertRC(rc);
1494#endif
1495 if (RT_SUCCESS(rc) && fChanged)
1496 {
1497 RCPtr = (RTRCPTR)(RTRCUINTPTR)(pVM->pgm.s.GCPtrCR3Mapping + (1 + iPdpt) * PAGE_SIZE);
1498 rc = PGMMap(pVM, (RTRCUINTPTR)RCPtr, PGM_PAGE_GET_HCPHYS(pPage), PAGE_SIZE, 0);
1499 }
1500 if (RT_SUCCESS(rc))
1501 {
1502 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = (R3PTRTYPE(PX86PDPAE))HCPtr;
1503# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1504 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = (R0PTRTYPE(PX86PDPAE))HCPtr;
1505# endif
1506 if (fChanged)
1507 {
1508 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = GCPhys;
1509 pVCpu->pgm.s.apGstPaePDsRC[iPdpt] = (RCPTRTYPE(PX86PDPAE))RCPtr;
1510 }
1511
1512 *ppPd = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
1513 pgmUnlock(pVM);
1514 return VINF_SUCCESS;
1515 }
1516 }
1517
1518 /* Invalid page or some failure, invalidate the entry. */
1519 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = NIL_RTGCPHYS;
1520 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = 0;
1521# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1522 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = 0;
1523# endif
1524 pVCpu->pgm.s.apGstPaePDsRC[iPdpt] = 0;
1525
1526 pgmUnlock(pVM);
1527 return rc;
1528}
1529
1530#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1531#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1532
1533/**
1534 * Performs the lazy mapping of the AMD64 guest PML4.
1535 *
1536 * @returns VBox status code.
1537 * @param pVCpu The current CPU.
1538 * @param ppPml4 Where to return the pointer to the mapping. This will
1539 * always be set.
1540 */
1541int pgmGstLazyMapPml4(PVMCPU pVCpu, PX86PML4 *ppPml4)
1542{
1543 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4));
1544 PVM pVM = pVCpu->CTX_SUFF(pVM);
1545 pgmLock(pVM);
1546
1547 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_AMD64_PAGE_MASK;
1548 PPGMPAGE pPage;
1549 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysCR3, &pPage);
1550 if (RT_SUCCESS(rc))
1551 {
1552 RTHCPTR HCPtrGuestCR3;
1553 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
1554 if (RT_SUCCESS(rc))
1555 {
1556 pVCpu->pgm.s.pGstAmd64Pml4R3 = (R3PTRTYPE(PX86PML4))HCPtrGuestCR3;
1557# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1558 pVCpu->pgm.s.pGstAmd64Pml4R0 = (R0PTRTYPE(PX86PML4))HCPtrGuestCR3;
1559# endif
1560 *ppPml4 = (PX86PML4)HCPtrGuestCR3;
1561
1562 pgmUnlock(pVM);
1563 return VINF_SUCCESS;
1564 }
1565 }
1566
1567 pgmUnlock(pVM);
1568 *ppPml4 = NULL;
1569 return rc;
1570}
1571
1572#endif
1573
1574/**
1575 * Gets the specified page directory pointer table entry.
1576 *
1577 * @returns PDP entry
1578 * @param pVCpu VMCPU handle.
1579 * @param iPdpt PDPT index
1580 */
1581VMMDECL(X86PDPE) PGMGstGetPaePDPtr(PVMCPU pVCpu, unsigned iPdpt)
1582{
1583 Assert(iPdpt <= 3);
1584 return pgmGstGetPaePDPTPtr(pVCpu)->a[iPdpt & 3];
1585}
1586
1587
1588/**
1589 * Gets the current CR3 register value for the shadow memory context.
1590 * @returns CR3 value.
1591 * @param pVCpu VMCPU handle.
1592 */
1593VMMDECL(RTHCPHYS) PGMGetHyperCR3(PVMCPU pVCpu)
1594{
1595 PPGMPOOLPAGE pPoolPage = pVCpu->pgm.s.CTX_SUFF(pShwPageCR3);
1596 AssertPtrReturn(pPoolPage, 0);
1597 return pPoolPage->Core.Key;
1598}
1599
1600
1601/**
1602 * Gets the current CR3 register value for the nested memory context.
1603 * @returns CR3 value.
1604 * @param pVCpu VMCPU handle.
1605 */
1606VMMDECL(RTHCPHYS) PGMGetNestedCR3(PVMCPU pVCpu, PGMMODE enmShadowMode)
1607{
1608 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1609 return pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key;
1610}
1611
1612
1613/**
1614 * Gets the current CR3 register value for the HC intermediate memory context.
1615 * @returns CR3 value.
1616 * @param pVM The VM handle.
1617 */
1618VMMDECL(RTHCPHYS) PGMGetInterHCCR3(PVM pVM)
1619{
1620 switch (pVM->pgm.s.enmHostMode)
1621 {
1622 case SUPPAGINGMODE_32_BIT:
1623 case SUPPAGINGMODE_32_BIT_GLOBAL:
1624 return pVM->pgm.s.HCPhysInterPD;
1625
1626 case SUPPAGINGMODE_PAE:
1627 case SUPPAGINGMODE_PAE_GLOBAL:
1628 case SUPPAGINGMODE_PAE_NX:
1629 case SUPPAGINGMODE_PAE_GLOBAL_NX:
1630 return pVM->pgm.s.HCPhysInterPaePDPT;
1631
1632 case SUPPAGINGMODE_AMD64:
1633 case SUPPAGINGMODE_AMD64_GLOBAL:
1634 case SUPPAGINGMODE_AMD64_NX:
1635 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
1636 return pVM->pgm.s.HCPhysInterPaePDPT;
1637
1638 default:
1639 AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode));
1640 return ~0;
1641 }
1642}
1643
1644
1645/**
1646 * Gets the current CR3 register value for the RC intermediate memory context.
1647 * @returns CR3 value.
1648 * @param pVM The VM handle.
1649 * @param pVCpu VMCPU handle.
1650 */
1651VMMDECL(RTHCPHYS) PGMGetInterRCCR3(PVM pVM, PVMCPU pVCpu)
1652{
1653 switch (pVCpu->pgm.s.enmShadowMode)
1654 {
1655 case PGMMODE_32_BIT:
1656 return pVM->pgm.s.HCPhysInterPD;
1657
1658 case PGMMODE_PAE:
1659 case PGMMODE_PAE_NX:
1660 return pVM->pgm.s.HCPhysInterPaePDPT;
1661
1662 case PGMMODE_AMD64:
1663 case PGMMODE_AMD64_NX:
1664 return pVM->pgm.s.HCPhysInterPaePML4;
1665
1666 case PGMMODE_EPT:
1667 case PGMMODE_NESTED:
1668 return 0; /* not relevant */
1669
1670 default:
1671 AssertMsgFailed(("enmShadowMode=%d\n", pVCpu->pgm.s.enmShadowMode));
1672 return ~0;
1673 }
1674}
1675
1676
1677/**
1678 * Gets the CR3 register value for the 32-Bit intermediate memory context.
1679 * @returns CR3 value.
1680 * @param pVM The VM handle.
1681 */
1682VMMDECL(RTHCPHYS) PGMGetInter32BitCR3(PVM pVM)
1683{
1684 return pVM->pgm.s.HCPhysInterPD;
1685}
1686
1687
1688/**
1689 * Gets the CR3 register value for the PAE intermediate memory context.
1690 * @returns CR3 value.
1691 * @param pVM The VM handle.
1692 */
1693VMMDECL(RTHCPHYS) PGMGetInterPaeCR3(PVM pVM)
1694{
1695 return pVM->pgm.s.HCPhysInterPaePDPT;
1696}
1697
1698
1699/**
1700 * Gets the CR3 register value for the AMD64 intermediate memory context.
1701 * @returns CR3 value.
1702 * @param pVM The VM handle.
1703 */
1704VMMDECL(RTHCPHYS) PGMGetInterAmd64CR3(PVM pVM)
1705{
1706 return pVM->pgm.s.HCPhysInterPaePML4;
1707}
1708
1709
1710/**
1711 * Performs and schedules necessary updates following a CR3 load or reload.
1712 *
1713 * This will normally involve mapping the guest PD or nPDPT
1714 *
1715 * @returns VBox status code.
1716 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
1717 * safely be ignored and overridden since the FF will be set too then.
1718 * @param pVCpu VMCPU handle.
1719 * @param cr3 The new cr3.
1720 * @param fGlobal Indicates whether this is a global flush or not.
1721 */
1722VMMDECL(int) PGMFlushTLB(PVMCPU pVCpu, uint64_t cr3, bool fGlobal)
1723{
1724 PVM pVM = pVCpu->CTX_SUFF(pVM);
1725
1726 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLB), a);
1727
1728 /*
1729 * Always flag the necessary updates; necessary for hardware acceleration
1730 */
1731 /** @todo optimize this, it shouldn't always be necessary. */
1732 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
1733 if (fGlobal)
1734 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1735 LogFlow(("PGMFlushTLB: cr3=%RX64 OldCr3=%RX64 fGlobal=%d\n", cr3, pVCpu->pgm.s.GCPhysCR3, fGlobal));
1736
1737 /*
1738 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
1739 */
1740 int rc = VINF_SUCCESS;
1741 RTGCPHYS GCPhysCR3;
1742 switch (pVCpu->pgm.s.enmGuestMode)
1743 {
1744 case PGMMODE_PAE:
1745 case PGMMODE_PAE_NX:
1746 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1747 break;
1748 case PGMMODE_AMD64:
1749 case PGMMODE_AMD64_NX:
1750 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
1751 break;
1752 default:
1753 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1754 break;
1755 }
1756
1757 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
1758 {
1759 RTGCPHYS GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
1760 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
1761 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
1762 if (RT_LIKELY(rc == VINF_SUCCESS))
1763 {
1764 if (pgmMapAreMappingsFloating(&pVM->pgm.s))
1765 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1766 }
1767 else
1768 {
1769 AssertMsg(rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
1770 Assert(VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_PGM_SYNC_CR3));
1771 pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3;
1772 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MAP_CR3;
1773 if (pgmMapAreMappingsFloating(&pVM->pgm.s))
1774 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;
1775 }
1776
1777 if (fGlobal)
1778 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLBNewCR3Global));
1779 else
1780 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLBNewCR3));
1781 }
1782 else
1783 {
1784# ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
1785 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1786 if (pPool->cDirtyPages)
1787 {
1788 pgmLock(pVM);
1789 pgmPoolResetDirtyPages(pVM);
1790 pgmUnlock(pVM);
1791 }
1792# endif
1793 /*
1794 * Check if we have a pending update of the CR3 monitoring.
1795 */
1796 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
1797 {
1798 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1799 Assert(!pVM->pgm.s.fMappingsFixed); Assert(!pVM->pgm.s.fMappingsDisabled);
1800 }
1801 if (fGlobal)
1802 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLBSameCR3Global));
1803 else
1804 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLBSameCR3));
1805 }
1806
1807 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLB), a);
1808 return rc;
1809}
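
/*
 * Illustrative sketch of how a MOV CR3 emulation path might drive the API
 * above.  The helper name and the way the new CR3 value reaches it are
 * hypothetical; only the PGMFlushTLB call and the VINF_PGM_SYNC_CR3 handling
 * follow the contract documented for PGMFlushTLB.
 */
#if 0 /* example only */
static int pgmSampleEmulateMovToCr3(PVMCPU pVCpu, uint64_t uNewCr3, uint64_t cr4)
{
    /* Mirrors the "no CR4.PGE -> all flushes are global" rule used by PGMSyncCR3. */
    bool const fGlobal = !(cr4 & X86_CR4_PGE);
    int rc = PGMFlushTLB(pVCpu, uNewCr3, fGlobal);
    if (rc == VINF_PGM_SYNC_CR3)
        rc = VINF_SUCCESS; /* Safe to ignore; VMCPU_FF_PGM_SYNC_CR3 is set and serviced later. */
    return rc;
}
#endif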
1810
1811
1812/**
1813 * Performs and schedules necessary updates following a CR3 load or reload when
1814 * using nested or extended paging.
1815 *
1816 * This API is an alternative to PGMFlushTLB that avoids actually flushing the
1817 * TLB and triggering a SyncCR3.
1818 *
1819 * This will normally involve mapping the guest PD or nPDPT
1820 *
1821 * @returns VBox status code.
1822 * @retval VINF_SUCCESS.
1823 * @retval (If applied when not in nested mode: VINF_PGM_SYNC_CR3 if monitoring
1824 * requires a CR3 sync. This can safely be ignored and overridden since
1825 * the FF will be set too then.)
1826 * @param pVCpu VMCPU handle.
1827 * @param cr3 The new cr3.
1828 */
1829VMMDECL(int) PGMUpdateCR3(PVMCPU pVCpu, uint64_t cr3)
1830{
1831 PVM pVM = pVCpu->CTX_SUFF(pVM);
1832
1833 LogFlow(("PGMUpdateCR3: cr3=%RX64 OldCr3=%RX64\n", cr3, pVCpu->pgm.s.GCPhysCR3));
1834
1835 /* We assume we're only called in nested paging mode. */
1836 Assert(pVM->pgm.s.fNestedPaging || pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
1837 Assert(pVM->pgm.s.fMappingsDisabled);
1838 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3));
1839
1840 /*
1841 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
1842 */
1843 int rc = VINF_SUCCESS;
1844 RTGCPHYS GCPhysCR3;
1845 switch (pVCpu->pgm.s.enmGuestMode)
1846 {
1847 case PGMMODE_PAE:
1848 case PGMMODE_PAE_NX:
1849 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1850 break;
1851 case PGMMODE_AMD64:
1852 case PGMMODE_AMD64_NX:
1853 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
1854 break;
1855 default:
1856 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1857 break;
1858 }
1859 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
1860 {
1861 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
1862 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
1863 AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */ /** @todo this isn't true for the mac, but we need hw to test/fix this. */
1864 }
1865 return rc;
1866}
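
/*
 * Illustrative sketch of how a caller might choose between PGMFlushTLB and
 * PGMUpdateCR3.  The helper is hypothetical; the fNestedPaging test matches
 * the assumption asserted in PGMUpdateCR3 above.
 */
#if 0 /* example only */
static int pgmSampleHandleCr3Write(PVM pVM, PVMCPU pVCpu, uint64_t uNewCr3, bool fGlobal)
{
    if (pVM->pgm.s.fNestedPaging)
        return PGMUpdateCR3(pVCpu, uNewCr3);    /* no TLB flush / SyncCR3 required */
    return PGMFlushTLB(pVCpu, uNewCr3, fGlobal);
}
#endif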
1867
1868
1869/**
1870 * Synchronize the paging structures.
1871 *
1872 * This function is called in response to the VM_FF_PGM_SYNC_CR3 and
1873 * VM_FF_PGM_SYNC_CR3_NONGLOBAL. Those two force action flags are set
1874 * in several places, most importantly whenever the CR3 is loaded.
1875 *
1876 * @returns VBox status code.
1877 * @param pVCpu VMCPU handle.
1878 * @param cr0 Guest context CR0 register
1879 * @param cr3 Guest context CR3 register
1880 * @param cr4 Guest context CR4 register
1881 * @param fGlobal Including global page directories or not
1882 */
1883VMMDECL(int) PGMSyncCR3(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
1884{
1885 PVM pVM = pVCpu->CTX_SUFF(pVM);
1886 int rc;
1887
1888 /*
1889 * The pool may have pending stuff and even require a return to ring-3 to
1890 * clear the whole thing.
1891 */
1892 rc = pgmPoolSyncCR3(pVCpu);
1893 if (rc != VINF_SUCCESS)
1894 return rc;
1895
1896 /*
1897 * We might be called when we shouldn't.
1898 *
1899 * The mode switching will ensure that the PD is resynced
1900 * after every mode switch. So, if we find ourselves here
1901 * when in protected or real mode we can safely disable the
1902 * FF and return immediately.
1903 */
1904 if (pVCpu->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
1905 {
1906 Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
1907 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
1908 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1909 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
1910 return VINF_SUCCESS;
1911 }
1912
1913 /* If global pages are not supported, then all flushes are global. */
1914 if (!(cr4 & X86_CR4_PGE))
1915 fGlobal = true;
1916 LogFlow(("PGMSyncCR3: cr0=%RX64 cr3=%RX64 cr4=%RX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
1917 VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)));
1918
1919 /*
1920 * Check if we need to finish an aborted MapCR3 call (see PGMFlushTLB).
1921 * This should be done before SyncCR3.
1922 */
1923 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MAP_CR3)
1924 {
1925 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MAP_CR3;
1926
1927 RTGCPHYS GCPhysCR3Old = pVCpu->pgm.s.GCPhysCR3;
1928 RTGCPHYS GCPhysCR3;
1929 switch (pVCpu->pgm.s.enmGuestMode)
1930 {
1931 case PGMMODE_PAE:
1932 case PGMMODE_PAE_NX:
1933 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1934 break;
1935 case PGMMODE_AMD64:
1936 case PGMMODE_AMD64_NX:
1937 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
1938 break;
1939 default:
1940 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1941 break;
1942 }
1943
1944 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
1945 {
1946 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
1947 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
1948 }
1949 /* Make sure we check for pending pgm pool syncs as we clear VMCPU_FF_PGM_SYNC_CR3 later on! */
1950 if ( rc == VINF_PGM_SYNC_CR3
1951 || (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL))
1952 {
1953 Log(("PGMSyncCR3: pending pgm pool sync after MapCR3!\n"));
1954#ifdef IN_RING3
1955 rc = pgmPoolSyncCR3(pVCpu);
1956#else
1957 if (rc == VINF_PGM_SYNC_CR3)
1958 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3Old;
1959 return VINF_PGM_SYNC_CR3;
1960#endif
1961 }
1962 AssertRCReturn(rc, rc);
1963 AssertRCSuccessReturn(rc, VERR_INTERNAL_ERROR);
1964 }
1965
1966 /*
1967 * Let the 'Bth' function do the work and we'll just keep track of the flags.
1968 */
1969 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
1970 rc = PGM_BTH_PFN(SyncCR3, pVCpu)(pVCpu, cr0, cr3, cr4, fGlobal);
1971 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
1972 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
1973 if (rc == VINF_SUCCESS)
1974 {
1975 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)
1976 {
1977 /* Go back to ring 3 if a pgm pool sync is again pending. */
1978 return VINF_PGM_SYNC_CR3;
1979 }
1980
1981 if (!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
1982 {
1983 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
1984 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1985 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
1986 }
1987
1988 /*
1989 * Check if we have a pending update of the CR3 monitoring.
1990 */
1991 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
1992 {
1993 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1994 Assert(!pVM->pgm.s.fMappingsFixed); Assert(!pVM->pgm.s.fMappingsDisabled);
1995 }
1996 }
1997
1998 /*
1999 * Now flush the CR3 (guest context).
2000 */
2001 if (rc == VINF_SUCCESS)
2002 PGM_INVL_VCPU_TLBS(pVCpu);
2003 return rc;
2004}
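
/*
 * Illustrative sketch of a force-action handler servicing the SYNC_CR3 flags
 * with PGMSyncCR3.  The helper and the surrounding loop are hypothetical, and
 * the CPUM getters are assumed to have their usual PVMCPU signatures.
 */
#if 0 /* example only */
static int pgmSampleServiceSyncCr3(PVMCPU pVCpu)
{
    bool const fGlobal = VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    int rc = PGMSyncCR3(pVCpu,
                        CPUMGetGuestCR0(pVCpu),
                        CPUMGetGuestCR3(pVCpu),
                        CPUMGetGuestCR4(pVCpu),
                        fGlobal);
    /* VINF_PGM_SYNC_CR3 means: return to ring-3 so pgmPoolSyncCR3 can finish the job. */
    return rc;
}
#endif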
2005
2006
2007/**
2008 * Called whenever CR0, CR4 or EFER changes in a way which may affect the paging mode.
2009 *
2010 * @returns VBox status code, with the following informational code for
2011 * VM scheduling.
2012 * @retval VINF_SUCCESS if there was no change, or it was successfully dealt with.
2013 * @retval VINF_PGM_CHANGE_MODE if we're in RC or R0 and the mode changes.
2014 * (I.e. not in R3.)
2015 * @retval VINF_EM_SUSPEND or VINF_EM_OFF on a fatal runtime error. (R3 only)
2016 *
2017 * @param pVCpu VMCPU handle.
2018 * @param cr0 The new cr0.
2019 * @param cr4 The new cr4.
2020 * @param efer The new extended feature enable register.
2021 */
2022VMMDECL(int) PGMChangeMode(PVMCPU pVCpu, uint64_t cr0, uint64_t cr4, uint64_t efer)
2023{
2024 PVM pVM = pVCpu->CTX_SUFF(pVM);
2025 PGMMODE enmGuestMode;
2026
2027 /*
2028 * Calc the new guest mode.
2029 */
2030 if (!(cr0 & X86_CR0_PE))
2031 enmGuestMode = PGMMODE_REAL;
2032 else if (!(cr0 & X86_CR0_PG))
2033 enmGuestMode = PGMMODE_PROTECTED;
2034 else if (!(cr4 & X86_CR4_PAE))
2035 {
2036 bool const fPse = !!(cr4 & X86_CR4_PSE);
2037 if (pVCpu->pgm.s.fGst32BitPageSizeExtension != fPse)
2038 Log(("PGMChangeMode: CR4.PSE %d -> %d\n", pVCpu->pgm.s.fGst32BitPageSizeExtension, fPse));
2039 pVCpu->pgm.s.fGst32BitPageSizeExtension = fPse;
2040 enmGuestMode = PGMMODE_32_BIT;
2041 }
2042 else if (!(efer & MSR_K6_EFER_LME))
2043 {
2044 if (!(efer & MSR_K6_EFER_NXE))
2045 enmGuestMode = PGMMODE_PAE;
2046 else
2047 enmGuestMode = PGMMODE_PAE_NX;
2048 }
2049 else
2050 {
2051 if (!(efer & MSR_K6_EFER_NXE))
2052 enmGuestMode = PGMMODE_AMD64;
2053 else
2054 enmGuestMode = PGMMODE_AMD64_NX;
2055 }
2056
2057 /*
2058 * Did it change?
2059 */
2060 if (pVCpu->pgm.s.enmGuestMode == enmGuestMode)
2061 return VINF_SUCCESS;
2062
2063 /* Flush the TLB */
2064 PGM_INVL_VCPU_TLBS(pVCpu);
2065
2066#ifdef IN_RING3
2067 return PGMR3ChangeMode(pVM, pVCpu, enmGuestMode);
2068#else
2069 LogFlow(("PGMChangeMode: returns VINF_PGM_CHANGE_MODE.\n"));
2070 return VINF_PGM_CHANGE_MODE;
2071#endif
2072}
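
/*
 * Illustrative sketch of a caller reacting to PGMChangeMode after emulating a
 * control register or EFER write.  The helper is hypothetical; the handling of
 * VINF_PGM_CHANGE_MODE (defer to ring-3 where PGMR3ChangeMode runs) follows
 * the return code contract documented above.
 */
#if 0 /* example only */
static int pgmSampleAfterCrxOrEferWrite(PVMCPU pVCpu, uint64_t cr0, uint64_t cr4, uint64_t efer)
{
    int rc = PGMChangeMode(pVCpu, cr0, cr4, efer);
    if (rc == VINF_PGM_CHANGE_MODE)
        return rc;  /* Not in ring-3: reschedule so the actual mode switch can be done there. */
    return rc;
}
#endif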
2073
2074
2075/**
2076 * Gets the current guest paging mode.
2077 *
2078 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
2079 *
2080 * @returns The current paging mode.
2081 * @param pVCpu VMCPU handle.
2082 */
2083VMMDECL(PGMMODE) PGMGetGuestMode(PVMCPU pVCpu)
2084{
2085 return pVCpu->pgm.s.enmGuestMode;
2086}
2087
2088
2089/**
2090 * Gets the current shadow paging mode.
2091 *
2092 * @returns The current paging mode.
2093 * @param pVCpu VMCPU handle.
2094 */
2095VMMDECL(PGMMODE) PGMGetShadowMode(PVMCPU pVCpu)
2096{
2097 return pVCpu->pgm.s.enmShadowMode;
2098}
2099
2100/**
2101 * Gets the current host paging mode.
2102 *
2103 * @returns The current paging mode.
2104 * @param pVM The VM handle.
2105 */
2106VMMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
2107{
2108 switch (pVM->pgm.s.enmHostMode)
2109 {
2110 case SUPPAGINGMODE_32_BIT:
2111 case SUPPAGINGMODE_32_BIT_GLOBAL:
2112 return PGMMODE_32_BIT;
2113
2114 case SUPPAGINGMODE_PAE:
2115 case SUPPAGINGMODE_PAE_GLOBAL:
2116 return PGMMODE_PAE;
2117
2118 case SUPPAGINGMODE_PAE_NX:
2119 case SUPPAGINGMODE_PAE_GLOBAL_NX:
2120 return PGMMODE_PAE_NX;
2121
2122 case SUPPAGINGMODE_AMD64:
2123 case SUPPAGINGMODE_AMD64_GLOBAL:
2124 return PGMMODE_AMD64;
2125
2126 case SUPPAGINGMODE_AMD64_NX:
2127 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
2128 return PGMMODE_AMD64_NX;
2129
2130 default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
2131 }
2132
2133 return PGMMODE_INVALID;
2134}
2135
2136
2137/**
2138 * Get mode name.
2139 *
2140 * @returns read-only name string.
2141 * @param enmMode The mode whose name is desired.
2142 */
2143VMMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
2144{
2145 switch (enmMode)
2146 {
2147 case PGMMODE_REAL: return "Real";
2148 case PGMMODE_PROTECTED: return "Protected";
2149 case PGMMODE_32_BIT: return "32-bit";
2150 case PGMMODE_PAE: return "PAE";
2151 case PGMMODE_PAE_NX: return "PAE+NX";
2152 case PGMMODE_AMD64: return "AMD64";
2153 case PGMMODE_AMD64_NX: return "AMD64+NX";
2154 case PGMMODE_NESTED: return "Nested";
2155 case PGMMODE_EPT: return "EPT";
2156 default: return "unknown mode value";
2157 }
2158}
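
/*
 * Illustrative sketch combining the mode getters with PGMGetModeName for
 * logging; the helper itself is hypothetical.
 */
#if 0 /* example only */
static void pgmSampleLogModes(PVM pVM, PVMCPU pVCpu)
{
    Log(("PGM modes: guest=%s shadow=%s host=%s\n",
         PGMGetModeName(PGMGetGuestMode(pVCpu)),
         PGMGetModeName(PGMGetShadowMode(pVCpu)),
         PGMGetModeName(PGMGetHostMode(pVM))));
}
#endif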
2159
2160
2161
2162/**
2163 * Notification from CPUM that the EFER.NXE bit has changed.
2164 *
2165 * @param pVCpu The virtual CPU for which EFER changed.
2166 * @param fNxe The new NXE state.
2167 */
2168VMM_INT_DECL(void) PGMNotifyNxeChanged(PVMCPU pVCpu, bool fNxe)
2169{
2170 Log(("PGMNotifyNxeChanged: fNxe=%RTbool\n", fNxe));
2171 pVCpu->pgm.s.fNoExecuteEnabled = fNxe;
2172 if (fNxe)
2173 {
2174 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
2175 pVCpu->pgm.s.fGstPaeMbzPteMask &= ~X86_PTE_PAE_NX;
2176 pVCpu->pgm.s.fGstPaeMbzPdeMask &= ~X86_PDE_PAE_NX;
2177 pVCpu->pgm.s.fGstPaeMbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
2178 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask - N/A */
2179 pVCpu->pgm.s.fGstAmd64MbzPteMask &= ~X86_PTE_PAE_NX;
2180 pVCpu->pgm.s.fGstAmd64MbzPdeMask &= ~X86_PDE_PAE_NX;
2181 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
2182 pVCpu->pgm.s.fGstAmd64MbzPdpeMask &= ~X86_PDPE_LM_NX;
2183 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask &= ~X86_PDPE_LM_NX;
2184 pVCpu->pgm.s.fGstAmd64MbzPml4eMask &= ~X86_PML4E_NX;
2185 }
2186 else
2187 {
2188 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
2189 pVCpu->pgm.s.fGstPaeMbzPteMask |= X86_PTE_PAE_NX;
2190 pVCpu->pgm.s.fGstPaeMbzPdeMask |= X86_PDE_PAE_NX;
2191 pVCpu->pgm.s.fGstPaeMbzBigPdeMask |= X86_PDE2M_PAE_NX;
2192 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask - N/A */
2193 pVCpu->pgm.s.fGstAmd64MbzPteMask |= X86_PTE_PAE_NX;
2194 pVCpu->pgm.s.fGstAmd64MbzPdeMask |= X86_PDE_PAE_NX;
2195 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask |= X86_PDE2M_PAE_NX;
2196 pVCpu->pgm.s.fGstAmd64MbzPdpeMask |= X86_PDPE_LM_NX;
2197 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask |= X86_PDPE_LM_NX;
2198 pVCpu->pgm.s.fGstAmd64MbzPml4eMask |= X86_PML4E_NX;
2199 }
2200}
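
/*
 * Illustrative sketch of what the must-be-zero masks maintained above are for:
 * a guest page table walker rejects entries with MBZ bits set (reserved bit
 * #PF).  With NXE clear the NX bit is part of the mask, with NXE set it is a
 * legal bit.  The checking helper below is hypothetical.
 */
#if 0 /* example only */
static bool pgmSampleIsGstPaePteValid(PVMCPU pVCpu, X86PTEPAE Pte)
{
    return !(Pte.u & pVCpu->pgm.s.fGstPaeMbzPteMask);
}
#endif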
2201
2202
2203/**
2204 * Check if any pgm pool pages are marked dirty (not monitored).
2205 *
2206 * @returns true if dirty pages are pending, false if not.
2207 * @param pVM The VM to operate on.
2208 */
2209VMMDECL(bool) PGMHasDirtyPages(PVM pVM)
2210{
2211 return pVM->pgm.s.CTX_SUFF(pPool)->cDirtyPages != 0;
2212}
2213
2214/**
2215 * Check if the PGM lock is currently taken.
2216 *
2217 * @returns bool locked/not locked
2218 * @param pVM The VM to operate on.
2219 */
2220VMMDECL(bool) PGMIsLocked(PVM pVM)
2221{
2222 return PDMCritSectIsOwned(&pVM->pgm.s.CritSect);
2223}
2224
2225
2226/**
2227 * Check if this VCPU currently owns the PGM lock.
2228 *
2229 * @returns bool owner/not owner
2230 * @param pVM The VM to operate on.
2231 */
2232VMMDECL(bool) PGMIsLockOwner(PVM pVM)
2233{
2234 return PDMCritSectIsOwner(&pVM->pgm.s.CritSect);
2235}
2236
2237
2238/**
2239 * Enable or disable large page usage
2240 *
2241 * @param pVM The VM to operate on.
2242 * @param fUseLargePages Use/not use large pages
2243 */
2244VMMDECL(void) PGMSetLargePageUsage(PVM pVM, bool fUseLargePages)
2245{
2246 pVM->fUseLargePages = fUseLargePages;
2247}
2248
2249/**
2250 * Acquire the PGM lock.
2251 *
2252 * @returns VBox status code
2253 * @param pVM The VM to operate on.
2254 */
2255int pgmLock(PVM pVM)
2256{
2257 int rc = PDMCritSectEnter(&pVM->pgm.s.CritSect, VERR_SEM_BUSY);
2258#if defined(IN_RC) || defined(IN_RING0)
2259 if (rc == VERR_SEM_BUSY)
2260 rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_LOCK, 0);
2261#endif
2262 AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc));
2263 return rc;
2264}
2265
2266
2267/**
2268 * Release the PGM lock.
2269 *
2271 * @param pVM The VM to operate on.
2272 */
2273void pgmUnlock(PVM pVM)
2274{
2275 PDMCritSectLeave(&pVM->pgm.s.CritSect);
2276}
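
/*
 * Illustrative sketch of the usual pgmLock/pgmUnlock bracket around PGM
 * internal work (compare pgmDynMapGCPageInternal below); the work done while
 * holding the lock is a hypothetical placeholder.
 */
#if 0 /* example only */
static int pgmSampleLockedOperation(PVM pVM)
{
    pgmLock(pVM);
    /* ... inspect or update structures that require the PGM lock ... */
    pgmUnlock(pVM);
    return VINF_SUCCESS;
}
#endif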
2277
2278#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
2279
2280/** Common worker for PGMDynMapGCPage and PGMDynMapGCPageOff. */
2281DECLINLINE(int) pgmDynMapGCPageInternal(PVM pVM, RTGCPHYS GCPhys, void **ppv)
2282{
2283 pgmLock(pVM);
2284
2285 /*
2286 * Convert it to a writable page and pass it on to PGMDynMapHCPage.
2287 */
2288 int rc;
2289 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
2290 if (RT_LIKELY(pPage))
2291 {
2292 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
2293 if (RT_SUCCESS(rc))
2294 {
2295 //Log(("PGMDynMapGCPage: GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, pPage));
2296#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
2297 rc = pgmR0DynMapHCPageInlined(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage), ppv);
2298#else
2299 rc = PGMDynMapHCPage(pVM, PGM_PAGE_GET_HCPHYS(pPage), ppv);
2300#endif
2301 }
2302 else
2303 AssertRC(rc);
2304 }
2305 else
2306 {
2307 AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
2308 rc = VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2309 }
2310
2311 pgmUnlock(pVM);
2312 return rc;
2313}
2314
2315/**
2316 * Temporarily maps one guest page specified by GC physical address.
2317 * These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
2318 *
2319 * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
2320 * reused after 8 mappings (or perhaps a few more if you score with the cache).
2321 *
2322 * @returns VBox status.
2323 * @param pVM VM handle.
2324 * @param GCPhys GC Physical address of the page.
2325 * @param ppv Where to store the address of the mapping.
2326 */
2327VMMDECL(int) PGMDynMapGCPage(PVM pVM, RTGCPHYS GCPhys, void **ppv)
2328{
2329 AssertMsg(!(GCPhys & PAGE_OFFSET_MASK), ("GCPhys=%RGp\n", GCPhys));
2330 return pgmDynMapGCPageInternal(pVM, GCPhys, ppv);
2331}
2332
2333
2334/**
2335 * Temporarily maps one guest page specified by unaligned GC physical address.
2336 * These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
2337 *
2338 * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
2339 * reused after 8 mappings (or perhaps a few more if you score with the cache).
2340 *
2341 * The caller is aware that only the specified page is mapped and that really bad things
2342 * will happen if writing beyond the page!
2343 *
2344 * @returns VBox status.
2345 * @param pVM VM handle.
2346 * @param GCPhys GC Physical address within the page to be mapped.
2347 * @param ppv Where to store the mapping address corresponding to GCPhys.
2348 */
2349VMMDECL(int) PGMDynMapGCPageOff(PVM pVM, RTGCPHYS GCPhys, void **ppv)
2350{
2351 void *pv;
2352 int rc = pgmDynMapGCPageInternal(pVM, GCPhys, &pv);
2353 if (RT_SUCCESS(rc))
2354 {
2355 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
2356 return VINF_SUCCESS;
2357 }
2358 return rc;
2359}
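
/*
 * Illustrative sketch of using the dynamic mapping API to read a few bytes of
 * guest physical memory.  The helper is hypothetical; note that the mapping is
 * only valid short term since the mapping slots are recycled.
 */
#if 0 /* example only */
static int pgmSampleReadGCPhysU32(PVM pVM, RTGCPHYS GCPhys, uint32_t *pu32)
{
    void *pv;
    int rc = PGMDynMapGCPageOff(pVM, GCPhys, &pv);
    if (RT_SUCCESS(rc))
        *pu32 = *(uint32_t const *)pv;  /* must not cross the page boundary */
    return rc;
}
#endif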
2360
2361# ifdef IN_RC
2362
2363/**
2364 * Temporarily maps one host page specified by HC physical address.
2365 *
2366 * Be WARNED that the dynamic page mapping area is small, 16 pages, thus the space is
2367 * reused after 16 mappings (or perhaps a few more if you score with the cache).
2368 *
2369 * @returns VINF_SUCCESS, will bail out to ring-3 on failure.
2370 * @param pVM VM handle.
2371 * @param HCPhys HC Physical address of the page.
2372 * @param ppv Where to store the address of the mapping. This is the
2373 * address of the PAGE not the exact address corresponding
2374 * to HCPhys. Use PGMDynMapHCPageOff if you care for the
2375 * page offset.
2376 */
2377VMMDECL(int) PGMDynMapHCPage(PVM pVM, RTHCPHYS HCPhys, void **ppv)
2378{
2379 AssertMsg(!(HCPhys & PAGE_OFFSET_MASK), ("HCPhys=%RHp\n", HCPhys));
2380
2381 /*
2382 * Check the cache.
2383 */
2384 register unsigned iCache;
2385 for (iCache = 0;iCache < RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache);iCache++)
2386 {
2387 static const uint8_t au8Trans[MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT][RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache)] =
2388 {
2389 { 0, 9, 10, 11, 12, 13, 14, 15},
2390 { 0, 1, 10, 11, 12, 13, 14, 15},
2391 { 0, 1, 2, 11, 12, 13, 14, 15},
2392 { 0, 1, 2, 3, 12, 13, 14, 15},
2393 { 0, 1, 2, 3, 4, 13, 14, 15},
2394 { 0, 1, 2, 3, 4, 5, 14, 15},
2395 { 0, 1, 2, 3, 4, 5, 6, 15},
2396 { 0, 1, 2, 3, 4, 5, 6, 7},
2397 { 8, 1, 2, 3, 4, 5, 6, 7},
2398 { 8, 9, 2, 3, 4, 5, 6, 7},
2399 { 8, 9, 10, 3, 4, 5, 6, 7},
2400 { 8, 9, 10, 11, 4, 5, 6, 7},
2401 { 8, 9, 10, 11, 12, 5, 6, 7},
2402 { 8, 9, 10, 11, 12, 13, 6, 7},
2403 { 8, 9, 10, 11, 12, 13, 14, 7},
2404 { 8, 9, 10, 11, 12, 13, 14, 15},
2405 };
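        /* The table above maps [iDynPageMapLast][iCache] to whichever of the
           two pages sharing cache slot iCache (pages iCache and iCache + 8)
           was populated most recently, given the last allocation position. */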
2406 AssertCompile(RT_ELEMENTS(au8Trans) == 16);
2407 AssertCompile(RT_ELEMENTS(au8Trans[0]) == 8);
2408
2409 if (pVM->pgm.s.aHCPhysDynPageMapCache[iCache] == HCPhys)
2410 {
2411 int iPage = au8Trans[pVM->pgm.s.iDynPageMapLast][iCache];
2412
2413 /* The cache can get out of sync with locked entries. (10 locked, 2 overwrites its cache position, last = 11, lookup 2 -> page 10 instead of 2) */
2414 if ((pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage].u & X86_PTE_PG_MASK) == HCPhys)
2415 {
2416 void *pv = pVM->pgm.s.pbDynPageMapBaseGC + (iPage << PAGE_SHIFT);
2417 *ppv = pv;
2418 STAM_COUNTER_INC(&pVM->pgm.s.StatRCDynMapCacheHits);
2419 Log4(("PGMGCDynMapHCPage: HCPhys=%RHp pv=%p iPage=%d iCache=%d\n", HCPhys, pv, iPage, iCache));
2420 return VINF_SUCCESS;
2421 }
2422 LogFlow(("Out of sync entry %d\n", iPage));
2423 }
2424 }
2425 AssertCompile(RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) == 8);
2426 AssertCompile((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) == 16);
2427 STAM_COUNTER_INC(&pVM->pgm.s.StatRCDynMapCacheMisses);
2428
2429 /*
2430 * Update the page tables.
2431 */
2432 unsigned iPage = pVM->pgm.s.iDynPageMapLast;
2433 unsigned i;
2434 for (i = 0; i < (MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT); i++)
2435 {
2436 pVM->pgm.s.iDynPageMapLast = iPage = (iPage + 1) & ((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) - 1);
2437 if (!pVM->pgm.s.aLockedDynPageMapCache[iPage])
2438 break;
2439 iPage++;
2440 }
2441 AssertRelease(i != (MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT));
2442
2443 pVM->pgm.s.aHCPhysDynPageMapCache[iPage & (RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) - 1)] = HCPhys;
2444 pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage].u = (uint32_t)HCPhys | X86_PTE_P | X86_PTE_A | X86_PTE_D;
2445 pVM->pgm.s.paDynPageMapPaePTEsGC[iPage].u = HCPhys | X86_PTE_P | X86_PTE_A | X86_PTE_D;
2446 pVM->pgm.s.aLockedDynPageMapCache[iPage] = 0;
2447
2448 void *pv = pVM->pgm.s.pbDynPageMapBaseGC + (iPage << PAGE_SHIFT);
2449 *ppv = pv;
2450 ASMInvalidatePage(pv);
2451 Log4(("PGMGCDynMapHCPage: HCPhys=%RHp pv=%p iPage=%d\n", HCPhys, pv, iPage));
2452 return VINF_SUCCESS;
2453}
2454
2455
2456/**
2457 * Temporarily lock a dynamic page to prevent it from being reused.
2458 *
2459 * @param pVM VM handle.
2460 * @param GCPage GC address of page
2461 */
2462VMMDECL(void) PGMDynLockHCPage(PVM pVM, RCPTRTYPE(uint8_t *) GCPage)
2463{
2464 unsigned iPage;
2465
2466 Assert(GCPage >= pVM->pgm.s.pbDynPageMapBaseGC && GCPage < (pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE));
2467 iPage = ((uintptr_t)(GCPage - pVM->pgm.s.pbDynPageMapBaseGC)) >> PAGE_SHIFT;
2468 ASMAtomicIncU32(&pVM->pgm.s.aLockedDynPageMapCache[iPage]);
2469 Log4(("PGMDynLockHCPage %RRv iPage=%d\n", GCPage, iPage));
2470}
2471
2472
2473/**
2474 * Unlock a dynamic page
2475 *
2476 * @param pVM VM handle.
2477 * @param GCPage GC address of page
2478 */
2479VMMDECL(void) PGMDynUnlockHCPage(PVM pVM, RCPTRTYPE(uint8_t *) GCPage)
2480{
2481 unsigned iPage;
2482
2483 AssertCompile(RT_ELEMENTS(pVM->pgm.s.aLockedDynPageMapCache) == 2* RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache));
2484 AssertCompileMemberSize(VM, pgm.s.aLockedDynPageMapCache, sizeof(uint32_t) * (MM_HYPER_DYNAMIC_SIZE >> (PAGE_SHIFT)));
2485
2486 Assert(GCPage >= pVM->pgm.s.pbDynPageMapBaseGC && GCPage < (pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE));
2487 iPage = ((uintptr_t)(GCPage - pVM->pgm.s.pbDynPageMapBaseGC)) >> PAGE_SHIFT;
2488 Assert(pVM->pgm.s.aLockedDynPageMapCache[iPage]);
2489 ASMAtomicDecU32(&pVM->pgm.s.aLockedDynPageMapCache[iPage]);
2490 Log4(("PGMDynUnlockHCPage %RRv iPage=%d\n", GCPage, iPage));
2491}
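
/*
 * Illustrative sketch of bracketing a longer-lived use of a dynamic mapping
 * with PGMDynLockHCPage/PGMDynUnlockHCPage so the slot is not recycled while
 * in use.  The helper and the work done in between are hypothetical.
 */
#if 0 /* example only */
static int pgmSampleUseLockedMapping(PVM pVM, RTHCPHYS HCPhys)
{
    void *pv;
    int rc = PGMDynMapHCPage(pVM, HCPhys, &pv);
    if (RT_SUCCESS(rc))
    {
        PGMDynLockHCPage(pVM, (uint8_t *)pv);   /* RC context: RCPTRTYPE(uint8_t *) is a plain pointer. */
        /* ... work that may trigger further dynamic mappings ... */
        PGMDynUnlockHCPage(pVM, (uint8_t *)pv);
    }
    return rc;
}
#endif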
2492
2493
2494# ifdef VBOX_STRICT
2495/**
2496 * Check for lock leaks.
2497 *
2498 * @param pVM VM handle.
2499 */
2500VMMDECL(void) PGMDynCheckLocks(PVM pVM)
2501{
2502 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.aLockedDynPageMapCache); i++)
2503 Assert(!pVM->pgm.s.aLockedDynPageMapCache[i]);
2504}
2505# endif /* VBOX_STRICT */
2506
2507# endif /* IN_RC */
2508#endif /* IN_RC || VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
2509
2510#if !defined(IN_R0) || defined(LOG_ENABLED)
2511
2512/** Format handler for PGMPAGE.
2513 * @copydoc FNRTSTRFORMATTYPE */
2514static DECLCALLBACK(size_t) pgmFormatTypeHandlerPage(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
2515 const char *pszType, void const *pvValue,
2516 int cchWidth, int cchPrecision, unsigned fFlags,
2517 void *pvUser)
2518{
2519 size_t cch;
2520 PCPGMPAGE pPage = (PCPGMPAGE)pvValue;
2521 if (VALID_PTR(pPage))
2522 {
2523 char szTmp[64+80];
2524
2525 cch = 0;
2526
2527 /* The single char state stuff. */
2528 static const char s_achPageStates[4] = { 'Z', 'A', 'W', 'S' };
2529 szTmp[cch++] = s_achPageStates[PGM_PAGE_GET_STATE(pPage)];
2530
2531#define IS_PART_INCLUDED(lvl) ( !(fFlags & RTSTR_F_PRECISION) || cchPrecision == (lvl) || cchPrecision >= (lvl)+10 )
2532 if (IS_PART_INCLUDED(5))
2533 {
2534 static const char s_achHandlerStates[4] = { '-', 't', 'w', 'a' };
2535 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)];
2536 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_VIRT_STATE(pPage)];
2537 }
2538
2539 /* The type. */
2540 if (IS_PART_INCLUDED(4))
2541 {
2542 szTmp[cch++] = ':';
2543 static const char s_achPageTypes[8][4] = { "INV", "RAM", "MI2", "M2A", "SHA", "ROM", "MIO", "BAD" };
2544 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE(pPage)][0];
2545 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE(pPage)][1];
2546 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE(pPage)][2];
2547 }
2548
2549 /* The numbers. */
2550 if (IS_PART_INCLUDED(3))
2551 {
2552 szTmp[cch++] = ':';
2553 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_HCPHYS(pPage), 16, 12, 0, RTSTR_F_ZEROPAD | RTSTR_F_64BIT);
2554 }
2555
2556 if (IS_PART_INCLUDED(2))
2557 {
2558 szTmp[cch++] = ':';
2559 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_PAGEID(pPage), 16, 7, 0, RTSTR_F_ZEROPAD | RTSTR_F_32BIT);
2560 }
2561
2562 if (IS_PART_INCLUDED(6))
2563 {
2564 szTmp[cch++] = ':';
2565 static const char s_achRefs[4] = { '-', 'U', '!', 'L' };
2566 szTmp[cch++] = s_achRefs[PGM_PAGE_GET_TD_CREFS(pPage)];
2567 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_TD_IDX(pPage), 16, 4, 0, RTSTR_F_ZEROPAD | RTSTR_F_16BIT);
2568 }
2569#undef IS_PART_INCLUDED
2570
2571 cch = pfnOutput(pvArgOutput, szTmp, cch);
2572 }
2573 else
2574 cch = pfnOutput(pvArgOutput, "<bad-pgmpage-ptr>", sizeof("<bad-pgmpage-ptr>") - 1);
2575 return cch;
2576}
2577
2578
2579/** Format handler for PGMRAMRANGE.
2580 * @copydoc FNRTSTRFORMATTYPE */
2581static DECLCALLBACK(size_t) pgmFormatTypeHandlerRamRange(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
2582 const char *pszType, void const *pvValue,
2583 int cchWidth, int cchPrecision, unsigned fFlags,
2584 void *pvUser)
2585{
2586 size_t cch;
2587 PGMRAMRANGE const *pRam = (PGMRAMRANGE const *)pvValue;
2588 if (VALID_PTR(pRam))
2589 {
2590 char szTmp[80];
2591 cch = RTStrPrintf(szTmp, sizeof(szTmp), "%RGp-%RGp", pRam->GCPhys, pRam->GCPhysLast);
2592 cch = pfnOutput(pvArgOutput, szTmp, cch);
2593 }
2594 else
2595 cch = pfnOutput(pvArgOutput, "<bad-pgmramrange-ptr>", sizeof("<bad-pgmramrange-ptr>") - 1);
2596 return cch;
2597}
2598
2599/** Format type handlers to be registered/deregistered. */
2600static const struct
2601{
2602 char szType[24];
2603 PFNRTSTRFORMATTYPE pfnHandler;
2604} g_aPgmFormatTypes[] =
2605{
2606 { "pgmpage", pgmFormatTypeHandlerPage },
2607 { "pgmramrange", pgmFormatTypeHandlerRamRange }
2608};
2609
2610#endif /* !IN_R0 || LOG_ENABLED */
2611
2612/**
2613 * Registers the global string format types.
2614 *
2615 * This should be called at module load time or in some other manner that ensures
2616 * that it's called exactly one time.
2617 *
2618 * @returns IPRT status code on RTStrFormatTypeRegister failure.
2619 */
2620VMMDECL(int) PGMRegisterStringFormatTypes(void)
2621{
2622#if !defined(IN_R0) || defined(LOG_ENABLED)
2623 int rc = VINF_SUCCESS;
2624 unsigned i;
2625 for (i = 0; RT_SUCCESS(rc) && i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
2626 {
2627 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
2628# ifdef IN_RING0
2629 if (rc == VERR_ALREADY_EXISTS)
2630 {
2631 /* in case of cleanup failure in ring-0 */
2632 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2633 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
2634 }
2635# endif
2636 }
2637 if (RT_FAILURE(rc))
2638 while (i-- > 0)
2639 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2640
2641 return rc;
2642#else
2643 return VINF_SUCCESS;
2644#endif
2645}
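
/*
 * Illustrative sketch of using the registered format types in log statements
 * (compare the commented-out Log in pgmDynMapGCPageInternal above); the helper
 * and its parameters are hypothetical.
 */
#if 0 /* example only */
static void pgmSampleLogPage(PPGMPAGE pPage, PGMRAMRANGE const *pRam)
{
    Log(("Page %R[pgmpage] in range %R[pgmramrange]\n", pPage, pRam));
}
#endif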
2646
2647
2648/**
2649 * Deregisters the global string format types.
2650 *
2651 * This should be called at module unload time or in some other manner that
2652 * ensures that it's called exactly one time.
2653 */
2654VMMDECL(void) PGMDeregisterStringFormatTypes(void)
2655{
2656#if !defined(IN_R0) || defined(LOG_ENABLED)
2657 for (unsigned i = 0; i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
2658 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2659#endif
2660}
2661
2662#ifdef VBOX_STRICT
2663
2664/**
2665 * Asserts that there are no mapping conflicts.
2666 *
2667 * @returns Number of conflicts.
2668 * @param pVM The VM Handle.
2669 */
2670VMMDECL(unsigned) PGMAssertNoMappingConflicts(PVM pVM)
2671{
2672 unsigned cErrors = 0;
2673
2674    /* Only applies to raw mode -> 1 VCPU */
2675 Assert(pVM->cCpus == 1);
2676 PVMCPU pVCpu = &pVM->aCpus[0];
2677
2678 /*
2679 * Check for mapping conflicts.
2680 */
2681 for (PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
2682 pMapping;
2683 pMapping = pMapping->CTX_SUFF(pNext))
2684 {
2685 /** @todo This is slow and should be optimized, but since it's just assertions I don't care now. */
2686 for (RTGCPTR GCPtr = pMapping->GCPtr;
2687 GCPtr <= pMapping->GCPtrLast;
2688 GCPtr += PAGE_SIZE)
2689 {
2690 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)GCPtr, NULL, NULL);
2691 if (rc != VERR_PAGE_TABLE_NOT_PRESENT)
2692 {
2693 AssertMsgFailed(("Conflict at %RGv with %s\n", GCPtr, R3STRING(pMapping->pszDesc)));
2694 cErrors++;
2695 break;
2696 }
2697 }
2698 }
2699
2700 return cErrors;
2701}
2702
2703
2704/**
2705 * Asserts that everything related to the guest CR3 is correctly shadowed.
2706 *
2707 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
2708 * and assert the correctness of the guest CR3 mapping before asserting that the
2709 * shadow page tables are in sync with the guest page tables.
2710 *
2711 * @returns Number of conflicts.
2712 * @param pVM The VM Handle.
2713 * @param pVCpu VMCPU handle.
2714 * @param cr3 The current guest CR3 register value.
2715 * @param cr4 The current guest CR4 register value.
2716 */
2717VMMDECL(unsigned) PGMAssertCR3(PVM pVM, PVMCPU pVCpu, uint64_t cr3, uint64_t cr4)
2718{
2719 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
2720 pgmLock(pVM);
2721 unsigned cErrors = PGM_BTH_PFN(AssertCR3, pVCpu)(pVCpu, cr3, cr4, 0, ~(RTGCPTR)0);
2722 pgmUnlock(pVM);
2723 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
2724 return cErrors;
2725}
2726
2727#endif /* VBOX_STRICT */