VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp@ 47681

Last change on this file since 47681 was 47444, checked in by vboxsync, 11 years ago

IEM,HM,PGM: Started on string I/O optimizations using IEM (disabled). Cleaned up confusing status code handling in hmR0VmxCheckForceFlags (involving PGM) as well as some use of incorrect doxygen groups (@name).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 92.5 KB
1/* $Id: PGMAll.cpp 47444 2013-07-29 00:37:31Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor - All context code.
4 */
5
6/*
7 * Copyright (C) 2006-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_PGM
22#include <VBox/vmm/pgm.h>
23#include <VBox/vmm/cpum.h>
24#include <VBox/vmm/selm.h>
25#include <VBox/vmm/iom.h>
26#include <VBox/sup.h>
27#include <VBox/vmm/mm.h>
28#include <VBox/vmm/stam.h>
29#include <VBox/vmm/csam.h>
30#include <VBox/vmm/patm.h>
31#include <VBox/vmm/trpm.h>
32#ifdef VBOX_WITH_REM
33# include <VBox/vmm/rem.h>
34#endif
35#include <VBox/vmm/em.h>
36#include <VBox/vmm/hm.h>
37#include <VBox/vmm/hm_vmx.h>
38#include "PGMInternal.h"
39#include <VBox/vmm/vm.h>
40#include "PGMInline.h"
41#include <iprt/assert.h>
42#include <iprt/asm-amd64-x86.h>
43#include <iprt/string.h>
44#include <VBox/log.h>
45#include <VBox/param.h>
46#include <VBox/err.h>
47
48
49/*******************************************************************************
50* Structures and Typedefs *
51*******************************************************************************/
52/**
53 * State structure for PGM_GST_NAME(HandlerVirtualUpdate) that's
54 * passed to PGM_GST_NAME(VirtHandlerUpdateOne) during enumeration.
55 */
56typedef struct PGMHVUSTATE
57{
58 /** Pointer to the VM. */
59 PVM pVM;
60 /** Pointer to the VMCPU. */
61 PVMCPU pVCpu;
62 /** The todo flags. */
63 RTUINT fTodo;
64 /** The CR4 register value. */
65 uint32_t cr4;
66} PGMHVUSTATE, *PPGMHVUSTATE;
67
68
69/*******************************************************************************
70* Internal Functions *
71*******************************************************************************/
72DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
73DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPU pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde);
74#ifndef IN_RC
75static int pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD);
76static int pgmShwGetEPTPDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD);
77#endif
78
79
80/*
81 * Shadow - 32-bit mode
82 */
83#define PGM_SHW_TYPE PGM_TYPE_32BIT
84#define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
85#include "PGMAllShw.h"
86
87/* Guest - real mode */
88#define PGM_GST_TYPE PGM_TYPE_REAL
89#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
90#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
91#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
92#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
93#include "PGMGstDefs.h"
94#include "PGMAllGst.h"
95#include "PGMAllBth.h"
96#undef BTH_PGMPOOLKIND_PT_FOR_PT
97#undef BTH_PGMPOOLKIND_ROOT
98#undef PGM_BTH_NAME
99#undef PGM_GST_TYPE
100#undef PGM_GST_NAME
101
102/* Guest - protected mode */
103#define PGM_GST_TYPE PGM_TYPE_PROT
104#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
105#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
106#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
107#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
108#include "PGMGstDefs.h"
109#include "PGMAllGst.h"
110#include "PGMAllBth.h"
111#undef BTH_PGMPOOLKIND_PT_FOR_PT
112#undef BTH_PGMPOOLKIND_ROOT
113#undef PGM_BTH_NAME
114#undef PGM_GST_TYPE
115#undef PGM_GST_NAME
116
117/* Guest - 32-bit mode */
118#define PGM_GST_TYPE PGM_TYPE_32BIT
119#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
120#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
121#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
122#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
123#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD
124#include "PGMGstDefs.h"
125#include "PGMAllGst.h"
126#include "PGMAllBth.h"
127#undef BTH_PGMPOOLKIND_PT_FOR_BIG
128#undef BTH_PGMPOOLKIND_PT_FOR_PT
129#undef BTH_PGMPOOLKIND_ROOT
130#undef PGM_BTH_NAME
131#undef PGM_GST_TYPE
132#undef PGM_GST_NAME
133
134#undef PGM_SHW_TYPE
135#undef PGM_SHW_NAME
136
137
138/*
139 * Shadow - PAE mode
140 */
141#define PGM_SHW_TYPE PGM_TYPE_PAE
142#define PGM_SHW_NAME(name) PGM_SHW_NAME_PAE(name)
143#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
144#include "PGMAllShw.h"
145
146/* Guest - real mode */
147#define PGM_GST_TYPE PGM_TYPE_REAL
148#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
149#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
150#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
151#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
152#include "PGMGstDefs.h"
153#include "PGMAllBth.h"
154#undef BTH_PGMPOOLKIND_PT_FOR_PT
155#undef BTH_PGMPOOLKIND_ROOT
156#undef PGM_BTH_NAME
157#undef PGM_GST_TYPE
158#undef PGM_GST_NAME
159
160/* Guest - protected mode */
161#define PGM_GST_TYPE PGM_TYPE_PROT
162#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
163#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
164#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
165#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
166#include "PGMGstDefs.h"
167#include "PGMAllBth.h"
168#undef BTH_PGMPOOLKIND_PT_FOR_PT
169#undef BTH_PGMPOOLKIND_ROOT
170#undef PGM_BTH_NAME
171#undef PGM_GST_TYPE
172#undef PGM_GST_NAME
173
174/* Guest - 32-bit mode */
175#define PGM_GST_TYPE PGM_TYPE_32BIT
176#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
177#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
178#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
179#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
180#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_FOR_32BIT
181#include "PGMGstDefs.h"
182#include "PGMAllBth.h"
183#undef BTH_PGMPOOLKIND_PT_FOR_BIG
184#undef BTH_PGMPOOLKIND_PT_FOR_PT
185#undef BTH_PGMPOOLKIND_ROOT
186#undef PGM_BTH_NAME
187#undef PGM_GST_TYPE
188#undef PGM_GST_NAME
189
190
191/* Guest - PAE mode */
192#define PGM_GST_TYPE PGM_TYPE_PAE
193#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
194#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
195#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
196#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
197#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT
198#include "PGMGstDefs.h"
199#include "PGMAllGst.h"
200#include "PGMAllBth.h"
201#undef BTH_PGMPOOLKIND_PT_FOR_BIG
202#undef BTH_PGMPOOLKIND_PT_FOR_PT
203#undef BTH_PGMPOOLKIND_ROOT
204#undef PGM_BTH_NAME
205#undef PGM_GST_TYPE
206#undef PGM_GST_NAME
207
208#undef PGM_SHW_TYPE
209#undef PGM_SHW_NAME
210
211
212#ifndef IN_RC /* AMD64 implies VT-x/AMD-V */
213/*
214 * Shadow - AMD64 mode
215 */
216# define PGM_SHW_TYPE PGM_TYPE_AMD64
217# define PGM_SHW_NAME(name) PGM_SHW_NAME_AMD64(name)
218# include "PGMAllShw.h"
219
220/* Guest - protected mode (only used for AMD-V nested paging in 64 bits mode) */
221# define PGM_GST_TYPE PGM_TYPE_PROT
222# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
223# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
224# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
225# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PD_PHYS
226# include "PGMGstDefs.h"
227# include "PGMAllBth.h"
228# undef BTH_PGMPOOLKIND_PT_FOR_PT
229# undef BTH_PGMPOOLKIND_ROOT
230# undef PGM_BTH_NAME
231# undef PGM_GST_TYPE
232# undef PGM_GST_NAME
233
234# ifdef VBOX_WITH_64_BITS_GUESTS
235/* Guest - AMD64 mode */
236# define PGM_GST_TYPE PGM_TYPE_AMD64
237# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
238# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
239# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
240# define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
241# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_64BIT_PML4
242# include "PGMGstDefs.h"
243# include "PGMAllGst.h"
244# include "PGMAllBth.h"
245# undef BTH_PGMPOOLKIND_PT_FOR_BIG
246# undef BTH_PGMPOOLKIND_PT_FOR_PT
247# undef BTH_PGMPOOLKIND_ROOT
248# undef PGM_BTH_NAME
249# undef PGM_GST_TYPE
250# undef PGM_GST_NAME
251# endif /* VBOX_WITH_64_BITS_GUESTS */
252
253# undef PGM_SHW_TYPE
254# undef PGM_SHW_NAME
255
256
257/*
258 * Shadow - Nested paging mode
259 */
260# define PGM_SHW_TYPE PGM_TYPE_NESTED
261# define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED(name)
262# include "PGMAllShw.h"
263
264/* Guest - real mode */
265# define PGM_GST_TYPE PGM_TYPE_REAL
266# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
267# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_REAL(name)
268# include "PGMGstDefs.h"
269# include "PGMAllBth.h"
270# undef PGM_BTH_NAME
271# undef PGM_GST_TYPE
272# undef PGM_GST_NAME
273
274/* Guest - protected mode */
275# define PGM_GST_TYPE PGM_TYPE_PROT
276# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
277# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PROT(name)
278# include "PGMGstDefs.h"
279# include "PGMAllBth.h"
280# undef PGM_BTH_NAME
281# undef PGM_GST_TYPE
282# undef PGM_GST_NAME
283
284/* Guest - 32-bit mode */
285# define PGM_GST_TYPE PGM_TYPE_32BIT
286# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
287# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT(name)
288# include "PGMGstDefs.h"
289# include "PGMAllBth.h"
290# undef PGM_BTH_NAME
291# undef PGM_GST_TYPE
292# undef PGM_GST_NAME
293
294/* Guest - PAE mode */
295# define PGM_GST_TYPE PGM_TYPE_PAE
296# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
297# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE(name)
298# include "PGMGstDefs.h"
299# include "PGMAllBth.h"
300# undef PGM_BTH_NAME
301# undef PGM_GST_TYPE
302# undef PGM_GST_NAME
303
304# ifdef VBOX_WITH_64_BITS_GUESTS
305/* Guest - AMD64 mode */
306# define PGM_GST_TYPE PGM_TYPE_AMD64
307# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
308# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64(name)
309# include "PGMGstDefs.h"
310# include "PGMAllBth.h"
311# undef PGM_BTH_NAME
312# undef PGM_GST_TYPE
313# undef PGM_GST_NAME
314# endif /* VBOX_WITH_64_BITS_GUESTS */
315
316# undef PGM_SHW_TYPE
317# undef PGM_SHW_NAME
318
319
320/*
321 * Shadow - EPT
322 */
323# define PGM_SHW_TYPE PGM_TYPE_EPT
324# define PGM_SHW_NAME(name) PGM_SHW_NAME_EPT(name)
325# include "PGMAllShw.h"
326
327/* Guest - real mode */
328# define PGM_GST_TYPE PGM_TYPE_REAL
329# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
330# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_REAL(name)
331# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
332# include "PGMGstDefs.h"
333# include "PGMAllBth.h"
334# undef BTH_PGMPOOLKIND_PT_FOR_PT
335# undef PGM_BTH_NAME
336# undef PGM_GST_TYPE
337# undef PGM_GST_NAME
338
339/* Guest - protected mode */
340# define PGM_GST_TYPE PGM_TYPE_PROT
341# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
342# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
343# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
344# include "PGMGstDefs.h"
345# include "PGMAllBth.h"
346# undef BTH_PGMPOOLKIND_PT_FOR_PT
347# undef PGM_BTH_NAME
348# undef PGM_GST_TYPE
349# undef PGM_GST_NAME
350
351/* Guest - 32-bit mode */
352# define PGM_GST_TYPE PGM_TYPE_32BIT
353# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
354# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_32BIT(name)
355# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
356# include "PGMGstDefs.h"
357# include "PGMAllBth.h"
358# undef BTH_PGMPOOLKIND_PT_FOR_PT
359# undef PGM_BTH_NAME
360# undef PGM_GST_TYPE
361# undef PGM_GST_NAME
362
363/* Guest - PAE mode */
364# define PGM_GST_TYPE PGM_TYPE_PAE
365# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
366# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PAE(name)
367# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
368# include "PGMGstDefs.h"
369# include "PGMAllBth.h"
370# undef BTH_PGMPOOLKIND_PT_FOR_PT
371# undef PGM_BTH_NAME
372# undef PGM_GST_TYPE
373# undef PGM_GST_NAME
374
375# ifdef VBOX_WITH_64_BITS_GUESTS
376/* Guest - AMD64 mode */
377# define PGM_GST_TYPE PGM_TYPE_AMD64
378# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
379# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_AMD64(name)
380# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
381# include "PGMGstDefs.h"
382# include "PGMAllBth.h"
383# undef BTH_PGMPOOLKIND_PT_FOR_PT
384# undef PGM_BTH_NAME
385# undef PGM_GST_TYPE
386# undef PGM_GST_NAME
387# endif /* VBOX_WITH_64_BITS_GUESTS */
388
389# undef PGM_SHW_TYPE
390# undef PGM_SHW_NAME
391
392#endif /* !IN_RC */
393
394
395#ifndef IN_RING3
396/**
397 * #PF Handler.
398 *
399 * @returns VBox status code (appropriate for trap handling and GC return).
400 * @param pVCpu Pointer to the VMCPU.
401 * @param uErr The trap error code.
402 * @param pRegFrame Trap register frame.
403 * @param pvFault The fault address.
404 */
405VMMDECL(int) PGMTrap0eHandler(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
406{
407 PVM pVM = pVCpu->CTX_SUFF(pVM);
408
409 Log(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv eip=%04x:%RGv cr3=%RGp\n", uErr, pvFault, pRegFrame->cs.Sel, (RTGCPTR)pRegFrame->rip, (RTGCPHYS)CPUMGetGuestCR3(pVCpu)));
410 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0e, a);
411 STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );
412
413
414#ifdef VBOX_WITH_STATISTICS
415 /*
416 * Error code stats.
417 */
418 if (uErr & X86_TRAP_PF_US)
419 {
420 if (!(uErr & X86_TRAP_PF_P))
421 {
422 if (uErr & X86_TRAP_PF_RW)
423 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentWrite);
424 else
425 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentRead);
426 }
427 else if (uErr & X86_TRAP_PF_RW)
428 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSWrite);
429 else if (uErr & X86_TRAP_PF_RSVD)
430 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSReserved);
431 else if (uErr & X86_TRAP_PF_ID)
432 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNXE);
433 else
434 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSRead);
435 }
436 else
437 { /* Supervisor */
438 if (!(uErr & X86_TRAP_PF_P))
439 {
440 if (uErr & X86_TRAP_PF_RW)
441 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentWrite);
442 else
443 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentRead);
444 }
445 else if (uErr & X86_TRAP_PF_RW)
446 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVWrite);
447 else if (uErr & X86_TRAP_PF_ID)
448 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSNXE);
449 else if (uErr & X86_TRAP_PF_RSVD)
450 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVReserved);
451 }
452#endif /* VBOX_WITH_STATISTICS */
453
454 /*
455 * Call the worker.
456 */
457 bool fLockTaken = false;
458 int rc = PGM_BTH_PFN(Trap0eHandler, pVCpu)(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken);
459 if (fLockTaken)
460 {
461 PGM_LOCK_ASSERT_OWNER(pVM);
462 pgmUnlock(pVM);
463 }
464 LogFlow(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv rc=%Rrc\n", uErr, pvFault, rc));
465
466 /*
467 * Return code tweaks.
468 */
469 if (rc != VINF_SUCCESS)
470 {
471 if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
472 rc = VINF_SUCCESS;
473
474# ifdef IN_RING0
475 /* Note: hack alert for a difficult-to-reproduce problem. */
476 if ( rc == VERR_PAGE_NOT_PRESENT /* SMP only ; disassembly might fail. */
477 || rc == VERR_PAGE_TABLE_NOT_PRESENT /* seen with UNI & SMP */
478 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT /* seen with SMP */
479 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT) /* precaution */
480 {
481 Log(("WARNING: Unexpected VERR_PAGE_TABLE_NOT_PRESENT (%d) for page fault at %RGv error code %x (rip=%RGv)\n", rc, pvFault, uErr, pRegFrame->rip));
482 /* Some kind of inconsistency in the SMP case; it's safe to just execute the instruction again; not sure about single VCPU VMs though. */
483 rc = VINF_SUCCESS;
484 }
485# endif
486 }
487
488 STAM_STATS({ if (rc == VINF_EM_RAW_GUEST_TRAP) STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eGuestPF); });
489 STAM_STATS({ if (!pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution))
490 pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2Misc; });
491 STAM_PROFILE_STOP_EX(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0e, pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution), a);
492 return rc;
493}
494#endif /* !IN_RING3 */
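/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * how a ring-0/RC trap dispatcher might forward a guest #PF to
 * PGMTrap0eHandler() and react to the most common statuses. The wrapper name
 * and the calling context are hypothetical; only PGMTrap0eHandler() and the
 * status codes come from this file.
 */
#if 0 /* example only */
static int pgmSampleHandleGuestPageFault(PVMCPU pVCpu, RTGCUINT uErrCode,
                                         PCPUMCTXCORE pRegFrame, RTGCPTR GCPtrFault)
{
    int rc = PGMTrap0eHandler(pVCpu, uErrCode, pRegFrame, GCPtrFault);
    if (rc == VINF_SUCCESS)
        return VINF_SUCCESS;            /* Shadow tables fixed up; resume the guest. */
    if (rc == VINF_EM_RAW_GUEST_TRAP)
        return VINF_EM_RAW_GUEST_TRAP;  /* Genuine guest fault; reflect #PF to the guest. */
    return rc;                          /* Anything else goes back to EM for scheduling. */
}
#endif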
495
496
497/**
498 * Prefetches a page.
499 *
500 * Typically used to sync commonly used pages before entering raw mode
501 * after a CR3 reload.
502 *
503 * @returns VBox status code suitable for scheduling.
504 * @retval VINF_SUCCESS on success.
505 * @retval VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
506 * @param pVCpu Pointer to the VMCPU.
507 * @param GCPtrPage The page to prefetch.
508 */
509VMMDECL(int) PGMPrefetchPage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
510{
511 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Prefetch), a);
512 int rc = PGM_BTH_PFN(PrefetchPage, pVCpu)(pVCpu, GCPtrPage);
513 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Prefetch), a);
514 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
515 return rc;
516}
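/*
 * Illustrative sketch (editor's addition): prefetching a handful of pages
 * after a CR3 reload, as the comment above suggests. The helper and the page
 * list are hypothetical; the VINF_PGM_SYNC_CR3 handling mirrors the @retval
 * documentation of PGMPrefetchPage().
 */
#if 0 /* example only */
static int pgmSamplePrefetchAfterCr3Reload(PVMCPU pVCpu, RTGCPTR const *paPages, unsigned cPages)
{
    for (unsigned i = 0; i < cPages; i++)
    {
        int rc = PGMPrefetchPage(pVCpu, paPages[i]);
        if (rc == VINF_PGM_SYNC_CR3)
            return rc;              /* Out of shadow pages; let the caller force a full sync. */
        AssertRCReturn(rc, rc);
    }
    return VINF_SUCCESS;
}
#endif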
517
518
519/**
520 * Gets the mapping corresponding to the specified address (if any).
521 *
522 * @returns Pointer to the mapping.
523 * @returns NULL if no mapping was found.
524 *
525 * @param pVM Pointer to the VM.
526 * @param GCPtr The guest context pointer.
527 */
528PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr)
529{
530 PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
531 while (pMapping)
532 {
533 if ((uintptr_t)GCPtr < (uintptr_t)pMapping->GCPtr)
534 break;
535 if ((uintptr_t)GCPtr - (uintptr_t)pMapping->GCPtr < pMapping->cb)
536 return pMapping;
537 pMapping = pMapping->CTX_SUFF(pNext);
538 }
539 return NULL;
540}
541
542
543/**
544 * Verifies a range of pages for read or write access
545 *
546 * Only checks the guest's page tables
547 *
548 * @returns VBox status code.
549 * @param pVCpu Pointer to the VMCPU.
550 * @param Addr Guest virtual address to check
551 * @param cbSize Access size
552 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
553 * @remarks Currently not in use.
554 */
555VMMDECL(int) PGMIsValidAccess(PVMCPU pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
556{
557 /*
558 * Validate input.
559 */
560 if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
561 {
562 AssertMsgFailed(("PGMIsValidAccess: invalid access type %08x\n", fAccess));
563 return VERR_INVALID_PARAMETER;
564 }
565
566 uint64_t fPage;
567 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)Addr, &fPage, NULL);
568 if (RT_FAILURE(rc))
569 {
570 Log(("PGMIsValidAccess: access violation for %RGv rc=%d\n", Addr, rc));
571 return VINF_EM_RAW_GUEST_TRAP;
572 }
573
574 /*
575 * Check if the access would cause a page fault
576 *
577 * Note that hypervisor page directories are not present in the guest's tables, so this check
578 * is sufficient.
579 */
580 bool fWrite = !!(fAccess & X86_PTE_RW);
581 bool fUser = !!(fAccess & X86_PTE_US);
582 if ( !(fPage & X86_PTE_P)
583 || (fWrite && !(fPage & X86_PTE_RW))
584 || (fUser && !(fPage & X86_PTE_US)) )
585 {
586 Log(("PGMIsValidAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPage, fWrite, fUser));
587 return VINF_EM_RAW_GUEST_TRAP;
588 }
589 if ( RT_SUCCESS(rc)
590 && PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize))
591 return PGMIsValidAccess(pVCpu, Addr + PAGE_SIZE, (cbSize > PAGE_SIZE) ? cbSize - PAGE_SIZE : 1, fAccess);
592 return rc;
593}
594
595
596/**
597 * Verifies a range of pages for read or write access
598 *
599 * Supports handling of pages marked for dirty bit tracking and CSAM
600 *
601 * @returns VBox status code.
602 * @param pVCpu Pointer to the VMCPU.
603 * @param Addr Guest virtual address to check
604 * @param cbSize Access size
605 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
606 */
607VMMDECL(int) PGMVerifyAccess(PVMCPU pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
608{
609 PVM pVM = pVCpu->CTX_SUFF(pVM);
610
611 AssertMsg(!(fAccess & ~(X86_PTE_US | X86_PTE_RW)), ("PGMVerifyAccess: invalid access type %08x\n", fAccess));
612
613 /*
614 * Get going.
615 */
616 uint64_t fPageGst;
617 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)Addr, &fPageGst, NULL);
618 if (RT_FAILURE(rc))
619 {
620 Log(("PGMVerifyAccess: access violation for %RGv rc=%d\n", Addr, rc));
621 return VINF_EM_RAW_GUEST_TRAP;
622 }
623
624 /*
625 * Check if the access would cause a page fault
626 *
627 * Note that hypervisor page directories are not present in the guest's tables, so this check
628 * is sufficient.
629 */
630 const bool fWrite = !!(fAccess & X86_PTE_RW);
631 const bool fUser = !!(fAccess & X86_PTE_US);
632 if ( !(fPageGst & X86_PTE_P)
633 || (fWrite && !(fPageGst & X86_PTE_RW))
634 || (fUser && !(fPageGst & X86_PTE_US)) )
635 {
636 Log(("PGMVerifyAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPageGst, fWrite, fUser));
637 return VINF_EM_RAW_GUEST_TRAP;
638 }
639
640 if (!pVM->pgm.s.fNestedPaging)
641 {
642 /*
643 * Next step is to verify if we protected this page for dirty bit tracking or for CSAM scanning
644 */
645 rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, NULL, NULL);
646 if ( rc == VERR_PAGE_NOT_PRESENT
647 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
648 {
649 /*
650 * Page is not present in our page tables.
651 * Try to sync it!
652 */
653 Assert(X86_TRAP_PF_RW == X86_PTE_RW && X86_TRAP_PF_US == X86_PTE_US);
654 uint32_t uErr = fAccess & (X86_TRAP_PF_RW | X86_TRAP_PF_US);
655 rc = PGM_BTH_PFN(VerifyAccessSyncPage, pVCpu)(pVCpu, Addr, fPageGst, uErr);
656 if (rc != VINF_SUCCESS)
657 return rc;
658 }
659 else
660 AssertMsg(rc == VINF_SUCCESS, ("PGMShwGetPage %RGv failed with %Rrc\n", Addr, rc));
661 }
662
663#if 0 /* def VBOX_STRICT; triggers too often now */
664 /*
665 * This check is a bit paranoid, but useful.
666 */
667 /* Note! This will assert when writing to monitored pages (a bit annoying actually). */
668 uint64_t fPageShw;
669 rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, &fPageShw, NULL);
670 if ( (rc == VERR_PAGE_NOT_PRESENT || RT_FAILURE(rc))
671 || (fWrite && !(fPageShw & X86_PTE_RW))
672 || (fUser && !(fPageShw & X86_PTE_US)) )
673 {
674 AssertMsgFailed(("Unexpected access violation for %RGv! rc=%Rrc write=%d user=%d\n",
675 Addr, rc, fWrite && !(fPageShw & X86_PTE_RW), fUser && !(fPageShw & X86_PTE_US)));
676 return VINF_EM_RAW_GUEST_TRAP;
677 }
678#endif
679
680 if ( RT_SUCCESS(rc)
681 && ( PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize - 1)
682 || Addr + cbSize < Addr))
683 {
684 /* Don't recursively call PGMVerifyAccess as we might run out of stack. */
685 for (;;)
686 {
687 Addr += PAGE_SIZE;
688 if (cbSize > PAGE_SIZE)
689 cbSize -= PAGE_SIZE;
690 else
691 cbSize = 1;
692 rc = PGMVerifyAccess(pVCpu, Addr, 1, fAccess);
693 if (rc != VINF_SUCCESS)
694 break;
695 if (PAGE_ADDRESS(Addr) == PAGE_ADDRESS(Addr + cbSize - 1))
696 break;
697 }
698 }
699 return rc;
700}
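/*
 * Illustrative sketch (editor's addition): validating a guest user-mode write
 * of a whole range before emulating it. The helper name is hypothetical;
 * PGMVerifyAccess() and the X86_PTE_* access bits are as documented above.
 */
#if 0 /* example only */
static bool pgmSampleIsUserWriteOk(PVMCPU pVCpu, RTGCPTR GCPtrDst, uint32_t cbWrite)
{
    /* X86_PTE_US | X86_PTE_RW requests a user-mode write check of the whole range. */
    return PGMVerifyAccess(pVCpu, GCPtrDst, cbWrite, X86_PTE_US | X86_PTE_RW) == VINF_SUCCESS;
}
#endif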
701
702
703/**
704 * Emulation of the invlpg instruction (HC only actually).
705 *
706 * @returns Strict VBox status code, special care required.
707 * @retval VINF_PGM_SYNC_CR3 - handled.
708 * @retval VINF_EM_RAW_EMULATE_INSTR - not handled (RC only).
709 * @retval VERR_REM_FLUSHED_PAGES_OVERFLOW - not handled.
710 *
711 * @param pVCpu Pointer to the VMCPU.
712 * @param GCPtrPage Page to invalidate.
713 *
714 * @remark ASSUMES the page table entry or page directory is valid. Fairly
715 * safe, but there could be edge cases!
716 *
717 * @todo Flush page or page directory only if necessary!
718 * @todo VBOXSTRICTRC
719 */
720VMMDECL(int) PGMInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
721{
722 PVM pVM = pVCpu->CTX_SUFF(pVM);
723 int rc;
724 Log3(("PGMInvalidatePage: GCPtrPage=%RGv\n", GCPtrPage));
725
726#if !defined(IN_RING3) && defined(VBOX_WITH_REM)
727 /*
728 * Notify the recompiler so it can record this instruction.
729 */
730 REMNotifyInvalidatePage(pVM, GCPtrPage);
731#endif /* !IN_RING3 */
732
733
734#ifdef IN_RC
735 /*
736 * Check for conflicts and pending CR3 monitoring updates.
737 */
738 if (pgmMapAreMappingsFloating(pVM))
739 {
740 if ( pgmGetMapping(pVM, GCPtrPage)
741 && PGMGstGetPage(pVCpu, GCPtrPage, NULL, NULL) != VERR_PAGE_TABLE_NOT_PRESENT)
742 {
743 LogFlow(("PGMGCInvalidatePage: Conflict!\n"));
744 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
745 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRCInvlPgConflict);
746 return VINF_PGM_SYNC_CR3;
747 }
748
749 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
750 {
751 LogFlow(("PGMGCInvalidatePage: PGM_SYNC_MONITOR_CR3 -> reinterpret instruction in R3\n"));
752 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRCInvlPgSyncMonCR3);
753 return VINF_EM_RAW_EMULATE_INSTR;
754 }
755 }
756#endif /* IN_RC */
757
758 /*
759 * Call paging mode specific worker.
760 */
761 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InvalidatePage), a);
762 pgmLock(pVM);
763 rc = PGM_BTH_PFN(InvalidatePage, pVCpu)(pVCpu, GCPtrPage);
764 pgmUnlock(pVM);
765 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InvalidatePage), a);
766
767#ifdef IN_RING3
768 /*
769 * Check if we have a pending update of the CR3 monitoring.
770 */
771 if ( RT_SUCCESS(rc)
772 && (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3))
773 {
774 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
775 Assert(!pVM->pgm.s.fMappingsFixed); Assert(pgmMapAreMappingsEnabled(pVM));
776 }
777
778# ifdef VBOX_WITH_RAW_MODE
779 /*
780 * Inform CSAM about the flush
781 *
782 * Note: This is to check if monitored pages have been changed; when we implement
783 * callbacks for virtual handlers, this will no longer be required.
784 */
785 CSAMR3FlushPage(pVM, GCPtrPage);
786# endif
787#endif /* IN_RING3 */
788
789 /* Ignore all irrelevant error codes. */
790 if ( rc == VERR_PAGE_NOT_PRESENT
791 || rc == VERR_PAGE_TABLE_NOT_PRESENT
792 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT
793 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)
794 rc = VINF_SUCCESS;
795
796 return rc;
797}
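/*
 * Illustrative sketch (editor's addition): an INVLPG emulation path calling
 * PGMInvalidatePage(). The wrapper is hypothetical; the status handling
 * follows the @retval list above.
 */
#if 0 /* example only */
static VBOXSTRICTRC pgmSampleEmulateInvlpg(PVMCPU pVCpu, RTGCPTR GCPtrPage)
{
    int rc = PGMInvalidatePage(pVCpu, GCPtrPage);
    /* VINF_PGM_SYNC_CR3 means the force-action flag is already set and a full
       shadow sync will happen before resuming guest code; both that status and
       VINF_SUCCESS simply let execution continue. */
    AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || rc == VINF_EM_RAW_EMULATE_INSTR, ("%Rrc\n", rc));
    return rc;
}
#endif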
798
799
800/**
801 * Executes an instruction using the interpreter.
802 *
803 * @returns VBox status code (appropriate for trap handling and GC return).
804 * @param pVM Pointer to the VM.
805 * @param pVCpu Pointer to the VMCPU.
806 * @param pRegFrame Register frame.
807 * @param pvFault Fault address.
808 */
809VMMDECL(VBOXSTRICTRC) PGMInterpretInstruction(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
810{
811 VBOXSTRICTRC rc = EMInterpretInstruction(pVCpu, pRegFrame, pvFault);
812 if (rc == VERR_EM_INTERPRETER)
813 rc = VINF_EM_RAW_EMULATE_INSTR;
814 if (rc != VINF_SUCCESS)
815 Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%RGv)\n", VBOXSTRICTRC_VAL(rc), pvFault));
816 return rc;
817}
818
819
820/**
821 * Gets effective page information (from the VMM page directory).
822 *
823 * @returns VBox status.
824 * @param pVCpu Pointer to the VMCPU.
825 * @param GCPtr Guest Context virtual address of the page.
826 * @param pfFlags Where to store the flags. These are X86_PTE_*.
827 * @param pHCPhys Where to store the HC physical address of the page.
828 * This is page aligned.
829 * @remark You should use PGMMapGetPage() for pages in a mapping.
830 */
831VMMDECL(int) PGMShwGetPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
832{
833 pgmLock(pVCpu->CTX_SUFF(pVM));
834 int rc = PGM_SHW_PFN(GetPage, pVCpu)(pVCpu, GCPtr, pfFlags, pHCPhys);
835 pgmUnlock(pVCpu->CTX_SUFF(pVM));
836 return rc;
837}
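/*
 * Illustrative sketch (editor's addition): querying the shadow paging
 * structures to see whether a guest address is currently mapped writable in
 * the shadow tables. The helper name is hypothetical; PGMShwGetPage() is the
 * API documented above.
 */
#if 0 /* example only */
static bool pgmSampleIsShadowWritable(PVMCPU pVCpu, RTGCPTR GCPtr)
{
    uint64_t fFlags = 0;
    RTHCPHYS HCPhys = NIL_RTHCPHYS;
    int rc = PGMShwGetPage(pVCpu, GCPtr, &fFlags, &HCPhys);
    return RT_SUCCESS(rc)
        && (fFlags & X86_PTE_P)
        && (fFlags & X86_PTE_RW);
}
#endif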
838
839
840/**
841 * Modify page flags for a range of pages in the shadow context.
842 *
843 * The existing flags are ANDed with the fMask and ORed with the fFlags.
844 *
845 * @returns VBox status code.
846 * @param pVCpu Pointer to the VMCPU.
847 * @param GCPtr Virtual address of the first page in the range.
848 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
849 * @param fMask The AND mask - page flags X86_PTE_*.
850 * Be very CAREFUL when ~'ing constants which could be 32-bit!
851 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
852 * @remark You must use PGMMapModifyPage() for pages in a mapping.
853 */
854DECLINLINE(int) pdmShwModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags)
855{
856 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
857 Assert(!(fOpFlags & ~(PGM_MK_PG_IS_MMIO2 | PGM_MK_PG_IS_WRITE_FAULT)));
858
859 GCPtr &= PAGE_BASE_GC_MASK; /** @todo this ain't necessary, right... */
860
861 PVM pVM = pVCpu->CTX_SUFF(pVM);
862 pgmLock(pVM);
863 int rc = PGM_SHW_PFN(ModifyPage, pVCpu)(pVCpu, GCPtr, PAGE_SIZE, fFlags, fMask, fOpFlags);
864 pgmUnlock(pVM);
865 return rc;
866}
867
868
869/**
870 * Changes the page flags for a single page in the shadow page tables so as to
871 * make it read-only.
872 *
873 * @returns VBox status code.
874 * @param pVCpu Pointer to the VMCPU.
875 * @param GCPtr Virtual address of the first page in the range.
876 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
877 */
878VMMDECL(int) PGMShwMakePageReadonly(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
879{
880 return pdmShwModifyPage(pVCpu, GCPtr, 0, ~(uint64_t)X86_PTE_RW, fOpFlags);
881}
882
883
884/**
885 * Changes the page flags for a single page in the shadow page tables so as to
886 * make it writable.
887 *
888 * The caller must know with 101% certainty that the guest page tables map this
889 * page as writable too. This function will deal with shared, zero and write
890 * monitored pages.
891 *
892 * @returns VBox status code.
893 * @param pVCpu Pointer to the VMCPU.
894 * @param GCPtr Virtual address of the first page in the range.
895 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags, e.g.
896 * PGM_MK_PG_IS_MMIO2 if it is an MMIO2 page.
897 */
898VMMDECL(int) PGMShwMakePageWritable(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
899{
900 return pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)0, fOpFlags);
901}
902
903
904/**
905 * Changes the page flags for a single page in the shadow page tables so as to
906 * make it not present.
907 *
908 * @returns VBox status code.
909 * @param pVCpu Pointer to the VMCPU.
910 * @param GCPtr Virtual address of the first page in the range.
911 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
912 */
913VMMDECL(int) PGMShwMakePageNotPresent(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
914{
915 return pdmShwModifyPage(pVCpu, GCPtr, 0, 0, fOpFlags);
916}
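/*
 * Editor's note (illustrative, not part of the original source): the three
 * wrappers above all funnel into pdmShwModifyPage() and differ only in the
 * (fFlags, fMask) pair, i.e. the bits ORed in and the bits kept:
 *
 *      read-only:    fFlags = 0,           fMask = ~X86_PTE_RW   (clear RW)
 *      writable:     fFlags = X86_PTE_RW,  fMask = ~0            (set RW)
 *      not-present:  fFlags = 0,           fMask = 0             (clear everything)
 *
 * A hypothetical "clear accessed bit" helper would thus be:
 *      pdmShwModifyPage(pVCpu, GCPtr, 0, ~(uint64_t)X86_PTE_A, 0);
 */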
917
918
919/**
920 * Changes the page flags for a single page in the shadow page tables so as to
921 * make it supervisor and writable.
922 *
923 * This is for dealing with CR0.WP=0 and read-only user pages.
924 *
925 * @returns VBox status code.
926 * @param pVCpu Pointer to the VMCPU.
927 * @param GCPtr Virtual address of the first page in the range.
928 * @param fBigPage Whether or not this is a big page. If it is, we have to
929 * change the shadow PDE as well. If it isn't, the caller
930 * has checked that the shadow PDE doesn't need changing.
931 * We ASSUME 4KB pages backing the big page here!
932 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
933 */
934int pgmShwMakePageSupervisorAndWritable(PVMCPU pVCpu, RTGCPTR GCPtr, bool fBigPage, uint32_t fOpFlags)
935{
936 int rc = pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)X86_PTE_US, fOpFlags);
937 if (rc == VINF_SUCCESS && fBigPage)
938 {
939 /* this is a bit ugly... */
940 switch (pVCpu->pgm.s.enmShadowMode)
941 {
942 case PGMMODE_32_BIT:
943 {
944 PX86PDE pPde = pgmShwGet32BitPDEPtr(pVCpu, GCPtr);
945 AssertReturn(pPde, VERR_INTERNAL_ERROR_3);
946 Log(("pgmShwMakePageSupervisorAndWritable: PDE=%#llx", pPde->u));
947 pPde->n.u1Write = 1;
948 Log(("-> PDE=%#llx (32)\n", pPde->u));
949 break;
950 }
951 case PGMMODE_PAE:
952 case PGMMODE_PAE_NX:
953 {
954 PX86PDEPAE pPde = pgmShwGetPaePDEPtr(pVCpu, GCPtr);
955 AssertReturn(pPde, VERR_INTERNAL_ERROR_3);
956 Log(("pgmShwMakePageSupervisorAndWritable: PDE=%#llx", pPde->u));
957 pPde->n.u1Write = 1;
958 Log(("-> PDE=%#llx (PAE)\n", pPde->u));
959 break;
960 }
961 default:
962 AssertFailedReturn(VERR_INTERNAL_ERROR_4);
963 }
964 }
965 return rc;
966}
967
968
969/**
970 * Syncs the shadow page directory for the specified address, PAE.
971 *
972 * @returns VBox status code.
973 * @param pVCpu Pointer to the VMCPU.
974 * @param GCPtr The address.
975 * @param uGstPdpe Guest PDPT entry. Valid.
976 * @param ppPD Receives address of page directory
977 */
978int pgmShwSyncPaePDPtr(PVMCPU pVCpu, RTGCPTR GCPtr, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
979{
980 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
981 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
982 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
983 PVM pVM = pVCpu->CTX_SUFF(pVM);
984 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
985 PPGMPOOLPAGE pShwPage;
986 int rc;
987
988 PGM_LOCK_ASSERT_OWNER(pVM);
989
990 /* Allocate page directory if not present. */
991 if ( !pPdpe->n.u1Present
992 && !(pPdpe->u & X86_PDPE_PG_MASK))
993 {
994 RTGCPTR64 GCPdPt;
995 PGMPOOLKIND enmKind;
996
997 if (pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu))
998 {
999 /* AMD-V nested paging or real/protected mode without paging. */
1000 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
1001 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
1002 }
1003 else
1004 {
1005 if (CPUMGetGuestCR4(pVCpu) & X86_CR4_PAE)
1006 {
1007 if (!(uGstPdpe & X86_PDPE_P))
1008 {
1009 /* PD not present; guest must reload CR3 to change it.
1010 * No need to monitor anything in this case.
1011 */
1012 Assert(!HMIsEnabled(pVM));
1013
1014 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1015 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
1016 uGstPdpe |= X86_PDPE_P;
1017 }
1018 else
1019 {
1020 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1021 enmKind = PGMPOOLKIND_PAE_PD_FOR_PAE_PD;
1022 }
1023 }
1024 else
1025 {
1026 GCPdPt = CPUMGetGuestCR3(pVCpu);
1027 enmKind = (PGMPOOLKIND)(PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD + iPdPt);
1028 }
1029 }
1030
1031 /* Create a reference back to the PDPT by using the index in its shadow page. */
1032 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1033 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPdPt, false /*fLockPage*/,
1034 &pShwPage);
1035 AssertRCReturn(rc, rc);
1036
1037 /* The PD was cached or created; hook it up now. */
1038 pPdpe->u |= pShwPage->Core.Key | (uGstPdpe & (X86_PDPE_P | X86_PDPE_A));
1039
1040# if defined(IN_RC)
1041 /*
1042 * In 32-bit PAE mode we *must* invalidate the TLB when changing a
1043 * PDPT entry; the CPU fetches them only during cr3 load, so any
1044 * non-present PDPT will continue to cause page faults.
1045 */
1046 ASMReloadCR3();
1047# endif
1048 PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdpe);
1049 }
1050 else
1051 {
1052 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
1053 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1054 Assert((pPdpe->u & X86_PDPE_PG_MASK) == pShwPage->Core.Key);
1055
1056 pgmPoolCacheUsed(pPool, pShwPage);
1057 }
1058 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1059 return VINF_SUCCESS;
1060}
1061
1062
1063/**
1064 * Gets the pointer to the shadow page directory entry for an address, PAE.
1065 *
1066 * @returns VBox status code.
1067 * @param pVCpu The current CPU.
1068 * @param GCPtr The address.
1069 * @param ppShwPde Receives the address of the pgm pool page for the shadow page directory
1070 */
1071DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPU pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde)
1072{
1073 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
1074 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
1075 PVM pVM = pVCpu->CTX_SUFF(pVM);
1076
1077 PGM_LOCK_ASSERT_OWNER(pVM);
1078
1079 AssertReturn(pPdpt, VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT); /* can't happen */
1080 if (!pPdpt->a[iPdPt].n.u1Present)
1081 {
1082 LogFlow(("pgmShwGetPaePoolPagePD: PD %d not present (%RX64)\n", iPdPt, pPdpt->a[iPdPt].u));
1083 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1084 }
1085 AssertMsg(pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK, ("GCPtr=%RGv\n", GCPtr));
1086
1087 /* Fetch the pgm pool shadow descriptor. */
1088 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
1089 AssertReturn(pShwPde, VERR_PGM_POOL_GET_PAGE_FAILED);
1090
1091 *ppShwPde = pShwPde;
1092 return VINF_SUCCESS;
1093}
1094
1095#ifndef IN_RC
1096
1097/**
1098 * Syncs the SHADOW page directory pointer for the specified address.
1099 *
1100 * Allocates backing pages in case the PDPT or PML4 entry is missing.
1101 *
1102 * The caller is responsible for making sure the guest has a valid PD before
1103 * calling this function.
1104 *
1105 * @returns VBox status.
1106 * @param pVCpu Pointer to the VMCPU.
1107 * @param GCPtr The address.
1108 * @param uGstPml4e Guest PML4 entry (valid).
1109 * @param uGstPdpe Guest PDPT entry (valid).
1110 * @param ppPD Receives address of page directory
1111 */
1112static int pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
1113{
1114 PVM pVM = pVCpu->CTX_SUFF(pVM);
1115 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1116 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1117 PX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
1118 bool fNestedPagingOrNoGstPaging = pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu);
1119 PPGMPOOLPAGE pShwPage;
1120 int rc;
1121
1122 PGM_LOCK_ASSERT_OWNER(pVM);
1123
1124 /* Allocate page directory pointer table if not present. */
1125 if ( !pPml4e->n.u1Present
1126 && !(pPml4e->u & X86_PML4E_PG_MASK))
1127 {
1128 RTGCPTR64 GCPml4;
1129 PGMPOOLKIND enmKind;
1130
1131 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1132
1133 if (fNestedPagingOrNoGstPaging)
1134 {
1135 /* AMD-V nested paging or real/protected mode without paging */
1136 GCPml4 = (RTGCPTR64)iPml4 << X86_PML4_SHIFT;
1137 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_PHYS;
1138 }
1139 else
1140 {
1141 GCPml4 = uGstPml4e & X86_PML4E_PG_MASK;
1142 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT;
1143 }
1144
1145 /* Create a reference back to the PDPT by using the index in its shadow page. */
1146 rc = pgmPoolAlloc(pVM, GCPml4, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1147 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, false /*fLockPage*/,
1148 &pShwPage);
1149 AssertRCReturn(rc, rc);
1150 }
1151 else
1152 {
1153 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
1154 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1155
1156 pgmPoolCacheUsed(pPool, pShwPage);
1157 }
1158 /* The PDPT was cached or created; hook it up now. */
1159 pPml4e->u |= pShwPage->Core.Key | (uGstPml4e & pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask);
1160
1161 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1162 PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1163 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1164
1165 /* Allocate page directory if not present. */
1166 if ( !pPdpe->n.u1Present
1167 && !(pPdpe->u & X86_PDPE_PG_MASK))
1168 {
1169 RTGCPTR64 GCPdPt;
1170 PGMPOOLKIND enmKind;
1171
1172 if (fNestedPagingOrNoGstPaging)
1173 {
1174 /* AMD-V nested paging or real/protected mode without paging */
1175 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
1176 enmKind = PGMPOOLKIND_64BIT_PD_FOR_PHYS;
1177 }
1178 else
1179 {
1180 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1181 enmKind = PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD;
1182 }
1183
1184 /* Create a reference back to the PDPT by using the index in its shadow page. */
1185 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1186 pShwPage->idx, iPdPt, false /*fLockPage*/,
1187 &pShwPage);
1188 AssertRCReturn(rc, rc);
1189 }
1190 else
1191 {
1192 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
1193 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1194
1195 pgmPoolCacheUsed(pPool, pShwPage);
1196 }
1197 /* The PD was cached or created; hook it up now. */
1198 pPdpe->u |= pShwPage->Core.Key | (uGstPdpe & pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask);
1199
1200 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1201 return VINF_SUCCESS;
1202}
1203
1204
1205/**
1206 * Gets the SHADOW page directory pointer for the specified address (long mode).
1207 *
1208 * @returns VBox status.
1209 * @param pVCpu Pointer to the VMCPU.
1210 * @param GCPtr The address.
1211 * @param ppPdpt Receives address of pdpt
1212 * @param ppPD Receives address of page directory
1213 */
1214DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
1215{
1216 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1217 PCX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
1218
1219 PGM_LOCK_ASSERT_OWNER(pVCpu->CTX_SUFF(pVM));
1220
1221 AssertReturn(pPml4e, VERR_PGM_PML4_MAPPING);
1222 if (ppPml4e)
1223 *ppPml4e = (PX86PML4E)pPml4e;
1224
1225 Log4(("pgmShwGetLongModePDPtr %RGv (%RHv) %RX64\n", GCPtr, pPml4e, pPml4e->u));
1226
1227 if (!pPml4e->n.u1Present)
1228 return VERR_PAGE_MAP_LEVEL4_NOT_PRESENT;
1229
1230 PVM pVM = pVCpu->CTX_SUFF(pVM);
1231 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1232 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
1233 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1234
1235 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1236 PCX86PDPT pPdpt = *ppPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1237 if (!pPdpt->a[iPdPt].n.u1Present)
1238 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1239
1240 pShwPage = pgmPoolGetPage(pPool, pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
1241 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1242
1243 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1244 Log4(("pgmShwGetLongModePDPtr %RGv -> *ppPD=%p PDE=%p/%RX64\n", GCPtr, *ppPD, &(*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK], (*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK].u));
1245 return VINF_SUCCESS;
1246}
1247
1248
1249/**
1250 * Syncs the SHADOW EPT page directory pointer for the specified address. Allocates
1251 * backing pages in case the PDPT or PML4 entry is missing.
1252 *
1253 * @returns VBox status.
1254 * @param pVCpu Pointer to the VMCPU.
1255 * @param GCPtr The address.
1256 * @param ppPdpt Receives address of pdpt
1257 * @param ppPD Receives address of page directory
1258 */
1259static int pgmShwGetEPTPDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD)
1260{
1261 PVM pVM = pVCpu->CTX_SUFF(pVM);
1262 const unsigned iPml4 = (GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
1263 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1264 PEPTPML4 pPml4;
1265 PEPTPML4E pPml4e;
1266 PPGMPOOLPAGE pShwPage;
1267 int rc;
1268
1269 Assert(pVM->pgm.s.fNestedPaging);
1270 PGM_LOCK_ASSERT_OWNER(pVM);
1271
1272 pPml4 = (PEPTPML4)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1273 Assert(pPml4);
1274
1275 /* Allocate page directory pointer table if not present. */
1276 pPml4e = &pPml4->a[iPml4];
1277 if ( !pPml4e->n.u1Present
1278 && !(pPml4e->u & EPT_PML4E_PG_MASK))
1279 {
1280 Assert(!(pPml4e->u & EPT_PML4E_PG_MASK));
1281 RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT;
1282
1283 rc = pgmPoolAlloc(pVM, GCPml4, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1284 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, false /*fLockPage*/,
1285 &pShwPage);
1286 AssertRCReturn(rc, rc);
1287 }
1288 else
1289 {
1290 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
1291 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1292
1293 pgmPoolCacheUsed(pPool, pShwPage);
1294 }
1295 /* The PDPT was cached or created; hook it up now and fill with the default value. */
1296 pPml4e->u = pShwPage->Core.Key;
1297 pPml4e->n.u1Present = 1;
1298 pPml4e->n.u1Write = 1;
1299 pPml4e->n.u1Execute = 1;
1300
1301 const unsigned iPdPt = (GCPtr >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
1302 PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1303 PEPTPDPTE pPdpe = &pPdpt->a[iPdPt];
1304
1305 if (ppPdpt)
1306 *ppPdpt = pPdpt;
1307
1308 /* Allocate page directory if not present. */
1309 if ( !pPdpe->n.u1Present
1310 && !(pPdpe->u & EPT_PDPTE_PG_MASK))
1311 {
1312 RTGCPTR64 GCPdPt = (RTGCPTR64)iPdPt << EPT_PDPT_SHIFT;
1313 rc = pgmPoolAlloc(pVM, GCPdPt, PGMPOOLKIND_EPT_PD_FOR_PHYS, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1314 pShwPage->idx, iPdPt, false /*fLockPage*/,
1315 &pShwPage);
1316 AssertRCReturn(rc, rc);
1317 }
1318 else
1319 {
1320 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & EPT_PDPTE_PG_MASK);
1321 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1322
1323 pgmPoolCacheUsed(pPool, pShwPage);
1324 }
1325 /* The PD was cached or created; hook it up now and fill with the default value. */
1326 pPdpe->u = pShwPage->Core.Key;
1327 pPdpe->n.u1Present = 1;
1328 pPdpe->n.u1Write = 1;
1329 pPdpe->n.u1Execute = 1;
1330
1331 *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1332 return VINF_SUCCESS;
1333}
1334
1335#endif /* IN_RC */
1336
1337#ifdef IN_RING0
1338/**
1339 * Synchronizes a range of nested page table entries.
1340 *
1341 * The caller must own the PGM lock.
1342 *
1343 * @param pVCpu The current CPU.
1344 * @param GCPhysFault Where to start.
1345 * @param cPages The number of pages whose entries should be synced.
1346 * @param enmShwPagingMode The shadow paging mode (PGMMODE_EPT for VT-x,
1347 * host paging mode for AMD-V).
1348 */
1349int pgmShwSyncNestedPageLocked(PVMCPU pVCpu, RTGCPHYS GCPhysFault, uint32_t cPages, PGMMODE enmShwPagingMode)
1350{
1351 PGM_LOCK_ASSERT_OWNER(pVCpu->CTX_SUFF(pVM));
1352
1353 int rc;
1354 switch (enmShwPagingMode)
1355 {
1356 case PGMMODE_32_BIT:
1357 {
1358 X86PDE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1359 rc = PGM_BTH_NAME_32BIT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhysFault, cPages, ~0U /*uErr*/);
1360 break;
1361 }
1362
1363 case PGMMODE_PAE:
1364 case PGMMODE_PAE_NX:
1365 {
1366 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1367 rc = PGM_BTH_NAME_PAE_PROT(SyncPage)(pVCpu, PdeDummy, GCPhysFault, cPages, ~0U /*uErr*/);
1368 break;
1369 }
1370
1371 case PGMMODE_AMD64:
1372 case PGMMODE_AMD64_NX:
1373 {
1374 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1375 rc = PGM_BTH_NAME_AMD64_PROT(SyncPage)(pVCpu, PdeDummy, GCPhysFault, cPages, ~0U /*uErr*/);
1376 break;
1377 }
1378
1379 case PGMMODE_EPT:
1380 {
1381 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1382 rc = PGM_BTH_NAME_EPT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhysFault, cPages, ~0U /*uErr*/);
1383 break;
1384 }
1385
1386 default:
1387 AssertMsgFailedReturn(("%d\n", enmShwPagingMode), VERR_IPE_NOT_REACHED_DEFAULT_CASE);
1388 }
1389 return rc;
1390}
1391#endif /* IN_RING0 */
1392
1393
1394/**
1395 * Gets effective Guest OS page information.
1396 *
1397 * When GCPtr is in a big page, the function will return as if it was a normal
1398 * 4KB page. Should it become necessary to distinguish between big and normal
1399 * pages at a later point, a separate API will be created for that
1400 * purpose.
1401 *
1402 * @returns VBox status.
1403 * @param pVCpu The current CPU.
1404 * @param GCPtr Guest Context virtual address of the page.
1405 * @param pfFlags Where to store the flags. These are X86_PTE_*, even for big pages.
1406 * @param pGCPhys Where to store the GC physical address of the page.
1407 * This is page aligned.
1408 */
1409VMMDECL(int) PGMGstGetPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
1410{
1411 VMCPU_ASSERT_EMT(pVCpu);
1412 return PGM_GST_PFN(GetPage, pVCpu)(pVCpu, GCPtr, pfFlags, pGCPhys);
1413}
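/*
 * Illustrative sketch (editor's addition): translating a guest-virtual address
 * to a guest-physical one and checking guest-level writability. The helper
 * name is hypothetical; PGMGstGetPage() is the API documented above.
 */
#if 0 /* example only */
static int pgmSampleGstQueryWritable(PVMCPU pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
{
    uint64_t fFlags = 0;
    int rc = PGMGstGetPage(pVCpu, GCPtr, &fFlags, pGCPhys);
    if (RT_FAILURE(rc))
        return rc;                          /* Not present / not mapped. */
    if (!(fFlags & X86_PTE_RW))
        return VERR_ACCESS_DENIED;          /* Mapped, but read-only for the guest. */
    *pGCPhys |= GCPtr & PAGE_OFFSET_MASK;   /* pGCPhys is page aligned; add the offset. */
    return VINF_SUCCESS;
}
#endif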
1414
1415
1416/**
1417 * Performs a guest page table walk.
1418 *
1419 * The guest should be in paged protected mode or long mode when making a call to
1420 * this function.
1421 *
1422 * @returns VBox status code.
1423 * @retval VINF_SUCCESS on success.
1424 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
1425 * @retval VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
1426 * not valid, except that enmType is set to PGMPTWALKGSTTYPE_INVALID.
1427 *
1428 * @param pVCpu The current CPU.
1429 * @param GCPtr The guest virtual address to walk by.
1430 * @param pWalk Where to return the walk result. This is valid on some
1431 * error codes as well.
1432 */
1433int pgmGstPtWalk(PVMCPU pVCpu, RTGCPTR GCPtr, PPGMPTWALKGST pWalk)
1434{
1435 VMCPU_ASSERT_EMT(pVCpu);
1436 switch (pVCpu->pgm.s.enmGuestMode)
1437 {
1438 case PGMMODE_32_BIT:
1439 pWalk->enmType = PGMPTWALKGSTTYPE_32BIT;
1440 return PGM_GST_NAME_32BIT(Walk)(pVCpu, GCPtr, &pWalk->u.Legacy);
1441
1442 case PGMMODE_PAE:
1443 case PGMMODE_PAE_NX:
1444 pWalk->enmType = PGMPTWALKGSTTYPE_PAE;
1445 return PGM_GST_NAME_PAE(Walk)(pVCpu, GCPtr, &pWalk->u.Pae);
1446
1447#if !defined(IN_RC)
1448 case PGMMODE_AMD64:
1449 case PGMMODE_AMD64_NX:
1450 pWalk->enmType = PGMPTWALKGSTTYPE_AMD64;
1451 return PGM_GST_NAME_AMD64(Walk)(pVCpu, GCPtr, &pWalk->u.Amd64);
1452#endif
1453
1454 case PGMMODE_REAL:
1455 case PGMMODE_PROTECTED:
1456 pWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
1457 return VERR_PGM_NOT_USED_IN_MODE;
1458
1459#if defined(IN_RC)
1460 case PGMMODE_AMD64:
1461 case PGMMODE_AMD64_NX:
1462#endif
1463 case PGMMODE_NESTED:
1464 case PGMMODE_EPT:
1465 default:
1466 AssertFailed();
1467 pWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
1468 return VERR_PGM_NOT_USED_IN_MODE;
1469 }
1470}
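/*
 * Illustrative sketch (editor's addition): a cautious internal caller of
 * pgmGstPtWalk(). Only rc and pWalk->enmType are relied upon here since the
 * per-mode union members differ; digging mode-specific fields out of the walk
 * result is deliberately left out.
 */
#if 0 /* example only */
static bool pgmSampleCanWalk(PVMCPU pVCpu, RTGCPTR GCPtr)
{
    PGMPTWALKGST Walk;
    RT_ZERO(Walk);
    int rc = pgmGstPtWalk(pVCpu, GCPtr, &Walk);
    if (rc == VERR_PGM_NOT_USED_IN_MODE)
        return false;                       /* Real/protected mode without paging. */
    return RT_SUCCESS(rc) && Walk.enmType != PGMPTWALKGSTTYPE_INVALID;
}
#endif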
1471
1472
1473/**
1474 * Checks if the page is present.
1475 *
1476 * @returns true if the page is present.
1477 * @returns false if the page is not present.
1478 * @param pVCpu Pointer to the VMCPU.
1479 * @param GCPtr Address within the page.
1480 */
1481VMMDECL(bool) PGMGstIsPagePresent(PVMCPU pVCpu, RTGCPTR GCPtr)
1482{
1483 VMCPU_ASSERT_EMT(pVCpu);
1484 int rc = PGMGstGetPage(pVCpu, GCPtr, NULL, NULL);
1485 return RT_SUCCESS(rc);
1486}
1487
1488
1489/**
1490 * Sets (replaces) the page flags for a range of pages in the guest's tables.
1491 *
1492 * @returns VBox status.
1493 * @param pVCpu Pointer to the VMCPU.
1494 * @param GCPtr The address of the first page.
1495 * @param cb The size of the range in bytes.
1496 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
1497 */
1498VMMDECL(int) PGMGstSetPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
1499{
1500 VMCPU_ASSERT_EMT(pVCpu);
1501 return PGMGstModifyPage(pVCpu, GCPtr, cb, fFlags, 0);
1502}
1503
1504
1505/**
1506 * Modify page flags for a range of pages in the guest's tables
1507 *
1508 * The existing flags are ANDed with the fMask and ORed with the fFlags.
1509 *
1510 * @returns VBox status code.
1511 * @param pVCpu Pointer to the VMCPU.
1512 * @param GCPtr Virtual address of the first page in the range.
1513 * @param cb Size (in bytes) of the range to apply the modification to.
1514 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
1515 * @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
1516 * Be very CAREFUL when ~'ing constants which could be 32-bit!
1517 */
1518VMMDECL(int) PGMGstModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
1519{
1520 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,GstModifyPage), a);
1521 VMCPU_ASSERT_EMT(pVCpu);
1522
1523 /*
1524 * Validate input.
1525 */
1526 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
1527 Assert(cb);
1528
1529 LogFlow(("PGMGstModifyPage %RGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));
1530
1531 /*
1532 * Adjust input.
1533 */
1534 cb += GCPtr & PAGE_OFFSET_MASK;
1535 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
1536 GCPtr = (GCPtr & PAGE_BASE_GC_MASK);
1537
1538 /*
1539 * Call worker.
1540 */
1541 int rc = PGM_GST_PFN(ModifyPage, pVCpu)(pVCpu, GCPtr, cb, fFlags, fMask);
1542
1543 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,GstModifyPage), a);
1544 return rc;
1545}
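/*
 * Illustrative sketch (editor's addition): write-protecting a range in the
 * guest's own page tables using the AND/OR scheme described above (fFlags is
 * ORed in, fMask is ANDed). The helper name is hypothetical.
 */
#if 0 /* example only */
static int pgmSampleGstWriteProtect(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb)
{
    /* OR in nothing, AND away the R/W bit; note the 64-bit ~ to avoid truncation. */
    return PGMGstModifyPage(pVCpu, GCPtr, cb, 0, ~(uint64_t)X86_PTE_RW);
}
#endif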
1546
1547
1548#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1549
1550/**
1551 * Performs the lazy mapping of the 32-bit guest PD.
1552 *
1553 * @returns VBox status code.
1554 * @param pVCpu The current CPU.
1555 * @param ppPd Where to return the pointer to the mapping. This is
1556 * always set.
1557 */
1558int pgmGstLazyMap32BitPD(PVMCPU pVCpu, PX86PD *ppPd)
1559{
1560 PVM pVM = pVCpu->CTX_SUFF(pVM);
1561 pgmLock(pVM);
1562
1563 Assert(!pVCpu->pgm.s.CTX_SUFF(pGst32BitPd));
1564
1565 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAGE_MASK;
1566 PPGMPAGE pPage;
1567 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
1568 if (RT_SUCCESS(rc))
1569 {
1570 RTHCPTR HCPtrGuestCR3;
1571 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
1572 if (RT_SUCCESS(rc))
1573 {
1574 pVCpu->pgm.s.pGst32BitPdR3 = (R3PTRTYPE(PX86PD))HCPtrGuestCR3;
1575# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1576 pVCpu->pgm.s.pGst32BitPdR0 = (R0PTRTYPE(PX86PD))HCPtrGuestCR3;
1577# endif
1578 *ppPd = (PX86PD)HCPtrGuestCR3;
1579
1580 pgmUnlock(pVM);
1581 return VINF_SUCCESS;
1582 }
1583
1584 AssertRC(rc);
1585 }
1586 pgmUnlock(pVM);
1587
1588 *ppPd = NULL;
1589 return rc;
1590}
1591
1592
1593/**
1594 * Performs the lazy mapping of the PAE guest PDPT.
1595 *
1596 * @returns VBox status code.
1597 * @param pVCpu The current CPU.
1598 * @param ppPdpt Where to return the pointer to the mapping. This is
1599 * always set.
1600 */
1601int pgmGstLazyMapPaePDPT(PVMCPU pVCpu, PX86PDPT *ppPdpt)
1602{
1603 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt));
1604 PVM pVM = pVCpu->CTX_SUFF(pVM);
1605 pgmLock(pVM);
1606
1607 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAE_PAGE_MASK;
1608 PPGMPAGE pPage;
1609 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
1610 if (RT_SUCCESS(rc))
1611 {
1612 RTHCPTR HCPtrGuestCR3;
1613 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
1614 if (RT_SUCCESS(rc))
1615 {
1616 pVCpu->pgm.s.pGstPaePdptR3 = (R3PTRTYPE(PX86PDPT))HCPtrGuestCR3;
1617# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1618 pVCpu->pgm.s.pGstPaePdptR0 = (R0PTRTYPE(PX86PDPT))HCPtrGuestCR3;
1619# endif
1620 *ppPdpt = (PX86PDPT)HCPtrGuestCR3;
1621
1622 pgmUnlock(pVM);
1623 return VINF_SUCCESS;
1624 }
1625
1626 AssertRC(rc);
1627 }
1628
1629 pgmUnlock(pVM);
1630 *ppPdpt = NULL;
1631 return rc;
1632}
1633
1634
1635/**
1636 * Performs the lazy mapping / updating of a PAE guest PD.
1637 *
1638 * @returns VBox status code. On success the pointer to the mapping is
1639 * returned via @a ppPd.
1640 * @param pVCpu The current CPU.
1641 * @param iPdpt Which PD entry to map (0..3).
1642 * @param ppPd Where to return the pointer to the mapping. This is
1643 * always set.
1644 */
1645int pgmGstLazyMapPaePD(PVMCPU pVCpu, uint32_t iPdpt, PX86PDPAE *ppPd)
1646{
1647 PVM pVM = pVCpu->CTX_SUFF(pVM);
1648 pgmLock(pVM);
1649
1650 PX86PDPT pGuestPDPT = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
1651 Assert(pGuestPDPT);
1652 Assert(pGuestPDPT->a[iPdpt].n.u1Present);
1653 RTGCPHYS GCPhys = pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK;
1654 bool const fChanged = pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] != GCPhys;
1655
1656 PPGMPAGE pPage;
1657 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
1658 if (RT_SUCCESS(rc))
1659 {
1660 RTRCPTR RCPtr = NIL_RTRCPTR;
1661 RTHCPTR HCPtr = NIL_RTHCPTR;
1662#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1663 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, &HCPtr);
1664 AssertRC(rc);
1665#endif
1666 if (RT_SUCCESS(rc) && fChanged)
1667 {
1668 RCPtr = (RTRCPTR)(RTRCUINTPTR)(pVM->pgm.s.GCPtrCR3Mapping + (1 + iPdpt) * PAGE_SIZE);
1669 rc = PGMMap(pVM, (RTRCUINTPTR)RCPtr, PGM_PAGE_GET_HCPHYS(pPage), PAGE_SIZE, 0);
1670 }
1671 if (RT_SUCCESS(rc))
1672 {
1673 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = (R3PTRTYPE(PX86PDPAE))HCPtr;
1674# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1675 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = (R0PTRTYPE(PX86PDPAE))HCPtr;
1676# endif
1677 if (fChanged)
1678 {
1679 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = GCPhys;
1680 pVCpu->pgm.s.apGstPaePDsRC[iPdpt] = (RCPTRTYPE(PX86PDPAE))RCPtr;
1681 }
1682
1683 *ppPd = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
1684 pgmUnlock(pVM);
1685 return VINF_SUCCESS;
1686 }
1687 }
1688
1689 /* Invalid page or some failure, invalidate the entry. */
1690 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = NIL_RTGCPHYS;
1691 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = 0;
1692# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1693 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = 0;
1694# endif
1695 pVCpu->pgm.s.apGstPaePDsRC[iPdpt] = 0;
1696
1697 pgmUnlock(pVM);
1698 return rc;
1699}
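
/*
 * An illustrative sketch (following the PAE getter pattern in PGMInline.h) of
 * the index calculation and remap check a caller performs before using the
 * helper above.  It assumes the PDPE has already been verified to be present;
 * the wrapper name exampleGetPaePD is hypothetical.
 *
 *     DECLINLINE(PX86PDPAE) exampleGetPaePD(PVMCPU pVCpu, RTGCPTR GCPtr)
 *     {
 *         unsigned const iPdpt    = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
 *         PX86PDPT       pPdpt    = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
 *         PX86PDPAE      pGuestPD = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
 *         if (   !pGuestPD
 *             || (pPdpt->a[iPdpt].u & X86_PDPE_PG_MASK) != pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt])
 *             pgmGstLazyMapPaePD(pVCpu, iPdpt, &pGuestPD);   // remap on miss or address change
 *         return pGuestPD;
 *     }
 */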
1700
1701#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1702#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1703/**
 1704 * Performs the lazy mapping of the AMD64 guest PML4 table.
1705 *
1706 * @returns VBox status code.
1707 * @param pVCpu The current CPU.
1708 * @param ppPml4 Where to return the pointer to the mapping. This will
1709 * always be set.
1710 */
1711int pgmGstLazyMapPml4(PVMCPU pVCpu, PX86PML4 *ppPml4)
1712{
1713 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4));
1714 PVM pVM = pVCpu->CTX_SUFF(pVM);
1715 pgmLock(pVM);
1716
1717 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_AMD64_PAGE_MASK;
1718 PPGMPAGE pPage;
1719 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
1720 if (RT_SUCCESS(rc))
1721 {
1722 RTHCPTR HCPtrGuestCR3;
1723 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
1724 if (RT_SUCCESS(rc))
1725 {
1726 pVCpu->pgm.s.pGstAmd64Pml4R3 = (R3PTRTYPE(PX86PML4))HCPtrGuestCR3;
1727# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1728 pVCpu->pgm.s.pGstAmd64Pml4R0 = (R0PTRTYPE(PX86PML4))HCPtrGuestCR3;
1729# endif
1730 *ppPml4 = (PX86PML4)HCPtrGuestCR3;
1731
1732 pgmUnlock(pVM);
1733 return VINF_SUCCESS;
1734 }
1735 }
1736
1737 pgmUnlock(pVM);
1738 *ppPml4 = NULL;
1739 return rc;
1740}
1741#endif
1742
1743
1744/**
 1745 * Gets the PAE PDPE values cached by the CPU.
1746 *
1747 * @returns VBox status code.
1748 * @param pVCpu Pointer to the VMCPU.
1749 * @param paPdpes Where to return the four PDPEs. The array
1750 * pointed to must have 4 entries.
1751 */
1752VMM_INT_DECL(int) PGMGstGetPaePdpes(PVMCPU pVCpu, PX86PDPE paPdpes)
1753{
1754 Assert(pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
1755
1756 paPdpes[0] = pVCpu->pgm.s.aGstPaePdpeRegs[0];
1757 paPdpes[1] = pVCpu->pgm.s.aGstPaePdpeRegs[1];
1758 paPdpes[2] = pVCpu->pgm.s.aGstPaePdpeRegs[2];
1759 paPdpes[3] = pVCpu->pgm.s.aGstPaePdpeRegs[3];
1760 return VINF_SUCCESS;
1761}
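
/*
 * A minimal usage sketch, assuming an EPT caller (e.g. the VT-x code) running a
 * PAE guest that needs the cached PDPEs for the VMCS; the VMCS write itself is
 * only indicated by a comment.
 *
 *     X86PDPE aPdpes[4];
 *     int rc = PGMGstGetPaePdpes(pVCpu, &aPdpes[0]);
 *     AssertRCReturn(rc, rc);
 *     // ... load aPdpes[0..3] into the PDPTE0..PDPTE3 fields of the VMCS ...
 */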
1762
1763
1764/**
 1765 * Sets the PAE PDPE values cached by the CPU.
1766 *
1767 * @remarks This must be called *AFTER* PGMUpdateCR3.
1768 *
1769 * @param pVCpu Pointer to the VMCPU.
1770 * @param paPdpes The four PDPE values. The array pointed to must
1771 * have exactly 4 entries.
1772 *
1773 * @remarks No-long-jump zone!!!
1774 */
1775VMM_INT_DECL(void) PGMGstUpdatePaePdpes(PVMCPU pVCpu, PCX86PDPE paPdpes)
1776{
1777 Assert(pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
1778
1779 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->pgm.s.aGstPaePdpeRegs); i++)
1780 {
1781 if (pVCpu->pgm.s.aGstPaePdpeRegs[i].u != paPdpes[i].u)
1782 {
1783 pVCpu->pgm.s.aGstPaePdpeRegs[i] = paPdpes[i];
1784
1785 /* Force lazy remapping if it changed in any way. */
1786 pVCpu->pgm.s.apGstPaePDsR3[i] = 0;
1787# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1788 pVCpu->pgm.s.apGstPaePDsR0[i] = 0;
1789# endif
1790 pVCpu->pgm.s.apGstPaePDsRC[i] = 0;
1791 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
1792 }
1793 }
1794
1795 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
1796}
1797
1798
1799/**
1800 * Gets the current CR3 register value for the shadow memory context.
1801 * @returns CR3 value.
1802 * @param pVCpu Pointer to the VMCPU.
1803 */
1804VMMDECL(RTHCPHYS) PGMGetHyperCR3(PVMCPU pVCpu)
1805{
1806 PPGMPOOLPAGE pPoolPage = pVCpu->pgm.s.CTX_SUFF(pShwPageCR3);
1807 AssertPtrReturn(pPoolPage, 0);
1808 return pPoolPage->Core.Key;
1809}
1810
1811
1812/**
1813 * Gets the current CR3 register value for the nested memory context.
1814 * @returns CR3 value.
 1815 * @param pVCpu Pointer to the VMCPU.
 * @param enmShadowMode The shadow paging mode; currently ignored.
 1816 */
1817VMMDECL(RTHCPHYS) PGMGetNestedCR3(PVMCPU pVCpu, PGMMODE enmShadowMode)
1818{
1819 NOREF(enmShadowMode);
1820 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1821 return pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key;
1822}
1823
1824
1825/**
1826 * Gets the current CR3 register value for the HC intermediate memory context.
1827 * @returns CR3 value.
1828 * @param pVM Pointer to the VM.
1829 */
1830VMMDECL(RTHCPHYS) PGMGetInterHCCR3(PVM pVM)
1831{
1832 switch (pVM->pgm.s.enmHostMode)
1833 {
1834 case SUPPAGINGMODE_32_BIT:
1835 case SUPPAGINGMODE_32_BIT_GLOBAL:
1836 return pVM->pgm.s.HCPhysInterPD;
1837
1838 case SUPPAGINGMODE_PAE:
1839 case SUPPAGINGMODE_PAE_GLOBAL:
1840 case SUPPAGINGMODE_PAE_NX:
1841 case SUPPAGINGMODE_PAE_GLOBAL_NX:
1842 return pVM->pgm.s.HCPhysInterPaePDPT;
1843
1844 case SUPPAGINGMODE_AMD64:
1845 case SUPPAGINGMODE_AMD64_GLOBAL:
1846 case SUPPAGINGMODE_AMD64_NX:
1847 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
1848 return pVM->pgm.s.HCPhysInterPaePDPT;
1849
1850 default:
1851 AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode));
1852 return NIL_RTHCPHYS;
1853 }
1854}
1855
1856
1857/**
1858 * Gets the current CR3 register value for the RC intermediate memory context.
1859 * @returns CR3 value.
1860 * @param pVM Pointer to the VM.
1861 * @param pVCpu Pointer to the VMCPU.
1862 */
1863VMMDECL(RTHCPHYS) PGMGetInterRCCR3(PVM pVM, PVMCPU pVCpu)
1864{
1865 switch (pVCpu->pgm.s.enmShadowMode)
1866 {
1867 case PGMMODE_32_BIT:
1868 return pVM->pgm.s.HCPhysInterPD;
1869
1870 case PGMMODE_PAE:
1871 case PGMMODE_PAE_NX:
1872 return pVM->pgm.s.HCPhysInterPaePDPT;
1873
1874 case PGMMODE_AMD64:
1875 case PGMMODE_AMD64_NX:
1876 return pVM->pgm.s.HCPhysInterPaePML4;
1877
1878 case PGMMODE_EPT:
1879 case PGMMODE_NESTED:
1880 return 0; /* not relevant */
1881
1882 default:
1883 AssertMsgFailed(("enmShadowMode=%d\n", pVCpu->pgm.s.enmShadowMode));
1884 return NIL_RTHCPHYS;
1885 }
1886}
1887
1888
1889/**
1890 * Gets the CR3 register value for the 32-Bit intermediate memory context.
1891 * @returns CR3 value.
1892 * @param pVM Pointer to the VM.
1893 */
1894VMMDECL(RTHCPHYS) PGMGetInter32BitCR3(PVM pVM)
1895{
1896 return pVM->pgm.s.HCPhysInterPD;
1897}
1898
1899
1900/**
1901 * Gets the CR3 register value for the PAE intermediate memory context.
1902 * @returns CR3 value.
1903 * @param pVM Pointer to the VM.
1904 */
1905VMMDECL(RTHCPHYS) PGMGetInterPaeCR3(PVM pVM)
1906{
1907 return pVM->pgm.s.HCPhysInterPaePDPT;
1908}
1909
1910
1911/**
1912 * Gets the CR3 register value for the AMD64 intermediate memory context.
1913 * @returns CR3 value.
1914 * @param pVM Pointer to the VM.
1915 */
1916VMMDECL(RTHCPHYS) PGMGetInterAmd64CR3(PVM pVM)
1917{
1918 return pVM->pgm.s.HCPhysInterPaePML4;
1919}
1920
1921
1922/**
1923 * Performs and schedules necessary updates following a CR3 load or reload.
1924 *
 1925 * This will normally involve mapping the guest PD or nPDPT.
1926 *
1927 * @returns VBox status code.
1928 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
1929 * safely be ignored and overridden since the FF will be set too then.
1930 * @param pVCpu Pointer to the VMCPU.
1931 * @param cr3 The new cr3.
1932 * @param fGlobal Indicates whether this is a global flush or not.
1933 */
1934VMMDECL(int) PGMFlushTLB(PVMCPU pVCpu, uint64_t cr3, bool fGlobal)
1935{
1936 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLB), a);
1937 PVM pVM = pVCpu->CTX_SUFF(pVM);
1938
1939 VMCPU_ASSERT_EMT(pVCpu);
1940
1941 /*
 1942 * Always flag the necessary updates; this is necessary for hardware acceleration
1943 */
1944 /** @todo optimize this, it shouldn't always be necessary. */
1945 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
1946 if (fGlobal)
1947 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1948 LogFlow(("PGMFlushTLB: cr3=%RX64 OldCr3=%RX64 fGlobal=%d\n", cr3, pVCpu->pgm.s.GCPhysCR3, fGlobal));
1949
1950 /*
1951 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
1952 */
1953 int rc = VINF_SUCCESS;
1954 RTGCPHYS GCPhysCR3;
1955 switch (pVCpu->pgm.s.enmGuestMode)
1956 {
1957 case PGMMODE_PAE:
1958 case PGMMODE_PAE_NX:
1959 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1960 break;
1961 case PGMMODE_AMD64:
1962 case PGMMODE_AMD64_NX:
1963 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
1964 break;
1965 default:
1966 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1967 break;
1968 }
1969 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
1970
1971 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
1972 {
1973 RTGCPHYS GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
1974 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
1975 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
1976 if (RT_LIKELY(rc == VINF_SUCCESS))
1977 {
1978 if (pgmMapAreMappingsFloating(pVM))
1979 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1980 }
1981 else
1982 {
1983 AssertMsg(rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
1984 Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_PGM_SYNC_CR3));
1985 pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3;
1986 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MAP_CR3;
1987 if (pgmMapAreMappingsFloating(pVM))
1988 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;
1989 }
1990
1991 if (fGlobal)
1992 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBNewCR3Global));
1993 else
1994 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBNewCR3));
1995 }
1996 else
1997 {
1998# ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
1999 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
2000 if (pPool->cDirtyPages)
2001 {
2002 pgmLock(pVM);
2003 pgmPoolResetDirtyPages(pVM);
2004 pgmUnlock(pVM);
2005 }
2006# endif
2007 /*
2008 * Check if we have a pending update of the CR3 monitoring.
2009 */
2010 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
2011 {
2012 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
2013 Assert(!pVM->pgm.s.fMappingsFixed); Assert(pgmMapAreMappingsEnabled(pVM));
2014 }
2015 if (fGlobal)
2016 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBSameCR3Global));
2017 else
2018 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBSameCR3));
2019 }
2020
2021 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLB), a);
2022 return rc;
2023}
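
/*
 * A hedged sketch of a MOV CR3 emulation call site: the new CR3 value is passed
 * on and a global flush is requested when CR4.PGE is clear, while a
 * VINF_PGM_SYNC_CR3 return can be folded into success because the force action
 * flag is already set (see the function documentation above).  The helper name
 * examplePostMovCr3 is hypothetical.
 *
 *     static int examplePostMovCr3(PVMCPU pVCpu, uint64_t uNewCr3)
 *     {
 *         int rc = PGMFlushTLB(pVCpu, uNewCr3, !(CPUMGetGuestCR4(pVCpu) & X86_CR4_PGE));
 *         if (rc == VINF_PGM_SYNC_CR3)
 *             rc = VINF_SUCCESS;          // FF is set; the sync runs before guest execution resumes
 *         return rc;
 *     }
 */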
2024
2025
2026/**
2027 * Performs and schedules necessary updates following a CR3 load or reload when
2028 * using nested or extended paging.
2029 *
 2030 * This API is an alternative to PGMFlushTLB that avoids actually flushing the
2031 * TLB and triggering a SyncCR3.
2032 *
 2033 * This will normally involve mapping the guest PD or nPDPT.
2034 *
2035 * @returns VBox status code.
2036 * @retval VINF_SUCCESS.
2037 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync (not for nested
2038 * paging modes). This can safely be ignored and overridden since the
2039 * FF will be set too then.
2040 * @param pVCpu Pointer to the VMCPU.
2041 * @param cr3 The new cr3.
2042 */
2043VMMDECL(int) PGMUpdateCR3(PVMCPU pVCpu, uint64_t cr3)
2044{
2045 VMCPU_ASSERT_EMT(pVCpu);
2046 LogFlow(("PGMUpdateCR3: cr3=%RX64 OldCr3=%RX64\n", cr3, pVCpu->pgm.s.GCPhysCR3));
2047
2048 /* We assume we're only called in nested paging mode. */
2049 Assert(pVCpu->CTX_SUFF(pVM)->pgm.s.fNestedPaging || pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
2050 Assert(!pgmMapAreMappingsEnabled(pVCpu->CTX_SUFF(pVM)));
2051 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3));
2052
2053 /*
2054 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
2055 */
2056 int rc = VINF_SUCCESS;
2057 RTGCPHYS GCPhysCR3;
2058 switch (pVCpu->pgm.s.enmGuestMode)
2059 {
2060 case PGMMODE_PAE:
2061 case PGMMODE_PAE_NX:
2062 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
2063 break;
2064 case PGMMODE_AMD64:
2065 case PGMMODE_AMD64_NX:
2066 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
2067 break;
2068 default:
2069 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
2070 break;
2071 }
2072 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
2073
2074 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
2075 {
2076 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2077 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
2078 AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */ /** @todo this isn't true for the mac, but we need hw to test/fix this. */
2079 }
2080
2081 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
2082 return rc;
2083}
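
/*
 * A sketch of the required ordering when the CPU also caches the PAE PDPEs (see
 * the remark on PGMGstUpdatePaePdpes above): the guest CR3 is remapped first,
 * then the cached PDPE registers are pushed.  The wrapper function is
 * hypothetical.
 *
 *     static void exampleLoadGuestCr3AndPdpes(PVMCPU pVCpu, uint64_t cr3, PCX86PDPE paPdpes)
 *     {
 *         PGMUpdateCR3(pVCpu, cr3);                // remap the guest CR3 first
 *         PGMGstUpdatePaePdpes(pVCpu, paPdpes);    // then update the cached PDPE registers
 *     }
 */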
2084
2085
2086/**
2087 * Synchronize the paging structures.
2088 *
 2089 * This function is called in response to the VMCPU_FF_PGM_SYNC_CR3 and
 2090 * VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL. Those two force action flags are set
2091 * in several places, most importantly whenever the CR3 is loaded.
2092 *
2093 * @returns VBox status code.
2094 * @param pVCpu Pointer to the VMCPU.
2095 * @param cr0 Guest context CR0 register
2096 * @param cr3 Guest context CR3 register
2097 * @param cr4 Guest context CR4 register
2098 * @param fGlobal Including global page directories or not
2099 */
2100VMMDECL(int) PGMSyncCR3(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
2101{
2102 int rc;
2103
2104 VMCPU_ASSERT_EMT(pVCpu);
2105
2106 /*
 2107 * The pool may have pending work and may even require a return to ring-3 to
 2108 * clear the whole thing.
2109 */
2110 rc = pgmPoolSyncCR3(pVCpu);
2111 if (rc != VINF_SUCCESS)
2112 return rc;
2113
2114 /*
2115 * We might be called when we shouldn't.
2116 *
2117 * The mode switching will ensure that the PD is resynced after every mode
2118 * switch. So, if we find ourselves here when in protected or real mode
2119 * we can safely clear the FF and return immediately.
2120 */
2121 if (pVCpu->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
2122 {
2123 Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
2124 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2125 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2126 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2127 return VINF_SUCCESS;
2128 }
2129
2130 /* If global pages are not supported, then all flushes are global. */
2131 if (!(cr4 & X86_CR4_PGE))
2132 fGlobal = true;
2133 LogFlow(("PGMSyncCR3: cr0=%RX64 cr3=%RX64 cr4=%RX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
2134 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)));
2135
2136 /*
2137 * Check if we need to finish an aborted MapCR3 call (see PGMFlushTLB).
2138 * This should be done before SyncCR3.
2139 */
2140 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MAP_CR3)
2141 {
2142 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MAP_CR3;
2143
2144 RTGCPHYS GCPhysCR3Old = pVCpu->pgm.s.GCPhysCR3; NOREF(GCPhysCR3Old);
2145 RTGCPHYS GCPhysCR3;
2146 switch (pVCpu->pgm.s.enmGuestMode)
2147 {
2148 case PGMMODE_PAE:
2149 case PGMMODE_PAE_NX:
2150 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
2151 break;
2152 case PGMMODE_AMD64:
2153 case PGMMODE_AMD64_NX:
2154 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
2155 break;
2156 default:
2157 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
2158 break;
2159 }
2160 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
2161
2162 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
2163 {
2164 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2165 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
2166 }
2167
2168 /* Make sure we check for pending pgm pool syncs as we clear VMCPU_FF_PGM_SYNC_CR3 later on! */
2169 if ( rc == VINF_PGM_SYNC_CR3
2170 || (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL))
2171 {
2172 Log(("PGMSyncCR3: pending pgm pool sync after MapCR3!\n"));
2173#ifdef IN_RING3
2174 rc = pgmPoolSyncCR3(pVCpu);
2175#else
2176 if (rc == VINF_PGM_SYNC_CR3)
2177 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3Old;
2178 return VINF_PGM_SYNC_CR3;
2179#endif
2180 }
2181 AssertRCReturn(rc, rc);
2182 AssertRCSuccessReturn(rc, VERR_IPE_UNEXPECTED_INFO_STATUS);
2183 }
2184
2185 /*
2186 * Let the 'Bth' function do the work and we'll just keep track of the flags.
2187 */
2188 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
2189 rc = PGM_BTH_PFN(SyncCR3, pVCpu)(pVCpu, cr0, cr3, cr4, fGlobal);
2190 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
2191 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
2192 if (rc == VINF_SUCCESS)
2193 {
2194 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)
2195 {
2196 /* Go back to ring 3 if a pgm pool sync is again pending. */
2197 return VINF_PGM_SYNC_CR3;
2198 }
2199
2200 if (!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
2201 {
2202 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2203 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2204 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2205 }
2206
2207 /*
2208 * Check if we have a pending update of the CR3 monitoring.
2209 */
2210 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
2211 {
2212 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
2213 Assert(!pVCpu->CTX_SUFF(pVM)->pgm.s.fMappingsFixed);
2214 Assert(pgmMapAreMappingsEnabled(pVCpu->CTX_SUFF(pVM)));
2215 }
2216 }
2217
2218 /*
2219 * Now flush the CR3 (guest context).
2220 */
2221 if (rc == VINF_SUCCESS)
2222 PGM_INVL_VCPU_TLBS(pVCpu);
2223 return rc;
2224}
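
/*
 * A sketch of the typical force-flag driven call site in an execution loop,
 * similar in spirit to the HM force-flag checks; any status other than
 * VINF_SUCCESS is propagated so ring-3 can finish the job.
 *
 *     if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
 *     {
 *         int rc = PGMSyncCR3(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR3(pVCpu),
 *                             CPUMGetGuestCR4(pVCpu),
 *                             VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
 *         if (rc != VINF_SUCCESS)
 *             return rc;                  // e.g. VINF_PGM_SYNC_CR3: let ring-3 deal with it
 *     }
 */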
2225
2226
2227/**
 2228 * Called whenever CR0 or CR4 changes in a way which may affect the paging mode.
2229 *
2230 * @returns VBox status code, with the following informational code for
2231 * VM scheduling.
 2232 * @retval VINF_SUCCESS if there was no change, or it was successfully dealt with.
2233 * @retval VINF_PGM_CHANGE_MODE if we're in RC or R0 and the mode changes.
2234 * (I.e. not in R3.)
2235 * @retval VINF_EM_SUSPEND or VINF_EM_OFF on a fatal runtime error. (R3 only)
2236 *
2237 * @param pVCpu Pointer to the VMCPU.
2238 * @param cr0 The new cr0.
2239 * @param cr4 The new cr4.
2240 * @param efer The new extended feature enable register.
2241 */
2242VMMDECL(int) PGMChangeMode(PVMCPU pVCpu, uint64_t cr0, uint64_t cr4, uint64_t efer)
2243{
2244 PGMMODE enmGuestMode;
2245
2246 VMCPU_ASSERT_EMT(pVCpu);
2247
2248 /*
2249 * Calc the new guest mode.
2250 */
2251 if (!(cr0 & X86_CR0_PE))
2252 enmGuestMode = PGMMODE_REAL;
2253 else if (!(cr0 & X86_CR0_PG))
2254 enmGuestMode = PGMMODE_PROTECTED;
2255 else if (!(cr4 & X86_CR4_PAE))
2256 {
2257 bool const fPse = !!(cr4 & X86_CR4_PSE);
2258 if (pVCpu->pgm.s.fGst32BitPageSizeExtension != fPse)
2259 Log(("PGMChangeMode: CR4.PSE %d -> %d\n", pVCpu->pgm.s.fGst32BitPageSizeExtension, fPse));
2260 pVCpu->pgm.s.fGst32BitPageSizeExtension = fPse;
2261 enmGuestMode = PGMMODE_32_BIT;
2262 }
2263 else if (!(efer & MSR_K6_EFER_LME))
2264 {
2265 if (!(efer & MSR_K6_EFER_NXE))
2266 enmGuestMode = PGMMODE_PAE;
2267 else
2268 enmGuestMode = PGMMODE_PAE_NX;
2269 }
2270 else
2271 {
2272 if (!(efer & MSR_K6_EFER_NXE))
2273 enmGuestMode = PGMMODE_AMD64;
2274 else
2275 enmGuestMode = PGMMODE_AMD64_NX;
2276 }
2277
2278 /*
2279 * Did it change?
2280 */
2281 if (pVCpu->pgm.s.enmGuestMode == enmGuestMode)
2282 return VINF_SUCCESS;
2283
2284 /* Flush the TLB */
2285 PGM_INVL_VCPU_TLBS(pVCpu);
2286
2287#ifdef IN_RING3
2288 return PGMR3ChangeMode(pVCpu->CTX_SUFF(pVM), pVCpu, enmGuestMode);
2289#else
2290 LogFlow(("PGMChangeMode: returns VINF_PGM_CHANGE_MODE.\n"));
2291 return VINF_PGM_CHANGE_MODE;
2292#endif
2293}
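
/*
 * A hedged sketch of a CR0/CR4/EFER write emulation path calling the function
 * above; uNewCr0 is assumed to be a local of that (hypothetical) caller.  Outside
 * ring-3 the VINF_PGM_CHANGE_MODE status must be passed up so that PGMR3ChangeMode
 * can run in ring-3.
 *
 *     int rc = PGMChangeMode(pVCpu, uNewCr0, CPUMGetGuestCR4(pVCpu), CPUMGetGuestEFER(pVCpu));
 *     if (rc != VINF_SUCCESS)
 *         return rc;                      // VINF_PGM_CHANGE_MODE when not in ring-3
 */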
2294
2295
2296/**
2297 * Called by CPUM or REM when CR0.WP changes to 1.
2298 *
2299 * @param pVCpu The cross context virtual CPU structure of the caller.
2300 * @thread EMT
2301 */
2302VMMDECL(void) PGMCr0WpEnabled(PVMCPU pVCpu)
2303{
2304 /*
2305 * Netware WP0+RO+US hack cleanup when WP0 -> WP1.
2306 *
2307 * Use the counter to judge whether there might be pool pages with active
2308 * hacks in them. If there are, we will be running the risk of messing up
2309 * the guest by allowing it to write to read-only pages. Thus, we have to
2310 * clear the page pool ASAP if there is the slightest chance.
2311 */
2312 if (pVCpu->pgm.s.cNetwareWp0Hacks > 0)
2313 {
2314 Assert(pVCpu->CTX_SUFF(pVM)->cCpus == 1);
2315
2316 Log(("PGMCr0WpEnabled: %llu WP0 hacks active - clearing page pool\n", pVCpu->pgm.s.cNetwareWp0Hacks));
2317 pVCpu->pgm.s.cNetwareWp0Hacks = 0;
2318 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
2319 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2320 }
2321}
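
/*
 * A sketch of a hypothetical caller detecting the WP 0 -> 1 transition while
 * emulating a CR0 write; uOldCr0 and uNewCr0 are assumed locals of that caller.
 *
 *     if (   !(uOldCr0 & X86_CR0_WP)
 *         &&  (uNewCr0 & X86_CR0_WP))
 *         PGMCr0WpEnabled(pVCpu);
 */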
2322
2323
2324/**
2325 * Gets the current guest paging mode.
2326 *
2327 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
2328 *
2329 * @returns The current paging mode.
2330 * @param pVCpu Pointer to the VMCPU.
2331 */
2332VMMDECL(PGMMODE) PGMGetGuestMode(PVMCPU pVCpu)
2333{
2334 return pVCpu->pgm.s.enmGuestMode;
2335}
2336
2337
2338/**
2339 * Gets the current shadow paging mode.
2340 *
2341 * @returns The current paging mode.
2342 * @param pVCpu Pointer to the VMCPU.
2343 */
2344VMMDECL(PGMMODE) PGMGetShadowMode(PVMCPU pVCpu)
2345{
2346 return pVCpu->pgm.s.enmShadowMode;
2347}
2348
2349
2350/**
2351 * Gets the current host paging mode.
2352 *
2353 * @returns The current paging mode.
2354 * @param pVM Pointer to the VM.
2355 */
2356VMMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
2357{
2358 switch (pVM->pgm.s.enmHostMode)
2359 {
2360 case SUPPAGINGMODE_32_BIT:
2361 case SUPPAGINGMODE_32_BIT_GLOBAL:
2362 return PGMMODE_32_BIT;
2363
2364 case SUPPAGINGMODE_PAE:
2365 case SUPPAGINGMODE_PAE_GLOBAL:
2366 return PGMMODE_PAE;
2367
2368 case SUPPAGINGMODE_PAE_NX:
2369 case SUPPAGINGMODE_PAE_GLOBAL_NX:
2370 return PGMMODE_PAE_NX;
2371
2372 case SUPPAGINGMODE_AMD64:
2373 case SUPPAGINGMODE_AMD64_GLOBAL:
2374 return PGMMODE_AMD64;
2375
2376 case SUPPAGINGMODE_AMD64_NX:
2377 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
2378 return PGMMODE_AMD64_NX;
2379
2380 default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
2381 }
2382
2383 return PGMMODE_INVALID;
2384}
2385
2386
2387/**
2388 * Get mode name.
2389 *
2390 * @returns read-only name string.
 2391 * @param enmMode The mode whose name is desired.
2392 */
2393VMMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
2394{
2395 switch (enmMode)
2396 {
2397 case PGMMODE_REAL: return "Real";
2398 case PGMMODE_PROTECTED: return "Protected";
2399 case PGMMODE_32_BIT: return "32-bit";
2400 case PGMMODE_PAE: return "PAE";
2401 case PGMMODE_PAE_NX: return "PAE+NX";
2402 case PGMMODE_AMD64: return "AMD64";
2403 case PGMMODE_AMD64_NX: return "AMD64+NX";
2404 case PGMMODE_NESTED: return "Nested";
2405 case PGMMODE_EPT: return "EPT";
2406 default: return "unknown mode value";
2407 }
2408}
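
/*
 * A small usage sketch combining the mode getters with the name lookup, e.g.
 * for logging:
 *
 *     Log(("PGM: guest mode %s, shadow mode %s, host mode %s\n",
 *          PGMGetModeName(PGMGetGuestMode(pVCpu)),
 *          PGMGetModeName(PGMGetShadowMode(pVCpu)),
 *          PGMGetModeName(PGMGetHostMode(pVCpu->CTX_SUFF(pVM)))));
 */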
2409
2410
2411
2412/**
2413 * Notification from CPUM that the EFER.NXE bit has changed.
2414 *
2415 * @param pVCpu The virtual CPU for which EFER changed.
2416 * @param fNxe The new NXE state.
2417 */
2418VMM_INT_DECL(void) PGMNotifyNxeChanged(PVMCPU pVCpu, bool fNxe)
2419{
2420/** @todo VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu); */
2421 Log(("PGMNotifyNxeChanged: fNxe=%RTbool\n", fNxe));
2422
2423 pVCpu->pgm.s.fNoExecuteEnabled = fNxe;
2424 if (fNxe)
2425 {
2426 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
2427 pVCpu->pgm.s.fGstPaeMbzPteMask &= ~X86_PTE_PAE_NX;
2428 pVCpu->pgm.s.fGstPaeMbzPdeMask &= ~X86_PDE_PAE_NX;
2429 pVCpu->pgm.s.fGstPaeMbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
2430 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask - N/A */
2431 pVCpu->pgm.s.fGstAmd64MbzPteMask &= ~X86_PTE_PAE_NX;
2432 pVCpu->pgm.s.fGstAmd64MbzPdeMask &= ~X86_PDE_PAE_NX;
2433 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
2434 pVCpu->pgm.s.fGstAmd64MbzPdpeMask &= ~X86_PDPE_LM_NX;
2435 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask &= ~X86_PDPE_LM_NX;
2436 pVCpu->pgm.s.fGstAmd64MbzPml4eMask &= ~X86_PML4E_NX;
2437
2438 pVCpu->pgm.s.fGst64ShadowedPteMask |= X86_PTE_PAE_NX;
2439 pVCpu->pgm.s.fGst64ShadowedPdeMask |= X86_PDE_PAE_NX;
2440 pVCpu->pgm.s.fGst64ShadowedBigPdeMask |= X86_PDE2M_PAE_NX;
2441 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask |= X86_PDE2M_PAE_NX;
2442 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask |= X86_PDPE_LM_NX;
2443 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask |= X86_PML4E_NX;
2444 }
2445 else
2446 {
2447 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
2448 pVCpu->pgm.s.fGstPaeMbzPteMask |= X86_PTE_PAE_NX;
2449 pVCpu->pgm.s.fGstPaeMbzPdeMask |= X86_PDE_PAE_NX;
2450 pVCpu->pgm.s.fGstPaeMbzBigPdeMask |= X86_PDE2M_PAE_NX;
2451 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask -N/A */
2452 pVCpu->pgm.s.fGstAmd64MbzPteMask |= X86_PTE_PAE_NX;
2453 pVCpu->pgm.s.fGstAmd64MbzPdeMask |= X86_PDE_PAE_NX;
2454 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask |= X86_PDE2M_PAE_NX;
2455 pVCpu->pgm.s.fGstAmd64MbzPdpeMask |= X86_PDPE_LM_NX;
2456 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask |= X86_PDPE_LM_NX;
2457 pVCpu->pgm.s.fGstAmd64MbzPml4eMask |= X86_PML4E_NX;
2458
2459 pVCpu->pgm.s.fGst64ShadowedPteMask &= ~X86_PTE_PAE_NX;
2460 pVCpu->pgm.s.fGst64ShadowedPdeMask &= ~X86_PDE_PAE_NX;
2461 pVCpu->pgm.s.fGst64ShadowedBigPdeMask &= ~X86_PDE2M_PAE_NX;
2462 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask &= ~X86_PDE2M_PAE_NX;
2463 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask &= ~X86_PDPE_LM_NX;
2464 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask &= ~X86_PML4E_NX;
2465 }
2466}
2467
2468
2469/**
 2470 * Check if any pgm pool pages are marked dirty (not monitored).
 2471 *
 2472 * @returns true if there are dirty (not write-monitored) pages, false otherwise.
2473 * @param pVM Pointer to the VM.
2474 */
2475VMMDECL(bool) PGMHasDirtyPages(PVM pVM)
2476{
2477 return pVM->pgm.s.CTX_SUFF(pPool)->cDirtyPages != 0;
2478}
2479
2480
2481/**
2482 * Check if this VCPU currently owns the PGM lock.
2483 *
2484 * @returns bool owner/not owner
2485 * @param pVM Pointer to the VM.
2486 */
2487VMMDECL(bool) PGMIsLockOwner(PVM pVM)
2488{
2489 return PDMCritSectIsOwner(&pVM->pgm.s.CritSectX);
2490}
2491
2492
2493/**
2494 * Enable or disable large page usage
2495 *
2496 * @returns VBox status code.
2497 * @param pVM Pointer to the VM.
2498 * @param fUseLargePages Use/not use large pages
2499 */
2500VMMDECL(int) PGMSetLargePageUsage(PVM pVM, bool fUseLargePages)
2501{
2502 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2503
2504 pVM->fUseLargePages = fUseLargePages;
2505 return VINF_SUCCESS;
2506}
2507
2508
2509/**
2510 * Acquire the PGM lock.
2511 *
2512 * @returns VBox status code
2513 * @param pVM Pointer to the VM.
2514 */
2515#if defined(VBOX_STRICT) && defined(IN_RING3)
2516int pgmLockDebug(PVM pVM, RT_SRC_POS_DECL)
2517#else
2518int pgmLock(PVM pVM)
2519#endif
2520{
2521#if defined(VBOX_STRICT) && defined(IN_RING3)
2522 int rc = PDMCritSectEnterDebug(&pVM->pgm.s.CritSectX, VERR_SEM_BUSY, (uintptr_t)ASMReturnAddress(), RT_SRC_POS_ARGS);
2523#else
2524 int rc = PDMCritSectEnter(&pVM->pgm.s.CritSectX, VERR_SEM_BUSY);
2525#endif
2526#if defined(IN_RC) || defined(IN_RING0)
2527 if (rc == VERR_SEM_BUSY)
2528 rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_LOCK, 0);
2529#endif
2530 AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc));
2531 return rc;
2532}
2533
2534
2535/**
2536 * Release the PGM lock.
2537 *
2539 * @param pVM Pointer to the VM.
2540 */
2541void pgmUnlock(PVM pVM)
2542{
2543 uint32_t cDeprecatedPageLocks = pVM->pgm.s.cDeprecatedPageLocks;
2544 pVM->pgm.s.cDeprecatedPageLocks = 0;
2545 int rc = PDMCritSectLeave(&pVM->pgm.s.CritSectX);
2546 if (rc == VINF_SEM_NESTED)
2547 pVM->pgm.s.cDeprecatedPageLocks = cDeprecatedPageLocks;
2548}
2549
2550#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
2551
2552/**
2553 * Common worker for pgmRZDynMapGCPageOffInlined and pgmRZDynMapGCPageV2Inlined.
2554 *
2555 * @returns VBox status code.
2556 * @param pVM Pointer to the VM.
2557 * @param pVCpu The current CPU.
2558 * @param GCPhys The guest physical address of the page to map. The
2559 * offset bits are not ignored.
2560 * @param ppv Where to return the address corresponding to @a GCPhys.
2561 */
2562int pgmRZDynMapGCPageCommon(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
2563{
2564 pgmLock(pVM);
2565
2566 /*
 2567 * Convert it to a writable page and pass it on to the dynamic mapper.
2568 */
2569 int rc;
2570 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
2571 if (RT_LIKELY(pPage))
2572 {
2573 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
2574 if (RT_SUCCESS(rc))
2575 {
2576 void *pv;
2577 rc = pgmRZDynMapHCPageInlined(pVCpu, PGM_PAGE_GET_HCPHYS(pPage), &pv RTLOG_COMMA_SRC_POS_ARGS);
2578 if (RT_SUCCESS(rc))
2579 *ppv = (void *)((uintptr_t)pv | ((uintptr_t)GCPhys & PAGE_OFFSET_MASK));
2580 }
2581 else
2582 AssertRC(rc);
2583 }
2584 else
2585 {
2586 AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
2587 rc = VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2588 }
2589
2590 pgmUnlock(pVM);
2591 return rc;
2592}
2593
2594#endif /* IN_RC || VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
2595#if !defined(IN_R0) || defined(LOG_ENABLED)
2596
2597/** Format handler for PGMPAGE.
2598 * @copydoc FNRTSTRFORMATTYPE */
2599static DECLCALLBACK(size_t) pgmFormatTypeHandlerPage(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
2600 const char *pszType, void const *pvValue,
2601 int cchWidth, int cchPrecision, unsigned fFlags,
2602 void *pvUser)
2603{
2604 size_t cch;
2605 PCPGMPAGE pPage = (PCPGMPAGE)pvValue;
2606 if (RT_VALID_PTR(pPage))
2607 {
2608 char szTmp[64+80];
2609
2610 cch = 0;
2611
2612 /* The single char state stuff. */
2613 static const char s_achPageStates[4] = { 'Z', 'A', 'W', 'S' };
2614 szTmp[cch++] = s_achPageStates[PGM_PAGE_GET_STATE_NA(pPage)];
2615
2616#define IS_PART_INCLUDED(lvl) ( !(fFlags & RTSTR_F_PRECISION) || cchPrecision == (lvl) || cchPrecision >= (lvl)+10 )
2617 if (IS_PART_INCLUDED(5))
2618 {
2619 static const char s_achHandlerStates[4] = { '-', 't', 'w', 'a' };
2620 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)];
2621 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_VIRT_STATE(pPage)];
2622 }
2623
2624 /* The type. */
2625 if (IS_PART_INCLUDED(4))
2626 {
2627 szTmp[cch++] = ':';
2628 static const char s_achPageTypes[8][4] = { "INV", "RAM", "MI2", "M2A", "SHA", "ROM", "MIO", "BAD" };
2629 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][0];
2630 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][1];
2631 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][2];
2632 }
2633
2634 /* The numbers. */
2635 if (IS_PART_INCLUDED(3))
2636 {
2637 szTmp[cch++] = ':';
2638 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_HCPHYS_NA(pPage), 16, 12, 0, RTSTR_F_ZEROPAD | RTSTR_F_64BIT);
2639 }
2640
2641 if (IS_PART_INCLUDED(2))
2642 {
2643 szTmp[cch++] = ':';
2644 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_PAGEID(pPage), 16, 7, 0, RTSTR_F_ZEROPAD | RTSTR_F_32BIT);
2645 }
2646
2647 if (IS_PART_INCLUDED(6))
2648 {
2649 szTmp[cch++] = ':';
2650 static const char s_achRefs[4] = { '-', 'U', '!', 'L' };
2651 szTmp[cch++] = s_achRefs[PGM_PAGE_GET_TD_CREFS_NA(pPage)];
2652 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_TD_IDX_NA(pPage), 16, 4, 0, RTSTR_F_ZEROPAD | RTSTR_F_16BIT);
2653 }
2654#undef IS_PART_INCLUDED
2655
2656 cch = pfnOutput(pvArgOutput, szTmp, cch);
2657 }
2658 else
2659 cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmpage-ptr>"));
2660 NOREF(pszType); NOREF(cchWidth); NOREF(pvUser);
2661 return cch;
2662}
2663
2664
2665/** Format handler for PGMRAMRANGE.
2666 * @copydoc FNRTSTRFORMATTYPE */
2667static DECLCALLBACK(size_t) pgmFormatTypeHandlerRamRange(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
2668 const char *pszType, void const *pvValue,
2669 int cchWidth, int cchPrecision, unsigned fFlags,
2670 void *pvUser)
2671{
2672 size_t cch;
2673 PGMRAMRANGE const *pRam = (PGMRAMRANGE const *)pvValue;
2674 if (VALID_PTR(pRam))
2675 {
2676 char szTmp[80];
2677 cch = RTStrPrintf(szTmp, sizeof(szTmp), "%RGp-%RGp", pRam->GCPhys, pRam->GCPhysLast);
2678 cch = pfnOutput(pvArgOutput, szTmp, cch);
2679 }
2680 else
2681 cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmramrange-ptr>"));
2682 NOREF(pszType); NOREF(cchWidth); NOREF(cchPrecision); NOREF(pvUser); NOREF(fFlags);
2683 return cch;
2684}
2685
 2686/** Format type handlers to be registered/deregistered. */
2687static const struct
2688{
2689 char szType[24];
2690 PFNRTSTRFORMATTYPE pfnHandler;
2691} g_aPgmFormatTypes[] =
2692{
2693 { "pgmpage", pgmFormatTypeHandlerPage },
2694 { "pgmramrange", pgmFormatTypeHandlerRamRange }
2695};
2696
2697#endif /* !IN_R0 || LOG_ENABLED */
2698
2699/**
2700 * Registers the global string format types.
2701 *
 2702 * This should be called at module load time or in some other manner that ensures
2703 * that it's called exactly one time.
2704 *
2705 * @returns IPRT status code on RTStrFormatTypeRegister failure.
2706 */
2707VMMDECL(int) PGMRegisterStringFormatTypes(void)
2708{
2709#if !defined(IN_R0) || defined(LOG_ENABLED)
2710 int rc = VINF_SUCCESS;
2711 unsigned i;
2712 for (i = 0; RT_SUCCESS(rc) && i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
2713 {
2714 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
2715# ifdef IN_RING0
2716 if (rc == VERR_ALREADY_EXISTS)
2717 {
2718 /* in case of cleanup failure in ring-0 */
2719 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2720 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
2721 }
2722# endif
2723 }
2724 if (RT_FAILURE(rc))
2725 while (i-- > 0)
2726 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2727
2728 return rc;
2729#else
2730 return VINF_SUCCESS;
2731#endif
2732}
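
/*
 * Once registered, the custom types are consumed through IPRT's %R[...] format
 * specifier.  A minimal logging sketch, assuming pPage and pRam point at valid
 * PGMPAGE and PGMRAMRANGE structures:
 *
 *     Log(("%R[pgmpage] in range %R[pgmramrange]\n", pPage, pRam));
 */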
2733
2734
2735/**
2736 * Deregisters the global string format types.
2737 *
2738 * This should be called at module unload time or in some other manner that
 2739 * ensures that it's called exactly one time.
2740 */
2741VMMDECL(void) PGMDeregisterStringFormatTypes(void)
2742{
2743#if !defined(IN_R0) || defined(LOG_ENABLED)
2744 for (unsigned i = 0; i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
2745 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2746#endif
2747}
2748
2749#ifdef VBOX_STRICT
2750
2751/**
2752 * Asserts that there are no mapping conflicts.
2753 *
2754 * @returns Number of conflicts.
2755 * @param pVM Pointer to the VM.
2756 */
2757VMMDECL(unsigned) PGMAssertNoMappingConflicts(PVM pVM)
2758{
2759 unsigned cErrors = 0;
2760
 2761 /* Only applies to raw mode -> 1 VCPU */
2762 Assert(pVM->cCpus == 1);
2763 PVMCPU pVCpu = &pVM->aCpus[0];
2764
2765 /*
2766 * Check for mapping conflicts.
2767 */
2768 for (PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
2769 pMapping;
2770 pMapping = pMapping->CTX_SUFF(pNext))
2771 {
2772 /** @todo This is slow and should be optimized, but since it's just assertions I don't care now. */
2773 for (RTGCPTR GCPtr = pMapping->GCPtr;
2774 GCPtr <= pMapping->GCPtrLast;
2775 GCPtr += PAGE_SIZE)
2776 {
2777 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)GCPtr, NULL, NULL);
2778 if (rc != VERR_PAGE_TABLE_NOT_PRESENT)
2779 {
2780 AssertMsgFailed(("Conflict at %RGv with %s\n", GCPtr, R3STRING(pMapping->pszDesc)));
2781 cErrors++;
2782 break;
2783 }
2784 }
2785 }
2786
2787 return cErrors;
2788}
2789
2790
2791/**
2792 * Asserts that everything related to the guest CR3 is correctly shadowed.
2793 *
2794 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
2795 * and assert the correctness of the guest CR3 mapping before asserting that the
 2796 * shadow page tables are in sync with the guest page tables.
2797 *
2798 * @returns Number of conflicts.
2799 * @param pVM Pointer to the VM.
2800 * @param pVCpu Pointer to the VMCPU.
2801 * @param cr3 The current guest CR3 register value.
2802 * @param cr4 The current guest CR4 register value.
2803 */
2804VMMDECL(unsigned) PGMAssertCR3(PVM pVM, PVMCPU pVCpu, uint64_t cr3, uint64_t cr4)
2805{
2806 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
2807 pgmLock(pVM);
2808 unsigned cErrors = PGM_BTH_PFN(AssertCR3, pVCpu)(pVCpu, cr3, cr4, 0, ~(RTGCPTR)0);
2809 pgmUnlock(pVM);
2810 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
2811 return cErrors;
2812}
2813
2814#endif /* VBOX_STRICT */