VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp@ 61555

Last change on this file since 61555 was 60867, checked in by vboxsync, 9 years ago

PGM: Don't ever drop VERR_PGM_NO_HYPERVISOR_ADDRESS! Will end up with fatal assertion in EIP+ESP prefetch code.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 94.5 KB
Line 
1/* $Id: PGMAll.cpp 60867 2016-05-06 20:48:48Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor - All context code.
4 */
5
6/*
7 * Copyright (C) 2006-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_PGM
23#include <VBox/vmm/pgm.h>
24#include <VBox/vmm/cpum.h>
25#include <VBox/vmm/selm.h>
26#include <VBox/vmm/iom.h>
27#include <VBox/sup.h>
28#include <VBox/vmm/mm.h>
29#include <VBox/vmm/stam.h>
30#include <VBox/vmm/csam.h>
31#include <VBox/vmm/patm.h>
32#include <VBox/vmm/trpm.h>
33#ifdef VBOX_WITH_REM
34# include <VBox/vmm/rem.h>
35#endif
36#include <VBox/vmm/em.h>
37#include <VBox/vmm/hm.h>
38#include <VBox/vmm/hm_vmx.h>
39#include "PGMInternal.h"
40#include <VBox/vmm/vm.h>
41#include "PGMInline.h"
42#include <iprt/assert.h>
43#include <iprt/asm-amd64-x86.h>
44#include <iprt/string.h>
45#include <VBox/log.h>
46#include <VBox/param.h>
47#include <VBox/err.h>
48
49
50/*********************************************************************************************************************************
51* Structures and Typedefs *
52*********************************************************************************************************************************/
/**
 * State structure for PGM_GST_NAME(HandlerVirtualUpdate) that's
 * passed to PGM_GST_NAME(VirtHandlerUpdateOne) during enumeration.
 */
typedef struct PGMHVUSTATE
{
    /** Pointer to the VM. */
    PVM             pVM;
    /** Pointer to the VMCPU. */
    PVMCPU          pVCpu;
    /** The todo flags. */
    RTUINT          fTodo;
    /** The CR4 register value. */
    uint32_t        cr4;
} PGMHVUSTATE,  *PPGMHVUSTATE;
68
69
70/*********************************************************************************************************************************
71* Internal Functions *
72*********************************************************************************************************************************/
73DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
74DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPU pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde);
75#ifndef IN_RC
76static int pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD);
77static int pgmShwGetEPTPDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD);
78#endif
79
80
81/*
82 * Shadow - 32-bit mode
83 */
84#define PGM_SHW_TYPE PGM_TYPE_32BIT
85#define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
86#include "PGMAllShw.h"
87
88/* Guest - real mode */
89#define PGM_GST_TYPE PGM_TYPE_REAL
90#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
91#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
92#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
93#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
94#include "PGMGstDefs.h"
95#include "PGMAllGst.h"
96#include "PGMAllBth.h"
97#undef BTH_PGMPOOLKIND_PT_FOR_PT
98#undef BTH_PGMPOOLKIND_ROOT
99#undef PGM_BTH_NAME
100#undef PGM_GST_TYPE
101#undef PGM_GST_NAME
102
103/* Guest - protected mode */
104#define PGM_GST_TYPE PGM_TYPE_PROT
105#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
106#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
107#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
108#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
109#include "PGMGstDefs.h"
110#include "PGMAllGst.h"
111#include "PGMAllBth.h"
112#undef BTH_PGMPOOLKIND_PT_FOR_PT
113#undef BTH_PGMPOOLKIND_ROOT
114#undef PGM_BTH_NAME
115#undef PGM_GST_TYPE
116#undef PGM_GST_NAME
117
118/* Guest - 32-bit mode */
119#define PGM_GST_TYPE PGM_TYPE_32BIT
120#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
121#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
122#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
123#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
124#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD
125#include "PGMGstDefs.h"
126#include "PGMAllGst.h"
127#include "PGMAllBth.h"
128#undef BTH_PGMPOOLKIND_PT_FOR_BIG
129#undef BTH_PGMPOOLKIND_PT_FOR_PT
130#undef BTH_PGMPOOLKIND_ROOT
131#undef PGM_BTH_NAME
132#undef PGM_GST_TYPE
133#undef PGM_GST_NAME
134
135#undef PGM_SHW_TYPE
136#undef PGM_SHW_NAME
137
138
139/*
140 * Shadow - PAE mode
141 */
142#define PGM_SHW_TYPE PGM_TYPE_PAE
143#define PGM_SHW_NAME(name) PGM_SHW_NAME_PAE(name)
144#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
145#include "PGMAllShw.h"
146
147/* Guest - real mode */
148#define PGM_GST_TYPE PGM_TYPE_REAL
149#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
150#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
151#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
152#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
153#include "PGMGstDefs.h"
154#include "PGMAllBth.h"
155#undef BTH_PGMPOOLKIND_PT_FOR_PT
156#undef BTH_PGMPOOLKIND_ROOT
157#undef PGM_BTH_NAME
158#undef PGM_GST_TYPE
159#undef PGM_GST_NAME
160
161/* Guest - protected mode */
162#define PGM_GST_TYPE PGM_TYPE_PROT
163#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
164#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
165#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
166#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
167#include "PGMGstDefs.h"
168#include "PGMAllBth.h"
169#undef BTH_PGMPOOLKIND_PT_FOR_PT
170#undef BTH_PGMPOOLKIND_ROOT
171#undef PGM_BTH_NAME
172#undef PGM_GST_TYPE
173#undef PGM_GST_NAME
174
175/* Guest - 32-bit mode */
176#define PGM_GST_TYPE PGM_TYPE_32BIT
177#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
178#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
179#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
180#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
181#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_FOR_32BIT
182#include "PGMGstDefs.h"
183#include "PGMAllBth.h"
184#undef BTH_PGMPOOLKIND_PT_FOR_BIG
185#undef BTH_PGMPOOLKIND_PT_FOR_PT
186#undef BTH_PGMPOOLKIND_ROOT
187#undef PGM_BTH_NAME
188#undef PGM_GST_TYPE
189#undef PGM_GST_NAME
190
191
192/* Guest - PAE mode */
193#define PGM_GST_TYPE PGM_TYPE_PAE
194#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
195#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
196#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
197#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
198#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT
199#include "PGMGstDefs.h"
200#include "PGMAllGst.h"
201#include "PGMAllBth.h"
202#undef BTH_PGMPOOLKIND_PT_FOR_BIG
203#undef BTH_PGMPOOLKIND_PT_FOR_PT
204#undef BTH_PGMPOOLKIND_ROOT
205#undef PGM_BTH_NAME
206#undef PGM_GST_TYPE
207#undef PGM_GST_NAME
208
209#undef PGM_SHW_TYPE
210#undef PGM_SHW_NAME
211
212
213#ifndef IN_RC /* AMD64 implies VT-x/AMD-V */
214/*
215 * Shadow - AMD64 mode
216 */
217# define PGM_SHW_TYPE PGM_TYPE_AMD64
218# define PGM_SHW_NAME(name) PGM_SHW_NAME_AMD64(name)
219# include "PGMAllShw.h"
220
221/* Guest - protected mode (only used for AMD-V nested paging in 64 bits mode) */
222# define PGM_GST_TYPE PGM_TYPE_PROT
223# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
224# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
225# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
226# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PD_PHYS
227# include "PGMGstDefs.h"
228# include "PGMAllBth.h"
229# undef BTH_PGMPOOLKIND_PT_FOR_PT
230# undef BTH_PGMPOOLKIND_ROOT
231# undef PGM_BTH_NAME
232# undef PGM_GST_TYPE
233# undef PGM_GST_NAME
234
235# ifdef VBOX_WITH_64_BITS_GUESTS
236/* Guest - AMD64 mode */
237# define PGM_GST_TYPE PGM_TYPE_AMD64
238# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
239# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
240# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
241# define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
242# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_64BIT_PML4
243# include "PGMGstDefs.h"
244# include "PGMAllGst.h"
245# include "PGMAllBth.h"
246# undef BTH_PGMPOOLKIND_PT_FOR_BIG
247# undef BTH_PGMPOOLKIND_PT_FOR_PT
248# undef BTH_PGMPOOLKIND_ROOT
249# undef PGM_BTH_NAME
250# undef PGM_GST_TYPE
251# undef PGM_GST_NAME
252# endif /* VBOX_WITH_64_BITS_GUESTS */
253
254# undef PGM_SHW_TYPE
255# undef PGM_SHW_NAME
256
257
258/*
259 * Shadow - Nested paging mode
260 */
261# define PGM_SHW_TYPE PGM_TYPE_NESTED
262# define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED(name)
263# include "PGMAllShw.h"
264
265/* Guest - real mode */
266# define PGM_GST_TYPE PGM_TYPE_REAL
267# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
268# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_REAL(name)
269# include "PGMGstDefs.h"
270# include "PGMAllBth.h"
271# undef PGM_BTH_NAME
272# undef PGM_GST_TYPE
273# undef PGM_GST_NAME
274
275/* Guest - protected mode */
276# define PGM_GST_TYPE PGM_TYPE_PROT
277# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
278# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PROT(name)
279# include "PGMGstDefs.h"
280# include "PGMAllBth.h"
281# undef PGM_BTH_NAME
282# undef PGM_GST_TYPE
283# undef PGM_GST_NAME
284
285/* Guest - 32-bit mode */
286# define PGM_GST_TYPE PGM_TYPE_32BIT
287# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
288# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT(name)
289# include "PGMGstDefs.h"
290# include "PGMAllBth.h"
291# undef PGM_BTH_NAME
292# undef PGM_GST_TYPE
293# undef PGM_GST_NAME
294
295/* Guest - PAE mode */
296# define PGM_GST_TYPE PGM_TYPE_PAE
297# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
298# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE(name)
299# include "PGMGstDefs.h"
300# include "PGMAllBth.h"
301# undef PGM_BTH_NAME
302# undef PGM_GST_TYPE
303# undef PGM_GST_NAME
304
305# ifdef VBOX_WITH_64_BITS_GUESTS
306/* Guest - AMD64 mode */
307# define PGM_GST_TYPE PGM_TYPE_AMD64
308# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
309# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64(name)
310# include "PGMGstDefs.h"
311# include "PGMAllBth.h"
312# undef PGM_BTH_NAME
313# undef PGM_GST_TYPE
314# undef PGM_GST_NAME
315# endif /* VBOX_WITH_64_BITS_GUESTS */
316
317# undef PGM_SHW_TYPE
318# undef PGM_SHW_NAME
319
320
321/*
322 * Shadow - EPT
323 */
324# define PGM_SHW_TYPE PGM_TYPE_EPT
325# define PGM_SHW_NAME(name) PGM_SHW_NAME_EPT(name)
326# include "PGMAllShw.h"
327
328/* Guest - real mode */
329# define PGM_GST_TYPE PGM_TYPE_REAL
330# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
331# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_REAL(name)
332# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
333# include "PGMGstDefs.h"
334# include "PGMAllBth.h"
335# undef BTH_PGMPOOLKIND_PT_FOR_PT
336# undef PGM_BTH_NAME
337# undef PGM_GST_TYPE
338# undef PGM_GST_NAME
339
340/* Guest - protected mode */
341# define PGM_GST_TYPE PGM_TYPE_PROT
342# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
343# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
344# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
345# include "PGMGstDefs.h"
346# include "PGMAllBth.h"
347# undef BTH_PGMPOOLKIND_PT_FOR_PT
348# undef PGM_BTH_NAME
349# undef PGM_GST_TYPE
350# undef PGM_GST_NAME
351
352/* Guest - 32-bit mode */
353# define PGM_GST_TYPE PGM_TYPE_32BIT
354# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
355# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_32BIT(name)
356# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
357# include "PGMGstDefs.h"
358# include "PGMAllBth.h"
359# undef BTH_PGMPOOLKIND_PT_FOR_PT
360# undef PGM_BTH_NAME
361# undef PGM_GST_TYPE
362# undef PGM_GST_NAME
363
364/* Guest - PAE mode */
365# define PGM_GST_TYPE PGM_TYPE_PAE
366# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
367# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PAE(name)
368# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
369# include "PGMGstDefs.h"
370# include "PGMAllBth.h"
371# undef BTH_PGMPOOLKIND_PT_FOR_PT
372# undef PGM_BTH_NAME
373# undef PGM_GST_TYPE
374# undef PGM_GST_NAME
375
376# ifdef VBOX_WITH_64_BITS_GUESTS
377/* Guest - AMD64 mode */
378# define PGM_GST_TYPE PGM_TYPE_AMD64
379# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
380# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_AMD64(name)
381# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
382# include "PGMGstDefs.h"
383# include "PGMAllBth.h"
384# undef BTH_PGMPOOLKIND_PT_FOR_PT
385# undef PGM_BTH_NAME
386# undef PGM_GST_TYPE
387# undef PGM_GST_NAME
388# endif /* VBOX_WITH_64_BITS_GUESTS */
389
390# undef PGM_SHW_TYPE
391# undef PGM_SHW_NAME
392
393#endif /* !IN_RC */
394
395
#ifndef IN_RING3
/**
 * \#PF Handler.
 *
 * Dispatches the page fault to the paging-mode specific worker selected by
 * PGM_BTH_PFN and massages the worker's status code for the trap-handling /
 * GC-return path.  Only compiled for ring-0 and raw-mode contexts.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   uErr        The trap error code (X86_TRAP_PF_XXX bits).
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address.
 */
VMMDECL(int) PGMTrap0eHandler(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    Log(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv eip=%04x:%RGv cr3=%RGp\n", uErr, pvFault, pRegFrame->cs.Sel, (RTGCPTR)pRegFrame->rip, (RTGCPHYS)CPUMGetGuestCR3(pVCpu)));
    STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0e, a);
    /* Reset the attribution pointer; the worker may set it to a more specific
       counter, otherwise it is defaulted to "Misc" before the profile stop. */
    STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );


#ifdef VBOX_WITH_STATISTICS
    /*
     * Error code stats.
     * Classify the fault by the X86_TRAP_PF_XXX bits: user vs supervisor,
     * then not-present / write / reserved-bit / instruction-fetch / read.
     */
    if (uErr & X86_TRAP_PF_US)
    {
        if (!(uErr & X86_TRAP_PF_P))
        {
            if (uErr & X86_TRAP_PF_RW)
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentWrite);
            else
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentRead);
        }
        else if (uErr & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSWrite);
        else if (uErr & X86_TRAP_PF_RSVD)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSReserved);
        else if (uErr & X86_TRAP_PF_ID)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNXE);
        else
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSRead);
    }
    else
    {   /* Supervisor */
        if (!(uErr & X86_TRAP_PF_P))
        {
            if (uErr & X86_TRAP_PF_RW)
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentWrite);
            else
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentRead);
        }
        else if (uErr & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVWrite);
        else if (uErr & X86_TRAP_PF_ID)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSNXE);
        else if (uErr & X86_TRAP_PF_RSVD)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVReserved);
    }
#endif /* VBOX_WITH_STATISTICS */

    /*
     * Call the worker.
     * The worker reports via fLockTaken whether it still holds the PGM lock,
     * so we release it exactly once regardless of which path it returned on.
     */
    bool fLockTaken = false;
    int rc = PGM_BTH_PFN(Trap0eHandler, pVCpu)(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken);
    if (fLockTaken)
    {
        PGM_LOCK_ASSERT_OWNER(pVM);
        pgmUnlock(pVM);
    }
    LogFlow(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv rc=%Rrc\n", uErr, pvFault, rc));

    /*
     * Return code tweaks.
     */
    if (rc != VINF_SUCCESS)
    {
        /* A modified PDE during sync is already handled; callers only need success. */
        if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
            rc = VINF_SUCCESS;

# ifdef IN_RING0
        /* Note: hack alert for difficult to reproduce problem. */
        if (    rc == VERR_PAGE_NOT_PRESENT                 /* SMP only ; disassembly might fail. */
            ||  rc == VERR_PAGE_TABLE_NOT_PRESENT           /* seen with UNI & SMP */
            ||  rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT   /* seen with SMP */
            ||  rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)     /* precaution */
        {
            Log(("WARNING: Unexpected VERR_PAGE_TABLE_NOT_PRESENT (%d) for page fault at %RGv error code %x (rip=%RGv)\n", rc, pvFault, uErr, pRegFrame->rip));
            /* Some kind of inconsistency in the SMP case; it's safe to just execute the instruction again; not sure about single VCPU VMs though. */
            rc = VINF_SUCCESS;
        }
# endif
    }

    STAM_STATS({ if (rc == VINF_EM_RAW_GUEST_TRAP) STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eGuestPF); });
    STAM_STATS({ if (!pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution))
                    pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2Misc; });
    STAM_PROFILE_STOP_EX(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0e, pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution), a);
    return rc;
}
#endif /* !IN_RING3 */
496
497
498/**
499 * Prefetch a page
500 *
501 * Typically used to sync commonly used pages before entering raw mode
502 * after a CR3 reload.
503 *
504 * @returns VBox status code suitable for scheduling.
505 * @retval VINF_SUCCESS on success.
506 * @retval VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
507 * @param pVCpu The cross context virtual CPU structure.
508 * @param GCPtrPage Page to invalidate.
509 */
510VMMDECL(int) PGMPrefetchPage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
511{
512 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Prefetch), a);
513 int rc = PGM_BTH_PFN(PrefetchPage, pVCpu)(pVCpu, GCPtrPage);
514 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Prefetch), a);
515 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
516 return rc;
517}
518
519
520/**
521 * Gets the mapping corresponding to the specified address (if any).
522 *
523 * @returns Pointer to the mapping.
524 * @returns NULL if not
525 *
526 * @param pVM The cross context VM structure.
527 * @param GCPtr The guest context pointer.
528 */
529PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr)
530{
531 PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
532 while (pMapping)
533 {
534 if ((uintptr_t)GCPtr < (uintptr_t)pMapping->GCPtr)
535 break;
536 if ((uintptr_t)GCPtr - (uintptr_t)pMapping->GCPtr < pMapping->cb)
537 return pMapping;
538 pMapping = pMapping->CTX_SUFF(pNext);
539 }
540 return NULL;
541}
542
543
544/**
545 * Verifies a range of pages for read or write access
546 *
547 * Only checks the guest's page tables
548 *
549 * @returns VBox status code.
550 * @param pVCpu The cross context virtual CPU structure.
551 * @param Addr Guest virtual address to check
552 * @param cbSize Access size
553 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
554 * @remarks Current not in use.
555 */
556VMMDECL(int) PGMIsValidAccess(PVMCPU pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
557{
558 /*
559 * Validate input.
560 */
561 if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
562 {
563 AssertMsgFailed(("PGMIsValidAccess: invalid access type %08x\n", fAccess));
564 return VERR_INVALID_PARAMETER;
565 }
566
567 uint64_t fPage;
568 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)Addr, &fPage, NULL);
569 if (RT_FAILURE(rc))
570 {
571 Log(("PGMIsValidAccess: access violation for %RGv rc=%d\n", Addr, rc));
572 return VINF_EM_RAW_GUEST_TRAP;
573 }
574
575 /*
576 * Check if the access would cause a page fault
577 *
578 * Note that hypervisor page directories are not present in the guest's tables, so this check
579 * is sufficient.
580 */
581 bool fWrite = !!(fAccess & X86_PTE_RW);
582 bool fUser = !!(fAccess & X86_PTE_US);
583 if ( !(fPage & X86_PTE_P)
584 || (fWrite && !(fPage & X86_PTE_RW))
585 || (fUser && !(fPage & X86_PTE_US)) )
586 {
587 Log(("PGMIsValidAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPage, fWrite, fUser));
588 return VINF_EM_RAW_GUEST_TRAP;
589 }
590 if ( RT_SUCCESS(rc)
591 && PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize))
592 return PGMIsValidAccess(pVCpu, Addr + PAGE_SIZE, (cbSize > PAGE_SIZE) ? cbSize - PAGE_SIZE : 1, fAccess);
593 return rc;
594}
595
596
/**
 * Verifies a range of pages for read or write access
 *
 * Supports handling of pages marked for dirty bit tracking and CSAM
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS when the whole range is accessible.
 * @retval  VINF_EM_RAW_GUEST_TRAP when the access would fault in the guest.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   Addr        Guest virtual address to check
 * @param   cbSize      Access size
 * @param   fAccess     Access type (r/w, user/supervisor (X86_PTE_*))
 */
VMMDECL(int) PGMVerifyAccess(PVMCPU pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    AssertMsg(!(fAccess & ~(X86_PTE_US | X86_PTE_RW)), ("PGMVerifyAccess: invalid access type %08x\n", fAccess));

    /*
     * Get going.
     * Query the guest PTE flags; failure means the page isn't mapped at all.
     */
    uint64_t fPageGst;
    int rc = PGMGstGetPage(pVCpu, (RTGCPTR)Addr, &fPageGst, NULL);
    if (RT_FAILURE(rc))
    {
        Log(("PGMVerifyAccess: access violation for %RGv rc=%d\n", Addr, rc));
        return VINF_EM_RAW_GUEST_TRAP;
    }

    /*
     * Check if the access would cause a page fault
     *
     * Note that hypervisor page directories are not present in the guest's tables, so this check
     * is sufficient.
     */
    const bool fWrite = !!(fAccess & X86_PTE_RW);
    const bool fUser  = !!(fAccess & X86_PTE_US);
    if (   !(fPageGst & X86_PTE_P)
        || (fWrite && !(fPageGst & X86_PTE_RW))
        || (fUser  && !(fPageGst & X86_PTE_US)) )
    {
        Log(("PGMVerifyAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPageGst, fWrite, fUser));
        return VINF_EM_RAW_GUEST_TRAP;
    }

    /* With nested paging there are no shadow page tables to consult. */
    if (!pVM->pgm.s.fNestedPaging)
    {
        /*
         * Next step is to verify if we protected this page for dirty bit tracking or for CSAM scanning
         */
        rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, NULL, NULL);
        if (    rc == VERR_PAGE_NOT_PRESENT
            ||  rc == VERR_PAGE_TABLE_NOT_PRESENT)
        {
            /*
             * Page is not present in our page tables.
             * Try to sync it!
             */
            /* The assert documents why fAccess bits can be reused directly as
               \#PF error-code bits below. */
            Assert(X86_TRAP_PF_RW == X86_PTE_RW && X86_TRAP_PF_US == X86_PTE_US);
            uint32_t uErr = fAccess & (X86_TRAP_PF_RW | X86_TRAP_PF_US);
            rc = PGM_BTH_PFN(VerifyAccessSyncPage, pVCpu)(pVCpu, Addr, fPageGst, uErr);
            if (rc != VINF_SUCCESS)
                return rc;
        }
        else
            AssertMsg(rc == VINF_SUCCESS, ("PGMShwGetPage %RGv failed with %Rrc\n", Addr, rc));
    }

#if 0 /* def VBOX_STRICT; triggers too often now */
    /*
     * This check is a bit paranoid, but useful.
     */
    /* Note! This will assert when writing to monitored pages (a bit annoying actually). */
    uint64_t fPageShw;
    rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, &fPageShw, NULL);
    if (    (rc == VERR_PAGE_NOT_PRESENT || RT_FAILURE(rc))
        ||  (fWrite && !(fPageShw & X86_PTE_RW))
        ||  (fUser  && !(fPageShw & X86_PTE_US)) )
    {
        AssertMsgFailed(("Unexpected access violation for %RGv! rc=%Rrc write=%d user=%d\n",
                         Addr, rc, fWrite && !(fPageShw & X86_PTE_RW), fUser && !(fPageShw & X86_PTE_US)));
        return VINF_EM_RAW_GUEST_TRAP;
    }
#endif

    /* Does the range span more than one page (or wrap around the address space)? */
    if (    RT_SUCCESS(rc)
        && (    PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize - 1)
            ||  Addr + cbSize < Addr))
    {
        /* Don't recursively call PGMVerifyAccess as we might run out of stack. */
        /* Each iteration verifies one byte of the next page; since page
           attributes apply to the whole page, one byte per page suffices, and
           the cbSize=1 call below cannot recurse further. */
        for (;;)
        {
            Addr += PAGE_SIZE;
            if (cbSize > PAGE_SIZE)
                cbSize -= PAGE_SIZE;
            else
                cbSize = 1;
            rc = PGMVerifyAccess(pVCpu, Addr, 1, fAccess);
            if (rc != VINF_SUCCESS)
                break;
            if (PAGE_ADDRESS(Addr) == PAGE_ADDRESS(Addr + cbSize - 1))
                break;
        }
    }
    return rc;
}
702
703
/**
 * Emulation of the invlpg instruction (HC only actually).
 *
 * @returns Strict VBox status code, special care required.
 * @retval  VINF_PGM_SYNC_CR3 - handled.
 * @retval  VINF_EM_RAW_EMULATE_INSTR - not handled (RC only).
 * @retval  VERR_REM_FLUSHED_PAGES_OVERFLOW - not handled.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtrPage   Page to invalidate.
 *
 * @remark  ASSUMES the page table entry or page directory is valid. Fairly
 *          safe, but there could be edge cases!
 *
 * @todo    Flush page or page directory only if necessary!
 * @todo    VBOXSTRICTRC
 */
VMMDECL(int) PGMInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    int rc;
    Log3(("PGMInvalidatePage: GCPtrPage=%RGv\n", GCPtrPage));

#if !defined(IN_RING3) && defined(VBOX_WITH_REM)
    /*
     * Notify the recompiler so it can record this instruction.
     */
    REMNotifyInvalidatePage(pVM, GCPtrPage);
#endif /* !IN_RING3 */


#ifdef IN_RC
    /*
     * Check for conflicts and pending CR3 monitoring updates.
     * Raw-mode only: the hypervisor mappings may float around in the guest
     * address space and must not collide with guest-mapped pages.
     */
    if (pgmMapAreMappingsFloating(pVM))
    {
        if (    pgmGetMapping(pVM, GCPtrPage)
            &&  PGMGstGetPage(pVCpu, GCPtrPage, NULL, NULL) != VERR_PAGE_TABLE_NOT_PRESENT)
        {
            /* Guest page overlaps a hypervisor mapping: force a full CR3 resync. */
            LogFlow(("PGMGCInvalidatePage: Conflict!\n"));
            VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
            STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRCInvlPgConflict);
            return VINF_PGM_SYNC_CR3;
        }

        if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
        {
            /* Pending CR3 monitoring update must be done in ring-3 first. */
            LogFlow(("PGMGCInvalidatePage: PGM_SYNC_MONITOR_CR3 -> reinterpret instruction in R3\n"));
            STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRCInvlPgSyncMonCR3);
            return VINF_EM_RAW_EMULATE_INSTR;
        }
    }
#endif /* IN_RC */

    /*
     * Call paging mode specific worker.
     */
    STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InvalidatePage), a);
    pgmLock(pVM);
    rc = PGM_BTH_PFN(InvalidatePage, pVCpu)(pVCpu, GCPtrPage);
    pgmUnlock(pVM);
    STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InvalidatePage), a);

#ifdef IN_RING3
    /*
     * Check if we have a pending update of the CR3 monitoring.
     */
    if (    RT_SUCCESS(rc)
        &&  (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3))
    {
        pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
        Assert(!pVM->pgm.s.fMappingsFixed); Assert(pgmMapAreMappingsEnabled(pVM));
    }

# ifdef VBOX_WITH_RAW_MODE
    /*
     * Inform CSAM about the flush
     *
     * Note: This is to check if monitored pages have been changed; when we implement
     *       callbacks for virtual handlers, this is no longer required.
     */
    CSAMR3FlushPage(pVM, GCPtrPage);
# endif
#endif /* IN_RING3 */

    /* Ignore all irrelevant error codes. */
    /* A not-present entry at any paging level simply means there is nothing
       to invalidate, which is fine for invlpg semantics. */
    if (    rc == VERR_PAGE_NOT_PRESENT
        ||  rc == VERR_PAGE_TABLE_NOT_PRESENT
        ||  rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT
        ||  rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)
        rc = VINF_SUCCESS;

    return rc;
}
799
800
801/**
802 * Executes an instruction using the interpreter.
803 *
804 * @returns VBox status code (appropriate for trap handling and GC return).
805 * @param pVM The cross context VM structure.
806 * @param pVCpu The cross context virtual CPU structure.
807 * @param pRegFrame Register frame.
808 * @param pvFault Fault address.
809 */
810VMMDECL(VBOXSTRICTRC) PGMInterpretInstruction(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
811{
812 NOREF(pVM);
813 VBOXSTRICTRC rc = EMInterpretInstruction(pVCpu, pRegFrame, pvFault);
814 if (rc == VERR_EM_INTERPRETER)
815 rc = VINF_EM_RAW_EMULATE_INSTR;
816 if (rc != VINF_SUCCESS)
817 Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%RGv)\n", VBOXSTRICTRC_VAL(rc), pvFault));
818 return rc;
819}
820
821
822/**
823 * Gets effective page information (from the VMM page directory).
824 *
825 * @returns VBox status code.
826 * @param pVCpu The cross context virtual CPU structure.
827 * @param GCPtr Guest Context virtual address of the page.
828 * @param pfFlags Where to store the flags. These are X86_PTE_*.
829 * @param pHCPhys Where to store the HC physical address of the page.
830 * This is page aligned.
831 * @remark You should use PGMMapGetPage() for pages in a mapping.
832 */
833VMMDECL(int) PGMShwGetPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
834{
835 pgmLock(pVCpu->CTX_SUFF(pVM));
836 int rc = PGM_SHW_PFN(GetPage, pVCpu)(pVCpu, GCPtr, pfFlags, pHCPhys);
837 pgmUnlock(pVCpu->CTX_SUFF(pVM));
838 return rc;
839}
840
841
/**
 * Modify page flags for a range of pages in the shadow context.
 *
 * The existing flags are ANDed with the fMask and ORed with the fFlags.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtr       Virtual address of the first page in the range.
 * @param   fFlags      The OR  mask - page flags X86_PTE_*, excluding the page mask of course.
 * @param   fMask       The AND mask - page flags X86_PTE_*.
 *                      Be very CAREFUL when ~'ing constants which could be 32-bit!
 * @param   fOpFlags    A combination of the PGM_MK_PG_XXX flags.
 * @remark  You must use PGMMapModifyPage() for pages in a mapping.
 * @note    NOTE(review): the 'pdm' prefix looks like a typo for 'pgm'; the
 *          PGMShwMakePageXxx wrappers below all call it under this name, so
 *          any rename must update them in the same change.
 */
DECLINLINE(int) pdmShwModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags)
{
    AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
    Assert(!(fOpFlags & ~(PGM_MK_PG_IS_MMIO2 | PGM_MK_PG_IS_WRITE_FAULT)));

    GCPtr &= PAGE_BASE_GC_MASK; /** @todo this ain't necessary, right... */

    PVM pVM = pVCpu->CTX_SUFF(pVM);
    pgmLock(pVM);
    /* Despite the "range" wording above, this call covers exactly one page. */
    int rc = PGM_SHW_PFN(ModifyPage, pVCpu)(pVCpu, GCPtr, PAGE_SIZE, fFlags, fMask, fOpFlags);
    pgmUnlock(pVM);
    return rc;
}
869
870
/**
 * Changing the page flags for a single page in the shadow page tables so as to
 * make it read-only.
 *
 * Clears X86_PTE_RW via the AND mask; no flags are OR'ed in.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtr       Virtual address of the first page in the range.
 * @param   fOpFlags    A combination of the PGM_MK_PG_XXX flags.
 */
VMMDECL(int) PGMShwMakePageReadonly(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
{
    return pdmShwModifyPage(pVCpu, GCPtr, 0, ~(uint64_t)X86_PTE_RW, fOpFlags);
}
884
885
/**
 * Changing the page flags for a single page in the shadow page tables so as to
 * make it writable.
 *
 * The call must know with 101% certainty that the guest page tables maps this
 * as writable too. This function will deal shared, zero and write monitored
 * pages.
 *
 * Sets X86_PTE_RW via the OR mask; the all-ones AND mask keeps everything else.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtr       Virtual address of the first page in the range.
 * @param   fOpFlags    A combination of the PGM_MK_PG_XXX flags.
 */
VMMDECL(int) PGMShwMakePageWritable(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
{
    return pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)0, fOpFlags);
}
903
904
/**
 * Changing the page flags for a single page in the shadow page tables so as to
 * make it not present.
 *
 * The zero AND mask clears every flag, including X86_PTE_P.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtr       Virtual address of the first page in the range.
 * @param   fOpFlags    A combination of the PGM_MK_PG_XXX flags.
 */
VMMDECL(int) PGMShwMakePageNotPresent(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
{
    return pdmShwModifyPage(pVCpu, GCPtr, 0, 0, fOpFlags);
}
918
919
/**
 * Changing the page flags for a single page in the shadow page tables so as to
 * make it supervisor and writable.
 *
 * This if for dealing with CR0.WP=0 and readonly user pages.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtr       Virtual address of the first page in the range.
 * @param   fBigPage    Whether or not this is a big page. If it is, we have to
 *                      change the shadow PDE as well.  If it isn't, the caller
 *                      has checked that the shadow PDE doesn't need changing.
 *                      We ASSUME 4KB pages backing the big page here!
 * @param   fOpFlags    A combination of the PGM_MK_PG_XXX flags.
 */
int pgmShwMakePageSupervisorAndWritable(PVMCPU pVCpu, RTGCPTR GCPtr, bool fBigPage, uint32_t fOpFlags)
{
    /* Set RW and clear US on the PTE level first. */
    int rc = pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)X86_PTE_US, fOpFlags);
    if (rc == VINF_SUCCESS && fBigPage)
    {
        /* this is a bit ugly... */
        /* For big pages the shadow PDE must be made writable too; the layout
           of the PDE depends on the current shadow paging mode. */
        switch (pVCpu->pgm.s.enmShadowMode)
        {
            case PGMMODE_32_BIT:
            {
                PX86PDE pPde = pgmShwGet32BitPDEPtr(pVCpu, GCPtr);
                AssertReturn(pPde, VERR_INTERNAL_ERROR_3);
                Log(("pgmShwMakePageSupervisorAndWritable: PDE=%#llx", pPde->u));
                pPde->n.u1Write = 1;
                Log(("-> PDE=%#llx (32)\n", pPde->u));
                break;
            }
            case PGMMODE_PAE:
            case PGMMODE_PAE_NX:
            {
                PX86PDEPAE pPde = pgmShwGetPaePDEPtr(pVCpu, GCPtr);
                AssertReturn(pPde, VERR_INTERNAL_ERROR_3);
                Log(("pgmShwMakePageSupervisorAndWritable: PDE=%#llx", pPde->u));
                pPde->n.u1Write = 1;
                Log(("-> PDE=%#llx (PAE)\n", pPde->u));
                break;
            }
            default:
                /* Big pages only exist in 32-bit and PAE shadow modes here. */
                AssertFailedReturn(VERR_INTERNAL_ERROR_4);
        }
    }
    return rc;
}
968
969
970/**
971 * Gets the shadow page directory for the specified address, PAE.
972 *
973 * @returns Pointer to the shadow PD.
974 * @param pVCpu The cross context virtual CPU structure.
975 * @param GCPtr The address.
976 * @param uGstPdpe Guest PDPT entry. Valid.
977 * @param ppPD Receives address of page directory
978 */
979int pgmShwSyncPaePDPtr(PVMCPU pVCpu, RTGCPTR GCPtr, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
980{
981 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
982 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
983 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
984 PVM pVM = pVCpu->CTX_SUFF(pVM);
985 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
986 PPGMPOOLPAGE pShwPage;
987 int rc;
988
989 PGM_LOCK_ASSERT_OWNER(pVM);
990
991 /* Allocate page directory if not present. */
992 if ( !pPdpe->n.u1Present
993 && !(pPdpe->u & X86_PDPE_PG_MASK))
994 {
995 RTGCPTR64 GCPdPt;
996 PGMPOOLKIND enmKind;
997
998 if (pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu))
999 {
1000 /* AMD-V nested paging or real/protected mode without paging. */
1001 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
1002 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
1003 }
1004 else
1005 {
1006 if (CPUMGetGuestCR4(pVCpu) & X86_CR4_PAE)
1007 {
1008 if (!(uGstPdpe & X86_PDPE_P))
1009 {
1010 /* PD not present; guest must reload CR3 to change it.
1011 * No need to monitor anything in this case.
1012 */
1013 Assert(!HMIsEnabled(pVM));
1014
1015 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1016 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
1017 uGstPdpe |= X86_PDPE_P;
1018 }
1019 else
1020 {
1021 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1022 enmKind = PGMPOOLKIND_PAE_PD_FOR_PAE_PD;
1023 }
1024 }
1025 else
1026 {
1027 GCPdPt = CPUMGetGuestCR3(pVCpu);
1028 enmKind = (PGMPOOLKIND)(PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD + iPdPt);
1029 }
1030 }
1031
1032 /* Create a reference back to the PDPT by using the index in its shadow page. */
1033 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1034 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPdPt, false /*fLockPage*/,
1035 &pShwPage);
1036 AssertRCReturn(rc, rc);
1037
1038 /* The PD was cached or created; hook it up now. */
1039 pPdpe->u |= pShwPage->Core.Key | (uGstPdpe & (X86_PDPE_P | X86_PDPE_A));
1040
1041# if defined(IN_RC)
1042 /*
1043 * In 32 bits PAE mode we *must* invalidate the TLB when changing a
1044 * PDPT entry; the CPU fetches them only during cr3 load, so any
1045 * non-present PDPT will continue to cause page faults.
1046 */
1047 ASMReloadCR3();
1048# endif
1049 PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdpe);
1050 }
1051 else
1052 {
1053 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
1054 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1055 Assert((pPdpe->u & X86_PDPE_PG_MASK) == pShwPage->Core.Key);
1056
1057 pgmPoolCacheUsed(pPool, pShwPage);
1058 }
1059 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1060 return VINF_SUCCESS;
1061}
1062
1063
1064/**
1065 * Gets the pointer to the shadow page directory entry for an address, PAE.
1066 *
1067 * @returns Pointer to the PDE.
1068 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1069 * @param GCPtr The address.
1070 * @param ppShwPde Receives the address of the pgm pool page for the shadow page directory
1071 */
1072DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPU pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde)
1073{
1074 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
1075 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
1076 PVM pVM = pVCpu->CTX_SUFF(pVM);
1077
1078 PGM_LOCK_ASSERT_OWNER(pVM);
1079
1080 AssertReturn(pPdpt, VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT); /* can't happen */
1081 if (!pPdpt->a[iPdPt].n.u1Present)
1082 {
1083 LogFlow(("pgmShwGetPaePoolPagePD: PD %d not present (%RX64)\n", iPdPt, pPdpt->a[iPdPt].u));
1084 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1085 }
1086 AssertMsg(pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK, ("GCPtr=%RGv\n", GCPtr));
1087
1088 /* Fetch the pgm pool shadow descriptor. */
1089 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
1090 AssertReturn(pShwPde, VERR_PGM_POOL_GET_PAGE_FAILED);
1091
1092 *ppShwPde = pShwPde;
1093 return VINF_SUCCESS;
1094}
1095
1096#ifndef IN_RC
1097
1098/**
1099 * Syncs the SHADOW page directory pointer for the specified address.
1100 *
1101 * Allocates backing pages in case the PDPT or PML4 entry is missing.
1102 *
1103 * The caller is responsible for making sure the guest has a valid PD before
1104 * calling this function.
1105 *
1106 * @returns VBox status code.
1107 * @param pVCpu The cross context virtual CPU structure.
1108 * @param GCPtr The address.
1109 * @param uGstPml4e Guest PML4 entry (valid).
1110 * @param uGstPdpe Guest PDPT entry (valid).
1111 * @param ppPD Receives address of page directory
1112 */
1113static int pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
1114{
1115 PVM pVM = pVCpu->CTX_SUFF(pVM);
1116 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1117 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1118 PX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
1119 bool fNestedPagingOrNoGstPaging = pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu);
1120 PPGMPOOLPAGE pShwPage;
1121 int rc;
1122
1123 PGM_LOCK_ASSERT_OWNER(pVM);
1124
1125 /* Allocate page directory pointer table if not present. */
1126 if ( !pPml4e->n.u1Present
1127 && !(pPml4e->u & X86_PML4E_PG_MASK))
1128 {
1129 RTGCPTR64 GCPml4;
1130 PGMPOOLKIND enmKind;
1131
1132 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1133
1134 if (fNestedPagingOrNoGstPaging)
1135 {
1136 /* AMD-V nested paging or real/protected mode without paging */
1137 GCPml4 = (RTGCPTR64)iPml4 << X86_PML4_SHIFT;
1138 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_PHYS;
1139 }
1140 else
1141 {
1142 GCPml4 = uGstPml4e & X86_PML4E_PG_MASK;
1143 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT;
1144 }
1145
1146 /* Create a reference back to the PDPT by using the index in its shadow page. */
1147 rc = pgmPoolAlloc(pVM, GCPml4, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1148 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, false /*fLockPage*/,
1149 &pShwPage);
1150 AssertRCReturn(rc, rc);
1151 }
1152 else
1153 {
1154 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
1155 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1156
1157 pgmPoolCacheUsed(pPool, pShwPage);
1158 }
1159 /* The PDPT was cached or created; hook it up now. */
1160 pPml4e->u |= pShwPage->Core.Key | (uGstPml4e & pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask);
1161
1162 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1163 PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1164 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1165
1166 /* Allocate page directory if not present. */
1167 if ( !pPdpe->n.u1Present
1168 && !(pPdpe->u & X86_PDPE_PG_MASK))
1169 {
1170 RTGCPTR64 GCPdPt;
1171 PGMPOOLKIND enmKind;
1172
1173 if (fNestedPagingOrNoGstPaging)
1174 {
1175 /* AMD-V nested paging or real/protected mode without paging */
1176 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
1177 enmKind = PGMPOOLKIND_64BIT_PD_FOR_PHYS;
1178 }
1179 else
1180 {
1181 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1182 enmKind = PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD;
1183 }
1184
1185 /* Create a reference back to the PDPT by using the index in its shadow page. */
1186 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1187 pShwPage->idx, iPdPt, false /*fLockPage*/,
1188 &pShwPage);
1189 AssertRCReturn(rc, rc);
1190 }
1191 else
1192 {
1193 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
1194 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1195
1196 pgmPoolCacheUsed(pPool, pShwPage);
1197 }
1198 /* The PD was cached or created; hook it up now. */
1199 pPdpe->u |= pShwPage->Core.Key | (uGstPdpe & pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask);
1200
1201 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1202 return VINF_SUCCESS;
1203}
1204
1205
1206/**
1207 * Gets the SHADOW page directory pointer for the specified address (long mode).
1208 *
1209 * @returns VBox status code.
1210 * @param pVCpu The cross context virtual CPU structure.
1211 * @param GCPtr The address.
1212 * @param ppPdpt Receives address of pdpt
1213 * @param ppPD Receives address of page directory
1214 */
1215DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
1216{
1217 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1218 PCX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
1219
1220 PGM_LOCK_ASSERT_OWNER(pVCpu->CTX_SUFF(pVM));
1221
1222 AssertReturn(pPml4e, VERR_PGM_PML4_MAPPING);
1223 if (ppPml4e)
1224 *ppPml4e = (PX86PML4E)pPml4e;
1225
1226 Log4(("pgmShwGetLongModePDPtr %RGv (%RHv) %RX64\n", GCPtr, pPml4e, pPml4e->u));
1227
1228 if (!pPml4e->n.u1Present)
1229 return VERR_PAGE_MAP_LEVEL4_NOT_PRESENT;
1230
1231 PVM pVM = pVCpu->CTX_SUFF(pVM);
1232 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1233 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
1234 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1235
1236 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1237 PCX86PDPT pPdpt = *ppPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1238 if (!pPdpt->a[iPdPt].n.u1Present)
1239 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1240
1241 pShwPage = pgmPoolGetPage(pPool, pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
1242 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1243
1244 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1245 Log4(("pgmShwGetLongModePDPtr %RGv -> *ppPD=%p PDE=%p/%RX64\n", GCPtr, *ppPD, &(*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK], (*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK].u));
1246 return VINF_SUCCESS;
1247}
1248
1249
1250/**
1251 * Syncs the SHADOW EPT page directory pointer for the specified address. Allocates
1252 * backing pages in case the PDPT or PML4 entry is missing.
1253 *
1254 * @returns VBox status code.
1255 * @param pVCpu The cross context virtual CPU structure.
1256 * @param GCPtr The address.
1257 * @param ppPdpt Receives address of pdpt
1258 * @param ppPD Receives address of page directory
1259 */
1260static int pgmShwGetEPTPDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD)
1261{
1262 PVM pVM = pVCpu->CTX_SUFF(pVM);
1263 const unsigned iPml4 = (GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
1264 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1265 PEPTPML4 pPml4;
1266 PEPTPML4E pPml4e;
1267 PPGMPOOLPAGE pShwPage;
1268 int rc;
1269
1270 Assert(pVM->pgm.s.fNestedPaging);
1271 PGM_LOCK_ASSERT_OWNER(pVM);
1272
1273 pPml4 = (PEPTPML4)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1274 Assert(pPml4);
1275
1276 /* Allocate page directory pointer table if not present. */
1277 pPml4e = &pPml4->a[iPml4];
1278 if ( !pPml4e->n.u1Present
1279 && !(pPml4e->u & EPT_PML4E_PG_MASK))
1280 {
1281 Assert(!(pPml4e->u & EPT_PML4E_PG_MASK));
1282 RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT;
1283
1284 rc = pgmPoolAlloc(pVM, GCPml4, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1285 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, false /*fLockPage*/,
1286 &pShwPage);
1287 AssertRCReturn(rc, rc);
1288 }
1289 else
1290 {
1291 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
1292 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1293
1294 pgmPoolCacheUsed(pPool, pShwPage);
1295 }
1296 /* The PDPT was cached or created; hook it up now and fill with the default value. */
1297 pPml4e->u = pShwPage->Core.Key;
1298 pPml4e->n.u1Present = 1;
1299 pPml4e->n.u1Write = 1;
1300 pPml4e->n.u1Execute = 1;
1301
1302 const unsigned iPdPt = (GCPtr >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
1303 PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1304 PEPTPDPTE pPdpe = &pPdpt->a[iPdPt];
1305
1306 if (ppPdpt)
1307 *ppPdpt = pPdpt;
1308
1309 /* Allocate page directory if not present. */
1310 if ( !pPdpe->n.u1Present
1311 && !(pPdpe->u & EPT_PDPTE_PG_MASK))
1312 {
1313 RTGCPTR64 GCPdPt = (RTGCPTR64)iPdPt << EPT_PDPT_SHIFT;
1314 rc = pgmPoolAlloc(pVM, GCPdPt, PGMPOOLKIND_EPT_PD_FOR_PHYS, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1315 pShwPage->idx, iPdPt, false /*fLockPage*/,
1316 &pShwPage);
1317 AssertRCReturn(rc, rc);
1318 }
1319 else
1320 {
1321 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & EPT_PDPTE_PG_MASK);
1322 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1323
1324 pgmPoolCacheUsed(pPool, pShwPage);
1325 }
1326 /* The PD was cached or created; hook it up now and fill with the default value. */
1327 pPdpe->u = pShwPage->Core.Key;
1328 pPdpe->n.u1Present = 1;
1329 pPdpe->n.u1Write = 1;
1330 pPdpe->n.u1Execute = 1;
1331
1332 *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1333 return VINF_SUCCESS;
1334}
1335
1336#endif /* IN_RC */
1337
1338#ifdef IN_RING0
1339/**
1340 * Synchronizes a range of nested page table entries.
1341 *
1342 * The caller must own the PGM lock.
1343 *
1344 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1345 * @param GCPhys Where to start.
1346 * @param cPages How many pages which entries should be synced.
1347 * @param enmShwPagingMode The shadow paging mode (PGMMODE_EPT for VT-x,
1348 * host paging mode for AMD-V).
1349 */
1350int pgmShwSyncNestedPageLocked(PVMCPU pVCpu, RTGCPHYS GCPhys, uint32_t cPages, PGMMODE enmShwPagingMode)
1351{
1352 PGM_LOCK_ASSERT_OWNER(pVCpu->CTX_SUFF(pVM));
1353
1354 int rc;
1355 switch (enmShwPagingMode)
1356 {
1357 case PGMMODE_32_BIT:
1358 {
1359 X86PDE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1360 rc = PGM_BTH_NAME_32BIT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1361 break;
1362 }
1363
1364 case PGMMODE_PAE:
1365 case PGMMODE_PAE_NX:
1366 {
1367 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1368 rc = PGM_BTH_NAME_PAE_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1369 break;
1370 }
1371
1372 case PGMMODE_AMD64:
1373 case PGMMODE_AMD64_NX:
1374 {
1375 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1376 rc = PGM_BTH_NAME_AMD64_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1377 break;
1378 }
1379
1380 case PGMMODE_EPT:
1381 {
1382 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1383 rc = PGM_BTH_NAME_EPT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1384 break;
1385 }
1386
1387 default:
1388 AssertMsgFailedReturn(("%d\n", enmShwPagingMode), VERR_IPE_NOT_REACHED_DEFAULT_CASE);
1389 }
1390 return rc;
1391}
1392#endif /* IN_RING0 */
1393
1394
1395/**
1396 * Gets effective Guest OS page information.
1397 *
1398 * When GCPtr is in a big page, the function will return as if it was a normal
1399 * 4KB page. If the need for distinguishing between big and normal page becomes
1400 * necessary at a later point, a PGMGstGetPage() will be created for that
1401 * purpose.
1402 *
1403 * @returns VBox status code.
1404 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1405 * @param GCPtr Guest Context virtual address of the page.
1406 * @param pfFlags Where to store the flags. These are X86_PTE_*, even for big pages.
1407 * @param pGCPhys Where to store the GC physical address of the page.
1408 * This is page aligned. The fact that the
1409 */
1410VMMDECL(int) PGMGstGetPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
1411{
1412 VMCPU_ASSERT_EMT(pVCpu);
1413 return PGM_GST_PFN(GetPage, pVCpu)(pVCpu, GCPtr, pfFlags, pGCPhys);
1414}
1415
1416
1417/**
1418 * Performs a guest page table walk.
1419 *
1420 * The guest should be in paged protect mode or long mode when making a call to
1421 * this function.
1422 *
1423 * @returns VBox status code.
1424 * @retval VINF_SUCCESS on success.
1425 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
1426 * @retval VERR_PGM_NOT_USED_IN_MODE if not paging isn't enabled. @a pWalk is
1427 * not valid, except enmType is PGMPTWALKGSTTYPE_INVALID.
1428 *
1429 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1430 * @param GCPtr The guest virtual address to walk by.
1431 * @param pWalk Where to return the walk result. This is valid on some
1432 * error codes as well.
1433 */
1434int pgmGstPtWalk(PVMCPU pVCpu, RTGCPTR GCPtr, PPGMPTWALKGST pWalk)
1435{
1436 VMCPU_ASSERT_EMT(pVCpu);
1437 switch (pVCpu->pgm.s.enmGuestMode)
1438 {
1439 case PGMMODE_32_BIT:
1440 pWalk->enmType = PGMPTWALKGSTTYPE_32BIT;
1441 return PGM_GST_NAME_32BIT(Walk)(pVCpu, GCPtr, &pWalk->u.Legacy);
1442
1443 case PGMMODE_PAE:
1444 case PGMMODE_PAE_NX:
1445 pWalk->enmType = PGMPTWALKGSTTYPE_PAE;
1446 return PGM_GST_NAME_PAE(Walk)(pVCpu, GCPtr, &pWalk->u.Pae);
1447
1448#if !defined(IN_RC)
1449 case PGMMODE_AMD64:
1450 case PGMMODE_AMD64_NX:
1451 pWalk->enmType = PGMPTWALKGSTTYPE_AMD64;
1452 return PGM_GST_NAME_AMD64(Walk)(pVCpu, GCPtr, &pWalk->u.Amd64);
1453#endif
1454
1455 case PGMMODE_REAL:
1456 case PGMMODE_PROTECTED:
1457 pWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
1458 return VERR_PGM_NOT_USED_IN_MODE;
1459
1460#if defined(IN_RC)
1461 case PGMMODE_AMD64:
1462 case PGMMODE_AMD64_NX:
1463#endif
1464 case PGMMODE_NESTED:
1465 case PGMMODE_EPT:
1466 default:
1467 AssertFailed();
1468 pWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
1469 return VERR_PGM_NOT_USED_IN_MODE;
1470 }
1471}
1472
1473
1474/**
1475 * Checks if the page is present.
1476 *
1477 * @returns true if the page is present.
1478 * @returns false if the page is not present.
1479 * @param pVCpu The cross context virtual CPU structure.
1480 * @param GCPtr Address within the page.
1481 */
1482VMMDECL(bool) PGMGstIsPagePresent(PVMCPU pVCpu, RTGCPTR GCPtr)
1483{
1484 VMCPU_ASSERT_EMT(pVCpu);
1485 int rc = PGMGstGetPage(pVCpu, GCPtr, NULL, NULL);
1486 return RT_SUCCESS(rc);
1487}
1488
1489
1490/**
1491 * Sets (replaces) the page flags for a range of pages in the guest's tables.
1492 *
1493 * @returns VBox status code.
1494 * @param pVCpu The cross context virtual CPU structure.
1495 * @param GCPtr The address of the first page.
1496 * @param cb The size of the range in bytes.
1497 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
1498 */
1499VMMDECL(int) PGMGstSetPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
1500{
1501 VMCPU_ASSERT_EMT(pVCpu);
1502 return PGMGstModifyPage(pVCpu, GCPtr, cb, fFlags, 0);
1503}
1504
1505
1506/**
1507 * Modify page flags for a range of pages in the guest's tables
1508 *
1509 * The existing flags are ANDed with the fMask and ORed with the fFlags.
1510 *
1511 * @returns VBox status code.
1512 * @param pVCpu The cross context virtual CPU structure.
1513 * @param GCPtr Virtual address of the first page in the range.
1514 * @param cb Size (in bytes) of the range to apply the modification to.
1515 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
1516 * @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
1517 * Be very CAREFUL when ~'ing constants which could be 32-bit!
1518 */
1519VMMDECL(int) PGMGstModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
1520{
1521 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,GstModifyPage), a);
1522 VMCPU_ASSERT_EMT(pVCpu);
1523
1524 /*
1525 * Validate input.
1526 */
1527 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
1528 Assert(cb);
1529
1530 LogFlow(("PGMGstModifyPage %RGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));
1531
1532 /*
1533 * Adjust input.
1534 */
1535 cb += GCPtr & PAGE_OFFSET_MASK;
1536 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
1537 GCPtr = (GCPtr & PAGE_BASE_GC_MASK);
1538
1539 /*
1540 * Call worker.
1541 */
1542 int rc = PGM_GST_PFN(ModifyPage, pVCpu)(pVCpu, GCPtr, cb, fFlags, fMask);
1543
1544 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,GstModifyPage), a);
1545 return rc;
1546}
1547
1548
1549#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1550
1551/**
1552 * Performs the lazy mapping of the 32-bit guest PD.
1553 *
1554 * @returns VBox status code.
1555 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1556 * @param ppPd Where to return the pointer to the mapping. This is
1557 * always set.
1558 */
1559int pgmGstLazyMap32BitPD(PVMCPU pVCpu, PX86PD *ppPd)
1560{
1561 PVM pVM = pVCpu->CTX_SUFF(pVM);
1562 pgmLock(pVM);
1563
1564 Assert(!pVCpu->pgm.s.CTX_SUFF(pGst32BitPd));
1565
1566 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAGE_MASK;
1567 PPGMPAGE pPage;
1568 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
1569 if (RT_SUCCESS(rc))
1570 {
1571 RTHCPTR HCPtrGuestCR3;
1572 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
1573 if (RT_SUCCESS(rc))
1574 {
1575 pVCpu->pgm.s.pGst32BitPdR3 = (R3PTRTYPE(PX86PD))HCPtrGuestCR3;
1576# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1577 pVCpu->pgm.s.pGst32BitPdR0 = (R0PTRTYPE(PX86PD))HCPtrGuestCR3;
1578# endif
1579 *ppPd = (PX86PD)HCPtrGuestCR3;
1580
1581 pgmUnlock(pVM);
1582 return VINF_SUCCESS;
1583 }
1584
1585 AssertRC(rc);
1586 }
1587 pgmUnlock(pVM);
1588
1589 *ppPd = NULL;
1590 return rc;
1591}
1592
1593
1594/**
1595 * Performs the lazy mapping of the PAE guest PDPT.
1596 *
1597 * @returns VBox status code.
1598 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1599 * @param ppPdpt Where to return the pointer to the mapping. This is
1600 * always set.
1601 */
1602int pgmGstLazyMapPaePDPT(PVMCPU pVCpu, PX86PDPT *ppPdpt)
1603{
1604 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt));
1605 PVM pVM = pVCpu->CTX_SUFF(pVM);
1606 pgmLock(pVM);
1607
1608 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAE_PAGE_MASK;
1609 PPGMPAGE pPage;
1610 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
1611 if (RT_SUCCESS(rc))
1612 {
1613 RTHCPTR HCPtrGuestCR3;
1614 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
1615 if (RT_SUCCESS(rc))
1616 {
1617 pVCpu->pgm.s.pGstPaePdptR3 = (R3PTRTYPE(PX86PDPT))HCPtrGuestCR3;
1618# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1619 pVCpu->pgm.s.pGstPaePdptR0 = (R0PTRTYPE(PX86PDPT))HCPtrGuestCR3;
1620# endif
1621 *ppPdpt = (PX86PDPT)HCPtrGuestCR3;
1622
1623 pgmUnlock(pVM);
1624 return VINF_SUCCESS;
1625 }
1626
1627 AssertRC(rc);
1628 }
1629
1630 pgmUnlock(pVM);
1631 *ppPdpt = NULL;
1632 return rc;
1633}
1634
1635
1636/**
1637 * Performs the lazy mapping / updating of a PAE guest PD.
1638 *
1639 * @returns Pointer to the mapping.
1640 * @returns VBox status code.
1641 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1642 * @param iPdpt Which PD entry to map (0..3).
1643 * @param ppPd Where to return the pointer to the mapping. This is
1644 * always set.
1645 */
1646int pgmGstLazyMapPaePD(PVMCPU pVCpu, uint32_t iPdpt, PX86PDPAE *ppPd)
1647{
1648 PVM pVM = pVCpu->CTX_SUFF(pVM);
1649 pgmLock(pVM);
1650
1651 PX86PDPT pGuestPDPT = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
1652 Assert(pGuestPDPT);
1653 Assert(pGuestPDPT->a[iPdpt].n.u1Present);
1654 RTGCPHYS GCPhys = pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK;
1655 bool const fChanged = pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] != GCPhys;
1656
1657 PPGMPAGE pPage;
1658 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
1659 if (RT_SUCCESS(rc))
1660 {
1661 RTRCPTR RCPtr = NIL_RTRCPTR;
1662 RTHCPTR HCPtr = NIL_RTHCPTR;
1663#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1664 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, &HCPtr);
1665 AssertRC(rc);
1666#endif
1667 if (RT_SUCCESS(rc) && fChanged)
1668 {
1669 RCPtr = (RTRCPTR)(RTRCUINTPTR)(pVM->pgm.s.GCPtrCR3Mapping + (1 + iPdpt) * PAGE_SIZE);
1670 rc = PGMMap(pVM, (RTRCUINTPTR)RCPtr, PGM_PAGE_GET_HCPHYS(pPage), PAGE_SIZE, 0);
1671 }
1672 if (RT_SUCCESS(rc))
1673 {
1674 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = (R3PTRTYPE(PX86PDPAE))HCPtr;
1675# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1676 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = (R0PTRTYPE(PX86PDPAE))HCPtr;
1677# endif
1678 if (fChanged)
1679 {
1680 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = GCPhys;
1681 pVCpu->pgm.s.apGstPaePDsRC[iPdpt] = (RCPTRTYPE(PX86PDPAE))RCPtr;
1682 }
1683
1684 *ppPd = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
1685 pgmUnlock(pVM);
1686 return VINF_SUCCESS;
1687 }
1688 }
1689
1690 /* Invalid page or some failure, invalidate the entry. */
1691 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = NIL_RTGCPHYS;
1692 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = 0;
1693# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1694 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = 0;
1695# endif
1696 pVCpu->pgm.s.apGstPaePDsRC[iPdpt] = 0;
1697
1698 pgmUnlock(pVM);
1699 return rc;
1700}
1701
1702#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1703#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1704/**
1705 * Performs the lazy mapping of the 32-bit guest PD.
1706 *
1707 * @returns VBox status code.
1708 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1709 * @param ppPml4 Where to return the pointer to the mapping. This will
1710 * always be set.
1711 */
1712int pgmGstLazyMapPml4(PVMCPU pVCpu, PX86PML4 *ppPml4)
1713{
1714 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4));
1715 PVM pVM = pVCpu->CTX_SUFF(pVM);
1716 pgmLock(pVM);
1717
1718 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_AMD64_PAGE_MASK;
1719 PPGMPAGE pPage;
1720 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
1721 if (RT_SUCCESS(rc))
1722 {
1723 RTHCPTR HCPtrGuestCR3;
1724 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
1725 if (RT_SUCCESS(rc))
1726 {
1727 pVCpu->pgm.s.pGstAmd64Pml4R3 = (R3PTRTYPE(PX86PML4))HCPtrGuestCR3;
1728# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1729 pVCpu->pgm.s.pGstAmd64Pml4R0 = (R0PTRTYPE(PX86PML4))HCPtrGuestCR3;
1730# endif
1731 *ppPml4 = (PX86PML4)HCPtrGuestCR3;
1732
1733 pgmUnlock(pVM);
1734 return VINF_SUCCESS;
1735 }
1736 }
1737
1738 pgmUnlock(pVM);
1739 *ppPml4 = NULL;
1740 return rc;
1741}
1742#endif
1743
1744
1745/**
1746 * Gets the PAE PDPEs values cached by the CPU.
1747 *
1748 * @returns VBox status code.
1749 * @param pVCpu The cross context virtual CPU structure.
1750 * @param paPdpes Where to return the four PDPEs. The array
1751 * pointed to must have 4 entries.
1752 */
1753VMM_INT_DECL(int) PGMGstGetPaePdpes(PVMCPU pVCpu, PX86PDPE paPdpes)
1754{
1755 Assert(pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
1756
1757 paPdpes[0] = pVCpu->pgm.s.aGstPaePdpeRegs[0];
1758 paPdpes[1] = pVCpu->pgm.s.aGstPaePdpeRegs[1];
1759 paPdpes[2] = pVCpu->pgm.s.aGstPaePdpeRegs[2];
1760 paPdpes[3] = pVCpu->pgm.s.aGstPaePdpeRegs[3];
1761 return VINF_SUCCESS;
1762}
1763
1764
1765/**
1766 * Sets the PAE PDPEs values cached by the CPU.
1767 *
1768 * @remarks This must be called *AFTER* PGMUpdateCR3.
1769 *
1770 * @param pVCpu The cross context virtual CPU structure.
1771 * @param paPdpes The four PDPE values. The array pointed to must
1772 * have exactly 4 entries.
1773 *
1774 * @remarks No-long-jump zone!!!
1775 */
1776VMM_INT_DECL(void) PGMGstUpdatePaePdpes(PVMCPU pVCpu, PCX86PDPE paPdpes)
1777{
1778 Assert(pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
1779
1780 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->pgm.s.aGstPaePdpeRegs); i++)
1781 {
1782 if (pVCpu->pgm.s.aGstPaePdpeRegs[i].u != paPdpes[i].u)
1783 {
1784 pVCpu->pgm.s.aGstPaePdpeRegs[i] = paPdpes[i];
1785
1786 /* Force lazy remapping if it changed in any way. */
1787 pVCpu->pgm.s.apGstPaePDsR3[i] = 0;
1788# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1789 pVCpu->pgm.s.apGstPaePDsR0[i] = 0;
1790# endif
1791 pVCpu->pgm.s.apGstPaePDsRC[i] = 0;
1792 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
1793 }
1794 }
1795
1796 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
1797}
1798
1799
1800/**
1801 * Gets the current CR3 register value for the shadow memory context.
1802 * @returns CR3 value.
1803 * @param pVCpu The cross context virtual CPU structure.
1804 */
1805VMMDECL(RTHCPHYS) PGMGetHyperCR3(PVMCPU pVCpu)
1806{
1807 PPGMPOOLPAGE pPoolPage = pVCpu->pgm.s.CTX_SUFF(pShwPageCR3);
1808 AssertPtrReturn(pPoolPage, 0);
1809 return pPoolPage->Core.Key;
1810}
1811
1812
1813/**
1814 * Gets the current CR3 register value for the nested memory context.
1815 * @returns CR3 value.
1816 * @param pVCpu The cross context virtual CPU structure.
1817 * @param enmShadowMode The shadow paging mode.
1818 */
1819VMMDECL(RTHCPHYS) PGMGetNestedCR3(PVMCPU pVCpu, PGMMODE enmShadowMode)
1820{
1821 NOREF(enmShadowMode);
1822 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1823 return pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key;
1824}
1825
1826
1827/**
1828 * Gets the current CR3 register value for the HC intermediate memory context.
1829 * @returns CR3 value.
1830 * @param pVM The cross context VM structure.
1831 */
1832VMMDECL(RTHCPHYS) PGMGetInterHCCR3(PVM pVM)
1833{
1834 switch (pVM->pgm.s.enmHostMode)
1835 {
1836 case SUPPAGINGMODE_32_BIT:
1837 case SUPPAGINGMODE_32_BIT_GLOBAL:
1838 return pVM->pgm.s.HCPhysInterPD;
1839
1840 case SUPPAGINGMODE_PAE:
1841 case SUPPAGINGMODE_PAE_GLOBAL:
1842 case SUPPAGINGMODE_PAE_NX:
1843 case SUPPAGINGMODE_PAE_GLOBAL_NX:
1844 return pVM->pgm.s.HCPhysInterPaePDPT;
1845
1846 case SUPPAGINGMODE_AMD64:
1847 case SUPPAGINGMODE_AMD64_GLOBAL:
1848 case SUPPAGINGMODE_AMD64_NX:
1849 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
1850 return pVM->pgm.s.HCPhysInterPaePDPT;
1851
1852 default:
1853 AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode));
1854 return NIL_RTHCPHYS;
1855 }
1856}
1857
1858
1859/**
1860 * Gets the current CR3 register value for the RC intermediate memory context.
1861 * @returns CR3 value.
1862 * @param pVM The cross context VM structure.
1863 * @param pVCpu The cross context virtual CPU structure.
1864 */
1865VMMDECL(RTHCPHYS) PGMGetInterRCCR3(PVM pVM, PVMCPU pVCpu)
1866{
1867 switch (pVCpu->pgm.s.enmShadowMode)
1868 {
1869 case PGMMODE_32_BIT:
1870 return pVM->pgm.s.HCPhysInterPD;
1871
1872 case PGMMODE_PAE:
1873 case PGMMODE_PAE_NX:
1874 return pVM->pgm.s.HCPhysInterPaePDPT;
1875
1876 case PGMMODE_AMD64:
1877 case PGMMODE_AMD64_NX:
1878 return pVM->pgm.s.HCPhysInterPaePML4;
1879
1880 case PGMMODE_EPT:
1881 case PGMMODE_NESTED:
1882 return 0; /* not relevant */
1883
1884 default:
1885 AssertMsgFailed(("enmShadowMode=%d\n", pVCpu->pgm.s.enmShadowMode));
1886 return NIL_RTHCPHYS;
1887 }
1888}
1889
1890
1891/**
1892 * Gets the CR3 register value for the 32-Bit intermediate memory context.
1893 * @returns CR3 value.
1894 * @param pVM The cross context VM structure.
1895 */
1896VMMDECL(RTHCPHYS) PGMGetInter32BitCR3(PVM pVM)
1897{
1898 return pVM->pgm.s.HCPhysInterPD;
1899}
1900
1901
1902/**
1903 * Gets the CR3 register value for the PAE intermediate memory context.
1904 * @returns CR3 value.
1905 * @param pVM The cross context VM structure.
1906 */
1907VMMDECL(RTHCPHYS) PGMGetInterPaeCR3(PVM pVM)
1908{
1909 return pVM->pgm.s.HCPhysInterPaePDPT;
1910}
1911
1912
1913/**
1914 * Gets the CR3 register value for the AMD64 intermediate memory context.
1915 * @returns CR3 value.
1916 * @param pVM The cross context VM structure.
1917 */
1918VMMDECL(RTHCPHYS) PGMGetInterAmd64CR3(PVM pVM)
1919{
1920 return pVM->pgm.s.HCPhysInterPaePML4;
1921}
1922
1923
1924/**
1925 * Performs and schedules necessary updates following a CR3 load or reload.
1926 *
1927 * This will normally involve mapping the guest PD or nPDPT
1928 *
1929 * @returns VBox status code.
1930 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
1931 * safely be ignored and overridden since the FF will be set too then.
1932 * @param pVCpu The cross context virtual CPU structure.
1933 * @param cr3 The new cr3.
1934 * @param fGlobal Indicates whether this is a global flush or not.
1935 */
1936VMMDECL(int) PGMFlushTLB(PVMCPU pVCpu, uint64_t cr3, bool fGlobal)
1937{
1938 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLB), a);
1939 PVM pVM = pVCpu->CTX_SUFF(pVM);
1940
1941 VMCPU_ASSERT_EMT(pVCpu);
1942
1943 /*
1944 * Always flag the necessary updates; necessary for hardware acceleration
1945 */
1946 /** @todo optimize this, it shouldn't always be necessary. */
1947 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
1948 if (fGlobal)
1949 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1950 LogFlow(("PGMFlushTLB: cr3=%RX64 OldCr3=%RX64 fGlobal=%d\n", cr3, pVCpu->pgm.s.GCPhysCR3, fGlobal));
1951
1952 /*
1953 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
1954 */
1955 int rc = VINF_SUCCESS;
1956 RTGCPHYS GCPhysCR3;
1957 switch (pVCpu->pgm.s.enmGuestMode)
1958 {
1959 case PGMMODE_PAE:
1960 case PGMMODE_PAE_NX:
1961 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1962 break;
1963 case PGMMODE_AMD64:
1964 case PGMMODE_AMD64_NX:
1965 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
1966 break;
1967 default:
1968 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1969 break;
1970 }
1971 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
1972
1973 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
1974 {
1975 RTGCPHYS GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
1976 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
1977 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
1978 if (RT_LIKELY(rc == VINF_SUCCESS))
1979 {
1980 if (pgmMapAreMappingsFloating(pVM))
1981 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1982 }
1983 else
1984 {
1985 AssertMsg(rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
1986 Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_PGM_SYNC_CR3));
1987 pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3;
1988 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MAP_CR3;
1989 if (pgmMapAreMappingsFloating(pVM))
1990 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;
1991 }
1992
1993 if (fGlobal)
1994 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBNewCR3Global));
1995 else
1996 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBNewCR3));
1997 }
1998 else
1999 {
2000# ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
2001 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
2002 if (pPool->cDirtyPages)
2003 {
2004 pgmLock(pVM);
2005 pgmPoolResetDirtyPages(pVM);
2006 pgmUnlock(pVM);
2007 }
2008# endif
2009 /*
2010 * Check if we have a pending update of the CR3 monitoring.
2011 */
2012 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
2013 {
2014 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
2015 Assert(!pVM->pgm.s.fMappingsFixed); Assert(pgmMapAreMappingsEnabled(pVM));
2016 }
2017 if (fGlobal)
2018 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBSameCR3Global));
2019 else
2020 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBSameCR3));
2021 }
2022
2023 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLB), a);
2024 return rc;
2025}
2026
2027
2028/**
2029 * Performs and schedules necessary updates following a CR3 load or reload when
2030 * using nested or extended paging.
2031 *
2032 * This API is an alternative to PDMFlushTLB that avoids actually flushing the
2033 * TLB and triggering a SyncCR3.
2034 *
2035 * This will normally involve mapping the guest PD or nPDPT
2036 *
2037 * @returns VBox status code.
2038 * @retval VINF_SUCCESS.
2039 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync (not for nested
2040 * paging modes). This can safely be ignored and overridden since the
2041 * FF will be set too then.
2042 * @param pVCpu The cross context virtual CPU structure.
2043 * @param cr3 The new cr3.
2044 */
2045VMMDECL(int) PGMUpdateCR3(PVMCPU pVCpu, uint64_t cr3)
2046{
2047 VMCPU_ASSERT_EMT(pVCpu);
2048 LogFlow(("PGMUpdateCR3: cr3=%RX64 OldCr3=%RX64\n", cr3, pVCpu->pgm.s.GCPhysCR3));
2049
2050 /* We assume we're only called in nested paging mode. */
2051 Assert(pVCpu->CTX_SUFF(pVM)->pgm.s.fNestedPaging || pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
2052 Assert(!pgmMapAreMappingsEnabled(pVCpu->CTX_SUFF(pVM)));
2053 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3));
2054
2055 /*
2056 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
2057 */
2058 int rc = VINF_SUCCESS;
2059 RTGCPHYS GCPhysCR3;
2060 switch (pVCpu->pgm.s.enmGuestMode)
2061 {
2062 case PGMMODE_PAE:
2063 case PGMMODE_PAE_NX:
2064 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
2065 break;
2066 case PGMMODE_AMD64:
2067 case PGMMODE_AMD64_NX:
2068 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
2069 break;
2070 default:
2071 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
2072 break;
2073 }
2074 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
2075
2076 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
2077 {
2078 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2079 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
2080 AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */ /** @todo this isn't true for the mac, but we need hw to test/fix this. */
2081 }
2082
2083 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
2084 return rc;
2085}
2086
2087
2088/**
2089 * Synchronize the paging structures.
2090 *
2091 * This function is called in response to the VM_FF_PGM_SYNC_CR3 and
2092 * VM_FF_PGM_SYNC_CR3_NONGLOBAL. Those two force action flags are set
2093 * in several places, most importantly whenever the CR3 is loaded.
2094 *
2095 * @returns VBox status code. May return VINF_PGM_SYNC_CR3 in RC/R0.
2096 * @retval VERR_PGM_NO_HYPERVISOR_ADDRESS in raw-mode when we're unable to map
2097 * the VMM into guest context.
2098 * @param pVCpu The cross context virtual CPU structure.
2099 * @param cr0 Guest context CR0 register
2100 * @param cr3 Guest context CR3 register
2101 * @param cr4 Guest context CR4 register
2102 * @param fGlobal Including global page directories or not
2103 */
2104VMMDECL(int) PGMSyncCR3(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
2105{
2106 int rc;
2107
2108 VMCPU_ASSERT_EMT(pVCpu);
2109
2110 /*
2111 * The pool may have pending stuff and even require a return to ring-3 to
2112 * clear the whole thing.
2113 */
2114 rc = pgmPoolSyncCR3(pVCpu);
2115 if (rc != VINF_SUCCESS)
2116 return rc;
2117
2118 /*
2119 * We might be called when we shouldn't.
2120 *
2121 * The mode switching will ensure that the PD is resynced after every mode
2122 * switch. So, if we find ourselves here when in protected or real mode
2123 * we can safely clear the FF and return immediately.
2124 */
2125 if (pVCpu->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
2126 {
2127 Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
2128 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2129 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2130 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2131 return VINF_SUCCESS;
2132 }
2133
2134 /* If global pages are not supported, then all flushes are global. */
2135 if (!(cr4 & X86_CR4_PGE))
2136 fGlobal = true;
2137 LogFlow(("PGMSyncCR3: cr0=%RX64 cr3=%RX64 cr4=%RX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
2138 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)));
2139
2140 /*
2141 * Check if we need to finish an aborted MapCR3 call (see PGMFlushTLB).
2142 * This should be done before SyncCR3.
2143 */
2144 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MAP_CR3)
2145 {
2146 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MAP_CR3;
2147
2148 RTGCPHYS GCPhysCR3Old = pVCpu->pgm.s.GCPhysCR3; NOREF(GCPhysCR3Old);
2149 RTGCPHYS GCPhysCR3;
2150 switch (pVCpu->pgm.s.enmGuestMode)
2151 {
2152 case PGMMODE_PAE:
2153 case PGMMODE_PAE_NX:
2154 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
2155 break;
2156 case PGMMODE_AMD64:
2157 case PGMMODE_AMD64_NX:
2158 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
2159 break;
2160 default:
2161 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
2162 break;
2163 }
2164 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
2165
2166 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
2167 {
2168 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2169 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
2170 }
2171
2172 /* Make sure we check for pending pgm pool syncs as we clear VMCPU_FF_PGM_SYNC_CR3 later on! */
2173 if ( rc == VINF_PGM_SYNC_CR3
2174 || (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL))
2175 {
2176 Log(("PGMSyncCR3: pending pgm pool sync after MapCR3!\n"));
2177#ifdef IN_RING3
2178 rc = pgmPoolSyncCR3(pVCpu);
2179#else
2180 if (rc == VINF_PGM_SYNC_CR3)
2181 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3Old;
2182 return VINF_PGM_SYNC_CR3;
2183#endif
2184 }
2185 AssertRCReturn(rc, rc);
2186 AssertRCSuccessReturn(rc, VERR_IPE_UNEXPECTED_INFO_STATUS);
2187 }
2188
2189 /*
2190 * Let the 'Bth' function do the work and we'll just keep track of the flags.
2191 */
2192 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
2193 rc = PGM_BTH_PFN(SyncCR3, pVCpu)(pVCpu, cr0, cr3, cr4, fGlobal);
2194 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
2195 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
2196 if (rc == VINF_SUCCESS)
2197 {
2198 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)
2199 {
2200 /* Go back to ring 3 if a pgm pool sync is again pending. */
2201 return VINF_PGM_SYNC_CR3;
2202 }
2203
2204 if (!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
2205 {
2206 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2207 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2208 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2209 }
2210
2211 /*
2212 * Check if we have a pending update of the CR3 monitoring.
2213 */
2214 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
2215 {
2216 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
2217 Assert(!pVCpu->CTX_SUFF(pVM)->pgm.s.fMappingsFixed);
2218 Assert(pgmMapAreMappingsEnabled(pVCpu->CTX_SUFF(pVM)));
2219 }
2220 }
2221
2222 /*
2223 * Now flush the CR3 (guest context).
2224 */
2225 if (rc == VINF_SUCCESS)
2226 PGM_INVL_VCPU_TLBS(pVCpu);
2227 return rc;
2228}
2229
2230
2231/**
2232 * Called whenever CR0 or CR4 in a way which may affect the paging mode.
2233 *
2234 * @returns VBox status code, with the following informational code for
2235 * VM scheduling.
2236 * @retval VINF_SUCCESS if the was no change, or it was successfully dealt with.
2237 * @retval VINF_PGM_CHANGE_MODE if we're in RC or R0 and the mode changes.
2238 * (I.e. not in R3.)
2239 * @retval VINF_EM_SUSPEND or VINF_EM_OFF on a fatal runtime error. (R3 only)
2240 *
2241 * @param pVCpu The cross context virtual CPU structure.
2242 * @param cr0 The new cr0.
2243 * @param cr4 The new cr4.
2244 * @param efer The new extended feature enable register.
2245 */
2246VMMDECL(int) PGMChangeMode(PVMCPU pVCpu, uint64_t cr0, uint64_t cr4, uint64_t efer)
2247{
2248 PGMMODE enmGuestMode;
2249
2250 VMCPU_ASSERT_EMT(pVCpu);
2251
2252 /*
2253 * Calc the new guest mode.
2254 */
2255 if (!(cr0 & X86_CR0_PE))
2256 enmGuestMode = PGMMODE_REAL;
2257 else if (!(cr0 & X86_CR0_PG))
2258 enmGuestMode = PGMMODE_PROTECTED;
2259 else if (!(cr4 & X86_CR4_PAE))
2260 {
2261 bool const fPse = !!(cr4 & X86_CR4_PSE);
2262 if (pVCpu->pgm.s.fGst32BitPageSizeExtension != fPse)
2263 Log(("PGMChangeMode: CR4.PSE %d -> %d\n", pVCpu->pgm.s.fGst32BitPageSizeExtension, fPse));
2264 pVCpu->pgm.s.fGst32BitPageSizeExtension = fPse;
2265 enmGuestMode = PGMMODE_32_BIT;
2266 }
2267 else if (!(efer & MSR_K6_EFER_LME))
2268 {
2269 if (!(efer & MSR_K6_EFER_NXE))
2270 enmGuestMode = PGMMODE_PAE;
2271 else
2272 enmGuestMode = PGMMODE_PAE_NX;
2273 }
2274 else
2275 {
2276 if (!(efer & MSR_K6_EFER_NXE))
2277 enmGuestMode = PGMMODE_AMD64;
2278 else
2279 enmGuestMode = PGMMODE_AMD64_NX;
2280 }
2281
2282 /*
2283 * Did it change?
2284 */
2285 if (pVCpu->pgm.s.enmGuestMode == enmGuestMode)
2286 return VINF_SUCCESS;
2287
2288 /* Flush the TLB */
2289 PGM_INVL_VCPU_TLBS(pVCpu);
2290
2291#ifdef IN_RING3
2292 return PGMR3ChangeMode(pVCpu->CTX_SUFF(pVM), pVCpu, enmGuestMode);
2293#else
2294 LogFlow(("PGMChangeMode: returns VINF_PGM_CHANGE_MODE.\n"));
2295 return VINF_PGM_CHANGE_MODE;
2296#endif
2297}
2298
2299
2300/**
2301 * Called by CPUM or REM when CR0.WP changes to 1.
2302 *
2303 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2304 * @thread EMT
2305 */
2306VMMDECL(void) PGMCr0WpEnabled(PVMCPU pVCpu)
2307{
2308 /*
2309 * Netware WP0+RO+US hack cleanup when WP0 -> WP1.
2310 *
2311 * Use the counter to judge whether there might be pool pages with active
2312 * hacks in them. If there are, we will be running the risk of messing up
2313 * the guest by allowing it to write to read-only pages. Thus, we have to
2314 * clear the page pool ASAP if there is the slightest chance.
2315 */
2316 if (pVCpu->pgm.s.cNetwareWp0Hacks > 0)
2317 {
2318 Assert(pVCpu->CTX_SUFF(pVM)->cCpus == 1);
2319
2320 Log(("PGMCr0WpEnabled: %llu WP0 hacks active - clearing page pool\n", pVCpu->pgm.s.cNetwareWp0Hacks));
2321 pVCpu->pgm.s.cNetwareWp0Hacks = 0;
2322 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
2323 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2324 }
2325}
2326
2327
2328/**
2329 * Gets the current guest paging mode.
2330 *
2331 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
2332 *
2333 * @returns The current paging mode.
2334 * @param pVCpu The cross context virtual CPU structure.
2335 */
2336VMMDECL(PGMMODE) PGMGetGuestMode(PVMCPU pVCpu)
2337{
2338 return pVCpu->pgm.s.enmGuestMode;
2339}
2340
2341
2342/**
2343 * Gets the current shadow paging mode.
2344 *
2345 * @returns The current paging mode.
2346 * @param pVCpu The cross context virtual CPU structure.
2347 */
2348VMMDECL(PGMMODE) PGMGetShadowMode(PVMCPU pVCpu)
2349{
2350 return pVCpu->pgm.s.enmShadowMode;
2351}
2352
2353
2354/**
2355 * Gets the current host paging mode.
2356 *
2357 * @returns The current paging mode.
2358 * @param pVM The cross context VM structure.
2359 */
2360VMMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
2361{
2362 switch (pVM->pgm.s.enmHostMode)
2363 {
2364 case SUPPAGINGMODE_32_BIT:
2365 case SUPPAGINGMODE_32_BIT_GLOBAL:
2366 return PGMMODE_32_BIT;
2367
2368 case SUPPAGINGMODE_PAE:
2369 case SUPPAGINGMODE_PAE_GLOBAL:
2370 return PGMMODE_PAE;
2371
2372 case SUPPAGINGMODE_PAE_NX:
2373 case SUPPAGINGMODE_PAE_GLOBAL_NX:
2374 return PGMMODE_PAE_NX;
2375
2376 case SUPPAGINGMODE_AMD64:
2377 case SUPPAGINGMODE_AMD64_GLOBAL:
2378 return PGMMODE_AMD64;
2379
2380 case SUPPAGINGMODE_AMD64_NX:
2381 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
2382 return PGMMODE_AMD64_NX;
2383
2384 default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
2385 }
2386
2387 return PGMMODE_INVALID;
2388}
2389
2390
2391/**
2392 * Get mode name.
2393 *
2394 * @returns read-only name string.
2395 * @param enmMode The mode which name is desired.
2396 */
2397VMMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
2398{
2399 switch (enmMode)
2400 {
2401 case PGMMODE_REAL: return "Real";
2402 case PGMMODE_PROTECTED: return "Protected";
2403 case PGMMODE_32_BIT: return "32-bit";
2404 case PGMMODE_PAE: return "PAE";
2405 case PGMMODE_PAE_NX: return "PAE+NX";
2406 case PGMMODE_AMD64: return "AMD64";
2407 case PGMMODE_AMD64_NX: return "AMD64+NX";
2408 case PGMMODE_NESTED: return "Nested";
2409 case PGMMODE_EPT: return "EPT";
2410 default: return "unknown mode value";
2411 }
2412}
2413
2414
2415
2416/**
2417 * Notification from CPUM that the EFER.NXE bit has changed.
2418 *
2419 * @param pVCpu The cross context virtual CPU structure of the CPU for
2420 * which EFER changed.
2421 * @param fNxe The new NXE state.
2422 */
2423VMM_INT_DECL(void) PGMNotifyNxeChanged(PVMCPU pVCpu, bool fNxe)
2424{
2425/** @todo VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu); */
2426 Log(("PGMNotifyNxeChanged: fNxe=%RTbool\n", fNxe));
2427
2428 pVCpu->pgm.s.fNoExecuteEnabled = fNxe;
2429 if (fNxe)
2430 {
2431 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
2432 pVCpu->pgm.s.fGstPaeMbzPteMask &= ~X86_PTE_PAE_NX;
2433 pVCpu->pgm.s.fGstPaeMbzPdeMask &= ~X86_PDE_PAE_NX;
2434 pVCpu->pgm.s.fGstPaeMbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
2435 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask - N/A */
2436 pVCpu->pgm.s.fGstAmd64MbzPteMask &= ~X86_PTE_PAE_NX;
2437 pVCpu->pgm.s.fGstAmd64MbzPdeMask &= ~X86_PDE_PAE_NX;
2438 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
2439 pVCpu->pgm.s.fGstAmd64MbzPdpeMask &= ~X86_PDPE_LM_NX;
2440 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask &= ~X86_PDPE_LM_NX;
2441 pVCpu->pgm.s.fGstAmd64MbzPml4eMask &= ~X86_PML4E_NX;
2442
2443 pVCpu->pgm.s.fGst64ShadowedPteMask |= X86_PTE_PAE_NX;
2444 pVCpu->pgm.s.fGst64ShadowedPdeMask |= X86_PDE_PAE_NX;
2445 pVCpu->pgm.s.fGst64ShadowedBigPdeMask |= X86_PDE2M_PAE_NX;
2446 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask |= X86_PDE2M_PAE_NX;
2447 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask |= X86_PDPE_LM_NX;
2448 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask |= X86_PML4E_NX;
2449 }
2450 else
2451 {
2452 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
2453 pVCpu->pgm.s.fGstPaeMbzPteMask |= X86_PTE_PAE_NX;
2454 pVCpu->pgm.s.fGstPaeMbzPdeMask |= X86_PDE_PAE_NX;
2455 pVCpu->pgm.s.fGstPaeMbzBigPdeMask |= X86_PDE2M_PAE_NX;
2456 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask -N/A */
2457 pVCpu->pgm.s.fGstAmd64MbzPteMask |= X86_PTE_PAE_NX;
2458 pVCpu->pgm.s.fGstAmd64MbzPdeMask |= X86_PDE_PAE_NX;
2459 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask |= X86_PDE2M_PAE_NX;
2460 pVCpu->pgm.s.fGstAmd64MbzPdpeMask |= X86_PDPE_LM_NX;
2461 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask |= X86_PDPE_LM_NX;
2462 pVCpu->pgm.s.fGstAmd64MbzPml4eMask |= X86_PML4E_NX;
2463
2464 pVCpu->pgm.s.fGst64ShadowedPteMask &= ~X86_PTE_PAE_NX;
2465 pVCpu->pgm.s.fGst64ShadowedPdeMask &= ~X86_PDE_PAE_NX;
2466 pVCpu->pgm.s.fGst64ShadowedBigPdeMask &= ~X86_PDE2M_PAE_NX;
2467 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask &= ~X86_PDE2M_PAE_NX;
2468 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask &= ~X86_PDPE_LM_NX;
2469 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask &= ~X86_PML4E_NX;
2470 }
2471}
2472
2473
2474/**
2475 * Check if any pgm pool pages are marked dirty (not monitored)
2476 *
2477 * @returns bool locked/not locked
2478 * @param pVM The cross context VM structure.
2479 */
2480VMMDECL(bool) PGMHasDirtyPages(PVM pVM)
2481{
2482 return pVM->pgm.s.CTX_SUFF(pPool)->cDirtyPages != 0;
2483}
2484
2485
2486/**
2487 * Check if this VCPU currently owns the PGM lock.
2488 *
2489 * @returns bool owner/not owner
2490 * @param pVM The cross context VM structure.
2491 */
2492VMMDECL(bool) PGMIsLockOwner(PVM pVM)
2493{
2494 return PDMCritSectIsOwner(&pVM->pgm.s.CritSectX);
2495}
2496
2497
2498/**
2499 * Enable or disable large page usage
2500 *
2501 * @returns VBox status code.
2502 * @param pVM The cross context VM structure.
2503 * @param fUseLargePages Use/not use large pages
2504 */
2505VMMDECL(int) PGMSetLargePageUsage(PVM pVM, bool fUseLargePages)
2506{
2507 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2508
2509 pVM->fUseLargePages = fUseLargePages;
2510 return VINF_SUCCESS;
2511}
2512
2513
2514/**
2515 * Acquire the PGM lock.
2516 *
2517 * @returns VBox status code
2518 * @param pVM The cross context VM structure.
2519 * @param SRC_POS The source position of the caller (RT_SRC_POS).
2520 */
2521#if (defined(VBOX_STRICT) && defined(IN_RING3)) || defined(DOXYGEN_RUNNING)
2522int pgmLockDebug(PVM pVM, RT_SRC_POS_DECL)
2523#else
2524int pgmLock(PVM pVM)
2525#endif
2526{
2527#if defined(VBOX_STRICT) && defined(IN_RING3)
2528 int rc = PDMCritSectEnterDebug(&pVM->pgm.s.CritSectX, VERR_SEM_BUSY, (uintptr_t)ASMReturnAddress(), RT_SRC_POS_ARGS);
2529#else
2530 int rc = PDMCritSectEnter(&pVM->pgm.s.CritSectX, VERR_SEM_BUSY);
2531#endif
2532#if defined(IN_RC) || defined(IN_RING0)
2533 if (rc == VERR_SEM_BUSY)
2534 rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_LOCK, 0);
2535#endif
2536 AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc));
2537 return rc;
2538}
2539
2540
2541/**
2542 * Release the PGM lock.
2543 *
2544 * @returns VBox status code
2545 * @param pVM The cross context VM structure.
2546 */
2547void pgmUnlock(PVM pVM)
2548{
2549 uint32_t cDeprecatedPageLocks = pVM->pgm.s.cDeprecatedPageLocks;
2550 pVM->pgm.s.cDeprecatedPageLocks = 0;
2551 int rc = PDMCritSectLeave(&pVM->pgm.s.CritSectX);
2552 if (rc == VINF_SEM_NESTED)
2553 pVM->pgm.s.cDeprecatedPageLocks = cDeprecatedPageLocks;
2554}
2555
2556#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
2557
2558/**
2559 * Common worker for pgmRZDynMapGCPageOffInlined and pgmRZDynMapGCPageV2Inlined.
2560 *
2561 * @returns VBox status code.
2562 * @param pVM The cross context VM structure.
2563 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2564 * @param GCPhys The guest physical address of the page to map. The
2565 * offset bits are not ignored.
2566 * @param ppv Where to return the address corresponding to @a GCPhys.
2567 * @param SRC_POS The source position of the caller (RT_SRC_POS).
2568 */
2569int pgmRZDynMapGCPageCommon(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
2570{
2571 pgmLock(pVM);
2572
2573 /*
2574 * Convert it to a writable page and it on to the dynamic mapper.
2575 */
2576 int rc;
2577 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
2578 if (RT_LIKELY(pPage))
2579 {
2580 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
2581 if (RT_SUCCESS(rc))
2582 {
2583 void *pv;
2584 rc = pgmRZDynMapHCPageInlined(pVCpu, PGM_PAGE_GET_HCPHYS(pPage), &pv RTLOG_COMMA_SRC_POS_ARGS);
2585 if (RT_SUCCESS(rc))
2586 *ppv = (void *)((uintptr_t)pv | ((uintptr_t)GCPhys & PAGE_OFFSET_MASK));
2587 }
2588 else
2589 AssertRC(rc);
2590 }
2591 else
2592 {
2593 AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
2594 rc = VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2595 }
2596
2597 pgmUnlock(pVM);
2598 return rc;
2599}
2600
2601#endif /* IN_RC || VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
2602#if !defined(IN_R0) || defined(LOG_ENABLED)
2603
/** Format handler for PGMPAGE.
 *
 * Builds a compact "%RGp"-style description of a PGMPAGE into a stack buffer
 * and hands it to the output callback.  The precision spec (cchPrecision)
 * selects which parts are included; see IS_PART_INCLUDED below.
 *
 * @copydoc FNRTSTRFORMATTYPE */
static DECLCALLBACK(size_t) pgmFormatTypeHandlerPage(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
                                                    const char *pszType, void const *pvValue,
                                                    int cchWidth, int cchPrecision, unsigned fFlags,
                                                    void *pvUser)
{
    size_t cch;
    PCPGMPAGE pPage = (PCPGMPAGE)pvValue;
    if (RT_VALID_PTR(pPage))
    {
        char szTmp[64+80];

        cch = 0;

        /* The single char state stuff. */
        static const char s_achPageStates[4]    = { 'Z', 'A', 'W', 'S' };
        szTmp[cch++] = s_achPageStates[PGM_PAGE_GET_STATE_NA(pPage)];

        /* A part is included when there is no precision spec, or when the
           precision selects exactly that level or that level plus ten. */
#define IS_PART_INCLUDED(lvl) ( !(fFlags & RTSTR_F_PRECISION) || cchPrecision == (lvl) || cchPrecision >= (lvl)+10 )
        if (IS_PART_INCLUDED(5))
        {
            static const char s_achHandlerStates[4] = { '-', 't', 'w', 'a' };
            szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)];
            szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_VIRT_STATE(pPage)];
        }

        /* The type. */
        if (IS_PART_INCLUDED(4))
        {
            szTmp[cch++] = ':';
            static const char s_achPageTypes[8][4]  = { "INV", "RAM", "MI2", "M2A", "SHA", "ROM", "MIO", "BAD" };
            szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][0];
            szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][1];
            szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][2];
        }

        /* The numbers. */
        if (IS_PART_INCLUDED(3))
        {
            szTmp[cch++] = ':';
            cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_HCPHYS_NA(pPage), 16, 12, 0, RTSTR_F_ZEROPAD | RTSTR_F_64BIT);
        }

        if (IS_PART_INCLUDED(2))
        {
            szTmp[cch++] = ':';
            cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_PAGEID(pPage), 16, 7, 0, RTSTR_F_ZEROPAD | RTSTR_F_32BIT);
        }

        if (IS_PART_INCLUDED(6))
        {
            szTmp[cch++] = ':';
            static const char s_achRefs[4] = { '-', 'U', '!', 'L' };
            szTmp[cch++] = s_achRefs[PGM_PAGE_GET_TD_CREFS_NA(pPage)];
            cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_TD_IDX_NA(pPage), 16, 4, 0, RTSTR_F_ZEROPAD | RTSTR_F_16BIT);
        }
#undef IS_PART_INCLUDED

        cch = pfnOutput(pvArgOutput, szTmp, cch);
    }
    else
        cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmpage-ptr>"));
    NOREF(pszType); NOREF(cchWidth); NOREF(pvUser);
    return cch;
}
2670
2671
2672/** Format handler for PGMRAMRANGE.
2673 * @copydoc FNRTSTRFORMATTYPE */
2674static DECLCALLBACK(size_t) pgmFormatTypeHandlerRamRange(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
2675 const char *pszType, void const *pvValue,
2676 int cchWidth, int cchPrecision, unsigned fFlags,
2677 void *pvUser)
2678{
2679 size_t cch;
2680 PGMRAMRANGE const *pRam = (PGMRAMRANGE const *)pvValue;
2681 if (VALID_PTR(pRam))
2682 {
2683 char szTmp[80];
2684 cch = RTStrPrintf(szTmp, sizeof(szTmp), "%RGp-%RGp", pRam->GCPhys, pRam->GCPhysLast);
2685 cch = pfnOutput(pvArgOutput, szTmp, cch);
2686 }
2687 else
2688 cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmramrange-ptr>"));
2689 NOREF(pszType); NOREF(cchWidth); NOREF(cchPrecision); NOREF(pvUser); NOREF(fFlags);
2690 return cch;
2691}
2692
/** Format type handlers to be registered/deregistered. */
static const struct
{
    char szType[24];
    PFNRTSTRFORMATTYPE pfnHandler;
} g_aPgmFormatTypes[] =
{
    { "pgmpage", pgmFormatTypeHandlerPage },
    { "pgmramrange", pgmFormatTypeHandlerRamRange }
};
2703
2704#endif /* !IN_R0 || LOG_ENABLED */
2705
2706/**
2707 * Registers the global string format types.
2708 *
2709 * This should be called at module load time or in some other manner that ensure
2710 * that it's called exactly one time.
2711 *
2712 * @returns IPRT status code on RTStrFormatTypeRegister failure.
2713 */
2714VMMDECL(int) PGMRegisterStringFormatTypes(void)
2715{
2716#if !defined(IN_R0) || defined(LOG_ENABLED)
2717 int rc = VINF_SUCCESS;
2718 unsigned i;
2719 for (i = 0; RT_SUCCESS(rc) && i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
2720 {
2721 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
2722# ifdef IN_RING0
2723 if (rc == VERR_ALREADY_EXISTS)
2724 {
2725 /* in case of cleanup failure in ring-0 */
2726 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2727 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
2728 }
2729# endif
2730 }
2731 if (RT_FAILURE(rc))
2732 while (i-- > 0)
2733 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2734
2735 return rc;
2736#else
2737 return VINF_SUCCESS;
2738#endif
2739}
2740
2741
2742/**
2743 * Deregisters the global string format types.
2744 *
2745 * This should be called at module unload time or in some other manner that
2746 * ensure that it's called exactly one time.
2747 */
2748VMMDECL(void) PGMDeregisterStringFormatTypes(void)
2749{
2750#if !defined(IN_R0) || defined(LOG_ENABLED)
2751 for (unsigned i = 0; i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
2752 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2753#endif
2754}
2755
2756#ifdef VBOX_STRICT
2757
2758/**
2759 * Asserts that there are no mapping conflicts.
2760 *
2761 * @returns Number of conflicts.
2762 * @param pVM The cross context VM structure.
2763 */
2764VMMDECL(unsigned) PGMAssertNoMappingConflicts(PVM pVM)
2765{
2766 unsigned cErrors = 0;
2767
2768 /* Only applies to raw mode -> 1 VPCU */
2769 Assert(pVM->cCpus == 1);
2770 PVMCPU pVCpu = &pVM->aCpus[0];
2771
2772 /*
2773 * Check for mapping conflicts.
2774 */
2775 for (PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
2776 pMapping;
2777 pMapping = pMapping->CTX_SUFF(pNext))
2778 {
2779 /** @todo This is slow and should be optimized, but since it's just assertions I don't care now. */
2780 for (RTGCPTR GCPtr = pMapping->GCPtr;
2781 GCPtr <= pMapping->GCPtrLast;
2782 GCPtr += PAGE_SIZE)
2783 {
2784 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)GCPtr, NULL, NULL);
2785 if (rc != VERR_PAGE_TABLE_NOT_PRESENT)
2786 {
2787 AssertMsgFailed(("Conflict at %RGv with %s\n", GCPtr, R3STRING(pMapping->pszDesc)));
2788 cErrors++;
2789 break;
2790 }
2791 }
2792 }
2793
2794 return cErrors;
2795}
2796
2797
2798/**
2799 * Asserts that everything related to the guest CR3 is correctly shadowed.
2800 *
2801 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
2802 * and assert the correctness of the guest CR3 mapping before asserting that the
2803 * shadow page tables is in sync with the guest page tables.
2804 *
2805 * @returns Number of conflicts.
2806 * @param pVM The cross context VM structure.
2807 * @param pVCpu The cross context virtual CPU structure.
2808 * @param cr3 The current guest CR3 register value.
2809 * @param cr4 The current guest CR4 register value.
2810 */
2811VMMDECL(unsigned) PGMAssertCR3(PVM pVM, PVMCPU pVCpu, uint64_t cr3, uint64_t cr4)
2812{
2813 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
2814 pgmLock(pVM);
2815 unsigned cErrors = PGM_BTH_PFN(AssertCR3, pVCpu)(pVCpu, cr3, cr4, 0, ~(RTGCPTR)0);
2816 pgmUnlock(pVM);
2817 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
2818 return cErrors;
2819}
2820
2821#endif /* VBOX_STRICT */
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette