VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp@68228

Last change on this file since 68228 was 67522, checked in by vboxsync, 7 years ago

VMM/PGM: typo.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 94.6 KB
1/* $Id: PGMAll.cpp 67522 2017-06-21 07:26:56Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor - All context code.
4 */
5
6/*
7 * Copyright (C) 2006-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_PGM
23#include <VBox/vmm/pgm.h>
24#include <VBox/vmm/cpum.h>
25#include <VBox/vmm/selm.h>
26#include <VBox/vmm/iem.h>
27#include <VBox/vmm/iom.h>
28#include <VBox/sup.h>
29#include <VBox/vmm/mm.h>
30#include <VBox/vmm/stam.h>
31#include <VBox/vmm/csam.h>
32#include <VBox/vmm/patm.h>
33#include <VBox/vmm/trpm.h>
34#ifdef VBOX_WITH_REM
35# include <VBox/vmm/rem.h>
36#endif
37#include <VBox/vmm/em.h>
38#include <VBox/vmm/hm.h>
39#include <VBox/vmm/hm_vmx.h>
40#include "PGMInternal.h"
41#include <VBox/vmm/vm.h>
42#include "PGMInline.h"
43#include <iprt/assert.h>
44#include <iprt/asm-amd64-x86.h>
45#include <iprt/string.h>
46#include <VBox/log.h>
47#include <VBox/param.h>
48#include <VBox/err.h>
49
50
51/*********************************************************************************************************************************
52* Structures and Typedefs *
53*********************************************************************************************************************************/
54/**
55 * State structure for PGM_GST_NAME(HandlerVirtualUpdate) that's
56 * passed to PGM_GST_NAME(VirtHandlerUpdateOne) during enumeration.
57 */
58typedef struct PGMHVUSTATE
59{
60 /** Pointer to the VM. */
61 PVM pVM;
62 /** Pointer to the VMCPU. */
63 PVMCPU pVCpu;
64 /** The todo flags. */
65 RTUINT fTodo;
66 /** The CR4 register value. */
67 uint32_t cr4;
68} PGMHVUSTATE, *PPGMHVUSTATE;
69
70
71/*********************************************************************************************************************************
72* Internal Functions *
73*********************************************************************************************************************************/
74DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
75DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPU pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde);
76#ifndef IN_RC
77static int pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD);
78static int pgmShwGetEPTPDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD);
79#endif
80
81
82/*
83 * Shadow - 32-bit mode
84 */
85#define PGM_SHW_TYPE PGM_TYPE_32BIT
86#define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
87#include "PGMAllShw.h"
88
89/* Guest - real mode */
90#define PGM_GST_TYPE PGM_TYPE_REAL
91#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
92#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
93#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
94#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
95#include "PGMGstDefs.h"
96#include "PGMAllGst.h"
97#include "PGMAllBth.h"
98#undef BTH_PGMPOOLKIND_PT_FOR_PT
99#undef BTH_PGMPOOLKIND_ROOT
100#undef PGM_BTH_NAME
101#undef PGM_GST_TYPE
102#undef PGM_GST_NAME
103
104/* Guest - protected mode */
105#define PGM_GST_TYPE PGM_TYPE_PROT
106#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
107#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
108#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
109#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
110#include "PGMGstDefs.h"
111#include "PGMAllGst.h"
112#include "PGMAllBth.h"
113#undef BTH_PGMPOOLKIND_PT_FOR_PT
114#undef BTH_PGMPOOLKIND_ROOT
115#undef PGM_BTH_NAME
116#undef PGM_GST_TYPE
117#undef PGM_GST_NAME
118
119/* Guest - 32-bit mode */
120#define PGM_GST_TYPE PGM_TYPE_32BIT
121#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
122#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
123#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
124#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
125#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD
126#include "PGMGstDefs.h"
127#include "PGMAllGst.h"
128#include "PGMAllBth.h"
129#undef BTH_PGMPOOLKIND_PT_FOR_BIG
130#undef BTH_PGMPOOLKIND_PT_FOR_PT
131#undef BTH_PGMPOOLKIND_ROOT
132#undef PGM_BTH_NAME
133#undef PGM_GST_TYPE
134#undef PGM_GST_NAME
135
136#undef PGM_SHW_TYPE
137#undef PGM_SHW_NAME
138
139
140/*
141 * Shadow - PAE mode
142 */
143#define PGM_SHW_TYPE PGM_TYPE_PAE
144#define PGM_SHW_NAME(name) PGM_SHW_NAME_PAE(name)
145#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
146#include "PGMAllShw.h"
147
148/* Guest - real mode */
149#define PGM_GST_TYPE PGM_TYPE_REAL
150#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
151#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
152#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
153#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
154#include "PGMGstDefs.h"
155#include "PGMAllBth.h"
156#undef BTH_PGMPOOLKIND_PT_FOR_PT
157#undef BTH_PGMPOOLKIND_ROOT
158#undef PGM_BTH_NAME
159#undef PGM_GST_TYPE
160#undef PGM_GST_NAME
161
162/* Guest - protected mode */
163#define PGM_GST_TYPE PGM_TYPE_PROT
164#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
165#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
166#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
167#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
168#include "PGMGstDefs.h"
169#include "PGMAllBth.h"
170#undef BTH_PGMPOOLKIND_PT_FOR_PT
171#undef BTH_PGMPOOLKIND_ROOT
172#undef PGM_BTH_NAME
173#undef PGM_GST_TYPE
174#undef PGM_GST_NAME
175
176/* Guest - 32-bit mode */
177#define PGM_GST_TYPE PGM_TYPE_32BIT
178#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
179#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
180#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
181#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
182#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_FOR_32BIT
183#include "PGMGstDefs.h"
184#include "PGMAllBth.h"
185#undef BTH_PGMPOOLKIND_PT_FOR_BIG
186#undef BTH_PGMPOOLKIND_PT_FOR_PT
187#undef BTH_PGMPOOLKIND_ROOT
188#undef PGM_BTH_NAME
189#undef PGM_GST_TYPE
190#undef PGM_GST_NAME
191
192
193/* Guest - PAE mode */
194#define PGM_GST_TYPE PGM_TYPE_PAE
195#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
196#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
197#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
198#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
199#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT
200#include "PGMGstDefs.h"
201#include "PGMAllGst.h"
202#include "PGMAllBth.h"
203#undef BTH_PGMPOOLKIND_PT_FOR_BIG
204#undef BTH_PGMPOOLKIND_PT_FOR_PT
205#undef BTH_PGMPOOLKIND_ROOT
206#undef PGM_BTH_NAME
207#undef PGM_GST_TYPE
208#undef PGM_GST_NAME
209
210#undef PGM_SHW_TYPE
211#undef PGM_SHW_NAME
212
213
214#ifndef IN_RC /* AMD64 implies VT-x/AMD-V */
215/*
216 * Shadow - AMD64 mode
217 */
218# define PGM_SHW_TYPE PGM_TYPE_AMD64
219# define PGM_SHW_NAME(name) PGM_SHW_NAME_AMD64(name)
220# include "PGMAllShw.h"
221
222/* Guest - protected mode (only used for AMD-V nested paging in 64 bits mode) */
223# define PGM_GST_TYPE PGM_TYPE_PROT
224# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
225# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
226# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
227# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PD_PHYS
228# include "PGMGstDefs.h"
229# include "PGMAllBth.h"
230# undef BTH_PGMPOOLKIND_PT_FOR_PT
231# undef BTH_PGMPOOLKIND_ROOT
232# undef PGM_BTH_NAME
233# undef PGM_GST_TYPE
234# undef PGM_GST_NAME
235
236# ifdef VBOX_WITH_64_BITS_GUESTS
237/* Guest - AMD64 mode */
238# define PGM_GST_TYPE PGM_TYPE_AMD64
239# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
240# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
241# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
242# define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
243# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_64BIT_PML4
244# include "PGMGstDefs.h"
245# include "PGMAllGst.h"
246# include "PGMAllBth.h"
247# undef BTH_PGMPOOLKIND_PT_FOR_BIG
248# undef BTH_PGMPOOLKIND_PT_FOR_PT
249# undef BTH_PGMPOOLKIND_ROOT
250# undef PGM_BTH_NAME
251# undef PGM_GST_TYPE
252# undef PGM_GST_NAME
253# endif /* VBOX_WITH_64_BITS_GUESTS */
254
255# undef PGM_SHW_TYPE
256# undef PGM_SHW_NAME
257
258
259/*
260 * Shadow - Nested paging mode
261 */
262# define PGM_SHW_TYPE PGM_TYPE_NESTED
263# define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED(name)
264# include "PGMAllShw.h"
265
266/* Guest - real mode */
267# define PGM_GST_TYPE PGM_TYPE_REAL
268# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
269# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_REAL(name)
270# include "PGMGstDefs.h"
271# include "PGMAllBth.h"
272# undef PGM_BTH_NAME
273# undef PGM_GST_TYPE
274# undef PGM_GST_NAME
275
276/* Guest - protected mode */
277# define PGM_GST_TYPE PGM_TYPE_PROT
278# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
279# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PROT(name)
280# include "PGMGstDefs.h"
281# include "PGMAllBth.h"
282# undef PGM_BTH_NAME
283# undef PGM_GST_TYPE
284# undef PGM_GST_NAME
285
286/* Guest - 32-bit mode */
287# define PGM_GST_TYPE PGM_TYPE_32BIT
288# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
289# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT(name)
290# include "PGMGstDefs.h"
291# include "PGMAllBth.h"
292# undef PGM_BTH_NAME
293# undef PGM_GST_TYPE
294# undef PGM_GST_NAME
295
296/* Guest - PAE mode */
297# define PGM_GST_TYPE PGM_TYPE_PAE
298# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
299# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE(name)
300# include "PGMGstDefs.h"
301# include "PGMAllBth.h"
302# undef PGM_BTH_NAME
303# undef PGM_GST_TYPE
304# undef PGM_GST_NAME
305
306# ifdef VBOX_WITH_64_BITS_GUESTS
307/* Guest - AMD64 mode */
308# define PGM_GST_TYPE PGM_TYPE_AMD64
309# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
310# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64(name)
311# include "PGMGstDefs.h"
312# include "PGMAllBth.h"
313# undef PGM_BTH_NAME
314# undef PGM_GST_TYPE
315# undef PGM_GST_NAME
316# endif /* VBOX_WITH_64_BITS_GUESTS */
317
318# undef PGM_SHW_TYPE
319# undef PGM_SHW_NAME
320
321
322/*
323 * Shadow - EPT
324 */
325# define PGM_SHW_TYPE PGM_TYPE_EPT
326# define PGM_SHW_NAME(name) PGM_SHW_NAME_EPT(name)
327# include "PGMAllShw.h"
328
329/* Guest - real mode */
330# define PGM_GST_TYPE PGM_TYPE_REAL
331# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
332# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_REAL(name)
333# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
334# include "PGMGstDefs.h"
335# include "PGMAllBth.h"
336# undef BTH_PGMPOOLKIND_PT_FOR_PT
337# undef PGM_BTH_NAME
338# undef PGM_GST_TYPE
339# undef PGM_GST_NAME
340
341/* Guest - protected mode */
342# define PGM_GST_TYPE PGM_TYPE_PROT
343# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
344# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
345# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
346# include "PGMGstDefs.h"
347# include "PGMAllBth.h"
348# undef BTH_PGMPOOLKIND_PT_FOR_PT
349# undef PGM_BTH_NAME
350# undef PGM_GST_TYPE
351# undef PGM_GST_NAME
352
353/* Guest - 32-bit mode */
354# define PGM_GST_TYPE PGM_TYPE_32BIT
355# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
356# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_32BIT(name)
357# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
358# include "PGMGstDefs.h"
359# include "PGMAllBth.h"
360# undef BTH_PGMPOOLKIND_PT_FOR_PT
361# undef PGM_BTH_NAME
362# undef PGM_GST_TYPE
363# undef PGM_GST_NAME
364
365/* Guest - PAE mode */
366# define PGM_GST_TYPE PGM_TYPE_PAE
367# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
368# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PAE(name)
369# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
370# include "PGMGstDefs.h"
371# include "PGMAllBth.h"
372# undef BTH_PGMPOOLKIND_PT_FOR_PT
373# undef PGM_BTH_NAME
374# undef PGM_GST_TYPE
375# undef PGM_GST_NAME
376
377# ifdef VBOX_WITH_64_BITS_GUESTS
378/* Guest - AMD64 mode */
379# define PGM_GST_TYPE PGM_TYPE_AMD64
380# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
381# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_AMD64(name)
382# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
383# include "PGMGstDefs.h"
384# include "PGMAllBth.h"
385# undef BTH_PGMPOOLKIND_PT_FOR_PT
386# undef PGM_BTH_NAME
387# undef PGM_GST_TYPE
388# undef PGM_GST_NAME
389# endif /* VBOX_WITH_64_BITS_GUESTS */
390
391# undef PGM_SHW_TYPE
392# undef PGM_SHW_NAME
393
394#endif /* !IN_RC */
395
396
397#ifndef IN_RING3
398/**
399 * #PF Handler.
400 *
401 * @returns VBox status code (appropriate for trap handling and GC return).
402 * @param pVCpu The cross context virtual CPU structure.
403 * @param uErr The trap error code.
404 * @param pRegFrame Trap register frame.
405 * @param pvFault The fault address.
406 */
407VMMDECL(int) PGMTrap0eHandler(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
408{
409 PVM pVM = pVCpu->CTX_SUFF(pVM);
410
411 Log(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv eip=%04x:%RGv cr3=%RGp\n", uErr, pvFault, pRegFrame->cs.Sel, (RTGCPTR)pRegFrame->rip, (RTGCPHYS)CPUMGetGuestCR3(pVCpu)));
412 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0e, a);
413 STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );
414
415
416#ifdef VBOX_WITH_STATISTICS
417 /*
418 * Error code stats.
419 */
420 if (uErr & X86_TRAP_PF_US)
421 {
422 if (!(uErr & X86_TRAP_PF_P))
423 {
424 if (uErr & X86_TRAP_PF_RW)
425 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentWrite);
426 else
427 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentRead);
428 }
429 else if (uErr & X86_TRAP_PF_RW)
430 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSWrite);
431 else if (uErr & X86_TRAP_PF_RSVD)
432 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSReserved);
433 else if (uErr & X86_TRAP_PF_ID)
434 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNXE);
435 else
436 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSRead);
437 }
438 else
439 { /* Supervisor */
440 if (!(uErr & X86_TRAP_PF_P))
441 {
442 if (uErr & X86_TRAP_PF_RW)
443 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentWrite);
444 else
445 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentRead);
446 }
447 else if (uErr & X86_TRAP_PF_RW)
448 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVWrite);
449 else if (uErr & X86_TRAP_PF_ID)
450 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSNXE);
451 else if (uErr & X86_TRAP_PF_RSVD)
452 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVReserved);
453 }
454#endif /* VBOX_WITH_STATISTICS */
455
456 /*
457 * Call the worker.
458 */
459 bool fLockTaken = false;
460 int rc = PGM_BTH_PFN(Trap0eHandler, pVCpu)(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken);
461 if (fLockTaken)
462 {
463 PGM_LOCK_ASSERT_OWNER(pVM);
464 pgmUnlock(pVM);
465 }
466 LogFlow(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv rc=%Rrc\n", uErr, pvFault, rc));
467
468 /*
469 * Return code tweaks.
470 */
471 if (rc != VINF_SUCCESS)
472 {
473 if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
474 rc = VINF_SUCCESS;
475
476# ifdef IN_RING0
477 /* Note: hack alert for difficult to reproduce problem. */
478 if ( rc == VERR_PAGE_NOT_PRESENT /* SMP only ; disassembly might fail. */
479 || rc == VERR_PAGE_TABLE_NOT_PRESENT /* seen with UNI & SMP */
480 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT /* seen with SMP */
481 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT) /* precaution */
482 {
483 Log(("WARNING: Unexpected VERR_PAGE_TABLE_NOT_PRESENT (%d) for page fault at %RGv error code %x (rip=%RGv)\n", rc, pvFault, uErr, pRegFrame->rip));
484 /* Some kind of inconsistency in the SMP case; it's safe to just execute the instruction again; not sure about single VCPU VMs though. */
485 rc = VINF_SUCCESS;
486 }
487# endif
488 }
489
490 STAM_STATS({ if (rc == VINF_EM_RAW_GUEST_TRAP) STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eGuestPF); });
491 STAM_STATS({ if (!pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution))
492 pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2Misc; });
493 STAM_PROFILE_STOP_EX(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0e, pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution), a);
494 return rc;
495}
496#endif /* !IN_RING3 */
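/*
 * Illustrative usage sketch for PGMTrap0eHandler() (added for clarity; not part of
 * the original source). The caller context below -- a ring-0 #PF exit path with an
 * error code uErrorCode, a register frame pCtxCore and a fault address GCPtrFault --
 * is assumed for illustration only.
 * @code
 *     int rc = PGMTrap0eHandler(pVCpu, uErrorCode, pCtxCore, GCPtrFault);
 *     if (rc == VINF_EM_RAW_GUEST_TRAP)
 *     {
 *         // The fault belongs to the guest; reflect it as a #PF event.
 *     }
 *     else if (rc != VINF_SUCCESS)
 *     {
 *         // Strict/scheduling status; return it so the caller can act on it.
 *     }
 * @endcode
 */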
497
498
499/**
500 * Prefetch a page
501 *
502 * Typically used to sync commonly used pages before entering raw mode
503 * after a CR3 reload.
504 *
505 * @returns VBox status code suitable for scheduling.
506 * @retval VINF_SUCCESS on success.
507 * @retval VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
508 * @param pVCpu The cross context virtual CPU structure.
509 * @param GCPtrPage Page to invalidate.
510 */
511VMMDECL(int) PGMPrefetchPage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
512{
513 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Prefetch), a);
514 int rc = PGM_BTH_PFN(PrefetchPage, pVCpu)(pVCpu, GCPtrPage);
515 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Prefetch), a);
516 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
517 return rc;
518}
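/*
 * Usage sketch for PGMPrefetchPage() (illustrative only; the caller shown is assumed,
 * not taken from the original code). A typical pattern is to prefetch a known-hot
 * guest page after a CR3 reload and fall back to a full sync when the shadow pool
 * is exhausted. GCPtrHotPage is a hypothetical guest address.
 * @code
 *     int rc = PGMPrefetchPage(pVCpu, GCPtrHotPage);
 *     if (rc == VINF_PGM_SYNC_CR3)
 *         VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);  // out of shadow pages; resync on next entry
 *     else
 *         AssertRC(rc);
 * @endcode
 */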
519
520
521/**
522 * Gets the mapping corresponding to the specified address (if any).
523 *
524 * @returns Pointer to the mapping.
525 * @returns NULL if not found.
526 *
527 * @param pVM The cross context VM structure.
528 * @param GCPtr The guest context pointer.
529 */
530PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr)
531{
532 PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
533 while (pMapping)
534 {
535 if ((uintptr_t)GCPtr < (uintptr_t)pMapping->GCPtr)
536 break;
537 if ((uintptr_t)GCPtr - (uintptr_t)pMapping->GCPtr < pMapping->cb)
538 return pMapping;
539 pMapping = pMapping->CTX_SUFF(pNext);
540 }
541 return NULL;
542}
543
544
545/**
546 * Verifies a range of pages for read or write access
547 *
548 * Only checks the guest's page tables
549 *
550 * @returns VBox status code.
551 * @param pVCpu The cross context virtual CPU structure.
552 * @param Addr Guest virtual address to check
553 * @param cbSize Access size
554 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
555 * @remarks Currently not in use.
556 */
557VMMDECL(int) PGMIsValidAccess(PVMCPU pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
558{
559 /*
560 * Validate input.
561 */
562 if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
563 {
564 AssertMsgFailed(("PGMIsValidAccess: invalid access type %08x\n", fAccess));
565 return VERR_INVALID_PARAMETER;
566 }
567
568 uint64_t fPage;
569 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)Addr, &fPage, NULL);
570 if (RT_FAILURE(rc))
571 {
572 Log(("PGMIsValidAccess: access violation for %RGv rc=%d\n", Addr, rc));
573 return VINF_EM_RAW_GUEST_TRAP;
574 }
575
576 /*
577 * Check if the access would cause a page fault
578 *
579 * Note that hypervisor page directories are not present in the guest's tables, so this check
580 * is sufficient.
581 */
582 bool fWrite = !!(fAccess & X86_PTE_RW);
583 bool fUser = !!(fAccess & X86_PTE_US);
584 if ( !(fPage & X86_PTE_P)
585 || (fWrite && !(fPage & X86_PTE_RW))
586 || (fUser && !(fPage & X86_PTE_US)) )
587 {
588 Log(("PGMIsValidAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPage, fWrite, fUser));
589 return VINF_EM_RAW_GUEST_TRAP;
590 }
591 if ( RT_SUCCESS(rc)
592 && PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize))
593 return PGMIsValidAccess(pVCpu, Addr + PAGE_SIZE, (cbSize > PAGE_SIZE) ? cbSize - PAGE_SIZE : 1, fAccess);
594 return rc;
595}
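/*
 * Illustrative sketch for PGMIsValidAccess() (assumed caller; not from the original
 * file): checking whether the guest page tables would allow a user-mode write to a
 * range before emulating a store. GCPtrDst and cbWrite are hypothetical.
 * @code
 *     int rc = PGMIsValidAccess(pVCpu, GCPtrDst, cbWrite, X86_PTE_RW | X86_PTE_US);
 *     if (rc == VINF_EM_RAW_GUEST_TRAP)
 *     {
 *         // The access would #PF in the guest; let the guest see the fault instead.
 *     }
 * @endcode
 */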
596
597
598/**
599 * Verifies a range of pages for read or write access
600 *
601 * Supports handling of pages marked for dirty bit tracking and CSAM
602 *
603 * @returns VBox status code.
604 * @param pVCpu The cross context virtual CPU structure.
605 * @param Addr Guest virtual address to check
606 * @param cbSize Access size
607 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
608 */
609VMMDECL(int) PGMVerifyAccess(PVMCPU pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
610{
611 PVM pVM = pVCpu->CTX_SUFF(pVM);
612
613 AssertMsg(!(fAccess & ~(X86_PTE_US | X86_PTE_RW)), ("PGMVerifyAccess: invalid access type %08x\n", fAccess));
614
615 /*
616 * Get going.
617 */
618 uint64_t fPageGst;
619 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)Addr, &fPageGst, NULL);
620 if (RT_FAILURE(rc))
621 {
622 Log(("PGMVerifyAccess: access violation for %RGv rc=%d\n", Addr, rc));
623 return VINF_EM_RAW_GUEST_TRAP;
624 }
625
626 /*
627 * Check if the access would cause a page fault
628 *
629 * Note that hypervisor page directories are not present in the guest's tables, so this check
630 * is sufficient.
631 */
632 const bool fWrite = !!(fAccess & X86_PTE_RW);
633 const bool fUser = !!(fAccess & X86_PTE_US);
634 if ( !(fPageGst & X86_PTE_P)
635 || (fWrite && !(fPageGst & X86_PTE_RW))
636 || (fUser && !(fPageGst & X86_PTE_US)) )
637 {
638 Log(("PGMVerifyAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPageGst, fWrite, fUser));
639 return VINF_EM_RAW_GUEST_TRAP;
640 }
641
642 if (!pVM->pgm.s.fNestedPaging)
643 {
644 /*
645 * Next step is to verify if we protected this page for dirty bit tracking or for CSAM scanning
646 */
647 rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, NULL, NULL);
648 if ( rc == VERR_PAGE_NOT_PRESENT
649 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
650 {
651 /*
652 * Page is not present in our page tables.
653 * Try to sync it!
654 */
655 Assert(X86_TRAP_PF_RW == X86_PTE_RW && X86_TRAP_PF_US == X86_PTE_US);
656 uint32_t uErr = fAccess & (X86_TRAP_PF_RW | X86_TRAP_PF_US);
657 rc = PGM_BTH_PFN(VerifyAccessSyncPage, pVCpu)(pVCpu, Addr, fPageGst, uErr);
658 if (rc != VINF_SUCCESS)
659 return rc;
660 }
661 else
662 AssertMsg(rc == VINF_SUCCESS, ("PGMShwGetPage %RGv failed with %Rrc\n", Addr, rc));
663 }
664
665#if 0 /* def VBOX_STRICT; triggers too often now */
666 /*
667 * This check is a bit paranoid, but useful.
668 */
669 /* Note! This will assert when writing to monitored pages (a bit annoying actually). */
670 uint64_t fPageShw;
671 rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, &fPageShw, NULL);
672 if ( (rc == VERR_PAGE_NOT_PRESENT || RT_FAILURE(rc))
673 || (fWrite && !(fPageShw & X86_PTE_RW))
674 || (fUser && !(fPageShw & X86_PTE_US)) )
675 {
676 AssertMsgFailed(("Unexpected access violation for %RGv! rc=%Rrc write=%d user=%d\n",
677 Addr, rc, fWrite && !(fPageShw & X86_PTE_RW), fUser && !(fPageShw & X86_PTE_US)));
678 return VINF_EM_RAW_GUEST_TRAP;
679 }
680#endif
681
682 if ( RT_SUCCESS(rc)
683 && ( PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize - 1)
684 || Addr + cbSize < Addr))
685 {
686 /* Don't recursively call PGMVerifyAccess as we might run out of stack. */
687 for (;;)
688 {
689 Addr += PAGE_SIZE;
690 if (cbSize > PAGE_SIZE)
691 cbSize -= PAGE_SIZE;
692 else
693 cbSize = 1;
694 rc = PGMVerifyAccess(pVCpu, Addr, 1, fAccess);
695 if (rc != VINF_SUCCESS)
696 break;
697 if (PAGE_ADDRESS(Addr) == PAGE_ADDRESS(Addr + cbSize - 1))
698 break;
699 }
700 }
701 return rc;
702}
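/*
 * Usage sketch for PGMVerifyAccess() (illustrative; the caller context is assumed).
 * Unlike PGMIsValidAccess() above, this also syncs in pages that were protected for
 * dirty-bit tracking or CSAM, so it is the one to use before letting the guest touch
 * the range directly. GCPtrBuf and cbBuf are hypothetical.
 * @code
 *     int rc = PGMVerifyAccess(pVCpu, GCPtrBuf, cbBuf, X86_PTE_RW);  // supervisor write check
 *     if (rc != VINF_SUCCESS)
 *     {
 *         // Typically VINF_EM_RAW_GUEST_TRAP: treat it as a guest #PF or pass the status up.
 *     }
 * @endcode
 */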
703
704
705/**
706 * Emulation of the invlpg instruction (HC only actually).
707 *
708 * @returns Strict VBox status code, special care required.
709 * @retval VINF_PGM_SYNC_CR3 - handled.
710 * @retval VINF_EM_RAW_EMULATE_INSTR - not handled (RC only).
711 * @retval VERR_REM_FLUSHED_PAGES_OVERFLOW - not handled.
712 *
713 * @param pVCpu The cross context virtual CPU structure.
714 * @param GCPtrPage Page to invalidate.
715 *
716 * @remark ASSUMES the page table entry or page directory is valid. Fairly
717 * safe, but there could be edge cases!
718 *
719 * @todo Flush page or page directory only if necessary!
720 * @todo VBOXSTRICTRC
721 */
722VMMDECL(int) PGMInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
723{
724 PVM pVM = pVCpu->CTX_SUFF(pVM);
725 int rc;
726 Log3(("PGMInvalidatePage: GCPtrPage=%RGv\n", GCPtrPage));
727
728#if !defined(IN_RING3) && defined(VBOX_WITH_REM)
729 /*
730 * Notify the recompiler so it can record this instruction.
731 */
732 REMNotifyInvalidatePage(pVM, GCPtrPage);
733#endif /* !IN_RING3 */
734 IEMTlbInvalidatePage(pVCpu, GCPtrPage);
735
736
737#ifdef IN_RC
738 /*
739 * Check for conflicts and pending CR3 monitoring updates.
740 */
741 if (pgmMapAreMappingsFloating(pVM))
742 {
743 if ( pgmGetMapping(pVM, GCPtrPage)
744 && PGMGstGetPage(pVCpu, GCPtrPage, NULL, NULL) != VERR_PAGE_TABLE_NOT_PRESENT)
745 {
746 LogFlow(("PGMGCInvalidatePage: Conflict!\n"));
747 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
748 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRCInvlPgConflict);
749 return VINF_PGM_SYNC_CR3;
750 }
751
752 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
753 {
754 LogFlow(("PGMGCInvalidatePage: PGM_SYNC_MONITOR_CR3 -> reinterpret instruction in R3\n"));
755 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRCInvlPgSyncMonCR3);
756 return VINF_EM_RAW_EMULATE_INSTR;
757 }
758 }
759#endif /* IN_RC */
760
761 /*
762 * Call paging mode specific worker.
763 */
764 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InvalidatePage), a);
765 pgmLock(pVM);
766 rc = PGM_BTH_PFN(InvalidatePage, pVCpu)(pVCpu, GCPtrPage);
767 pgmUnlock(pVM);
768 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InvalidatePage), a);
769
770#ifdef IN_RING3
771 /*
772 * Check if we have a pending update of the CR3 monitoring.
773 */
774 if ( RT_SUCCESS(rc)
775 && (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3))
776 {
777 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
778 Assert(!pVM->pgm.s.fMappingsFixed); Assert(pgmMapAreMappingsEnabled(pVM));
779 }
780
781# ifdef VBOX_WITH_RAW_MODE
782 /*
783 * Inform CSAM about the flush
784 *
785 * Note: This is to check if monitored pages have been changed; when we implement
786 * callbacks for virtual handlers, this is no longer required.
787 */
788 CSAMR3FlushPage(pVM, GCPtrPage);
789# endif
790#endif /* IN_RING3 */
791
792 /* Ignore all irrelevant error codes. */
793 if ( rc == VERR_PAGE_NOT_PRESENT
794 || rc == VERR_PAGE_TABLE_NOT_PRESENT
795 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT
796 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)
797 rc = VINF_SUCCESS;
798
799 return rc;
800}
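/*
 * Sketch of a caller emulating INVLPG via PGMInvalidatePage() (illustrative only;
 * the surrounding instruction-emulation context is assumed).
 * @code
 *     int rc = PGMInvalidatePage(pVCpu, GCPtrPage);
 *     if (rc == VINF_PGM_SYNC_CR3)
 *     {
 *         // Handled by scheduling a full shadow sync (see the IN_RC conflict check above).
 *     }
 *     else if (rc == VINF_EM_RAW_EMULATE_INSTR)
 *     {
 *         // RC only: go to ring-3 and redo the instruction there.
 *     }
 *     else
 *         AssertRC(rc);
 * @endcode
 */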
801
802
803/**
804 * Executes an instruction using the interpreter.
805 *
806 * @returns VBox status code (appropriate for trap handling and GC return).
807 * @param pVM The cross context VM structure.
808 * @param pVCpu The cross context virtual CPU structure.
809 * @param pRegFrame Register frame.
810 * @param pvFault Fault address.
811 */
812VMMDECL(VBOXSTRICTRC) PGMInterpretInstruction(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
813{
814 NOREF(pVM);
815 VBOXSTRICTRC rc = EMInterpretInstruction(pVCpu, pRegFrame, pvFault);
816 if (rc == VERR_EM_INTERPRETER)
817 rc = VINF_EM_RAW_EMULATE_INSTR;
818 if (rc != VINF_SUCCESS)
819 Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%RGv)\n", VBOXSTRICTRC_VAL(rc), pvFault));
820 return rc;
821}
822
823
824/**
825 * Gets effective page information (from the VMM page directory).
826 *
827 * @returns VBox status code.
828 * @param pVCpu The cross context virtual CPU structure.
829 * @param GCPtr Guest Context virtual address of the page.
830 * @param pfFlags Where to store the flags. These are X86_PTE_*.
831 * @param pHCPhys Where to store the HC physical address of the page.
832 * This is page aligned.
833 * @remark You should use PGMMapGetPage() for pages in a mapping.
834 */
835VMMDECL(int) PGMShwGetPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
836{
837 pgmLock(pVCpu->CTX_SUFF(pVM));
838 int rc = PGM_SHW_PFN(GetPage, pVCpu)(pVCpu, GCPtr, pfFlags, pHCPhys);
839 pgmUnlock(pVCpu->CTX_SUFF(pVM));
840 return rc;
841}
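/*
 * Illustrative sketch for PGMShwGetPage() (assumed caller, not original code):
 * querying the *shadow* (VMM) page tables to see whether a guest address is
 * currently mapped writable, e.g. when deciding if a write needs a page sync.
 * @code
 *     uint64_t fFlags;
 *     RTHCPHYS HCPhys;
 *     int rc = PGMShwGetPage(pVCpu, GCPtr, &fFlags, &HCPhys);
 *     if (RT_SUCCESS(rc) && (fFlags & X86_PTE_RW))
 *     {
 *         // The shadow PTE is present and writable; no resync needed for a write.
 *     }
 * @endcode
 */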
842
843
844/**
845 * Modify page flags for a range of pages in the shadow context.
846 *
847 * The existing flags are ANDed with the fMask and ORed with the fFlags.
848 *
849 * @returns VBox status code.
850 * @param pVCpu The cross context virtual CPU structure.
851 * @param GCPtr Virtual address of the first page in the range.
852 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
853 * @param fMask The AND mask - page flags X86_PTE_*.
854 * Be very CAREFUL when ~'ing constants which could be 32-bit!
855 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
856 * @remark You must use PGMMapModifyPage() for pages in a mapping.
857 */
858DECLINLINE(int) pdmShwModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags)
859{
860 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
861 Assert(!(fOpFlags & ~(PGM_MK_PG_IS_MMIO2 | PGM_MK_PG_IS_WRITE_FAULT)));
862
863 GCPtr &= PAGE_BASE_GC_MASK; /** @todo this ain't necessary, right... */
864
865 PVM pVM = pVCpu->CTX_SUFF(pVM);
866 pgmLock(pVM);
867 int rc = PGM_SHW_PFN(ModifyPage, pVCpu)(pVCpu, GCPtr, PAGE_SIZE, fFlags, fMask, fOpFlags);
868 pgmUnlock(pVM);
869 return rc;
870}
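/*
 * The fFlags/fMask convention used by pdmShwModifyPage() and its wrappers below
 * boils down to: new = (old & fMask) | fFlags. Two worked examples, mirroring the
 * wrappers that follow (shown here for clarity only):
 * @code
 *     // Make read-only:  clear RW, keep everything else.
 *     pdmShwModifyPage(pVCpu, GCPtr, 0,          ~(uint64_t)X86_PTE_RW, 0);
 *     // Make writable:   set RW, keep everything else.
 *     pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)0,          0);
 * @endcode
 */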
871
872
873/**
874 * Changes the page flags for a single page in the shadow page tables so as to
875 * make it read-only.
876 *
877 * @returns VBox status code.
878 * @param pVCpu The cross context virtual CPU structure.
879 * @param GCPtr Virtual address of the first page in the range.
880 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
881 */
882VMMDECL(int) PGMShwMakePageReadonly(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
883{
884 return pdmShwModifyPage(pVCpu, GCPtr, 0, ~(uint64_t)X86_PTE_RW, fOpFlags);
885}
886
887
888/**
889 * Changing the page flags for a single page in the shadow page tables so as to
890 * make it writable.
891 *
892 * The caller must know with 101% certainty that the guest page tables map this
893 * as writable too. This function will deal with shared, zero and write monitored
894 * pages.
895 *
896 * @returns VBox status code.
897 * @param pVCpu The cross context virtual CPU structure.
898 * @param GCPtr Virtual address of the first page in the range.
899 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
900 */
901VMMDECL(int) PGMShwMakePageWritable(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
902{
903 return pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)0, fOpFlags);
904}
905
906
907/**
908 * Changing the page flags for a single page in the shadow page tables so as to
909 * make it not present.
910 *
911 * @returns VBox status code.
912 * @param pVCpu The cross context virtual CPU structure.
913 * @param GCPtr Virtual address of the first page in the range.
914 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
915 */
916VMMDECL(int) PGMShwMakePageNotPresent(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
917{
918 return pdmShwModifyPage(pVCpu, GCPtr, 0, 0, fOpFlags);
919}
920
921
922/**
923 * Changing the page flags for a single page in the shadow page tables so as to
924 * make it supervisor and writable.
925 *
926 * This is for dealing with CR0.WP=0 and read-only user pages.
927 *
928 * @returns VBox status code.
929 * @param pVCpu The cross context virtual CPU structure.
930 * @param GCPtr Virtual address of the first page in the range.
931 * @param fBigPage Whether or not this is a big page. If it is, we have to
932 * change the shadow PDE as well. If it isn't, the caller
933 * has checked that the shadow PDE doesn't need changing.
934 * We ASSUME 4KB pages backing the big page here!
935 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
936 */
937int pgmShwMakePageSupervisorAndWritable(PVMCPU pVCpu, RTGCPTR GCPtr, bool fBigPage, uint32_t fOpFlags)
938{
939 int rc = pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)X86_PTE_US, fOpFlags);
940 if (rc == VINF_SUCCESS && fBigPage)
941 {
942 /* this is a bit ugly... */
943 switch (pVCpu->pgm.s.enmShadowMode)
944 {
945 case PGMMODE_32_BIT:
946 {
947 PX86PDE pPde = pgmShwGet32BitPDEPtr(pVCpu, GCPtr);
948 AssertReturn(pPde, VERR_INTERNAL_ERROR_3);
949 Log(("pgmShwMakePageSupervisorAndWritable: PDE=%#llx", pPde->u));
950 pPde->n.u1Write = 1;
951 Log(("-> PDE=%#llx (32)\n", pPde->u));
952 break;
953 }
954 case PGMMODE_PAE:
955 case PGMMODE_PAE_NX:
956 {
957 PX86PDEPAE pPde = pgmShwGetPaePDEPtr(pVCpu, GCPtr);
958 AssertReturn(pPde, VERR_INTERNAL_ERROR_3);
959 Log(("pgmShwMakePageSupervisorAndWritable: PDE=%#llx", pPde->u));
960 pPde->n.u1Write = 1;
961 Log(("-> PDE=%#llx (PAE)\n", pPde->u));
962 break;
963 }
964 default:
965 AssertFailedReturn(VERR_INTERNAL_ERROR_4);
966 }
967 }
968 return rc;
969}
970
971
972/**
973 * Gets the shadow page directory for the specified address, PAE.
974 *
975 * @returns Pointer to the shadow PD.
976 * @param pVCpu The cross context virtual CPU structure.
977 * @param GCPtr The address.
978 * @param uGstPdpe Guest PDPT entry. Valid.
979 * @param ppPD Receives address of page directory
980 */
981int pgmShwSyncPaePDPtr(PVMCPU pVCpu, RTGCPTR GCPtr, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
982{
983 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
984 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
985 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
986 PVM pVM = pVCpu->CTX_SUFF(pVM);
987 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
988 PPGMPOOLPAGE pShwPage;
989 int rc;
990
991 PGM_LOCK_ASSERT_OWNER(pVM);
992
993 /* Allocate page directory if not present. */
994 if ( !pPdpe->n.u1Present
995 && !(pPdpe->u & X86_PDPE_PG_MASK))
996 {
997 RTGCPTR64 GCPdPt;
998 PGMPOOLKIND enmKind;
999
1000 if (pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu))
1001 {
1002 /* AMD-V nested paging or real/protected mode without paging. */
1003 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
1004 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
1005 }
1006 else
1007 {
1008 if (CPUMGetGuestCR4(pVCpu) & X86_CR4_PAE)
1009 {
1010 if (!(uGstPdpe & X86_PDPE_P))
1011 {
1012 /* PD not present; guest must reload CR3 to change it.
1013 * No need to monitor anything in this case.
1014 */
1015 Assert(!HMIsEnabled(pVM));
1016
1017 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1018 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
1019 uGstPdpe |= X86_PDPE_P;
1020 }
1021 else
1022 {
1023 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1024 enmKind = PGMPOOLKIND_PAE_PD_FOR_PAE_PD;
1025 }
1026 }
1027 else
1028 {
1029 GCPdPt = CPUMGetGuestCR3(pVCpu);
1030 enmKind = (PGMPOOLKIND)(PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD + iPdPt);
1031 }
1032 }
1033
1034 /* Create a reference back to the PDPT by using the index in its shadow page. */
1035 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1036 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPdPt, false /*fLockPage*/,
1037 &pShwPage);
1038 AssertRCReturn(rc, rc);
1039
1040 /* The PD was cached or created; hook it up now. */
1041 pPdpe->u |= pShwPage->Core.Key | (uGstPdpe & (X86_PDPE_P | X86_PDPE_A));
1042
1043# if defined(IN_RC)
1044 /*
1045 * In 32-bit PAE mode we *must* invalidate the TLB when changing a
1046 * PDPT entry; the CPU fetches them only during cr3 load, so any
1047 * non-present PDPT will continue to cause page faults.
1048 */
1049 ASMReloadCR3();
1050# endif
1051 PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdpe);
1052 }
1053 else
1054 {
1055 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
1056 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1057 Assert((pPdpe->u & X86_PDPE_PG_MASK) == pShwPage->Core.Key);
1058
1059 pgmPoolCacheUsed(pPool, pShwPage);
1060 }
1061 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1062 return VINF_SUCCESS;
1063}
1064
1065
1066/**
1067 * Gets the pointer to the shadow page directory entry for an address, PAE.
1068 *
1069 * @returns Pointer to the PDE.
1070 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1071 * @param GCPtr The address.
1072 * @param ppShwPde Receives the address of the pgm pool page for the shadow page directory
1073 */
1074DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPU pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde)
1075{
1076 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
1077 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
1078 PVM pVM = pVCpu->CTX_SUFF(pVM);
1079
1080 PGM_LOCK_ASSERT_OWNER(pVM);
1081
1082 AssertReturn(pPdpt, VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT); /* can't happen */
1083 if (!pPdpt->a[iPdPt].n.u1Present)
1084 {
1085 LogFlow(("pgmShwGetPaePoolPagePD: PD %d not present (%RX64)\n", iPdPt, pPdpt->a[iPdPt].u));
1086 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1087 }
1088 AssertMsg(pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK, ("GCPtr=%RGv\n", GCPtr));
1089
1090 /* Fetch the pgm pool shadow descriptor. */
1091 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
1092 AssertReturn(pShwPde, VERR_PGM_POOL_GET_PAGE_FAILED);
1093
1094 *ppShwPde = pShwPde;
1095 return VINF_SUCCESS;
1096}
1097
1098#ifndef IN_RC
1099
1100/**
1101 * Syncs the SHADOW page directory pointer for the specified address.
1102 *
1103 * Allocates backing pages in case the PDPT or PML4 entry is missing.
1104 *
1105 * The caller is responsible for making sure the guest has a valid PD before
1106 * calling this function.
1107 *
1108 * @returns VBox status code.
1109 * @param pVCpu The cross context virtual CPU structure.
1110 * @param GCPtr The address.
1111 * @param uGstPml4e Guest PML4 entry (valid).
1112 * @param uGstPdpe Guest PDPT entry (valid).
1113 * @param ppPD Receives address of page directory
1114 */
1115static int pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
1116{
1117 PVM pVM = pVCpu->CTX_SUFF(pVM);
1118 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1119 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1120 PX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
1121 bool fNestedPagingOrNoGstPaging = pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu);
1122 PPGMPOOLPAGE pShwPage;
1123 int rc;
1124
1125 PGM_LOCK_ASSERT_OWNER(pVM);
1126
1127 /* Allocate page directory pointer table if not present. */
1128 if ( !pPml4e->n.u1Present
1129 && !(pPml4e->u & X86_PML4E_PG_MASK))
1130 {
1131 RTGCPTR64 GCPml4;
1132 PGMPOOLKIND enmKind;
1133
1134 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1135
1136 if (fNestedPagingOrNoGstPaging)
1137 {
1138 /* AMD-V nested paging or real/protected mode without paging */
1139 GCPml4 = (RTGCPTR64)iPml4 << X86_PML4_SHIFT;
1140 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_PHYS;
1141 }
1142 else
1143 {
1144 GCPml4 = uGstPml4e & X86_PML4E_PG_MASK;
1145 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT;
1146 }
1147
1148 /* Create a reference back to the PDPT by using the index in its shadow page. */
1149 rc = pgmPoolAlloc(pVM, GCPml4, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1150 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, false /*fLockPage*/,
1151 &pShwPage);
1152 AssertRCReturn(rc, rc);
1153 }
1154 else
1155 {
1156 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
1157 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1158
1159 pgmPoolCacheUsed(pPool, pShwPage);
1160 }
1161 /* The PDPT was cached or created; hook it up now. */
1162 pPml4e->u |= pShwPage->Core.Key | (uGstPml4e & pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask);
1163
1164 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1165 PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1166 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1167
1168 /* Allocate page directory if not present. */
1169 if ( !pPdpe->n.u1Present
1170 && !(pPdpe->u & X86_PDPE_PG_MASK))
1171 {
1172 RTGCPTR64 GCPdPt;
1173 PGMPOOLKIND enmKind;
1174
1175 if (fNestedPagingOrNoGstPaging)
1176 {
1177 /* AMD-V nested paging or real/protected mode without paging */
1178 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
1179 enmKind = PGMPOOLKIND_64BIT_PD_FOR_PHYS;
1180 }
1181 else
1182 {
1183 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1184 enmKind = PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD;
1185 }
1186
1187 /* Create a reference back to the PDPT by using the index in its shadow page. */
1188 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1189 pShwPage->idx, iPdPt, false /*fLockPage*/,
1190 &pShwPage);
1191 AssertRCReturn(rc, rc);
1192 }
1193 else
1194 {
1195 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
1196 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1197
1198 pgmPoolCacheUsed(pPool, pShwPage);
1199 }
1200 /* The PD was cached or created; hook it up now. */
1201 pPdpe->u |= pShwPage->Core.Key | (uGstPdpe & pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask);
1202
1203 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1204 return VINF_SUCCESS;
1205}
1206
1207
1208/**
1209 * Gets the SHADOW page directory pointer for the specified address (long mode).
1210 *
1211 * @returns VBox status code.
1212 * @param pVCpu The cross context virtual CPU structure.
1213 * @param GCPtr The address.
1214 * @param ppPdpt Receives address of pdpt
1215 * @param ppPD Receives address of page directory
1216 */
1217DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
1218{
1219 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1220 PCX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
1221
1222 PGM_LOCK_ASSERT_OWNER(pVCpu->CTX_SUFF(pVM));
1223
1224 AssertReturn(pPml4e, VERR_PGM_PML4_MAPPING);
1225 if (ppPml4e)
1226 *ppPml4e = (PX86PML4E)pPml4e;
1227
1228 Log4(("pgmShwGetLongModePDPtr %RGv (%RHv) %RX64\n", GCPtr, pPml4e, pPml4e->u));
1229
1230 if (!pPml4e->n.u1Present)
1231 return VERR_PAGE_MAP_LEVEL4_NOT_PRESENT;
1232
1233 PVM pVM = pVCpu->CTX_SUFF(pVM);
1234 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1235 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
1236 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1237
1238 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1239 PCX86PDPT pPdpt = *ppPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1240 if (!pPdpt->a[iPdPt].n.u1Present)
1241 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1242
1243 pShwPage = pgmPoolGetPage(pPool, pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
1244 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1245
1246 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1247 Log4(("pgmShwGetLongModePDPtr %RGv -> *ppPD=%p PDE=%p/%RX64\n", GCPtr, *ppPD, &(*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK], (*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK].u));
1248 return VINF_SUCCESS;
1249}
1250
1251
1252/**
1253 * Syncs the SHADOW EPT page directory pointer for the specified address. Allocates
1254 * backing pages in case the PDPT or PML4 entry is missing.
1255 *
1256 * @returns VBox status code.
1257 * @param pVCpu The cross context virtual CPU structure.
1258 * @param GCPtr The address.
1259 * @param ppPdpt Receives address of pdpt
1260 * @param ppPD Receives address of page directory
1261 */
1262static int pgmShwGetEPTPDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD)
1263{
1264 PVM pVM = pVCpu->CTX_SUFF(pVM);
1265 const unsigned iPml4 = (GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
1266 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1267 PEPTPML4 pPml4;
1268 PEPTPML4E pPml4e;
1269 PPGMPOOLPAGE pShwPage;
1270 int rc;
1271
1272 Assert(pVM->pgm.s.fNestedPaging);
1273 PGM_LOCK_ASSERT_OWNER(pVM);
1274
1275 pPml4 = (PEPTPML4)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1276 Assert(pPml4);
1277
1278 /* Allocate page directory pointer table if not present. */
1279 pPml4e = &pPml4->a[iPml4];
1280 if ( !pPml4e->n.u1Present
1281 && !(pPml4e->u & EPT_PML4E_PG_MASK))
1282 {
1283 Assert(!(pPml4e->u & EPT_PML4E_PG_MASK));
1284 RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT;
1285
1286 rc = pgmPoolAlloc(pVM, GCPml4, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1287 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, false /*fLockPage*/,
1288 &pShwPage);
1289 AssertRCReturn(rc, rc);
1290 }
1291 else
1292 {
1293 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
1294 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1295
1296 pgmPoolCacheUsed(pPool, pShwPage);
1297 }
1298 /* The PDPT was cached or created; hook it up now and fill with the default value. */
1299 pPml4e->u = pShwPage->Core.Key;
1300 pPml4e->n.u1Present = 1;
1301 pPml4e->n.u1Write = 1;
1302 pPml4e->n.u1Execute = 1;
1303
1304 const unsigned iPdPt = (GCPtr >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
1305 PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1306 PEPTPDPTE pPdpe = &pPdpt->a[iPdPt];
1307
1308 if (ppPdpt)
1309 *ppPdpt = pPdpt;
1310
1311 /* Allocate page directory if not present. */
1312 if ( !pPdpe->n.u1Present
1313 && !(pPdpe->u & EPT_PDPTE_PG_MASK))
1314 {
1315 RTGCPTR64 GCPdPt = (RTGCPTR64)iPdPt << EPT_PDPT_SHIFT;
1316 rc = pgmPoolAlloc(pVM, GCPdPt, PGMPOOLKIND_EPT_PD_FOR_PHYS, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1317 pShwPage->idx, iPdPt, false /*fLockPage*/,
1318 &pShwPage);
1319 AssertRCReturn(rc, rc);
1320 }
1321 else
1322 {
1323 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & EPT_PDPTE_PG_MASK);
1324 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1325
1326 pgmPoolCacheUsed(pPool, pShwPage);
1327 }
1328 /* The PD was cached or created; hook it up now and fill with the default value. */
1329 pPdpe->u = pShwPage->Core.Key;
1330 pPdpe->n.u1Present = 1;
1331 pPdpe->n.u1Write = 1;
1332 pPdpe->n.u1Execute = 1;
1333
1334 *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1335 return VINF_SUCCESS;
1336}
1337
1338#endif /* IN_RC */
1339
1340#ifdef IN_RING0
1341/**
1342 * Synchronizes a range of nested page table entries.
1343 *
1344 * The caller must own the PGM lock.
1345 *
1346 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1347 * @param GCPhys Where to start.
1348 * @param cPages The number of pages whose entries should be synced.
1349 * @param enmShwPagingMode The shadow paging mode (PGMMODE_EPT for VT-x,
1350 * host paging mode for AMD-V).
1351 */
1352int pgmShwSyncNestedPageLocked(PVMCPU pVCpu, RTGCPHYS GCPhys, uint32_t cPages, PGMMODE enmShwPagingMode)
1353{
1354 PGM_LOCK_ASSERT_OWNER(pVCpu->CTX_SUFF(pVM));
1355
1356 int rc;
1357 switch (enmShwPagingMode)
1358 {
1359 case PGMMODE_32_BIT:
1360 {
1361 X86PDE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1362 rc = PGM_BTH_NAME_32BIT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1363 break;
1364 }
1365
1366 case PGMMODE_PAE:
1367 case PGMMODE_PAE_NX:
1368 {
1369 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1370 rc = PGM_BTH_NAME_PAE_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1371 break;
1372 }
1373
1374 case PGMMODE_AMD64:
1375 case PGMMODE_AMD64_NX:
1376 {
1377 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1378 rc = PGM_BTH_NAME_AMD64_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1379 break;
1380 }
1381
1382 case PGMMODE_EPT:
1383 {
1384 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1385 rc = PGM_BTH_NAME_EPT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1386 break;
1387 }
1388
1389 default:
1390 AssertMsgFailedReturn(("%d\n", enmShwPagingMode), VERR_IPE_NOT_REACHED_DEFAULT_CASE);
1391 }
1392 return rc;
1393}
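/*
 * Illustrative ring-0 caller of pgmShwSyncNestedPageLocked() (assumed context;
 * not from the original file). The PGM lock must be held across the call, and
 * GCPhysFault is a hypothetical nested-paging fault address.
 * @code
 *     pgmLock(pVM);
 *     int rc = pgmShwSyncNestedPageLocked(pVCpu, GCPhysFault, 1 /*cPages*/, PGMMODE_EPT);
 *     pgmUnlock(pVM);
 * @endcode
 */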
1394#endif /* IN_RING0 */
1395
1396
1397/**
1398 * Gets effective Guest OS page information.
1399 *
1400 * When GCPtr is in a big page, the function will return as if it were a normal
1401 * 4KB page. If distinguishing between big and normal pages becomes necessary at
1402 * a later point, a dedicated variant of PGMGstGetPage() will be created for that
1403 * purpose.
1404 *
1405 * @returns VBox status code.
1406 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1407 * @param GCPtr Guest Context virtual address of the page.
1408 * @param pfFlags Where to store the flags. These are X86_PTE_*, even for big pages.
1409 * @param pGCPhys Where to store the GC physical address of the page.
1410 * This is page aligned.
1411 */
1412VMMDECL(int) PGMGstGetPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
1413{
1414 VMCPU_ASSERT_EMT(pVCpu);
1415 return PGM_GST_PFN(GetPage, pVCpu)(pVCpu, GCPtr, pfFlags, pGCPhys);
1416}
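/*
 * Usage sketch for PGMGstGetPage() (illustrative; the caller is assumed):
 * translating a guest-virtual address to guest-physical and checking that the
 * guest itself maps it writable.
 * @code
 *     uint64_t fFlags;
 *     RTGCPHYS GCPhys;
 *     int rc = PGMGstGetPage(pVCpu, GCPtr, &fFlags, &GCPhys);
 *     if (RT_SUCCESS(rc) && (fFlags & X86_PTE_RW))
 *     {
 *         // GCPhys is the page-aligned guest-physical address backing GCPtr.
 *     }
 * @endcode
 */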
1417
1418
1419/**
1420 * Performs a guest page table walk.
1421 *
1422 * The guest should be in paged protected mode or long mode when making a call to
1423 * this function.
1424 *
1425 * @returns VBox status code.
1426 * @retval VINF_SUCCESS on success.
1427 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
1428 * @retval VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
1429 * not valid, except enmType is PGMPTWALKGSTTYPE_INVALID.
1430 *
1431 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1432 * @param GCPtr The guest virtual address to walk by.
1433 * @param pWalk Where to return the walk result. This is valid on some
1434 * error codes as well.
1435 */
1436int pgmGstPtWalk(PVMCPU pVCpu, RTGCPTR GCPtr, PPGMPTWALKGST pWalk)
1437{
1438 VMCPU_ASSERT_EMT(pVCpu);
1439 switch (pVCpu->pgm.s.enmGuestMode)
1440 {
1441 case PGMMODE_32_BIT:
1442 pWalk->enmType = PGMPTWALKGSTTYPE_32BIT;
1443 return PGM_GST_NAME_32BIT(Walk)(pVCpu, GCPtr, &pWalk->u.Legacy);
1444
1445 case PGMMODE_PAE:
1446 case PGMMODE_PAE_NX:
1447 pWalk->enmType = PGMPTWALKGSTTYPE_PAE;
1448 return PGM_GST_NAME_PAE(Walk)(pVCpu, GCPtr, &pWalk->u.Pae);
1449
1450#if !defined(IN_RC)
1451 case PGMMODE_AMD64:
1452 case PGMMODE_AMD64_NX:
1453 pWalk->enmType = PGMPTWALKGSTTYPE_AMD64;
1454 return PGM_GST_NAME_AMD64(Walk)(pVCpu, GCPtr, &pWalk->u.Amd64);
1455#endif
1456
1457 case PGMMODE_REAL:
1458 case PGMMODE_PROTECTED:
1459 pWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
1460 return VERR_PGM_NOT_USED_IN_MODE;
1461
1462#if defined(IN_RC)
1463 case PGMMODE_AMD64:
1464 case PGMMODE_AMD64_NX:
1465#endif
1466 case PGMMODE_NESTED:
1467 case PGMMODE_EPT:
1468 default:
1469 AssertFailed();
1470 pWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
1471 return VERR_PGM_NOT_USED_IN_MODE;
1472 }
1473}
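/*
 * Illustrative call pattern for pgmGstPtWalk() (assumed caller; not original code).
 * The walk result is mode specific, so check the status and enmType before touching
 * the union members.
 * @code
 *     PGMPTWALKGST Walk;
 *     int rc = pgmGstPtWalk(pVCpu, GCPtr, &Walk);
 *     if (RT_SUCCESS(rc))
 *     {
 *         // Walk.enmType tells which union member (u.Legacy, u.Pae, u.Amd64) is valid.
 *     }
 *     else if (rc == VERR_PAGE_TABLE_NOT_PRESENT)
 *     {
 *         // Walk still describes how far the walk got; see the retval docs above.
 *     }
 * @endcode
 */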
1474
1475
1476/**
1477 * Checks if the page is present.
1478 *
1479 * @returns true if the page is present.
1480 * @returns false if the page is not present.
1481 * @param pVCpu The cross context virtual CPU structure.
1482 * @param GCPtr Address within the page.
1483 */
1484VMMDECL(bool) PGMGstIsPagePresent(PVMCPU pVCpu, RTGCPTR GCPtr)
1485{
1486 VMCPU_ASSERT_EMT(pVCpu);
1487 int rc = PGMGstGetPage(pVCpu, GCPtr, NULL, NULL);
1488 return RT_SUCCESS(rc);
1489}
1490
1491
1492/**
1493 * Sets (replaces) the page flags for a range of pages in the guest's tables.
1494 *
1495 * @returns VBox status code.
1496 * @param pVCpu The cross context virtual CPU structure.
1497 * @param GCPtr The address of the first page.
1498 * @param cb The size of the range in bytes.
1499 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
1500 */
1501VMMDECL(int) PGMGstSetPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
1502{
1503 VMCPU_ASSERT_EMT(pVCpu);
1504 return PGMGstModifyPage(pVCpu, GCPtr, cb, fFlags, 0);
1505}
1506
1507
1508/**
1509 * Modify page flags for a range of pages in the guest's tables
1510 *
1511 * The existing flags are ANDed with the fMask and ORed with the fFlags.
1512 *
1513 * @returns VBox status code.
1514 * @param pVCpu The cross context virtual CPU structure.
1515 * @param GCPtr Virtual address of the first page in the range.
1516 * @param cb Size (in bytes) of the range to apply the modification to.
1517 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
1518 * @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
1519 * Be very CAREFUL when ~'ing constants which could be 32-bit!
1520 */
1521VMMDECL(int) PGMGstModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
1522{
1523 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,GstModifyPage), a);
1524 VMCPU_ASSERT_EMT(pVCpu);
1525
1526 /*
1527 * Validate input.
1528 */
1529 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
1530 Assert(cb);
1531
1532 LogFlow(("PGMGstModifyPage %RGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));
1533
1534 /*
1535 * Adjust input.
1536 */
1537 cb += GCPtr & PAGE_OFFSET_MASK;
1538 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
1539 GCPtr = (GCPtr & PAGE_BASE_GC_MASK);
1540
1541 /*
1542 * Call worker.
1543 */
1544 int rc = PGM_GST_PFN(ModifyPage, pVCpu)(pVCpu, GCPtr, cb, fFlags, fMask);
1545
1546 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,GstModifyPage), a);
1547 return rc;
1548}
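/*
 * Usage sketch for PGMGstModifyPage() (illustrative only; the range shown is
 * assumed): clearing the RW bit across a guest range, following the same
 * fFlags/fMask convention as the shadow variant above.
 * @code
 *     // Clear X86_PTE_RW on cb bytes starting at GCPtr (rounded to whole pages internally).
 *     int rc = PGMGstModifyPage(pVCpu, GCPtr, cb, 0, ~(uint64_t)X86_PTE_RW);
 *     AssertRC(rc);
 * @endcode
 */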
1549
1550
1551#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1552
1553/**
1554 * Performs the lazy mapping of the 32-bit guest PD.
1555 *
1556 * @returns VBox status code.
1557 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1558 * @param ppPd Where to return the pointer to the mapping. This is
1559 * always set.
1560 */
1561int pgmGstLazyMap32BitPD(PVMCPU pVCpu, PX86PD *ppPd)
1562{
1563 PVM pVM = pVCpu->CTX_SUFF(pVM);
1564 pgmLock(pVM);
1565
1566 Assert(!pVCpu->pgm.s.CTX_SUFF(pGst32BitPd));
1567
1568 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAGE_MASK;
1569 PPGMPAGE pPage;
1570 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
1571 if (RT_SUCCESS(rc))
1572 {
1573 RTHCPTR HCPtrGuestCR3;
1574 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
1575 if (RT_SUCCESS(rc))
1576 {
1577 pVCpu->pgm.s.pGst32BitPdR3 = (R3PTRTYPE(PX86PD))HCPtrGuestCR3;
1578# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1579 pVCpu->pgm.s.pGst32BitPdR0 = (R0PTRTYPE(PX86PD))HCPtrGuestCR3;
1580# endif
1581 *ppPd = (PX86PD)HCPtrGuestCR3;
1582
1583 pgmUnlock(pVM);
1584 return VINF_SUCCESS;
1585 }
1586
1587 AssertRC(rc);
1588 }
1589 pgmUnlock(pVM);
1590
1591 *ppPd = NULL;
1592 return rc;
1593}
1594
1595
1596/**
1597 * Performs the lazy mapping of the PAE guest PDPT.
1598 *
1599 * @returns VBox status code.
1600 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1601 * @param ppPdpt Where to return the pointer to the mapping. This is
1602 * always set.
1603 */
1604int pgmGstLazyMapPaePDPT(PVMCPU pVCpu, PX86PDPT *ppPdpt)
1605{
1606 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt));
1607 PVM pVM = pVCpu->CTX_SUFF(pVM);
1608 pgmLock(pVM);
1609
1610 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAE_PAGE_MASK;
1611 PPGMPAGE pPage;
1612 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
1613 if (RT_SUCCESS(rc))
1614 {
1615 RTHCPTR HCPtrGuestCR3;
1616 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
1617 if (RT_SUCCESS(rc))
1618 {
1619 pVCpu->pgm.s.pGstPaePdptR3 = (R3PTRTYPE(PX86PDPT))HCPtrGuestCR3;
1620# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1621 pVCpu->pgm.s.pGstPaePdptR0 = (R0PTRTYPE(PX86PDPT))HCPtrGuestCR3;
1622# endif
1623 *ppPdpt = (PX86PDPT)HCPtrGuestCR3;
1624
1625 pgmUnlock(pVM);
1626 return VINF_SUCCESS;
1627 }
1628
1629 AssertRC(rc);
1630 }
1631
1632 pgmUnlock(pVM);
1633 *ppPdpt = NULL;
1634 return rc;
1635}
1636
1637
1638/**
1639 * Performs the lazy mapping / updating of a PAE guest PD.
1640 *
1641 * @returns Pointer to the mapping.
1642 * @returns VBox status code.
1643 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1644 * @param iPdpt Which PD entry to map (0..3).
1645 * @param ppPd Where to return the pointer to the mapping. This is
1646 * always set.
1647 */
1648int pgmGstLazyMapPaePD(PVMCPU pVCpu, uint32_t iPdpt, PX86PDPAE *ppPd)
1649{
1650 PVM pVM = pVCpu->CTX_SUFF(pVM);
1651 pgmLock(pVM);
1652
1653 PX86PDPT pGuestPDPT = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
1654 Assert(pGuestPDPT);
1655 Assert(pGuestPDPT->a[iPdpt].n.u1Present);
1656 RTGCPHYS GCPhys = pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK;
1657 bool const fChanged = pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] != GCPhys;
1658
1659 PPGMPAGE pPage;
1660 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
1661 if (RT_SUCCESS(rc))
1662 {
1663 RTRCPTR RCPtr = NIL_RTRCPTR;
1664 RTHCPTR HCPtr = NIL_RTHCPTR;
1665#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1666 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, &HCPtr);
1667 AssertRC(rc);
1668#endif
1669 if (RT_SUCCESS(rc) && fChanged)
1670 {
1671 RCPtr = (RTRCPTR)(RTRCUINTPTR)(pVM->pgm.s.GCPtrCR3Mapping + (1 + iPdpt) * PAGE_SIZE);
1672 rc = PGMMap(pVM, (RTRCUINTPTR)RCPtr, PGM_PAGE_GET_HCPHYS(pPage), PAGE_SIZE, 0);
1673 }
1674 if (RT_SUCCESS(rc))
1675 {
1676 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = (R3PTRTYPE(PX86PDPAE))HCPtr;
1677# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1678 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = (R0PTRTYPE(PX86PDPAE))HCPtr;
1679# endif
1680 if (fChanged)
1681 {
1682 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = GCPhys;
1683 pVCpu->pgm.s.apGstPaePDsRC[iPdpt] = (RCPTRTYPE(PX86PDPAE))RCPtr;
1684 }
1685
1686 *ppPd = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
1687 pgmUnlock(pVM);
1688 return VINF_SUCCESS;
1689 }
1690 }
1691
1692 /* Invalid page or some failure, invalidate the entry. */
1693 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = NIL_RTGCPHYS;
1694 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = 0;
1695# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1696 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = 0;
1697# endif
1698 pVCpu->pgm.s.apGstPaePDsRC[iPdpt] = 0;
1699
1700 pgmUnlock(pVM);
1701 return rc;
1702}
1703
1704#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1705#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1706/**
1707 * Performs the lazy mapping of the AMD64 guest PML4 table.
1708 *
1709 * @returns VBox status code.
1710 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1711 * @param ppPml4 Where to return the pointer to the mapping. This will
1712 * always be set.
1713 */
1714int pgmGstLazyMapPml4(PVMCPU pVCpu, PX86PML4 *ppPml4)
1715{
1716 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4));
1717 PVM pVM = pVCpu->CTX_SUFF(pVM);
1718 pgmLock(pVM);
1719
1720 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_AMD64_PAGE_MASK;
1721 PPGMPAGE pPage;
1722 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
1723 if (RT_SUCCESS(rc))
1724 {
1725 RTHCPTR HCPtrGuestCR3;
1726 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
1727 if (RT_SUCCESS(rc))
1728 {
1729 pVCpu->pgm.s.pGstAmd64Pml4R3 = (R3PTRTYPE(PX86PML4))HCPtrGuestCR3;
1730# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1731 pVCpu->pgm.s.pGstAmd64Pml4R0 = (R0PTRTYPE(PX86PML4))HCPtrGuestCR3;
1732# endif
1733 *ppPml4 = (PX86PML4)HCPtrGuestCR3;
1734
1735 pgmUnlock(pVM);
1736 return VINF_SUCCESS;
1737 }
1738 }
1739
1740 pgmUnlock(pVM);
1741 *ppPml4 = NULL;
1742 return rc;
1743}
1744#endif
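
/*
 * Usage sketch (illustrative only): the lazy-map routines above are normally
 * reached through per-context accessors rather than called directly; callers
 * first consult the cached pointer and only fall back to the lazy mapping when
 * it is still NULL.  The wrapper below is a hypothetical illustration of that
 * pattern, not an actual PGM accessor.
 *
 *     static int exampleGetGuest32BitPD(PVMCPU pVCpu, PX86PD *ppPd)
 *     {
 *         // Fast path: the guest PD is already mapped for this context.
 *         *ppPd = pVCpu->pgm.s.CTX_SUFF(pGst32BitPd);
 *         if (RT_LIKELY(*ppPd))
 *             return VINF_SUCCESS;
 *         // Slow path: map it now (takes the PGM lock internally).
 *         return pgmGstLazyMap32BitPD(pVCpu, ppPd);
 *     }
 */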
1745
1746
1747/**
1748 * Gets the PAE PDPE values cached by the CPU.
1749 *
1750 * @returns VBox status code.
1751 * @param pVCpu The cross context virtual CPU structure.
1752 * @param paPdpes Where to return the four PDPEs. The array
1753 * pointed to must have 4 entries.
1754 */
1755VMM_INT_DECL(int) PGMGstGetPaePdpes(PVMCPU pVCpu, PX86PDPE paPdpes)
1756{
1757 Assert(pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
1758
1759 paPdpes[0] = pVCpu->pgm.s.aGstPaePdpeRegs[0];
1760 paPdpes[1] = pVCpu->pgm.s.aGstPaePdpeRegs[1];
1761 paPdpes[2] = pVCpu->pgm.s.aGstPaePdpeRegs[2];
1762 paPdpes[3] = pVCpu->pgm.s.aGstPaePdpeRegs[3];
1763 return VINF_SUCCESS;
1764}
1765
1766
1767/**
1768 * Sets the PAE PDPE values cached by the CPU.
1769 *
1770 * @remarks This must be called *AFTER* PGMUpdateCR3.
1771 *
1772 * @param pVCpu The cross context virtual CPU structure.
1773 * @param paPdpes The four PDPE values. The array pointed to must
1774 * have exactly 4 entries.
1775 *
1776 * @remarks No-long-jump zone!!!
1777 */
1778VMM_INT_DECL(void) PGMGstUpdatePaePdpes(PVMCPU pVCpu, PCX86PDPE paPdpes)
1779{
1780 Assert(pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
1781
1782 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->pgm.s.aGstPaePdpeRegs); i++)
1783 {
1784 if (pVCpu->pgm.s.aGstPaePdpeRegs[i].u != paPdpes[i].u)
1785 {
1786 pVCpu->pgm.s.aGstPaePdpeRegs[i] = paPdpes[i];
1787
1788 /* Force lazy remapping if it changed in any way. */
1789 pVCpu->pgm.s.apGstPaePDsR3[i] = 0;
1790# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1791 pVCpu->pgm.s.apGstPaePDsR0[i] = 0;
1792# endif
1793 pVCpu->pgm.s.apGstPaePDsRC[i] = 0;
1794 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
1795 }
1796 }
1797
1798 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
1799}
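
/*
 * Usage sketch (illustrative only): shows the ordering documented in the
 * remark above -- remap the CR3 content via PGMUpdateCR3() first, then push
 * the PDPE values the CPU has cached into PGM.  Assumes an EMT caller with an
 * EPT shadow mode and a PAE guest; the names below are placeholders.
 *
 *     static void examplePaePdpeReload(PVMCPU pVCpu, uint64_t uGuestCr3, PCX86PDPE paCachedPdpes)
 *     {
 *         // Remap the guest CR3 content (nested/EPT variant, no TLB flushing).
 *         int rc = PGMUpdateCR3(pVCpu, uGuestCr3);
 *         AssertRC(rc);
 *         // Hand the four cached PDPEs to PGM so stale PAE PD mappings are
 *         // invalidated and lazily remapped on next use.
 *         PGMGstUpdatePaePdpes(pVCpu, paCachedPdpes);
 *     }
 */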
1800
1801
1802/**
1803 * Gets the current CR3 register value for the shadow memory context.
1804 * @returns CR3 value.
1805 * @param pVCpu The cross context virtual CPU structure.
1806 */
1807VMMDECL(RTHCPHYS) PGMGetHyperCR3(PVMCPU pVCpu)
1808{
1809 PPGMPOOLPAGE pPoolPage = pVCpu->pgm.s.CTX_SUFF(pShwPageCR3);
1810 AssertPtrReturn(pPoolPage, 0);
1811 return pPoolPage->Core.Key;
1812}
1813
1814
1815/**
1816 * Gets the current CR3 register value for the nested memory context.
1817 * @returns CR3 value.
1818 * @param pVCpu The cross context virtual CPU structure.
1819 * @param enmShadowMode The shadow paging mode.
1820 */
1821VMMDECL(RTHCPHYS) PGMGetNestedCR3(PVMCPU pVCpu, PGMMODE enmShadowMode)
1822{
1823 NOREF(enmShadowMode);
1824 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1825 return pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key;
1826}
1827
1828
1829/**
1830 * Gets the current CR3 register value for the HC intermediate memory context.
1831 * @returns CR3 value.
1832 * @param pVM The cross context VM structure.
1833 */
1834VMMDECL(RTHCPHYS) PGMGetInterHCCR3(PVM pVM)
1835{
1836 switch (pVM->pgm.s.enmHostMode)
1837 {
1838 case SUPPAGINGMODE_32_BIT:
1839 case SUPPAGINGMODE_32_BIT_GLOBAL:
1840 return pVM->pgm.s.HCPhysInterPD;
1841
1842 case SUPPAGINGMODE_PAE:
1843 case SUPPAGINGMODE_PAE_GLOBAL:
1844 case SUPPAGINGMODE_PAE_NX:
1845 case SUPPAGINGMODE_PAE_GLOBAL_NX:
1846 return pVM->pgm.s.HCPhysInterPaePDPT;
1847
1848 case SUPPAGINGMODE_AMD64:
1849 case SUPPAGINGMODE_AMD64_GLOBAL:
1850 case SUPPAGINGMODE_AMD64_NX:
1851 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
1852 return pVM->pgm.s.HCPhysInterPaePDPT;
1853
1854 default:
1855 AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode));
1856 return NIL_RTHCPHYS;
1857 }
1858}
1859
1860
1861/**
1862 * Gets the current CR3 register value for the RC intermediate memory context.
1863 * @returns CR3 value.
1864 * @param pVM The cross context VM structure.
1865 * @param pVCpu The cross context virtual CPU structure.
1866 */
1867VMMDECL(RTHCPHYS) PGMGetInterRCCR3(PVM pVM, PVMCPU pVCpu)
1868{
1869 switch (pVCpu->pgm.s.enmShadowMode)
1870 {
1871 case PGMMODE_32_BIT:
1872 return pVM->pgm.s.HCPhysInterPD;
1873
1874 case PGMMODE_PAE:
1875 case PGMMODE_PAE_NX:
1876 return pVM->pgm.s.HCPhysInterPaePDPT;
1877
1878 case PGMMODE_AMD64:
1879 case PGMMODE_AMD64_NX:
1880 return pVM->pgm.s.HCPhysInterPaePML4;
1881
1882 case PGMMODE_EPT:
1883 case PGMMODE_NESTED:
1884 return 0; /* not relevant */
1885
1886 default:
1887 AssertMsgFailed(("enmShadowMode=%d\n", pVCpu->pgm.s.enmShadowMode));
1888 return NIL_RTHCPHYS;
1889 }
1890}
1891
1892
1893/**
1894 * Gets the CR3 register value for the 32-Bit intermediate memory context.
1895 * @returns CR3 value.
1896 * @param pVM The cross context VM structure.
1897 */
1898VMMDECL(RTHCPHYS) PGMGetInter32BitCR3(PVM pVM)
1899{
1900 return pVM->pgm.s.HCPhysInterPD;
1901}
1902
1903
1904/**
1905 * Gets the CR3 register value for the PAE intermediate memory context.
1906 * @returns CR3 value.
1907 * @param pVM The cross context VM structure.
1908 */
1909VMMDECL(RTHCPHYS) PGMGetInterPaeCR3(PVM pVM)
1910{
1911 return pVM->pgm.s.HCPhysInterPaePDPT;
1912}
1913
1914
1915/**
1916 * Gets the CR3 register value for the AMD64 intermediate memory context.
1917 * @returns CR3 value.
1918 * @param pVM The cross context VM structure.
1919 */
1920VMMDECL(RTHCPHYS) PGMGetInterAmd64CR3(PVM pVM)
1921{
1922 return pVM->pgm.s.HCPhysInterPaePML4;
1923}
1924
1925
1926/**
1927 * Performs and schedules necessary updates following a CR3 load or reload.
1928 *
1929 * This will normally involve mapping the guest PD or nPDPT.
1930 *
1931 * @returns VBox status code.
1932 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
1933 * safely be ignored and overridden since the FF will be set too then.
1934 * @param pVCpu The cross context virtual CPU structure.
1935 * @param cr3 The new cr3.
1936 * @param fGlobal Indicates whether this is a global flush or not.
1937 */
1938VMMDECL(int) PGMFlushTLB(PVMCPU pVCpu, uint64_t cr3, bool fGlobal)
1939{
1940 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLB), a);
1941 PVM pVM = pVCpu->CTX_SUFF(pVM);
1942
1943 VMCPU_ASSERT_EMT(pVCpu);
1944
1945 /*
1946 * Always flag the necessary updates; this is required for hardware acceleration.
1947 */
1948 /** @todo optimize this, it shouldn't always be necessary. */
1949 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
1950 if (fGlobal)
1951 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1952 LogFlow(("PGMFlushTLB: cr3=%RX64 OldCr3=%RX64 fGlobal=%d\n", cr3, pVCpu->pgm.s.GCPhysCR3, fGlobal));
1953
1954 /*
1955 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
1956 */
1957 int rc = VINF_SUCCESS;
1958 RTGCPHYS GCPhysCR3;
1959 switch (pVCpu->pgm.s.enmGuestMode)
1960 {
1961 case PGMMODE_PAE:
1962 case PGMMODE_PAE_NX:
1963 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1964 break;
1965 case PGMMODE_AMD64:
1966 case PGMMODE_AMD64_NX:
1967 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
1968 break;
1969 default:
1970 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1971 break;
1972 }
1973 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
1974
1975 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
1976 {
1977 RTGCPHYS GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
1978 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
1979 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
1980 if (RT_LIKELY(rc == VINF_SUCCESS))
1981 {
1982 if (pgmMapAreMappingsFloating(pVM))
1983 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1984 }
1985 else
1986 {
1987 AssertMsg(rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
1988 Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_PGM_SYNC_CR3));
1989 pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3;
1990 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MAP_CR3;
1991 if (pgmMapAreMappingsFloating(pVM))
1992 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;
1993 }
1994
1995 if (fGlobal)
1996 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBNewCR3Global));
1997 else
1998 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBNewCR3));
1999 }
2000 else
2001 {
2002# ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
2003 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
2004 if (pPool->cDirtyPages)
2005 {
2006 pgmLock(pVM);
2007 pgmPoolResetDirtyPages(pVM);
2008 pgmUnlock(pVM);
2009 }
2010# endif
2011 /*
2012 * Check if we have a pending update of the CR3 monitoring.
2013 */
2014 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
2015 {
2016 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
2017 Assert(!pVM->pgm.s.fMappingsFixed); Assert(pgmMapAreMappingsEnabled(pVM));
2018 }
2019 if (fGlobal)
2020 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBSameCR3Global));
2021 else
2022 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBSameCR3));
2023 }
2024
2025 IEMTlbInvalidateAll(pVCpu, false /*fVmm*/);
2026 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLB), a);
2027 return rc;
2028}
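
/*
 * Usage sketch (illustrative only): a simplified example of forwarding a guest
 * MOV CR3 to PGM.  Real callers (CPUM/EM/HM/IEM) deal with more state than
 * this; the fGlobal choice mirrors the x86 rule that a CR3 write only spares
 * global TLB entries when CR4.PGE is set.
 *
 *     static int exampleGuestMovCr3(PVMCPU pVCpu, uint64_t uNewCr3, uint64_t uCr4)
 *     {
 *         // With global pages disabled, every CR3 load is effectively global.
 *         bool const fGlobal = !(uCr4 & X86_CR4_PGE);
 *         return PGMFlushTLB(pVCpu, uNewCr3, fGlobal);
 *     }
 */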
2029
2030
2031/**
2032 * Performs and schedules necessary updates following a CR3 load or reload when
2033 * using nested or extended paging.
2034 *
2035 * This API is an alternative to PGMFlushTLB that avoids actually flushing the
2036 * TLB and triggering a SyncCR3.
2037 *
2038 * This will normally involve mapping the guest PD or nPDPT.
2039 *
2040 * @returns VBox status code.
2041 * @retval VINF_SUCCESS.
2042 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync (not for nested
2043 * paging modes). This can safely be ignored and overridden since the
2044 * FF will be set too then.
2045 * @param pVCpu The cross context virtual CPU structure.
2046 * @param cr3 The new cr3.
2047 */
2048VMMDECL(int) PGMUpdateCR3(PVMCPU pVCpu, uint64_t cr3)
2049{
2050 VMCPU_ASSERT_EMT(pVCpu);
2051 LogFlow(("PGMUpdateCR3: cr3=%RX64 OldCr3=%RX64\n", cr3, pVCpu->pgm.s.GCPhysCR3));
2052
2053 /* We assume we're only called in nested paging mode. */
2054 Assert(pVCpu->CTX_SUFF(pVM)->pgm.s.fNestedPaging || pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
2055 Assert(!pgmMapAreMappingsEnabled(pVCpu->CTX_SUFF(pVM)));
2056 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3));
2057
2058 /*
2059 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
2060 */
2061 int rc = VINF_SUCCESS;
2062 RTGCPHYS GCPhysCR3;
2063 switch (pVCpu->pgm.s.enmGuestMode)
2064 {
2065 case PGMMODE_PAE:
2066 case PGMMODE_PAE_NX:
2067 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
2068 break;
2069 case PGMMODE_AMD64:
2070 case PGMMODE_AMD64_NX:
2071 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
2072 break;
2073 default:
2074 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
2075 break;
2076 }
2077 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
2078
2079 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
2080 {
2081 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2082 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
2083 AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */ /** @todo this isn't true for the mac, but we need hw to test/fix this. */
2084 }
2085
2086 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
2087 return rc;
2088}
2089
2090
2091/**
2092 * Synchronize the paging structures.
2093 *
2094 * This function is called in response to the VMCPU_FF_PGM_SYNC_CR3 and
2095 * VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL force action flags. Those two flags are set
2096 * in several places, most importantly whenever the CR3 is loaded.
2097 *
2098 * @returns VBox status code. May return VINF_PGM_SYNC_CR3 in RC/R0.
2099 * @retval VERR_PGM_NO_HYPERVISOR_ADDRESS in raw-mode when we're unable to map
2100 * the VMM into guest context.
2101 * @param pVCpu The cross context virtual CPU structure.
2102 * @param cr0 Guest context CR0 register
2103 * @param cr3 Guest context CR3 register
2104 * @param cr4 Guest context CR4 register
2105 * @param fGlobal Including global page directories or not
2106 */
2107VMMDECL(int) PGMSyncCR3(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
2108{
2109 int rc;
2110
2111 VMCPU_ASSERT_EMT(pVCpu);
2112
2113 /*
2114 * The pool may have pending stuff and even require a return to ring-3 to
2115 * clear the whole thing.
2116 */
2117 rc = pgmPoolSyncCR3(pVCpu);
2118 if (rc != VINF_SUCCESS)
2119 return rc;
2120
2121 /*
2122 * We might be called when we shouldn't.
2123 *
2124 * The mode switching will ensure that the PD is resynced after every mode
2125 * switch. So, if we find ourselves here when in protected or real mode
2126 * we can safely clear the FF and return immediately.
2127 */
2128 if (pVCpu->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
2129 {
2130 Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
2131 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2132 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2133 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2134 return VINF_SUCCESS;
2135 }
2136
2137 /* If global pages are not supported, then all flushes are global. */
2138 if (!(cr4 & X86_CR4_PGE))
2139 fGlobal = true;
2140 LogFlow(("PGMSyncCR3: cr0=%RX64 cr3=%RX64 cr4=%RX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
2141 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)));
2142
2143 /*
2144 * Check if we need to finish an aborted MapCR3 call (see PGMFlushTLB).
2145 * This should be done before SyncCR3.
2146 */
2147 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MAP_CR3)
2148 {
2149 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MAP_CR3;
2150
2151 RTGCPHYS GCPhysCR3Old = pVCpu->pgm.s.GCPhysCR3; NOREF(GCPhysCR3Old);
2152 RTGCPHYS GCPhysCR3;
2153 switch (pVCpu->pgm.s.enmGuestMode)
2154 {
2155 case PGMMODE_PAE:
2156 case PGMMODE_PAE_NX:
2157 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
2158 break;
2159 case PGMMODE_AMD64:
2160 case PGMMODE_AMD64_NX:
2161 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
2162 break;
2163 default:
2164 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
2165 break;
2166 }
2167 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
2168
2169 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
2170 {
2171 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2172 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
2173 }
2174
2175 /* Make sure we check for pending pgm pool syncs as we clear VMCPU_FF_PGM_SYNC_CR3 later on! */
2176 if ( rc == VINF_PGM_SYNC_CR3
2177 || (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL))
2178 {
2179 Log(("PGMSyncCR3: pending pgm pool sync after MapCR3!\n"));
2180#ifdef IN_RING3
2181 rc = pgmPoolSyncCR3(pVCpu);
2182#else
2183 if (rc == VINF_PGM_SYNC_CR3)
2184 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3Old;
2185 return VINF_PGM_SYNC_CR3;
2186#endif
2187 }
2188 AssertRCReturn(rc, rc);
2189 AssertRCSuccessReturn(rc, VERR_IPE_UNEXPECTED_INFO_STATUS);
2190 }
2191
2192 /*
2193 * Let the 'Bth' function do the work and we'll just keep track of the flags.
2194 */
2195 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
2196 rc = PGM_BTH_PFN(SyncCR3, pVCpu)(pVCpu, cr0, cr3, cr4, fGlobal);
2197 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
2198 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
2199 if (rc == VINF_SUCCESS)
2200 {
2201 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)
2202 {
2203 /* Go back to ring 3 if a pgm pool sync is again pending. */
2204 return VINF_PGM_SYNC_CR3;
2205 }
2206
2207 if (!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
2208 {
2209 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2210 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2211 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2212 }
2213
2214 /*
2215 * Check if we have a pending update of the CR3 monitoring.
2216 */
2217 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
2218 {
2219 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
2220 Assert(!pVCpu->CTX_SUFF(pVM)->pgm.s.fMappingsFixed);
2221 Assert(pgmMapAreMappingsEnabled(pVCpu->CTX_SUFF(pVM)));
2222 }
2223 }
2224
2225 /*
2226 * Now flush the CR3 (guest context).
2227 */
2228 if (rc == VINF_SUCCESS)
2229 PGM_INVL_VCPU_TLBS(pVCpu);
2230 return rc;
2231}
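
/*
 * Usage sketch (illustrative only): PGMSyncCR3 is normally driven by the force
 * action flags mentioned above.  This is a trimmed-down version of what an
 * execution loop might do; it leaves out the VINF_PGM_SYNC_CR3 and
 * VERR_PGM_NO_HYPERVISOR_ADDRESS handling a real caller needs.
 *
 *     static int exampleHandlePgmSyncFF(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4)
 *     {
 *         if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
 *         {
 *             // Only do a global resync when the global flag is pending.
 *             bool const fGlobal = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
 *             return PGMSyncCR3(pVCpu, cr0, cr3, cr4, fGlobal);
 *         }
 *         return VINF_SUCCESS;
 *     }
 */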
2232
2233
2234/**
2235 * Called whenever CR0 or CR4 changes in a way which may affect the paging mode.
2236 *
2237 * @returns VBox status code, with the following informational code for
2238 * VM scheduling.
2239 * @retval VINF_SUCCESS if there was no change, or it was successfully dealt with.
2240 * @retval VINF_PGM_CHANGE_MODE if we're in RC or R0 and the mode changes.
2241 * (I.e. not in R3.)
2242 * @retval VINF_EM_SUSPEND or VINF_EM_OFF on a fatal runtime error. (R3 only)
2243 *
2244 * @param pVCpu The cross context virtual CPU structure.
2245 * @param cr0 The new cr0.
2246 * @param cr4 The new cr4.
2247 * @param efer The new extended feature enable register.
2248 */
2249VMMDECL(int) PGMChangeMode(PVMCPU pVCpu, uint64_t cr0, uint64_t cr4, uint64_t efer)
2250{
2251 PGMMODE enmGuestMode;
2252
2253 VMCPU_ASSERT_EMT(pVCpu);
2254
2255 /*
2256 * Calc the new guest mode.
2257 */
2258 if (!(cr0 & X86_CR0_PE))
2259 enmGuestMode = PGMMODE_REAL;
2260 else if (!(cr0 & X86_CR0_PG))
2261 enmGuestMode = PGMMODE_PROTECTED;
2262 else if (!(cr4 & X86_CR4_PAE))
2263 {
2264 bool const fPse = !!(cr4 & X86_CR4_PSE);
2265 if (pVCpu->pgm.s.fGst32BitPageSizeExtension != fPse)
2266 Log(("PGMChangeMode: CR4.PSE %d -> %d\n", pVCpu->pgm.s.fGst32BitPageSizeExtension, fPse));
2267 pVCpu->pgm.s.fGst32BitPageSizeExtension = fPse;
2268 enmGuestMode = PGMMODE_32_BIT;
2269 }
2270 else if (!(efer & MSR_K6_EFER_LME))
2271 {
2272 if (!(efer & MSR_K6_EFER_NXE))
2273 enmGuestMode = PGMMODE_PAE;
2274 else
2275 enmGuestMode = PGMMODE_PAE_NX;
2276 }
2277 else
2278 {
2279 if (!(efer & MSR_K6_EFER_NXE))
2280 enmGuestMode = PGMMODE_AMD64;
2281 else
2282 enmGuestMode = PGMMODE_AMD64_NX;
2283 }
2284
2285 /*
2286 * Did it change?
2287 */
2288 if (pVCpu->pgm.s.enmGuestMode == enmGuestMode)
2289 return VINF_SUCCESS;
2290
2291 /* Flush the TLB */
2292 PGM_INVL_VCPU_TLBS(pVCpu);
2293
2294#ifdef IN_RING3
2295 return PGMR3ChangeMode(pVCpu->CTX_SUFF(pVM), pVCpu, enmGuestMode);
2296#else
2297 LogFlow(("PGMChangeMode: returns VINF_PGM_CHANGE_MODE.\n"));
2298 return VINF_PGM_CHANGE_MODE;
2299#endif
2300}
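
/*
 * Usage sketch (illustrative only): how a control register write path might
 * feed PGMChangeMode.  It assumes the caller has already validated the new
 * values; the real logic lives in CPUM/IEM.
 *
 *     static int exampleCrWriteChangedPaging(PVMCPU pVCpu, uint64_t uNewCr0, uint64_t uNewCr4, uint64_t uEfer)
 *     {
 *         int rc = PGMChangeMode(pVCpu, uNewCr0, uNewCr4, uEfer);
 *         // Outside ring-3 a mode switch cannot be performed in place;
 *         // VINF_PGM_CHANGE_MODE tells the caller to drop back to ring-3
 *         // where PGMR3ChangeMode does the actual work.
 *         return rc;
 *     }
 */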
2301
2302
2303/**
2304 * Called by CPUM or REM when CR0.WP changes to 1.
2305 *
2306 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2307 * @thread EMT
2308 */
2309VMMDECL(void) PGMCr0WpEnabled(PVMCPU pVCpu)
2310{
2311 /*
2312 * Netware WP0+RO+US hack cleanup when WP0 -> WP1.
2313 *
2314 * Use the counter to judge whether there might be pool pages with active
2315 * hacks in them. If there are, we will be running the risk of messing up
2316 * the guest by allowing it to write to read-only pages. Thus, we have to
2317 * clear the page pool ASAP if there is the slightest chance.
2318 */
2319 if (pVCpu->pgm.s.cNetwareWp0Hacks > 0)
2320 {
2321 Assert(pVCpu->CTX_SUFF(pVM)->cCpus == 1);
2322
2323 Log(("PGMCr0WpEnabled: %llu WP0 hacks active - clearing page pool\n", pVCpu->pgm.s.cNetwareWp0Hacks));
2324 pVCpu->pgm.s.cNetwareWp0Hacks = 0;
2325 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
2326 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2327 }
2328}
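
/*
 * Usage sketch (illustrative only): the notification above only matters on a
 * 0 -> 1 transition of CR0.WP, so a hypothetical CR0 write path might guard
 * the call like this.
 *
 *     static void exampleCr0Written(PVMCPU pVCpu, uint64_t uOldCr0, uint64_t uNewCr0)
 *     {
 *         if (   !(uOldCr0 & X86_CR0_WP)
 *             && (uNewCr0 & X86_CR0_WP))
 *             PGMCr0WpEnabled(pVCpu);
 *     }
 */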
2329
2330
2331/**
2332 * Gets the current guest paging mode.
2333 *
2334 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
2335 *
2336 * @returns The current paging mode.
2337 * @param pVCpu The cross context virtual CPU structure.
2338 */
2339VMMDECL(PGMMODE) PGMGetGuestMode(PVMCPU pVCpu)
2340{
2341 return pVCpu->pgm.s.enmGuestMode;
2342}
2343
2344
2345/**
2346 * Gets the current shadow paging mode.
2347 *
2348 * @returns The current paging mode.
2349 * @param pVCpu The cross context virtual CPU structure.
2350 */
2351VMMDECL(PGMMODE) PGMGetShadowMode(PVMCPU pVCpu)
2352{
2353 return pVCpu->pgm.s.enmShadowMode;
2354}
2355
2356
2357/**
2358 * Gets the current host paging mode.
2359 *
2360 * @returns The current paging mode.
2361 * @param pVM The cross context VM structure.
2362 */
2363VMMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
2364{
2365 switch (pVM->pgm.s.enmHostMode)
2366 {
2367 case SUPPAGINGMODE_32_BIT:
2368 case SUPPAGINGMODE_32_BIT_GLOBAL:
2369 return PGMMODE_32_BIT;
2370
2371 case SUPPAGINGMODE_PAE:
2372 case SUPPAGINGMODE_PAE_GLOBAL:
2373 return PGMMODE_PAE;
2374
2375 case SUPPAGINGMODE_PAE_NX:
2376 case SUPPAGINGMODE_PAE_GLOBAL_NX:
2377 return PGMMODE_PAE_NX;
2378
2379 case SUPPAGINGMODE_AMD64:
2380 case SUPPAGINGMODE_AMD64_GLOBAL:
2381 return PGMMODE_AMD64;
2382
2383 case SUPPAGINGMODE_AMD64_NX:
2384 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
2385 return PGMMODE_AMD64_NX;
2386
2387 default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
2388 }
2389
2390 return PGMMODE_INVALID;
2391}
2392
2393
2394/**
2395 * Get mode name.
2396 *
2397 * @returns read-only name string.
2398 * @param enmMode The mode whose name is desired.
2399 */
2400VMMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
2401{
2402 switch (enmMode)
2403 {
2404 case PGMMODE_REAL: return "Real";
2405 case PGMMODE_PROTECTED: return "Protected";
2406 case PGMMODE_32_BIT: return "32-bit";
2407 case PGMMODE_PAE: return "PAE";
2408 case PGMMODE_PAE_NX: return "PAE+NX";
2409 case PGMMODE_AMD64: return "AMD64";
2410 case PGMMODE_AMD64_NX: return "AMD64+NX";
2411 case PGMMODE_NESTED: return "Nested";
2412 case PGMMODE_EPT: return "EPT";
2413 default: return "unknown mode value";
2414 }
2415}
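
/*
 * Usage sketch (illustrative only): the mode getters above combine naturally
 * with PGMGetModeName for diagnostics, e.g. when logging a mode switch.
 *
 *     static void exampleLogPagingModes(PVMCPU pVCpu)
 *     {
 *         Log(("Guest paging mode: %s, shadow paging mode: %s\n",
 *              PGMGetModeName(PGMGetGuestMode(pVCpu)),
 *              PGMGetModeName(PGMGetShadowMode(pVCpu))));
 *     }
 */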
2416
2417
2418
2419/**
2420 * Notification from CPUM that the EFER.NXE bit has changed.
2421 *
2422 * @param pVCpu The cross context virtual CPU structure of the CPU for
2423 * which EFER changed.
2424 * @param fNxe The new NXE state.
2425 */
2426VMM_INT_DECL(void) PGMNotifyNxeChanged(PVMCPU pVCpu, bool fNxe)
2427{
2428/** @todo VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu); */
2429 Log(("PGMNotifyNxeChanged: fNxe=%RTbool\n", fNxe));
2430
2431 pVCpu->pgm.s.fNoExecuteEnabled = fNxe;
2432 if (fNxe)
2433 {
2434 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
2435 pVCpu->pgm.s.fGstPaeMbzPteMask &= ~X86_PTE_PAE_NX;
2436 pVCpu->pgm.s.fGstPaeMbzPdeMask &= ~X86_PDE_PAE_NX;
2437 pVCpu->pgm.s.fGstPaeMbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
2438 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask - N/A */
2439 pVCpu->pgm.s.fGstAmd64MbzPteMask &= ~X86_PTE_PAE_NX;
2440 pVCpu->pgm.s.fGstAmd64MbzPdeMask &= ~X86_PDE_PAE_NX;
2441 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
2442 pVCpu->pgm.s.fGstAmd64MbzPdpeMask &= ~X86_PDPE_LM_NX;
2443 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask &= ~X86_PDPE_LM_NX;
2444 pVCpu->pgm.s.fGstAmd64MbzPml4eMask &= ~X86_PML4E_NX;
2445
2446 pVCpu->pgm.s.fGst64ShadowedPteMask |= X86_PTE_PAE_NX;
2447 pVCpu->pgm.s.fGst64ShadowedPdeMask |= X86_PDE_PAE_NX;
2448 pVCpu->pgm.s.fGst64ShadowedBigPdeMask |= X86_PDE2M_PAE_NX;
2449 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask |= X86_PDE2M_PAE_NX;
2450 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask |= X86_PDPE_LM_NX;
2451 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask |= X86_PML4E_NX;
2452 }
2453 else
2454 {
2455 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
2456 pVCpu->pgm.s.fGstPaeMbzPteMask |= X86_PTE_PAE_NX;
2457 pVCpu->pgm.s.fGstPaeMbzPdeMask |= X86_PDE_PAE_NX;
2458 pVCpu->pgm.s.fGstPaeMbzBigPdeMask |= X86_PDE2M_PAE_NX;
2459 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask -N/A */
2460 pVCpu->pgm.s.fGstAmd64MbzPteMask |= X86_PTE_PAE_NX;
2461 pVCpu->pgm.s.fGstAmd64MbzPdeMask |= X86_PDE_PAE_NX;
2462 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask |= X86_PDE2M_PAE_NX;
2463 pVCpu->pgm.s.fGstAmd64MbzPdpeMask |= X86_PDPE_LM_NX;
2464 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask |= X86_PDPE_LM_NX;
2465 pVCpu->pgm.s.fGstAmd64MbzPml4eMask |= X86_PML4E_NX;
2466
2467 pVCpu->pgm.s.fGst64ShadowedPteMask &= ~X86_PTE_PAE_NX;
2468 pVCpu->pgm.s.fGst64ShadowedPdeMask &= ~X86_PDE_PAE_NX;
2469 pVCpu->pgm.s.fGst64ShadowedBigPdeMask &= ~X86_PDE2M_PAE_NX;
2470 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask &= ~X86_PDE2M_PAE_NX;
2471 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask &= ~X86_PDPE_LM_NX;
2472 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask &= ~X86_PML4E_NX;
2473 }
2474}
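
/*
 * Usage sketch (illustrative only): a hypothetical EFER write path that only
 * notifies PGM when the NXE bit actually changed, which is all the function
 * above cares about.
 *
 *     static void exampleEferWritten(PVMCPU pVCpu, uint64_t uOldEfer, uint64_t uNewEfer)
 *     {
 *         if ((uOldEfer ^ uNewEfer) & MSR_K6_EFER_NXE)
 *             PGMNotifyNxeChanged(pVCpu, RT_BOOL(uNewEfer & MSR_K6_EFER_NXE));
 *     }
 */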
2475
2476
2477/**
2478 * Check if any pgm pool pages are marked dirty (not monitored).
2479 *
2480 * @returns true if any pool pages are dirty, false if not.
2481 * @param pVM The cross context VM structure.
2482 */
2483VMMDECL(bool) PGMHasDirtyPages(PVM pVM)
2484{
2485 return pVM->pgm.s.CTX_SUFF(pPool)->cDirtyPages != 0;
2486}
2487
2488
2489/**
2490 * Check if this VCPU currently owns the PGM lock.
2491 *
2492 * @returns bool owner/not owner
2493 * @param pVM The cross context VM structure.
2494 */
2495VMMDECL(bool) PGMIsLockOwner(PVM pVM)
2496{
2497 return PDMCritSectIsOwner(&pVM->pgm.s.CritSectX);
2498}
2499
2500
2501/**
2502 * Enable or disable large page usage
2503 *
2504 * @returns VBox status code.
2505 * @param pVM The cross context VM structure.
2506 * @param fUseLargePages Use/not use large pages
2507 */
2508VMMDECL(int) PGMSetLargePageUsage(PVM pVM, bool fUseLargePages)
2509{
2510 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2511
2512 pVM->fUseLargePages = fUseLargePages;
2513 return VINF_SUCCESS;
2514}
2515
2516
2517/**
2518 * Acquire the PGM lock.
2519 *
2520 * @returns VBox status code
2521 * @param pVM The cross context VM structure.
2522 * @param SRC_POS The source position of the caller (RT_SRC_POS).
2523 */
2524#if (defined(VBOX_STRICT) && defined(IN_RING3)) || defined(DOXYGEN_RUNNING)
2525int pgmLockDebug(PVM pVM, RT_SRC_POS_DECL)
2526#else
2527int pgmLock(PVM pVM)
2528#endif
2529{
2530#if defined(VBOX_STRICT) && defined(IN_RING3)
2531 int rc = PDMCritSectEnterDebug(&pVM->pgm.s.CritSectX, VERR_SEM_BUSY, (uintptr_t)ASMReturnAddress(), RT_SRC_POS_ARGS);
2532#else
2533 int rc = PDMCritSectEnter(&pVM->pgm.s.CritSectX, VERR_SEM_BUSY);
2534#endif
2535#if defined(IN_RC) || defined(IN_RING0)
2536 if (rc == VERR_SEM_BUSY)
2537 rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_LOCK, 0);
2538#endif
2539 AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc));
2540 return rc;
2541}
2542
2543
2544/**
2545 * Release the PGM lock.
2546 *
2547 * @returns VBox status code
2548 * @param pVM The cross context VM structure.
2549 */
2550void pgmUnlock(PVM pVM)
2551{
2552 uint32_t cDeprecatedPageLocks = pVM->pgm.s.cDeprecatedPageLocks;
2553 pVM->pgm.s.cDeprecatedPageLocks = 0;
2554 int rc = PDMCritSectLeave(&pVM->pgm.s.CritSectX);
2555 if (rc == VINF_SEM_NESTED)
2556 pVM->pgm.s.cDeprecatedPageLocks = cDeprecatedPageLocks;
2557}
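
/*
 * Usage sketch (illustrative only): the standard bracket for code touching PGM
 * state.  The lock is a critical section, so nesting is allowed, and
 * PGMIsLockOwner() can back up assertions.
 *
 *     static int exampleLockedPgmWork(PVM pVM)
 *     {
 *         pgmLock(pVM);
 *         Assert(PGMIsLockOwner(pVM));
 *         // ... inspect or modify PGM state here ...
 *         int rc = VINF_SUCCESS;
 *         pgmUnlock(pVM);
 *         return rc;
 *     }
 */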
2558
2559#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
2560
2561/**
2562 * Common worker for pgmRZDynMapGCPageOffInlined and pgmRZDynMapGCPageV2Inlined.
2563 *
2564 * @returns VBox status code.
2565 * @param pVM The cross context VM structure.
2566 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2567 * @param GCPhys The guest physical address of the page to map. The
2568 * offset bits are not ignored.
2569 * @param ppv Where to return the address corresponding to @a GCPhys.
2570 * @param SRC_POS The source position of the caller (RT_SRC_POS).
2571 */
2572int pgmRZDynMapGCPageCommon(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
2573{
2574 pgmLock(pVM);
2575
2576 /*
2577 * Convert it to a writable page and pass it on to the dynamic mapper.
2578 */
2579 int rc;
2580 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
2581 if (RT_LIKELY(pPage))
2582 {
2583 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
2584 if (RT_SUCCESS(rc))
2585 {
2586 void *pv;
2587 rc = pgmRZDynMapHCPageInlined(pVCpu, PGM_PAGE_GET_HCPHYS(pPage), &pv RTLOG_COMMA_SRC_POS_ARGS);
2588 if (RT_SUCCESS(rc))
2589 *ppv = (void *)((uintptr_t)pv | ((uintptr_t)GCPhys & PAGE_OFFSET_MASK));
2590 }
2591 else
2592 AssertRC(rc);
2593 }
2594 else
2595 {
2596 AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
2597 rc = VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2598 }
2599
2600 pgmUnlock(pVM);
2601 return rc;
2602}
2603
2604#endif /* IN_RC || VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
2605#if !defined(IN_R0) || defined(LOG_ENABLED)
2606
2607/** Format handler for PGMPAGE.
2608 * @copydoc FNRTSTRFORMATTYPE */
2609static DECLCALLBACK(size_t) pgmFormatTypeHandlerPage(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
2610 const char *pszType, void const *pvValue,
2611 int cchWidth, int cchPrecision, unsigned fFlags,
2612 void *pvUser)
2613{
2614 size_t cch;
2615 PCPGMPAGE pPage = (PCPGMPAGE)pvValue;
2616 if (RT_VALID_PTR(pPage))
2617 {
2618 char szTmp[64+80];
2619
2620 cch = 0;
2621
2622 /* The single char state stuff. */
2623 static const char s_achPageStates[4] = { 'Z', 'A', 'W', 'S' };
2624 szTmp[cch++] = s_achPageStates[PGM_PAGE_GET_STATE_NA(pPage)];
2625
2626#define IS_PART_INCLUDED(lvl) ( !(fFlags & RTSTR_F_PRECISION) || cchPrecision == (lvl) || cchPrecision >= (lvl)+10 )
2627 if (IS_PART_INCLUDED(5))
2628 {
2629 static const char s_achHandlerStates[4] = { '-', 't', 'w', 'a' };
2630 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)];
2631 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_VIRT_STATE(pPage)];
2632 }
2633
2634 /* The type. */
2635 if (IS_PART_INCLUDED(4))
2636 {
2637 szTmp[cch++] = ':';
2638 static const char s_achPageTypes[8][4] = { "INV", "RAM", "MI2", "M2A", "SHA", "ROM", "MIO", "BAD" };
2639 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][0];
2640 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][1];
2641 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][2];
2642 }
2643
2644 /* The numbers. */
2645 if (IS_PART_INCLUDED(3))
2646 {
2647 szTmp[cch++] = ':';
2648 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_HCPHYS_NA(pPage), 16, 12, 0, RTSTR_F_ZEROPAD | RTSTR_F_64BIT);
2649 }
2650
2651 if (IS_PART_INCLUDED(2))
2652 {
2653 szTmp[cch++] = ':';
2654 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_PAGEID(pPage), 16, 7, 0, RTSTR_F_ZEROPAD | RTSTR_F_32BIT);
2655 }
2656
2657 if (IS_PART_INCLUDED(6))
2658 {
2659 szTmp[cch++] = ':';
2660 static const char s_achRefs[4] = { '-', 'U', '!', 'L' };
2661 szTmp[cch++] = s_achRefs[PGM_PAGE_GET_TD_CREFS_NA(pPage)];
2662 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_TD_IDX_NA(pPage), 16, 4, 0, RTSTR_F_ZEROPAD | RTSTR_F_16BIT);
2663 }
2664#undef IS_PART_INCLUDED
2665
2666 cch = pfnOutput(pvArgOutput, szTmp, cch);
2667 }
2668 else
2669 cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmpage-ptr>"));
2670 NOREF(pszType); NOREF(cchWidth); NOREF(pvUser);
2671 return cch;
2672}
2673
2674
2675/** Format handler for PGMRAMRANGE.
2676 * @copydoc FNRTSTRFORMATTYPE */
2677static DECLCALLBACK(size_t) pgmFormatTypeHandlerRamRange(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
2678 const char *pszType, void const *pvValue,
2679 int cchWidth, int cchPrecision, unsigned fFlags,
2680 void *pvUser)
2681{
2682 size_t cch;
2683 PGMRAMRANGE const *pRam = (PGMRAMRANGE const *)pvValue;
2684 if (VALID_PTR(pRam))
2685 {
2686 char szTmp[80];
2687 cch = RTStrPrintf(szTmp, sizeof(szTmp), "%RGp-%RGp", pRam->GCPhys, pRam->GCPhysLast);
2688 cch = pfnOutput(pvArgOutput, szTmp, cch);
2689 }
2690 else
2691 cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmramrange-ptr>"));
2692 NOREF(pszType); NOREF(cchWidth); NOREF(cchPrecision); NOREF(pvUser); NOREF(fFlags);
2693 return cch;
2694}
2695
2696/** Format type handlers to be registered/deregistered. */
2697static const struct
2698{
2699 char szType[24];
2700 PFNRTSTRFORMATTYPE pfnHandler;
2701} g_aPgmFormatTypes[] =
2702{
2703 { "pgmpage", pgmFormatTypeHandlerPage },
2704 { "pgmramrange", pgmFormatTypeHandlerRamRange }
2705};
2706
2707#endif /* !IN_R0 || LOG_ENABLED */
2708
2709/**
2710 * Registers the global string format types.
2711 *
2712 * This should be called at module load time or in some other manner that ensures
2713 * that it's called exactly one time.
2714 *
2715 * @returns IPRT status code on RTStrFormatTypeRegister failure.
2716 */
2717VMMDECL(int) PGMRegisterStringFormatTypes(void)
2718{
2719#if !defined(IN_R0) || defined(LOG_ENABLED)
2720 int rc = VINF_SUCCESS;
2721 unsigned i;
2722 for (i = 0; RT_SUCCESS(rc) && i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
2723 {
2724 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
2725# ifdef IN_RING0
2726 if (rc == VERR_ALREADY_EXISTS)
2727 {
2728 /* in case of cleanup failure in ring-0 */
2729 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2730 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
2731 }
2732# endif
2733 }
2734 if (RT_FAILURE(rc))
2735 while (i-- > 0)
2736 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2737
2738 return rc;
2739#else
2740 return VINF_SUCCESS;
2741#endif
2742}
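
/*
 * Usage sketch (illustrative only): once registered, the custom types are used
 * through the IPRT "%R[...]" format extension in logging and assertion
 * messages, for example:
 *
 *     PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
 *     Log(("GCPhys=%RGp %R[pgmpage]\n", GCPhys, pPage));
 *
 * and, given a PPGMRAMRANGE pointer, "%R[pgmramrange]" prints its physical
 * address range.
 */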
2743
2744
2745/**
2746 * Deregisters the global string format types.
2747 *
2748 * This should be called at module unload time or in some other manner that
2749 * ensures that it's called exactly one time.
2750 */
2751VMMDECL(void) PGMDeregisterStringFormatTypes(void)
2752{
2753#if !defined(IN_R0) || defined(LOG_ENABLED)
2754 for (unsigned i = 0; i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
2755 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2756#endif
2757}
2758
2759#ifdef VBOX_STRICT
2760
2761/**
2762 * Asserts that there are no mapping conflicts.
2763 *
2764 * @returns Number of conflicts.
2765 * @param pVM The cross context VM structure.
2766 */
2767VMMDECL(unsigned) PGMAssertNoMappingConflicts(PVM pVM)
2768{
2769 unsigned cErrors = 0;
2770
2771 /* Only applies to raw mode -> 1 VCPU */
2772 Assert(pVM->cCpus == 1);
2773 PVMCPU pVCpu = &pVM->aCpus[0];
2774
2775 /*
2776 * Check for mapping conflicts.
2777 */
2778 for (PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
2779 pMapping;
2780 pMapping = pMapping->CTX_SUFF(pNext))
2781 {
2782 /** @todo This is slow and should be optimized, but since it's just assertions I don't care now. */
2783 for (RTGCPTR GCPtr = pMapping->GCPtr;
2784 GCPtr <= pMapping->GCPtrLast;
2785 GCPtr += PAGE_SIZE)
2786 {
2787 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)GCPtr, NULL, NULL);
2788 if (rc != VERR_PAGE_TABLE_NOT_PRESENT)
2789 {
2790 AssertMsgFailed(("Conflict at %RGv with %s\n", GCPtr, R3STRING(pMapping->pszDesc)));
2791 cErrors++;
2792 break;
2793 }
2794 }
2795 }
2796
2797 return cErrors;
2798}
2799
2800
2801/**
2802 * Asserts that everything related to the guest CR3 is correctly shadowed.
2803 *
2804 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
2805 * and assert the correctness of the guest CR3 mapping before asserting that the
2806 * shadow page tables are in sync with the guest page tables.
2807 *
2808 * @returns Number of conflicts.
2809 * @param pVM The cross context VM structure.
2810 * @param pVCpu The cross context virtual CPU structure.
2811 * @param cr3 The current guest CR3 register value.
2812 * @param cr4 The current guest CR4 register value.
2813 */
2814VMMDECL(unsigned) PGMAssertCR3(PVM pVM, PVMCPU pVCpu, uint64_t cr3, uint64_t cr4)
2815{
2816 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
2817 pgmLock(pVM);
2818 unsigned cErrors = PGM_BTH_PFN(AssertCR3, pVCpu)(pVCpu, cr3, cr4, 0, ~(RTGCPTR)0);
2819 pgmUnlock(pVM);
2820 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
2821 return cErrors;
2822}
2823
2824#endif /* VBOX_STRICT */