VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp@73266

Last change on this file since 73266 was 73266, checked in by vboxsync on 2018-07-20

PGM,HM: Made PGMR3ChangeMode work in ring-0 too. This required a kludge for the VT-x real-in-V86-mode stuff, as there are certain limitations on that mode which weren't checked as CR0.PE was cleared. The kludge isn't very smart, but it seems to do the job. Similar kludge for getting out of the mode. bugref:9044

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 136.8 KB
1/* $Id: PGMAll.cpp 73266 2018-07-20 14:27:20Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor - All context code.
4 */
5
6/*
7 * Copyright (C) 2006-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_PGM
23#include <VBox/vmm/pgm.h>
24#include <VBox/vmm/cpum.h>
25#include <VBox/vmm/selm.h>
26#include <VBox/vmm/iem.h>
27#include <VBox/vmm/iom.h>
28#include <VBox/sup.h>
29#include <VBox/vmm/mm.h>
30#include <VBox/vmm/stam.h>
31#include <VBox/vmm/csam.h>
32#include <VBox/vmm/patm.h>
33#include <VBox/vmm/trpm.h>
34#ifdef VBOX_WITH_REM
35# include <VBox/vmm/rem.h>
36#endif
37#include <VBox/vmm/em.h>
38#include <VBox/vmm/hm.h>
39#include <VBox/vmm/hm_vmx.h>
40#include "PGMInternal.h"
41#include <VBox/vmm/vm.h>
42#include "PGMInline.h"
43#include <iprt/assert.h>
44#include <iprt/asm-amd64-x86.h>
45#include <iprt/string.h>
46#include <VBox/log.h>
47#include <VBox/param.h>
48#include <VBox/err.h>
49
50
51/*********************************************************************************************************************************
52* Structures and Typedefs *
53*********************************************************************************************************************************/
54/**
55 * State structure for PGM_GST_NAME(HandlerVirtualUpdate) that's
56 * passed to PGM_GST_NAME(VirtHandlerUpdateOne) during enumeration.
57 */
58typedef struct PGMHVUSTATE
59{
60 /** Pointer to the VM. */
61 PVM pVM;
62 /** Pointer to the VMCPU. */
63 PVMCPU pVCpu;
64 /** The todo flags. */
65 RTUINT fTodo;
66 /** The CR4 register value. */
67 uint32_t cr4;
68} PGMHVUSTATE, *PPGMHVUSTATE;
69
70
71/*********************************************************************************************************************************
72* Internal Functions *
73*********************************************************************************************************************************/
74DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
75DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPU pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde);
76#ifndef IN_RC
77static int pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD);
78static int pgmShwGetEPTPDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD);
79#endif
80
81
82/*
83 * Shadow - 32-bit mode
84 */
85#define PGM_SHW_TYPE PGM_TYPE_32BIT
86#define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
87#include "PGMAllShw.h"
88
89/* Guest - real mode */
90#define PGM_GST_TYPE PGM_TYPE_REAL
91#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
92#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
93#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
94#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
95#include "PGMGstDefs.h"
96#include "PGMAllGst.h"
97#include "PGMAllBth.h"
98#undef BTH_PGMPOOLKIND_PT_FOR_PT
99#undef BTH_PGMPOOLKIND_ROOT
100#undef PGM_BTH_NAME
101#undef PGM_GST_TYPE
102#undef PGM_GST_NAME
103
104/* Guest - protected mode */
105#define PGM_GST_TYPE PGM_TYPE_PROT
106#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
107#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
108#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
109#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
110#include "PGMGstDefs.h"
111#include "PGMAllGst.h"
112#include "PGMAllBth.h"
113#undef BTH_PGMPOOLKIND_PT_FOR_PT
114#undef BTH_PGMPOOLKIND_ROOT
115#undef PGM_BTH_NAME
116#undef PGM_GST_TYPE
117#undef PGM_GST_NAME
118
119/* Guest - 32-bit mode */
120#define PGM_GST_TYPE PGM_TYPE_32BIT
121#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
122#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
123#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
124#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
125#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD
126#include "PGMGstDefs.h"
127#include "PGMAllGst.h"
128#include "PGMAllBth.h"
129#undef BTH_PGMPOOLKIND_PT_FOR_BIG
130#undef BTH_PGMPOOLKIND_PT_FOR_PT
131#undef BTH_PGMPOOLKIND_ROOT
132#undef PGM_BTH_NAME
133#undef PGM_GST_TYPE
134#undef PGM_GST_NAME
135
136#undef PGM_SHW_TYPE
137#undef PGM_SHW_NAME
138
139
140/*
141 * Shadow - PAE mode
142 */
143#define PGM_SHW_TYPE PGM_TYPE_PAE
144#define PGM_SHW_NAME(name) PGM_SHW_NAME_PAE(name)
145#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
146#include "PGMAllShw.h"
147
148/* Guest - real mode */
149#define PGM_GST_TYPE PGM_TYPE_REAL
150#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
151#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
152#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
153#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
154#include "PGMGstDefs.h"
155#include "PGMAllBth.h"
156#undef BTH_PGMPOOLKIND_PT_FOR_PT
157#undef BTH_PGMPOOLKIND_ROOT
158#undef PGM_BTH_NAME
159#undef PGM_GST_TYPE
160#undef PGM_GST_NAME
161
162/* Guest - protected mode */
163#define PGM_GST_TYPE PGM_TYPE_PROT
164#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
165#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
166#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
167#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
168#include "PGMGstDefs.h"
169#include "PGMAllBth.h"
170#undef BTH_PGMPOOLKIND_PT_FOR_PT
171#undef BTH_PGMPOOLKIND_ROOT
172#undef PGM_BTH_NAME
173#undef PGM_GST_TYPE
174#undef PGM_GST_NAME
175
176/* Guest - 32-bit mode */
177#define PGM_GST_TYPE PGM_TYPE_32BIT
178#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
179#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
180#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
181#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
182#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_FOR_32BIT
183#include "PGMGstDefs.h"
184#include "PGMAllBth.h"
185#undef BTH_PGMPOOLKIND_PT_FOR_BIG
186#undef BTH_PGMPOOLKIND_PT_FOR_PT
187#undef BTH_PGMPOOLKIND_ROOT
188#undef PGM_BTH_NAME
189#undef PGM_GST_TYPE
190#undef PGM_GST_NAME
191
192
193/* Guest - PAE mode */
194#define PGM_GST_TYPE PGM_TYPE_PAE
195#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
196#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
197#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
198#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
199#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT
200#include "PGMGstDefs.h"
201#include "PGMAllGst.h"
202#include "PGMAllBth.h"
203#undef BTH_PGMPOOLKIND_PT_FOR_BIG
204#undef BTH_PGMPOOLKIND_PT_FOR_PT
205#undef BTH_PGMPOOLKIND_ROOT
206#undef PGM_BTH_NAME
207#undef PGM_GST_TYPE
208#undef PGM_GST_NAME
209
210#undef PGM_SHW_TYPE
211#undef PGM_SHW_NAME
212
213
214#ifndef IN_RC /* AMD64 implies VT-x/AMD-V */
215/*
216 * Shadow - AMD64 mode
217 */
218# define PGM_SHW_TYPE PGM_TYPE_AMD64
219# define PGM_SHW_NAME(name) PGM_SHW_NAME_AMD64(name)
220# include "PGMAllShw.h"
221
222/* Guest - protected mode (only used for AMD-V nested paging in 64 bits mode) */
223/** @todo retire this hack. */
224# define PGM_GST_TYPE PGM_TYPE_PROT
225# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
226# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
227# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
228# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PD_PHYS
229# include "PGMGstDefs.h"
230# include "PGMAllBth.h"
231# undef BTH_PGMPOOLKIND_PT_FOR_PT
232# undef BTH_PGMPOOLKIND_ROOT
233# undef PGM_BTH_NAME
234# undef PGM_GST_TYPE
235# undef PGM_GST_NAME
236
237# ifdef VBOX_WITH_64_BITS_GUESTS
238/* Guest - AMD64 mode */
239# define PGM_GST_TYPE PGM_TYPE_AMD64
240# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
241# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
242# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
243# define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
244# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_64BIT_PML4
245# include "PGMGstDefs.h"
246# include "PGMAllGst.h"
247# include "PGMAllBth.h"
248# undef BTH_PGMPOOLKIND_PT_FOR_BIG
249# undef BTH_PGMPOOLKIND_PT_FOR_PT
250# undef BTH_PGMPOOLKIND_ROOT
251# undef PGM_BTH_NAME
252# undef PGM_GST_TYPE
253# undef PGM_GST_NAME
254# endif /* VBOX_WITH_64_BITS_GUESTS */
255
256# undef PGM_SHW_TYPE
257# undef PGM_SHW_NAME
258
259
260/*
261 * Shadow - 32-bit nested paging mode.
262 */
263# define PGM_SHW_TYPE PGM_TYPE_NESTED_32BIT
264# define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED_32BIT(name)
265# include "PGMAllShw.h"
266
267/* Guest - real mode */
268# define PGM_GST_TYPE PGM_TYPE_REAL
269# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
270# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_REAL(name)
271# include "PGMGstDefs.h"
272# include "PGMAllBth.h"
273# undef PGM_BTH_NAME
274# undef PGM_GST_TYPE
275# undef PGM_GST_NAME
276
277/* Guest - protected mode */
278# define PGM_GST_TYPE PGM_TYPE_PROT
279# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
280# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_PROT(name)
281# include "PGMGstDefs.h"
282# include "PGMAllBth.h"
283# undef PGM_BTH_NAME
284# undef PGM_GST_TYPE
285# undef PGM_GST_NAME
286
287/* Guest - 32-bit mode */
288# define PGM_GST_TYPE PGM_TYPE_32BIT
289# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
290# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_32BIT(name)
291# include "PGMGstDefs.h"
292# include "PGMAllBth.h"
293# undef PGM_BTH_NAME
294# undef PGM_GST_TYPE
295# undef PGM_GST_NAME
296
297/* Guest - PAE mode */
298# define PGM_GST_TYPE PGM_TYPE_PAE
299# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
300# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_PAE(name)
301# include "PGMGstDefs.h"
302# include "PGMAllBth.h"
303# undef PGM_BTH_NAME
304# undef PGM_GST_TYPE
305# undef PGM_GST_NAME
306
307# ifdef VBOX_WITH_64_BITS_GUESTS
308/* Guest - AMD64 mode */
309# define PGM_GST_TYPE PGM_TYPE_AMD64
310# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
311# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_AMD64(name)
312# include "PGMGstDefs.h"
313# include "PGMAllBth.h"
314# undef PGM_BTH_NAME
315# undef PGM_GST_TYPE
316# undef PGM_GST_NAME
317# endif /* VBOX_WITH_64_BITS_GUESTS */
318
319# undef PGM_SHW_TYPE
320# undef PGM_SHW_NAME
321
322
323/*
324 * Shadow - PAE nested paging mode.
325 */
326# define PGM_SHW_TYPE PGM_TYPE_NESTED_PAE
327# define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED_PAE(name)
328# include "PGMAllShw.h"
329
330/* Guest - real mode */
331# define PGM_GST_TYPE PGM_TYPE_REAL
332# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
333# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_REAL(name)
334# include "PGMGstDefs.h"
335# include "PGMAllBth.h"
336# undef PGM_BTH_NAME
337# undef PGM_GST_TYPE
338# undef PGM_GST_NAME
339
340/* Guest - protected mode */
341# define PGM_GST_TYPE PGM_TYPE_PROT
342# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
343# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_PROT(name)
344# include "PGMGstDefs.h"
345# include "PGMAllBth.h"
346# undef PGM_BTH_NAME
347# undef PGM_GST_TYPE
348# undef PGM_GST_NAME
349
350/* Guest - 32-bit mode */
351# define PGM_GST_TYPE PGM_TYPE_32BIT
352# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
353# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_32BIT(name)
354# include "PGMGstDefs.h"
355# include "PGMAllBth.h"
356# undef PGM_BTH_NAME
357# undef PGM_GST_TYPE
358# undef PGM_GST_NAME
359
360/* Guest - PAE mode */
361# define PGM_GST_TYPE PGM_TYPE_PAE
362# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
363# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_PAE(name)
364# include "PGMGstDefs.h"
365# include "PGMAllBth.h"
366# undef PGM_BTH_NAME
367# undef PGM_GST_TYPE
368# undef PGM_GST_NAME
369
370# ifdef VBOX_WITH_64_BITS_GUESTS
371/* Guest - AMD64 mode */
372# define PGM_GST_TYPE PGM_TYPE_AMD64
373# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
374# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_AMD64(name)
375# include "PGMGstDefs.h"
376# include "PGMAllBth.h"
377# undef PGM_BTH_NAME
378# undef PGM_GST_TYPE
379# undef PGM_GST_NAME
380# endif /* VBOX_WITH_64_BITS_GUESTS */
381
382# undef PGM_SHW_TYPE
383# undef PGM_SHW_NAME
384
385
386/*
387 * Shadow - AMD64 nested paging mode.
388 */
389# define PGM_SHW_TYPE PGM_TYPE_NESTED_AMD64
390# define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED_AMD64(name)
391# include "PGMAllShw.h"
392
393/* Guest - real mode */
394# define PGM_GST_TYPE PGM_TYPE_REAL
395# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
396# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_REAL(name)
397# include "PGMGstDefs.h"
398# include "PGMAllBth.h"
399# undef PGM_BTH_NAME
400# undef PGM_GST_TYPE
401# undef PGM_GST_NAME
402
403/* Guest - protected mode */
404# define PGM_GST_TYPE PGM_TYPE_PROT
405# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
406# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_PROT(name)
407# include "PGMGstDefs.h"
408# include "PGMAllBth.h"
409# undef PGM_BTH_NAME
410# undef PGM_GST_TYPE
411# undef PGM_GST_NAME
412
413/* Guest - 32-bit mode */
414# define PGM_GST_TYPE PGM_TYPE_32BIT
415# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
416# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_32BIT(name)
417# include "PGMGstDefs.h"
418# include "PGMAllBth.h"
419# undef PGM_BTH_NAME
420# undef PGM_GST_TYPE
421# undef PGM_GST_NAME
422
423/* Guest - PAE mode */
424# define PGM_GST_TYPE PGM_TYPE_PAE
425# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
426# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_PAE(name)
427# include "PGMGstDefs.h"
428# include "PGMAllBth.h"
429# undef PGM_BTH_NAME
430# undef PGM_GST_TYPE
431# undef PGM_GST_NAME
432
433# ifdef VBOX_WITH_64_BITS_GUESTS
434/* Guest - AMD64 mode */
435# define PGM_GST_TYPE PGM_TYPE_AMD64
436# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
437# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_AMD64(name)
438# include "PGMGstDefs.h"
439# include "PGMAllBth.h"
440# undef PGM_BTH_NAME
441# undef PGM_GST_TYPE
442# undef PGM_GST_NAME
443# endif /* VBOX_WITH_64_BITS_GUESTS */
444
445# undef PGM_SHW_TYPE
446# undef PGM_SHW_NAME
447
448
449/*
450 * Shadow - EPT.
451 */
452# define PGM_SHW_TYPE PGM_TYPE_EPT
453# define PGM_SHW_NAME(name) PGM_SHW_NAME_EPT(name)
454# include "PGMAllShw.h"
455
456/* Guest - real mode */
457# define PGM_GST_TYPE PGM_TYPE_REAL
458# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
459# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_REAL(name)
460# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
461# include "PGMGstDefs.h"
462# include "PGMAllBth.h"
463# undef BTH_PGMPOOLKIND_PT_FOR_PT
464# undef PGM_BTH_NAME
465# undef PGM_GST_TYPE
466# undef PGM_GST_NAME
467
468/* Guest - protected mode */
469# define PGM_GST_TYPE PGM_TYPE_PROT
470# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
471# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
472# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
473# include "PGMGstDefs.h"
474# include "PGMAllBth.h"
475# undef BTH_PGMPOOLKIND_PT_FOR_PT
476# undef PGM_BTH_NAME
477# undef PGM_GST_TYPE
478# undef PGM_GST_NAME
479
480/* Guest - 32-bit mode */
481# define PGM_GST_TYPE PGM_TYPE_32BIT
482# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
483# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_32BIT(name)
484# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
485# include "PGMGstDefs.h"
486# include "PGMAllBth.h"
487# undef BTH_PGMPOOLKIND_PT_FOR_PT
488# undef PGM_BTH_NAME
489# undef PGM_GST_TYPE
490# undef PGM_GST_NAME
491
492/* Guest - PAE mode */
493# define PGM_GST_TYPE PGM_TYPE_PAE
494# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
495# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PAE(name)
496# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
497# include "PGMGstDefs.h"
498# include "PGMAllBth.h"
499# undef BTH_PGMPOOLKIND_PT_FOR_PT
500# undef PGM_BTH_NAME
501# undef PGM_GST_TYPE
502# undef PGM_GST_NAME
503
504# ifdef VBOX_WITH_64_BITS_GUESTS
505/* Guest - AMD64 mode */
506# define PGM_GST_TYPE PGM_TYPE_AMD64
507# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
508# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_AMD64(name)
509# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
510# include "PGMGstDefs.h"
511# include "PGMAllBth.h"
512# undef BTH_PGMPOOLKIND_PT_FOR_PT
513# undef PGM_BTH_NAME
514# undef PGM_GST_TYPE
515# undef PGM_GST_NAME
516# endif /* VBOX_WITH_64_BITS_GUESTS */
517
518# undef PGM_SHW_TYPE
519# undef PGM_SHW_NAME
520
521#endif /* !IN_RC */
522
523
524/**
525 * Guest mode data array.
526 */
527PGMMODEDATAGST const g_aPgmGuestModeData[PGM_GUEST_MODE_DATA_ARRAY_SIZE] =
528{
529 { UINT32_MAX, NULL, NULL, NULL, NULL, NULL }, /* 0 */
530 {
531 PGM_TYPE_REAL,
532 PGM_GST_NAME_REAL(GetPage),
533 PGM_GST_NAME_REAL(ModifyPage),
534 PGM_GST_NAME_REAL(GetPDE),
535 PGM_GST_NAME_REAL(Enter),
536 PGM_GST_NAME_REAL(Exit),
537#ifdef IN_RING3
538 PGM_GST_NAME_REAL(Relocate),
539#endif
540 },
541 {
542 PGM_TYPE_PROT,
543 PGM_GST_NAME_PROT(GetPage),
544 PGM_GST_NAME_PROT(ModifyPage),
545 PGM_GST_NAME_PROT(GetPDE),
546 PGM_GST_NAME_PROT(Enter),
547 PGM_GST_NAME_PROT(Exit),
548#ifdef IN_RING3
549 PGM_GST_NAME_PROT(Relocate),
550#endif
551 },
552 {
553 PGM_TYPE_32BIT,
554 PGM_GST_NAME_32BIT(GetPage),
555 PGM_GST_NAME_32BIT(ModifyPage),
556 PGM_GST_NAME_32BIT(GetPDE),
557 PGM_GST_NAME_32BIT(Enter),
558 PGM_GST_NAME_32BIT(Exit),
559#ifdef IN_RING3
560 PGM_GST_NAME_32BIT(Relocate),
561#endif
562 },
563 {
564 PGM_TYPE_PAE,
565 PGM_GST_NAME_PAE(GetPage),
566 PGM_GST_NAME_PAE(ModifyPage),
567 PGM_GST_NAME_PAE(GetPDE),
568 PGM_GST_NAME_PAE(Enter),
569 PGM_GST_NAME_PAE(Exit),
570#ifdef IN_RING3
571 PGM_GST_NAME_PAE(Relocate),
572#endif
573 },
574#if defined(VBOX_WITH_64_BITS_GUESTS) && !defined(IN_RC)
575 {
576 PGM_TYPE_AMD64,
577 PGM_GST_NAME_AMD64(GetPage),
578 PGM_GST_NAME_AMD64(ModifyPage),
579 PGM_GST_NAME_AMD64(GetPDE),
580 PGM_GST_NAME_AMD64(Enter),
581 PGM_GST_NAME_AMD64(Exit),
582# ifdef IN_RING3
583 PGM_GST_NAME_AMD64(Relocate),
584# endif
585 },
586#endif
587};
588
589
590/**
591 * The shadow mode data array.
592 */
593PGMMODEDATASHW const g_aPgmShadowModeData[PGM_SHADOW_MODE_DATA_ARRAY_SIZE] =
594{
595 { UINT8_MAX, NULL, NULL, NULL, NULL, NULL }, /* 0 */
596 { UINT8_MAX, NULL, NULL, NULL, NULL, NULL }, /* PGM_TYPE_REAL */
597 { UINT8_MAX, NULL, NULL, NULL, NULL, NULL }, /* PGM_TYPE_PROT */
598 {
599 PGM_TYPE_32BIT,
600 PGM_SHW_NAME_32BIT(GetPage),
601 PGM_SHW_NAME_32BIT(ModifyPage),
602 PGM_SHW_NAME_32BIT(Enter),
603 PGM_SHW_NAME_32BIT(Exit),
604#ifdef IN_RING3
605 PGM_SHW_NAME_32BIT(Relocate),
606#endif
607 },
608 {
609 PGM_TYPE_PAE,
610 PGM_SHW_NAME_PAE(GetPage),
611 PGM_SHW_NAME_PAE(ModifyPage),
612 PGM_SHW_NAME_PAE(Enter),
613 PGM_SHW_NAME_PAE(Exit),
614#ifdef IN_RING3
615 PGM_SHW_NAME_PAE(Relocate),
616#endif
617 },
618#ifndef IN_RC
619 {
620 PGM_TYPE_AMD64,
621 PGM_SHW_NAME_AMD64(GetPage),
622 PGM_SHW_NAME_AMD64(ModifyPage),
623 PGM_SHW_NAME_AMD64(Enter),
624 PGM_SHW_NAME_AMD64(Exit),
625# ifdef IN_RING3
626 PGM_SHW_NAME_AMD64(Relocate),
627# endif
628 },
629 {
630 PGM_TYPE_NESTED_32BIT,
631 PGM_SHW_NAME_NESTED_32BIT(GetPage),
632 PGM_SHW_NAME_NESTED_32BIT(ModifyPage),
633 PGM_SHW_NAME_NESTED_32BIT(Enter),
634 PGM_SHW_NAME_NESTED_32BIT(Exit),
635# ifdef IN_RING3
636 PGM_SHW_NAME_NESTED_32BIT(Relocate),
637# endif
638 },
639 {
640 PGM_TYPE_NESTED_PAE,
641 PGM_SHW_NAME_NESTED_PAE(GetPage),
642 PGM_SHW_NAME_NESTED_PAE(ModifyPage),
643 PGM_SHW_NAME_NESTED_PAE(Enter),
644 PGM_SHW_NAME_NESTED_PAE(Exit),
645# ifdef IN_RING3
646 PGM_SHW_NAME_NESTED_PAE(Relocate),
647# endif
648 },
649 {
650 PGM_TYPE_NESTED_AMD64,
651 PGM_SHW_NAME_NESTED_AMD64(GetPage),
652 PGM_SHW_NAME_NESTED_AMD64(ModifyPage),
653 PGM_SHW_NAME_NESTED_AMD64(Enter),
654 PGM_SHW_NAME_NESTED_AMD64(Exit),
655# ifdef IN_RING3
656 PGM_SHW_NAME_NESTED_AMD64(Relocate),
657# endif
658 },
659 {
660 PGM_TYPE_EPT,
661 PGM_SHW_NAME_EPT(GetPage),
662 PGM_SHW_NAME_EPT(ModifyPage),
663 PGM_SHW_NAME_EPT(Enter),
664 PGM_SHW_NAME_EPT(Exit),
665# ifdef IN_RING3
666 PGM_SHW_NAME_EPT(Relocate),
667# endif
668 },
669#endif /* !IN_RC */
670};
671
672
673/**
674 * The guest+shadow mode data array.
675 */
676PGMMODEDATABTH const g_aPgmBothModeData[PGM_BOTH_MODE_DATA_ARRAY_SIZE] =
677{
678#if !defined(IN_RING3) && !defined(VBOX_STRICT)
679# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
680# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
681 { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(Trap0eHandler) }
682
683#elif !defined(IN_RING3) && defined(VBOX_STRICT)
684# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
685# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
686 { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(Trap0eHandler), Nm(AssertCR3) }
687
688#elif defined(IN_RING3) && !defined(VBOX_STRICT)
689# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL }
690# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
691 { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(Relocate), }
692
693#elif defined(IN_RING3) && defined(VBOX_STRICT)
694# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
695# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
696 { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(Relocate), Nm(AssertCR3) }
697
698#else
699# error "Misconfig."
700#endif
701
702 /* 32-bit shadow paging mode: */
703 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
704 PGMMODEDATABTH_ENTRY(PGM_TYPE_32BIT, PGM_TYPE_REAL, PGM_BTH_NAME_32BIT_REAL),
705 PGMMODEDATABTH_ENTRY(PGM_TYPE_32BIT, PGM_TYPE_PROT, PGM_BTH_NAME_32BIT_PROT),
706 PGMMODEDATABTH_ENTRY(PGM_TYPE_32BIT, PGM_TYPE_32BIT, PGM_BTH_NAME_32BIT_32BIT),
707 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_PAE - illegal */
708 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_AMD64 - illegal */
709 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NESTED_32BIT - illegal */
710 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NESTED_PAE - illegal */
711 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NESTED_AMD64 - illegal */
712 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_EPT - illegal */
713
714 /* PAE shadow paging mode: */
715 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
716 PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_REAL, PGM_BTH_NAME_PAE_REAL),
717 PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_PROT, PGM_BTH_NAME_PAE_PROT),
718 PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_32BIT, PGM_BTH_NAME_PAE_32BIT),
719 PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_PAE, PGM_BTH_NAME_PAE_PAE),
720 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_AMD64 - illegal */
721 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NESTED_32BIT - illegal */
722 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NESTED_PAE - illegal */
723 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NESTED_AMD64 - illegal */
724 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_EPT - illegal */
725
726#ifndef IN_RC
727 /* AMD64 shadow paging mode: */
728 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
729 PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_REAL, PGM_BTH_NAME_AMD64_REAL),
730 PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_PROT, PGM_BTH_NAME_AMD64_PROT),
731 PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_32BIT, PGM_BTH_NAME_AMD64_32BIT),
732 PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_PAE, PGM_BTH_NAME_AMD64_PAE),
733# ifdef VBOX_WITH_64_BITS_GUESTS
734 PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_AMD64, PGM_BTH_NAME_AMD64_AMD64),
735# else
736 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_AMD64 - illegal */
737# endif
738 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NESTED_32BIT - illegal */
739 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NESTED_PAE - illegal */
740 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NESTED_AMD64 - illegal */
741 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_EPT - illegal */
742
743 /* 32-bit nested paging mode: */
744 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
745 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_REAL, PGM_BTH_NAME_NESTED_32BIT_REAL),
746 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_PROT, PGM_BTH_NAME_NESTED_32BIT_PROT),
747 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_32BIT, PGM_BTH_NAME_NESTED_32BIT_32BIT),
748 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_PAE, PGM_BTH_NAME_NESTED_32BIT_PAE),
749# ifdef VBOX_WITH_64_BITS_GUESTS
750 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_AMD64, PGM_BTH_NAME_NESTED_32BIT_AMD64),
751# else
752 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_AMD64 - illegal */
753# endif
754 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NESTED_32BIT - illegal */
755 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NESTED_PAE - illegal */
756 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NESTED_AMD64 - illegal */
757 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_EPT - illegal */
758
759 /* PAE nested paging mode: */
760 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
761 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_REAL, PGM_BTH_NAME_NESTED_PAE_REAL),
762 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_PROT, PGM_BTH_NAME_NESTED_PAE_PROT),
763 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_32BIT, PGM_BTH_NAME_NESTED_PAE_32BIT),
764 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_PAE, PGM_BTH_NAME_NESTED_PAE_PAE),
765# ifdef VBOX_WITH_64_BITS_GUESTS
766 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_AMD64, PGM_BTH_NAME_NESTED_PAE_AMD64),
767# else
768 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_AMD64 - illegal */
769# endif
770 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NESTED_32BIT - illegal */
771 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NESTED_PAE - illegal */
772 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NESTED_AMD64 - illegal */
773 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_EPT - illegal */
774
775 /* AMD64 nested paging mode: */
776 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
777 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_REAL, PGM_BTH_NAME_NESTED_AMD64_REAL),
778 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_PROT, PGM_BTH_NAME_NESTED_AMD64_PROT),
779 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_32BIT, PGM_BTH_NAME_NESTED_AMD64_32BIT),
780 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_PAE, PGM_BTH_NAME_NESTED_AMD64_PAE),
781# ifdef VBOX_WITH_64_BITS_GUESTS
782 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_AMD64, PGM_BTH_NAME_NESTED_AMD64_AMD64),
783# else
784 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_AMD64 - illegal */
785# endif
786 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NESTED_32BIT - illegal */
787 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NESTED_PAE - illegal */
788 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NESTED_AMD64 - illegal */
789 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_EPT - illegal */
790
791 /* EPT nested paging mode: */
792 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
793 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_REAL, PGM_BTH_NAME_EPT_REAL),
794 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_PROT, PGM_BTH_NAME_EPT_PROT),
795 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_32BIT, PGM_BTH_NAME_EPT_32BIT),
796 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_PAE, PGM_BTH_NAME_EPT_PAE),
797# ifdef VBOX_WITH_64_BITS_GUESTS
798 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_AMD64, PGM_BTH_NAME_EPT_AMD64),
799# else
800 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_AMD64 - illegal */
801# endif
802 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NESTED_32BIT - illegal */
803 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NESTED_PAE - illegal */
804 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NESTED_AMD64 - illegal */
805 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_EPT - illegal */
806
807#endif /* !IN_RC */
808
809#undef PGMMODEDATABTH_ENTRY
810#undef PGMMODEDATABTH_NULL_ENTRY
811};
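/*
 * Illustrative sketch, for exposition only: every PGM entry point in this
 * file consumes the table above the same way - read the per-VCPU index,
 * bounds-check it, verify the member function pointer, then dispatch.  Note
 * how each shadow paging mode contributes ten consecutive slots (the null
 * slot 0 plus the guest modes REAL through EPT), with null entries for the
 * invalid combinations.  The helper name below is made up; the pattern is
 * the one used by PGMTrap0eHandler and friends further down.
 */
#if 0 /* illustrative sketch, disabled */
static int pgmSampleDispatchInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
{
    uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
    AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
    AssertReturn(g_aPgmBothModeData[idxBth].pfnInvalidatePage, VERR_PGM_MODE_IPE);
    return g_aPgmBothModeData[idxBth].pfnInvalidatePage(pVCpu, GCPtrPage);
}
#endif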
812
813
814#ifndef IN_RING3
815/**
816 * #PF Handler.
817 *
818 * @returns VBox status code (appropriate for trap handling and GC return).
819 * @param pVCpu The cross context virtual CPU structure.
820 * @param uErr The trap error code.
821 * @param pRegFrame Trap register frame.
822 * @param pvFault The fault address.
823 */
824VMMDECL(int) PGMTrap0eHandler(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
825{
826 PVM pVM = pVCpu->CTX_SUFF(pVM);
827
828 Log(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv eip=%04x:%RGv cr3=%RGp\n", uErr, pvFault, pRegFrame->cs.Sel, (RTGCPTR)pRegFrame->rip, (RTGCPHYS)CPUMGetGuestCR3(pVCpu)));
829 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0e, a);
830 STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );
831
832
833#ifdef VBOX_WITH_STATISTICS
834 /*
835 * Error code stats.
836 */
837 if (uErr & X86_TRAP_PF_US)
838 {
839 if (!(uErr & X86_TRAP_PF_P))
840 {
841 if (uErr & X86_TRAP_PF_RW)
842 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentWrite);
843 else
844 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentRead);
845 }
846 else if (uErr & X86_TRAP_PF_RW)
847 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSWrite);
848 else if (uErr & X86_TRAP_PF_RSVD)
849 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSReserved);
850 else if (uErr & X86_TRAP_PF_ID)
851 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNXE);
852 else
853 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSRead);
854 }
855 else
856 { /* Supervisor */
857 if (!(uErr & X86_TRAP_PF_P))
858 {
859 if (uErr & X86_TRAP_PF_RW)
860 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentWrite);
861 else
862 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentRead);
863 }
864 else if (uErr & X86_TRAP_PF_RW)
865 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVWrite);
866 else if (uErr & X86_TRAP_PF_ID)
867 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSNXE);
868 else if (uErr & X86_TRAP_PF_RSVD)
869 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVReserved);
870 }
871#endif /* VBOX_WITH_STATISTICS */
872
873 /*
874 * Call the worker.
875 */
876 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
877 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
878 AssertReturn(g_aPgmBothModeData[idxBth].pfnTrap0eHandler, VERR_PGM_MODE_IPE);
879 bool fLockTaken = false;
880 int rc = g_aPgmBothModeData[idxBth].pfnTrap0eHandler(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken);
881 if (fLockTaken)
882 {
883 PGM_LOCK_ASSERT_OWNER(pVM);
884 pgmUnlock(pVM);
885 }
886 LogFlow(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv rc=%Rrc\n", uErr, pvFault, rc));
887
888 /*
889 * Return code tweaks.
890 */
891 if (rc != VINF_SUCCESS)
892 {
893 if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
894 rc = VINF_SUCCESS;
895
896# ifdef IN_RING0
897 /* Note: hack alert for difficult to reproduce problem. */
898 if ( rc == VERR_PAGE_NOT_PRESENT /* SMP only ; disassembly might fail. */
899 || rc == VERR_PAGE_TABLE_NOT_PRESENT /* seen with UNI & SMP */
900 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT /* seen with SMP */
901 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT) /* precaution */
902 {
903 Log(("WARNING: Unexpected VERR_PAGE_TABLE_NOT_PRESENT (%d) for page fault at %RGv error code %x (rip=%RGv)\n", rc, pvFault, uErr, pRegFrame->rip));
904 /* Some kind of inconsistency in the SMP case; it's safe to just execute the instruction again; not sure about single VCPU VMs though. */
905 rc = VINF_SUCCESS;
906 }
907# endif
908 }
909
910 STAM_STATS({ if (rc == VINF_EM_RAW_GUEST_TRAP) STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eGuestPF); });
911 STAM_STATS({ if (!pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution))
912 pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2Misc; });
913 STAM_PROFILE_STOP_EX(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0e, pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution), a);
914 return rc;
915}
916#endif /* !IN_RING3 */
917
918
919/**
920 * Prefetch a page
921 *
922 * Typically used to sync commonly used pages before entering raw mode
923 * after a CR3 reload.
924 *
925 * @returns VBox status code suitable for scheduling.
926 * @retval VINF_SUCCESS on success.
927 * @retval VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
928 * @param pVCpu The cross context virtual CPU structure.
929 * @param GCPtrPage Page to invalidate.
930 */
931VMMDECL(int) PGMPrefetchPage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
932{
933 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Prefetch), a);
934
935 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
936 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
937 AssertReturn(g_aPgmBothModeData[idxBth].pfnPrefetchPage, VERR_PGM_MODE_IPE);
938 int rc = g_aPgmBothModeData[idxBth].pfnPrefetchPage(pVCpu, GCPtrPage);
939
940 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Prefetch), a);
941 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
942 return rc;
943}
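/*
 * Illustrative sketch, for exposition only: typical use of PGMPrefetchPage
 * as described above - syncing a few commonly used pages after a CR3
 * reload.  A VINF_PGM_SYNC_CR3 return just means a full resync is pending
 * and the prefetch can be abandoned.  The helper name is made up.
 */
#if 0 /* illustrative sketch, disabled */
static int pgmSamplePrefetchRange(PVMCPU pVCpu, RTGCPTR GCPtrFirst, uint32_t cPages)
{
    for (uint32_t i = 0; i < cPages; i++)
    {
        int rc = PGMPrefetchPage(pVCpu, GCPtrFirst + (RTGCPTR)i * PAGE_SIZE);
        if (rc == VINF_PGM_SYNC_CR3)
            return rc; /* out of shadow pages; let the pending SyncCR3 deal with it */
        if (RT_FAILURE(rc))
            return rc;
    }
    return VINF_SUCCESS;
}
#endif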
944
945
946/**
947 * Gets the mapping corresponding to the specified address (if any).
948 *
949 * @returns Pointer to the mapping.
950 * @returns NULL if not found.
951 *
952 * @param pVM The cross context VM structure.
953 * @param GCPtr The guest context pointer.
954 */
955PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr)
956{
957 PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
958 while (pMapping)
959 {
960 if ((uintptr_t)GCPtr < (uintptr_t)pMapping->GCPtr)
961 break;
962 if ((uintptr_t)GCPtr - (uintptr_t)pMapping->GCPtr < pMapping->cb)
963 return pMapping;
964 pMapping = pMapping->CTX_SUFF(pNext);
965 }
966 return NULL;
967}
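/*
 * Illustrative sketch, for exposition only: using pgmGetMapping to test
 * whether a guest address falls inside one of the VMM's guest context
 * mappings, e.g. when deciding whether a conflict has to be resolved.
 * The helper name is made up.
 */
#if 0 /* illustrative sketch, disabled */
static bool pgmSampleIsInVmmMapping(PVM pVM, RTGCPTR GCPtr)
{
    PPGMMAPPING pMapping = pgmGetMapping(pVM, GCPtr);
    if (pMapping)
    {
        Log(("%RGv falls inside the mapping starting at %RGv\n", GCPtr, pMapping->GCPtr));
        return true;
    }
    return false;
}
#endif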
968
969
970/**
971 * Verifies a range of pages for read or write access
972 *
973 * Only checks the guest's page tables
974 *
975 * @returns VBox status code.
976 * @param pVCpu The cross context virtual CPU structure.
977 * @param Addr Guest virtual address to check
978 * @param cbSize Access size
979 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
980 * @remarks Currently not in use.
981 */
982VMMDECL(int) PGMIsValidAccess(PVMCPU pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
983{
984 /*
985 * Validate input.
986 */
987 if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
988 {
989 AssertMsgFailed(("PGMIsValidAccess: invalid access type %08x\n", fAccess));
990 return VERR_INVALID_PARAMETER;
991 }
992
993 uint64_t fPage;
994 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)Addr, &fPage, NULL);
995 if (RT_FAILURE(rc))
996 {
997 Log(("PGMIsValidAccess: access violation for %RGv rc=%d\n", Addr, rc));
998 return VINF_EM_RAW_GUEST_TRAP;
999 }
1000
1001 /*
1002 * Check if the access would cause a page fault
1003 *
1004 * Note that hypervisor page directories are not present in the guest's tables, so this check
1005 * is sufficient.
1006 */
1007 bool fWrite = !!(fAccess & X86_PTE_RW);
1008 bool fUser = !!(fAccess & X86_PTE_US);
1009 if ( !(fPage & X86_PTE_P)
1010 || (fWrite && !(fPage & X86_PTE_RW))
1011 || (fUser && !(fPage & X86_PTE_US)) )
1012 {
1013 Log(("PGMIsValidAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPage, fWrite, fUser));
1014 return VINF_EM_RAW_GUEST_TRAP;
1015 }
1016 if ( RT_SUCCESS(rc)
1017 && PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize))
1018 return PGMIsValidAccess(pVCpu, Addr + PAGE_SIZE, (cbSize > PAGE_SIZE) ? cbSize - PAGE_SIZE : 1, fAccess);
1019 return rc;
1020}
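/*
 * Illustrative sketch, for exposition only: checking whether a guest
 * user-mode buffer may be written before emulating a store into it.
 * VINF_EM_RAW_GUEST_TRAP means the access would fault in the guest and
 * should be turned into a guest #PF instead.  The helper name is made up.
 */
#if 0 /* illustrative sketch, disabled */
static int pgmSampleCheckUserWrite(PVMCPU pVCpu, RTGCPTR GCPtrBuf, uint32_t cbBuf)
{
    int rc = PGMIsValidAccess(pVCpu, GCPtrBuf, cbBuf, X86_PTE_RW | X86_PTE_US);
    if (rc == VINF_EM_RAW_GUEST_TRAP)
        Log(("write to %RGv (%u bytes) would fault in the guest\n", GCPtrBuf, cbBuf));
    return rc;
}
#endif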
1021
1022
1023/**
1024 * Verifies a range of pages for read or write access
1025 *
1026 * Supports handling of pages marked for dirty bit tracking and CSAM
1027 *
1028 * @returns VBox status code.
1029 * @param pVCpu The cross context virtual CPU structure.
1030 * @param Addr Guest virtual address to check
1031 * @param cbSize Access size
1032 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
1033 */
1034VMMDECL(int) PGMVerifyAccess(PVMCPU pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
1035{
1036 PVM pVM = pVCpu->CTX_SUFF(pVM);
1037
1038 AssertMsg(!(fAccess & ~(X86_PTE_US | X86_PTE_RW)), ("PGMVerifyAccess: invalid access type %08x\n", fAccess));
1039
1040 /*
1041 * Get going.
1042 */
1043 uint64_t fPageGst;
1044 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)Addr, &fPageGst, NULL);
1045 if (RT_FAILURE(rc))
1046 {
1047 Log(("PGMVerifyAccess: access violation for %RGv rc=%d\n", Addr, rc));
1048 return VINF_EM_RAW_GUEST_TRAP;
1049 }
1050
1051 /*
1052 * Check if the access would cause a page fault
1053 *
1054 * Note that hypervisor page directories are not present in the guest's tables, so this check
1055 * is sufficient.
1056 */
1057 const bool fWrite = !!(fAccess & X86_PTE_RW);
1058 const bool fUser = !!(fAccess & X86_PTE_US);
1059 if ( !(fPageGst & X86_PTE_P)
1060 || (fWrite && !(fPageGst & X86_PTE_RW))
1061 || (fUser && !(fPageGst & X86_PTE_US)) )
1062 {
1063 Log(("PGMVerifyAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPageGst, fWrite, fUser));
1064 return VINF_EM_RAW_GUEST_TRAP;
1065 }
1066
1067 if (!pVM->pgm.s.fNestedPaging)
1068 {
1069 /*
1070 * Next step is to verify if we protected this page for dirty bit tracking or for CSAM scanning
1071 */
1072 rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, NULL, NULL);
1073 if ( rc == VERR_PAGE_NOT_PRESENT
1074 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
1075 {
1076 /*
1077 * Page is not present in our page tables.
1078 * Try to sync it!
1079 */
1080 Assert(X86_TRAP_PF_RW == X86_PTE_RW && X86_TRAP_PF_US == X86_PTE_US);
1081 uint32_t const uErr = fAccess & (X86_TRAP_PF_RW | X86_TRAP_PF_US);
1082 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
1083 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
1084 AssertReturn(g_aPgmBothModeData[idxBth].pfnVerifyAccessSyncPage, VERR_PGM_MODE_IPE);
1085 rc = g_aPgmBothModeData[idxBth].pfnVerifyAccessSyncPage(pVCpu, Addr, fPageGst, uErr);
1086 if (rc != VINF_SUCCESS)
1087 return rc;
1088 }
1089 else
1090 AssertMsg(rc == VINF_SUCCESS, ("PGMShwGetPage %RGv failed with %Rrc\n", Addr, rc));
1091 }
1092
1093#if 0 /* def VBOX_STRICT; triggers too often now */
1094 /*
1095 * This check is a bit paranoid, but useful.
1096 */
1097 /* Note! This will assert when writing to monitored pages (a bit annoying actually). */
1098 uint64_t fPageShw;
1099 rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, &fPageShw, NULL);
1100 if ( (rc == VERR_PAGE_NOT_PRESENT || RT_FAILURE(rc))
1101 || (fWrite && !(fPageShw & X86_PTE_RW))
1102 || (fUser && !(fPageShw & X86_PTE_US)) )
1103 {
1104 AssertMsgFailed(("Unexpected access violation for %RGv! rc=%Rrc write=%d user=%d\n",
1105 Addr, rc, fWrite && !(fPageShw & X86_PTE_RW), fUser && !(fPageShw & X86_PTE_US)));
1106 return VINF_EM_RAW_GUEST_TRAP;
1107 }
1108#endif
1109
1110 if ( RT_SUCCESS(rc)
1111 && ( PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize - 1)
1112 || Addr + cbSize < Addr))
1113 {
1114 /* Don't recursively call PGMVerifyAccess as we might run out of stack. */
1115 for (;;)
1116 {
1117 Addr += PAGE_SIZE;
1118 if (cbSize > PAGE_SIZE)
1119 cbSize -= PAGE_SIZE;
1120 else
1121 cbSize = 1;
1122 rc = PGMVerifyAccess(pVCpu, Addr, 1, fAccess);
1123 if (rc != VINF_SUCCESS)
1124 break;
1125 if (PAGE_ADDRESS(Addr) == PAGE_ADDRESS(Addr + cbSize - 1))
1126 break;
1127 }
1128 }
1129 return rc;
1130}
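/*
 * Illustrative sketch, for exposition only: PGMVerifyAccess is the variant
 * to use when dirty-bit tracking and CSAM protected pages must be taken
 * into account, e.g. before the VMM writes an exception frame onto the
 * guest supervisor stack.  Leaving out X86_PTE_US requests a supervisor
 * check; X86_PTE_RW requests write access.  The helper name is made up.
 */
#if 0 /* illustrative sketch, disabled */
static int pgmSampleVerifyGuestStack(PVMCPU pVCpu, RTGCPTR GCPtrStack, uint32_t cbFrame)
{
    int rc = PGMVerifyAccess(pVCpu, GCPtrStack, cbFrame, X86_PTE_RW);
    if (rc == VINF_EM_RAW_GUEST_TRAP)
        Log(("guest stack at %RGv not writable; forward the fault to the guest\n", GCPtrStack));
    return rc;
}
#endif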
1131
1132
1133/**
1134 * Emulation of the invlpg instruction (HC only actually).
1135 *
1136 * @returns Strict VBox status code, special care required.
1137 * @retval VINF_PGM_SYNC_CR3 - handled.
1138 * @retval VINF_EM_RAW_EMULATE_INSTR - not handled (RC only).
1139 * @retval VERR_REM_FLUSHED_PAGES_OVERFLOW - not handled.
1140 *
1141 * @param pVCpu The cross context virtual CPU structure.
1142 * @param GCPtrPage Page to invalidate.
1143 *
1144 * @remark ASSUMES the page table entry or page directory is valid. Fairly
1145 * safe, but there could be edge cases!
1146 *
1147 * @todo Flush page or page directory only if necessary!
1148 * @todo VBOXSTRICTRC
1149 */
1150VMMDECL(int) PGMInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
1151{
1152 PVM pVM = pVCpu->CTX_SUFF(pVM);
1153 int rc;
1154 Log3(("PGMInvalidatePage: GCPtrPage=%RGv\n", GCPtrPage));
1155
1156#if !defined(IN_RING3) && defined(VBOX_WITH_REM)
1157 /*
1158 * Notify the recompiler so it can record this instruction.
1159 */
1160 REMNotifyInvalidatePage(pVM, GCPtrPage);
1161#endif /* !IN_RING3 */
1162 IEMTlbInvalidatePage(pVCpu, GCPtrPage);
1163
1164
1165#ifdef IN_RC
1166 /*
1167 * Check for conflicts and pending CR3 monitoring updates.
1168 */
1169 if (pgmMapAreMappingsFloating(pVM))
1170 {
1171 if ( pgmGetMapping(pVM, GCPtrPage)
1172 && PGMGstGetPage(pVCpu, GCPtrPage, NULL, NULL) != VERR_PAGE_TABLE_NOT_PRESENT)
1173 {
1174 LogFlow(("PGMGCInvalidatePage: Conflict!\n"));
1175 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1176 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRCInvlPgConflict);
1177 return VINF_PGM_SYNC_CR3;
1178 }
1179
1180 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
1181 {
1182 LogFlow(("PGMGCInvalidatePage: PGM_SYNC_MONITOR_CR3 -> reinterpret instruction in R3\n"));
1183 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRCInvlPgSyncMonCR3);
1184 return VINF_EM_RAW_EMULATE_INSTR;
1185 }
1186 }
1187#endif /* IN_RC */
1188
1189 /*
1190 * Call paging mode specific worker.
1191 */
1192 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InvalidatePage), a);
1193 pgmLock(pVM);
1194
1195 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
1196 AssertReturnStmt(idxBth < RT_ELEMENTS(g_aPgmBothModeData), pgmUnlock(pVM), VERR_PGM_MODE_IPE);
1197 AssertReturnStmt(g_aPgmBothModeData[idxBth].pfnInvalidatePage, pgmUnlock(pVM), VERR_PGM_MODE_IPE);
1198 rc = g_aPgmBothModeData[idxBth].pfnInvalidatePage(pVCpu, GCPtrPage);
1199
1200 pgmUnlock(pVM);
1201 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InvalidatePage), a);
1202
1203#ifdef IN_RING3
1204 /*
1205 * Check if we have a pending update of the CR3 monitoring.
1206 */
1207 if ( RT_SUCCESS(rc)
1208 && (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3))
1209 {
1210 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1211 Assert(!pVM->pgm.s.fMappingsFixed); Assert(pgmMapAreMappingsEnabled(pVM));
1212 }
1213
1214# ifdef VBOX_WITH_RAW_MODE
1215 /*
1216 * Inform CSAM about the flush
1217 *
1218 * Note: This is to check if monitored pages have been changed; when we implement
1219 * callbacks for virtual handlers, this will no longer be required.
1220 */
1221 CSAMR3FlushPage(pVM, GCPtrPage);
1222# endif
1223#endif /* IN_RING3 */
1224
1225 /* Ignore all irrelevant error codes. */
1226 if ( rc == VERR_PAGE_NOT_PRESENT
1227 || rc == VERR_PAGE_TABLE_NOT_PRESENT
1228 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT
1229 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)
1230 rc = VINF_SUCCESS;
1231
1232 return rc;
1233}
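/*
 * Illustrative sketch, for exposition only: an instruction emulator calling
 * PGMInvalidatePage when it encounters INVLPG.  As the status codes above
 * indicate, VINF_PGM_SYNC_CR3 is a normal outcome (a full shadow resync has
 * been scheduled) and is simply passed up to the caller.  The helper name
 * is made up.
 */
#if 0 /* illustrative sketch, disabled */
static int pgmSampleEmulateInvlpg(PVMCPU pVCpu, RTGCPTR GCPtrOperand)
{
    int rc = PGMInvalidatePage(pVCpu, GCPtrOperand);
    LogFlow(("emulated invlpg %RGv -> %Rrc\n", GCPtrOperand, rc));
    return rc; /* VINF_PGM_SYNC_CR3 and friends are handled by the caller */
}
#endif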
1234
1235
1236/**
1237 * Executes an instruction using the interpreter.
1238 *
1239 * @returns VBox status code (appropriate for trap handling and GC return).
1240 * @param pVM The cross context VM structure.
1241 * @param pVCpu The cross context virtual CPU structure.
1242 * @param pRegFrame Register frame.
1243 * @param pvFault Fault address.
1244 */
1245VMMDECL(VBOXSTRICTRC) PGMInterpretInstruction(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
1246{
1247 NOREF(pVM);
1248 VBOXSTRICTRC rc = EMInterpretInstruction(pVCpu, pRegFrame, pvFault);
1249 if (rc == VERR_EM_INTERPRETER)
1250 rc = VINF_EM_RAW_EMULATE_INSTR;
1251 if (rc != VINF_SUCCESS)
1252 Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%RGv)\n", VBOXSTRICTRC_VAL(rc), pvFault));
1253 return rc;
1254}
1255
1256
1257/**
1258 * Gets effective page information (from the VMM page directory).
1259 *
1260 * @returns VBox status code.
1261 * @param pVCpu The cross context virtual CPU structure.
1262 * @param GCPtr Guest Context virtual address of the page.
1263 * @param pfFlags Where to store the flags. These are X86_PTE_*.
1264 * @param pHCPhys Where to store the HC physical address of the page.
1265 * This is page aligned.
1266 * @remark You should use PGMMapGetPage() for pages in a mapping.
1267 */
1268VMMDECL(int) PGMShwGetPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
1269{
1270 PVM pVM = pVCpu->CTX_SUFF(pVM);
1271 pgmLock(pVM);
1272
1273 uintptr_t idxShw = pVCpu->pgm.s.idxShadowModeData;
1274 AssertReturn(idxShw < RT_ELEMENTS(g_aPgmShadowModeData), VERR_PGM_MODE_IPE);
1275 AssertReturn(g_aPgmShadowModeData[idxShw].pfnGetPage, VERR_PGM_MODE_IPE);
1276 int rc = g_aPgmShadowModeData[idxShw].pfnGetPage(pVCpu, GCPtr, pfFlags, pHCPhys);
1277
1278 pgmUnlock(pVM);
1279 return rc;
1280}
1281
1282
1283/**
1284 * Modify page flags for a range of pages in the shadow context.
1285 *
1286 * The existing flags are ANDed with the fMask and ORed with the fFlags.
1287 *
1288 * @returns VBox status code.
1289 * @param pVCpu The cross context virtual CPU structure.
1290 * @param GCPtr Virtual address of the first page in the range.
1291 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
1292 * @param fMask The AND mask - page flags X86_PTE_*.
1293 * Be very CAREFUL when ~'ing constants which could be 32-bit!
1294 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
1295 * @remark You must use PGMMapModifyPage() for pages in a mapping.
1296 */
1297DECLINLINE(int) pdmShwModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags)
1298{
1299 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
1300 Assert(!(fOpFlags & ~(PGM_MK_PG_IS_MMIO2 | PGM_MK_PG_IS_WRITE_FAULT)));
1301
1302 GCPtr &= PAGE_BASE_GC_MASK; /** @todo this ain't necessary, right... */
1303
1304 PVM pVM = pVCpu->CTX_SUFF(pVM);
1305 pgmLock(pVM);
1306
1307 uintptr_t idxShw = pVCpu->pgm.s.idxShadowModeData;
1308 AssertReturn(idxShw < RT_ELEMENTS(g_aPgmShadowModeData), VERR_PGM_MODE_IPE);
1309 AssertReturn(g_aPgmShadowModeData[idxShw].pfnModifyPage, VERR_PGM_MODE_IPE);
1310 int rc = g_aPgmShadowModeData[idxShw].pfnModifyPage(pVCpu, GCPtr, PAGE_SIZE, fFlags, fMask, fOpFlags);
1311
1312 pgmUnlock(pVM);
1313 return rc;
1314}
1315
1316
1317/**
1318 * Changing the page flags for a single page in the shadow page tables so as to
1319 * make it read-only.
1320 *
1321 * @returns VBox status code.
1322 * @param pVCpu The cross context virtual CPU structure.
1323 * @param GCPtr Virtual address of the first page in the range.
1324 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
1325 */
1326VMMDECL(int) PGMShwMakePageReadonly(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
1327{
1328 return pdmShwModifyPage(pVCpu, GCPtr, 0, ~(uint64_t)X86_PTE_RW, fOpFlags);
1329}
1330
1331
1332/**
1333 * Changing the page flags for a single page in the shadow page tables so as to
1334 * make it writable.
1335 *
1336 * The caller must know with 101% certainty that the guest page tables map this
1337 * page as writable too. This function will deal with shared, zero and write monitored
1338 * pages.
1339 *
1340 * @returns VBox status code.
1341 * @param pVCpu The cross context virtual CPU structure.
1342 * @param GCPtr Virtual address of the first page in the range.
1343 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
1344 */
1345VMMDECL(int) PGMShwMakePageWritable(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
1346{
1347 return pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)0, fOpFlags);
1348}
1349
1350
1351/**
1352 * Changing the page flags for a single page in the shadow page tables so as to
1353 * make it not present.
1354 *
1355 * @returns VBox status code.
1356 * @param pVCpu The cross context virtual CPU structure.
1357 * @param GCPtr Virtual address of the first page in the range.
1358 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
1359 */
1360VMMDECL(int) PGMShwMakePageNotPresent(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
1361{
1362 return pdmShwModifyPage(pVCpu, GCPtr, 0, 0, fOpFlags);
1363}
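/*
 * Illustrative sketch, for exposition only: the three wrappers above all
 * funnel into pdmShwModifyPage, whose contract is new = (old & fMask) |
 * fFlags.  Clearing write access therefore uses fFlags = 0 and
 * fMask = ~X86_PTE_RW, and a combined "read-only and supervisor-only"
 * change would look like the hypothetical call below.
 */
#if 0 /* illustrative sketch, disabled */
static int pgmSampleMakePageReadOnlySupervisor(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
{
    /* AND out both R/W and U/S, OR in nothing. */
    return pdmShwModifyPage(pVCpu, GCPtr, 0, ~(uint64_t)(X86_PTE_RW | X86_PTE_US), fOpFlags);
}
#endif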
1364
1365
1366/**
1367 * Changing the page flags for a single page in the shadow page tables so as to
1368 * make it supervisor and writable.
1369 *
1370 * This is for dealing with CR0.WP=0 and readonly user pages.
1371 *
1372 * @returns VBox status code.
1373 * @param pVCpu The cross context virtual CPU structure.
1374 * @param GCPtr Virtual address of the first page in the range.
1375 * @param fBigPage Whether or not this is a big page. If it is, we have to
1376 * change the shadow PDE as well. If it isn't, the caller
1377 * has checked that the shadow PDE doesn't need changing.
1378 * We ASSUME 4KB pages backing the big page here!
1379 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
1380 */
1381int pgmShwMakePageSupervisorAndWritable(PVMCPU pVCpu, RTGCPTR GCPtr, bool fBigPage, uint32_t fOpFlags)
1382{
1383 int rc = pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)X86_PTE_US, fOpFlags);
1384 if (rc == VINF_SUCCESS && fBigPage)
1385 {
1386 /* this is a bit ugly... */
1387 switch (pVCpu->pgm.s.enmShadowMode)
1388 {
1389 case PGMMODE_32_BIT:
1390 {
1391 PX86PDE pPde = pgmShwGet32BitPDEPtr(pVCpu, GCPtr);
1392 AssertReturn(pPde, VERR_INTERNAL_ERROR_3);
1393 Log(("pgmShwMakePageSupervisorAndWritable: PDE=%#llx", pPde->u));
1394 pPde->n.u1Write = 1;
1395 Log(("-> PDE=%#llx (32)\n", pPde->u));
1396 break;
1397 }
1398 case PGMMODE_PAE:
1399 case PGMMODE_PAE_NX:
1400 {
1401 PX86PDEPAE pPde = pgmShwGetPaePDEPtr(pVCpu, GCPtr);
1402 AssertReturn(pPde, VERR_INTERNAL_ERROR_3);
1403 Log(("pgmShwMakePageSupervisorAndWritable: PDE=%#llx", pPde->u));
1404 pPde->n.u1Write = 1;
1405 Log(("-> PDE=%#llx (PAE)\n", pPde->u));
1406 break;
1407 }
1408 default:
1409 AssertFailedReturn(VERR_INTERNAL_ERROR_4);
1410 }
1411 }
1412 return rc;
1413}
1414
1415
1416/**
1417 * Gets the shadow page directory for the specified address, PAE.
1418 *
1419 * @returns VBox status code. The shadow PD is returned via ppPD.
1420 * @param pVCpu The cross context virtual CPU structure.
1421 * @param GCPtr The address.
1422 * @param uGstPdpe Guest PDPT entry. Valid.
1423 * @param ppPD Receives address of page directory
1424 */
1425int pgmShwSyncPaePDPtr(PVMCPU pVCpu, RTGCPTR GCPtr, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
1426{
1427 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
1428 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
1429 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1430 PVM pVM = pVCpu->CTX_SUFF(pVM);
1431 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1432 PPGMPOOLPAGE pShwPage;
1433 int rc;
1434
1435 PGM_LOCK_ASSERT_OWNER(pVM);
1436
1437 /* Allocate page directory if not present. */
1438 if ( !pPdpe->n.u1Present
1439 && !(pPdpe->u & X86_PDPE_PG_MASK))
1440 {
1441 RTGCPTR64 GCPdPt;
1442 PGMPOOLKIND enmKind;
1443
1444 if (pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu))
1445 {
1446 /* AMD-V nested paging or real/protected mode without paging. */
1447 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
1448 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
1449 }
1450 else
1451 {
1452 if (CPUMGetGuestCR4(pVCpu) & X86_CR4_PAE)
1453 {
1454 if (!(uGstPdpe & X86_PDPE_P))
1455 {
1456 /* PD not present; guest must reload CR3 to change it.
1457 * No need to monitor anything in this case.
1458 */
1459 Assert(VM_IS_RAW_MODE_ENABLED(pVM));
1460
1461 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1462 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
1463 uGstPdpe |= X86_PDPE_P;
1464 }
1465 else
1466 {
1467 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1468 enmKind = PGMPOOLKIND_PAE_PD_FOR_PAE_PD;
1469 }
1470 }
1471 else
1472 {
1473 GCPdPt = CPUMGetGuestCR3(pVCpu);
1474 enmKind = (PGMPOOLKIND)(PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD + iPdPt);
1475 }
1476 }
1477
1478 /* Create a reference back to the PDPT by using the index in its shadow page. */
1479 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1480 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPdPt, false /*fLockPage*/,
1481 &pShwPage);
1482 AssertRCReturn(rc, rc);
1483
1484 /* The PD was cached or created; hook it up now. */
1485 pPdpe->u |= pShwPage->Core.Key | (uGstPdpe & (X86_PDPE_P | X86_PDPE_A));
1486
1487# if defined(IN_RC)
1488 /*
1489 * In 32-bit PAE mode we *must* invalidate the TLB when changing a
1490 * PDPT entry; the CPU fetches them only during cr3 load, so any
1491 * non-present PDPT will continue to cause page faults.
1492 */
1493 ASMReloadCR3();
1494# endif
1495 PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdpe);
1496 }
1497 else
1498 {
1499 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
1500 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1501 Assert((pPdpe->u & X86_PDPE_PG_MASK) == pShwPage->Core.Key);
1502
1503 pgmPoolCacheUsed(pPool, pShwPage);
1504 }
1505 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1506 return VINF_SUCCESS;
1507}
1508
1509
1510/**
1511 * Gets the pointer to the shadow page directory entry for an address, PAE.
1512 *
1513 * @returns VBox status code.
1514 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1515 * @param GCPtr The address.
1516 * @param ppShwPde Receives the address of the pgm pool page for the shadow page directory
1517 */
1518DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPU pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde)
1519{
1520 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
1521 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
1522 PVM pVM = pVCpu->CTX_SUFF(pVM);
1523
1524 PGM_LOCK_ASSERT_OWNER(pVM);
1525
1526 AssertReturn(pPdpt, VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT); /* can't happen */
1527 if (!pPdpt->a[iPdPt].n.u1Present)
1528 {
1529 LogFlow(("pgmShwGetPaePoolPagePD: PD %d not present (%RX64)\n", iPdPt, pPdpt->a[iPdPt].u));
1530 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1531 }
1532 AssertMsg(pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK, ("GCPtr=%RGv\n", GCPtr));
1533
1534 /* Fetch the pgm pool shadow descriptor. */
1535 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
1536 AssertReturn(pShwPde, VERR_PGM_POOL_GET_PAGE_FAILED);
1537
1538 *ppShwPde = pShwPde;
1539 return VINF_SUCCESS;
1540}
1541
1542#ifndef IN_RC
1543
1544/**
1545 * Syncs the SHADOW page directory pointer for the specified address.
1546 *
1547 * Allocates backing pages in case the PDPT or PML4 entry is missing.
1548 *
1549 * The caller is responsible for making sure the guest has a valid PD before
1550 * calling this function.
1551 *
1552 * @returns VBox status code.
1553 * @param pVCpu The cross context virtual CPU structure.
1554 * @param GCPtr The address.
1555 * @param uGstPml4e Guest PML4 entry (valid).
1556 * @param uGstPdpe Guest PDPT entry (valid).
1557 * @param ppPD Receives address of page directory
1558 */
1559static int pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
1560{
1561 PVM pVM = pVCpu->CTX_SUFF(pVM);
1562 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1563 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1564 PX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
1565 bool fNestedPagingOrNoGstPaging = pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu);
1566 PPGMPOOLPAGE pShwPage;
1567 int rc;
1568
1569 PGM_LOCK_ASSERT_OWNER(pVM);
1570
1571 /* Allocate page directory pointer table if not present. */
1572 if ( !pPml4e->n.u1Present
1573 && !(pPml4e->u & X86_PML4E_PG_MASK))
1574 {
1575 RTGCPTR64 GCPml4;
1576 PGMPOOLKIND enmKind;
1577
1578 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1579
1580 if (fNestedPagingOrNoGstPaging)
1581 {
1582 /* AMD-V nested paging or real/protected mode without paging */
1583 GCPml4 = (RTGCPTR64)iPml4 << X86_PML4_SHIFT;
1584 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_PHYS;
1585 }
1586 else
1587 {
1588 GCPml4 = uGstPml4e & X86_PML4E_PG_MASK;
1589 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT;
1590 }
1591
1592 /* Create a reference back to the PML4 by using the index in its shadow page. */
1593 rc = pgmPoolAlloc(pVM, GCPml4, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1594 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, false /*fLockPage*/,
1595 &pShwPage);
1596 AssertRCReturn(rc, rc);
1597 }
1598 else
1599 {
1600 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
1601 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1602
1603 pgmPoolCacheUsed(pPool, pShwPage);
1604 }
1605 /* The PDPT was cached or created; hook it up now. */
1606 pPml4e->u |= pShwPage->Core.Key | (uGstPml4e & pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask);
1607
1608 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1609 PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1610 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1611
1612 /* Allocate page directory if not present. */
1613 if ( !pPdpe->n.u1Present
1614 && !(pPdpe->u & X86_PDPE_PG_MASK))
1615 {
1616 RTGCPTR64 GCPdPt;
1617 PGMPOOLKIND enmKind;
1618
1619 if (fNestedPagingOrNoGstPaging)
1620 {
1621 /* AMD-V nested paging or real/protected mode without paging */
1622 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
1623 enmKind = PGMPOOLKIND_64BIT_PD_FOR_PHYS;
1624 }
1625 else
1626 {
1627 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1628 enmKind = PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD;
1629 }
1630
1631 /* Create a reference back to the PDPT by using the index in its shadow page. */
1632 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1633 pShwPage->idx, iPdPt, false /*fLockPage*/,
1634 &pShwPage);
1635 AssertRCReturn(rc, rc);
1636 }
1637 else
1638 {
1639 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
1640 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1641
1642 pgmPoolCacheUsed(pPool, pShwPage);
1643 }
1644 /* The PD was cached or created; hook it up now. */
1645 pPdpe->u |= pShwPage->Core.Key | (uGstPdpe & pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask);
1646
1647 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1648 return VINF_SUCCESS;
1649}
1650
1651
1652/**
1653 * Gets the SHADOW page directory pointer for the specified address (long mode).
1654 *
1655 * @returns VBox status code.
1656 * @param pVCpu The cross context virtual CPU structure.
1657 * @param GCPtr The address.
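 * @param ppPml4e Receives the address of the PML4 entry. Optional, may be NULL.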
1658 * @param ppPdpt Receives address of pdpt
1659 * @param ppPD Receives address of page directory
1660 */
1661DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
1662{
1663 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1664 PCX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
1665
1666 PGM_LOCK_ASSERT_OWNER(pVCpu->CTX_SUFF(pVM));
1667
1668 AssertReturn(pPml4e, VERR_PGM_PML4_MAPPING);
1669 if (ppPml4e)
1670 *ppPml4e = (PX86PML4E)pPml4e;
1671
1672 Log4(("pgmShwGetLongModePDPtr %RGv (%RHv) %RX64\n", GCPtr, pPml4e, pPml4e->u));
1673
1674 if (!pPml4e->n.u1Present)
1675 return VERR_PAGE_MAP_LEVEL4_NOT_PRESENT;
1676
1677 PVM pVM = pVCpu->CTX_SUFF(pVM);
1678 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1679 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
1680 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1681
1682 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1683 PCX86PDPT pPdpt = *ppPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1684 if (!pPdpt->a[iPdPt].n.u1Present)
1685 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1686
1687 pShwPage = pgmPoolGetPage(pPool, pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
1688 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1689
1690 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1691 Log4(("pgmShwGetLongModePDPtr %RGv -> *ppPD=%p PDE=%p/%RX64\n", GCPtr, *ppPD, &(*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK], (*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK].u));
1692 return VINF_SUCCESS;
1693}
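/* Illustrative note, not part of the build: how the long mode helpers above and
 * below decompose a virtual address into table indexes.  The constants are the
 * standard x86 ones already used in this file; the address is an arbitrary example.
 *
 *      RTGCPTR64 GCPtr = UINT64_C(0x00007fffb811d000);
 *      unsigned  iPml4 = (GCPtr >> X86_PML4_SHIFT)   & X86_PML4_MASK;        // bits 47:39 -> 255
 *      unsigned  iPdPt = (GCPtr >> X86_PDPT_SHIFT)   & X86_PDPT_MASK_AMD64;  // bits 38:30 -> 510
 *      unsigned  iPd   = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;      // bits 29:21 -> 448
 *      unsigned  iPt   = (GCPtr >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK;      // bits 20:12 -> 285
 */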
1694
1695
1696/**
1697 * Syncs the SHADOW EPT page directory pointer for the specified address. Allocates
1698 * backing pages in case the PDPT or PML4 entry is missing.
1699 *
1700 * @returns VBox status code.
1701 * @param pVCpu The cross context virtual CPU structure.
1702 * @param GCPtr The address.
1703 * @param ppPdpt Receives address of pdpt
1704 * @param ppPD Receives address of page directory
1705 */
1706static int pgmShwGetEPTPDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD)
1707{
1708 PVM pVM = pVCpu->CTX_SUFF(pVM);
1709 const unsigned iPml4 = (GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
1710 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1711 PEPTPML4 pPml4;
1712 PEPTPML4E pPml4e;
1713 PPGMPOOLPAGE pShwPage;
1714 int rc;
1715
1716 Assert(pVM->pgm.s.fNestedPaging);
1717 PGM_LOCK_ASSERT_OWNER(pVM);
1718
1719 pPml4 = (PEPTPML4)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1720 Assert(pPml4);
1721
1722 /* Allocate page directory pointer table if not present. */
1723 pPml4e = &pPml4->a[iPml4];
1724 if ( !pPml4e->n.u1Present
1725 && !(pPml4e->u & EPT_PML4E_PG_MASK))
1726 {
1727 Assert(!(pPml4e->u & EPT_PML4E_PG_MASK));
1728 RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT;
1729
1730 rc = pgmPoolAlloc(pVM, GCPml4, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1731 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, false /*fLockPage*/,
1732 &pShwPage);
1733 AssertRCReturn(rc, rc);
1734 }
1735 else
1736 {
1737 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
1738 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1739
1740 pgmPoolCacheUsed(pPool, pShwPage);
1741 }
1742 /* The PDPT was cached or created; hook it up now and fill with the default value. */
1743 pPml4e->u = pShwPage->Core.Key;
1744 pPml4e->n.u1Present = 1;
1745 pPml4e->n.u1Write = 1;
1746 pPml4e->n.u1Execute = 1;
1747
1748 const unsigned iPdPt = (GCPtr >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
1749 PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1750 PEPTPDPTE pPdpe = &pPdpt->a[iPdPt];
1751
1752 if (ppPdpt)
1753 *ppPdpt = pPdpt;
1754
1755 /* Allocate page directory if not present. */
1756 if ( !pPdpe->n.u1Present
1757 && !(pPdpe->u & EPT_PDPTE_PG_MASK))
1758 {
1759 RTGCPTR64 GCPdPt = (RTGCPTR64)iPdPt << EPT_PDPT_SHIFT;
1760 rc = pgmPoolAlloc(pVM, GCPdPt, PGMPOOLKIND_EPT_PD_FOR_PHYS, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1761 pShwPage->idx, iPdPt, false /*fLockPage*/,
1762 &pShwPage);
1763 AssertRCReturn(rc, rc);
1764 }
1765 else
1766 {
1767 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & EPT_PDPTE_PG_MASK);
1768 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1769
1770 pgmPoolCacheUsed(pPool, pShwPage);
1771 }
1772 /* The PD was cached or created; hook it up now and fill with the default value. */
1773 pPdpe->u = pShwPage->Core.Key;
1774 pPdpe->n.u1Present = 1;
1775 pPdpe->n.u1Write = 1;
1776 pPdpe->n.u1Execute = 1;
1777
1778 *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1779 return VINF_SUCCESS;
1780}
1781
1782#endif /* IN_RC */
1783
1784#ifdef IN_RING0
1785/**
1786 * Synchronizes a range of nested page table entries.
1787 *
1788 * The caller must own the PGM lock.
1789 *
1790 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1791 * @param GCPhys Where to start.
1792 * @param cPages The number of pages whose entries should be synced.
1793 * @param enmShwPagingMode The shadow paging mode (PGMMODE_EPT for VT-x,
1794 * host paging mode for AMD-V).
1795 */
1796int pgmShwSyncNestedPageLocked(PVMCPU pVCpu, RTGCPHYS GCPhys, uint32_t cPages, PGMMODE enmShwPagingMode)
1797{
1798 PGM_LOCK_ASSERT_OWNER(pVCpu->CTX_SUFF(pVM));
1799
1800/** @todo r=bird: Gotta love this nested paging hacking we're still carrying with us... (Split PGM_TYPE_NESTED.) */
1801 int rc;
1802 switch (enmShwPagingMode)
1803 {
1804 case PGMMODE_32_BIT:
1805 {
1806 X86PDE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1807 rc = PGM_BTH_NAME_32BIT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1808 break;
1809 }
1810
1811 case PGMMODE_PAE:
1812 case PGMMODE_PAE_NX:
1813 {
1814 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1815 rc = PGM_BTH_NAME_PAE_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1816 break;
1817 }
1818
1819 case PGMMODE_AMD64:
1820 case PGMMODE_AMD64_NX:
1821 {
1822 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1823 rc = PGM_BTH_NAME_AMD64_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1824 break;
1825 }
1826
1827 case PGMMODE_EPT:
1828 {
1829 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1830 rc = PGM_BTH_NAME_EPT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1831 break;
1832 }
1833
1834 default:
1835 AssertMsgFailedReturn(("%d\n", enmShwPagingMode), VERR_IPE_NOT_REACHED_DEFAULT_CASE);
1836 }
1837 return rc;
1838}
1839#endif /* IN_RING0 */
1840
1841
1842/**
1843 * Gets effective Guest OS page information.
1844 *
1845 * When GCPtr is in a big page, the function will return as if it was a normal
1846 * 4KB page. If distinguishing between big and normal pages becomes necessary at
1847 * a later point, a dedicated variant of PGMGstGetPage() will be created for that
1848 * purpose.
1849 *
1850 * @returns VBox status code.
1851 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1852 * @param GCPtr Guest Context virtual address of the page.
1853 * @param pfFlags Where to store the flags. These are X86_PTE_*, even for big pages.
1854 * @param pGCPhys Where to store the GC physical address of the page.
1855 * This is page aligned.
1856 */
1857VMMDECL(int) PGMGstGetPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
1858{
1859 VMCPU_ASSERT_EMT(pVCpu);
1860 uintptr_t idx = pVCpu->pgm.s.idxGuestModeData;
1861 AssertReturn(idx < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
1862 AssertReturn(g_aPgmGuestModeData[idx].pfnGetPage, VERR_PGM_MODE_IPE);
1863 return g_aPgmGuestModeData[idx].pfnGetPage(pVCpu, GCPtr, pfFlags, pGCPhys);
1864}
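/* Minimal usage sketch (illustrative only, not a call site from the sources):
 * query the flags and physical address of a guest page and test writability.
 * pVCpu and GCPtr are assumed to come from the surrounding context.
 *
 *      uint64_t fFlags = 0;
 *      RTGCPHYS GCPhys = NIL_RTGCPHYS;
 *      int rc = PGMGstGetPage(pVCpu, GCPtr, &fFlags, &GCPhys);
 *      if (RT_SUCCESS(rc) && (fFlags & X86_PTE_RW))
 *          Log(("%RGv -> %RGp is writable\n", GCPtr, GCPhys));
 */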
1865
1866
1867/**
1868 * Performs a guest page table walk.
1869 *
1870 * The guest should be in paged protect mode or long mode when making a call to
1871 * this function.
1872 *
1873 * @returns VBox status code.
1874 * @retval VINF_SUCCESS on success.
1875 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
1876 * @retval VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
1877 * not valid, except enmType is PGMPTWALKGSTTYPE_INVALID.
1878 *
1879 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1880 * @param GCPtr The guest virtual address to walk by.
1881 * @param pWalk Where to return the walk result. This is valid for some
1882 * error codes as well.
1883 */
1884int pgmGstPtWalk(PVMCPU pVCpu, RTGCPTR GCPtr, PPGMPTWALKGST pWalk)
1885{
1886 VMCPU_ASSERT_EMT(pVCpu);
1887 switch (pVCpu->pgm.s.enmGuestMode)
1888 {
1889 case PGMMODE_32_BIT:
1890 pWalk->enmType = PGMPTWALKGSTTYPE_32BIT;
1891 return PGM_GST_NAME_32BIT(Walk)(pVCpu, GCPtr, &pWalk->u.Legacy);
1892
1893 case PGMMODE_PAE:
1894 case PGMMODE_PAE_NX:
1895 pWalk->enmType = PGMPTWALKGSTTYPE_PAE;
1896 return PGM_GST_NAME_PAE(Walk)(pVCpu, GCPtr, &pWalk->u.Pae);
1897
1898#if !defined(IN_RC)
1899 case PGMMODE_AMD64:
1900 case PGMMODE_AMD64_NX:
1901 pWalk->enmType = PGMPTWALKGSTTYPE_AMD64;
1902 return PGM_GST_NAME_AMD64(Walk)(pVCpu, GCPtr, &pWalk->u.Amd64);
1903#endif
1904
1905 case PGMMODE_REAL:
1906 case PGMMODE_PROTECTED:
1907 pWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
1908 return VERR_PGM_NOT_USED_IN_MODE;
1909
1910#if defined(IN_RC)
1911 case PGMMODE_AMD64:
1912 case PGMMODE_AMD64_NX:
1913#endif
1914 case PGMMODE_NESTED_32BIT:
1915 case PGMMODE_NESTED_PAE:
1916 case PGMMODE_NESTED_AMD64:
1917 case PGMMODE_EPT:
1918 default:
1919 AssertFailed();
1920 pWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
1921 return VERR_PGM_NOT_USED_IN_MODE;
1922 }
1923}
1924
1925
1926/**
1927 * Tries to continue the previous walk.
1928 *
1929 * @note Requires the caller to hold the PGM lock from the first
1930 * pgmGstPtWalk() call to the last pgmGstPtWalkNext() call. Otherwise
1931 * we cannot use the pointers.
1932 *
1933 * @returns VBox status code.
1934 * @retval VINF_SUCCESS on success.
1935 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
1936 * @retval VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
1937 * not valid, except enmType is PGMPTWALKGSTTYPE_INVALID.
1938 *
1939 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1940 * @param GCPtr The guest virtual address to walk by.
1941 * @param pWalk Pointer to the previous walk result and where to return
1942 * the result of this walk. This is valid for some error
1943 * codes as well.
1944 */
1945int pgmGstPtWalkNext(PVMCPU pVCpu, RTGCPTR GCPtr, PPGMPTWALKGST pWalk)
1946{
1947 /*
1948 * We can only handle successful walks.
1949 * We also limit ourselves to the next page.
1950 */
1951 if ( pWalk->u.Core.fSucceeded
1952 && GCPtr - pWalk->u.Core.GCPtr == PAGE_SIZE)
1953 {
1954 Assert(pWalk->u.Core.uLevel == 0);
1955 if (pWalk->enmType == PGMPTWALKGSTTYPE_AMD64)
1956 {
1957 /*
1958 * AMD64
1959 */
1960 if (!pWalk->u.Core.fGigantPage && !pWalk->u.Core.fBigPage)
1961 {
1962 /*
1963 * We fall back to full walk if the PDE table changes, if any
1964 * reserved bits are set, or if the effective page access changes.
1965 */
1966 const uint64_t fPteSame = X86_PTE_P | X86_PTE_RW | X86_PTE_US | X86_PTE_PWT
1967 | X86_PTE_PCD | X86_PTE_A | X86_PTE_PAE_NX;
1968 const uint64_t fPdeSame = X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_PWT
1969 | X86_PDE_PCD | X86_PDE_A | X86_PDE_PAE_NX | X86_PDE_PS;
1970
1971 if ((GCPtr >> X86_PD_PAE_SHIFT) == (pWalk->u.Core.GCPtr >> X86_PD_PAE_SHIFT))
1972 {
1973 if (pWalk->u.Amd64.pPte)
1974 {
1975 X86PTEPAE Pte;
1976 Pte.u = pWalk->u.Amd64.pPte[1].u;
1977 if ( (Pte.u & fPteSame) == (pWalk->u.Amd64.Pte.u & fPteSame)
1978 && !(Pte.u & (pVCpu)->pgm.s.fGstAmd64MbzPteMask))
1979 {
1980
1981 pWalk->u.Core.GCPtr = GCPtr;
1982 pWalk->u.Core.GCPhys = Pte.u & X86_PTE_PAE_PG_MASK;
1983 pWalk->u.Amd64.Pte.u = Pte.u;
1984 pWalk->u.Amd64.pPte++;
1985 return VINF_SUCCESS;
1986 }
1987 }
1988 }
1989 else if ((GCPtr >> X86_PDPT_SHIFT) == (pWalk->u.Core.GCPtr >> X86_PDPT_SHIFT))
1990 {
1991 Assert(!((GCPtr >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK)); /* Must be first PT entry. */
1992 if (pWalk->u.Amd64.pPde)
1993 {
1994 X86PDEPAE Pde;
1995 Pde.u = pWalk->u.Amd64.pPde[1].u;
1996 if ( (Pde.u & fPdeSame) == (pWalk->u.Amd64.Pde.u & fPdeSame)
1997 && !(Pde.u & (pVCpu)->pgm.s.fGstAmd64MbzPdeMask))
1998 {
1999 /* Get the new PTE and check out the first entry. */
2000 int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, PGM_A20_APPLY(pVCpu, (Pde.u & X86_PDE_PAE_PG_MASK)),
2001 &pWalk->u.Amd64.pPt);
2002 if (RT_SUCCESS(rc))
2003 {
2004 pWalk->u.Amd64.pPte = &pWalk->u.Amd64.pPt->a[0];
2005 X86PTEPAE Pte;
2006 Pte.u = pWalk->u.Amd64.pPte->u;
2007 if ( (Pte.u & fPteSame) == (pWalk->u.Amd64.Pte.u & fPteSame)
2008 && !(Pte.u & (pVCpu)->pgm.s.fGstAmd64MbzPteMask))
2009 {
2010 pWalk->u.Core.GCPtr = GCPtr;
2011 pWalk->u.Core.GCPhys = Pte.u & X86_PTE_PAE_PG_MASK;
2012 pWalk->u.Amd64.Pte.u = Pte.u;
2013 pWalk->u.Amd64.Pde.u = Pde.u;
2014 pWalk->u.Amd64.pPde++;
2015 return VINF_SUCCESS;
2016 }
2017 }
2018 }
2019 }
2020 }
2021 }
2022 else if (!pWalk->u.Core.fGigantPage)
2023 {
2024 if ((GCPtr & X86_PAGE_2M_BASE_MASK) == (pWalk->u.Core.GCPtr & X86_PAGE_2M_BASE_MASK))
2025 {
2026 pWalk->u.Core.GCPtr = GCPtr;
2027 pWalk->u.Core.GCPhys += PAGE_SIZE;
2028 return VINF_SUCCESS;
2029 }
2030 }
2031 else
2032 {
2033 if ((GCPtr & X86_PAGE_1G_BASE_MASK) == (pWalk->u.Core.GCPtr & X86_PAGE_1G_BASE_MASK))
2034 {
2035 pWalk->u.Core.GCPtr = GCPtr;
2036 pWalk->u.Core.GCPhys += PAGE_SIZE;
2037 return VINF_SUCCESS;
2038 }
2039 }
2040 }
2041 }
2042 /* Case we don't handle. Fall back to a full walk. */
2043 return pgmGstPtWalk(pVCpu, GCPtr, pWalk);
2044}
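/* Illustrative sketch (assumption, not an actual call site): walking two
 * consecutive pages with the walk cache while holding the PGM lock, as the
 * note above requires.
 *
 *      PGMPTWALKGST Walk;
 *      PVM pVM = pVCpu->CTX_SUFF(pVM);
 *      pgmLock(pVM);
 *      int rc = pgmGstPtWalk(pVCpu, GCPtr, &Walk);
 *      if (RT_SUCCESS(rc))
 *          rc = pgmGstPtWalkNext(pVCpu, GCPtr + PAGE_SIZE, &Walk);
 *      pgmUnlock(pVM);
 */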
2045
2046
2047/**
2048 * Checks if the page is present.
2049 *
2050 * @returns true if the page is present.
2051 * @returns false if the page is not present.
2052 * @param pVCpu The cross context virtual CPU structure.
2053 * @param GCPtr Address within the page.
2054 */
2055VMMDECL(bool) PGMGstIsPagePresent(PVMCPU pVCpu, RTGCPTR GCPtr)
2056{
2057 VMCPU_ASSERT_EMT(pVCpu);
2058 int rc = PGMGstGetPage(pVCpu, GCPtr, NULL, NULL);
2059 return RT_SUCCESS(rc);
2060}
2061
2062
2063/**
2064 * Sets (replaces) the page flags for a range of pages in the guest's tables.
2065 *
2066 * @returns VBox status code.
2067 * @param pVCpu The cross context virtual CPU structure.
2068 * @param GCPtr The address of the first page.
2069 * @param cb The size of the range in bytes.
2070 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
2071 */
2072VMMDECL(int) PGMGstSetPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
2073{
2074 VMCPU_ASSERT_EMT(pVCpu);
2075 return PGMGstModifyPage(pVCpu, GCPtr, cb, fFlags, 0);
2076}
2077
2078
2079/**
2080 * Modifies page flags for a range of pages in the guest's tables.
2081 *
2082 * The existing flags are ANDed with the fMask and ORed with the fFlags.
2083 *
2084 * @returns VBox status code.
2085 * @param pVCpu The cross context virtual CPU structure.
2086 * @param GCPtr Virtual address of the first page in the range.
2087 * @param cb Size (in bytes) of the range to apply the modification to.
2088 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
2089 * @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
2090 * Be very CAREFUL when ~'ing constants which could be 32-bit!
2091 */
2092VMMDECL(int) PGMGstModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
2093{
2094 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,GstModifyPage), a);
2095 VMCPU_ASSERT_EMT(pVCpu);
2096
2097 /*
2098 * Validate input.
2099 */
2100 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
2101 Assert(cb);
2102
2103 LogFlow(("PGMGstModifyPage %RGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));
2104
2105 /*
2106 * Adjust input.
2107 */
2108 cb += GCPtr & PAGE_OFFSET_MASK;
2109 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
2110 GCPtr = (GCPtr & PAGE_BASE_GC_MASK);
2111
2112 /*
2113 * Call worker.
2114 */
2115 uintptr_t idx = pVCpu->pgm.s.idxGuestModeData;
2116 AssertReturn(idx < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
2117 AssertReturn(g_aPgmGuestModeData[idx].pfnModifyPage, VERR_PGM_MODE_IPE);
2118 int rc = g_aPgmGuestModeData[idx].pfnModifyPage(pVCpu, GCPtr, cb, fFlags, fMask);
2119
2120 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,GstModifyPage), a);
2121 return rc;
2122}
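/* Illustrative sketch of the AND/OR semantics documented above (not a call site
 * from the sources): write protect a range by clearing X86_PTE_RW, then make it
 * writable again.  Note the 64-bit cast before ~ as warned about above.
 *
 *      rc = PGMGstModifyPage(pVCpu, GCPtr, cb, 0, ~(uint64_t)X86_PTE_RW);  // clear RW
 *      rc = PGMGstModifyPage(pVCpu, GCPtr, cb, X86_PTE_RW, UINT64_MAX);    // set RW
 */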
2123
2124
2125#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
2126
2127/**
2128 * Performs the lazy mapping of the 32-bit guest PD.
2129 *
2130 * @returns VBox status code.
2131 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2132 * @param ppPd Where to return the pointer to the mapping. This is
2133 * always set.
2134 */
2135int pgmGstLazyMap32BitPD(PVMCPU pVCpu, PX86PD *ppPd)
2136{
2137 PVM pVM = pVCpu->CTX_SUFF(pVM);
2138 pgmLock(pVM);
2139
2140 Assert(!pVCpu->pgm.s.CTX_SUFF(pGst32BitPd));
2141
2142 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAGE_MASK;
2143 PPGMPAGE pPage;
2144 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
2145 if (RT_SUCCESS(rc))
2146 {
2147 RTHCPTR HCPtrGuestCR3;
2148 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
2149 if (RT_SUCCESS(rc))
2150 {
2151 pVCpu->pgm.s.pGst32BitPdR3 = (R3PTRTYPE(PX86PD))HCPtrGuestCR3;
2152# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
2153 pVCpu->pgm.s.pGst32BitPdR0 = (R0PTRTYPE(PX86PD))HCPtrGuestCR3;
2154# endif
2155 *ppPd = (PX86PD)HCPtrGuestCR3;
2156
2157 pgmUnlock(pVM);
2158 return VINF_SUCCESS;
2159 }
2160
2161 AssertRC(rc);
2162 }
2163 pgmUnlock(pVM);
2164
2165 *ppPd = NULL;
2166 return rc;
2167}
2168
2169
2170/**
2171 * Performs the lazy mapping of the PAE guest PDPT.
2172 *
2173 * @returns VBox status code.
2174 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2175 * @param ppPdpt Where to return the pointer to the mapping. This is
2176 * always set.
2177 */
2178int pgmGstLazyMapPaePDPT(PVMCPU pVCpu, PX86PDPT *ppPdpt)
2179{
2180 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt));
2181 PVM pVM = pVCpu->CTX_SUFF(pVM);
2182 pgmLock(pVM);
2183
2184 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAE_PAGE_MASK;
2185 PPGMPAGE pPage;
2186 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
2187 if (RT_SUCCESS(rc))
2188 {
2189 RTHCPTR HCPtrGuestCR3;
2190 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
2191 if (RT_SUCCESS(rc))
2192 {
2193 pVCpu->pgm.s.pGstPaePdptR3 = (R3PTRTYPE(PX86PDPT))HCPtrGuestCR3;
2194# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
2195 pVCpu->pgm.s.pGstPaePdptR0 = (R0PTRTYPE(PX86PDPT))HCPtrGuestCR3;
2196# endif
2197 *ppPdpt = (PX86PDPT)HCPtrGuestCR3;
2198
2199 pgmUnlock(pVM);
2200 return VINF_SUCCESS;
2201 }
2202
2203 AssertRC(rc);
2204 }
2205
2206 pgmUnlock(pVM);
2207 *ppPdpt = NULL;
2208 return rc;
2209}
2210
2211
2212/**
2213 * Performs the lazy mapping / updating of a PAE guest PD.
2214 *
2216 * @returns VBox status code.
2217 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2218 * @param iPdpt The PDPT index of the PD to map (0..3).
2219 * @param ppPd Where to return the pointer to the mapping. This is
2220 * always set.
2221 */
2222int pgmGstLazyMapPaePD(PVMCPU pVCpu, uint32_t iPdpt, PX86PDPAE *ppPd)
2223{
2224 PVM pVM = pVCpu->CTX_SUFF(pVM);
2225 pgmLock(pVM);
2226
2227 PX86PDPT pGuestPDPT = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
2228 Assert(pGuestPDPT);
2229 Assert(pGuestPDPT->a[iPdpt].n.u1Present);
2230 RTGCPHYS GCPhys = pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK;
2231 bool const fChanged = pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] != GCPhys;
2232
2233 PPGMPAGE pPage;
2234 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
2235 if (RT_SUCCESS(rc))
2236 {
2237 RTRCPTR RCPtr = NIL_RTRCPTR;
2238 RTHCPTR HCPtr = NIL_RTHCPTR;
2239#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
2240 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, &HCPtr);
2241 AssertRC(rc);
2242#endif
2243 if (RT_SUCCESS(rc) && fChanged)
2244 {
2245 RCPtr = (RTRCPTR)(RTRCUINTPTR)(pVM->pgm.s.GCPtrCR3Mapping + (1 + iPdpt) * PAGE_SIZE);
2246 rc = PGMMap(pVM, (RTRCUINTPTR)RCPtr, PGM_PAGE_GET_HCPHYS(pPage), PAGE_SIZE, 0);
2247 }
2248 if (RT_SUCCESS(rc))
2249 {
2250 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = (R3PTRTYPE(PX86PDPAE))HCPtr;
2251# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
2252 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = (R0PTRTYPE(PX86PDPAE))HCPtr;
2253# endif
2254 if (fChanged)
2255 {
2256 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = GCPhys;
2257 pVCpu->pgm.s.apGstPaePDsRC[iPdpt] = (RCPTRTYPE(PX86PDPAE))RCPtr;
2258 }
2259
2260 *ppPd = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
2261 pgmUnlock(pVM);
2262 return VINF_SUCCESS;
2263 }
2264 }
2265
2266 /* Invalid page or some failure, invalidate the entry. */
2267 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = NIL_RTGCPHYS;
2268 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = 0;
2269# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
2270 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = 0;
2271# endif
2272 pVCpu->pgm.s.apGstPaePDsRC[iPdpt] = 0;
2273
2274 pgmUnlock(pVM);
2275 return rc;
2276}
2277
2278#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
2279#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
2280/**
2281 * Performs the lazy mapping of the AMD64 guest PML4.
2282 *
2283 * @returns VBox status code.
2284 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2285 * @param ppPml4 Where to return the pointer to the mapping. This will
2286 * always be set.
2287 */
2288int pgmGstLazyMapPml4(PVMCPU pVCpu, PX86PML4 *ppPml4)
2289{
2290 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4));
2291 PVM pVM = pVCpu->CTX_SUFF(pVM);
2292 pgmLock(pVM);
2293
2294 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_AMD64_PAGE_MASK;
2295 PPGMPAGE pPage;
2296 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
2297 if (RT_SUCCESS(rc))
2298 {
2299 RTHCPTR HCPtrGuestCR3;
2300 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
2301 if (RT_SUCCESS(rc))
2302 {
2303 pVCpu->pgm.s.pGstAmd64Pml4R3 = (R3PTRTYPE(PX86PML4))HCPtrGuestCR3;
2304# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
2305 pVCpu->pgm.s.pGstAmd64Pml4R0 = (R0PTRTYPE(PX86PML4))HCPtrGuestCR3;
2306# endif
2307 *ppPml4 = (PX86PML4)HCPtrGuestCR3;
2308
2309 pgmUnlock(pVM);
2310 return VINF_SUCCESS;
2311 }
2312 }
2313
2314 pgmUnlock(pVM);
2315 *ppPml4 = NULL;
2316 return rc;
2317}
2318#endif
2319
2320
2321/**
2322 * Gets the PAE PDPEs values cached by the CPU.
2323 *
2324 * @returns VBox status code.
2325 * @param pVCpu The cross context virtual CPU structure.
2326 * @param paPdpes Where to return the four PDPEs. The array
2327 * pointed to must have 4 entries.
2328 */
2329VMM_INT_DECL(int) PGMGstGetPaePdpes(PVMCPU pVCpu, PX86PDPE paPdpes)
2330{
2331 Assert(pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
2332
2333 paPdpes[0] = pVCpu->pgm.s.aGstPaePdpeRegs[0];
2334 paPdpes[1] = pVCpu->pgm.s.aGstPaePdpeRegs[1];
2335 paPdpes[2] = pVCpu->pgm.s.aGstPaePdpeRegs[2];
2336 paPdpes[3] = pVCpu->pgm.s.aGstPaePdpeRegs[3];
2337 return VINF_SUCCESS;
2338}
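/* Illustrative sketch (assumption, not the actual HM code): how the cached PDPEs
 * are typically fetched for a PAE guest running on EPT; loading them into the
 * VMCS PDPTE fields is deliberately left out here.
 *
 *      X86PDPE aPdpes[4];
 *      int rc = PGMGstGetPaePdpes(pVCpu, &aPdpes[0]);
 *      AssertRC(rc);
 *      // hand aPdpes[0..3] to the hardware
 */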
2339
2340
2341/**
2342 * Sets the PAE PDPEs values cached by the CPU.
2343 *
2344 * @remarks This must be called *AFTER* PGMUpdateCR3.
2345 *
2346 * @param pVCpu The cross context virtual CPU structure.
2347 * @param paPdpes The four PDPE values. The array pointed to must
2348 * have exactly 4 entries.
2349 *
2350 * @remarks No-long-jump zone!!!
2351 */
2352VMM_INT_DECL(void) PGMGstUpdatePaePdpes(PVMCPU pVCpu, PCX86PDPE paPdpes)
2353{
2354 Assert(pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
2355
2356 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->pgm.s.aGstPaePdpeRegs); i++)
2357 {
2358 if (pVCpu->pgm.s.aGstPaePdpeRegs[i].u != paPdpes[i].u)
2359 {
2360 pVCpu->pgm.s.aGstPaePdpeRegs[i] = paPdpes[i];
2361
2362 /* Force lazy remapping if it changed in any way. */
2363 pVCpu->pgm.s.apGstPaePDsR3[i] = 0;
2364# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
2365 pVCpu->pgm.s.apGstPaePDsR0[i] = 0;
2366# endif
2367 pVCpu->pgm.s.apGstPaePDsRC[i] = 0;
2368 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
2369 }
2370 }
2371
2372 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
2373}
2374
2375
2376/**
2377 * Gets the current CR3 register value for the shadow memory context.
2378 * @returns CR3 value.
2379 * @param pVCpu The cross context virtual CPU structure.
2380 */
2381VMMDECL(RTHCPHYS) PGMGetHyperCR3(PVMCPU pVCpu)
2382{
2383 PPGMPOOLPAGE pPoolPage = pVCpu->pgm.s.CTX_SUFF(pShwPageCR3);
2384 AssertPtrReturn(pPoolPage, 0);
2385 return pPoolPage->Core.Key;
2386}
2387
2388
2389/**
2390 * Gets the current CR3 register value for the nested memory context.
2391 * @returns CR3 value.
2392 * @param pVCpu The cross context virtual CPU structure.
2393 * @param enmShadowMode The shadow paging mode.
2394 */
2395VMMDECL(RTHCPHYS) PGMGetNestedCR3(PVMCPU pVCpu, PGMMODE enmShadowMode)
2396{
2397 NOREF(enmShadowMode);
2398 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
2399 return pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key;
2400}
2401
2402
2403/**
2404 * Gets the current CR3 register value for the HC intermediate memory context.
2405 * @returns CR3 value.
2406 * @param pVM The cross context VM structure.
2407 */
2408VMMDECL(RTHCPHYS) PGMGetInterHCCR3(PVM pVM)
2409{
2410 switch (pVM->pgm.s.enmHostMode)
2411 {
2412 case SUPPAGINGMODE_32_BIT:
2413 case SUPPAGINGMODE_32_BIT_GLOBAL:
2414 return pVM->pgm.s.HCPhysInterPD;
2415
2416 case SUPPAGINGMODE_PAE:
2417 case SUPPAGINGMODE_PAE_GLOBAL:
2418 case SUPPAGINGMODE_PAE_NX:
2419 case SUPPAGINGMODE_PAE_GLOBAL_NX:
2420 return pVM->pgm.s.HCPhysInterPaePDPT;
2421
2422 case SUPPAGINGMODE_AMD64:
2423 case SUPPAGINGMODE_AMD64_GLOBAL:
2424 case SUPPAGINGMODE_AMD64_NX:
2425 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
2426 return pVM->pgm.s.HCPhysInterPaePDPT;
2427
2428 default:
2429 AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode));
2430 return NIL_RTHCPHYS;
2431 }
2432}
2433
2434
2435/**
2436 * Gets the current CR3 register value for the RC intermediate memory context.
2437 * @returns CR3 value.
2438 * @param pVM The cross context VM structure.
2439 * @param pVCpu The cross context virtual CPU structure.
2440 */
2441VMMDECL(RTHCPHYS) PGMGetInterRCCR3(PVM pVM, PVMCPU pVCpu)
2442{
2443 switch (pVCpu->pgm.s.enmShadowMode)
2444 {
2445 case PGMMODE_32_BIT:
2446 return pVM->pgm.s.HCPhysInterPD;
2447
2448 case PGMMODE_PAE:
2449 case PGMMODE_PAE_NX:
2450 return pVM->pgm.s.HCPhysInterPaePDPT;
2451
2452 case PGMMODE_AMD64:
2453 case PGMMODE_AMD64_NX:
2454 return pVM->pgm.s.HCPhysInterPaePML4;
2455
2456 case PGMMODE_NESTED_32BIT:
2457 case PGMMODE_NESTED_PAE:
2458 case PGMMODE_NESTED_AMD64:
2459 case PGMMODE_EPT:
2460 return 0; /* not relevant */
2461
2462 default:
2463 AssertMsgFailed(("enmShadowMode=%d\n", pVCpu->pgm.s.enmShadowMode));
2464 return NIL_RTHCPHYS;
2465 }
2466}
2467
2468
2469/**
2470 * Gets the CR3 register value for the 32-Bit intermediate memory context.
2471 * @returns CR3 value.
2472 * @param pVM The cross context VM structure.
2473 */
2474VMMDECL(RTHCPHYS) PGMGetInter32BitCR3(PVM pVM)
2475{
2476 return pVM->pgm.s.HCPhysInterPD;
2477}
2478
2479
2480/**
2481 * Gets the CR3 register value for the PAE intermediate memory context.
2482 * @returns CR3 value.
2483 * @param pVM The cross context VM structure.
2484 */
2485VMMDECL(RTHCPHYS) PGMGetInterPaeCR3(PVM pVM)
2486{
2487 return pVM->pgm.s.HCPhysInterPaePDPT;
2488}
2489
2490
2491/**
2492 * Gets the CR3 register value for the AMD64 intermediate memory context.
2493 * @returns CR3 value.
2494 * @param pVM The cross context VM structure.
2495 */
2496VMMDECL(RTHCPHYS) PGMGetInterAmd64CR3(PVM pVM)
2497{
2498 return pVM->pgm.s.HCPhysInterPaePML4;
2499}
2500
2501
2502/**
2503 * Performs and schedules necessary updates following a CR3 load or reload.
2504 *
2505 * This will normally involve mapping the guest PD or nPDPT.
2506 *
2507 * @returns VBox status code.
2508 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
2509 * safely be ignored and overridden since the FF will also be set in that case.
2510 * @param pVCpu The cross context virtual CPU structure.
2511 * @param cr3 The new cr3.
2512 * @param fGlobal Indicates whether this is a global flush or not.
2513 */
2514VMMDECL(int) PGMFlushTLB(PVMCPU pVCpu, uint64_t cr3, bool fGlobal)
2515{
2516 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLB), a);
2517 PVM pVM = pVCpu->CTX_SUFF(pVM);
2518
2519 VMCPU_ASSERT_EMT(pVCpu);
2520
2521 /*
2522 * Always flag the necessary updates; this is required for hardware acceleration.
2523 */
2524 /** @todo optimize this, it shouldn't always be necessary. */
2525 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2526 if (fGlobal)
2527 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2528 LogFlow(("PGMFlushTLB: cr3=%RX64 OldCr3=%RX64 fGlobal=%d\n", cr3, pVCpu->pgm.s.GCPhysCR3, fGlobal));
2529
2530 /*
2531 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
2532 */
2533 int rc = VINF_SUCCESS;
2534 RTGCPHYS GCPhysCR3;
2535 switch (pVCpu->pgm.s.enmGuestMode)
2536 {
2537 case PGMMODE_PAE:
2538 case PGMMODE_PAE_NX:
2539 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
2540 break;
2541 case PGMMODE_AMD64:
2542 case PGMMODE_AMD64_NX:
2543 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
2544 break;
2545 default:
2546 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
2547 break;
2548 }
2549 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
2550
2551 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
2552 {
2553 RTGCPHYS const GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
2554
2555 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2556 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2557 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
2558
2559 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2560 rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3);
2561 if (RT_LIKELY(rc == VINF_SUCCESS))
2562 {
2563 if (pgmMapAreMappingsFloating(pVM))
2564 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
2565 }
2566 else
2567 {
2568 AssertMsg(rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
2569 Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_PGM_SYNC_CR3));
2570 pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3;
2571 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MAP_CR3;
2572 if (pgmMapAreMappingsFloating(pVM))
2573 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;
2574 }
2575
2576 if (fGlobal)
2577 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBNewCR3Global));
2578 else
2579 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBNewCR3));
2580 }
2581 else
2582 {
2583# ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
2584 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
2585 if (pPool->cDirtyPages)
2586 {
2587 pgmLock(pVM);
2588 pgmPoolResetDirtyPages(pVM);
2589 pgmUnlock(pVM);
2590 }
2591# endif
2592 /*
2593 * Check if we have a pending update of the CR3 monitoring.
2594 */
2595 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
2596 {
2597 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
2598 Assert(!pVM->pgm.s.fMappingsFixed); Assert(pgmMapAreMappingsEnabled(pVM));
2599 }
2600 if (fGlobal)
2601 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBSameCR3Global));
2602 else
2603 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBSameCR3));
2604 }
2605
2606 IEMTlbInvalidateAll(pVCpu, false /*fVmm*/);
2607 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLB), a);
2608 return rc;
2609}
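/* Simplified sketch (assumption, not one of the real CPUM/IEM call sites) of how
 * a MOV CR3 emulation could use this API.  uNewCr3 stands for the value the guest
 * wrote; the flush is global unless CR4.PGE is set, mirroring the logic in
 * PGMSyncCR3 below.
 *
 *      uint64_t const cr4 = CPUMGetGuestCR4(pVCpu);
 *      int rc = PGMFlushTLB(pVCpu, uNewCr3, !(cr4 & X86_CR4_PGE));
 *      AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
 */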
2610
2611
2612/**
2613 * Performs and schedules necessary updates following a CR3 load or reload when
2614 * using nested or extended paging.
2615 *
2616 * This API is an alternative to PGMFlushTLB that avoids actually flushing the
2617 * TLB and triggering a SyncCR3.
2618 *
2619 * This will normally involve mapping the guest PD or nPDPT.
2620 *
2621 * @returns VBox status code.
2622 * @retval VINF_SUCCESS.
2623 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync (not for nested
2624 * paging modes). This can safely be ignored and overridden since the
2625 * FF will also be set in that case.
2626 * @param pVCpu The cross context virtual CPU structure.
2627 * @param cr3 The new cr3.
2628 */
2629VMMDECL(int) PGMUpdateCR3(PVMCPU pVCpu, uint64_t cr3)
2630{
2631 VMCPU_ASSERT_EMT(pVCpu);
2632 LogFlow(("PGMUpdateCR3: cr3=%RX64 OldCr3=%RX64\n", cr3, pVCpu->pgm.s.GCPhysCR3));
2633
2634 /* We assume we're only called in nested paging mode. */
2635 Assert(pVCpu->CTX_SUFF(pVM)->pgm.s.fNestedPaging || pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
2636 Assert(!pgmMapAreMappingsEnabled(pVCpu->CTX_SUFF(pVM)));
2637 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3));
2638
2639 /*
2640 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
2641 */
2642 int rc = VINF_SUCCESS;
2643 RTGCPHYS GCPhysCR3;
2644 switch (pVCpu->pgm.s.enmGuestMode)
2645 {
2646 case PGMMODE_PAE:
2647 case PGMMODE_PAE_NX:
2648 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
2649 break;
2650 case PGMMODE_AMD64:
2651 case PGMMODE_AMD64_NX:
2652 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
2653 break;
2654 default:
2655 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
2656 break;
2657 }
2658 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
2659
2660 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
2661 {
2662 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2663 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2664 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
2665
2666 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2667 rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3);
2668
2669 AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */ /** @todo this isn't true for the mac, but we need hw to test/fix this. */
2670 }
2671
2672 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
2673 return rc;
2674}
2675
2676
2677/**
2678 * Synchronize the paging structures.
2679 *
2680 * This function is called in response to the VM_FF_PGM_SYNC_CR3 and
2681 * VM_FF_PGM_SYNC_CR3_NONGLOBAL. Those two force action flags are set
2682 * in several places, most importantly whenever the CR3 is loaded.
2683 *
2684 * @returns VBox status code. May return VINF_PGM_SYNC_CR3 in RC/R0.
2685 * @retval VERR_PGM_NO_HYPERVISOR_ADDRESS in raw-mode when we're unable to map
2686 * the VMM into guest context.
2687 * @param pVCpu The cross context virtual CPU structure.
2688 * @param cr0 Guest context CR0 register
2689 * @param cr3 Guest context CR3 register
2690 * @param cr4 Guest context CR4 register
2691 * @param fGlobal Including global page directories or not
2692 */
2693VMMDECL(int) PGMSyncCR3(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
2694{
2695 int rc;
2696
2697 VMCPU_ASSERT_EMT(pVCpu);
2698
2699 /*
2700 * The pool may have pending work and may even require a return to ring-3 to
2701 * clear it all.
2702 */
2703 rc = pgmPoolSyncCR3(pVCpu);
2704 if (rc != VINF_SUCCESS)
2705 return rc;
2706
2707 /*
2708 * We might be called when we shouldn't.
2709 *
2710 * The mode switching will ensure that the PD is resynced after every mode
2711 * switch. So, if we find ourselves here when in protected or real mode
2712 * we can safely clear the FF and return immediately.
2713 */
2714 if (pVCpu->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
2715 {
2716 Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
2717 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2718 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2719 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2720 return VINF_SUCCESS;
2721 }
2722
2723 /* If global pages are not supported, then all flushes are global. */
2724 if (!(cr4 & X86_CR4_PGE))
2725 fGlobal = true;
2726 LogFlow(("PGMSyncCR3: cr0=%RX64 cr3=%RX64 cr4=%RX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
2727 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)));
2728
2729 /*
2730 * Check if we need to finish an aborted MapCR3 call (see PGMFlushTLB).
2731 * This should be done before SyncCR3.
2732 */
2733 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MAP_CR3)
2734 {
2735 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MAP_CR3;
2736
2737 RTGCPHYS GCPhysCR3Old = pVCpu->pgm.s.GCPhysCR3; NOREF(GCPhysCR3Old);
2738 RTGCPHYS GCPhysCR3;
2739 switch (pVCpu->pgm.s.enmGuestMode)
2740 {
2741 case PGMMODE_PAE:
2742 case PGMMODE_PAE_NX:
2743 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
2744 break;
2745 case PGMMODE_AMD64:
2746 case PGMMODE_AMD64_NX:
2747 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
2748 break;
2749 default:
2750 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
2751 break;
2752 }
2753 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
2754
2755 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
2756 {
2757 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2758 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2759 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
2760 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2761 rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3);
2762 }
2763
2764 /* Make sure we check for pending pgm pool syncs as we clear VMCPU_FF_PGM_SYNC_CR3 later on! */
2765 if ( rc == VINF_PGM_SYNC_CR3
2766 || (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL))
2767 {
2768 Log(("PGMSyncCR3: pending pgm pool sync after MapCR3!\n"));
2769#ifdef IN_RING3
2770 rc = pgmPoolSyncCR3(pVCpu);
2771#else
2772 if (rc == VINF_PGM_SYNC_CR3)
2773 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3Old;
2774 return VINF_PGM_SYNC_CR3;
2775#endif
2776 }
2777 AssertRCReturn(rc, rc);
2778 AssertRCSuccessReturn(rc, VERR_IPE_UNEXPECTED_INFO_STATUS);
2779 }
2780
2781 /*
2782 * Let the 'Bth' function do the work and we'll just keep track of the flags.
2783 */
2784 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
2785
2786 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2787 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2788 AssertReturn(g_aPgmBothModeData[idxBth].pfnSyncCR3, VERR_PGM_MODE_IPE);
2789 rc = g_aPgmBothModeData[idxBth].pfnSyncCR3(pVCpu, cr0, cr3, cr4, fGlobal);
2790
2791 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
2792 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
2793 if (rc == VINF_SUCCESS)
2794 {
2795 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)
2796 {
2797 /* Go back to ring 3 if a pgm pool sync is again pending. */
2798 return VINF_PGM_SYNC_CR3;
2799 }
2800
2801 if (!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
2802 {
2803 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2804 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2805 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2806 }
2807
2808 /*
2809 * Check if we have a pending update of the CR3 monitoring.
2810 */
2811 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
2812 {
2813 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
2814 Assert(!pVCpu->CTX_SUFF(pVM)->pgm.s.fMappingsFixed);
2815 Assert(pgmMapAreMappingsEnabled(pVCpu->CTX_SUFF(pVM)));
2816 }
2817 }
2818
2819 /*
2820 * Now flush the CR3 (guest context).
2821 */
2822 if (rc == VINF_SUCCESS)
2823 PGM_INVL_VCPU_TLBS(pVCpu);
2824 return rc;
2825}
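/* Illustrative sketch (assumption, not copied from the EM loop): how a caller
 * reacts to the force action flags mentioned above before resuming guest
 * execution.  CPUMGetGuestCR0/CR4 are the regular CPUM getters.
 *
 *      if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
 *      {
 *          int rc = PGMSyncCR3(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR3(pVCpu),
 *                              CPUMGetGuestCR4(pVCpu), VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
 *          if (RT_FAILURE(rc))
 *              return rc;
 *      }
 */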
2826
2827
2828/**
2829 * Called whenever CR0 or CR4 changes in a way which may affect the paging mode.
2830 *
2831 * @returns VBox status code, with the following informational code for
2832 * VM scheduling.
2833 * @retval VINF_SUCCESS if there was no change, or the change was successfully dealt with.
2834 * @retval VINF_PGM_CHANGE_MODE if we're in RC or R0 and the mode changes.
2835 * (I.e. not in R3.)
2836 * @retval VINF_EM_SUSPEND or VINF_EM_OFF on a fatal runtime error. (R3 only)
2837 *
2838 * @param pVCpu The cross context virtual CPU structure.
2839 * @param cr0 The new cr0.
2840 * @param cr4 The new cr4.
2841 * @param efer The new extended feature enable register.
2842 */
2843VMMDECL(int) PGMChangeMode(PVMCPU pVCpu, uint64_t cr0, uint64_t cr4, uint64_t efer)
2844{
2845 VMCPU_ASSERT_EMT(pVCpu);
2846
2847 /*
2848 * Calc the new guest mode.
2849 *
2850 * Note! We check PG before PE and without requiring PE because of the
2851 * special AMD-V paged real mode (APM vol 2, rev 3.28, 15.9).
2852 */
2853 PGMMODE enmGuestMode;
2854 if (cr0 & X86_CR0_PG)
2855 {
2856 if (!(cr4 & X86_CR4_PAE))
2857 {
2858 bool const fPse = !!(cr4 & X86_CR4_PSE);
2859 if (pVCpu->pgm.s.fGst32BitPageSizeExtension != fPse)
2860 Log(("PGMChangeMode: CR4.PSE %d -> %d\n", pVCpu->pgm.s.fGst32BitPageSizeExtension, fPse));
2861 pVCpu->pgm.s.fGst32BitPageSizeExtension = fPse;
2862 enmGuestMode = PGMMODE_32_BIT;
2863 }
2864 else if (!(efer & MSR_K6_EFER_LME))
2865 {
2866 if (!(efer & MSR_K6_EFER_NXE))
2867 enmGuestMode = PGMMODE_PAE;
2868 else
2869 enmGuestMode = PGMMODE_PAE_NX;
2870 }
2871 else
2872 {
2873 if (!(efer & MSR_K6_EFER_NXE))
2874 enmGuestMode = PGMMODE_AMD64;
2875 else
2876 enmGuestMode = PGMMODE_AMD64_NX;
2877 }
2878 }
2879 else if (!(cr0 & X86_CR0_PE))
2880 enmGuestMode = PGMMODE_REAL;
2881 else
2882 enmGuestMode = PGMMODE_PROTECTED;
2883
2884 /*
2885 * Did it change?
2886 */
2887 if (pVCpu->pgm.s.enmGuestMode == enmGuestMode)
2888 return VINF_SUCCESS;
2889
2890 /* Flush the TLB */
2891 PGM_INVL_VCPU_TLBS(pVCpu);
2892
2893#if defined(IN_RING3) || defined(IN_RING0)
2894 return PGMHCChangeMode(pVCpu->CTX_SUFF(pVM), pVCpu, enmGuestMode);
2895#else
2896 LogFlow(("PGMChangeMode: returns VINF_PGM_CHANGE_MODE.\n"));
2897 return VINF_PGM_CHANGE_MODE;
2898#endif
2899}
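/* Summary of the guest mode selection above (derived from the code; PG is checked
 * before PE because of the AMD-V paged real mode special case):
 *
 *      CR0.PG  CR0.PE  CR4.PAE  EFER.LME  EFER.NXE   enmGuestMode
 *        0       0        -        -         -       PGMMODE_REAL
 *        0       1        -        -         -       PGMMODE_PROTECTED
 *        1       -        0        -         -       PGMMODE_32_BIT
 *        1       -        1        0        0/1      PGMMODE_PAE / PGMMODE_PAE_NX
 *        1       -        1        1        0/1      PGMMODE_AMD64 / PGMMODE_AMD64_NX
 */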
2900
2901#ifndef IN_RC
2902
2903/**
2904 * Converts a PGMMODE value to a PGM_TYPE_* \#define.
2905 *
2906 * @returns PGM_TYPE_*.
2907 * @param pgmMode The mode value to convert.
2908 */
2909DECLINLINE(unsigned) pgmModeToType(PGMMODE pgmMode)
2910{
2911 switch (pgmMode)
2912 {
2913 case PGMMODE_REAL: return PGM_TYPE_REAL;
2914 case PGMMODE_PROTECTED: return PGM_TYPE_PROT;
2915 case PGMMODE_32_BIT: return PGM_TYPE_32BIT;
2916 case PGMMODE_PAE:
2917 case PGMMODE_PAE_NX: return PGM_TYPE_PAE;
2918 case PGMMODE_AMD64:
2919 case PGMMODE_AMD64_NX: return PGM_TYPE_AMD64;
2920 case PGMMODE_NESTED_32BIT: return PGM_TYPE_NESTED_32BIT;
2921 case PGMMODE_NESTED_PAE: return PGM_TYPE_NESTED_PAE;
2922 case PGMMODE_NESTED_AMD64: return PGM_TYPE_NESTED_AMD64;
2923 case PGMMODE_EPT: return PGM_TYPE_EPT;
2924 default:
2925 AssertFatalMsgFailed(("pgmMode=%d\n", pgmMode));
2926 }
2927}
2928
2929
2930/**
2931 * Calculates the shadow paging mode.
2932 *
2933 * @returns The shadow paging mode.
2934 * @param pVM The cross context VM structure.
2935 * @param enmGuestMode The guest mode.
2936 * @param enmHostMode The host mode.
2937 * @param enmShadowMode The current shadow mode.
2938 * @param penmSwitcher Where to store the switcher to use.
2939 * VMMSWITCHER_INVALID means no change.
2940 */
2941static PGMMODE pgmCalcShadowMode(PVM pVM, PGMMODE enmGuestMode, SUPPAGINGMODE enmHostMode, PGMMODE enmShadowMode,
2942 VMMSWITCHER *penmSwitcher)
2943{
2944 VMMSWITCHER enmSwitcher = VMMSWITCHER_INVALID;
2945 switch (enmGuestMode)
2946 {
2947 /*
2948 * When switching to real or protected mode we don't change
2949 * anything since it's likely that we'll switch back pretty soon.
2950 *
2951 * During pgmR3InitPaging we'll end up here with PGMMODE_INVALID,
2952 * and this code is supposed to determine which shadow paging mode
2953 * and switcher to use during init.
2954 */
2955 case PGMMODE_REAL:
2956 case PGMMODE_PROTECTED:
2957 if ( enmShadowMode != PGMMODE_INVALID
2958 && VM_IS_RAW_MODE_ENABLED(pVM) /* always switch in hm and nem modes! */)
2959 break; /* (no change) */
2960
2961 switch (enmHostMode)
2962 {
2963 case SUPPAGINGMODE_32_BIT:
2964 case SUPPAGINGMODE_32_BIT_GLOBAL:
2965 enmShadowMode = PGMMODE_32_BIT;
2966 enmSwitcher = VMMSWITCHER_32_TO_32;
2967 break;
2968
2969 case SUPPAGINGMODE_PAE:
2970 case SUPPAGINGMODE_PAE_NX:
2971 case SUPPAGINGMODE_PAE_GLOBAL:
2972 case SUPPAGINGMODE_PAE_GLOBAL_NX:
2973 enmShadowMode = PGMMODE_PAE;
2974 enmSwitcher = VMMSWITCHER_PAE_TO_PAE;
2975 break;
2976
2977 case SUPPAGINGMODE_AMD64:
2978 case SUPPAGINGMODE_AMD64_GLOBAL:
2979 case SUPPAGINGMODE_AMD64_NX:
2980 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
2981 enmShadowMode = PGMMODE_PAE;
2982 enmSwitcher = VMMSWITCHER_AMD64_TO_PAE;
2983 break;
2984
2985 default:
2986 AssertLogRelMsgFailedReturnStmt(("enmHostMode=%d\n", enmHostMode),
2987 *penmSwitcher = VMMSWITCHER_INVALID, PGMMODE_INVALID);
2988 }
2989 break;
2990
2991 case PGMMODE_32_BIT:
2992 switch (enmHostMode)
2993 {
2994 case SUPPAGINGMODE_32_BIT:
2995 case SUPPAGINGMODE_32_BIT_GLOBAL:
2996 enmShadowMode = PGMMODE_32_BIT;
2997 enmSwitcher = VMMSWITCHER_32_TO_32;
2998 break;
2999
3000 case SUPPAGINGMODE_PAE:
3001 case SUPPAGINGMODE_PAE_NX:
3002 case SUPPAGINGMODE_PAE_GLOBAL:
3003 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3004 enmShadowMode = PGMMODE_PAE;
3005 enmSwitcher = VMMSWITCHER_PAE_TO_PAE;
3006 break;
3007
3008 case SUPPAGINGMODE_AMD64:
3009 case SUPPAGINGMODE_AMD64_GLOBAL:
3010 case SUPPAGINGMODE_AMD64_NX:
3011 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3012 enmShadowMode = PGMMODE_PAE;
3013 enmSwitcher = VMMSWITCHER_AMD64_TO_PAE;
3014 break;
3015
3016 default:
3017 AssertLogRelMsgFailedReturnStmt(("enmHostMode=%d\n", enmHostMode),
3018 *penmSwitcher = VMMSWITCHER_INVALID, PGMMODE_INVALID);
3019 }
3020 break;
3021
3022 case PGMMODE_PAE:
3023 case PGMMODE_PAE_NX: /** @todo This might require more switchers and guest+both modes. */
3024 switch (enmHostMode)
3025 {
3026 case SUPPAGINGMODE_32_BIT:
3027 case SUPPAGINGMODE_32_BIT_GLOBAL:
3028 enmShadowMode = PGMMODE_PAE;
3029 enmSwitcher = VMMSWITCHER_32_TO_PAE;
3030 break;
3031
3032 case SUPPAGINGMODE_PAE:
3033 case SUPPAGINGMODE_PAE_NX:
3034 case SUPPAGINGMODE_PAE_GLOBAL:
3035 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3036 enmShadowMode = PGMMODE_PAE;
3037 enmSwitcher = VMMSWITCHER_PAE_TO_PAE;
3038 break;
3039
3040 case SUPPAGINGMODE_AMD64:
3041 case SUPPAGINGMODE_AMD64_GLOBAL:
3042 case SUPPAGINGMODE_AMD64_NX:
3043 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3044 enmShadowMode = PGMMODE_PAE;
3045 enmSwitcher = VMMSWITCHER_AMD64_TO_PAE;
3046 break;
3047
3048 default:
3049 AssertLogRelMsgFailedReturnStmt(("enmHostMode=%d\n", enmHostMode),
3050 *penmSwitcher = VMMSWITCHER_INVALID, PGMMODE_INVALID);
3051 }
3052 break;
3053
3054 case PGMMODE_AMD64:
3055 case PGMMODE_AMD64_NX:
3056 switch (enmHostMode)
3057 {
3058 case SUPPAGINGMODE_32_BIT:
3059 case SUPPAGINGMODE_32_BIT_GLOBAL:
3060 enmShadowMode = PGMMODE_AMD64;
3061 enmSwitcher = VMMSWITCHER_32_TO_AMD64;
3062 break;
3063
3064 case SUPPAGINGMODE_PAE:
3065 case SUPPAGINGMODE_PAE_NX:
3066 case SUPPAGINGMODE_PAE_GLOBAL:
3067 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3068 enmShadowMode = PGMMODE_AMD64;
3069 enmSwitcher = VMMSWITCHER_PAE_TO_AMD64;
3070 break;
3071
3072 case SUPPAGINGMODE_AMD64:
3073 case SUPPAGINGMODE_AMD64_GLOBAL:
3074 case SUPPAGINGMODE_AMD64_NX:
3075 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3076 enmShadowMode = PGMMODE_AMD64;
3077 enmSwitcher = VMMSWITCHER_AMD64_TO_AMD64;
3078 break;
3079
3080 default:
3081 AssertLogRelMsgFailedReturnStmt(("enmHostMode=%d\n", enmHostMode),
3082 *penmSwitcher = VMMSWITCHER_INVALID, PGMMODE_INVALID);
3083 }
3084 break;
3085
3086 default:
3087 AssertLogRelMsgFailedReturnStmt(("enmGuestMode=%d\n", enmGuestMode),
3088 *penmSwitcher = VMMSWITCHER_INVALID, PGMMODE_INVALID);
3089 }
3090
3091 /*
3092 * Override the shadow mode when NEM or nested paging is active.
3093 */
3094 if (VM_IS_NEM_ENABLED(pVM))
3095 {
3096 pVM->pgm.s.fNestedPaging = true;
3097 enmShadowMode = PGMMODE_EPT; /* whatever harmless... */
3098 }
3099 else
3100 {
3101 bool fNestedPaging = HMIsNestedPagingActive(pVM);
3102 pVM->pgm.s.fNestedPaging = fNestedPaging;
3103 if (fNestedPaging)
3104 {
3105 if (HMIsVmxActive(pVM))
3106 enmShadowMode = PGMMODE_EPT;
3107 else
3108 {
3109 /* The nested SVM paging depends on the host one. */
3110 Assert(HMIsSvmActive(pVM));
3111 if ( enmGuestMode == PGMMODE_AMD64
3112 || enmGuestMode == PGMMODE_AMD64_NX)
3113 enmShadowMode = PGMMODE_NESTED_AMD64;
3114 else
3115 switch (pVM->pgm.s.enmHostMode)
3116 {
3117 case SUPPAGINGMODE_32_BIT:
3118 case SUPPAGINGMODE_32_BIT_GLOBAL:
3119 enmShadowMode = PGMMODE_NESTED_32BIT;
3120 break;
3121
3122 case SUPPAGINGMODE_PAE:
3123 case SUPPAGINGMODE_PAE_GLOBAL:
3124 case SUPPAGINGMODE_PAE_NX:
3125 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3126 enmShadowMode = PGMMODE_NESTED_PAE;
3127 break;
3128
3129#if HC_ARCH_BITS == 64 || defined(RT_OS_DARWIN)
3130 case SUPPAGINGMODE_AMD64:
3131 case SUPPAGINGMODE_AMD64_GLOBAL:
3132 case SUPPAGINGMODE_AMD64_NX:
3133 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3134 enmShadowMode = PGMMODE_NESTED_AMD64;
3135 break;
3136#endif
3137 default:
3138 AssertLogRelMsgFailedReturnStmt(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode),
3139 *penmSwitcher = VMMSWITCHER_INVALID, PGMMODE_INVALID);
3140 }
3141 }
3142 }
3143 }
3144
3145 *penmSwitcher = enmSwitcher;
3146 return enmShadowMode;
3147}
3148
3149
3150/**
3151 * Performs the actual mode change.
3152 * This is called by PGMChangeMode and pgmR3InitPaging().
3153 *
3154 * @returns VBox status code. May suspend or power off the VM on error, but this
3155 * will trigger using FFs and not informational status codes.
3156 *
3157 * @param pVM The cross context VM structure.
3158 * @param pVCpu The cross context virtual CPU structure.
3159 * @param enmGuestMode The new guest mode. This is assumed to be different from
3160 * the current mode.
3161 */
3162VMM_INT_DECL(int) PGMHCChangeMode(PVM pVM, PVMCPU pVCpu, PGMMODE enmGuestMode)
3163{
3164 Log(("PGMHCChangeMode: Guest mode: %s -> %s\n", PGMGetModeName(pVCpu->pgm.s.enmGuestMode), PGMGetModeName(enmGuestMode)));
3165 STAM_REL_COUNTER_INC(&pVCpu->pgm.s.cGuestModeChanges);
3166
3167 /*
3168 * Calc the shadow mode and switcher.
3169 */
3170 VMMSWITCHER enmSwitcher = VMMSWITCHER_INVALID;
3171 PGMMODE enmShadowMode = PGMMODE_INVALID;
3172 enmShadowMode = pgmCalcShadowMode(pVM, enmGuestMode, pVM->pgm.s.enmHostMode, pVCpu->pgm.s.enmShadowMode, &enmSwitcher);
3173
3174#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3175 if ( enmSwitcher != VMMSWITCHER_INVALID
3176 && VM_IS_RAW_MODE_ENABLED(pVM))
3177 {
3178 /*
3179 * Select new switcher.
3180 */
3181 int rc = VMMR3SelectSwitcher(pVM, enmSwitcher);
3182 AssertLogRelMsgRCReturn(rc,("VMMR3SelectSwitcher(%d) -> %Rrc\n", enmSwitcher, rc), rc);
3183 }
3184#endif
3185
3186 /*
3187 * Exit old mode(s).
3188 */
3189 /* shadow */
3190 if (enmShadowMode != pVCpu->pgm.s.enmShadowMode)
3191 {
3192 LogFlow(("PGMHCChangeMode: Shadow mode: %s -> %s\n", PGMGetModeName(pVCpu->pgm.s.enmShadowMode), PGMGetModeName(enmShadowMode)));
3193 uintptr_t idxOldShw = pVCpu->pgm.s.idxShadowModeData;
3194 if ( idxOldShw < RT_ELEMENTS(g_aPgmShadowModeData)
3195 && g_aPgmShadowModeData[idxOldShw].pfnExit)
3196 {
3197 int rc = g_aPgmShadowModeData[idxOldShw].pfnExit(pVCpu);
3198 AssertMsgRCReturn(rc, ("Exit failed for shadow mode %d: %Rrc\n", pVCpu->pgm.s.enmShadowMode, rc), rc);
3199 }
3200 }
3201 else
3202 LogFlow(("PGMHCChangeMode: Shadow mode remains: %s\n", PGMGetModeName(pVCpu->pgm.s.enmShadowMode)));
3203
3204 /* guest */
3205 uintptr_t const idxOldGst = pVCpu->pgm.s.idxGuestModeData;
3206 if ( idxOldGst < RT_ELEMENTS(g_aPgmGuestModeData)
3207 && g_aPgmGuestModeData[idxOldGst].pfnExit)
3208 {
3209 int rc = g_aPgmGuestModeData[idxOldGst].pfnExit(pVCpu);
3210 AssertMsgReturn(RT_SUCCESS(rc), ("Exit failed for guest mode %d: %Rrc\n", pVCpu->pgm.s.enmGuestMode, rc), rc);
3211 }
3212 pVCpu->pgm.s.GCPhysCR3 = NIL_RTGCPHYS;
3213
3214 /*
3215 * Change the paging mode data indexes.
3216 */
3217 uintptr_t idxNewGst = pVCpu->pgm.s.idxGuestModeData = pgmModeToType(enmGuestMode);
3218 AssertReturn(idxNewGst < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
3219 AssertReturn(g_aPgmGuestModeData[idxNewGst].uType == idxNewGst, VERR_PGM_MODE_IPE);
3220 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnGetPage, VERR_PGM_MODE_IPE);
3221 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnModifyPage, VERR_PGM_MODE_IPE);
3222 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnGetPDE, VERR_PGM_MODE_IPE);
3223 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnExit, VERR_PGM_MODE_IPE);
3224 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnEnter, VERR_PGM_MODE_IPE);
3225#ifdef IN_RING3
3226 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnRelocate, VERR_PGM_MODE_IPE);
3227#endif
3228
3229 uintptr_t const idxNewShw = pVCpu->pgm.s.idxShadowModeData = pgmModeToType(enmShadowMode);
3230 AssertReturn(idxNewShw < RT_ELEMENTS(g_aPgmShadowModeData), VERR_PGM_MODE_IPE);
3231 AssertReturn(g_aPgmShadowModeData[idxNewShw].uType == idxNewShw, VERR_PGM_MODE_IPE);
3232 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnGetPage, VERR_PGM_MODE_IPE);
3233 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnModifyPage, VERR_PGM_MODE_IPE);
3234 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnExit, VERR_PGM_MODE_IPE);
3235 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnEnter, VERR_PGM_MODE_IPE);
3236#ifdef IN_RING3
3237 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnRelocate, VERR_PGM_MODE_IPE);
3238#endif
3239
3240 uintptr_t const idxNewBth = pVCpu->pgm.s.idxBothModeData = (idxNewShw - PGM_TYPE_FIRST_SHADOW) * PGM_TYPE_END + idxNewGst;
3241 AssertReturn(g_aPgmBothModeData[idxNewBth].uShwType == idxNewShw, VERR_PGM_MODE_IPE);
3242 AssertReturn(g_aPgmBothModeData[idxNewBth].uGstType == idxNewGst, VERR_PGM_MODE_IPE);
3243 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnInvalidatePage, VERR_PGM_MODE_IPE);
3244 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnSyncCR3, VERR_PGM_MODE_IPE);
3245 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnPrefetchPage, VERR_PGM_MODE_IPE);
3246 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnVerifyAccessSyncPage, VERR_PGM_MODE_IPE);
3247 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnMapCR3, VERR_PGM_MODE_IPE);
3248 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnUnmapCR3, VERR_PGM_MODE_IPE);
3249 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnEnter, VERR_PGM_MODE_IPE);
3250#ifdef IN_RING3
3251 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnRelocate, VERR_PGM_MODE_IPE);
3252#endif
3253#ifdef VBOX_STRICT
3254 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnAssertCR3, VERR_PGM_MODE_IPE);
3255#endif
3256
3257 /*
3258 * Enter new shadow mode (if changed).
3259 */
3260 if (enmShadowMode != pVCpu->pgm.s.enmShadowMode)
3261 {
3262 pVCpu->pgm.s.enmShadowMode = enmShadowMode;
3263 int rc = g_aPgmShadowModeData[idxNewShw].pfnEnter(pVCpu, enmGuestMode >= PGMMODE_AMD64);
3264 AssertLogRelMsgRCReturnStmt(rc, ("Entering enmShadowMode=%s failed: %Rrc\n", PGMGetModeName(enmShadowMode), rc),
3265 pVCpu->pgm.s.enmShadowMode = PGMMODE_INVALID, rc);
3266 }
3267
3268 /*
3269 * Always flag the necessary updates
3270 */
3271 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3272
3273 /*
3274 * Enter the new guest and shadow+guest modes.
3275 */
3276 /* Calc the new CR3 value. */
3277 RTGCPHYS GCPhysCR3;
3278 switch (enmGuestMode)
3279 {
3280 case PGMMODE_REAL:
3281 case PGMMODE_PROTECTED:
3282 GCPhysCR3 = NIL_RTGCPHYS;
3283 break;
3284
3285 case PGMMODE_32_BIT:
3286 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_PAGE_MASK;
3287 break;
3288
3289 case PGMMODE_PAE_NX:
3290 case PGMMODE_PAE:
3291 if (!pVM->cpum.ro.GuestFeatures.fPae)
3292 return VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_FATAL, "PAEmode",
3293 N_("The guest is trying to switch to the PAE mode which is currently disabled by default in VirtualBox. PAE support can be enabled using the VM settings (System/Processor)"));
3294 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_PAE_PAGE_MASK;
3295 break;
3296
3297#ifdef VBOX_WITH_64_BITS_GUESTS
3298 case PGMMODE_AMD64_NX:
3299 case PGMMODE_AMD64:
3300 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_AMD64_PAGE_MASK;
3301 break;
3302#endif
3303 default:
3304 AssertLogRelMsgFailedReturn(("enmGuestMode=%d\n", enmGuestMode), VERR_PGM_MODE_IPE);
3305 }
3306
3307 /* Enter the new guest mode. */
3308 pVCpu->pgm.s.enmGuestMode = enmGuestMode;
3309 int rc = g_aPgmGuestModeData[idxNewGst].pfnEnter(pVCpu, GCPhysCR3);
3310 int rc2 = g_aPgmBothModeData[idxNewBth].pfnEnter(pVCpu, GCPhysCR3);
3311
3312 /* Set the new guest CR3. */
3313 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
3314
3315 /* status codes. */
3316 AssertRC(rc);
3317 AssertRC(rc2);
3318 if (RT_SUCCESS(rc))
3319 {
3320 rc = rc2;
3321 if (RT_SUCCESS(rc)) /* no informational status codes. */
3322 rc = VINF_SUCCESS;
3323 }
3324
3325 /*
3326 * Notify HM.
3327 */
3328 HMHCPagingModeChanged(pVM, pVCpu, pVCpu->pgm.s.enmShadowMode, pVCpu->pgm.s.enmGuestMode);
3329 return rc;
3330}
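
/*
 * A minimal sketch of the CR3 masking performed above, assuming a hypothetical
 * helper name; only the mode values and masks from the switch are taken from
 * the real code:
 *
 * @code
 *  static RTGCPHYS pgmSketchGuestCR3ToPhys(PGMMODE enmGuestMode, uint64_t uCr3)
 *  {
 *      switch (enmGuestMode)
 *      {
 *          case PGMMODE_REAL:
 *          case PGMMODE_PROTECTED: return NIL_RTGCPHYS;                    // no paging, no CR3 base
 *          case PGMMODE_32_BIT:    return uCr3 & X86_CR3_PAGE_MASK;        // 4 KB aligned page directory
 *          case PGMMODE_PAE:
 *          case PGMMODE_PAE_NX:    return uCr3 & X86_CR3_PAE_PAGE_MASK;    // 32 byte aligned PDPT
 *          case PGMMODE_AMD64:
 *          case PGMMODE_AMD64_NX:  return uCr3 & X86_CR3_AMD64_PAGE_MASK;  // 4 KB aligned PML4
 *          default:                return NIL_RTGCPHYS;
 *      }
 *  }
 * @endcode
 */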
3331
3332#endif /* !IN_RC */
3333
3334/**
3335 * Called by CPUM or REM when CR0.WP changes to 1.
3336 *
3337 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3338 * @thread EMT
3339 */
3340VMMDECL(void) PGMCr0WpEnabled(PVMCPU pVCpu)
3341{
3342 /*
3343 * Netware WP0+RO+US hack cleanup when WP0 -> WP1.
3344 *
3345 * Use the counter to judge whether there might be pool pages with active
3346 * hacks in them. If there are, we will be running the risk of messing up
3347 * the guest by allowing it to write to read-only pages. Thus, we have to
3348 * clear the page pool ASAP if there is the slightest chance.
3349 */
3350 if (pVCpu->pgm.s.cNetwareWp0Hacks > 0)
3351 {
3352 Assert(pVCpu->CTX_SUFF(pVM)->cCpus == 1);
3353
3354 Log(("PGMCr0WpEnabled: %llu WP0 hacks active - clearing page pool\n", pVCpu->pgm.s.cNetwareWp0Hacks));
3355 pVCpu->pgm.s.cNetwareWp0Hacks = 0;
3356 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
3357 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3358 }
3359}
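
/*
 * A sketch of how a CR0-tracking caller would use this, assuming hypothetical
 * uOldCr0/uNewCr0 locals; the call is only interesting on the 0 -> 1
 * transition of CR0.WP:
 *
 * @code
 *  if (   !(uOldCr0 & X86_CR0_WP)
 *      &&  (uNewCr0 & X86_CR0_WP))
 *      PGMCr0WpEnabled(pVCpu);   // drop any active netware WP0+RO+US hacks
 * @endcode
 */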
3360
3361
3362/**
3363 * Gets the current guest paging mode.
3364 *
3365 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
3366 *
3367 * @returns The current paging mode.
3368 * @param pVCpu The cross context virtual CPU structure.
3369 */
3370VMMDECL(PGMMODE) PGMGetGuestMode(PVMCPU pVCpu)
3371{
3372 return pVCpu->pgm.s.enmGuestMode;
3373}
3374
3375
3376/**
3377 * Gets the current shadow paging mode.
3378 *
3379 * @returns The current paging mode.
3380 * @param pVCpu The cross context virtual CPU structure.
3381 */
3382VMMDECL(PGMMODE) PGMGetShadowMode(PVMCPU pVCpu)
3383{
3384 return pVCpu->pgm.s.enmShadowMode;
3385}
3386
3387
3388/**
3389 * Gets the current host paging mode.
3390 *
3391 * @returns The current paging mode.
3392 * @param pVM The cross context VM structure.
3393 */
3394VMMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
3395{
3396 switch (pVM->pgm.s.enmHostMode)
3397 {
3398 case SUPPAGINGMODE_32_BIT:
3399 case SUPPAGINGMODE_32_BIT_GLOBAL:
3400 return PGMMODE_32_BIT;
3401
3402 case SUPPAGINGMODE_PAE:
3403 case SUPPAGINGMODE_PAE_GLOBAL:
3404 return PGMMODE_PAE;
3405
3406 case SUPPAGINGMODE_PAE_NX:
3407 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3408 return PGMMODE_PAE_NX;
3409
3410 case SUPPAGINGMODE_AMD64:
3411 case SUPPAGINGMODE_AMD64_GLOBAL:
3412 return PGMMODE_AMD64;
3413
3414 case SUPPAGINGMODE_AMD64_NX:
3415 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3416 return PGMMODE_AMD64_NX;
3417
3418 default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
3419 }
3420
3421 return PGMMODE_INVALID;
3422}
3423
3424
3425/**
3426 * Gets the mode name.
3427 *
3428 * @returns read-only name string.
3429 * @param enmMode The mode whose name is desired.
3430 */
3431VMMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
3432{
3433 switch (enmMode)
3434 {
3435 case PGMMODE_REAL: return "Real";
3436 case PGMMODE_PROTECTED: return "Protected";
3437 case PGMMODE_32_BIT: return "32-bit";
3438 case PGMMODE_PAE: return "PAE";
3439 case PGMMODE_PAE_NX: return "PAE+NX";
3440 case PGMMODE_AMD64: return "AMD64";
3441 case PGMMODE_AMD64_NX: return "AMD64+NX";
3442 case PGMMODE_NESTED_32BIT: return "Nested-32";
3443 case PGMMODE_NESTED_PAE: return "Nested-PAE";
3444 case PGMMODE_NESTED_AMD64: return "Nested-AMD64";
3445 case PGMMODE_EPT: return "EPT";
3446 default: return "unknown mode value";
3447 }
3448}
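
/*
 * Typical logging usage of the getters above together with PGMGetModeName
 * (hypothetical snippet, assumes pVM and pVCpu are in scope):
 *
 * @code
 *  LogRel(("PGM: guest=%s shadow=%s host=%s\n",
 *          PGMGetModeName(PGMGetGuestMode(pVCpu)),
 *          PGMGetModeName(PGMGetShadowMode(pVCpu)),
 *          PGMGetModeName(PGMGetHostMode(pVM))));
 * @endcode
 */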
3449
3450
3451/**
3452 * Gets the physical address represented in the guest CR3 as PGM sees it.
3453 *
3454 * This is mainly for logging and debugging.
3455 *
3456 * @returns PGM's guest CR3 value.
3457 * @param pVCpu The cross context virtual CPU structure.
3458 */
3459VMM_INT_DECL(RTGCPHYS) PGMGetGuestCR3Phys(PVMCPU pVCpu)
3460{
3461 return pVCpu->pgm.s.GCPhysCR3;
3462}
3463
3464
3465
3466/**
3467 * Notification from CPUM that the EFER.NXE bit has changed.
3468 *
3469 * @param pVCpu The cross context virtual CPU structure of the CPU for
3470 * which EFER changed.
3471 * @param fNxe The new NXE state.
3472 */
3473VMM_INT_DECL(void) PGMNotifyNxeChanged(PVMCPU pVCpu, bool fNxe)
3474{
3475/** @todo VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu); */
3476 Log(("PGMNotifyNxeChanged: fNxe=%RTbool\n", fNxe));
3477
3478 pVCpu->pgm.s.fNoExecuteEnabled = fNxe;
3479 if (fNxe)
3480 {
3481 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
3482 pVCpu->pgm.s.fGstPaeMbzPteMask &= ~X86_PTE_PAE_NX;
3483 pVCpu->pgm.s.fGstPaeMbzPdeMask &= ~X86_PDE_PAE_NX;
3484 pVCpu->pgm.s.fGstPaeMbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
3485 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask - N/A */
3486 pVCpu->pgm.s.fGstAmd64MbzPteMask &= ~X86_PTE_PAE_NX;
3487 pVCpu->pgm.s.fGstAmd64MbzPdeMask &= ~X86_PDE_PAE_NX;
3488 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
3489 pVCpu->pgm.s.fGstAmd64MbzPdpeMask &= ~X86_PDPE_LM_NX;
3490 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask &= ~X86_PDPE_LM_NX;
3491 pVCpu->pgm.s.fGstAmd64MbzPml4eMask &= ~X86_PML4E_NX;
3492
3493 pVCpu->pgm.s.fGst64ShadowedPteMask |= X86_PTE_PAE_NX;
3494 pVCpu->pgm.s.fGst64ShadowedPdeMask |= X86_PDE_PAE_NX;
3495 pVCpu->pgm.s.fGst64ShadowedBigPdeMask |= X86_PDE2M_PAE_NX;
3496 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask |= X86_PDE2M_PAE_NX;
3497 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask |= X86_PDPE_LM_NX;
3498 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask |= X86_PML4E_NX;
3499 }
3500 else
3501 {
3502 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
3503 pVCpu->pgm.s.fGstPaeMbzPteMask |= X86_PTE_PAE_NX;
3504 pVCpu->pgm.s.fGstPaeMbzPdeMask |= X86_PDE_PAE_NX;
3505 pVCpu->pgm.s.fGstPaeMbzBigPdeMask |= X86_PDE2M_PAE_NX;
3506        /*pVCpu->pgm.s.fGstPaeMbzPdpeMask - N/A */
3507 pVCpu->pgm.s.fGstAmd64MbzPteMask |= X86_PTE_PAE_NX;
3508 pVCpu->pgm.s.fGstAmd64MbzPdeMask |= X86_PDE_PAE_NX;
3509 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask |= X86_PDE2M_PAE_NX;
3510 pVCpu->pgm.s.fGstAmd64MbzPdpeMask |= X86_PDPE_LM_NX;
3511 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask |= X86_PDPE_LM_NX;
3512 pVCpu->pgm.s.fGstAmd64MbzPml4eMask |= X86_PML4E_NX;
3513
3514 pVCpu->pgm.s.fGst64ShadowedPteMask &= ~X86_PTE_PAE_NX;
3515 pVCpu->pgm.s.fGst64ShadowedPdeMask &= ~X86_PDE_PAE_NX;
3516 pVCpu->pgm.s.fGst64ShadowedBigPdeMask &= ~X86_PDE2M_PAE_NX;
3517 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask &= ~X86_PDE2M_PAE_NX;
3518 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask &= ~X86_PDPE_LM_NX;
3519 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask &= ~X86_PML4E_NX;
3520 }
3521}
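
/*
 * Sketch of how the must-be-zero masks above are meant to be consumed
 * (an assumption for illustration; uGstPte stands for a raw guest PAE PTE
 * value): a reserved-bit check stays a single AND, and the NX bit only trips
 * it while EFER.NXE is clear:
 *
 * @code
 *  // uGstPte: hypothetical raw guest PAE PTE value.
 *  bool const fRsvdBitsSet = RT_BOOL(uGstPte & pVCpu->pgm.s.fGstPaeMbzPteMask);
 * @endcode
 */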
3522
3523
3524/**
3525 * Check if any PGM pool pages are marked dirty (not monitored).
3526 *
3527 * @returns true if there are dirty pages, false if not.
3528 * @param pVM The cross context VM structure.
3529 */
3530VMMDECL(bool) PGMHasDirtyPages(PVM pVM)
3531{
3532 return pVM->pgm.s.CTX_SUFF(pPool)->cDirtyPages != 0;
3533}
3534
3535
3536/**
3537 * Check if the calling thread currently owns the PGM lock.
3538 *
3539 * @returns true if owner, false if not.
3540 * @param pVM The cross context VM structure.
3541 */
3542VMMDECL(bool) PGMIsLockOwner(PVM pVM)
3543{
3544 return PDMCritSectIsOwner(&pVM->pgm.s.CritSectX);
3545}
3546
3547
3548/**
3549 * Enable or disable large page usage.
3550 *
3551 * @returns VBox status code.
3552 * @param pVM The cross context VM structure.
3553 * @param fUseLargePages Whether to use large pages.
3554 */
3555VMMDECL(int) PGMSetLargePageUsage(PVM pVM, bool fUseLargePages)
3556{
3557 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3558
3559 pVM->fUseLargePages = fUseLargePages;
3560 return VINF_SUCCESS;
3561}
3562
3563
3564/**
3565 * Acquire the PGM lock.
3566 *
3567 * @returns VBox status code
3568 * @param pVM The cross context VM structure.
3569 * @param SRC_POS The source position of the caller (RT_SRC_POS).
3570 */
3571#if (defined(VBOX_STRICT) && defined(IN_RING3)) || defined(DOXYGEN_RUNNING)
3572int pgmLockDebug(PVM pVM, RT_SRC_POS_DECL)
3573#else
3574int pgmLock(PVM pVM)
3575#endif
3576{
3577#if defined(VBOX_STRICT) && defined(IN_RING3)
3578 int rc = PDMCritSectEnterDebug(&pVM->pgm.s.CritSectX, VERR_SEM_BUSY, (uintptr_t)ASMReturnAddress(), RT_SRC_POS_ARGS);
3579#else
3580 int rc = PDMCritSectEnter(&pVM->pgm.s.CritSectX, VERR_SEM_BUSY);
3581#endif
3582#if defined(IN_RC) || defined(IN_RING0)
3583 if (rc == VERR_SEM_BUSY)
3584 rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_LOCK, 0);
3585#endif
3586 AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc));
3587 return rc;
3588}
3589
3590
3591/**
3592 * Release the PGM lock.
3593 *
3595 * @param pVM The cross context VM structure.
3596 */
3597void pgmUnlock(PVM pVM)
3598{
3599 uint32_t cDeprecatedPageLocks = pVM->pgm.s.cDeprecatedPageLocks;
3600 pVM->pgm.s.cDeprecatedPageLocks = 0;
3601 int rc = PDMCritSectLeave(&pVM->pgm.s.CritSectX);
3602 if (rc == VINF_SEM_NESTED)
3603 pVM->pgm.s.cDeprecatedPageLocks = cDeprecatedPageLocks;
3604}
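
/*
 * The usual locking pattern around page lookups (hypothetical worker; GCPhys
 * assumed in scope), as also seen in the function below:
 *
 * @code
 *  pgmLock(pVM);
 *  PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);   // only safe while holding the lock
 *  bool const fPresent = pPage != NULL;
 *  pgmUnlock(pVM);
 * @endcode
 */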
3605
3606#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
3607
3608/**
3609 * Common worker for pgmRZDynMapGCPageOffInlined and pgmRZDynMapGCPageV2Inlined.
3610 *
3611 * @returns VBox status code.
3612 * @param pVM The cross context VM structure.
3613 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3614 * @param GCPhys The guest physical address of the page to map. The
3615 * offset bits are not ignored.
3616 * @param ppv Where to return the address corresponding to @a GCPhys.
3617 * @param SRC_POS The source position of the caller (RT_SRC_POS).
3618 */
3619int pgmRZDynMapGCPageCommon(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
3620{
3621 pgmLock(pVM);
3622
3623 /*
3624     * Convert it to a writable page and pass it on to the dynamic mapper.
3625 */
3626 int rc;
3627 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
3628 if (RT_LIKELY(pPage))
3629 {
3630 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
3631 if (RT_SUCCESS(rc))
3632 {
3633 void *pv;
3634 rc = pgmRZDynMapHCPageInlined(pVCpu, PGM_PAGE_GET_HCPHYS(pPage), &pv RTLOG_COMMA_SRC_POS_ARGS);
3635 if (RT_SUCCESS(rc))
3636 *ppv = (void *)((uintptr_t)pv | ((uintptr_t)GCPhys & PAGE_OFFSET_MASK));
3637 }
3638 else
3639 AssertRC(rc);
3640 }
3641 else
3642 {
3643 AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
3644 rc = VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
3645 }
3646
3647 pgmUnlock(pVM);
3648 return rc;
3649}
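
/*
 * Sketch of a hypothetical caller (RC / 2x4GB ring-0 builds only); note that
 * the offset bits of GCPhys are preserved in the returned pointer.  The
 * RTLOG_COMMA_SRC_POS argument matches the RTLOG_COMMA_SRC_POS_DECL parameter
 * above:
 *
 * @code
 *  uint32_t *pu32;
 *  int rc = pgmRZDynMapGCPageCommon(pVM, pVCpu, GCPhys, (void **)&pu32 RTLOG_COMMA_SRC_POS);
 *  if (RT_SUCCESS(rc))
 *      u32Value = *pu32;
 * @endcode
 */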
3650
3651#endif /* IN_RC || VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
3652#if !defined(IN_R0) || defined(LOG_ENABLED)
3653
3654/** Format handler for PGMPAGE.
3655 * @copydoc FNRTSTRFORMATTYPE */
3656static DECLCALLBACK(size_t) pgmFormatTypeHandlerPage(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
3657 const char *pszType, void const *pvValue,
3658 int cchWidth, int cchPrecision, unsigned fFlags,
3659 void *pvUser)
3660{
3661 size_t cch;
3662 PCPGMPAGE pPage = (PCPGMPAGE)pvValue;
3663 if (RT_VALID_PTR(pPage))
3664 {
3665 char szTmp[64+80];
3666
3667 cch = 0;
3668
3669 /* The single char state stuff. */
3670 static const char s_achPageStates[4] = { 'Z', 'A', 'W', 'S' };
3671 szTmp[cch++] = s_achPageStates[PGM_PAGE_GET_STATE_NA(pPage)];
3672
3673#define IS_PART_INCLUDED(lvl) ( !(fFlags & RTSTR_F_PRECISION) || cchPrecision == (lvl) || cchPrecision >= (lvl)+10 )
3674 if (IS_PART_INCLUDED(5))
3675 {
3676 static const char s_achHandlerStates[4] = { '-', 't', 'w', 'a' };
3677 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)];
3678 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_VIRT_STATE(pPage)];
3679 }
3680
3681 /* The type. */
3682 if (IS_PART_INCLUDED(4))
3683 {
3684 szTmp[cch++] = ':';
3685 static const char s_achPageTypes[8][4] = { "INV", "RAM", "MI2", "M2A", "SHA", "ROM", "MIO", "BAD" };
3686 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][0];
3687 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][1];
3688 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][2];
3689 }
3690
3691 /* The numbers. */
3692 if (IS_PART_INCLUDED(3))
3693 {
3694 szTmp[cch++] = ':';
3695 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_HCPHYS_NA(pPage), 16, 12, 0, RTSTR_F_ZEROPAD | RTSTR_F_64BIT);
3696 }
3697
3698 if (IS_PART_INCLUDED(2))
3699 {
3700 szTmp[cch++] = ':';
3701 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_PAGEID(pPage), 16, 7, 0, RTSTR_F_ZEROPAD | RTSTR_F_32BIT);
3702 }
3703
3704 if (IS_PART_INCLUDED(6))
3705 {
3706 szTmp[cch++] = ':';
3707 static const char s_achRefs[4] = { '-', 'U', '!', 'L' };
3708 szTmp[cch++] = s_achRefs[PGM_PAGE_GET_TD_CREFS_NA(pPage)];
3709 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_TD_IDX_NA(pPage), 16, 4, 0, RTSTR_F_ZEROPAD | RTSTR_F_16BIT);
3710 }
3711#undef IS_PART_INCLUDED
3712
3713 cch = pfnOutput(pvArgOutput, szTmp, cch);
3714 }
3715 else
3716 cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmpage-ptr>"));
3717 NOREF(pszType); NOREF(cchWidth); NOREF(pvUser);
3718 return cch;
3719}
3720
3721
3722/** Format handler for PGMRAMRANGE.
3723 * @copydoc FNRTSTRFORMATTYPE */
3724static DECLCALLBACK(size_t) pgmFormatTypeHandlerRamRange(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
3725 const char *pszType, void const *pvValue,
3726 int cchWidth, int cchPrecision, unsigned fFlags,
3727 void *pvUser)
3728{
3729 size_t cch;
3730 PGMRAMRANGE const *pRam = (PGMRAMRANGE const *)pvValue;
3731 if (VALID_PTR(pRam))
3732 {
3733 char szTmp[80];
3734 cch = RTStrPrintf(szTmp, sizeof(szTmp), "%RGp-%RGp", pRam->GCPhys, pRam->GCPhysLast);
3735 cch = pfnOutput(pvArgOutput, szTmp, cch);
3736 }
3737 else
3738 cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmramrange-ptr>"));
3739 NOREF(pszType); NOREF(cchWidth); NOREF(cchPrecision); NOREF(pvUser); NOREF(fFlags);
3740 return cch;
3741}
3742
3743/** Format type handlers to be registered/deregistered. */
3744static const struct
3745{
3746 char szType[24];
3747 PFNRTSTRFORMATTYPE pfnHandler;
3748} g_aPgmFormatTypes[] =
3749{
3750 { "pgmpage", pgmFormatTypeHandlerPage },
3751 { "pgmramrange", pgmFormatTypeHandlerRamRange }
3752};
3753
3754#endif /* !IN_R0 || LOG_ENABLED */
3755
3756/**
3757 * Registers the global string format types.
3758 *
3759 * This should be called at module load time or in some other manner that ensures
3760 * that it's called exactly once.
3761 *
3762 * @returns VINF_SUCCESS on success, IPRT status code on RTStrFormatTypeRegister failure.
3763 */
3764VMMDECL(int) PGMRegisterStringFormatTypes(void)
3765{
3766#if !defined(IN_R0) || defined(LOG_ENABLED)
3767 int rc = VINF_SUCCESS;
3768 unsigned i;
3769 for (i = 0; RT_SUCCESS(rc) && i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
3770 {
3771 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
3772# ifdef IN_RING0
3773 if (rc == VERR_ALREADY_EXISTS)
3774 {
3775 /* in case of cleanup failure in ring-0 */
3776 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
3777 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
3778 }
3779# endif
3780 }
3781 if (RT_FAILURE(rc))
3782 while (i-- > 0)
3783 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
3784
3785 return rc;
3786#else
3787 return VINF_SUCCESS;
3788#endif
3789}
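
/*
 * Once registered, the types are consumed via the IPRT %R[...] format
 * extension; hypothetical log statements:
 *
 * @code
 *  Log(("pgm: %RGp is %R[pgmpage]\n", GCPhys, pPage));
 *  Log(("pgm: range %R[pgmramrange]\n", pRam));
 * @endcode
 */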
3790
3791
3792/**
3793 * Deregisters the global string format types.
3794 *
3795 * This should be called at module unload time or in some other manner that
3796 * ensures that it's called exactly once.
3797 */
3798VMMDECL(void) PGMDeregisterStringFormatTypes(void)
3799{
3800#if !defined(IN_R0) || defined(LOG_ENABLED)
3801 for (unsigned i = 0; i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
3802 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
3803#endif
3804}
3805
3806#ifdef VBOX_STRICT
3807
3808/**
3809 * Asserts that there are no mapping conflicts.
3810 *
3811 * @returns Number of conflicts.
3812 * @param pVM The cross context VM structure.
3813 */
3814VMMDECL(unsigned) PGMAssertNoMappingConflicts(PVM pVM)
3815{
3816 unsigned cErrors = 0;
3817
3818    /* Only applies to raw mode -> 1 VCPU. */
3819 Assert(pVM->cCpus == 1);
3820 PVMCPU pVCpu = &pVM->aCpus[0];
3821
3822 /*
3823 * Check for mapping conflicts.
3824 */
3825 for (PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
3826 pMapping;
3827 pMapping = pMapping->CTX_SUFF(pNext))
3828 {
3829 /** @todo This is slow and should be optimized, but since it's just assertions I don't care now. */
3830 for (RTGCPTR GCPtr = pMapping->GCPtr;
3831 GCPtr <= pMapping->GCPtrLast;
3832 GCPtr += PAGE_SIZE)
3833 {
3834 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)GCPtr, NULL, NULL);
3835 if (rc != VERR_PAGE_TABLE_NOT_PRESENT)
3836 {
3837 AssertMsgFailed(("Conflict at %RGv with %s\n", GCPtr, R3STRING(pMapping->pszDesc)));
3838 cErrors++;
3839 break;
3840 }
3841 }
3842 }
3843
3844 return cErrors;
3845}
3846
3847
3848/**
3849 * Asserts that everything related to the guest CR3 is correctly shadowed.
3850 *
3851 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
3852 * and assert the correctness of the guest CR3 mapping before asserting that the
3853 * shadow page tables are in sync with the guest page tables.
3854 *
3855 * @returns Number of conflicts.
3856 * @param pVM The cross context VM structure.
3857 * @param pVCpu The cross context virtual CPU structure.
3858 * @param cr3 The current guest CR3 register value.
3859 * @param cr4 The current guest CR4 register value.
3860 */
3861VMMDECL(unsigned) PGMAssertCR3(PVM pVM, PVMCPU pVCpu, uint64_t cr3, uint64_t cr4)
3862{
3863 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
3864
3865 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
3866 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), -VERR_PGM_MODE_IPE);
3867 AssertReturn(g_aPgmBothModeData[idxBth].pfnAssertCR3, -VERR_PGM_MODE_IPE);
3868
3869 pgmLock(pVM);
3870 unsigned cErrors = g_aPgmBothModeData[idxBth].pfnAssertCR3(pVCpu, cr3, cr4, 0, ~(RTGCPTR)0);
3871 pgmUnlock(pVM);
3872
3873 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
3874 return cErrors;
3875}
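
/*
 * Sketch of a strict-build call site (hypothetical; assumes EMT context and
 * uses the current guest control registers):
 *
 * @code
 *  unsigned cErrors = PGMAssertCR3(pVM, pVCpu, CPUMGetGuestCR3(pVCpu), CPUMGetGuestCR4(pVCpu));
 *  Assert(!cErrors);
 * @endcode
 */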
3876
3877#endif /* VBOX_STRICT */
3878