VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp@ 77863

Last change on this file since 77863 was 76993, checked in by vboxsync, 6 years ago

VMM: Nested VMX: bugref:9180 Allow fetching VM-exit names from ring-0 as well. Various naming cleanups. Added HMDumpHwvirtVmxState() to be able to dump virtual VMCS state from ring-0 as well. Removed the unused HMIsVmxSupported() function.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 140.5 KB
Line 
1/* $Id: PGMAll.cpp 76993 2019-01-25 14:34:46Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor - All context code.
4 */
5
6/*
7 * Copyright (C) 2006-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_PGM
23#include <VBox/vmm/pgm.h>
24#include <VBox/vmm/cpum.h>
25#include <VBox/vmm/selm.h>
26#include <VBox/vmm/iem.h>
27#include <VBox/vmm/iom.h>
28#include <VBox/sup.h>
29#include <VBox/vmm/mm.h>
30#include <VBox/vmm/stam.h>
31#include <VBox/vmm/csam.h>
32#include <VBox/vmm/patm.h>
33#include <VBox/vmm/trpm.h>
34#ifdef VBOX_WITH_REM
35# include <VBox/vmm/rem.h>
36#endif
37#include <VBox/vmm/em.h>
38#include <VBox/vmm/hm.h>
39#include <VBox/vmm/hm_vmx.h>
40#include "PGMInternal.h"
41#include <VBox/vmm/vm.h>
42#include "PGMInline.h"
43#include <iprt/assert.h>
44#include <iprt/asm-amd64-x86.h>
45#include <iprt/string.h>
46#include <VBox/log.h>
47#include <VBox/param.h>
48#include <VBox/err.h>
49
50
51/*********************************************************************************************************************************
52* Structures and Typedefs *
53*********************************************************************************************************************************/
/**
 * State structure for PGM_GST_NAME(HandlerVirtualUpdate) that's
 * passed to PGM_GST_NAME(VirtHandlerUpdateOne) during enumeration.
 */
typedef struct PGMHVUSTATE
{
    /** Pointer to the VM. */
    PVM             pVM;
    /** Pointer to the VMCPU. */
    PVMCPU          pVCpu;
    /** The todo flags (accumulated while enumerating the handlers). */
    RTUINT          fTodo;
    /** The CR4 register value (paging related bits are consulted). */
    uint32_t        cr4;
} PGMHVUSTATE, *PPGMHVUSTATE;
69
70
/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
/** Walks the shadow long-mode (AMD64) tables to locate the PML4E/PDPT/PD for @a GCPtr. */
DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
/** Looks up the pool page backing the shadow PAE page directory for @a GCPtr. */
DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPU pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde);
#ifndef IN_RC /* long mode and EPT are never used in raw-mode context */
/** Syncs (allocating if needed) the shadow long-mode PDPT/PD for @a GCPtr from the guest entries. */
static int pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD);
/** Walks the shadow EPT tables to locate the PDPT/PD for @a GCPtr. */
static int pgmShwGetEPTPDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD);
#endif
80
81
82/*
83 * Shadow - 32-bit mode
84 */
85#define PGM_SHW_TYPE PGM_TYPE_32BIT
86#define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
87#include "PGMAllShw.h"
88
89/* Guest - real mode */
90#define PGM_GST_TYPE PGM_TYPE_REAL
91#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
92#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
93#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
94#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
95#include "PGMGstDefs.h"
96#include "PGMAllGst.h"
97#include "PGMAllBth.h"
98#undef BTH_PGMPOOLKIND_PT_FOR_PT
99#undef BTH_PGMPOOLKIND_ROOT
100#undef PGM_BTH_NAME
101#undef PGM_GST_TYPE
102#undef PGM_GST_NAME
103
104/* Guest - protected mode */
105#define PGM_GST_TYPE PGM_TYPE_PROT
106#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
107#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
108#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
109#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
110#include "PGMGstDefs.h"
111#include "PGMAllGst.h"
112#include "PGMAllBth.h"
113#undef BTH_PGMPOOLKIND_PT_FOR_PT
114#undef BTH_PGMPOOLKIND_ROOT
115#undef PGM_BTH_NAME
116#undef PGM_GST_TYPE
117#undef PGM_GST_NAME
118
119/* Guest - 32-bit mode */
120#define PGM_GST_TYPE PGM_TYPE_32BIT
121#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
122#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
123#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
124#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
125#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD
126#include "PGMGstDefs.h"
127#include "PGMAllGst.h"
128#include "PGMAllBth.h"
129#undef BTH_PGMPOOLKIND_PT_FOR_BIG
130#undef BTH_PGMPOOLKIND_PT_FOR_PT
131#undef BTH_PGMPOOLKIND_ROOT
132#undef PGM_BTH_NAME
133#undef PGM_GST_TYPE
134#undef PGM_GST_NAME
135
136#undef PGM_SHW_TYPE
137#undef PGM_SHW_NAME
138
139
140/*
141 * Shadow - PAE mode
142 */
143#define PGM_SHW_TYPE PGM_TYPE_PAE
144#define PGM_SHW_NAME(name) PGM_SHW_NAME_PAE(name)
145#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
146#include "PGMAllShw.h"
147
148/* Guest - real mode */
149#define PGM_GST_TYPE PGM_TYPE_REAL
150#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
151#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
152#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
153#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
154#include "PGMGstDefs.h"
155#include "PGMAllBth.h"
156#undef BTH_PGMPOOLKIND_PT_FOR_PT
157#undef BTH_PGMPOOLKIND_ROOT
158#undef PGM_BTH_NAME
159#undef PGM_GST_TYPE
160#undef PGM_GST_NAME
161
162/* Guest - protected mode */
163#define PGM_GST_TYPE PGM_TYPE_PROT
164#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
165#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
166#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
167#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
168#include "PGMGstDefs.h"
169#include "PGMAllBth.h"
170#undef BTH_PGMPOOLKIND_PT_FOR_PT
171#undef BTH_PGMPOOLKIND_ROOT
172#undef PGM_BTH_NAME
173#undef PGM_GST_TYPE
174#undef PGM_GST_NAME
175
176/* Guest - 32-bit mode */
177#define PGM_GST_TYPE PGM_TYPE_32BIT
178#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
179#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
180#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
181#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
182#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_FOR_32BIT
183#include "PGMGstDefs.h"
184#include "PGMAllBth.h"
185#undef BTH_PGMPOOLKIND_PT_FOR_BIG
186#undef BTH_PGMPOOLKIND_PT_FOR_PT
187#undef BTH_PGMPOOLKIND_ROOT
188#undef PGM_BTH_NAME
189#undef PGM_GST_TYPE
190#undef PGM_GST_NAME
191
192
193/* Guest - PAE mode */
194#define PGM_GST_TYPE PGM_TYPE_PAE
195#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
196#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
197#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
198#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
199#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT
200#include "PGMGstDefs.h"
201#include "PGMAllGst.h"
202#include "PGMAllBth.h"
203#undef BTH_PGMPOOLKIND_PT_FOR_BIG
204#undef BTH_PGMPOOLKIND_PT_FOR_PT
205#undef BTH_PGMPOOLKIND_ROOT
206#undef PGM_BTH_NAME
207#undef PGM_GST_TYPE
208#undef PGM_GST_NAME
209
210#undef PGM_SHW_TYPE
211#undef PGM_SHW_NAME
212
213
214#ifndef IN_RC /* AMD64 implies VT-x/AMD-V */
215/*
216 * Shadow - AMD64 mode
217 */
218# define PGM_SHW_TYPE PGM_TYPE_AMD64
219# define PGM_SHW_NAME(name) PGM_SHW_NAME_AMD64(name)
220# include "PGMAllShw.h"
221
222/* Guest - protected mode (only used for AMD-V nested paging in 64 bits mode) */
223/** @todo retire this hack. */
224# define PGM_GST_TYPE PGM_TYPE_PROT
225# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
226# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
227# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
228# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PD_PHYS
229# include "PGMGstDefs.h"
230# include "PGMAllBth.h"
231# undef BTH_PGMPOOLKIND_PT_FOR_PT
232# undef BTH_PGMPOOLKIND_ROOT
233# undef PGM_BTH_NAME
234# undef PGM_GST_TYPE
235# undef PGM_GST_NAME
236
237# ifdef VBOX_WITH_64_BITS_GUESTS
238/* Guest - AMD64 mode */
239# define PGM_GST_TYPE PGM_TYPE_AMD64
240# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
241# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
242# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
243# define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
244# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_64BIT_PML4
245# include "PGMGstDefs.h"
246# include "PGMAllGst.h"
247# include "PGMAllBth.h"
248# undef BTH_PGMPOOLKIND_PT_FOR_BIG
249# undef BTH_PGMPOOLKIND_PT_FOR_PT
250# undef BTH_PGMPOOLKIND_ROOT
251# undef PGM_BTH_NAME
252# undef PGM_GST_TYPE
253# undef PGM_GST_NAME
254# endif /* VBOX_WITH_64_BITS_GUESTS */
255
256# undef PGM_SHW_TYPE
257# undef PGM_SHW_NAME
258
259
260/*
261 * Shadow - 32-bit nested paging mode.
262 */
263# define PGM_SHW_TYPE PGM_TYPE_NESTED_32BIT
264# define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED_32BIT(name)
265# include "PGMAllShw.h"
266
267/* Guest - real mode */
268# define PGM_GST_TYPE PGM_TYPE_REAL
269# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
270# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_REAL(name)
271# include "PGMGstDefs.h"
272# include "PGMAllBth.h"
273# undef PGM_BTH_NAME
274# undef PGM_GST_TYPE
275# undef PGM_GST_NAME
276
277/* Guest - protected mode */
278# define PGM_GST_TYPE PGM_TYPE_PROT
279# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
280# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_PROT(name)
281# include "PGMGstDefs.h"
282# include "PGMAllBth.h"
283# undef PGM_BTH_NAME
284# undef PGM_GST_TYPE
285# undef PGM_GST_NAME
286
287/* Guest - 32-bit mode */
288# define PGM_GST_TYPE PGM_TYPE_32BIT
289# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
290# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_32BIT(name)
291# include "PGMGstDefs.h"
292# include "PGMAllBth.h"
293# undef PGM_BTH_NAME
294# undef PGM_GST_TYPE
295# undef PGM_GST_NAME
296
297/* Guest - PAE mode */
298# define PGM_GST_TYPE PGM_TYPE_PAE
299# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
300# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_PAE(name)
301# include "PGMGstDefs.h"
302# include "PGMAllBth.h"
303# undef PGM_BTH_NAME
304# undef PGM_GST_TYPE
305# undef PGM_GST_NAME
306
307# ifdef VBOX_WITH_64_BITS_GUESTS
308/* Guest - AMD64 mode */
309# define PGM_GST_TYPE PGM_TYPE_AMD64
310# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
311# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_AMD64(name)
312# include "PGMGstDefs.h"
313# include "PGMAllBth.h"
314# undef PGM_BTH_NAME
315# undef PGM_GST_TYPE
316# undef PGM_GST_NAME
317# endif /* VBOX_WITH_64_BITS_GUESTS */
318
319# undef PGM_SHW_TYPE
320# undef PGM_SHW_NAME
321
322
323/*
324 * Shadow - PAE nested paging mode.
325 */
326# define PGM_SHW_TYPE PGM_TYPE_NESTED_PAE
327# define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED_PAE(name)
328# include "PGMAllShw.h"
329
330/* Guest - real mode */
331# define PGM_GST_TYPE PGM_TYPE_REAL
332# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
333# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_REAL(name)
334# include "PGMGstDefs.h"
335# include "PGMAllBth.h"
336# undef PGM_BTH_NAME
337# undef PGM_GST_TYPE
338# undef PGM_GST_NAME
339
340/* Guest - protected mode */
341# define PGM_GST_TYPE PGM_TYPE_PROT
342# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
343# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_PROT(name)
344# include "PGMGstDefs.h"
345# include "PGMAllBth.h"
346# undef PGM_BTH_NAME
347# undef PGM_GST_TYPE
348# undef PGM_GST_NAME
349
350/* Guest - 32-bit mode */
351# define PGM_GST_TYPE PGM_TYPE_32BIT
352# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
353# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_32BIT(name)
354# include "PGMGstDefs.h"
355# include "PGMAllBth.h"
356# undef PGM_BTH_NAME
357# undef PGM_GST_TYPE
358# undef PGM_GST_NAME
359
360/* Guest - PAE mode */
361# define PGM_GST_TYPE PGM_TYPE_PAE
362# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
363# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_PAE(name)
364# include "PGMGstDefs.h"
365# include "PGMAllBth.h"
366# undef PGM_BTH_NAME
367# undef PGM_GST_TYPE
368# undef PGM_GST_NAME
369
370# ifdef VBOX_WITH_64_BITS_GUESTS
371/* Guest - AMD64 mode */
372# define PGM_GST_TYPE PGM_TYPE_AMD64
373# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
374# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_AMD64(name)
375# include "PGMGstDefs.h"
376# include "PGMAllBth.h"
377# undef PGM_BTH_NAME
378# undef PGM_GST_TYPE
379# undef PGM_GST_NAME
380# endif /* VBOX_WITH_64_BITS_GUESTS */
381
382# undef PGM_SHW_TYPE
383# undef PGM_SHW_NAME
384
385
386/*
387 * Shadow - AMD64 nested paging mode.
388 */
389# define PGM_SHW_TYPE PGM_TYPE_NESTED_AMD64
390# define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED_AMD64(name)
391# include "PGMAllShw.h"
392
393/* Guest - real mode */
394# define PGM_GST_TYPE PGM_TYPE_REAL
395# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
396# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_REAL(name)
397# include "PGMGstDefs.h"
398# include "PGMAllBth.h"
399# undef PGM_BTH_NAME
400# undef PGM_GST_TYPE
401# undef PGM_GST_NAME
402
403/* Guest - protected mode */
404# define PGM_GST_TYPE PGM_TYPE_PROT
405# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
406# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_PROT(name)
407# include "PGMGstDefs.h"
408# include "PGMAllBth.h"
409# undef PGM_BTH_NAME
410# undef PGM_GST_TYPE
411# undef PGM_GST_NAME
412
413/* Guest - 32-bit mode */
414# define PGM_GST_TYPE PGM_TYPE_32BIT
415# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
416# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_32BIT(name)
417# include "PGMGstDefs.h"
418# include "PGMAllBth.h"
419# undef PGM_BTH_NAME
420# undef PGM_GST_TYPE
421# undef PGM_GST_NAME
422
423/* Guest - PAE mode */
424# define PGM_GST_TYPE PGM_TYPE_PAE
425# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
426# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_PAE(name)
427# include "PGMGstDefs.h"
428# include "PGMAllBth.h"
429# undef PGM_BTH_NAME
430# undef PGM_GST_TYPE
431# undef PGM_GST_NAME
432
433# ifdef VBOX_WITH_64_BITS_GUESTS
434/* Guest - AMD64 mode */
435# define PGM_GST_TYPE PGM_TYPE_AMD64
436# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
437# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_AMD64(name)
438# include "PGMGstDefs.h"
439# include "PGMAllBth.h"
440# undef PGM_BTH_NAME
441# undef PGM_GST_TYPE
442# undef PGM_GST_NAME
443# endif /* VBOX_WITH_64_BITS_GUESTS */
444
445# undef PGM_SHW_TYPE
446# undef PGM_SHW_NAME
447
448
449/*
450 * Shadow - EPT.
451 */
452# define PGM_SHW_TYPE PGM_TYPE_EPT
453# define PGM_SHW_NAME(name) PGM_SHW_NAME_EPT(name)
454# include "PGMAllShw.h"
455
456/* Guest - real mode */
457# define PGM_GST_TYPE PGM_TYPE_REAL
458# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
459# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_REAL(name)
460# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
461# include "PGMGstDefs.h"
462# include "PGMAllBth.h"
463# undef BTH_PGMPOOLKIND_PT_FOR_PT
464# undef PGM_BTH_NAME
465# undef PGM_GST_TYPE
466# undef PGM_GST_NAME
467
468/* Guest - protected mode */
469# define PGM_GST_TYPE PGM_TYPE_PROT
470# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
471# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
472# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
473# include "PGMGstDefs.h"
474# include "PGMAllBth.h"
475# undef BTH_PGMPOOLKIND_PT_FOR_PT
476# undef PGM_BTH_NAME
477# undef PGM_GST_TYPE
478# undef PGM_GST_NAME
479
480/* Guest - 32-bit mode */
481# define PGM_GST_TYPE PGM_TYPE_32BIT
482# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
483# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_32BIT(name)
484# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
485# include "PGMGstDefs.h"
486# include "PGMAllBth.h"
487# undef BTH_PGMPOOLKIND_PT_FOR_PT
488# undef PGM_BTH_NAME
489# undef PGM_GST_TYPE
490# undef PGM_GST_NAME
491
492/* Guest - PAE mode */
493# define PGM_GST_TYPE PGM_TYPE_PAE
494# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
495# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PAE(name)
496# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
497# include "PGMGstDefs.h"
498# include "PGMAllBth.h"
499# undef BTH_PGMPOOLKIND_PT_FOR_PT
500# undef PGM_BTH_NAME
501# undef PGM_GST_TYPE
502# undef PGM_GST_NAME
503
504# ifdef VBOX_WITH_64_BITS_GUESTS
505/* Guest - AMD64 mode */
506# define PGM_GST_TYPE PGM_TYPE_AMD64
507# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
508# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_AMD64(name)
509# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
510# include "PGMGstDefs.h"
511# include "PGMAllBth.h"
512# undef BTH_PGMPOOLKIND_PT_FOR_PT
513# undef PGM_BTH_NAME
514# undef PGM_GST_TYPE
515# undef PGM_GST_NAME
516# endif /* VBOX_WITH_64_BITS_GUESTS */
517
518# undef PGM_SHW_TYPE
519# undef PGM_SHW_NAME
520
521
522/*
523 * Shadow - NEM / None.
524 */
525# define PGM_SHW_TYPE PGM_TYPE_NONE
526# define PGM_SHW_NAME(name) PGM_SHW_NAME_NONE(name)
527# include "PGMAllShw.h"
528
529/* Guest - real mode */
530# define PGM_GST_TYPE PGM_TYPE_REAL
531# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
532# define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_REAL(name)
533# include "PGMGstDefs.h"
534# include "PGMAllBth.h"
535# undef PGM_BTH_NAME
536# undef PGM_GST_TYPE
537# undef PGM_GST_NAME
538
539/* Guest - protected mode */
540# define PGM_GST_TYPE PGM_TYPE_PROT
541# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
542# define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_PROT(name)
543# include "PGMGstDefs.h"
544# include "PGMAllBth.h"
545# undef PGM_BTH_NAME
546# undef PGM_GST_TYPE
547# undef PGM_GST_NAME
548
549/* Guest - 32-bit mode */
550# define PGM_GST_TYPE PGM_TYPE_32BIT
551# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
552# define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_32BIT(name)
553# include "PGMGstDefs.h"
554# include "PGMAllBth.h"
555# undef PGM_BTH_NAME
556# undef PGM_GST_TYPE
557# undef PGM_GST_NAME
558
559/* Guest - PAE mode */
560# define PGM_GST_TYPE PGM_TYPE_PAE
561# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
562# define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_PAE(name)
563# include "PGMGstDefs.h"
564# include "PGMAllBth.h"
565# undef PGM_BTH_NAME
566# undef PGM_GST_TYPE
567# undef PGM_GST_NAME
568
569# ifdef VBOX_WITH_64_BITS_GUESTS
570/* Guest - AMD64 mode */
571# define PGM_GST_TYPE PGM_TYPE_AMD64
572# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
573# define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_AMD64(name)
574# include "PGMGstDefs.h"
575# include "PGMAllBth.h"
576# undef PGM_BTH_NAME
577# undef PGM_GST_TYPE
578# undef PGM_GST_NAME
579# endif /* VBOX_WITH_64_BITS_GUESTS */
580
581# undef PGM_SHW_TYPE
582# undef PGM_SHW_NAME
583
584#endif /* !IN_RC */
585
586
/**
 * Guest mode data array.
 *
 * Indexed directly by PGM_TYPE_XXX (entry 0 is an unused placeholder so that
 * PGM_TYPE_REAL == 1 lands on the second element).  Trailing members not
 * listed in the placeholder rows are implicitly zero-initialized per C rules.
 * The Relocate member only exists in ring-3 builds, hence the IN_RING3 guards.
 */
PGMMODEDATAGST const g_aPgmGuestModeData[PGM_GUEST_MODE_DATA_ARRAY_SIZE] =
{
    { UINT32_MAX, NULL, NULL, NULL, NULL, NULL }, /* 0 */
    {
        PGM_TYPE_REAL,
        PGM_GST_NAME_REAL(GetPage),
        PGM_GST_NAME_REAL(ModifyPage),
        PGM_GST_NAME_REAL(GetPDE),
        PGM_GST_NAME_REAL(Enter),
        PGM_GST_NAME_REAL(Exit),
#ifdef IN_RING3
        PGM_GST_NAME_REAL(Relocate),
#endif
    },
    {
        PGM_TYPE_PROT,
        PGM_GST_NAME_PROT(GetPage),
        PGM_GST_NAME_PROT(ModifyPage),
        PGM_GST_NAME_PROT(GetPDE),
        PGM_GST_NAME_PROT(Enter),
        PGM_GST_NAME_PROT(Exit),
#ifdef IN_RING3
        PGM_GST_NAME_PROT(Relocate),
#endif
    },
    {
        PGM_TYPE_32BIT,
        PGM_GST_NAME_32BIT(GetPage),
        PGM_GST_NAME_32BIT(ModifyPage),
        PGM_GST_NAME_32BIT(GetPDE),
        PGM_GST_NAME_32BIT(Enter),
        PGM_GST_NAME_32BIT(Exit),
#ifdef IN_RING3
        PGM_GST_NAME_32BIT(Relocate),
#endif
    },
    {
        PGM_TYPE_PAE,
        PGM_GST_NAME_PAE(GetPage),
        PGM_GST_NAME_PAE(ModifyPage),
        PGM_GST_NAME_PAE(GetPDE),
        PGM_GST_NAME_PAE(Enter),
        PGM_GST_NAME_PAE(Exit),
#ifdef IN_RING3
        PGM_GST_NAME_PAE(Relocate),
#endif
    },
#if defined(VBOX_WITH_64_BITS_GUESTS) && !defined(IN_RC) /* AMD64 guest workers only exist in these builds (see instantiations above) */
    {
        PGM_TYPE_AMD64,
        PGM_GST_NAME_AMD64(GetPage),
        PGM_GST_NAME_AMD64(ModifyPage),
        PGM_GST_NAME_AMD64(GetPDE),
        PGM_GST_NAME_AMD64(Enter),
        PGM_GST_NAME_AMD64(Exit),
# ifdef IN_RING3
        PGM_GST_NAME_AMD64(Relocate),
# endif
    },
#endif
};
651
652
/**
 * The shadow mode data array.
 *
 * Indexed directly by PGM_TYPE_XXX.  Entry 0 is unused, and PGM_TYPE_REAL /
 * PGM_TYPE_PROT have no shadow paging structures, so those rows are NULL
 * placeholders.  The AMD64, nested and EPT/NONE modes are never instantiated
 * in raw-mode context, hence the IN_RC guard.  The Relocate member only
 * exists in ring-3 builds.
 */
PGMMODEDATASHW const g_aPgmShadowModeData[PGM_SHADOW_MODE_DATA_ARRAY_SIZE] =
{
    { UINT8_MAX, NULL, NULL, NULL, NULL }, /* 0 */
    { UINT8_MAX, NULL, NULL, NULL, NULL }, /* PGM_TYPE_REAL */
    { UINT8_MAX, NULL, NULL, NULL, NULL }, /* PGM_TYPE_PROT */
    {
        PGM_TYPE_32BIT,
        PGM_SHW_NAME_32BIT(GetPage),
        PGM_SHW_NAME_32BIT(ModifyPage),
        PGM_SHW_NAME_32BIT(Enter),
        PGM_SHW_NAME_32BIT(Exit),
#ifdef IN_RING3
        PGM_SHW_NAME_32BIT(Relocate),
#endif
    },
    {
        PGM_TYPE_PAE,
        PGM_SHW_NAME_PAE(GetPage),
        PGM_SHW_NAME_PAE(ModifyPage),
        PGM_SHW_NAME_PAE(Enter),
        PGM_SHW_NAME_PAE(Exit),
#ifdef IN_RING3
        PGM_SHW_NAME_PAE(Relocate),
#endif
    },
#ifndef IN_RC
    {
        PGM_TYPE_AMD64,
        PGM_SHW_NAME_AMD64(GetPage),
        PGM_SHW_NAME_AMD64(ModifyPage),
        PGM_SHW_NAME_AMD64(Enter),
        PGM_SHW_NAME_AMD64(Exit),
# ifdef IN_RING3
        PGM_SHW_NAME_AMD64(Relocate),
# endif
    },
    {
        PGM_TYPE_NESTED_32BIT,
        PGM_SHW_NAME_NESTED_32BIT(GetPage),
        PGM_SHW_NAME_NESTED_32BIT(ModifyPage),
        PGM_SHW_NAME_NESTED_32BIT(Enter),
        PGM_SHW_NAME_NESTED_32BIT(Exit),
# ifdef IN_RING3
        PGM_SHW_NAME_NESTED_32BIT(Relocate),
# endif
    },
    {
        PGM_TYPE_NESTED_PAE,
        PGM_SHW_NAME_NESTED_PAE(GetPage),
        PGM_SHW_NAME_NESTED_PAE(ModifyPage),
        PGM_SHW_NAME_NESTED_PAE(Enter),
        PGM_SHW_NAME_NESTED_PAE(Exit),
# ifdef IN_RING3
        PGM_SHW_NAME_NESTED_PAE(Relocate),
# endif
    },
    {
        PGM_TYPE_NESTED_AMD64,
        PGM_SHW_NAME_NESTED_AMD64(GetPage),
        PGM_SHW_NAME_NESTED_AMD64(ModifyPage),
        PGM_SHW_NAME_NESTED_AMD64(Enter),
        PGM_SHW_NAME_NESTED_AMD64(Exit),
# ifdef IN_RING3
        PGM_SHW_NAME_NESTED_AMD64(Relocate),
# endif
    },
    {
        PGM_TYPE_EPT,
        PGM_SHW_NAME_EPT(GetPage),
        PGM_SHW_NAME_EPT(ModifyPage),
        PGM_SHW_NAME_EPT(Enter),
        PGM_SHW_NAME_EPT(Exit),
# ifdef IN_RING3
        PGM_SHW_NAME_EPT(Relocate),
# endif
    },
    {
        PGM_TYPE_NONE,
        PGM_SHW_NAME_NONE(GetPage),
        PGM_SHW_NAME_NONE(ModifyPage),
        PGM_SHW_NAME_NONE(Enter),
        PGM_SHW_NAME_NONE(Exit),
# ifdef IN_RING3
        PGM_SHW_NAME_NONE(Relocate),
# endif
    },
#endif /* !IN_RC */
};
744
745
/**
 * The guest+shadow mode data array.
 *
 * Laid out as a 2-D table flattened into one array: for each shadow mode
 * there is one row per guest PGM_TYPE_XXX value (entry 0 of each group
 * unused), so lookups index by shadowType * guestCount + guestType.
 * The entry layout is build-configuration dependent: AssertCR3 only exists
 * in strict builds, Trap0eHandler only outside ring-3 — hence the four-way
 * macro selection below.  Placeholder rows rely on C's implicit zero
 * initialization for any trailing members not spelled out.
 */
PGMMODEDATABTH const g_aPgmBothModeData[PGM_BOTH_MODE_DATA_ARRAY_SIZE] =
{
#if !defined(IN_RING3) && !defined(VBOX_STRICT)
# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
    { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(Trap0eHandler) }

#elif !defined(IN_RING3) && defined(VBOX_STRICT)
# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
    { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(Trap0eHandler), Nm(AssertCR3) }

#elif defined(IN_RING3) && !defined(VBOX_STRICT)
# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL }
# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
    { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), }

#elif defined(IN_RING3) && defined(VBOX_STRICT)
# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
    { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(AssertCR3) }

#else
# error "Misconfig."
#endif

    /* 32-bit shadow paging mode: */
    PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
    PGMMODEDATABTH_ENTRY(PGM_TYPE_32BIT, PGM_TYPE_REAL,  PGM_BTH_NAME_32BIT_REAL),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_32BIT, PGM_TYPE_PROT,  PGM_BTH_NAME_32BIT_PROT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_32BIT, PGM_TYPE_32BIT, PGM_BTH_NAME_32BIT_32BIT),
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NESTED_32BIT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NESTED_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NESTED_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_EPT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NONE - illegal */

    /* PAE shadow paging mode: */
    PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
    PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_REAL,  PGM_BTH_NAME_PAE_REAL),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_PROT,  PGM_BTH_NAME_PAE_PROT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_32BIT, PGM_BTH_NAME_PAE_32BIT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_PAE,   PGM_BTH_NAME_PAE_PAE),
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NESTED_32BIT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NESTED_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NESTED_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_EPT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NONE - illegal */

#ifndef IN_RC
    /* AMD64 shadow paging mode: */
    PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
    PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_REAL,  PGM_BTH_NAME_AMD64_REAL),
    PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_PROT,  PGM_BTH_NAME_AMD64_PROT),
    PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_32BIT, PGM_BTH_NAME_AMD64_32BIT),
    PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_PAE,   PGM_BTH_NAME_AMD64_PAE),
# ifdef VBOX_WITH_64_BITS_GUESTS
    PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_AMD64, PGM_BTH_NAME_AMD64_AMD64),
# else
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_AMD64 - illegal */
# endif
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NESTED_32BIT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NESTED_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NESTED_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_EPT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NONE - illegal */

    /* 32-bit nested paging mode: */
    PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_REAL,  PGM_BTH_NAME_NESTED_32BIT_REAL),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_PROT,  PGM_BTH_NAME_NESTED_32BIT_PROT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_32BIT, PGM_BTH_NAME_NESTED_32BIT_32BIT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_PAE,   PGM_BTH_NAME_NESTED_32BIT_PAE),
# ifdef VBOX_WITH_64_BITS_GUESTS
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_AMD64, PGM_BTH_NAME_NESTED_32BIT_AMD64),
# else
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_AMD64 - illegal */
# endif
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NESTED_32BIT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NESTED_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NESTED_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_EPT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NONE - illegal */

    /* PAE nested paging mode: */
    PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_REAL,  PGM_BTH_NAME_NESTED_PAE_REAL),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_PROT,  PGM_BTH_NAME_NESTED_PAE_PROT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_32BIT, PGM_BTH_NAME_NESTED_PAE_32BIT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_PAE,   PGM_BTH_NAME_NESTED_PAE_PAE),
# ifdef VBOX_WITH_64_BITS_GUESTS
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_AMD64, PGM_BTH_NAME_NESTED_PAE_AMD64),
# else
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_AMD64 - illegal */
# endif
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NESTED_32BIT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NESTED_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NESTED_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_EPT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NONE - illegal */

    /* AMD64 nested paging mode: */
    PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_REAL,  PGM_BTH_NAME_NESTED_AMD64_REAL),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_PROT,  PGM_BTH_NAME_NESTED_AMD64_PROT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_32BIT, PGM_BTH_NAME_NESTED_AMD64_32BIT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_PAE,   PGM_BTH_NAME_NESTED_AMD64_PAE),
# ifdef VBOX_WITH_64_BITS_GUESTS
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_AMD64, PGM_BTH_NAME_NESTED_AMD64_AMD64),
# else
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_AMD64 - illegal */
# endif
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NESTED_32BIT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NESTED_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NESTED_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_EPT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NONE - illegal */

    /* EPT nested paging mode: */
    PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
    PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_REAL,  PGM_BTH_NAME_EPT_REAL),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_PROT,  PGM_BTH_NAME_EPT_PROT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_32BIT, PGM_BTH_NAME_EPT_32BIT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_PAE,   PGM_BTH_NAME_EPT_PAE),
# ifdef VBOX_WITH_64_BITS_GUESTS
    PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_AMD64, PGM_BTH_NAME_EPT_AMD64),
# else
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_AMD64 - illegal */
# endif
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NESTED_32BIT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NESTED_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NESTED_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_EPT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NONE - illegal */

    /* NONE / NEM: */
    /* NOTE(review): these rows deliberately(?) reference the PGM_BTH_NAME_EPT_*
       worker set even though PGM_BTH_NAME_NONE_* workers are instantiated
       above — confirm against PGMAllBth.h whether the NONE workers should be
       used here instead. */
    PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_REAL,  PGM_BTH_NAME_EPT_REAL),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_PROT,  PGM_BTH_NAME_EPT_PROT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_32BIT, PGM_BTH_NAME_EPT_32BIT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_PAE,   PGM_BTH_NAME_EPT_PAE),
# ifdef VBOX_WITH_64_BITS_GUESTS
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_AMD64, PGM_BTH_NAME_EPT_AMD64),
# else
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_AMD64 - illegal */
# endif
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_32BIT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_EPT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NONE - illegal */

#endif /* !IN_RC */

#undef PGMMODEDATABTH_ENTRY
#undef PGMMODEDATABTH_NULL_ENTRY
};
909
910
911#ifndef IN_RING3
912/**
913 * #PF Handler.
914 *
915 * @returns VBox status code (appropriate for trap handling and GC return).
916 * @param pVCpu The cross context virtual CPU structure.
917 * @param uErr The trap error code.
918 * @param pRegFrame Trap register frame.
919 * @param pvFault The fault address.
920 */
921VMMDECL(int) PGMTrap0eHandler(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
922{
923 PVM pVM = pVCpu->CTX_SUFF(pVM);
924
925 Log(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv eip=%04x:%RGv cr3=%RGp\n", uErr, pvFault, pRegFrame->cs.Sel, (RTGCPTR)pRegFrame->rip, (RTGCPHYS)CPUMGetGuestCR3(pVCpu)));
926 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0e, a);
927 STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );
928
929
930#ifdef VBOX_WITH_STATISTICS
931 /*
932 * Error code stats.
933 */
934 if (uErr & X86_TRAP_PF_US)
935 {
936 if (!(uErr & X86_TRAP_PF_P))
937 {
938 if (uErr & X86_TRAP_PF_RW)
939 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentWrite);
940 else
941 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentRead);
942 }
943 else if (uErr & X86_TRAP_PF_RW)
944 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSWrite);
945 else if (uErr & X86_TRAP_PF_RSVD)
946 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSReserved);
947 else if (uErr & X86_TRAP_PF_ID)
948 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNXE);
949 else
950 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSRead);
951 }
952 else
953 { /* Supervisor */
954 if (!(uErr & X86_TRAP_PF_P))
955 {
956 if (uErr & X86_TRAP_PF_RW)
957 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentWrite);
958 else
959 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentRead);
960 }
961 else if (uErr & X86_TRAP_PF_RW)
962 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVWrite);
963 else if (uErr & X86_TRAP_PF_ID)
964 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSNXE);
965 else if (uErr & X86_TRAP_PF_RSVD)
966 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVReserved);
967 }
968#endif /* VBOX_WITH_STATISTICS */
969
970 /*
971 * Call the worker.
972 */
973 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
974 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
975 AssertReturn(g_aPgmBothModeData[idxBth].pfnTrap0eHandler, VERR_PGM_MODE_IPE);
976 bool fLockTaken = false;
977 int rc = g_aPgmBothModeData[idxBth].pfnTrap0eHandler(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken);
978 if (fLockTaken)
979 {
980 PGM_LOCK_ASSERT_OWNER(pVM);
981 pgmUnlock(pVM);
982 }
983 LogFlow(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv rc=%Rrc\n", uErr, pvFault, rc));
984
985 /*
986 * Return code tweaks.
987 */
988 if (rc != VINF_SUCCESS)
989 {
990 if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
991 rc = VINF_SUCCESS;
992
993# ifdef IN_RING0
994 /* Note: hack alert for difficult to reproduce problem. */
995 if ( rc == VERR_PAGE_NOT_PRESENT /* SMP only ; disassembly might fail. */
996 || rc == VERR_PAGE_TABLE_NOT_PRESENT /* seen with UNI & SMP */
997 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT /* seen with SMP */
998 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT) /* precaution */
999 {
1000 Log(("WARNING: Unexpected VERR_PAGE_TABLE_NOT_PRESENT (%d) for page fault at %RGv error code %x (rip=%RGv)\n", rc, pvFault, uErr, pRegFrame->rip));
1001 /* Some kind of inconsistency in the SMP case; it's safe to just execute the instruction again; not sure about single VCPU VMs though. */
1002 rc = VINF_SUCCESS;
1003 }
1004# endif
1005 }
1006
1007 STAM_STATS({ if (rc == VINF_EM_RAW_GUEST_TRAP) STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eGuestPF); });
1008 STAM_STATS({ if (!pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution))
1009 pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2Misc; });
1010 STAM_PROFILE_STOP_EX(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0e, pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution), a);
1011 return rc;
1012}
1013#endif /* !IN_RING3 */
1014
1015
1016/**
1017 * Prefetch a page
1018 *
1019 * Typically used to sync commonly used pages before entering raw mode
1020 * after a CR3 reload.
1021 *
1022 * @returns VBox status code suitable for scheduling.
1023 * @retval VINF_SUCCESS on success.
1024 * @retval VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
1025 * @param pVCpu The cross context virtual CPU structure.
1026 * @param GCPtrPage Page to invalidate.
1027 */
1028VMMDECL(int) PGMPrefetchPage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
1029{
1030 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Prefetch), a);
1031
1032 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
1033 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
1034 AssertReturn(g_aPgmBothModeData[idxBth].pfnPrefetchPage, VERR_PGM_MODE_IPE);
1035 int rc = g_aPgmBothModeData[idxBth].pfnPrefetchPage(pVCpu, GCPtrPage);
1036
1037 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Prefetch), a);
1038 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
1039 return rc;
1040}
1041
1042
1043/**
1044 * Gets the mapping corresponding to the specified address (if any).
1045 *
1046 * @returns Pointer to the mapping.
1047 * @returns NULL if not
1048 *
1049 * @param pVM The cross context VM structure.
1050 * @param GCPtr The guest context pointer.
1051 */
1052PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr)
1053{
1054 PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
1055 while (pMapping)
1056 {
1057 if ((uintptr_t)GCPtr < (uintptr_t)pMapping->GCPtr)
1058 break;
1059 if ((uintptr_t)GCPtr - (uintptr_t)pMapping->GCPtr < pMapping->cb)
1060 return pMapping;
1061 pMapping = pMapping->CTX_SUFF(pNext);
1062 }
1063 return NULL;
1064}
1065
1066
1067/**
1068 * Verifies a range of pages for read or write access
1069 *
1070 * Only checks the guest's page tables
1071 *
1072 * @returns VBox status code.
1073 * @param pVCpu The cross context virtual CPU structure.
1074 * @param Addr Guest virtual address to check
1075 * @param cbSize Access size
1076 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
1077 * @remarks Current not in use.
1078 */
1079VMMDECL(int) PGMIsValidAccess(PVMCPU pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
1080{
1081 /*
1082 * Validate input.
1083 */
1084 if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
1085 {
1086 AssertMsgFailed(("PGMIsValidAccess: invalid access type %08x\n", fAccess));
1087 return VERR_INVALID_PARAMETER;
1088 }
1089
1090 uint64_t fPage;
1091 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)Addr, &fPage, NULL);
1092 if (RT_FAILURE(rc))
1093 {
1094 Log(("PGMIsValidAccess: access violation for %RGv rc=%d\n", Addr, rc));
1095 return VINF_EM_RAW_GUEST_TRAP;
1096 }
1097
1098 /*
1099 * Check if the access would cause a page fault
1100 *
1101 * Note that hypervisor page directories are not present in the guest's tables, so this check
1102 * is sufficient.
1103 */
1104 bool fWrite = !!(fAccess & X86_PTE_RW);
1105 bool fUser = !!(fAccess & X86_PTE_US);
1106 if ( !(fPage & X86_PTE_P)
1107 || (fWrite && !(fPage & X86_PTE_RW))
1108 || (fUser && !(fPage & X86_PTE_US)) )
1109 {
1110 Log(("PGMIsValidAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPage, fWrite, fUser));
1111 return VINF_EM_RAW_GUEST_TRAP;
1112 }
1113 if ( RT_SUCCESS(rc)
1114 && PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize))
1115 return PGMIsValidAccess(pVCpu, Addr + PAGE_SIZE, (cbSize > PAGE_SIZE) ? cbSize - PAGE_SIZE : 1, fAccess);
1116 return rc;
1117}
1118
1119
1120/**
1121 * Verifies a range of pages for read or write access
1122 *
1123 * Supports handling of pages marked for dirty bit tracking and CSAM
1124 *
1125 * @returns VBox status code.
1126 * @param pVCpu The cross context virtual CPU structure.
1127 * @param Addr Guest virtual address to check
1128 * @param cbSize Access size
1129 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
1130 */
1131VMMDECL(int) PGMVerifyAccess(PVMCPU pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
1132{
1133 PVM pVM = pVCpu->CTX_SUFF(pVM);
1134
1135 AssertMsg(!(fAccess & ~(X86_PTE_US | X86_PTE_RW)), ("PGMVerifyAccess: invalid access type %08x\n", fAccess));
1136
1137 /*
1138 * Get going.
1139 */
1140 uint64_t fPageGst;
1141 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)Addr, &fPageGst, NULL);
1142 if (RT_FAILURE(rc))
1143 {
1144 Log(("PGMVerifyAccess: access violation for %RGv rc=%d\n", Addr, rc));
1145 return VINF_EM_RAW_GUEST_TRAP;
1146 }
1147
1148 /*
1149 * Check if the access would cause a page fault
1150 *
1151 * Note that hypervisor page directories are not present in the guest's tables, so this check
1152 * is sufficient.
1153 */
1154 const bool fWrite = !!(fAccess & X86_PTE_RW);
1155 const bool fUser = !!(fAccess & X86_PTE_US);
1156 if ( !(fPageGst & X86_PTE_P)
1157 || (fWrite && !(fPageGst & X86_PTE_RW))
1158 || (fUser && !(fPageGst & X86_PTE_US)) )
1159 {
1160 Log(("PGMVerifyAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPageGst, fWrite, fUser));
1161 return VINF_EM_RAW_GUEST_TRAP;
1162 }
1163
1164 if (!pVM->pgm.s.fNestedPaging)
1165 {
1166 /*
1167 * Next step is to verify if we protected this page for dirty bit tracking or for CSAM scanning
1168 */
1169 rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, NULL, NULL);
1170 if ( rc == VERR_PAGE_NOT_PRESENT
1171 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
1172 {
1173 /*
1174 * Page is not present in our page tables.
1175 * Try to sync it!
1176 */
1177 Assert(X86_TRAP_PF_RW == X86_PTE_RW && X86_TRAP_PF_US == X86_PTE_US);
1178 uint32_t const uErr = fAccess & (X86_TRAP_PF_RW | X86_TRAP_PF_US);
1179 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
1180 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
1181 AssertReturn(g_aPgmBothModeData[idxBth].pfnVerifyAccessSyncPage, VERR_PGM_MODE_IPE);
1182 rc = g_aPgmBothModeData[idxBth].pfnVerifyAccessSyncPage(pVCpu, Addr, fPageGst, uErr);
1183 if (rc != VINF_SUCCESS)
1184 return rc;
1185 }
1186 else
1187 AssertMsg(rc == VINF_SUCCESS, ("PGMShwGetPage %RGv failed with %Rrc\n", Addr, rc));
1188 }
1189
1190#if 0 /* def VBOX_STRICT; triggers too often now */
1191 /*
1192 * This check is a bit paranoid, but useful.
1193 */
1194 /* Note! This will assert when writing to monitored pages (a bit annoying actually). */
1195 uint64_t fPageShw;
1196 rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, &fPageShw, NULL);
1197 if ( (rc == VERR_PAGE_NOT_PRESENT || RT_FAILURE(rc))
1198 || (fWrite && !(fPageShw & X86_PTE_RW))
1199 || (fUser && !(fPageShw & X86_PTE_US)) )
1200 {
1201 AssertMsgFailed(("Unexpected access violation for %RGv! rc=%Rrc write=%d user=%d\n",
1202 Addr, rc, fWrite && !(fPageShw & X86_PTE_RW), fUser && !(fPageShw & X86_PTE_US)));
1203 return VINF_EM_RAW_GUEST_TRAP;
1204 }
1205#endif
1206
1207 if ( RT_SUCCESS(rc)
1208 && ( PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize - 1)
1209 || Addr + cbSize < Addr))
1210 {
1211 /* Don't recursively call PGMVerifyAccess as we might run out of stack. */
1212 for (;;)
1213 {
1214 Addr += PAGE_SIZE;
1215 if (cbSize > PAGE_SIZE)
1216 cbSize -= PAGE_SIZE;
1217 else
1218 cbSize = 1;
1219 rc = PGMVerifyAccess(pVCpu, Addr, 1, fAccess);
1220 if (rc != VINF_SUCCESS)
1221 break;
1222 if (PAGE_ADDRESS(Addr) == PAGE_ADDRESS(Addr + cbSize - 1))
1223 break;
1224 }
1225 }
1226 return rc;
1227}
1228
1229
1230/**
1231 * Emulation of the invlpg instruction (HC only actually).
1232 *
1233 * @returns Strict VBox status code, special care required.
1234 * @retval VINF_PGM_SYNC_CR3 - handled.
1235 * @retval VINF_EM_RAW_EMULATE_INSTR - not handled (RC only).
1236 * @retval VERR_REM_FLUSHED_PAGES_OVERFLOW - not handled.
1237 *
1238 * @param pVCpu The cross context virtual CPU structure.
1239 * @param GCPtrPage Page to invalidate.
1240 *
1241 * @remark ASSUMES the page table entry or page directory is valid. Fairly
1242 * safe, but there could be edge cases!
1243 *
1244 * @todo Flush page or page directory only if necessary!
1245 * @todo VBOXSTRICTRC
1246 */
1247VMMDECL(int) PGMInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
1248{
1249 PVM pVM = pVCpu->CTX_SUFF(pVM);
1250 int rc;
1251 Log3(("PGMInvalidatePage: GCPtrPage=%RGv\n", GCPtrPage));
1252
1253#if !defined(IN_RING3) && defined(VBOX_WITH_REM)
1254 /*
1255 * Notify the recompiler so it can record this instruction.
1256 */
1257 REMNotifyInvalidatePage(pVM, GCPtrPage);
1258#endif /* !IN_RING3 */
1259 IEMTlbInvalidatePage(pVCpu, GCPtrPage);
1260
1261
1262#ifdef IN_RC
1263 /*
1264 * Check for conflicts and pending CR3 monitoring updates.
1265 */
1266 if (pgmMapAreMappingsFloating(pVM))
1267 {
1268 if ( pgmGetMapping(pVM, GCPtrPage)
1269 && PGMGstGetPage(pVCpu, GCPtrPage, NULL, NULL) != VERR_PAGE_TABLE_NOT_PRESENT)
1270 {
1271 LogFlow(("PGMGCInvalidatePage: Conflict!\n"));
1272 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1273 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRCInvlPgConflict);
1274 return VINF_PGM_SYNC_CR3;
1275 }
1276
1277 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
1278 {
1279 LogFlow(("PGMGCInvalidatePage: PGM_SYNC_MONITOR_CR3 -> reinterpret instruction in R3\n"));
1280 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRCInvlPgSyncMonCR3);
1281 return VINF_EM_RAW_EMULATE_INSTR;
1282 }
1283 }
1284#endif /* IN_RC */
1285
1286 /*
1287 * Call paging mode specific worker.
1288 */
1289 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InvalidatePage), a);
1290 pgmLock(pVM);
1291
1292 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
1293 AssertReturnStmt(idxBth < RT_ELEMENTS(g_aPgmBothModeData), pgmUnlock(pVM), VERR_PGM_MODE_IPE);
1294 AssertReturnStmt(g_aPgmBothModeData[idxBth].pfnInvalidatePage, pgmUnlock(pVM), VERR_PGM_MODE_IPE);
1295 rc = g_aPgmBothModeData[idxBth].pfnInvalidatePage(pVCpu, GCPtrPage);
1296
1297 pgmUnlock(pVM);
1298 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InvalidatePage), a);
1299
1300#ifdef IN_RING3
1301 /*
1302 * Check if we have a pending update of the CR3 monitoring.
1303 */
1304 if ( RT_SUCCESS(rc)
1305 && (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3))
1306 {
1307 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1308 Assert(!pVM->pgm.s.fMappingsFixed); Assert(pgmMapAreMappingsEnabled(pVM));
1309 }
1310
1311# ifdef VBOX_WITH_RAW_MODE
1312 /*
1313 * Inform CSAM about the flush
1314 *
1315 * Note: This is to check if monitored pages have been changed; when we implement
1316 * callbacks for virtual handlers, this is no longer required.
1317 */
1318 CSAMR3FlushPage(pVM, GCPtrPage);
1319# endif
1320#endif /* IN_RING3 */
1321
1322 /* Ignore all irrelevant error codes. */
1323 if ( rc == VERR_PAGE_NOT_PRESENT
1324 || rc == VERR_PAGE_TABLE_NOT_PRESENT
1325 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT
1326 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)
1327 rc = VINF_SUCCESS;
1328
1329 return rc;
1330}
1331
1332
1333/**
1334 * Executes an instruction using the interpreter.
1335 *
1336 * @returns VBox status code (appropriate for trap handling and GC return).
1337 * @param pVM The cross context VM structure.
1338 * @param pVCpu The cross context virtual CPU structure.
1339 * @param pRegFrame Register frame.
1340 * @param pvFault Fault address.
1341 */
1342VMMDECL(VBOXSTRICTRC) PGMInterpretInstruction(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
1343{
1344 NOREF(pVM);
1345 VBOXSTRICTRC rc = EMInterpretInstruction(pVCpu, pRegFrame, pvFault);
1346 if (rc == VERR_EM_INTERPRETER)
1347 rc = VINF_EM_RAW_EMULATE_INSTR;
1348 if (rc != VINF_SUCCESS)
1349 Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%RGv)\n", VBOXSTRICTRC_VAL(rc), pvFault));
1350 return rc;
1351}
1352
1353
1354/**
1355 * Gets effective page information (from the VMM page directory).
1356 *
1357 * @returns VBox status code.
1358 * @param pVCpu The cross context virtual CPU structure.
1359 * @param GCPtr Guest Context virtual address of the page.
1360 * @param pfFlags Where to store the flags. These are X86_PTE_*.
1361 * @param pHCPhys Where to store the HC physical address of the page.
1362 * This is page aligned.
1363 * @remark You should use PGMMapGetPage() for pages in a mapping.
1364 */
1365VMMDECL(int) PGMShwGetPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
1366{
1367 PVM pVM = pVCpu->CTX_SUFF(pVM);
1368 pgmLock(pVM);
1369
1370 uintptr_t idxShw = pVCpu->pgm.s.idxShadowModeData;
1371 AssertReturn(idxShw < RT_ELEMENTS(g_aPgmShadowModeData), VERR_PGM_MODE_IPE);
1372 AssertReturn(g_aPgmShadowModeData[idxShw].pfnGetPage, VERR_PGM_MODE_IPE);
1373 int rc = g_aPgmShadowModeData[idxShw].pfnGetPage(pVCpu, GCPtr, pfFlags, pHCPhys);
1374
1375 pgmUnlock(pVM);
1376 return rc;
1377}
1378
1379
1380/**
1381 * Modify page flags for a range of pages in the shadow context.
1382 *
1383 * The existing flags are ANDed with the fMask and ORed with the fFlags.
1384 *
1385 * @returns VBox status code.
1386 * @param pVCpu The cross context virtual CPU structure.
1387 * @param GCPtr Virtual address of the first page in the range.
1388 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
1389 * @param fMask The AND mask - page flags X86_PTE_*.
1390 * Be very CAREFUL when ~'ing constants which could be 32-bit!
1391 * @param fOpFlags A combination of the PGM_MK_PK_XXX flags.
1392 * @remark You must use PGMMapModifyPage() for pages in a mapping.
1393 */
1394DECLINLINE(int) pdmShwModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags)
1395{
1396 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
1397 Assert(!(fOpFlags & ~(PGM_MK_PG_IS_MMIO2 | PGM_MK_PG_IS_WRITE_FAULT)));
1398
1399 GCPtr &= PAGE_BASE_GC_MASK; /** @todo this ain't necessary, right... */
1400
1401 PVM pVM = pVCpu->CTX_SUFF(pVM);
1402 pgmLock(pVM);
1403
1404 uintptr_t idxShw = pVCpu->pgm.s.idxShadowModeData;
1405 AssertReturn(idxShw < RT_ELEMENTS(g_aPgmShadowModeData), VERR_PGM_MODE_IPE);
1406 AssertReturn(g_aPgmShadowModeData[idxShw].pfnModifyPage, VERR_PGM_MODE_IPE);
1407 int rc = g_aPgmShadowModeData[idxShw].pfnModifyPage(pVCpu, GCPtr, PAGE_SIZE, fFlags, fMask, fOpFlags);
1408
1409 pgmUnlock(pVM);
1410 return rc;
1411}
1412
1413
1414/**
1415 * Changing the page flags for a single page in the shadow page tables so as to
1416 * make it read-only.
1417 *
1418 * @returns VBox status code.
1419 * @param pVCpu The cross context virtual CPU structure.
1420 * @param GCPtr Virtual address of the first page in the range.
1421 * @param fOpFlags A combination of the PGM_MK_PK_XXX flags.
1422 */
1423VMMDECL(int) PGMShwMakePageReadonly(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
1424{
1425 return pdmShwModifyPage(pVCpu, GCPtr, 0, ~(uint64_t)X86_PTE_RW, fOpFlags);
1426}
1427
1428
1429/**
1430 * Changing the page flags for a single page in the shadow page tables so as to
1431 * make it writable.
1432 *
1433 * The call must know with 101% certainty that the guest page tables maps this
1434 * as writable too. This function will deal shared, zero and write monitored
1435 * pages.
1436 *
1437 * @returns VBox status code.
1438 * @param pVCpu The cross context virtual CPU structure.
1439 * @param GCPtr Virtual address of the first page in the range.
1440 * @param fOpFlags A combination of the PGM_MK_PK_XXX flags.
1441 */
1442VMMDECL(int) PGMShwMakePageWritable(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
1443{
1444 if (pVCpu->pgm.s.enmShadowMode != PGMMODE_NONE) /* avoid assertions */
1445 return pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)0, fOpFlags);
1446 return VINF_SUCCESS;
1447}
1448
1449
1450/**
1451 * Changing the page flags for a single page in the shadow page tables so as to
1452 * make it not present.
1453 *
1454 * @returns VBox status code.
1455 * @param pVCpu The cross context virtual CPU structure.
1456 * @param GCPtr Virtual address of the first page in the range.
1457 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
1458 */
1459VMMDECL(int) PGMShwMakePageNotPresent(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
1460{
1461 return pdmShwModifyPage(pVCpu, GCPtr, 0, 0, fOpFlags);
1462}
1463
1464
1465/**
1466 * Changing the page flags for a single page in the shadow page tables so as to
1467 * make it supervisor and writable.
1468 *
1469 * This if for dealing with CR0.WP=0 and readonly user pages.
1470 *
1471 * @returns VBox status code.
1472 * @param pVCpu The cross context virtual CPU structure.
1473 * @param GCPtr Virtual address of the first page in the range.
1474 * @param fBigPage Whether or not this is a big page. If it is, we have to
1475 * change the shadow PDE as well. If it isn't, the caller
1476 * has checked that the shadow PDE doesn't need changing.
1477 * We ASSUME 4KB pages backing the big page here!
1478 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
1479 */
1480int pgmShwMakePageSupervisorAndWritable(PVMCPU pVCpu, RTGCPTR GCPtr, bool fBigPage, uint32_t fOpFlags)
1481{
1482 int rc = pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)X86_PTE_US, fOpFlags);
1483 if (rc == VINF_SUCCESS && fBigPage)
1484 {
1485 /* this is a bit ugly... */
1486 switch (pVCpu->pgm.s.enmShadowMode)
1487 {
1488 case PGMMODE_32_BIT:
1489 {
1490 PX86PDE pPde = pgmShwGet32BitPDEPtr(pVCpu, GCPtr);
1491 AssertReturn(pPde, VERR_INTERNAL_ERROR_3);
1492 Log(("pgmShwMakePageSupervisorAndWritable: PDE=%#llx", pPde->u));
1493 pPde->n.u1Write = 1;
1494 Log(("-> PDE=%#llx (32)\n", pPde->u));
1495 break;
1496 }
1497 case PGMMODE_PAE:
1498 case PGMMODE_PAE_NX:
1499 {
1500 PX86PDEPAE pPde = pgmShwGetPaePDEPtr(pVCpu, GCPtr);
1501 AssertReturn(pPde, VERR_INTERNAL_ERROR_3);
1502 Log(("pgmShwMakePageSupervisorAndWritable: PDE=%#llx", pPde->u));
1503 pPde->n.u1Write = 1;
1504 Log(("-> PDE=%#llx (PAE)\n", pPde->u));
1505 break;
1506 }
1507 default:
1508 AssertFailedReturn(VERR_INTERNAL_ERROR_4);
1509 }
1510 }
1511 return rc;
1512}
1513
1514
1515/**
1516 * Gets the shadow page directory for the specified address, PAE.
1517 *
1518 * @returns Pointer to the shadow PD.
1519 * @param pVCpu The cross context virtual CPU structure.
1520 * @param GCPtr The address.
1521 * @param uGstPdpe Guest PDPT entry. Valid.
1522 * @param ppPD Receives address of page directory
1523 */
1524int pgmShwSyncPaePDPtr(PVMCPU pVCpu, RTGCPTR GCPtr, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
1525{
1526 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
1527 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
1528 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1529 PVM pVM = pVCpu->CTX_SUFF(pVM);
1530 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1531 PPGMPOOLPAGE pShwPage;
1532 int rc;
1533
1534 PGM_LOCK_ASSERT_OWNER(pVM);
1535
1536 /* Allocate page directory if not present. */
1537 if ( !pPdpe->n.u1Present
1538 && !(pPdpe->u & X86_PDPE_PG_MASK))
1539 {
1540 RTGCPTR64 GCPdPt;
1541 PGMPOOLKIND enmKind;
1542
1543 if (pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu))
1544 {
1545 /* AMD-V nested paging or real/protected mode without paging. */
1546 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
1547 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
1548 }
1549 else
1550 {
1551 if (CPUMGetGuestCR4(pVCpu) & X86_CR4_PAE)
1552 {
1553 if (!(uGstPdpe & X86_PDPE_P))
1554 {
1555 /* PD not present; guest must reload CR3 to change it.
1556 * No need to monitor anything in this case.
1557 */
1558 Assert(VM_IS_RAW_MODE_ENABLED(pVM));
1559
1560 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1561 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
1562 uGstPdpe |= X86_PDPE_P;
1563 }
1564 else
1565 {
1566 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1567 enmKind = PGMPOOLKIND_PAE_PD_FOR_PAE_PD;
1568 }
1569 }
1570 else
1571 {
1572 GCPdPt = CPUMGetGuestCR3(pVCpu);
1573 enmKind = (PGMPOOLKIND)(PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD + iPdPt);
1574 }
1575 }
1576
1577 /* Create a reference back to the PDPT by using the index in its shadow page. */
1578 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1579 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPdPt, false /*fLockPage*/,
1580 &pShwPage);
1581 AssertRCReturn(rc, rc);
1582
1583 /* The PD was cached or created; hook it up now. */
1584 pPdpe->u |= pShwPage->Core.Key | (uGstPdpe & (X86_PDPE_P | X86_PDPE_A));
1585
1586# if defined(IN_RC)
1587 /*
1588 * In 32 bits PAE mode we *must* invalidate the TLB when changing a
1589 * PDPT entry; the CPU fetches them only during cr3 load, so any
1590 * non-present PDPT will continue to cause page faults.
1591 */
1592 ASMReloadCR3();
1593# endif
1594 PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdpe);
1595 }
1596 else
1597 {
1598 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
1599 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1600 Assert((pPdpe->u & X86_PDPE_PG_MASK) == pShwPage->Core.Key);
1601
1602 pgmPoolCacheUsed(pPool, pShwPage);
1603 }
1604 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1605 return VINF_SUCCESS;
1606}
1607
1608
1609/**
1610 * Gets the pointer to the shadow page directory entry for an address, PAE.
1611 *
1612 * @returns Pointer to the PDE.
1613 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1614 * @param GCPtr The address.
1615 * @param ppShwPde Receives the address of the pgm pool page for the shadow page directory
1616 */
1617DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPU pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde)
1618{
1619 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
1620 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
1621 PVM pVM = pVCpu->CTX_SUFF(pVM);
1622
1623 PGM_LOCK_ASSERT_OWNER(pVM);
1624
1625 AssertReturn(pPdpt, VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT); /* can't happen */
1626 if (!pPdpt->a[iPdPt].n.u1Present)
1627 {
1628 LogFlow(("pgmShwGetPaePoolPagePD: PD %d not present (%RX64)\n", iPdPt, pPdpt->a[iPdPt].u));
1629 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1630 }
1631 AssertMsg(pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK, ("GCPtr=%RGv\n", GCPtr));
1632
1633 /* Fetch the pgm pool shadow descriptor. */
1634 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
1635 AssertReturn(pShwPde, VERR_PGM_POOL_GET_PAGE_FAILED);
1636
1637 *ppShwPde = pShwPde;
1638 return VINF_SUCCESS;
1639}
1640
1641#ifndef IN_RC
1642
1643/**
1644 * Syncs the SHADOW page directory pointer for the specified address.
1645 *
1646 * Allocates backing pages in case the PDPT or PML4 entry is missing.
1647 *
1648 * The caller is responsible for making sure the guest has a valid PD before
1649 * calling this function.
1650 *
1651 * @returns VBox status code.
1652 * @param pVCpu The cross context virtual CPU structure.
1653 * @param GCPtr The address.
1654 * @param uGstPml4e Guest PML4 entry (valid).
1655 * @param uGstPdpe Guest PDPT entry (valid).
1656 * @param ppPD Receives address of page directory
1657 */
1658static int pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
1659{
1660 PVM pVM = pVCpu->CTX_SUFF(pVM);
1661 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1662 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1663 PX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
1664 bool fNestedPagingOrNoGstPaging = pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu);
1665 PPGMPOOLPAGE pShwPage;
1666 int rc;
1667
1668 PGM_LOCK_ASSERT_OWNER(pVM);
1669
1670 /* Allocate page directory pointer table if not present. */
1671 if ( !pPml4e->n.u1Present
1672 && !(pPml4e->u & X86_PML4E_PG_MASK))
1673 {
1674 RTGCPTR64 GCPml4;
1675 PGMPOOLKIND enmKind;
1676
1677 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1678
1679 if (fNestedPagingOrNoGstPaging)
1680 {
1681 /* AMD-V nested paging or real/protected mode without paging */
1682 GCPml4 = (RTGCPTR64)iPml4 << X86_PML4_SHIFT;
1683 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_PHYS;
1684 }
1685 else
1686 {
1687 GCPml4 = uGstPml4e & X86_PML4E_PG_MASK;
1688 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT;
1689 }
1690
1691 /* Create a reference back to the PDPT by using the index in its shadow page. */
1692 rc = pgmPoolAlloc(pVM, GCPml4, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1693 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, false /*fLockPage*/,
1694 &pShwPage);
1695 AssertRCReturn(rc, rc);
1696 }
1697 else
1698 {
1699 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
1700 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1701
1702 pgmPoolCacheUsed(pPool, pShwPage);
1703 }
1704 /* The PDPT was cached or created; hook it up now. */
1705 pPml4e->u |= pShwPage->Core.Key | (uGstPml4e & pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask);
1706
1707 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1708 PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1709 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1710
1711 /* Allocate page directory if not present. */
1712 if ( !pPdpe->n.u1Present
1713 && !(pPdpe->u & X86_PDPE_PG_MASK))
1714 {
1715 RTGCPTR64 GCPdPt;
1716 PGMPOOLKIND enmKind;
1717
1718 if (fNestedPagingOrNoGstPaging)
1719 {
1720 /* AMD-V nested paging or real/protected mode without paging */
1721 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
1722 enmKind = PGMPOOLKIND_64BIT_PD_FOR_PHYS;
1723 }
1724 else
1725 {
1726 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1727 enmKind = PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD;
1728 }
1729
1730 /* Create a reference back to the PDPT by using the index in its shadow page. */
1731 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1732 pShwPage->idx, iPdPt, false /*fLockPage*/,
1733 &pShwPage);
1734 AssertRCReturn(rc, rc);
1735 }
1736 else
1737 {
1738 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
1739 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1740
1741 pgmPoolCacheUsed(pPool, pShwPage);
1742 }
1743 /* The PD was cached or created; hook it up now. */
1744 pPdpe->u |= pShwPage->Core.Key | (uGstPdpe & pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask);
1745
1746 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1747 return VINF_SUCCESS;
1748}
1749
1750
1751/**
1752 * Gets the SHADOW page directory pointer for the specified address (long mode).
1753 *
1754 * @returns VBox status code.
1755 * @param pVCpu The cross context virtual CPU structure.
1756 * @param GCPtr The address.
1757 * @param ppPdpt Receives address of pdpt
1758 * @param ppPD Receives address of page directory
1759 */
1760DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
1761{
1762 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1763 PCX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
1764
1765 PGM_LOCK_ASSERT_OWNER(pVCpu->CTX_SUFF(pVM));
1766
1767 AssertReturn(pPml4e, VERR_PGM_PML4_MAPPING);
1768 if (ppPml4e)
1769 *ppPml4e = (PX86PML4E)pPml4e;
1770
1771 Log4(("pgmShwGetLongModePDPtr %RGv (%RHv) %RX64\n", GCPtr, pPml4e, pPml4e->u));
1772
1773 if (!pPml4e->n.u1Present)
1774 return VERR_PAGE_MAP_LEVEL4_NOT_PRESENT;
1775
1776 PVM pVM = pVCpu->CTX_SUFF(pVM);
1777 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1778 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
1779 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1780
1781 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1782 PCX86PDPT pPdpt = *ppPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1783 if (!pPdpt->a[iPdPt].n.u1Present)
1784 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1785
1786 pShwPage = pgmPoolGetPage(pPool, pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
1787 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1788
1789 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1790 Log4(("pgmShwGetLongModePDPtr %RGv -> *ppPD=%p PDE=%p/%RX64\n", GCPtr, *ppPD, &(*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK], (*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK].u));
1791 return VINF_SUCCESS;
1792}
1793
1794
/**
 * Syncs the SHADOW EPT page directory pointer for the specified address. Allocates
 * backing pages in case the PDPT or PML4 entry is missing.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtr       The address.
 * @param   ppPdpt      Receives address of pdpt. Optional (NULL allowed).
 * @param   ppPD        Receives address of page directory
 *
 * @remarks Requires nested paging and ownership of the PGM lock (both
 *          asserted below).
 */
static int pgmShwGetEPTPDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    const unsigned iPml4 = (GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
    PEPTPML4 pPml4;
    PEPTPML4E pPml4e;
    PPGMPOOLPAGE pShwPage;
    int rc;

    Assert(pVM->pgm.s.fNestedPaging);
    PGM_LOCK_ASSERT_OWNER(pVM);

    /* The shadow CR3 pool page holds the EPT PML4 table. */
    pPml4 = (PEPTPML4)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
    Assert(pPml4);

    /* Allocate page directory pointer table if not present. */
    pPml4e = &pPml4->a[iPml4];
    if (   !pPml4e->n.u1Present
        && !(pPml4e->u & EPT_PML4E_PG_MASK))
    {
        Assert(!(pPml4e->u & EPT_PML4E_PG_MASK));
        RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT;

        /* The shadow CR3 page's index/iPml4 pair records the back-reference
           from the new PDPT to its parent PML4 entry. */
        rc = pgmPoolAlloc(pVM, GCPml4, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
                          pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, false /*fLockPage*/,
                          &pShwPage);
        AssertRCReturn(rc, rc);
    }
    else
    {
        pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
        AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);

        pgmPoolCacheUsed(pPool, pShwPage);
    }
    /* The PDPT was cached or created; hook it up now and fill with the default value. */
    pPml4e->u = pShwPage->Core.Key;
    pPml4e->n.u1Present = 1;
    pPml4e->n.u1Write = 1;
    pPml4e->n.u1Execute = 1;

    const unsigned iPdPt = (GCPtr >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
    PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
    PEPTPDPTE pPdpe = &pPdpt->a[iPdPt];

    if (ppPdpt)
        *ppPdpt = pPdpt;

    /* Allocate page directory if not present. */
    if (   !pPdpe->n.u1Present
        && !(pPdpe->u & EPT_PDPTE_PG_MASK))
    {
        RTGCPTR64 GCPdPt = (RTGCPTR64)iPdPt << EPT_PDPT_SHIFT;
        rc = pgmPoolAlloc(pVM, GCPdPt, PGMPOOLKIND_EPT_PD_FOR_PHYS, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
                          pShwPage->idx, iPdPt, false /*fLockPage*/,
                          &pShwPage);
        AssertRCReturn(rc, rc);
    }
    else
    {
        pShwPage = pgmPoolGetPage(pPool, pPdpe->u & EPT_PDPTE_PG_MASK);
        AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);

        pgmPoolCacheUsed(pPool, pShwPage);
    }
    /* The PD was cached or created; hook it up now and fill with the default value. */
    pPdpe->u = pShwPage->Core.Key;
    pPdpe->n.u1Present = 1;
    pPdpe->n.u1Write = 1;
    pPdpe->n.u1Execute = 1;

    *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
    return VINF_SUCCESS;
}
1880
1881#endif /* IN_RC */
1882
1883#ifdef IN_RING0
/**
 * Synchronizes a range of nested page table entries.
 *
 * The caller must own the PGM lock.
 *
 * @returns VBox status code from the mode-specific SyncPage worker.
 * @param   pVCpu               The cross context virtual CPU structure of the calling EMT.
 * @param   GCPhys              Where to start.
 * @param   cPages              How many pages which entries should be synced.
 * @param   enmShwPagingMode    The shadow paging mode (PGMMODE_EPT for VT-x,
 *                              host paging mode for AMD-V).
 */
int pgmShwSyncNestedPageLocked(PVMCPU pVCpu, RTGCPHYS GCPhys, uint32_t cPages, PGMMODE enmShwPagingMode)
{
    PGM_LOCK_ASSERT_OWNER(pVCpu->CTX_SUFF(pVM));

/** @todo r=bird: Gotta love this nested paging hacking we're still carrying with us... (Split PGM_TYPE_NESTED.) */
    int rc;
    /* Each case fabricates a dummy PDE (present, user, writable, accessed)
       and dispatches to the shadow-mode-specific SyncPage worker; ~0U marks
       that there is no page-fault error code for this sync. */
    switch (enmShwPagingMode)
    {
        case PGMMODE_32_BIT:
        {
            X86PDE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
            rc = PGM_BTH_NAME_32BIT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
            break;
        }

        case PGMMODE_PAE:
        case PGMMODE_PAE_NX:
        {
            X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
            rc = PGM_BTH_NAME_PAE_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
            break;
        }

        case PGMMODE_AMD64:
        case PGMMODE_AMD64_NX:
        {
            X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
            rc = PGM_BTH_NAME_AMD64_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
            break;
        }

        case PGMMODE_EPT:
        {
            X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
            rc = PGM_BTH_NAME_EPT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
            break;
        }

        default:
            AssertMsgFailedReturn(("%d\n", enmShwPagingMode), VERR_IPE_NOT_REACHED_DEFAULT_CASE);
    }
    return rc;
}
1938#endif /* IN_RING0 */
1939
1940
/**
 * Gets effective Guest OS page information.
 *
 * When GCPtr is in a big page, the function will return as if it was a normal
 * 4KB page. If the need for distinguishing between big and normal page becomes
 * necessary at a later point, a PGMGstGetPage() will be created for that
 * purpose.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   GCPtr       Guest Context virtual address of the page.
 * @param   pfFlags     Where to store the flags. These are X86_PTE_*, even for
 *                      big pages.  Optional (NULL allowed).
 * @param   pGCPhys     Where to store the GC physical address of the page.
 *                      This is page aligned.  Optional (NULL allowed).
 */
VMMDECL(int) PGMGstGetPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
{
    VMCPU_ASSERT_EMT(pVCpu);
    /* Dispatch to the current guest paging mode's implementation. */
    uintptr_t idx = pVCpu->pgm.s.idxGuestModeData;
    AssertReturn(idx < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
    AssertReturn(g_aPgmGuestModeData[idx].pfnGetPage, VERR_PGM_MODE_IPE);
    return g_aPgmGuestModeData[idx].pfnGetPage(pVCpu, GCPtr, pfFlags, pGCPhys);
}
1964
1965
/**
 * Performs a guest page table walk.
 *
 * The guest should be in paged protect mode or long mode when making a call to
 * this function.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_PAGE_TABLE_NOT_PRESENT on failure.  Check pWalk for details.
 * @retval  VERR_PGM_NOT_USED_IN_MODE if not paging isn't enabled. @a pWalk is
 *          not valid, except enmType is PGMPTWALKGSTTYPE_INVALID.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   GCPtr       The guest virtual address to walk by.
 * @param   pWalk       Where to return the walk result. This is valid for some
 *                      error codes as well.
 */
int pgmGstPtWalk(PVMCPU pVCpu, RTGCPTR GCPtr, PPGMPTWALKGST pWalk)
{
    VMCPU_ASSERT_EMT(pVCpu);
    /* Dispatch on the current guest paging mode; enmType tells the caller
       which member of pWalk->u is valid. */
    switch (pVCpu->pgm.s.enmGuestMode)
    {
        case PGMMODE_32_BIT:
            pWalk->enmType = PGMPTWALKGSTTYPE_32BIT;
            return PGM_GST_NAME_32BIT(Walk)(pVCpu, GCPtr, &pWalk->u.Legacy);

        case PGMMODE_PAE:
        case PGMMODE_PAE_NX:
            pWalk->enmType = PGMPTWALKGSTTYPE_PAE;
            return PGM_GST_NAME_PAE(Walk)(pVCpu, GCPtr, &pWalk->u.Pae);

#if !defined(IN_RC)
        case PGMMODE_AMD64:
        case PGMMODE_AMD64_NX:
            pWalk->enmType = PGMPTWALKGSTTYPE_AMD64;
            return PGM_GST_NAME_AMD64(Walk)(pVCpu, GCPtr, &pWalk->u.Amd64);
#endif

        /* Real and protected mode have no guest paging - nothing to walk. */
        case PGMMODE_REAL:
        case PGMMODE_PROTECTED:
            pWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
            return VERR_PGM_NOT_USED_IN_MODE;

#if defined(IN_RC)
        /* AMD64 guest walks fall into the failure path in raw-mode context. */
        case PGMMODE_AMD64:
        case PGMMODE_AMD64_NX:
#endif
        case PGMMODE_NESTED_32BIT:
        case PGMMODE_NESTED_PAE:
        case PGMMODE_NESTED_AMD64:
        case PGMMODE_EPT:
        default:
            AssertFailed();
            pWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
            return VERR_PGM_NOT_USED_IN_MODE;
    }
}
2023
2024
/**
 * Tries to continue the previous walk.
 *
 * @note    Requires the caller to hold the PGM lock from the first
 *          pgmGstPtWalk() call to the last pgmGstPtWalkNext() call. Otherwise
 *          we cannot use the pointers.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_PAGE_TABLE_NOT_PRESENT on failure.  Check pWalk for details.
 * @retval  VERR_PGM_NOT_USED_IN_MODE if not paging isn't enabled. @a pWalk is
 *          not valid, except enmType is PGMPTWALKGSTTYPE_INVALID.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   GCPtr       The guest virtual address to walk by.
 * @param   pWalk       Pointer to the previous walk result and where to return
 *                      the result of this walk. This is valid for some error
 *                      codes as well.
 */
int pgmGstPtWalkNext(PVMCPU pVCpu, RTGCPTR GCPtr, PPGMPTWALKGST pWalk)
{
    /*
     * We can only handle successfully walks.
     * We also limit ourselves to the next page.
     */
    if (   pWalk->u.Core.fSucceeded
        && GCPtr - pWalk->u.Core.GCPtr == PAGE_SIZE)
    {
        Assert(pWalk->u.Core.uLevel == 0);
        /* Only the AMD64 walker has a fast-path continuation; other modes
           fall through to the full walk at the bottom. */
        if (pWalk->enmType == PGMPTWALKGSTTYPE_AMD64)
        {
            /*
             * AMD64
             */
            if (!pWalk->u.Core.fGigantPage && !pWalk->u.Core.fBigPage)
            {
                /*
                 * We fall back to full walk if the PDE table changes, if any
                 * reserved bits are set, or if the effective page access changes.
                 */
                const uint64_t fPteSame = X86_PTE_P   | X86_PTE_RW | X86_PTE_US | X86_PTE_PWT
                                        | X86_PTE_PCD | X86_PTE_A  | X86_PTE_PAE_NX;
                const uint64_t fPdeSame = X86_PDE_P   | X86_PDE_RW | X86_PDE_US | X86_PDE_PWT
                                        | X86_PDE_PCD | X86_PDE_A  | X86_PDE_PAE_NX | X86_PDE_PS;

                /* Same page directory entry as before? Then just advance the PTE pointer. */
                if ((GCPtr >> X86_PD_PAE_SHIFT) == (pWalk->u.Core.GCPtr >> X86_PD_PAE_SHIFT))
                {
                    if (pWalk->u.Amd64.pPte)
                    {
                        X86PTEPAE Pte;
                        /* Peek at the next PTE in the same page table ([1] = entry after
                           the one the previous walk left pPte pointing at). */
                        Pte.u = pWalk->u.Amd64.pPte[1].u;
                        if (   (Pte.u & fPteSame) == (pWalk->u.Amd64.Pte.u & fPteSame)
                            && !(Pte.u & (pVCpu)->pgm.s.fGstAmd64MbzPteMask))
                        {

                            pWalk->u.Core.GCPtr  = GCPtr;
                            pWalk->u.Core.GCPhys = Pte.u & X86_PTE_PAE_PG_MASK;
                            pWalk->u.Amd64.Pte.u = Pte.u;
                            pWalk->u.Amd64.pPte++;
                            return VINF_SUCCESS;
                        }
                    }
                }
                /* Crossed into the next PD entry but same PDPT entry? Advance the PDE
                   and map the new page table. */
                else if ((GCPtr >> X86_PDPT_SHIFT) == (pWalk->u.Core.GCPtr >> X86_PDPT_SHIFT))
                {
                    Assert(!((GCPtr >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK)); /* Must be first PT entry. */
                    if (pWalk->u.Amd64.pPde)
                    {
                        X86PDEPAE Pde;
                        Pde.u = pWalk->u.Amd64.pPde[1].u;
                        if (   (Pde.u & fPdeSame) == (pWalk->u.Amd64.Pde.u & fPdeSame)
                            && !(Pde.u & (pVCpu)->pgm.s.fGstAmd64MbzPdeMask))
                        {
                            /* Get the new PTE and check out the first entry. */
                            int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, PGM_A20_APPLY(pVCpu, (Pde.u & X86_PDE_PAE_PG_MASK)),
                                                               &pWalk->u.Amd64.pPt);
                            if (RT_SUCCESS(rc))
                            {
                                pWalk->u.Amd64.pPte = &pWalk->u.Amd64.pPt->a[0];
                                X86PTEPAE Pte;
                                Pte.u = pWalk->u.Amd64.pPte->u;
                                if (   (Pte.u & fPteSame) == (pWalk->u.Amd64.Pte.u & fPteSame)
                                    && !(Pte.u & (pVCpu)->pgm.s.fGstAmd64MbzPteMask))
                                {
                                    pWalk->u.Core.GCPtr  = GCPtr;
                                    pWalk->u.Core.GCPhys = Pte.u & X86_PTE_PAE_PG_MASK;
                                    pWalk->u.Amd64.Pte.u = Pte.u;
                                    pWalk->u.Amd64.Pde.u = Pde.u;
                                    pWalk->u.Amd64.pPde++;
                                    return VINF_SUCCESS;
                                }
                            }
                        }
                    }
                }
            }
            /* Big (2M) page: still inside the same page? Then just bump GCPhys. */
            else if (!pWalk->u.Core.fGigantPage)
            {
                if ((GCPtr & X86_PAGE_2M_BASE_MASK) == (pWalk->u.Core.GCPtr & X86_PAGE_2M_BASE_MASK))
                {
                    pWalk->u.Core.GCPtr   = GCPtr;
                    pWalk->u.Core.GCPhys += PAGE_SIZE;
                    return VINF_SUCCESS;
                }
            }
            /* Gigantic (1G) page: same idea. */
            else
            {
                if ((GCPtr & X86_PAGE_1G_BASE_MASK) == (pWalk->u.Core.GCPtr & X86_PAGE_1G_BASE_MASK))
                {
                    pWalk->u.Core.GCPtr   = GCPtr;
                    pWalk->u.Core.GCPhys += PAGE_SIZE;
                    return VINF_SUCCESS;
                }
            }
        }
    }
    /* Case we don't handle.  Do full walk. */
    return pgmGstPtWalk(pVCpu, GCPtr, pWalk);
}
2144
2145
2146/**
2147 * Checks if the page is present.
2148 *
2149 * @returns true if the page is present.
2150 * @returns false if the page is not present.
2151 * @param pVCpu The cross context virtual CPU structure.
2152 * @param GCPtr Address within the page.
2153 */
2154VMMDECL(bool) PGMGstIsPagePresent(PVMCPU pVCpu, RTGCPTR GCPtr)
2155{
2156 VMCPU_ASSERT_EMT(pVCpu);
2157 int rc = PGMGstGetPage(pVCpu, GCPtr, NULL, NULL);
2158 return RT_SUCCESS(rc);
2159}
2160
2161
2162/**
2163 * Sets (replaces) the page flags for a range of pages in the guest's tables.
2164 *
2165 * @returns VBox status code.
2166 * @param pVCpu The cross context virtual CPU structure.
2167 * @param GCPtr The address of the first page.
2168 * @param cb The size of the range in bytes.
2169 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
2170 */
2171VMMDECL(int) PGMGstSetPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
2172{
2173 VMCPU_ASSERT_EMT(pVCpu);
2174 return PGMGstModifyPage(pVCpu, GCPtr, cb, fFlags, 0);
2175}
2176
2177
/**
 * Modify page flags for a range of pages in the guest's tables
 *
 * The existing flags are ANDed with the fMask and ORed with the fFlags.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtr       Virtual address of the first page in the range.
 * @param   cb          Size (in bytes) of the range to apply the modification to.
 * @param   fFlags      The OR  mask - page flags X86_PTE_*, excluding the page mask of course.
 * @param   fMask       The AND mask - page flags X86_PTE_*, excluding the page mask of course.
 *                      Be very CAREFUL when ~'ing constants which could be 32-bit!
 */
VMMDECL(int) PGMGstModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
{
    STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,GstModifyPage), a);
    VMCPU_ASSERT_EMT(pVCpu);

    /*
     * Validate input.
     */
    /* Flags must not overlap the physical address bits. */
    AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
    Assert(cb);

    LogFlow(("PGMGstModifyPage %RGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));

    /*
     * Adjust input.
     */
    /* Round the range out to whole pages: extend cb by the in-page offset,
       align it up, and page-align the start address. */
    cb += GCPtr & PAGE_OFFSET_MASK;
    cb = RT_ALIGN_Z(cb, PAGE_SIZE);
    GCPtr = (GCPtr & PAGE_BASE_GC_MASK);

    /*
     * Call worker.
     */
    /* Dispatch to the current guest paging mode's ModifyPage implementation. */
    uintptr_t idx = pVCpu->pgm.s.idxGuestModeData;
    AssertReturn(idx < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
    AssertReturn(g_aPgmGuestModeData[idx].pfnModifyPage, VERR_PGM_MODE_IPE);
    int rc = g_aPgmGuestModeData[idx].pfnModifyPage(pVCpu, GCPtr, cb, fFlags, fMask);

    STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,GstModifyPage), a);
    return rc;
}
2222
2223
2224#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
2225
/**
 * Performs the lazy mapping of the 32-bit guest PD.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   ppPd        Where to return the pointer to the mapping.  This is
 *                      always set.  NULL on failure.
 */
int pgmGstLazyMap32BitPD(PVMCPU pVCpu, PX86PD *ppPd)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    pgmLock(pVM);

    /* Lazy mapping must only happen once; callers check the cached pointer first. */
    Assert(!pVCpu->pgm.s.CTX_SUFF(pGst32BitPd));

    /* Resolve the guest CR3 physical page and map it into host context. */
    RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAGE_MASK;
    PPGMPAGE pPage;
    int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
    if (RT_SUCCESS(rc))
    {
        RTHCPTR HCPtrGuestCR3;
        rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
        if (RT_SUCCESS(rc))
        {
            /* Cache the mapping for both ring-3 and ring-0 contexts. */
            pVCpu->pgm.s.pGst32BitPdR3 = (R3PTRTYPE(PX86PD))HCPtrGuestCR3;
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
            pVCpu->pgm.s.pGst32BitPdR0 = (R0PTRTYPE(PX86PD))HCPtrGuestCR3;
# endif
            *ppPd = (PX86PD)HCPtrGuestCR3;

            pgmUnlock(pVM);
            return VINF_SUCCESS;
        }

        AssertRC(rc);
    }
    pgmUnlock(pVM);

    *ppPd = NULL;
    return rc;
}
2267
2268
/**
 * Performs the lazy mapping of the PAE guest PDPT.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   ppPdpt      Where to return the pointer to the mapping.  This is
 *                      always set.  NULL on failure.
 */
int pgmGstLazyMapPaePDPT(PVMCPU pVCpu, PX86PDPT *ppPdpt)
{
    /* Lazy mapping must only happen once; callers check the cached pointer first. */
    Assert(!pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt));
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    pgmLock(pVM);

    /* Resolve the guest CR3 physical page (PAE mask) and map it into host context. */
    RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAE_PAGE_MASK;
    PPGMPAGE pPage;
    int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
    if (RT_SUCCESS(rc))
    {
        RTHCPTR HCPtrGuestCR3;
        rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
        if (RT_SUCCESS(rc))
        {
            /* Cache the mapping for both ring-3 and ring-0 contexts. */
            pVCpu->pgm.s.pGstPaePdptR3 = (R3PTRTYPE(PX86PDPT))HCPtrGuestCR3;
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
            pVCpu->pgm.s.pGstPaePdptR0 = (R0PTRTYPE(PX86PDPT))HCPtrGuestCR3;
# endif
            *ppPdpt = (PX86PDPT)HCPtrGuestCR3;

            pgmUnlock(pVM);
            return VINF_SUCCESS;
        }

        AssertRC(rc);
    }

    pgmUnlock(pVM);
    *ppPdpt = NULL;
    return rc;
}
2309
2310
/**
 * Performs the lazy mapping / updating of a PAE guest PD.
 *
 * @returns Pointer to the mapping.
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   iPdpt       Which PD entry to map (0..3).
 * @param   ppPd        Where to return the pointer to the mapping.  This is
 *                      always set.
 */
int pgmGstLazyMapPaePD(PVMCPU pVCpu, uint32_t iPdpt, PX86PDPAE *ppPd)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    pgmLock(pVM);

    /* The PDPT must already be mapped and the selected entry present. */
    PX86PDPT pGuestPDPT = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
    Assert(pGuestPDPT);
    Assert(pGuestPDPT->a[iPdpt].n.u1Present);
    RTGCPHYS GCPhys = pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK;
    /* Did the PD address change since we last mapped this slot?  If so the
       RC mapping must be refreshed below. */
    bool const fChanged = pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] != GCPhys;

    PPGMPAGE pPage;
    int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
    if (RT_SUCCESS(rc))
    {
        RTRCPTR RCPtr = NIL_RTRCPTR;
        RTHCPTR HCPtr = NIL_RTHCPTR;
#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
        rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, &HCPtr);
        AssertRC(rc);
#endif
        if (RT_SUCCESS(rc) && fChanged)
        {
            /* Re-map the PD into the fixed raw-mode context slot for this PDPT index. */
            RCPtr = (RTRCPTR)(RTRCUINTPTR)(pVM->pgm.s.GCPtrCR3Mapping + (1 + iPdpt) * PAGE_SIZE);
            rc = PGMMap(pVM, (RTRCUINTPTR)RCPtr, PGM_PAGE_GET_HCPHYS(pPage), PAGE_SIZE, 0);
        }
        if (RT_SUCCESS(rc))
        {
            /* Cache the mapping for all contexts. */
            pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = (R3PTRTYPE(PX86PDPAE))HCPtr;
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
            pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = (R0PTRTYPE(PX86PDPAE))HCPtr;
# endif
            if (fChanged)
            {
                pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = GCPhys;
                pVCpu->pgm.s.apGstPaePDsRC[iPdpt] = (RCPTRTYPE(PX86PDPAE))RCPtr;
            }

            *ppPd = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
            pgmUnlock(pVM);
            return VINF_SUCCESS;
        }
    }

    /* Invalid page or some failure, invalidate the entry. */
    pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = NIL_RTGCPHYS;
    pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = 0;
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = 0;
# endif
    pVCpu->pgm.s.apGstPaePDsRC[iPdpt] = 0;

    pgmUnlock(pVM);
    return rc;
}
2376
2377#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
2378#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
/**
 * Performs the lazy mapping of the AMD64 guest PML4 table.
 *
 * (The previous comment said "32-bit guest PD", which was a copy-paste error:
 * this function maps and caches the long-mode PML4, see pGstAmd64Pml4R3/R0.)
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   ppPml4      Where to return the pointer to the mapping.  This will
 *                      always be set.  NULL on failure.
 */
int pgmGstLazyMapPml4(PVMCPU pVCpu, PX86PML4 *ppPml4)
{
    /* Lazy mapping must only happen once; callers check the cached pointer first. */
    Assert(!pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4));
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    pgmLock(pVM);

    /* Resolve the guest CR3 physical page (long-mode mask) and map it. */
    RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_AMD64_PAGE_MASK;
    PPGMPAGE pPage;
    int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
    if (RT_SUCCESS(rc))
    {
        RTHCPTR HCPtrGuestCR3;
        rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
        if (RT_SUCCESS(rc))
        {
            /* Cache the mapping for both ring-3 and ring-0 contexts. */
            pVCpu->pgm.s.pGstAmd64Pml4R3 = (R3PTRTYPE(PX86PML4))HCPtrGuestCR3;
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
            pVCpu->pgm.s.pGstAmd64Pml4R0 = (R0PTRTYPE(PX86PML4))HCPtrGuestCR3;
# endif
            *ppPml4 = (PX86PML4)HCPtrGuestCR3;

            pgmUnlock(pVM);
            return VINF_SUCCESS;
        }
    }

    pgmUnlock(pVM);
    *ppPml4 = NULL;
    return rc;
}
2417#endif
2418
2419
2420/**
2421 * Gets the PAE PDPEs values cached by the CPU.
2422 *
2423 * @returns VBox status code.
2424 * @param pVCpu The cross context virtual CPU structure.
2425 * @param paPdpes Where to return the four PDPEs. The array
2426 * pointed to must have 4 entries.
2427 */
2428VMM_INT_DECL(int) PGMGstGetPaePdpes(PVMCPU pVCpu, PX86PDPE paPdpes)
2429{
2430 Assert(pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
2431
2432 paPdpes[0] = pVCpu->pgm.s.aGstPaePdpeRegs[0];
2433 paPdpes[1] = pVCpu->pgm.s.aGstPaePdpeRegs[1];
2434 paPdpes[2] = pVCpu->pgm.s.aGstPaePdpeRegs[2];
2435 paPdpes[3] = pVCpu->pgm.s.aGstPaePdpeRegs[3];
2436 return VINF_SUCCESS;
2437}
2438
2439
/**
 * Sets the PAE PDPEs values cached by the CPU.
 *
 * @remarks This must be called *AFTER* PGMUpdateCR3.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   paPdpes     The four PDPE values. The array pointed to must
 *                      have exactly 4 entries.
 *
 * @remarks No-long-jump zone!!!
 */
VMM_INT_DECL(void) PGMGstUpdatePaePdpes(PVMCPU pVCpu, PCX86PDPE paPdpes)
{
    Assert(pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);

    for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->pgm.s.aGstPaePdpeRegs); i++)
    {
        /* Only touch entries that actually changed. */
        if (pVCpu->pgm.s.aGstPaePdpeRegs[i].u != paPdpes[i].u)
        {
            pVCpu->pgm.s.aGstPaePdpeRegs[i] = paPdpes[i];

            /* Force lazy remapping if it changed in any way. */
            pVCpu->pgm.s.apGstPaePDsR3[i]   = 0;
# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
            pVCpu->pgm.s.apGstPaePDsR0[i]   = 0;
# endif
            pVCpu->pgm.s.apGstPaePDsRC[i]   = 0;
            pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
        }
    }

    /* All four PDPEs are now in sync with the caller's values. */
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
}
2473
2474
2475/**
2476 * Gets the current CR3 register value for the shadow memory context.
2477 * @returns CR3 value.
2478 * @param pVCpu The cross context virtual CPU structure.
2479 */
2480VMMDECL(RTHCPHYS) PGMGetHyperCR3(PVMCPU pVCpu)
2481{
2482 PPGMPOOLPAGE pPoolPage = pVCpu->pgm.s.CTX_SUFF(pShwPageCR3);
2483 AssertPtrReturn(pPoolPage, 0);
2484 return pPoolPage->Core.Key;
2485}
2486
2487
2488/**
2489 * Gets the current CR3 register value for the nested memory context.
2490 * @returns CR3 value.
2491 * @param pVCpu The cross context virtual CPU structure.
2492 * @param enmShadowMode The shadow paging mode.
2493 */
2494VMMDECL(RTHCPHYS) PGMGetNestedCR3(PVMCPU pVCpu, PGMMODE enmShadowMode)
2495{
2496 NOREF(enmShadowMode);
2497 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
2498 return pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key;
2499}
2500
2501
/**
 * Gets the current CR3 register value for the HC intermediate memory context.
 * @returns CR3 value.
 * @param   pVM         The cross context VM structure.
 */
VMMDECL(RTHCPHYS) PGMGetInterHCCR3(PVM pVM)
{
    switch (pVM->pgm.s.enmHostMode)
    {
        case SUPPAGINGMODE_32_BIT:
        case SUPPAGINGMODE_32_BIT_GLOBAL:
            return pVM->pgm.s.HCPhysInterPD;

        case SUPPAGINGMODE_PAE:
        case SUPPAGINGMODE_PAE_GLOBAL:
        case SUPPAGINGMODE_PAE_NX:
        case SUPPAGINGMODE_PAE_GLOBAL_NX:
            return pVM->pgm.s.HCPhysInterPaePDPT;

        /* NOTE(review): AMD64 hosts also return the PAE PDPT here rather than
           the PML4 (compare PGMGetInterRCCR3 which returns HCPhysInterPaePML4
           for AMD64 shadow mode) - presumably intentional for the HC
           intermediate context; confirm before changing. */
        case SUPPAGINGMODE_AMD64:
        case SUPPAGINGMODE_AMD64_GLOBAL:
        case SUPPAGINGMODE_AMD64_NX:
        case SUPPAGINGMODE_AMD64_GLOBAL_NX:
            return pVM->pgm.s.HCPhysInterPaePDPT;

        default:
            AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode));
            return NIL_RTHCPHYS;
    }
}
2532
2533
/**
 * Gets the current CR3 register value for the RC intermediate memory context.
 * @returns CR3 value.  0 for nested/EPT modes (no intermediate RC context),
 *          NIL_RTHCPHYS for unexpected shadow modes.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 */
VMMDECL(RTHCPHYS) PGMGetInterRCCR3(PVM pVM, PVMCPU pVCpu)
{
    /* Pick the intermediate root table matching the shadow paging mode. */
    switch (pVCpu->pgm.s.enmShadowMode)
    {
        case PGMMODE_32_BIT:
            return pVM->pgm.s.HCPhysInterPD;

        case PGMMODE_PAE:
        case PGMMODE_PAE_NX:
            return pVM->pgm.s.HCPhysInterPaePDPT;

        case PGMMODE_AMD64:
        case PGMMODE_AMD64_NX:
            return pVM->pgm.s.HCPhysInterPaePML4;

        case PGMMODE_NESTED_32BIT:
        case PGMMODE_NESTED_PAE:
        case PGMMODE_NESTED_AMD64:
        case PGMMODE_EPT:
            return 0; /* not relevant */

        default:
            AssertMsgFailed(("enmShadowMode=%d\n", pVCpu->pgm.s.enmShadowMode));
            return NIL_RTHCPHYS;
    }
}
2566
2567
2568/**
2569 * Gets the CR3 register value for the 32-Bit intermediate memory context.
2570 * @returns CR3 value.
2571 * @param pVM The cross context VM structure.
2572 */
2573VMMDECL(RTHCPHYS) PGMGetInter32BitCR3(PVM pVM)
2574{
2575 return pVM->pgm.s.HCPhysInterPD;
2576}
2577
2578
2579/**
2580 * Gets the CR3 register value for the PAE intermediate memory context.
2581 * @returns CR3 value.
2582 * @param pVM The cross context VM structure.
2583 */
2584VMMDECL(RTHCPHYS) PGMGetInterPaeCR3(PVM pVM)
2585{
2586 return pVM->pgm.s.HCPhysInterPaePDPT;
2587}
2588
2589
2590/**
2591 * Gets the CR3 register value for the AMD64 intermediate memory context.
2592 * @returns CR3 value.
2593 * @param pVM The cross context VM structure.
2594 */
2595VMMDECL(RTHCPHYS) PGMGetInterAmd64CR3(PVM pVM)
2596{
2597 return pVM->pgm.s.HCPhysInterPaePML4;
2598}
2599
2600
/**
 * Performs and schedules necessary updates following a CR3 load or reload.
 *
 * This will normally involve mapping the guest PD or nPDPT
 *
 * @returns VBox status code.
 * @retval  VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
 *          safely be ignored and overridden since the FF will be set too then.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cr3         The new cr3.
 * @param   fGlobal     Indicates whether this is a global flush or not.
 */
VMMDECL(int) PGMFlushTLB(PVMCPU pVCpu, uint64_t cr3, bool fGlobal)
{
    STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLB), a);
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    VMCPU_ASSERT_EMT(pVCpu);

    /*
     * Always flag the necessary updates; necessary for hardware acceleration
     */
    /** @todo optimize this, it shouldn't always be necessary. */
    VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
    if (fGlobal)
        VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
    LogFlow(("PGMFlushTLB: cr3=%RX64 OldCr3=%RX64 fGlobal=%d\n", cr3, pVCpu->pgm.s.GCPhysCR3, fGlobal));

    /*
     * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
     */
    int rc = VINF_SUCCESS;
    RTGCPHYS GCPhysCR3;
    /* Mask off the low control bits of CR3 according to the guest paging mode
       to get the physical address of the top-level paging structure. */
    switch (pVCpu->pgm.s.enmGuestMode)
    {
        case PGMMODE_PAE:
        case PGMMODE_PAE_NX:
            GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
            break;
        case PGMMODE_AMD64:
        case PGMMODE_AMD64_NX:
            GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
            break;
        default:
            GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
            break;
    }
    PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);

    RTGCPHYS const GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
    if (GCPhysOldCR3 != GCPhysCR3)
    {
        uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
        AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
        AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);

        /* Map the new CR3; on VINF_PGM_SYNC_CR3 roll GCPhysCR3 back and
           reschedule the mapping via the sync flags. */
        pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
        rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3);
        if (RT_LIKELY(rc == VINF_SUCCESS))
        {
            if (pgmMapAreMappingsFloating(pVM))
                pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
        }
        else
        {
            AssertMsg(rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
            Assert(VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_PGM_SYNC_CR3));
            pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3;
            pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MAP_CR3;
            if (pgmMapAreMappingsFloating(pVM))
                pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;
        }

        if (fGlobal)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBNewCR3Global));
        else
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBNewCR3));
    }
    else
    {
        /* CR3 is unchanged: no remapping needed, but flush dirty-page
           tracking and apply any pending CR3 monitoring update. */
# ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
        PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
        if (pPool->cDirtyPages)
        {
            pgmLock(pVM);
            pgmPoolResetDirtyPages(pVM);
            pgmUnlock(pVM);
        }
# endif
        /*
         * Check if we have a pending update of the CR3 monitoring.
         */
        if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
        {
            pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
            Assert(!pVM->pgm.s.fMappingsFixed); Assert(pgmMapAreMappingsEnabled(pVM));
        }
        if (fGlobal)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBSameCR3Global));
        else
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBSameCR3));
    }

    /* Keep IEM's TLB caches in step with the real flush. */
    IEMTlbInvalidateAll(pVCpu, false /*fVmm*/);
    STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLB), a);
    return rc;
}
2708
2709
2710/**
2711 * Performs and schedules necessary updates following a CR3 load or reload when
2712 * using nested or extended paging.
2713 *
2714 * This API is an alternative to PGMFlushTLB that avoids actually flushing the
2715 * TLB and triggering a SyncCR3.
2716 *
2717 * This will normally involve mapping the guest PD or nPDPT
2718 *
2719 * @returns VBox status code.
2720 * @retval VINF_SUCCESS.
2721 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync (not for nested
2722 * paging modes). This can safely be ignored and overridden since the
2723 * FF will be set too then.
2724 * @param pVCpu The cross context virtual CPU structure.
2725 * @param cr3 The new cr3.
2726 */
2727VMMDECL(int) PGMUpdateCR3(PVMCPU pVCpu, uint64_t cr3)
2728{
2729 VMCPU_ASSERT_EMT(pVCpu);
2730 LogFlow(("PGMUpdateCR3: cr3=%RX64 OldCr3=%RX64\n", cr3, pVCpu->pgm.s.GCPhysCR3));
2731
2732 /* We assume we're only called in nested paging mode. */
2733 Assert(pVCpu->CTX_SUFF(pVM)->pgm.s.fNestedPaging || pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
2734 Assert(!pgmMapAreMappingsEnabled(pVCpu->CTX_SUFF(pVM)));
2735 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3));
2736
2737 /*
2738 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
2739 */
2740 int rc = VINF_SUCCESS;
2741 RTGCPHYS GCPhysCR3;
2742 switch (pVCpu->pgm.s.enmGuestMode)
2743 {
2744 case PGMMODE_PAE:
2745 case PGMMODE_PAE_NX:
2746 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
2747 break;
2748 case PGMMODE_AMD64:
2749 case PGMMODE_AMD64_NX:
2750 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
2751 break;
2752 default:
2753 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
2754 break;
2755 }
2756 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
2757
2758 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
2759 {
2760 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2761 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2762 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
2763
2764 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2765 rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3);
2766
2767 AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */ /** @todo this isn't true for the mac, but we need hw to test/fix this. */
2768 }
2769
2770 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
2771 return rc;
2772}
2773
2774
2775/**
2776 * Synchronize the paging structures.
2777 *
2778 * This function is called in response to the VM_FF_PGM_SYNC_CR3 and
2779 * VM_FF_PGM_SYNC_CR3_NONGLOBAL. Those two force action flags are set
2780 * in several places, most importantly whenever the CR3 is loaded.
2781 *
2782 * @returns VBox status code. May return VINF_PGM_SYNC_CR3 in RC/R0.
2783 * @retval VERR_PGM_NO_HYPERVISOR_ADDRESS in raw-mode when we're unable to map
2784 * the VMM into guest context.
2785 * @param pVCpu The cross context virtual CPU structure.
2786 * @param cr0 Guest context CR0 register
2787 * @param cr3 Guest context CR3 register
2788 * @param cr4 Guest context CR4 register
2789 * @param fGlobal Including global page directories or not
2790 */
2791VMMDECL(int) PGMSyncCR3(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
2792{
2793 int rc;
2794
2795 VMCPU_ASSERT_EMT(pVCpu);
2796
2797 /*
2798 * The pool may have pending stuff and even require a return to ring-3 to
2799 * clear the whole thing.
2800 */
2801 rc = pgmPoolSyncCR3(pVCpu);
2802 if (rc != VINF_SUCCESS)
2803 return rc;
2804
2805 /*
2806 * We might be called when we shouldn't.
2807 *
2808 * The mode switching will ensure that the PD is resynced after every mode
2809 * switch. So, if we find ourselves here when in protected or real mode
2810 * we can safely clear the FF and return immediately.
2811 */
2812 if (pVCpu->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
2813 {
2814 Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
2815 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2816 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2817 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2818 return VINF_SUCCESS;
2819 }
2820
2821 /* If global pages are not supported, then all flushes are global. */
2822 if (!(cr4 & X86_CR4_PGE))
2823 fGlobal = true;
2824 LogFlow(("PGMSyncCR3: cr0=%RX64 cr3=%RX64 cr4=%RX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
2825 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)));
2826
2827 /*
2828 * Check if we need to finish an aborted MapCR3 call (see PGMFlushTLB).
2829 * This should be done before SyncCR3.
2830 */
2831 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MAP_CR3)
2832 {
2833 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MAP_CR3;
2834
2835 RTGCPHYS GCPhysCR3Old = pVCpu->pgm.s.GCPhysCR3; NOREF(GCPhysCR3Old);
2836 RTGCPHYS GCPhysCR3;
2837 switch (pVCpu->pgm.s.enmGuestMode)
2838 {
2839 case PGMMODE_PAE:
2840 case PGMMODE_PAE_NX:
2841 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
2842 break;
2843 case PGMMODE_AMD64:
2844 case PGMMODE_AMD64_NX:
2845 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
2846 break;
2847 default:
2848 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
2849 break;
2850 }
2851 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
2852
2853 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
2854 {
2855 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2856 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2857 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
2858 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2859 rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3);
2860 }
2861
2862 /* Make sure we check for pending pgm pool syncs as we clear VMCPU_FF_PGM_SYNC_CR3 later on! */
2863 if ( rc == VINF_PGM_SYNC_CR3
2864 || (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL))
2865 {
2866 Log(("PGMSyncCR3: pending pgm pool sync after MapCR3!\n"));
2867#ifdef IN_RING3
2868 rc = pgmPoolSyncCR3(pVCpu);
2869#else
2870 if (rc == VINF_PGM_SYNC_CR3)
2871 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3Old;
2872 return VINF_PGM_SYNC_CR3;
2873#endif
2874 }
2875 AssertRCReturn(rc, rc);
2876 AssertRCSuccessReturn(rc, VERR_IPE_UNEXPECTED_INFO_STATUS);
2877 }
2878
2879 /*
2880 * Let the 'Bth' function do the work and we'll just keep track of the flags.
2881 */
2882 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
2883
2884 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2885 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2886 AssertReturn(g_aPgmBothModeData[idxBth].pfnSyncCR3, VERR_PGM_MODE_IPE);
2887 rc = g_aPgmBothModeData[idxBth].pfnSyncCR3(pVCpu, cr0, cr3, cr4, fGlobal);
2888
2889 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
2890 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
2891 if (rc == VINF_SUCCESS)
2892 {
2893 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)
2894 {
2895 /* Go back to ring 3 if a pgm pool sync is again pending. */
2896 return VINF_PGM_SYNC_CR3;
2897 }
2898
2899 if (!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
2900 {
2901 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2902 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2903 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2904 }
2905
2906 /*
2907 * Check if we have a pending update of the CR3 monitoring.
2908 */
2909 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
2910 {
2911 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
2912 Assert(!pVCpu->CTX_SUFF(pVM)->pgm.s.fMappingsFixed);
2913 Assert(pgmMapAreMappingsEnabled(pVCpu->CTX_SUFF(pVM)));
2914 }
2915 }
2916
2917 /*
2918 * Now flush the CR3 (guest context).
2919 */
2920 if (rc == VINF_SUCCESS)
2921 PGM_INVL_VCPU_TLBS(pVCpu);
2922 return rc;
2923}
2924
2925
2926/**
2927 * Called whenever CR0 or CR4 in a way which may affect the paging mode.
2928 *
2929 * @returns VBox status code, with the following informational code for
2930 * VM scheduling.
2931 * @retval VINF_SUCCESS if the was no change, or it was successfully dealt with.
2932 * @retval VINF_PGM_CHANGE_MODE if we're in RC the mode changes. This will
2933 * NOT be returned in ring-3 or ring-0.
2934 * @retval VINF_EM_SUSPEND or VINF_EM_OFF on a fatal runtime error. (R3 only)
2935 *
2936 * @param pVCpu The cross context virtual CPU structure.
2937 * @param cr0 The new cr0.
2938 * @param cr4 The new cr4.
2939 * @param efer The new extended feature enable register.
2940 */
2941VMMDECL(int) PGMChangeMode(PVMCPU pVCpu, uint64_t cr0, uint64_t cr4, uint64_t efer)
2942{
2943 VMCPU_ASSERT_EMT(pVCpu);
2944
2945 /*
2946 * Calc the new guest mode.
2947 *
2948 * Note! We check PG before PE and without requiring PE because of the
2949 * special AMD-V paged real mode (APM vol 2, rev 3.28, 15.9).
2950 */
2951 PGMMODE enmGuestMode;
2952 if (cr0 & X86_CR0_PG)
2953 {
2954 if (!(cr4 & X86_CR4_PAE))
2955 {
2956 bool const fPse = !!(cr4 & X86_CR4_PSE);
2957 if (pVCpu->pgm.s.fGst32BitPageSizeExtension != fPse)
2958 Log(("PGMChangeMode: CR4.PSE %d -> %d\n", pVCpu->pgm.s.fGst32BitPageSizeExtension, fPse));
2959 pVCpu->pgm.s.fGst32BitPageSizeExtension = fPse;
2960 enmGuestMode = PGMMODE_32_BIT;
2961 }
2962 else if (!(efer & MSR_K6_EFER_LME))
2963 {
2964 if (!(efer & MSR_K6_EFER_NXE))
2965 enmGuestMode = PGMMODE_PAE;
2966 else
2967 enmGuestMode = PGMMODE_PAE_NX;
2968 }
2969 else
2970 {
2971 if (!(efer & MSR_K6_EFER_NXE))
2972 enmGuestMode = PGMMODE_AMD64;
2973 else
2974 enmGuestMode = PGMMODE_AMD64_NX;
2975 }
2976 }
2977 else if (!(cr0 & X86_CR0_PE))
2978 enmGuestMode = PGMMODE_REAL;
2979 else
2980 enmGuestMode = PGMMODE_PROTECTED;
2981
2982 /*
2983 * Did it change?
2984 */
2985 if (pVCpu->pgm.s.enmGuestMode == enmGuestMode)
2986 return VINF_SUCCESS;
2987
2988 /* Flush the TLB */
2989 PGM_INVL_VCPU_TLBS(pVCpu);
2990
2991#ifndef IN_RC
2992 return PGMHCChangeMode(pVCpu->CTX_SUFF(pVM), pVCpu, enmGuestMode);
2993#else
2994 LogFlow(("PGMChangeMode: returns VINF_PGM_CHANGE_MODE.\n"));
2995 return VINF_PGM_CHANGE_MODE;
2996#endif
2997}
2998
2999#ifndef IN_RC
3000
3001/**
3002 * Converts a PGMMODE value to a PGM_TYPE_* \#define.
3003 *
3004 * @returns PGM_TYPE_*.
3005 * @param pgmMode The mode value to convert.
3006 */
3007DECLINLINE(unsigned) pgmModeToType(PGMMODE pgmMode)
3008{
3009 switch (pgmMode)
3010 {
3011 case PGMMODE_REAL: return PGM_TYPE_REAL;
3012 case PGMMODE_PROTECTED: return PGM_TYPE_PROT;
3013 case PGMMODE_32_BIT: return PGM_TYPE_32BIT;
3014 case PGMMODE_PAE:
3015 case PGMMODE_PAE_NX: return PGM_TYPE_PAE;
3016 case PGMMODE_AMD64:
3017 case PGMMODE_AMD64_NX: return PGM_TYPE_AMD64;
3018 case PGMMODE_NESTED_32BIT: return PGM_TYPE_NESTED_32BIT;
3019 case PGMMODE_NESTED_PAE: return PGM_TYPE_NESTED_PAE;
3020 case PGMMODE_NESTED_AMD64: return PGM_TYPE_NESTED_AMD64;
3021 case PGMMODE_EPT: return PGM_TYPE_EPT;
3022 case PGMMODE_NONE: return PGM_TYPE_NONE;
3023 default:
3024 AssertFatalMsgFailed(("pgmMode=%d\n", pgmMode));
3025 }
3026}
3027
3028
3029/**
3030 * Calculates the shadow paging mode.
3031 *
3032 * @returns The shadow paging mode.
3033 * @param pVM The cross context VM structure.
3034 * @param enmGuestMode The guest mode.
3035 * @param enmHostMode The host mode.
3036 * @param enmShadowMode The current shadow mode.
3037 * @param penmSwitcher Where to store the switcher to use.
3038 * VMMSWITCHER_INVALID means no change.
3039 */
3040static PGMMODE pgmCalcShadowMode(PVM pVM, PGMMODE enmGuestMode, SUPPAGINGMODE enmHostMode, PGMMODE enmShadowMode,
3041 VMMSWITCHER *penmSwitcher)
3042{
3043 VMMSWITCHER enmSwitcher = VMMSWITCHER_INVALID;
3044 switch (enmGuestMode)
3045 {
3046 /*
3047 * When switching to real or protected mode we don't change
3048 * anything since it's likely that we'll switch back pretty soon.
3049 *
3050 * During pgmR3InitPaging we'll end up here with PGMMODE_INVALID
3051 * and is supposed to determine which shadow paging and switcher to
3052 * use during init.
3053 */
3054 case PGMMODE_REAL:
3055 case PGMMODE_PROTECTED:
3056 if ( enmShadowMode != PGMMODE_INVALID
3057 && VM_IS_RAW_MODE_ENABLED(pVM) /* always switch in hm and nem modes! */)
3058 break; /* (no change) */
3059
3060 switch (enmHostMode)
3061 {
3062 case SUPPAGINGMODE_32_BIT:
3063 case SUPPAGINGMODE_32_BIT_GLOBAL:
3064 enmShadowMode = PGMMODE_32_BIT;
3065 enmSwitcher = VMMSWITCHER_32_TO_32;
3066 break;
3067
3068 case SUPPAGINGMODE_PAE:
3069 case SUPPAGINGMODE_PAE_NX:
3070 case SUPPAGINGMODE_PAE_GLOBAL:
3071 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3072 enmShadowMode = PGMMODE_PAE;
3073 enmSwitcher = VMMSWITCHER_PAE_TO_PAE;
3074 break;
3075
3076 case SUPPAGINGMODE_AMD64:
3077 case SUPPAGINGMODE_AMD64_GLOBAL:
3078 case SUPPAGINGMODE_AMD64_NX:
3079 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3080 enmShadowMode = PGMMODE_PAE;
3081 enmSwitcher = VMMSWITCHER_AMD64_TO_PAE;
3082 break;
3083
3084 default:
3085 AssertLogRelMsgFailedReturnStmt(("enmHostMode=%d\n", enmHostMode),
3086 *penmSwitcher = VMMSWITCHER_INVALID, PGMMODE_INVALID);
3087 }
3088 break;
3089
3090 case PGMMODE_32_BIT:
3091 switch (enmHostMode)
3092 {
3093 case SUPPAGINGMODE_32_BIT:
3094 case SUPPAGINGMODE_32_BIT_GLOBAL:
3095 enmShadowMode = PGMMODE_32_BIT;
3096 enmSwitcher = VMMSWITCHER_32_TO_32;
3097 break;
3098
3099 case SUPPAGINGMODE_PAE:
3100 case SUPPAGINGMODE_PAE_NX:
3101 case SUPPAGINGMODE_PAE_GLOBAL:
3102 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3103 enmShadowMode = PGMMODE_PAE;
3104 enmSwitcher = VMMSWITCHER_PAE_TO_PAE;
3105 break;
3106
3107 case SUPPAGINGMODE_AMD64:
3108 case SUPPAGINGMODE_AMD64_GLOBAL:
3109 case SUPPAGINGMODE_AMD64_NX:
3110 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3111 enmShadowMode = PGMMODE_PAE;
3112 enmSwitcher = VMMSWITCHER_AMD64_TO_PAE;
3113 break;
3114
3115 default:
3116 AssertLogRelMsgFailedReturnStmt(("enmHostMode=%d\n", enmHostMode),
3117 *penmSwitcher = VMMSWITCHER_INVALID, PGMMODE_INVALID);
3118 }
3119 break;
3120
3121 case PGMMODE_PAE:
3122 case PGMMODE_PAE_NX: /** @todo This might require more switchers and guest+both modes. */
3123 switch (enmHostMode)
3124 {
3125 case SUPPAGINGMODE_32_BIT:
3126 case SUPPAGINGMODE_32_BIT_GLOBAL:
3127 enmShadowMode = PGMMODE_PAE;
3128 enmSwitcher = VMMSWITCHER_32_TO_PAE;
3129 break;
3130
3131 case SUPPAGINGMODE_PAE:
3132 case SUPPAGINGMODE_PAE_NX:
3133 case SUPPAGINGMODE_PAE_GLOBAL:
3134 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3135 enmShadowMode = PGMMODE_PAE;
3136 enmSwitcher = VMMSWITCHER_PAE_TO_PAE;
3137 break;
3138
3139 case SUPPAGINGMODE_AMD64:
3140 case SUPPAGINGMODE_AMD64_GLOBAL:
3141 case SUPPAGINGMODE_AMD64_NX:
3142 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3143 enmShadowMode = PGMMODE_PAE;
3144 enmSwitcher = VMMSWITCHER_AMD64_TO_PAE;
3145 break;
3146
3147 default:
3148 AssertLogRelMsgFailedReturnStmt(("enmHostMode=%d\n", enmHostMode),
3149 *penmSwitcher = VMMSWITCHER_INVALID, PGMMODE_INVALID);
3150 }
3151 break;
3152
3153 case PGMMODE_AMD64:
3154 case PGMMODE_AMD64_NX:
3155 switch (enmHostMode)
3156 {
3157 case SUPPAGINGMODE_32_BIT:
3158 case SUPPAGINGMODE_32_BIT_GLOBAL:
3159 enmShadowMode = PGMMODE_AMD64;
3160 enmSwitcher = VMMSWITCHER_32_TO_AMD64;
3161 break;
3162
3163 case SUPPAGINGMODE_PAE:
3164 case SUPPAGINGMODE_PAE_NX:
3165 case SUPPAGINGMODE_PAE_GLOBAL:
3166 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3167 enmShadowMode = PGMMODE_AMD64;
3168 enmSwitcher = VMMSWITCHER_PAE_TO_AMD64;
3169 break;
3170
3171 case SUPPAGINGMODE_AMD64:
3172 case SUPPAGINGMODE_AMD64_GLOBAL:
3173 case SUPPAGINGMODE_AMD64_NX:
3174 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3175 enmShadowMode = PGMMODE_AMD64;
3176 enmSwitcher = VMMSWITCHER_AMD64_TO_AMD64;
3177 break;
3178
3179 default:
3180 AssertLogRelMsgFailedReturnStmt(("enmHostMode=%d\n", enmHostMode),
3181 *penmSwitcher = VMMSWITCHER_INVALID, PGMMODE_INVALID);
3182 }
3183 break;
3184
3185 default:
3186 AssertLogRelMsgFailedReturnStmt(("enmGuestMode=%d\n", enmGuestMode),
3187 *penmSwitcher = VMMSWITCHER_INVALID, PGMMODE_INVALID);
3188 }
3189
3190 /*
3191 * Override the shadow mode when NEM or nested paging is active.
3192 */
3193 if (VM_IS_NEM_ENABLED(pVM))
3194 {
3195 pVM->pgm.s.fNestedPaging = true;
3196 enmShadowMode = PGMMODE_NONE;
3197 }
3198 else
3199 {
3200 bool fNestedPaging = HMIsNestedPagingActive(pVM);
3201 pVM->pgm.s.fNestedPaging = fNestedPaging;
3202 if (fNestedPaging)
3203 {
3204 if (HMIsVmxActive(pVM))
3205 enmShadowMode = PGMMODE_EPT;
3206 else
3207 {
3208 /* The nested SVM paging depends on the host one. */
3209 Assert(HMIsSvmActive(pVM));
3210 if ( enmGuestMode == PGMMODE_AMD64
3211 || enmGuestMode == PGMMODE_AMD64_NX)
3212 enmShadowMode = PGMMODE_NESTED_AMD64;
3213 else
3214 switch (pVM->pgm.s.enmHostMode)
3215 {
3216 case SUPPAGINGMODE_32_BIT:
3217 case SUPPAGINGMODE_32_BIT_GLOBAL:
3218 enmShadowMode = PGMMODE_NESTED_32BIT;
3219 break;
3220
3221 case SUPPAGINGMODE_PAE:
3222 case SUPPAGINGMODE_PAE_GLOBAL:
3223 case SUPPAGINGMODE_PAE_NX:
3224 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3225 enmShadowMode = PGMMODE_NESTED_PAE;
3226 break;
3227
3228#if HC_ARCH_BITS == 64 || defined(RT_OS_DARWIN)
3229 case SUPPAGINGMODE_AMD64:
3230 case SUPPAGINGMODE_AMD64_GLOBAL:
3231 case SUPPAGINGMODE_AMD64_NX:
3232 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3233 enmShadowMode = PGMMODE_NESTED_AMD64;
3234 break;
3235#endif
3236 default:
3237 AssertLogRelMsgFailedReturnStmt(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode),
3238 *penmSwitcher = VMMSWITCHER_INVALID, PGMMODE_INVALID);
3239 }
3240 }
3241 }
3242 }
3243
3244 *penmSwitcher = enmSwitcher;
3245 return enmShadowMode;
3246}
3247
3248
3249/**
3250 * Performs the actual mode change.
3251 * This is called by PGMChangeMode and pgmR3InitPaging().
3252 *
3253 * @returns VBox status code. May suspend or power off the VM on error, but this
3254 * will trigger using FFs and not informational status codes.
3255 *
3256 * @param pVM The cross context VM structure.
3257 * @param pVCpu The cross context virtual CPU structure.
3258 * @param enmGuestMode The new guest mode. This is assumed to be different from
3259 * the current mode.
3260 */
3261VMM_INT_DECL(int) PGMHCChangeMode(PVM pVM, PVMCPU pVCpu, PGMMODE enmGuestMode)
3262{
3263 Log(("PGMHCChangeMode: Guest mode: %s -> %s\n", PGMGetModeName(pVCpu->pgm.s.enmGuestMode), PGMGetModeName(enmGuestMode)));
3264 STAM_REL_COUNTER_INC(&pVCpu->pgm.s.cGuestModeChanges);
3265
3266 /*
3267 * Calc the shadow mode and switcher.
3268 */
3269 VMMSWITCHER enmSwitcher = VMMSWITCHER_INVALID;
3270 PGMMODE enmShadowMode = PGMMODE_INVALID;
3271 enmShadowMode = pgmCalcShadowMode(pVM, enmGuestMode, pVM->pgm.s.enmHostMode, pVCpu->pgm.s.enmShadowMode, &enmSwitcher);
3272
3273#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3274 if ( enmSwitcher != VMMSWITCHER_INVALID
3275 && VM_IS_RAW_MODE_ENABLED(pVM))
3276 {
3277 /*
3278 * Select new switcher.
3279 */
3280 int rc = VMMR3SelectSwitcher(pVM, enmSwitcher);
3281 AssertLogRelMsgRCReturn(rc,("VMMR3SelectSwitcher(%d) -> %Rrc\n", enmSwitcher, rc), rc);
3282 }
3283#endif
3284
3285 /*
3286 * Exit old mode(s).
3287 */
3288 /* shadow */
3289 if (enmShadowMode != pVCpu->pgm.s.enmShadowMode)
3290 {
3291 LogFlow(("PGMHCChangeMode: Shadow mode: %s -> %s\n", PGMGetModeName(pVCpu->pgm.s.enmShadowMode), PGMGetModeName(enmShadowMode)));
3292 uintptr_t idxOldShw = pVCpu->pgm.s.idxShadowModeData;
3293 if ( idxOldShw < RT_ELEMENTS(g_aPgmShadowModeData)
3294 && g_aPgmShadowModeData[idxOldShw].pfnExit)
3295 {
3296 int rc = g_aPgmShadowModeData[idxOldShw].pfnExit(pVCpu);
3297 AssertMsgRCReturn(rc, ("Exit failed for shadow mode %d: %Rrc\n", pVCpu->pgm.s.enmShadowMode, rc), rc);
3298 }
3299 }
3300 else
3301 LogFlow(("PGMHCChangeMode: Shadow mode remains: %s\n", PGMGetModeName(pVCpu->pgm.s.enmShadowMode)));
3302
3303 /* guest */
3304 uintptr_t const idxOldGst = pVCpu->pgm.s.idxGuestModeData;
3305 if ( idxOldGst < RT_ELEMENTS(g_aPgmGuestModeData)
3306 && g_aPgmGuestModeData[idxOldGst].pfnExit)
3307 {
3308 int rc = g_aPgmGuestModeData[idxOldGst].pfnExit(pVCpu);
3309 AssertMsgReturn(RT_SUCCESS(rc), ("Exit failed for guest mode %d: %Rrc\n", pVCpu->pgm.s.enmGuestMode, rc), rc);
3310 }
3311 pVCpu->pgm.s.GCPhysCR3 = NIL_RTGCPHYS;
3312
3313 /*
3314 * Change the paging mode data indexes.
3315 */
3316 uintptr_t idxNewGst = pVCpu->pgm.s.idxGuestModeData = pgmModeToType(enmGuestMode);
3317 AssertReturn(idxNewGst < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
3318 AssertReturn(g_aPgmGuestModeData[idxNewGst].uType == idxNewGst, VERR_PGM_MODE_IPE);
3319 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnGetPage, VERR_PGM_MODE_IPE);
3320 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnModifyPage, VERR_PGM_MODE_IPE);
3321 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnGetPDE, VERR_PGM_MODE_IPE);
3322 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnExit, VERR_PGM_MODE_IPE);
3323 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnEnter, VERR_PGM_MODE_IPE);
3324#ifdef IN_RING3
3325 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnRelocate, VERR_PGM_MODE_IPE);
3326#endif
3327
3328 uintptr_t const idxNewShw = pVCpu->pgm.s.idxShadowModeData = pgmModeToType(enmShadowMode);
3329 AssertReturn(idxNewShw < RT_ELEMENTS(g_aPgmShadowModeData), VERR_PGM_MODE_IPE);
3330 AssertReturn(g_aPgmShadowModeData[idxNewShw].uType == idxNewShw, VERR_PGM_MODE_IPE);
3331 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnGetPage, VERR_PGM_MODE_IPE);
3332 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnModifyPage, VERR_PGM_MODE_IPE);
3333 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnExit, VERR_PGM_MODE_IPE);
3334 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnEnter, VERR_PGM_MODE_IPE);
3335#ifdef IN_RING3
3336 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnRelocate, VERR_PGM_MODE_IPE);
3337#endif
3338
3339 uintptr_t const idxNewBth = pVCpu->pgm.s.idxBothModeData = (idxNewShw - PGM_TYPE_FIRST_SHADOW) * PGM_TYPE_END + idxNewGst;
3340 AssertReturn(g_aPgmBothModeData[idxNewBth].uShwType == idxNewShw, VERR_PGM_MODE_IPE);
3341 AssertReturn(g_aPgmBothModeData[idxNewBth].uGstType == idxNewGst, VERR_PGM_MODE_IPE);
3342 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnInvalidatePage, VERR_PGM_MODE_IPE);
3343 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnSyncCR3, VERR_PGM_MODE_IPE);
3344 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnPrefetchPage, VERR_PGM_MODE_IPE);
3345 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnVerifyAccessSyncPage, VERR_PGM_MODE_IPE);
3346 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnMapCR3, VERR_PGM_MODE_IPE);
3347 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnUnmapCR3, VERR_PGM_MODE_IPE);
3348 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnEnter, VERR_PGM_MODE_IPE);
3349#ifdef VBOX_STRICT
3350 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnAssertCR3, VERR_PGM_MODE_IPE);
3351#endif
3352
3353 /*
3354 * Enter new shadow mode (if changed).
3355 */
3356 if (enmShadowMode != pVCpu->pgm.s.enmShadowMode)
3357 {
3358 pVCpu->pgm.s.enmShadowMode = enmShadowMode;
3359 int rc = g_aPgmShadowModeData[idxNewShw].pfnEnter(pVCpu, enmGuestMode >= PGMMODE_AMD64);
3360 AssertLogRelMsgRCReturnStmt(rc, ("Entering enmShadowMode=%s failed: %Rrc\n", PGMGetModeName(enmShadowMode), rc),
3361 pVCpu->pgm.s.enmShadowMode = PGMMODE_INVALID, rc);
3362 }
3363
3364 /*
3365 * Always flag the necessary updates
3366 */
3367 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3368
3369 /*
3370 * Enter the new guest and shadow+guest modes.
3371 */
3372 /* Calc the new CR3 value. */
3373 RTGCPHYS GCPhysCR3;
3374 switch (enmGuestMode)
3375 {
3376 case PGMMODE_REAL:
3377 case PGMMODE_PROTECTED:
3378 GCPhysCR3 = NIL_RTGCPHYS;
3379 break;
3380
3381 case PGMMODE_32_BIT:
3382 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_PAGE_MASK;
3383 break;
3384
3385 case PGMMODE_PAE_NX:
3386 case PGMMODE_PAE:
3387 if (!pVM->cpum.ro.GuestFeatures.fPae)
3388 return VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_FATAL, "PAEmode",
3389 N_("The guest is trying to switch to the PAE mode which is currently disabled by default in VirtualBox. PAE support can be enabled using the VM settings (System/Processor)"));
3390 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_PAE_PAGE_MASK;
3391 break;
3392
3393#ifdef VBOX_WITH_64_BITS_GUESTS
3394 case PGMMODE_AMD64_NX:
3395 case PGMMODE_AMD64:
3396 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_AMD64_PAGE_MASK;
3397 break;
3398#endif
3399 default:
3400 AssertLogRelMsgFailedReturn(("enmGuestMode=%d\n", enmGuestMode), VERR_PGM_MODE_IPE);
3401 }
3402
3403 /* Enter the new guest mode. */
3404 pVCpu->pgm.s.enmGuestMode = enmGuestMode;
3405 int rc = g_aPgmGuestModeData[idxNewGst].pfnEnter(pVCpu, GCPhysCR3);
3406 int rc2 = g_aPgmBothModeData[idxNewBth].pfnEnter(pVCpu, GCPhysCR3);
3407
3408 /* Set the new guest CR3. */
3409 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
3410
3411 /* status codes. */
3412 AssertRC(rc);
3413 AssertRC(rc2);
3414 if (RT_SUCCESS(rc))
3415 {
3416 rc = rc2;
3417 if (RT_SUCCESS(rc)) /* no informational status codes. */
3418 rc = VINF_SUCCESS;
3419 }
3420
3421 /*
3422 * Notify HM.
3423 */
3424 HMHCChangedPagingMode(pVM, pVCpu, pVCpu->pgm.s.enmShadowMode, pVCpu->pgm.s.enmGuestMode);
3425 return rc;
3426}
3427
3428#endif /* !IN_RC */
3429
3430/**
3431 * Called by CPUM or REM when CR0.WP changes to 1.
3432 *
3433 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3434 * @thread EMT
3435 */
3436VMMDECL(void) PGMCr0WpEnabled(PVMCPU pVCpu)
3437{
3438 /*
3439 * Netware WP0+RO+US hack cleanup when WP0 -> WP1.
3440 *
3441 * Use the counter to judge whether there might be pool pages with active
3442 * hacks in them. If there are, we will be running the risk of messing up
3443 * the guest by allowing it to write to read-only pages. Thus, we have to
3444 * clear the page pool ASAP if there is the slightest chance.
3445 */
3446 if (pVCpu->pgm.s.cNetwareWp0Hacks > 0)
3447 {
3448 Assert(pVCpu->CTX_SUFF(pVM)->cCpus == 1);
3449
3450 Log(("PGMCr0WpEnabled: %llu WP0 hacks active - clearing page pool\n", pVCpu->pgm.s.cNetwareWp0Hacks));
3451 pVCpu->pgm.s.cNetwareWp0Hacks = 0;
3452 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
3453 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3454 }
3455}
3456
3457
3458/**
3459 * Gets the current guest paging mode.
3460 *
3461 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
3462 *
3463 * @returns The current paging mode.
3464 * @param pVCpu The cross context virtual CPU structure.
3465 */
3466VMMDECL(PGMMODE) PGMGetGuestMode(PVMCPU pVCpu)
3467{
3468 return pVCpu->pgm.s.enmGuestMode;
3469}
3470
3471
3472/**
3473 * Gets the current shadow paging mode.
3474 *
3475 * @returns The current paging mode.
3476 * @param pVCpu The cross context virtual CPU structure.
3477 */
3478VMMDECL(PGMMODE) PGMGetShadowMode(PVMCPU pVCpu)
3479{
3480 return pVCpu->pgm.s.enmShadowMode;
3481}
3482
3483
3484/**
3485 * Gets the current host paging mode.
3486 *
3487 * @returns The current paging mode.
3488 * @param pVM The cross context VM structure.
3489 */
3490VMMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
3491{
3492 switch (pVM->pgm.s.enmHostMode)
3493 {
3494 case SUPPAGINGMODE_32_BIT:
3495 case SUPPAGINGMODE_32_BIT_GLOBAL:
3496 return PGMMODE_32_BIT;
3497
3498 case SUPPAGINGMODE_PAE:
3499 case SUPPAGINGMODE_PAE_GLOBAL:
3500 return PGMMODE_PAE;
3501
3502 case SUPPAGINGMODE_PAE_NX:
3503 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3504 return PGMMODE_PAE_NX;
3505
3506 case SUPPAGINGMODE_AMD64:
3507 case SUPPAGINGMODE_AMD64_GLOBAL:
3508 return PGMMODE_AMD64;
3509
3510 case SUPPAGINGMODE_AMD64_NX:
3511 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3512 return PGMMODE_AMD64_NX;
3513
3514 default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
3515 }
3516
3517 return PGMMODE_INVALID;
3518}
3519
3520
3521/**
3522 * Get mode name.
3523 *
3524 * @returns read-only name string.
3525 * @param enmMode The mode which name is desired.
3526 */
3527VMMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
3528{
3529 switch (enmMode)
3530 {
3531 case PGMMODE_REAL: return "Real";
3532 case PGMMODE_PROTECTED: return "Protected";
3533 case PGMMODE_32_BIT: return "32-bit";
3534 case PGMMODE_PAE: return "PAE";
3535 case PGMMODE_PAE_NX: return "PAE+NX";
3536 case PGMMODE_AMD64: return "AMD64";
3537 case PGMMODE_AMD64_NX: return "AMD64+NX";
3538 case PGMMODE_NESTED_32BIT: return "Nested-32";
3539 case PGMMODE_NESTED_PAE: return "Nested-PAE";
3540 case PGMMODE_NESTED_AMD64: return "Nested-AMD64";
3541 case PGMMODE_EPT: return "EPT";
3542 case PGMMODE_NONE: return "None";
3543 default: return "unknown mode value";
3544 }
3545}
3546
3547
3548/**
3549 * Gets the physical address represented in the guest CR3 as PGM sees it.
3550 *
3551 * This is mainly for logging and debugging.
3552 *
3553 * @returns PGM's guest CR3 value.
3554 * @param pVCpu The cross context virtual CPU structure.
3555 */
3556VMM_INT_DECL(RTGCPHYS) PGMGetGuestCR3Phys(PVMCPU pVCpu)
3557{
3558 return pVCpu->pgm.s.GCPhysCR3;
3559}
3560
3561
3562
3563/**
3564 * Notification from CPUM that the EFER.NXE bit has changed.
3565 *
3566 * @param pVCpu The cross context virtual CPU structure of the CPU for
3567 * which EFER changed.
3568 * @param fNxe The new NXE state.
3569 */
3570VMM_INT_DECL(void) PGMNotifyNxeChanged(PVMCPU pVCpu, bool fNxe)
3571{
3572/** @todo VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu); */
3573 Log(("PGMNotifyNxeChanged: fNxe=%RTbool\n", fNxe));
3574
3575 pVCpu->pgm.s.fNoExecuteEnabled = fNxe;
3576 if (fNxe)
3577 {
3578 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
3579 pVCpu->pgm.s.fGstPaeMbzPteMask &= ~X86_PTE_PAE_NX;
3580 pVCpu->pgm.s.fGstPaeMbzPdeMask &= ~X86_PDE_PAE_NX;
3581 pVCpu->pgm.s.fGstPaeMbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
3582 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask - N/A */
3583 pVCpu->pgm.s.fGstAmd64MbzPteMask &= ~X86_PTE_PAE_NX;
3584 pVCpu->pgm.s.fGstAmd64MbzPdeMask &= ~X86_PDE_PAE_NX;
3585 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
3586 pVCpu->pgm.s.fGstAmd64MbzPdpeMask &= ~X86_PDPE_LM_NX;
3587 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask &= ~X86_PDPE_LM_NX;
3588 pVCpu->pgm.s.fGstAmd64MbzPml4eMask &= ~X86_PML4E_NX;
3589
3590 pVCpu->pgm.s.fGst64ShadowedPteMask |= X86_PTE_PAE_NX;
3591 pVCpu->pgm.s.fGst64ShadowedPdeMask |= X86_PDE_PAE_NX;
3592 pVCpu->pgm.s.fGst64ShadowedBigPdeMask |= X86_PDE2M_PAE_NX;
3593 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask |= X86_PDE2M_PAE_NX;
3594 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask |= X86_PDPE_LM_NX;
3595 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask |= X86_PML4E_NX;
3596 }
3597 else
3598 {
3599 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
3600 pVCpu->pgm.s.fGstPaeMbzPteMask |= X86_PTE_PAE_NX;
3601 pVCpu->pgm.s.fGstPaeMbzPdeMask |= X86_PDE_PAE_NX;
3602 pVCpu->pgm.s.fGstPaeMbzBigPdeMask |= X86_PDE2M_PAE_NX;
3603 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask -N/A */
3604 pVCpu->pgm.s.fGstAmd64MbzPteMask |= X86_PTE_PAE_NX;
3605 pVCpu->pgm.s.fGstAmd64MbzPdeMask |= X86_PDE_PAE_NX;
3606 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask |= X86_PDE2M_PAE_NX;
3607 pVCpu->pgm.s.fGstAmd64MbzPdpeMask |= X86_PDPE_LM_NX;
3608 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask |= X86_PDPE_LM_NX;
3609 pVCpu->pgm.s.fGstAmd64MbzPml4eMask |= X86_PML4E_NX;
3610
3611 pVCpu->pgm.s.fGst64ShadowedPteMask &= ~X86_PTE_PAE_NX;
3612 pVCpu->pgm.s.fGst64ShadowedPdeMask &= ~X86_PDE_PAE_NX;
3613 pVCpu->pgm.s.fGst64ShadowedBigPdeMask &= ~X86_PDE2M_PAE_NX;
3614 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask &= ~X86_PDE2M_PAE_NX;
3615 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask &= ~X86_PDPE_LM_NX;
3616 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask &= ~X86_PML4E_NX;
3617 }
3618}
3619
3620
3621/**
3622 * Check if any pgm pool pages are marked dirty (not monitored)
3623 *
3624 * @returns bool locked/not locked
3625 * @param pVM The cross context VM structure.
3626 */
3627VMMDECL(bool) PGMHasDirtyPages(PVM pVM)
3628{
3629 return pVM->pgm.s.CTX_SUFF(pPool)->cDirtyPages != 0;
3630}
3631
3632
3633/**
3634 * Check if this VCPU currently owns the PGM lock.
3635 *
3636 * @returns bool owner/not owner
3637 * @param pVM The cross context VM structure.
3638 */
3639VMMDECL(bool) PGMIsLockOwner(PVM pVM)
3640{
3641 return PDMCritSectIsOwner(&pVM->pgm.s.CritSectX);
3642}
3643
3644
3645/**
3646 * Enable or disable large page usage
3647 *
3648 * @returns VBox status code.
3649 * @param pVM The cross context VM structure.
3650 * @param fUseLargePages Use/not use large pages
3651 */
3652VMMDECL(int) PGMSetLargePageUsage(PVM pVM, bool fUseLargePages)
3653{
3654 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3655
3656 pVM->fUseLargePages = fUseLargePages;
3657 return VINF_SUCCESS;
3658}
3659
3660
3661/**
3662 * Acquire the PGM lock.
3663 *
3664 * @returns VBox status code
3665 * @param pVM The cross context VM structure.
3666 * @param SRC_POS The source position of the caller (RT_SRC_POS).
3667 */
3668#if (defined(VBOX_STRICT) && defined(IN_RING3)) || defined(DOXYGEN_RUNNING)
3669int pgmLockDebug(PVM pVM, RT_SRC_POS_DECL)
3670#else
3671int pgmLock(PVM pVM)
3672#endif
3673{
3674#if defined(VBOX_STRICT) && defined(IN_RING3)
3675 int rc = PDMCritSectEnterDebug(&pVM->pgm.s.CritSectX, VERR_SEM_BUSY, (uintptr_t)ASMReturnAddress(), RT_SRC_POS_ARGS);
3676#else
3677 int rc = PDMCritSectEnter(&pVM->pgm.s.CritSectX, VERR_SEM_BUSY);
3678#endif
3679#if defined(IN_RC) || defined(IN_RING0)
3680 if (rc == VERR_SEM_BUSY)
3681 rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_LOCK, 0);
3682#endif
3683 AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc));
3684 return rc;
3685}
3686
3687
3688/**
3689 * Release the PGM lock.
3690 *
3691 * @returns VBox status code
3692 * @param pVM The cross context VM structure.
3693 */
3694void pgmUnlock(PVM pVM)
3695{
3696 uint32_t cDeprecatedPageLocks = pVM->pgm.s.cDeprecatedPageLocks;
3697 pVM->pgm.s.cDeprecatedPageLocks = 0;
3698 int rc = PDMCritSectLeave(&pVM->pgm.s.CritSectX);
3699 if (rc == VINF_SEM_NESTED)
3700 pVM->pgm.s.cDeprecatedPageLocks = cDeprecatedPageLocks;
3701}
3702
3703#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
3704
3705/**
3706 * Common worker for pgmRZDynMapGCPageOffInlined and pgmRZDynMapGCPageV2Inlined.
3707 *
3708 * @returns VBox status code.
3709 * @param pVM The cross context VM structure.
3710 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3711 * @param GCPhys The guest physical address of the page to map. The
3712 * offset bits are not ignored.
3713 * @param ppv Where to return the address corresponding to @a GCPhys.
3714 * @param SRC_POS The source position of the caller (RT_SRC_POS).
3715 */
3716int pgmRZDynMapGCPageCommon(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
3717{
3718 pgmLock(pVM);
3719
3720 /*
3721 * Convert it to a writable page and it on to the dynamic mapper.
3722 */
3723 int rc;
3724 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
3725 if (RT_LIKELY(pPage))
3726 {
3727 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
3728 if (RT_SUCCESS(rc))
3729 {
3730 void *pv;
3731 rc = pgmRZDynMapHCPageInlined(pVCpu, PGM_PAGE_GET_HCPHYS(pPage), &pv RTLOG_COMMA_SRC_POS_ARGS);
3732 if (RT_SUCCESS(rc))
3733 *ppv = (void *)((uintptr_t)pv | ((uintptr_t)GCPhys & PAGE_OFFSET_MASK));
3734 }
3735 else
3736 AssertRC(rc);
3737 }
3738 else
3739 {
3740 AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
3741 rc = VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
3742 }
3743
3744 pgmUnlock(pVM);
3745 return rc;
3746}
3747
3748#endif /* IN_RC || VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
3749#if !defined(IN_R0) || defined(LOG_ENABLED)
3750
/** Format handler for PGMPAGE (%R[pgmpage]).
 *
 * Renders a PGMPAGE as a compact string of colon-separated fields.  The
 * format precision selects how many of the parts are emitted; see the
 * IS_PART_INCLUDED checks below for the level assigned to each part.
 *
 * @copydoc FNRTSTRFORMATTYPE */
static DECLCALLBACK(size_t) pgmFormatTypeHandlerPage(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
                                                     const char *pszType, void const *pvValue,
                                                     int cchWidth, int cchPrecision, unsigned fFlags,
                                                     void *pvUser)
{
    size_t    cch;
    PCPGMPAGE pPage = (PCPGMPAGE)pvValue;
    if (RT_VALID_PTR(pPage))
    {
        char szTmp[64+80];

        cch = 0;

        /* The single char state stuff. */
        static const char s_achPageStates[4] = { 'Z', 'A', 'W', 'S' };
        szTmp[cch++] = s_achPageStates[PGM_PAGE_GET_STATE_NA(pPage)];

/* A part is included when no precision was given, or when the precision
   equals the part's level or is at least level+10. */
#define IS_PART_INCLUDED(lvl) ( !(fFlags & RTSTR_F_PRECISION) || cchPrecision == (lvl) || cchPrecision >= (lvl)+10 )
        if (IS_PART_INCLUDED(5))
        {
            /* Physical and virtual handler states, one char each. */
            static const char s_achHandlerStates[4] = { '-', 't', 'w', 'a' };
            szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)];
            szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_VIRT_STATE(pPage)];
        }

        /* The type. */
        if (IS_PART_INCLUDED(4))
        {
            szTmp[cch++] = ':';
            static const char s_achPageTypes[8][4] = { "INV", "RAM", "MI2", "M2A", "SHA", "ROM", "MIO", "BAD" };
            szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][0];
            szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][1];
            szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][2];
        }

        /* The numbers. */
        if (IS_PART_INCLUDED(3))
        {
            /* Host physical address, zero padded hex. */
            szTmp[cch++] = ':';
            cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_HCPHYS_NA(pPage), 16, 12, 0, RTSTR_F_ZEROPAD | RTSTR_F_64BIT);
        }

        if (IS_PART_INCLUDED(2))
        {
            /* The page ID. */
            szTmp[cch++] = ':';
            cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_PAGEID(pPage), 16, 7, 0, RTSTR_F_ZEROPAD | RTSTR_F_32BIT);
        }

        if (IS_PART_INCLUDED(6))
        {
            /* Tracking data: reference state char followed by the tree index. */
            szTmp[cch++] = ':';
            static const char s_achRefs[4] = { '-', 'U', '!', 'L' };
            szTmp[cch++] = s_achRefs[PGM_PAGE_GET_TD_CREFS_NA(pPage)];
            cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_TD_IDX_NA(pPage), 16, 4, 0, RTSTR_F_ZEROPAD | RTSTR_F_16BIT);
        }
#undef IS_PART_INCLUDED

        cch = pfnOutput(pvArgOutput, szTmp, cch);
    }
    else
        cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmpage-ptr>"));
    NOREF(pszType); NOREF(cchWidth); NOREF(pvUser);
    return cch;
}
3817
3818
3819/** Format handler for PGMRAMRANGE.
3820 * @copydoc FNRTSTRFORMATTYPE */
3821static DECLCALLBACK(size_t) pgmFormatTypeHandlerRamRange(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
3822 const char *pszType, void const *pvValue,
3823 int cchWidth, int cchPrecision, unsigned fFlags,
3824 void *pvUser)
3825{
3826 size_t cch;
3827 PGMRAMRANGE const *pRam = (PGMRAMRANGE const *)pvValue;
3828 if (VALID_PTR(pRam))
3829 {
3830 char szTmp[80];
3831 cch = RTStrPrintf(szTmp, sizeof(szTmp), "%RGp-%RGp", pRam->GCPhys, pRam->GCPhysLast);
3832 cch = pfnOutput(pvArgOutput, szTmp, cch);
3833 }
3834 else
3835 cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmramrange-ptr>"));
3836 NOREF(pszType); NOREF(cchWidth); NOREF(cchPrecision); NOREF(pvUser); NOREF(fFlags);
3837 return cch;
3838}
3839
/** Format type handlers to be registered/deregistered. */
static const struct
{
    char szType[24];
    PFNRTSTRFORMATTYPE pfnHandler;
} g_aPgmFormatTypes[] =
{
    { "pgmpage", pgmFormatTypeHandlerPage },
    { "pgmramrange", pgmFormatTypeHandlerRamRange }
};
3850
3851#endif /* !IN_R0 || LOG_ENABLED */
3852
3853/**
3854 * Registers the global string format types.
3855 *
3856 * This should be called at module load time or in some other manner that ensure
3857 * that it's called exactly one time.
3858 *
3859 * @returns IPRT status code on RTStrFormatTypeRegister failure.
3860 */
3861VMMDECL(int) PGMRegisterStringFormatTypes(void)
3862{
3863#if !defined(IN_R0) || defined(LOG_ENABLED)
3864 int rc = VINF_SUCCESS;
3865 unsigned i;
3866 for (i = 0; RT_SUCCESS(rc) && i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
3867 {
3868 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
3869# ifdef IN_RING0
3870 if (rc == VERR_ALREADY_EXISTS)
3871 {
3872 /* in case of cleanup failure in ring-0 */
3873 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
3874 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
3875 }
3876# endif
3877 }
3878 if (RT_FAILURE(rc))
3879 while (i-- > 0)
3880 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
3881
3882 return rc;
3883#else
3884 return VINF_SUCCESS;
3885#endif
3886}
3887
3888
3889/**
3890 * Deregisters the global string format types.
3891 *
3892 * This should be called at module unload time or in some other manner that
3893 * ensure that it's called exactly one time.
3894 */
3895VMMDECL(void) PGMDeregisterStringFormatTypes(void)
3896{
3897#if !defined(IN_R0) || defined(LOG_ENABLED)
3898 for (unsigned i = 0; i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
3899 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
3900#endif
3901}
3902
3903#ifdef VBOX_STRICT
3904
3905/**
3906 * Asserts that there are no mapping conflicts.
3907 *
3908 * @returns Number of conflicts.
3909 * @param pVM The cross context VM structure.
3910 */
3911VMMDECL(unsigned) PGMAssertNoMappingConflicts(PVM pVM)
3912{
3913 unsigned cErrors = 0;
3914
3915 /* Only applies to raw mode -> 1 VPCU */
3916 Assert(pVM->cCpus == 1);
3917 PVMCPU pVCpu = &pVM->aCpus[0];
3918
3919 /*
3920 * Check for mapping conflicts.
3921 */
3922 for (PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
3923 pMapping;
3924 pMapping = pMapping->CTX_SUFF(pNext))
3925 {
3926 /** @todo This is slow and should be optimized, but since it's just assertions I don't care now. */
3927 for (RTGCPTR GCPtr = pMapping->GCPtr;
3928 GCPtr <= pMapping->GCPtrLast;
3929 GCPtr += PAGE_SIZE)
3930 {
3931 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)GCPtr, NULL, NULL);
3932 if (rc != VERR_PAGE_TABLE_NOT_PRESENT)
3933 {
3934 AssertMsgFailed(("Conflict at %RGv with %s\n", GCPtr, R3STRING(pMapping->pszDesc)));
3935 cErrors++;
3936 break;
3937 }
3938 }
3939 }
3940
3941 return cErrors;
3942}
3943
3944
3945/**
3946 * Asserts that everything related to the guest CR3 is correctly shadowed.
3947 *
3948 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
3949 * and assert the correctness of the guest CR3 mapping before asserting that the
3950 * shadow page tables is in sync with the guest page tables.
3951 *
3952 * @returns Number of conflicts.
3953 * @param pVM The cross context VM structure.
3954 * @param pVCpu The cross context virtual CPU structure.
3955 * @param cr3 The current guest CR3 register value.
3956 * @param cr4 The current guest CR4 register value.
3957 */
3958VMMDECL(unsigned) PGMAssertCR3(PVM pVM, PVMCPU pVCpu, uint64_t cr3, uint64_t cr4)
3959{
3960 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
3961
3962 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
3963 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), -VERR_PGM_MODE_IPE);
3964 AssertReturn(g_aPgmBothModeData[idxBth].pfnAssertCR3, -VERR_PGM_MODE_IPE);
3965
3966 pgmLock(pVM);
3967 unsigned cErrors = g_aPgmBothModeData[idxBth].pfnAssertCR3(pVCpu, cr3, cr4, 0, ~(RTGCPTR)0);
3968 pgmUnlock(pVM);
3969
3970 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
3971 return cErrors;
3972}
3973
3974#endif /* VBOX_STRICT */
3975
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette