source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp @ r92561

Last change on this file since r92561 was r92547, checked in by vboxsync:

VMM: Nested VMX: bugref:10092 When CR3 is updated (PGMFlushTLB or PGMUpdateCR3) we need to translate it to a guest-physical address if it's a nested-guest physical address when EPT paging is enabled.

/* $Id: PGMAll.cpp 92547 2021-11-22 12:14:41Z vboxsync $ */
/** @file
 * PGM - Page Manager and Monitor - All context code.
 */

/*
 * Copyright (C) 2006-2020 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/selm.h>
#include <VBox/vmm/iem.h>
#include <VBox/vmm/iom.h>
#include <VBox/sup.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/stam.h>
#include <VBox/vmm/trpm.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/hm_vmx.h>
#include "PGMInternal.h"
#include <VBox/vmm/vmcc.h>
#include "PGMInline.h"
#include <iprt/assert.h>
#include <iprt/asm-amd64-x86.h>
#include <iprt/string.h>
#include <VBox/log.h>
#include <VBox/param.h>
#include <VBox/err.h>


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde);
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
static int pgmGstSlatWalk(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, bool fIsLinearAddrValid, RTGCPTR GCPtrNested, PPGMPTWALK pWalk,
                          PPGMPTWALKGST pGstWalk);
static int pgmGstSlatWalkPhys(PVMCPUCC pVCpu, PGMSLAT enmSlatMode, RTGCPHYS GCPhysNested, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk);
#endif
static int pgmShwSyncLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD);
static int pgmShwGetEPTPDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD);


#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
/* Guest - EPT SLAT is identical for all guest paging modes. */
# define PGM_SLAT_TYPE PGM_SLAT_TYPE_EPT
# define PGM_GST_TYPE PGM_TYPE_EPT
# include "PGMGstDefs.h"
# include "PGMAllGstSlatEpt.cpp.h"
# undef PGM_GST_TYPE
#endif


/*
 * Shadow - 32-bit mode
 */
#define PGM_SHW_TYPE PGM_TYPE_32BIT
#define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE PGM_TYPE_REAL
#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
#include "PGMGstDefs.h"
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE PGM_TYPE_PROT
#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
#include "PGMGstDefs.h"
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode */
#define PGM_GST_TYPE PGM_TYPE_32BIT
#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD
#include "PGMGstDefs.h"
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME

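/*
 * Note on the pattern above and below: PGMAllShw.h, PGMAllGst.h and
 * PGMAllBth.h are textual templates. They are #included once per
 * shadow/guest mode combination with PGM_SHW_TYPE/PGM_GST_TYPE and the
 * name-mangling macros set, so each inclusion emits a mode-specialised copy
 * of every worker function; the data arrays further down collect the
 * generated names. A minimal sketch of the technique with made-up names
 * (not the real PGM macros):
 *
 * @code
 *  // worker.h - the "template", included many times:
 *  static int TMPL_NAME(GetPage)(void)
 *  {
 *      return TMPL_TYPE;   // specialised per inclusion
 *  }
 *
 *  // consumer.c:
 *  #define TMPL_TYPE        1
 *  #define TMPL_NAME(name)  Mode1##name
 *  #include "worker.h"      // emits Mode1GetPage()
 *  #undef  TMPL_NAME
 *  #undef  TMPL_TYPE
 * @endcode
 */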

/*
 * Shadow - PAE mode
 */
#define PGM_SHW_TYPE PGM_TYPE_PAE
#define PGM_SHW_NAME(name) PGM_SHW_NAME_PAE(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE PGM_TYPE_REAL
#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE PGM_TYPE_PROT
#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode */
#define PGM_GST_TYPE PGM_TYPE_32BIT
#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_FOR_32BIT
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME


/* Guest - PAE mode */
#define PGM_GST_TYPE PGM_TYPE_PAE
#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT
#include "PGMGstDefs.h"
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME


/*
 * Shadow - AMD64 mode
 */
#define PGM_SHW_TYPE PGM_TYPE_AMD64
#define PGM_SHW_NAME(name) PGM_SHW_NAME_AMD64(name)
#include "PGMAllShw.h"

/* Guest - protected mode (only used for AMD-V nested paging in 64-bit mode) */
/** @todo retire this hack. */
#define PGM_GST_TYPE PGM_TYPE_PROT
#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PD_PHYS
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#ifdef VBOX_WITH_64_BITS_GUESTS
/* Guest - AMD64 mode */
# define PGM_GST_TYPE PGM_TYPE_AMD64
# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
# define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_64BIT_PML4
# include "PGMGstDefs.h"
# include "PGMAllGst.h"
# include "PGMAllBth.h"
# undef BTH_PGMPOOLKIND_PT_FOR_BIG
# undef BTH_PGMPOOLKIND_PT_FOR_PT
# undef BTH_PGMPOOLKIND_ROOT
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME
#endif /* VBOX_WITH_64_BITS_GUESTS */

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME


/*
 * Shadow - 32-bit nested paging mode.
 */
#define PGM_SHW_TYPE PGM_TYPE_NESTED_32BIT
#define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED_32BIT(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE PGM_TYPE_REAL
#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_REAL(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE PGM_TYPE_PROT
#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_PROT(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode */
#define PGM_GST_TYPE PGM_TYPE_32BIT
#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_32BIT(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - PAE mode */
#define PGM_GST_TYPE PGM_TYPE_PAE
#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_PAE(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#ifdef VBOX_WITH_64_BITS_GUESTS
/* Guest - AMD64 mode */
# define PGM_GST_TYPE PGM_TYPE_AMD64
# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_AMD64(name)
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME
#endif /* VBOX_WITH_64_BITS_GUESTS */

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME


/*
 * Shadow - PAE nested paging mode.
 */
#define PGM_SHW_TYPE PGM_TYPE_NESTED_PAE
#define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED_PAE(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE PGM_TYPE_REAL
#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_REAL(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE PGM_TYPE_PROT
#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_PROT(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode */
#define PGM_GST_TYPE PGM_TYPE_32BIT
#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_32BIT(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - PAE mode */
#define PGM_GST_TYPE PGM_TYPE_PAE
#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_PAE(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#ifdef VBOX_WITH_64_BITS_GUESTS
/* Guest - AMD64 mode */
# define PGM_GST_TYPE PGM_TYPE_AMD64
# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_AMD64(name)
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME
#endif /* VBOX_WITH_64_BITS_GUESTS */

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME


/*
 * Shadow - AMD64 nested paging mode.
 */
#define PGM_SHW_TYPE PGM_TYPE_NESTED_AMD64
#define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED_AMD64(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE PGM_TYPE_REAL
#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_REAL(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE PGM_TYPE_PROT
#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_PROT(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode */
#define PGM_GST_TYPE PGM_TYPE_32BIT
#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_32BIT(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - PAE mode */
#define PGM_GST_TYPE PGM_TYPE_PAE
#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_PAE(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#ifdef VBOX_WITH_64_BITS_GUESTS
/* Guest - AMD64 mode */
# define PGM_GST_TYPE PGM_TYPE_AMD64
# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_AMD64(name)
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME
#endif /* VBOX_WITH_64_BITS_GUESTS */

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME


/*
 * Shadow - EPT.
 */
#define PGM_SHW_TYPE PGM_TYPE_EPT
#define PGM_SHW_NAME(name) PGM_SHW_NAME_EPT(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE PGM_TYPE_REAL
#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_REAL(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE PGM_TYPE_PROT
#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode */
#define PGM_GST_TYPE PGM_TYPE_32BIT
#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_32BIT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - PAE mode */
#define PGM_GST_TYPE PGM_TYPE_PAE
#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PAE(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#ifdef VBOX_WITH_64_BITS_GUESTS
/* Guest - AMD64 mode */
# define PGM_GST_TYPE PGM_TYPE_AMD64
# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_AMD64(name)
# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
# undef BTH_PGMPOOLKIND_PT_FOR_PT
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME
#endif /* VBOX_WITH_64_BITS_GUESTS */

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME


/*
 * Shadow - NEM / None.
 */
#define PGM_SHW_TYPE PGM_TYPE_NONE
#define PGM_SHW_NAME(name) PGM_SHW_NAME_NONE(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE PGM_TYPE_REAL
#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_REAL(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE PGM_TYPE_PROT
#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_PROT(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode */
#define PGM_GST_TYPE PGM_TYPE_32BIT
#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_32BIT(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - PAE mode */
#define PGM_GST_TYPE PGM_TYPE_PAE
#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_PAE(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#ifdef VBOX_WITH_64_BITS_GUESTS
/* Guest - AMD64 mode */
# define PGM_GST_TYPE PGM_TYPE_AMD64
# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
# define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_AMD64(name)
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME
#endif /* VBOX_WITH_64_BITS_GUESTS */

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME



/**
 * Guest mode data array.
 */
PGMMODEDATAGST const g_aPgmGuestModeData[PGM_GUEST_MODE_DATA_ARRAY_SIZE] =
{
    { UINT32_MAX, NULL, NULL, NULL, NULL }, /* 0 */
    {
        PGM_TYPE_REAL,
        PGM_GST_NAME_REAL(GetPage),
        PGM_GST_NAME_REAL(ModifyPage),
        PGM_GST_NAME_REAL(Enter),
        PGM_GST_NAME_REAL(Exit),
#ifdef IN_RING3
        PGM_GST_NAME_REAL(Relocate),
#endif
    },
    {
        PGM_TYPE_PROT,
        PGM_GST_NAME_PROT(GetPage),
        PGM_GST_NAME_PROT(ModifyPage),
        PGM_GST_NAME_PROT(Enter),
        PGM_GST_NAME_PROT(Exit),
#ifdef IN_RING3
        PGM_GST_NAME_PROT(Relocate),
#endif
    },
    {
        PGM_TYPE_32BIT,
        PGM_GST_NAME_32BIT(GetPage),
        PGM_GST_NAME_32BIT(ModifyPage),
        PGM_GST_NAME_32BIT(Enter),
        PGM_GST_NAME_32BIT(Exit),
#ifdef IN_RING3
        PGM_GST_NAME_32BIT(Relocate),
#endif
    },
    {
        PGM_TYPE_PAE,
        PGM_GST_NAME_PAE(GetPage),
        PGM_GST_NAME_PAE(ModifyPage),
        PGM_GST_NAME_PAE(Enter),
        PGM_GST_NAME_PAE(Exit),
#ifdef IN_RING3
        PGM_GST_NAME_PAE(Relocate),
#endif
    },
#ifdef VBOX_WITH_64_BITS_GUESTS
    {
        PGM_TYPE_AMD64,
        PGM_GST_NAME_AMD64(GetPage),
        PGM_GST_NAME_AMD64(ModifyPage),
        PGM_GST_NAME_AMD64(Enter),
        PGM_GST_NAME_AMD64(Exit),
# ifdef IN_RING3
        PGM_GST_NAME_AMD64(Relocate),
# endif
    },
#endif
};


/**
 * The shadow mode data array.
 */
PGMMODEDATASHW const g_aPgmShadowModeData[PGM_SHADOW_MODE_DATA_ARRAY_SIZE] =
{
    { UINT8_MAX, NULL, NULL, NULL, NULL }, /* 0 */
    { UINT8_MAX, NULL, NULL, NULL, NULL }, /* PGM_TYPE_REAL */
    { UINT8_MAX, NULL, NULL, NULL, NULL }, /* PGM_TYPE_PROT */
    {
        PGM_TYPE_32BIT,
        PGM_SHW_NAME_32BIT(GetPage),
        PGM_SHW_NAME_32BIT(ModifyPage),
        PGM_SHW_NAME_32BIT(Enter),
        PGM_SHW_NAME_32BIT(Exit),
#ifdef IN_RING3
        PGM_SHW_NAME_32BIT(Relocate),
#endif
    },
    {
        PGM_TYPE_PAE,
        PGM_SHW_NAME_PAE(GetPage),
        PGM_SHW_NAME_PAE(ModifyPage),
        PGM_SHW_NAME_PAE(Enter),
        PGM_SHW_NAME_PAE(Exit),
#ifdef IN_RING3
        PGM_SHW_NAME_PAE(Relocate),
#endif
    },
    {
        PGM_TYPE_AMD64,
        PGM_SHW_NAME_AMD64(GetPage),
        PGM_SHW_NAME_AMD64(ModifyPage),
        PGM_SHW_NAME_AMD64(Enter),
        PGM_SHW_NAME_AMD64(Exit),
#ifdef IN_RING3
        PGM_SHW_NAME_AMD64(Relocate),
#endif
    },
    {
        PGM_TYPE_NESTED_32BIT,
        PGM_SHW_NAME_NESTED_32BIT(GetPage),
        PGM_SHW_NAME_NESTED_32BIT(ModifyPage),
        PGM_SHW_NAME_NESTED_32BIT(Enter),
        PGM_SHW_NAME_NESTED_32BIT(Exit),
#ifdef IN_RING3
        PGM_SHW_NAME_NESTED_32BIT(Relocate),
#endif
    },
    {
        PGM_TYPE_NESTED_PAE,
        PGM_SHW_NAME_NESTED_PAE(GetPage),
        PGM_SHW_NAME_NESTED_PAE(ModifyPage),
        PGM_SHW_NAME_NESTED_PAE(Enter),
        PGM_SHW_NAME_NESTED_PAE(Exit),
#ifdef IN_RING3
        PGM_SHW_NAME_NESTED_PAE(Relocate),
#endif
    },
    {
        PGM_TYPE_NESTED_AMD64,
        PGM_SHW_NAME_NESTED_AMD64(GetPage),
        PGM_SHW_NAME_NESTED_AMD64(ModifyPage),
        PGM_SHW_NAME_NESTED_AMD64(Enter),
        PGM_SHW_NAME_NESTED_AMD64(Exit),
#ifdef IN_RING3
        PGM_SHW_NAME_NESTED_AMD64(Relocate),
#endif
    },
    {
        PGM_TYPE_EPT,
        PGM_SHW_NAME_EPT(GetPage),
        PGM_SHW_NAME_EPT(ModifyPage),
        PGM_SHW_NAME_EPT(Enter),
        PGM_SHW_NAME_EPT(Exit),
#ifdef IN_RING3
        PGM_SHW_NAME_EPT(Relocate),
#endif
    },
    {
        PGM_TYPE_NONE,
        PGM_SHW_NAME_NONE(GetPage),
        PGM_SHW_NAME_NONE(ModifyPage),
        PGM_SHW_NAME_NONE(Enter),
        PGM_SHW_NAME_NONE(Exit),
#ifdef IN_RING3
        PGM_SHW_NAME_NONE(Relocate),
#endif
    },
};


/**
 * The guest+shadow mode data array.
 */
PGMMODEDATABTH const g_aPgmBothModeData[PGM_BOTH_MODE_DATA_ARRAY_SIZE] =
{
#if !defined(IN_RING3) && !defined(VBOX_STRICT)
# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
    { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(Trap0eHandler) }

#elif !defined(IN_RING3) && defined(VBOX_STRICT)
# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
    { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(Trap0eHandler), Nm(AssertCR3) }

#elif defined(IN_RING3) && !defined(VBOX_STRICT)
# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL }
# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
    { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), }

#elif defined(IN_RING3) && defined(VBOX_STRICT)
# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
    { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(AssertCR3) }

#else
# error "Misconfig."
#endif

    /* 32-bit shadow paging mode: */
    PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
    PGMMODEDATABTH_ENTRY(PGM_TYPE_32BIT, PGM_TYPE_REAL, PGM_BTH_NAME_32BIT_REAL),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_32BIT, PGM_TYPE_PROT, PGM_BTH_NAME_32BIT_PROT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_32BIT, PGM_TYPE_32BIT, PGM_BTH_NAME_32BIT_32BIT),
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NESTED_32BIT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NESTED_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NESTED_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_EPT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NONE - illegal */

    /* PAE shadow paging mode: */
    PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
    PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_REAL, PGM_BTH_NAME_PAE_REAL),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_PROT, PGM_BTH_NAME_PAE_PROT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_32BIT, PGM_BTH_NAME_PAE_32BIT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_PAE, PGM_BTH_NAME_PAE_PAE),
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NESTED_32BIT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NESTED_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NESTED_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_EPT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NONE - illegal */

    /* AMD64 shadow paging mode: */
    PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
    PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_REAL, PGM_BTH_NAME_AMD64_REAL),
    PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_PROT, PGM_BTH_NAME_AMD64_PROT),
    PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_32BIT, PGM_BTH_NAME_AMD64_32BIT),
    PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_PAE, PGM_BTH_NAME_AMD64_PAE),
#ifdef VBOX_WITH_64_BITS_GUESTS
    PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_AMD64, PGM_BTH_NAME_AMD64_AMD64),
#else
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_AMD64 - illegal */
#endif
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NESTED_32BIT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NESTED_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NESTED_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_EPT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NONE - illegal */

    /* 32-bit nested paging mode: */
    PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_REAL, PGM_BTH_NAME_NESTED_32BIT_REAL),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_PROT, PGM_BTH_NAME_NESTED_32BIT_PROT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_32BIT, PGM_BTH_NAME_NESTED_32BIT_32BIT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_PAE, PGM_BTH_NAME_NESTED_32BIT_PAE),
#ifdef VBOX_WITH_64_BITS_GUESTS
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_AMD64, PGM_BTH_NAME_NESTED_32BIT_AMD64),
#else
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_AMD64 - illegal */
#endif
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NESTED_32BIT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NESTED_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NESTED_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_EPT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NONE - illegal */

    /* PAE nested paging mode: */
    PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_REAL, PGM_BTH_NAME_NESTED_PAE_REAL),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_PROT, PGM_BTH_NAME_NESTED_PAE_PROT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_32BIT, PGM_BTH_NAME_NESTED_PAE_32BIT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_PAE, PGM_BTH_NAME_NESTED_PAE_PAE),
#ifdef VBOX_WITH_64_BITS_GUESTS
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_AMD64, PGM_BTH_NAME_NESTED_PAE_AMD64),
#else
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_AMD64 - illegal */
#endif
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NESTED_32BIT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NESTED_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NESTED_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_EPT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NONE - illegal */

    /* AMD64 nested paging mode: */
    PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_REAL, PGM_BTH_NAME_NESTED_AMD64_REAL),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_PROT, PGM_BTH_NAME_NESTED_AMD64_PROT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_32BIT, PGM_BTH_NAME_NESTED_AMD64_32BIT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_PAE, PGM_BTH_NAME_NESTED_AMD64_PAE),
#ifdef VBOX_WITH_64_BITS_GUESTS
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_AMD64, PGM_BTH_NAME_NESTED_AMD64_AMD64),
#else
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_AMD64 - illegal */
#endif
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NESTED_32BIT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NESTED_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NESTED_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_EPT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NONE - illegal */

    /* EPT nested paging mode: */
    PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
    PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_REAL, PGM_BTH_NAME_EPT_REAL),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_PROT, PGM_BTH_NAME_EPT_PROT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_32BIT, PGM_BTH_NAME_EPT_32BIT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_PAE, PGM_BTH_NAME_EPT_PAE),
#ifdef VBOX_WITH_64_BITS_GUESTS
    PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_AMD64, PGM_BTH_NAME_EPT_AMD64),
#else
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_AMD64 - illegal */
#endif
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NESTED_32BIT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NESTED_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NESTED_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_EPT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NONE - illegal */

    /* NONE / NEM: */
    PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_REAL, PGM_BTH_NAME_EPT_REAL),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_PROT, PGM_BTH_NAME_EPT_PROT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_32BIT, PGM_BTH_NAME_EPT_32BIT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_PAE, PGM_BTH_NAME_EPT_PAE),
#ifdef VBOX_WITH_64_BITS_GUESTS
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_AMD64, PGM_BTH_NAME_EPT_AMD64),
#else
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_AMD64 - illegal */
#endif
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_32BIT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_EPT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NONE - illegal */


#undef PGMMODEDATABTH_ENTRY
#undef PGMMODEDATABTH_NULL_ENTRY
};
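
/*
 * Each VCPU caches validated indexes into the three arrays above (e.g.
 * pVCpu->pgm.s.idxBothModeData, recomputed when the paging mode changes),
 * so mode-specific workers are reached by one bounds-checked table lookup
 * rather than a switch. The calling pattern, exactly as used by the
 * functions below (PGMPrefetchPage() is a literal instance):
 *
 * @code
 *  uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
 *  AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
 *  AssertReturn(g_aPgmBothModeData[idxBth].pfnPrefetchPage, VERR_PGM_MODE_IPE);
 *  int rc = g_aPgmBothModeData[idxBth].pfnPrefetchPage(pVCpu, GCPtrPage);
 * @endcode
 *
 * The NULL entries make an illegal shadow/guest combination fail the second
 * assertion instead of jumping through a garbage pointer.
 */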


#ifdef IN_RING0
/**
 * #PF Handler.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   uErr        The trap error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address.
 */
VMMDECL(int) PGMTrap0eHandler(PVMCPUCC pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
{
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);

    Log(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv eip=%04x:%RGv cr3=%RGp\n", uErr, pvFault, pRegFrame->cs.Sel, (RTGCPTR)pRegFrame->rip, (RTGCPHYS)CPUMGetGuestCR3(pVCpu)));
    STAM_PROFILE_START(&pVCpu->pgm.s.Stats.StatRZTrap0e, a);
    STAM_STATS({ pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = NULL; } );


# ifdef VBOX_WITH_STATISTICS
    /*
     * Error code stats.
     */
    if (uErr & X86_TRAP_PF_US)
    {
        if (!(uErr & X86_TRAP_PF_P))
        {
            if (uErr & X86_TRAP_PF_RW)
                STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSNotPresentWrite);
            else
                STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSNotPresentRead);
        }
        else if (uErr & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSWrite);
        else if (uErr & X86_TRAP_PF_RSVD)
            STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSReserved);
        else if (uErr & X86_TRAP_PF_ID)
            STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSNXE);
        else
            STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSRead);
    }
    else
    {   /* Supervisor */
        if (!(uErr & X86_TRAP_PF_P))
        {
            if (uErr & X86_TRAP_PF_RW)
                STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVNotPresentWrite);
            else
                STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVNotPresentRead);
        }
        else if (uErr & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVWrite);
        else if (uErr & X86_TRAP_PF_ID)
            STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSNXE);
        else if (uErr & X86_TRAP_PF_RSVD)
            STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVReserved);
    }
# endif /* VBOX_WITH_STATISTICS */
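
    /*
     * For reference when reading the decoding above - the architectural #PF
     * error code bits being tested (x86/AMD64):
     *   X86_TRAP_PF_P    (bit 0) - set if the page was present,
     *   X86_TRAP_PF_RW   (bit 1) - set for write accesses,
     *   X86_TRAP_PF_US   (bit 2) - set for user-mode accesses,
     *   X86_TRAP_PF_RSVD (bit 3) - reserved bit set in a paging structure,
     *   X86_TRAP_PF_ID   (bit 4) - instruction fetch (NX).
     * E.g. uErr == 0x07 (P|RW|US) is a user-mode write to a present page.
     */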

    /*
     * Call the worker.
     */
    uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
    AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
    AssertReturn(g_aPgmBothModeData[idxBth].pfnTrap0eHandler, VERR_PGM_MODE_IPE);
    bool fLockTaken = false;
    int rc = g_aPgmBothModeData[idxBth].pfnTrap0eHandler(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken);
    if (fLockTaken)
    {
        PGM_LOCK_ASSERT_OWNER(pVM);
        PGM_UNLOCK(pVM);
    }
    LogFlow(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv rc=%Rrc\n", uErr, pvFault, rc));

    /*
     * Return code tweaks.
     */
    if (rc != VINF_SUCCESS)
    {
        if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
            rc = VINF_SUCCESS;

        /* Note: hack alert for difficult to reproduce problem. */
        if (   rc == VERR_PAGE_NOT_PRESENT                 /* SMP only ; disassembly might fail. */
            || rc == VERR_PAGE_TABLE_NOT_PRESENT           /* seen with UNI & SMP */
            || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT   /* seen with SMP */
            || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)     /* precaution */
        {
            Log(("WARNING: Unexpected VERR_PAGE_TABLE_NOT_PRESENT (%d) for page fault at %RGv error code %x (rip=%RGv)\n", rc, pvFault, uErr, pRegFrame->rip));
            /* Some kind of inconsistency in the SMP case; it's safe to just execute the instruction again; not sure about single VCPU VMs though. */
            rc = VINF_SUCCESS;
        }
    }

    STAM_STATS({ if (rc == VINF_EM_RAW_GUEST_TRAP) STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eGuestPF); });
    STAM_STATS({ if (!pVCpu->pgmr0.s.pStatTrap0eAttributionR0)
                     pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = &pVCpu->pgm.s.Stats.StatRZTrap0eTime2Misc; });
    STAM_PROFILE_STOP_EX(&pVCpu->pgm.s.Stats.StatRZTrap0e, pVCpu->pgmr0.s.pStatTrap0eAttributionR0, a);
    return rc;
}
#endif /* IN_RING0 */


/**
 * Prefetch a page
 *
 * Typically used to sync commonly used pages before entering raw mode
 * after a CR3 reload.
 *
 * @returns VBox status code suitable for scheduling.
 * @retval  VINF_SUCCESS on success.
 * @retval  VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtrPage   Page to invalidate.
 */
VMMDECL(int) PGMPrefetchPage(PVMCPUCC pVCpu, RTGCPTR GCPtrPage)
{
    STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,Prefetch), a);

    uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
    AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
    AssertReturn(g_aPgmBothModeData[idxBth].pfnPrefetchPage, VERR_PGM_MODE_IPE);
    int rc = g_aPgmBothModeData[idxBth].pfnPrefetchPage(pVCpu, GCPtrPage);

    STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,Prefetch), a);
    AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
    return rc;
}


/**
 * Emulation of the invlpg instruction (HC only actually).
 *
 * @returns Strict VBox status code, special care required.
 * @retval  VINF_PGM_SYNC_CR3 - handled.
 * @retval  VINF_EM_RAW_EMULATE_INSTR - not handled (RC only).
 * @retval  VERR_REM_FLUSHED_PAGES_OVERFLOW - not handled.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtrPage   Page to invalidate.
 *
 * @remark  ASSUMES the page table entry or page directory is valid. Fairly
 *          safe, but there could be edge cases!
 *
 * @todo    Flush page or page directory only if necessary!
 * @todo    VBOXSTRICTRC
 */
VMMDECL(int) PGMInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtrPage)
{
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    int rc;
    Log3(("PGMInvalidatePage: GCPtrPage=%RGv\n", GCPtrPage));

    IEMTlbInvalidatePage(pVCpu, GCPtrPage);

    /*
     * Call paging mode specific worker.
     */
    STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,InvalidatePage), a);
    PGM_LOCK_VOID(pVM);

    uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
    AssertReturnStmt(idxBth < RT_ELEMENTS(g_aPgmBothModeData), PGM_UNLOCK(pVM), VERR_PGM_MODE_IPE);
    AssertReturnStmt(g_aPgmBothModeData[idxBth].pfnInvalidatePage, PGM_UNLOCK(pVM), VERR_PGM_MODE_IPE);
    rc = g_aPgmBothModeData[idxBth].pfnInvalidatePage(pVCpu, GCPtrPage);

    PGM_UNLOCK(pVM);
    STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,InvalidatePage), a);

    /* Ignore all irrelevant error codes. */
    if (   rc == VERR_PAGE_NOT_PRESENT
        || rc == VERR_PAGE_TABLE_NOT_PRESENT
        || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT
        || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)
        rc = VINF_SUCCESS;

    return rc;
}


/**
 * Executes an instruction using the interpreter.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pRegFrame   Register frame.
 * @param   pvFault     Fault address.
 */
VMMDECL(VBOXSTRICTRC) PGMInterpretInstruction(PVMCC pVM, PVMCPUCC pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
{
    NOREF(pVM);
    VBOXSTRICTRC rc = EMInterpretInstruction(pVCpu, pRegFrame, pvFault);
    if (rc == VERR_EM_INTERPRETER)
        rc = VINF_EM_RAW_EMULATE_INSTR;
    if (rc != VINF_SUCCESS)
        Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%RGv)\n", VBOXSTRICTRC_VAL(rc), pvFault));
    return rc;
}


/**
 * Gets effective page information (from the VMM page directory).
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtr       Guest Context virtual address of the page.
 * @param   pfFlags     Where to store the flags. These are X86_PTE_*.
 * @param   pHCPhys     Where to store the HC physical address of the page.
 *                      This is page aligned.
 * @remark  You should use PGMMapGetPage() for pages in a mapping.
 */
VMMDECL(int) PGMShwGetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
{
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    PGM_LOCK_VOID(pVM);

    uintptr_t idxShw = pVCpu->pgm.s.idxShadowModeData;
    AssertReturn(idxShw < RT_ELEMENTS(g_aPgmShadowModeData), VERR_PGM_MODE_IPE);
    AssertReturn(g_aPgmShadowModeData[idxShw].pfnGetPage, VERR_PGM_MODE_IPE);
    int rc = g_aPgmShadowModeData[idxShw].pfnGetPage(pVCpu, GCPtr, pfFlags, pHCPhys);

    PGM_UNLOCK(pVM);
    return rc;
}


/**
 * Modify page flags for a range of pages in the shadow context.
 *
 * The existing flags are ANDed with the fMask and ORed with the fFlags.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtr       Virtual address of the first page in the range.
 * @param   fFlags      The OR mask - page flags X86_PTE_*, excluding the page mask of course.
 * @param   fMask       The AND mask - page flags X86_PTE_*.
 *                      Be very CAREFUL when ~'ing constants which could be 32-bit!
 * @param   fOpFlags    A combination of the PGM_MK_PG_XXX flags.
 * @remark  You must use PGMMapModifyPage() for pages in a mapping.
 */
DECLINLINE(int) pdmShwModifyPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags)
{
    AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
    Assert(!(fOpFlags & ~(PGM_MK_PG_IS_MMIO2 | PGM_MK_PG_IS_WRITE_FAULT)));

    GCPtr &= PAGE_BASE_GC_MASK; /** @todo this ain't necessary, right... */

    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    PGM_LOCK_VOID(pVM);

    uintptr_t idxShw = pVCpu->pgm.s.idxShadowModeData;
    AssertReturn(idxShw < RT_ELEMENTS(g_aPgmShadowModeData), VERR_PGM_MODE_IPE);
    AssertReturn(g_aPgmShadowModeData[idxShw].pfnModifyPage, VERR_PGM_MODE_IPE);
    int rc = g_aPgmShadowModeData[idxShw].pfnModifyPage(pVCpu, GCPtr, PAGE_SIZE, fFlags, fMask, fOpFlags);

    PGM_UNLOCK(pVM);
    return rc;
}
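
/*
 * In other words the worker computes roughly u = (u & fMask) | fFlags for
 * the shadow PTE. For instance, the read-only case implemented by
 * PGMShwMakePageReadonly() below boils down to:
 *
 * @code
 *  // fFlags = 0: OR in nothing; fMask = ~RW: keep every bit except RW.
 *  pdmShwModifyPage(pVCpu, GCPtr, 0, ~(uint64_t)X86_PTE_RW, fOpFlags);
 * @endcode
 *
 * The 64-bit cast on the mask matters: a 32-bit ~X86_PTE_RW would clear the
 * upper half of a PAE/long-mode entry (hence the "Be very CAREFUL" remark
 * in the doc comment above).
 */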


/**
 * Changing the page flags for a single page in the shadow page tables so as to
 * make it read-only.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtr       Virtual address of the first page in the range.
 * @param   fOpFlags    A combination of the PGM_MK_PG_XXX flags.
 */
VMMDECL(int) PGMShwMakePageReadonly(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
{
    return pdmShwModifyPage(pVCpu, GCPtr, 0, ~(uint64_t)X86_PTE_RW, fOpFlags);
}


/**
 * Changing the page flags for a single page in the shadow page tables so as to
 * make it writable.
 *
 * The caller must know with 101% certainty that the guest page tables map this
 * as writable too. This function will deal with shared, zero and write
 * monitored pages.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtr       Virtual address of the first page in the range.
 * @param   fOpFlags    A combination of the PGM_MK_PG_XXX flags.
 */
VMMDECL(int) PGMShwMakePageWritable(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
{
    if (pVCpu->pgm.s.enmShadowMode != PGMMODE_NONE) /* avoid assertions */
        return pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)0, fOpFlags);
    return VINF_SUCCESS;
}


/**
 * Changing the page flags for a single page in the shadow page tables so as to
 * make it not present.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtr       Virtual address of the first page in the range.
 * @param   fOpFlags    A combination of the PGM_MK_PG_XXX flags.
 */
VMMDECL(int) PGMShwMakePageNotPresent(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
{
    return pdmShwModifyPage(pVCpu, GCPtr, 0, 0, fOpFlags);
}


/**
 * Changing the page flags for a single page in the shadow page tables so as to
 * make it supervisor and writable.
 *
 * This is for dealing with CR0.WP=0 and readonly user pages.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtr       Virtual address of the first page in the range.
 * @param   fBigPage    Whether or not this is a big page. If it is, we have to
 *                      change the shadow PDE as well. If it isn't, the caller
 *                      has checked that the shadow PDE doesn't need changing.
 *                      We ASSUME 4KB pages backing the big page here!
 * @param   fOpFlags    A combination of the PGM_MK_PG_XXX flags.
 */
int pgmShwMakePageSupervisorAndWritable(PVMCPUCC pVCpu, RTGCPTR GCPtr, bool fBigPage, uint32_t fOpFlags)
{
    int rc = pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)X86_PTE_US, fOpFlags);
    if (rc == VINF_SUCCESS && fBigPage)
    {
        /* this is a bit ugly... */
        switch (pVCpu->pgm.s.enmShadowMode)
        {
            case PGMMODE_32_BIT:
            {
                PX86PDE pPde = pgmShwGet32BitPDEPtr(pVCpu, GCPtr);
                AssertReturn(pPde, VERR_INTERNAL_ERROR_3);
                Log(("pgmShwMakePageSupervisorAndWritable: PDE=%#llx", pPde->u));
                pPde->u |= X86_PDE_RW;
                Log(("-> PDE=%#llx (32)\n", pPde->u));
                break;
            }
            case PGMMODE_PAE:
            case PGMMODE_PAE_NX:
            {
                PX86PDEPAE pPde = pgmShwGetPaePDEPtr(pVCpu, GCPtr);
                AssertReturn(pPde, VERR_INTERNAL_ERROR_3);
                Log(("pgmShwMakePageSupervisorAndWritable: PDE=%#llx", pPde->u));
                pPde->u |= X86_PDE_RW;
                Log(("-> PDE=%#llx (PAE)\n", pPde->u));
                break;
            }
            default:
                AssertFailedReturn(VERR_INTERNAL_ERROR_4);
        }
    }
    return rc;
}


/**
 * Gets the shadow page directory for the specified address, PAE.
 *
 * @returns Pointer to the shadow PD.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtr       The address.
 * @param   uGstPdpe    Guest PDPT entry. Valid.
 * @param   ppPD        Receives address of page directory
 */
int pgmShwSyncPaePDPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
{
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
    PPGMPOOLPAGE pShwPage;
    int rc;
    PGM_LOCK_ASSERT_OWNER(pVM);


    /* Allocate page directory if not present. */
    const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
    PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
    PX86PDPE pPdpe = &pPdpt->a[iPdPt];
    X86PGPAEUINT const uPdpe = pPdpe->u;
    if (uPdpe & (X86_PDPE_P | X86_PDPE_PG_MASK))
    {
        pShwPage = pgmPoolGetPage(pPool, uPdpe & X86_PDPE_PG_MASK);
        AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
        Assert((pPdpe->u & X86_PDPE_PG_MASK) == pShwPage->Core.Key);

        pgmPoolCacheUsed(pPool, pShwPage);

        /* Update the entry if necessary. */
        X86PGPAEUINT const uPdpeNew = pShwPage->Core.Key | (uGstPdpe & (X86_PDPE_P | X86_PDPE_A)) | (uPdpe & PGM_PDPT_FLAGS);
        if (uPdpeNew == uPdpe)
        { /* likely */ }
        else
            ASMAtomicWriteU64(&pPdpe->u, uPdpeNew);
    }
    else
    {
        RTGCPTR64 GCPdPt;
        PGMPOOLKIND enmKind;
        if (pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu))
        {
            /* AMD-V nested paging or real/protected mode without paging. */
            GCPdPt = GCPtr & ~(RT_BIT_64(X86_PDPT_SHIFT) - 1);
            enmKind = PGMPOOLKIND_PAE_PD_PHYS;
        }
        else if (CPUMGetGuestCR4(pVCpu) & X86_CR4_PAE)
        {
            if (uGstPdpe & X86_PDPE_P)
            {
                GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
                enmKind = PGMPOOLKIND_PAE_PD_FOR_PAE_PD;
            }
            else
            {
                /* PD not present; guest must reload CR3 to change it.
                 * No need to monitor anything in this case. */
                /** @todo r=bird: WTF is hit?!? */
                Assert(VM_IS_RAW_MODE_ENABLED(pVM));
                GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
                enmKind = PGMPOOLKIND_PAE_PD_PHYS;
                Assert(uGstPdpe & X86_PDPE_P); /* caller should do this already */
            }
        }
        else
        {
            GCPdPt = CPUMGetGuestCR3(pVCpu);
            enmKind = (PGMPOOLKIND)(PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD + iPdPt);
        }

        /* Create a reference back to the PDPT by using the index in its shadow page. */
        rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
                          pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPdPt, false /*fLockPage*/,
                          &pShwPage);
        AssertRCReturn(rc, rc);

        /* Hook it up. */
        ASMAtomicWriteU64(&pPdpe->u, pShwPage->Core.Key | (uGstPdpe & (X86_PDPE_P | X86_PDPE_A)) | (uPdpe & PGM_PDPT_FLAGS));
    }
    PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdpe);

    *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
    return VINF_SUCCESS;
}
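
/*
 * For orientation, the PAE index computed above: X86_PDPT_SHIFT is 30 and
 * X86_PDPT_MASK_PAE is 3, i.e. iPdPt is address bits 31:30 selecting one of
 * the four PDPT entries, each covering 1 GB. A worked example:
 *
 * @code
 *  // GCPtr = 0xC0100000:
 *  //   iPdPt = (0xC0100000 >> 30) & 3     = 3  (fourth 1 GB slot)
 *  //   iPd   = (0xC0100000 >> 21) & 0x1ff = 0  (first 2 MB slot within it)
 * @endcode
 */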


/**
 * Gets the pointer to the shadow page directory entry for an address, PAE.
 *
 * @returns Pointer to the PDE.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   GCPtr       The address.
 * @param   ppShwPde    Receives the address of the pgm pool page for the shadow page directory
 */
DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde)
{
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    PGM_LOCK_ASSERT_OWNER(pVM);

    PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
    AssertReturn(pPdpt, VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT); /* can't happen */
    const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
    X86PGPAEUINT const uPdpe = pPdpt->a[iPdPt].u;
    if (!(uPdpe & X86_PDPE_P))
    {
        LogFlow(("pgmShwGetPaePoolPagePD: PD %d not present (%RX64)\n", iPdPt, uPdpe));
        return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
    }
    AssertMsg(uPdpe & X86_PDPE_PG_MASK, ("GCPtr=%RGv\n", GCPtr));

    /* Fetch the pgm pool shadow descriptor. */
    PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), uPdpe & X86_PDPE_PG_MASK);
    AssertReturn(pShwPde, VERR_PGM_POOL_GET_PAGE_FAILED);

    *ppShwPde = pShwPde;
    return VINF_SUCCESS;
}


/**
 * Syncs the SHADOW page directory pointer for the specified address.
 *
 * Allocates backing pages in case the PDPT or PML4 entry is missing.
 *
 * The caller is responsible for making sure the guest has a valid PD before
 * calling this function.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtr       The address.
 * @param   uGstPml4e   Guest PML4 entry (valid).
 * @param   uGstPdpe    Guest PDPT entry (valid).
 * @param   ppPD        Receives address of page directory
 */
static int pgmShwSyncLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
{
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
    bool const fNestedPagingOrNoGstPaging = pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu);
    int rc;

    PGM_LOCK_ASSERT_OWNER(pVM);

    /*
     * PML4.
     */
    PPGMPOOLPAGE pShwPage;
    {
        const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
        PX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
        X86PGPAEUINT const uPml4e = pPml4e->u;

        /* Allocate page directory pointer table if not present. */
        if (uPml4e & (X86_PML4E_P | X86_PML4E_PG_MASK))
        {
            pShwPage = pgmPoolGetPage(pPool, uPml4e & X86_PML4E_PG_MASK);
            AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);

            pgmPoolCacheUsed(pPool, pShwPage);

            /* Update the entry if needed. */
            X86PGPAEUINT const uPml4eNew = pShwPage->Core.Key | (uGstPml4e & pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask)
                                         | (uPml4e & PGM_PML4_FLAGS);
            if (uPml4e == uPml4eNew)
            { /* likely */ }
            else
                ASMAtomicWriteU64(&pPml4e->u, uPml4eNew);
        }
        else
        {
            Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));

            RTGCPTR64 GCPml4;
            PGMPOOLKIND enmKind;
            if (fNestedPagingOrNoGstPaging)
            {
                /* AMD-V nested paging or real/protected mode without paging */
                GCPml4 = (RTGCPTR64)iPml4 << X86_PML4_SHIFT; /** @todo bogus calculation for PML5 */
                enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_PHYS;
            }
            else
            {
                GCPml4 = uGstPml4e & X86_PML4E_PG_MASK;
                enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT;
            }

            /* Create a reference back to the PDPT by using the index in its shadow page. */
            rc = pgmPoolAlloc(pVM, GCPml4, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
                              pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, false /*fLockPage*/,
                              &pShwPage);
            AssertRCReturn(rc, rc);

            /* Hook it up. */
            ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | (uGstPml4e & pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask)
                                        | (uPml4e & PGM_PML4_FLAGS));
        }
    }

    /*
     * PDPT.
     */
    const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
    PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
    PX86PDPE pPdpe = &pPdpt->a[iPdPt];
    X86PGPAEUINT const uPdpe = pPdpe->u;

    /* Allocate page directory if not present. */
    if (uPdpe & (X86_PDPE_P | X86_PDPE_PG_MASK))
    {
        pShwPage = pgmPoolGetPage(pPool, uPdpe & X86_PDPE_PG_MASK);
        AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);

        pgmPoolCacheUsed(pPool, pShwPage);

        /* Update the entry if needed. */
        X86PGPAEUINT const uPdpeNew = pShwPage->Core.Key | (uGstPdpe & pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask)
                                    | (uPdpe & PGM_PDPT_FLAGS);
        if (uPdpe == uPdpeNew)
        { /* likely */ }
        else
            ASMAtomicWriteU64(&pPdpe->u, uPdpeNew);
    }
    else
    {
        RTGCPTR64 GCPdPt;
        PGMPOOLKIND enmKind;
        if (fNestedPagingOrNoGstPaging)
        {
            /* AMD-V nested paging or real/protected mode without paging */
            GCPdPt = GCPtr & ~(RT_BIT_64(iPdPt << X86_PDPT_SHIFT) - 1);
            enmKind = PGMPOOLKIND_64BIT_PD_FOR_PHYS;
        }
        else
        {
            GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
            enmKind = PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD;
        }

        /* Create a reference back to the PDPT by using the index in its shadow page. */
        rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
                          pShwPage->idx, iPdPt, false /*fLockPage*/,
                          &pShwPage);
        AssertRCReturn(rc, rc);

        /* Hook it up. */
        ASMAtomicWriteU64(&pPdpe->u,
                          pShwPage->Core.Key | (uGstPdpe & pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask) | (uPdpe & PGM_PDPT_FLAGS));
    }

    *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
    return VINF_SUCCESS;
}


/**
 * Gets the SHADOW page directory pointer for the specified address (long mode).
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtr       The address.
 * @param   ppPml4e     Receives the address of the page map level 4 entry.
 * @param   ppPdpt      Receives the address of the page directory pointer table.
 * @param   ppPD        Receives the address of the page directory.
 */
DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
{
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    PGM_LOCK_ASSERT_OWNER(pVM);

    /*
     * PML4
     */
    const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    PCX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
    AssertReturn(pPml4e, VERR_PGM_PML4_MAPPING);
    if (ppPml4e)
        *ppPml4e = (PX86PML4E)pPml4e;
    X86PGPAEUINT const uPml4e = pPml4e->u;
    Log4(("pgmShwGetLongModePDPtr %RGv (%RHv) %RX64\n", GCPtr, pPml4e, uPml4e));
1532    if (!(uPml4e & X86_PML4E_P)) /** @todo other code checks for a NULL page frame number! */
1533 return VERR_PAGE_MAP_LEVEL4_NOT_PRESENT;
1534
1535 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1536 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, uPml4e & X86_PML4E_PG_MASK);
1537 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1538
1539 /*
1540 * PDPT
1541 */
1542 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1543 PCX86PDPT pPdpt = *ppPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1544 X86PGPAEUINT const uPdpe = pPdpt->a[iPdPt].u;
1545    if (!(uPdpe & X86_PDPE_P)) /** @todo other code checks for a NULL page frame number! */
1546 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1547
1548 pShwPage = pgmPoolGetPage(pPool, uPdpe & X86_PDPE_PG_MASK);
1549 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1550
1551 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1552 Log4(("pgmShwGetLongModePDPtr %RGv -> *ppPD=%p PDE=%p/%RX64\n", GCPtr, *ppPD, &(*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK], (*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK].u));
1553 return VINF_SUCCESS;
1554}
1555
1556
1557/**
1558 * Syncs the SHADOW EPT page directory pointer for the specified address. Allocates
1559 * backing pages in case the PDPT or PML4 entry is missing.
1560 *
1561 * @returns VBox status code.
1562 * @param pVCpu The cross context virtual CPU structure.
1563 * @param GCPtr The address.
1564 * @param   ppPdpt      Receives the address of the PDPT. Optional.
1565 * @param   ppPD        Receives the address of the page directory.
1566 */
1567static int pgmShwGetEPTPDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD)
1568{
1569 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1570 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1571 int rc;
1572
1573 Assert(pVM->pgm.s.fNestedPaging);
1574 PGM_LOCK_ASSERT_OWNER(pVM);
1575
1576 /*
1577 * PML4 level.
1578 */
1579 PEPTPML4 pPml4 = (PEPTPML4)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1580 Assert(pPml4);
1581
1582 /* Allocate page directory pointer table if not present. */
1583 PPGMPOOLPAGE pShwPage;
1584 {
1585 const unsigned iPml4 = (GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
1586 PEPTPML4E pPml4e = &pPml4->a[iPml4];
1587 EPTPML4E Pml4e;
1588 Pml4e.u = pPml4e->u;
1589 if (!(Pml4e.u & (EPT_E_PG_MASK | EPT_E_READ)))
1590 {
1591 RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT;
1592 rc = pgmPoolAlloc(pVM, GCPml4, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1593 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, false /*fLockPage*/,
1594 &pShwPage);
1595 AssertRCReturn(rc, rc);
1596
1597 /* Hook up the new PDPT now. */
1598 ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1599 }
1600 else
1601 {
1602 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
1603 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1604
1605 pgmPoolCacheUsed(pPool, pShwPage);
1606
1607            /* Hook up the cached PDPT if needed (probably not, given 512*512 PTs to sync). */
1608 if (Pml4e.u == (pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE))
1609 { }
1610 else
1611 ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1612 }
1613 }
1614
1615 /*
1616 * PDPT level.
1617 */
1618 const unsigned iPdPt = (GCPtr >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
1619 PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1620 PEPTPDPTE pPdpe = &pPdpt->a[iPdPt];
1621
1622 if (ppPdpt)
1623 *ppPdpt = pPdpt;
1624
1625 /* Allocate page directory if not present. */
1626 EPTPDPTE Pdpe;
1627 Pdpe.u = pPdpe->u;
1628 if (!(Pdpe.u & (EPT_E_PG_MASK | EPT_E_READ)))
1629 {
1630 RTGCPTR64 const GCPdPt = GCPtr & ~(RT_BIT_64(EPT_PDPT_SHIFT) - 1);
1631 rc = pgmPoolAlloc(pVM, GCPdPt, PGMPOOLKIND_EPT_PD_FOR_PHYS, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1632 pShwPage->idx, iPdPt, false /*fLockPage*/,
1633 &pShwPage);
1634 AssertRCReturn(rc, rc);
1635
1636 /* Hook up the new PD now. */
1637 ASMAtomicWriteU64(&pPdpe->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1638 }
1639 else
1640 {
1641 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & EPT_PDPTE_PG_MASK);
1642 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1643
1644 pgmPoolCacheUsed(pPool, pShwPage);
1645
1646        /* Hook up the cached PD if needed (probably not, given there are 512 PTs we may need to sync). */
1647 if (Pdpe.u == (pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE))
1648 { }
1649 else
1650 ASMAtomicWriteU64(&pPdpe->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1651 }
1652
1653 *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1654 return VINF_SUCCESS;
1655}
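
/*
 * Illustrative sketch (not part of the original source): how a ring-0 caller
 * might use pgmShwGetEPTPDPtr() to reach the shadow EPT PDE for a faulting
 * guest-physical address. The wrapper name is hypothetical and the PGM lock
 * is assumed to be held, as the function asserts; EPT_PD_SHIFT/EPT_PD_MASK
 * are the usual EPT directory index macros.
 */
#if 0
static int exampleGetShadowEptPde(PVMCPUCC pVCpu, RTGCPHYS GCPhysFault, PEPTPDE pPde)
{
    PEPTPD pEptPd = NULL;
    int rc = pgmShwGetEPTPDPtr(pVCpu, GCPhysFault, NULL /*ppPdpt*/, &pEptPd);
    if (RT_SUCCESS(rc))
    {
        unsigned const iPd = (GCPhysFault >> EPT_PD_SHIFT) & EPT_PD_MASK;
        pPde->u = pEptPd->a[iPd].u; /* the caller inspects/updates this PDE */
    }
    return rc;
}
#endif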
1656
1657
1658#ifdef IN_RING0
1659/**
1660 * Synchronizes a range of nested page table entries.
1661 *
1662 * The caller must own the PGM lock.
1663 *
1664 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1665 * @param GCPhys Where to start.
1666 * @param cPages How many pages which entries should be synced.
1667 * @param enmShwPagingMode The shadow paging mode (PGMMODE_EPT for VT-x,
1668 * host paging mode for AMD-V).
1669 */
1670int pgmShwSyncNestedPageLocked(PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint32_t cPages, PGMMODE enmShwPagingMode)
1671{
1672 PGM_LOCK_ASSERT_OWNER(pVCpu->CTX_SUFF(pVM));
1673
1674/** @todo r=bird: Gotta love this nested paging hacking we're still carrying with us... (Split PGM_TYPE_NESTED.) */
1675 int rc;
1676 switch (enmShwPagingMode)
1677 {
1678 case PGMMODE_32_BIT:
1679 {
1680 X86PDE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1681 rc = PGM_BTH_NAME_32BIT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1682 break;
1683 }
1684
1685 case PGMMODE_PAE:
1686 case PGMMODE_PAE_NX:
1687 {
1688 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1689 rc = PGM_BTH_NAME_PAE_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1690 break;
1691 }
1692
1693 case PGMMODE_AMD64:
1694 case PGMMODE_AMD64_NX:
1695 {
1696 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1697 rc = PGM_BTH_NAME_AMD64_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1698 break;
1699 }
1700
1701 case PGMMODE_EPT:
1702 {
1703 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1704 rc = PGM_BTH_NAME_EPT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1705 break;
1706 }
1707
1708 default:
1709 AssertMsgFailedReturn(("%d\n", enmShwPagingMode), VERR_IPE_NOT_REACHED_DEFAULT_CASE);
1710 }
1711 return rc;
1712}
1713#endif /* IN_RING0 */
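
/*
 * Illustrative sketch (not part of the original source): a ring-0 nested-paging
 * fault handler fragment syncing a single entry via pgmShwSyncNestedPageLocked().
 * The wrapper name is hypothetical; per the parameter docs above, a VT-x caller
 * passes PGMMODE_EPT while an AMD-V caller passes the host paging mode.
 */
#if 0
static int exampleSyncNestedPage(PVMCPUCC pVCpu, RTGCPHYS GCPhysFault)
{
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    PGM_LOCK_VOID(pVM);                     /* the function asserts lock ownership */
    int rc = pgmShwSyncNestedPageLocked(pVCpu, GCPhysFault, 1 /*cPages*/, PGMMODE_EPT);
    PGM_UNLOCK(pVM);
    return rc;
}
#endif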
1714
1715
1716/**
1717 * Gets effective Guest OS page information.
1718 *
1719 * When GCPtr is in a big page, the function will return as if it was a normal
1720 * 4KB page. If distinguishing between big and normal pages becomes necessary
1721 * at a later point, a dedicated variant of PGMGstGetPage() will be created for
1722 * that purpose.
1723 *
1724 * @returns VBox status code.
1725 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1726 * @param GCPtr Guest Context virtual address of the page.
1727 * @param pWalk Where to store the page walk information.
1728 */
1729VMMDECL(int) PGMGstGetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk)
1730{
1731 VMCPU_ASSERT_EMT(pVCpu);
1732 Assert(pWalk);
1733 RT_BZERO(pWalk, sizeof(*pWalk));
1734 uintptr_t idx = pVCpu->pgm.s.idxGuestModeData;
1735 AssertReturn(idx < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
1736 AssertReturn(g_aPgmGuestModeData[idx].pfnGetPage, VERR_PGM_MODE_IPE);
1737 return g_aPgmGuestModeData[idx].pfnGetPage(pVCpu, GCPtr, pWalk);
1738}
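
/*
 * Illustrative sketch (not part of the original source): translating a guest
 * linear address with PGMGstGetPage() on the EMT. The wrapper name is
 * hypothetical; PGMPTWALK and its GCPhys/fBigPage fields are the ones used
 * throughout this file.
 */
#if 0
static int exampleTranslateGuestAddr(PVMCPUCC pVCpu, RTGCPTR GCPtr)
{
    PGMPTWALK Walk;
    int rc = PGMGstGetPage(pVCpu, GCPtr, &Walk);
    if (RT_SUCCESS(rc))
        LogFlow(("GCPtr=%RGv -> GCPhys=%RGp fBigPage=%d\n", GCPtr, Walk.GCPhys, Walk.fBigPage));
    return rc;
}
#endif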
1739
1740
1741/**
1742 * Performs a guest page table walk.
1743 *
1744 * The guest should be in paged protect mode or long mode when making a call to
1745 * this function.
1746 *
1747 * @returns VBox status code.
1748 * @retval VINF_SUCCESS on success.
1749 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
1750 * @retval  VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
1751 * not valid, except enmType is PGMPTWALKGSTTYPE_INVALID.
1752 *
1753 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1754 * @param GCPtr The guest virtual address to walk by.
1755 * @param pWalk Where to return the walk result. This is valid for some
1756 * error codes as well.
1757 * @param pGstWalk The guest mode specific page walk information.
1758 */
1759int pgmGstPtWalk(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
1760{
1761 VMCPU_ASSERT_EMT(pVCpu);
1762 switch (pVCpu->pgm.s.enmGuestMode)
1763 {
1764 case PGMMODE_32_BIT:
1765 pGstWalk->enmType = PGMPTWALKGSTTYPE_32BIT;
1766 return PGM_GST_NAME_32BIT(Walk)(pVCpu, GCPtr, pWalk, &pGstWalk->u.Legacy);
1767
1768 case PGMMODE_PAE:
1769 case PGMMODE_PAE_NX:
1770 pGstWalk->enmType = PGMPTWALKGSTTYPE_PAE;
1771 return PGM_GST_NAME_PAE(Walk)(pVCpu, GCPtr, pWalk, &pGstWalk->u.Pae);
1772
1773 case PGMMODE_AMD64:
1774 case PGMMODE_AMD64_NX:
1775 pGstWalk->enmType = PGMPTWALKGSTTYPE_AMD64;
1776 return PGM_GST_NAME_AMD64(Walk)(pVCpu, GCPtr, pWalk, &pGstWalk->u.Amd64);
1777
1778 case PGMMODE_REAL:
1779 case PGMMODE_PROTECTED:
1780 pGstWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
1781 return VERR_PGM_NOT_USED_IN_MODE;
1782
1783 case PGMMODE_EPT:
1784 case PGMMODE_NESTED_32BIT:
1785 case PGMMODE_NESTED_PAE:
1786 case PGMMODE_NESTED_AMD64:
1787 default:
1788 AssertFailed();
1789 pGstWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
1790 return VERR_PGM_NOT_USED_IN_MODE;
1791 }
1792}
1793
1794
1795#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1796/**
1797 * Performs a guest second-level address translation (SLAT).
1798 *
1799 * The guest paging mode must be 32-bit, PAE or AMD64 when making a call to this
1800 * function.
1801 *
1802 * @returns VBox status code.
1803 * @retval VINF_SUCCESS on success.
1804 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
1805 * @retval  VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
1806 * not valid, except enmType is PGMPTWALKGSTTYPE_INVALID.
1807 *
1808 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1809 * @param GCPhysNested The nested-guest physical address being translated
1810 * (input).
1811 * @param fIsLinearAddrValid Whether the linear address in @a GCPtrNested is
1812 *                              valid. This indicates that the SLAT was caused
1813 *                              by translating a nested-guest linear address.
1814 * @param GCPtrNested The nested-guest virtual address that initiated the
1815 * SLAT. If none, pass NIL_RTGCPTR.
1816 * @param pWalk Where to return the walk result. This is valid for
1817 * some error codes as well.
1818 * @param pGstWalk The second-level paging-mode specific walk
1819 * information.
1820 */
1821static int pgmGstSlatWalk(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, bool fIsLinearAddrValid, RTGCPTR GCPtrNested,
1822 PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
1823{
1824 /* SLAT mode must be valid at this point as this should only be used -after- we have determined SLAT mode. */
1825 Assert( pVCpu->pgm.s.enmGuestSlatMode != PGMSLAT_DIRECT
1826 && pVCpu->pgm.s.enmGuestSlatMode != PGMSLAT_INVALID);
1827 switch (pVCpu->pgm.s.enmGuestSlatMode)
1828 {
1829 case PGMSLAT_EPT:
1830 pGstWalk->enmType = PGMPTWALKGSTTYPE_EPT;
1831 return PGM_GST_SLAT_NAME_EPT(Walk)(pVCpu, GCPhysNested, fIsLinearAddrValid, GCPtrNested, pWalk, &pGstWalk->u.Ept);
1832
1833 default:
1834 AssertFailed();
1835 pGstWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
1836 return VERR_PGM_NOT_USED_IN_MODE;
1837 }
1838}
1839
1840
1841/**
1842 * Performs a guest second-level address translation (SLAT) for a nested-guest
1843 * physical address.
1844 *
1845 * This version requires the SLAT mode to be provided by the caller because we could
1846 * be in the process of switching paging modes (MOV CRX) and cannot presume control
1847 * register values.
1848 *
1849 * @returns VBox status code.
1850 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1851 * @param enmSlatMode The second-level paging mode to use.
1852 * @param GCPhysNested The nested-guest physical address to translate.
1853 * @param pWalk Where to store the walk result.
1854 * @param pGstWalk Where to store the second-level paging-mode specific
1855 * walk information.
1856 */
1857static int pgmGstSlatWalkPhys(PVMCPUCC pVCpu, PGMSLAT enmSlatMode, RTGCPHYS GCPhysNested, PPGMPTWALK pWalk,
1858 PPGMPTWALKGST pGstWalk)
1859{
1860 AssertPtr(pWalk);
1861 AssertPtr(pGstWalk);
1862 switch (enmSlatMode)
1863 {
1864 case PGMSLAT_EPT:
1865 pGstWalk->enmType = PGMPTWALKGSTTYPE_EPT;
1866            return PGM_GST_SLAT_NAME_EPT(Walk)(pVCpu, GCPhysNested, false /* fIsLinearAddrValid */, NIL_RTGCPTR, pWalk,
1867 &pGstWalk->u.Ept);
1868
1869 default:
1870 AssertFailed();
1871 return VERR_PGM_NOT_USED_IN_MODE;
1872 }
1873}
1874#endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
1875
1876
1877/**
1878 * Tries to continue the previous walk.
1879 *
1880 * @note Requires the caller to hold the PGM lock from the first
1881 * pgmGstPtWalk() call to the last pgmGstPtWalkNext() call. Otherwise
1882 * we cannot use the pointers.
1883 *
1884 * @returns VBox status code.
1885 * @retval VINF_SUCCESS on success.
1886 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
1887 * @retval  VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
1888 * not valid, except enmType is PGMPTWALKGSTTYPE_INVALID.
1889 *
1890 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1891 * @param GCPtr The guest virtual address to walk by.
1892 * @param pWalk Pointer to the previous walk result and where to return
1893 * the result of this walk. This is valid for some error
1894 * codes as well.
1895 * @param pGstWalk The guest-mode specific walk information.
1896 */
1897int pgmGstPtWalkNext(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
1898{
1899 /*
1900     * We can only handle successful walks.
1901 * We also limit ourselves to the next page.
1902 */
1903 if ( pWalk->fSucceeded
1904 && GCPtr - pWalk->GCPtr == PAGE_SIZE)
1905 {
1906 Assert(pWalk->uLevel == 0);
1907 if (pGstWalk->enmType == PGMPTWALKGSTTYPE_AMD64)
1908 {
1909 /*
1910 * AMD64
1911 */
1912 if (!pWalk->fGigantPage && !pWalk->fBigPage)
1913 {
1914 /*
1915 * We fall back to full walk if the PDE table changes, if any
1916 * reserved bits are set, or if the effective page access changes.
1917 */
1918 const uint64_t fPteSame = X86_PTE_P | X86_PTE_RW | X86_PTE_US | X86_PTE_PWT
1919 | X86_PTE_PCD | X86_PTE_A | X86_PTE_PAE_NX;
1920 const uint64_t fPdeSame = X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_PWT
1921 | X86_PDE_PCD | X86_PDE_A | X86_PDE_PAE_NX | X86_PDE_PS;
1922
1923 if ((GCPtr >> X86_PD_PAE_SHIFT) == (pWalk->GCPtr >> X86_PD_PAE_SHIFT))
1924 {
1925 if (pGstWalk->u.Amd64.pPte)
1926 {
1927 X86PTEPAE Pte;
1928 Pte.u = pGstWalk->u.Amd64.pPte[1].u;
1929 if ( (Pte.u & fPteSame) == (pGstWalk->u.Amd64.Pte.u & fPteSame)
1930 && !(Pte.u & (pVCpu)->pgm.s.fGstAmd64MbzPteMask))
1931 {
1932 pWalk->GCPtr = GCPtr;
1933 pWalk->GCPhys = Pte.u & X86_PTE_PAE_PG_MASK;
1934 pGstWalk->u.Amd64.Pte.u = Pte.u;
1935 pGstWalk->u.Amd64.pPte++;
1936 return VINF_SUCCESS;
1937 }
1938 }
1939 }
1940 else if ((GCPtr >> X86_PDPT_SHIFT) == (pWalk->GCPtr >> X86_PDPT_SHIFT))
1941 {
1942 Assert(!((GCPtr >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK)); /* Must be first PT entry. */
1943 if (pGstWalk->u.Amd64.pPde)
1944 {
1945 X86PDEPAE Pde;
1946 Pde.u = pGstWalk->u.Amd64.pPde[1].u;
1947 if ( (Pde.u & fPdeSame) == (pGstWalk->u.Amd64.Pde.u & fPdeSame)
1948 && !(Pde.u & (pVCpu)->pgm.s.fGstAmd64MbzPdeMask))
1949 {
1950 /* Get the new PTE and check out the first entry. */
1951 int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, PGM_A20_APPLY(pVCpu, (Pde.u & X86_PDE_PAE_PG_MASK)),
1952 &pGstWalk->u.Amd64.pPt);
1953 if (RT_SUCCESS(rc))
1954 {
1955 pGstWalk->u.Amd64.pPte = &pGstWalk->u.Amd64.pPt->a[0];
1956 X86PTEPAE Pte;
1957 Pte.u = pGstWalk->u.Amd64.pPte->u;
1958 if ( (Pte.u & fPteSame) == (pGstWalk->u.Amd64.Pte.u & fPteSame)
1959 && !(Pte.u & (pVCpu)->pgm.s.fGstAmd64MbzPteMask))
1960 {
1961 pWalk->GCPtr = GCPtr;
1962 pWalk->GCPhys = Pte.u & X86_PTE_PAE_PG_MASK;
1963 pGstWalk->u.Amd64.Pte.u = Pte.u;
1964 pGstWalk->u.Amd64.Pde.u = Pde.u;
1965 pGstWalk->u.Amd64.pPde++;
1966 return VINF_SUCCESS;
1967 }
1968 }
1969 }
1970 }
1971 }
1972 }
1973 else if (!pWalk->fGigantPage)
1974 {
1975 if ((GCPtr & X86_PAGE_2M_BASE_MASK) == (pWalk->GCPtr & X86_PAGE_2M_BASE_MASK))
1976 {
1977 pWalk->GCPtr = GCPtr;
1978 pWalk->GCPhys += PAGE_SIZE;
1979 return VINF_SUCCESS;
1980 }
1981 }
1982 else
1983 {
1984 if ((GCPtr & X86_PAGE_1G_BASE_MASK) == (pWalk->GCPtr & X86_PAGE_1G_BASE_MASK))
1985 {
1986 pWalk->GCPtr = GCPtr;
1987 pWalk->GCPhys += PAGE_SIZE;
1988 return VINF_SUCCESS;
1989 }
1990 }
1991 }
1992 }
1993 /* Case we don't handle. Do full walk. */
1994 return pgmGstPtWalk(pVCpu, GCPtr, pWalk, pGstWalk);
1995}
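
/*
 * Illustrative sketch (not part of the original source): walking a contiguous
 * linear range page by page, reusing the previous walk where possible. The
 * wrapper name and range bounds are hypothetical; per the @note above, the PGM
 * lock must be held across the whole sequence for the cached pointers to stay
 * valid.
 */
#if 0
static int exampleWalkRange(PVMCPUCC pVCpu, RTGCPTR GCPtrFirst, RTGCPTR GCPtrLast)
{
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    PGMPTWALK    Walk;
    PGMPTWALKGST GstWalk;
    PGM_LOCK_VOID(pVM);                     /* hold across the walk + next calls */
    int rc = pgmGstPtWalk(pVCpu, GCPtrFirst, &Walk, &GstWalk);
    for (RTGCPTR GCPtr = GCPtrFirst + PAGE_SIZE; RT_SUCCESS(rc) && GCPtr <= GCPtrLast; GCPtr += PAGE_SIZE)
        rc = pgmGstPtWalkNext(pVCpu, GCPtr, &Walk, &GstWalk);
    PGM_UNLOCK(pVM);
    return rc;
}
#endif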
1996
1997
1998/**
1999 * Sets (replaces) the page flags for a range of pages in the guest's tables.
2000 *
2001 * @returns VBox status code.
2002 * @param pVCpu The cross context virtual CPU structure.
2003 * @param GCPtr The address of the first page.
2004 * @param cb The size of the range in bytes.
2005 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
2006 */
2007VMMDECL(int) PGMGstSetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
2008{
2009 VMCPU_ASSERT_EMT(pVCpu);
2010 return PGMGstModifyPage(pVCpu, GCPtr, cb, fFlags, 0);
2011}
2012
2013
2014/**
2015 * Modify page flags for a range of pages in the guest's tables
2016 *
2017 * The existing flags are ANDed with the fMask and ORed with the fFlags.
2018 *
2019 * @returns VBox status code.
2020 * @param pVCpu The cross context virtual CPU structure.
2021 * @param GCPtr Virtual address of the first page in the range.
2022 * @param cb Size (in bytes) of the range to apply the modification to.
2023 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
2024 * @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
2025 * Be very CAREFUL when ~'ing constants which could be 32-bit!
2026 */
2027VMMDECL(int) PGMGstModifyPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
2028{
2029 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,GstModifyPage), a);
2030 VMCPU_ASSERT_EMT(pVCpu);
2031
2032 /*
2033 * Validate input.
2034 */
2035 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
2036 Assert(cb);
2037
2038 LogFlow(("PGMGstModifyPage %RGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));
2039
2040 /*
2041 * Adjust input.
2042 */
2043 cb += GCPtr & PAGE_OFFSET_MASK;
2044 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
2045 GCPtr = (GCPtr & PAGE_BASE_GC_MASK);
2046
2047 /*
2048 * Call worker.
2049 */
2050 uintptr_t idx = pVCpu->pgm.s.idxGuestModeData;
2051 AssertReturn(idx < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
2052 AssertReturn(g_aPgmGuestModeData[idx].pfnModifyPage, VERR_PGM_MODE_IPE);
2053 int rc = g_aPgmGuestModeData[idx].pfnModifyPage(pVCpu, GCPtr, cb, fFlags, fMask);
2054
2055 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,GstModifyPage), a);
2056 return rc;
2057}
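
/*
 * Illustrative sketch (not part of the original source): write-protecting a
 * range by clearing X86_PTE_RW via PGMGstModifyPage(). The wrapper name is
 * hypothetical; note the explicit 64-bit cast when inverting the AND mask,
 * exactly the pitfall the fMask parameter doc warns about.
 */
#if 0
static int exampleWriteProtectRange(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb)
{
    /* OR in nothing, AND out only the RW bit; all other flags are preserved. */
    return PGMGstModifyPage(pVCpu, GCPtr, cb, 0 /*fFlags*/, ~(uint64_t)X86_PTE_RW /*fMask*/);
}
#endif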
2058
2059
2060/**
2061 * Checks whether the given PAE PDPEs are potentially valid for the guest.
2062 *
2063 * @returns @c true if the PDPEs are valid, @c false otherwise.
2064 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2065 * @param paPaePdpes The PAE PDPEs to validate.
2066 *
2067 * @remarks This function -only- checks the reserved bits in the PDPE entries.
2068 */
2069VMM_INT_DECL(bool) PGMGstArePaePdpesValid(PVMCPUCC pVCpu, PCX86PDPE paPaePdpes)
2070{
2071 Assert(paPaePdpes);
2072 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
2073 {
2074 X86PDPE const PaePdpe = paPaePdpes[i];
2075 if ( !(PaePdpe.u & X86_PDPE_P)
2076 || !(PaePdpe.u & pVCpu->pgm.s.fGstPaeMbzPdpeMask))
2077 { /* likely */ }
2078 else
2079 return false;
2080 }
2081 return true;
2082}
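
/*
 * Illustrative sketch (not part of the original source): rejecting a guest PAE
 * PDPT whose entries have reserved bits set, mirroring how
 * PGMGstMapPaePdpesAtCr3() below uses this check. The wrapper is hypothetical.
 */
#if 0
static int exampleCheckPaePdpt(PVMCPUCC pVCpu, PCX86PDPE paPaePdpes)
{
    /* paPaePdpes points at the four PDPEs already read from guest memory. */
    if (!PGMGstArePaePdpesValid(pVCpu, paPaePdpes))
        return VERR_PGM_PAE_PDPE_RSVD;
    return VINF_SUCCESS;
}
#endif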
2083
2084
2085/**
2086 * Performs the lazy mapping of the 32-bit guest PD.
2087 *
2088 * @returns VBox status code.
2089 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2090 * @param ppPd Where to return the pointer to the mapping. This is
2091 * always set.
2092 */
2093int pgmGstLazyMap32BitPD(PVMCPUCC pVCpu, PX86PD *ppPd)
2094{
2095 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2096 PGM_LOCK_VOID(pVM);
2097
2098 Assert(!pVCpu->pgm.s.CTX_SUFF(pGst32BitPd));
2099
2100 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAGE_MASK;
2101 PPGMPAGE pPage;
2102 /** @todo Nested VMX: convert GCPhysCR3 from nested-guest physical to
2103 * guest-physical address here. */
2104 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
2105 if (RT_SUCCESS(rc))
2106 {
2107 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)ppPd);
2108 if (RT_SUCCESS(rc))
2109 {
2110# ifdef IN_RING3
2111 pVCpu->pgm.s.pGst32BitPdR0 = NIL_RTR0PTR;
2112 pVCpu->pgm.s.pGst32BitPdR3 = *ppPd;
2113# else
2114            pVCpu->pgm.s.pGst32BitPdR3 = NIL_RTR3PTR;
2115 pVCpu->pgm.s.pGst32BitPdR0 = *ppPd;
2116# endif
2117 PGM_UNLOCK(pVM);
2118 return VINF_SUCCESS;
2119 }
2120 AssertRC(rc);
2121 }
2122 PGM_UNLOCK(pVM);
2123
2124 *ppPd = NULL;
2125 return rc;
2126}
2127
2128
2129/**
2130 * Performs the lazy mapping of the PAE guest PDPT.
2131 *
2132 * @returns VBox status code.
2133 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2134 * @param ppPdpt Where to return the pointer to the mapping. This is
2135 * always set.
2136 */
2137int pgmGstLazyMapPaePDPT(PVMCPUCC pVCpu, PX86PDPT *ppPdpt)
2138{
2139 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt));
2140 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2141 PGM_LOCK_VOID(pVM);
2142
2143 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAE_PAGE_MASK;
2144 PPGMPAGE pPage;
2145 /** @todo Nested VMX: convert GCPhysCR3 from nested-guest physical to
2146 * guest-physical address here. */
2147 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
2148 if (RT_SUCCESS(rc))
2149 {
2150 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)ppPdpt);
2151 if (RT_SUCCESS(rc))
2152 {
2153# ifdef IN_RING3
2154 pVCpu->pgm.s.pGstPaePdptR0 = NIL_RTR0PTR;
2155 pVCpu->pgm.s.pGstPaePdptR3 = *ppPdpt;
2156# else
2157 pVCpu->pgm.s.pGstPaePdptR3 = NIL_RTR3PTR;
2158 pVCpu->pgm.s.pGstPaePdptR0 = *ppPdpt;
2159# endif
2160 PGM_UNLOCK(pVM);
2161 return VINF_SUCCESS;
2162 }
2163 AssertRC(rc);
2164 }
2165
2166 PGM_UNLOCK(pVM);
2167 *ppPdpt = NULL;
2168 return rc;
2169}
2170
2171
2172/**
2173 * Performs the lazy mapping / updating of a PAE guest PD.
2174 *
2175 * @returns VBox status code.
2177 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2178 * @param iPdpt Which PD entry to map (0..3).
2179 * @param ppPd Where to return the pointer to the mapping. This is
2180 * always set.
2181 */
2182int pgmGstLazyMapPaePD(PVMCPUCC pVCpu, uint32_t iPdpt, PX86PDPAE *ppPd)
2183{
2184 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2185 PGM_LOCK_VOID(pVM);
2186
2187 PX86PDPT pGuestPDPT = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
2188 Assert(pGuestPDPT);
2189 Assert(pGuestPDPT->a[iPdpt].u & X86_PDPE_P);
2190 RTGCPHYS GCPhys = pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK;
2191 bool const fChanged = pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] != GCPhys;
2192
2193 PPGMPAGE pPage;
2194 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
2195 if (RT_SUCCESS(rc))
2196 {
2197 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)ppPd);
2198 AssertRC(rc);
2199 if (RT_SUCCESS(rc))
2200 {
2201# ifdef IN_RING3
2202 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = NIL_RTR0PTR;
2203 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = *ppPd;
2204# else
2205 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = NIL_RTR3PTR;
2206 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = *ppPd;
2207# endif
2208 if (fChanged)
2209 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = GCPhys;
2210 PGM_UNLOCK(pVM);
2211 return VINF_SUCCESS;
2212 }
2213 }
2214
2215 /* Invalid page or some failure, invalidate the entry. */
2216 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = NIL_RTGCPHYS;
2217 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = NIL_RTR3PTR;
2218 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = NIL_RTR0PTR;
2219
2220 PGM_UNLOCK(pVM);
2221 return rc;
2222}
2223
2224
2225/**
2226 * Performs the lazy mapping of the AMD64 guest PML4 table.
2227 *
2228 * @returns VBox status code.
2229 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2230 * @param ppPml4 Where to return the pointer to the mapping. This will
2231 * always be set.
2232 */
2233int pgmGstLazyMapPml4(PVMCPUCC pVCpu, PX86PML4 *ppPml4)
2234{
2235 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4));
2236 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2237 PGM_LOCK_VOID(pVM);
2238
2239 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_AMD64_PAGE_MASK;
2240 PPGMPAGE pPage;
2241 /** @todo Nested VMX: convert GCPhysCR3 from nested-guest physical to
2242 * guest-physical address here. */
2243 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
2244 if (RT_SUCCESS(rc))
2245 {
2246 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)ppPml4);
2247 if (RT_SUCCESS(rc))
2248 {
2249# ifdef IN_RING3
2250 pVCpu->pgm.s.pGstAmd64Pml4R0 = NIL_RTR0PTR;
2251 pVCpu->pgm.s.pGstAmd64Pml4R3 = *ppPml4;
2252# else
2253 pVCpu->pgm.s.pGstAmd64Pml4R3 = NIL_RTR3PTR;
2254 pVCpu->pgm.s.pGstAmd64Pml4R0 = *ppPml4;
2255# endif
2256 PGM_UNLOCK(pVM);
2257 return VINF_SUCCESS;
2258 }
2259 }
2260
2261 PGM_UNLOCK(pVM);
2262 *ppPml4 = NULL;
2263 return rc;
2264}
2265
2266
2267#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2268/**
2269 * Performs the lazy mapping of the guest PML4 table when using EPT paging.
2270 *
2271 * @returns VBox status code.
2272 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2273 * @param ppEptPml4 Where to return the pointer to the mapping. This will
2274 * always be set.
2275 */
2276int pgmGstLazyMapEptPml4(PVMCPUCC pVCpu, PEPTPML4 *ppEptPml4)
2277{
2278 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstEptPml4));
2279 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2280 PGM_LOCK_VOID(pVM);
2281
2282 RTGCPHYS const GCPhysEpt = pVCpu->pgm.s.uEptPtr & EPT_EPTP_PG_MASK;
2283 PPGMPAGE pPage;
2284 int rc = pgmPhysGetPageEx(pVM, GCPhysEpt, &pPage);
2285 if (RT_SUCCESS(rc))
2286 {
2287 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysEpt, (void **)ppEptPml4);
2288 if (RT_SUCCESS(rc))
2289 {
2290# ifdef IN_RING3
2291 pVCpu->pgm.s.pGstEptPml4R0 = NIL_RTR0PTR;
2292 pVCpu->pgm.s.pGstEptPml4R3 = *ppEptPml4;
2293# else
2294 pVCpu->pgm.s.pGstEptPml4R3 = NIL_RTR3PTR;
2295 pVCpu->pgm.s.pGstEptPml4R0 = *ppEptPml4;
2296# endif
2297 PGM_UNLOCK(pVM);
2298 return VINF_SUCCESS;
2299 }
2300 }
2301
2302 PGM_UNLOCK(pVM);
2303 *ppEptPml4 = NULL;
2304 return rc;
2305}
2306#endif
2307
2308
2309/**
2310 * Gets the current CR3 register value for the shadow memory context.
2311 * @returns CR3 value.
2312 * @param pVCpu The cross context virtual CPU structure.
2313 */
2314VMMDECL(RTHCPHYS) PGMGetHyperCR3(PVMCPU pVCpu)
2315{
2316 PPGMPOOLPAGE pPoolPage = pVCpu->pgm.s.CTX_SUFF(pShwPageCR3);
2317 AssertPtrReturn(pPoolPage, NIL_RTHCPHYS);
2318 return pPoolPage->Core.Key;
2319}
2320
2321
2322/**
2323 * Forces lazy remapping of the guest's PAE page-directory structures.
2324 *
2325 * @param pVCpu The cross context virtual CPU structure.
2326 */
2327static void pgmGstFlushPaePdpes(PVMCPU pVCpu)
2328{
2329 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->pgm.s.aGCPhysGstPaePDs); i++)
2330 {
2331 pVCpu->pgm.s.apGstPaePDsR3[i] = 0;
2332 pVCpu->pgm.s.apGstPaePDsR0[i] = 0;
2333 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
2334 }
2335}
2336
2337
2338/**
2339 * Gets the PGM CR3 value masked according to the current guest mode.
2340 *
2341 * @returns The masked PGM CR3 value.
2342 * @param pVCpu The cross context virtual CPU structure.
2343 * @param uCr3 The raw guest CR3 value.
2344 */
2345DECLINLINE(RTGCPHYS) pgmGetGuestMaskedCr3(PVMCPUCC pVCpu, uint64_t uCr3)
2346{
2347 RTGCPHYS GCPhysCR3;
2348 switch (pVCpu->pgm.s.enmGuestMode)
2349 {
2350 case PGMMODE_PAE:
2351 case PGMMODE_PAE_NX:
2352 GCPhysCR3 = (RTGCPHYS)(uCr3 & X86_CR3_PAE_PAGE_MASK);
2353 break;
2354 case PGMMODE_AMD64:
2355 case PGMMODE_AMD64_NX:
2356 GCPhysCR3 = (RTGCPHYS)(uCr3 & X86_CR3_AMD64_PAGE_MASK);
2357 break;
2358#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2359 case PGMMODE_EPT:
2360 GCPhysCR3 = (RTGCPHYS)(uCr3 & X86_CR3_EPT_PAGE_MASK);
2361 break;
2362#endif
2363 default:
2364 GCPhysCR3 = (RTGCPHYS)(uCr3 & X86_CR3_PAGE_MASK);
2365 break;
2366 }
2367 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
2368 return GCPhysCR3;
2369}
2370
2371
2372/**
2373 * Performs and schedules necessary updates following a CR3 load or reload.
2374 *
2375 * This will normally involve mapping the guest PD or nPDPT.
2376 *
2377 * @returns VBox status code.
2378 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
2379 *          safely be ignored and overridden since the FF will also be set.
2380 * @param pVCpu The cross context virtual CPU structure.
2381 * @param cr3 The new cr3.
2382 * @param fGlobal Indicates whether this is a global flush or not.
2383 * @param fPdpesMapped Whether the PAE PDPEs (and PDPT) have been mapped.
2384 */
2385VMMDECL(int) PGMFlushTLB(PVMCPUCC pVCpu, uint64_t cr3, bool fGlobal, bool fPdpesMapped)
2386{
2387 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLB), a);
2388 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2389
2390 VMCPU_ASSERT_EMT(pVCpu);
2391
2392 /*
2393     * Always flag the necessary updates; this is needed for hardware acceleration.
2394 */
2395 /** @todo optimize this, it shouldn't always be necessary. */
2396 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2397 if (fGlobal)
2398 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2399 LogFlow(("PGMFlushTLB: cr3=%RX64 OldCr3=%RX64 fGlobal=%d\n", cr3, pVCpu->pgm.s.GCPhysCR3, fGlobal));
2400
2401 /*
2402 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
2403 */
2404 RTGCPHYS const GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
2405 RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, cr3);
2406#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2407 if ( !fPdpesMapped
2408 && CPUMIsGuestVmxEptPagingEnabled(pVCpu))
2409 {
2410 PGMPTWALK Walk;
2411 PGMPTWALKGST GstWalk;
2412 int const rc = pgmGstSlatWalkPhys(pVCpu, PGMSLAT_EPT, GCPhysCR3, &Walk, &GstWalk);
2413 if (RT_SUCCESS(rc))
2414 GCPhysCR3 = Walk.GCPhys;
2415 else
2416 {
2417 AssertMsgFailed(("Failed to load CR3 at %#RX64. rc=%Rrc\n", GCPhysCR3, rc));
2418 return rc;
2419 }
2420 }
2421#endif
2422 int rc = VINF_SUCCESS;
2423 if (GCPhysOldCR3 != GCPhysCR3)
2424 {
2425 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2426 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2427 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
2428
2429 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2430 rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3, fPdpesMapped);
2431 if (RT_LIKELY(rc == VINF_SUCCESS))
2432 { }
2433 else
2434 {
2435 AssertMsg(rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
2436 Assert(VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_PGM_SYNC_CR3));
2437 pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3;
2438 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MAP_CR3;
2439 }
2440
2441 if (fGlobal)
2442 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBNewCR3Global));
2443 else
2444 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBNewCR3));
2445 }
2446 else
2447 {
2448#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
2449 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
2450 if (pPool->cDirtyPages)
2451 {
2452 PGM_LOCK_VOID(pVM);
2453 pgmPoolResetDirtyPages(pVM);
2454 PGM_UNLOCK(pVM);
2455 }
2456#endif
2457 if (fGlobal)
2458 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBSameCR3Global));
2459 else
2460 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBSameCR3));
2461
2462 /*
2463 * Flush PAE PDPTEs.
2464 */
2465 if (PGMMODE_IS_PAE(pVCpu->pgm.s.enmGuestMode))
2466 pgmGstFlushPaePdpes(pVCpu);
2467 }
2468
2469 IEMTlbInvalidateAll(pVCpu, false /*fVmm*/);
2470 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLB), a);
2471 return rc;
2472}
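
/*
 * Illustrative sketch (not part of the original source): how a MOV CR3
 * emulation might invoke PGMFlushTLB(). A plain CR3 load flushes only
 * non-global TLB entries, hence fGlobal is false; toggling CR4.PGE would
 * warrant a global flush instead. The wrapper and uNewCr3 are hypothetical.
 */
#if 0
static int exampleMovCr3(PVMCPUCC pVCpu, uint64_t uNewCr3)
{
    return PGMFlushTLB(pVCpu, uNewCr3, false /*fGlobal*/, false /*fPdpesMapped*/);
}
#endif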
2473
2474
2475/**
2476 * Performs and schedules necessary updates following a CR3 load or reload when
2477 * using nested or extended paging.
2478 *
2479 * This API is an alternative to PGMFlushTLB that avoids actually flushing the
2480 * TLB and triggering a SyncCR3.
2481 *
2482 * This will normally involve mapping the guest PD or nPDPT.
2483 *
2484 * @returns VBox status code.
2485 * @retval VINF_SUCCESS.
2486 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync (not for nested
2487 * paging modes). This can safely be ignored and overridden since the
2488 *          FF will also be set.
2489 * @param pVCpu The cross context virtual CPU structure.
2490 * @param cr3 The new CR3.
2491 * @param fPdpesMapped Whether the PAE PDPEs (and PDPT) have been mapped.
2492 */
2493VMMDECL(int) PGMUpdateCR3(PVMCPUCC pVCpu, uint64_t cr3, bool fPdpesMapped)
2494{
2495 VMCPU_ASSERT_EMT(pVCpu);
2496 LogFlow(("PGMUpdateCR3: cr3=%RX64 OldCr3=%RX64\n", cr3, pVCpu->pgm.s.GCPhysCR3));
2497
2498 /* We assume we're only called in nested paging mode. */
2499 Assert(pVCpu->CTX_SUFF(pVM)->pgm.s.fNestedPaging || pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
2500
2501 /*
2502 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
2503 */
2504 RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, cr3);
2505#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2506 if ( !fPdpesMapped
2507 && CPUMIsGuestVmxEptPagingEnabled(pVCpu))
2508 {
2509 PGMPTWALK Walk;
2510 PGMPTWALKGST GstWalk;
2511 int const rc = pgmGstSlatWalkPhys(pVCpu, PGMSLAT_EPT, GCPhysCR3, &Walk, &GstWalk);
2512 if (RT_SUCCESS(rc))
2513 GCPhysCR3 = Walk.GCPhys;
2514 else
2515 {
2516 AssertMsgFailed(("Failed to load CR3 at %#RX64. rc=%Rrc\n", GCPhysCR3, rc));
2517 return VERR_PGM_PAE_PDPE_RSVD;
2518 }
2519 }
2520#endif
2521 int rc = VINF_SUCCESS;
2522 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
2523 {
2524 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2525 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2526 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
2527
2528 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2529 rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3, fPdpesMapped);
2530
2531 AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */ /** @todo this isn't true for the mac, but we need hw to test/fix this. */
2532 }
2533 /*
2534 * Flush PAE PDPTEs.
2535 */
2536 else if (PGMMODE_IS_PAE(pVCpu->pgm.s.enmGuestMode))
2537 pgmGstFlushPaePdpes(pVCpu);
2538
2539 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
2540 return rc;
2541}
2542
2543
2544/**
2545 * Synchronize the paging structures.
2546 *
2547 * This function is called in response to the VMCPU_FF_PGM_SYNC_CR3 and
2548 * VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL force action flags, which are set
2549 * in several places, most importantly whenever the CR3 is loaded.
2550 *
2551 * @returns VBox status code. May return VINF_PGM_SYNC_CR3 in RC/R0.
2552 * @retval VERR_PGM_NO_HYPERVISOR_ADDRESS in raw-mode when we're unable to map
2553 * the VMM into guest context.
2554 * @param pVCpu The cross context virtual CPU structure.
2555 * @param cr0 Guest context CR0 register
2556 * @param cr3 Guest context CR3 register
2557 * @param cr4 Guest context CR4 register
2558 * @param fGlobal Including global page directories or not
2559 */
2560VMMDECL(int) PGMSyncCR3(PVMCPUCC pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
2561{
2562 int rc;
2563
2564 VMCPU_ASSERT_EMT(pVCpu);
2565
2566 /*
2567 * The pool may have pending stuff and even require a return to ring-3 to
2568 * clear the whole thing.
2569 */
2570 rc = pgmPoolSyncCR3(pVCpu);
2571 if (rc != VINF_SUCCESS)
2572 return rc;
2573
2574 /*
2575 * We might be called when we shouldn't.
2576 *
2577 * The mode switching will ensure that the PD is resynced after every mode
2578 * switch. So, if we find ourselves here when in protected or real mode
2579 * we can safely clear the FF and return immediately.
2580 */
2581 if (pVCpu->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
2582 {
2583 Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
2584 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2585 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2586 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2587 return VINF_SUCCESS;
2588 }
2589
2590 /* If global pages are not supported, then all flushes are global. */
2591 if (!(cr4 & X86_CR4_PGE))
2592 fGlobal = true;
2593 LogFlow(("PGMSyncCR3: cr0=%RX64 cr3=%RX64 cr4=%RX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
2594 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)));
2595
2596 /*
2597 * Check if we need to finish an aborted MapCR3 call (see PGMFlushTLB).
2598 * This should be done before SyncCR3.
2599 */
2600 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MAP_CR3)
2601 {
2602 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MAP_CR3;
2603
2604 RTGCPHYS const GCPhysCR3Old = pVCpu->pgm.s.GCPhysCR3; NOREF(GCPhysCR3Old);
2605 RTGCPHYS const GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, cr3);
2606 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
2607 {
2608 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2609 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2610 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
2611 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2612 rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3, false /* fPdpesMapped */);
2613 }
2614
2615 /* Make sure we check for pending pgm pool syncs as we clear VMCPU_FF_PGM_SYNC_CR3 later on! */
2616 if ( rc == VINF_PGM_SYNC_CR3
2617 || (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL))
2618 {
2619 Log(("PGMSyncCR3: pending pgm pool sync after MapCR3!\n"));
2620#ifdef IN_RING3
2621 rc = pgmPoolSyncCR3(pVCpu);
2622#else
2623 if (rc == VINF_PGM_SYNC_CR3)
2624 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3Old;
2625 return VINF_PGM_SYNC_CR3;
2626#endif
2627 }
2628 AssertRCReturn(rc, rc);
2629 AssertRCSuccessReturn(rc, VERR_IPE_UNEXPECTED_INFO_STATUS);
2630 }
2631
2632 /*
2633 * Let the 'Bth' function do the work and we'll just keep track of the flags.
2634 */
2635 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
2636
2637 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2638 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2639 AssertReturn(g_aPgmBothModeData[idxBth].pfnSyncCR3, VERR_PGM_MODE_IPE);
2640 rc = g_aPgmBothModeData[idxBth].pfnSyncCR3(pVCpu, cr0, cr3, cr4, fGlobal);
2641
2642 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
2643 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
2644 if (rc == VINF_SUCCESS)
2645 {
2646 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)
2647 {
2648 /* Go back to ring 3 if a pgm pool sync is again pending. */
2649 return VINF_PGM_SYNC_CR3;
2650 }
2651
2652 if (!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
2653 {
2654 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2655 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2656 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2657 }
2658 }
2659
2660 /*
2661 * Now flush the CR3 (guest context).
2662 */
2663 if (rc == VINF_SUCCESS)
2664 PGM_INVL_VCPU_TLBS(pVCpu);
2665 return rc;
2666}
2667
2668
2669/**
2670 * Maps all the PAE PDPE entries.
2671 *
2672 * @returns VBox status code.
2673 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2674 * @param paPaePdpes The new PAE PDPE values.
2675 *
2676 * @remarks This function may be invoked during the process of changing the guest
2677 * paging mode to PAE, hence the guest state (CR0, CR4 etc.) may not
2678 * reflect PAE paging just yet.
2679 */
2680VMM_INT_DECL(int) PGMGstMapPaePdpes(PVMCPUCC pVCpu, PCX86PDPE paPaePdpes)
2681{
2682 Assert(paPaePdpes);
2683 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
2684 {
2685 X86PDPE const PaePdpe = paPaePdpes[i];
2686
2687 /*
2688         * In some cases (e.g. in SVM with nested paging) the validation of the PAE PDPEs
2689         * is deferred.[1] Also, different situations require different handling of invalid
2690 * PDPE entries. Here we assume the caller has already validated or doesn't require
2691 * validation of the PDPEs.
2692 *
2693 * [1] -- See AMD spec. 15.25.10 "Legacy PAE Mode".
2694 */
2695 if ((PaePdpe.u & (pVCpu->pgm.s.fGstPaeMbzPdpeMask | X86_PDPE_P)) == X86_PDPE_P)
2696 {
2697 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2698 RTHCPTR HCPtr;
2699 RTGCPHYS const GCPhys = PGM_A20_APPLY(pVCpu, PaePdpe.u & X86_PDPE_PG_MASK);
2700
2701 PGM_LOCK_VOID(pVM);
2702 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
2703 AssertReturnStmt(pPage, PGM_UNLOCK(pVM), VERR_PGM_INVALID_PDPE_ADDR);
2704 int const rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)&HCPtr);
2705 PGM_UNLOCK(pVM);
2706 if (RT_SUCCESS(rc))
2707 {
2708# ifdef IN_RING3
2709 pVCpu->pgm.s.apGstPaePDsR3[i] = (PX86PDPAE)HCPtr;
2710 pVCpu->pgm.s.apGstPaePDsR0[i] = NIL_RTR0PTR;
2711# else
2712 pVCpu->pgm.s.apGstPaePDsR3[i] = NIL_RTR3PTR;
2713 pVCpu->pgm.s.apGstPaePDsR0[i] = (PX86PDPAE)HCPtr;
2714# endif
2715 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = GCPhys;
2716 continue;
2717 }
2718        AssertMsgFailed(("PGMGstMapPaePdpes: rc=%d GCPhys=%RGp i=%d\n", rc, GCPhys, i));
2719 }
2720 pVCpu->pgm.s.apGstPaePDsR3[i] = 0;
2721 pVCpu->pgm.s.apGstPaePDsR0[i] = 0;
2722 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
2723 }
2724
2725 return VINF_SUCCESS;
2726}
2727
2728
2729/**
2730 * Validates and maps the PDPT and PAE PDPEs referenced by the given CR3.
2731 *
2732 * @returns VBox status code.
2733 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2734 * @param cr3 The guest CR3 value.
2735 *
2736 * @remarks This function may be invoked during the process of changing the guest
2737 * paging mode to PAE but the guest state (CR0, CR4 etc.) may not reflect
2738 * PAE paging just yet.
2739 */
2740VMM_INT_DECL(int) PGMGstMapPaePdpesAtCr3(PVMCPUCC pVCpu, uint64_t cr3)
2741{
2742 /*
2743 * Read the page-directory-pointer table (PDPT) at CR3.
2744 */
2745 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2746 RTGCPHYS GCPhysCR3 = (cr3 & X86_CR3_PAE_PAGE_MASK);
2747 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
2748
2749#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2750 if (CPUMIsGuestVmxEptPaePagingEnabled(pVCpu))
2751 {
2752 PGMPTWALK Walk;
2753 PGMPTWALKGST GstWalk;
2754 int const rc = pgmGstSlatWalkPhys(pVCpu, PGMSLAT_EPT, GCPhysCR3, &Walk, &GstWalk);
2755 if (RT_SUCCESS(rc))
2756 GCPhysCR3 = Walk.GCPhys;
2757 else
2758 {
2759 AssertMsgFailed(("Failed to load CR3 at %#RX64. rc=%Rrc\n", GCPhysCR3, rc));
2760 return rc;
2761 }
2762 }
2763#endif
2764
2765 PGM_LOCK_VOID(pVM);
2766 PPGMPAGE pPageCR3 = pgmPhysGetPage(pVM, GCPhysCR3);
2767 AssertReturnStmt(pPageCR3, PGM_UNLOCK(pVM), VERR_PGM_INVALID_CR3_ADDR);
2768
2769 X86PDPE aPaePdpes[X86_PG_PAE_PDPE_ENTRIES];
2770 RTHCPTR HCPtrGuestCr3;
2771 int rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPageCR3, GCPhysCR3, (void **)&HCPtrGuestCr3);
2772 PGM_UNLOCK(pVM);
2773 AssertRCReturn(rc, rc);
2774 memcpy(&aPaePdpes[0], HCPtrGuestCr3, sizeof(aPaePdpes));
2775
2776 /*
2777 * Validate the page-directory-pointer table entries (PDPE).
2778 */
2779 if (PGMGstArePaePdpesValid(pVCpu, &aPaePdpes[0]))
2780 {
2781 /*
2782 * Map the PDPT.
2783 * We deliberately don't update PGM's GCPhysCR3 here as it's expected
2784 * that PGMFlushTLB will be called soon and only a change to CR3 then
2785 * will cause the shadow page tables to be updated.
2786 */
2787# ifdef IN_RING3
2788 pVCpu->pgm.s.pGstPaePdptR3 = (PX86PDPT)HCPtrGuestCr3;
2789 pVCpu->pgm.s.pGstPaePdptR0 = NIL_RTR0PTR;
2790# else
2791 pVCpu->pgm.s.pGstPaePdptR3 = NIL_RTR3PTR;
2792 pVCpu->pgm.s.pGstPaePdptR0 = (PX86PDPT)HCPtrGuestCr3;
2793# endif
2794
2795 /*
2796 * Update CPUM.
2797 * We do this prior to mapping the PDPEs to keep the order consistent
2798 * with what's used in HM. In practice, it doesn't really matter.
2799 */
2800 CPUMSetGuestPaePdpes(pVCpu, &aPaePdpes[0]);
2801
2802 /*
2803 * Map the PDPEs.
2804 */
2805 return PGMGstMapPaePdpes(pVCpu, &aPaePdpes[0]);
2806 }
2807 return VERR_PGM_PAE_PDPE_RSVD;
2808}
2809
2810
2811/**
2812 * Called whenever CR0 or CR4 changes in a way which may affect the paging mode.
2813 *
2814 * @returns VBox status code, with the following informational code for
2815 * VM scheduling.
2816 * @retval  VINF_SUCCESS if there was no change, or it was successfully dealt with.
2817 * @retval VINF_EM_SUSPEND or VINF_EM_OFF on a fatal runtime error. (R3 only)
2818 *
2819 * @param pVCpu The cross context virtual CPU structure.
2820 * @param cr0 The new cr0.
2821 * @param cr4 The new cr4.
2822 * @param efer The new extended feature enable register.
2823 * @param fForce Whether to force a mode change.
2824 */
2825VMMDECL(int) PGMChangeMode(PVMCPUCC pVCpu, uint64_t cr0, uint64_t cr4, uint64_t efer, bool fForce)
2826{
2827 VMCPU_ASSERT_EMT(pVCpu);
2828
2829 /*
2830 * Calc the new guest mode.
2831 *
2832 * Note! We check PG before PE and without requiring PE because of the
2833 * special AMD-V paged real mode (APM vol 2, rev 3.28, 15.9).
2834 */
2835 PGMMODE enmGuestMode;
2836 if (cr0 & X86_CR0_PG)
2837 {
2838 if (!(cr4 & X86_CR4_PAE))
2839 {
2840 bool const fPse = !!(cr4 & X86_CR4_PSE);
2841 if (pVCpu->pgm.s.fGst32BitPageSizeExtension != fPse)
2842 Log(("PGMChangeMode: CR4.PSE %d -> %d\n", pVCpu->pgm.s.fGst32BitPageSizeExtension, fPse));
2843 pVCpu->pgm.s.fGst32BitPageSizeExtension = fPse;
2844 enmGuestMode = PGMMODE_32_BIT;
2845 }
2846 else if (!(efer & MSR_K6_EFER_LME))
2847 {
2848 if (!(efer & MSR_K6_EFER_NXE))
2849 enmGuestMode = PGMMODE_PAE;
2850 else
2851 enmGuestMode = PGMMODE_PAE_NX;
2852 }
2853 else
2854 {
2855 if (!(efer & MSR_K6_EFER_NXE))
2856 enmGuestMode = PGMMODE_AMD64;
2857 else
2858 enmGuestMode = PGMMODE_AMD64_NX;
2859 }
2860 }
2861 else if (!(cr0 & X86_CR0_PE))
2862 enmGuestMode = PGMMODE_REAL;
2863 else
2864 enmGuestMode = PGMMODE_PROTECTED;
2865
2866 /*
2867 * Did it change?
2868 */
2869 if ( !fForce
2870 && pVCpu->pgm.s.enmGuestMode == enmGuestMode)
2871 return VINF_SUCCESS;
2872
2873 /* Flush the TLB */
2874 PGM_INVL_VCPU_TLBS(pVCpu);
2875 return PGMHCChangeMode(pVCpu->CTX_SUFF(pVM), pVCpu, enmGuestMode);
2876}
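
/*
 * Illustrative sketch (not part of the original source): re-evaluating the
 * paging mode after a control-register write, feeding PGMChangeMode() from the
 * real CPUM getters. The wrapper name is hypothetical.
 */
#if 0
static int exampleRecalcPagingMode(PVMCPUCC pVCpu)
{
    return PGMChangeMode(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR4(pVCpu),
                         CPUMGetGuestEFER(pVCpu), false /*fForce*/);
}
#endif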
2877
2878
2879/**
2880 * Converts a PGMMODE value to a PGM_TYPE_* \#define.
2881 *
2882 * @returns PGM_TYPE_*.
2883 * @param pgmMode The mode value to convert.
2884 */
2885DECLINLINE(unsigned) pgmModeToType(PGMMODE pgmMode)
2886{
2887 switch (pgmMode)
2888 {
2889 case PGMMODE_REAL: return PGM_TYPE_REAL;
2890 case PGMMODE_PROTECTED: return PGM_TYPE_PROT;
2891 case PGMMODE_32_BIT: return PGM_TYPE_32BIT;
2892 case PGMMODE_PAE:
2893 case PGMMODE_PAE_NX: return PGM_TYPE_PAE;
2894 case PGMMODE_AMD64:
2895 case PGMMODE_AMD64_NX: return PGM_TYPE_AMD64;
2896 case PGMMODE_NESTED_32BIT: return PGM_TYPE_NESTED_32BIT;
2897 case PGMMODE_NESTED_PAE: return PGM_TYPE_NESTED_PAE;
2898 case PGMMODE_NESTED_AMD64: return PGM_TYPE_NESTED_AMD64;
2899 case PGMMODE_EPT: return PGM_TYPE_EPT;
2900 case PGMMODE_NONE: return PGM_TYPE_NONE;
2901 default:
2902 AssertFatalMsgFailed(("pgmMode=%d\n", pgmMode));
2903 }
2904}
2905
2906
2907/**
2908 * Calculates the shadow paging mode.
2909 *
2910 * @returns The shadow paging mode.
2911 * @param pVM The cross context VM structure.
2912 * @param enmGuestMode The guest mode.
2913 * @param enmHostMode The host mode.
2914 * @param enmShadowMode The current shadow mode.
2915 */
2916static PGMMODE pgmCalcShadowMode(PVMCC pVM, PGMMODE enmGuestMode, SUPPAGINGMODE enmHostMode, PGMMODE enmShadowMode)
2917{
2918 switch (enmGuestMode)
2919 {
2920 /*
2921 * When switching to real or protected mode we don't change
2922 * anything since it's likely that we'll switch back pretty soon.
2923 *
2924         * During pgmR3InitPaging we'll end up here with PGMMODE_INVALID,
2925         * and this code is supposed to determine which shadow paging mode
2926         * and switcher to use during init.
2927 */
2928 case PGMMODE_REAL:
2929 case PGMMODE_PROTECTED:
2930 if ( enmShadowMode != PGMMODE_INVALID
2931 && VM_IS_RAW_MODE_ENABLED(pVM) /* always switch in hm and nem modes! */)
2932 break; /* (no change) */
2933
2934 switch (enmHostMode)
2935 {
2936 case SUPPAGINGMODE_32_BIT:
2937 case SUPPAGINGMODE_32_BIT_GLOBAL:
2938 enmShadowMode = PGMMODE_32_BIT;
2939 break;
2940
2941 case SUPPAGINGMODE_PAE:
2942 case SUPPAGINGMODE_PAE_NX:
2943 case SUPPAGINGMODE_PAE_GLOBAL:
2944 case SUPPAGINGMODE_PAE_GLOBAL_NX:
2945 enmShadowMode = PGMMODE_PAE;
2946 break;
2947
2948 case SUPPAGINGMODE_AMD64:
2949 case SUPPAGINGMODE_AMD64_GLOBAL:
2950 case SUPPAGINGMODE_AMD64_NX:
2951 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
2952 enmShadowMode = PGMMODE_PAE;
2953 break;
2954
2955 default:
2956 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
2957 }
2958 break;
2959
2960 case PGMMODE_32_BIT:
2961 switch (enmHostMode)
2962 {
2963 case SUPPAGINGMODE_32_BIT:
2964 case SUPPAGINGMODE_32_BIT_GLOBAL:
2965 enmShadowMode = PGMMODE_32_BIT;
2966 break;
2967
2968 case SUPPAGINGMODE_PAE:
2969 case SUPPAGINGMODE_PAE_NX:
2970 case SUPPAGINGMODE_PAE_GLOBAL:
2971 case SUPPAGINGMODE_PAE_GLOBAL_NX:
2972 enmShadowMode = PGMMODE_PAE;
2973 break;
2974
2975 case SUPPAGINGMODE_AMD64:
2976 case SUPPAGINGMODE_AMD64_GLOBAL:
2977 case SUPPAGINGMODE_AMD64_NX:
2978 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
2979 enmShadowMode = PGMMODE_PAE;
2980 break;
2981
2982 default:
2983 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
2984 }
2985 break;
2986
2987 case PGMMODE_PAE:
2988 case PGMMODE_PAE_NX: /** @todo This might require more switchers and guest+both modes. */
2989 switch (enmHostMode)
2990 {
2991 case SUPPAGINGMODE_32_BIT:
2992 case SUPPAGINGMODE_32_BIT_GLOBAL:
2993 enmShadowMode = PGMMODE_PAE;
2994 break;
2995
2996 case SUPPAGINGMODE_PAE:
2997 case SUPPAGINGMODE_PAE_NX:
2998 case SUPPAGINGMODE_PAE_GLOBAL:
2999 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3000 enmShadowMode = PGMMODE_PAE;
3001 break;
3002
3003 case SUPPAGINGMODE_AMD64:
3004 case SUPPAGINGMODE_AMD64_GLOBAL:
3005 case SUPPAGINGMODE_AMD64_NX:
3006 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3007 enmShadowMode = PGMMODE_PAE;
3008 break;
3009
3010 default:
3011 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
3012 }
3013 break;
3014
3015 case PGMMODE_AMD64:
3016 case PGMMODE_AMD64_NX:
3017 switch (enmHostMode)
3018 {
3019 case SUPPAGINGMODE_32_BIT:
3020 case SUPPAGINGMODE_32_BIT_GLOBAL:
3021 enmShadowMode = PGMMODE_AMD64;
3022 break;
3023
3024 case SUPPAGINGMODE_PAE:
3025 case SUPPAGINGMODE_PAE_NX:
3026 case SUPPAGINGMODE_PAE_GLOBAL:
3027 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3028 enmShadowMode = PGMMODE_AMD64;
3029 break;
3030
3031 case SUPPAGINGMODE_AMD64:
3032 case SUPPAGINGMODE_AMD64_GLOBAL:
3033 case SUPPAGINGMODE_AMD64_NX:
3034 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3035 enmShadowMode = PGMMODE_AMD64;
3036 break;
3037
3038 default:
3039 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
3040 }
3041 break;
3042
3043 default:
3044 AssertLogRelMsgFailedReturn(("enmGuestMode=%d\n", enmGuestMode), PGMMODE_INVALID);
3045 }
3046
3047 /*
3048 * Override the shadow mode when NEM or nested paging is active.
3049 */
3050 if (VM_IS_NEM_ENABLED(pVM))
3051 {
3052 pVM->pgm.s.fNestedPaging = true;
3053 enmShadowMode = PGMMODE_NONE;
3054 }
3055 else
3056 {
3057 bool fNestedPaging = HMIsNestedPagingActive(pVM);
3058 pVM->pgm.s.fNestedPaging = fNestedPaging;
3059 if (fNestedPaging)
3060 {
3061 if (HMIsVmxActive(pVM))
3062 enmShadowMode = PGMMODE_EPT;
3063 else
3064 {
3065 /* The nested SVM paging depends on the host one. */
3066 Assert(HMIsSvmActive(pVM));
3067 if ( enmGuestMode == PGMMODE_AMD64
3068 || enmGuestMode == PGMMODE_AMD64_NX)
3069 enmShadowMode = PGMMODE_NESTED_AMD64;
3070 else
3071 switch (pVM->pgm.s.enmHostMode)
3072 {
3073 case SUPPAGINGMODE_32_BIT:
3074 case SUPPAGINGMODE_32_BIT_GLOBAL:
3075 enmShadowMode = PGMMODE_NESTED_32BIT;
3076 break;
3077
3078 case SUPPAGINGMODE_PAE:
3079 case SUPPAGINGMODE_PAE_GLOBAL:
3080 case SUPPAGINGMODE_PAE_NX:
3081 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3082 enmShadowMode = PGMMODE_NESTED_PAE;
3083 break;
3084
3085 case SUPPAGINGMODE_AMD64:
3086 case SUPPAGINGMODE_AMD64_GLOBAL:
3087 case SUPPAGINGMODE_AMD64_NX:
3088 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3089 enmShadowMode = PGMMODE_NESTED_AMD64;
3090 break;
3091
3092 default:
3093 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode), PGMMODE_INVALID);
3094 }
3095 }
3096 }
3097#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
3098 else
3099 {
3100 /* Nested paging is a requirement for nested VT-x. */
3101 AssertLogRelMsgReturn(enmGuestMode != PGMMODE_EPT, ("enmHostMode=%d\n", pVM->pgm.s.enmHostMode), PGMMODE_INVALID);
3102 }
3103#endif
3104 }
3105
3106 return enmShadowMode;
3107}
3108
3109
3110/**
3111 * Performs the actual mode change.
3112 * This is called by PGMChangeMode and pgmR3InitPaging().
3113 *
3114 * @returns VBox status code. May suspend or power off the VM on error, but this
3115 *          will be signalled using FFs rather than informational status codes.
3116 *
3117 * @param pVM The cross context VM structure.
3118 * @param pVCpu The cross context virtual CPU structure.
3119 * @param enmGuestMode The new guest mode. This is assumed to be different from
3120 * the current mode.
3121 */
3122VMM_INT_DECL(int) PGMHCChangeMode(PVMCC pVM, PVMCPUCC pVCpu, PGMMODE enmGuestMode)
3123{
3124 Log(("PGMHCChangeMode: Guest mode: %s -> %s\n", PGMGetModeName(pVCpu->pgm.s.enmGuestMode), PGMGetModeName(enmGuestMode)));
3125 STAM_REL_COUNTER_INC(&pVCpu->pgm.s.cGuestModeChanges);
3126
3127 /*
3128 * Calc the shadow mode and switcher.
3129 */
3130 PGMMODE enmShadowMode = pgmCalcShadowMode(pVM, enmGuestMode, pVM->pgm.s.enmHostMode, pVCpu->pgm.s.enmShadowMode);
3131
3132 /*
3133 * Exit old mode(s).
3134 */
3135 /* shadow */
3136 if (enmShadowMode != pVCpu->pgm.s.enmShadowMode)
3137 {
3138 LogFlow(("PGMHCChangeMode: Shadow mode: %s -> %s\n", PGMGetModeName(pVCpu->pgm.s.enmShadowMode), PGMGetModeName(enmShadowMode)));
3139 uintptr_t idxOldShw = pVCpu->pgm.s.idxShadowModeData;
3140 if ( idxOldShw < RT_ELEMENTS(g_aPgmShadowModeData)
3141 && g_aPgmShadowModeData[idxOldShw].pfnExit)
3142 {
3143 int rc = g_aPgmShadowModeData[idxOldShw].pfnExit(pVCpu);
3144 AssertMsgRCReturn(rc, ("Exit failed for shadow mode %d: %Rrc\n", pVCpu->pgm.s.enmShadowMode, rc), rc);
3145 }
3146 }
3147 else
3148 LogFlow(("PGMHCChangeMode: Shadow mode remains: %s\n", PGMGetModeName(pVCpu->pgm.s.enmShadowMode)));
3149
3150 /* guest */
3151 uintptr_t const idxOldGst = pVCpu->pgm.s.idxGuestModeData;
3152 if ( idxOldGst < RT_ELEMENTS(g_aPgmGuestModeData)
3153 && g_aPgmGuestModeData[idxOldGst].pfnExit)
3154 {
3155 int rc = g_aPgmGuestModeData[idxOldGst].pfnExit(pVCpu);
3156 AssertMsgReturn(RT_SUCCESS(rc), ("Exit failed for guest mode %d: %Rrc\n", pVCpu->pgm.s.enmGuestMode, rc), rc);
3157 }
3158 pVCpu->pgm.s.GCPhysCR3 = NIL_RTGCPHYS;
3159
3160 /*
3161 * Change the paging mode data indexes.
3162 */
3163 uintptr_t idxNewGst = pVCpu->pgm.s.idxGuestModeData = pgmModeToType(enmGuestMode);
3164 AssertReturn(idxNewGst < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
3165 AssertReturn(g_aPgmGuestModeData[idxNewGst].uType == idxNewGst, VERR_PGM_MODE_IPE);
3166 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnGetPage, VERR_PGM_MODE_IPE);
3167 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnModifyPage, VERR_PGM_MODE_IPE);
3168 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnExit, VERR_PGM_MODE_IPE);
3169 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnEnter, VERR_PGM_MODE_IPE);
3170#ifdef IN_RING3
3171 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnRelocate, VERR_PGM_MODE_IPE);
3172#endif
3173
3174 uintptr_t const idxNewShw = pVCpu->pgm.s.idxShadowModeData = pgmModeToType(enmShadowMode);
3175 AssertReturn(idxNewShw < RT_ELEMENTS(g_aPgmShadowModeData), VERR_PGM_MODE_IPE);
3176 AssertReturn(g_aPgmShadowModeData[idxNewShw].uType == idxNewShw, VERR_PGM_MODE_IPE);
3177 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnGetPage, VERR_PGM_MODE_IPE);
3178 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnModifyPage, VERR_PGM_MODE_IPE);
3179 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnExit, VERR_PGM_MODE_IPE);
3180 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnEnter, VERR_PGM_MODE_IPE);
3181#ifdef IN_RING3
3182 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnRelocate, VERR_PGM_MODE_IPE);
3183#endif
3184
3185 uintptr_t const idxNewBth = pVCpu->pgm.s.idxBothModeData = (idxNewShw - PGM_TYPE_FIRST_SHADOW) * PGM_TYPE_END + idxNewGst;
3186 AssertReturn(g_aPgmBothModeData[idxNewBth].uShwType == idxNewShw, VERR_PGM_MODE_IPE);
3187 AssertReturn(g_aPgmBothModeData[idxNewBth].uGstType == idxNewGst, VERR_PGM_MODE_IPE);
3188 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnInvalidatePage, VERR_PGM_MODE_IPE);
3189 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnSyncCR3, VERR_PGM_MODE_IPE);
3190 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnPrefetchPage, VERR_PGM_MODE_IPE);
3191 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnVerifyAccessSyncPage, VERR_PGM_MODE_IPE);
3192 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnMapCR3, VERR_PGM_MODE_IPE);
3193 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnUnmapCR3, VERR_PGM_MODE_IPE);
3194 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnEnter, VERR_PGM_MODE_IPE);
3195#ifdef VBOX_STRICT
3196 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnAssertCR3, VERR_PGM_MODE_IPE);
3197#endif
3198
3199 /*
3200 * Enter new shadow mode (if changed).
3201 */
3202 if (enmShadowMode != pVCpu->pgm.s.enmShadowMode)
3203 {
3204 pVCpu->pgm.s.enmShadowMode = enmShadowMode;
3205 int rc = g_aPgmShadowModeData[idxNewShw].pfnEnter(pVCpu, enmGuestMode >= PGMMODE_AMD64);
3206 AssertLogRelMsgRCReturnStmt(rc, ("Entering enmShadowMode=%s failed: %Rrc\n", PGMGetModeName(enmShadowMode), rc),
3207 pVCpu->pgm.s.enmShadowMode = PGMMODE_INVALID, rc);
3208 }
3209
3210 /*
3211 * Always flag the necessary updates.
3212 */
3213 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3214
3215 /*
3216 * Enter the new guest and shadow+guest modes.
3217 */
3218 /* Calc the new CR3 value. */
3219 RTGCPHYS GCPhysCR3;
3220 switch (enmGuestMode)
3221 {
3222 case PGMMODE_REAL:
3223 case PGMMODE_PROTECTED:
3224 GCPhysCR3 = NIL_RTGCPHYS;
3225 break;
3226
3227 case PGMMODE_32_BIT:
3228 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_PAGE_MASK;
3229 break;
3230
3231 case PGMMODE_PAE_NX:
3232 case PGMMODE_PAE:
3233 if (!pVM->cpum.ro.GuestFeatures.fPae)
3234#ifdef IN_RING3 /** @todo r=bird: wrong place, probably hasn't really worked for a while. */
3235 return VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_FATAL, "PAEmode",
3236 N_("The guest is trying to switch to the PAE mode which is currently disabled by default in VirtualBox. PAE support can be enabled using the VM settings (System/Processor)"));
3237#else
3238 AssertLogRelMsgFailedReturn(("enmGuestMode=%s - Try enable PAE for the guest!\n", PGMGetModeName(enmGuestMode)), VERR_PGM_MODE_IPE);
3239
3240#endif
3241 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_PAE_PAGE_MASK;
3242 break;
3243
3244#ifdef VBOX_WITH_64_BITS_GUESTS
3245 case PGMMODE_AMD64_NX:
3246 case PGMMODE_AMD64:
3247 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_AMD64_PAGE_MASK;
3248 break;
3249#endif
3250#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
3251 case PGMMODE_EPT:
3252 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_EPT_PAGE_MASK;
3253 break;
3254#endif
3255 default:
3256 AssertLogRelMsgFailedReturn(("enmGuestMode=%d\n", enmGuestMode), VERR_PGM_MODE_IPE);
3257 }
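 /* Illustrative worked example of the masking above (value made up): a
    PAE-mode guest CR3 of 0x0000000012345678 masks down to GCPhysCR3
    0x0000000012345660, since X86_CR3_PAE_PAGE_MASK clears the low five
    bits, which are not part of the PDPT address. */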
3258
3259#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
3260 /* Update the guest SLAT mode if it's a nested-guest. */
3261 if ( CPUMIsGuestVmxEptPagingEnabled(pVCpu)
3262 && PGMMODE_WITH_PAGING(enmGuestMode))
3263 pVCpu->pgm.s.enmGuestSlatMode = PGMSLAT_EPT;
3264 else
3265 pVCpu->pgm.s.enmGuestSlatMode = PGMSLAT_DIRECT;
3266#endif
3267
3268 /* Enter the new guest mode. */
3269 pVCpu->pgm.s.enmGuestMode = enmGuestMode;
3270 int rc = g_aPgmGuestModeData[idxNewGst].pfnEnter(pVCpu, GCPhysCR3);
3271 int rc2 = g_aPgmBothModeData[idxNewBth].pfnEnter(pVCpu, GCPhysCR3);
3272
3273 /* Set the new guest CR3. */
3274 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
3275
3276 /* Merge the status codes. */
3277 AssertRC(rc);
3278 AssertRC(rc2);
3279 if (RT_SUCCESS(rc))
3280 {
3281 rc = rc2;
3282 if (RT_SUCCESS(rc)) /* no informational status codes. */
3283 rc = VINF_SUCCESS;
3284 }
3285
3286 /*
3287 * Notify HM.
3288 */
3289 HMHCChangedPagingMode(pVM, pVCpu, pVCpu->pgm.s.enmShadowMode, pVCpu->pgm.s.enmGuestMode);
3290 return rc;
3291}
3292
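/*
 * A minimal, non-normative sketch of how a caller could drive a guest mode
 * change after CR0/CR4/EFER updates. pgmCalcGuestModeFromCrX is a
 * hypothetical helper used only for this illustration; PGMHCChangeMode and
 * PGMGetGuestMode are the real APIs:
 *
 * @code
 *     PGMMODE const enmNewMode = pgmCalcGuestModeFromCrX(pVCpu); // hypothetical
 *     if (enmNewMode != PGMGetGuestMode(pVCpu))
 *     {
 *         int rc = PGMHCChangeMode(pVM, pVCpu, enmNewMode);
 *         AssertRC(rc);
 *     }
 * @endcode
 */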
3293
3294/**
3295 * Called by CPUM or REM when CR0.WP changes to 1.
3296 *
3297 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3298 * @thread EMT
3299 */
3300VMMDECL(void) PGMCr0WpEnabled(PVMCPUCC pVCpu)
3301{
3302 /*
3303 * Netware WP0+RO+US hack cleanup when WP0 -> WP1.
3304 *
3305 * Use the counter to judge whether there might be pool pages with active
3306 * hacks in them. If there are, we will be running the risk of messing up
3307 * the guest by allowing it to write to read-only pages. Thus, we have to
3308 * clear the page pool ASAP if there is the slightest chance.
3309 */
3310 if (pVCpu->pgm.s.cNetwareWp0Hacks > 0)
3311 {
3312 Assert(pVCpu->CTX_SUFF(pVM)->cCpus == 1);
3313
3314 Log(("PGMCr0WpEnabled: %llu WP0 hacks active - clearing page pool\n", pVCpu->pgm.s.cNetwareWp0Hacks));
3315 pVCpu->pgm.s.cNetwareWp0Hacks = 0;
3316 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
3317 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3318 }
3319}
3320
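/*
 * Illustrative sketch (an assumption, not the actual CPUM wiring) of the
 * kind of CR0 write path that would deliver the notification above:
 *
 * @code
 *     if (!(uOldCr0 & X86_CR0_WP) && (uNewCr0 & X86_CR0_WP))
 *         PGMCr0WpEnabled(pVCpu);
 * @endcode
 */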
3321
3322/**
3323 * Gets the current guest paging mode.
3324 *
3325 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
3326 *
3327 * @returns The current paging mode.
3328 * @param pVCpu The cross context virtual CPU structure.
3329 */
3330VMMDECL(PGMMODE) PGMGetGuestMode(PVMCPU pVCpu)
3331{
3332 return pVCpu->pgm.s.enmGuestMode;
3333}
3334
3335
3336/**
3337 * Gets the current shadow paging mode.
3338 *
3339 * @returns The current paging mode.
3340 * @param pVCpu The cross context virtual CPU structure.
3341 */
3342VMMDECL(PGMMODE) PGMGetShadowMode(PVMCPU pVCpu)
3343{
3344 return pVCpu->pgm.s.enmShadowMode;
3345}
3346
3347
3348/**
3349 * Gets the current host paging mode.
3350 *
3351 * @returns The current paging mode.
3352 * @param pVM The cross context VM structure.
3353 */
3354VMMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
3355{
3356 switch (pVM->pgm.s.enmHostMode)
3357 {
3358 case SUPPAGINGMODE_32_BIT:
3359 case SUPPAGINGMODE_32_BIT_GLOBAL:
3360 return PGMMODE_32_BIT;
3361
3362 case SUPPAGINGMODE_PAE:
3363 case SUPPAGINGMODE_PAE_GLOBAL:
3364 return PGMMODE_PAE;
3365
3366 case SUPPAGINGMODE_PAE_NX:
3367 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3368 return PGMMODE_PAE_NX;
3369
3370 case SUPPAGINGMODE_AMD64:
3371 case SUPPAGINGMODE_AMD64_GLOBAL:
3372 return PGMMODE_AMD64;
3373
3374 case SUPPAGINGMODE_AMD64_NX:
3375 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3376 return PGMMODE_AMD64_NX;
3377
3378 default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
3379 }
3380
3381 return PGMMODE_INVALID;
3382}
3383
3384
3385/**
3386 * Gets the mode name.
3387 *
3388 * @returns read-only name string.
3389 * @param enmMode The mode whose name is desired.
3390 */
3391VMMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
3392{
3393 switch (enmMode)
3394 {
3395 case PGMMODE_REAL: return "Real";
3396 case PGMMODE_PROTECTED: return "Protected";
3397 case PGMMODE_32_BIT: return "32-bit";
3398 case PGMMODE_PAE: return "PAE";
3399 case PGMMODE_PAE_NX: return "PAE+NX";
3400 case PGMMODE_AMD64: return "AMD64";
3401 case PGMMODE_AMD64_NX: return "AMD64+NX";
3402 case PGMMODE_NESTED_32BIT: return "Nested-32";
3403 case PGMMODE_NESTED_PAE: return "Nested-PAE";
3404 case PGMMODE_NESTED_AMD64: return "Nested-AMD64";
3405 case PGMMODE_EPT: return "EPT";
3406 case PGMMODE_NONE: return "None";
3407 default: return "unknown mode value";
3408 }
3409}
3410
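/*
 * The mode getters above combine naturally for diagnostics; an illustrative
 * (made-up) log statement:
 *
 * @code
 *     LogRel(("PGM: host=%s shadow=%s guest=%s\n",
 *             PGMGetModeName(PGMGetHostMode(pVM)),
 *             PGMGetModeName(PGMGetShadowMode(pVCpu)),
 *             PGMGetModeName(PGMGetGuestMode(pVCpu))));
 * @endcode
 */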
3411
3412#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
3413/**
3414 * Gets the SLAT mode name.
3415 *
3416 * @returns The read-only SLAT mode descriptive string.
3417 * @param enmSlatMode The SLAT mode value.
3418 */
3419VMM_INT_DECL(const char *) PGMGetSlatModeName(PGMSLAT enmSlatMode)
3420{
3421 switch (enmSlatMode)
3422 {
3423 case PGMSLAT_DIRECT: return "Direct";
3424 case PGMSLAT_EPT: return "EPT";
3425 case PGMSLAT_32BIT: return "32-bit";
3426 case PGMSLAT_PAE: return "PAE";
3427 case PGMSLAT_AMD64: return "AMD64";
3428 default: return "Unknown";
3429 }
3430}
3431#endif
3432
3433
3434/**
3435 * Gets the physical address represented in the guest CR3 as PGM sees it.
3436 *
3437 * This is mainly for logging and debugging.
3438 *
3439 * @returns PGM's guest CR3 value.
3440 * @param pVCpu The cross context virtual CPU structure.
3441 */
3442VMM_INT_DECL(RTGCPHYS) PGMGetGuestCR3Phys(PVMCPU pVCpu)
3443{
3444 return pVCpu->pgm.s.GCPhysCR3;
3445}
3446
3447
3448
3449/**
3450 * Notification from CPUM that the EFER.NXE bit has changed.
3451 *
3452 * @param pVCpu The cross context virtual CPU structure of the CPU for
3453 * which EFER changed.
3454 * @param fNxe The new NXE state.
3455 */
3456VMM_INT_DECL(void) PGMNotifyNxeChanged(PVMCPU pVCpu, bool fNxe)
3457{
3458/** @todo VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu); */
3459 Log(("PGMNotifyNxeChanged: fNxe=%RTbool\n", fNxe));
3460
3461 pVCpu->pgm.s.fNoExecuteEnabled = fNxe;
3462 if (fNxe)
3463 {
3464 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
3465 pVCpu->pgm.s.fGstPaeMbzPteMask &= ~X86_PTE_PAE_NX;
3466 pVCpu->pgm.s.fGstPaeMbzPdeMask &= ~X86_PDE_PAE_NX;
3467 pVCpu->pgm.s.fGstPaeMbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
3468 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask - N/A */
3469 pVCpu->pgm.s.fGstAmd64MbzPteMask &= ~X86_PTE_PAE_NX;
3470 pVCpu->pgm.s.fGstAmd64MbzPdeMask &= ~X86_PDE_PAE_NX;
3471 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
3472 pVCpu->pgm.s.fGstAmd64MbzPdpeMask &= ~X86_PDPE_LM_NX;
3473 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask &= ~X86_PDPE_LM_NX;
3474 pVCpu->pgm.s.fGstAmd64MbzPml4eMask &= ~X86_PML4E_NX;
3475
3476 pVCpu->pgm.s.fGst64ShadowedPteMask |= X86_PTE_PAE_NX;
3477 pVCpu->pgm.s.fGst64ShadowedPdeMask |= X86_PDE_PAE_NX;
3478 pVCpu->pgm.s.fGst64ShadowedBigPdeMask |= X86_PDE2M_PAE_NX;
3479 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask |= X86_PDE2M_PAE_NX;
3480 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask |= X86_PDPE_LM_NX;
3481 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask |= X86_PML4E_NX;
3482 }
3483 else
3484 {
3485 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
3486 pVCpu->pgm.s.fGstPaeMbzPteMask |= X86_PTE_PAE_NX;
3487 pVCpu->pgm.s.fGstPaeMbzPdeMask |= X86_PDE_PAE_NX;
3488 pVCpu->pgm.s.fGstPaeMbzBigPdeMask |= X86_PDE2M_PAE_NX;
3489 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask - N/A */
3490 pVCpu->pgm.s.fGstAmd64MbzPteMask |= X86_PTE_PAE_NX;
3491 pVCpu->pgm.s.fGstAmd64MbzPdeMask |= X86_PDE_PAE_NX;
3492 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask |= X86_PDE2M_PAE_NX;
3493 pVCpu->pgm.s.fGstAmd64MbzPdpeMask |= X86_PDPE_LM_NX;
3494 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask |= X86_PDPE_LM_NX;
3495 pVCpu->pgm.s.fGstAmd64MbzPml4eMask |= X86_PML4E_NX;
3496
3497 pVCpu->pgm.s.fGst64ShadowedPteMask &= ~X86_PTE_PAE_NX;
3498 pVCpu->pgm.s.fGst64ShadowedPdeMask &= ~X86_PDE_PAE_NX;
3499 pVCpu->pgm.s.fGst64ShadowedBigPdeMask &= ~X86_PDE2M_PAE_NX;
3500 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask &= ~X86_PDE2M_PAE_NX;
3501 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask &= ~X86_PDPE_LM_NX;
3502 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask &= ~X86_PML4E_NX;
3503 }
3504}
3505
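/*
 * Illustrative sketch (an assumption; the real call site lives in CPUM's
 * EFER handling) of when the notification above would fire:
 *
 * @code
 *     if ((uOldEfer ^ uNewEfer) & MSR_K6_EFER_NXE)
 *         PGMNotifyNxeChanged(pVCpu, RT_BOOL(uNewEfer & MSR_K6_EFER_NXE));
 * @endcode
 */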
3506
3507/**
3508 * Checks if any PGM pool pages are marked dirty (not monitored).
3509 *
3510 * @returns bool dirty/not dirty
3511 * @param pVM The cross context VM structure.
3512 */
3513VMMDECL(bool) PGMHasDirtyPages(PVM pVM)
3514{
3515 return pVM->pgm.s.CTX_SUFF(pPool)->cDirtyPages != 0;
3516}
3517
3518
3519/**
3520 * Checks if this VCPU currently owns the PGM lock.
3521 *
3522 * @returns bool owner/not owner
3523 * @param pVM The cross context VM structure.
3524 */
3525VMMDECL(bool) PGMIsLockOwner(PVMCC pVM)
3526{
3527 return PDMCritSectIsOwner(pVM, &pVM->pgm.s.CritSectX);
3528}
3529
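/*
 * Typically used in sanity assertions, e.g. (illustrative):
 *
 * @code
 *     Assert(PGMIsLockOwner(pVM));
 * @endcode
 */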
3530
3531/**
3532 * Enables or disables large page usage.
3533 *
3534 * @returns VBox status code.
3535 * @param pVM The cross context VM structure.
3536 * @param fUseLargePages Whether to use large pages.
3537 */
3538VMMDECL(int) PGMSetLargePageUsage(PVMCC pVM, bool fUseLargePages)
3539{
3540 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3541
3542 pVM->pgm.s.fUseLargePages = fUseLargePages;
3543 return VINF_SUCCESS;
3544}
3545
3546
3547/**
3548 * Acquire the PGM lock.
3549 *
3550 * @returns VBox status code
3551 * @param pVM The cross context VM structure.
3552 * @param fVoid Set if the caller cannot handle failure returns.
3553 * @param SRC_POS The source position of the caller (RT_SRC_POS).
3554 */
3555#if defined(VBOX_STRICT) || defined(DOXYGEN_RUNNING)
3556int pgmLockDebug(PVMCC pVM, bool fVoid, RT_SRC_POS_DECL)
3557#else
3558int pgmLock(PVMCC pVM, bool fVoid)
3559#endif
3560{
3561#if defined(VBOX_STRICT)
3562 int rc = PDMCritSectEnterDebug(pVM, &pVM->pgm.s.CritSectX, VINF_SUCCESS, (uintptr_t)ASMReturnAddress(), RT_SRC_POS_ARGS);
3563#else
3564 int rc = PDMCritSectEnter(pVM, &pVM->pgm.s.CritSectX, VINF_SUCCESS);
3565#endif
3566 if (RT_SUCCESS(rc))
3567 return rc;
3568 if (fVoid)
3569 PDM_CRITSECT_RELEASE_ASSERT_RC(pVM, &pVM->pgm.s.CritSectX, rc);
3570 else
3571 AssertRC(rc);
3572 return rc;
3573}
3574
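/*
 * The usual pattern pairs this with pgmUnlock() below, normally via the
 * PGM_LOCK_VOID / PGM_UNLOCK wrappers used elsewhere in this file
 * (illustrative sketch):
 *
 * @code
 *     PGM_LOCK_VOID(pVM);
 *     ... access or modify PGM state ...
 *     PGM_UNLOCK(pVM);
 * @endcode
 */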
3575
3576/**
3577 * Release the PGM lock.
3578 *
3579 * @returns VBox status code
3580 * @param pVM The cross context VM structure.
3581 */
3582void pgmUnlock(PVMCC pVM)
3583{
3584 uint32_t cDeprecatedPageLocks = pVM->pgm.s.cDeprecatedPageLocks;
3585 pVM->pgm.s.cDeprecatedPageLocks = 0;
3586 int rc = PDMCritSectLeave(pVM, &pVM->pgm.s.CritSectX);
3587 if (rc == VINF_SEM_NESTED)
3588 pVM->pgm.s.cDeprecatedPageLocks = cDeprecatedPageLocks;
3589}
3590
3591
3592#if !defined(IN_R0) || defined(LOG_ENABLED)
3593
3594/** Format handler for PGMPAGE.
3595 * @copydoc FNRTSTRFORMATTYPE */
3596static DECLCALLBACK(size_t) pgmFormatTypeHandlerPage(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
3597 const char *pszType, void const *pvValue,
3598 int cchWidth, int cchPrecision, unsigned fFlags,
3599 void *pvUser)
3600{
3601 size_t cch;
3602 PCPGMPAGE pPage = (PCPGMPAGE)pvValue;
3603 if (RT_VALID_PTR(pPage))
3604 {
3605 char szTmp[64+80];
3606
3607 cch = 0;
3608
3609 /* The single char state stuff. */
3610 static const char s_achPageStates[4] = { 'Z', 'A', 'W', 'S' };
3611 szTmp[cch++] = s_achPageStates[PGM_PAGE_GET_STATE_NA(pPage)];
3612
3613# define IS_PART_INCLUDED(lvl) ( !(fFlags & RTSTR_F_PRECISION) || cchPrecision == (lvl) || cchPrecision >= (lvl)+10 )
3614 if (IS_PART_INCLUDED(5))
3615 {
3616 static const char s_achHandlerStates[4] = { '-', 't', 'w', 'a' };
3617 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)];
3618 }
3619
3620 /* The type. */
3621 if (IS_PART_INCLUDED(4))
3622 {
3623 szTmp[cch++] = ':';
3624 static const char s_achPageTypes[8][4] = { "INV", "RAM", "MI2", "M2A", "SHA", "ROM", "MIO", "BAD" };
3625 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][0];
3626 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][1];
3627 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][2];
3628 }
3629
3630 /* The numbers. */
3631 if (IS_PART_INCLUDED(3))
3632 {
3633 szTmp[cch++] = ':';
3634 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_HCPHYS_NA(pPage), 16, 12, 0, RTSTR_F_ZEROPAD | RTSTR_F_64BIT);
3635 }
3636
3637 if (IS_PART_INCLUDED(2))
3638 {
3639 szTmp[cch++] = ':';
3640 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_PAGEID(pPage), 16, 7, 0, RTSTR_F_ZEROPAD | RTSTR_F_32BIT);
3641 }
3642
3643 if (IS_PART_INCLUDED(6))
3644 {
3645 szTmp[cch++] = ':';
3646 static const char s_achRefs[4] = { '-', 'U', '!', 'L' };
3647 szTmp[cch++] = s_achRefs[PGM_PAGE_GET_TD_CREFS_NA(pPage)];
3648 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_TD_IDX_NA(pPage), 16, 4, 0, RTSTR_F_ZEROPAD | RTSTR_F_16BIT);
3649 }
3650# undef IS_PART_INCLUDED
3651
3652 cch = pfnOutput(pvArgOutput, szTmp, cch);
3653 }
3654 else
3655 cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmpage-ptr>"));
3656 NOREF(pszType); NOREF(cchWidth); NOREF(pvUser);
3657 return cch;
3658}
3659
3660
3661/** Format handler for PGMRAMRANGE.
3662 * @copydoc FNRTSTRFORMATTYPE */
3663static DECLCALLBACK(size_t) pgmFormatTypeHandlerRamRange(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
3664 const char *pszType, void const *pvValue,
3665 int cchWidth, int cchPrecision, unsigned fFlags,
3666 void *pvUser)
3667{
3668 size_t cch;
3669 PGMRAMRANGE const *pRam = (PGMRAMRANGE const *)pvValue;
3670 if (RT_VALID_PTR(pRam))
3671 {
3672 char szTmp[80];
3673 cch = RTStrPrintf(szTmp, sizeof(szTmp), "%RGp-%RGp", pRam->GCPhys, pRam->GCPhysLast);
3674 cch = pfnOutput(pvArgOutput, szTmp, cch);
3675 }
3676 else
3677 cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmramrange-ptr>"));
3678 NOREF(pszType); NOREF(cchWidth); NOREF(cchPrecision); NOREF(pvUser); NOREF(fFlags);
3679 return cch;
3680}
3681
3682/** Format type handlers to be registered/deregistered. */
3683static const struct
3684{
3685 char szType[24];
3686 PFNRTSTRFORMATTYPE pfnHandler;
3687} g_aPgmFormatTypes[] =
3688{
3689 { "pgmpage", pgmFormatTypeHandlerPage },
3690 { "pgmramrange", pgmFormatTypeHandlerRamRange }
3691};
3692
3693#endif /* !IN_R0 || LOG_ENABLED */
3694
3695/**
3696 * Registers the global string format types.
3697 *
3698 * This should be called at module load time or in some other manner that ensures
3699 * that it's called exactly one time.
3700 *
3701 * @returns IPRT status code on RTStrFormatTypeRegister failure.
3702 */
3703VMMDECL(int) PGMRegisterStringFormatTypes(void)
3704{
3705#if !defined(IN_R0) || defined(LOG_ENABLED)
3706 int rc = VINF_SUCCESS;
3707 unsigned i;
3708 for (i = 0; RT_SUCCESS(rc) && i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
3709 {
3710 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
3711# ifdef IN_RING0
3712 if (rc == VERR_ALREADY_EXISTS)
3713 {
3714 /* in case of cleanup failure in ring-0 */
3715 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
3716 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
3717 }
3718# endif
3719 }
3720 if (RT_FAILURE(rc))
3721 while (i-- > 0)
3722 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
3723
3724 return rc;
3725#else
3726 return VINF_SUCCESS;
3727#endif
3728}
3729
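/*
 * Once registered, the types above extend the IPRT format string syntax; an
 * illustrative use:
 *
 * @code
 *     Log(("%R[pgmpage] in %R[pgmramrange]\n", pPage, pRam));
 * @endcode
 */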
3730
3731/**
3732 * Deregisters the global string format types.
3733 *
3734 * This should be called at module unload time or in some other manner that
3735 * ensures that it's called exactly one time.
3736 */
3737VMMDECL(void) PGMDeregisterStringFormatTypes(void)
3738{
3739#if !defined(IN_R0) || defined(LOG_ENABLED)
3740 for (unsigned i = 0; i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
3741 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
3742#endif
3743}
3744
3745
3746#ifdef VBOX_STRICT
3747/**
3748 * Asserts that everything related to the guest CR3 is correctly shadowed.
3749 *
3750 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
3751 * and assert the correctness of the guest CR3 mapping before asserting that the
3752 * shadow page tables are in sync with the guest page tables.
3753 *
3754 * @returns Number of conflicts.
3755 * @param pVM The cross context VM structure.
3756 * @param pVCpu The cross context virtual CPU structure.
3757 * @param cr3 The current guest CR3 register value.
3758 * @param cr4 The current guest CR4 register value.
3759 */
3760VMMDECL(unsigned) PGMAssertCR3(PVMCC pVM, PVMCPUCC pVCpu, uint64_t cr3, uint64_t cr4)
3761{
3762 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
3763
3764 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
3765 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), -VERR_PGM_MODE_IPE);
3766 AssertReturn(g_aPgmBothModeData[idxBth].pfnAssertCR3, -VERR_PGM_MODE_IPE);
3767
3768 PGM_LOCK_VOID(pVM);
3769 unsigned cErrors = g_aPgmBothModeData[idxBth].pfnAssertCR3(pVCpu, cr3, cr4, 0, ~(RTGCPTR)0);
3770 PGM_UNLOCK(pVM);
3771
3772 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
3773 return cErrors;
3774}
3775#endif /* VBOX_STRICT */
3776
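/*
 * Illustrative call site for strict builds (an assumption, not lifted from
 * an actual caller):
 *
 * @code
 * #ifdef VBOX_STRICT
 *     unsigned const cErrors = PGMAssertCR3(pVM, pVCpu, CPUMGetGuestCR3(pVCpu), CPUMGetGuestCR4(pVCpu));
 *     Assert(cErrors == 0);
 * #endif
 * @endcode
 */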
3777
3778/**
3779 * Updates PGM's copy of the guest's EPT pointer.
3780 *
3781 * @param pVCpu The cross context virtual CPU structure.
3782 * @param uEptPtr The EPT pointer.
3783 *
3784 * @remarks This can be called as part of VM-entry so we might be in the midst of
3785 * switching to VMX non-root mode.
3786 */
3787VMM_INT_DECL(void) PGMSetGuestEptPtr(PVMCPUCC pVCpu, uint64_t uEptPtr)
3788{
3789 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3790 PGM_LOCK_VOID(pVM);
3791 pVCpu->pgm.s.uEptPtr = uEptPtr;
3792 PGM_UNLOCK(pVM);
3793}
3794
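/*
 * Illustrative VM-entry usage; uGstEptp stands in for the EPTP value read
 * from the nested-guest VMCS (variable name assumed for this sketch):
 *
 * @code
 *     PGMSetGuestEptPtr(pVCpu, uGstEptp);
 * @endcode
 */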