VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp@92541

Last change on this file since 92541 was 92541, checked in by vboxsync, 3 years ago

VMM: Nested VMX: bugref:10092 Allow forcing mapping/unmapping of CR3 even when the paging mode doesn't actually change. This is required for VMX/SVM guest transitions.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 134.9 KB
/* $Id: PGMAll.cpp 92541 2021-11-22 06:35:38Z vboxsync $ */
/** @file
 * PGM - Page Manager and Monitor - All context code.
 */

/*
 * Copyright (C) 2006-2020 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/selm.h>
#include <VBox/vmm/iem.h>
#include <VBox/vmm/iom.h>
#include <VBox/sup.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/stam.h>
#include <VBox/vmm/trpm.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/hm_vmx.h>
#include "PGMInternal.h"
#include <VBox/vmm/vmcc.h>
#include "PGMInline.h"
#include <iprt/assert.h>
#include <iprt/asm-amd64-x86.h>
#include <iprt/string.h>
#include <VBox/log.h>
#include <VBox/param.h>
#include <VBox/err.h>


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde);
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
static int pgmGstSlatWalk(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, bool fIsLinearAddrValid, RTGCPTR GCPtrNested, PPGMPTWALK pWalk,
                          PPGMPTWALKGST pGstWalk);
static int pgmGstSlatWalkPhys(PVMCPUCC pVCpu, PGMSLAT enmSlatMode, RTGCPHYS GCPhysNested, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk);
#endif
static int pgmShwSyncLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD);
static int pgmShwGetEPTPDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD);


#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
/* Guest - EPT SLAT is identical for all guest paging modes. */
# define PGM_SLAT_TYPE PGM_SLAT_TYPE_EPT
# define PGM_GST_TYPE PGM_TYPE_EPT
# include "PGMGstDefs.h"
# include "PGMAllGstSlatEpt.cpp.h"
# undef PGM_GST_TYPE
#endif


/*
 * Shadow - 32-bit mode
 */
#define PGM_SHW_TYPE PGM_TYPE_32BIT
#define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE PGM_TYPE_REAL
#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
#include "PGMGstDefs.h"
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE PGM_TYPE_PROT
#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
#include "PGMGstDefs.h"
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode */
#define PGM_GST_TYPE PGM_TYPE_32BIT
#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD
#include "PGMGstDefs.h"
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME

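
/* Illustrative note (not from the original file): each PGMAllShw.h /
 * PGMAllGst.h / PGMAllBth.h inclusion above compiles the same template code
 * once per paging mode, with PGM_SHW_NAME/PGM_GST_NAME/PGM_BTH_NAME mangling
 * the function names. The exact expansions live in PGMInternal.h; as a rough,
 * assumed example for a ring-3 build:
 *
 *     PGM_BTH_NAME(Trap0eHandler)
 *         -> PGM_BTH_NAME_32BIT_REAL(Trap0eHandler)
 *         -> something like pgmR3Bth32BitRealTrap0eHandler
 *
 * These per-mode workers are what the g_aPgm*ModeData tables further down
 * point at, so runtime dispatch is a plain table lookup.
 */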

/*
 * Shadow - PAE mode
 */
#define PGM_SHW_TYPE PGM_TYPE_PAE
#define PGM_SHW_NAME(name) PGM_SHW_NAME_PAE(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE PGM_TYPE_REAL
#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE PGM_TYPE_PROT
#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode */
#define PGM_GST_TYPE PGM_TYPE_32BIT
#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_FOR_32BIT
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME


/* Guest - PAE mode */
#define PGM_GST_TYPE PGM_TYPE_PAE
#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT
#include "PGMGstDefs.h"
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME


/*
 * Shadow - AMD64 mode
 */
#define PGM_SHW_TYPE PGM_TYPE_AMD64
#define PGM_SHW_NAME(name) PGM_SHW_NAME_AMD64(name)
#include "PGMAllShw.h"

/* Guest - protected mode (only used for AMD-V nested paging in 64-bit mode) */
/** @todo retire this hack. */
#define PGM_GST_TYPE PGM_TYPE_PROT
#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PD_PHYS
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#ifdef VBOX_WITH_64_BITS_GUESTS
/* Guest - AMD64 mode */
# define PGM_GST_TYPE PGM_TYPE_AMD64
# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
# define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_64BIT_PML4
# include "PGMGstDefs.h"
# include "PGMAllGst.h"
# include "PGMAllBth.h"
# undef BTH_PGMPOOLKIND_PT_FOR_BIG
# undef BTH_PGMPOOLKIND_PT_FOR_PT
# undef BTH_PGMPOOLKIND_ROOT
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME
#endif /* VBOX_WITH_64_BITS_GUESTS */

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME


/*
 * Shadow - 32-bit nested paging mode.
 */
#define PGM_SHW_TYPE PGM_TYPE_NESTED_32BIT
#define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED_32BIT(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE PGM_TYPE_REAL
#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_REAL(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE PGM_TYPE_PROT
#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_PROT(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode */
#define PGM_GST_TYPE PGM_TYPE_32BIT
#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_32BIT(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - PAE mode */
#define PGM_GST_TYPE PGM_TYPE_PAE
#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_PAE(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#ifdef VBOX_WITH_64_BITS_GUESTS
/* Guest - AMD64 mode */
# define PGM_GST_TYPE PGM_TYPE_AMD64
# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_AMD64(name)
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME
#endif /* VBOX_WITH_64_BITS_GUESTS */

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME


/*
 * Shadow - PAE nested paging mode.
 */
#define PGM_SHW_TYPE PGM_TYPE_NESTED_PAE
#define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED_PAE(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE PGM_TYPE_REAL
#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_REAL(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE PGM_TYPE_PROT
#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_PROT(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode */
#define PGM_GST_TYPE PGM_TYPE_32BIT
#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_32BIT(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - PAE mode */
#define PGM_GST_TYPE PGM_TYPE_PAE
#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_PAE(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#ifdef VBOX_WITH_64_BITS_GUESTS
/* Guest - AMD64 mode */
# define PGM_GST_TYPE PGM_TYPE_AMD64
# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_AMD64(name)
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME
#endif /* VBOX_WITH_64_BITS_GUESTS */

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME


/*
 * Shadow - AMD64 nested paging mode.
 */
#define PGM_SHW_TYPE PGM_TYPE_NESTED_AMD64
#define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED_AMD64(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE PGM_TYPE_REAL
#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_REAL(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE PGM_TYPE_PROT
#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_PROT(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode */
#define PGM_GST_TYPE PGM_TYPE_32BIT
#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_32BIT(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - PAE mode */
#define PGM_GST_TYPE PGM_TYPE_PAE
#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_PAE(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#ifdef VBOX_WITH_64_BITS_GUESTS
/* Guest - AMD64 mode */
# define PGM_GST_TYPE PGM_TYPE_AMD64
# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_AMD64(name)
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME
#endif /* VBOX_WITH_64_BITS_GUESTS */

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME


/*
 * Shadow - EPT.
 */
#define PGM_SHW_TYPE PGM_TYPE_EPT
#define PGM_SHW_NAME(name) PGM_SHW_NAME_EPT(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE PGM_TYPE_REAL
#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_REAL(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE PGM_TYPE_PROT
#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode */
#define PGM_GST_TYPE PGM_TYPE_32BIT
#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_32BIT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - PAE mode */
#define PGM_GST_TYPE PGM_TYPE_PAE
#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PAE(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#ifdef VBOX_WITH_64_BITS_GUESTS
/* Guest - AMD64 mode */
# define PGM_GST_TYPE PGM_TYPE_AMD64
# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_AMD64(name)
# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
# undef BTH_PGMPOOLKIND_PT_FOR_PT
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME
#endif /* VBOX_WITH_64_BITS_GUESTS */

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME


/*
 * Shadow - NEM / None.
 */
#define PGM_SHW_TYPE PGM_TYPE_NONE
#define PGM_SHW_NAME(name) PGM_SHW_NAME_NONE(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE PGM_TYPE_REAL
#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_REAL(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE PGM_TYPE_PROT
#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_PROT(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode */
#define PGM_GST_TYPE PGM_TYPE_32BIT
#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_32BIT(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - PAE mode */
#define PGM_GST_TYPE PGM_TYPE_PAE
#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_PAE(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#ifdef VBOX_WITH_64_BITS_GUESTS
/* Guest - AMD64 mode */
# define PGM_GST_TYPE PGM_TYPE_AMD64
# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
# define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_AMD64(name)
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME
#endif /* VBOX_WITH_64_BITS_GUESTS */

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME



/**
 * Guest mode data array.
 */
PGMMODEDATAGST const g_aPgmGuestModeData[PGM_GUEST_MODE_DATA_ARRAY_SIZE] =
{
    { UINT32_MAX, NULL, NULL, NULL, NULL }, /* 0 */
    {
        PGM_TYPE_REAL,
        PGM_GST_NAME_REAL(GetPage),
        PGM_GST_NAME_REAL(ModifyPage),
        PGM_GST_NAME_REAL(Enter),
        PGM_GST_NAME_REAL(Exit),
#ifdef IN_RING3
        PGM_GST_NAME_REAL(Relocate),
#endif
    },
    {
        PGM_TYPE_PROT,
        PGM_GST_NAME_PROT(GetPage),
        PGM_GST_NAME_PROT(ModifyPage),
        PGM_GST_NAME_PROT(Enter),
        PGM_GST_NAME_PROT(Exit),
#ifdef IN_RING3
        PGM_GST_NAME_PROT(Relocate),
#endif
    },
    {
        PGM_TYPE_32BIT,
        PGM_GST_NAME_32BIT(GetPage),
        PGM_GST_NAME_32BIT(ModifyPage),
        PGM_GST_NAME_32BIT(Enter),
        PGM_GST_NAME_32BIT(Exit),
#ifdef IN_RING3
        PGM_GST_NAME_32BIT(Relocate),
#endif
    },
    {
        PGM_TYPE_PAE,
        PGM_GST_NAME_PAE(GetPage),
        PGM_GST_NAME_PAE(ModifyPage),
        PGM_GST_NAME_PAE(Enter),
        PGM_GST_NAME_PAE(Exit),
#ifdef IN_RING3
        PGM_GST_NAME_PAE(Relocate),
#endif
    },
#ifdef VBOX_WITH_64_BITS_GUESTS
    {
        PGM_TYPE_AMD64,
        PGM_GST_NAME_AMD64(GetPage),
        PGM_GST_NAME_AMD64(ModifyPage),
        PGM_GST_NAME_AMD64(Enter),
        PGM_GST_NAME_AMD64(Exit),
# ifdef IN_RING3
        PGM_GST_NAME_AMD64(Relocate),
# endif
    },
#endif
};
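
/* Usage sketch (hypothetical, mirroring what PGMTrap0eHandler() does further
 * down for g_aPgmBothModeData): callers dispatch through this table using the
 * index PGM keeps up to date on guest mode switches. The GetPage argument
 * list is abbreviated and assumed here, not copied from a real call site.
 *
 *     uintptr_t const idx = pVCpu->pgm.s.idxGuestModeData;
 *     AssertReturn(idx < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
 *     AssertReturn(g_aPgmGuestModeData[idx].pfnGetPage, VERR_PGM_MODE_IPE);
 *     int rc = g_aPgmGuestModeData[idx].pfnGetPage(pVCpu, GCPtr, ...);
 */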


/**
 * The shadow mode data array.
 */
PGMMODEDATASHW const g_aPgmShadowModeData[PGM_SHADOW_MODE_DATA_ARRAY_SIZE] =
{
    { UINT8_MAX, NULL, NULL, NULL, NULL }, /* 0 */
    { UINT8_MAX, NULL, NULL, NULL, NULL }, /* PGM_TYPE_REAL */
    { UINT8_MAX, NULL, NULL, NULL, NULL }, /* PGM_TYPE_PROT */
    {
        PGM_TYPE_32BIT,
        PGM_SHW_NAME_32BIT(GetPage),
        PGM_SHW_NAME_32BIT(ModifyPage),
        PGM_SHW_NAME_32BIT(Enter),
        PGM_SHW_NAME_32BIT(Exit),
#ifdef IN_RING3
        PGM_SHW_NAME_32BIT(Relocate),
#endif
    },
    {
        PGM_TYPE_PAE,
        PGM_SHW_NAME_PAE(GetPage),
        PGM_SHW_NAME_PAE(ModifyPage),
        PGM_SHW_NAME_PAE(Enter),
        PGM_SHW_NAME_PAE(Exit),
#ifdef IN_RING3
        PGM_SHW_NAME_PAE(Relocate),
#endif
    },
    {
        PGM_TYPE_AMD64,
        PGM_SHW_NAME_AMD64(GetPage),
        PGM_SHW_NAME_AMD64(ModifyPage),
        PGM_SHW_NAME_AMD64(Enter),
        PGM_SHW_NAME_AMD64(Exit),
#ifdef IN_RING3
        PGM_SHW_NAME_AMD64(Relocate),
#endif
    },
    {
        PGM_TYPE_NESTED_32BIT,
        PGM_SHW_NAME_NESTED_32BIT(GetPage),
        PGM_SHW_NAME_NESTED_32BIT(ModifyPage),
        PGM_SHW_NAME_NESTED_32BIT(Enter),
        PGM_SHW_NAME_NESTED_32BIT(Exit),
#ifdef IN_RING3
        PGM_SHW_NAME_NESTED_32BIT(Relocate),
#endif
    },
    {
        PGM_TYPE_NESTED_PAE,
        PGM_SHW_NAME_NESTED_PAE(GetPage),
        PGM_SHW_NAME_NESTED_PAE(ModifyPage),
        PGM_SHW_NAME_NESTED_PAE(Enter),
        PGM_SHW_NAME_NESTED_PAE(Exit),
#ifdef IN_RING3
        PGM_SHW_NAME_NESTED_PAE(Relocate),
#endif
    },
    {
        PGM_TYPE_NESTED_AMD64,
        PGM_SHW_NAME_NESTED_AMD64(GetPage),
        PGM_SHW_NAME_NESTED_AMD64(ModifyPage),
        PGM_SHW_NAME_NESTED_AMD64(Enter),
        PGM_SHW_NAME_NESTED_AMD64(Exit),
#ifdef IN_RING3
        PGM_SHW_NAME_NESTED_AMD64(Relocate),
#endif
    },
    {
        PGM_TYPE_EPT,
        PGM_SHW_NAME_EPT(GetPage),
        PGM_SHW_NAME_EPT(ModifyPage),
        PGM_SHW_NAME_EPT(Enter),
        PGM_SHW_NAME_EPT(Exit),
#ifdef IN_RING3
        PGM_SHW_NAME_EPT(Relocate),
#endif
    },
    {
        PGM_TYPE_NONE,
        PGM_SHW_NAME_NONE(GetPage),
        PGM_SHW_NAME_NONE(ModifyPage),
        PGM_SHW_NAME_NONE(Enter),
        PGM_SHW_NAME_NONE(Exit),
#ifdef IN_RING3
        PGM_SHW_NAME_NONE(Relocate),
#endif
    },
};


/**
 * The guest+shadow mode data array.
 */
PGMMODEDATABTH const g_aPgmBothModeData[PGM_BOTH_MODE_DATA_ARRAY_SIZE] =
{
#if !defined(IN_RING3) && !defined(VBOX_STRICT)
# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
    { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(Trap0eHandler) }

#elif !defined(IN_RING3) && defined(VBOX_STRICT)
# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
    { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(Trap0eHandler), Nm(AssertCR3) }

#elif defined(IN_RING3) && !defined(VBOX_STRICT)
# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL }
# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
    { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), }

#elif defined(IN_RING3) && defined(VBOX_STRICT)
# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
    { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(AssertCR3) }

#else
# error "Misconfig."
#endif

    /* 32-bit shadow paging mode: */
    PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
    PGMMODEDATABTH_ENTRY(PGM_TYPE_32BIT, PGM_TYPE_REAL, PGM_BTH_NAME_32BIT_REAL),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_32BIT, PGM_TYPE_PROT, PGM_BTH_NAME_32BIT_PROT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_32BIT, PGM_TYPE_32BIT, PGM_BTH_NAME_32BIT_32BIT),
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NESTED_32BIT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NESTED_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NESTED_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_EPT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NONE - illegal */

    /* PAE shadow paging mode: */
    PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
    PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_REAL, PGM_BTH_NAME_PAE_REAL),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_PROT, PGM_BTH_NAME_PAE_PROT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_32BIT, PGM_BTH_NAME_PAE_32BIT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_PAE, PGM_BTH_NAME_PAE_PAE),
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NESTED_32BIT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NESTED_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NESTED_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_EPT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NONE - illegal */

    /* AMD64 shadow paging mode: */
    PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
    PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_REAL, PGM_BTH_NAME_AMD64_REAL),
    PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_PROT, PGM_BTH_NAME_AMD64_PROT),
    PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_32BIT, PGM_BTH_NAME_AMD64_32BIT),
    PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_PAE, PGM_BTH_NAME_AMD64_PAE),
#ifdef VBOX_WITH_64_BITS_GUESTS
    PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_AMD64, PGM_BTH_NAME_AMD64_AMD64),
#else
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_AMD64 - illegal */
#endif
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NESTED_32BIT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NESTED_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NESTED_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_EPT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NONE - illegal */

    /* 32-bit nested paging mode: */
    PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_REAL, PGM_BTH_NAME_NESTED_32BIT_REAL),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_PROT, PGM_BTH_NAME_NESTED_32BIT_PROT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_32BIT, PGM_BTH_NAME_NESTED_32BIT_32BIT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_PAE, PGM_BTH_NAME_NESTED_32BIT_PAE),
#ifdef VBOX_WITH_64_BITS_GUESTS
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_AMD64, PGM_BTH_NAME_NESTED_32BIT_AMD64),
#else
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_AMD64 - illegal */
#endif
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NESTED_32BIT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NESTED_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NESTED_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_EPT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NONE - illegal */

    /* PAE nested paging mode: */
    PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_REAL, PGM_BTH_NAME_NESTED_PAE_REAL),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_PROT, PGM_BTH_NAME_NESTED_PAE_PROT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_32BIT, PGM_BTH_NAME_NESTED_PAE_32BIT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_PAE, PGM_BTH_NAME_NESTED_PAE_PAE),
#ifdef VBOX_WITH_64_BITS_GUESTS
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_AMD64, PGM_BTH_NAME_NESTED_PAE_AMD64),
#else
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_AMD64 - illegal */
#endif
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NESTED_32BIT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NESTED_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NESTED_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_EPT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NONE - illegal */

    /* AMD64 nested paging mode: */
    PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_REAL, PGM_BTH_NAME_NESTED_AMD64_REAL),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_PROT, PGM_BTH_NAME_NESTED_AMD64_PROT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_32BIT, PGM_BTH_NAME_NESTED_AMD64_32BIT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_PAE, PGM_BTH_NAME_NESTED_AMD64_PAE),
#ifdef VBOX_WITH_64_BITS_GUESTS
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_AMD64, PGM_BTH_NAME_NESTED_AMD64_AMD64),
#else
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_AMD64 - illegal */
#endif
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NESTED_32BIT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NESTED_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NESTED_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_EPT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NONE - illegal */

    /* EPT nested paging mode: */
    PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
    PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_REAL, PGM_BTH_NAME_EPT_REAL),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_PROT, PGM_BTH_NAME_EPT_PROT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_32BIT, PGM_BTH_NAME_EPT_32BIT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_PAE, PGM_BTH_NAME_EPT_PAE),
#ifdef VBOX_WITH_64_BITS_GUESTS
    PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_AMD64, PGM_BTH_NAME_EPT_AMD64),
#else
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_AMD64 - illegal */
#endif
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NESTED_32BIT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NESTED_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NESTED_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_EPT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NONE - illegal */

    /* NONE / NEM: */
    PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_REAL, PGM_BTH_NAME_EPT_REAL),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_PROT, PGM_BTH_NAME_EPT_PROT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_32BIT, PGM_BTH_NAME_EPT_32BIT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_PAE, PGM_BTH_NAME_EPT_PAE),
#ifdef VBOX_WITH_64_BITS_GUESTS
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_AMD64, PGM_BTH_NAME_EPT_AMD64),
#else
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_AMD64 - illegal */
#endif
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_32BIT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_EPT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NONE - illegal */


#undef PGMMODEDATABTH_ENTRY
#undef PGMMODEDATABTH_NULL_ENTRY
};


#ifdef IN_RING0
/**
 * #PF Handler.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   uErr        The trap error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address.
 */
VMMDECL(int) PGMTrap0eHandler(PVMCPUCC pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
{
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);

    Log(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv eip=%04x:%RGv cr3=%RGp\n", uErr, pvFault, pRegFrame->cs.Sel, (RTGCPTR)pRegFrame->rip, (RTGCPHYS)CPUMGetGuestCR3(pVCpu)));
    STAM_PROFILE_START(&pVCpu->pgm.s.Stats.StatRZTrap0e, a);
    STAM_STATS({ pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = NULL; });


# ifdef VBOX_WITH_STATISTICS
    /*
     * Error code stats.
     */
    if (uErr & X86_TRAP_PF_US)
    {
        if (!(uErr & X86_TRAP_PF_P))
        {
            if (uErr & X86_TRAP_PF_RW)
                STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSNotPresentWrite);
            else
                STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSNotPresentRead);
        }
        else if (uErr & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSWrite);
        else if (uErr & X86_TRAP_PF_RSVD)
            STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSReserved);
        else if (uErr & X86_TRAP_PF_ID)
            STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSNXE);
        else
            STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSRead);
    }
    else
    {   /* Supervisor */
        if (!(uErr & X86_TRAP_PF_P))
        {
            if (uErr & X86_TRAP_PF_RW)
                STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVNotPresentWrite);
            else
                STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVNotPresentRead);
        }
        else if (uErr & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVWrite);
        else if (uErr & X86_TRAP_PF_ID)
            STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSNXE);
        else if (uErr & X86_TRAP_PF_RSVD)
            STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVReserved);
    }
# endif /* VBOX_WITH_STATISTICS */

    /*
     * Call the worker.
     */
    uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
    AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
    AssertReturn(g_aPgmBothModeData[idxBth].pfnTrap0eHandler, VERR_PGM_MODE_IPE);
    bool fLockTaken = false;
    int rc = g_aPgmBothModeData[idxBth].pfnTrap0eHandler(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken);
    if (fLockTaken)
    {
        PGM_LOCK_ASSERT_OWNER(pVM);
        PGM_UNLOCK(pVM);
    }
    LogFlow(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv rc=%Rrc\n", uErr, pvFault, rc));

    /*
     * Return code tweaks.
     */
    if (rc != VINF_SUCCESS)
    {
        if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
            rc = VINF_SUCCESS;

        /* Note: hack alert for difficult to reproduce problem. */
        if (   rc == VERR_PAGE_NOT_PRESENT                 /* SMP only; disassembly might fail. */
            || rc == VERR_PAGE_TABLE_NOT_PRESENT           /* seen with UNI & SMP */
            || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT   /* seen with SMP */
            || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)     /* precaution */
        {
            Log(("WARNING: Unexpected VERR_PAGE_TABLE_NOT_PRESENT (%d) for page fault at %RGv error code %x (rip=%RGv)\n", rc, pvFault, uErr, pRegFrame->rip));
            /* Some kind of inconsistency in the SMP case; it's safe to just execute the instruction again; not sure about single VCPU VMs though. */
            rc = VINF_SUCCESS;
        }
    }

    STAM_STATS({ if (rc == VINF_EM_RAW_GUEST_TRAP) STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eGuestPF); });
    STAM_STATS({ if (!pVCpu->pgmr0.s.pStatTrap0eAttributionR0)
                     pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = &pVCpu->pgm.s.Stats.StatRZTrap0eTime2Misc; });
    STAM_PROFILE_STOP_EX(&pVCpu->pgm.s.Stats.StatRZTrap0e, pVCpu->pgmr0.s.pStatTrap0eAttributionR0, a);
    return rc;
}
#endif /* IN_RING0 */


/**
 * Prefetch a page.
 *
 * Typically used to sync commonly used pages before entering raw mode
 * after a CR3 reload.
 *
 * @returns VBox status code suitable for scheduling.
 * @retval  VINF_SUCCESS on success.
 * @retval  VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtrPage   Page to prefetch.
 */
VMMDECL(int) PGMPrefetchPage(PVMCPUCC pVCpu, RTGCPTR GCPtrPage)
{
    STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,Prefetch), a);

    uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
    AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
    AssertReturn(g_aPgmBothModeData[idxBth].pfnPrefetchPage, VERR_PGM_MODE_IPE);
    int rc = g_aPgmBothModeData[idxBth].pfnPrefetchPage(pVCpu, GCPtrPage);

    STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,Prefetch), a);
    AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
    return rc;
}
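
/* Usage sketch (hypothetical caller, not from this file): VINF_PGM_SYNC_CR3
 * is not a failure, it merely defers the work to the next CR3 sync, so a
 * caller might react along these lines:
 *
 *     int rc = PGMPrefetchPage(pVCpu, GCPtrPage);
 *     if (rc == VINF_PGM_SYNC_CR3)
 *         VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3); // assumed reaction
 *     else
 *         AssertRC(rc);
 */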


/**
 * Emulation of the invlpg instruction (HC only actually).
 *
 * @returns Strict VBox status code, special care required.
 * @retval  VINF_PGM_SYNC_CR3 - handled.
 * @retval  VINF_EM_RAW_EMULATE_INSTR - not handled (RC only).
 * @retval  VERR_REM_FLUSHED_PAGES_OVERFLOW - not handled.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtrPage   Page to invalidate.
 *
 * @remark  ASSUMES the page table entry or page directory is valid. Fairly
 *          safe, but there could be edge cases!
 *
 * @todo    Flush page or page directory only if necessary!
 * @todo    VBOXSTRICTRC
 */
VMMDECL(int) PGMInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtrPage)
{
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    int rc;
    Log3(("PGMInvalidatePage: GCPtrPage=%RGv\n", GCPtrPage));

    IEMTlbInvalidatePage(pVCpu, GCPtrPage);

    /*
     * Call paging mode specific worker.
     */
    STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,InvalidatePage), a);
    PGM_LOCK_VOID(pVM);

    uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
    AssertReturnStmt(idxBth < RT_ELEMENTS(g_aPgmBothModeData), PGM_UNLOCK(pVM), VERR_PGM_MODE_IPE);
    AssertReturnStmt(g_aPgmBothModeData[idxBth].pfnInvalidatePage, PGM_UNLOCK(pVM), VERR_PGM_MODE_IPE);
    rc = g_aPgmBothModeData[idxBth].pfnInvalidatePage(pVCpu, GCPtrPage);

    PGM_UNLOCK(pVM);
    STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,InvalidatePage), a);

    /* Ignore all irrelevant error codes. */
    if (   rc == VERR_PAGE_NOT_PRESENT
        || rc == VERR_PAGE_TABLE_NOT_PRESENT
        || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT
        || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)
        rc = VINF_SUCCESS;

    return rc;
}
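
/* Usage sketch (hypothetical): an emulation site forwarding a guest INVLPG
 * would call this and treat VINF_PGM_SYNC_CR3 as "a full sync was queued
 * instead of a single-page flush":
 *
 *     int rc = PGMInvalidatePage(pVCpu, GCPtrPage);
 *     AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
 */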


/**
 * Executes an instruction using the interpreter.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pRegFrame   Register frame.
 * @param   pvFault     Fault address.
 */
VMMDECL(VBOXSTRICTRC) PGMInterpretInstruction(PVMCC pVM, PVMCPUCC pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
{
    NOREF(pVM);
    VBOXSTRICTRC rc = EMInterpretInstruction(pVCpu, pRegFrame, pvFault);
    if (rc == VERR_EM_INTERPRETER)
        rc = VINF_EM_RAW_EMULATE_INSTR;
    if (rc != VINF_SUCCESS)
        Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%RGv)\n", VBOXSTRICTRC_VAL(rc), pvFault));
    return rc;
}


/**
 * Gets effective page information (from the VMM page directory).
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtr       Guest Context virtual address of the page.
 * @param   pfFlags     Where to store the flags. These are X86_PTE_*.
 * @param   pHCPhys     Where to store the HC physical address of the page.
 *                      This is page aligned.
 * @remark  You should use PGMMapGetPage() for pages in a mapping.
 */
VMMDECL(int) PGMShwGetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
{
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    PGM_LOCK_VOID(pVM);

    uintptr_t idxShw = pVCpu->pgm.s.idxShadowModeData;
    AssertReturn(idxShw < RT_ELEMENTS(g_aPgmShadowModeData), VERR_PGM_MODE_IPE);
    AssertReturn(g_aPgmShadowModeData[idxShw].pfnGetPage, VERR_PGM_MODE_IPE);
    int rc = g_aPgmShadowModeData[idxShw].pfnGetPage(pVCpu, GCPtr, pfFlags, pHCPhys);

    PGM_UNLOCK(pVM);
    return rc;
}
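
/* Usage sketch (hypothetical): querying the shadow PTE for a guest address
 * and testing writability:
 *
 *     uint64_t fFlags = 0;
 *     RTHCPHYS HCPhys = NIL_RTHCPHYS;
 *     int rc = PGMShwGetPage(pVCpu, GCPtr, &fFlags, &HCPhys);
 *     if (RT_SUCCESS(rc) && (fFlags & X86_PTE_RW))
 *         ...; // the shadow mapping is currently writable
 */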


/**
 * Modify page flags for a range of pages in the shadow context.
 *
 * The existing flags are ANDed with the fMask and ORed with the fFlags.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtr       Virtual address of the first page in the range.
 * @param   fFlags      The OR mask - page flags X86_PTE_*, excluding the page mask of course.
 * @param   fMask       The AND mask - page flags X86_PTE_*.
 *                      Be very CAREFUL when ~'ing constants which could be 32-bit!
 * @param   fOpFlags    A combination of the PGM_MK_PG_XXX flags.
 * @remark  You must use PGMMapModifyPage() for pages in a mapping.
 */
DECLINLINE(int) pdmShwModifyPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags)
{
    AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
    Assert(!(fOpFlags & ~(PGM_MK_PG_IS_MMIO2 | PGM_MK_PG_IS_WRITE_FAULT)));

    GCPtr &= PAGE_BASE_GC_MASK; /** @todo this ain't necessary, right... */

    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    PGM_LOCK_VOID(pVM);

    uintptr_t idxShw = pVCpu->pgm.s.idxShadowModeData;
    AssertReturn(idxShw < RT_ELEMENTS(g_aPgmShadowModeData), VERR_PGM_MODE_IPE);
    AssertReturn(g_aPgmShadowModeData[idxShw].pfnModifyPage, VERR_PGM_MODE_IPE);
    int rc = g_aPgmShadowModeData[idxShw].pfnModifyPage(pVCpu, GCPtr, PAGE_SIZE, fFlags, fMask, fOpFlags);

    PGM_UNLOCK(pVM);
    return rc;
}
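
/* Worked example of the fFlags/fMask semantics (matching the wrappers that
 * follow): the new entry is conceptually (old & fMask) | fFlags, so
 *
 *     make read-only:   fFlags = 0,          fMask = ~(uint64_t)X86_PTE_RW
 *     make writable:    fFlags = X86_PTE_RW, fMask = ~(uint64_t)0
 *
 * The uint64_t cast is the point of the "Be very CAREFUL" remark above: a
 * 32-bit ~X86_PTE_RW would also clear the upper half of a 64-bit PAE entry.
 */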


/**
 * Changing the page flags for a single page in the shadow page tables so as to
 * make it read-only.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtr       Virtual address of the first page in the range.
 * @param   fOpFlags    A combination of the PGM_MK_PG_XXX flags.
 */
VMMDECL(int) PGMShwMakePageReadonly(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
{
    return pdmShwModifyPage(pVCpu, GCPtr, 0, ~(uint64_t)X86_PTE_RW, fOpFlags);
}


/**
 * Changing the page flags for a single page in the shadow page tables so as to
 * make it writable.
 *
 * The caller must know with 101% certainty that the guest page tables map this
 * page as writable too. This function deals with shared, zero and write
 * monitored pages.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtr       Virtual address of the first page in the range.
 * @param   fOpFlags    A combination of the PGM_MK_PG_XXX flags.
 */
VMMDECL(int) PGMShwMakePageWritable(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
{
    if (pVCpu->pgm.s.enmShadowMode != PGMMODE_NONE) /* avoid assertions */
        return pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)0, fOpFlags);
    return VINF_SUCCESS;
}


/**
 * Changing the page flags for a single page in the shadow page tables so as to
 * make it not present.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtr       Virtual address of the first page in the range.
 * @param   fOpFlags    A combination of the PGM_MK_PG_XXX flags.
 */
VMMDECL(int) PGMShwMakePageNotPresent(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
{
    return pdmShwModifyPage(pVCpu, GCPtr, 0, 0, fOpFlags);
}


/**
 * Changing the page flags for a single page in the shadow page tables so as to
 * make it supervisor and writable.
 *
 * This is for dealing with CR0.WP=0 and read-only user pages.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtr       Virtual address of the first page in the range.
 * @param   fBigPage    Whether or not this is a big page. If it is, we have to
 *                      change the shadow PDE as well. If it isn't, the caller
 *                      has checked that the shadow PDE doesn't need changing.
 *                      We ASSUME 4KB pages backing the big page here!
 * @param   fOpFlags    A combination of the PGM_MK_PG_XXX flags.
 */
int pgmShwMakePageSupervisorAndWritable(PVMCPUCC pVCpu, RTGCPTR GCPtr, bool fBigPage, uint32_t fOpFlags)
{
    int rc = pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)X86_PTE_US, fOpFlags);
    if (rc == VINF_SUCCESS && fBigPage)
    {
        /* this is a bit ugly... */
        switch (pVCpu->pgm.s.enmShadowMode)
        {
            case PGMMODE_32_BIT:
            {
                PX86PDE pPde = pgmShwGet32BitPDEPtr(pVCpu, GCPtr);
                AssertReturn(pPde, VERR_INTERNAL_ERROR_3);
                Log(("pgmShwMakePageSupervisorAndWritable: PDE=%#llx", pPde->u));
                pPde->u |= X86_PDE_RW;
                Log(("-> PDE=%#llx (32)\n", pPde->u));
                break;
            }
            case PGMMODE_PAE:
            case PGMMODE_PAE_NX:
            {
                PX86PDEPAE pPde = pgmShwGetPaePDEPtr(pVCpu, GCPtr);
                AssertReturn(pPde, VERR_INTERNAL_ERROR_3);
                Log(("pgmShwMakePageSupervisorAndWritable: PDE=%#llx", pPde->u));
                pPde->u |= X86_PDE_RW;
                Log(("-> PDE=%#llx (PAE)\n", pPde->u));
                break;
            }
            default:
                AssertFailedReturn(VERR_INTERNAL_ERROR_4);
        }
    }
    return rc;
}


/**
 * Gets the shadow page directory for the specified address, PAE.
 *
 * @returns Pointer to the shadow PD.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtr       The address.
 * @param   uGstPdpe    Guest PDPT entry. Valid.
 * @param   ppPD        Receives address of page directory.
 */
int pgmShwSyncPaePDPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
{
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
    PPGMPOOLPAGE pShwPage;
    int rc;
    PGM_LOCK_ASSERT_OWNER(pVM);


    /* Allocate page directory if not present. */
    const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
    PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
    PX86PDPE pPdpe = &pPdpt->a[iPdPt];
    X86PGPAEUINT const uPdpe = pPdpe->u;
    if (uPdpe & (X86_PDPE_P | X86_PDPE_PG_MASK))
    {
        pShwPage = pgmPoolGetPage(pPool, uPdpe & X86_PDPE_PG_MASK);
        AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
        Assert((pPdpe->u & X86_PDPE_PG_MASK) == pShwPage->Core.Key);

        pgmPoolCacheUsed(pPool, pShwPage);

        /* Update the entry if necessary. */
        X86PGPAEUINT const uPdpeNew = pShwPage->Core.Key | (uGstPdpe & (X86_PDPE_P | X86_PDPE_A)) | (uPdpe & PGM_PDPT_FLAGS);
        if (uPdpeNew == uPdpe)
        { /* likely */ }
        else
            ASMAtomicWriteU64(&pPdpe->u, uPdpeNew);
    }
    else
    {
        RTGCPTR64 GCPdPt;
        PGMPOOLKIND enmKind;
        if (pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu))
        {
            /* AMD-V nested paging or real/protected mode without paging. */
            GCPdPt = GCPtr & ~(RT_BIT_64(X86_PDPT_SHIFT) - 1);
            enmKind = PGMPOOLKIND_PAE_PD_PHYS;
        }
        else if (CPUMGetGuestCR4(pVCpu) & X86_CR4_PAE)
        {
            if (uGstPdpe & X86_PDPE_P)
            {
                GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
                enmKind = PGMPOOLKIND_PAE_PD_FOR_PAE_PD;
            }
            else
            {
                /* PD not present; guest must reload CR3 to change it.
                 * No need to monitor anything in this case. */
                /** @todo r=bird: WTF is hit?!? */
                Assert(VM_IS_RAW_MODE_ENABLED(pVM));
                GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
                enmKind = PGMPOOLKIND_PAE_PD_PHYS;
                Assert(uGstPdpe & X86_PDPE_P); /* caller should do this already */
            }
        }
        else
        {
            GCPdPt = CPUMGetGuestCR3(pVCpu);
            enmKind = (PGMPOOLKIND)(PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD + iPdPt);
        }

        /* Create a reference back to the PDPT by using the index in its shadow page. */
        rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
                          pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPdPt, false /*fLockPage*/,
                          &pShwPage);
        AssertRCReturn(rc, rc);

        /* Hook it up. */
        ASMAtomicWriteU64(&pPdpe->u, pShwPage->Core.Key | (uGstPdpe & (X86_PDPE_P | X86_PDPE_A)) | (uPdpe & PGM_PDPT_FLAGS));
    }
    PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdpe);

    *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
    return VINF_SUCCESS;
}
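
/* Index math illustration (assuming the usual PAE constants, i.e.
 * X86_PDPT_SHIFT == 30 and X86_PDPT_MASK_PAE == 3): PAE has four PDPT
 * entries of 1 GiB each, so for example GCPtr = 0x80001000 gives
 *
 *     iPdPt = (0x80001000 >> 30) & 3 = 2
 *
 * i.e. the third shadow page directory.
 */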


/**
 * Gets the pointer to the shadow page directory entry for an address, PAE.
 *
 * @returns Pointer to the PDE.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   GCPtr       The address.
 * @param   ppShwPde    Receives the address of the pgm pool page for the shadow page directory.
 */
DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde)
{
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    PGM_LOCK_ASSERT_OWNER(pVM);

    PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
    AssertReturn(pPdpt, VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT); /* can't happen */
    const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
    X86PGPAEUINT const uPdpe = pPdpt->a[iPdPt].u;
    if (!(uPdpe & X86_PDPE_P))
    {
        LogFlow(("pgmShwGetPaePoolPagePD: PD %d not present (%RX64)\n", iPdPt, uPdpe));
        return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
    }
    AssertMsg(uPdpe & X86_PDPE_PG_MASK, ("GCPtr=%RGv\n", GCPtr));

    /* Fetch the pgm pool shadow descriptor. */
    PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), uPdpe & X86_PDPE_PG_MASK);
    AssertReturn(pShwPde, VERR_PGM_POOL_GET_PAGE_FAILED);

    *ppShwPde = pShwPde;
    return VINF_SUCCESS;
}


/**
 * Syncs the SHADOW page directory pointer for the specified address.
 *
 * Allocates backing pages in case the PDPT or PML4 entry is missing.
 *
 * The caller is responsible for making sure the guest has a valid PD before
 * calling this function.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtr       The address.
 * @param   uGstPml4e   Guest PML4 entry (valid).
 * @param   uGstPdpe    Guest PDPT entry (valid).
 * @param   ppPD        Receives address of page directory.
 */
static int pgmShwSyncLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
{
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
    bool const fNestedPagingOrNoGstPaging = pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu);
    int rc;

    PGM_LOCK_ASSERT_OWNER(pVM);

    /*
     * PML4.
     */
    PPGMPOOLPAGE pShwPage;
    {
        const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
        PX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
        X86PGPAEUINT const uPml4e = pPml4e->u;

        /* Allocate page directory pointer table if not present. */
        if (uPml4e & (X86_PML4E_P | X86_PML4E_PG_MASK))
        {
            pShwPage = pgmPoolGetPage(pPool, uPml4e & X86_PML4E_PG_MASK);
            AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);

            pgmPoolCacheUsed(pPool, pShwPage);

            /* Update the entry if needed. */
            X86PGPAEUINT const uPml4eNew = pShwPage->Core.Key | (uGstPml4e & pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask)
                                         | (uPml4e & PGM_PML4_FLAGS);
            if (uPml4e == uPml4eNew)
            { /* likely */ }
            else
                ASMAtomicWriteU64(&pPml4e->u, uPml4eNew);
        }
        else
        {
            Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));

            RTGCPTR64 GCPml4;
            PGMPOOLKIND enmKind;
            if (fNestedPagingOrNoGstPaging)
            {
                /* AMD-V nested paging or real/protected mode without paging */
                GCPml4 = (RTGCPTR64)iPml4 << X86_PML4_SHIFT; /** @todo bogus calculation for PML5 */
                enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_PHYS;
            }
            else
            {
                GCPml4 = uGstPml4e & X86_PML4E_PG_MASK;
                enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT;
            }

            /* Create a reference back to the PDPT by using the index in its shadow page. */
            rc = pgmPoolAlloc(pVM, GCPml4, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
                              pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, false /*fLockPage*/,
                              &pShwPage);
            AssertRCReturn(rc, rc);

            /* Hook it up. */
            ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | (uGstPml4e & pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask)
                                        | (uPml4e & PGM_PML4_FLAGS));
        }
    }

    /*
     * PDPT.
     */
    const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
    PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
    PX86PDPE pPdpe = &pPdpt->a[iPdPt];
    X86PGPAEUINT const uPdpe = pPdpe->u;

    /* Allocate page directory if not present. */
    if (uPdpe & (X86_PDPE_P | X86_PDPE_PG_MASK))
    {
        pShwPage = pgmPoolGetPage(pPool, uPdpe & X86_PDPE_PG_MASK);
        AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);

        pgmPoolCacheUsed(pPool, pShwPage);

        /* Update the entry if needed. */
        X86PGPAEUINT const uPdpeNew = pShwPage->Core.Key | (uGstPdpe & pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask)
                                    | (uPdpe & PGM_PDPT_FLAGS);
        if (uPdpe == uPdpeNew)
        { /* likely */ }
        else
            ASMAtomicWriteU64(&pPdpe->u, uPdpeNew);
    }
    else
    {
        RTGCPTR64 GCPdPt;
        PGMPOOLKIND enmKind;
        if (fNestedPagingOrNoGstPaging)
        {
            /* AMD-V nested paging or real/protected mode without paging */
            GCPdPt = GCPtr & ~(RT_BIT_64(iPdPt << X86_PDPT_SHIFT) - 1);
            enmKind = PGMPOOLKIND_64BIT_PD_FOR_PHYS;
        }
        else
        {
            GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
            enmKind = PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD;
        }

        /* Create a reference back to the PDPT by using the index in its shadow page. */
        rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
                          pShwPage->idx, iPdPt, false /*fLockPage*/,
                          &pShwPage);
        AssertRCReturn(rc, rc);

        /* Hook it up. */
        ASMAtomicWriteU64(&pPdpe->u,
                          pShwPage->Core.Key | (uGstPdpe & pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask) | (uPdpe & PGM_PDPT_FLAGS));
    }

    *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
    return VINF_SUCCESS;
}
1505
1506
1507/**
1508 * Gets the SHADOW page directory pointer for the specified address (long mode).
1509 *
1510 * @returns VBox status code.
1511 * @param pVCpu The cross context virtual CPU structure.
1512 * @param GCPtr The address.
1513 * @param ppPml4e Receives the address of the page map level 4 entry.
1514 * @param ppPdpt Receives the address of the page directory pointer table.
1515 * @param ppPD Receives the address of the page directory.
1516 */
1517DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
1518{
1519 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1520 PGM_LOCK_ASSERT_OWNER(pVM);
1521
1522 /*
1523 * PML4
1524 */
1525 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1526 PCX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
1527 AssertReturn(pPml4e, VERR_PGM_PML4_MAPPING);
1528 if (ppPml4e)
1529 *ppPml4e = (PX86PML4E)pPml4e;
1530 X86PGPAEUINT const uPml4e = pPml4e->u;
1531 Log4(("pgmShwGetLongModePDPtr %RGv (%RHv) %RX64\n", GCPtr, pPml4e, uPml4e));
1532    if (!(uPml4e & X86_PML4E_P)) /** @todo other code checks for a NULL page frame number! */
1533 return VERR_PAGE_MAP_LEVEL4_NOT_PRESENT;
1534
1535 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1536 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, uPml4e & X86_PML4E_PG_MASK);
1537 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1538
1539 /*
1540 * PDPT
1541 */
1542 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1543 PCX86PDPT pPdpt = *ppPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1544 X86PGPAEUINT const uPdpe = pPdpt->a[iPdPt].u;
1545    if (!(uPdpe & X86_PDPE_P)) /** @todo other code checks for a NULL page frame number! */
1546 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1547
1548 pShwPage = pgmPoolGetPage(pPool, uPdpe & X86_PDPE_PG_MASK);
1549 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1550
1551 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1552 Log4(("pgmShwGetLongModePDPtr %RGv -> *ppPD=%p PDE=%p/%RX64\n", GCPtr, *ppPD, &(*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK], (*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK].u));
1553 return VINF_SUCCESS;
1554}
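
/*
 * Example (illustrative sketch, compiled out): how a caller holding the PGM
 * lock might use pgmShwGetLongModePDPtr to peek at a shadow PDE.  The helper
 * name and the treatment of the not-present status codes are assumptions for
 * illustration, not lifted from an actual caller.
 */
#if 0
static int pgmExamplePeekShadowPde(PVMCPUCC pVCpu, RTGCPTR64 GCPtr)
{
    PGM_LOCK_ASSERT_OWNER(pVCpu->CTX_SUFF(pVM));

    PX86PDPT  pPdpt = NULL;
    PX86PDPAE pPD   = NULL;
    int rc = pgmShwGetLongModePDPtr(pVCpu, GCPtr, NULL /*ppPml4e*/, &pPdpt, &pPD);
    if (RT_SUCCESS(rc))
    {
        /* Index the PD the same way pgmShwGetLongModePDPtr does in its logging. */
        unsigned const iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
        Log(("example: %RGv -> shadow PDE %RX64\n", GCPtr, pPD->a[iPd].u));
    }
    /* VERR_PAGE_MAP_LEVEL4_NOT_PRESENT and VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT
       simply mean nothing is shadowed at that address yet. */
    RT_NOREF(pPdpt);
    return rc;
}
#endif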
1555
1556
1557/**
1558 * Syncs the SHADOW EPT page directory pointer for the specified address. Allocates
1559 * backing pages in case the PDPT or PML4 entry is missing.
1560 *
1561 * @returns VBox status code.
1562 * @param pVCpu The cross context virtual CPU structure.
1563 * @param GCPtr The address.
1564 * @param   ppPdpt      Receives the address of the page directory pointer table. Optional.
1565 * @param   ppPD        Receives the address of the page directory.
1566 */
1567static int pgmShwGetEPTPDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD)
1568{
1569 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1570 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1571 int rc;
1572
1573 Assert(pVM->pgm.s.fNestedPaging);
1574 PGM_LOCK_ASSERT_OWNER(pVM);
1575
1576 /*
1577 * PML4 level.
1578 */
1579 PEPTPML4 pPml4 = (PEPTPML4)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1580 Assert(pPml4);
1581
1582 /* Allocate page directory pointer table if not present. */
1583 PPGMPOOLPAGE pShwPage;
1584 {
1585 const unsigned iPml4 = (GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
1586 PEPTPML4E pPml4e = &pPml4->a[iPml4];
1587 EPTPML4E Pml4e;
1588 Pml4e.u = pPml4e->u;
1589 if (!(Pml4e.u & (EPT_E_PG_MASK | EPT_E_READ)))
1590 {
1591 RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT;
1592 rc = pgmPoolAlloc(pVM, GCPml4, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1593 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, false /*fLockPage*/,
1594 &pShwPage);
1595 AssertRCReturn(rc, rc);
1596
1597 /* Hook up the new PDPT now. */
1598 ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1599 }
1600 else
1601 {
1602 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
1603 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1604
1605 pgmPoolCacheUsed(pPool, pShwPage);
1606
1607 /* Hook up the cached PDPT if needed (probably not given 512*512 PTs to sync). */
1608 if (Pml4e.u == (pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE))
1609 { }
1610 else
1611 ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1612 }
1613 }
1614
1615 /*
1616 * PDPT level.
1617 */
1618 const unsigned iPdPt = (GCPtr >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
1619 PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1620 PEPTPDPTE pPdpe = &pPdpt->a[iPdPt];
1621
1622 if (ppPdpt)
1623 *ppPdpt = pPdpt;
1624
1625 /* Allocate page directory if not present. */
1626 EPTPDPTE Pdpe;
1627 Pdpe.u = pPdpe->u;
1628 if (!(Pdpe.u & (EPT_E_PG_MASK | EPT_E_READ)))
1629 {
1630 RTGCPTR64 const GCPdPt = GCPtr & ~(RT_BIT_64(EPT_PDPT_SHIFT) - 1);
1631 rc = pgmPoolAlloc(pVM, GCPdPt, PGMPOOLKIND_EPT_PD_FOR_PHYS, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1632 pShwPage->idx, iPdPt, false /*fLockPage*/,
1633 &pShwPage);
1634 AssertRCReturn(rc, rc);
1635
1636 /* Hook up the new PD now. */
1637 ASMAtomicWriteU64(&pPdpe->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1638 }
1639 else
1640 {
1641 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & EPT_PDPTE_PG_MASK);
1642 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1643
1644 pgmPoolCacheUsed(pPool, pShwPage);
1645
1646        /* Hook up the cached PD if needed (probably not given there are 512 PTs we may need to sync). */
1647 if (Pdpe.u == (pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE))
1648 { }
1649 else
1650 ASMAtomicWriteU64(&pPdpe->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1651 }
1652
1653 *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1654 return VINF_SUCCESS;
1655}
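
/*
 * Example (illustrative sketch, compiled out): unlike the long-mode getter
 * above, pgmShwGetEPTPDPtr allocates missing PML4/PDPT levels as a side
 * effect, so it cannot fail with a not-present status for those levels.  The
 * helper name is an assumption; EPT_PD_SHIFT/EPT_PD_MASK are assumed to be
 * the usual EPT page directory index macros.
 */
#if 0
static int pgmExampleGetEptPde(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested)
{
    PGM_LOCK_ASSERT_OWNER(pVCpu->CTX_SUFF(pVM));

    PEPTPD pPD = NULL;
    int rc = pgmShwGetEPTPDPtr(pVCpu, GCPhysNested, NULL /*ppPdpt*/, &pPD);
    if (RT_SUCCESS(rc))
    {
        unsigned const iPd = (GCPhysNested >> EPT_PD_SHIFT) & EPT_PD_MASK;
        Log(("example: %RGp -> shadow EPT PDE %RX64\n", GCPhysNested, pPD->a[iPd].u));
    }
    return rc;
}
#endif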
1656
1657
1658#ifdef IN_RING0
1659/**
1660 * Synchronizes a range of nested page table entries.
1661 *
1662 * The caller must own the PGM lock.
1663 *
1664 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1665 * @param GCPhys Where to start.
1666 * @param   cPages              How many pages whose entries should be synced.
1667 * @param enmShwPagingMode The shadow paging mode (PGMMODE_EPT for VT-x,
1668 * host paging mode for AMD-V).
1669 */
1670int pgmShwSyncNestedPageLocked(PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint32_t cPages, PGMMODE enmShwPagingMode)
1671{
1672 PGM_LOCK_ASSERT_OWNER(pVCpu->CTX_SUFF(pVM));
1673
1674/** @todo r=bird: Gotta love this nested paging hacking we're still carrying with us... (Split PGM_TYPE_NESTED.) */
1675 int rc;
1676 switch (enmShwPagingMode)
1677 {
1678 case PGMMODE_32_BIT:
1679 {
1680 X86PDE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1681 rc = PGM_BTH_NAME_32BIT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1682 break;
1683 }
1684
1685 case PGMMODE_PAE:
1686 case PGMMODE_PAE_NX:
1687 {
1688 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1689 rc = PGM_BTH_NAME_PAE_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1690 break;
1691 }
1692
1693 case PGMMODE_AMD64:
1694 case PGMMODE_AMD64_NX:
1695 {
1696 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1697 rc = PGM_BTH_NAME_AMD64_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1698 break;
1699 }
1700
1701 case PGMMODE_EPT:
1702 {
1703 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1704 rc = PGM_BTH_NAME_EPT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1705 break;
1706 }
1707
1708 default:
1709 AssertMsgFailedReturn(("%d\n", enmShwPagingMode), VERR_IPE_NOT_REACHED_DEFAULT_CASE);
1710 }
1711 return rc;
1712}
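
/*
 * Example (illustrative sketch, compiled out): a ring-0 nested-paging fault
 * path could resolve the faulting page like this.  The calling context and
 * the single-page batch size are assumptions for illustration.
 */
# if 0
static int pgmExampleHandleNestedFault(PVMCPUCC pVCpu, RTGCPHYS GCPhysFault, PGMMODE enmShwPagingMode)
{
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    PGM_LOCK_VOID(pVM);
    /* Syncing more than one page would amortize the table walk over a batch. */
    int rc = pgmShwSyncNestedPageLocked(pVCpu, GCPhysFault, 1 /*cPages*/, enmShwPagingMode);
    PGM_UNLOCK(pVM);
    return rc;
}
# endif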
1713#endif /* IN_RING0 */
1714
1715
1716/**
1717 * Gets effective Guest OS page information.
1718 *
1719 * When GCPtr is in a big page, the function will return as if it was a normal
1720 * 4KB page. Should distinguishing between big and normal pages become
1721 * necessary at a later point, a separate function can be introduced for
1722 * that purpose.
1723 *
1724 * @returns VBox status code.
1725 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1726 * @param GCPtr Guest Context virtual address of the page.
1727 * @param pWalk Where to store the page walk information.
1728 */
1729VMMDECL(int) PGMGstGetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk)
1730{
1731 VMCPU_ASSERT_EMT(pVCpu);
1732 Assert(pWalk);
1733 RT_BZERO(pWalk, sizeof(*pWalk));
1734 uintptr_t idx = pVCpu->pgm.s.idxGuestModeData;
1735 AssertReturn(idx < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
1736 AssertReturn(g_aPgmGuestModeData[idx].pfnGetPage, VERR_PGM_MODE_IPE);
1737 return g_aPgmGuestModeData[idx].pfnGetPage(pVCpu, GCPtr, pWalk);
1738}
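
/*
 * Example (illustrative sketch, compiled out): translating a guest linear
 * address to a guest physical one.  The helper name and the logging are
 * assumptions for illustration.
 */
#if 0
static int pgmExampleTranslateGuestAddr(PVMCPUCC pVCpu, RTGCPTR GCPtr)
{
    PGMPTWALK Walk;
    int rc = PGMGstGetPage(pVCpu, GCPtr, &Walk);
    if (RT_SUCCESS(rc))
        Log(("example: %RGv -> %RGp (big pages are reported as 4KB ones)\n", GCPtr, Walk.GCPhys));
    return rc;
}
#endif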
1739
1740
1741/**
1742 * Performs a guest page table walk.
1743 *
1744 * The guest should be in paged protected mode or long mode when making a
1745 * call to this function.
1746 *
1747 * @returns VBox status code.
1748 * @retval VINF_SUCCESS on success.
1749 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
1750 * @retval VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
1751 * not valid, except enmType is PGMPTWALKGSTTYPE_INVALID.
1752 *
1753 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1754 * @param GCPtr The guest virtual address to walk by.
1755 * @param pWalk Where to return the walk result. This is valid for some
1756 * error codes as well.
1757 * @param pGstWalk The guest mode specific page walk information.
1758 */
1759int pgmGstPtWalk(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
1760{
1761 VMCPU_ASSERT_EMT(pVCpu);
1762 switch (pVCpu->pgm.s.enmGuestMode)
1763 {
1764 case PGMMODE_32_BIT:
1765 pGstWalk->enmType = PGMPTWALKGSTTYPE_32BIT;
1766 return PGM_GST_NAME_32BIT(Walk)(pVCpu, GCPtr, pWalk, &pGstWalk->u.Legacy);
1767
1768 case PGMMODE_PAE:
1769 case PGMMODE_PAE_NX:
1770 pGstWalk->enmType = PGMPTWALKGSTTYPE_PAE;
1771 return PGM_GST_NAME_PAE(Walk)(pVCpu, GCPtr, pWalk, &pGstWalk->u.Pae);
1772
1773 case PGMMODE_AMD64:
1774 case PGMMODE_AMD64_NX:
1775 pGstWalk->enmType = PGMPTWALKGSTTYPE_AMD64;
1776 return PGM_GST_NAME_AMD64(Walk)(pVCpu, GCPtr, pWalk, &pGstWalk->u.Amd64);
1777
1778 case PGMMODE_REAL:
1779 case PGMMODE_PROTECTED:
1780 pGstWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
1781 return VERR_PGM_NOT_USED_IN_MODE;
1782
1783 case PGMMODE_EPT:
1784 case PGMMODE_NESTED_32BIT:
1785 case PGMMODE_NESTED_PAE:
1786 case PGMMODE_NESTED_AMD64:
1787 default:
1788 AssertFailed();
1789 pGstWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
1790 return VERR_PGM_NOT_USED_IN_MODE;
1791 }
1792}
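
/*
 * Example (illustrative sketch, compiled out): a full walk also hands back
 * mode specific details in PGMPTWALKGST, keyed by its enmType member.  The
 * AMD64 union access matches what pgmGstPtWalkNext below relies on; the
 * helper name is an assumption for illustration.
 */
#if 0
static int pgmExampleWalkAndInspect(PVMCPUCC pVCpu, RTGCPTR GCPtr)
{
    PGMPTWALK    Walk;
    PGMPTWALKGST GstWalk;
    int rc = pgmGstPtWalk(pVCpu, GCPtr, &Walk, &GstWalk);
    if (   RT_SUCCESS(rc)
        && GstWalk.enmType == PGMPTWALKGSTTYPE_AMD64)
        Log(("example: PDE=%RX64 PTE=%RX64\n", GstWalk.u.Amd64.Pde.u, GstWalk.u.Amd64.Pte.u));
    return rc;
}
#endif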
1793
1794
1795#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1796/**
1797 * Performs a guest second-level address translation (SLAT).
1798 *
1799 * The guest paging mode must be 32-bit, PAE or AMD64 when making a call to this
1800 * function.
1801 *
1802 * @returns VBox status code.
1803 * @retval VINF_SUCCESS on success.
1804 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
1805 * @retval VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
1806 * not valid, except enmType is PGMPTWALKGSTTYPE_INVALID.
1807 *
1808 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1809 * @param GCPhysNested The nested-guest physical address being translated
1810 * (input).
1811 * @param fIsLinearAddrValid Whether the linear address in @a GCPtrNested is
1812 *                              valid. This indicates whether the SLAT occurred
1813 *                              while translating a nested-guest linear address.
1814 * @param GCPtrNested The nested-guest virtual address that initiated the
1815 * SLAT. If none, pass NIL_RTGCPTR.
1816 * @param pWalk Where to return the walk result. This is valid for
1817 * some error codes as well.
1818 * @param pGstWalk The second-level paging-mode specific walk
1819 * information.
1820 */
1821static int pgmGstSlatWalk(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, bool fIsLinearAddrValid, RTGCPTR GCPtrNested,
1822 PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
1823{
1824 /* SLAT mode must be valid at this point as this should only be used -after- we have determined SLAT mode. */
1825 Assert( pVCpu->pgm.s.enmGuestSlatMode != PGMSLAT_DIRECT
1826 && pVCpu->pgm.s.enmGuestSlatMode != PGMSLAT_INVALID);
1827 switch (pVCpu->pgm.s.enmGuestSlatMode)
1828 {
1829 case PGMSLAT_EPT:
1830 pGstWalk->enmType = PGMPTWALKGSTTYPE_EPT;
1831 return PGM_GST_SLAT_NAME_EPT(Walk)(pVCpu, GCPhysNested, fIsLinearAddrValid, GCPtrNested, pWalk, &pGstWalk->u.Ept);
1832
1833 default:
1834 AssertFailed();
1835 pGstWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
1836 return VERR_PGM_NOT_USED_IN_MODE;
1837 }
1838}
1839
1840
1841/**
1842 * Performs a guest second-level address translation (SLAT) for a nested-guest
1843 * physical address.
1844 *
1845 * This version requires the SLAT mode to be provided by the caller because we could
1846 * be in the process of switching paging modes (MOV CRX) and cannot presume control
1847 * register values.
1848 *
1849 * @returns VBox status code.
1850 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1851 * @param enmSlatMode The second-level paging mode to use.
1852 * @param GCPhysNested The nested-guest physical address to translate.
1853 * @param pWalk Where to store the walk result.
1854 * @param pGstWalk Where to store the second-level paging-mode specific
1855 * walk information.
1856 */
1857static int pgmGstSlatWalkPhys(PVMCPUCC pVCpu, PGMSLAT enmSlatMode, RTGCPHYS GCPhysNested, PPGMPTWALK pWalk,
1858 PPGMPTWALKGST pGstWalk)
1859{
1860 switch (enmSlatMode)
1861 {
1862 case PGMSLAT_EPT:
1863 pGstWalk->enmType = PGMPTWALKGSTTYPE_EPT;
1864            return PGM_GST_SLAT_NAME_EPT(Walk)(pVCpu, GCPhysNested, false /* fIsLinearAddrValid */, NIL_RTGCPTR, pWalk,
1865 &pGstWalk->u.Ept);
1866
1867 default:
1868 AssertFailed();
1869 return VERR_PGM_NOT_USED_IN_MODE;
1870 }
1871}
1872#endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
1873
1874
1875/**
1876 * Tries to continue the previous walk.
1877 *
1878 * @note Requires the caller to hold the PGM lock from the first
1879 * pgmGstPtWalk() call to the last pgmGstPtWalkNext() call. Otherwise
1880 * we cannot use the pointers.
1881 *
1882 * @returns VBox status code.
1883 * @retval VINF_SUCCESS on success.
1884 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
1885 * @retval VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
1886 * not valid, except enmType is PGMPTWALKGSTTYPE_INVALID.
1887 *
1888 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1889 * @param GCPtr The guest virtual address to walk by.
1890 * @param pWalk Pointer to the previous walk result and where to return
1891 * the result of this walk. This is valid for some error
1892 * codes as well.
1893 * @param pGstWalk The guest-mode specific walk information.
1894 */
1895int pgmGstPtWalkNext(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
1896{
1897 /*
1898     * We can only handle successful walks.
1899 * We also limit ourselves to the next page.
1900 */
1901 if ( pWalk->fSucceeded
1902 && GCPtr - pWalk->GCPtr == PAGE_SIZE)
1903 {
1904 Assert(pWalk->uLevel == 0);
1905 if (pGstWalk->enmType == PGMPTWALKGSTTYPE_AMD64)
1906 {
1907 /*
1908 * AMD64
1909 */
1910 if (!pWalk->fGigantPage && !pWalk->fBigPage)
1911 {
1912 /*
1913 * We fall back to full walk if the PDE table changes, if any
1914 * reserved bits are set, or if the effective page access changes.
1915 */
1916 const uint64_t fPteSame = X86_PTE_P | X86_PTE_RW | X86_PTE_US | X86_PTE_PWT
1917 | X86_PTE_PCD | X86_PTE_A | X86_PTE_PAE_NX;
1918 const uint64_t fPdeSame = X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_PWT
1919 | X86_PDE_PCD | X86_PDE_A | X86_PDE_PAE_NX | X86_PDE_PS;
1920
1921 if ((GCPtr >> X86_PD_PAE_SHIFT) == (pWalk->GCPtr >> X86_PD_PAE_SHIFT))
1922 {
1923 if (pGstWalk->u.Amd64.pPte)
1924 {
1925 X86PTEPAE Pte;
1926 Pte.u = pGstWalk->u.Amd64.pPte[1].u;
1927 if ( (Pte.u & fPteSame) == (pGstWalk->u.Amd64.Pte.u & fPteSame)
1928 && !(Pte.u & (pVCpu)->pgm.s.fGstAmd64MbzPteMask))
1929 {
1930 pWalk->GCPtr = GCPtr;
1931 pWalk->GCPhys = Pte.u & X86_PTE_PAE_PG_MASK;
1932 pGstWalk->u.Amd64.Pte.u = Pte.u;
1933 pGstWalk->u.Amd64.pPte++;
1934 return VINF_SUCCESS;
1935 }
1936 }
1937 }
1938 else if ((GCPtr >> X86_PDPT_SHIFT) == (pWalk->GCPtr >> X86_PDPT_SHIFT))
1939 {
1940 Assert(!((GCPtr >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK)); /* Must be first PT entry. */
1941 if (pGstWalk->u.Amd64.pPde)
1942 {
1943 X86PDEPAE Pde;
1944 Pde.u = pGstWalk->u.Amd64.pPde[1].u;
1945 if ( (Pde.u & fPdeSame) == (pGstWalk->u.Amd64.Pde.u & fPdeSame)
1946 && !(Pde.u & (pVCpu)->pgm.s.fGstAmd64MbzPdeMask))
1947 {
1948 /* Get the new PTE and check out the first entry. */
1949 int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, PGM_A20_APPLY(pVCpu, (Pde.u & X86_PDE_PAE_PG_MASK)),
1950 &pGstWalk->u.Amd64.pPt);
1951 if (RT_SUCCESS(rc))
1952 {
1953 pGstWalk->u.Amd64.pPte = &pGstWalk->u.Amd64.pPt->a[0];
1954 X86PTEPAE Pte;
1955 Pte.u = pGstWalk->u.Amd64.pPte->u;
1956 if ( (Pte.u & fPteSame) == (pGstWalk->u.Amd64.Pte.u & fPteSame)
1957 && !(Pte.u & (pVCpu)->pgm.s.fGstAmd64MbzPteMask))
1958 {
1959 pWalk->GCPtr = GCPtr;
1960 pWalk->GCPhys = Pte.u & X86_PTE_PAE_PG_MASK;
1961 pGstWalk->u.Amd64.Pte.u = Pte.u;
1962 pGstWalk->u.Amd64.Pde.u = Pde.u;
1963 pGstWalk->u.Amd64.pPde++;
1964 return VINF_SUCCESS;
1965 }
1966 }
1967 }
1968 }
1969 }
1970 }
1971 else if (!pWalk->fGigantPage)
1972 {
1973 if ((GCPtr & X86_PAGE_2M_BASE_MASK) == (pWalk->GCPtr & X86_PAGE_2M_BASE_MASK))
1974 {
1975 pWalk->GCPtr = GCPtr;
1976 pWalk->GCPhys += PAGE_SIZE;
1977 return VINF_SUCCESS;
1978 }
1979 }
1980 else
1981 {
1982 if ((GCPtr & X86_PAGE_1G_BASE_MASK) == (pWalk->GCPtr & X86_PAGE_1G_BASE_MASK))
1983 {
1984 pWalk->GCPtr = GCPtr;
1985 pWalk->GCPhys += PAGE_SIZE;
1986 return VINF_SUCCESS;
1987 }
1988 }
1989 }
1990 }
1991 /* Case we don't handle. Do full walk. */
1992 return pgmGstPtWalk(pVCpu, GCPtr, pWalk, pGstWalk);
1993}
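
/*
 * Example (illustrative sketch, compiled out): walking a contiguous linear
 * range one page at a time, letting pgmGstPtWalkNext take the fast path.
 * Per the @note above, the PGM lock must be held across the whole sequence
 * for the cached pointers to stay valid.  The helper name is an assumption.
 */
#if 0
static int pgmExampleWalkRange(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cPages)
{
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    PGM_LOCK_VOID(pVM);

    PGMPTWALK    Walk;
    PGMPTWALKGST GstWalk;
    int rc = pgmGstPtWalk(pVCpu, GCPtr, &Walk, &GstWalk);
    while (RT_SUCCESS(rc) && cPages-- > 1)
    {
        GCPtr += PAGE_SIZE;
        rc = pgmGstPtWalkNext(pVCpu, GCPtr, &Walk, &GstWalk);
    }

    PGM_UNLOCK(pVM);
    return rc;
}
#endif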
1994
1995
1996/**
1997 * Sets (replaces) the page flags for a range of pages in the guest's tables.
1998 *
1999 * @returns VBox status code.
2000 * @param pVCpu The cross context virtual CPU structure.
2001 * @param GCPtr The address of the first page.
2002 * @param cb The size of the range in bytes.
2003 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
2004 */
2005VMMDECL(int) PGMGstSetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
2006{
2007 VMCPU_ASSERT_EMT(pVCpu);
2008 return PGMGstModifyPage(pVCpu, GCPtr, cb, fFlags, 0);
2009}
2010
2011
2012/**
2013 * Modify page flags for a range of pages in the guest's tables.
2014 *
2015 * The existing flags are ANDed with the fMask and ORed with the fFlags.
2016 *
2017 * @returns VBox status code.
2018 * @param pVCpu The cross context virtual CPU structure.
2019 * @param GCPtr Virtual address of the first page in the range.
2020 * @param cb Size (in bytes) of the range to apply the modification to.
2021 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
2022 * @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
2023 * Be very CAREFUL when ~'ing constants which could be 32-bit!
2024 */
2025VMMDECL(int) PGMGstModifyPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
2026{
2027 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,GstModifyPage), a);
2028 VMCPU_ASSERT_EMT(pVCpu);
2029
2030 /*
2031 * Validate input.
2032 */
2033 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
2034 Assert(cb);
2035
2036 LogFlow(("PGMGstModifyPage %RGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));
2037
2038 /*
2039 * Adjust input.
2040 */
2041 cb += GCPtr & PAGE_OFFSET_MASK;
2042 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
2043 GCPtr = (GCPtr & PAGE_BASE_GC_MASK);
2044
2045 /*
2046 * Call worker.
2047 */
2048 uintptr_t idx = pVCpu->pgm.s.idxGuestModeData;
2049 AssertReturn(idx < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
2050 AssertReturn(g_aPgmGuestModeData[idx].pfnModifyPage, VERR_PGM_MODE_IPE);
2051 int rc = g_aPgmGuestModeData[idx].pfnModifyPage(pVCpu, GCPtr, cb, fFlags, fMask);
2052
2053 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,GstModifyPage), a);
2054 return rc;
2055}
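
/*
 * Example (illustrative, compiled out): write-protecting a range by clearing
 * X86_PTE_RW.  Note the 64-bit complement: ~X86_PTE_RW evaluated as a 32-bit
 * expression would zero the upper flag bits as well, which is exactly the
 * pitfall the fMask documentation warns about.  The helper name is an
 * assumption for illustration.
 */
#if 0
static int pgmExampleWriteProtectRange(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb)
{
    return PGMGstModifyPage(pVCpu, GCPtr, cb, 0 /*fFlags*/, ~(uint64_t)X86_PTE_RW /*fMask*/);
}
#endif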
2056
2057
2058/**
2059 * Checks whether the given PAE PDPEs are potentially valid for the guest.
2060 *
2061 * @returns @c true if the PDPE is valid, @c false otherwise.
2062 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2063 * @param paPaePdpes The PAE PDPEs to validate.
2064 *
2065 * @remarks This function -only- checks the reserved bits in the PDPE entries.
2066 */
2067VMM_INT_DECL(bool) PGMGstArePaePdpesValid(PVMCPUCC pVCpu, PCX86PDPE paPaePdpes)
2068{
2069 Assert(paPaePdpes);
2070 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
2071 {
2072 X86PDPE const PaePdpe = paPaePdpes[i];
2073 if ( !(PaePdpe.u & X86_PDPE_P)
2074 || !(PaePdpe.u & pVCpu->pgm.s.fGstPaeMbzPdpeMask))
2075 { /* likely */ }
2076 else
2077 return false;
2078 }
2079 return true;
2080}
2081
2082
2083/**
2084 * Performs the lazy mapping of the 32-bit guest PD.
2085 *
2086 * @returns VBox status code.
2087 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2088 * @param ppPd Where to return the pointer to the mapping. This is
2089 * always set.
2090 */
2091int pgmGstLazyMap32BitPD(PVMCPUCC pVCpu, PX86PD *ppPd)
2092{
2093 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2094 PGM_LOCK_VOID(pVM);
2095
2096 Assert(!pVCpu->pgm.s.CTX_SUFF(pGst32BitPd));
2097
2098 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAGE_MASK;
2099 PPGMPAGE pPage;
2100 /** @todo Nested VMX: convert GCPhysCR3 from nested-guest physical to
2101 * guest-physical address here. */
2102 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
2103 if (RT_SUCCESS(rc))
2104 {
2105 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)ppPd);
2106 if (RT_SUCCESS(rc))
2107 {
2108# ifdef IN_RING3
2109 pVCpu->pgm.s.pGst32BitPdR0 = NIL_RTR0PTR;
2110 pVCpu->pgm.s.pGst32BitPdR3 = *ppPd;
2111# else
2112            pVCpu->pgm.s.pGst32BitPdR3 = NIL_RTR3PTR;
2113 pVCpu->pgm.s.pGst32BitPdR0 = *ppPd;
2114# endif
2115 PGM_UNLOCK(pVM);
2116 return VINF_SUCCESS;
2117 }
2118 AssertRC(rc);
2119 }
2120 PGM_UNLOCK(pVM);
2121
2122 *ppPd = NULL;
2123 return rc;
2124}
2125
2126
2127/**
2128 * Performs the lazy mapping of the PAE guest PDPT.
2129 *
2130 * @returns VBox status code.
2131 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2132 * @param ppPdpt Where to return the pointer to the mapping. This is
2133 * always set.
2134 */
2135int pgmGstLazyMapPaePDPT(PVMCPUCC pVCpu, PX86PDPT *ppPdpt)
2136{
2137 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt));
2138 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2139 PGM_LOCK_VOID(pVM);
2140
2141 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAE_PAGE_MASK;
2142 PPGMPAGE pPage;
2143 /** @todo Nested VMX: convert GCPhysCR3 from nested-guest physical to
2144 * guest-physical address here. */
2145 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
2146 if (RT_SUCCESS(rc))
2147 {
2148 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)ppPdpt);
2149 if (RT_SUCCESS(rc))
2150 {
2151# ifdef IN_RING3
2152 pVCpu->pgm.s.pGstPaePdptR0 = NIL_RTR0PTR;
2153 pVCpu->pgm.s.pGstPaePdptR3 = *ppPdpt;
2154# else
2155 pVCpu->pgm.s.pGstPaePdptR3 = NIL_RTR3PTR;
2156 pVCpu->pgm.s.pGstPaePdptR0 = *ppPdpt;
2157# endif
2158 PGM_UNLOCK(pVM);
2159 return VINF_SUCCESS;
2160 }
2161 AssertRC(rc);
2162 }
2163
2164 PGM_UNLOCK(pVM);
2165 *ppPdpt = NULL;
2166 return rc;
2167}
2168
2169
2170/**
2171 * Performs the lazy mapping / updating of a PAE guest PD.
2172 *
2173 *
2174 * @returns VBox status code.
2175 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2176 * @param iPdpt Which PD entry to map (0..3).
2177 * @param ppPd Where to return the pointer to the mapping. This is
2178 * always set.
2179 */
2180int pgmGstLazyMapPaePD(PVMCPUCC pVCpu, uint32_t iPdpt, PX86PDPAE *ppPd)
2181{
2182 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2183 PGM_LOCK_VOID(pVM);
2184
2185 PX86PDPT pGuestPDPT = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
2186 Assert(pGuestPDPT);
2187 Assert(pGuestPDPT->a[iPdpt].u & X86_PDPE_P);
2188 RTGCPHYS GCPhys = pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK;
2189 bool const fChanged = pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] != GCPhys;
2190
2191 PPGMPAGE pPage;
2192 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
2193 if (RT_SUCCESS(rc))
2194 {
2195 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)ppPd);
2196 AssertRC(rc);
2197 if (RT_SUCCESS(rc))
2198 {
2199# ifdef IN_RING3
2200 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = NIL_RTR0PTR;
2201 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = *ppPd;
2202# else
2203 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = NIL_RTR3PTR;
2204 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = *ppPd;
2205# endif
2206 if (fChanged)
2207 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = GCPhys;
2208 PGM_UNLOCK(pVM);
2209 return VINF_SUCCESS;
2210 }
2211 }
2212
2213 /* Invalid page or some failure, invalidate the entry. */
2214 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = NIL_RTGCPHYS;
2215 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = NIL_RTR3PTR;
2216 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = NIL_RTR0PTR;
2217
2218 PGM_UNLOCK(pVM);
2219 return rc;
2220}
2221
2222
2223/**
2224 * Performs the lazy mapping of the AMD64 guest PML4 table.
2225 *
2226 * @returns VBox status code.
2227 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2228 * @param ppPml4 Where to return the pointer to the mapping. This will
2229 * always be set.
2230 */
2231int pgmGstLazyMapPml4(PVMCPUCC pVCpu, PX86PML4 *ppPml4)
2232{
2233 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4));
2234 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2235 PGM_LOCK_VOID(pVM);
2236
2237 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_AMD64_PAGE_MASK;
2238 PPGMPAGE pPage;
2239 /** @todo Nested VMX: convert GCPhysCR3 from nested-guest physical to
2240 * guest-physical address here. */
2241 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
2242 if (RT_SUCCESS(rc))
2243 {
2244 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)ppPml4);
2245 if (RT_SUCCESS(rc))
2246 {
2247# ifdef IN_RING3
2248 pVCpu->pgm.s.pGstAmd64Pml4R0 = NIL_RTR0PTR;
2249 pVCpu->pgm.s.pGstAmd64Pml4R3 = *ppPml4;
2250# else
2251 pVCpu->pgm.s.pGstAmd64Pml4R3 = NIL_RTR3PTR;
2252 pVCpu->pgm.s.pGstAmd64Pml4R0 = *ppPml4;
2253# endif
2254 PGM_UNLOCK(pVM);
2255 return VINF_SUCCESS;
2256 }
2257 }
2258
2259 PGM_UNLOCK(pVM);
2260 *ppPml4 = NULL;
2261 return rc;
2262}
2263
2264
2265#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2266/**
2267 * Performs the lazy mapping of the guest PML4 table when using EPT paging.
2268 *
2269 * @returns VBox status code.
2270 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
2271 * @param   ppEptPml4   Where to return the pointer to the mapping. This will
2272 *                      always be set.
2273 */
2274int pgmGstLazyMapEptPml4(PVMCPUCC pVCpu, PEPTPML4 *ppEptPml4)
2275{
2276 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstEptPml4));
2277 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2278 PGM_LOCK_VOID(pVM);
2279
2280 RTGCPHYS const GCPhysEpt = pVCpu->pgm.s.uEptPtr & EPT_EPTP_PG_MASK;
2281 PPGMPAGE pPage;
2282 int rc = pgmPhysGetPageEx(pVM, GCPhysEpt, &pPage);
2283 if (RT_SUCCESS(rc))
2284 {
2285 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysEpt, (void **)ppEptPml4);
2286 if (RT_SUCCESS(rc))
2287 {
2288# ifdef IN_RING3
2289 pVCpu->pgm.s.pGstEptPml4R0 = NIL_RTR0PTR;
2290 pVCpu->pgm.s.pGstEptPml4R3 = *ppEptPml4;
2291# else
2292 pVCpu->pgm.s.pGstEptPml4R3 = NIL_RTR3PTR;
2293 pVCpu->pgm.s.pGstEptPml4R0 = *ppEptPml4;
2294# endif
2295 PGM_UNLOCK(pVM);
2296 return VINF_SUCCESS;
2297 }
2298 }
2299
2300 PGM_UNLOCK(pVM);
2301 *ppEptPml4 = NULL;
2302 return rc;
2303}
2304#endif
2305
2306
2307/**
2308 * Gets the current CR3 register value for the shadow memory context.
2309 * @returns CR3 value.
2310 * @param pVCpu The cross context virtual CPU structure.
2311 */
2312VMMDECL(RTHCPHYS) PGMGetHyperCR3(PVMCPU pVCpu)
2313{
2314 PPGMPOOLPAGE pPoolPage = pVCpu->pgm.s.CTX_SUFF(pShwPageCR3);
2315 AssertPtrReturn(pPoolPage, NIL_RTHCPHYS);
2316 return pPoolPage->Core.Key;
2317}
2318
2319
2320/**
2321 * Forces lazy remapping of the guest's PAE page-directory structures.
2322 *
2323 * @param pVCpu The cross context virtual CPU structure.
2324 */
2325static void pgmGstFlushPaePdpes(PVMCPU pVCpu)
2326{
2327 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->pgm.s.aGCPhysGstPaePDs); i++)
2328 {
2329 pVCpu->pgm.s.apGstPaePDsR3[i] = 0;
2330 pVCpu->pgm.s.apGstPaePDsR0[i] = 0;
2331 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
2332 }
2333}
2334
2335
2336/**
2337 * Gets the PGM CR3 value masked according to the current guest mode.
2338 *
2339 * @returns The masked PGM CR3 value.
2340 * @param pVCpu The cross context virtual CPU structure.
2341 * @param uCr3 The raw guest CR3 value.
2342 */
2343DECLINLINE(RTGCPHYS) pgmGetGuestMaskedCr3(PVMCPUCC pVCpu, uint64_t uCr3)
2344{
2345 RTGCPHYS GCPhysCR3;
2346 switch (pVCpu->pgm.s.enmGuestMode)
2347 {
2348 case PGMMODE_PAE:
2349 case PGMMODE_PAE_NX:
2350 GCPhysCR3 = (RTGCPHYS)(uCr3 & X86_CR3_PAE_PAGE_MASK);
2351 break;
2352 case PGMMODE_AMD64:
2353 case PGMMODE_AMD64_NX:
2354 GCPhysCR3 = (RTGCPHYS)(uCr3 & X86_CR3_AMD64_PAGE_MASK);
2355 break;
2356#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2357 case PGMMODE_EPT:
2358 GCPhysCR3 = (RTGCPHYS)(uCr3 & X86_CR3_EPT_PAGE_MASK);
2359 break;
2360#endif
2361 default:
2362 GCPhysCR3 = (RTGCPHYS)(uCr3 & X86_CR3_PAGE_MASK);
2363 break;
2364 }
2365 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
2366 return GCPhysCR3;
2367}
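
/*
 * Worked example (illustrative, assuming the usual x86 alignment rules): in
 * PAE mode the PDPT only needs 32 byte alignment, so a raw CR3 of 0x12345678
 * masks down to 0x12345660, while 32-bit and AMD64 paging mask down to the
 * 4 KB page boundary, i.e. 0x12345000.
 */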
2368
2369
2370/**
2371 * Performs and schedules necessary updates following a CR3 load or reload.
2372 *
2373 * This will normally involve mapping the guest PD or nPDPT.
2374 *
2375 * @returns VBox status code.
2376 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
2377 * safely be ignored and overridden since the FF will be set too then.
2378 * @param pVCpu The cross context virtual CPU structure.
2379 * @param cr3 The new cr3.
2380 * @param fGlobal Indicates whether this is a global flush or not.
2381 * @param fPdpesMapped Whether the PAE PDPEs (and PDPT) have been mapped.
2382 */
2383VMMDECL(int) PGMFlushTLB(PVMCPUCC pVCpu, uint64_t cr3, bool fGlobal, bool fPdpesMapped)
2384{
2385 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLB), a);
2386 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2387
2388 VMCPU_ASSERT_EMT(pVCpu);
2389
2390 /*
2391     * Always flag the necessary updates; this is required for hardware acceleration.
2392 */
2393 /** @todo optimize this, it shouldn't always be necessary. */
2394 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2395 if (fGlobal)
2396 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2397 LogFlow(("PGMFlushTLB: cr3=%RX64 OldCr3=%RX64 fGlobal=%d\n", cr3, pVCpu->pgm.s.GCPhysCR3, fGlobal));
2398
2399 /*
2400 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
2401 */
2402 int rc = VINF_SUCCESS;
2403 RTGCPHYS const GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
2404 RTGCPHYS const GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, cr3);
2405 if (GCPhysOldCR3 != GCPhysCR3)
2406 {
2407 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2408 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2409 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
2410
2411 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2412 rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3, fPdpesMapped);
2413 if (RT_LIKELY(rc == VINF_SUCCESS))
2414 { }
2415 else
2416 {
2417 AssertMsg(rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
2418 Assert(VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_PGM_SYNC_CR3));
2419 pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3;
2420 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MAP_CR3;
2421 }
2422
2423 if (fGlobal)
2424 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBNewCR3Global));
2425 else
2426 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBNewCR3));
2427 }
2428 else
2429 {
2430#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
2431 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
2432 if (pPool->cDirtyPages)
2433 {
2434 PGM_LOCK_VOID(pVM);
2435 pgmPoolResetDirtyPages(pVM);
2436 PGM_UNLOCK(pVM);
2437 }
2438#endif
2439 if (fGlobal)
2440 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBSameCR3Global));
2441 else
2442 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBSameCR3));
2443
2444 /*
2445 * Flush PAE PDPTEs.
2446 */
2447 if (PGMMODE_IS_PAE(pVCpu->pgm.s.enmGuestMode))
2448 pgmGstFlushPaePdpes(pVCpu);
2449 }
2450
2451 IEMTlbInvalidateAll(pVCpu, false /*fVmm*/);
2452 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLB), a);
2453 return rc;
2454}
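
/*
 * Example (illustrative sketch, compiled out): how an emulation of
 * 'mov cr3, reg' might feed PGMFlushTLB.  Following the convention used by
 * PGMSyncCR3 below, the flush is treated as global when CR4.PGE is clear;
 * the caller-side logic and helper name are assumptions for illustration.
 */
#if 0
static int pgmExampleEmulateMovCr3(PVMCPUCC pVCpu, uint64_t uNewCr3)
{
    bool const fGlobal = !(CPUMGetGuestCR4(pVCpu) & X86_CR4_PGE);
    return PGMFlushTLB(pVCpu, uNewCr3, fGlobal, false /*fPdpesMapped*/);
}
#endif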
2455
2456
2457/**
2458 * Performs and schedules necessary updates following a CR3 load or reload when
2459 * using nested or extended paging.
2460 *
2461 * This API is an alternative to PGMFlushTLB that avoids actually flushing the
2462 * TLB and triggering a SyncCR3.
2463 *
2464 * This will normally involve mapping the guest PD or nPDPT.
2465 *
2466 * @returns VBox status code.
2467 * @retval VINF_SUCCESS.
2468 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync (not for nested
2469 * paging modes). This can safely be ignored and overridden since the
2470 * FF will be set too then.
2471 * @param pVCpu The cross context virtual CPU structure.
2472 * @param cr3 The new CR3.
2473 * @param fPdpesMapped Whether the PAE PDPEs (and PDPT) have been mapped.
2474 */
2475VMMDECL(int) PGMUpdateCR3(PVMCPUCC pVCpu, uint64_t cr3, bool fPdpesMapped)
2476{
2477 VMCPU_ASSERT_EMT(pVCpu);
2478 LogFlow(("PGMUpdateCR3: cr3=%RX64 OldCr3=%RX64\n", cr3, pVCpu->pgm.s.GCPhysCR3));
2479
2480 /* We assume we're only called in nested paging mode. */
2481 Assert(pVCpu->CTX_SUFF(pVM)->pgm.s.fNestedPaging || pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
2482
2483 /*
2484 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
2485 */
2486 int rc = VINF_SUCCESS;
2487 RTGCPHYS const GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, cr3);
2488 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
2489 {
2490 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2491 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2492 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
2493
2494 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2495 rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3, fPdpesMapped);
2496
2497 AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */ /** @todo this isn't true for the mac, but we need hw to test/fix this. */
2498 }
2499 /*
2500 * Flush PAE PDPTEs.
2501 */
2502 else if (PGMMODE_IS_PAE(pVCpu->pgm.s.enmGuestMode))
2503 pgmGstFlushPaePdpes(pVCpu);
2504
2505 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
2506 return rc;
2507}
2508
2509
2510/**
2511 * Synchronize the paging structures.
2512 *
2513 * This function is called in response to the VMCPU_FF_PGM_SYNC_CR3 and
2514 * VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL. Those two force action flags are set
2515 * in several places, most importantly whenever the CR3 is loaded.
2516 *
2517 * @returns VBox status code. May return VINF_PGM_SYNC_CR3 in RC/R0.
2518 * @retval VERR_PGM_NO_HYPERVISOR_ADDRESS in raw-mode when we're unable to map
2519 * the VMM into guest context.
2520 * @param pVCpu The cross context virtual CPU structure.
2521 * @param cr0 Guest context CR0 register
2522 * @param cr3 Guest context CR3 register
2523 * @param cr4 Guest context CR4 register
2524 * @param fGlobal Including global page directories or not
2525 */
2526VMMDECL(int) PGMSyncCR3(PVMCPUCC pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
2527{
2528 int rc;
2529
2530 VMCPU_ASSERT_EMT(pVCpu);
2531
2532 /*
2533 * The pool may have pending stuff and even require a return to ring-3 to
2534 * clear the whole thing.
2535 */
2536 rc = pgmPoolSyncCR3(pVCpu);
2537 if (rc != VINF_SUCCESS)
2538 return rc;
2539
2540 /*
2541 * We might be called when we shouldn't.
2542 *
2543 * The mode switching will ensure that the PD is resynced after every mode
2544 * switch. So, if we find ourselves here when in protected or real mode
2545 * we can safely clear the FF and return immediately.
2546 */
2547 if (pVCpu->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
2548 {
2549 Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
2550 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2551 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2552 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2553 return VINF_SUCCESS;
2554 }
2555
2556 /* If global pages are not supported, then all flushes are global. */
2557 if (!(cr4 & X86_CR4_PGE))
2558 fGlobal = true;
2559 LogFlow(("PGMSyncCR3: cr0=%RX64 cr3=%RX64 cr4=%RX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
2560 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)));
2561
2562 /*
2563 * Check if we need to finish an aborted MapCR3 call (see PGMFlushTLB).
2564 * This should be done before SyncCR3.
2565 */
2566 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MAP_CR3)
2567 {
2568 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MAP_CR3;
2569
2570 RTGCPHYS const GCPhysCR3Old = pVCpu->pgm.s.GCPhysCR3; NOREF(GCPhysCR3Old);
2571 RTGCPHYS const GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, cr3);
2572 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
2573 {
2574 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2575 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2576 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
2577 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2578 rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3, false /* fPdpesMapped */);
2579 }
2580
2581 /* Make sure we check for pending pgm pool syncs as we clear VMCPU_FF_PGM_SYNC_CR3 later on! */
2582 if ( rc == VINF_PGM_SYNC_CR3
2583 || (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL))
2584 {
2585 Log(("PGMSyncCR3: pending pgm pool sync after MapCR3!\n"));
2586#ifdef IN_RING3
2587 rc = pgmPoolSyncCR3(pVCpu);
2588#else
2589 if (rc == VINF_PGM_SYNC_CR3)
2590 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3Old;
2591 return VINF_PGM_SYNC_CR3;
2592#endif
2593 }
2594 AssertRCReturn(rc, rc);
2595 AssertRCSuccessReturn(rc, VERR_IPE_UNEXPECTED_INFO_STATUS);
2596 }
2597
2598 /*
2599 * Let the 'Bth' function do the work and we'll just keep track of the flags.
2600 */
2601 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
2602
2603 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2604 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2605 AssertReturn(g_aPgmBothModeData[idxBth].pfnSyncCR3, VERR_PGM_MODE_IPE);
2606 rc = g_aPgmBothModeData[idxBth].pfnSyncCR3(pVCpu, cr0, cr3, cr4, fGlobal);
2607
2608 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
2609 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
2610 if (rc == VINF_SUCCESS)
2611 {
2612 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)
2613 {
2614 /* Go back to ring 3 if a pgm pool sync is again pending. */
2615 return VINF_PGM_SYNC_CR3;
2616 }
2617
2618 if (!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
2619 {
2620 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2621 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2622 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2623 }
2624 }
2625
2626 /*
2627     * Now flush the TLBs (guest context).
2628 */
2629 if (rc == VINF_SUCCESS)
2630 PGM_INVL_VCPU_TLBS(pVCpu);
2631 return rc;
2632}
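
/*
 * Example (illustrative sketch, compiled out): the execution loop typically
 * only calls PGMSyncCR3 when one of the sync force action flags is pending.
 * The surrounding structure and the CPUM getters are assumptions for
 * illustration.
 */
#if 0
static int pgmExampleServiceSyncCr3(PVMCPUCC pVCpu)
{
    if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
    {
        bool const fGlobal = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
        return PGMSyncCR3(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR3(pVCpu),
                          CPUMGetGuestCR4(pVCpu), fGlobal);
    }
    return VINF_SUCCESS;
}
#endif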
2633
2634
2635/**
2636 * Maps all the PAE PDPE entries.
2637 *
2638 * @returns VBox status code.
2639 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2640 * @param paPaePdpes The new PAE PDPE values.
2641 *
2642 * @remarks This function may be invoked during the process of changing the guest
2643 * paging mode to PAE, hence the guest state (CR0, CR4 etc.) may not
2644 * reflect PAE paging just yet.
2645 */
2646VMM_INT_DECL(int) PGMGstMapPaePdpes(PVMCPUCC pVCpu, PCX86PDPE paPaePdpes)
2647{
2648 Assert(paPaePdpes);
2649 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
2650 {
2651 X86PDPE const PaePdpe = paPaePdpes[i];
2652
2653 /*
2654 * In some cases (e.g. in SVM with nested paging) the validation of the PAE PDPEs
2655         * is deferred.[1] Also, different situations require different handling of invalid
2656 * PDPE entries. Here we assume the caller has already validated or doesn't require
2657 * validation of the PDPEs.
2658 *
2659 * [1] -- See AMD spec. 15.25.10 "Legacy PAE Mode".
2660 */
2661 if ((PaePdpe.u & (pVCpu->pgm.s.fGstPaeMbzPdpeMask | X86_PDPE_P)) == X86_PDPE_P)
2662 {
2663 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2664 RTHCPTR HCPtr;
2665 RTGCPHYS const GCPhys = PGM_A20_APPLY(pVCpu, PaePdpe.u & X86_PDPE_PG_MASK);
2666
2667 PGM_LOCK_VOID(pVM);
2668 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
2669 AssertReturnStmt(pPage, PGM_UNLOCK(pVM), VERR_PGM_INVALID_PDPE_ADDR);
2670 int const rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)&HCPtr);
2671 PGM_UNLOCK(pVM);
2672 if (RT_SUCCESS(rc))
2673 {
2674# ifdef IN_RING3
2675 pVCpu->pgm.s.apGstPaePDsR3[i] = (PX86PDPAE)HCPtr;
2676 pVCpu->pgm.s.apGstPaePDsR0[i] = NIL_RTR0PTR;
2677# else
2678 pVCpu->pgm.s.apGstPaePDsR3[i] = NIL_RTR3PTR;
2679 pVCpu->pgm.s.apGstPaePDsR0[i] = (PX86PDPAE)HCPtr;
2680# endif
2681 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = GCPhys;
2682 continue;
2683 }
2684            AssertMsgFailed(("PGMGstMapPaePdpes: rc=%d GCPhys=%RGp i=%d\n", rc, GCPhys, i));
2685 }
2686 pVCpu->pgm.s.apGstPaePDsR3[i] = 0;
2687 pVCpu->pgm.s.apGstPaePDsR0[i] = 0;
2688 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
2689 }
2690
2691 return VINF_SUCCESS;
2692}
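
/*
 * Example (illustrative sketch, compiled out): callers sourcing the PDPEs
 * from somewhere other than guest CR3 (e.g. a VMCS) are expected to validate
 * them first, since PGMGstMapPaePdpes assumes that has already been done.
 * The helper name is an assumption for illustration.
 */
#if 0
static int pgmExampleLoadPaePdpes(PVMCPUCC pVCpu, PCX86PDPE paPaePdpes)
{
    if (!PGMGstArePaePdpesValid(pVCpu, paPaePdpes))
        return VERR_PGM_PAE_PDPE_RSVD;
    return PGMGstMapPaePdpes(pVCpu, paPaePdpes);
}
#endif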
2693
2694
2695/**
2696 * Validates and maps the PDPT and PAE PDPEs referenced by the given CR3.
2697 *
2698 * @returns VBox status code.
2699 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2700 * @param cr3 The guest CR3 value.
2701 *
2702 * @remarks This function may be invoked during the process of changing the guest
2703 * paging mode to PAE but the guest state (CR0, CR4 etc.) may not reflect
2704 * PAE paging just yet.
2705 */
2706VMM_INT_DECL(int) PGMGstMapPaePdpesAtCr3(PVMCPUCC pVCpu, uint64_t cr3)
2707{
2708 /*
2709 * Read the page-directory-pointer table (PDPT) at CR3.
2710 */
2711 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2712 RTGCPHYS GCPhysCR3 = (cr3 & X86_CR3_PAE_PAGE_MASK);
2713 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
2714
2715#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2716 if (CPUMIsGuestVmxEptPaePagingEnabled(pVCpu))
2717 {
2718 PGMPTWALK Walk;
2719 PGMPTWALKGST GstWalk;
2720 int const rc = pgmGstSlatWalkPhys(pVCpu, PGMSLAT_EPT, GCPhysCR3, &Walk, &GstWalk);
2721 if (RT_SUCCESS(rc))
2722 GCPhysCR3 = Walk.GCPhys;
2723 else
2724 {
2725 /** @todo Raise EPT violation VM-exit. */
2726 return VERR_NOT_IMPLEMENTED;
2727 }
2728 }
2729#endif
2730
2731 PGM_LOCK_VOID(pVM);
2732 PPGMPAGE pPageCR3 = pgmPhysGetPage(pVM, GCPhysCR3);
2733 AssertReturnStmt(pPageCR3, PGM_UNLOCK(pVM), VERR_PGM_INVALID_CR3_ADDR);
2734
2735 X86PDPE aPaePdpes[X86_PG_PAE_PDPE_ENTRIES];
2736 RTHCPTR HCPtrGuestCr3;
2737 int rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPageCR3, GCPhysCR3, (void **)&HCPtrGuestCr3);
2738 PGM_UNLOCK(pVM);
2739 AssertRCReturn(rc, rc);
2740 memcpy(&aPaePdpes[0], HCPtrGuestCr3, sizeof(aPaePdpes));
2741
2742 /*
2743 * Validate the page-directory-pointer table entries (PDPE).
2744 */
2745 if (PGMGstArePaePdpesValid(pVCpu, &aPaePdpes[0]))
2746 {
2747 /*
2748 * Map the PDPT.
2749 * We deliberately don't update PGM's GCPhysCR3 here as it's expected
2750 * that PGMFlushTLB will be called soon and only a change to CR3 then
2751 * will cause the shadow page tables to be updated.
2752 */
2753# ifdef IN_RING3
2754 pVCpu->pgm.s.pGstPaePdptR3 = (PX86PDPT)HCPtrGuestCr3;
2755 pVCpu->pgm.s.pGstPaePdptR0 = NIL_RTR0PTR;
2756# else
2757 pVCpu->pgm.s.pGstPaePdptR3 = NIL_RTR3PTR;
2758 pVCpu->pgm.s.pGstPaePdptR0 = (PX86PDPT)HCPtrGuestCr3;
2759# endif
2760
2761 /*
2762 * Update CPUM.
2763 * We do this prior to mapping the PDPEs to keep the order consistent
2764 * with what's used in HM. In practice, it doesn't really matter.
2765 */
2766 CPUMSetGuestPaePdpes(pVCpu, &aPaePdpes[0]);
2767
2768 /*
2769 * Map the PDPEs.
2770 */
2771 return PGMGstMapPaePdpes(pVCpu, &aPaePdpes[0]);
2772 }
2773 return VERR_PGM_PAE_PDPE_RSVD;
2774}
2775
2776
2777/**
2778 * Called whenever CR0 or CR4 changes in a way which may affect the paging mode.
2779 *
2780 * @returns VBox status code, with the following informational code for
2781 * VM scheduling.
2782 * @retval VINF_SUCCESS if there was no change, or it was successfully dealt with.
2783 * @retval VINF_EM_SUSPEND or VINF_EM_OFF on a fatal runtime error. (R3 only)
2784 *
2785 * @param pVCpu The cross context virtual CPU structure.
2786 * @param cr0 The new cr0.
2787 * @param cr4 The new cr4.
2788 * @param efer The new extended feature enable register.
2789 * @param fForce Whether to force a mode change.
2790 */
2791VMMDECL(int) PGMChangeMode(PVMCPUCC pVCpu, uint64_t cr0, uint64_t cr4, uint64_t efer, bool fForce)
2792{
2793 VMCPU_ASSERT_EMT(pVCpu);
2794
2795 /*
2796 * Calc the new guest mode.
2797 *
2798 * Note! We check PG before PE and without requiring PE because of the
2799 * special AMD-V paged real mode (APM vol 2, rev 3.28, 15.9).
2800 */
2801 PGMMODE enmGuestMode;
2802 if (cr0 & X86_CR0_PG)
2803 {
2804 if (!(cr4 & X86_CR4_PAE))
2805 {
2806 bool const fPse = !!(cr4 & X86_CR4_PSE);
2807 if (pVCpu->pgm.s.fGst32BitPageSizeExtension != fPse)
2808 Log(("PGMChangeMode: CR4.PSE %d -> %d\n", pVCpu->pgm.s.fGst32BitPageSizeExtension, fPse));
2809 pVCpu->pgm.s.fGst32BitPageSizeExtension = fPse;
2810 enmGuestMode = PGMMODE_32_BIT;
2811 }
2812 else if (!(efer & MSR_K6_EFER_LME))
2813 {
2814 if (!(efer & MSR_K6_EFER_NXE))
2815 enmGuestMode = PGMMODE_PAE;
2816 else
2817 enmGuestMode = PGMMODE_PAE_NX;
2818 }
2819 else
2820 {
2821 if (!(efer & MSR_K6_EFER_NXE))
2822 enmGuestMode = PGMMODE_AMD64;
2823 else
2824 enmGuestMode = PGMMODE_AMD64_NX;
2825 }
2826 }
2827 else if (!(cr0 & X86_CR0_PE))
2828 enmGuestMode = PGMMODE_REAL;
2829 else
2830 enmGuestMode = PGMMODE_PROTECTED;
2831
2832 /*
2833 * Did it change?
2834 */
2835 if ( !fForce
2836 && pVCpu->pgm.s.enmGuestMode == enmGuestMode)
2837 return VINF_SUCCESS;
2838
2839 /* Flush the TLB */
2840 PGM_INVL_VCPU_TLBS(pVCpu);
2841 return PGMHCChangeMode(pVCpu->CTX_SUFF(pVM), pVCpu, enmGuestMode);
2842}
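
/*
 * Summary (informal, derived from the checks above) of how the control
 * register bits map to the guest paging mode:
 *
 *      CR0.PG  CR0.PE  CR4.PAE  EFER.LME  EFER.NXE     mode
 *        0       0       -        -         -          PGMMODE_REAL
 *        0       1       -        -         -          PGMMODE_PROTECTED
 *        1       x       0        -         -          PGMMODE_32_BIT
 *        1       x       1        0        0/1         PGMMODE_PAE / PGMMODE_PAE_NX
 *        1       x       1        1        0/1         PGMMODE_AMD64 / PGMMODE_AMD64_NX
 *
 * (PE is 'x' in the PG=1 rows because of the AMD-V paged real mode special
 * case noted above.)
 */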
2843
2844
2845/**
2846 * Converts a PGMMODE value to a PGM_TYPE_* \#define.
2847 *
2848 * @returns PGM_TYPE_*.
2849 * @param pgmMode The mode value to convert.
2850 */
2851DECLINLINE(unsigned) pgmModeToType(PGMMODE pgmMode)
2852{
2853 switch (pgmMode)
2854 {
2855 case PGMMODE_REAL: return PGM_TYPE_REAL;
2856 case PGMMODE_PROTECTED: return PGM_TYPE_PROT;
2857 case PGMMODE_32_BIT: return PGM_TYPE_32BIT;
2858 case PGMMODE_PAE:
2859 case PGMMODE_PAE_NX: return PGM_TYPE_PAE;
2860 case PGMMODE_AMD64:
2861 case PGMMODE_AMD64_NX: return PGM_TYPE_AMD64;
2862 case PGMMODE_NESTED_32BIT: return PGM_TYPE_NESTED_32BIT;
2863 case PGMMODE_NESTED_PAE: return PGM_TYPE_NESTED_PAE;
2864 case PGMMODE_NESTED_AMD64: return PGM_TYPE_NESTED_AMD64;
2865 case PGMMODE_EPT: return PGM_TYPE_EPT;
2866 case PGMMODE_NONE: return PGM_TYPE_NONE;
2867 default:
2868 AssertFatalMsgFailed(("pgmMode=%d\n", pgmMode));
2869 }
2870}
2871
2872
2873/**
2874 * Calculates the shadow paging mode.
2875 *
2876 * @returns The shadow paging mode.
2877 * @param pVM The cross context VM structure.
2878 * @param enmGuestMode The guest mode.
2879 * @param enmHostMode The host mode.
2880 * @param enmShadowMode The current shadow mode.
2881 */
2882static PGMMODE pgmCalcShadowMode(PVMCC pVM, PGMMODE enmGuestMode, SUPPAGINGMODE enmHostMode, PGMMODE enmShadowMode)
2883{
2884 switch (enmGuestMode)
2885 {
2886 /*
2887 * When switching to real or protected mode we don't change
2888 * anything since it's likely that we'll switch back pretty soon.
2889 *
2890 * During pgmR3InitPaging we'll end up here with PGMMODE_INVALID
2891         * and this function is then supposed to determine which shadow paging
2892         * mode and switcher to use during init.
2893 */
2894 case PGMMODE_REAL:
2895 case PGMMODE_PROTECTED:
2896 if ( enmShadowMode != PGMMODE_INVALID
2897 && VM_IS_RAW_MODE_ENABLED(pVM) /* always switch in hm and nem modes! */)
2898 break; /* (no change) */
2899
2900 switch (enmHostMode)
2901 {
2902 case SUPPAGINGMODE_32_BIT:
2903 case SUPPAGINGMODE_32_BIT_GLOBAL:
2904 enmShadowMode = PGMMODE_32_BIT;
2905 break;
2906
2907 case SUPPAGINGMODE_PAE:
2908 case SUPPAGINGMODE_PAE_NX:
2909 case SUPPAGINGMODE_PAE_GLOBAL:
2910 case SUPPAGINGMODE_PAE_GLOBAL_NX:
2911 enmShadowMode = PGMMODE_PAE;
2912 break;
2913
2914 case SUPPAGINGMODE_AMD64:
2915 case SUPPAGINGMODE_AMD64_GLOBAL:
2916 case SUPPAGINGMODE_AMD64_NX:
2917 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
2918 enmShadowMode = PGMMODE_PAE;
2919 break;
2920
2921 default:
2922 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
2923 }
2924 break;
2925
2926 case PGMMODE_32_BIT:
2927 switch (enmHostMode)
2928 {
2929 case SUPPAGINGMODE_32_BIT:
2930 case SUPPAGINGMODE_32_BIT_GLOBAL:
2931 enmShadowMode = PGMMODE_32_BIT;
2932 break;
2933
2934 case SUPPAGINGMODE_PAE:
2935 case SUPPAGINGMODE_PAE_NX:
2936 case SUPPAGINGMODE_PAE_GLOBAL:
2937 case SUPPAGINGMODE_PAE_GLOBAL_NX:
2938 enmShadowMode = PGMMODE_PAE;
2939 break;
2940
2941 case SUPPAGINGMODE_AMD64:
2942 case SUPPAGINGMODE_AMD64_GLOBAL:
2943 case SUPPAGINGMODE_AMD64_NX:
2944 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
2945 enmShadowMode = PGMMODE_PAE;
2946 break;
2947
2948 default:
2949 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
2950 }
2951 break;
2952
2953 case PGMMODE_PAE:
2954 case PGMMODE_PAE_NX: /** @todo This might require more switchers and guest+both modes. */
2955 switch (enmHostMode)
2956 {
2957 case SUPPAGINGMODE_32_BIT:
2958 case SUPPAGINGMODE_32_BIT_GLOBAL:
2959 enmShadowMode = PGMMODE_PAE;
2960 break;
2961
2962 case SUPPAGINGMODE_PAE:
2963 case SUPPAGINGMODE_PAE_NX:
2964 case SUPPAGINGMODE_PAE_GLOBAL:
2965 case SUPPAGINGMODE_PAE_GLOBAL_NX:
2966 enmShadowMode = PGMMODE_PAE;
2967 break;
2968
2969 case SUPPAGINGMODE_AMD64:
2970 case SUPPAGINGMODE_AMD64_GLOBAL:
2971 case SUPPAGINGMODE_AMD64_NX:
2972 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
2973 enmShadowMode = PGMMODE_PAE;
2974 break;
2975
2976 default:
2977 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
2978 }
2979 break;
2980
2981 case PGMMODE_AMD64:
2982 case PGMMODE_AMD64_NX:
2983 switch (enmHostMode)
2984 {
2985 case SUPPAGINGMODE_32_BIT:
2986 case SUPPAGINGMODE_32_BIT_GLOBAL:
2987 enmShadowMode = PGMMODE_AMD64;
2988 break;
2989
2990 case SUPPAGINGMODE_PAE:
2991 case SUPPAGINGMODE_PAE_NX:
2992 case SUPPAGINGMODE_PAE_GLOBAL:
2993 case SUPPAGINGMODE_PAE_GLOBAL_NX:
2994 enmShadowMode = PGMMODE_AMD64;
2995 break;
2996
2997 case SUPPAGINGMODE_AMD64:
2998 case SUPPAGINGMODE_AMD64_GLOBAL:
2999 case SUPPAGINGMODE_AMD64_NX:
3000 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3001 enmShadowMode = PGMMODE_AMD64;
3002 break;
3003
3004 default:
3005 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
3006 }
3007 break;
3008
3009 default:
3010 AssertLogRelMsgFailedReturn(("enmGuestMode=%d\n", enmGuestMode), PGMMODE_INVALID);
3011 }
3012
3013 /*
3014 * Override the shadow mode when NEM or nested paging is active.
3015 */
3016 if (VM_IS_NEM_ENABLED(pVM))
3017 {
3018 pVM->pgm.s.fNestedPaging = true;
3019 enmShadowMode = PGMMODE_NONE;
3020 }
3021 else
3022 {
3023 bool fNestedPaging = HMIsNestedPagingActive(pVM);
3024 pVM->pgm.s.fNestedPaging = fNestedPaging;
3025 if (fNestedPaging)
3026 {
3027 if (HMIsVmxActive(pVM))
3028 enmShadowMode = PGMMODE_EPT;
3029 else
3030 {
3031 /* The nested SVM paging depends on the host one. */
3032 Assert(HMIsSvmActive(pVM));
3033 if ( enmGuestMode == PGMMODE_AMD64
3034 || enmGuestMode == PGMMODE_AMD64_NX)
3035 enmShadowMode = PGMMODE_NESTED_AMD64;
3036 else
3037 switch (pVM->pgm.s.enmHostMode)
3038 {
3039 case SUPPAGINGMODE_32_BIT:
3040 case SUPPAGINGMODE_32_BIT_GLOBAL:
3041 enmShadowMode = PGMMODE_NESTED_32BIT;
3042 break;
3043
3044 case SUPPAGINGMODE_PAE:
3045 case SUPPAGINGMODE_PAE_GLOBAL:
3046 case SUPPAGINGMODE_PAE_NX:
3047 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3048 enmShadowMode = PGMMODE_NESTED_PAE;
3049 break;
3050
3051 case SUPPAGINGMODE_AMD64:
3052 case SUPPAGINGMODE_AMD64_GLOBAL:
3053 case SUPPAGINGMODE_AMD64_NX:
3054 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3055 enmShadowMode = PGMMODE_NESTED_AMD64;
3056 break;
3057
3058 default:
3059 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode), PGMMODE_INVALID);
3060 }
3061 }
3062 }
3063#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
3064 else
3065 {
3066 /* Nested paging is a requirement for nested VT-x. */
3067 AssertLogRelMsgReturn(enmGuestMode != PGMMODE_EPT, ("enmHostMode=%d\n", pVM->pgm.s.enmHostMode), PGMMODE_INVALID);
3068 }
3069#endif
3070 }
3071
3072 return enmShadowMode;
3073}
3074
3075
3076/**
3077 * Performs the actual mode change.
3078 * This is called by PGMChangeMode and pgmR3InitPaging().
3079 *
3080 * @returns VBox status code. May suspend or power off the VM on error, but this
3081 * will trigger using FFs and not informational status codes.
3082 *
3083 * @param pVM The cross context VM structure.
3084 * @param pVCpu The cross context virtual CPU structure.
3085 * @param enmGuestMode The new guest mode. This is assumed to be different from
3086 * the current mode.
3087 */
3088VMM_INT_DECL(int) PGMHCChangeMode(PVMCC pVM, PVMCPUCC pVCpu, PGMMODE enmGuestMode)
3089{
3090 Log(("PGMHCChangeMode: Guest mode: %s -> %s\n", PGMGetModeName(pVCpu->pgm.s.enmGuestMode), PGMGetModeName(enmGuestMode)));
3091 STAM_REL_COUNTER_INC(&pVCpu->pgm.s.cGuestModeChanges);
3092
3093 /*
3094 * Calc the shadow mode and switcher.
3095 */
3096 PGMMODE enmShadowMode = pgmCalcShadowMode(pVM, enmGuestMode, pVM->pgm.s.enmHostMode, pVCpu->pgm.s.enmShadowMode);
3097
3098 /*
3099 * Exit old mode(s).
3100 */
3101 /* shadow */
3102 if (enmShadowMode != pVCpu->pgm.s.enmShadowMode)
3103 {
3104 LogFlow(("PGMHCChangeMode: Shadow mode: %s -> %s\n", PGMGetModeName(pVCpu->pgm.s.enmShadowMode), PGMGetModeName(enmShadowMode)));
3105 uintptr_t idxOldShw = pVCpu->pgm.s.idxShadowModeData;
3106 if ( idxOldShw < RT_ELEMENTS(g_aPgmShadowModeData)
3107 && g_aPgmShadowModeData[idxOldShw].pfnExit)
3108 {
3109 int rc = g_aPgmShadowModeData[idxOldShw].pfnExit(pVCpu);
3110 AssertMsgRCReturn(rc, ("Exit failed for shadow mode %d: %Rrc\n", pVCpu->pgm.s.enmShadowMode, rc), rc);
3111 }
3112 }
3113 else
3114 LogFlow(("PGMHCChangeMode: Shadow mode remains: %s\n", PGMGetModeName(pVCpu->pgm.s.enmShadowMode)));
3115
3116 /* guest */
3117 uintptr_t const idxOldGst = pVCpu->pgm.s.idxGuestModeData;
3118 if ( idxOldGst < RT_ELEMENTS(g_aPgmGuestModeData)
3119 && g_aPgmGuestModeData[idxOldGst].pfnExit)
3120 {
3121 int rc = g_aPgmGuestModeData[idxOldGst].pfnExit(pVCpu);
3122 AssertMsgReturn(RT_SUCCESS(rc), ("Exit failed for guest mode %d: %Rrc\n", pVCpu->pgm.s.enmGuestMode, rc), rc);
3123 }
3124 pVCpu->pgm.s.GCPhysCR3 = NIL_RTGCPHYS;
3125
3126 /*
3127 * Change the paging mode data indexes.
3128 */
3129 uintptr_t idxNewGst = pVCpu->pgm.s.idxGuestModeData = pgmModeToType(enmGuestMode);
3130 AssertReturn(idxNewGst < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
3131 AssertReturn(g_aPgmGuestModeData[idxNewGst].uType == idxNewGst, VERR_PGM_MODE_IPE);
3132 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnGetPage, VERR_PGM_MODE_IPE);
3133 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnModifyPage, VERR_PGM_MODE_IPE);
3134 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnExit, VERR_PGM_MODE_IPE);
3135 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnEnter, VERR_PGM_MODE_IPE);
3136#ifdef IN_RING3
3137 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnRelocate, VERR_PGM_MODE_IPE);
3138#endif
3139
3140 uintptr_t const idxNewShw = pVCpu->pgm.s.idxShadowModeData = pgmModeToType(enmShadowMode);
3141 AssertReturn(idxNewShw < RT_ELEMENTS(g_aPgmShadowModeData), VERR_PGM_MODE_IPE);
3142 AssertReturn(g_aPgmShadowModeData[idxNewShw].uType == idxNewShw, VERR_PGM_MODE_IPE);
3143 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnGetPage, VERR_PGM_MODE_IPE);
3144 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnModifyPage, VERR_PGM_MODE_IPE);
3145 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnExit, VERR_PGM_MODE_IPE);
3146 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnEnter, VERR_PGM_MODE_IPE);
3147#ifdef IN_RING3
3148 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnRelocate, VERR_PGM_MODE_IPE);
3149#endif
3150
3151 uintptr_t const idxNewBth = pVCpu->pgm.s.idxBothModeData = (idxNewShw - PGM_TYPE_FIRST_SHADOW) * PGM_TYPE_END + idxNewGst;
3152 AssertReturn(g_aPgmBothModeData[idxNewBth].uShwType == idxNewShw, VERR_PGM_MODE_IPE);
3153 AssertReturn(g_aPgmBothModeData[idxNewBth].uGstType == idxNewGst, VERR_PGM_MODE_IPE);
3154 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnInvalidatePage, VERR_PGM_MODE_IPE);
3155 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnSyncCR3, VERR_PGM_MODE_IPE);
3156 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnPrefetchPage, VERR_PGM_MODE_IPE);
3157 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnVerifyAccessSyncPage, VERR_PGM_MODE_IPE);
3158 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnMapCR3, VERR_PGM_MODE_IPE);
3159 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnUnmapCR3, VERR_PGM_MODE_IPE);
3160 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnEnter, VERR_PGM_MODE_IPE);
3161#ifdef VBOX_STRICT
3162 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnAssertCR3, VERR_PGM_MODE_IPE);
3163#endif
3164
3165 /*
3166 * Enter new shadow mode (if changed).
3167 */
3168 if (enmShadowMode != pVCpu->pgm.s.enmShadowMode)
3169 {
3170 pVCpu->pgm.s.enmShadowMode = enmShadowMode;
3171 int rc = g_aPgmShadowModeData[idxNewShw].pfnEnter(pVCpu, enmGuestMode >= PGMMODE_AMD64);
3172 AssertLogRelMsgRCReturnStmt(rc, ("Entering enmShadowMode=%s failed: %Rrc\n", PGMGetModeName(enmShadowMode), rc),
3173 pVCpu->pgm.s.enmShadowMode = PGMMODE_INVALID, rc);
3174 }
3175
3176 /*
3177 * Always flag the necessary updates.
3178 */
3179 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3180
3181 /*
3182 * Enter the new guest and shadow+guest modes.
3183 */
3184 /* Calc the new CR3 value. */
3185 RTGCPHYS GCPhysCR3;
3186 switch (enmGuestMode)
3187 {
3188 case PGMMODE_REAL:
3189 case PGMMODE_PROTECTED:
3190 GCPhysCR3 = NIL_RTGCPHYS;
3191 break;
3192
3193 case PGMMODE_32_BIT:
3194 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_PAGE_MASK;
3195 break;
3196
3197 case PGMMODE_PAE_NX:
3198 case PGMMODE_PAE:
3199 if (!pVM->cpum.ro.GuestFeatures.fPae)
3200#ifdef IN_RING3 /** @todo r=bird: wrong place, probably hasn't really worked for a while. */
3201 return VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_FATAL, "PAEmode",
3202 N_("The guest is trying to switch to the PAE mode which is currently disabled by default in VirtualBox. PAE support can be enabled using the VM settings (System/Processor)"));
3203#else
3204 AssertLogRelMsgFailedReturn(("enmGuestMode=%s - Try enable PAE for the guest!\n", PGMGetModeName(enmGuestMode)), VERR_PGM_MODE_IPE);
3205
3206#endif
3207 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_PAE_PAGE_MASK;
3208 break;
3209
3210#ifdef VBOX_WITH_64_BITS_GUESTS
3211 case PGMMODE_AMD64_NX:
3212 case PGMMODE_AMD64:
3213 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_AMD64_PAGE_MASK;
3214 break;
3215#endif
3216#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
3217 case PGMMODE_EPT:
3218 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_EPT_PAGE_MASK;
3219 break;
3220#endif
3221 default:
3222 AssertLogRelMsgFailedReturn(("enmGuestMode=%d\n", enmGuestMode), VERR_PGM_MODE_IPE);
3223 }
3224
3225#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
3226 /* Update the guest SLAT mode if it's a nested-guest. */
3227 if ( CPUMIsGuestVmxEptPagingEnabled(pVCpu)
3228 && PGMMODE_WITH_PAGING(enmGuestMode))
3229 pVCpu->pgm.s.enmGuestSlatMode = PGMSLAT_EPT;
3230 else
3231 pVCpu->pgm.s.enmGuestSlatMode = PGMSLAT_DIRECT;
3232#endif
3233
3234 /* Enter the new guest mode. */
3235 pVCpu->pgm.s.enmGuestMode = enmGuestMode;
3236 int rc = g_aPgmGuestModeData[idxNewGst].pfnEnter(pVCpu, GCPhysCR3);
3237 int rc2 = g_aPgmBothModeData[idxNewBth].pfnEnter(pVCpu, GCPhysCR3);
3238
3239 /* Set the new guest CR3. */
3240 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
3241
3242 /* status codes. */
3243 AssertRC(rc);
3244 AssertRC(rc2);
3245 if (RT_SUCCESS(rc))
3246 {
3247 rc = rc2;
3248 if (RT_SUCCESS(rc)) /* no informational status codes. */
3249 rc = VINF_SUCCESS;
3250 }
3251
3252 /*
3253 * Notify HM.
3254 */
3255 HMHCChangedPagingMode(pVM, pVCpu, pVCpu->pgm.s.enmShadowMode, pVCpu->pgm.s.enmGuestMode);
3256 return rc;
3257}
3258
3259
3260/**
3261 * Called by CPUM or REM when CR0.WP changes to 1.
3262 *
3263 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3264 * @thread EMT
3265 */
3266VMMDECL(void) PGMCr0WpEnabled(PVMCPUCC pVCpu)
3267{
3268 /*
3269 * Netware WP0+RO+US hack cleanup when WP0 -> WP1.
3270 *
3271 * Use the counter to judge whether there might be pool pages with active
3272 * hacks in them. If there are, we will be running the risk of messing up
3273 * the guest by allowing it to write to read-only pages. Thus, we have to
3274 * clear the page pool ASAP if there is the slightest chance.
3275 */
3276 if (pVCpu->pgm.s.cNetwareWp0Hacks > 0)
3277 {
3278 Assert(pVCpu->CTX_SUFF(pVM)->cCpus == 1);
3279
3280 Log(("PGMCr0WpEnabled: %llu WP0 hacks active - clearing page pool\n", pVCpu->pgm.s.cNetwareWp0Hacks));
3281 pVCpu->pgm.s.cNetwareWp0Hacks = 0;
3282 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
3283 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3284 }
3285}
3286
3287
3288/**
3289 * Gets the current guest paging mode.
3290 *
3291 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
3292 *
3293 * @returns The current paging mode.
3294 * @param pVCpu The cross context virtual CPU structure.
3295 */
3296VMMDECL(PGMMODE) PGMGetGuestMode(PVMCPU pVCpu)
3297{
3298 return pVCpu->pgm.s.enmGuestMode;
3299}
3300
3301
3302/**
3303 * Gets the current shadow paging mode.
3304 *
3305 * @returns The current paging mode.
3306 * @param pVCpu The cross context virtual CPU structure.
3307 */
3308VMMDECL(PGMMODE) PGMGetShadowMode(PVMCPU pVCpu)
3309{
3310 return pVCpu->pgm.s.enmShadowMode;
3311}
3312
3313
3314/**
3315 * Gets the current host paging mode.
3316 *
3317 * @returns The current paging mode.
3318 * @param pVM The cross context VM structure.
3319 */
3320VMMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
3321{
3322 switch (pVM->pgm.s.enmHostMode)
3323 {
3324 case SUPPAGINGMODE_32_BIT:
3325 case SUPPAGINGMODE_32_BIT_GLOBAL:
3326 return PGMMODE_32_BIT;
3327
3328 case SUPPAGINGMODE_PAE:
3329 case SUPPAGINGMODE_PAE_GLOBAL:
3330 return PGMMODE_PAE;
3331
3332 case SUPPAGINGMODE_PAE_NX:
3333 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3334 return PGMMODE_PAE_NX;
3335
3336 case SUPPAGINGMODE_AMD64:
3337 case SUPPAGINGMODE_AMD64_GLOBAL:
3338 return PGMMODE_AMD64;
3339
3340 case SUPPAGINGMODE_AMD64_NX:
3341 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3342 return PGMMODE_AMD64_NX;
3343
3344 default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
3345 }
3346
3347 return PGMMODE_INVALID;
3348}
3349
3350
3351/**
3352 * Get mode name.
3353 *
3354 * @returns read-only name string.
3355 * @param enmMode The mode whose name is desired.
3356 */
3357VMMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
3358{
3359 switch (enmMode)
3360 {
3361 case PGMMODE_REAL: return "Real";
3362 case PGMMODE_PROTECTED: return "Protected";
3363 case PGMMODE_32_BIT: return "32-bit";
3364 case PGMMODE_PAE: return "PAE";
3365 case PGMMODE_PAE_NX: return "PAE+NX";
3366 case PGMMODE_AMD64: return "AMD64";
3367 case PGMMODE_AMD64_NX: return "AMD64+NX";
3368 case PGMMODE_NESTED_32BIT: return "Nested-32";
3369 case PGMMODE_NESTED_PAE: return "Nested-PAE";
3370 case PGMMODE_NESTED_AMD64: return "Nested-AMD64";
3371 case PGMMODE_EPT: return "EPT";
3372 case PGMMODE_NONE: return "None";
3373 default: return "unknown mode value";
3374 }
3375}
3376
3377
3378#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
3379/**
3380 * Gets the SLAT mode name.
3381 *
3382 * @returns The read-only SLAT mode descriptive string.
3383 * @param enmSlatMode The SLAT mode value.
3384 */
3385VMM_INT_DECL(const char *) PGMGetSlatModeName(PGMSLAT enmSlatMode)
3386{
3387 switch (enmSlatMode)
3388 {
3389 case PGMSLAT_DIRECT: return "Direct";
3390 case PGMSLAT_EPT: return "EPT";
3391 case PGMSLAT_32BIT: return "32-bit";
3392 case PGMSLAT_PAE: return "PAE";
3393 case PGMSLAT_AMD64: return "AMD64";
3394 default: return "Unknown";
3395 }
3396}
3397#endif
3398
3399
3400/**
3401 * Gets the physical address represented in the guest CR3 as PGM sees it.
3402 *
3403 * This is mainly for logging and debugging.
3404 *
3405 * @returns PGM's guest CR3 value.
3406 * @param pVCpu The cross context virtual CPU structure.
3407 */
3408VMM_INT_DECL(RTGCPHYS) PGMGetGuestCR3Phys(PVMCPU pVCpu)
3409{
3410 return pVCpu->pgm.s.GCPhysCR3;
3411}
3412
3413
3414
3415/**
3416 * Notification from CPUM that the EFER.NXE bit has changed.
3417 *
3418 * @param pVCpu The cross context virtual CPU structure of the CPU for
3419 * which EFER changed.
3420 * @param fNxe The new NXE state.
3421 */
3422VMM_INT_DECL(void) PGMNotifyNxeChanged(PVMCPU pVCpu, bool fNxe)
3423{
3424/** @todo VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu); */
3425 Log(("PGMNotifyNxeChanged: fNxe=%RTbool\n", fNxe));
3426
3427 pVCpu->pgm.s.fNoExecuteEnabled = fNxe;
3428 if (fNxe)
3429 {
3430 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
3431 pVCpu->pgm.s.fGstPaeMbzPteMask &= ~X86_PTE_PAE_NX;
3432 pVCpu->pgm.s.fGstPaeMbzPdeMask &= ~X86_PDE_PAE_NX;
3433 pVCpu->pgm.s.fGstPaeMbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
3434 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask - N/A */
3435 pVCpu->pgm.s.fGstAmd64MbzPteMask &= ~X86_PTE_PAE_NX;
3436 pVCpu->pgm.s.fGstAmd64MbzPdeMask &= ~X86_PDE_PAE_NX;
3437 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
3438 pVCpu->pgm.s.fGstAmd64MbzPdpeMask &= ~X86_PDPE_LM_NX;
3439 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask &= ~X86_PDPE_LM_NX;
3440 pVCpu->pgm.s.fGstAmd64MbzPml4eMask &= ~X86_PML4E_NX;
3441
3442 pVCpu->pgm.s.fGst64ShadowedPteMask |= X86_PTE_PAE_NX;
3443 pVCpu->pgm.s.fGst64ShadowedPdeMask |= X86_PDE_PAE_NX;
3444 pVCpu->pgm.s.fGst64ShadowedBigPdeMask |= X86_PDE2M_PAE_NX;
3445 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask |= X86_PDE2M_PAE_NX;
3446 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask |= X86_PDPE_LM_NX;
3447 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask |= X86_PML4E_NX;
3448 }
3449 else
3450 {
3451 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
3452 pVCpu->pgm.s.fGstPaeMbzPteMask |= X86_PTE_PAE_NX;
3453 pVCpu->pgm.s.fGstPaeMbzPdeMask |= X86_PDE_PAE_NX;
3454 pVCpu->pgm.s.fGstPaeMbzBigPdeMask |= X86_PDE2M_PAE_NX;
3455 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask - N/A */
3456 pVCpu->pgm.s.fGstAmd64MbzPteMask |= X86_PTE_PAE_NX;
3457 pVCpu->pgm.s.fGstAmd64MbzPdeMask |= X86_PDE_PAE_NX;
3458 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask |= X86_PDE2M_PAE_NX;
3459 pVCpu->pgm.s.fGstAmd64MbzPdpeMask |= X86_PDPE_LM_NX;
3460 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask |= X86_PDPE_LM_NX;
3461 pVCpu->pgm.s.fGstAmd64MbzPml4eMask |= X86_PML4E_NX;
3462
3463 pVCpu->pgm.s.fGst64ShadowedPteMask &= ~X86_PTE_PAE_NX;
3464 pVCpu->pgm.s.fGst64ShadowedPdeMask &= ~X86_PDE_PAE_NX;
3465 pVCpu->pgm.s.fGst64ShadowedBigPdeMask &= ~X86_PDE2M_PAE_NX;
3466 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask &= ~X86_PDE2M_PAE_NX;
3467 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask &= ~X86_PDPE_LM_NX;
3468 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask &= ~X86_PML4E_NX;
3469 }
3470}
3471
3472
3473/**
3474 * Check if any PGM pool pages are marked dirty (not write monitored).
3475 *
3476 * @returns true if dirty pages are present, false otherwise.
3477 * @param pVM The cross context VM structure.
3478 */
3479VMMDECL(bool) PGMHasDirtyPages(PVM pVM)
3480{
3481 return pVM->pgm.s.CTX_SUFF(pPool)->cDirtyPages != 0;
3482}
3483
3484
3485/**
3486 * Check if this VCPU currently owns the PGM lock.
3487 *
3488 * @returns true if the calling VCPU owns the PGM lock, false otherwise.
3489 * @param pVM The cross context VM structure.
3490 */
3491VMMDECL(bool) PGMIsLockOwner(PVMCC pVM)
3492{
3493 return PDMCritSectIsOwner(pVM, &pVM->pgm.s.CritSectX);
3494}
3495
3496
3497/**
3498 * Enable or disable large page usage.
3499 *
3500 * @returns VBox status code.
3501 * @param pVM The cross context VM structure.
3502 * @param fUseLargePages Whether large pages may be used.
3503 */
3504VMMDECL(int) PGMSetLargePageUsage(PVMCC pVM, bool fUseLargePages)
3505{
3506 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3507
3508 pVM->pgm.s.fUseLargePages = fUseLargePages;
3509 return VINF_SUCCESS;
3510}
3511
3512
3513/**
3514 * Acquire the PGM lock.
3515 *
3516 * @returns VBox status code
3517 * @param pVM The cross context VM structure.
3518 * @param fVoid Set if the caller cannot handle failure returns.
3519 * @param SRC_POS The source position of the caller (RT_SRC_POS).
3520 */
3521#if defined(VBOX_STRICT) || defined(DOXYGEN_RUNNING)
3522int pgmLockDebug(PVMCC pVM, bool fVoid, RT_SRC_POS_DECL)
3523#else
3524int pgmLock(PVMCC pVM, bool fVoid)
3525#endif
3526{
3527#if defined(VBOX_STRICT)
3528 int rc = PDMCritSectEnterDebug(pVM, &pVM->pgm.s.CritSectX, VINF_SUCCESS, (uintptr_t)ASMReturnAddress(), RT_SRC_POS_ARGS);
3529#else
3530 int rc = PDMCritSectEnter(pVM, &pVM->pgm.s.CritSectX, VINF_SUCCESS);
3531#endif
3532 if (RT_SUCCESS(rc))
3533 return rc;
3534 if (fVoid)
3535 PDM_CRITSECT_RELEASE_ASSERT_RC(pVM, &pVM->pgm.s.CritSectX, rc);
3536 else
3537 AssertRC(rc);
3538 return rc;
3539}
3540
3541
3542/**
3543 * Release the PGM lock.
3544 *
3545 * @remarks Returns nothing; a nested leave (VINF_SEM_NESTED) preserves the deprecated page lock count.
3546 * @param pVM The cross context VM structure.
3547 */
3548void pgmUnlock(PVMCC pVM)
3549{
3550 uint32_t cDeprecatedPageLocks = pVM->pgm.s.cDeprecatedPageLocks;
3551 pVM->pgm.s.cDeprecatedPageLocks = 0;
3552 int rc = PDMCritSectLeave(pVM, &pVM->pgm.s.CritSectX);
3553 if (rc == VINF_SEM_NESTED)
3554 pVM->pgm.s.cDeprecatedPageLocks = cDeprecatedPageLocks;
3555}
3556
3557
3558#if !defined(IN_R0) || defined(LOG_ENABLED)
3559
3560/** Format handler for PGMPAGE.
3561 * @copydoc FNRTSTRFORMATTYPE */
3562static DECLCALLBACK(size_t) pgmFormatTypeHandlerPage(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
3563 const char *pszType, void const *pvValue,
3564 int cchWidth, int cchPrecision, unsigned fFlags,
3565 void *pvUser)
3566{
3567 size_t cch;
3568 PCPGMPAGE pPage = (PCPGMPAGE)pvValue;
3569 if (RT_VALID_PTR(pPage))
3570 {
3571 char szTmp[64+80];
3572
3573 cch = 0;
3574
3575 /* The single char state stuff. */
3576 static const char s_achPageStates[4] = { 'Z', 'A', 'W', 'S' };
3577 szTmp[cch++] = s_achPageStates[PGM_PAGE_GET_STATE_NA(pPage)];
3578
3579# define IS_PART_INCLUDED(lvl) ( !(fFlags & RTSTR_F_PRECISION) || cchPrecision == (lvl) || cchPrecision >= (lvl)+10 )
3580 if (IS_PART_INCLUDED(5))
3581 {
3582 static const char s_achHandlerStates[4] = { '-', 't', 'w', 'a' };
3583 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)];
3584 }
3585
3586 /* The type. */
3587 if (IS_PART_INCLUDED(4))
3588 {
3589 szTmp[cch++] = ':';
3590 static const char s_achPageTypes[8][4] = { "INV", "RAM", "MI2", "M2A", "SHA", "ROM", "MIO", "BAD" };
3591 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][0];
3592 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][1];
3593 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][2];
3594 }
3595
3596 /* The numbers. */
3597 if (IS_PART_INCLUDED(3))
3598 {
3599 szTmp[cch++] = ':';
3600 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_HCPHYS_NA(pPage), 16, 12, 0, RTSTR_F_ZEROPAD | RTSTR_F_64BIT);
3601 }
3602
3603 if (IS_PART_INCLUDED(2))
3604 {
3605 szTmp[cch++] = ':';
3606 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_PAGEID(pPage), 16, 7, 0, RTSTR_F_ZEROPAD | RTSTR_F_32BIT);
3607 }
3608
3609 if (IS_PART_INCLUDED(6))
3610 {
3611 szTmp[cch++] = ':';
3612 static const char s_achRefs[4] = { '-', 'U', '!', 'L' };
3613 szTmp[cch++] = s_achRefs[PGM_PAGE_GET_TD_CREFS_NA(pPage)];
3614 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_TD_IDX_NA(pPage), 16, 4, 0, RTSTR_F_ZEROPAD | RTSTR_F_16BIT);
3615 }
3616# undef IS_PART_INCLUDED
3617
3618 cch = pfnOutput(pvArgOutput, szTmp, cch);
3619 }
3620 else
3621 cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmpage-ptr>"));
3622 NOREF(pszType); NOREF(cchWidth); NOREF(pvUser);
3623 return cch;
3624}
3625
3626
3627/** Format handler for PGMRAMRANGE.
3628 * @copydoc FNRTSTRFORMATTYPE */
3629static DECLCALLBACK(size_t) pgmFormatTypeHandlerRamRange(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
3630 const char *pszType, void const *pvValue,
3631 int cchWidth, int cchPrecision, unsigned fFlags,
3632 void *pvUser)
3633{
3634 size_t cch;
3635 PGMRAMRANGE const *pRam = (PGMRAMRANGE const *)pvValue;
3636 if (RT_VALID_PTR(pRam))
3637 {
3638 char szTmp[80];
3639 cch = RTStrPrintf(szTmp, sizeof(szTmp), "%RGp-%RGp", pRam->GCPhys, pRam->GCPhysLast);
3640 cch = pfnOutput(pvArgOutput, szTmp, cch);
3641 }
3642 else
3643 cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmramrange-ptr>"));
3644 NOREF(pszType); NOREF(cchWidth); NOREF(cchPrecision); NOREF(pvUser); NOREF(fFlags);
3645 return cch;
3646}
3647
3648 /** Format type handlers to be registered/deregistered. */
3649static const struct
3650{
3651 char szType[24];
3652 PFNRTSTRFORMATTYPE pfnHandler;
3653} g_aPgmFormatTypes[] =
3654{
3655 { "pgmpage", pgmFormatTypeHandlerPage },
3656 { "pgmramrange", pgmFormatTypeHandlerRamRange }
3657};
3658
3659#endif /* !IN_R0 || LOG_ENABLED */
3660
3661/**
3662 * Registers the global string format types.
3663 *
3664 * This should be called at module load time or in some other manner that ensure
3665 * that it's called exactly one time.
3666 *
3667 * @returns IPRT status code on RTStrFormatTypeRegister failure.
3668 */
3669VMMDECL(int) PGMRegisterStringFormatTypes(void)
3670{
3671#if !defined(IN_R0) || defined(LOG_ENABLED)
3672 int rc = VINF_SUCCESS;
3673 unsigned i;
3674 for (i = 0; RT_SUCCESS(rc) && i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
3675 {
3676 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
3677# ifdef IN_RING0
3678 if (rc == VERR_ALREADY_EXISTS)
3679 {
3680 /* in case of cleanup failure in ring-0 */
3681 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
3682 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
3683 }
3684# endif
3685 }
3686 if (RT_FAILURE(rc))
3687 while (i-- > 0)
3688 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
3689
3690 return rc;
3691#else
3692 return VINF_SUCCESS;
3693#endif
3694}
3695
3696
3697/**
3698 * Deregisters the global string format types.
3699 *
3700 * This should be called at module unload time or in some other manner that
3701 * ensures that it's called exactly once.
3702 */
3703VMMDECL(void) PGMDeregisterStringFormatTypes(void)
3704{
3705#if !defined(IN_R0) || defined(LOG_ENABLED)
3706 for (unsigned i = 0; i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
3707 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
3708#endif
3709}
3710
3711
3712#ifdef VBOX_STRICT
3713/**
3714 * Asserts that everything related to the guest CR3 is correctly shadowed.
3715 *
3716 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
3717 * and assert the correctness of the guest CR3 mapping before asserting that the
3718 * shadow page tables are in sync with the guest page tables.
3719 *
3720 * @returns Number of conflicts.
3721 * @param pVM The cross context VM structure.
3722 * @param pVCpu The cross context virtual CPU structure.
3723 * @param cr3 The current guest CR3 register value.
3724 * @param cr4 The current guest CR4 register value.
3725 */
3726VMMDECL(unsigned) PGMAssertCR3(PVMCC pVM, PVMCPUCC pVCpu, uint64_t cr3, uint64_t cr4)
3727{
3728 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
3729
3730 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
3731 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), -VERR_PGM_MODE_IPE);
3732 AssertReturn(g_aPgmBothModeData[idxBth].pfnAssertCR3, -VERR_PGM_MODE_IPE);
3733
3734 PGM_LOCK_VOID(pVM);
3735 unsigned cErrors = g_aPgmBothModeData[idxBth].pfnAssertCR3(pVCpu, cr3, cr4, 0, ~(RTGCPTR)0);
3736 PGM_UNLOCK(pVM);
3737
3738 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
3739 return cErrors;
3740}
3741#endif /* VBOX_STRICT */
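
/*
 * Illustrative strict-build usage sketch, not part of the original source;
 * the CR3/CR4 values are fetched through CPUM as done elsewhere in this file:
 *
 * @code
 * # ifdef VBOX_STRICT
 *     unsigned const cErrors = PGMAssertCR3(pVM, pVCpu, CPUMGetGuestCR3(pVCpu), CPUMGetGuestCR4(pVCpu));
 *     AssertMsg(cErrors == 0, ("cErrors=%u\n", cErrors));
 * # endif
 * @endcode
 */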
3742
3743
3744/**
3745 * Updates PGM's copy of the guest's EPT pointer.
3746 *
3747 * @param pVCpu The cross context virtual CPU structure.
3748 * @param uEptPtr The EPT pointer.
3749 *
3750 * @remarks This can be called as part of VM-entry so we might be in the midst of
3751 * switching to VMX non-root mode.
3752 */
3753VMM_INT_DECL(void) PGMSetGuestEptPtr(PVMCPUCC pVCpu, uint64_t uEptPtr)
3754{
3755 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3756 PGM_LOCK_VOID(pVM);
3757 pVCpu->pgm.s.uEptPtr = uEptPtr;
3758 PGM_UNLOCK(pVM);
3759}
3760