VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp @ 92679

Last change on this file since 92679 was 92642, checked in by vboxsync, 3 years ago

VMM: Nested VMX: bugref:10092 Avoid RT_ZERO'ing multiple times, also just use passed in pWalk rather than construct another copy on the stack. Assert basic assumptions when Walk succeeds.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 142.4 KB
1/* $Id: PGMAll.cpp 92642 2021-11-30 09:19:01Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor - All context code.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_PGM
23#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
24#include <VBox/vmm/pgm.h>
25#include <VBox/vmm/cpum.h>
26#include <VBox/vmm/selm.h>
27#include <VBox/vmm/iem.h>
28#include <VBox/vmm/iom.h>
29#include <VBox/sup.h>
30#include <VBox/vmm/mm.h>
31#include <VBox/vmm/stam.h>
32#include <VBox/vmm/trpm.h>
33#include <VBox/vmm/em.h>
34#include <VBox/vmm/hm.h>
35#include <VBox/vmm/hm_vmx.h>
36#include "PGMInternal.h"
37#include <VBox/vmm/vmcc.h>
38#include "PGMInline.h"
39#include <iprt/assert.h>
40#include <iprt/asm-amd64-x86.h>
41#include <iprt/string.h>
42#include <VBox/log.h>
43#include <VBox/param.h>
44#include <VBox/err.h>
45
46
47/*********************************************************************************************************************************
48* Internal Functions *
49*********************************************************************************************************************************/
50DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
51DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde);
52DECLINLINE(int) pgmGstMapCr3(PVMCPUCC pVCpu, RTGCPHYS GCPhysCr3, PRTHCPTR pHCPtrGuestCr3);
53#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
54static int pgmGstSlatWalk(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, bool fIsLinearAddrValid, RTGCPTR GCPtrNested, PPGMPTWALK pWalk,
55 PPGMPTWALKGST pGstWalk);
56static int pgmGstSlatWalkPhys(PVMCPUCC pVCpu, PGMSLAT enmSlatMode, RTGCPHYS GCPhysNested, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk);
57static int pgmGstSlatTranslateCr3(PVMCPUCC pVCpu, uint64_t uCr3, PRTGCPHYS pGCPhysCr3);
58#endif
59static int pgmShwSyncLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD);
60static int pgmShwGetEPTPDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD);
61
62
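/*
 * Note: PGMAllShw.h, PGMAllGst.h and PGMAllBth.h below are written as templates.
 * They are included once per shadow/guest paging mode combination, with the
 * PGM_SHW_TYPE, PGM_GST_TYPE and PGM_*_NAME() macros selecting the mode specific
 * types and function names for each instantiation.
 */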
63#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
64/* Guest - EPT SLAT is identical for all guest paging modes. */
65# define PGM_SLAT_TYPE PGM_SLAT_TYPE_EPT
66# define PGM_GST_TYPE PGM_TYPE_EPT
67# include "PGMGstDefs.h"
68# include "PGMAllGstSlatEpt.cpp.h"
69# undef PGM_GST_TYPE
70#endif
71
72
73/*
74 * Shadow - 32-bit mode
75 */
76#define PGM_SHW_TYPE PGM_TYPE_32BIT
77#define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
78#include "PGMAllShw.h"
79
80/* Guest - real mode */
81#define PGM_GST_TYPE PGM_TYPE_REAL
82#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
83#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
84#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
85#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
86#include "PGMGstDefs.h"
87#include "PGMAllGst.h"
88#include "PGMAllBth.h"
89#undef BTH_PGMPOOLKIND_PT_FOR_PT
90#undef BTH_PGMPOOLKIND_ROOT
91#undef PGM_BTH_NAME
92#undef PGM_GST_TYPE
93#undef PGM_GST_NAME
94
95/* Guest - protected mode */
96#define PGM_GST_TYPE PGM_TYPE_PROT
97#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
98#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
99#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
100#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
101#include "PGMGstDefs.h"
102#include "PGMAllGst.h"
103#include "PGMAllBth.h"
104#undef BTH_PGMPOOLKIND_PT_FOR_PT
105#undef BTH_PGMPOOLKIND_ROOT
106#undef PGM_BTH_NAME
107#undef PGM_GST_TYPE
108#undef PGM_GST_NAME
109
110/* Guest - 32-bit mode */
111#define PGM_GST_TYPE PGM_TYPE_32BIT
112#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
113#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
114#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
115#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
116#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD
117#include "PGMGstDefs.h"
118#include "PGMAllGst.h"
119#include "PGMAllBth.h"
120#undef BTH_PGMPOOLKIND_PT_FOR_BIG
121#undef BTH_PGMPOOLKIND_PT_FOR_PT
122#undef BTH_PGMPOOLKIND_ROOT
123#undef PGM_BTH_NAME
124#undef PGM_GST_TYPE
125#undef PGM_GST_NAME
126
127#undef PGM_SHW_TYPE
128#undef PGM_SHW_NAME
129
130
131/*
132 * Shadow - PAE mode
133 */
134#define PGM_SHW_TYPE PGM_TYPE_PAE
135#define PGM_SHW_NAME(name) PGM_SHW_NAME_PAE(name)
136#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
137#include "PGMAllShw.h"
138
139/* Guest - real mode */
140#define PGM_GST_TYPE PGM_TYPE_REAL
141#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
142#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
143#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
144#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
145#include "PGMGstDefs.h"
146#include "PGMAllBth.h"
147#undef BTH_PGMPOOLKIND_PT_FOR_PT
148#undef BTH_PGMPOOLKIND_ROOT
149#undef PGM_BTH_NAME
150#undef PGM_GST_TYPE
151#undef PGM_GST_NAME
152
153/* Guest - protected mode */
154#define PGM_GST_TYPE PGM_TYPE_PROT
155#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
156#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
157#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
158#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
159#include "PGMGstDefs.h"
160#include "PGMAllBth.h"
161#undef BTH_PGMPOOLKIND_PT_FOR_PT
162#undef BTH_PGMPOOLKIND_ROOT
163#undef PGM_BTH_NAME
164#undef PGM_GST_TYPE
165#undef PGM_GST_NAME
166
167/* Guest - 32-bit mode */
168#define PGM_GST_TYPE PGM_TYPE_32BIT
169#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
170#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
171#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
172#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
173#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_FOR_32BIT
174#include "PGMGstDefs.h"
175#include "PGMAllBth.h"
176#undef BTH_PGMPOOLKIND_PT_FOR_BIG
177#undef BTH_PGMPOOLKIND_PT_FOR_PT
178#undef BTH_PGMPOOLKIND_ROOT
179#undef PGM_BTH_NAME
180#undef PGM_GST_TYPE
181#undef PGM_GST_NAME
182
183
184/* Guest - PAE mode */
185#define PGM_GST_TYPE PGM_TYPE_PAE
186#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
187#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
188#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
189#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
190#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT
191#include "PGMGstDefs.h"
192#include "PGMAllGst.h"
193#include "PGMAllBth.h"
194#undef BTH_PGMPOOLKIND_PT_FOR_BIG
195#undef BTH_PGMPOOLKIND_PT_FOR_PT
196#undef BTH_PGMPOOLKIND_ROOT
197#undef PGM_BTH_NAME
198#undef PGM_GST_TYPE
199#undef PGM_GST_NAME
200
201#undef PGM_SHW_TYPE
202#undef PGM_SHW_NAME
203
204
205/*
206 * Shadow - AMD64 mode
207 */
208#define PGM_SHW_TYPE PGM_TYPE_AMD64
209#define PGM_SHW_NAME(name) PGM_SHW_NAME_AMD64(name)
210#include "PGMAllShw.h"
211
212/* Guest - protected mode (only used for AMD-V nested paging in 64-bit mode) */
213/** @todo retire this hack. */
214#define PGM_GST_TYPE PGM_TYPE_PROT
215#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
216#define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
217#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
218#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PD_PHYS
219#include "PGMGstDefs.h"
220#include "PGMAllBth.h"
221#undef BTH_PGMPOOLKIND_PT_FOR_PT
222#undef BTH_PGMPOOLKIND_ROOT
223#undef PGM_BTH_NAME
224#undef PGM_GST_TYPE
225#undef PGM_GST_NAME
226
227#ifdef VBOX_WITH_64_BITS_GUESTS
228/* Guest - AMD64 mode */
229# define PGM_GST_TYPE PGM_TYPE_AMD64
230# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
231# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
232# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
233# define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
234# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_64BIT_PML4
235# include "PGMGstDefs.h"
236# include "PGMAllGst.h"
237# include "PGMAllBth.h"
238# undef BTH_PGMPOOLKIND_PT_FOR_BIG
239# undef BTH_PGMPOOLKIND_PT_FOR_PT
240# undef BTH_PGMPOOLKIND_ROOT
241# undef PGM_BTH_NAME
242# undef PGM_GST_TYPE
243# undef PGM_GST_NAME
244#endif /* VBOX_WITH_64_BITS_GUESTS */
245
246#undef PGM_SHW_TYPE
247#undef PGM_SHW_NAME
248
249
250/*
251 * Shadow - 32-bit nested paging mode.
252 */
253#define PGM_SHW_TYPE PGM_TYPE_NESTED_32BIT
254#define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED_32BIT(name)
255#include "PGMAllShw.h"
256
257/* Guest - real mode */
258#define PGM_GST_TYPE PGM_TYPE_REAL
259#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
260#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_REAL(name)
261#include "PGMGstDefs.h"
262#include "PGMAllBth.h"
263#undef PGM_BTH_NAME
264#undef PGM_GST_TYPE
265#undef PGM_GST_NAME
266
267/* Guest - protected mode */
268#define PGM_GST_TYPE PGM_TYPE_PROT
269#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
270#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_PROT(name)
271#include "PGMGstDefs.h"
272#include "PGMAllBth.h"
273#undef PGM_BTH_NAME
274#undef PGM_GST_TYPE
275#undef PGM_GST_NAME
276
277/* Guest - 32-bit mode */
278#define PGM_GST_TYPE PGM_TYPE_32BIT
279#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
280#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_32BIT(name)
281#include "PGMGstDefs.h"
282#include "PGMAllBth.h"
283#undef PGM_BTH_NAME
284#undef PGM_GST_TYPE
285#undef PGM_GST_NAME
286
287/* Guest - PAE mode */
288#define PGM_GST_TYPE PGM_TYPE_PAE
289#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
290#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_PAE(name)
291#include "PGMGstDefs.h"
292#include "PGMAllBth.h"
293#undef PGM_BTH_NAME
294#undef PGM_GST_TYPE
295#undef PGM_GST_NAME
296
297#ifdef VBOX_WITH_64_BITS_GUESTS
298/* Guest - AMD64 mode */
299# define PGM_GST_TYPE PGM_TYPE_AMD64
300# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
301# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_AMD64(name)
302# include "PGMGstDefs.h"
303# include "PGMAllBth.h"
304# undef PGM_BTH_NAME
305# undef PGM_GST_TYPE
306# undef PGM_GST_NAME
307#endif /* VBOX_WITH_64_BITS_GUESTS */
308
309#undef PGM_SHW_TYPE
310#undef PGM_SHW_NAME
311
312
313/*
314 * Shadow - PAE nested paging mode.
315 */
316#define PGM_SHW_TYPE PGM_TYPE_NESTED_PAE
317#define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED_PAE(name)
318#include "PGMAllShw.h"
319
320/* Guest - real mode */
321#define PGM_GST_TYPE PGM_TYPE_REAL
322#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
323#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_REAL(name)
324#include "PGMGstDefs.h"
325#include "PGMAllBth.h"
326#undef PGM_BTH_NAME
327#undef PGM_GST_TYPE
328#undef PGM_GST_NAME
329
330/* Guest - protected mode */
331#define PGM_GST_TYPE PGM_TYPE_PROT
332#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
333#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_PROT(name)
334#include "PGMGstDefs.h"
335#include "PGMAllBth.h"
336#undef PGM_BTH_NAME
337#undef PGM_GST_TYPE
338#undef PGM_GST_NAME
339
340/* Guest - 32-bit mode */
341#define PGM_GST_TYPE PGM_TYPE_32BIT
342#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
343#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_32BIT(name)
344#include "PGMGstDefs.h"
345#include "PGMAllBth.h"
346#undef PGM_BTH_NAME
347#undef PGM_GST_TYPE
348#undef PGM_GST_NAME
349
350/* Guest - PAE mode */
351#define PGM_GST_TYPE PGM_TYPE_PAE
352#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
353#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_PAE(name)
354#include "PGMGstDefs.h"
355#include "PGMAllBth.h"
356#undef PGM_BTH_NAME
357#undef PGM_GST_TYPE
358#undef PGM_GST_NAME
359
360#ifdef VBOX_WITH_64_BITS_GUESTS
361/* Guest - AMD64 mode */
362# define PGM_GST_TYPE PGM_TYPE_AMD64
363# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
364# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_AMD64(name)
365# include "PGMGstDefs.h"
366# include "PGMAllBth.h"
367# undef PGM_BTH_NAME
368# undef PGM_GST_TYPE
369# undef PGM_GST_NAME
370#endif /* VBOX_WITH_64_BITS_GUESTS */
371
372#undef PGM_SHW_TYPE
373#undef PGM_SHW_NAME
374
375
376/*
377 * Shadow - AMD64 nested paging mode.
378 */
379#define PGM_SHW_TYPE PGM_TYPE_NESTED_AMD64
380#define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED_AMD64(name)
381#include "PGMAllShw.h"
382
383/* Guest - real mode */
384#define PGM_GST_TYPE PGM_TYPE_REAL
385#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
386#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_REAL(name)
387#include "PGMGstDefs.h"
388#include "PGMAllBth.h"
389#undef PGM_BTH_NAME
390#undef PGM_GST_TYPE
391#undef PGM_GST_NAME
392
393/* Guest - protected mode */
394#define PGM_GST_TYPE PGM_TYPE_PROT
395#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
396#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_PROT(name)
397#include "PGMGstDefs.h"
398#include "PGMAllBth.h"
399#undef PGM_BTH_NAME
400#undef PGM_GST_TYPE
401#undef PGM_GST_NAME
402
403/* Guest - 32-bit mode */
404#define PGM_GST_TYPE PGM_TYPE_32BIT
405#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
406#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_32BIT(name)
407#include "PGMGstDefs.h"
408#include "PGMAllBth.h"
409#undef PGM_BTH_NAME
410#undef PGM_GST_TYPE
411#undef PGM_GST_NAME
412
413/* Guest - PAE mode */
414#define PGM_GST_TYPE PGM_TYPE_PAE
415#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
416#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_PAE(name)
417#include "PGMGstDefs.h"
418#include "PGMAllBth.h"
419#undef PGM_BTH_NAME
420#undef PGM_GST_TYPE
421#undef PGM_GST_NAME
422
423#ifdef VBOX_WITH_64_BITS_GUESTS
424/* Guest - AMD64 mode */
425# define PGM_GST_TYPE PGM_TYPE_AMD64
426# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
427# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_AMD64(name)
428# include "PGMGstDefs.h"
429# include "PGMAllBth.h"
430# undef PGM_BTH_NAME
431# undef PGM_GST_TYPE
432# undef PGM_GST_NAME
433#endif /* VBOX_WITH_64_BITS_GUESTS */
434
435#undef PGM_SHW_TYPE
436#undef PGM_SHW_NAME
437
438
439/*
440 * Shadow - EPT.
441 */
442#define PGM_SHW_TYPE PGM_TYPE_EPT
443#define PGM_SHW_NAME(name) PGM_SHW_NAME_EPT(name)
444#include "PGMAllShw.h"
445
446/* Guest - real mode */
447#define PGM_GST_TYPE PGM_TYPE_REAL
448#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
449#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_REAL(name)
450#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
451#include "PGMGstDefs.h"
452#include "PGMAllBth.h"
453#undef BTH_PGMPOOLKIND_PT_FOR_PT
454#undef PGM_BTH_NAME
455#undef PGM_GST_TYPE
456#undef PGM_GST_NAME
457
458/* Guest - protected mode */
459#define PGM_GST_TYPE PGM_TYPE_PROT
460#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
461#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
462#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
463#include "PGMGstDefs.h"
464#include "PGMAllBth.h"
465#undef BTH_PGMPOOLKIND_PT_FOR_PT
466#undef PGM_BTH_NAME
467#undef PGM_GST_TYPE
468#undef PGM_GST_NAME
469
470/* Guest - 32-bit mode */
471#define PGM_GST_TYPE PGM_TYPE_32BIT
472#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
473#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_32BIT(name)
474#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
475#include "PGMGstDefs.h"
476#include "PGMAllBth.h"
477#undef BTH_PGMPOOLKIND_PT_FOR_PT
478#undef PGM_BTH_NAME
479#undef PGM_GST_TYPE
480#undef PGM_GST_NAME
481
482/* Guest - PAE mode */
483#define PGM_GST_TYPE PGM_TYPE_PAE
484#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
485#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PAE(name)
486#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
487#include "PGMGstDefs.h"
488#include "PGMAllBth.h"
489#undef BTH_PGMPOOLKIND_PT_FOR_PT
490#undef PGM_BTH_NAME
491#undef PGM_GST_TYPE
492#undef PGM_GST_NAME
493
494#ifdef VBOX_WITH_64_BITS_GUESTS
495/* Guest - AMD64 mode */
496# define PGM_GST_TYPE PGM_TYPE_AMD64
497# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
498# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_AMD64(name)
499# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
500# include "PGMGstDefs.h"
501# include "PGMAllBth.h"
502# undef BTH_PGMPOOLKIND_PT_FOR_PT
503# undef PGM_BTH_NAME
504# undef PGM_GST_TYPE
505# undef PGM_GST_NAME
506#endif /* VBOX_WITH_64_BITS_GUESTS */
507
508#undef PGM_SHW_TYPE
509#undef PGM_SHW_NAME
510
511
512/*
513 * Shadow - NEM / None.
514 */
515#define PGM_SHW_TYPE PGM_TYPE_NONE
516#define PGM_SHW_NAME(name) PGM_SHW_NAME_NONE(name)
517#include "PGMAllShw.h"
518
519/* Guest - real mode */
520#define PGM_GST_TYPE PGM_TYPE_REAL
521#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
522#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_REAL(name)
523#include "PGMGstDefs.h"
524#include "PGMAllBth.h"
525#undef PGM_BTH_NAME
526#undef PGM_GST_TYPE
527#undef PGM_GST_NAME
528
529/* Guest - protected mode */
530#define PGM_GST_TYPE PGM_TYPE_PROT
531#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
532#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_PROT(name)
533#include "PGMGstDefs.h"
534#include "PGMAllBth.h"
535#undef PGM_BTH_NAME
536#undef PGM_GST_TYPE
537#undef PGM_GST_NAME
538
539/* Guest - 32-bit mode */
540#define PGM_GST_TYPE PGM_TYPE_32BIT
541#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
542#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_32BIT(name)
543#include "PGMGstDefs.h"
544#include "PGMAllBth.h"
545#undef PGM_BTH_NAME
546#undef PGM_GST_TYPE
547#undef PGM_GST_NAME
548
549/* Guest - PAE mode */
550#define PGM_GST_TYPE PGM_TYPE_PAE
551#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
552#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_PAE(name)
553#include "PGMGstDefs.h"
554#include "PGMAllBth.h"
555#undef PGM_BTH_NAME
556#undef PGM_GST_TYPE
557#undef PGM_GST_NAME
558
559#ifdef VBOX_WITH_64_BITS_GUESTS
560/* Guest - AMD64 mode */
561# define PGM_GST_TYPE PGM_TYPE_AMD64
562# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
563# define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_AMD64(name)
564# include "PGMGstDefs.h"
565# include "PGMAllBth.h"
566# undef PGM_BTH_NAME
567# undef PGM_GST_TYPE
568# undef PGM_GST_NAME
569#endif /* VBOX_WITH_64_BITS_GUESTS */
570
571#undef PGM_SHW_TYPE
572#undef PGM_SHW_NAME
573
574
575
576/**
577 * Guest mode data array.
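 * Indexed by the guest paging mode type (PGM_TYPE_REAL through PGM_TYPE_AMD64);
 * entry 0 is an unused dummy.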
578 */
579PGMMODEDATAGST const g_aPgmGuestModeData[PGM_GUEST_MODE_DATA_ARRAY_SIZE] =
580{
581 { UINT32_MAX, NULL, NULL, NULL, NULL }, /* 0 */
582 {
583 PGM_TYPE_REAL,
584 PGM_GST_NAME_REAL(GetPage),
585 PGM_GST_NAME_REAL(ModifyPage),
586 PGM_GST_NAME_REAL(Enter),
587 PGM_GST_NAME_REAL(Exit),
588#ifdef IN_RING3
589 PGM_GST_NAME_REAL(Relocate),
590#endif
591 },
592 {
593 PGM_TYPE_PROT,
594 PGM_GST_NAME_PROT(GetPage),
595 PGM_GST_NAME_PROT(ModifyPage),
596 PGM_GST_NAME_PROT(Enter),
597 PGM_GST_NAME_PROT(Exit),
598#ifdef IN_RING3
599 PGM_GST_NAME_PROT(Relocate),
600#endif
601 },
602 {
603 PGM_TYPE_32BIT,
604 PGM_GST_NAME_32BIT(GetPage),
605 PGM_GST_NAME_32BIT(ModifyPage),
606 PGM_GST_NAME_32BIT(Enter),
607 PGM_GST_NAME_32BIT(Exit),
608#ifdef IN_RING3
609 PGM_GST_NAME_32BIT(Relocate),
610#endif
611 },
612 {
613 PGM_TYPE_PAE,
614 PGM_GST_NAME_PAE(GetPage),
615 PGM_GST_NAME_PAE(ModifyPage),
616 PGM_GST_NAME_PAE(Enter),
617 PGM_GST_NAME_PAE(Exit),
618#ifdef IN_RING3
619 PGM_GST_NAME_PAE(Relocate),
620#endif
621 },
622#ifdef VBOX_WITH_64_BITS_GUESTS
623 {
624 PGM_TYPE_AMD64,
625 PGM_GST_NAME_AMD64(GetPage),
626 PGM_GST_NAME_AMD64(ModifyPage),
627 PGM_GST_NAME_AMD64(Enter),
628 PGM_GST_NAME_AMD64(Exit),
629# ifdef IN_RING3
630 PGM_GST_NAME_AMD64(Relocate),
631# endif
632 },
633#endif
634};
635
636
637/**
638 * The shadow mode data array.
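 * Indexed by the shadow paging mode type (PGM_TYPE_XXX); entry 0 and the real
 * and protected mode slots are unused dummies, since the shadow mode is never
 * real or protected mode.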
639 */
640PGMMODEDATASHW const g_aPgmShadowModeData[PGM_SHADOW_MODE_DATA_ARRAY_SIZE] =
641{
642 { UINT8_MAX, NULL, NULL, NULL, NULL }, /* 0 */
643 { UINT8_MAX, NULL, NULL, NULL, NULL }, /* PGM_TYPE_REAL */
644 { UINT8_MAX, NULL, NULL, NULL, NULL }, /* PGM_TYPE_PROT */
645 {
646 PGM_TYPE_32BIT,
647 PGM_SHW_NAME_32BIT(GetPage),
648 PGM_SHW_NAME_32BIT(ModifyPage),
649 PGM_SHW_NAME_32BIT(Enter),
650 PGM_SHW_NAME_32BIT(Exit),
651#ifdef IN_RING3
652 PGM_SHW_NAME_32BIT(Relocate),
653#endif
654 },
655 {
656 PGM_TYPE_PAE,
657 PGM_SHW_NAME_PAE(GetPage),
658 PGM_SHW_NAME_PAE(ModifyPage),
659 PGM_SHW_NAME_PAE(Enter),
660 PGM_SHW_NAME_PAE(Exit),
661#ifdef IN_RING3
662 PGM_SHW_NAME_PAE(Relocate),
663#endif
664 },
665 {
666 PGM_TYPE_AMD64,
667 PGM_SHW_NAME_AMD64(GetPage),
668 PGM_SHW_NAME_AMD64(ModifyPage),
669 PGM_SHW_NAME_AMD64(Enter),
670 PGM_SHW_NAME_AMD64(Exit),
671#ifdef IN_RING3
672 PGM_SHW_NAME_AMD64(Relocate),
673#endif
674 },
675 {
676 PGM_TYPE_NESTED_32BIT,
677 PGM_SHW_NAME_NESTED_32BIT(GetPage),
678 PGM_SHW_NAME_NESTED_32BIT(ModifyPage),
679 PGM_SHW_NAME_NESTED_32BIT(Enter),
680 PGM_SHW_NAME_NESTED_32BIT(Exit),
681#ifdef IN_RING3
682 PGM_SHW_NAME_NESTED_32BIT(Relocate),
683#endif
684 },
685 {
686 PGM_TYPE_NESTED_PAE,
687 PGM_SHW_NAME_NESTED_PAE(GetPage),
688 PGM_SHW_NAME_NESTED_PAE(ModifyPage),
689 PGM_SHW_NAME_NESTED_PAE(Enter),
690 PGM_SHW_NAME_NESTED_PAE(Exit),
691#ifdef IN_RING3
692 PGM_SHW_NAME_NESTED_PAE(Relocate),
693#endif
694 },
695 {
696 PGM_TYPE_NESTED_AMD64,
697 PGM_SHW_NAME_NESTED_AMD64(GetPage),
698 PGM_SHW_NAME_NESTED_AMD64(ModifyPage),
699 PGM_SHW_NAME_NESTED_AMD64(Enter),
700 PGM_SHW_NAME_NESTED_AMD64(Exit),
701#ifdef IN_RING3
702 PGM_SHW_NAME_NESTED_AMD64(Relocate),
703#endif
704 },
705 {
706 PGM_TYPE_EPT,
707 PGM_SHW_NAME_EPT(GetPage),
708 PGM_SHW_NAME_EPT(ModifyPage),
709 PGM_SHW_NAME_EPT(Enter),
710 PGM_SHW_NAME_EPT(Exit),
711#ifdef IN_RING3
712 PGM_SHW_NAME_EPT(Relocate),
713#endif
714 },
715 {
716 PGM_TYPE_NONE,
717 PGM_SHW_NAME_NONE(GetPage),
718 PGM_SHW_NAME_NONE(ModifyPage),
719 PGM_SHW_NAME_NONE(Enter),
720 PGM_SHW_NAME_NONE(Exit),
721#ifdef IN_RING3
722 PGM_SHW_NAME_NONE(Relocate),
723#endif
724 },
725};
726
727
728/**
729 * The guest+shadow mode data array.
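 * Laid out as one block of entries per shadow paging mode, each block holding
 * one slot per guest paging mode type; combinations that cannot occur are NULL
 * entries.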
730 */
731PGMMODEDATABTH const g_aPgmBothModeData[PGM_BOTH_MODE_DATA_ARRAY_SIZE] =
732{
733#if !defined(IN_RING3) && !defined(VBOX_STRICT)
734# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
735# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
736 { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(Trap0eHandler) }
737
738#elif !defined(IN_RING3) && defined(VBOX_STRICT)
739# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
740# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
741 { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(Trap0eHandler), Nm(AssertCR3) }
742
743#elif defined(IN_RING3) && !defined(VBOX_STRICT)
744# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL }
745# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
746 { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), }
747
748#elif defined(IN_RING3) && defined(VBOX_STRICT)
749# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
750# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
751 { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(AssertCR3) }
752
753#else
754# error "Misconfig."
755#endif
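/* Note: The entry layout differs with the build context on purpose: the
   Trap0eHandler member only exists outside ring-3, and the AssertCR3 member
   only exists in strict builds, hence the four variants of the two macros
   above. */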
756
757 /* 32-bit shadow paging mode: */
758 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
759 PGMMODEDATABTH_ENTRY(PGM_TYPE_32BIT, PGM_TYPE_REAL, PGM_BTH_NAME_32BIT_REAL),
760 PGMMODEDATABTH_ENTRY(PGM_TYPE_32BIT, PGM_TYPE_PROT, PGM_BTH_NAME_32BIT_PROT),
761 PGMMODEDATABTH_ENTRY(PGM_TYPE_32BIT, PGM_TYPE_32BIT, PGM_BTH_NAME_32BIT_32BIT),
762 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_PAE - illegal */
763 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_AMD64 - illegal */
764 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NESTED_32BIT - illegal */
765 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NESTED_PAE - illegal */
766 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NESTED_AMD64 - illegal */
767 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_EPT - illegal */
768 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NONE - illegal */
769
770 /* PAE shadow paging mode: */
771 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
772 PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_REAL, PGM_BTH_NAME_PAE_REAL),
773 PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_PROT, PGM_BTH_NAME_PAE_PROT),
774 PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_32BIT, PGM_BTH_NAME_PAE_32BIT),
775 PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_PAE, PGM_BTH_NAME_PAE_PAE),
776 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_AMD64 - illegal */
777 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NESTED_32BIT - illegal */
778 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NESTED_PAE - illegal */
779 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NESTED_AMD64 - illegal */
780 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_EPT - illegal */
781 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NONE - illegal */
782
783 /* AMD64 shadow paging mode: */
784 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
785 PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_REAL, PGM_BTH_NAME_AMD64_REAL),
786 PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_PROT, PGM_BTH_NAME_AMD64_PROT),
787 PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_32BIT, PGM_BTH_NAME_AMD64_32BIT),
788 PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_PAE, PGM_BTH_NAME_AMD64_PAE),
789#ifdef VBOX_WITH_64_BITS_GUESTS
790 PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_AMD64, PGM_BTH_NAME_AMD64_AMD64),
791#else
792 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_AMD64 - illegal */
793#endif
794 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NESTED_32BIT - illegal */
795 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NESTED_PAE - illegal */
796 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NESTED_AMD64 - illegal */
797 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_EPT - illegal */
798 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NONE - illegal */
799
800 /* 32-bit nested paging mode: */
801 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
802 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_REAL, PGM_BTH_NAME_NESTED_32BIT_REAL),
803 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_PROT, PGM_BTH_NAME_NESTED_32BIT_PROT),
804 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_32BIT, PGM_BTH_NAME_NESTED_32BIT_32BIT),
805 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_PAE, PGM_BTH_NAME_NESTED_32BIT_PAE),
806#ifdef VBOX_WITH_64_BITS_GUESTS
807 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_AMD64, PGM_BTH_NAME_NESTED_32BIT_AMD64),
808#else
809 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_AMD64 - illegal */
810#endif
811 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NESTED_32BIT - illegal */
812 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NESTED_PAE - illegal */
813 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NESTED_AMD64 - illegal */
814 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_EPT - illegal */
815 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NONE - illegal */
816
817 /* PAE nested paging mode: */
818 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
819 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_REAL, PGM_BTH_NAME_NESTED_PAE_REAL),
820 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_PROT, PGM_BTH_NAME_NESTED_PAE_PROT),
821 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_32BIT, PGM_BTH_NAME_NESTED_PAE_32BIT),
822 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_PAE, PGM_BTH_NAME_NESTED_PAE_PAE),
823#ifdef VBOX_WITH_64_BITS_GUESTS
824 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_AMD64, PGM_BTH_NAME_NESTED_PAE_AMD64),
825#else
826 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_AMD64 - illegal */
827#endif
828 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NESTED_32BIT - illegal */
829 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NESTED_PAE - illegal */
830 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NESTED_AMD64 - illegal */
831 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_EPT - illegal */
832 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NONE - illegal */
833
834 /* AMD64 nested paging mode: */
835 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
836 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_REAL, PGM_BTH_NAME_NESTED_AMD64_REAL),
837 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_PROT, PGM_BTH_NAME_NESTED_AMD64_PROT),
838 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_32BIT, PGM_BTH_NAME_NESTED_AMD64_32BIT),
839 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_PAE, PGM_BTH_NAME_NESTED_AMD64_PAE),
840#ifdef VBOX_WITH_64_BITS_GUESTS
841 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_AMD64, PGM_BTH_NAME_NESTED_AMD64_AMD64),
842#else
843 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_AMD64 - illegal */
844#endif
845 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NESTED_32BIT - illegal */
846 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NESTED_PAE - illegal */
847 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NESTED_AMD64 - illegal */
848 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_EPT - illegal */
849 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NONE - illegal */
850
851 /* EPT nested paging mode: */
852 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
853 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_REAL, PGM_BTH_NAME_EPT_REAL),
854 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_PROT, PGM_BTH_NAME_EPT_PROT),
855 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_32BIT, PGM_BTH_NAME_EPT_32BIT),
856 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_PAE, PGM_BTH_NAME_EPT_PAE),
857#ifdef VBOX_WITH_64_BITS_GUESTS
858 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_AMD64, PGM_BTH_NAME_EPT_AMD64),
859#else
860 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_AMD64 - illegal */
861#endif
862 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NESTED_32BIT - illegal */
863 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NESTED_PAE - illegal */
864 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NESTED_AMD64 - illegal */
865 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_EPT - illegal */
866 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NONE - illegal */
867
868 /* NONE / NEM: */
869 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
870 PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_REAL, PGM_BTH_NAME_EPT_REAL),
871 PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_PROT, PGM_BTH_NAME_EPT_PROT),
872 PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_32BIT, PGM_BTH_NAME_EPT_32BIT),
873 PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_PAE, PGM_BTH_NAME_EPT_PAE),
874#ifdef VBOX_WITH_64_BITS_GUESTS
875 PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_AMD64, PGM_BTH_NAME_EPT_AMD64),
876#else
877 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_AMD64 - illegal */
878#endif
879 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_32BIT - illegal */
880 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_PAE - illegal */
881 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_AMD64 - illegal */
882 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_EPT - illegal */
883 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NONE - illegal */
884
885
886#undef PGMMODEDATABTH_ENTRY
887#undef PGMMODEDATABTH_NULL_ENTRY
888};
889
890
891#ifdef IN_RING0
892/**
893 * #PF Handler.
894 *
895 * @returns VBox status code (appropriate for trap handling and GC return).
896 * @param pVCpu The cross context virtual CPU structure.
897 * @param uErr The trap error code.
898 * @param pRegFrame Trap register frame.
899 * @param pvFault The fault address.
900 */
901VMMDECL(int) PGMTrap0eHandler(PVMCPUCC pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
902{
903 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
904
905 Log(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv eip=%04x:%RGv cr3=%RGp\n", uErr, pvFault, pRegFrame->cs.Sel, (RTGCPTR)pRegFrame->rip, (RTGCPHYS)CPUMGetGuestCR3(pVCpu)));
906 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.StatRZTrap0e, a);
907 STAM_STATS({ pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = NULL; } );
908
909
910# ifdef VBOX_WITH_STATISTICS
911 /*
912 * Error code stats.
913 */
914 if (uErr & X86_TRAP_PF_US)
915 {
916 if (!(uErr & X86_TRAP_PF_P))
917 {
918 if (uErr & X86_TRAP_PF_RW)
919 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSNotPresentWrite);
920 else
921 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSNotPresentRead);
922 }
923 else if (uErr & X86_TRAP_PF_RW)
924 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSWrite);
925 else if (uErr & X86_TRAP_PF_RSVD)
926 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSReserved);
927 else if (uErr & X86_TRAP_PF_ID)
928 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSNXE);
929 else
930 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSRead);
931 }
932 else
933 { /* Supervisor */
934 if (!(uErr & X86_TRAP_PF_P))
935 {
936 if (uErr & X86_TRAP_PF_RW)
937 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVNotPresentWrite);
938 else
939 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVNotPresentRead);
940 }
941 else if (uErr & X86_TRAP_PF_RW)
942 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVWrite);
943 else if (uErr & X86_TRAP_PF_ID)
944 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSNXE);
945 else if (uErr & X86_TRAP_PF_RSVD)
946 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVReserved);
947 }
948# endif /* VBOX_WITH_STATISTICS */
949
950 /*
951 * Call the worker.
952 */
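 /* pgm.s.idxBothModeData is the precomputed g_aPgmBothModeData index for the
    current shadow+guest paging mode combination; the asserts below guard
    against a stale or uninitialized mode index. */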
953 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
954 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
955 AssertReturn(g_aPgmBothModeData[idxBth].pfnTrap0eHandler, VERR_PGM_MODE_IPE);
956 bool fLockTaken = false;
957 int rc = g_aPgmBothModeData[idxBth].pfnTrap0eHandler(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken);
958 if (fLockTaken)
959 {
960 PGM_LOCK_ASSERT_OWNER(pVM);
961 PGM_UNLOCK(pVM);
962 }
963 LogFlow(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv rc=%Rrc\n", uErr, pvFault, rc));
964
965 /*
966 * Return code tweaks.
967 */
968 if (rc != VINF_SUCCESS)
969 {
970 if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
971 rc = VINF_SUCCESS;
972
 973 /* Note: hack alert for a difficult-to-reproduce problem. */
974 if ( rc == VERR_PAGE_NOT_PRESENT /* SMP only ; disassembly might fail. */
975 || rc == VERR_PAGE_TABLE_NOT_PRESENT /* seen with UNI & SMP */
976 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT /* seen with SMP */
977 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT) /* precaution */
978 {
979 Log(("WARNING: Unexpected VERR_PAGE_TABLE_NOT_PRESENT (%d) for page fault at %RGv error code %x (rip=%RGv)\n", rc, pvFault, uErr, pRegFrame->rip));
980 /* Some kind of inconsistency in the SMP case; it's safe to just execute the instruction again; not sure about single VCPU VMs though. */
981 rc = VINF_SUCCESS;
982 }
983 }
984
985 STAM_STATS({ if (rc == VINF_EM_RAW_GUEST_TRAP) STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eGuestPF); });
986 STAM_STATS({ if (!pVCpu->pgmr0.s.pStatTrap0eAttributionR0)
987 pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = &pVCpu->pgm.s.Stats.StatRZTrap0eTime2Misc; });
988 STAM_PROFILE_STOP_EX(&pVCpu->pgm.s.Stats.StatRZTrap0e, pVCpu->pgmr0.s.pStatTrap0eAttributionR0, a);
989 return rc;
990}
991#endif /* IN_RING0 */
992
993
994/**
995 * Prefetch a page
996 *
997 * Typically used to sync commonly used pages before entering raw mode
998 * after a CR3 reload.
999 *
1000 * @returns VBox status code suitable for scheduling.
1001 * @retval VINF_SUCCESS on success.
1002 * @retval VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
1003 * @param pVCpu The cross context virtual CPU structure.
1004 * @param GCPtrPage Page to invalidate.
1005 */
1006VMMDECL(int) PGMPrefetchPage(PVMCPUCC pVCpu, RTGCPTR GCPtrPage)
1007{
1008 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,Prefetch), a);
1009
1010 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
1011 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
1012 AssertReturn(g_aPgmBothModeData[idxBth].pfnPrefetchPage, VERR_PGM_MODE_IPE);
1013 int rc = g_aPgmBothModeData[idxBth].pfnPrefetchPage(pVCpu, GCPtrPage);
1014
1015 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,Prefetch), a);
1016 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
1017 return rc;
1018}
1019
1020
1021/**
1022 * Emulation of the invlpg instruction (HC only actually).
1023 *
1024 * @returns Strict VBox status code, special care required.
1025 * @retval VINF_PGM_SYNC_CR3 - handled.
1026 * @retval VINF_EM_RAW_EMULATE_INSTR - not handled (RC only).
1027 * @retval VERR_REM_FLUSHED_PAGES_OVERFLOW - not handled.
1028 *
1029 * @param pVCpu The cross context virtual CPU structure.
1030 * @param GCPtrPage Page to invalidate.
1031 *
1032 * @remark ASSUMES the page table entry or page directory is valid. Fairly
1033 * safe, but there could be edge cases!
1034 *
1035 * @todo Flush page or page directory only if necessary!
1036 * @todo VBOXSTRICTRC
1037 */
1038VMMDECL(int) PGMInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtrPage)
1039{
1040 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1041 int rc;
1042 Log3(("PGMInvalidatePage: GCPtrPage=%RGv\n", GCPtrPage));
1043
1044 IEMTlbInvalidatePage(pVCpu, GCPtrPage);
1045
1046 /*
1047 * Call paging mode specific worker.
1048 */
1049 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,InvalidatePage), a);
1050 PGM_LOCK_VOID(pVM);
1051
1052 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
1053 AssertReturnStmt(idxBth < RT_ELEMENTS(g_aPgmBothModeData), PGM_UNLOCK(pVM), VERR_PGM_MODE_IPE);
1054 AssertReturnStmt(g_aPgmBothModeData[idxBth].pfnInvalidatePage, PGM_UNLOCK(pVM), VERR_PGM_MODE_IPE);
1055 rc = g_aPgmBothModeData[idxBth].pfnInvalidatePage(pVCpu, GCPtrPage);
1056
1057 PGM_UNLOCK(pVM);
1058 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,InvalidatePage), a);
1059
1060 /* Ignore all irrelevant error codes. */
1061 if ( rc == VERR_PAGE_NOT_PRESENT
1062 || rc == VERR_PAGE_TABLE_NOT_PRESENT
1063 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT
1064 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)
1065 rc = VINF_SUCCESS;
1066
1067 return rc;
1068}
1069
1070
1071/**
1072 * Executes an instruction using the interpreter.
1073 *
1074 * @returns VBox status code (appropriate for trap handling and GC return).
1075 * @param pVM The cross context VM structure.
1076 * @param pVCpu The cross context virtual CPU structure.
1077 * @param pRegFrame Register frame.
1078 * @param pvFault Fault address.
1079 */
1080VMMDECL(VBOXSTRICTRC) PGMInterpretInstruction(PVMCC pVM, PVMCPUCC pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
1081{
1082 NOREF(pVM);
1083 VBOXSTRICTRC rc = EMInterpretInstruction(pVCpu, pRegFrame, pvFault);
1084 if (rc == VERR_EM_INTERPRETER)
1085 rc = VINF_EM_RAW_EMULATE_INSTR;
1086 if (rc != VINF_SUCCESS)
1087 Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%RGv)\n", VBOXSTRICTRC_VAL(rc), pvFault));
1088 return rc;
1089}
1090
1091
1092/**
1093 * Gets effective page information (from the VMM page directory).
1094 *
1095 * @returns VBox status code.
1096 * @param pVCpu The cross context virtual CPU structure.
1097 * @param GCPtr Guest Context virtual address of the page.
1098 * @param pfFlags Where to store the flags. These are X86_PTE_*.
1099 * @param pHCPhys Where to store the HC physical address of the page.
1100 * This is page aligned.
1101 * @remark You should use PGMMapGetPage() for pages in a mapping.
1102 */
1103VMMDECL(int) PGMShwGetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
1104{
1105 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1106 PGM_LOCK_VOID(pVM);
1107
1108 uintptr_t idxShw = pVCpu->pgm.s.idxShadowModeData;
1109 AssertReturn(idxShw < RT_ELEMENTS(g_aPgmShadowModeData), VERR_PGM_MODE_IPE);
1110 AssertReturn(g_aPgmShadowModeData[idxShw].pfnGetPage, VERR_PGM_MODE_IPE);
1111 int rc = g_aPgmShadowModeData[idxShw].pfnGetPage(pVCpu, GCPtr, pfFlags, pHCPhys);
1112
1113 PGM_UNLOCK(pVM);
1114 return rc;
1115}
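/*
 * Minimal usage sketch for PGMShwGetPage() (hypothetical caller, illustration only):
 *
 *     uint64_t fFlags = 0;
 *     RTHCPHYS HCPhys = NIL_RTHCPHYS;
 *     int rc = PGMShwGetPage(pVCpu, GCPtr, &fFlags, &HCPhys);
 *     if (RT_SUCCESS(rc) && (fFlags & X86_PTE_RW))
 *         ...; // the shadow PTE maps GCPtr writable; HCPhys is the page aligned host physical address.
 */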
1116
1117
1118/**
1119 * Modify page flags for a range of pages in the shadow context.
1120 *
1121 * The existing flags are ANDed with the fMask and ORed with the fFlags.
1122 *
1123 * @returns VBox status code.
1124 * @param pVCpu The cross context virtual CPU structure.
1125 * @param GCPtr Virtual address of the first page in the range.
1126 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
1127 * @param fMask The AND mask - page flags X86_PTE_*.
1128 * Be very CAREFUL when ~'ing constants which could be 32-bit!
1129 * @param fOpFlags A combination of the PGM_MK_PK_XXX flags.
1130 * @remark You must use PGMMapModifyPage() for pages in a mapping.
1131 */
1132DECLINLINE(int) pdmShwModifyPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags)
1133{
1134 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
1135 Assert(!(fOpFlags & ~(PGM_MK_PG_IS_MMIO2 | PGM_MK_PG_IS_WRITE_FAULT)));
1136
1137 GCPtr &= PAGE_BASE_GC_MASK; /** @todo this ain't necessary, right... */
1138
1139 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1140 PGM_LOCK_VOID(pVM);
1141
1142 uintptr_t idxShw = pVCpu->pgm.s.idxShadowModeData;
1143 AssertReturn(idxShw < RT_ELEMENTS(g_aPgmShadowModeData), VERR_PGM_MODE_IPE);
1144 AssertReturn(g_aPgmShadowModeData[idxShw].pfnModifyPage, VERR_PGM_MODE_IPE);
1145 int rc = g_aPgmShadowModeData[idxShw].pfnModifyPage(pVCpu, GCPtr, PAGE_SIZE, fFlags, fMask, fOpFlags);
1146
1147 PGM_UNLOCK(pVM);
1148 return rc;
1149}
1150
1151
1152/**
1153 * Changes the page flags for a single page in the shadow page tables so as to
1154 * make it read-only.
1155 *
1156 * @returns VBox status code.
1157 * @param pVCpu The cross context virtual CPU structure.
1158 * @param GCPtr Virtual address of the first page in the range.
1159 * @param fOpFlags A combination of the PGM_MK_PK_XXX flags.
1160 */
1161VMMDECL(int) PGMShwMakePageReadonly(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
1162{
1163 return pdmShwModifyPage(pVCpu, GCPtr, 0, ~(uint64_t)X86_PTE_RW, fOpFlags);
1164}
1165
1166
1167/**
1168 * Changes the page flags for a single page in the shadow page tables so as to
1169 * make it writable.
1170 *
1171 * The caller must know with 101% certainty that the guest page tables map this
1172 * page as writable too. This function will deal with shared, zero and write
1173 * monitored pages.
1174 *
1175 * @returns VBox status code.
1176 * @param pVCpu The cross context virtual CPU structure.
1177 * @param GCPtr Virtual address of the first page in the range.
1178 * @param fOpFlags A combination of the PGM_MK_PK_XXX flags.
1179 */
1180VMMDECL(int) PGMShwMakePageWritable(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
1181{
1182 if (pVCpu->pgm.s.enmShadowMode != PGMMODE_NONE) /* avoid assertions */
1183 return pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)0, fOpFlags);
1184 return VINF_SUCCESS;
1185}
1186
1187
1188/**
1189 * Changes the page flags for a single page in the shadow page tables so as to
1190 * make it not present.
1191 *
1192 * @returns VBox status code.
1193 * @param pVCpu The cross context virtual CPU structure.
1194 * @param GCPtr Virtual address of the first page in the range.
1195 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
1196 */
1197VMMDECL(int) PGMShwMakePageNotPresent(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
1198{
1199 return pdmShwModifyPage(pVCpu, GCPtr, 0, 0, fOpFlags);
1200}
1201
1202
1203/**
1204 * Changes the page flags for a single page in the shadow page tables so as to
1205 * make it supervisor and writable.
1206 *
1207 * This is for dealing with CR0.WP=0 and read-only user pages.
1208 *
1209 * @returns VBox status code.
1210 * @param pVCpu The cross context virtual CPU structure.
1211 * @param GCPtr Virtual address of the first page in the range.
1212 * @param fBigPage Whether or not this is a big page. If it is, we have to
1213 * change the shadow PDE as well. If it isn't, the caller
1214 * has checked that the shadow PDE doesn't need changing.
1215 * We ASSUME 4KB pages backing the big page here!
1216 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
1217 */
1218int pgmShwMakePageSupervisorAndWritable(PVMCPUCC pVCpu, RTGCPTR GCPtr, bool fBigPage, uint32_t fOpFlags)
1219{
1220 int rc = pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)X86_PTE_US, fOpFlags);
1221 if (rc == VINF_SUCCESS && fBigPage)
1222 {
1223 /* this is a bit ugly... */
1224 switch (pVCpu->pgm.s.enmShadowMode)
1225 {
1226 case PGMMODE_32_BIT:
1227 {
1228 PX86PDE pPde = pgmShwGet32BitPDEPtr(pVCpu, GCPtr);
1229 AssertReturn(pPde, VERR_INTERNAL_ERROR_3);
1230 Log(("pgmShwMakePageSupervisorAndWritable: PDE=%#llx", pPde->u));
1231 pPde->u |= X86_PDE_RW;
1232 Log(("-> PDE=%#llx (32)\n", pPde->u));
1233 break;
1234 }
1235 case PGMMODE_PAE:
1236 case PGMMODE_PAE_NX:
1237 {
1238 PX86PDEPAE pPde = pgmShwGetPaePDEPtr(pVCpu, GCPtr);
1239 AssertReturn(pPde, VERR_INTERNAL_ERROR_3);
1240 Log(("pgmShwMakePageSupervisorAndWritable: PDE=%#llx", pPde->u));
1241 pPde->u |= X86_PDE_RW;
1242 Log(("-> PDE=%#llx (PAE)\n", pPde->u));
1243 break;
1244 }
1245 default:
1246 AssertFailedReturn(VERR_INTERNAL_ERROR_4);
1247 }
1248 }
1249 return rc;
1250}
1251
1252
1253/**
1254 * Gets the shadow page directory for the specified address, PAE.
1255 *
1256 * @returns Pointer to the shadow PD.
1257 * @param pVCpu The cross context virtual CPU structure.
1258 * @param GCPtr The address.
1259 * @param uGstPdpe Guest PDPT entry. Valid.
1260 * @param ppPD Receives address of page directory
1261 */
1262int pgmShwSyncPaePDPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
1263{
1264 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1265 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1266 PPGMPOOLPAGE pShwPage;
1267 int rc;
1268 PGM_LOCK_ASSERT_OWNER(pVM);
1269
1270
1271 /* Allocate page directory if not present. */
1272 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
1273 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
1274 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1275 X86PGPAEUINT const uPdpe = pPdpe->u;
1276 if (uPdpe & (X86_PDPE_P | X86_PDPE_PG_MASK))
1277 {
1278 pShwPage = pgmPoolGetPage(pPool, uPdpe & X86_PDPE_PG_MASK);
1279 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1280 Assert((pPdpe->u & X86_PDPE_PG_MASK) == pShwPage->Core.Key);
1281
1282 pgmPoolCacheUsed(pPool, pShwPage);
1283
1284 /* Update the entry if necessary. */
1285 X86PGPAEUINT const uPdpeNew = pShwPage->Core.Key | (uGstPdpe & (X86_PDPE_P | X86_PDPE_A)) | (uPdpe & PGM_PDPT_FLAGS);
1286 if (uPdpeNew == uPdpe)
1287 { /* likely */ }
1288 else
1289 ASMAtomicWriteU64(&pPdpe->u, uPdpeNew);
1290 }
1291 else
1292 {
1293 RTGCPTR64 GCPdPt;
1294 PGMPOOLKIND enmKind;
1295 if (pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu))
1296 {
1297 /* AMD-V nested paging or real/protected mode without paging. */
1298 GCPdPt = GCPtr & ~(RT_BIT_64(X86_PDPT_SHIFT) - 1);
1299 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
1300 }
1301 else if (CPUMGetGuestCR4(pVCpu) & X86_CR4_PAE)
1302 {
1303 if (uGstPdpe & X86_PDPE_P)
1304 {
1305 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1306 enmKind = PGMPOOLKIND_PAE_PD_FOR_PAE_PD;
1307 }
1308 else
1309 {
1310 /* PD not present; guest must reload CR3 to change it.
1311 * No need to monitor anything in this case. */
1312 /** @todo r=bird: WTF is hit?!? */
1313 Assert(VM_IS_RAW_MODE_ENABLED(pVM));
1314 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1315 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
1316 Assert(uGstPdpe & X86_PDPE_P); /* caller should do this already */
1317 }
1318 }
1319 else
1320 {
1321 GCPdPt = CPUMGetGuestCR3(pVCpu);
1322 enmKind = (PGMPOOLKIND)(PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD + iPdPt);
1323 }
1324
1325 /* Create a reference back to the PDPT by using the index in its shadow page. */
1326 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1327 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPdPt, false /*fLockPage*/,
1328 &pShwPage);
1329 AssertRCReturn(rc, rc);
1330
1331 /* Hook it up. */
1332 ASMAtomicWriteU64(&pPdpe->u, pShwPage->Core.Key | (uGstPdpe & (X86_PDPE_P | X86_PDPE_A)) | (uPdpe & PGM_PDPT_FLAGS));
1333 }
1334 PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdpe);
1335
1336 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1337 return VINF_SUCCESS;
1338}
1339
1340
1341/**
1342 * Gets the pointer to the shadow page directory entry for an address, PAE.
1343 *
1344 * @returns Pointer to the PDE.
1345 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1346 * @param GCPtr The address.
1347 * @param ppShwPde Receives the address of the pgm pool page for the shadow page directory
1348 */
1349DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde)
1350{
1351 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1352 PGM_LOCK_ASSERT_OWNER(pVM);
1353
1354 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
1355 AssertReturn(pPdpt, VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT); /* can't happen */
1356 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
1357 X86PGPAEUINT const uPdpe = pPdpt->a[iPdPt].u;
1358 if (!(uPdpe & X86_PDPE_P))
1359 {
1360 LogFlow(("pgmShwGetPaePoolPagePD: PD %d not present (%RX64)\n", iPdPt, uPdpe));
1361 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1362 }
1363 AssertMsg(uPdpe & X86_PDPE_PG_MASK, ("GCPtr=%RGv\n", GCPtr));
1364
1365 /* Fetch the pgm pool shadow descriptor. */
1366 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), uPdpe & X86_PDPE_PG_MASK);
1367 AssertReturn(pShwPde, VERR_PGM_POOL_GET_PAGE_FAILED);
1368
1369 *ppShwPde = pShwPde;
1370 return VINF_SUCCESS;
1371}
1372
1373
1374/**
1375 * Syncs the SHADOW page directory pointer for the specified address.
1376 *
1377 * Allocates backing pages in case the PDPT or PML4 entry is missing.
1378 *
1379 * The caller is responsible for making sure the guest has a valid PD before
1380 * calling this function.
1381 *
1382 * @returns VBox status code.
1383 * @param pVCpu The cross context virtual CPU structure.
1384 * @param GCPtr The address.
1385 * @param uGstPml4e Guest PML4 entry (valid).
1386 * @param uGstPdpe Guest PDPT entry (valid).
1387 * @param ppPD Receives address of page directory
1388 */
1389static int pgmShwSyncLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
1390{
1391 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1392 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1393 bool const fNestedPagingOrNoGstPaging = pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu);
1394 int rc;
1395
1396 PGM_LOCK_ASSERT_OWNER(pVM);
1397
1398 /*
1399 * PML4.
1400 */
1401 PPGMPOOLPAGE pShwPage;
1402 {
1403 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1404 PX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
1405 X86PGPAEUINT const uPml4e = pPml4e->u;
1406
1407 /* Allocate page directory pointer table if not present. */
1408 if (uPml4e & (X86_PML4E_P | X86_PML4E_PG_MASK))
1409 {
1410 pShwPage = pgmPoolGetPage(pPool, uPml4e & X86_PML4E_PG_MASK);
1411 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1412
1413 pgmPoolCacheUsed(pPool, pShwPage);
1414
1415 /* Update the entry if needed. */
1416 X86PGPAEUINT const uPml4eNew = pShwPage->Core.Key | (uGstPml4e & pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask)
1417 | (uPml4e & PGM_PML4_FLAGS);
1418 if (uPml4e == uPml4eNew)
1419 { /* likely */ }
1420 else
1421 ASMAtomicWriteU64(&pPml4e->u, uPml4eNew);
1422 }
1423 else
1424 {
1425 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1426
1427 RTGCPTR64 GCPml4;
1428 PGMPOOLKIND enmKind;
1429 if (fNestedPagingOrNoGstPaging)
1430 {
1431 /* AMD-V nested paging or real/protected mode without paging */
1432 GCPml4 = (RTGCPTR64)iPml4 << X86_PML4_SHIFT; /** @todo bogus calculation for PML5 */
1433 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_PHYS;
1434 }
1435 else
1436 {
1437 GCPml4 = uGstPml4e & X86_PML4E_PG_MASK;
1438 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT;
1439 }
1440
1441 /* Create a reference back to the PDPT by using the index in its shadow page. */
1442 rc = pgmPoolAlloc(pVM, GCPml4, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1443 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, false /*fLockPage*/,
1444 &pShwPage);
1445 AssertRCReturn(rc, rc);
1446
1447 /* Hook it up. */
1448 ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | (uGstPml4e & pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask)
1449 | (uPml4e & PGM_PML4_FLAGS));
1450 }
1451 }
1452
1453 /*
1454 * PDPT.
1455 */
1456 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1457 PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1458 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1459 X86PGPAEUINT const uPdpe = pPdpe->u;
1460
1461 /* Allocate page directory if not present. */
1462 if (uPdpe & (X86_PDPE_P | X86_PDPE_PG_MASK))
1463 {
1464 pShwPage = pgmPoolGetPage(pPool, uPdpe & X86_PDPE_PG_MASK);
1465 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1466
1467 pgmPoolCacheUsed(pPool, pShwPage);
1468
1469 /* Update the entry if needed. */
1470 X86PGPAEUINT const uPdpeNew = pShwPage->Core.Key | (uGstPdpe & pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask)
1471 | (uPdpe & PGM_PDPT_FLAGS);
1472 if (uPdpe == uPdpeNew)
1473 { /* likely */ }
1474 else
1475 ASMAtomicWriteU64(&pPdpe->u, uPdpeNew);
1476 }
1477 else
1478 {
1479 RTGCPTR64 GCPdPt;
1480 PGMPOOLKIND enmKind;
1481 if (fNestedPagingOrNoGstPaging)
1482 {
1483 /* AMD-V nested paging or real/protected mode without paging */
1484 GCPdPt = GCPtr & ~(RT_BIT_64(iPdPt << X86_PDPT_SHIFT) - 1);
1485 enmKind = PGMPOOLKIND_64BIT_PD_FOR_PHYS;
1486 }
1487 else
1488 {
1489 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1490 enmKind = PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD;
1491 }
1492
1493 /* Create a reference back to the PDPT by using the index in its shadow page. */
1494 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1495 pShwPage->idx, iPdPt, false /*fLockPage*/,
1496 &pShwPage);
1497 AssertRCReturn(rc, rc);
1498
1499 /* Hook it up. */
1500 ASMAtomicWriteU64(&pPdpe->u,
1501 pShwPage->Core.Key | (uGstPdpe & pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask) | (uPdpe & PGM_PDPT_FLAGS));
1502 }
1503
1504 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1505 return VINF_SUCCESS;
1506}
1507
1508
1509/**
1510 * Gets the SHADOW page directory pointer for the specified address (long mode).
1511 *
1512 * @returns VBox status code.
1513 * @param pVCpu The cross context virtual CPU structure.
1514 * @param GCPtr The address.
1515 * @param ppPml4e Receives the address of the page map level 4 entry.
1516 * @param ppPdpt Receives the address of the page directory pointer table.
1517 * @param ppPD Receives the address of the page directory.
1518 */
1519DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
1520{
1521 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1522 PGM_LOCK_ASSERT_OWNER(pVM);
1523
1524 /*
1525 * PML4
1526 */
1527 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1528 PCX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
1529 AssertReturn(pPml4e, VERR_PGM_PML4_MAPPING);
1530 if (ppPml4e)
1531 *ppPml4e = (PX86PML4E)pPml4e;
1532 X86PGPAEUINT const uPml4e = pPml4e->u;
1533 Log4(("pgmShwGetLongModePDPtr %RGv (%RHv) %RX64\n", GCPtr, pPml4e, uPml4e));
1534 if (!(uPml4e & X86_PML4E_P)) /** @todo other code checks for NULL page frame number! */
1535 return VERR_PAGE_MAP_LEVEL4_NOT_PRESENT;
1536
1537 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1538 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, uPml4e & X86_PML4E_PG_MASK);
1539 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1540
1541 /*
1542 * PDPT
1543 */
1544 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1545 PCX86PDPT pPdpt = *ppPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1546 X86PGPAEUINT const uPdpe = pPdpt->a[iPdPt].u;
1547 if (!(uPdpe & X86_PDPE_P)) /** @todo other code checks for NULL page frame number! */
1548 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1549
1550 pShwPage = pgmPoolGetPage(pPool, uPdpe & X86_PDPE_PG_MASK);
1551 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1552
1553 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1554 Log4(("pgmShwGetLongModePDPtr %RGv -> *ppPD=%p PDE=%p/%RX64\n", GCPtr, *ppPD, &(*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK], (*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK].u));
1555 return VINF_SUCCESS;
1556}
1557
1558
1559/**
1560 * Syncs the SHADOW EPT page directory pointer for the specified address. Allocates
1561 * backing pages in case the PDPT or PML4 entry is missing.
1562 *
1563 * @returns VBox status code.
1564 * @param pVCpu The cross context virtual CPU structure.
1565 * @param GCPtr The address.
1566 * @param ppPdpt Receives the address of the page directory pointer table.
1567 * @param ppPD Receives the address of the page directory.
1568 */
1569static int pgmShwGetEPTPDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD)
1570{
1571 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1572 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1573 int rc;
1574
1575 Assert(pVM->pgm.s.fNestedPaging);
1576 PGM_LOCK_ASSERT_OWNER(pVM);
1577
1578 /*
1579 * PML4 level.
1580 */
1581 PEPTPML4 pPml4 = (PEPTPML4)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1582 Assert(pPml4);
1583
1584 /* Allocate page directory pointer table if not present. */
1585 PPGMPOOLPAGE pShwPage;
1586 {
1587 const unsigned iPml4 = (GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
1588 PEPTPML4E pPml4e = &pPml4->a[iPml4];
1589 EPTPML4E Pml4e;
1590 Pml4e.u = pPml4e->u;
1591 if (!(Pml4e.u & (EPT_E_PG_MASK | EPT_E_READ)))
1592 {
1593 RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT;
1594 rc = pgmPoolAlloc(pVM, GCPml4, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1595 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, false /*fLockPage*/,
1596 &pShwPage);
1597 AssertRCReturn(rc, rc);
1598
1599 /* Hook up the new PDPT now. */
1600 ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1601 }
1602 else
1603 {
1604 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
1605 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1606
1607 pgmPoolCacheUsed(pPool, pShwPage);
1608
1609 /* Hook up the cached PDPT if needed (probably not given 512*512 PTs to sync). */
1610 if (Pml4e.u == (pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE))
1611 { }
1612 else
1613 ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1614 }
1615 }
1616
1617 /*
1618 * PDPT level.
1619 */
1620 const unsigned iPdPt = (GCPtr >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
1621 PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1622 PEPTPDPTE pPdpe = &pPdpt->a[iPdPt];
1623
1624 if (ppPdpt)
1625 *ppPdpt = pPdpt;
1626
1627 /* Allocate page directory if not present. */
1628 EPTPDPTE Pdpe;
1629 Pdpe.u = pPdpe->u;
1630 if (!(Pdpe.u & (EPT_E_PG_MASK | EPT_E_READ)))
1631 {
1632 RTGCPTR64 const GCPdPt = GCPtr & ~(RT_BIT_64(EPT_PDPT_SHIFT) - 1);
1633 rc = pgmPoolAlloc(pVM, GCPdPt, PGMPOOLKIND_EPT_PD_FOR_PHYS, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1634 pShwPage->idx, iPdPt, false /*fLockPage*/,
1635 &pShwPage);
1636 AssertRCReturn(rc, rc);
1637
1638 /* Hook up the new PD now. */
1639 ASMAtomicWriteU64(&pPdpe->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1640 }
1641 else
1642 {
1643 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & EPT_PDPTE_PG_MASK);
1644 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1645
1646 pgmPoolCacheUsed(pPool, pShwPage);
1647
1648 /* Hook up the cached PD if needed (probably not given there are 512 PTs we may need to sync). */
1649 if (Pdpe.u == (pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE))
1650 { }
1651 else
1652 ASMAtomicWriteU64(&pPdpe->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1653 }
1654
1655 *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1656 return VINF_SUCCESS;
1657}
1658
1659
1660#ifdef IN_RING0
1661/**
1662 * Synchronizes a range of nested page table entries.
1663 *
1664 * The caller must own the PGM lock.
1665 *
1666 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1667 * @param GCPhys Where to start.
1668 * @param cPages How many pages which entries should be synced.
1669 * @param enmShwPagingMode The shadow paging mode (PGMMODE_EPT for VT-x,
1670 * host paging mode for AMD-V).
1671 */
1672int pgmShwSyncNestedPageLocked(PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint32_t cPages, PGMMODE enmShwPagingMode)
1673{
1674 PGM_LOCK_ASSERT_OWNER(pVCpu->CTX_SUFF(pVM));
1675
1676/** @todo r=bird: Gotta love this nested paging hacking we're still carrying with us... (Split PGM_TYPE_NESTED.) */
1677 int rc;
1678 switch (enmShwPagingMode)
1679 {
1680 case PGMMODE_32_BIT:
1681 {
1682 X86PDE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1683 rc = PGM_BTH_NAME_32BIT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1684 break;
1685 }
1686
1687 case PGMMODE_PAE:
1688 case PGMMODE_PAE_NX:
1689 {
1690 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1691 rc = PGM_BTH_NAME_PAE_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1692 break;
1693 }
1694
1695 case PGMMODE_AMD64:
1696 case PGMMODE_AMD64_NX:
1697 {
1698 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1699 rc = PGM_BTH_NAME_AMD64_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1700 break;
1701 }
1702
1703 case PGMMODE_EPT:
1704 {
1705 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1706 rc = PGM_BTH_NAME_EPT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1707 break;
1708 }
1709
1710 default:
1711 AssertMsgFailedReturn(("%d\n", enmShwPagingMode), VERR_IPE_NOT_REACHED_DEFAULT_CASE);
1712 }
1713 return rc;
1714}
1715#endif /* IN_RING0 */
1716
1717
1718/**
1719 * Gets effective Guest OS page information.
1720 *
1721 * When GCPtr is in a big page, the function will return as if it was a normal
1722 * 4KB page. If the need to distinguish between big and normal pages arises
1723 * at a later point, a dedicated variant of PGMGstGetPage() will be created
1724 * for that purpose.
1725 *
1726 * @returns VBox status code.
1727 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1728 * @param GCPtr Guest Context virtual address of the page.
1729 * @param pWalk Where to store the page walk information.
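 *
 * A minimal usage sketch (illustrative only; assumes the caller is on the EMT
 * and that GCPtr is a guest linear address):
 * @code
 *     PGMPTWALK Walk;
 *     int rc = PGMGstGetPage(pVCpu, GCPtr, &Walk);
 *     if (RT_SUCCESS(rc))
 *         Log(("GCPtr=%RGv -> GCPhys=%RGp\n", GCPtr, Walk.GCPhys));
 * @endcode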
1730 */
1731VMMDECL(int) PGMGstGetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk)
1732{
1733 VMCPU_ASSERT_EMT(pVCpu);
1734 Assert(pWalk);
1735 uintptr_t idx = pVCpu->pgm.s.idxGuestModeData;
1736 AssertReturn(idx < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
1737 AssertReturn(g_aPgmGuestModeData[idx].pfnGetPage, VERR_PGM_MODE_IPE);
1738 return g_aPgmGuestModeData[idx].pfnGetPage(pVCpu, GCPtr, pWalk);
1739}
1740
1741
1742/**
1743 * Maps the guest CR3.
1744 *
1745 * @returns VBox status code.
1746 * @param pVCpu The cross context virtual CPU structure.
1747 * @param GCPhysCr3 The guest CR3 value.
1748 * @param pHCPtrGuestCr3 Where to store the mapped memory.
1749 */
1750DECLINLINE(int) pgmGstMapCr3(PVMCPUCC pVCpu, RTGCPHYS GCPhysCr3, PRTHCPTR pHCPtrGuestCr3)
1751{
1752 /** @todo this needs some reworking wrt. locking? */
1753 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1754 PGM_LOCK_VOID(pVM);
1755 PPGMPAGE pPageCr3 = pgmPhysGetPage(pVM, GCPhysCr3);
1756 AssertReturnStmt(pPageCr3, PGM_UNLOCK(pVM), VERR_PGM_INVALID_CR3_ADDR);
1757
1758 RTHCPTR HCPtrGuestCr3;
1759 int rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPageCr3, GCPhysCr3, (void **)&HCPtrGuestCr3);
1760 PGM_UNLOCK(pVM);
1761
1762 *pHCPtrGuestCr3 = HCPtrGuestCr3;
1763 return rc;
1764}
1765
1766
1767/**
1768 * Unmaps the guest CR3.
1769 *
1770 * @returns VBox status code.
1771 * @param pVCpu The cross context virtual CPU structure.
1772 */
1773DECLINLINE(int) pgmGstUnmapCr3(PVMCPUCC pVCpu)
1774{
1775 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
1776 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
1777 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
1778 return g_aPgmBothModeData[idxBth].pfnUnmapCR3(pVCpu);
1779}
1780
1781
1782/**
1783 * Performs a guest page table walk.
1784 *
1785 * The guest should be in paged protect mode or long mode when making a call to
1786 * this function.
1787 *
1788 * @returns VBox status code.
1789 * @retval VINF_SUCCESS on success.
1790 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
1791 * @retval VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
1792 * not valid, except enmType is PGMPTWALKGSTTYPE_INVALID.
1793 *
1794 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1795 * @param GCPtr The guest virtual address to walk by.
1796 * @param pWalk Where to return the walk result. This is valid for some
1797 * error codes as well.
1798 * @param pGstWalk The guest mode specific page walk information.
1799 */
1800int pgmGstPtWalk(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
1801{
1802 VMCPU_ASSERT_EMT(pVCpu);
1803 switch (pVCpu->pgm.s.enmGuestMode)
1804 {
1805 case PGMMODE_32_BIT:
1806 pGstWalk->enmType = PGMPTWALKGSTTYPE_32BIT;
1807 return PGM_GST_NAME_32BIT(Walk)(pVCpu, GCPtr, pWalk, &pGstWalk->u.Legacy);
1808
1809 case PGMMODE_PAE:
1810 case PGMMODE_PAE_NX:
1811 pGstWalk->enmType = PGMPTWALKGSTTYPE_PAE;
1812 return PGM_GST_NAME_PAE(Walk)(pVCpu, GCPtr, pWalk, &pGstWalk->u.Pae);
1813
1814 case PGMMODE_AMD64:
1815 case PGMMODE_AMD64_NX:
1816 pGstWalk->enmType = PGMPTWALKGSTTYPE_AMD64;
1817 return PGM_GST_NAME_AMD64(Walk)(pVCpu, GCPtr, pWalk, &pGstWalk->u.Amd64);
1818
1819 case PGMMODE_REAL:
1820 case PGMMODE_PROTECTED:
1821 pGstWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
1822 return VERR_PGM_NOT_USED_IN_MODE;
1823
1824 case PGMMODE_EPT:
1825 case PGMMODE_NESTED_32BIT:
1826 case PGMMODE_NESTED_PAE:
1827 case PGMMODE_NESTED_AMD64:
1828 default:
1829 AssertFailed();
1830 pGstWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
1831 return VERR_PGM_NOT_USED_IN_MODE;
1832 }
1833}
1834
1835
1836#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1837/**
1838 * Performs a guest second-level address translation (SLAT).
1839 *
1840 * The guest paging mode must be 32-bit, PAE or AMD64 when making a call to this
1841 * function.
1842 *
1843 * @returns VBox status code.
1844 * @retval VINF_SUCCESS on success.
1845 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
1846 * @retval VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
1847 * not valid, except enmType is PGMPTWALKGSTTYPE_INVALID.
1848 *
1849 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1850 * @param GCPhysNested The nested-guest physical address being translated
1851 * (input).
1852 * @param fIsLinearAddrValid Whether the linear address in @a GCPtrNested is
1853 * valid. This indicates the SLAT is caused when
1854 * translating a nested-guest linear address.
1855 * @param GCPtrNested The nested-guest virtual address that initiated the
1856 * SLAT. If none, pass NIL_RTGCPTR.
1857 * @param pWalk Where to return the walk result. This is valid for
1858 * some error codes as well.
1859 * @param pGstWalk The second-level paging-mode specific walk
1860 * information.
1861 */
1862static int pgmGstSlatWalk(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, bool fIsLinearAddrValid, RTGCPTR GCPtrNested,
1863 PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
1864{
1865 /* SLAT mode must be valid at this point as this should only be used -after- we have determined SLAT mode. */
1866 Assert( pVCpu->pgm.s.enmGuestSlatMode != PGMSLAT_DIRECT
1867 && pVCpu->pgm.s.enmGuestSlatMode != PGMSLAT_INVALID);
1868 switch (pVCpu->pgm.s.enmGuestSlatMode)
1869 {
1870 case PGMSLAT_EPT:
1871 pGstWalk->enmType = PGMPTWALKGSTTYPE_EPT;
1872 return PGM_GST_SLAT_NAME_EPT(Walk)(pVCpu, GCPhysNested, fIsLinearAddrValid, GCPtrNested, pWalk, &pGstWalk->u.Ept);
1873
1874 default:
1875 AssertFailed();
1876 pGstWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
1877 return VERR_PGM_NOT_USED_IN_MODE;
1878 }
1879}
1880
1881
1882/**
1883 * Performs a guest second-level address translation (SLAT) for a nested-guest
1884 * physical address.
1885 *
1886 * This version requires the SLAT mode to be provided by the caller because we could
1887 * be in the process of switching paging modes (MOV CRX) and cannot presume control
1888 * register values.
1889 *
1890 * @returns VBox status code.
1891 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1892 * @param enmSlatMode The second-level paging mode to use.
1893 * @param GCPhysNested The nested-guest physical address to translate.
1894 * @param pWalk Where to store the walk result.
1895 * @param pGstWalk Where to store the second-level paging-mode specific
1896 * walk information.
1897 */
1898static int pgmGstSlatWalkPhys(PVMCPUCC pVCpu, PGMSLAT enmSlatMode, RTGCPHYS GCPhysNested, PPGMPTWALK pWalk,
1899 PPGMPTWALKGST pGstWalk)
1900{
1901 AssertPtr(pWalk);
1902 AssertPtr(pGstWalk);
1903 switch (enmSlatMode)
1904 {
1905 case PGMSLAT_EPT:
1906 pGstWalk->enmType = PGMPTWALKGSTTYPE_EPT;
1907 return PGM_GST_SLAT_NAME_EPT(Walk)(pVCpu, GCPhysNested, false /* fIsLinearaddrValid */, NIL_RTGCPTR, pWalk,
1908 &pGstWalk->u.Ept);
1909
1910 default:
1911 AssertFailed();
1912 return VERR_PGM_NOT_USED_IN_MODE;
1913 }
1914}
1915#endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
1916
1917
1918/**
1919 * Tries to continue the previous walk.
1920 *
1921 * @note Requires the caller to hold the PGM lock from the first
1922 * pgmGstPtWalk() call to the last pgmGstPtWalkNext() call. Otherwise
1923 * we cannot use the pointers.
1924 *
1925 * @returns VBox status code.
1926 * @retval VINF_SUCCESS on success.
1927 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
1928 * @retval VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
1929 * not valid, except enmType is PGMPTWALKGSTTYPE_INVALID.
1930 *
1931 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1932 * @param GCPtr The guest virtual address to walk by.
1933 * @param pWalk Pointer to the previous walk result and where to return
1934 * the result of this walk. This is valid for some error
1935 * codes as well.
1936 * @param pGstWalk The guest-mode specific walk information.
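 *
 * Rough sketch of the walk / walk-next pairing (illustrative only; the PGM
 * lock must be held across both calls as noted above):
 * @code
 *     PGM_LOCK_VOID(pVM);
 *     PGMPTWALK    Walk;
 *     PGMPTWALKGST GstWalk;
 *     int rc = pgmGstPtWalk(pVCpu, GCPtr, &Walk, &GstWalk);
 *     if (RT_SUCCESS(rc))
 *         rc = pgmGstPtWalkNext(pVCpu, GCPtr + PAGE_SIZE, &Walk, &GstWalk);
 *     PGM_UNLOCK(pVM);
 * @endcode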
1937 */
1938int pgmGstPtWalkNext(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
1939{
1940 /*
1941 * We can only handle successful walks.
1942 * We also limit ourselves to the next page.
1943 */
1944 if ( pWalk->fSucceeded
1945 && GCPtr - pWalk->GCPtr == PAGE_SIZE)
1946 {
1947 Assert(pWalk->uLevel == 0);
1948 if (pGstWalk->enmType == PGMPTWALKGSTTYPE_AMD64)
1949 {
1950 /*
1951 * AMD64
1952 */
1953 if (!pWalk->fGigantPage && !pWalk->fBigPage)
1954 {
1955 /*
1956 * We fall back to full walk if the PDE table changes, if any
1957 * reserved bits are set, or if the effective page access changes.
1958 */
1959 const uint64_t fPteSame = X86_PTE_P | X86_PTE_RW | X86_PTE_US | X86_PTE_PWT
1960 | X86_PTE_PCD | X86_PTE_A | X86_PTE_PAE_NX;
1961 const uint64_t fPdeSame = X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_PWT
1962 | X86_PDE_PCD | X86_PDE_A | X86_PDE_PAE_NX | X86_PDE_PS;
1963
1964 if ((GCPtr >> X86_PD_PAE_SHIFT) == (pWalk->GCPtr >> X86_PD_PAE_SHIFT))
1965 {
1966 if (pGstWalk->u.Amd64.pPte)
1967 {
1968 X86PTEPAE Pte;
1969 Pte.u = pGstWalk->u.Amd64.pPte[1].u;
1970 if ( (Pte.u & fPteSame) == (pGstWalk->u.Amd64.Pte.u & fPteSame)
1971 && !(Pte.u & (pVCpu)->pgm.s.fGstAmd64MbzPteMask))
1972 {
1973 pWalk->GCPtr = GCPtr;
1974 pWalk->GCPhys = Pte.u & X86_PTE_PAE_PG_MASK;
1975 pGstWalk->u.Amd64.Pte.u = Pte.u;
1976 pGstWalk->u.Amd64.pPte++;
1977 return VINF_SUCCESS;
1978 }
1979 }
1980 }
1981 else if ((GCPtr >> X86_PDPT_SHIFT) == (pWalk->GCPtr >> X86_PDPT_SHIFT))
1982 {
1983 Assert(!((GCPtr >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK)); /* Must be first PT entry. */
1984 if (pGstWalk->u.Amd64.pPde)
1985 {
1986 X86PDEPAE Pde;
1987 Pde.u = pGstWalk->u.Amd64.pPde[1].u;
1988 if ( (Pde.u & fPdeSame) == (pGstWalk->u.Amd64.Pde.u & fPdeSame)
1989 && !(Pde.u & (pVCpu)->pgm.s.fGstAmd64MbzPdeMask))
1990 {
1991 /* Get the new PTE and check out the first entry. */
1992 int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, PGM_A20_APPLY(pVCpu, (Pde.u & X86_PDE_PAE_PG_MASK)),
1993 &pGstWalk->u.Amd64.pPt);
1994 if (RT_SUCCESS(rc))
1995 {
1996 pGstWalk->u.Amd64.pPte = &pGstWalk->u.Amd64.pPt->a[0];
1997 X86PTEPAE Pte;
1998 Pte.u = pGstWalk->u.Amd64.pPte->u;
1999 if ( (Pte.u & fPteSame) == (pGstWalk->u.Amd64.Pte.u & fPteSame)
2000 && !(Pte.u & (pVCpu)->pgm.s.fGstAmd64MbzPteMask))
2001 {
2002 pWalk->GCPtr = GCPtr;
2003 pWalk->GCPhys = Pte.u & X86_PTE_PAE_PG_MASK;
2004 pGstWalk->u.Amd64.Pte.u = Pte.u;
2005 pGstWalk->u.Amd64.Pde.u = Pde.u;
2006 pGstWalk->u.Amd64.pPde++;
2007 return VINF_SUCCESS;
2008 }
2009 }
2010 }
2011 }
2012 }
2013 }
2014 else if (!pWalk->fGigantPage)
2015 {
2016 if ((GCPtr & X86_PAGE_2M_BASE_MASK) == (pWalk->GCPtr & X86_PAGE_2M_BASE_MASK))
2017 {
2018 pWalk->GCPtr = GCPtr;
2019 pWalk->GCPhys += PAGE_SIZE;
2020 return VINF_SUCCESS;
2021 }
2022 }
2023 else
2024 {
2025 if ((GCPtr & X86_PAGE_1G_BASE_MASK) == (pWalk->GCPtr & X86_PAGE_1G_BASE_MASK))
2026 {
2027 pWalk->GCPtr = GCPtr;
2028 pWalk->GCPhys += PAGE_SIZE;
2029 return VINF_SUCCESS;
2030 }
2031 }
2032 }
2033 }
2034 /* Case we don't handle. Do full walk. */
2035 return pgmGstPtWalk(pVCpu, GCPtr, pWalk, pGstWalk);
2036}
2037
2038
2039/**
2040 * Sets (replaces) the page flags for a range of pages in the guest's tables.
2041 *
2042 * @returns VBox status code.
2043 * @param pVCpu The cross context virtual CPU structure.
2044 * @param GCPtr The address of the first page.
2045 * @param cb The size of the range in bytes.
2046 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
2047 */
2048VMMDECL(int) PGMGstSetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
2049{
2050 VMCPU_ASSERT_EMT(pVCpu);
2051 return PGMGstModifyPage(pVCpu, GCPtr, cb, fFlags, 0);
2052}
2053
2054
2055/**
2056 * Modify page flags for a range of pages in the guest's tables
2057 *
2058 * The existing flags are ANDed with the fMask and ORed with the fFlags.
2059 *
2060 * @returns VBox status code.
2061 * @param pVCpu The cross context virtual CPU structure.
2062 * @param GCPtr Virtual address of the first page in the range.
2063 * @param cb Size (in bytes) of the range to apply the modification to.
2064 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
2065 * @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
2066 * Be very CAREFUL when ~'ing constants which could be 32-bit!
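 *
 * Illustrative sketch only: clearing the R/W bit on a single page would look
 * like this (note the 64-bit cast before the ~, as warned above):
 * @code
 *     int rc = PGMGstModifyPage(pVCpu, GCPtr, 1, 0 /*fFlags*/, ~(uint64_t)X86_PTE_RW /*fMask*/);
 * @endcode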
2067 */
2068VMMDECL(int) PGMGstModifyPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
2069{
2070 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,GstModifyPage), a);
2071 VMCPU_ASSERT_EMT(pVCpu);
2072
2073 /*
2074 * Validate input.
2075 */
2076 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
2077 Assert(cb);
2078
2079 LogFlow(("PGMGstModifyPage %RGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));
2080
2081 /*
2082 * Adjust input.
2083 */
2084 cb += GCPtr & PAGE_OFFSET_MASK;
2085 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
2086 GCPtr = (GCPtr & PAGE_BASE_GC_MASK);
2087
2088 /*
2089 * Call worker.
2090 */
2091 uintptr_t idx = pVCpu->pgm.s.idxGuestModeData;
2092 AssertReturn(idx < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
2093 AssertReturn(g_aPgmGuestModeData[idx].pfnModifyPage, VERR_PGM_MODE_IPE);
2094 int rc = g_aPgmGuestModeData[idx].pfnModifyPage(pVCpu, GCPtr, cb, fFlags, fMask);
2095
2096 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,GstModifyPage), a);
2097 return rc;
2098}
2099
2100
2101/**
2102 * Checks whether the given PAE PDPEs are potentially valid for the guest.
2103 *
2104 * @returns @c true if the PDPEs are valid, @c false otherwise.
2105 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2106 * @param paPaePdpes The PAE PDPEs to validate.
2107 *
2108 * @remarks This function -only- checks the reserved bits in the PDPE entries.
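 *
 * A usage sketch mirroring PGMGstMapPaePdpesAtCr3 below (illustrative only):
 * @code
 *     X86PDPE aPaePdpes[X86_PG_PAE_PDPE_ENTRIES];
 *     // ... read the four PDPEs from the guest PDPT referenced by CR3 ...
 *     if (!PGMGstArePaePdpesValid(pVCpu, &aPaePdpes[0]))
 *         return VERR_PGM_PAE_PDPE_RSVD;
 * @endcode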
2109 */
2110VMM_INT_DECL(bool) PGMGstArePaePdpesValid(PVMCPUCC pVCpu, PCX86PDPE paPaePdpes)
2111{
2112 Assert(paPaePdpes);
2113 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
2114 {
2115 X86PDPE const PaePdpe = paPaePdpes[i];
2116 if ( !(PaePdpe.u & X86_PDPE_P)
2117 || !(PaePdpe.u & pVCpu->pgm.s.fGstPaeMbzPdpeMask))
2118 { /* likely */ }
2119 else
2120 return false;
2121 }
2122 return true;
2123}
2124
2125
2126/**
2127 * Performs the lazy mapping of the 32-bit guest PD.
2128 *
2129 * @returns VBox status code.
2130 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2131 * @param ppPd Where to return the pointer to the mapping. This is
2132 * always set.
2133 */
2134int pgmGstLazyMap32BitPD(PVMCPUCC pVCpu, PX86PD *ppPd)
2135{
2136 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2137 PGM_LOCK_VOID(pVM);
2138
2139 Assert(!pVCpu->pgm.s.CTX_SUFF(pGst32BitPd));
2140
2141 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAGE_MASK;
2142 PPGMPAGE pPage;
2143 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
2144 if (RT_SUCCESS(rc))
2145 {
2146 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)ppPd);
2147 if (RT_SUCCESS(rc))
2148 {
2149# ifdef IN_RING3
2150 pVCpu->pgm.s.pGst32BitPdR0 = NIL_RTR0PTR;
2151 pVCpu->pgm.s.pGst32BitPdR3 = *ppPd;
2152# else
2153 pVCpu->pgm.s.pGst32BitPdR3 = NIL_RTR0PTR;
2154 pVCpu->pgm.s.pGst32BitPdR0 = *ppPd;
2155# endif
2156 PGM_UNLOCK(pVM);
2157 return VINF_SUCCESS;
2158 }
2159 AssertRC(rc);
2160 }
2161 PGM_UNLOCK(pVM);
2162
2163 *ppPd = NULL;
2164 return rc;
2165}
2166
2167
2168/**
2169 * Performs the lazy mapping of the PAE guest PDPT.
2170 *
2171 * @returns VBox status code.
2172 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2173 * @param ppPdpt Where to return the pointer to the mapping. This is
2174 * always set.
2175 */
2176int pgmGstLazyMapPaePDPT(PVMCPUCC pVCpu, PX86PDPT *ppPdpt)
2177{
2178 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt));
2179 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2180 PGM_LOCK_VOID(pVM);
2181
2182 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAE_PAGE_MASK;
2183 PPGMPAGE pPage;
2184 /** @todo Nested VMX: convert GCPhysCR3 from nested-guest physical to
2185 * guest-physical address here. */
2186 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
2187 if (RT_SUCCESS(rc))
2188 {
2189 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)ppPdpt);
2190 if (RT_SUCCESS(rc))
2191 {
2192# ifdef IN_RING3
2193 pVCpu->pgm.s.pGstPaePdptR0 = NIL_RTR0PTR;
2194 pVCpu->pgm.s.pGstPaePdptR3 = *ppPdpt;
2195# else
2196 pVCpu->pgm.s.pGstPaePdptR3 = NIL_RTR3PTR;
2197 pVCpu->pgm.s.pGstPaePdptR0 = *ppPdpt;
2198# endif
2199 PGM_UNLOCK(pVM);
2200 return VINF_SUCCESS;
2201 }
2202 AssertRC(rc);
2203 }
2204
2205 PGM_UNLOCK(pVM);
2206 *ppPdpt = NULL;
2207 return rc;
2208}
2209
2210
2211/**
2212 * Performs the lazy mapping / updating of a PAE guest PD.
2213 *
2214 * @returns Pointer to the mapping.
2215 * @returns VBox status code.
2216 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2217 * @param iPdpt Which PD entry to map (0..3).
2218 * @param ppPd Where to return the pointer to the mapping. This is
2219 * always set.
2220 */
2221int pgmGstLazyMapPaePD(PVMCPUCC pVCpu, uint32_t iPdpt, PX86PDPAE *ppPd)
2222{
2223 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2224 PGM_LOCK_VOID(pVM);
2225
2226 PX86PDPT pGuestPDPT = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
2227 Assert(pGuestPDPT);
2228 Assert(pGuestPDPT->a[iPdpt].u & X86_PDPE_P);
2229 RTGCPHYS GCPhys = pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK;
2230 bool const fChanged = pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] != GCPhys;
2231
2232 PPGMPAGE pPage;
2233 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
2234 if (RT_SUCCESS(rc))
2235 {
2236 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)ppPd);
2237 AssertRC(rc);
2238 if (RT_SUCCESS(rc))
2239 {
2240# ifdef IN_RING3
2241 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = NIL_RTR0PTR;
2242 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = *ppPd;
2243# else
2244 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = NIL_RTR3PTR;
2245 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = *ppPd;
2246# endif
2247 if (fChanged)
2248 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = GCPhys;
2249 PGM_UNLOCK(pVM);
2250 return VINF_SUCCESS;
2251 }
2252 }
2253
2254 /* Invalid page or some failure, invalidate the entry. */
2255 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = NIL_RTGCPHYS;
2256 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = NIL_RTR3PTR;
2257 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = NIL_RTR0PTR;
2258
2259 PGM_UNLOCK(pVM);
2260 return rc;
2261}
2262
2263
2264/**
2265 * Performs the lazy mapping of the AMD64 guest PML4 table.
2266 *
2267 * @returns VBox status code.
2268 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2269 * @param ppPml4 Where to return the pointer to the mapping. This will
2270 * always be set.
2271 */
2272int pgmGstLazyMapPml4(PVMCPUCC pVCpu, PX86PML4 *ppPml4)
2273{
2274 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4));
2275 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2276 PGM_LOCK_VOID(pVM);
2277
2278 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_AMD64_PAGE_MASK;
2279 PPGMPAGE pPage;
2280 /** @todo Nested VMX: convert GCPhysCR3 from nested-guest physical to
2281 * guest-physical address here. */
2282 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
2283 if (RT_SUCCESS(rc))
2284 {
2285 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)ppPml4);
2286 if (RT_SUCCESS(rc))
2287 {
2288# ifdef IN_RING3
2289 pVCpu->pgm.s.pGstAmd64Pml4R0 = NIL_RTR0PTR;
2290 pVCpu->pgm.s.pGstAmd64Pml4R3 = *ppPml4;
2291# else
2292 pVCpu->pgm.s.pGstAmd64Pml4R3 = NIL_RTR3PTR;
2293 pVCpu->pgm.s.pGstAmd64Pml4R0 = *ppPml4;
2294# endif
2295 PGM_UNLOCK(pVM);
2296 return VINF_SUCCESS;
2297 }
2298 }
2299
2300 PGM_UNLOCK(pVM);
2301 *ppPml4 = NULL;
2302 return rc;
2303}
2304
2305
2306#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2307/**
2308 * Performs the lazy mapping of the guest PML4 table when using EPT paging.
2309 *
2310 * @returns VBox status code.
2311 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2312 * @param ppEptPml4 Where to return the pointer to the mapping. This will
2313 * always be set.
2314 */
2315int pgmGstLazyMapEptPml4(PVMCPUCC pVCpu, PEPTPML4 *ppEptPml4)
2316{
2317 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstEptPml4));
2318 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2319 PGM_LOCK_VOID(pVM);
2320
2321 RTGCPHYS const GCPhysEpt = pVCpu->pgm.s.uEptPtr & EPT_EPTP_PG_MASK;
2322 PPGMPAGE pPage;
2323 int rc = pgmPhysGetPageEx(pVM, GCPhysEpt, &pPage);
2324 if (RT_SUCCESS(rc))
2325 {
2326 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysEpt, (void **)ppEptPml4);
2327 if (RT_SUCCESS(rc))
2328 {
2329# ifdef IN_RING3
2330 pVCpu->pgm.s.pGstEptPml4R0 = NIL_RTR0PTR;
2331 pVCpu->pgm.s.pGstEptPml4R3 = *ppEptPml4;
2332# else
2333 pVCpu->pgm.s.pGstEptPml4R3 = NIL_RTR3PTR;
2334 pVCpu->pgm.s.pGstEptPml4R0 = *ppEptPml4;
2335# endif
2336 PGM_UNLOCK(pVM);
2337 return VINF_SUCCESS;
2338 }
2339 }
2340
2341 PGM_UNLOCK(pVM);
2342 *ppEptPml4 = NULL;
2343 return rc;
2344}
2345#endif
2346
2347
2348/**
2349 * Gets the current CR3 register value for the shadow memory context.
2350 * @returns CR3 value.
2351 * @param pVCpu The cross context virtual CPU structure.
2352 */
2353VMMDECL(RTHCPHYS) PGMGetHyperCR3(PVMCPU pVCpu)
2354{
2355 PPGMPOOLPAGE pPoolPage = pVCpu->pgm.s.CTX_SUFF(pShwPageCR3);
2356 AssertPtrReturn(pPoolPage, NIL_RTHCPHYS);
2357 return pPoolPage->Core.Key;
2358}
2359
2360
2361/**
2362 * Forces lazy remapping of the guest's PAE page-directory structures.
2363 *
2364 * @param pVCpu The cross context virtual CPU structure.
2365 */
2366static void pgmGstFlushPaePdpes(PVMCPU pVCpu)
2367{
2368 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->pgm.s.aGCPhysGstPaePDs); i++)
2369 {
2370 pVCpu->pgm.s.apGstPaePDsR3[i] = 0;
2371 pVCpu->pgm.s.apGstPaePDsR0[i] = 0;
2372 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
2373 }
2374}
2375
2376
2377/**
2378 * Gets the CR3 mask corresponding to the given paging mode.
2379 *
2380 * @returns The CR3 mask.
2381 * @param enmMode The paging mode.
2382 */
2383DECLINLINE(uint64_t) pgmGetCr3MaskForMode(PGMMODE enmMode)
2384{
2385 /** @todo This work can be optimized either by storing the masks in
2386 * pVCpu->pgm.s.afGstCr3Masks[] for all PGMMODEs -or- just do this once and
2387 * store the result when entering guest mode since we currently use it only
2388 * for enmGuestMode. */
2389 switch (enmMode)
2390 {
2391 case PGMMODE_PAE:
2392 case PGMMODE_PAE_NX:
2393 return X86_CR3_PAE_PAGE_MASK;
2394 case PGMMODE_AMD64:
2395 case PGMMODE_AMD64_NX:
2396 return X86_CR3_AMD64_PAGE_MASK;
2397 case PGMMODE_EPT:
2398 return X86_CR3_EPT_PAGE_MASK;
2399 default:
2400 return X86_CR3_PAGE_MASK;
2401 }
2402}
2403
2404
2405/**
2406 * Gets the masked CR3 value according to the current guest paging mode.
2407 *
2408 * @returns The masked PGM CR3 value.
2409 * @param pVCpu The cross context virtual CPU structure.
2410 * @param uCr3 The raw guest CR3 value.
2411 */
2412DECLINLINE(RTGCPHYS) pgmGetGuestMaskedCr3(PVMCPUCC pVCpu, uint64_t uCr3)
2413{
2414 uint64_t const fCr3Mask = pgmGetCr3MaskForMode(pVCpu->pgm.s.enmGuestMode);
2415 RTGCPHYS GCPhysCR3 = (RTGCPHYS)(uCr3 & fCr3Mask);
2416 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
2417 return GCPhysCR3;
2418}
2419
2420
2421#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2422/**
2423 * Performs second-level address translation for the given CR3 and updates the
2424 * nested-guest CR3 when successful.
2425 *
2426 * @returns VBox status code.
2427 * @param pVCpu The cross context virtual CPU structure.
2428 * @param uCr3 The masked nested-guest CR3 value.
2429 * @param pGCPhysCR3 Where to store the translated CR3.
2430 *
2431 * @warning This updates PGMCPU::GCPhysNstGstCR3 when the translation succeeds. Be
2432 * mindful of this in code that's hyper sensitive to the order of
2433 * operations.
2434 */
2435static int pgmGstSlatTranslateCr3(PVMCPUCC pVCpu, uint64_t uCr3, PRTGCPHYS pGCPhysCr3)
2436{
2437 if (uCr3 != pVCpu->pgm.s.GCPhysNstGstCR3)
2438 {
2439 PGMPTWALK Walk;
2440 PGMPTWALKGST GstWalk;
2441 int const rc = pgmGstSlatWalkPhys(pVCpu, PGMSLAT_EPT, uCr3, &Walk, &GstWalk);
2442 if (RT_SUCCESS(rc))
2443 {
2444 /* Update nested-guest CR3. */
2445 pVCpu->pgm.s.GCPhysNstGstCR3 = uCr3;
2446
2447 /* Pass back the translated result. */
2448 *pGCPhysCr3 = Walk.GCPhys;
2449 return VINF_SUCCESS;
2450 }
2451
2452 /* Translation failed. */
2453 *pGCPhysCr3 = NIL_RTGCPHYS;
2454 return rc;
2455 }
2456
2457 /*
2458 * If the nested-guest CR3 has not changed, then the previously
2459 * translated CR3 result (i.e. GCPhysCR3) is passed back.
2460 */
2461 *pGCPhysCr3 = pVCpu->pgm.s.GCPhysCR3;
2462 return VINF_SUCCESS;
2463}
2464#endif
2465
2466
2467/**
2468 * Performs and schedules necessary updates following a CR3 load or reload.
2469 *
2470 * This will normally involve mapping the guest PD or nPDPT
2471 *
2472 * @returns VBox status code.
2473 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
2474 * safely be ignored and overridden since the FF will be set too then.
2475 * @param pVCpu The cross context virtual CPU structure.
2476 * @param cr3 The new cr3.
2477 * @param fGlobal Indicates whether this is a global flush or not.
2478 */
2479VMMDECL(int) PGMFlushTLB(PVMCPUCC pVCpu, uint64_t cr3, bool fGlobal)
2480{
2481 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLB), a);
2482 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2483
2484 VMCPU_ASSERT_EMT(pVCpu);
2485
2486 /*
2487 * Always flag the necessary updates; necessary for hardware acceleration
2488 */
2489 /** @todo optimize this, it shouldn't always be necessary. */
2490 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2491 if (fGlobal)
2492 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2493
2494 /*
2495 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
2496 */
2497 RTGCPHYS const GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
2498 RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, cr3);
2499#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2500 if (pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT)
2501 {
2502 LogFlowFunc(("nested_cr3=%RX64 old=%RX64\n", GCPhysCR3, pVCpu->pgm.s.GCPhysNstGstCR3));
2503 RTGCPHYS GCPhysOut;
2504 int const rc = pgmGstSlatTranslateCr3(pVCpu, GCPhysCR3, &GCPhysOut);
2505 if (RT_SUCCESS(rc))
2506 GCPhysCR3 = GCPhysOut;
2507 else
2508 {
2509 /* CR3 SLAT translation failed but we try to pretend it
2510 succeeded for the reasons mentioned in PGMHCChangeMode(). */
2511 AssertMsgFailed(("SLAT failed for CR3 %#RX64 rc=%Rrc\n", cr3, rc));
2512 int const rc2 = pgmGstUnmapCr3(pVCpu);
2513 pVCpu->pgm.s.GCPhysCR3 = NIL_RTGCPHYS;
2514 pVCpu->pgm.s.GCPhysNstGstCR3 = NIL_RTGCPHYS;
2515 return rc2;
2516 }
2517 }
2518#endif
2519
2520 LogFlowFunc(("cr3=%RX64 old=%RX64 fGlobal=%d\n", cr3, GCPhysOldCR3, fGlobal));
2521 int rc = VINF_SUCCESS;
2522 if (GCPhysOldCR3 != GCPhysCR3)
2523 {
2524 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2525 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2526 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
2527
2528 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2529 rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3);
2530 if (RT_LIKELY(rc == VINF_SUCCESS))
2531 { }
2532 else
2533 {
2534 AssertMsg(rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
2535 Assert(VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_PGM_SYNC_CR3));
2536 pVCpu->pgm.s.CTX_SUFF(fPaePdpesAndCr3Mapped) = false;
2537 pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3;
2538 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MAP_CR3;
2539 }
2540
2541 if (fGlobal)
2542 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBNewCR3Global));
2543 else
2544 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBNewCR3));
2545 }
2546 else
2547 {
2548#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
2549 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
2550 if (pPool->cDirtyPages)
2551 {
2552 PGM_LOCK_VOID(pVM);
2553 pgmPoolResetDirtyPages(pVM);
2554 PGM_UNLOCK(pVM);
2555 }
2556#endif
2557 if (fGlobal)
2558 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBSameCR3Global));
2559 else
2560 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBSameCR3));
2561
2562 /*
2563 * Flush PAE PDPTEs.
2564 */
2565 if (PGMMODE_IS_PAE(pVCpu->pgm.s.enmGuestMode))
2566 pgmGstFlushPaePdpes(pVCpu);
2567 }
2568
2569 IEMTlbInvalidateAll(pVCpu, false /*fVmm*/);
2570 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLB), a);
2571 return rc;
2572}
2573
2574
2575/**
2576 * Performs and schedules necessary updates following a CR3 load or reload when
2577 * using nested or extended paging.
2578 *
2579 * This API is an alternative to PGMFlushTLB that avoids actually flushing the
2580 * TLB and triggering a SyncCR3.
2581 *
2582 * This will normally involve mapping the guest PD or nPDPT
2583 *
2584 * @returns VBox status code.
2585 * @retval VINF_SUCCESS.
2586 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync (not for nested
2587 * paging modes). This can safely be ignored and overridden since the
2588 * FF will be set too then.
2589 * @param pVCpu The cross context virtual CPU structure.
2590 * @param cr3 The new CR3.
2591 */
2592VMMDECL(int) PGMUpdateCR3(PVMCPUCC pVCpu, uint64_t cr3)
2593{
2594 VMCPU_ASSERT_EMT(pVCpu);
2595
2596 /* We assume we're only called in nested paging mode. */
2597 Assert(pVCpu->CTX_SUFF(pVM)->pgm.s.fNestedPaging || pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
2598
2599 /*
2600 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
2601 */
2602 RTGCPHYS const GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
2603 RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, cr3);
2604#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2605 if (CPUMIsGuestVmxEptPagingEnabled(pVCpu))
2606 {
2607 LogFlowFunc(("nested_cr3=%RX64 old_nested_cr3=%RX64\n", cr3, pVCpu->pgm.s.GCPhysNstGstCR3));
2608 RTGCPHYS GCPhysOut;
2609 int const rc = pgmGstSlatTranslateCr3(pVCpu, GCPhysCR3, &GCPhysOut);
2610 if (RT_SUCCESS(rc))
2611 GCPhysCR3 = GCPhysOut;
2612 else
2613 {
2614 /* CR3 SLAT translation failed but we try to pretend it
2615 succeeded for the reasons mentioned in PGMHCChangeMode(). */
2616 AssertMsgFailed(("SLAT failed for CR3 %#RX64 rc=%Rrc\n", cr3, rc));
2617 int const rc2 = pgmGstUnmapCr3(pVCpu);
2618 pVCpu->pgm.s.GCPhysCR3 = NIL_RTGCPHYS;
2619 pVCpu->pgm.s.GCPhysNstGstCR3 = NIL_RTGCPHYS;
2620 return rc2;
2621 }
2622 }
2623#endif
2624
2625 LogFlowFunc(("cr3=%RX64 old=%RX64\n", cr3, GCPhysOldCR3));
2626 int rc = VINF_SUCCESS;
2627 if (GCPhysOldCR3 != GCPhysCR3)
2628 {
2629 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2630 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2631 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
2632
2633 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2634 rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3);
2635
2636 AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */ /** @todo this isn't true for the mac, but we need hw to test/fix this. */
2637 }
2638 /*
2639 * Flush PAE PDPTEs.
2640 */
2641 else if (PGMMODE_IS_PAE(pVCpu->pgm.s.enmGuestMode))
2642 pgmGstFlushPaePdpes(pVCpu);
2643
2644 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
2645 return rc;
2646}
2647
2648
2649/**
2650 * Synchronize the paging structures.
2651 *
2652 * This function is called in response to the VMCPU_FF_PGM_SYNC_CR3 and
2653 * VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL force action flags. Those two flags are set
2654 * in several places, most importantly whenever the CR3 is loaded.
2655 *
2656 * @returns VBox status code. May return VINF_PGM_SYNC_CR3 in RC/R0.
2657 * @retval VERR_PGM_NO_HYPERVISOR_ADDRESS in raw-mode when we're unable to map
2658 * the VMM into guest context.
2659 * @param pVCpu The cross context virtual CPU structure.
2660 * @param cr0 Guest context CR0 register
2661 * @param cr3 Guest context CR3 register
2662 * @param cr4 Guest context CR4 register
2663 * @param fGlobal Including global page directories or not
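 *
 * Rough sketch of how a caller reacts to the force action flags (illustrative
 * only; assumes the usual CPUMGetGuestCR0/CR3/CR4 getters for fetching the
 * guest control registers):
 * @code
 *     if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
 *         rc = PGMSyncCR3(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR3(pVCpu),
 *                         CPUMGetGuestCR4(pVCpu), VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
 * @endcode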
2664 */
2665VMMDECL(int) PGMSyncCR3(PVMCPUCC pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
2666{
2667 int rc;
2668
2669 VMCPU_ASSERT_EMT(pVCpu);
2670
2671 /*
2672 * The pool may have pending stuff and even require a return to ring-3 to
2673 * clear the whole thing.
2674 */
2675 rc = pgmPoolSyncCR3(pVCpu);
2676 if (rc != VINF_SUCCESS)
2677 return rc;
2678
2679 /*
2680 * We might be called when we shouldn't.
2681 *
2682 * The mode switching will ensure that the PD is resynced after every mode
2683 * switch. So, if we find ourselves here when in protected or real mode
2684 * we can safely clear the FF and return immediately.
2685 */
2686 if (pVCpu->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
2687 {
2688 Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
2689 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2690 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2691 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2692 return VINF_SUCCESS;
2693 }
2694
2695 /* If global pages are not supported, then all flushes are global. */
2696 if (!(cr4 & X86_CR4_PGE))
2697 fGlobal = true;
2698 LogFlow(("PGMSyncCR3: cr0=%RX64 cr3=%RX64 cr4=%RX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
2699 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)));
2700
2701 /*
2702 * Check if we need to finish an aborted MapCR3 call (see PGMFlushTLB).
2703 * This should be done before SyncCR3.
2704 */
2705 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MAP_CR3)
2706 {
2707 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MAP_CR3;
2708
2709 RTGCPHYS const GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
2710 RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, cr3);
2711#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2712 if (CPUMIsGuestVmxEptPagingEnabled(pVCpu))
2713 {
2714 RTGCPHYS GCPhysOut;
2715 int rc2 = pgmGstSlatTranslateCr3(pVCpu, GCPhysCR3, &GCPhysOut);
2716 if (RT_SUCCESS(rc2))
2717 GCPhysCR3 = GCPhysOut;
2718 else
2719 {
2720 /* CR3 SLAT translation failed but we try to pretend it
2721 succeeded for the reasons mentioned in PGMHCChangeMode(). */
2722 AssertMsgFailed(("Failed to translate CR3 %#RX64. rc=%Rrc\n", cr3, rc2));
2723 rc2 = pgmGstUnmapCr3(pVCpu);
2724 pVCpu->pgm.s.GCPhysCR3 = NIL_RTGCPHYS;
2725 pVCpu->pgm.s.GCPhysNstGstCR3 = NIL_RTGCPHYS;
2726 return rc2;
2727 }
2728 }
2729#endif
2730 Assert(!pVCpu->pgm.s.CTX_SUFF(fPaePdpesAndCr3Mapped));
2731 if (GCPhysOldCR3 != GCPhysCR3)
2732 {
2733 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2734 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2735 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
2736 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2737 rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3);
2738 }
2739
2740 /* Make sure we check for pending pgm pool syncs as we clear VMCPU_FF_PGM_SYNC_CR3 later on! */
2741 if ( rc == VINF_PGM_SYNC_CR3
2742 || (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL))
2743 {
2744 Log(("PGMSyncCR3: pending pgm pool sync after MapCR3!\n"));
2745#ifdef IN_RING3
2746 rc = pgmPoolSyncCR3(pVCpu);
2747#else
2748 if (rc == VINF_PGM_SYNC_CR3)
2749 pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3;
2750 return VINF_PGM_SYNC_CR3;
2751#endif
2752 }
2753 AssertRCReturn(rc, rc);
2754 AssertRCSuccessReturn(rc, VERR_IPE_UNEXPECTED_INFO_STATUS);
2755 }
2756
2757 /*
2758 * Let the 'Bth' function do the work and we'll just keep track of the flags.
2759 */
2760 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
2761
2762 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2763 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2764 AssertReturn(g_aPgmBothModeData[idxBth].pfnSyncCR3, VERR_PGM_MODE_IPE);
2765 rc = g_aPgmBothModeData[idxBth].pfnSyncCR3(pVCpu, cr0, cr3, cr4, fGlobal);
2766
2767 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
2768 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
2769 if (rc == VINF_SUCCESS)
2770 {
2771 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)
2772 {
2773 /* Go back to ring 3 if a pgm pool sync is again pending. */
2774 return VINF_PGM_SYNC_CR3;
2775 }
2776
2777 if (!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
2778 {
2779 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2780 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2781 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2782 }
2783 }
2784
2785 /*
2786 * Now flush the CR3 (guest context).
2787 */
2788 if (rc == VINF_SUCCESS)
2789 PGM_INVL_VCPU_TLBS(pVCpu);
2790 return rc;
2791}
2792
2793
2794/**
2795 * Maps all the PAE PDPE entries.
2796 *
2797 * @returns VBox status code.
2798 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2799 * @param paPaePdpes The new PAE PDPE values.
2800 *
2801 * @remarks This function may be invoked during the process of changing the guest
2802 * paging mode to PAE, hence the guest state (CR0, CR4 etc.) may not
2803 * reflect PAE paging just yet.
2804 */
2805VMM_INT_DECL(int) PGMGstMapPaePdpes(PVMCPUCC pVCpu, PCX86PDPE paPaePdpes)
2806{
2807 Assert(paPaePdpes);
2808 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
2809 {
2810 X86PDPE const PaePdpe = paPaePdpes[i];
2811
2812 /*
2813 * In some cases (e.g. in SVM with nested paging) the validation of the PAE PDPEs
2814 * is deferred.[1] Also, different situations require different handling of invalid
2815 * PDPE entries. Here we assume the caller has already validated or doesn't require
2816 * validation of the PDPEs.
2817 *
2818 * [1] -- See AMD spec. 15.25.10 "Legacy PAE Mode".
2819 */
2820 if ((PaePdpe.u & (pVCpu->pgm.s.fGstPaeMbzPdpeMask | X86_PDPE_P)) == X86_PDPE_P)
2821 {
2822 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2823 RTHCPTR HCPtr;
2824 RTGCPHYS const GCPhys = PGM_A20_APPLY(pVCpu, PaePdpe.u & X86_PDPE_PG_MASK);
2825
2826 PGM_LOCK_VOID(pVM);
2827 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
2828 AssertReturnStmt(pPage, PGM_UNLOCK(pVM), VERR_PGM_INVALID_PDPE_ADDR);
2829 int const rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)&HCPtr);
2830 PGM_UNLOCK(pVM);
2831 if (RT_SUCCESS(rc))
2832 {
2833#ifdef IN_RING3
2834 pVCpu->pgm.s.apGstPaePDsR3[i] = (PX86PDPAE)HCPtr;
2835 pVCpu->pgm.s.apGstPaePDsR0[i] = NIL_RTR0PTR;
2836#else
2837 pVCpu->pgm.s.apGstPaePDsR3[i] = NIL_RTR3PTR;
2838 pVCpu->pgm.s.apGstPaePDsR0[i] = (PX86PDPAE)HCPtr;
2839#endif
2840 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = GCPhys;
2841 continue;
2842 }
2843 AssertMsgFailed(("PGMGstMapPaePdpes: rc=%d GCPhys=%RGp i=%d\n", rc, GCPhys, i));
2844 }
2845 pVCpu->pgm.s.apGstPaePDsR3[i] = 0;
2846 pVCpu->pgm.s.apGstPaePDsR0[i] = 0;
2847 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
2848 }
2849
2850 return VINF_SUCCESS;
2851}
2852
2853
2854/**
2855 * Validates and maps the PDPT and PAE PDPEs referenced by the given CR3.
2856 *
2857 * @returns VBox status code.
2858 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2859 * @param cr3 The guest CR3 value.
2860 *
2861 * @remarks This function may be invoked during the process of changing the guest
2862 * paging mode to PAE but the guest state (CR0, CR4 etc.) may not reflect
2863 * PAE paging just yet.
2864 */
2865VMM_INT_DECL(int) PGMGstMapPaePdpesAtCr3(PVMCPUCC pVCpu, uint64_t cr3)
2866{
2867 /*
2868 * Read the page-directory-pointer table (PDPT) at CR3.
2869 */
2870 RTGCPHYS GCPhysCR3 = (cr3 & X86_CR3_PAE_PAGE_MASK);
2871 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
2872
2873#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2874 if (CPUMIsGuestVmxEptPaePagingEnabled(pVCpu))
2875 {
2876 RTGCPHYS GCPhysOut;
2877 int const rc = pgmGstSlatTranslateCr3(pVCpu, GCPhysCR3, &GCPhysOut);
2878 if (RT_SUCCESS(rc))
2879 GCPhysCR3 = GCPhysOut;
2880 else
2881 {
2882 AssertMsgFailed(("Failed to load CR3 at %#RX64. rc=%Rrc\n", GCPhysCR3, rc));
2883 return rc;
2884 }
2885 }
2886#endif
2887
2888 RTHCPTR HCPtrGuestCr3;
2889 int rc = pgmGstMapCr3(pVCpu, GCPhysCR3, &HCPtrGuestCr3);
2890 if (RT_SUCCESS(rc))
2891 {
2892 /*
2893 * Validate the page-directory-pointer table entries (PDPE).
2894 */
2895 X86PDPE aPaePdpes[X86_PG_PAE_PDPE_ENTRIES];
2896 memcpy(&aPaePdpes[0], HCPtrGuestCr3, sizeof(aPaePdpes));
2897 if (PGMGstArePaePdpesValid(pVCpu, &aPaePdpes[0]))
2898 {
2899 /*
2900 * Map the PDPT.
2901 * We deliberately don't update PGM's GCPhysCR3 here as it's expected
2902 * that PGMFlushTLB will be called soon and only a change to CR3 then
2903 * will cause the shadow page tables to be updated.
2904 */
2905#ifdef IN_RING3
2906 pVCpu->pgm.s.pGstPaePdptR3 = (PX86PDPT)HCPtrGuestCr3;
2907 pVCpu->pgm.s.pGstPaePdptR0 = NIL_RTR0PTR;
2908#else
2909 pVCpu->pgm.s.pGstPaePdptR3 = NIL_RTR3PTR;
2910 pVCpu->pgm.s.pGstPaePdptR0 = (PX86PDPT)HCPtrGuestCr3;
2911#endif
2912
2913 /*
2914 * Update CPUM.
2915 * We do this prior to mapping the PDPEs to keep the order consistent
2916 * with what's used in HM. In practice, it doesn't really matter.
2917 */
2918 CPUMSetGuestPaePdpes(pVCpu, &aPaePdpes[0]);
2919
2920 /*
2921 * Map the PDPEs.
2922 */
2923 rc = PGMGstMapPaePdpes(pVCpu, &aPaePdpes[0]);
2924 if (RT_SUCCESS(rc))
2925 {
2926#ifdef IN_RING3
2927 pVCpu->pgm.s.fPaePdpesAndCr3MappedR3 = true;
2928 pVCpu->pgm.s.fPaePdpesAndCr3MappedR0 = false;
2929#else
2930 pVCpu->pgm.s.fPaePdpesAndCr3MappedR3 = false;
2931 pVCpu->pgm.s.fPaePdpesAndCr3MappedR0 = true;
2932#endif
2933 }
2934 }
2935 else
2936 rc = VERR_PGM_PAE_PDPE_RSVD;
2937 }
2938 return rc;
2939}
2940
2941
2942/**
2943 * Called whenever CR0 or CR4 changes in a way which may affect the paging mode.
2944 *
2945 * @returns VBox status code, with the following informational code for
2946 * VM scheduling.
2947 * @retval VINF_SUCCESS if there was no change, or it was successfully dealt with.
2948 * @retval VINF_EM_SUSPEND or VINF_EM_OFF on a fatal runtime error. (R3 only)
2949 *
2950 * @param pVCpu The cross context virtual CPU structure.
2951 * @param cr0 The new cr0.
2952 * @param cr4 The new cr4.
2953 * @param efer The new extended feature enable register.
2954 * @param fForce Whether to force a mode change.
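 *
 * Rough summary of how the new guest mode is derived below (illustrative only;
 * note that PG is checked before PE to cope with AMD-V paged real mode):
 *      CR0.PG  CR0.PE  CR4.PAE  EFER.LME  ->  guest mode
 *        0       0        -        -         PGMMODE_REAL
 *        0       1        -        -         PGMMODE_PROTECTED
 *        1       -        0        -         PGMMODE_32_BIT
 *        1       -        1        0         PGMMODE_PAE (PAE_NX if EFER.NXE)
 *        1       -        1        1         PGMMODE_AMD64 (AMD64_NX if EFER.NXE)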
2955 */
2956VMMDECL(int) PGMChangeMode(PVMCPUCC pVCpu, uint64_t cr0, uint64_t cr4, uint64_t efer, bool fForce)
2957{
2958 VMCPU_ASSERT_EMT(pVCpu);
2959
2960 /*
2961 * Calc the new guest mode.
2962 *
2963 * Note! We check PG before PE and without requiring PE because of the
2964 * special AMD-V paged real mode (APM vol 2, rev 3.28, 15.9).
2965 */
2966 PGMMODE enmGuestMode;
2967 if (cr0 & X86_CR0_PG)
2968 {
2969 if (!(cr4 & X86_CR4_PAE))
2970 {
2971 bool const fPse = !!(cr4 & X86_CR4_PSE);
2972 if (pVCpu->pgm.s.fGst32BitPageSizeExtension != fPse)
2973 Log(("PGMChangeMode: CR4.PSE %d -> %d\n", pVCpu->pgm.s.fGst32BitPageSizeExtension, fPse));
2974 pVCpu->pgm.s.fGst32BitPageSizeExtension = fPse;
2975 enmGuestMode = PGMMODE_32_BIT;
2976 }
2977 else if (!(efer & MSR_K6_EFER_LME))
2978 {
2979 if (!(efer & MSR_K6_EFER_NXE))
2980 enmGuestMode = PGMMODE_PAE;
2981 else
2982 enmGuestMode = PGMMODE_PAE_NX;
2983 }
2984 else
2985 {
2986 if (!(efer & MSR_K6_EFER_NXE))
2987 enmGuestMode = PGMMODE_AMD64;
2988 else
2989 enmGuestMode = PGMMODE_AMD64_NX;
2990 }
2991 }
2992 else if (!(cr0 & X86_CR0_PE))
2993 enmGuestMode = PGMMODE_REAL;
2994 else
2995 enmGuestMode = PGMMODE_PROTECTED;
2996
2997 /*
2998 * Did it change?
2999 */
3000 if ( !fForce
3001 && pVCpu->pgm.s.enmGuestMode == enmGuestMode)
3002 return VINF_SUCCESS;
3003
3004 /* Flush the TLB */
3005 PGM_INVL_VCPU_TLBS(pVCpu);
3006 return PGMHCChangeMode(pVCpu->CTX_SUFF(pVM), pVCpu, enmGuestMode);
3007}
3008
3009
3010/**
3011 * Converts a PGMMODE value to a PGM_TYPE_* \#define.
3012 *
3013 * @returns PGM_TYPE_*.
3014 * @param pgmMode The mode value to convert.
3015 */
3016DECLINLINE(unsigned) pgmModeToType(PGMMODE pgmMode)
3017{
3018 switch (pgmMode)
3019 {
3020 case PGMMODE_REAL: return PGM_TYPE_REAL;
3021 case PGMMODE_PROTECTED: return PGM_TYPE_PROT;
3022 case PGMMODE_32_BIT: return PGM_TYPE_32BIT;
3023 case PGMMODE_PAE:
3024 case PGMMODE_PAE_NX: return PGM_TYPE_PAE;
3025 case PGMMODE_AMD64:
3026 case PGMMODE_AMD64_NX: return PGM_TYPE_AMD64;
3027 case PGMMODE_NESTED_32BIT: return PGM_TYPE_NESTED_32BIT;
3028 case PGMMODE_NESTED_PAE: return PGM_TYPE_NESTED_PAE;
3029 case PGMMODE_NESTED_AMD64: return PGM_TYPE_NESTED_AMD64;
3030 case PGMMODE_EPT: return PGM_TYPE_EPT;
3031 case PGMMODE_NONE: return PGM_TYPE_NONE;
3032 default:
3033 AssertFatalMsgFailed(("pgmMode=%d\n", pgmMode));
3034 }
3035}
3036
3037
3038/**
3039 * Calculates the shadow paging mode.
3040 *
3041 * @returns The shadow paging mode.
3042 * @param pVM The cross context VM structure.
3043 * @param enmGuestMode The guest mode.
3044 * @param enmHostMode The host mode.
3045 * @param enmShadowMode The current shadow mode.
3046 */
3047static PGMMODE pgmCalcShadowMode(PVMCC pVM, PGMMODE enmGuestMode, SUPPAGINGMODE enmHostMode, PGMMODE enmShadowMode)
3048{
3049 switch (enmGuestMode)
3050 {
3051 /*
3052 * When switching to real or protected mode we don't change
3053 * anything since it's likely that we'll switch back pretty soon.
3054 *
3055 * During pgmR3InitPaging we'll end up here with PGMMODE_INVALID
3056 * and are supposed to determine which shadow paging mode and switcher to
3057 * use during init.
3058 */
3059 case PGMMODE_REAL:
3060 case PGMMODE_PROTECTED:
3061 if ( enmShadowMode != PGMMODE_INVALID
3062 && VM_IS_RAW_MODE_ENABLED(pVM) /* always switch in hm and nem modes! */)
3063 break; /* (no change) */
3064
3065 switch (enmHostMode)
3066 {
3067 case SUPPAGINGMODE_32_BIT:
3068 case SUPPAGINGMODE_32_BIT_GLOBAL:
3069 enmShadowMode = PGMMODE_32_BIT;
3070 break;
3071
3072 case SUPPAGINGMODE_PAE:
3073 case SUPPAGINGMODE_PAE_NX:
3074 case SUPPAGINGMODE_PAE_GLOBAL:
3075 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3076 enmShadowMode = PGMMODE_PAE;
3077 break;
3078
3079 case SUPPAGINGMODE_AMD64:
3080 case SUPPAGINGMODE_AMD64_GLOBAL:
3081 case SUPPAGINGMODE_AMD64_NX:
3082 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3083 enmShadowMode = PGMMODE_PAE;
3084 break;
3085
3086 default:
3087 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
3088 }
3089 break;
3090
3091 case PGMMODE_32_BIT:
3092 switch (enmHostMode)
3093 {
3094 case SUPPAGINGMODE_32_BIT:
3095 case SUPPAGINGMODE_32_BIT_GLOBAL:
3096 enmShadowMode = PGMMODE_32_BIT;
3097 break;
3098
3099 case SUPPAGINGMODE_PAE:
3100 case SUPPAGINGMODE_PAE_NX:
3101 case SUPPAGINGMODE_PAE_GLOBAL:
3102 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3103 enmShadowMode = PGMMODE_PAE;
3104 break;
3105
3106 case SUPPAGINGMODE_AMD64:
3107 case SUPPAGINGMODE_AMD64_GLOBAL:
3108 case SUPPAGINGMODE_AMD64_NX:
3109 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3110 enmShadowMode = PGMMODE_PAE;
3111 break;
3112
3113 default:
3114 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
3115 }
3116 break;
3117
3118 case PGMMODE_PAE:
3119 case PGMMODE_PAE_NX: /** @todo This might require more switchers and guest+both modes. */
3120 switch (enmHostMode)
3121 {
3122 case SUPPAGINGMODE_32_BIT:
3123 case SUPPAGINGMODE_32_BIT_GLOBAL:
3124 enmShadowMode = PGMMODE_PAE;
3125 break;
3126
3127 case SUPPAGINGMODE_PAE:
3128 case SUPPAGINGMODE_PAE_NX:
3129 case SUPPAGINGMODE_PAE_GLOBAL:
3130 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3131 enmShadowMode = PGMMODE_PAE;
3132 break;
3133
3134 case SUPPAGINGMODE_AMD64:
3135 case SUPPAGINGMODE_AMD64_GLOBAL:
3136 case SUPPAGINGMODE_AMD64_NX:
3137 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3138 enmShadowMode = PGMMODE_PAE;
3139 break;
3140
3141 default:
3142 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
3143 }
3144 break;
3145
3146 case PGMMODE_AMD64:
3147 case PGMMODE_AMD64_NX:
3148 switch (enmHostMode)
3149 {
3150 case SUPPAGINGMODE_32_BIT:
3151 case SUPPAGINGMODE_32_BIT_GLOBAL:
3152 enmShadowMode = PGMMODE_AMD64;
3153 break;
3154
3155 case SUPPAGINGMODE_PAE:
3156 case SUPPAGINGMODE_PAE_NX:
3157 case SUPPAGINGMODE_PAE_GLOBAL:
3158 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3159 enmShadowMode = PGMMODE_AMD64;
3160 break;
3161
3162 case SUPPAGINGMODE_AMD64:
3163 case SUPPAGINGMODE_AMD64_GLOBAL:
3164 case SUPPAGINGMODE_AMD64_NX:
3165 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3166 enmShadowMode = PGMMODE_AMD64;
3167 break;
3168
3169 default:
3170 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
3171 }
3172 break;
3173
3174 default:
3175 AssertLogRelMsgFailedReturn(("enmGuestMode=%d\n", enmGuestMode), PGMMODE_INVALID);
3176 }
3177
3178 /*
3179 * Override the shadow mode when NEM or nested paging is active.
3180 */
3181 if (VM_IS_NEM_ENABLED(pVM))
3182 {
3183 pVM->pgm.s.fNestedPaging = true;
3184 enmShadowMode = PGMMODE_NONE;
3185 }
3186 else
3187 {
3188 bool fNestedPaging = HMIsNestedPagingActive(pVM);
3189 pVM->pgm.s.fNestedPaging = fNestedPaging;
3190 if (fNestedPaging)
3191 {
3192 if (HMIsVmxActive(pVM))
3193 enmShadowMode = PGMMODE_EPT;
3194 else
3195 {
3196 /* The nested SVM paging depends on the host one. */
3197 Assert(HMIsSvmActive(pVM));
3198 if ( enmGuestMode == PGMMODE_AMD64
3199 || enmGuestMode == PGMMODE_AMD64_NX)
3200 enmShadowMode = PGMMODE_NESTED_AMD64;
3201 else
3202 switch (pVM->pgm.s.enmHostMode)
3203 {
3204 case SUPPAGINGMODE_32_BIT:
3205 case SUPPAGINGMODE_32_BIT_GLOBAL:
3206 enmShadowMode = PGMMODE_NESTED_32BIT;
3207 break;
3208
3209 case SUPPAGINGMODE_PAE:
3210 case SUPPAGINGMODE_PAE_GLOBAL:
3211 case SUPPAGINGMODE_PAE_NX:
3212 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3213 enmShadowMode = PGMMODE_NESTED_PAE;
3214 break;
3215
3216 case SUPPAGINGMODE_AMD64:
3217 case SUPPAGINGMODE_AMD64_GLOBAL:
3218 case SUPPAGINGMODE_AMD64_NX:
3219 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3220 enmShadowMode = PGMMODE_NESTED_AMD64;
3221 break;
3222
3223 default:
3224 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode), PGMMODE_INVALID);
3225 }
3226 }
3227 }
3228#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
3229 else
3230 {
3231 /* Nested paging is a requirement for nested VT-x. */
3232 AssertLogRelMsgReturn(enmGuestMode != PGMMODE_EPT, ("enmGuestMode=%d enmHostMode=%d\n", enmGuestMode, pVM->pgm.s.enmHostMode), PGMMODE_INVALID);
3233 }
3234#endif
3235 }
3236
3237 return enmShadowMode;
3238}
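
/* Illustrative example: per the tables above, a PGMMODE_32_BIT guest on a
 * SUPPAGINGMODE_AMD64 host gets a PGMMODE_PAE shadow mode when running without
 * nested paging, PGMMODE_EPT with VT-x nested paging, PGMMODE_NESTED_AMD64 with
 * AMD-V nested paging, and PGMMODE_NONE when NEM is used. */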
3239
3240
3241/**
3242 * Performs the actual mode change.
3243 * This is called by PGMChangeMode and pgmR3InitPaging().
3244 *
3245 * @returns VBox status code. May suspend or power off the VM on error, but this is
3246 * signalled using forced-action flags (FFs) rather than informational status codes.
3247 *
3248 * @param pVM The cross context VM structure.
3249 * @param pVCpu The cross context virtual CPU structure.
3250 * @param enmGuestMode The new guest mode. This is assumed to be different from
3251 * the current mode.
3252 */
3253VMM_INT_DECL(int) PGMHCChangeMode(PVMCC pVM, PVMCPUCC pVCpu, PGMMODE enmGuestMode)
3254{
3255 Log(("PGMHCChangeMode: Guest mode: %s -> %s\n", PGMGetModeName(pVCpu->pgm.s.enmGuestMode), PGMGetModeName(enmGuestMode)));
3256 STAM_REL_COUNTER_INC(&pVCpu->pgm.s.cGuestModeChanges);
3257
3258 /*
3259 * Calc the shadow mode and switcher.
3260 */
3261 PGMMODE enmShadowMode = pgmCalcShadowMode(pVM, enmGuestMode, pVM->pgm.s.enmHostMode, pVCpu->pgm.s.enmShadowMode);
3262
3263 /*
3264 * Exit old mode(s).
3265 */
3266 /* shadow */
3267 if (enmShadowMode != pVCpu->pgm.s.enmShadowMode)
3268 {
3269 LogFlow(("PGMHCChangeMode: Shadow mode: %s -> %s\n", PGMGetModeName(pVCpu->pgm.s.enmShadowMode), PGMGetModeName(enmShadowMode)));
3270 uintptr_t idxOldShw = pVCpu->pgm.s.idxShadowModeData;
3271 if ( idxOldShw < RT_ELEMENTS(g_aPgmShadowModeData)
3272 && g_aPgmShadowModeData[idxOldShw].pfnExit)
3273 {
3274 int rc = g_aPgmShadowModeData[idxOldShw].pfnExit(pVCpu);
3275 AssertMsgRCReturn(rc, ("Exit failed for shadow mode %d: %Rrc\n", pVCpu->pgm.s.enmShadowMode, rc), rc);
3276 }
3277 }
3278 else
3279 LogFlow(("PGMHCChangeMode: Shadow mode remains: %s\n", PGMGetModeName(pVCpu->pgm.s.enmShadowMode)));
3280
3281 /* guest */
3282 uintptr_t const idxOldGst = pVCpu->pgm.s.idxGuestModeData;
3283 if ( idxOldGst < RT_ELEMENTS(g_aPgmGuestModeData)
3284 && g_aPgmGuestModeData[idxOldGst].pfnExit)
3285 {
3286 int rc = g_aPgmGuestModeData[idxOldGst].pfnExit(pVCpu);
3287 AssertMsgReturn(RT_SUCCESS(rc), ("Exit failed for guest mode %d: %Rrc\n", pVCpu->pgm.s.enmGuestMode, rc), rc);
3288 }
3289 pVCpu->pgm.s.GCPhysCR3 = NIL_RTGCPHYS;
3290 pVCpu->pgm.s.GCPhysNstGstCR3 = NIL_RTGCPHYS;
3291 Assert(!pVCpu->pgm.s.CTX_SUFF(fPaePdpesAndCr3Mapped));
3292
3293 /*
3294 * Change the paging mode data indexes.
3295 */
3296 uintptr_t idxNewGst = pVCpu->pgm.s.idxGuestModeData = pgmModeToType(enmGuestMode);
3297 AssertReturn(idxNewGst < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
3298 AssertReturn(g_aPgmGuestModeData[idxNewGst].uType == idxNewGst, VERR_PGM_MODE_IPE);
3299 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnGetPage, VERR_PGM_MODE_IPE);
3300 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnModifyPage, VERR_PGM_MODE_IPE);
3301 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnExit, VERR_PGM_MODE_IPE);
3302 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnEnter, VERR_PGM_MODE_IPE);
3303#ifdef IN_RING3
3304 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnRelocate, VERR_PGM_MODE_IPE);
3305#endif
3306
3307 uintptr_t const idxNewShw = pVCpu->pgm.s.idxShadowModeData = pgmModeToType(enmShadowMode);
3308 AssertReturn(idxNewShw < RT_ELEMENTS(g_aPgmShadowModeData), VERR_PGM_MODE_IPE);
3309 AssertReturn(g_aPgmShadowModeData[idxNewShw].uType == idxNewShw, VERR_PGM_MODE_IPE);
3310 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnGetPage, VERR_PGM_MODE_IPE);
3311 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnModifyPage, VERR_PGM_MODE_IPE);
3312 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnExit, VERR_PGM_MODE_IPE);
3313 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnEnter, VERR_PGM_MODE_IPE);
3314#ifdef IN_RING3
3315 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnRelocate, VERR_PGM_MODE_IPE);
3316#endif
3317
3318 uintptr_t const idxNewBth = pVCpu->pgm.s.idxBothModeData = (idxNewShw - PGM_TYPE_FIRST_SHADOW) * PGM_TYPE_END + idxNewGst;
3319 AssertReturn(g_aPgmBothModeData[idxNewBth].uShwType == idxNewShw, VERR_PGM_MODE_IPE);
3320 AssertReturn(g_aPgmBothModeData[idxNewBth].uGstType == idxNewGst, VERR_PGM_MODE_IPE);
3321 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnInvalidatePage, VERR_PGM_MODE_IPE);
3322 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnSyncCR3, VERR_PGM_MODE_IPE);
3323 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnPrefetchPage, VERR_PGM_MODE_IPE);
3324 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnVerifyAccessSyncPage, VERR_PGM_MODE_IPE);
3325 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnMapCR3, VERR_PGM_MODE_IPE);
3326 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnUnmapCR3, VERR_PGM_MODE_IPE);
3327 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnEnter, VERR_PGM_MODE_IPE);
3328#ifdef VBOX_STRICT
3329 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnAssertCR3, VERR_PGM_MODE_IPE);
3330#endif
3331
3332 /*
3333 * Enter new shadow mode (if changed).
3334 */
3335 if (enmShadowMode != pVCpu->pgm.s.enmShadowMode)
3336 {
3337 pVCpu->pgm.s.enmShadowMode = enmShadowMode;
3338 int rc = g_aPgmShadowModeData[idxNewShw].pfnEnter(pVCpu, enmGuestMode >= PGMMODE_AMD64);
3339 AssertLogRelMsgRCReturnStmt(rc, ("Entering enmShadowMode=%s failed: %Rrc\n", PGMGetModeName(enmShadowMode), rc),
3340 pVCpu->pgm.s.enmShadowMode = PGMMODE_INVALID, rc);
3341 }
3342
3343 /*
3344 * Always flag the necessary updates
3345 */
3346 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3347
3348 /*
3349 * Enter the new guest and shadow+guest modes.
3350 */
3351 /* Calc the new CR3 value. */
3352 RTGCPHYS GCPhysCR3;
3353 switch (enmGuestMode)
3354 {
3355 case PGMMODE_REAL:
3356 case PGMMODE_PROTECTED:
3357 GCPhysCR3 = NIL_RTGCPHYS;
3358 break;
3359
3360 case PGMMODE_32_BIT:
3361 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_PAGE_MASK;
3362 break;
3363
3364 case PGMMODE_PAE_NX:
3365 case PGMMODE_PAE:
3366 if (!pVM->cpum.ro.GuestFeatures.fPae)
3367#ifdef IN_RING3 /** @todo r=bird: wrong place, probably hasn't really worked for a while. */
3368 return VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_FATAL, "PAEmode",
3369 N_("The guest is trying to switch to the PAE mode which is currently disabled by default in VirtualBox. PAE support can be enabled using the VM settings (System/Processor)"));
3370#else
3371 AssertLogRelMsgFailedReturn(("enmGuestMode=%s - Try enable PAE for the guest!\n", PGMGetModeName(enmGuestMode)), VERR_PGM_MODE_IPE);
3372
3373#endif
3374 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_PAE_PAGE_MASK;
3375 break;
3376
3377#ifdef VBOX_WITH_64_BITS_GUESTS
3378 case PGMMODE_AMD64_NX:
3379 case PGMMODE_AMD64:
3380 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_AMD64_PAGE_MASK;
3381 break;
3382#endif
3383#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
3384 case PGMMODE_EPT:
3385 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_EPT_PAGE_MASK;
3386 break;
3387#endif
3388 default:
3389 AssertLogRelMsgFailedReturn(("enmGuestMode=%d\n", enmGuestMode), VERR_PGM_MODE_IPE);
3390 }
3391
3392#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
3393 /*
3394 * If a nested-guest is using EPT paging:
3395 * - Update the second-level address translation (SLAT) mode.
3396 * - Indicate that the CR3 is nested-guest physical address.
3397 */
3398 if ( CPUMIsGuestVmxEptPagingEnabled(pVCpu)
3399 && PGMMODE_WITH_PAGING(enmGuestMode))
3400 {
3401 /*
3402 * Translate CR3 to its guest-physical address.
3403 * We don't use pgmGstSlatTranslateCr3() here as we want to update GCPhysNstGstCR3 -after-
3404 * switching modes to keep it consistent with how GCPhysCR3 is updated.
3405 */
3406 PGMPTWALK Walk;
3407 PGMPTWALKGST GstWalk;
3408 int const rc = pgmGstSlatWalkPhys(pVCpu, PGMSLAT_EPT, GCPhysCR3, &Walk, &GstWalk);
3409 if (RT_SUCCESS(rc))
3410 { /* likely */ }
3411 else
3412 {
3413 /*
3414 * SLAT failed, but we avoid reporting this to the caller because the caller
3415 * is not supposed to fail. The only time a failure needs to be indicated to
3416 * guest software is when the nested-guest uses PAE paging, and that case is
3417 * handled separately (e.g., see the VMX transition in IEM).
3418 * In all other cases, the failure will surface when the CR3 is translated
3419 * on the next linear-address memory access.
3420 * See Intel spec. 27.2.1 "EPT Overview".
3421 */
3422 AssertMsgFailed(("SLAT failed for CR3 %#RX64 rc=%Rrc\n", GCPhysCR3, rc));
3423
3424 /* Trying to coax PGM to succeed for the time being... */
3425 Assert(pVCpu->pgm.s.GCPhysCR3 == NIL_RTGCPHYS);
3426 pVCpu->pgm.s.GCPhysNstGstCR3 = GCPhysCR3;
3427 pVCpu->pgm.s.enmGuestSlatMode = PGMSLAT_EPT;
3428 pVCpu->pgm.s.enmGuestMode = enmGuestMode;
3429 HMHCChangedPagingMode(pVM, pVCpu, pVCpu->pgm.s.enmShadowMode, pVCpu->pgm.s.enmGuestMode);
3430 return VINF_SUCCESS;
3431 }
3432
3433 pVCpu->pgm.s.GCPhysNstGstCR3 = GCPhysCR3;
3434 pVCpu->pgm.s.enmGuestSlatMode = PGMSLAT_EPT;
3435 GCPhysCR3 = Walk.GCPhys;
3436 }
3437 else
3438 {
3439 Assert(pVCpu->pgm.s.GCPhysNstGstCR3 == NIL_RTGCPHYS);
3440 pVCpu->pgm.s.enmGuestSlatMode = PGMSLAT_DIRECT;
3441 }
3442#endif
3443
3444 /*
3445 * Enter the new guest mode.
3446 */
3447 pVCpu->pgm.s.enmGuestMode = enmGuestMode;
3448 int rc = g_aPgmGuestModeData[idxNewGst].pfnEnter(pVCpu, GCPhysCR3);
3449 int rc2 = g_aPgmBothModeData[idxNewBth].pfnEnter(pVCpu, GCPhysCR3);
3450
3451 /* Set the new guest CR3 (and nested-guest CR3). */
3452 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
3453
3454 /* status codes. */
3455 AssertRC(rc);
3456 AssertRC(rc2);
3457 if (RT_SUCCESS(rc))
3458 {
3459 rc = rc2;
3460 if (RT_SUCCESS(rc)) /* no informational status codes. */
3461 rc = VINF_SUCCESS;
3462 }
3463
3464 /*
3465 * Notify HM.
3466 */
3467 HMHCChangedPagingMode(pVM, pVCpu, pVCpu->pgm.s.enmShadowMode, pVCpu->pgm.s.enmGuestMode);
3468 return rc;
3469}
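
/* Usage sketch (hypothetical caller): code that has emulated a CR0/CR4/EFER
 * write and determined the guest paging mode has changed might invoke this
 * roughly as follows, where enmNewGuestMode is a hypothetical local holding
 * the freshly calculated mode:
 *
 *      if (enmNewGuestMode != PGMGetGuestMode(pVCpu))
 *      {
 *          int rc = PGMHCChangeMode(pVM, pVCpu, enmNewGuestMode);
 *          AssertRCReturn(rc, rc);
 *      }
 */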
3470
3471
3472/**
3473 * Called by CPUM or REM when CR0.WP changes to 1.
3474 *
3475 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3476 * @thread EMT
3477 */
3478VMMDECL(void) PGMCr0WpEnabled(PVMCPUCC pVCpu)
3479{
3480 /*
3481 * Netware WP0+RO+US hack cleanup when WP0 -> WP1.
3482 *
3483 * Use the counter to judge whether there might be pool pages with active
3484 * hacks in them. If there are, we will be running the risk of messing up
3485 * the guest by allowing it to write to read-only pages. Thus, we have to
3486 * clear the page pool ASAP if there is the slightest chance.
3487 */
3488 if (pVCpu->pgm.s.cNetwareWp0Hacks > 0)
3489 {
3490 Assert(pVCpu->CTX_SUFF(pVM)->cCpus == 1);
3491
3492 Log(("PGMCr0WpEnabled: %llu WP0 hacks active - clearing page pool\n", pVCpu->pgm.s.cNetwareWp0Hacks));
3493 pVCpu->pgm.s.cNetwareWp0Hacks = 0;
3494 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
3495 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3496 }
3497}
3498
3499
3500/**
3501 * Gets the current guest paging mode.
3502 *
3503 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
3504 *
3505 * @returns The current paging mode.
3506 * @param pVCpu The cross context virtual CPU structure.
3507 */
3508VMMDECL(PGMMODE) PGMGetGuestMode(PVMCPU pVCpu)
3509{
3510 return pVCpu->pgm.s.enmGuestMode;
3511}
3512
3513
3514/**
3515 * Gets the current shadow paging mode.
3516 *
3517 * @returns The current paging mode.
3518 * @param pVCpu The cross context virtual CPU structure.
3519 */
3520VMMDECL(PGMMODE) PGMGetShadowMode(PVMCPU pVCpu)
3521{
3522 return pVCpu->pgm.s.enmShadowMode;
3523}
3524
3525
3526/**
3527 * Gets the current host paging mode.
3528 *
3529 * @returns The current paging mode.
3530 * @param pVM The cross context VM structure.
3531 */
3532VMMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
3533{
3534 switch (pVM->pgm.s.enmHostMode)
3535 {
3536 case SUPPAGINGMODE_32_BIT:
3537 case SUPPAGINGMODE_32_BIT_GLOBAL:
3538 return PGMMODE_32_BIT;
3539
3540 case SUPPAGINGMODE_PAE:
3541 case SUPPAGINGMODE_PAE_GLOBAL:
3542 return PGMMODE_PAE;
3543
3544 case SUPPAGINGMODE_PAE_NX:
3545 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3546 return PGMMODE_PAE_NX;
3547
3548 case SUPPAGINGMODE_AMD64:
3549 case SUPPAGINGMODE_AMD64_GLOBAL:
3550 return PGMMODE_AMD64;
3551
3552 case SUPPAGINGMODE_AMD64_NX:
3553 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3554 return PGMMODE_AMD64_NX;
3555
3556 default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
3557 }
3558
3559 return PGMMODE_INVALID;
3560}
3561
3562
3563/**
3564 * Get mode name.
3565 *
3566 * @returns read-only name string.
3567 * @param enmMode The mode whose name is desired.
3568 */
3569VMMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
3570{
3571 switch (enmMode)
3572 {
3573 case PGMMODE_REAL: return "Real";
3574 case PGMMODE_PROTECTED: return "Protected";
3575 case PGMMODE_32_BIT: return "32-bit";
3576 case PGMMODE_PAE: return "PAE";
3577 case PGMMODE_PAE_NX: return "PAE+NX";
3578 case PGMMODE_AMD64: return "AMD64";
3579 case PGMMODE_AMD64_NX: return "AMD64+NX";
3580 case PGMMODE_NESTED_32BIT: return "Nested-32";
3581 case PGMMODE_NESTED_PAE: return "Nested-PAE";
3582 case PGMMODE_NESTED_AMD64: return "Nested-AMD64";
3583 case PGMMODE_EPT: return "EPT";
3584 case PGMMODE_NONE: return "None";
3585 default: return "unknown mode value";
3586 }
3587}
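
/* Usage sketch: the mode names are typically consumed by logging, e.g.:
 *
 *      Log(("Paging: guest=%s shadow=%s\n",
 *           PGMGetModeName(PGMGetGuestMode(pVCpu)),
 *           PGMGetModeName(PGMGetShadowMode(pVCpu))));
 */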
3588
3589
3590#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
3591/**
3592 * Gets the SLAT mode name.
3593 *
3594 * @returns The read-only SLAT mode descriptive string.
3595 * @param enmSlatMode The SLAT mode value.
3596 */
3597VMM_INT_DECL(const char *) PGMGetSlatModeName(PGMSLAT enmSlatMode)
3598{
3599 switch (enmSlatMode)
3600 {
3601 case PGMSLAT_DIRECT: return "Direct";
3602 case PGMSLAT_EPT: return "EPT";
3603 case PGMSLAT_32BIT: return "32-bit";
3604 case PGMSLAT_PAE: return "PAE";
3605 case PGMSLAT_AMD64: return "AMD64";
3606 default: return "Unknown";
3607 }
3608}
3609#endif
3610
3611
3612/**
3613 * Gets the physical address represented in the guest CR3 as PGM sees it.
3614 *
3615 * This is mainly for logging and debugging.
3616 *
3617 * @returns PGM's guest CR3 value.
3618 * @param pVCpu The cross context virtual CPU structure.
3619 */
3620VMM_INT_DECL(RTGCPHYS) PGMGetGuestCR3Phys(PVMCPU pVCpu)
3621{
3622 return pVCpu->pgm.s.GCPhysCR3;
3623}
3624
3625
3626
3627/**
3628 * Notification from CPUM that the EFER.NXE bit has changed.
3629 *
3630 * @param pVCpu The cross context virtual CPU structure of the CPU for
3631 * which EFER changed.
3632 * @param fNxe The new NXE state.
3633 */
3634VMM_INT_DECL(void) PGMNotifyNxeChanged(PVMCPU pVCpu, bool fNxe)
3635{
3636/** @todo VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu); */
3637 Log(("PGMNotifyNxeChanged: fNxe=%RTbool\n", fNxe));
3638
3639 pVCpu->pgm.s.fNoExecuteEnabled = fNxe;
3640 if (fNxe)
3641 {
3642 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
3643 pVCpu->pgm.s.fGstPaeMbzPteMask &= ~X86_PTE_PAE_NX;
3644 pVCpu->pgm.s.fGstPaeMbzPdeMask &= ~X86_PDE_PAE_NX;
3645 pVCpu->pgm.s.fGstPaeMbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
3646 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask - N/A */
3647 pVCpu->pgm.s.fGstAmd64MbzPteMask &= ~X86_PTE_PAE_NX;
3648 pVCpu->pgm.s.fGstAmd64MbzPdeMask &= ~X86_PDE_PAE_NX;
3649 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
3650 pVCpu->pgm.s.fGstAmd64MbzPdpeMask &= ~X86_PDPE_LM_NX;
3651 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask &= ~X86_PDPE_LM_NX;
3652 pVCpu->pgm.s.fGstAmd64MbzPml4eMask &= ~X86_PML4E_NX;
3653
3654 pVCpu->pgm.s.fGst64ShadowedPteMask |= X86_PTE_PAE_NX;
3655 pVCpu->pgm.s.fGst64ShadowedPdeMask |= X86_PDE_PAE_NX;
3656 pVCpu->pgm.s.fGst64ShadowedBigPdeMask |= X86_PDE2M_PAE_NX;
3657 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask |= X86_PDE2M_PAE_NX;
3658 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask |= X86_PDPE_LM_NX;
3659 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask |= X86_PML4E_NX;
3660 }
3661 else
3662 {
3663 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
3664 pVCpu->pgm.s.fGstPaeMbzPteMask |= X86_PTE_PAE_NX;
3665 pVCpu->pgm.s.fGstPaeMbzPdeMask |= X86_PDE_PAE_NX;
3666 pVCpu->pgm.s.fGstPaeMbzBigPdeMask |= X86_PDE2M_PAE_NX;
3667 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask -N/A */
3668 pVCpu->pgm.s.fGstAmd64MbzPteMask |= X86_PTE_PAE_NX;
3669 pVCpu->pgm.s.fGstAmd64MbzPdeMask |= X86_PDE_PAE_NX;
3670 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask |= X86_PDE2M_PAE_NX;
3671 pVCpu->pgm.s.fGstAmd64MbzPdpeMask |= X86_PDPE_LM_NX;
3672 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask |= X86_PDPE_LM_NX;
3673 pVCpu->pgm.s.fGstAmd64MbzPml4eMask |= X86_PML4E_NX;
3674
3675 pVCpu->pgm.s.fGst64ShadowedPteMask &= ~X86_PTE_PAE_NX;
3676 pVCpu->pgm.s.fGst64ShadowedPdeMask &= ~X86_PDE_PAE_NX;
3677 pVCpu->pgm.s.fGst64ShadowedBigPdeMask &= ~X86_PDE2M_PAE_NX;
3678 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask &= ~X86_PDE2M_PAE_NX;
3679 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask &= ~X86_PDPE_LM_NX;
3680 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask &= ~X86_PML4E_NX;
3681 }
3682}
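
/* Sketch of how the must-be-zero masks adjusted above are typically consulted
 * (simplified illustration only; the actual page-walk code lives elsewhere):
 *
 *      X86PGPAEUINT const uPte = ...; // PTE value read from the guest page table
 *      if (uPte & pVCpu->pgm.s.fGstPaeMbzPteMask)
 *          // reserved bits are set -> the access should fail with a reserved-bit #PF
 */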
3683
3684
3685/**
3686 * Check if any PGM pool pages are marked dirty (i.e. not currently write-monitored).
3687 *
3688 * @returns true if dirty pages are present, false if not.
3689 * @param pVM The cross context VM structure.
3690 */
3691VMMDECL(bool) PGMHasDirtyPages(PVM pVM)
3692{
3693 return pVM->pgm.s.CTX_SUFF(pPool)->cDirtyPages != 0;
3694}
3695
3696
3697/**
3698 * Check if this VCPU currently owns the PGM lock.
3699 *
3700 * @returns bool owner/not owner
3701 * @param pVM The cross context VM structure.
3702 */
3703VMMDECL(bool) PGMIsLockOwner(PVMCC pVM)
3704{
3705 return PDMCritSectIsOwner(pVM, &pVM->pgm.s.CritSectX);
3706}
3707
3708
3709/**
3710 * Enable or disable large page usage
3711 *
3712 * @returns VBox status code.
3713 * @param pVM The cross context VM structure.
3714 * @param fUseLargePages Use/not use large pages
3715 */
3716VMMDECL(int) PGMSetLargePageUsage(PVMCC pVM, bool fUseLargePages)
3717{
3718 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3719
3720 pVM->pgm.s.fUseLargePages = fUseLargePages;
3721 return VINF_SUCCESS;
3722}
3723
3724
3725/**
3726 * Acquire the PGM lock.
3727 *
3728 * @returns VBox status code
3729 * @param pVM The cross context VM structure.
3730 * @param fVoid Set if the caller cannot handle failure returns.
3731 * @param SRC_POS The source position of the caller (RT_SRC_POS).
3732 */
3733#if defined(VBOX_STRICT) || defined(DOXYGEN_RUNNING)
3734int pgmLockDebug(PVMCC pVM, bool fVoid, RT_SRC_POS_DECL)
3735#else
3736int pgmLock(PVMCC pVM, bool fVoid)
3737#endif
3738{
3739#if defined(VBOX_STRICT)
3740 int rc = PDMCritSectEnterDebug(pVM, &pVM->pgm.s.CritSectX, VINF_SUCCESS, (uintptr_t)ASMReturnAddress(), RT_SRC_POS_ARGS);
3741#else
3742 int rc = PDMCritSectEnter(pVM, &pVM->pgm.s.CritSectX, VINF_SUCCESS);
3743#endif
3744 if (RT_SUCCESS(rc))
3745 return rc;
3746 if (fVoid)
3747 PDM_CRITSECT_RELEASE_ASSERT_RC(pVM, &pVM->pgm.s.CritSectX, rc);
3748 else
3749 AssertRC(rc);
3750 return rc;
3751}
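
/* Usage sketch: callers normally go through the PGM_LOCK_VOID()/PGM_UNLOCK()
 * wrapper macros (as seen in PGMAssertCR3 and PGMSetGuestEptPtr below) rather
 * than calling pgmLock()/pgmUnlock() directly:
 *
 *      PGM_LOCK_VOID(pVM);
 *      ... access state protected by the PGM lock ...
 *      PGM_UNLOCK(pVM);
 */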
3752
3753
3754/**
3755 * Release the PGM lock.
3756 *
3758 * @param pVM The cross context VM structure.
3759 */
3760void pgmUnlock(PVMCC pVM)
3761{
3762 uint32_t cDeprecatedPageLocks = pVM->pgm.s.cDeprecatedPageLocks;
3763 pVM->pgm.s.cDeprecatedPageLocks = 0;
3764 int rc = PDMCritSectLeave(pVM, &pVM->pgm.s.CritSectX);
3765 if (rc == VINF_SEM_NESTED)
3766 pVM->pgm.s.cDeprecatedPageLocks = cDeprecatedPageLocks;
3767}
3768
3769
3770#if !defined(IN_R0) || defined(LOG_ENABLED)
3771
3772/** Format handler for PGMPAGE.
3773 * @copydoc FNRTSTRFORMATTYPE */
3774static DECLCALLBACK(size_t) pgmFormatTypeHandlerPage(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
3775 const char *pszType, void const *pvValue,
3776 int cchWidth, int cchPrecision, unsigned fFlags,
3777 void *pvUser)
3778{
3779 size_t cch;
3780 PCPGMPAGE pPage = (PCPGMPAGE)pvValue;
3781 if (RT_VALID_PTR(pPage))
3782 {
3783 char szTmp[64+80];
3784
3785 cch = 0;
3786
3787 /* The single char state stuff. */
3788 static const char s_achPageStates[4] = { 'Z', 'A', 'W', 'S' };
3789 szTmp[cch++] = s_achPageStates[PGM_PAGE_GET_STATE_NA(pPage)];
3790
3791# define IS_PART_INCLUDED(lvl) ( !(fFlags & RTSTR_F_PRECISION) || cchPrecision == (lvl) || cchPrecision >= (lvl)+10 )
3792 if (IS_PART_INCLUDED(5))
3793 {
3794 static const char s_achHandlerStates[4] = { '-', 't', 'w', 'a' };
3795 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)];
3796 }
3797
3798 /* The type. */
3799 if (IS_PART_INCLUDED(4))
3800 {
3801 szTmp[cch++] = ':';
3802 static const char s_achPageTypes[8][4] = { "INV", "RAM", "MI2", "M2A", "SHA", "ROM", "MIO", "BAD" };
3803 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][0];
3804 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][1];
3805 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][2];
3806 }
3807
3808 /* The numbers. */
3809 if (IS_PART_INCLUDED(3))
3810 {
3811 szTmp[cch++] = ':';
3812 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_HCPHYS_NA(pPage), 16, 12, 0, RTSTR_F_ZEROPAD | RTSTR_F_64BIT);
3813 }
3814
3815 if (IS_PART_INCLUDED(2))
3816 {
3817 szTmp[cch++] = ':';
3818 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_PAGEID(pPage), 16, 7, 0, RTSTR_F_ZEROPAD | RTSTR_F_32BIT);
3819 }
3820
3821 if (IS_PART_INCLUDED(6))
3822 {
3823 szTmp[cch++] = ':';
3824 static const char s_achRefs[4] = { '-', 'U', '!', 'L' };
3825 szTmp[cch++] = s_achRefs[PGM_PAGE_GET_TD_CREFS_NA(pPage)];
3826 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_TD_IDX_NA(pPage), 16, 4, 0, RTSTR_F_ZEROPAD | RTSTR_F_16BIT);
3827 }
3828# undef IS_PART_INCLUDED
3829
3830 cch = pfnOutput(pvArgOutput, szTmp, cch);
3831 }
3832 else
3833 cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmpage-ptr>"));
3834 NOREF(pszType); NOREF(cchWidth); NOREF(pvUser);
3835 return cch;
3836}
3837
3838
3839/** Format handler for PGMRAMRANGE.
3840 * @copydoc FNRTSTRFORMATTYPE */
3841static DECLCALLBACK(size_t) pgmFormatTypeHandlerRamRange(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
3842 const char *pszType, void const *pvValue,
3843 int cchWidth, int cchPrecision, unsigned fFlags,
3844 void *pvUser)
3845{
3846 size_t cch;
3847 PGMRAMRANGE const *pRam = (PGMRAMRANGE const *)pvValue;
3848 if (RT_VALID_PTR(pRam))
3849 {
3850 char szTmp[80];
3851 cch = RTStrPrintf(szTmp, sizeof(szTmp), "%RGp-%RGp", pRam->GCPhys, pRam->GCPhysLast);
3852 cch = pfnOutput(pvArgOutput, szTmp, cch);
3853 }
3854 else
3855 cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmramrange-ptr>"));
3856 NOREF(pszType); NOREF(cchWidth); NOREF(cchPrecision); NOREF(pvUser); NOREF(fFlags);
3857 return cch;
3858}
3859
3860/** Format type handlers to be registered/deregistered. */
3861static const struct
3862{
3863 char szType[24];
3864 PFNRTSTRFORMATTYPE pfnHandler;
3865} g_aPgmFormatTypes[] =
3866{
3867 { "pgmpage", pgmFormatTypeHandlerPage },
3868 { "pgmramrange", pgmFormatTypeHandlerRamRange }
3869};
3870
3871#endif /* !IN_R0 || LOG_ENABLED */
3872
3873/**
3874 * Registers the global string format types.
3875 *
3876 * This should be called at module load time, or in some other manner that ensures
3877 * it is called exactly once.
3878 *
3879 * @returns IPRT status code on RTStrFormatTypeRegister failure.
3880 */
3881VMMDECL(int) PGMRegisterStringFormatTypes(void)
3882{
3883#if !defined(IN_R0) || defined(LOG_ENABLED)
3884 int rc = VINF_SUCCESS;
3885 unsigned i;
3886 for (i = 0; RT_SUCCESS(rc) && i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
3887 {
3888 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
3889# ifdef IN_RING0
3890 if (rc == VERR_ALREADY_EXISTS)
3891 {
3892 /* in case of cleanup failure in ring-0 */
3893 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
3894 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
3895 }
3896# endif
3897 }
3898 if (RT_FAILURE(rc))
3899 while (i-- > 0)
3900 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
3901
3902 return rc;
3903#else
3904 return VINF_SUCCESS;
3905#endif
3906}
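
/* Usage sketch: once registered, the custom types are consumed via the IPRT
 * %R[...] custom format extension in log statements, e.g. (assuming pPage and
 * pRam are valid pointers):
 *
 *      Log(("%R[pgmpage] in %R[pgmramrange]\n", pPage, pRam));
 */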
3907
3908
3909/**
3910 * Deregisters the global string format types.
3911 *
3912 * This should be called at module unload time, or in some other manner that
3913 * ensures it is called exactly once.
3914 */
3915VMMDECL(void) PGMDeregisterStringFormatTypes(void)
3916{
3917#if !defined(IN_R0) || defined(LOG_ENABLED)
3918 for (unsigned i = 0; i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
3919 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
3920#endif
3921}
3922
3923
3924#ifdef VBOX_STRICT
3925/**
3926 * Asserts that everything related to the guest CR3 is correctly shadowed.
3927 *
3928 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
3929 * and assert the correctness of the guest CR3 mapping before asserting that the
3930 * shadow page tables are in sync with the guest page tables.
3931 *
3932 * @returns Number of conflicts.
3933 * @param pVM The cross context VM structure.
3934 * @param pVCpu The cross context virtual CPU structure.
3935 * @param cr3 The current guest CR3 register value.
3936 * @param cr4 The current guest CR4 register value.
3937 */
3938VMMDECL(unsigned) PGMAssertCR3(PVMCC pVM, PVMCPUCC pVCpu, uint64_t cr3, uint64_t cr4)
3939{
3940 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
3941
3942 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
3943 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), -VERR_PGM_MODE_IPE);
3944 AssertReturn(g_aPgmBothModeData[idxBth].pfnAssertCR3, -VERR_PGM_MODE_IPE);
3945
3946 PGM_LOCK_VOID(pVM);
3947 unsigned cErrors = g_aPgmBothModeData[idxBth].pfnAssertCR3(pVCpu, cr3, cr4, 0, ~(RTGCPTR)0);
3948 PGM_UNLOCK(pVM);
3949
3950 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
3951 return cErrors;
3952}
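
/* Usage sketch (strict builds only): callers would typically pass in the current
 * guest control register values and expect no conflicts, e.g.:
 *
 *      unsigned const cErrors = PGMAssertCR3(pVM, pVCpu, CPUMGetGuestCR3(pVCpu), CPUMGetGuestCR4(pVCpu));
 *      Assert(cErrors == 0);
 */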
3953#endif /* VBOX_STRICT */
3954
3955
3956/**
3957 * Updates PGM's copy of the guest's EPT pointer.
3958 *
3959 * @param pVCpu The cross context virtual CPU structure.
3960 * @param uEptPtr The EPT pointer.
3961 *
3962 * @remarks This can be called as part of VM-entry so we might be in the midst of
3963 * switching to VMX non-root mode.
3964 */
3965VMM_INT_DECL(void) PGMSetGuestEptPtr(PVMCPUCC pVCpu, uint64_t uEptPtr)
3966{
3967 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3968 PGM_LOCK_VOID(pVM);
3969 pVCpu->pgm.s.uEptPtr = uEptPtr;
3970 PGM_UNLOCK(pVM);
3971}
3972