VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp@ 93901

Last change on this file since 93901 was 93901, checked in by vboxsync, 3 years ago

VMM,Main,++: Removed VM_IS_RAW_MODE_ENABLED/VM_EXEC_ENGINE_RAW_MODE and added VM_IS_EXEC_ENGINE_IEM/VM_EXEC_ENGINE_IEM instead. In IMachineDebugger::getExecutionEngine VMExecutionEngine_RawMode was removed and VMExecutionEngine_Emulated added. Removed dead code and updated frontends accordingly. On darwin.arm64 HM now falls back on IEM execution since neither HM nor NEM is available there. bugref:9898

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 141.5 KB
Line 
1/* $Id: PGMAll.cpp 93901 2022-02-23 15:35:26Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor - All context code.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_PGM
23#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
24#include <VBox/vmm/pgm.h>
25#include <VBox/vmm/cpum.h>
26#include <VBox/vmm/selm.h>
27#include <VBox/vmm/iem.h>
28#include <VBox/vmm/iom.h>
29#include <VBox/sup.h>
30#include <VBox/vmm/mm.h>
31#include <VBox/vmm/stam.h>
32#include <VBox/vmm/trpm.h>
33#include <VBox/vmm/em.h>
34#include <VBox/vmm/hm.h>
35#include <VBox/vmm/hm_vmx.h>
36#include "PGMInternal.h"
37#include <VBox/vmm/vmcc.h>
38#include "PGMInline.h"
39#include <iprt/assert.h>
40#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
41# include <iprt/asm-amd64-x86.h>
42#endif
43#include <iprt/string.h>
44#include <VBox/log.h>
45#include <VBox/param.h>
46#include <VBox/err.h>
47
48
49/*********************************************************************************************************************************
50* Internal Functions *
51*********************************************************************************************************************************/
52DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
53DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde);
54DECLINLINE(int) pgmGstMapCr3(PVMCPUCC pVCpu, RTGCPHYS GCPhysCr3, PRTHCPTR pHCPtrGuestCr3);
55#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
56static int pgmGstSlatWalk(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, bool fIsLinearAddrValid, RTGCPTR GCPtrNested, PPGMPTWALK pWalk,
57 PPGMPTWALKGST pGstWalk);
58static int pgmGstSlatWalkPhys(PVMCPUCC pVCpu, PGMSLAT enmSlatMode, RTGCPHYS GCPhysNested, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk);
59static int pgmGstSlatTranslateCr3(PVMCPUCC pVCpu, uint64_t uCr3, PRTGCPHYS pGCPhysCr3);
60#endif
61static int pgmShwSyncLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD);
62static int pgmShwGetEPTPDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD);
63
64
65#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
66/* Guest - EPT SLAT is identical for all guest paging modes. */
67# define PGM_SLAT_TYPE PGM_SLAT_TYPE_EPT
68# define PGM_GST_TYPE PGM_TYPE_EPT
69# include "PGMGstDefs.h"
70# include "PGMAllGstSlatEpt.cpp.h"
71# undef PGM_GST_TYPE
72#endif
73
74
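/* Added note (not part of the original source): the blocks below use repeated
 * inclusion of PGMAllShw.h, PGMAllGst.h and PGMAllBth.h as C "templates".
 * For each shadow paging mode, PGM_SHW_TYPE/PGM_SHW_NAME are defined and the
 * shadow template is included once; then, for every guest mode that is legal
 * under that shadow mode, PGM_GST_TYPE/PGM_GST_NAME/PGM_BTH_NAME (plus the
 * BTH_PGMPOOLKIND_* selectors where needed) are defined, the guest and
 * combined templates are included, and the macros are #undef'ed again.
 * Each pass emits a full set of per-mode worker functions whose names are
 * produced by the PGM_*_NAME_* macros; those workers are what the
 * g_aPgmGuestModeData / g_aPgmShadowModeData / g_aPgmBothModeData tables
 * further down point at. */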
75/*
76 * Shadow - 32-bit mode
77 */
78#define PGM_SHW_TYPE PGM_TYPE_32BIT
79#define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
80#include "PGMAllShw.h"
81
82/* Guest - real mode */
83#define PGM_GST_TYPE PGM_TYPE_REAL
84#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
85#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
86#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
87#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
88#include "PGMGstDefs.h"
89#include "PGMAllGst.h"
90#include "PGMAllBth.h"
91#undef BTH_PGMPOOLKIND_PT_FOR_PT
92#undef BTH_PGMPOOLKIND_ROOT
93#undef PGM_BTH_NAME
94#undef PGM_GST_TYPE
95#undef PGM_GST_NAME
96
97/* Guest - protected mode */
98#define PGM_GST_TYPE PGM_TYPE_PROT
99#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
100#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
101#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
102#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
103#include "PGMGstDefs.h"
104#include "PGMAllGst.h"
105#include "PGMAllBth.h"
106#undef BTH_PGMPOOLKIND_PT_FOR_PT
107#undef BTH_PGMPOOLKIND_ROOT
108#undef PGM_BTH_NAME
109#undef PGM_GST_TYPE
110#undef PGM_GST_NAME
111
112/* Guest - 32-bit mode */
113#define PGM_GST_TYPE PGM_TYPE_32BIT
114#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
115#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
116#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
117#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
118#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD
119#include "PGMGstDefs.h"
120#include "PGMAllGst.h"
121#include "PGMAllBth.h"
122#undef BTH_PGMPOOLKIND_PT_FOR_BIG
123#undef BTH_PGMPOOLKIND_PT_FOR_PT
124#undef BTH_PGMPOOLKIND_ROOT
125#undef PGM_BTH_NAME
126#undef PGM_GST_TYPE
127#undef PGM_GST_NAME
128
129#undef PGM_SHW_TYPE
130#undef PGM_SHW_NAME
131
132
133/*
134 * Shadow - PAE mode
135 */
136#define PGM_SHW_TYPE PGM_TYPE_PAE
137#define PGM_SHW_NAME(name) PGM_SHW_NAME_PAE(name)
138#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
139#include "PGMAllShw.h"
140
141/* Guest - real mode */
142#define PGM_GST_TYPE PGM_TYPE_REAL
143#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
144#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
145#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
146#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
147#include "PGMGstDefs.h"
148#include "PGMAllBth.h"
149#undef BTH_PGMPOOLKIND_PT_FOR_PT
150#undef BTH_PGMPOOLKIND_ROOT
151#undef PGM_BTH_NAME
152#undef PGM_GST_TYPE
153#undef PGM_GST_NAME
154
155/* Guest - protected mode */
156#define PGM_GST_TYPE PGM_TYPE_PROT
157#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
158#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
159#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
160#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
161#include "PGMGstDefs.h"
162#include "PGMAllBth.h"
163#undef BTH_PGMPOOLKIND_PT_FOR_PT
164#undef BTH_PGMPOOLKIND_ROOT
165#undef PGM_BTH_NAME
166#undef PGM_GST_TYPE
167#undef PGM_GST_NAME
168
169/* Guest - 32-bit mode */
170#define PGM_GST_TYPE PGM_TYPE_32BIT
171#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
172#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
173#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
174#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
175#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_FOR_32BIT
176#include "PGMGstDefs.h"
177#include "PGMAllBth.h"
178#undef BTH_PGMPOOLKIND_PT_FOR_BIG
179#undef BTH_PGMPOOLKIND_PT_FOR_PT
180#undef BTH_PGMPOOLKIND_ROOT
181#undef PGM_BTH_NAME
182#undef PGM_GST_TYPE
183#undef PGM_GST_NAME
184
185
186/* Guest - PAE mode */
187#define PGM_GST_TYPE PGM_TYPE_PAE
188#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
189#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
190#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
191#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
192#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT
193#include "PGMGstDefs.h"
194#include "PGMAllGst.h"
195#include "PGMAllBth.h"
196#undef BTH_PGMPOOLKIND_PT_FOR_BIG
197#undef BTH_PGMPOOLKIND_PT_FOR_PT
198#undef BTH_PGMPOOLKIND_ROOT
199#undef PGM_BTH_NAME
200#undef PGM_GST_TYPE
201#undef PGM_GST_NAME
202
203#undef PGM_SHW_TYPE
204#undef PGM_SHW_NAME
205
206
207/*
208 * Shadow - AMD64 mode
209 */
210#define PGM_SHW_TYPE PGM_TYPE_AMD64
211#define PGM_SHW_NAME(name) PGM_SHW_NAME_AMD64(name)
212#include "PGMAllShw.h"
213
214/* Guest - protected mode (only used for AMD-V nested paging in 64-bit mode) */
215/** @todo retire this hack. */
216#define PGM_GST_TYPE PGM_TYPE_PROT
217#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
218#define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
219#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
220#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PD_PHYS
221#include "PGMGstDefs.h"
222#include "PGMAllBth.h"
223#undef BTH_PGMPOOLKIND_PT_FOR_PT
224#undef BTH_PGMPOOLKIND_ROOT
225#undef PGM_BTH_NAME
226#undef PGM_GST_TYPE
227#undef PGM_GST_NAME
228
229#ifdef VBOX_WITH_64_BITS_GUESTS
230/* Guest - AMD64 mode */
231# define PGM_GST_TYPE PGM_TYPE_AMD64
232# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
233# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
234# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
235# define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
236# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_64BIT_PML4
237# include "PGMGstDefs.h"
238# include "PGMAllGst.h"
239# include "PGMAllBth.h"
240# undef BTH_PGMPOOLKIND_PT_FOR_BIG
241# undef BTH_PGMPOOLKIND_PT_FOR_PT
242# undef BTH_PGMPOOLKIND_ROOT
243# undef PGM_BTH_NAME
244# undef PGM_GST_TYPE
245# undef PGM_GST_NAME
246#endif /* VBOX_WITH_64_BITS_GUESTS */
247
248#undef PGM_SHW_TYPE
249#undef PGM_SHW_NAME
250
251
252/*
253 * Shadow - 32-bit nested paging mode.
254 */
255#define PGM_SHW_TYPE PGM_TYPE_NESTED_32BIT
256#define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED_32BIT(name)
257#include "PGMAllShw.h"
258
259/* Guest - real mode */
260#define PGM_GST_TYPE PGM_TYPE_REAL
261#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
262#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_REAL(name)
263#include "PGMGstDefs.h"
264#include "PGMAllBth.h"
265#undef PGM_BTH_NAME
266#undef PGM_GST_TYPE
267#undef PGM_GST_NAME
268
269/* Guest - protected mode */
270#define PGM_GST_TYPE PGM_TYPE_PROT
271#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
272#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_PROT(name)
273#include "PGMGstDefs.h"
274#include "PGMAllBth.h"
275#undef PGM_BTH_NAME
276#undef PGM_GST_TYPE
277#undef PGM_GST_NAME
278
279/* Guest - 32-bit mode */
280#define PGM_GST_TYPE PGM_TYPE_32BIT
281#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
282#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_32BIT(name)
283#include "PGMGstDefs.h"
284#include "PGMAllBth.h"
285#undef PGM_BTH_NAME
286#undef PGM_GST_TYPE
287#undef PGM_GST_NAME
288
289/* Guest - PAE mode */
290#define PGM_GST_TYPE PGM_TYPE_PAE
291#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
292#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_PAE(name)
293#include "PGMGstDefs.h"
294#include "PGMAllBth.h"
295#undef PGM_BTH_NAME
296#undef PGM_GST_TYPE
297#undef PGM_GST_NAME
298
299#ifdef VBOX_WITH_64_BITS_GUESTS
300/* Guest - AMD64 mode */
301# define PGM_GST_TYPE PGM_TYPE_AMD64
302# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
303# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_AMD64(name)
304# include "PGMGstDefs.h"
305# include "PGMAllBth.h"
306# undef PGM_BTH_NAME
307# undef PGM_GST_TYPE
308# undef PGM_GST_NAME
309#endif /* VBOX_WITH_64_BITS_GUESTS */
310
311#undef PGM_SHW_TYPE
312#undef PGM_SHW_NAME
313
314
315/*
316 * Shadow - PAE nested paging mode.
317 */
318#define PGM_SHW_TYPE PGM_TYPE_NESTED_PAE
319#define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED_PAE(name)
320#include "PGMAllShw.h"
321
322/* Guest - real mode */
323#define PGM_GST_TYPE PGM_TYPE_REAL
324#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
325#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_REAL(name)
326#include "PGMGstDefs.h"
327#include "PGMAllBth.h"
328#undef PGM_BTH_NAME
329#undef PGM_GST_TYPE
330#undef PGM_GST_NAME
331
332/* Guest - protected mode */
333#define PGM_GST_TYPE PGM_TYPE_PROT
334#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
335#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_PROT(name)
336#include "PGMGstDefs.h"
337#include "PGMAllBth.h"
338#undef PGM_BTH_NAME
339#undef PGM_GST_TYPE
340#undef PGM_GST_NAME
341
342/* Guest - 32-bit mode */
343#define PGM_GST_TYPE PGM_TYPE_32BIT
344#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
345#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_32BIT(name)
346#include "PGMGstDefs.h"
347#include "PGMAllBth.h"
348#undef PGM_BTH_NAME
349#undef PGM_GST_TYPE
350#undef PGM_GST_NAME
351
352/* Guest - PAE mode */
353#define PGM_GST_TYPE PGM_TYPE_PAE
354#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
355#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_PAE(name)
356#include "PGMGstDefs.h"
357#include "PGMAllBth.h"
358#undef PGM_BTH_NAME
359#undef PGM_GST_TYPE
360#undef PGM_GST_NAME
361
362#ifdef VBOX_WITH_64_BITS_GUESTS
363/* Guest - AMD64 mode */
364# define PGM_GST_TYPE PGM_TYPE_AMD64
365# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
366# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_AMD64(name)
367# include "PGMGstDefs.h"
368# include "PGMAllBth.h"
369# undef PGM_BTH_NAME
370# undef PGM_GST_TYPE
371# undef PGM_GST_NAME
372#endif /* VBOX_WITH_64_BITS_GUESTS */
373
374#undef PGM_SHW_TYPE
375#undef PGM_SHW_NAME
376
377
378/*
379 * Shadow - AMD64 nested paging mode.
380 */
381#define PGM_SHW_TYPE PGM_TYPE_NESTED_AMD64
382#define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED_AMD64(name)
383#include "PGMAllShw.h"
384
385/* Guest - real mode */
386#define PGM_GST_TYPE PGM_TYPE_REAL
387#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
388#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_REAL(name)
389#include "PGMGstDefs.h"
390#include "PGMAllBth.h"
391#undef PGM_BTH_NAME
392#undef PGM_GST_TYPE
393#undef PGM_GST_NAME
394
395/* Guest - protected mode */
396#define PGM_GST_TYPE PGM_TYPE_PROT
397#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
398#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_PROT(name)
399#include "PGMGstDefs.h"
400#include "PGMAllBth.h"
401#undef PGM_BTH_NAME
402#undef PGM_GST_TYPE
403#undef PGM_GST_NAME
404
405/* Guest - 32-bit mode */
406#define PGM_GST_TYPE PGM_TYPE_32BIT
407#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
408#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_32BIT(name)
409#include "PGMGstDefs.h"
410#include "PGMAllBth.h"
411#undef PGM_BTH_NAME
412#undef PGM_GST_TYPE
413#undef PGM_GST_NAME
414
415/* Guest - PAE mode */
416#define PGM_GST_TYPE PGM_TYPE_PAE
417#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
418#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_PAE(name)
419#include "PGMGstDefs.h"
420#include "PGMAllBth.h"
421#undef PGM_BTH_NAME
422#undef PGM_GST_TYPE
423#undef PGM_GST_NAME
424
425#ifdef VBOX_WITH_64_BITS_GUESTS
426/* Guest - AMD64 mode */
427# define PGM_GST_TYPE PGM_TYPE_AMD64
428# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
429# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_AMD64(name)
430# include "PGMGstDefs.h"
431# include "PGMAllBth.h"
432# undef PGM_BTH_NAME
433# undef PGM_GST_TYPE
434# undef PGM_GST_NAME
435#endif /* VBOX_WITH_64_BITS_GUESTS */
436
437#undef PGM_SHW_TYPE
438#undef PGM_SHW_NAME
439
440
441/*
442 * Shadow - EPT.
443 */
444#define PGM_SHW_TYPE PGM_TYPE_EPT
445#define PGM_SHW_NAME(name) PGM_SHW_NAME_EPT(name)
446#include "PGMAllShw.h"
447
448/* Guest - real mode */
449#define PGM_GST_TYPE PGM_TYPE_REAL
450#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
451#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_REAL(name)
452#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
453#include "PGMGstDefs.h"
454#include "PGMAllBth.h"
455#undef BTH_PGMPOOLKIND_PT_FOR_PT
456#undef PGM_BTH_NAME
457#undef PGM_GST_TYPE
458#undef PGM_GST_NAME
459
460/* Guest - protected mode */
461#define PGM_GST_TYPE PGM_TYPE_PROT
462#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
463#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
464#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
465#include "PGMGstDefs.h"
466#include "PGMAllBth.h"
467#undef BTH_PGMPOOLKIND_PT_FOR_PT
468#undef PGM_BTH_NAME
469#undef PGM_GST_TYPE
470#undef PGM_GST_NAME
471
472/* Guest - 32-bit mode */
473#define PGM_GST_TYPE PGM_TYPE_32BIT
474#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
475#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_32BIT(name)
476#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
477#include "PGMGstDefs.h"
478#include "PGMAllBth.h"
479#undef BTH_PGMPOOLKIND_PT_FOR_PT
480#undef PGM_BTH_NAME
481#undef PGM_GST_TYPE
482#undef PGM_GST_NAME
483
484/* Guest - PAE mode */
485#define PGM_GST_TYPE PGM_TYPE_PAE
486#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
487#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PAE(name)
488#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
489#include "PGMGstDefs.h"
490#include "PGMAllBth.h"
491#undef BTH_PGMPOOLKIND_PT_FOR_PT
492#undef PGM_BTH_NAME
493#undef PGM_GST_TYPE
494#undef PGM_GST_NAME
495
496#ifdef VBOX_WITH_64_BITS_GUESTS
497/* Guest - AMD64 mode */
498# define PGM_GST_TYPE PGM_TYPE_AMD64
499# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
500# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_AMD64(name)
501# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
502# include "PGMGstDefs.h"
503# include "PGMAllBth.h"
504# undef BTH_PGMPOOLKIND_PT_FOR_PT
505# undef PGM_BTH_NAME
506# undef PGM_GST_TYPE
507# undef PGM_GST_NAME
508#endif /* VBOX_WITH_64_BITS_GUESTS */
509
510#undef PGM_SHW_TYPE
511#undef PGM_SHW_NAME
512
513
514/*
515 * Shadow - NEM / None.
516 */
517#define PGM_SHW_TYPE PGM_TYPE_NONE
518#define PGM_SHW_NAME(name) PGM_SHW_NAME_NONE(name)
519#include "PGMAllShw.h"
520
521/* Guest - real mode */
522#define PGM_GST_TYPE PGM_TYPE_REAL
523#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
524#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_REAL(name)
525#include "PGMGstDefs.h"
526#include "PGMAllBth.h"
527#undef PGM_BTH_NAME
528#undef PGM_GST_TYPE
529#undef PGM_GST_NAME
530
531/* Guest - protected mode */
532#define PGM_GST_TYPE PGM_TYPE_PROT
533#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
534#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_PROT(name)
535#include "PGMGstDefs.h"
536#include "PGMAllBth.h"
537#undef PGM_BTH_NAME
538#undef PGM_GST_TYPE
539#undef PGM_GST_NAME
540
541/* Guest - 32-bit mode */
542#define PGM_GST_TYPE PGM_TYPE_32BIT
543#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
544#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_32BIT(name)
545#include "PGMGstDefs.h"
546#include "PGMAllBth.h"
547#undef PGM_BTH_NAME
548#undef PGM_GST_TYPE
549#undef PGM_GST_NAME
550
551/* Guest - PAE mode */
552#define PGM_GST_TYPE PGM_TYPE_PAE
553#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
554#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_PAE(name)
555#include "PGMGstDefs.h"
556#include "PGMAllBth.h"
557#undef PGM_BTH_NAME
558#undef PGM_GST_TYPE
559#undef PGM_GST_NAME
560
561#ifdef VBOX_WITH_64_BITS_GUESTS
562/* Guest - AMD64 mode */
563# define PGM_GST_TYPE PGM_TYPE_AMD64
564# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
565# define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_AMD64(name)
566# include "PGMGstDefs.h"
567# include "PGMAllBth.h"
568# undef PGM_BTH_NAME
569# undef PGM_GST_TYPE
570# undef PGM_GST_NAME
571#endif /* VBOX_WITH_64_BITS_GUESTS */
572
573#undef PGM_SHW_TYPE
574#undef PGM_SHW_NAME
575
576
577
578/**
579 * Guest mode data array.
580 */
581PGMMODEDATAGST const g_aPgmGuestModeData[PGM_GUEST_MODE_DATA_ARRAY_SIZE] =
582{
583 { UINT32_MAX, NULL, NULL, NULL, NULL }, /* 0 */
584 {
585 PGM_TYPE_REAL,
586 PGM_GST_NAME_REAL(GetPage),
587 PGM_GST_NAME_REAL(ModifyPage),
588 PGM_GST_NAME_REAL(Enter),
589 PGM_GST_NAME_REAL(Exit),
590#ifdef IN_RING3
591 PGM_GST_NAME_REAL(Relocate),
592#endif
593 },
594 {
595 PGM_TYPE_PROT,
596 PGM_GST_NAME_PROT(GetPage),
597 PGM_GST_NAME_PROT(ModifyPage),
598 PGM_GST_NAME_PROT(Enter),
599 PGM_GST_NAME_PROT(Exit),
600#ifdef IN_RING3
601 PGM_GST_NAME_PROT(Relocate),
602#endif
603 },
604 {
605 PGM_TYPE_32BIT,
606 PGM_GST_NAME_32BIT(GetPage),
607 PGM_GST_NAME_32BIT(ModifyPage),
608 PGM_GST_NAME_32BIT(Enter),
609 PGM_GST_NAME_32BIT(Exit),
610#ifdef IN_RING3
611 PGM_GST_NAME_32BIT(Relocate),
612#endif
613 },
614 {
615 PGM_TYPE_PAE,
616 PGM_GST_NAME_PAE(GetPage),
617 PGM_GST_NAME_PAE(ModifyPage),
618 PGM_GST_NAME_PAE(Enter),
619 PGM_GST_NAME_PAE(Exit),
620#ifdef IN_RING3
621 PGM_GST_NAME_PAE(Relocate),
622#endif
623 },
624#ifdef VBOX_WITH_64_BITS_GUESTS
625 {
626 PGM_TYPE_AMD64,
627 PGM_GST_NAME_AMD64(GetPage),
628 PGM_GST_NAME_AMD64(ModifyPage),
629 PGM_GST_NAME_AMD64(Enter),
630 PGM_GST_NAME_AMD64(Exit),
631# ifdef IN_RING3
632 PGM_GST_NAME_AMD64(Relocate),
633# endif
634 },
635#endif
636};
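/* Added note (not part of the original source): the table above has one entry
 * per guest paging mode in PGM_TYPE_XXX order - index 0 is a placeholder,
 * followed by REAL, PROT, 32BIT, PAE and (when VBOX_WITH_64_BITS_GUESTS is
 * defined) AMD64. Each entry supplies the GetPage/ModifyPage/Enter/Exit
 * workers generated by the template includes above; the Relocate worker only
 * exists in ring-3, hence the IN_RING3 guards. */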
637
638
639/**
640 * The shadow mode data array.
641 */
642PGMMODEDATASHW const g_aPgmShadowModeData[PGM_SHADOW_MODE_DATA_ARRAY_SIZE] =
643{
644 { UINT8_MAX, NULL, NULL, NULL, NULL }, /* 0 */
645 { UINT8_MAX, NULL, NULL, NULL, NULL }, /* PGM_TYPE_REAL */
646 { UINT8_MAX, NULL, NULL, NULL, NULL }, /* PGM_TYPE_PROT */
647 {
648 PGM_TYPE_32BIT,
649 PGM_SHW_NAME_32BIT(GetPage),
650 PGM_SHW_NAME_32BIT(ModifyPage),
651 PGM_SHW_NAME_32BIT(Enter),
652 PGM_SHW_NAME_32BIT(Exit),
653#ifdef IN_RING3
654 PGM_SHW_NAME_32BIT(Relocate),
655#endif
656 },
657 {
658 PGM_TYPE_PAE,
659 PGM_SHW_NAME_PAE(GetPage),
660 PGM_SHW_NAME_PAE(ModifyPage),
661 PGM_SHW_NAME_PAE(Enter),
662 PGM_SHW_NAME_PAE(Exit),
663#ifdef IN_RING3
664 PGM_SHW_NAME_PAE(Relocate),
665#endif
666 },
667 {
668 PGM_TYPE_AMD64,
669 PGM_SHW_NAME_AMD64(GetPage),
670 PGM_SHW_NAME_AMD64(ModifyPage),
671 PGM_SHW_NAME_AMD64(Enter),
672 PGM_SHW_NAME_AMD64(Exit),
673#ifdef IN_RING3
674 PGM_SHW_NAME_AMD64(Relocate),
675#endif
676 },
677 {
678 PGM_TYPE_NESTED_32BIT,
679 PGM_SHW_NAME_NESTED_32BIT(GetPage),
680 PGM_SHW_NAME_NESTED_32BIT(ModifyPage),
681 PGM_SHW_NAME_NESTED_32BIT(Enter),
682 PGM_SHW_NAME_NESTED_32BIT(Exit),
683#ifdef IN_RING3
684 PGM_SHW_NAME_NESTED_32BIT(Relocate),
685#endif
686 },
687 {
688 PGM_TYPE_NESTED_PAE,
689 PGM_SHW_NAME_NESTED_PAE(GetPage),
690 PGM_SHW_NAME_NESTED_PAE(ModifyPage),
691 PGM_SHW_NAME_NESTED_PAE(Enter),
692 PGM_SHW_NAME_NESTED_PAE(Exit),
693#ifdef IN_RING3
694 PGM_SHW_NAME_NESTED_PAE(Relocate),
695#endif
696 },
697 {
698 PGM_TYPE_NESTED_AMD64,
699 PGM_SHW_NAME_NESTED_AMD64(GetPage),
700 PGM_SHW_NAME_NESTED_AMD64(ModifyPage),
701 PGM_SHW_NAME_NESTED_AMD64(Enter),
702 PGM_SHW_NAME_NESTED_AMD64(Exit),
703#ifdef IN_RING3
704 PGM_SHW_NAME_NESTED_AMD64(Relocate),
705#endif
706 },
707 {
708 PGM_TYPE_EPT,
709 PGM_SHW_NAME_EPT(GetPage),
710 PGM_SHW_NAME_EPT(ModifyPage),
711 PGM_SHW_NAME_EPT(Enter),
712 PGM_SHW_NAME_EPT(Exit),
713#ifdef IN_RING3
714 PGM_SHW_NAME_EPT(Relocate),
715#endif
716 },
717 {
718 PGM_TYPE_NONE,
719 PGM_SHW_NAME_NONE(GetPage),
720 PGM_SHW_NAME_NONE(ModifyPage),
721 PGM_SHW_NAME_NONE(Enter),
722 PGM_SHW_NAME_NONE(Exit),
723#ifdef IN_RING3
724 PGM_SHW_NAME_NONE(Relocate),
725#endif
726 },
727};
728
729
730/**
731 * The guest+shadow mode data array.
732 */
733PGMMODEDATABTH const g_aPgmBothModeData[PGM_BOTH_MODE_DATA_ARRAY_SIZE] =
734{
735#if !defined(IN_RING3) && !defined(VBOX_STRICT)
736# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
737# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
738 { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(Trap0eHandler) }
739
740#elif !defined(IN_RING3) && defined(VBOX_STRICT)
741# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
742# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
743 { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(Trap0eHandler), Nm(AssertCR3) }
744
745#elif defined(IN_RING3) && !defined(VBOX_STRICT)
746# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL }
747# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
748 { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), }
749
750#elif defined(IN_RING3) && defined(VBOX_STRICT)
751# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
752# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
753 { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(AssertCR3) }
754
755#else
756# error "Misconfig."
757#endif
758
759 /* 32-bit shadow paging mode: */
760 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
761 PGMMODEDATABTH_ENTRY(PGM_TYPE_32BIT, PGM_TYPE_REAL, PGM_BTH_NAME_32BIT_REAL),
762 PGMMODEDATABTH_ENTRY(PGM_TYPE_32BIT, PGM_TYPE_PROT, PGM_BTH_NAME_32BIT_PROT),
763 PGMMODEDATABTH_ENTRY(PGM_TYPE_32BIT, PGM_TYPE_32BIT, PGM_BTH_NAME_32BIT_32BIT),
764 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_PAE - illegal */
765 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_AMD64 - illegal */
766 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NESTED_32BIT - illegal */
767 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NESTED_PAE - illegal */
768 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NESTED_AMD64 - illegal */
769 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_EPT - illegal */
770 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NONE - illegal */
771
772 /* PAE shadow paging mode: */
773 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
774 PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_REAL, PGM_BTH_NAME_PAE_REAL),
775 PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_PROT, PGM_BTH_NAME_PAE_PROT),
776 PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_32BIT, PGM_BTH_NAME_PAE_32BIT),
777 PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_PAE, PGM_BTH_NAME_PAE_PAE),
778 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_AMD64 - illegal */
779 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NESTED_32BIT - illegal */
780 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NESTED_PAE - illegal */
781 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NESTED_AMD64 - illegal */
782 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_EPT - illegal */
783 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NONE - illegal */
784
785 /* AMD64 shadow paging mode: */
786 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
787 PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_REAL, PGM_BTH_NAME_AMD64_REAL),
788 PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_PROT, PGM_BTH_NAME_AMD64_PROT),
789 PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_32BIT, PGM_BTH_NAME_AMD64_32BIT),
790 PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_PAE, PGM_BTH_NAME_AMD64_PAE),
791#ifdef VBOX_WITH_64_BITS_GUESTS
792 PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_AMD64, PGM_BTH_NAME_AMD64_AMD64),
793#else
794 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_AMD64 - illegal */
795#endif
796 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NESTED_32BIT - illegal */
797 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NESTED_PAE - illegal */
798 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NESTED_AMD64 - illegal */
799 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_EPT - illegal */
800 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NONE - illegal */
801
802 /* 32-bit nested paging mode: */
803 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
804 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_REAL, PGM_BTH_NAME_NESTED_32BIT_REAL),
805 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_PROT, PGM_BTH_NAME_NESTED_32BIT_PROT),
806 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_32BIT, PGM_BTH_NAME_NESTED_32BIT_32BIT),
807 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_PAE, PGM_BTH_NAME_NESTED_32BIT_PAE),
808#ifdef VBOX_WITH_64_BITS_GUESTS
809 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_AMD64, PGM_BTH_NAME_NESTED_32BIT_AMD64),
810#else
811 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_AMD64 - illegal */
812#endif
813 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NESTED_32BIT - illegal */
814 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NESTED_PAE - illegal */
815 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NESTED_AMD64 - illegal */
816 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_EPT - illegal */
817 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NONE - illegal */
818
819 /* PAE nested paging mode: */
820 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
821 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_REAL, PGM_BTH_NAME_NESTED_PAE_REAL),
822 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_PROT, PGM_BTH_NAME_NESTED_PAE_PROT),
823 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_32BIT, PGM_BTH_NAME_NESTED_PAE_32BIT),
824 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_PAE, PGM_BTH_NAME_NESTED_PAE_PAE),
825#ifdef VBOX_WITH_64_BITS_GUESTS
826 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_AMD64, PGM_BTH_NAME_NESTED_PAE_AMD64),
827#else
828 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_AMD64 - illegal */
829#endif
830 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NESTED_32BIT - illegal */
831 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NESTED_PAE - illegal */
832 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NESTED_AMD64 - illegal */
833 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_EPT - illegal */
834 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NONE - illegal */
835
836 /* AMD64 nested paging mode: */
837 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
838 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_REAL, PGM_BTH_NAME_NESTED_AMD64_REAL),
839 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_PROT, PGM_BTH_NAME_NESTED_AMD64_PROT),
840 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_32BIT, PGM_BTH_NAME_NESTED_AMD64_32BIT),
841 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_PAE, PGM_BTH_NAME_NESTED_AMD64_PAE),
842#ifdef VBOX_WITH_64_BITS_GUESTS
843 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_AMD64, PGM_BTH_NAME_NESTED_AMD64_AMD64),
844#else
845 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_AMD64 - illegal */
846#endif
847 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NESTED_32BIT - illegal */
848 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NESTED_PAE - illegal */
849 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NESTED_AMD64 - illegal */
850 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_EPT - illegal */
851 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NONE - illegal */
852
853 /* EPT nested paging mode: */
854 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
855 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_REAL, PGM_BTH_NAME_EPT_REAL),
856 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_PROT, PGM_BTH_NAME_EPT_PROT),
857 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_32BIT, PGM_BTH_NAME_EPT_32BIT),
858 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_PAE, PGM_BTH_NAME_EPT_PAE),
859#ifdef VBOX_WITH_64_BITS_GUESTS
860 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_AMD64, PGM_BTH_NAME_EPT_AMD64),
861#else
862 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_AMD64 - illegal */
863#endif
864 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NESTED_32BIT - illegal */
865 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NESTED_PAE - illegal */
866 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NESTED_AMD64 - illegal */
867 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_EPT - illegal */
868 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NONE - illegal */
869
870 /* NONE / NEM: */
871 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
872 PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_REAL, PGM_BTH_NAME_EPT_REAL),
873 PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_PROT, PGM_BTH_NAME_EPT_PROT),
874 PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_32BIT, PGM_BTH_NAME_EPT_32BIT),
875 PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_PAE, PGM_BTH_NAME_EPT_PAE),
876#ifdef VBOX_WITH_64_BITS_GUESTS
877 PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_AMD64, PGM_BTH_NAME_EPT_AMD64),
878#else
879 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_AMD64 - illegal */
880#endif
881 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_32BIT - illegal */
882 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_PAE - illegal */
883 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_AMD64 - illegal */
884 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_EPT - illegal */
885 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NONE - illegal */
886
887
888#undef PGMMODEDATABTH_ENTRY
889#undef PGMMODEDATABTH_NULL_ENTRY
890};
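/* Added note (not part of the original source): g_aPgmBothModeData is laid out
 * as one row of guest-mode entries per shadow paging mode (32-bit, PAE, AMD64,
 * the three nested variants, EPT and NONE), with illegal shadow/guest
 * combinations filled in as NULL entries. At runtime the active entry is
 * selected through the per-VCPU index pVCpu->pgm.s.idxBothModeData, as the
 * dispatchers below (PGMTrap0eHandler, PGMPrefetchPage, PGMInvalidatePage)
 * illustrate. */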
891
892
893#ifdef IN_RING0
894/**
895 * #PF Handler.
896 *
897 * @returns VBox status code (appropriate for trap handling and GC return).
898 * @param pVCpu The cross context virtual CPU structure.
899 * @param uErr The trap error code.
900 * @param pRegFrame Trap register frame.
901 * @param pvFault The fault address.
902 */
903VMMDECL(int) PGMTrap0eHandler(PVMCPUCC pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
904{
905 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
906
907 Log(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv eip=%04x:%RGv cr3=%RGp\n", uErr, pvFault, pRegFrame->cs.Sel, (RTGCPTR)pRegFrame->rip, (RTGCPHYS)CPUMGetGuestCR3(pVCpu)));
908 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.StatRZTrap0e, a);
909 STAM_STATS({ pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = NULL; } );
910
911
912# ifdef VBOX_WITH_STATISTICS
913 /*
914 * Error code stats.
915 */
916 if (uErr & X86_TRAP_PF_US)
917 {
918 if (!(uErr & X86_TRAP_PF_P))
919 {
920 if (uErr & X86_TRAP_PF_RW)
921 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSNotPresentWrite);
922 else
923 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSNotPresentRead);
924 }
925 else if (uErr & X86_TRAP_PF_RW)
926 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSWrite);
927 else if (uErr & X86_TRAP_PF_RSVD)
928 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSReserved);
929 else if (uErr & X86_TRAP_PF_ID)
930 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSNXE);
931 else
932 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSRead);
933 }
934 else
935 { /* Supervisor */
936 if (!(uErr & X86_TRAP_PF_P))
937 {
938 if (uErr & X86_TRAP_PF_RW)
939 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVNotPresentWrite);
940 else
941 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVNotPresentRead);
942 }
943 else if (uErr & X86_TRAP_PF_RW)
944 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVWrite);
945 else if (uErr & X86_TRAP_PF_ID)
946 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSNXE);
947 else if (uErr & X86_TRAP_PF_RSVD)
948 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVReserved);
949 }
950# endif /* VBOX_WITH_STATISTICS */
951
952 /*
953 * Call the worker.
954 */
955 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
956 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
957 AssertReturn(g_aPgmBothModeData[idxBth].pfnTrap0eHandler, VERR_PGM_MODE_IPE);
958 bool fLockTaken = false;
959 int rc = g_aPgmBothModeData[idxBth].pfnTrap0eHandler(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken);
960 if (fLockTaken)
961 {
962 PGM_LOCK_ASSERT_OWNER(pVM);
963 PGM_UNLOCK(pVM);
964 }
965 LogFlow(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv rc=%Rrc\n", uErr, pvFault, rc));
966
967 /*
968 * Return code tweaks.
969 */
970 if (rc != VINF_SUCCESS)
971 {
972 if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
973 rc = VINF_SUCCESS;
974
975 /* Note: hack alert for difficult to reproduce problem. */
976 if ( rc == VERR_PAGE_NOT_PRESENT /* SMP only ; disassembly might fail. */
977 || rc == VERR_PAGE_TABLE_NOT_PRESENT /* seen with UNI & SMP */
978 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT /* seen with SMP */
979 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT) /* precaution */
980 {
981 Log(("WARNING: Unexpected VERR_PAGE_TABLE_NOT_PRESENT (%d) for page fault at %RGv error code %x (rip=%RGv)\n", rc, pvFault, uErr, pRegFrame->rip));
982 /* Some kind of inconsistency in the SMP case; it's safe to just execute the instruction again; not sure about single VCPU VMs though. */
983 rc = VINF_SUCCESS;
984 }
985 }
986
987 STAM_STATS({ if (rc == VINF_EM_RAW_GUEST_TRAP) STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eGuestPF); });
988 STAM_STATS({ if (!pVCpu->pgmr0.s.pStatTrap0eAttributionR0)
989 pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = &pVCpu->pgm.s.Stats.StatRZTrap0eTime2Misc; });
990 STAM_PROFILE_STOP_EX(&pVCpu->pgm.s.Stats.StatRZTrap0e, pVCpu->pgmr0.s.pStatTrap0eAttributionR0, a);
991 return rc;
992}
993#endif /* IN_RING0 */
994
995
996/**
997 * Prefetch a page
998 *
999 * Typically used to sync commonly used pages before entering raw mode
1000 * after a CR3 reload.
1001 *
1002 * @returns VBox status code suitable for scheduling.
1003 * @retval VINF_SUCCESS on success.
1004 * @retval VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
1005 * @param pVCpu The cross context virtual CPU structure.
1006 * @param GCPtrPage Page to prefetch.
1007 */
1008VMMDECL(int) PGMPrefetchPage(PVMCPUCC pVCpu, RTGCPTR GCPtrPage)
1009{
1010 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,Prefetch), a);
1011
1012 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
1013 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
1014 AssertReturn(g_aPgmBothModeData[idxBth].pfnPrefetchPage, VERR_PGM_MODE_IPE);
1015 int rc = g_aPgmBothModeData[idxBth].pfnPrefetchPage(pVCpu, GCPtrPage);
1016
1017 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,Prefetch), a);
1018 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
1019 return rc;
1020}
1021
1022
1023/**
1024 * Emulation of the invlpg instruction (HC only actually).
1025 *
1026 * @returns Strict VBox status code, special care required.
1027 * @retval VINF_PGM_SYNC_CR3 - handled.
1028 * @retval VINF_EM_RAW_EMULATE_INSTR - not handled (RC only).
1029 * @retval VERR_REM_FLUSHED_PAGES_OVERFLOW - not handled.
1030 *
1031 * @param pVCpu The cross context virtual CPU structure.
1032 * @param GCPtrPage Page to invalidate.
1033 *
1034 * @remark ASSUMES the page table entry or page directory is valid. Fairly
1035 * safe, but there could be edge cases!
1036 *
1037 * @todo Flush page or page directory only if necessary!
1038 * @todo VBOXSTRICTRC
1039 */
1040VMMDECL(int) PGMInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtrPage)
1041{
1042 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1043 int rc;
1044 Log3(("PGMInvalidatePage: GCPtrPage=%RGv\n", GCPtrPage));
1045
1046 IEMTlbInvalidatePage(pVCpu, GCPtrPage);
1047
1048 /*
1049 * Call paging mode specific worker.
1050 */
1051 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,InvalidatePage), a);
1052 PGM_LOCK_VOID(pVM);
1053
1054 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
1055 AssertReturnStmt(idxBth < RT_ELEMENTS(g_aPgmBothModeData), PGM_UNLOCK(pVM), VERR_PGM_MODE_IPE);
1056 AssertReturnStmt(g_aPgmBothModeData[idxBth].pfnInvalidatePage, PGM_UNLOCK(pVM), VERR_PGM_MODE_IPE);
1057 rc = g_aPgmBothModeData[idxBth].pfnInvalidatePage(pVCpu, GCPtrPage);
1058
1059 PGM_UNLOCK(pVM);
1060 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,InvalidatePage), a);
1061
1062 /* Ignore all irrelevant error codes. */
1063 if ( rc == VERR_PAGE_NOT_PRESENT
1064 || rc == VERR_PAGE_TABLE_NOT_PRESENT
1065 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT
1066 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)
1067 rc = VINF_SUCCESS;
1068
1069 return rc;
1070}
1071
1072
1073/**
1074 * Executes an instruction using the interpreter.
1075 *
1076 * @returns VBox status code (appropriate for trap handling and GC return).
1077 * @param pVM The cross context VM structure.
1078 * @param pVCpu The cross context virtual CPU structure.
1079 * @param pRegFrame Register frame.
1080 * @param pvFault Fault address.
1081 */
1082VMMDECL(VBOXSTRICTRC) PGMInterpretInstruction(PVMCC pVM, PVMCPUCC pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
1083{
1084 NOREF(pVM);
1085 VBOXSTRICTRC rc = EMInterpretInstruction(pVCpu, pRegFrame, pvFault);
1086 if (rc == VERR_EM_INTERPRETER)
1087 rc = VINF_EM_RAW_EMULATE_INSTR;
1088 if (rc != VINF_SUCCESS)
1089 Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%RGv)\n", VBOXSTRICTRC_VAL(rc), pvFault));
1090 return rc;
1091}
1092
1093
1094/**
1095 * Gets effective page information (from the VMM page directory).
1096 *
1097 * @returns VBox status code.
1098 * @param pVCpu The cross context virtual CPU structure.
1099 * @param GCPtr Guest Context virtual address of the page.
1100 * @param pfFlags Where to store the flags. These are X86_PTE_*.
1101 * @param pHCPhys Where to store the HC physical address of the page.
1102 * This is page aligned.
1103 * @remark You should use PGMMapGetPage() for pages in a mapping.
1104 */
1105VMMDECL(int) PGMShwGetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
1106{
1107 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1108 PGM_LOCK_VOID(pVM);
1109
1110 uintptr_t idxShw = pVCpu->pgm.s.idxShadowModeData;
1111 AssertReturn(idxShw < RT_ELEMENTS(g_aPgmShadowModeData), VERR_PGM_MODE_IPE);
1112 AssertReturn(g_aPgmShadowModeData[idxShw].pfnGetPage, VERR_PGM_MODE_IPE);
1113 int rc = g_aPgmShadowModeData[idxShw].pfnGetPage(pVCpu, GCPtr, pfFlags, pHCPhys);
1114
1115 PGM_UNLOCK(pVM);
1116 return rc;
1117}
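/* Illustrative usage sketch (added; not part of the original file). It shows
 * how a caller might query the effective shadow page-table flags and the host
 * physical address for a guest-context address. The surrounding caller context
 * (pVCpu, GCPtr) is hypothetical and the snippet is deliberately disabled. */
#if 0
{
    uint64_t fFlags = 0;
    RTHCPHYS HCPhys = NIL_RTHCPHYS;
    int rc2 = PGMShwGetPage(pVCpu, GCPtr, &fFlags, &HCPhys);
    if (RT_SUCCESS(rc2))
    {
        /* fFlags holds X86_PTE_* bits; HCPhys is the page-aligned host address. */
        bool const fWritable = RT_BOOL(fFlags & X86_PTE_RW);
        Log(("Shadow mapping: GCPtr=%RGv -> HCPhys=%RHp fWritable=%d\n", GCPtr, HCPhys, fWritable));
    }
}
#endif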
1118
1119
1120/**
1121 * Modify page flags for a range of pages in the shadow context.
1122 *
1123 * The existing flags are ANDed with the fMask and ORed with the fFlags.
1124 *
1125 * @returns VBox status code.
1126 * @param pVCpu The cross context virtual CPU structure.
1127 * @param GCPtr Virtual address of the first page in the range.
1128 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
1129 * @param fMask The AND mask - page flags X86_PTE_*.
1130 * Be very CAREFUL when ~'ing constants which could be 32-bit!
1131 * @param fOpFlags A combination of the PGM_MK_PK_XXX flags.
1132 * @remark You must use PGMMapModifyPage() for pages in a mapping.
1133 */
1134DECLINLINE(int) pdmShwModifyPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags)
1135{
1136 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
1137 Assert(!(fOpFlags & ~(PGM_MK_PG_IS_MMIO2 | PGM_MK_PG_IS_WRITE_FAULT)));
1138
1139 GCPtr &= PAGE_BASE_GC_MASK; /** @todo this ain't necessary, right... */
1140
1141 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1142 PGM_LOCK_VOID(pVM);
1143
1144 uintptr_t idxShw = pVCpu->pgm.s.idxShadowModeData;
1145 AssertReturn(idxShw < RT_ELEMENTS(g_aPgmShadowModeData), VERR_PGM_MODE_IPE);
1146 AssertReturn(g_aPgmShadowModeData[idxShw].pfnModifyPage, VERR_PGM_MODE_IPE);
1147 int rc = g_aPgmShadowModeData[idxShw].pfnModifyPage(pVCpu, GCPtr, GUEST_PAGE_SIZE, fFlags, fMask, fOpFlags);
1148
1149 PGM_UNLOCK(pVM);
1150 return rc;
1151}
1152
1153
1154/**
1155 * Changing the page flags for a single page in the shadow page tables so as to
1156 * make it read-only.
1157 *
1158 * @returns VBox status code.
1159 * @param pVCpu The cross context virtual CPU structure.
1160 * @param GCPtr Virtual address of the first page in the range.
1161 * @param fOpFlags A combination of the PGM_MK_PK_XXX flags.
1162 */
1163VMMDECL(int) PGMShwMakePageReadonly(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
1164{
1165 return pdmShwModifyPage(pVCpu, GCPtr, 0, ~(uint64_t)X86_PTE_RW, fOpFlags);
1166}
1167
1168
1169/**
1170 * Changing the page flags for a single page in the shadow page tables so as to
1171 * make it writable.
1172 *
1173 * The caller must know with 101% certainty that the guest page tables map this
1174 * as writable too. This function will deal with shared, zero and write monitored
1175 * pages.
1176 *
1177 * @returns VBox status code.
1178 * @param pVCpu The cross context virtual CPU structure.
1179 * @param GCPtr Virtual address of the first page in the range.
1180 * @param fOpFlags A combination of the PGM_MK_PK_XXX flags.
1181 */
1182VMMDECL(int) PGMShwMakePageWritable(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
1183{
1184 if (pVCpu->pgm.s.enmShadowMode != PGMMODE_NONE) /* avoid assertions */
1185 return pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)0, fOpFlags);
1186 return VINF_SUCCESS;
1187}
1188
1189
1190/**
1191 * Changing the page flags for a single page in the shadow page tables so as to
1192 * make it not present.
1193 *
1194 * @returns VBox status code.
1195 * @param pVCpu The cross context virtual CPU structure.
1196 * @param GCPtr Virtual address of the first page in the range.
1197 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
1198 */
1199VMMDECL(int) PGMShwMakePageNotPresent(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
1200{
1201 return pdmShwModifyPage(pVCpu, GCPtr, 0, 0, fOpFlags);
1202}
1203
1204
1205/**
1206 * Changing the page flags for a single page in the shadow page tables so as to
1207 * make it supervisor and writable.
1208 *
1209 * This is for dealing with CR0.WP=0 and readonly user pages.
1210 *
1211 * @returns VBox status code.
1212 * @param pVCpu The cross context virtual CPU structure.
1213 * @param GCPtr Virtual address of the first page in the range.
1214 * @param fBigPage Whether or not this is a big page. If it is, we have to
1215 * change the shadow PDE as well. If it isn't, the caller
1216 * has checked that the shadow PDE doesn't need changing.
1217 * We ASSUME 4KB pages backing the big page here!
1218 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
1219 */
1220int pgmShwMakePageSupervisorAndWritable(PVMCPUCC pVCpu, RTGCPTR GCPtr, bool fBigPage, uint32_t fOpFlags)
1221{
1222 int rc = pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)X86_PTE_US, fOpFlags);
1223 if (rc == VINF_SUCCESS && fBigPage)
1224 {
1225 /* this is a bit ugly... */
1226 switch (pVCpu->pgm.s.enmShadowMode)
1227 {
1228 case PGMMODE_32_BIT:
1229 {
1230 PX86PDE pPde = pgmShwGet32BitPDEPtr(pVCpu, GCPtr);
1231 AssertReturn(pPde, VERR_INTERNAL_ERROR_3);
1232 Log(("pgmShwMakePageSupervisorAndWritable: PDE=%#llx", pPde->u));
1233 pPde->u |= X86_PDE_RW;
1234 Log(("-> PDE=%#llx (32)\n", pPde->u));
1235 break;
1236 }
1237 case PGMMODE_PAE:
1238 case PGMMODE_PAE_NX:
1239 {
1240 PX86PDEPAE pPde = pgmShwGetPaePDEPtr(pVCpu, GCPtr);
1241 AssertReturn(pPde, VERR_INTERNAL_ERROR_3);
1242 Log(("pgmShwMakePageSupervisorAndWritable: PDE=%#llx", pPde->u));
1243 pPde->u |= X86_PDE_RW;
1244 Log(("-> PDE=%#llx (PAE)\n", pPde->u));
1245 break;
1246 }
1247 default:
1248 AssertFailedReturn(VERR_INTERNAL_ERROR_4);
1249 }
1250 }
1251 return rc;
1252}
1253
1254
1255/**
1256 * Gets the shadow page directory for the specified address, PAE.
1257 *
1258 * @returns Pointer to the shadow PD.
1259 * @param pVCpu The cross context virtual CPU structure.
1260 * @param GCPtr The address.
1261 * @param uGstPdpe Guest PDPT entry. Valid.
1262 * @param ppPD Receives address of page directory
1263 */
1264int pgmShwSyncPaePDPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
1265{
1266 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1267 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1268 PPGMPOOLPAGE pShwPage;
1269 int rc;
1270 PGM_LOCK_ASSERT_OWNER(pVM);
1271
1272
1273 /* Allocate page directory if not present. */
1274 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
1275 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
1276 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1277 X86PGPAEUINT const uPdpe = pPdpe->u;
1278 if (uPdpe & (X86_PDPE_P | X86_PDPE_PG_MASK))
1279 {
1280 pShwPage = pgmPoolGetPage(pPool, uPdpe & X86_PDPE_PG_MASK);
1281 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1282 Assert((pPdpe->u & X86_PDPE_PG_MASK) == pShwPage->Core.Key);
1283
1284 pgmPoolCacheUsed(pPool, pShwPage);
1285
1286 /* Update the entry if necessary. */
1287 X86PGPAEUINT const uPdpeNew = pShwPage->Core.Key | (uGstPdpe & (X86_PDPE_P | X86_PDPE_A)) | (uPdpe & PGM_PDPT_FLAGS);
1288 if (uPdpeNew == uPdpe)
1289 { /* likely */ }
1290 else
1291 ASMAtomicWriteU64(&pPdpe->u, uPdpeNew);
1292 }
1293 else
1294 {
1295 RTGCPTR64 GCPdPt;
1296 PGMPOOLKIND enmKind;
1297 if (pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu))
1298 {
1299 /* AMD-V nested paging or real/protected mode without paging. */
1300 GCPdPt = GCPtr & ~(RT_BIT_64(X86_PDPT_SHIFT) - 1);
1301 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
1302 }
1303 else if (CPUMGetGuestCR4(pVCpu) & X86_CR4_PAE)
1304 {
1305 if (uGstPdpe & X86_PDPE_P)
1306 {
1307 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1308 enmKind = PGMPOOLKIND_PAE_PD_FOR_PAE_PD;
1309 }
1310 else
1311 {
1312 /* PD not present; guest must reload CR3 to change it.
1313 * No need to monitor anything in this case. */
1314 /** @todo r=bird: WTF is hit?!? */
1315 /*Assert(VM_IS_RAW_MODE_ENABLED(pVM)); - ??? */
1316 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1317 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
1318 Assert(uGstPdpe & X86_PDPE_P); /* caller should do this already */
1319 }
1320 }
1321 else
1322 {
1323 GCPdPt = CPUMGetGuestCR3(pVCpu);
1324 enmKind = (PGMPOOLKIND)(PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD + iPdPt);
1325 }
1326
1327 /* Create a reference back to the PDPT by using the index in its shadow page. */
1328 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1329 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPdPt, false /*fLockPage*/,
1330 &pShwPage);
1331 AssertRCReturn(rc, rc);
1332
1333 /* Hook it up. */
1334 ASMAtomicWriteU64(&pPdpe->u, pShwPage->Core.Key | (uGstPdpe & (X86_PDPE_P | X86_PDPE_A)) | (uPdpe & PGM_PDPT_FLAGS));
1335 }
1336 PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdpe);
1337
1338 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1339 return VINF_SUCCESS;
1340}
1341
1342
1343/**
1344 * Gets the pointer to the shadow page directory entry for an address, PAE.
1345 *
1346 * @returns Pointer to the PDE.
1347 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1348 * @param GCPtr The address.
1349 * @param ppShwPde Receives the address of the pgm pool page for the shadow page directory
1350 */
1351DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde)
1352{
1353 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1354 PGM_LOCK_ASSERT_OWNER(pVM);
1355
1356 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
1357 AssertReturn(pPdpt, VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT); /* can't happen */
1358 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
1359 X86PGPAEUINT const uPdpe = pPdpt->a[iPdPt].u;
1360 if (!(uPdpe & X86_PDPE_P))
1361 {
1362 LogFlow(("pgmShwGetPaePoolPagePD: PD %d not present (%RX64)\n", iPdPt, uPdpe));
1363 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1364 }
1365 AssertMsg(uPdpe & X86_PDPE_PG_MASK, ("GCPtr=%RGv\n", GCPtr));
1366
1367 /* Fetch the pgm pool shadow descriptor. */
1368 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), uPdpe & X86_PDPE_PG_MASK);
1369 AssertReturn(pShwPde, VERR_PGM_POOL_GET_PAGE_FAILED);
1370
1371 *ppShwPde = pShwPde;
1372 return VINF_SUCCESS;
1373}
1374
1375
1376/**
1377 * Syncs the SHADOW page directory pointer for the specified address.
1378 *
1379 * Allocates backing pages in case the PDPT or PML4 entry is missing.
1380 *
1381 * The caller is responsible for making sure the guest has a valid PD before
1382 * calling this function.
1383 *
1384 * @returns VBox status code.
1385 * @param pVCpu The cross context virtual CPU structure.
1386 * @param GCPtr The address.
1387 * @param uGstPml4e Guest PML4 entry (valid).
1388 * @param uGstPdpe Guest PDPT entry (valid).
1389 * @param ppPD Receives address of page directory
1390 */
1391static int pgmShwSyncLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
1392{
1393 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1394 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1395 bool const fNestedPagingOrNoGstPaging = pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu);
1396 int rc;
1397
1398 PGM_LOCK_ASSERT_OWNER(pVM);
1399
1400 /*
1401 * PML4.
1402 */
1403 PPGMPOOLPAGE pShwPage;
1404 {
1405 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1406 PX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
1407 X86PGPAEUINT const uPml4e = pPml4e->u;
1408
1409 /* Allocate page directory pointer table if not present. */
1410 if (uPml4e & (X86_PML4E_P | X86_PML4E_PG_MASK))
1411 {
1412 pShwPage = pgmPoolGetPage(pPool, uPml4e & X86_PML4E_PG_MASK);
1413 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1414
1415 pgmPoolCacheUsed(pPool, pShwPage);
1416
1417 /* Update the entry if needed. */
1418 X86PGPAEUINT const uPml4eNew = pShwPage->Core.Key | (uGstPml4e & pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask)
1419 | (uPml4e & PGM_PML4_FLAGS);
1420 if (uPml4e == uPml4eNew)
1421 { /* likely */ }
1422 else
1423 ASMAtomicWriteU64(&pPml4e->u, uPml4eNew);
1424 }
1425 else
1426 {
1427 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1428
1429 RTGCPTR64 GCPml4;
1430 PGMPOOLKIND enmKind;
1431 if (fNestedPagingOrNoGstPaging)
1432 {
1433 /* AMD-V nested paging or real/protected mode without paging */
1434 GCPml4 = (RTGCPTR64)iPml4 << X86_PML4_SHIFT; /** @todo bogus calculation for PML5 */
1435 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_PHYS;
1436 }
1437 else
1438 {
1439 GCPml4 = uGstPml4e & X86_PML4E_PG_MASK;
1440 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT;
1441 }
1442
1443 /* Create a reference back to the PDPT by using the index in its shadow page. */
1444 rc = pgmPoolAlloc(pVM, GCPml4, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1445 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, false /*fLockPage*/,
1446 &pShwPage);
1447 AssertRCReturn(rc, rc);
1448
1449 /* Hook it up. */
1450 ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | (uGstPml4e & pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask)
1451 | (uPml4e & PGM_PML4_FLAGS));
1452 }
1453 }
1454
1455 /*
1456 * PDPT.
1457 */
1458 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1459 PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1460 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1461 X86PGPAEUINT const uPdpe = pPdpe->u;
1462
1463 /* Allocate page directory if not present. */
1464 if (uPdpe & (X86_PDPE_P | X86_PDPE_PG_MASK))
1465 {
1466 pShwPage = pgmPoolGetPage(pPool, uPdpe & X86_PDPE_PG_MASK);
1467 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1468
1469 pgmPoolCacheUsed(pPool, pShwPage);
1470
1471 /* Update the entry if needed. */
1472 X86PGPAEUINT const uPdpeNew = pShwPage->Core.Key | (uGstPdpe & pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask)
1473 | (uPdpe & PGM_PDPT_FLAGS);
1474 if (uPdpe == uPdpeNew)
1475 { /* likely */ }
1476 else
1477 ASMAtomicWriteU64(&pPdpe->u, uPdpeNew);
1478 }
1479 else
1480 {
1481 RTGCPTR64 GCPdPt;
1482 PGMPOOLKIND enmKind;
1483 if (fNestedPagingOrNoGstPaging)
1484 {
1485 /* AMD-V nested paging or real/protected mode without paging */
1486 GCPdPt = GCPtr & ~(RT_BIT_64(iPdPt << X86_PDPT_SHIFT) - 1);
1487 enmKind = PGMPOOLKIND_64BIT_PD_FOR_PHYS;
1488 }
1489 else
1490 {
1491 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1492 enmKind = PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD;
1493 }
1494
1495 /* Create a reference back to the PDPT by using the index in its shadow page. */
1496 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1497 pShwPage->idx, iPdPt, false /*fLockPage*/,
1498 &pShwPage);
1499 AssertRCReturn(rc, rc);
1500
1501 /* Hook it up. */
1502 ASMAtomicWriteU64(&pPdpe->u,
1503 pShwPage->Core.Key | (uGstPdpe & pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask) | (uPdpe & PGM_PDPT_FLAGS));
1504 }
1505
1506 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1507 return VINF_SUCCESS;
1508}
1509
1510
1511/**
1512 * Gets the SHADOW page directory pointer for the specified address (long mode).
1513 *
1514 * @returns VBox status code.
1515 * @param pVCpu The cross context virtual CPU structure.
1516 * @param GCPtr The address.
1517 * @param ppPml4e Receives the address of the page map level 4 entry.
1518 * @param ppPdpt Receives the address of the page directory pointer table.
1519 * @param ppPD Receives the address of the page directory.
1520 */
1521DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
1522{
1523 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1524 PGM_LOCK_ASSERT_OWNER(pVM);
1525
1526 /*
1527 * PML4
1528 */
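    /* The PML4 index is bits 39..47 of the guest-virtual address (9 bits, 512 entries per table). */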
1529 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1530 PCX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
1531 AssertReturn(pPml4e, VERR_PGM_PML4_MAPPING);
1532 if (ppPml4e)
1533 *ppPml4e = (PX86PML4E)pPml4e;
1534 X86PGPAEUINT const uPml4e = pPml4e->u;
1535 Log4(("pgmShwGetLongModePDPtr %RGv (%RHv) %RX64\n", GCPtr, pPml4e, uPml4e));
1536 if (!(uPml4e & X86_PML4E_P)) /** @todo other code checks for a NULL page frame number instead! */
1537 return VERR_PAGE_MAP_LEVEL4_NOT_PRESENT;
1538
1539 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1540 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, uPml4e & X86_PML4E_PG_MASK);
1541 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1542
1543 /*
1544 * PDPT
1545 */
1546 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1547 PCX86PDPT pPdpt = *ppPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1548 X86PGPAEUINT const uPdpe = pPdpt->a[iPdPt].u;
1549 if (!(uPdpe & X86_PDPE_P)) /** @todo other code checks for a NULL page frame number instead! */
1550 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1551
1552 pShwPage = pgmPoolGetPage(pPool, uPdpe & X86_PDPE_PG_MASK);
1553 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1554
1555 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1556 Log4(("pgmShwGetLongModePDPtr %RGv -> *ppPD=%p PDE=%p/%RX64\n", GCPtr, *ppPD, &(*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK], (*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK].u));
1557 return VINF_SUCCESS;
1558}
1559
1560
1561/**
1562 * Syncs the SHADOW EPT page directory pointer for the specified address. Allocates
1563 * backing pages in case the PDPT or PML4 entry is missing.
1564 *
1565 * @returns VBox status code.
1566 * @param pVCpu The cross context virtual CPU structure.
1567 * @param GCPtr The address.
1568 * @param ppPdpt Receives the address of the page directory pointer table.
1569 * @param ppPD Receives the address of the page directory.
1570 */
1571static int pgmShwGetEPTPDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD)
1572{
1573 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1574 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1575 int rc;
1576
1577 Assert(pVM->pgm.s.fNestedPaging);
1578 PGM_LOCK_ASSERT_OWNER(pVM);
1579
1580 /*
1581 * PML4 level.
1582 */
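    /* EPT uses the same 9-bits-per-level index layout as AMD64 long mode; bits 39..47 of the address select the PML4 entry. */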
1583 PEPTPML4 pPml4 = (PEPTPML4)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1584 Assert(pPml4);
1585
1586 /* Allocate page directory pointer table if not present. */
1587 PPGMPOOLPAGE pShwPage;
1588 {
1589 const unsigned iPml4 = (GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
1590 PEPTPML4E pPml4e = &pPml4->a[iPml4];
1591 EPTPML4E Pml4e;
1592 Pml4e.u = pPml4e->u;
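        /* EPT entries have no dedicated present bit; since shadow entries are always created with read, write and
           execute access set (see below), checking the read bit together with the address bits is sufficient here. */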
1593 if (!(Pml4e.u & (EPT_E_PG_MASK | EPT_E_READ)))
1594 {
1595 RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT;
1596 rc = pgmPoolAlloc(pVM, GCPml4, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1597 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, false /*fLockPage*/,
1598 &pShwPage);
1599 AssertRCReturn(rc, rc);
1600
1601 /* Hook up the new PDPT now. */
1602 ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1603 }
1604 else
1605 {
1606 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
1607 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1608
1609 pgmPoolCacheUsed(pPool, pShwPage);
1610
1611 /* Hook up the cached PDPT if needed (probably not given 512*512 PTs to sync). */
1612 if (Pml4e.u == (pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE))
1613 { }
1614 else
1615 ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1616 }
1617 }
1618
1619 /*
1620 * PDPT level.
1621 */
1622 const unsigned iPdPt = (GCPtr >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
1623 PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1624 PEPTPDPTE pPdpe = &pPdpt->a[iPdPt];
1625
1626 if (ppPdpt)
1627 *ppPdpt = pPdpt;
1628
1629 /* Allocate page directory if not present. */
1630 EPTPDPTE Pdpe;
1631 Pdpe.u = pPdpe->u;
1632 if (!(Pdpe.u & (EPT_E_PG_MASK | EPT_E_READ)))
1633 {
1634 RTGCPTR64 const GCPdPt = GCPtr & ~(RT_BIT_64(EPT_PDPT_SHIFT) - 1);
1635 rc = pgmPoolAlloc(pVM, GCPdPt, PGMPOOLKIND_EPT_PD_FOR_PHYS, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1636 pShwPage->idx, iPdPt, false /*fLockPage*/,
1637 &pShwPage);
1638 AssertRCReturn(rc, rc);
1639
1640 /* Hook up the new PD now. */
1641 ASMAtomicWriteU64(&pPdpe->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1642 }
1643 else
1644 {
1645 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & EPT_PDPTE_PG_MASK);
1646 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1647
1648 pgmPoolCacheUsed(pPool, pShwPage);
1649
1650 /* Hook up the cached PD if needed (unlikely, given there are 512 PTs we may need to sync). */
1651 if (Pdpe.u == (pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE))
1652 { }
1653 else
1654 ASMAtomicWriteU64(&pPdpe->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1655 }
1656
1657 *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1658 return VINF_SUCCESS;
1659}
1660
1661
1662#ifdef IN_RING0
1663/**
1664 * Synchronizes a range of nested page table entries.
1665 *
1666 * The caller must own the PGM lock.
1667 *
1668 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1669 * @param GCPhys Where to start.
1670 * @param cPages How many pages which entries should be synced.
1671 * @param enmShwPagingMode The shadow paging mode (PGMMODE_EPT for VT-x,
1672 * host paging mode for AMD-V).
1673 */
1674int pgmShwSyncNestedPageLocked(PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint32_t cPages, PGMMODE enmShwPagingMode)
1675{
1676 PGM_LOCK_ASSERT_OWNER(pVCpu->CTX_SUFF(pVM));
1677
1678/** @todo r=bird: Gotta love this nested paging hacking we're still carrying with us... (Split PGM_TYPE_NESTED.) */
1679 int rc;
1680 switch (enmShwPagingMode)
1681 {
1682 case PGMMODE_32_BIT:
1683 {
1684 X86PDE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1685 rc = PGM_BTH_NAME_32BIT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1686 break;
1687 }
1688
1689 case PGMMODE_PAE:
1690 case PGMMODE_PAE_NX:
1691 {
1692 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1693 rc = PGM_BTH_NAME_PAE_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1694 break;
1695 }
1696
1697 case PGMMODE_AMD64:
1698 case PGMMODE_AMD64_NX:
1699 {
1700 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1701 rc = PGM_BTH_NAME_AMD64_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1702 break;
1703 }
1704
1705 case PGMMODE_EPT:
1706 {
1707 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1708 rc = PGM_BTH_NAME_EPT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1709 break;
1710 }
1711
1712 default:
1713 AssertMsgFailedReturn(("%d\n", enmShwPagingMode), VERR_IPE_NOT_REACHED_DEFAULT_CASE);
1714 }
1715 return rc;
1716}
1717#endif /* IN_RING0 */
1718
1719
1720/**
1721 * Gets effective Guest OS page information.
1722 *
1723 * When GCPtr is in a big page, the function will return as if it was a normal
1724 * 4KB page. Should distinguishing between big and normal pages become necessary
1725 * at a later point, a dedicated variant of PGMGstGetPage() will be created for
1726 * that purpose.
1727 *
1728 * @returns VBox status code.
1729 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1730 * @param GCPtr Guest Context virtual address of the page.
1731 * @param pWalk Where to store the page walk information.
1732 */
1733VMMDECL(int) PGMGstGetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk)
1734{
1735 VMCPU_ASSERT_EMT(pVCpu);
1736 Assert(pWalk);
1737 uintptr_t idx = pVCpu->pgm.s.idxGuestModeData;
1738 AssertReturn(idx < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
1739 AssertReturn(g_aPgmGuestModeData[idx].pfnGetPage, VERR_PGM_MODE_IPE);
1740 return g_aPgmGuestModeData[idx].pfnGetPage(pVCpu, GCPtr, pWalk);
1741}
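/*
 * Illustrative usage sketch (GCPtr is a hypothetical caller variable):
 *
 *      PGMPTWALK Walk;
 *      int rc = PGMGstGetPage(pVCpu, GCPtr, &Walk);
 *      if (RT_SUCCESS(rc) && Walk.fSucceeded)
 *          Log(("GCPtr=%RGv -> GCPhys=%RGp\n", GCPtr, Walk.GCPhys));
 */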
1742
1743
1744/**
1745 * Maps the guest CR3.
1746 *
1747 * @returns VBox status code.
1748 * @param pVCpu The cross context virtual CPU structure.
1749 * @param GCPhysCr3 The guest CR3 value.
1750 * @param pHCPtrGuestCr3 Where to store the mapped memory.
1751 */
1752DECLINLINE(int) pgmGstMapCr3(PVMCPUCC pVCpu, RTGCPHYS GCPhysCr3, PRTHCPTR pHCPtrGuestCr3)
1753{
1754 /** @todo this needs some reworking wrt. locking? */
1755 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1756 PGM_LOCK_VOID(pVM);
1757 PPGMPAGE pPageCr3 = pgmPhysGetPage(pVM, GCPhysCr3);
1758 AssertReturnStmt(pPageCr3, PGM_UNLOCK(pVM), VERR_PGM_INVALID_CR3_ADDR);
1759
1760 RTHCPTR HCPtrGuestCr3;
1761 int rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPageCr3, GCPhysCr3, (void **)&HCPtrGuestCr3);
1762 PGM_UNLOCK(pVM);
1763
1764 *pHCPtrGuestCr3 = HCPtrGuestCr3;
1765 return rc;
1766}
1767
1768
1769/**
1770 * Unmaps the guest CR3.
1771 *
1772 * @returns VBox status code.
1773 * @param pVCpu The cross context virtual CPU structure.
1774 */
1775DECLINLINE(int) pgmGstUnmapCr3(PVMCPUCC pVCpu)
1776{
1777 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
1778 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
1779 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
1780 return g_aPgmBothModeData[idxBth].pfnUnmapCR3(pVCpu);
1781}
1782
1783
1784/**
1785 * Performs a guest page table walk.
1786 *
1787 * The guest should be in paged protected mode or long mode when making a call to
1788 * this function.
1789 *
1790 * @returns VBox status code.
1791 * @retval VINF_SUCCESS on success.
1792 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
1793 * @retval VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
1794 * not valid, except enmType is PGMPTWALKGSTTYPE_INVALID.
1795 *
1796 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1797 * @param GCPtr The guest virtual address to walk by.
1798 * @param pWalk Where to return the walk result. This is valid for some
1799 * error codes as well.
1800 * @param pGstWalk The guest mode specific page walk information.
1801 */
1802int pgmGstPtWalk(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
1803{
1804 VMCPU_ASSERT_EMT(pVCpu);
1805 switch (pVCpu->pgm.s.enmGuestMode)
1806 {
1807 case PGMMODE_32_BIT:
1808 pGstWalk->enmType = PGMPTWALKGSTTYPE_32BIT;
1809 return PGM_GST_NAME_32BIT(Walk)(pVCpu, GCPtr, pWalk, &pGstWalk->u.Legacy);
1810
1811 case PGMMODE_PAE:
1812 case PGMMODE_PAE_NX:
1813 pGstWalk->enmType = PGMPTWALKGSTTYPE_PAE;
1814 return PGM_GST_NAME_PAE(Walk)(pVCpu, GCPtr, pWalk, &pGstWalk->u.Pae);
1815
1816 case PGMMODE_AMD64:
1817 case PGMMODE_AMD64_NX:
1818 pGstWalk->enmType = PGMPTWALKGSTTYPE_AMD64;
1819 return PGM_GST_NAME_AMD64(Walk)(pVCpu, GCPtr, pWalk, &pGstWalk->u.Amd64);
1820
1821 case PGMMODE_REAL:
1822 case PGMMODE_PROTECTED:
1823 pGstWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
1824 return VERR_PGM_NOT_USED_IN_MODE;
1825
1826 case PGMMODE_EPT:
1827 case PGMMODE_NESTED_32BIT:
1828 case PGMMODE_NESTED_PAE:
1829 case PGMMODE_NESTED_AMD64:
1830 default:
1831 AssertFailed();
1832 pGstWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
1833 return VERR_PGM_NOT_USED_IN_MODE;
1834 }
1835}
1836
1837
1838#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1839/**
1840 * Performs a guest second-level address translation (SLAT).
1841 *
1842 * The guest paging mode must be 32-bit, PAE or AMD64 when making a call to this
1843 * function.
1844 *
1845 * @returns VBox status code.
1846 * @retval VINF_SUCCESS on success.
1847 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
1848 * @retval VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
1849 * not valid, except enmType is PGMPTWALKGSTTYPE_INVALID.
1850 *
1851 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1852 * @param GCPhysNested The nested-guest physical address being translated
1853 * (input).
1854 * @param fIsLinearAddrValid Whether the linear address in @a GCPtrNested is
1855 * valid. This indicates the SLAT is caused by
1856 * translating a nested-guest linear address.
1857 * @param GCPtrNested The nested-guest virtual address that initiated the
1858 * SLAT. If none, pass NIL_RTGCPTR.
1859 * @param pWalk Where to return the walk result. This is valid for
1860 * some error codes as well.
1861 * @param pGstWalk The second-level paging-mode specific walk
1862 * information.
1863 */
1864static int pgmGstSlatWalk(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, bool fIsLinearAddrValid, RTGCPTR GCPtrNested,
1865 PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
1866{
1867 /* SLAT mode must be valid at this point as this should only be used -after- we have determined SLAT mode. */
1868 Assert( pVCpu->pgm.s.enmGuestSlatMode != PGMSLAT_DIRECT
1869 && pVCpu->pgm.s.enmGuestSlatMode != PGMSLAT_INVALID);
1870 switch (pVCpu->pgm.s.enmGuestSlatMode)
1871 {
1872 case PGMSLAT_EPT:
1873 pGstWalk->enmType = PGMPTWALKGSTTYPE_EPT;
1874 return PGM_GST_SLAT_NAME_EPT(Walk)(pVCpu, GCPhysNested, fIsLinearAddrValid, GCPtrNested, pWalk, &pGstWalk->u.Ept);
1875
1876 default:
1877 AssertFailed();
1878 pGstWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
1879 return VERR_PGM_NOT_USED_IN_MODE;
1880 }
1881}
1882
1883
1884/**
1885 * Performs a guest second-level address translation (SLAT) for a nested-guest
1886 * physical address.
1887 *
1888 * This version requires the SLAT mode to be provided by the caller because we could
1889 * be in the process of switching paging modes (MOV CRX) and cannot presume control
1890 * register values.
1891 *
1892 * @returns VBox status code.
1893 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1894 * @param enmSlatMode The second-level paging mode to use.
1895 * @param GCPhysNested The nested-guest physical address to translate.
1896 * @param pWalk Where to store the walk result.
1897 * @param pGstWalk Where to store the second-level paging-mode specific
1898 * walk information.
1899 */
1900static int pgmGstSlatWalkPhys(PVMCPUCC pVCpu, PGMSLAT enmSlatMode, RTGCPHYS GCPhysNested, PPGMPTWALK pWalk,
1901 PPGMPTWALKGST pGstWalk)
1902{
1903 AssertPtr(pWalk);
1904 AssertPtr(pGstWalk);
1905 switch (enmSlatMode)
1906 {
1907 case PGMSLAT_EPT:
1908 pGstWalk->enmType = PGMPTWALKGSTTYPE_EPT;
1909 return PGM_GST_SLAT_NAME_EPT(Walk)(pVCpu, GCPhysNested, false /* fIsLinearaddrValid */, NIL_RTGCPTR, pWalk,
1910 &pGstWalk->u.Ept);
1911
1912 default:
1913 AssertFailed();
1914 return VERR_PGM_NOT_USED_IN_MODE;
1915 }
1916}
1917#endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
1918
1919
1920/**
1921 * Tries to continue the previous walk.
1922 *
1923 * @note Requires the caller to hold the PGM lock from the first
1924 * pgmGstPtWalk() call to the last pgmGstPtWalkNext() call. Otherwise
1925 * we cannot use the pointers.
1926 *
1927 * @returns VBox status code.
1928 * @retval VINF_SUCCESS on success.
1929 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
1930 * @retval VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
1931 * not valid, except enmType is PGMPTWALKGSTTYPE_INVALID.
1932 *
1933 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1934 * @param GCPtr The guest virtual address to walk by.
1935 * @param pWalk Pointer to the previous walk result and where to return
1936 * the result of this walk. This is valid for some error
1937 * codes as well.
1938 * @param pGstWalk The guest-mode specific walk information.
1939 */
1940int pgmGstPtWalkNext(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
1941{
1942 /*
1943 * We can only handle successful walks.
1944 * We also limit ourselves to the next page.
1945 */
1946 if ( pWalk->fSucceeded
1947 && GCPtr - pWalk->GCPtr == GUEST_PAGE_SIZE)
1948 {
1949 Assert(pWalk->uLevel == 0);
1950 if (pGstWalk->enmType == PGMPTWALKGSTTYPE_AMD64)
1951 {
1952 /*
1953 * AMD64
1954 */
1955 if (!pWalk->fGigantPage && !pWalk->fBigPage)
1956 {
1957 /*
1958 * We fall back to full walk if the PDE table changes, if any
1959 * reserved bits are set, or if the effective page access changes.
1960 */
1961 const uint64_t fPteSame = X86_PTE_P | X86_PTE_RW | X86_PTE_US | X86_PTE_PWT
1962 | X86_PTE_PCD | X86_PTE_A | X86_PTE_PAE_NX;
1963 const uint64_t fPdeSame = X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_PWT
1964 | X86_PDE_PCD | X86_PDE_A | X86_PDE_PAE_NX | X86_PDE_PS;
1965
1966 if ((GCPtr >> X86_PD_PAE_SHIFT) == (pWalk->GCPtr >> X86_PD_PAE_SHIFT))
1967 {
1968 if (pGstWalk->u.Amd64.pPte)
1969 {
1970 X86PTEPAE Pte;
1971 Pte.u = pGstWalk->u.Amd64.pPte[1].u;
1972 if ( (Pte.u & fPteSame) == (pGstWalk->u.Amd64.Pte.u & fPteSame)
1973 && !(Pte.u & (pVCpu)->pgm.s.fGstAmd64MbzPteMask))
1974 {
1975 pWalk->GCPtr = GCPtr;
1976 pWalk->GCPhys = Pte.u & X86_PTE_PAE_PG_MASK;
1977 pGstWalk->u.Amd64.Pte.u = Pte.u;
1978 pGstWalk->u.Amd64.pPte++;
1979 return VINF_SUCCESS;
1980 }
1981 }
1982 }
1983 else if ((GCPtr >> X86_PDPT_SHIFT) == (pWalk->GCPtr >> X86_PDPT_SHIFT))
1984 {
1985 Assert(!((GCPtr >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK)); /* Must be first PT entry. */
1986 if (pGstWalk->u.Amd64.pPde)
1987 {
1988 X86PDEPAE Pde;
1989 Pde.u = pGstWalk->u.Amd64.pPde[1].u;
1990 if ( (Pde.u & fPdeSame) == (pGstWalk->u.Amd64.Pde.u & fPdeSame)
1991 && !(Pde.u & (pVCpu)->pgm.s.fGstAmd64MbzPdeMask))
1992 {
1993 /* Get the new PT and check out its first entry. */
1994 int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, PGM_A20_APPLY(pVCpu, (Pde.u & X86_PDE_PAE_PG_MASK)),
1995 &pGstWalk->u.Amd64.pPt);
1996 if (RT_SUCCESS(rc))
1997 {
1998 pGstWalk->u.Amd64.pPte = &pGstWalk->u.Amd64.pPt->a[0];
1999 X86PTEPAE Pte;
2000 Pte.u = pGstWalk->u.Amd64.pPte->u;
2001 if ( (Pte.u & fPteSame) == (pGstWalk->u.Amd64.Pte.u & fPteSame)
2002 && !(Pte.u & (pVCpu)->pgm.s.fGstAmd64MbzPteMask))
2003 {
2004 pWalk->GCPtr = GCPtr;
2005 pWalk->GCPhys = Pte.u & X86_PTE_PAE_PG_MASK;
2006 pGstWalk->u.Amd64.Pte.u = Pte.u;
2007 pGstWalk->u.Amd64.Pde.u = Pde.u;
2008 pGstWalk->u.Amd64.pPde++;
2009 return VINF_SUCCESS;
2010 }
2011 }
2012 }
2013 }
2014 }
2015 }
2016 else if (!pWalk->fGigantPage)
2017 {
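                /* 2 MB page: the previous walk can be reused as long as we stay within the same big page. */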
2018 if ((GCPtr & X86_PAGE_2M_BASE_MASK) == (pWalk->GCPtr & X86_PAGE_2M_BASE_MASK))
2019 {
2020 pWalk->GCPtr = GCPtr;
2021 pWalk->GCPhys += GUEST_PAGE_SIZE;
2022 return VINF_SUCCESS;
2023 }
2024 }
2025 else
2026 {
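                /* 1 GB page: likewise, only the offset within the same gigantic page changes. */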
2027 if ((GCPtr & X86_PAGE_1G_BASE_MASK) == (pWalk->GCPtr & X86_PAGE_1G_BASE_MASK))
2028 {
2029 pWalk->GCPtr = GCPtr;
2030 pWalk->GCPhys += GUEST_PAGE_SIZE;
2031 return VINF_SUCCESS;
2032 }
2033 }
2034 }
2035 }
2036 /* Case we don't handle. Do full walk. */
2037 return pgmGstPtWalk(pVCpu, GCPtr, pWalk, pGstWalk);
2038}
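/*
 * Illustrative usage sketch (GCPtrFirst and cPages are hypothetical caller variables;
 * the PGM lock must be held across the whole sequence, see the @note above):
 *
 *      PGMPTWALK    Walk;
 *      PGMPTWALKGST GstWalk;
 *      int rc = pgmGstPtWalk(pVCpu, GCPtrFirst, &Walk, &GstWalk);
 *      for (uint32_t iPage = 1; iPage < cPages && RT_SUCCESS(rc); iPage++)
 *          rc = pgmGstPtWalkNext(pVCpu, GCPtrFirst + iPage * GUEST_PAGE_SIZE, &Walk, &GstWalk);
 */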
2039
2040
2041/**
2042 * Modifies page flags for a range of pages in the guest's tables.
2043 *
2044 * The existing flags are ANDed with the fMask and ORed with the fFlags.
2045 *
2046 * @returns VBox status code.
2047 * @param pVCpu The cross context virtual CPU structure.
2048 * @param GCPtr Virtual address of the first page in the range.
2049 * @param cb Size (in bytes) of the range to apply the modification to.
2050 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
2051 * @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
2052 * Be very CAREFUL when ~'ing constants which could be 32-bit!
2053 */
2054VMMDECL(int) PGMGstModifyPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
2055{
2056 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,GstModifyPage), a);
2057 VMCPU_ASSERT_EMT(pVCpu);
2058
2059 /*
2060 * Validate input.
2061 */
2062 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
2063 Assert(cb);
2064
2065 LogFlow(("PGMGstModifyPage %RGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));
2066
2067 /*
2068 * Adjust input.
2069 */
2070 cb += GCPtr & GUEST_PAGE_OFFSET_MASK;
2071 cb = RT_ALIGN_Z(cb, GUEST_PAGE_SIZE);
2072 GCPtr = (GCPtr & PAGE_BASE_GC_MASK);
2073
2074 /*
2075 * Call worker.
2076 */
2077 uintptr_t idx = pVCpu->pgm.s.idxGuestModeData;
2078 AssertReturn(idx < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
2079 AssertReturn(g_aPgmGuestModeData[idx].pfnModifyPage, VERR_PGM_MODE_IPE);
2080 int rc = g_aPgmGuestModeData[idx].pfnModifyPage(pVCpu, GCPtr, cb, fFlags, fMask);
2081
2082 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,GstModifyPage), a);
2083 return rc;
2084}
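/*
 * Illustrative usage sketch (GCPtr is a hypothetical caller variable): make a single page
 * read-only and then writable again.  Note the 64-bit casts when inverting flag constants,
 * as warned above.
 *
 *      int rc = PGMGstModifyPage(pVCpu, GCPtr, GUEST_PAGE_SIZE, 0, ~(uint64_t)X86_PTE_RW);
 *      ...
 *      rc = PGMGstModifyPage(pVCpu, GCPtr, GUEST_PAGE_SIZE, X86_PTE_RW, ~(uint64_t)0);
 */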
2085
2086
2087/**
2088 * Checks whether the given PAE PDPEs are potentially valid for the guest.
2089 *
2090 * @returns @c true if all PDPEs are valid, @c false otherwise.
2091 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2092 * @param paPaePdpes The PAE PDPEs to validate.
2093 *
2094 * @remarks This function -only- checks the reserved bits in the PDPE entries.
2095 */
2096VMM_INT_DECL(bool) PGMGstArePaePdpesValid(PVMCPUCC pVCpu, PCX86PDPE paPaePdpes)
2097{
2098 Assert(paPaePdpes);
2099 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
2100 {
2101 X86PDPE const PaePdpe = paPaePdpes[i];
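        /* The entry is acceptable if it is not present, or if it is present with none of the must-be-zero bits set. */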
2102 if ( !(PaePdpe.u & X86_PDPE_P)
2103 || !(PaePdpe.u & pVCpu->pgm.s.fGstPaeMbzPdpeMask))
2104 { /* likely */ }
2105 else
2106 return false;
2107 }
2108 return true;
2109}
2110
2111
2112/**
2113 * Performs the lazy mapping of the 32-bit guest PD.
2114 *
2115 * @returns VBox status code.
2116 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2117 * @param ppPd Where to return the pointer to the mapping. This is
2118 * always set.
2119 */
2120int pgmGstLazyMap32BitPD(PVMCPUCC pVCpu, PX86PD *ppPd)
2121{
2122 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2123 PGM_LOCK_VOID(pVM);
2124
2125 Assert(!pVCpu->pgm.s.CTX_SUFF(pGst32BitPd));
2126
2127 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAGE_MASK;
2128 PPGMPAGE pPage;
2129 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
2130 if (RT_SUCCESS(rc))
2131 {
2132 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)ppPd);
2133 if (RT_SUCCESS(rc))
2134 {
2135# ifdef IN_RING3
2136 pVCpu->pgm.s.pGst32BitPdR0 = NIL_RTR0PTR;
2137 pVCpu->pgm.s.pGst32BitPdR3 = *ppPd;
2138# else
2139 pVCpu->pgm.s.pGst32BitPdR3 = NIL_RTR0PTR;
2140 pVCpu->pgm.s.pGst32BitPdR0 = *ppPd;
2141# endif
2142 PGM_UNLOCK(pVM);
2143 return VINF_SUCCESS;
2144 }
2145 AssertRC(rc);
2146 }
2147 PGM_UNLOCK(pVM);
2148
2149 *ppPd = NULL;
2150 return rc;
2151}
2152
2153
2154/**
2155 * Performs the lazy mapping of the PAE guest PDPT.
2156 *
2157 * @returns VBox status code.
2158 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2159 * @param ppPdpt Where to return the pointer to the mapping. This is
2160 * always set.
2161 */
2162int pgmGstLazyMapPaePDPT(PVMCPUCC pVCpu, PX86PDPT *ppPdpt)
2163{
2164 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt));
2165 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2166 PGM_LOCK_VOID(pVM);
2167
2168 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAE_PAGE_MASK;
2169 PPGMPAGE pPage;
2170 /** @todo Nested VMX: convert GCPhysCR3 from nested-guest physical to
2171 * guest-physical address here. */
2172 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
2173 if (RT_SUCCESS(rc))
2174 {
2175 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)ppPdpt);
2176 if (RT_SUCCESS(rc))
2177 {
2178# ifdef IN_RING3
2179 pVCpu->pgm.s.pGstPaePdptR0 = NIL_RTR0PTR;
2180 pVCpu->pgm.s.pGstPaePdptR3 = *ppPdpt;
2181# else
2182 pVCpu->pgm.s.pGstPaePdptR3 = NIL_RTR3PTR;
2183 pVCpu->pgm.s.pGstPaePdptR0 = *ppPdpt;
2184# endif
2185 PGM_UNLOCK(pVM);
2186 return VINF_SUCCESS;
2187 }
2188 AssertRC(rc);
2189 }
2190
2191 PGM_UNLOCK(pVM);
2192 *ppPdpt = NULL;
2193 return rc;
2194}
2195
2196
2197/**
2198 * Performs the lazy mapping / updating of a PAE guest PD.
2199 *
2200 * @returns VBox status code.
2202 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2203 * @param iPdpt Which PD entry to map (0..3).
2204 * @param ppPd Where to return the pointer to the mapping. This is
2205 * always set.
2206 */
2207int pgmGstLazyMapPaePD(PVMCPUCC pVCpu, uint32_t iPdpt, PX86PDPAE *ppPd)
2208{
2209 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2210 PGM_LOCK_VOID(pVM);
2211
2212 PX86PDPT pGuestPDPT = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
2213 Assert(pGuestPDPT);
2214 Assert(pGuestPDPT->a[iPdpt].u & X86_PDPE_P);
2215 RTGCPHYS GCPhys = pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK;
2216 bool const fChanged = pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] != GCPhys;
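    /* Remember whether the PD's physical address changed; the cached address is only updated below on success. */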
2217
2218 PPGMPAGE pPage;
2219 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
2220 if (RT_SUCCESS(rc))
2221 {
2222 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)ppPd);
2223 AssertRC(rc);
2224 if (RT_SUCCESS(rc))
2225 {
2226# ifdef IN_RING3
2227 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = NIL_RTR0PTR;
2228 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = *ppPd;
2229# else
2230 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = NIL_RTR3PTR;
2231 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = *ppPd;
2232# endif
2233 if (fChanged)
2234 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = GCPhys;
2235 PGM_UNLOCK(pVM);
2236 return VINF_SUCCESS;
2237 }
2238 }
2239
2240 /* Invalid page or some failure, invalidate the entry. */
2241 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = NIL_RTGCPHYS;
2242 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = NIL_RTR3PTR;
2243 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = NIL_RTR0PTR;
2244
2245 PGM_UNLOCK(pVM);
2246 return rc;
2247}
2248
2249
2250/**
2251 * Performs the lazy mapping of the AMD64 guest PML4 table.
2252 *
2253 * @returns VBox status code.
2254 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2255 * @param ppPml4 Where to return the pointer to the mapping. This will
2256 * always be set.
2257 */
2258int pgmGstLazyMapPml4(PVMCPUCC pVCpu, PX86PML4 *ppPml4)
2259{
2260 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4));
2261 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2262 PGM_LOCK_VOID(pVM);
2263
2264 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_AMD64_PAGE_MASK;
2265 PPGMPAGE pPage;
2266 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
2267 if (RT_SUCCESS(rc))
2268 {
2269 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)ppPml4);
2270 if (RT_SUCCESS(rc))
2271 {
2272# ifdef IN_RING3
2273 pVCpu->pgm.s.pGstAmd64Pml4R0 = NIL_RTR0PTR;
2274 pVCpu->pgm.s.pGstAmd64Pml4R3 = *ppPml4;
2275# else
2276 pVCpu->pgm.s.pGstAmd64Pml4R3 = NIL_RTR3PTR;
2277 pVCpu->pgm.s.pGstAmd64Pml4R0 = *ppPml4;
2278# endif
2279 PGM_UNLOCK(pVM);
2280 return VINF_SUCCESS;
2281 }
2282 }
2283
2284 PGM_UNLOCK(pVM);
2285 *ppPml4 = NULL;
2286 return rc;
2287}
2288
2289
2290#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2291/**
2292 * Performs the lazy mapping of the guest PML4 table when using EPT paging.
2293 *
2294 * @returns VBox status code.
2295 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2296 * @param ppEptPml4 Where to return the pointer to the mapping. This will
2297 * always be set.
2298 */
2299int pgmGstLazyMapEptPml4(PVMCPUCC pVCpu, PEPTPML4 *ppEptPml4)
2300{
2301 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstEptPml4));
2302 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2303 PGM_LOCK_VOID(pVM);
2304
2305 RTGCPHYS const GCPhysEpt = pVCpu->pgm.s.uEptPtr & EPT_EPTP_PG_MASK;
2306 PPGMPAGE pPage;
2307 int rc = pgmPhysGetPageEx(pVM, GCPhysEpt, &pPage);
2308 if (RT_SUCCESS(rc))
2309 {
2310 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysEpt, (void **)ppEptPml4);
2311 if (RT_SUCCESS(rc))
2312 {
2313# ifdef IN_RING3
2314 pVCpu->pgm.s.pGstEptPml4R0 = NIL_RTR0PTR;
2315 pVCpu->pgm.s.pGstEptPml4R3 = *ppEptPml4;
2316# else
2317 pVCpu->pgm.s.pGstEptPml4R3 = NIL_RTR3PTR;
2318 pVCpu->pgm.s.pGstEptPml4R0 = *ppEptPml4;
2319# endif
2320 PGM_UNLOCK(pVM);
2321 return VINF_SUCCESS;
2322 }
2323 }
2324
2325 PGM_UNLOCK(pVM);
2326 *ppEptPml4 = NULL;
2327 return rc;
2328}
2329#endif
2330
2331
2332/**
2333 * Gets the current CR3 register value for the shadow memory context.
2334 * @returns CR3 value.
2335 * @param pVCpu The cross context virtual CPU structure.
2336 */
2337VMMDECL(RTHCPHYS) PGMGetHyperCR3(PVMCPU pVCpu)
2338{
2339 PPGMPOOLPAGE pPoolPage = pVCpu->pgm.s.CTX_SUFF(pShwPageCR3);
2340 AssertPtrReturn(pPoolPage, NIL_RTHCPHYS);
2341 return pPoolPage->Core.Key;
2342}
2343
2344
2345/**
2346 * Forces lazy remapping of the guest's PAE page-directory structures.
2347 *
2348 * @param pVCpu The cross context virtual CPU structure.
2349 */
2350static void pgmGstFlushPaePdpes(PVMCPU pVCpu)
2351{
2352 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->pgm.s.aGCPhysGstPaePDs); i++)
2353 {
2354 pVCpu->pgm.s.apGstPaePDsR3[i] = 0;
2355 pVCpu->pgm.s.apGstPaePDsR0[i] = 0;
2356 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
2357 }
2358}
2359
2360
2361/**
2362 * Gets the CR3 mask corresponding to the given paging mode.
2363 *
2364 * @returns The CR3 mask.
2365 * @param enmMode The paging mode.
2366 */
2367DECLINLINE(uint64_t) pgmGetCr3MaskForMode(PGMMODE enmMode)
2368{
2369 /** @todo This work can be optimized either by storing the masks in
2370 * pVCpu->pgm.s.afGstCr3Masks[] for all PGMMODEs -or- just do this once and
2371 * store the result when entering guest mode since we currently use it only
2372 * for enmGuestMode. */
2373 switch (enmMode)
2374 {
2375 case PGMMODE_PAE:
2376 case PGMMODE_PAE_NX:
2377 return X86_CR3_PAE_PAGE_MASK;
2378 case PGMMODE_AMD64:
2379 case PGMMODE_AMD64_NX:
2380 return X86_CR3_AMD64_PAGE_MASK;
2381 case PGMMODE_EPT:
2382 return X86_CR3_EPT_PAGE_MASK;
2383 default:
2384 return X86_CR3_PAGE_MASK;
2385 }
2386}
2387
2388
2389/**
2390 * Gets the masked CR3 value according to the current guest paging mode.
2391 *
2392 * @returns The masked PGM CR3 value.
2393 * @param pVCpu The cross context virtual CPU structure.
2394 * @param uCr3 The raw guest CR3 value.
2395 */
2396DECLINLINE(RTGCPHYS) pgmGetGuestMaskedCr3(PVMCPUCC pVCpu, uint64_t uCr3)
2397{
2398 uint64_t const fCr3Mask = pgmGetCr3MaskForMode(pVCpu->pgm.s.enmGuestMode);
2399 RTGCPHYS GCPhysCR3 = (RTGCPHYS)(uCr3 & fCr3Mask);
2400 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
2401 return GCPhysCR3;
2402}
2403
2404
2405#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2406/**
2407 * Performs second-level address translation for the given CR3 and updates the
2408 * nested-guest CR3 when successful.
2409 *
2410 * @returns VBox status code.
2411 * @param pVCpu The cross context virtual CPU structure.
2412 * @param uCr3 The masked nested-guest CR3 value.
2413 * @param pGCPhysCr3 Where to store the translated CR3.
2414 *
2415 * @warning This updates PGMCPU::GCPhysNstGstCR3 when the translation succeeds. Be
2416 * mindful of this in code that's hyper sensitive to the order of
2417 * operations.
2418 */
2419static int pgmGstSlatTranslateCr3(PVMCPUCC pVCpu, uint64_t uCr3, PRTGCPHYS pGCPhysCr3)
2420{
2421 if (uCr3 != pVCpu->pgm.s.GCPhysNstGstCR3)
2422 {
2423 PGMPTWALK Walk;
2424 PGMPTWALKGST GstWalk;
2425 int const rc = pgmGstSlatWalkPhys(pVCpu, PGMSLAT_EPT, uCr3, &Walk, &GstWalk);
2426 if (RT_SUCCESS(rc))
2427 {
2428 /* Update nested-guest CR3. */
2429 pVCpu->pgm.s.GCPhysNstGstCR3 = uCr3;
2430
2431 /* Pass back the translated result. */
2432 *pGCPhysCr3 = Walk.GCPhys;
2433 return VINF_SUCCESS;
2434 }
2435
2436 /* Translation failed. */
2437 *pGCPhysCr3 = NIL_RTGCPHYS;
2438 return rc;
2439 }
2440
2441 /*
2442 * If the nested-guest CR3 has not changed, then the previously
2443 * translated CR3 result (i.e. GCPhysCR3) is passed back.
2444 */
2445 *pGCPhysCr3 = pVCpu->pgm.s.GCPhysCR3;
2446 return VINF_SUCCESS;
2447}
2448#endif
2449
2450
2451/**
2452 * Performs and schedules necessary updates following a CR3 load or reload.
2453 *
2454 * This will normally involve mapping the guest PD or nPDPT.
2455 *
2456 * @returns VBox status code.
2457 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
2458 * safely be ignored and overridden since the FF will be set too then.
2459 * @param pVCpu The cross context virtual CPU structure.
2460 * @param cr3 The new cr3.
2461 * @param fGlobal Indicates whether this is a global flush or not.
2462 */
2463VMMDECL(int) PGMFlushTLB(PVMCPUCC pVCpu, uint64_t cr3, bool fGlobal)
2464{
2465 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLB), a);
2466 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2467
2468 VMCPU_ASSERT_EMT(pVCpu);
2469
2470 /*
2471 * Always flag the necessary updates; this is needed for hardware acceleration.
2472 */
2473 /** @todo optimize this, it shouldn't always be necessary. */
2474 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2475 if (fGlobal)
2476 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2477
2478 /*
2479 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
2480 */
2481 RTGCPHYS const GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
2482 RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, cr3);
2483#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2484 if ( pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT
2485 && PGMMODE_WITH_PAGING(pVCpu->pgm.s.enmGuestMode))
2486 {
2487 LogFlowFunc(("nested_cr3=%RX64 old=%RX64\n", GCPhysCR3, pVCpu->pgm.s.GCPhysNstGstCR3));
2488 RTGCPHYS GCPhysOut;
2489 int const rc = pgmGstSlatTranslateCr3(pVCpu, GCPhysCR3, &GCPhysOut);
2490 if (RT_SUCCESS(rc))
2491 GCPhysCR3 = GCPhysOut;
2492 else
2493 {
2494 /* CR3 SLAT translation failed but we try to pretend it
2495 succeeded for the reasons mentioned in PGMHCChangeMode(). */
2496 AssertMsgFailed(("SLAT failed for CR3 %#RX64 rc=%Rrc\n", cr3, rc));
2497 int const rc2 = pgmGstUnmapCr3(pVCpu);
2498 pVCpu->pgm.s.GCPhysCR3 = NIL_RTGCPHYS;
2499 pVCpu->pgm.s.GCPhysNstGstCR3 = NIL_RTGCPHYS;
2500 return rc2;
2501 }
2502 }
2503#endif
2504
2505 LogFlowFunc(("cr3=%RX64 old=%RX64 fGlobal=%d\n", cr3, GCPhysOldCR3, fGlobal));
2506 int rc = VINF_SUCCESS;
2507 if (GCPhysOldCR3 != GCPhysCR3)
2508 {
2509 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2510 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2511 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
2512
2513 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2514 rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3);
2515 if (RT_LIKELY(rc == VINF_SUCCESS))
2516 { }
2517 else
2518 {
2519 AssertMsg(rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
2520 Assert(VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_PGM_SYNC_CR3));
2521 pVCpu->pgm.s.CTX_SUFF(fPaePdpesAndCr3Mapped) = false;
2522 pVCpu->pgm.s.GCPhysPaeCR3 = NIL_RTGCPHYS;
2523 pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3;
2524 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MAP_CR3;
2525 }
2526
2527 if (fGlobal)
2528 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBNewCR3Global));
2529 else
2530 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBNewCR3));
2531 }
2532 else
2533 {
2534#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
2535 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
2536 if (pPool->cDirtyPages)
2537 {
2538 PGM_LOCK_VOID(pVM);
2539 pgmPoolResetDirtyPages(pVM);
2540 PGM_UNLOCK(pVM);
2541 }
2542#endif
2543 if (fGlobal)
2544 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBSameCR3Global));
2545 else
2546 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBSameCR3));
2547
2548 /*
2549 * Flush PAE PDPTEs.
2550 */
2551 if (PGMMODE_IS_PAE(pVCpu->pgm.s.enmGuestMode))
2552 pgmGstFlushPaePdpes(pVCpu);
2553 }
2554
2555 IEMTlbInvalidateAll(pVCpu, false /*fVmm*/);
2556 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLB), a);
2557 return rc;
2558}
2559
2560
2561/**
2562 * Performs and schedules necessary updates following a CR3 load or reload when
2563 * using nested or extended paging.
2564 *
2565 * This API is an alternative to PGMFlushTLB that avoids actually flushing the
2566 * TLB and triggering a SyncCR3.
2567 *
2568 * This will normally involve mapping the guest PD or nPDPT.
2569 *
2570 * @returns VBox status code.
2571 * @retval VINF_SUCCESS.
2572 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync (not for nested
2573 * paging modes). This can safely be ignored and overridden since the
2574 * FF will be set too then.
2575 * @param pVCpu The cross context virtual CPU structure.
2576 * @param cr3 The new CR3.
2577 */
2578VMMDECL(int) PGMUpdateCR3(PVMCPUCC pVCpu, uint64_t cr3)
2579{
2580 VMCPU_ASSERT_EMT(pVCpu);
2581
2582 /* We assume we're only called in nested paging mode. */
2583 Assert(pVCpu->CTX_SUFF(pVM)->pgm.s.fNestedPaging || pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
2584
2585 /*
2586 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
2587 */
2588 RTGCPHYS const GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
2589 RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, cr3);
2590#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2591 if (CPUMIsGuestVmxEptPagingEnabled(pVCpu))
2592 {
2593 LogFlowFunc(("nested_cr3=%RX64 old_nested_cr3=%RX64\n", cr3, pVCpu->pgm.s.GCPhysNstGstCR3));
2594 RTGCPHYS GCPhysOut;
2595 int const rc = pgmGstSlatTranslateCr3(pVCpu, GCPhysCR3, &GCPhysOut);
2596 if (RT_SUCCESS(rc))
2597 GCPhysCR3 = GCPhysOut;
2598 else
2599 {
2600 /* CR3 SLAT translation failed but we try to pretend it
2601 succeeded for the reasons mentioned in PGMHCChangeMode(). */
2602 AssertMsgFailed(("SLAT failed for CR3 %#RX64 rc=%Rrc\n", cr3, rc));
2603 int const rc2 = pgmGstUnmapCr3(pVCpu);
2604 pVCpu->pgm.s.GCPhysCR3 = NIL_RTGCPHYS;
2605 pVCpu->pgm.s.GCPhysNstGstCR3 = NIL_RTGCPHYS;
2606 return rc2;
2607 }
2608 }
2609#endif
2610
2611 LogFlowFunc(("cr3=%RX64 old=%RX64\n", cr3, GCPhysOldCR3));
2612 int rc = VINF_SUCCESS;
2613 if (GCPhysOldCR3 != GCPhysCR3)
2614 {
2615 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2616 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2617 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
2618
2619 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2620 rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3);
2621
2622 AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */ /** @todo this isn't true for the mac, but we need hw to test/fix this. */
2623 }
2624 /*
2625 * Flush PAE PDPTEs.
2626 */
2627 else if (PGMMODE_IS_PAE(pVCpu->pgm.s.enmGuestMode))
2628 pgmGstFlushPaePdpes(pVCpu);
2629
2630 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
2631 return rc;
2632}
2633
2634
2635/**
2636 * Synchronize the paging structures.
2637 *
2638 * This function is called in response to the VMCPU_FF_PGM_SYNC_CR3 and
2639 * VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL force action flags. They are set
2640 * in several places, most importantly whenever the CR3 is loaded.
2641 *
2642 * @returns VBox status code. May return VINF_PGM_SYNC_CR3 in RC/R0.
2643 * @retval VERR_PGM_NO_HYPERVISOR_ADDRESS in raw-mode when we're unable to map
2644 * the VMM into guest context.
2645 * @param pVCpu The cross context virtual CPU structure.
2646 * @param cr0 Guest context CR0 register
2647 * @param cr3 Guest context CR3 register
2648 * @param cr4 Guest context CR4 register
2649 * @param fGlobal Including global page directories or not
2650 */
2651VMMDECL(int) PGMSyncCR3(PVMCPUCC pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
2652{
2653 int rc;
2654
2655 VMCPU_ASSERT_EMT(pVCpu);
2656
2657 /*
2658 * The pool may have pending stuff and even require a return to ring-3 to
2659 * clear the whole thing.
2660 */
2661 rc = pgmPoolSyncCR3(pVCpu);
2662 if (rc != VINF_SUCCESS)
2663 return rc;
2664
2665 /*
2666 * We might be called when we shouldn't.
2667 *
2668 * The mode switching will ensure that the PD is resynced after every mode
2669 * switch. So, if we find ourselves here when in protected or real mode
2670 * we can safely clear the FF and return immediately.
2671 */
2672 if (pVCpu->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
2673 {
2674 Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
2675 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2676 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2677 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2678 return VINF_SUCCESS;
2679 }
2680
2681 /* If global pages are not supported, then all flushes are global. */
2682 if (!(cr4 & X86_CR4_PGE))
2683 fGlobal = true;
2684 LogFlow(("PGMSyncCR3: cr0=%RX64 cr3=%RX64 cr4=%RX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
2685 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)));
2686
2687 /*
2688 * Check if we need to finish an aborted MapCR3 call (see PGMFlushTLB).
2689 * This should be done before SyncCR3.
2690 */
2691 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MAP_CR3)
2692 {
2693 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MAP_CR3;
2694
2695 RTGCPHYS const GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
2696 RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, cr3);
2697#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2698 if (CPUMIsGuestVmxEptPagingEnabled(pVCpu))
2699 {
2700 RTGCPHYS GCPhysOut;
2701 int rc2 = pgmGstSlatTranslateCr3(pVCpu, GCPhysCR3, &GCPhysOut);
2702 if (RT_SUCCESS(rc2))
2703 GCPhysCR3 = GCPhysOut;
2704 else
2705 {
2706 /* CR3 SLAT translation failed but we try to pretend it
2707 succeeded for the reasons mentioned in PGMHCChangeMode(). */
2708 AssertMsgFailed(("Failed to translate CR3 %#RX64. rc=%Rrc\n", cr3, rc2));
2709 rc2 = pgmGstUnmapCr3(pVCpu);
2710 pVCpu->pgm.s.GCPhysCR3 = NIL_RTGCPHYS;
2711 pVCpu->pgm.s.GCPhysNstGstCR3 = NIL_RTGCPHYS;
2712 return rc2;
2713 }
2714 }
2715#endif
2716 Assert(!pVCpu->pgm.s.CTX_SUFF(fPaePdpesAndCr3Mapped));
2717 if (GCPhysOldCR3 != GCPhysCR3)
2718 {
2719 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2720 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2721 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
2722 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2723 rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3);
2724 }
2725
2726 /* Make sure we check for pending pgm pool syncs as we clear VMCPU_FF_PGM_SYNC_CR3 later on! */
2727 if ( rc == VINF_PGM_SYNC_CR3
2728 || (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL))
2729 {
2730 Log(("PGMSyncCR3: pending pgm pool sync after MapCR3!\n"));
2731#ifdef IN_RING3
2732 rc = pgmPoolSyncCR3(pVCpu);
2733#else
2734 if (rc == VINF_PGM_SYNC_CR3)
2735 pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3;
2736 return VINF_PGM_SYNC_CR3;
2737#endif
2738 }
2739 AssertRCReturn(rc, rc);
2740 AssertRCSuccessReturn(rc, VERR_IPE_UNEXPECTED_INFO_STATUS);
2741 }
2742
2743 /*
2744 * Let the 'Bth' function do the work and we'll just keep track of the flags.
2745 */
2746 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
2747
2748 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2749 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2750 AssertReturn(g_aPgmBothModeData[idxBth].pfnSyncCR3, VERR_PGM_MODE_IPE);
2751 rc = g_aPgmBothModeData[idxBth].pfnSyncCR3(pVCpu, cr0, cr3, cr4, fGlobal);
2752
2753 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
2754 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
2755 if (rc == VINF_SUCCESS)
2756 {
2757 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)
2758 {
2759 /* Go back to ring 3 if a pgm pool sync is again pending. */
2760 return VINF_PGM_SYNC_CR3;
2761 }
2762
2763 if (!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
2764 {
2765 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2766 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2767 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2768 }
2769 }
2770
2771 /*
2772 * Now flush the CR3 (guest context).
2773 */
2774 if (rc == VINF_SUCCESS)
2775 PGM_INVL_VCPU_TLBS(pVCpu);
2776 return rc;
2777}
2778
2779
2780/**
2781 * Maps all the PAE PDPE entries.
2782 *
2783 * @returns VBox status code.
2784 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2785 * @param paPaePdpes The new PAE PDPE values.
2786 *
2787 * @remarks This function may be invoked during the process of changing the guest
2788 * paging mode to PAE, hence the guest state (CR0, CR4 etc.) may not
2789 * reflect PAE paging just yet.
2790 */
2791VMM_INT_DECL(int) PGMGstMapPaePdpes(PVMCPUCC pVCpu, PCX86PDPE paPaePdpes)
2792{
2793 Assert(paPaePdpes);
2794 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
2795 {
2796 X86PDPE const PaePdpe = paPaePdpes[i];
2797
2798 /*
2799 * In some cases (e.g. in SVM with nested paging) the validation of the PAE PDPEs
2800 * is deferred.[1] Also, different situations require different handling of invalid
2801 * PDPE entries. Here we assume the caller has already validated or doesn't require
2802 * validation of the PDPEs.
2803 *
2804 * [1] -- See AMD spec. 15.25.10 "Legacy PAE Mode".
2805 */
2806 if ((PaePdpe.u & (pVCpu->pgm.s.fGstPaeMbzPdpeMask | X86_PDPE_P)) == X86_PDPE_P)
2807 {
2808 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2809 RTHCPTR HCPtr;
2810 RTGCPHYS const GCPhys = PGM_A20_APPLY(pVCpu, PaePdpe.u & X86_PDPE_PG_MASK);
2811
2812 PGM_LOCK_VOID(pVM);
2813 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
2814 AssertReturnStmt(pPage, PGM_UNLOCK(pVM), VERR_PGM_INVALID_PDPE_ADDR);
2815 int const rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)&HCPtr);
2816 PGM_UNLOCK(pVM);
2817 if (RT_SUCCESS(rc))
2818 {
2819#ifdef IN_RING3
2820 pVCpu->pgm.s.apGstPaePDsR3[i] = (PX86PDPAE)HCPtr;
2821 pVCpu->pgm.s.apGstPaePDsR0[i] = NIL_RTR0PTR;
2822#else
2823 pVCpu->pgm.s.apGstPaePDsR3[i] = NIL_RTR3PTR;
2824 pVCpu->pgm.s.apGstPaePDsR0[i] = (PX86PDPAE)HCPtr;
2825#endif
2826 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = GCPhys;
2827 continue;
2828 }
2829 AssertMsgFailed(("PGMPhysMapPaePdpes: rc2=%d GCPhys=%RGp i=%d\n", rc, GCPhys, i));
2830 }
2831 pVCpu->pgm.s.apGstPaePDsR3[i] = 0;
2832 pVCpu->pgm.s.apGstPaePDsR0[i] = 0;
2833 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
2834 }
2835
2836 return VINF_SUCCESS;
2837}
2838
2839
2840/**
2841 * Validates and maps the PDPT and PAE PDPEs referenced by the given CR3.
2842 *
2843 * @returns VBox status code.
2844 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2845 * @param cr3 The guest CR3 value.
2846 *
2847 * @remarks This function may be invoked during the process of changing the guest
2848 * paging mode to PAE but the guest state (CR0, CR4 etc.) may not reflect
2849 * PAE paging just yet.
2850 */
2851VMM_INT_DECL(int) PGMGstMapPaePdpesAtCr3(PVMCPUCC pVCpu, uint64_t cr3)
2852{
2853 /*
2854 * Read the page-directory-pointer table (PDPT) at CR3.
2855 */
2856 RTGCPHYS GCPhysCR3 = (cr3 & X86_CR3_PAE_PAGE_MASK);
2857 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
2858
2859#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2860 if (CPUMIsGuestVmxEptPaePagingEnabled(pVCpu))
2861 {
2862 RTGCPHYS GCPhysOut;
2863 int const rc = pgmGstSlatTranslateCr3(pVCpu, GCPhysCR3, &GCPhysOut);
2864 if (RT_SUCCESS(rc))
2865 GCPhysCR3 = GCPhysOut;
2866 else
2867 {
2868 AssertMsgFailed(("Failed to load CR3 at %#RX64. rc=%Rrc\n", GCPhysCR3, rc));
2869 return rc;
2870 }
2871 }
2872#endif
2873
2874 RTHCPTR HCPtrGuestCr3;
2875 int rc = pgmGstMapCr3(pVCpu, GCPhysCR3, &HCPtrGuestCr3);
2876 if (RT_SUCCESS(rc))
2877 {
2878 /*
2879 * Validate the page-directory-pointer table entries (PDPE).
2880 */
2881 X86PDPE aPaePdpes[X86_PG_PAE_PDPE_ENTRIES];
2882 memcpy(&aPaePdpes[0], HCPtrGuestCr3, sizeof(aPaePdpes));
2883 if (PGMGstArePaePdpesValid(pVCpu, &aPaePdpes[0]))
2884 {
2885 /*
2886 * Map the PDPT.
2887 * We deliberately don't update PGM's GCPhysCR3 here as it's expected
2888 * that PGMFlushTLB will be called soon and only a change to CR3 then
2889 * will cause the shadow page tables to be updated.
2890 */
2891#ifdef IN_RING3
2892 pVCpu->pgm.s.pGstPaePdptR3 = (PX86PDPT)HCPtrGuestCr3;
2893 pVCpu->pgm.s.pGstPaePdptR0 = NIL_RTR0PTR;
2894#else
2895 pVCpu->pgm.s.pGstPaePdptR3 = NIL_RTR3PTR;
2896 pVCpu->pgm.s.pGstPaePdptR0 = (PX86PDPT)HCPtrGuestCr3;
2897#endif
2898
2899 /*
2900 * Update CPUM.
2901 * We do this prior to mapping the PDPEs to keep the order consistent
2902 * with what's used in HM. In practice, it doesn't really matter.
2903 */
2904 CPUMSetGuestPaePdpes(pVCpu, &aPaePdpes[0]);
2905
2906 /*
2907 * Map the PDPEs.
2908 */
2909 rc = PGMGstMapPaePdpes(pVCpu, &aPaePdpes[0]);
2910 if (RT_SUCCESS(rc))
2911 {
2912#ifdef IN_RING3
2913 pVCpu->pgm.s.fPaePdpesAndCr3MappedR3 = true;
2914 pVCpu->pgm.s.fPaePdpesAndCr3MappedR0 = false;
2915#else
2916 pVCpu->pgm.s.fPaePdpesAndCr3MappedR3 = false;
2917 pVCpu->pgm.s.fPaePdpesAndCr3MappedR0 = true;
2918#endif
2919 pVCpu->pgm.s.GCPhysPaeCR3 = GCPhysCR3;
2920 }
2921 }
2922 else
2923 rc = VERR_PGM_PAE_PDPE_RSVD;
2924 }
2925 return rc;
2926}
2927
2928
2929/**
2930 * Called whenever CR0 or CR4 changes in a way which may affect the paging mode.
2931 *
2932 * @returns VBox status code, with the following informational codes for
2933 * VM scheduling.
2934 * @retval VINF_SUCCESS if there was no change, or it was successfully dealt with.
2935 * @retval VINF_EM_SUSPEND or VINF_EM_OFF on a fatal runtime error. (R3 only)
2936 *
2937 * @param pVCpu The cross context virtual CPU structure.
2938 * @param cr0 The new cr0.
2939 * @param cr4 The new cr4.
2940 * @param efer The new extended feature enable register.
2941 * @param fForce Whether to force a mode change.
2942 */
2943VMMDECL(int) PGMChangeMode(PVMCPUCC pVCpu, uint64_t cr0, uint64_t cr4, uint64_t efer, bool fForce)
2944{
2945 VMCPU_ASSERT_EMT(pVCpu);
2946
2947 /*
2948 * Calc the new guest mode.
2949 *
2950 * Note! We check PG before PE and without requiring PE because of the
2951 * special AMD-V paged real mode (APM vol 2, rev 3.28, 15.9).
2952 */
2953 PGMMODE enmGuestMode;
2954 if (cr0 & X86_CR0_PG)
2955 {
2956 if (!(cr4 & X86_CR4_PAE))
2957 {
2958 bool const fPse = !!(cr4 & X86_CR4_PSE);
2959 if (pVCpu->pgm.s.fGst32BitPageSizeExtension != fPse)
2960 Log(("PGMChangeMode: CR4.PSE %d -> %d\n", pVCpu->pgm.s.fGst32BitPageSizeExtension, fPse));
2961 pVCpu->pgm.s.fGst32BitPageSizeExtension = fPse;
2962 enmGuestMode = PGMMODE_32_BIT;
2963 }
2964 else if (!(efer & MSR_K6_EFER_LME))
2965 {
2966 if (!(efer & MSR_K6_EFER_NXE))
2967 enmGuestMode = PGMMODE_PAE;
2968 else
2969 enmGuestMode = PGMMODE_PAE_NX;
2970 }
2971 else
2972 {
2973 if (!(efer & MSR_K6_EFER_NXE))
2974 enmGuestMode = PGMMODE_AMD64;
2975 else
2976 enmGuestMode = PGMMODE_AMD64_NX;
2977 }
2978 }
2979 else if (!(cr0 & X86_CR0_PE))
2980 enmGuestMode = PGMMODE_REAL;
2981 else
2982 enmGuestMode = PGMMODE_PROTECTED;
2983
2984 /*
2985 * Did it change?
2986 */
2987 if ( !fForce
2988 && pVCpu->pgm.s.enmGuestMode == enmGuestMode)
2989 return VINF_SUCCESS;
2990
2991 /* Flush the TLB */
2992 PGM_INVL_VCPU_TLBS(pVCpu);
2993 return PGMHCChangeMode(pVCpu->CTX_SUFF(pVM), pVCpu, enmGuestMode);
2994}
2995
2996
2997/**
2998 * Converts a PGMMODE value to a PGM_TYPE_* \#define.
2999 *
3000 * @returns PGM_TYPE_*.
3001 * @param pgmMode The mode value to convert.
3002 */
3003DECLINLINE(unsigned) pgmModeToType(PGMMODE pgmMode)
3004{
3005 switch (pgmMode)
3006 {
3007 case PGMMODE_REAL: return PGM_TYPE_REAL;
3008 case PGMMODE_PROTECTED: return PGM_TYPE_PROT;
3009 case PGMMODE_32_BIT: return PGM_TYPE_32BIT;
3010 case PGMMODE_PAE:
3011 case PGMMODE_PAE_NX: return PGM_TYPE_PAE;
3012 case PGMMODE_AMD64:
3013 case PGMMODE_AMD64_NX: return PGM_TYPE_AMD64;
3014 case PGMMODE_NESTED_32BIT: return PGM_TYPE_NESTED_32BIT;
3015 case PGMMODE_NESTED_PAE: return PGM_TYPE_NESTED_PAE;
3016 case PGMMODE_NESTED_AMD64: return PGM_TYPE_NESTED_AMD64;
3017 case PGMMODE_EPT: return PGM_TYPE_EPT;
3018 case PGMMODE_NONE: return PGM_TYPE_NONE;
3019 default:
3020 AssertFatalMsgFailed(("pgmMode=%d\n", pgmMode));
3021 }
3022}
3023
3024
3025/**
3026 * Calculates the shadow paging mode.
3027 *
3028 * @returns The shadow paging mode.
3029 * @param pVM The cross context VM structure.
3030 * @param enmGuestMode The guest mode.
3031 * @param enmHostMode The host mode.
3032 * @param enmShadowMode The current shadow mode.
3033 */
3034static PGMMODE pgmCalcShadowMode(PVMCC pVM, PGMMODE enmGuestMode, SUPPAGINGMODE enmHostMode, PGMMODE enmShadowMode)
3035{
3036 switch (enmGuestMode)
3037 {
3038 case PGMMODE_REAL:
3039 case PGMMODE_PROTECTED:
3040 switch (enmHostMode)
3041 {
3042 case SUPPAGINGMODE_32_BIT:
3043 case SUPPAGINGMODE_32_BIT_GLOBAL:
3044 enmShadowMode = PGMMODE_32_BIT;
3045 break;
3046
3047 case SUPPAGINGMODE_PAE:
3048 case SUPPAGINGMODE_PAE_NX:
3049 case SUPPAGINGMODE_PAE_GLOBAL:
3050 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3051 enmShadowMode = PGMMODE_PAE;
3052 break;
3053
3054 case SUPPAGINGMODE_AMD64:
3055 case SUPPAGINGMODE_AMD64_GLOBAL:
3056 case SUPPAGINGMODE_AMD64_NX:
3057 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3058 enmShadowMode = PGMMODE_PAE;
3059 break;
3060
3061 default:
3062 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
3063 }
3064 break;
3065
3066 case PGMMODE_32_BIT:
3067 switch (enmHostMode)
3068 {
3069 case SUPPAGINGMODE_32_BIT:
3070 case SUPPAGINGMODE_32_BIT_GLOBAL:
3071 enmShadowMode = PGMMODE_32_BIT;
3072 break;
3073
3074 case SUPPAGINGMODE_PAE:
3075 case SUPPAGINGMODE_PAE_NX:
3076 case SUPPAGINGMODE_PAE_GLOBAL:
3077 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3078 enmShadowMode = PGMMODE_PAE;
3079 break;
3080
3081 case SUPPAGINGMODE_AMD64:
3082 case SUPPAGINGMODE_AMD64_GLOBAL:
3083 case SUPPAGINGMODE_AMD64_NX:
3084 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3085 enmShadowMode = PGMMODE_PAE;
3086 break;
3087
3088 default:
3089 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
3090 }
3091 break;
3092
3093 case PGMMODE_PAE:
3094 case PGMMODE_PAE_NX: /** @todo This might require more switchers and guest+both modes. */
3095 switch (enmHostMode)
3096 {
3097 case SUPPAGINGMODE_32_BIT:
3098 case SUPPAGINGMODE_32_BIT_GLOBAL:
3099 enmShadowMode = PGMMODE_PAE;
3100 break;
3101
3102 case SUPPAGINGMODE_PAE:
3103 case SUPPAGINGMODE_PAE_NX:
3104 case SUPPAGINGMODE_PAE_GLOBAL:
3105 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3106 enmShadowMode = PGMMODE_PAE;
3107 break;
3108
3109 case SUPPAGINGMODE_AMD64:
3110 case SUPPAGINGMODE_AMD64_GLOBAL:
3111 case SUPPAGINGMODE_AMD64_NX:
3112 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3113 enmShadowMode = PGMMODE_PAE;
3114 break;
3115
3116 default:
3117 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
3118 }
3119 break;
3120
3121 case PGMMODE_AMD64:
3122 case PGMMODE_AMD64_NX:
3123 switch (enmHostMode)
3124 {
3125 case SUPPAGINGMODE_32_BIT:
3126 case SUPPAGINGMODE_32_BIT_GLOBAL:
3127 enmShadowMode = PGMMODE_AMD64;
3128 break;
3129
3130 case SUPPAGINGMODE_PAE:
3131 case SUPPAGINGMODE_PAE_NX:
3132 case SUPPAGINGMODE_PAE_GLOBAL:
3133 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3134 enmShadowMode = PGMMODE_AMD64;
3135 break;
3136
3137 case SUPPAGINGMODE_AMD64:
3138 case SUPPAGINGMODE_AMD64_GLOBAL:
3139 case SUPPAGINGMODE_AMD64_NX:
3140 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3141 enmShadowMode = PGMMODE_AMD64;
3142 break;
3143
3144 default:
3145 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
3146 }
3147 break;
3148
3149 default:
3150 AssertLogRelMsgFailedReturn(("enmGuestMode=%d\n", enmGuestMode), PGMMODE_INVALID);
3151 }
3152
3153 /*
3154 * Override the shadow mode when NEM or nested paging is active.
3155 */
3156 if (VM_IS_NEM_ENABLED(pVM))
3157 {
3158 pVM->pgm.s.fNestedPaging = true;
3159 enmShadowMode = PGMMODE_NONE;
3160 }
3161 else
3162 {
3163 bool fNestedPaging = HMIsNestedPagingActive(pVM);
3164 pVM->pgm.s.fNestedPaging = fNestedPaging;
3165 if (fNestedPaging)
3166 {
3167 if (HMIsVmxActive(pVM))
3168 enmShadowMode = PGMMODE_EPT;
3169 else
3170 {
3171 /* The nested SVM paging depends on the host one. */
3172 Assert(HMIsSvmActive(pVM));
3173 if ( enmGuestMode == PGMMODE_AMD64
3174 || enmGuestMode == PGMMODE_AMD64_NX)
3175 enmShadowMode = PGMMODE_NESTED_AMD64;
3176 else
3177 switch (pVM->pgm.s.enmHostMode)
3178 {
3179 case SUPPAGINGMODE_32_BIT:
3180 case SUPPAGINGMODE_32_BIT_GLOBAL:
3181 enmShadowMode = PGMMODE_NESTED_32BIT;
3182 break;
3183
3184 case SUPPAGINGMODE_PAE:
3185 case SUPPAGINGMODE_PAE_GLOBAL:
3186 case SUPPAGINGMODE_PAE_NX:
3187 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3188 enmShadowMode = PGMMODE_NESTED_PAE;
3189 break;
3190
3191 case SUPPAGINGMODE_AMD64:
3192 case SUPPAGINGMODE_AMD64_GLOBAL:
3193 case SUPPAGINGMODE_AMD64_NX:
3194 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3195 enmShadowMode = PGMMODE_NESTED_AMD64;
3196 break;
3197
3198 default:
3199 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode), PGMMODE_INVALID);
3200 }
3201 }
3202 }
3203#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
3204 else
3205 {
3206 /* Nested paging is a requirement for nested VT-x. */
3207 AssertLogRelMsgReturn(enmGuestMode != PGMMODE_EPT, ("enmHostMode=%d\n", pVM->pgm.s.enmHostMode), PGMMODE_INVALID);
3208 }
3209#endif
3210 }
3211
3212 return enmShadowMode;
3213}
3214
3215
3216/**
3217 * Performs the actual mode change.
3218 * This is called by PGMChangeMode and pgmR3InitPaging().
3219 *
3220 * @returns VBox status code. May suspend or power off the VM on error, but this
3221 * will trigger using FFs and not informational status codes.
3222 *
3223 * @param pVM The cross context VM structure.
3224 * @param pVCpu The cross context virtual CPU structure.
3225 * @param enmGuestMode The new guest mode. This is assumed to be different from
3226 * the current mode.
3227 */
3228VMM_INT_DECL(int) PGMHCChangeMode(PVMCC pVM, PVMCPUCC pVCpu, PGMMODE enmGuestMode)
3229{
3230 Log(("PGMHCChangeMode: Guest mode: %s -> %s\n", PGMGetModeName(pVCpu->pgm.s.enmGuestMode), PGMGetModeName(enmGuestMode)));
3231 STAM_REL_COUNTER_INC(&pVCpu->pgm.s.cGuestModeChanges);
3232
3233 /*
3234 * Calc the shadow mode and switcher.
3235 */
3236 PGMMODE enmShadowMode = pgmCalcShadowMode(pVM, enmGuestMode, pVM->pgm.s.enmHostMode, pVCpu->pgm.s.enmShadowMode);
3237
3238 /*
3239 * Exit old mode(s).
3240 */
3241 /* shadow */
3242 if (enmShadowMode != pVCpu->pgm.s.enmShadowMode)
3243 {
3244 LogFlow(("PGMHCChangeMode: Shadow mode: %s -> %s\n", PGMGetModeName(pVCpu->pgm.s.enmShadowMode), PGMGetModeName(enmShadowMode)));
3245 uintptr_t idxOldShw = pVCpu->pgm.s.idxShadowModeData;
3246 if ( idxOldShw < RT_ELEMENTS(g_aPgmShadowModeData)
3247 && g_aPgmShadowModeData[idxOldShw].pfnExit)
3248 {
3249 int rc = g_aPgmShadowModeData[idxOldShw].pfnExit(pVCpu);
3250 AssertMsgRCReturn(rc, ("Exit failed for shadow mode %d: %Rrc\n", pVCpu->pgm.s.enmShadowMode, rc), rc);
3251 }
3252 }
3253 else
3254 LogFlow(("PGMHCChangeMode: Shadow mode remains: %s\n", PGMGetModeName(pVCpu->pgm.s.enmShadowMode)));
3255
3256 /* guest */
3257 uintptr_t const idxOldGst = pVCpu->pgm.s.idxGuestModeData;
3258 if ( idxOldGst < RT_ELEMENTS(g_aPgmGuestModeData)
3259 && g_aPgmGuestModeData[idxOldGst].pfnExit)
3260 {
3261 int rc = g_aPgmGuestModeData[idxOldGst].pfnExit(pVCpu);
3262 AssertMsgReturn(RT_SUCCESS(rc), ("Exit failed for guest mode %d: %Rrc\n", pVCpu->pgm.s.enmGuestMode, rc), rc);
3263 }
3264 pVCpu->pgm.s.GCPhysCR3 = NIL_RTGCPHYS;
3265 pVCpu->pgm.s.GCPhysNstGstCR3 = NIL_RTGCPHYS;
3266 pVCpu->pgm.s.GCPhysPaeCR3 = NIL_RTGCPHYS;
3267 Assert(!pVCpu->pgm.s.CTX_SUFF(fPaePdpesAndCr3Mapped));
3268
3269 /*
3270 * Change the paging mode data indexes.
3271 */
3272 uintptr_t idxNewGst = pVCpu->pgm.s.idxGuestModeData = pgmModeToType(enmGuestMode);
3273 AssertReturn(idxNewGst < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
3274 AssertReturn(g_aPgmGuestModeData[idxNewGst].uType == idxNewGst, VERR_PGM_MODE_IPE);
3275 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnGetPage, VERR_PGM_MODE_IPE);
3276 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnModifyPage, VERR_PGM_MODE_IPE);
3277 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnExit, VERR_PGM_MODE_IPE);
3278 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnEnter, VERR_PGM_MODE_IPE);
3279#ifdef IN_RING3
3280 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnRelocate, VERR_PGM_MODE_IPE);
3281#endif
3282
3283 uintptr_t const idxNewShw = pVCpu->pgm.s.idxShadowModeData = pgmModeToType(enmShadowMode);
3284 AssertReturn(idxNewShw < RT_ELEMENTS(g_aPgmShadowModeData), VERR_PGM_MODE_IPE);
3285 AssertReturn(g_aPgmShadowModeData[idxNewShw].uType == idxNewShw, VERR_PGM_MODE_IPE);
3286 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnGetPage, VERR_PGM_MODE_IPE);
3287 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnModifyPage, VERR_PGM_MODE_IPE);
3288 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnExit, VERR_PGM_MODE_IPE);
3289 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnEnter, VERR_PGM_MODE_IPE);
3290#ifdef IN_RING3
3291 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnRelocate, VERR_PGM_MODE_IPE);
3292#endif
3293
3294 uintptr_t const idxNewBth = pVCpu->pgm.s.idxBothModeData = (idxNewShw - PGM_TYPE_FIRST_SHADOW) * PGM_TYPE_END + idxNewGst;
3295 AssertReturn(g_aPgmBothModeData[idxNewBth].uShwType == idxNewShw, VERR_PGM_MODE_IPE);
3296 AssertReturn(g_aPgmBothModeData[idxNewBth].uGstType == idxNewGst, VERR_PGM_MODE_IPE);
3297 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnInvalidatePage, VERR_PGM_MODE_IPE);
3298 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnSyncCR3, VERR_PGM_MODE_IPE);
3299 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnPrefetchPage, VERR_PGM_MODE_IPE);
3300 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnVerifyAccessSyncPage, VERR_PGM_MODE_IPE);
3301 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnMapCR3, VERR_PGM_MODE_IPE);
3302 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnUnmapCR3, VERR_PGM_MODE_IPE);
3303 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnEnter, VERR_PGM_MODE_IPE);
3304#ifdef VBOX_STRICT
3305 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnAssertCR3, VERR_PGM_MODE_IPE);
3306#endif
3307
3308 /*
3309 * Enter new shadow mode (if changed).
3310 */
3311 if (enmShadowMode != pVCpu->pgm.s.enmShadowMode)
3312 {
3313 pVCpu->pgm.s.enmShadowMode = enmShadowMode;
3314 int rc = g_aPgmShadowModeData[idxNewShw].pfnEnter(pVCpu, enmGuestMode >= PGMMODE_AMD64);
3315 AssertLogRelMsgRCReturnStmt(rc, ("Entering enmShadowMode=%s failed: %Rrc\n", PGMGetModeName(enmShadowMode), rc),
3316 pVCpu->pgm.s.enmShadowMode = PGMMODE_INVALID, rc);
3317 }
3318
3319 /*
3320 * Always flag the necessary updates
3321 */
3322 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3323
3324 /*
3325 * Enter the new guest and shadow+guest modes.
3326 */
3327 /* Calc the new CR3 value. */
3328 RTGCPHYS GCPhysCR3;
3329 switch (enmGuestMode)
3330 {
3331 case PGMMODE_REAL:
3332 case PGMMODE_PROTECTED:
3333 GCPhysCR3 = NIL_RTGCPHYS;
3334 break;
3335
3336 case PGMMODE_32_BIT:
3337 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_PAGE_MASK;
3338 break;
3339
3340 case PGMMODE_PAE_NX:
3341 case PGMMODE_PAE:
3342 if (!pVM->cpum.ro.GuestFeatures.fPae)
3343#ifdef IN_RING3 /** @todo r=bird: wrong place, probably hasn't really worked for a while. */
3344 return VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_FATAL, "PAEmode",
3345 N_("The guest is trying to switch to the PAE mode which is currently disabled by default in VirtualBox. PAE support can be enabled using the VM settings (System/Processor)"));
3346#else
3347                AssertLogRelMsgFailedReturn(("enmGuestMode=%s - Try enabling PAE for the guest!\n", PGMGetModeName(enmGuestMode)), VERR_PGM_MODE_IPE);
3348
3349#endif
3350 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_PAE_PAGE_MASK;
3351 break;
3352
3353#ifdef VBOX_WITH_64_BITS_GUESTS
3354 case PGMMODE_AMD64_NX:
3355 case PGMMODE_AMD64:
3356 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_AMD64_PAGE_MASK;
3357 break;
3358#endif
3359 default:
3360 AssertLogRelMsgFailedReturn(("enmGuestMode=%d\n", enmGuestMode), VERR_PGM_MODE_IPE);
3361 }
3362
3363#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
3364 /*
3365 * If a nested-guest is using EPT paging:
3366 * - Update the second-level address translation (SLAT) mode.
3367     *   - Indicate that the CR3 is a nested-guest physical address.
3368 */
3369 if (CPUMIsGuestVmxEptPagingEnabled(pVCpu))
3370 {
3371 if (PGMMODE_WITH_PAGING(enmGuestMode))
3372 {
3373 /*
3374 * Translate CR3 to its guest-physical address.
3375 * We don't use pgmGstSlatTranslateCr3() here as we want to update GCPhysNstGstCR3 -after-
3376 * switching modes to keep it consistent with how GCPhysCR3 is updated.
3377 */
3378 PGMPTWALK Walk;
3379 PGMPTWALKGST GstWalk;
3380 int const rc = pgmGstSlatWalkPhys(pVCpu, PGMSLAT_EPT, GCPhysCR3, &Walk, &GstWalk);
3381 if (RT_SUCCESS(rc))
3382 { /* likely */ }
3383 else
3384 {
3385 /*
3386 * SLAT failed but we avoid reporting this to the caller because the caller
3387 * is not supposed to fail. The only time the caller needs to indicate a
3388 * failure to software is when PAE paging is used by the nested-guest, but
3389 * we handle the PAE case separately (e.g., see VMX transition in IEM).
3390             * In all other cases, the failure is reported when translation of CR3 is
3391             * attempted on the next linear-address memory access.
3392 * See Intel spec. 27.2.1 "EPT Overview".
3393 */
3394 AssertMsgFailed(("SLAT failed for CR3 %#RX64 rc=%Rrc\n", GCPhysCR3, rc));
3395
3396 /* Trying to coax PGM to succeed for the time being... */
3397 Assert(pVCpu->pgm.s.GCPhysCR3 == NIL_RTGCPHYS);
3398 pVCpu->pgm.s.GCPhysNstGstCR3 = GCPhysCR3;
3399 pVCpu->pgm.s.enmGuestSlatMode = PGMSLAT_EPT;
3400 pVCpu->pgm.s.enmGuestMode = enmGuestMode;
3401 HMHCChangedPagingMode(pVM, pVCpu, pVCpu->pgm.s.enmShadowMode, pVCpu->pgm.s.enmGuestMode);
3402 return VINF_SUCCESS;
3403 }
3404 pVCpu->pgm.s.GCPhysNstGstCR3 = GCPhysCR3;
3405 GCPhysCR3 = Walk.GCPhys;
3406 }
3407 pVCpu->pgm.s.enmGuestSlatMode = PGMSLAT_EPT;
3408 }
3409 else
3410 {
3411 Assert(pVCpu->pgm.s.GCPhysNstGstCR3 == NIL_RTGCPHYS);
3412 pVCpu->pgm.s.enmGuestSlatMode = PGMSLAT_DIRECT;
3413 }
3414#endif
3415
3416 /*
3417 * Enter the new guest mode.
3418 */
3419 pVCpu->pgm.s.enmGuestMode = enmGuestMode;
3420 int rc = g_aPgmGuestModeData[idxNewGst].pfnEnter(pVCpu, GCPhysCR3);
3421 int rc2 = g_aPgmBothModeData[idxNewBth].pfnEnter(pVCpu, GCPhysCR3);
3422
3423 /* Set the new guest CR3 (and nested-guest CR3). */
3424 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
3425
3426 /* status codes. */
3427 AssertRC(rc);
3428 AssertRC(rc2);
3429 if (RT_SUCCESS(rc))
3430 {
3431 rc = rc2;
3432 if (RT_SUCCESS(rc)) /* no informational status codes. */
3433 rc = VINF_SUCCESS;
3434 }
3435
3436 /*
3437 * Notify HM.
3438 */
3439 HMHCChangedPagingMode(pVM, pVCpu, pVCpu->pgm.s.enmShadowMode, pVCpu->pgm.s.enmGuestMode);
3440 return rc;
3441}
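
/*
 * Minimal sketch of how a caller could derive the new guest PGMMODE from the
 * control registers before invoking PGMHCChangeMode().  The real entry point
 * for this is PGMChangeMode(); the helper name below is hypothetical and the
 * CPUM getters / X86_* / MSR_K6_EFER_* bit names are assumed from the usual
 * VMM and IPRT headers.
 */
static int pgmSketchChangeModeFromRegs(PVMCC pVM, PVMCPUCC pVCpu)
{
    uint64_t const uCr0  = CPUMGetGuestCR0(pVCpu);
    uint64_t const uCr4  = CPUMGetGuestCR4(pVCpu);
    uint64_t const uEfer = CPUMGetGuestEFER(pVCpu);

    PGMMODE enmGuestMode;
    if (!(uCr0 & X86_CR0_PE))
        enmGuestMode = PGMMODE_REAL;
    else if (!(uCr0 & X86_CR0_PG))
        enmGuestMode = PGMMODE_PROTECTED;
    else if (!(uCr4 & X86_CR4_PAE))
        enmGuestMode = PGMMODE_32_BIT;
    else if (!(uEfer & MSR_K6_EFER_LMA))
        enmGuestMode = (uEfer & MSR_K6_EFER_NXE) ? PGMMODE_PAE_NX   : PGMMODE_PAE;
    else
        enmGuestMode = (uEfer & MSR_K6_EFER_NXE) ? PGMMODE_AMD64_NX : PGMMODE_AMD64;

    if (enmGuestMode == pVCpu->pgm.s.enmGuestMode)
        return VINF_SUCCESS; /* PGMHCChangeMode assumes the mode actually changes. */
    return PGMHCChangeMode(pVM, pVCpu, enmGuestMode);
}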
3442
3443
3444/**
3445 * Called by CPUM or REM when CR0.WP changes to 1.
3446 *
3447 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3448 * @thread EMT
3449 */
3450VMMDECL(void) PGMCr0WpEnabled(PVMCPUCC pVCpu)
3451{
3452 /*
3453 * Netware WP0+RO+US hack cleanup when WP0 -> WP1.
3454 *
3455 * Use the counter to judge whether there might be pool pages with active
3456 * hacks in them. If there are, we will be running the risk of messing up
3457 * the guest by allowing it to write to read-only pages. Thus, we have to
3458 * clear the page pool ASAP if there is the slightest chance.
3459 */
3460 if (pVCpu->pgm.s.cNetwareWp0Hacks > 0)
3461 {
3462 Assert(pVCpu->CTX_SUFF(pVM)->cCpus == 1);
3463
3464 Log(("PGMCr0WpEnabled: %llu WP0 hacks active - clearing page pool\n", pVCpu->pgm.s.cNetwareWp0Hacks));
3465 pVCpu->pgm.s.cNetwareWp0Hacks = 0;
3466 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
3467 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3468 }
3469}
3470
3471
3472/**
3473 * Gets the current guest paging mode.
3474 *
3475 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
3476 *
3477 * @returns The current paging mode.
3478 * @param pVCpu The cross context virtual CPU structure.
3479 */
3480VMMDECL(PGMMODE) PGMGetGuestMode(PVMCPU pVCpu)
3481{
3482 return pVCpu->pgm.s.enmGuestMode;
3483}
3484
3485
3486/**
3487 * Gets the current shadow paging mode.
3488 *
3489 * @returns The current paging mode.
3490 * @param pVCpu The cross context virtual CPU structure.
3491 */
3492VMMDECL(PGMMODE) PGMGetShadowMode(PVMCPU pVCpu)
3493{
3494 return pVCpu->pgm.s.enmShadowMode;
3495}
3496
3497
3498/**
3499 * Gets the current host paging mode.
3500 *
3501 * @returns The current paging mode.
3502 * @param pVM The cross context VM structure.
3503 */
3504VMMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
3505{
3506 switch (pVM->pgm.s.enmHostMode)
3507 {
3508 case SUPPAGINGMODE_32_BIT:
3509 case SUPPAGINGMODE_32_BIT_GLOBAL:
3510 return PGMMODE_32_BIT;
3511
3512 case SUPPAGINGMODE_PAE:
3513 case SUPPAGINGMODE_PAE_GLOBAL:
3514 return PGMMODE_PAE;
3515
3516 case SUPPAGINGMODE_PAE_NX:
3517 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3518 return PGMMODE_PAE_NX;
3519
3520 case SUPPAGINGMODE_AMD64:
3521 case SUPPAGINGMODE_AMD64_GLOBAL:
3522 return PGMMODE_AMD64;
3523
3524 case SUPPAGINGMODE_AMD64_NX:
3525 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3526 return PGMMODE_AMD64_NX;
3527
3528 default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
3529 }
3530
3531 return PGMMODE_INVALID;
3532}
3533
3534
3535/**
3536 * Gets the mode name.
3537 *
3538 * @returns read-only name string.
3539 * @param enmMode The mode whose name is desired.
3540 */
3541VMMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
3542{
3543 switch (enmMode)
3544 {
3545 case PGMMODE_REAL: return "Real";
3546 case PGMMODE_PROTECTED: return "Protected";
3547 case PGMMODE_32_BIT: return "32-bit";
3548 case PGMMODE_PAE: return "PAE";
3549 case PGMMODE_PAE_NX: return "PAE+NX";
3550 case PGMMODE_AMD64: return "AMD64";
3551 case PGMMODE_AMD64_NX: return "AMD64+NX";
3552 case PGMMODE_NESTED_32BIT: return "Nested-32";
3553 case PGMMODE_NESTED_PAE: return "Nested-PAE";
3554 case PGMMODE_NESTED_AMD64: return "Nested-AMD64";
3555 case PGMMODE_EPT: return "EPT";
3556 case PGMMODE_NONE: return "None";
3557 default: return "unknown mode value";
3558 }
3559}
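
/*
 * Usage sketch for the mode query API above, combined with PGMGetModeName()
 * for diagnostic logging.  The helper name is hypothetical and assumes an EMT
 * context with valid pVM / pVCpu pointers.
 */
static void pgmSketchLogPagingModes(PVM pVM, PVMCPU pVCpu)
{
    Log(("PGM modes: guest=%s shadow=%s host=%s\n",
         PGMGetModeName(PGMGetGuestMode(pVCpu)),
         PGMGetModeName(PGMGetShadowMode(pVCpu)),
         PGMGetModeName(PGMGetHostMode(pVM))));
}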
3560
3561
3562#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
3563/**
3564 * Gets the SLAT mode name.
3565 *
3566 * @returns The read-only SLAT mode descriptive string.
3567 * @param enmSlatMode The SLAT mode value.
3568 */
3569VMM_INT_DECL(const char *) PGMGetSlatModeName(PGMSLAT enmSlatMode)
3570{
3571 switch (enmSlatMode)
3572 {
3573 case PGMSLAT_DIRECT: return "Direct";
3574 case PGMSLAT_EPT: return "EPT";
3575 case PGMSLAT_32BIT: return "32-bit";
3576 case PGMSLAT_PAE: return "PAE";
3577 case PGMSLAT_AMD64: return "AMD64";
3578 default: return "Unknown";
3579 }
3580}
3581#endif
3582
3583
3584/**
3585 * Gets the physical address represented in the guest CR3 as PGM sees it.
3586 *
3587 * This is mainly for logging and debugging.
3588 *
3589 * @returns PGM's guest CR3 value.
3590 * @param pVCpu The cross context virtual CPU structure.
3591 */
3592VMM_INT_DECL(RTGCPHYS) PGMGetGuestCR3Phys(PVMCPU pVCpu)
3593{
3594 return pVCpu->pgm.s.GCPhysCR3;
3595}
3596
3597
3598
3599/**
3600 * Notification from CPUM that the EFER.NXE bit has changed.
3601 *
3602 * @param pVCpu The cross context virtual CPU structure of the CPU for
3603 * which EFER changed.
3604 * @param fNxe The new NXE state.
3605 */
3606VMM_INT_DECL(void) PGMNotifyNxeChanged(PVMCPU pVCpu, bool fNxe)
3607{
3608/** @todo VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu); */
3609 Log(("PGMNotifyNxeChanged: fNxe=%RTbool\n", fNxe));
3610
3611 pVCpu->pgm.s.fNoExecuteEnabled = fNxe;
3612 if (fNxe)
3613 {
3614 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
3615 pVCpu->pgm.s.fGstPaeMbzPteMask &= ~X86_PTE_PAE_NX;
3616 pVCpu->pgm.s.fGstPaeMbzPdeMask &= ~X86_PDE_PAE_NX;
3617 pVCpu->pgm.s.fGstPaeMbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
3618 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask - N/A */
3619 pVCpu->pgm.s.fGstAmd64MbzPteMask &= ~X86_PTE_PAE_NX;
3620 pVCpu->pgm.s.fGstAmd64MbzPdeMask &= ~X86_PDE_PAE_NX;
3621 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
3622 pVCpu->pgm.s.fGstAmd64MbzPdpeMask &= ~X86_PDPE_LM_NX;
3623 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask &= ~X86_PDPE_LM_NX;
3624 pVCpu->pgm.s.fGstAmd64MbzPml4eMask &= ~X86_PML4E_NX;
3625
3626 pVCpu->pgm.s.fGst64ShadowedPteMask |= X86_PTE_PAE_NX;
3627 pVCpu->pgm.s.fGst64ShadowedPdeMask |= X86_PDE_PAE_NX;
3628 pVCpu->pgm.s.fGst64ShadowedBigPdeMask |= X86_PDE2M_PAE_NX;
3629 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask |= X86_PDE2M_PAE_NX;
3630 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask |= X86_PDPE_LM_NX;
3631 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask |= X86_PML4E_NX;
3632 }
3633 else
3634 {
3635 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
3636 pVCpu->pgm.s.fGstPaeMbzPteMask |= X86_PTE_PAE_NX;
3637 pVCpu->pgm.s.fGstPaeMbzPdeMask |= X86_PDE_PAE_NX;
3638 pVCpu->pgm.s.fGstPaeMbzBigPdeMask |= X86_PDE2M_PAE_NX;
3639        /*pVCpu->pgm.s.fGstPaeMbzPdpeMask - N/A */
3640 pVCpu->pgm.s.fGstAmd64MbzPteMask |= X86_PTE_PAE_NX;
3641 pVCpu->pgm.s.fGstAmd64MbzPdeMask |= X86_PDE_PAE_NX;
3642 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask |= X86_PDE2M_PAE_NX;
3643 pVCpu->pgm.s.fGstAmd64MbzPdpeMask |= X86_PDPE_LM_NX;
3644 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask |= X86_PDPE_LM_NX;
3645 pVCpu->pgm.s.fGstAmd64MbzPml4eMask |= X86_PML4E_NX;
3646
3647 pVCpu->pgm.s.fGst64ShadowedPteMask &= ~X86_PTE_PAE_NX;
3648 pVCpu->pgm.s.fGst64ShadowedPdeMask &= ~X86_PDE_PAE_NX;
3649 pVCpu->pgm.s.fGst64ShadowedBigPdeMask &= ~X86_PDE2M_PAE_NX;
3650 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask &= ~X86_PDE2M_PAE_NX;
3651 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask &= ~X86_PDPE_LM_NX;
3652 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask &= ~X86_PML4E_NX;
3653 }
3654}
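
/*
 * Illustrative sketch of what the must-be-zero (MBZ) masks updated above are
 * for: a guest PAE PTE with any MBZ bit set (which includes the NX bit
 * whenever EFER.NXE is clear) is treated as a reserved-bit violation by the
 * guest page table walkers.  The helper name is hypothetical.
 */
DECLINLINE(bool) pgmSketchIsPaePteReservedBitSet(PVMCPUCC pVCpu, X86PGPAEUINT uPte)
{
    return RT_BOOL(uPte & pVCpu->pgm.s.fGstPaeMbzPteMask);
}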
3655
3656
3657/**
3658 * Checks if any PGM pool pages are marked dirty (not monitored).
3659 *
3660 * @returns true if dirty pages are present, false if not.
3661 * @param pVM The cross context VM structure.
3662 */
3663VMMDECL(bool) PGMHasDirtyPages(PVM pVM)
3664{
3665 return pVM->pgm.s.CTX_SUFF(pPool)->cDirtyPages != 0;
3666}
3667
3668
3669/**
3670 * Check if this VCPU currently owns the PGM lock.
3671 *
3672 * @returns bool owner/not owner
3673 * @param pVM The cross context VM structure.
3674 */
3675VMMDECL(bool) PGMIsLockOwner(PVMCC pVM)
3676{
3677 return PDMCritSectIsOwner(pVM, &pVM->pgm.s.CritSectX);
3678}
3679
3680
3681/**
3682 * Enables or disables large page usage.
3683 *
3684 * @returns VBox status code.
3685 * @param pVM The cross context VM structure.
3686 * @param fUseLargePages Whether to use large pages.
3687 */
3688VMMDECL(int) PGMSetLargePageUsage(PVMCC pVM, bool fUseLargePages)
3689{
3690 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3691
3692 pVM->pgm.s.fUseLargePages = fUseLargePages;
3693 return VINF_SUCCESS;
3694}
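
/*
 * Usage sketch: a configuration path could toggle large page usage like
 * this.  The call site shown is hypothetical.
 */
static void pgmSketchConfigureLargePages(PVMCC pVM, bool fEnable)
{
    int rc = PGMSetLargePageUsage(pVM, fEnable);
    AssertRC(rc);
}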
3695
3696
3697/**
3698 * Acquire the PGM lock.
3699 *
3700 * @returns VBox status code
3701 * @param pVM The cross context VM structure.
3702 * @param fVoid Set if the caller cannot handle failure returns.
3703 * @param SRC_POS The source position of the caller (RT_SRC_POS).
3704 */
3705#if defined(VBOX_STRICT) || defined(DOXYGEN_RUNNING)
3706int pgmLockDebug(PVMCC pVM, bool fVoid, RT_SRC_POS_DECL)
3707#else
3708int pgmLock(PVMCC pVM, bool fVoid)
3709#endif
3710{
3711#if defined(VBOX_STRICT)
3712 int rc = PDMCritSectEnterDebug(pVM, &pVM->pgm.s.CritSectX, VINF_SUCCESS, (uintptr_t)ASMReturnAddress(), RT_SRC_POS_ARGS);
3713#else
3714 int rc = PDMCritSectEnter(pVM, &pVM->pgm.s.CritSectX, VINF_SUCCESS);
3715#endif
3716 if (RT_SUCCESS(rc))
3717 return rc;
3718 if (fVoid)
3719 PDM_CRITSECT_RELEASE_ASSERT_RC(pVM, &pVM->pgm.s.CritSectX, rc);
3720 else
3721 AssertRC(rc);
3722 return rc;
3723}
3724
3725
3726/**
3727 * Release the PGM lock.
3728 *
3729 * @remarks Preserves the deprecated page lock count across nested leaves.
3730 * @param pVM The cross context VM structure.
3731 */
3732void pgmUnlock(PVMCC pVM)
3733{
3734 uint32_t cDeprecatedPageLocks = pVM->pgm.s.cDeprecatedPageLocks;
3735 pVM->pgm.s.cDeprecatedPageLocks = 0;
3736 int rc = PDMCritSectLeave(pVM, &pVM->pgm.s.CritSectX);
3737 if (rc == VINF_SEM_NESTED)
3738 pVM->pgm.s.cDeprecatedPageLocks = cDeprecatedPageLocks;
3739}
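
/*
 * Sketch of the typical locking pattern: internal PGM state is read or
 * updated only while holding the PGM critical section, matching how
 * PGMSetGuestEptPtr() and PGMAssertCR3() below use PGM_LOCK_VOID /
 * PGM_UNLOCK.  Taking the lock for a single bool read is for illustration
 * only; the helper name is hypothetical.
 */
static bool pgmSketchIsNestedPagingActive(PVMCC pVM)
{
    PGM_LOCK_VOID(pVM); /* the _VOID variant asserts instead of returning a failure */
    bool const fNestedPaging = pVM->pgm.s.fNestedPaging;
    PGM_UNLOCK(pVM);
    return fNestedPaging;
}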
3740
3741
3742#if !defined(IN_R0) || defined(LOG_ENABLED)
3743
3744/** Format handler for PGMPAGE.
3745 * @copydoc FNRTSTRFORMATTYPE */
3746static DECLCALLBACK(size_t) pgmFormatTypeHandlerPage(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
3747 const char *pszType, void const *pvValue,
3748 int cchWidth, int cchPrecision, unsigned fFlags,
3749 void *pvUser)
3750{
3751 size_t cch;
3752 PCPGMPAGE pPage = (PCPGMPAGE)pvValue;
3753 if (RT_VALID_PTR(pPage))
3754 {
3755 char szTmp[64+80];
3756
3757 cch = 0;
3758
3759 /* The single char state stuff. */
3760 static const char s_achPageStates[4] = { 'Z', 'A', 'W', 'S' };
3761 szTmp[cch++] = s_achPageStates[PGM_PAGE_GET_STATE_NA(pPage)];
3762
3763# define IS_PART_INCLUDED(lvl) ( !(fFlags & RTSTR_F_PRECISION) || cchPrecision == (lvl) || cchPrecision >= (lvl)+10 )
3764 if (IS_PART_INCLUDED(5))
3765 {
3766 static const char s_achHandlerStates[4] = { '-', 't', 'w', 'a' };
3767 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)];
3768 }
3769
3770 /* The type. */
3771 if (IS_PART_INCLUDED(4))
3772 {
3773 szTmp[cch++] = ':';
3774 static const char s_achPageTypes[8][4] = { "INV", "RAM", "MI2", "M2A", "SHA", "ROM", "MIO", "BAD" };
3775 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][0];
3776 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][1];
3777 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][2];
3778 }
3779
3780 /* The numbers. */
3781 if (IS_PART_INCLUDED(3))
3782 {
3783 szTmp[cch++] = ':';
3784 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_HCPHYS_NA(pPage), 16, 12, 0, RTSTR_F_ZEROPAD | RTSTR_F_64BIT);
3785 }
3786
3787 if (IS_PART_INCLUDED(2))
3788 {
3789 szTmp[cch++] = ':';
3790 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_PAGEID(pPage), 16, 7, 0, RTSTR_F_ZEROPAD | RTSTR_F_32BIT);
3791 }
3792
3793 if (IS_PART_INCLUDED(6))
3794 {
3795 szTmp[cch++] = ':';
3796 static const char s_achRefs[4] = { '-', 'U', '!', 'L' };
3797 szTmp[cch++] = s_achRefs[PGM_PAGE_GET_TD_CREFS_NA(pPage)];
3798 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_TD_IDX_NA(pPage), 16, 4, 0, RTSTR_F_ZEROPAD | RTSTR_F_16BIT);
3799 }
3800# undef IS_PART_INCLUDED
3801
3802 cch = pfnOutput(pvArgOutput, szTmp, cch);
3803 }
3804 else
3805 cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmpage-ptr>"));
3806 NOREF(pszType); NOREF(cchWidth); NOREF(pvUser);
3807 return cch;
3808}
3809
3810
3811/** Format handler for PGMRAMRANGE.
3812 * @copydoc FNRTSTRFORMATTYPE */
3813static DECLCALLBACK(size_t) pgmFormatTypeHandlerRamRange(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
3814 const char *pszType, void const *pvValue,
3815 int cchWidth, int cchPrecision, unsigned fFlags,
3816 void *pvUser)
3817{
3818 size_t cch;
3819 PGMRAMRANGE const *pRam = (PGMRAMRANGE const *)pvValue;
3820 if (RT_VALID_PTR(pRam))
3821 {
3822 char szTmp[80];
3823 cch = RTStrPrintf(szTmp, sizeof(szTmp), "%RGp-%RGp", pRam->GCPhys, pRam->GCPhysLast);
3824 cch = pfnOutput(pvArgOutput, szTmp, cch);
3825 }
3826 else
3827 cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmramrange-ptr>"));
3828 NOREF(pszType); NOREF(cchWidth); NOREF(cchPrecision); NOREF(pvUser); NOREF(fFlags);
3829 return cch;
3830}
3831
3832/** Format type handlers to be registered/deregistered. */
3833static const struct
3834{
3835 char szType[24];
3836 PFNRTSTRFORMATTYPE pfnHandler;
3837} g_aPgmFormatTypes[] =
3838{
3839 { "pgmpage", pgmFormatTypeHandlerPage },
3840 { "pgmramrange", pgmFormatTypeHandlerRamRange }
3841};
3842
3843#endif /* !IN_R0 || LOG_ENABLED */
3844
3845/**
3846 * Registers the global string format types.
3847 *
3848 * This should be called at module load time or in some other manner that ensures
3849 * that it is called exactly once.
3850 *
3851 * @returns VINF_SUCCESS on success, or an IPRT status code on RTStrFormatTypeRegister failure.
3852 */
3853VMMDECL(int) PGMRegisterStringFormatTypes(void)
3854{
3855#if !defined(IN_R0) || defined(LOG_ENABLED)
3856 int rc = VINF_SUCCESS;
3857 unsigned i;
3858 for (i = 0; RT_SUCCESS(rc) && i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
3859 {
3860 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
3861# ifdef IN_RING0
3862 if (rc == VERR_ALREADY_EXISTS)
3863 {
3864 /* in case of cleanup failure in ring-0 */
3865 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
3866 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
3867 }
3868# endif
3869 }
3870 if (RT_FAILURE(rc))
3871 while (i-- > 0)
3872 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
3873
3874 return rc;
3875#else
3876 return VINF_SUCCESS;
3877#endif
3878}
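
/*
 * Usage sketch for the format types registered above: IPRT resolves custom
 * format types through the %R[type] specifier, so PGM code can log PGMPAGE
 * and PGMRAMRANGE structures directly once registration has succeeded.  The
 * helper name is hypothetical.
 */
static void pgmSketchLogPageAndRange(PCPGMPAGE pPage, PGMRAMRANGE const *pRam)
{
    Log(("page=%R[pgmpage] in range %R[pgmramrange]\n", pPage, pRam));
}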
3879
3880
3881/**
3882 * Deregisters the global string format types.
3883 *
3884 * This should be called at module unload time or in some other manner that
3885 * ensure that it's called exactly one time.
3886 * ensures that it is called exactly once.
3887VMMDECL(void) PGMDeregisterStringFormatTypes(void)
3888{
3889#if !defined(IN_R0) || defined(LOG_ENABLED)
3890 for (unsigned i = 0; i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
3891 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
3892#endif
3893}
3894
3895
3896#ifdef VBOX_STRICT
3897/**
3898 * Asserts that everything related to the guest CR3 is correctly shadowed.
3899 *
3900 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
3901 * and assert the correctness of the guest CR3 mapping before asserting that the
3902 * shadow page tables are in sync with the guest page tables.
3903 *
3904 * @returns Number of conflicts.
3905 * @param pVM The cross context VM structure.
3906 * @param pVCpu The cross context virtual CPU structure.
3907 * @param cr3 The current guest CR3 register value.
3908 * @param cr4 The current guest CR4 register value.
3909 */
3910VMMDECL(unsigned) PGMAssertCR3(PVMCC pVM, PVMCPUCC pVCpu, uint64_t cr3, uint64_t cr4)
3911{
3912 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
3913
3914 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
3915 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), -VERR_PGM_MODE_IPE);
3916 AssertReturn(g_aPgmBothModeData[idxBth].pfnAssertCR3, -VERR_PGM_MODE_IPE);
3917
3918 PGM_LOCK_VOID(pVM);
3919 unsigned cErrors = g_aPgmBothModeData[idxBth].pfnAssertCR3(pVCpu, cr3, cr4, 0, ~(RTGCPTR)0);
3920 PGM_UNLOCK(pVM);
3921
3922 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
3923 return cErrors;
3924}
3925#endif /* VBOX_STRICT */
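
#ifdef VBOX_STRICT
/*
 * Strict-build usage sketch: validate the shadow page tables against the
 * current guest CR3 and CR4 values; a non-zero return indicates
 * inconsistencies.  The wrapper name is hypothetical and CPUMGetGuestCR4 is
 * assumed from cpum.h.
 */
static unsigned pgmSketchAssertCurrentCr3(PVMCC pVM, PVMCPUCC pVCpu)
{
    return PGMAssertCR3(pVM, pVCpu, CPUMGetGuestCR3(pVCpu), CPUMGetGuestCR4(pVCpu));
}
#endif /* VBOX_STRICT */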
3926
3927
3928/**
3929 * Updates PGM's copy of the guest's EPT pointer.
3930 *
3931 * @param pVCpu The cross context virtual CPU structure.
3932 * @param uEptPtr The EPT pointer.
3933 *
3934 * @remarks This can be called as part of VM-entry so we might be in the midst of
3935 * switching to VMX non-root mode.
3936 */
3937VMM_INT_DECL(void) PGMSetGuestEptPtr(PVMCPUCC pVCpu, uint64_t uEptPtr)
3938{
3939 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3940 PGM_LOCK_VOID(pVM);
3941 pVCpu->pgm.s.uEptPtr = uEptPtr;
3942 PGM_UNLOCK(pVM);
3943}
3944