VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp@ 92339

Last change on this file since 92339 was 92286, checked in by vboxsync on 2021-11-09

VMM/PGMAll.cpp: @todos bugref:10142

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 138.6 KB
1/* $Id: PGMAll.cpp 92286 2021-11-09 11:48:32Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor - All context code.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_PGM
23#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
24#include <VBox/vmm/pgm.h>
25#include <VBox/vmm/cpum.h>
26#include <VBox/vmm/selm.h>
27#include <VBox/vmm/iem.h>
28#include <VBox/vmm/iom.h>
29#include <VBox/sup.h>
30#include <VBox/vmm/mm.h>
31#include <VBox/vmm/stam.h>
32#include <VBox/vmm/trpm.h>
33#include <VBox/vmm/em.h>
34#include <VBox/vmm/hm.h>
35#include <VBox/vmm/hm_vmx.h>
36#include "PGMInternal.h"
37#include <VBox/vmm/vmcc.h>
38#include "PGMInline.h"
39#include <iprt/assert.h>
40#include <iprt/asm-amd64-x86.h>
41#include <iprt/string.h>
42#include <VBox/log.h>
43#include <VBox/param.h>
44#include <VBox/err.h>
45
46
47/*********************************************************************************************************************************
48* Internal Functions *
49*********************************************************************************************************************************/
50DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
51DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde);
52#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
53static int pgmGstSlatWalk(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, bool fIsLinearAddrValid, RTGCPTR GCPtrNested, PPGMPTWALKGST pWalk);
54#endif
55static int pgmShwSyncLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD);
56static int pgmShwGetEPTPDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD);
57
58
59#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
60/* Guest - EPT SLAT is identical for all guest paging modes. */
61# define PGM_SLAT_TYPE PGM_SLAT_TYPE_EPT
62# define PGM_GST_TYPE PGM_TYPE_EPT
63# include "PGMGstDefs.h"
64# include "PGMAllGstSlatEpt.cpp.h"
65# undef PGM_GST_TYPE
66#endif
67
68
69/*
70 * Shadow - 32-bit mode
71 */
72#define PGM_SHW_TYPE PGM_TYPE_32BIT
73#define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
74#include "PGMAllShw.h"
75
76/* Guest - real mode */
77#define PGM_GST_TYPE PGM_TYPE_REAL
78#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
79#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
80#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
81#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
82#include "PGMGstDefs.h"
83#include "PGMAllGst.h"
84#include "PGMAllBth.h"
85#undef BTH_PGMPOOLKIND_PT_FOR_PT
86#undef BTH_PGMPOOLKIND_ROOT
87#undef PGM_BTH_NAME
88#undef PGM_GST_TYPE
89#undef PGM_GST_NAME
90
91/* Guest - protected mode */
92#define PGM_GST_TYPE PGM_TYPE_PROT
93#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
94#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
95#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
96#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
97#include "PGMGstDefs.h"
98#include "PGMAllGst.h"
99#include "PGMAllBth.h"
100#undef BTH_PGMPOOLKIND_PT_FOR_PT
101#undef BTH_PGMPOOLKIND_ROOT
102#undef PGM_BTH_NAME
103#undef PGM_GST_TYPE
104#undef PGM_GST_NAME
105
106/* Guest - 32-bit mode */
107#define PGM_GST_TYPE PGM_TYPE_32BIT
108#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
109#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
110#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
111#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
112#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD
113#include "PGMGstDefs.h"
114#include "PGMAllGst.h"
115#include "PGMAllBth.h"
116#undef BTH_PGMPOOLKIND_PT_FOR_BIG
117#undef BTH_PGMPOOLKIND_PT_FOR_PT
118#undef BTH_PGMPOOLKIND_ROOT
119#undef PGM_BTH_NAME
120#undef PGM_GST_TYPE
121#undef PGM_GST_NAME
122
123#undef PGM_SHW_TYPE
124#undef PGM_SHW_NAME
125
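/*
 * A minimal sketch of the include-template technique used above: the real code
 * defines PGM_SHW_NAME()/PGM_GST_NAME()/PGM_BTH_NAME() and then #includes
 * PGMAllShw.h, PGMAllGst.h and PGMAllBth.h, so every (shadow, guest) mode pair
 * gets its own copy of the worker functions via token pasting.  The names below
 * (SKETCH_*) are invented for illustration, and the sketch uses a small macro
 * body instead of an #include.
 */
#if 0 /* illustrative sketch only - invented names, kept out of the build */
/* The "template" body refers to SKETCH_NAME(), which each instantiation site defines: */
# define SKETCH_TEMPLATE_BODY \
    static int SKETCH_NAME(GetPage)(unsigned uArg) { return (int)uArg; }

/* First instantiation: names expand to SketchModeA_GetPage. */
# define SKETCH_NAME(name) SketchModeA_##name
SKETCH_TEMPLATE_BODY
# undef SKETCH_NAME

/* Second instantiation: names expand to SketchModeB_GetPage. */
# define SKETCH_NAME(name) SketchModeB_##name
SKETCH_TEMPLATE_BODY
# undef SKETCH_NAME
#endif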
126
127/*
128 * Shadow - PAE mode
129 */
130#define PGM_SHW_TYPE PGM_TYPE_PAE
131#define PGM_SHW_NAME(name) PGM_SHW_NAME_PAE(name)
132#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
133#include "PGMAllShw.h"
134
135/* Guest - real mode */
136#define PGM_GST_TYPE PGM_TYPE_REAL
137#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
138#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
139#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
140#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
141#include "PGMGstDefs.h"
142#include "PGMAllBth.h"
143#undef BTH_PGMPOOLKIND_PT_FOR_PT
144#undef BTH_PGMPOOLKIND_ROOT
145#undef PGM_BTH_NAME
146#undef PGM_GST_TYPE
147#undef PGM_GST_NAME
148
149/* Guest - protected mode */
150#define PGM_GST_TYPE PGM_TYPE_PROT
151#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
152#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
153#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
154#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
155#include "PGMGstDefs.h"
156#include "PGMAllBth.h"
157#undef BTH_PGMPOOLKIND_PT_FOR_PT
158#undef BTH_PGMPOOLKIND_ROOT
159#undef PGM_BTH_NAME
160#undef PGM_GST_TYPE
161#undef PGM_GST_NAME
162
163/* Guest - 32-bit mode */
164#define PGM_GST_TYPE PGM_TYPE_32BIT
165#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
166#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
167#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
168#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
169#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_FOR_32BIT
170#include "PGMGstDefs.h"
171#include "PGMAllBth.h"
172#undef BTH_PGMPOOLKIND_PT_FOR_BIG
173#undef BTH_PGMPOOLKIND_PT_FOR_PT
174#undef BTH_PGMPOOLKIND_ROOT
175#undef PGM_BTH_NAME
176#undef PGM_GST_TYPE
177#undef PGM_GST_NAME
178
179
180/* Guest - PAE mode */
181#define PGM_GST_TYPE PGM_TYPE_PAE
182#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
183#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
184#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
185#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
186#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT
187#include "PGMGstDefs.h"
188#include "PGMAllGst.h"
189#include "PGMAllBth.h"
190#undef BTH_PGMPOOLKIND_PT_FOR_BIG
191#undef BTH_PGMPOOLKIND_PT_FOR_PT
192#undef BTH_PGMPOOLKIND_ROOT
193#undef PGM_BTH_NAME
194#undef PGM_GST_TYPE
195#undef PGM_GST_NAME
196
197#undef PGM_SHW_TYPE
198#undef PGM_SHW_NAME
199
200
201/*
202 * Shadow - AMD64 mode
203 */
204#define PGM_SHW_TYPE PGM_TYPE_AMD64
205#define PGM_SHW_NAME(name) PGM_SHW_NAME_AMD64(name)
206#include "PGMAllShw.h"
207
208/* Guest - protected mode (only used for AMD-V nested paging in 64-bit mode) */
209/** @todo retire this hack. */
210#define PGM_GST_TYPE PGM_TYPE_PROT
211#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
212#define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
213#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
214#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PD_PHYS
215#include "PGMGstDefs.h"
216#include "PGMAllBth.h"
217#undef BTH_PGMPOOLKIND_PT_FOR_PT
218#undef BTH_PGMPOOLKIND_ROOT
219#undef PGM_BTH_NAME
220#undef PGM_GST_TYPE
221#undef PGM_GST_NAME
222
223#ifdef VBOX_WITH_64_BITS_GUESTS
224/* Guest - AMD64 mode */
225# define PGM_GST_TYPE PGM_TYPE_AMD64
226# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
227# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
228# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
229# define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
230# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_64BIT_PML4
231# include "PGMGstDefs.h"
232# include "PGMAllGst.h"
233# include "PGMAllBth.h"
234# undef BTH_PGMPOOLKIND_PT_FOR_BIG
235# undef BTH_PGMPOOLKIND_PT_FOR_PT
236# undef BTH_PGMPOOLKIND_ROOT
237# undef PGM_BTH_NAME
238# undef PGM_GST_TYPE
239# undef PGM_GST_NAME
240#endif /* VBOX_WITH_64_BITS_GUESTS */
241
242#undef PGM_SHW_TYPE
243#undef PGM_SHW_NAME
244
245
246/*
247 * Shadow - 32-bit nested paging mode.
248 */
249#define PGM_SHW_TYPE PGM_TYPE_NESTED_32BIT
250#define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED_32BIT(name)
251#include "PGMAllShw.h"
252
253/* Guest - real mode */
254#define PGM_GST_TYPE PGM_TYPE_REAL
255#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
256#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_REAL(name)
257#include "PGMGstDefs.h"
258#include "PGMAllBth.h"
259#undef PGM_BTH_NAME
260#undef PGM_GST_TYPE
261#undef PGM_GST_NAME
262
263/* Guest - protected mode */
264#define PGM_GST_TYPE PGM_TYPE_PROT
265#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
266#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_PROT(name)
267#include "PGMGstDefs.h"
268#include "PGMAllBth.h"
269#undef PGM_BTH_NAME
270#undef PGM_GST_TYPE
271#undef PGM_GST_NAME
272
273/* Guest - 32-bit mode */
274#define PGM_GST_TYPE PGM_TYPE_32BIT
275#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
276#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_32BIT(name)
277#include "PGMGstDefs.h"
278#include "PGMAllBth.h"
279#undef PGM_BTH_NAME
280#undef PGM_GST_TYPE
281#undef PGM_GST_NAME
282
283/* Guest - PAE mode */
284#define PGM_GST_TYPE PGM_TYPE_PAE
285#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
286#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_PAE(name)
287#include "PGMGstDefs.h"
288#include "PGMAllBth.h"
289#undef PGM_BTH_NAME
290#undef PGM_GST_TYPE
291#undef PGM_GST_NAME
292
293#ifdef VBOX_WITH_64_BITS_GUESTS
294/* Guest - AMD64 mode */
295# define PGM_GST_TYPE PGM_TYPE_AMD64
296# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
297# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_AMD64(name)
298# include "PGMGstDefs.h"
299# include "PGMAllBth.h"
300# undef PGM_BTH_NAME
301# undef PGM_GST_TYPE
302# undef PGM_GST_NAME
303#endif /* VBOX_WITH_64_BITS_GUESTS */
304
305#undef PGM_SHW_TYPE
306#undef PGM_SHW_NAME
307
308
309/*
310 * Shadow - PAE nested paging mode.
311 */
312#define PGM_SHW_TYPE PGM_TYPE_NESTED_PAE
313#define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED_PAE(name)
314#include "PGMAllShw.h"
315
316/* Guest - real mode */
317#define PGM_GST_TYPE PGM_TYPE_REAL
318#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
319#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_REAL(name)
320#include "PGMGstDefs.h"
321#include "PGMAllBth.h"
322#undef PGM_BTH_NAME
323#undef PGM_GST_TYPE
324#undef PGM_GST_NAME
325
326/* Guest - protected mode */
327#define PGM_GST_TYPE PGM_TYPE_PROT
328#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
329#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_PROT(name)
330#include "PGMGstDefs.h"
331#include "PGMAllBth.h"
332#undef PGM_BTH_NAME
333#undef PGM_GST_TYPE
334#undef PGM_GST_NAME
335
336/* Guest - 32-bit mode */
337#define PGM_GST_TYPE PGM_TYPE_32BIT
338#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
339#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_32BIT(name)
340#include "PGMGstDefs.h"
341#include "PGMAllBth.h"
342#undef PGM_BTH_NAME
343#undef PGM_GST_TYPE
344#undef PGM_GST_NAME
345
346/* Guest - PAE mode */
347#define PGM_GST_TYPE PGM_TYPE_PAE
348#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
349#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_PAE(name)
350#include "PGMGstDefs.h"
351#include "PGMAllBth.h"
352#undef PGM_BTH_NAME
353#undef PGM_GST_TYPE
354#undef PGM_GST_NAME
355
356#ifdef VBOX_WITH_64_BITS_GUESTS
357/* Guest - AMD64 mode */
358# define PGM_GST_TYPE PGM_TYPE_AMD64
359# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
360# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_AMD64(name)
361# include "PGMGstDefs.h"
362# include "PGMAllBth.h"
363# undef PGM_BTH_NAME
364# undef PGM_GST_TYPE
365# undef PGM_GST_NAME
366#endif /* VBOX_WITH_64_BITS_GUESTS */
367
368#undef PGM_SHW_TYPE
369#undef PGM_SHW_NAME
370
371
372/*
373 * Shadow - AMD64 nested paging mode.
374 */
375#define PGM_SHW_TYPE PGM_TYPE_NESTED_AMD64
376#define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED_AMD64(name)
377#include "PGMAllShw.h"
378
379/* Guest - real mode */
380#define PGM_GST_TYPE PGM_TYPE_REAL
381#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
382#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_REAL(name)
383#include "PGMGstDefs.h"
384#include "PGMAllBth.h"
385#undef PGM_BTH_NAME
386#undef PGM_GST_TYPE
387#undef PGM_GST_NAME
388
389/* Guest - protected mode */
390#define PGM_GST_TYPE PGM_TYPE_PROT
391#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
392#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_PROT(name)
393#include "PGMGstDefs.h"
394#include "PGMAllBth.h"
395#undef PGM_BTH_NAME
396#undef PGM_GST_TYPE
397#undef PGM_GST_NAME
398
399/* Guest - 32-bit mode */
400#define PGM_GST_TYPE PGM_TYPE_32BIT
401#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
402#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_32BIT(name)
403#include "PGMGstDefs.h"
404#include "PGMAllBth.h"
405#undef PGM_BTH_NAME
406#undef PGM_GST_TYPE
407#undef PGM_GST_NAME
408
409/* Guest - PAE mode */
410#define PGM_GST_TYPE PGM_TYPE_PAE
411#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
412#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_PAE(name)
413#include "PGMGstDefs.h"
414#include "PGMAllBth.h"
415#undef PGM_BTH_NAME
416#undef PGM_GST_TYPE
417#undef PGM_GST_NAME
418
419#ifdef VBOX_WITH_64_BITS_GUESTS
420/* Guest - AMD64 mode */
421# define PGM_GST_TYPE PGM_TYPE_AMD64
422# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
423# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_AMD64(name)
424# include "PGMGstDefs.h"
425# include "PGMAllBth.h"
426# undef PGM_BTH_NAME
427# undef PGM_GST_TYPE
428# undef PGM_GST_NAME
429#endif /* VBOX_WITH_64_BITS_GUESTS */
430
431#undef PGM_SHW_TYPE
432#undef PGM_SHW_NAME
433
434
435/*
436 * Shadow - EPT.
437 */
438#define PGM_SHW_TYPE PGM_TYPE_EPT
439#define PGM_SHW_NAME(name) PGM_SHW_NAME_EPT(name)
440#include "PGMAllShw.h"
441
442/* Guest - real mode */
443#define PGM_GST_TYPE PGM_TYPE_REAL
444#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
445#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_REAL(name)
446#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
447#include "PGMGstDefs.h"
448#include "PGMAllBth.h"
449#undef BTH_PGMPOOLKIND_PT_FOR_PT
450#undef PGM_BTH_NAME
451#undef PGM_GST_TYPE
452#undef PGM_GST_NAME
453
454/* Guest - protected mode */
455#define PGM_GST_TYPE PGM_TYPE_PROT
456#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
457#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
458#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
459#include "PGMGstDefs.h"
460#include "PGMAllBth.h"
461#undef BTH_PGMPOOLKIND_PT_FOR_PT
462#undef PGM_BTH_NAME
463#undef PGM_GST_TYPE
464#undef PGM_GST_NAME
465
466/* Guest - 32-bit mode */
467#define PGM_GST_TYPE PGM_TYPE_32BIT
468#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
469#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_32BIT(name)
470#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
471#include "PGMGstDefs.h"
472#include "PGMAllBth.h"
473#undef BTH_PGMPOOLKIND_PT_FOR_PT
474#undef PGM_BTH_NAME
475#undef PGM_GST_TYPE
476#undef PGM_GST_NAME
477
478/* Guest - PAE mode */
479#define PGM_GST_TYPE PGM_TYPE_PAE
480#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
481#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PAE(name)
482#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
483#include "PGMGstDefs.h"
484#include "PGMAllBth.h"
485#undef BTH_PGMPOOLKIND_PT_FOR_PT
486#undef PGM_BTH_NAME
487#undef PGM_GST_TYPE
488#undef PGM_GST_NAME
489
490#ifdef VBOX_WITH_64_BITS_GUESTS
491/* Guest - AMD64 mode */
492# define PGM_GST_TYPE PGM_TYPE_AMD64
493# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
494# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_AMD64(name)
495# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
496# include "PGMGstDefs.h"
497# include "PGMAllBth.h"
498# undef BTH_PGMPOOLKIND_PT_FOR_PT
499# undef PGM_BTH_NAME
500# undef PGM_GST_TYPE
501# undef PGM_GST_NAME
502#endif /* VBOX_WITH_64_BITS_GUESTS */
503
504#undef PGM_SHW_TYPE
505#undef PGM_SHW_NAME
506
507
508/*
509 * Shadow - NEM / None.
510 */
511#define PGM_SHW_TYPE PGM_TYPE_NONE
512#define PGM_SHW_NAME(name) PGM_SHW_NAME_NONE(name)
513#include "PGMAllShw.h"
514
515/* Guest - real mode */
516#define PGM_GST_TYPE PGM_TYPE_REAL
517#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
518#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_REAL(name)
519#include "PGMGstDefs.h"
520#include "PGMAllBth.h"
521#undef PGM_BTH_NAME
522#undef PGM_GST_TYPE
523#undef PGM_GST_NAME
524
525/* Guest - protected mode */
526#define PGM_GST_TYPE PGM_TYPE_PROT
527#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
528#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_PROT(name)
529#include "PGMGstDefs.h"
530#include "PGMAllBth.h"
531#undef PGM_BTH_NAME
532#undef PGM_GST_TYPE
533#undef PGM_GST_NAME
534
535/* Guest - 32-bit mode */
536#define PGM_GST_TYPE PGM_TYPE_32BIT
537#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
538#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_32BIT(name)
539#include "PGMGstDefs.h"
540#include "PGMAllBth.h"
541#undef PGM_BTH_NAME
542#undef PGM_GST_TYPE
543#undef PGM_GST_NAME
544
545/* Guest - PAE mode */
546#define PGM_GST_TYPE PGM_TYPE_PAE
547#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
548#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_PAE(name)
549#include "PGMGstDefs.h"
550#include "PGMAllBth.h"
551#undef PGM_BTH_NAME
552#undef PGM_GST_TYPE
553#undef PGM_GST_NAME
554
555#ifdef VBOX_WITH_64_BITS_GUESTS
556/* Guest - AMD64 mode */
557# define PGM_GST_TYPE PGM_TYPE_AMD64
558# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
559# define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_AMD64(name)
560# include "PGMGstDefs.h"
561# include "PGMAllBth.h"
562# undef PGM_BTH_NAME
563# undef PGM_GST_TYPE
564# undef PGM_GST_NAME
565#endif /* VBOX_WITH_64_BITS_GUESTS */
566
567#undef PGM_SHW_TYPE
568#undef PGM_SHW_NAME
569
570
571
572/**
573 * Guest mode data array.
574 */
575PGMMODEDATAGST const g_aPgmGuestModeData[PGM_GUEST_MODE_DATA_ARRAY_SIZE] =
576{
577 { UINT32_MAX, NULL, NULL, NULL, NULL }, /* 0 */
578 {
579 PGM_TYPE_REAL,
580 PGM_GST_NAME_REAL(GetPage),
581 PGM_GST_NAME_REAL(ModifyPage),
582 PGM_GST_NAME_REAL(Enter),
583 PGM_GST_NAME_REAL(Exit),
584#ifdef IN_RING3
585 PGM_GST_NAME_REAL(Relocate),
586#endif
587 },
588 {
589 PGM_TYPE_PROT,
590 PGM_GST_NAME_PROT(GetPage),
591 PGM_GST_NAME_PROT(ModifyPage),
592 PGM_GST_NAME_PROT(Enter),
593 PGM_GST_NAME_PROT(Exit),
594#ifdef IN_RING3
595 PGM_GST_NAME_PROT(Relocate),
596#endif
597 },
598 {
599 PGM_TYPE_32BIT,
600 PGM_GST_NAME_32BIT(GetPage),
601 PGM_GST_NAME_32BIT(ModifyPage),
602 PGM_GST_NAME_32BIT(Enter),
603 PGM_GST_NAME_32BIT(Exit),
604#ifdef IN_RING3
605 PGM_GST_NAME_32BIT(Relocate),
606#endif
607 },
608 {
609 PGM_TYPE_PAE,
610 PGM_GST_NAME_PAE(GetPage),
611 PGM_GST_NAME_PAE(ModifyPage),
612 PGM_GST_NAME_PAE(Enter),
613 PGM_GST_NAME_PAE(Exit),
614#ifdef IN_RING3
615 PGM_GST_NAME_PAE(Relocate),
616#endif
617 },
618#ifdef VBOX_WITH_64_BITS_GUESTS
619 {
620 PGM_TYPE_AMD64,
621 PGM_GST_NAME_AMD64(GetPage),
622 PGM_GST_NAME_AMD64(ModifyPage),
623 PGM_GST_NAME_AMD64(Enter),
624 PGM_GST_NAME_AMD64(Exit),
625# ifdef IN_RING3
626 PGM_GST_NAME_AMD64(Relocate),
627# endif
628 },
629#endif
630};
631
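/*
 * A minimal sketch of how a mode-data table like the one above is used: the
 * mode enter/switch code records an index into the table, and the API entry
 * points dispatch through the function pointers after validating the index
 * (see the AssertReturn pattern in the functions further down).  All names
 * below (SKETCHMODEDATA, sketch*) are invented for illustration.
 */
#if 0 /* illustrative sketch only - invented names, kept out of the build */
typedef struct SKETCHMODEDATA
{
    uint32_t uType;                         /* Mode identifier, in the spirit of PGM_TYPE_XXX. */
    int    (*pfnGetPage)(uint64_t uAddr);   /* Per-mode worker. */
} SKETCHMODEDATA;

static int sketchRealModeGetPage(uint64_t uAddr) { return uAddr ? 1 : 0; }
static int sketchPaeModeGetPage(uint64_t uAddr)  { return uAddr ? 2 : 0; }

static SKETCHMODEDATA const g_aSketchModeData[] =
{
    { UINT32_MAX, NULL                  },  /* Index 0 is a deliberate invalid entry, as above. */
    { 1,          sketchRealModeGetPage },
    { 2,          sketchPaeModeGetPage  },
};

static int sketchGetPage(uintptr_t idxMode, uint64_t uAddr)
{
    /* Validate the table index and the pointer before dispatching. */
    if (idxMode >= RT_ELEMENTS(g_aSketchModeData))
        return -1;
    if (!g_aSketchModeData[idxMode].pfnGetPage)
        return -1;
    return g_aSketchModeData[idxMode].pfnGetPage(uAddr);
}
#endif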
632
633/**
634 * The shadow mode data array.
635 */
636PGMMODEDATASHW const g_aPgmShadowModeData[PGM_SHADOW_MODE_DATA_ARRAY_SIZE] =
637{
638 { UINT8_MAX, NULL, NULL, NULL, NULL }, /* 0 */
639 { UINT8_MAX, NULL, NULL, NULL, NULL }, /* PGM_TYPE_REAL */
640 { UINT8_MAX, NULL, NULL, NULL, NULL }, /* PGM_TYPE_PROT */
641 {
642 PGM_TYPE_32BIT,
643 PGM_SHW_NAME_32BIT(GetPage),
644 PGM_SHW_NAME_32BIT(ModifyPage),
645 PGM_SHW_NAME_32BIT(Enter),
646 PGM_SHW_NAME_32BIT(Exit),
647#ifdef IN_RING3
648 PGM_SHW_NAME_32BIT(Relocate),
649#endif
650 },
651 {
652 PGM_TYPE_PAE,
653 PGM_SHW_NAME_PAE(GetPage),
654 PGM_SHW_NAME_PAE(ModifyPage),
655 PGM_SHW_NAME_PAE(Enter),
656 PGM_SHW_NAME_PAE(Exit),
657#ifdef IN_RING3
658 PGM_SHW_NAME_PAE(Relocate),
659#endif
660 },
661 {
662 PGM_TYPE_AMD64,
663 PGM_SHW_NAME_AMD64(GetPage),
664 PGM_SHW_NAME_AMD64(ModifyPage),
665 PGM_SHW_NAME_AMD64(Enter),
666 PGM_SHW_NAME_AMD64(Exit),
667#ifdef IN_RING3
668 PGM_SHW_NAME_AMD64(Relocate),
669#endif
670 },
671 {
672 PGM_TYPE_NESTED_32BIT,
673 PGM_SHW_NAME_NESTED_32BIT(GetPage),
674 PGM_SHW_NAME_NESTED_32BIT(ModifyPage),
675 PGM_SHW_NAME_NESTED_32BIT(Enter),
676 PGM_SHW_NAME_NESTED_32BIT(Exit),
677#ifdef IN_RING3
678 PGM_SHW_NAME_NESTED_32BIT(Relocate),
679#endif
680 },
681 {
682 PGM_TYPE_NESTED_PAE,
683 PGM_SHW_NAME_NESTED_PAE(GetPage),
684 PGM_SHW_NAME_NESTED_PAE(ModifyPage),
685 PGM_SHW_NAME_NESTED_PAE(Enter),
686 PGM_SHW_NAME_NESTED_PAE(Exit),
687#ifdef IN_RING3
688 PGM_SHW_NAME_NESTED_PAE(Relocate),
689#endif
690 },
691 {
692 PGM_TYPE_NESTED_AMD64,
693 PGM_SHW_NAME_NESTED_AMD64(GetPage),
694 PGM_SHW_NAME_NESTED_AMD64(ModifyPage),
695 PGM_SHW_NAME_NESTED_AMD64(Enter),
696 PGM_SHW_NAME_NESTED_AMD64(Exit),
697#ifdef IN_RING3
698 PGM_SHW_NAME_NESTED_AMD64(Relocate),
699#endif
700 },
701 {
702 PGM_TYPE_EPT,
703 PGM_SHW_NAME_EPT(GetPage),
704 PGM_SHW_NAME_EPT(ModifyPage),
705 PGM_SHW_NAME_EPT(Enter),
706 PGM_SHW_NAME_EPT(Exit),
707#ifdef IN_RING3
708 PGM_SHW_NAME_EPT(Relocate),
709#endif
710 },
711 {
712 PGM_TYPE_NONE,
713 PGM_SHW_NAME_NONE(GetPage),
714 PGM_SHW_NAME_NONE(ModifyPage),
715 PGM_SHW_NAME_NONE(Enter),
716 PGM_SHW_NAME_NONE(Exit),
717#ifdef IN_RING3
718 PGM_SHW_NAME_NONE(Relocate),
719#endif
720 },
721};
722
723
724/**
725 * The guest+shadow mode data array.
726 */
727PGMMODEDATABTH const g_aPgmBothModeData[PGM_BOTH_MODE_DATA_ARRAY_SIZE] =
728{
729#if !defined(IN_RING3) && !defined(VBOX_STRICT)
730# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
731# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
732 { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(Trap0eHandler) }
733
734#elif !defined(IN_RING3) && defined(VBOX_STRICT)
735# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
736# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
737 { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(Trap0eHandler), Nm(AssertCR3) }
738
739#elif defined(IN_RING3) && !defined(VBOX_STRICT)
740# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL }
741# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
742 { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), }
743
744#elif defined(IN_RING3) && defined(VBOX_STRICT)
745# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
746# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
747 { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(AssertCR3) }
748
749#else
750# error "Misconfig."
751#endif
752
753 /* 32-bit shadow paging mode: */
754 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
755 PGMMODEDATABTH_ENTRY(PGM_TYPE_32BIT, PGM_TYPE_REAL, PGM_BTH_NAME_32BIT_REAL),
756 PGMMODEDATABTH_ENTRY(PGM_TYPE_32BIT, PGM_TYPE_PROT, PGM_BTH_NAME_32BIT_PROT),
757 PGMMODEDATABTH_ENTRY(PGM_TYPE_32BIT, PGM_TYPE_32BIT, PGM_BTH_NAME_32BIT_32BIT),
758 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_PAE - illegal */
759 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_AMD64 - illegal */
760 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NESTED_32BIT - illegal */
761 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NESTED_PAE - illegal */
762 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NESTED_AMD64 - illegal */
763 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_EPT - illegal */
764 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NONE - illegal */
765
766 /* PAE shadow paging mode: */
767 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
768 PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_REAL, PGM_BTH_NAME_PAE_REAL),
769 PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_PROT, PGM_BTH_NAME_PAE_PROT),
770 PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_32BIT, PGM_BTH_NAME_PAE_32BIT),
771 PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_PAE, PGM_BTH_NAME_PAE_PAE),
772 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_AMD64 - illegal */
773 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NESTED_32BIT - illegal */
774 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NESTED_PAE - illegal */
775 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NESTED_AMD64 - illegal */
776 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_EPT - illegal */
777 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NONE - illegal */
778
779 /* AMD64 shadow paging mode: */
780 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
781 PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_REAL, PGM_BTH_NAME_AMD64_REAL),
782 PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_PROT, PGM_BTH_NAME_AMD64_PROT),
783 PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_32BIT, PGM_BTH_NAME_AMD64_32BIT),
784 PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_PAE, PGM_BTH_NAME_AMD64_PAE),
785#ifdef VBOX_WITH_64_BITS_GUESTS
786 PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_AMD64, PGM_BTH_NAME_AMD64_AMD64),
787#else
788 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_AMD64 - illegal */
789#endif
790 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NESTED_32BIT - illegal */
791 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NESTED_PAE - illegal */
792 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NESTED_AMD64 - illegal */
793 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_EPT - illegal */
794 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NONE - illegal */
795
796 /* 32-bit nested paging mode: */
797 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
798 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_REAL, PGM_BTH_NAME_NESTED_32BIT_REAL),
799 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_PROT, PGM_BTH_NAME_NESTED_32BIT_PROT),
800 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_32BIT, PGM_BTH_NAME_NESTED_32BIT_32BIT),
801 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_PAE, PGM_BTH_NAME_NESTED_32BIT_PAE),
802#ifdef VBOX_WITH_64_BITS_GUESTS
803 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_AMD64, PGM_BTH_NAME_NESTED_32BIT_AMD64),
804#else
805 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_AMD64 - illegal */
806#endif
807 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NESTED_32BIT - illegal */
808 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NESTED_PAE - illegal */
809 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NESTED_AMD64 - illegal */
810 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_EPT - illegal */
811 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NONE - illegal */
812
813 /* PAE nested paging mode: */
814 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
815 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_REAL, PGM_BTH_NAME_NESTED_PAE_REAL),
816 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_PROT, PGM_BTH_NAME_NESTED_PAE_PROT),
817 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_32BIT, PGM_BTH_NAME_NESTED_PAE_32BIT),
818 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_PAE, PGM_BTH_NAME_NESTED_PAE_PAE),
819#ifdef VBOX_WITH_64_BITS_GUESTS
820 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_AMD64, PGM_BTH_NAME_NESTED_PAE_AMD64),
821#else
822 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_AMD64 - illegal */
823#endif
824 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NESTED_32BIT - illegal */
825 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NESTED_PAE - illegal */
826 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NESTED_AMD64 - illegal */
827 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_EPT - illegal */
828 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NONE - illegal */
829
830 /* AMD64 nested paging mode: */
831 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
832 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_REAL, PGM_BTH_NAME_NESTED_AMD64_REAL),
833 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_PROT, PGM_BTH_NAME_NESTED_AMD64_PROT),
834 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_32BIT, PGM_BTH_NAME_NESTED_AMD64_32BIT),
835 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_PAE, PGM_BTH_NAME_NESTED_AMD64_PAE),
836#ifdef VBOX_WITH_64_BITS_GUESTS
837 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_AMD64, PGM_BTH_NAME_NESTED_AMD64_AMD64),
838#else
839 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_AMD64 - illegal */
840#endif
841 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NESTED_32BIT - illegal */
842 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NESTED_PAE - illegal */
843 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NESTED_AMD64 - illegal */
844 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_EPT - illegal */
845 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NONE - illegal */
846
847 /* EPT nested paging mode: */
848 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
849 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_REAL, PGM_BTH_NAME_EPT_REAL),
850 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_PROT, PGM_BTH_NAME_EPT_PROT),
851 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_32BIT, PGM_BTH_NAME_EPT_32BIT),
852 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_PAE, PGM_BTH_NAME_EPT_PAE),
853#ifdef VBOX_WITH_64_BITS_GUESTS
854 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_AMD64, PGM_BTH_NAME_EPT_AMD64),
855#else
856 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_AMD64 - illegal */
857#endif
858 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NESTED_32BIT - illegal */
859 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NESTED_PAE - illegal */
860 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NESTED_AMD64 - illegal */
861 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_EPT - illegal */
862 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NONE - illegal */
863
864 /* NONE / NEM: */
865 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
866 PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_REAL, PGM_BTH_NAME_EPT_REAL),
867 PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_PROT, PGM_BTH_NAME_EPT_PROT),
868 PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_32BIT, PGM_BTH_NAME_EPT_32BIT),
869 PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_PAE, PGM_BTH_NAME_EPT_PAE),
870#ifdef VBOX_WITH_64_BITS_GUESTS
871 PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_AMD64, PGM_BTH_NAME_EPT_AMD64),
872#else
873 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_AMD64 - illegal */
874#endif
875 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_32BIT - illegal */
876 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_PAE - illegal */
877 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_AMD64 - illegal */
878 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_EPT - illegal */
879 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NONE - illegal */
880
881
882#undef PGMMODEDATABTH_ENTRY
883#undef PGMMODEDATABTH_NULL_ENTRY
884};
885
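/*
 * The table above is a flattened two-dimensional array: one row per shadow
 * paging type, each row holding one slot per guest type (including the unused
 * slot 0), with null entries marking combinations that cannot occur.  A
 * minimal sketch of row-major addressing into such a flat table follows; the
 * row width and all names are illustrative assumptions, not the actual PGM
 * definitions.
 */
#if 0 /* illustrative sketch only - assumed row width, invented names, kept out of the build */
# define SKETCH_GUEST_SLOTS_PER_ROW  11u    /* slot 0 + ten guest types per shadow row, as laid out above */

static uintptr_t sketchBothModeIndex(uintptr_t idxShadowRow, uintptr_t uGuestType)
{
    /* Row-major addressing: pick the shadow row, then the guest slot inside it. */
    return idxShadowRow * SKETCH_GUEST_SLOTS_PER_ROW + uGuestType;
}
#endif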
886
887#ifdef IN_RING0
888/**
889 * #PF Handler.
890 *
891 * @returns VBox status code (appropriate for trap handling and GC return).
892 * @param pVCpu The cross context virtual CPU structure.
893 * @param uErr The trap error code.
894 * @param pRegFrame Trap register frame.
895 * @param pvFault The fault address.
896 */
897VMMDECL(int) PGMTrap0eHandler(PVMCPUCC pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
898{
899 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
900
901 Log(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv eip=%04x:%RGv cr3=%RGp\n", uErr, pvFault, pRegFrame->cs.Sel, (RTGCPTR)pRegFrame->rip, (RTGCPHYS)CPUMGetGuestCR3(pVCpu)));
902 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.StatRZTrap0e, a);
903 STAM_STATS({ pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = NULL; } );
904
905
906# ifdef VBOX_WITH_STATISTICS
907 /*
908 * Error code stats.
909 */
910 if (uErr & X86_TRAP_PF_US)
911 {
912 if (!(uErr & X86_TRAP_PF_P))
913 {
914 if (uErr & X86_TRAP_PF_RW)
915 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSNotPresentWrite);
916 else
917 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSNotPresentRead);
918 }
919 else if (uErr & X86_TRAP_PF_RW)
920 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSWrite);
921 else if (uErr & X86_TRAP_PF_RSVD)
922 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSReserved);
923 else if (uErr & X86_TRAP_PF_ID)
924 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSNXE);
925 else
926 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSRead);
927 }
928 else
929 { /* Supervisor */
930 if (!(uErr & X86_TRAP_PF_P))
931 {
932 if (uErr & X86_TRAP_PF_RW)
933 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVNotPresentWrite);
934 else
935 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVNotPresentRead);
936 }
937 else if (uErr & X86_TRAP_PF_RW)
938 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVWrite);
939 else if (uErr & X86_TRAP_PF_ID)
940 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSNXE);
941 else if (uErr & X86_TRAP_PF_RSVD)
942 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVReserved);
943 }
944# endif /* VBOX_WITH_STATISTICS */
945
946 /*
947 * Call the worker.
948 */
949 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
950 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
951 AssertReturn(g_aPgmBothModeData[idxBth].pfnTrap0eHandler, VERR_PGM_MODE_IPE);
952 bool fLockTaken = false;
953 int rc = g_aPgmBothModeData[idxBth].pfnTrap0eHandler(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken);
954 if (fLockTaken)
955 {
956 PGM_LOCK_ASSERT_OWNER(pVM);
957 PGM_UNLOCK(pVM);
958 }
959 LogFlow(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv rc=%Rrc\n", uErr, pvFault, rc));
960
961 /*
962 * Return code tweaks.
963 */
964 if (rc != VINF_SUCCESS)
965 {
966 if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
967 rc = VINF_SUCCESS;
968
969 /* Note: hack alert for difficult to reproduce problem. */
970 if ( rc == VERR_PAGE_NOT_PRESENT /* SMP only ; disassembly might fail. */
971 || rc == VERR_PAGE_TABLE_NOT_PRESENT /* seen with UNI & SMP */
972 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT /* seen with SMP */
973 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT) /* precaution */
974 {
975 Log(("WARNING: Unexpected VERR_PAGE_TABLE_NOT_PRESENT (%d) for page fault at %RGv error code %x (rip=%RGv)\n", rc, pvFault, uErr, pRegFrame->rip));
976 /* Some kind of inconsistency in the SMP case; it's safe to just execute the instruction again; not sure about single VCPU VMs though. */
977 rc = VINF_SUCCESS;
978 }
979 }
980
981 STAM_STATS({ if (rc == VINF_EM_RAW_GUEST_TRAP) STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eGuestPF); });
982 STAM_STATS({ if (!pVCpu->pgmr0.s.pStatTrap0eAttributionR0)
983 pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = &pVCpu->pgm.s.Stats.StatRZTrap0eTime2Misc; });
984 STAM_PROFILE_STOP_EX(&pVCpu->pgm.s.Stats.StatRZTrap0e, pVCpu->pgmr0.s.pStatTrap0eAttributionR0, a);
985 return rc;
986}
987#endif /* IN_RING0 */
988
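/*
 * The statistics block in PGMTrap0eHandler classifies faults by the
 * architectural x86 #PF error-code bits: P (0 = not present, 1 = protection
 * violation), RW (write), US (user-mode access), RSVD (reserved bit set in a
 * paging structure) and ID (instruction fetch).  A small sketch decoding those
 * bits into a log line follows; the helper name is invented.
 */
#if 0 /* illustrative sketch only - invented helper name, kept out of the build */
static void sketchDescribePageFault(uint32_t uErr)
{
    bool const fPresent    = RT_BOOL(uErr & X86_TRAP_PF_P);    /* protection violation vs. not-present */
    bool const fWrite      = RT_BOOL(uErr & X86_TRAP_PF_RW);   /* write vs. read */
    bool const fUser       = RT_BOOL(uErr & X86_TRAP_PF_US);   /* user vs. supervisor access */
    bool const fRsvdBits   = RT_BOOL(uErr & X86_TRAP_PF_RSVD); /* reserved bit violation */
    bool const fInstrFetch = RT_BOOL(uErr & X86_TRAP_PF_ID);   /* instruction fetch (NX relevant) */
    Log(("#PF: %s %s by %s%s%s\n", fPresent ? "protection-violation" : "not-present",
         fWrite ? "write" : "read", fUser ? "user code" : "supervisor code",
         fRsvdBits ? ", reserved bits set" : "", fInstrFetch ? ", instruction fetch" : ""));
}
#endif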
989
990/**
991 * Prefetch a page
992 *
993 * Typically used to sync commonly used pages before entering raw mode
994 * after a CR3 reload.
995 *
996 * @returns VBox status code suitable for scheduling.
997 * @retval VINF_SUCCESS on success.
998 * @retval VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
999 * @param pVCpu The cross context virtual CPU structure.
1000 * @param GCPtrPage Page to invalidate.
1001 */
1002VMMDECL(int) PGMPrefetchPage(PVMCPUCC pVCpu, RTGCPTR GCPtrPage)
1003{
1004 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,Prefetch), a);
1005
1006 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
1007 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
1008 AssertReturn(g_aPgmBothModeData[idxBth].pfnPrefetchPage, VERR_PGM_MODE_IPE);
1009 int rc = g_aPgmBothModeData[idxBth].pfnPrefetchPage(pVCpu, GCPtrPage);
1010
1011 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,Prefetch), a);
1012 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
1013 return rc;
1014}
1015
1016
1017/**
1018 * Verifies a range of pages for read or write access
1019 *
1020 * Only checks the guest's page tables
1021 *
1022 * @returns VBox status code.
1023 * @param pVCpu The cross context virtual CPU structure.
1024 * @param Addr Guest virtual address to check
1025 * @param cbSize Access size
1026 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
1027 * @remarks Currently not in use.
1028 */
1029VMMDECL(int) PGMIsValidAccess(PVMCPUCC pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
1030{
1031 /*
1032 * Validate input.
1033 */
1034 if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
1035 {
1036 AssertMsgFailed(("PGMIsValidAccess: invalid access type %08x\n", fAccess));
1037 return VERR_INVALID_PARAMETER;
1038 }
1039
1040 uint64_t fPage;
1041 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)Addr, &fPage, NULL);
1042 if (RT_FAILURE(rc))
1043 {
1044 Log(("PGMIsValidAccess: access violation for %RGv rc=%d\n", Addr, rc));
1045 return VINF_EM_RAW_GUEST_TRAP;
1046 }
1047
1048 /*
1049 * Check if the access would cause a page fault
1050 *
1051 * Note that hypervisor page directories are not present in the guest's tables, so this check
1052 * is sufficient.
1053 */
1054 bool fWrite = !!(fAccess & X86_PTE_RW);
1055 bool fUser = !!(fAccess & X86_PTE_US);
1056 if ( !(fPage & X86_PTE_P)
1057 || (fWrite && !(fPage & X86_PTE_RW))
1058 || (fUser && !(fPage & X86_PTE_US)) )
1059 {
1060 Log(("PGMIsValidAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPage, fWrite, fUser));
1061 return VINF_EM_RAW_GUEST_TRAP;
1062 }
1063 if ( RT_SUCCESS(rc)
1064 && PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize))
1065 return PGMIsValidAccess(pVCpu, Addr + PAGE_SIZE, (cbSize > PAGE_SIZE) ? cbSize - PAGE_SIZE : 1, fAccess);
1066 return rc;
1067}
1068
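/*
 * PGMIsValidAccess recurses when the checked range spills into the next page;
 * PGMVerifyAccess below does the same with an explicit loop.  Both compare the
 * page frame of the first and the last byte (note the two functions differ
 * slightly: this one compares against Addr + cbSize, the next one against
 * Addr + cbSize - 1).  A sketch of the last-byte form of the test with the
 * 4 KiB mask written out follows; the name is invented.
 */
#if 0 /* illustrative sketch only - invented name, kept out of the build */
static bool sketchRangeCrossesPage(uint64_t uAddr, uint32_t cb)
{
    uint64_t const fPageFrameMask = ~(uint64_t)0xfff;   /* 4 KiB page frame mask */
    /* The range [uAddr, uAddr + cb - 1] stays within one page iff both ends share the frame. */
    return (uAddr & fPageFrameMask) != ((uAddr + cb - 1) & fPageFrameMask);
}
#endif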
1069
1070/**
1071 * Verifies a range of pages for read or write access
1072 *
1073 * Supports handling of pages marked for dirty bit tracking and CSAM
1074 *
1075 * @returns VBox status code.
1076 * @param pVCpu The cross context virtual CPU structure.
1077 * @param Addr Guest virtual address to check
1078 * @param cbSize Access size
1079 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
1080 */
1081VMMDECL(int) PGMVerifyAccess(PVMCPUCC pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
1082{
1083 PVM pVM = pVCpu->CTX_SUFF(pVM);
1084
1085 AssertMsg(!(fAccess & ~(X86_PTE_US | X86_PTE_RW)), ("PGMVerifyAccess: invalid access type %08x\n", fAccess));
1086
1087 /*
1088 * Get going.
1089 */
1090 uint64_t fPageGst;
1091 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)Addr, &fPageGst, NULL);
1092 if (RT_FAILURE(rc))
1093 {
1094 Log(("PGMVerifyAccess: access violation for %RGv rc=%d\n", Addr, rc));
1095 return VINF_EM_RAW_GUEST_TRAP;
1096 }
1097
1098 /*
1099 * Check if the access would cause a page fault
1100 *
1101 * Note that hypervisor page directories are not present in the guest's tables, so this check
1102 * is sufficient.
1103 */
1104 const bool fWrite = !!(fAccess & X86_PTE_RW);
1105 const bool fUser = !!(fAccess & X86_PTE_US);
1106 if ( !(fPageGst & X86_PTE_P)
1107 || (fWrite && !(fPageGst & X86_PTE_RW))
1108 || (fUser && !(fPageGst & X86_PTE_US)) )
1109 {
1110 Log(("PGMVerifyAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPageGst, fWrite, fUser));
1111 return VINF_EM_RAW_GUEST_TRAP;
1112 }
1113
1114 if (!pVM->pgm.s.fNestedPaging)
1115 {
1116 /*
1117 * Next step is to verify if we protected this page for dirty bit tracking or for CSAM scanning
1118 */
1119 rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, NULL, NULL);
1120 if ( rc == VERR_PAGE_NOT_PRESENT
1121 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
1122 {
1123 /*
1124 * Page is not present in our page tables.
1125 * Try to sync it!
1126 */
1127 Assert(X86_TRAP_PF_RW == X86_PTE_RW && X86_TRAP_PF_US == X86_PTE_US);
1128 uint32_t const uErr = fAccess & (X86_TRAP_PF_RW | X86_TRAP_PF_US);
1129 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
1130 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
1131 AssertReturn(g_aPgmBothModeData[idxBth].pfnVerifyAccessSyncPage, VERR_PGM_MODE_IPE);
1132 rc = g_aPgmBothModeData[idxBth].pfnVerifyAccessSyncPage(pVCpu, Addr, fPageGst, uErr);
1133 if (rc != VINF_SUCCESS)
1134 return rc;
1135 }
1136 else
1137 AssertMsg(rc == VINF_SUCCESS, ("PGMShwGetPage %RGv failed with %Rrc\n", Addr, rc));
1138 }
1139
1140#if 0 /* def VBOX_STRICT; triggers too often now */
1141 /*
1142 * This check is a bit paranoid, but useful.
1143 */
1144 /* Note! This will assert when writing to monitored pages (a bit annoying actually). */
1145 uint64_t fPageShw;
1146 rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, &fPageShw, NULL);
1147 if ( (rc == VERR_PAGE_NOT_PRESENT || RT_FAILURE(rc))
1148 || (fWrite && !(fPageShw & X86_PTE_RW))
1149 || (fUser && !(fPageShw & X86_PTE_US)) )
1150 {
1151 AssertMsgFailed(("Unexpected access violation for %RGv! rc=%Rrc write=%d user=%d\n",
1152 Addr, rc, fWrite && !(fPageShw & X86_PTE_RW), fUser && !(fPageShw & X86_PTE_US)));
1153 return VINF_EM_RAW_GUEST_TRAP;
1154 }
1155#endif
1156
1157 if ( RT_SUCCESS(rc)
1158 && ( PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize - 1)
1159 || Addr + cbSize < Addr))
1160 {
1161 /* Don't recursively call PGMVerifyAccess as we might run out of stack. */
1162 for (;;)
1163 {
1164 Addr += PAGE_SIZE;
1165 if (cbSize > PAGE_SIZE)
1166 cbSize -= PAGE_SIZE;
1167 else
1168 cbSize = 1;
1169 rc = PGMVerifyAccess(pVCpu, Addr, 1, fAccess);
1170 if (rc != VINF_SUCCESS)
1171 break;
1172 if (PAGE_ADDRESS(Addr) == PAGE_ADDRESS(Addr + cbSize - 1))
1173 break;
1174 }
1175 }
1176 return rc;
1177}
1178
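/*
 * The loop above walks the remainder of the range one page at a time instead
 * of recursing (the in-code comment explains why: stack usage).  A generic
 * sketch of such a page-by-page walk over a byte range follows; all names are
 * invented.
 */
#if 0 /* illustrative sketch only - invented names, kept out of the build */
static int sketchForEachPage(uint64_t uAddr, uint32_t cb, int (*pfnOnePage)(uint64_t uPageAddr))
{
    if (!cb)
        return 0;
    for (;;)
    {
        int rc = pfnOnePage(uAddr & ~(uint64_t)0xfff);          /* the 4 KiB page containing uAddr */
        if (rc != 0)
            return rc;
        if ((uAddr & ~(uint64_t)0xfff) == ((uAddr + cb - 1) & ~(uint64_t)0xfff))
            return 0;                                           /* the last byte is in this page - done */
        uint32_t const cbThisPage = 0x1000 - ((uint32_t)uAddr & 0xfff); /* bytes left in the current page */
        uAddr += cbThisPage;
        cb    -= cbThisPage;
    }
}
#endif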
1179
1180/**
1181 * Emulation of the invlpg instruction (HC only actually).
1182 *
1183 * @returns Strict VBox status code, special care required.
1184 * @retval VINF_PGM_SYNC_CR3 - handled.
1185 * @retval VINF_EM_RAW_EMULATE_INSTR - not handled (RC only).
1186 * @retval VERR_REM_FLUSHED_PAGES_OVERFLOW - not handled.
1187 *
1188 * @param pVCpu The cross context virtual CPU structure.
1189 * @param GCPtrPage Page to invalidate.
1190 *
1191 * @remark ASSUMES the page table entry or page directory is valid. Fairly
1192 * safe, but there could be edge cases!
1193 *
1194 * @todo Flush page or page directory only if necessary!
1195 * @todo VBOXSTRICTRC
1196 */
1197VMMDECL(int) PGMInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtrPage)
1198{
1199 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1200 int rc;
1201 Log3(("PGMInvalidatePage: GCPtrPage=%RGv\n", GCPtrPage));
1202
1203 IEMTlbInvalidatePage(pVCpu, GCPtrPage);
1204
1205 /*
1206 * Call paging mode specific worker.
1207 */
1208 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,InvalidatePage), a);
1209 PGM_LOCK_VOID(pVM);
1210
1211 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
1212 AssertReturnStmt(idxBth < RT_ELEMENTS(g_aPgmBothModeData), PGM_UNLOCK(pVM), VERR_PGM_MODE_IPE);
1213 AssertReturnStmt(g_aPgmBothModeData[idxBth].pfnInvalidatePage, PGM_UNLOCK(pVM), VERR_PGM_MODE_IPE);
1214 rc = g_aPgmBothModeData[idxBth].pfnInvalidatePage(pVCpu, GCPtrPage);
1215
1216 PGM_UNLOCK(pVM);
1217 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,InvalidatePage), a);
1218
1219 /* Ignore all irrelevant error codes. */
1220 if ( rc == VERR_PAGE_NOT_PRESENT
1221 || rc == VERR_PAGE_TABLE_NOT_PRESENT
1222 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT
1223 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)
1224 rc = VINF_SUCCESS;
1225
1226 return rc;
1227}
1228
1229
1230/**
1231 * Executes an instruction using the interpreter.
1232 *
1233 * @returns VBox status code (appropriate for trap handling and GC return).
1234 * @param pVM The cross context VM structure.
1235 * @param pVCpu The cross context virtual CPU structure.
1236 * @param pRegFrame Register frame.
1237 * @param pvFault Fault address.
1238 */
1239VMMDECL(VBOXSTRICTRC) PGMInterpretInstruction(PVMCC pVM, PVMCPUCC pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
1240{
1241 NOREF(pVM);
1242 VBOXSTRICTRC rc = EMInterpretInstruction(pVCpu, pRegFrame, pvFault);
1243 if (rc == VERR_EM_INTERPRETER)
1244 rc = VINF_EM_RAW_EMULATE_INSTR;
1245 if (rc != VINF_SUCCESS)
1246 Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%RGv)\n", VBOXSTRICTRC_VAL(rc), pvFault));
1247 return rc;
1248}
1249
1250
1251/**
1252 * Gets effective page information (from the VMM page directory).
1253 *
1254 * @returns VBox status code.
1255 * @param pVCpu The cross context virtual CPU structure.
1256 * @param GCPtr Guest Context virtual address of the page.
1257 * @param pfFlags Where to store the flags. These are X86_PTE_*.
1258 * @param pHCPhys Where to store the HC physical address of the page.
1259 * This is page aligned.
1260 * @remark You should use PGMMapGetPage() for pages in a mapping.
1261 */
1262VMMDECL(int) PGMShwGetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
1263{
1264 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1265 PGM_LOCK_VOID(pVM);
1266
1267 uintptr_t idxShw = pVCpu->pgm.s.idxShadowModeData;
1268 AssertReturn(idxShw < RT_ELEMENTS(g_aPgmShadowModeData), VERR_PGM_MODE_IPE);
1269 AssertReturn(g_aPgmShadowModeData[idxShw].pfnGetPage, VERR_PGM_MODE_IPE);
1270 int rc = g_aPgmShadowModeData[idxShw].pfnGetPage(pVCpu, GCPtr, pfFlags, pHCPhys);
1271
1272 PGM_UNLOCK(pVM);
1273 return rc;
1274}
1275
1276
1277/**
1278 * Modify page flags for a range of pages in the shadow context.
1279 *
1280 * The existing flags are ANDed with the fMask and ORed with the fFlags.
1281 *
1282 * @returns VBox status code.
1283 * @param pVCpu The cross context virtual CPU structure.
1284 * @param GCPtr Virtual address of the first page in the range.
1285 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
1286 * @param fMask The AND mask - page flags X86_PTE_*.
1287 * Be very CAREFUL when ~'ing constants which could be 32-bit!
1288 * @param fOpFlags A combination of the PGM_MK_PK_XXX flags.
1289 * @remark You must use PGMMapModifyPage() for pages in a mapping.
1290 */
1291DECLINLINE(int) pdmShwModifyPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags)
1292{
1293 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
1294 Assert(!(fOpFlags & ~(PGM_MK_PG_IS_MMIO2 | PGM_MK_PG_IS_WRITE_FAULT)));
1295
1296 GCPtr &= PAGE_BASE_GC_MASK; /** @todo this ain't necessary, right... */
1297
1298 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1299 PGM_LOCK_VOID(pVM);
1300
1301 uintptr_t idxShw = pVCpu->pgm.s.idxShadowModeData;
1302 AssertReturn(idxShw < RT_ELEMENTS(g_aPgmShadowModeData), VERR_PGM_MODE_IPE);
1303 AssertReturn(g_aPgmShadowModeData[idxShw].pfnModifyPage, VERR_PGM_MODE_IPE);
1304 int rc = g_aPgmShadowModeData[idxShw].pfnModifyPage(pVCpu, GCPtr, PAGE_SIZE, fFlags, fMask, fOpFlags);
1305
1306 PGM_UNLOCK(pVM);
1307 return rc;
1308}
1309
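/*
 * As the doxygen comment above states, pdmShwModifyPage ANDs the existing
 * flags with fMask and ORs in fFlags; the PGMShwMakePage* wrappers that follow
 * are just specific mask pairs.  A sketch of that read-modify-write on a raw
 * page-table-entry value follows, together with the mask pairs the wrappers
 * pass; the helper name is invented.
 */
#if 0 /* illustrative sketch only - invented helper name, kept out of the build */
static uint64_t sketchApplyMasks(uint64_t uPte, uint64_t fFlags, uint64_t fMask)
{
    return (uPte & fMask) | fFlags;     /* AND with the keep-mask, then OR in the new flags. */
}

/* Mask pairs mirroring the wrappers below:
 *   read-only:    fFlags = 0,          fMask = ~(uint64_t)X86_PTE_RW   -> clears RW, keeps the rest
 *   writable:     fFlags = X86_PTE_RW, fMask = ~(uint64_t)0            -> sets RW, keeps the rest
 *   not present:  fFlags = 0,          fMask = 0                       -> clears every flag incl. P
 */
#endif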
1310
1311/**
1312 * Changing the page flags for a single page in the shadow page tables so as to
1313 * make it read-only.
1314 *
1315 * @returns VBox status code.
1316 * @param pVCpu The cross context virtual CPU structure.
1317 * @param GCPtr Virtual address of the first page in the range.
1318 * @param fOpFlags A combination of the PGM_MK_PK_XXX flags.
1319 */
1320VMMDECL(int) PGMShwMakePageReadonly(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
1321{
1322 return pdmShwModifyPage(pVCpu, GCPtr, 0, ~(uint64_t)X86_PTE_RW, fOpFlags);
1323}
1324
1325
1326/**
1327 * Changing the page flags for a single page in the shadow page tables so as to
1328 * make it writable.
1329 *
1330 * The caller must know with 101% certainty that the guest page tables map this
1331 * as writable too. This function will deal with shared, zero and write monitored
1332 * pages.
1333 *
1334 * @returns VBox status code.
1335 * @param pVCpu The cross context virtual CPU structure.
1336 * @param GCPtr Virtual address of the first page in the range.
1337 * @param fOpFlags A combination of the PGM_MK_PK_XXX flags.
1338 */
1339VMMDECL(int) PGMShwMakePageWritable(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
1340{
1341 if (pVCpu->pgm.s.enmShadowMode != PGMMODE_NONE) /* avoid assertions */
1342 return pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)0, fOpFlags);
1343 return VINF_SUCCESS;
1344}
1345
1346
1347/**
1348 * Changing the page flags for a single page in the shadow page tables so as to
1349 * make it not present.
1350 *
1351 * @returns VBox status code.
1352 * @param pVCpu The cross context virtual CPU structure.
1353 * @param GCPtr Virtual address of the first page in the range.
1354 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
1355 */
1356VMMDECL(int) PGMShwMakePageNotPresent(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
1357{
1358 return pdmShwModifyPage(pVCpu, GCPtr, 0, 0, fOpFlags);
1359}
1360
1361
1362/**
1363 * Changing the page flags for a single page in the shadow page tables so as to
1364 * make it supervisor and writable.
1365 *
1366 * This is for dealing with CR0.WP=0 and read-only user pages.
1367 *
1368 * @returns VBox status code.
1369 * @param pVCpu The cross context virtual CPU structure.
1370 * @param GCPtr Virtual address of the first page in the range.
1371 * @param fBigPage Whether or not this is a big page. If it is, we have to
1372 * change the shadow PDE as well. If it isn't, the caller
1373 * has checked that the shadow PDE doesn't need changing.
1374 * We ASSUME 4KB pages backing the big page here!
1375 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
1376 */
1377int pgmShwMakePageSupervisorAndWritable(PVMCPUCC pVCpu, RTGCPTR GCPtr, bool fBigPage, uint32_t fOpFlags)
1378{
1379 int rc = pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)X86_PTE_US, fOpFlags);
1380 if (rc == VINF_SUCCESS && fBigPage)
1381 {
1382 /* this is a bit ugly... */
1383 switch (pVCpu->pgm.s.enmShadowMode)
1384 {
1385 case PGMMODE_32_BIT:
1386 {
1387 PX86PDE pPde = pgmShwGet32BitPDEPtr(pVCpu, GCPtr);
1388 AssertReturn(pPde, VERR_INTERNAL_ERROR_3);
1389 Log(("pgmShwMakePageSupervisorAndWritable: PDE=%#llx", pPde->u));
1390 pPde->u |= X86_PDE_RW;
1391 Log(("-> PDE=%#llx (32)\n", pPde->u));
1392 break;
1393 }
1394 case PGMMODE_PAE:
1395 case PGMMODE_PAE_NX:
1396 {
1397 PX86PDEPAE pPde = pgmShwGetPaePDEPtr(pVCpu, GCPtr);
1398 AssertReturn(pPde, VERR_INTERNAL_ERROR_3);
1399 Log(("pgmShwMakePageSupervisorAndWritable: PDE=%#llx", pPde->u));
1400 pPde->u |= X86_PDE_RW;
1401 Log(("-> PDE=%#llx (PAE)\n", pPde->u));
1402 break;
1403 }
1404 default:
1405 AssertFailedReturn(VERR_INTERNAL_ERROR_4);
1406 }
1407 }
1408 return rc;
1409}
1410
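/*
 * pgmShwMakePageSupervisorAndWritable also sets X86_PDE_RW on the shadow PDE
 * for big pages because write permission on x86 is the AND across the paging
 * levels used by the translation (in the CR0.WP=1 view; with CR0.WP=0
 * supervisor writes ignore RW, which is the case being emulated here).  A
 * sketch of the combined 4 KiB check, ignoring NX and user bits to stay
 * minimal, follows; the name is invented.
 */
#if 0 /* illustrative sketch only - invented name, kept out of the build */
static bool sketchIsWritable4K(uint64_t uPde, uint64_t uPte)
{
    /* A write through a 4 KiB translation needs RW set in both the PDE and the PTE. */
    return (uPde & X86_PDE_RW) && (uPte & X86_PTE_RW);
}
#endif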
1411
1412/**
1413 * Gets the shadow page directory for the specified address, PAE.
1414 *
1415 * @returns Pointer to the shadow PD.
1416 * @param pVCpu The cross context virtual CPU structure.
1417 * @param GCPtr The address.
1418 * @param uGstPdpe Guest PDPT entry. Valid.
1419 * @param ppPD Receives address of page directory
1420 */
1421int pgmShwSyncPaePDPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
1422{
1423 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1424 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1425 PPGMPOOLPAGE pShwPage;
1426 int rc;
1427 PGM_LOCK_ASSERT_OWNER(pVM);
1428
1429
1430 /* Allocate page directory if not present. */
1431 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
1432 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
1433 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1434 X86PGPAEUINT const uPdpe = pPdpe->u;
1435 if (uPdpe & (X86_PDPE_P | X86_PDPE_PG_MASK))
1436 {
1437 pShwPage = pgmPoolGetPage(pPool, uPdpe & X86_PDPE_PG_MASK);
1438 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1439 Assert((pPdpe->u & X86_PDPE_PG_MASK) == pShwPage->Core.Key);
1440
1441 pgmPoolCacheUsed(pPool, pShwPage);
1442
1443 /* Update the entry if necessary. */
1444 X86PGPAEUINT const uPdpeNew = pShwPage->Core.Key | (uGstPdpe & (X86_PDPE_P | X86_PDPE_A)) | (uPdpe & PGM_PDPT_FLAGS);
1445 if (uPdpeNew == uPdpe)
1446 { /* likely */ }
1447 else
1448 ASMAtomicWriteU64(&pPdpe->u, uPdpeNew);
1449 }
1450 else
1451 {
1452 RTGCPTR64 GCPdPt;
1453 PGMPOOLKIND enmKind;
1454 if (pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu))
1455 {
1456 /* AMD-V nested paging or real/protected mode without paging. */
1457 GCPdPt = GCPtr & ~(RT_BIT_64(X86_PDPT_SHIFT) - 1);
1458 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
1459 }
1460 else if (CPUMGetGuestCR4(pVCpu) & X86_CR4_PAE)
1461 {
1462 if (uGstPdpe & X86_PDPE_P)
1463 {
1464 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1465 enmKind = PGMPOOLKIND_PAE_PD_FOR_PAE_PD;
1466 }
1467 else
1468 {
1469 /* PD not present; guest must reload CR3 to change it.
1470 * No need to monitor anything in this case. */
1471 /** @todo r=bird: WTF is hit?!? */
1472 Assert(VM_IS_RAW_MODE_ENABLED(pVM));
1473 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1474 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
1475 Assert(uGstPdpe & X86_PDPE_P); /* caller should do this already */
1476 }
1477 }
1478 else
1479 {
1480 GCPdPt = CPUMGetGuestCR3(pVCpu);
1481 enmKind = (PGMPOOLKIND)(PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD + iPdPt);
1482 }
1483
1484 /* Create a reference back to the PDPT by using the index in its shadow page. */
1485 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1486 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPdPt, false /*fLockPage*/,
1487 &pShwPage);
1488 AssertRCReturn(rc, rc);
1489
1490 /* Hook it up. */
1491 ASMAtomicWriteU64(&pPdpe->u, pShwPage->Core.Key | (uGstPdpe & (X86_PDPE_P | X86_PDPE_A)) | (uPdpe & PGM_PDPT_FLAGS));
1492 }
1493 PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdpe);
1494
1495 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1496 return VINF_SUCCESS;
1497}
1498
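/*
 * Both pgmShwSyncPaePDPtr above and pgmShwGetPaePoolPagePD below extract the
 * PDPT index as (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE.  Under PAE a
 * 32-bit linear address splits into a 2-bit PDPT index (bits 31:30), a 9-bit
 * page directory index (bits 29:21), a 9-bit page table index (bits 20:12)
 * and a 12-bit page offset.  A sketch with the shifts and masks written out
 * as literals follows; the name is invented.
 */
#if 0 /* illustrative sketch only - invented name, kept out of the build */
static void sketchPaeIndices(uint32_t GCPtr, unsigned *piPdpt, unsigned *piPd, unsigned *piPt)
{
    *piPdpt = (GCPtr >> 30) & 0x3;      /* 2-bit PDPT index */
    *piPd   = (GCPtr >> 21) & 0x1ff;    /* 9-bit page directory index */
    *piPt   = (GCPtr >> 12) & 0x1ff;    /* 9-bit page table index; bits 11:0 are the page offset */
}
#endif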
1499
1500/**
1501 * Gets the pointer to the shadow page directory entry for an address, PAE.
1502 *
1503 * @returns Pointer to the PDE.
1504 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1505 * @param GCPtr The address.
1506 * @param ppShwPde Receives the address of the pgm pool page for the shadow page directory
1507 */
1508DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde)
1509{
1510 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1511 PGM_LOCK_ASSERT_OWNER(pVM);
1512
1513 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
1514 AssertReturn(pPdpt, VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT); /* can't happen */
1515 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
1516 X86PGPAEUINT const uPdpe = pPdpt->a[iPdPt].u;
1517 if (!(uPdpe & X86_PDPE_P))
1518 {
1519 LogFlow(("pgmShwGetPaePoolPagePD: PD %d not present (%RX64)\n", iPdPt, uPdpe));
1520 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1521 }
1522 AssertMsg(uPdpe & X86_PDPE_PG_MASK, ("GCPtr=%RGv\n", GCPtr));
1523
1524 /* Fetch the pgm pool shadow descriptor. */
1525 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), uPdpe & X86_PDPE_PG_MASK);
1526 AssertReturn(pShwPde, VERR_PGM_POOL_GET_PAGE_FAILED);
1527
1528 *ppShwPde = pShwPde;
1529 return VINF_SUCCESS;
1530}
1531
1532
1533/**
1534 * Syncs the SHADOW page directory pointer for the specified address.
1535 *
1536 * Allocates backing pages in case the PDPT or PML4 entry is missing.
1537 *
1538 * The caller is responsible for making sure the guest has a valid PD before
1539 * calling this function.
1540 *
1541 * @returns VBox status code.
1542 * @param pVCpu The cross context virtual CPU structure.
1543 * @param GCPtr The address.
1544 * @param uGstPml4e Guest PML4 entry (valid).
1545 * @param uGstPdpe Guest PDPT entry (valid).
 * @param ppPD Receives the address of the page directory.
1547 */
1548static int pgmShwSyncLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
1549{
1550 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1551 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1552 bool const fNestedPagingOrNoGstPaging = pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu);
1553 int rc;
1554
1555 PGM_LOCK_ASSERT_OWNER(pVM);
1556
1557 /*
1558 * PML4.
1559 */
1560 PPGMPOOLPAGE pShwPage;
1561 {
1562 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1563 PX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
1564 X86PGPAEUINT const uPml4e = pPml4e->u;
1565
1566 /* Allocate page directory pointer table if not present. */
1567 if (uPml4e & (X86_PML4E_P | X86_PML4E_PG_MASK))
1568 {
1569 pShwPage = pgmPoolGetPage(pPool, uPml4e & X86_PML4E_PG_MASK);
1570 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1571
1572 pgmPoolCacheUsed(pPool, pShwPage);
1573
1574 /* Update the entry if needed. */
1575 X86PGPAEUINT const uPml4eNew = pShwPage->Core.Key | (uGstPml4e & pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask)
1576 | (uPml4e & PGM_PML4_FLAGS);
1577 if (uPml4e == uPml4eNew)
1578 { /* likely */ }
1579 else
1580 ASMAtomicWriteU64(&pPml4e->u, uPml4eNew);
1581 }
1582 else
1583 {
1584 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1585
1586 RTGCPTR64 GCPml4;
1587 PGMPOOLKIND enmKind;
1588 if (fNestedPagingOrNoGstPaging)
1589 {
1590 /* AMD-V nested paging or real/protected mode without paging */
1591 GCPml4 = (RTGCPTR64)iPml4 << X86_PML4_SHIFT; /** @todo bogus calculation for PML5 */
1592 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_PHYS;
1593 }
1594 else
1595 {
1596 GCPml4 = uGstPml4e & X86_PML4E_PG_MASK;
1597 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT;
1598 }
1599
1600 /* Create a reference back to the PDPT by using the index in its shadow page. */
1601 rc = pgmPoolAlloc(pVM, GCPml4, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1602 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, false /*fLockPage*/,
1603 &pShwPage);
1604 AssertRCReturn(rc, rc);
1605
1606 /* Hook it up. */
1607 ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | (uGstPml4e & pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask)
1608 | (uPml4e & PGM_PML4_FLAGS));
1609 }
1610 }
1611
1612 /*
1613 * PDPT.
1614 */
1615 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1616 PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1617 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1618 X86PGPAEUINT const uPdpe = pPdpe->u;
1619
1620 /* Allocate page directory if not present. */
1621 if (uPdpe & (X86_PDPE_P | X86_PDPE_PG_MASK))
1622 {
1623 pShwPage = pgmPoolGetPage(pPool, uPdpe & X86_PDPE_PG_MASK);
1624 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1625
1626 pgmPoolCacheUsed(pPool, pShwPage);
1627
1628 /* Update the entry if needed. */
1629 X86PGPAEUINT const uPdpeNew = pShwPage->Core.Key | (uGstPdpe & pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask)
1630 | (uPdpe & PGM_PDPT_FLAGS);
1631 if (uPdpe == uPdpeNew)
1632 { /* likely */ }
1633 else
1634 ASMAtomicWriteU64(&pPdpe->u, uPdpeNew);
1635 }
1636 else
1637 {
1638 RTGCPTR64 GCPdPt;
1639 PGMPOOLKIND enmKind;
1640 if (fNestedPagingOrNoGstPaging)
1641 {
1642 /* AMD-V nested paging or real/protected mode without paging */
1643 GCPdPt = GCPtr & ~(RT_BIT_64(iPdPt << X86_PDPT_SHIFT) - 1);
1644 enmKind = PGMPOOLKIND_64BIT_PD_FOR_PHYS;
1645 }
1646 else
1647 {
1648 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1649 enmKind = PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD;
1650 }
1651
1652 /* Create a reference back to the PDPT by using the index in its shadow page. */
1653 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1654 pShwPage->idx, iPdPt, false /*fLockPage*/,
1655 &pShwPage);
1656 AssertRCReturn(rc, rc);
1657
1658 /* Hook it up. */
1659 ASMAtomicWriteU64(&pPdpe->u,
1660 pShwPage->Core.Key | (uGstPdpe & pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask) | (uPdpe & PGM_PDPT_FLAGS));
1661 }
1662
1663 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1664 return VINF_SUCCESS;
1665}
1666
1667
1668/**
1669 * Gets the SHADOW page directory pointer for the specified address (long mode).
1670 *
1671 * @returns VBox status code.
1672 * @param pVCpu The cross context virtual CPU structure.
1673 * @param GCPtr The address.
1674 * @param ppPml4e Receives the address of the page map level 4 entry.
1675 * @param ppPdpt Receives the address of the page directory pointer table.
1676 * @param ppPD Receives the address of the page directory.
1677 */
1678DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
1679{
1680 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1681 PGM_LOCK_ASSERT_OWNER(pVM);
1682
1683 /*
1684 * PML4
1685 */
1686 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1687 PCX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
1688 AssertReturn(pPml4e, VERR_PGM_PML4_MAPPING);
1689 if (ppPml4e)
1690 *ppPml4e = (PX86PML4E)pPml4e;
1691 X86PGPAEUINT const uPml4e = pPml4e->u;
1692 Log4(("pgmShwGetLongModePDPtr %RGv (%RHv) %RX64\n", GCPtr, pPml4e, uPml4e));
 if (!(uPml4e & X86_PML4E_P)) /** @todo other code checks for a NULL page frame number! */
1694 return VERR_PAGE_MAP_LEVEL4_NOT_PRESENT;
1695
1696 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1697 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, uPml4e & X86_PML4E_PG_MASK);
1698 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1699
1700 /*
1701 * PDPT
1702 */
1703 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1704 PCX86PDPT pPdpt = *ppPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1705 X86PGPAEUINT const uPdpe = pPdpt->a[iPdPt].u;
 if (!(uPdpe & X86_PDPE_P)) /** @todo other code checks for a NULL page frame number! */
1707 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1708
1709 pShwPage = pgmPoolGetPage(pPool, uPdpe & X86_PDPE_PG_MASK);
1710 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1711
1712 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1713 Log4(("pgmShwGetLongModePDPtr %RGv -> *ppPD=%p PDE=%p/%RX64\n", GCPtr, *ppPD, &(*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK], (*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK].u));
1714 return VINF_SUCCESS;
1715}
1716
1717
1718/**
1719 * Syncs the SHADOW EPT page directory pointer for the specified address. Allocates
1720 * backing pages in case the PDPT or PML4 entry is missing.
1721 *
1722 * @returns VBox status code.
1723 * @param pVCpu The cross context virtual CPU structure.
1724 * @param GCPtr The address.
 * @param ppPdpt Receives the address of the PDPT.
 * @param ppPD Receives the address of the page directory.
1727 */
1728static int pgmShwGetEPTPDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD)
1729{
1730 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1731 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1732 int rc;
1733
1734 Assert(pVM->pgm.s.fNestedPaging);
1735 PGM_LOCK_ASSERT_OWNER(pVM);
1736
1737 /*
1738 * PML4 level.
1739 */
1740 PEPTPML4 pPml4 = (PEPTPML4)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1741 Assert(pPml4);
1742
1743 /* Allocate page directory pointer table if not present. */
1744 PPGMPOOLPAGE pShwPage;
1745 {
1746 const unsigned iPml4 = (GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
1747 PEPTPML4E pPml4e = &pPml4->a[iPml4];
1748 EPTPML4E Pml4e;
1749 Pml4e.u = pPml4e->u;
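 /* An entry with neither a physical address nor the read bit set is treated as not present; a fresh PDPT is allocated for it below. */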
1750 if (!(Pml4e.u & (EPT_E_PG_MASK | EPT_E_READ)))
1751 {
1752 RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT;
1753 rc = pgmPoolAlloc(pVM, GCPml4, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1754 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, false /*fLockPage*/,
1755 &pShwPage);
1756 AssertRCReturn(rc, rc);
1757
1758 /* Hook up the new PDPT now. */
1759 ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1760 }
1761 else
1762 {
1763 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
1764 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1765
1766 pgmPoolCacheUsed(pPool, pShwPage);
1767
1768 /* Hook up the cached PDPT if needed (probably not given 512*512 PTs to sync). */
1769 if (Pml4e.u == (pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE))
1770 { }
1771 else
1772 ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1773 }
1774 }
1775
1776 /*
1777 * PDPT level.
1778 */
1779 const unsigned iPdPt = (GCPtr >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
1780 PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1781 PEPTPDPTE pPdpe = &pPdpt->a[iPdPt];
1782
1783 if (ppPdpt)
1784 *ppPdpt = pPdpt;
1785
1786 /* Allocate page directory if not present. */
1787 EPTPDPTE Pdpe;
1788 Pdpe.u = pPdpe->u;
1789 if (!(Pdpe.u & (EPT_E_PG_MASK | EPT_E_READ)))
1790 {
1791 RTGCPTR64 const GCPdPt = GCPtr & ~(RT_BIT_64(EPT_PDPT_SHIFT) - 1);
1792 rc = pgmPoolAlloc(pVM, GCPdPt, PGMPOOLKIND_EPT_PD_FOR_PHYS, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1793 pShwPage->idx, iPdPt, false /*fLockPage*/,
1794 &pShwPage);
1795 AssertRCReturn(rc, rc);
1796
1797 /* Hook up the new PD now. */
1798 ASMAtomicWriteU64(&pPdpe->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1799 }
1800 else
1801 {
1802 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & EPT_PDPTE_PG_MASK);
1803 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1804
1805 pgmPoolCacheUsed(pPool, pShwPage);
1806
 /* Hook up the cached PD if needed (probably not, given there are 512 PTs we may need to sync). */
1808 if (Pdpe.u == (pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE))
1809 { }
1810 else
1811 ASMAtomicWriteU64(&pPdpe->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1812 }
1813
1814 *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1815 return VINF_SUCCESS;
1816}
1817
1818
1819#ifdef IN_RING0
1820/**
1821 * Synchronizes a range of nested page table entries.
1822 *
1823 * The caller must own the PGM lock.
1824 *
1825 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1826 * @param GCPhys Where to start.
1827 * @param cPages How many pages which entries should be synced.
1828 * @param enmShwPagingMode The shadow paging mode (PGMMODE_EPT for VT-x,
1829 * host paging mode for AMD-V).
1830 */
1831int pgmShwSyncNestedPageLocked(PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint32_t cPages, PGMMODE enmShwPagingMode)
1832{
1833 PGM_LOCK_ASSERT_OWNER(pVCpu->CTX_SUFF(pVM));
1834
1835/** @todo r=bird: Gotta love this nested paging hacking we're still carrying with us... (Split PGM_TYPE_NESTED.) */
1836 int rc;
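 /* Each mode-specific SyncPage worker below is handed a fully permissive dummy PDE (P, RW, US, A); there is no real guest PDE to pass along in the nested paging case. */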
1837 switch (enmShwPagingMode)
1838 {
1839 case PGMMODE_32_BIT:
1840 {
1841 X86PDE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1842 rc = PGM_BTH_NAME_32BIT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1843 break;
1844 }
1845
1846 case PGMMODE_PAE:
1847 case PGMMODE_PAE_NX:
1848 {
1849 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1850 rc = PGM_BTH_NAME_PAE_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1851 break;
1852 }
1853
1854 case PGMMODE_AMD64:
1855 case PGMMODE_AMD64_NX:
1856 {
1857 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1858 rc = PGM_BTH_NAME_AMD64_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1859 break;
1860 }
1861
1862 case PGMMODE_EPT:
1863 {
1864 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1865 rc = PGM_BTH_NAME_EPT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1866 break;
1867 }
1868
1869 default:
1870 AssertMsgFailedReturn(("%d\n", enmShwPagingMode), VERR_IPE_NOT_REACHED_DEFAULT_CASE);
1871 }
1872 return rc;
1873}
1874#endif /* IN_RING0 */
1875
1876
1877/**
1878 * Gets effective Guest OS page information.
1879 *
1880 * When GCPtr is in a big page, the function will return as if it was a normal
 * 4KB page. If distinguishing between big and normal pages becomes necessary
 * at a later point, a dedicated variant of PGMGstGetPage() can be added for
 * that purpose.
1884 *
1885 * @returns VBox status code.
1886 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1887 * @param GCPtr Guest Context virtual address of the page.
1888 * @param pfFlags Where to store the flags. These are X86_PTE_*, even for big pages.
1889 * @param pGCPhys Where to store the GC physical address of the page.
 * This is page aligned.
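 *
 * @note    A minimal illustrative use (hypothetical caller, error handling
 *          omitted):
 *          @code
 *              uint64_t fFlags = 0;
 *              RTGCPHYS GCPhys = NIL_RTGCPHYS;
 *              int rc = PGMGstGetPage(pVCpu, GCPtr, &fFlags, &GCPhys);
 *              if (RT_SUCCESS(rc) && (fFlags & X86_PTE_RW))
 *                  Log(("Writable guest page at %RGv -> %RGp\n", GCPtr, GCPhys));
 *          @endcode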
1891 */
1892VMMDECL(int) PGMGstGetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
1893{
1894 VMCPU_ASSERT_EMT(pVCpu);
1895 uintptr_t idx = pVCpu->pgm.s.idxGuestModeData;
1896 AssertReturn(idx < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
1897 AssertReturn(g_aPgmGuestModeData[idx].pfnGetPage, VERR_PGM_MODE_IPE);
1898 return g_aPgmGuestModeData[idx].pfnGetPage(pVCpu, GCPtr, pfFlags, pGCPhys);
1899}
1900
1901
1902/**
1903 * Performs a guest page table walk.
1904 *
1905 * The guest should be in paged protect mode or long mode when making a call to
1906 * this function.
1907 *
1908 * @returns VBox status code.
1909 * @retval VINF_SUCCESS on success.
1910 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
 * @retval VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
1912 * not valid, except enmType is PGMPTWALKGSTTYPE_INVALID.
1913 *
1914 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1915 * @param GCPtr The guest virtual address to walk by.
1916 * @param pWalk Where to return the walk result. This is valid for some
1917 * error codes as well.
1918 */
1919int pgmGstPtWalk(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALKGST pWalk)
1920{
1921 VMCPU_ASSERT_EMT(pVCpu);
1922 switch (pVCpu->pgm.s.enmGuestMode)
1923 {
1924 case PGMMODE_32_BIT:
1925 pWalk->enmType = PGMPTWALKGSTTYPE_32BIT;
1926 return PGM_GST_NAME_32BIT(Walk)(pVCpu, GCPtr, &pWalk->u.Legacy);
1927
1928 case PGMMODE_PAE:
1929 case PGMMODE_PAE_NX:
1930 pWalk->enmType = PGMPTWALKGSTTYPE_PAE;
1931 return PGM_GST_NAME_PAE(Walk)(pVCpu, GCPtr, &pWalk->u.Pae);
1932
1933 case PGMMODE_AMD64:
1934 case PGMMODE_AMD64_NX:
1935 pWalk->enmType = PGMPTWALKGSTTYPE_AMD64;
1936 return PGM_GST_NAME_AMD64(Walk)(pVCpu, GCPtr, &pWalk->u.Amd64);
1937
1938 case PGMMODE_REAL:
1939 case PGMMODE_PROTECTED:
1940 pWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
1941 return VERR_PGM_NOT_USED_IN_MODE;
1942
1943 case PGMMODE_EPT:
1944 case PGMMODE_NESTED_32BIT:
1945 case PGMMODE_NESTED_PAE:
1946 case PGMMODE_NESTED_AMD64:
1947 default:
1948 AssertFailed();
1949 pWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
1950 return VERR_PGM_NOT_USED_IN_MODE;
1951 }
1952}
1953
1954
1955#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1956/**
1957 * Performs a guest second-level address translation (SLAT).
1958 *
1959 * The guest paging mode must be 32-bit, PAE or AMD64 when making a call to this
1960 * function.
1961 *
1962 * @returns VBox status code.
1963 * @retval VINF_SUCCESS on success.
1964 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
 * @retval VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
1966 * not valid, except enmType is PGMPTWALKGSTTYPE_INVALID.
1967 *
1968 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1969 * @param GCPhysNested The nested-guest physical address being translated
1970 * (input).
1971 * @param fIsLinearAddrValid Whether the linear address in @a GCPtrNested is
1972 * valid. This indicates the SLAT is caused when
1973 * translating a nested-guest linear address.
1974 * @param GCPtrNested The nested-guest virtual address that initiated the
1975 * SLAT. If none, pass NIL_RTGCPTR.
1976 * @param pWalk Where to return the walk result. This is valid for
1977 * some error codes as well.
1978 */
1979static int pgmGstSlatWalk(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, bool fIsLinearAddrValid, RTGCPTR GCPtrNested,
1980 PPGMPTWALKGST pWalk)
1981{
1982 Assert(pVCpu->pgm.s.enmGuestSlatMode != PGMSLAT_DIRECT);
1983 switch (pVCpu->pgm.s.enmGuestSlatMode)
1984 {
1985 case PGMSLAT_EPT:
1986 pWalk->enmType = PGMPTWALKGSTTYPE_EPT;
1987 return PGM_GST_SLAT_NAME_EPT(Walk)(pVCpu, GCPhysNested, fIsLinearAddrValid, GCPtrNested, &pWalk->u.Ept);
1988
1989 default:
1990 AssertFailed();
1991 pWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
1992 return VERR_PGM_NOT_USED_IN_MODE;
1993 }
1994}
1995#endif
1996
1997
1998/**
1999 * Tries to continue the previous walk.
2000 *
2001 * @note Requires the caller to hold the PGM lock from the first
2002 * pgmGstPtWalk() call to the last pgmGstPtWalkNext() call. Otherwise
2003 * we cannot use the pointers.
2004 *
2005 * @returns VBox status code.
2006 * @retval VINF_SUCCESS on success.
2007 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
 * @retval VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
2009 * not valid, except enmType is PGMPTWALKGSTTYPE_INVALID.
2010 *
2011 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2012 * @param GCPtr The guest virtual address to walk by.
2013 * @param pWalk Pointer to the previous walk result and where to return
2014 * the result of this walk. This is valid for some error
2015 * codes as well.
2016 */
2017int pgmGstPtWalkNext(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALKGST pWalk)
2018{
2019 /*
 * We can only handle successful walks.
2021 * We also limit ourselves to the next page.
2022 */
2023 if ( pWalk->u.Core.fSucceeded
2024 && GCPtr - pWalk->u.Core.GCPtr == PAGE_SIZE)
2025 {
2026 Assert(pWalk->u.Core.uLevel == 0);
2027 if (pWalk->enmType == PGMPTWALKGSTTYPE_AMD64)
2028 {
2029 /*
2030 * AMD64
2031 */
2032 if (!pWalk->u.Core.fGigantPage && !pWalk->u.Core.fBigPage)
2033 {
2034 /*
2035 * We fall back to full walk if the PDE table changes, if any
2036 * reserved bits are set, or if the effective page access changes.
2037 */
2038 const uint64_t fPteSame = X86_PTE_P | X86_PTE_RW | X86_PTE_US | X86_PTE_PWT
2039 | X86_PTE_PCD | X86_PTE_A | X86_PTE_PAE_NX;
2040 const uint64_t fPdeSame = X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_PWT
2041 | X86_PDE_PCD | X86_PDE_A | X86_PDE_PAE_NX | X86_PDE_PS;
2042
2043 if ((GCPtr >> X86_PD_PAE_SHIFT) == (pWalk->u.Core.GCPtr >> X86_PD_PAE_SHIFT))
2044 {
2045 if (pWalk->u.Amd64.pPte)
2046 {
2047 X86PTEPAE Pte;
2048 Pte.u = pWalk->u.Amd64.pPte[1].u;
2049 if ( (Pte.u & fPteSame) == (pWalk->u.Amd64.Pte.u & fPteSame)
2050 && !(Pte.u & (pVCpu)->pgm.s.fGstAmd64MbzPteMask))
2051 {
2052
2053 pWalk->u.Core.GCPtr = GCPtr;
2054 pWalk->u.Core.GCPhys = Pte.u & X86_PTE_PAE_PG_MASK;
2055 pWalk->u.Amd64.Pte.u = Pte.u;
2056 pWalk->u.Amd64.pPte++;
2057 return VINF_SUCCESS;
2058 }
2059 }
2060 }
2061 else if ((GCPtr >> X86_PDPT_SHIFT) == (pWalk->u.Core.GCPtr >> X86_PDPT_SHIFT))
2062 {
2063 Assert(!((GCPtr >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK)); /* Must be first PT entry. */
2064 if (pWalk->u.Amd64.pPde)
2065 {
2066 X86PDEPAE Pde;
2067 Pde.u = pWalk->u.Amd64.pPde[1].u;
2068 if ( (Pde.u & fPdeSame) == (pWalk->u.Amd64.Pde.u & fPdeSame)
2069 && !(Pde.u & (pVCpu)->pgm.s.fGstAmd64MbzPdeMask))
2070 {
2071 /* Get the new PTE and check out the first entry. */
2072 int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, PGM_A20_APPLY(pVCpu, (Pde.u & X86_PDE_PAE_PG_MASK)),
2073 &pWalk->u.Amd64.pPt);
2074 if (RT_SUCCESS(rc))
2075 {
2076 pWalk->u.Amd64.pPte = &pWalk->u.Amd64.pPt->a[0];
2077 X86PTEPAE Pte;
2078 Pte.u = pWalk->u.Amd64.pPte->u;
2079 if ( (Pte.u & fPteSame) == (pWalk->u.Amd64.Pte.u & fPteSame)
2080 && !(Pte.u & (pVCpu)->pgm.s.fGstAmd64MbzPteMask))
2081 {
2082 pWalk->u.Core.GCPtr = GCPtr;
2083 pWalk->u.Core.GCPhys = Pte.u & X86_PTE_PAE_PG_MASK;
2084 pWalk->u.Amd64.Pte.u = Pte.u;
2085 pWalk->u.Amd64.Pde.u = Pde.u;
2086 pWalk->u.Amd64.pPde++;
2087 return VINF_SUCCESS;
2088 }
2089 }
2090 }
2091 }
2092 }
2093 }
2094 else if (!pWalk->u.Core.fGigantPage)
2095 {
2096 if ((GCPtr & X86_PAGE_2M_BASE_MASK) == (pWalk->u.Core.GCPtr & X86_PAGE_2M_BASE_MASK))
2097 {
2098 pWalk->u.Core.GCPtr = GCPtr;
2099 pWalk->u.Core.GCPhys += PAGE_SIZE;
2100 return VINF_SUCCESS;
2101 }
2102 }
2103 else
2104 {
2105 if ((GCPtr & X86_PAGE_1G_BASE_MASK) == (pWalk->u.Core.GCPtr & X86_PAGE_1G_BASE_MASK))
2106 {
2107 pWalk->u.Core.GCPtr = GCPtr;
2108 pWalk->u.Core.GCPhys += PAGE_SIZE;
2109 return VINF_SUCCESS;
2110 }
2111 }
2112 }
2113 }
2114 /* Case we don't handle. Do full walk. */
2115 return pgmGstPtWalk(pVCpu, GCPtr, pWalk);
2116}
2117
2118
2119/**
2120 * Checks if the page is present.
2121 *
2122 * @returns true if the page is present.
2123 * @returns false if the page is not present.
2124 * @param pVCpu The cross context virtual CPU structure.
2125 * @param GCPtr Address within the page.
2126 */
2127VMMDECL(bool) PGMGstIsPagePresent(PVMCPUCC pVCpu, RTGCPTR GCPtr)
2128{
2129 VMCPU_ASSERT_EMT(pVCpu);
2130 int rc = PGMGstGetPage(pVCpu, GCPtr, NULL, NULL);
2131 return RT_SUCCESS(rc);
2132}
2133
2134
2135/**
2136 * Sets (replaces) the page flags for a range of pages in the guest's tables.
2137 *
2138 * @returns VBox status code.
2139 * @param pVCpu The cross context virtual CPU structure.
2140 * @param GCPtr The address of the first page.
2141 * @param cb The size of the range in bytes.
2142 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
2143 */
2144VMMDECL(int) PGMGstSetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
2145{
2146 VMCPU_ASSERT_EMT(pVCpu);
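 /* Passing an fMask of zero clears all existing flags before ORing in fFlags, i.e. the flags are replaced outright. */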
2147 return PGMGstModifyPage(pVCpu, GCPtr, cb, fFlags, 0);
2148}
2149
2150
2151/**
2152 * Modify page flags for a range of pages in the guest's tables
2153 *
2154 * The existing flags are ANDed with the fMask and ORed with the fFlags.
2155 *
2156 * @returns VBox status code.
2157 * @param pVCpu The cross context virtual CPU structure.
2158 * @param GCPtr Virtual address of the first page in the range.
2159 * @param cb Size (in bytes) of the range to apply the modification to.
2160 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
2161 * @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
2162 * Be very CAREFUL when ~'ing constants which could be 32-bit!
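 *
 * @note    A minimal illustrative call (hypothetical, not taken from a real
 *          caller): clearing the RW bit of a single page while leaving the
 *          other flags alone would look like
 *          @code
 *              int rc = PGMGstModifyPage(pVCpu, GCPtr, PAGE_SIZE, 0, ~(uint64_t)X86_PTE_RW);
 *          @endcode
 *          Note the uint64_t cast, per the warning above.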
2163 */
2164VMMDECL(int) PGMGstModifyPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
2165{
2166 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,GstModifyPage), a);
2167 VMCPU_ASSERT_EMT(pVCpu);
2168
2169 /*
2170 * Validate input.
2171 */
2172 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
2173 Assert(cb);
2174
2175 LogFlow(("PGMGstModifyPage %RGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));
2176
2177 /*
2178 * Adjust input.
2179 */
2180 cb += GCPtr & PAGE_OFFSET_MASK;
2181 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
2182 GCPtr = (GCPtr & PAGE_BASE_GC_MASK);
2183
2184 /*
2185 * Call worker.
2186 */
2187 uintptr_t idx = pVCpu->pgm.s.idxGuestModeData;
2188 AssertReturn(idx < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
2189 AssertReturn(g_aPgmGuestModeData[idx].pfnModifyPage, VERR_PGM_MODE_IPE);
2190 int rc = g_aPgmGuestModeData[idx].pfnModifyPage(pVCpu, GCPtr, cb, fFlags, fMask);
2191
2192 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,GstModifyPage), a);
2193 return rc;
2194}
2195
2196
2197/**
2198 * Checks whether the given PAE PDPEs are potentially valid for the guest.
2199 *
2200 * @returns @c true if the PDPE is valid, @c false otherwise.
2201 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2202 * @param paPaePdpes The PAE PDPEs to validate.
2203 *
2204 * @remarks This function -only- checks the reserved bits in the PDPE entries.
2205 */
2206VMM_INT_DECL(bool) PGMGstArePaePdpesValid(PVMCPUCC pVCpu, PCX86PDPE paPaePdpes)
2207{
2208 Assert(paPaePdpes);
2209 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
2210 {
2211 X86PDPE const PaePdpe = paPaePdpes[i];
2212 if ( !(PaePdpe.u & X86_PDPE_P)
2213 || !(PaePdpe.u & pVCpu->pgm.s.fGstPaeMbzPdpeMask))
2214 { /* likely */ }
2215 else
2216 return false;
2217 }
2218 return true;
2219}
2220
2221
2222/**
2223 * Performs the lazy mapping of the 32-bit guest PD.
2224 *
2225 * @returns VBox status code.
2226 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2227 * @param ppPd Where to return the pointer to the mapping. This is
2228 * always set.
2229 */
2230int pgmGstLazyMap32BitPD(PVMCPUCC pVCpu, PX86PD *ppPd)
2231{
2232 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2233 PGM_LOCK_VOID(pVM);
2234
2235 Assert(!pVCpu->pgm.s.CTX_SUFF(pGst32BitPd));
2236
2237 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAGE_MASK;
2238 PPGMPAGE pPage;
2239 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
2240 if (RT_SUCCESS(rc))
2241 {
2242 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)ppPd);
2243 if (RT_SUCCESS(rc))
2244 {
2245# ifdef IN_RING3
2246 pVCpu->pgm.s.pGst32BitPdR0 = NIL_RTR0PTR;
2247 pVCpu->pgm.s.pGst32BitPdR3 = *ppPd;
2248# else
2249 pVCpu->pgm.s.pGst32BitPdR3 = NIL_RTR0PTR;
2250 pVCpu->pgm.s.pGst32BitPdR0 = *ppPd;
2251# endif
2252 PGM_UNLOCK(pVM);
2253 return VINF_SUCCESS;
2254 }
2255 AssertRC(rc);
2256 }
2257 PGM_UNLOCK(pVM);
2258
2259 *ppPd = NULL;
2260 return rc;
2261}
2262
2263
2264/**
2265 * Performs the lazy mapping of the PAE guest PDPT.
2266 *
2267 * @returns VBox status code.
2268 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2269 * @param ppPdpt Where to return the pointer to the mapping. This is
2270 * always set.
2271 */
2272int pgmGstLazyMapPaePDPT(PVMCPUCC pVCpu, PX86PDPT *ppPdpt)
2273{
2274 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt));
2275 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2276 PGM_LOCK_VOID(pVM);
2277
2278 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAE_PAGE_MASK;
2279 PPGMPAGE pPage;
2280 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
2281 if (RT_SUCCESS(rc))
2282 {
2283 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)ppPdpt);
2284 if (RT_SUCCESS(rc))
2285 {
2286# ifdef IN_RING3
2287 pVCpu->pgm.s.pGstPaePdptR0 = NIL_RTR0PTR;
2288 pVCpu->pgm.s.pGstPaePdptR3 = *ppPdpt;
2289# else
2290 pVCpu->pgm.s.pGstPaePdptR3 = NIL_RTR3PTR;
2291 pVCpu->pgm.s.pGstPaePdptR0 = *ppPdpt;
2292# endif
2293 PGM_UNLOCK(pVM);
2294 return VINF_SUCCESS;
2295 }
2296 AssertRC(rc);
2297 }
2298
2299 PGM_UNLOCK(pVM);
2300 *ppPdpt = NULL;
2301 return rc;
2302}
2303
2304
2305/**
2306 * Performs the lazy mapping / updating of a PAE guest PD.
2307 *
 * @returns VBox status code.
2310 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2311 * @param iPdpt Which PD entry to map (0..3).
2312 * @param ppPd Where to return the pointer to the mapping. This is
2313 * always set.
2314 */
2315int pgmGstLazyMapPaePD(PVMCPUCC pVCpu, uint32_t iPdpt, PX86PDPAE *ppPd)
2316{
2317 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2318 PGM_LOCK_VOID(pVM);
2319
2320 PX86PDPT pGuestPDPT = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
2321 Assert(pGuestPDPT);
2322 Assert(pGuestPDPT->a[iPdpt].u & X86_PDPE_P);
2323 RTGCPHYS GCPhys = pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK;
2324 bool const fChanged = pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] != GCPhys;
2325
2326 PPGMPAGE pPage;
2327 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
2328 if (RT_SUCCESS(rc))
2329 {
2330 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)ppPd);
2331 AssertRC(rc);
2332 if (RT_SUCCESS(rc))
2333 {
2334# ifdef IN_RING3
2335 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = NIL_RTR0PTR;
2336 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = *ppPd;
2337# else
2338 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = NIL_RTR3PTR;
2339 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = *ppPd;
2340# endif
2341 if (fChanged)
2342 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = GCPhys;
2343 PGM_UNLOCK(pVM);
2344 return VINF_SUCCESS;
2345 }
2346 }
2347
2348 /* Invalid page or some failure, invalidate the entry. */
2349 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = NIL_RTGCPHYS;
2350 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = NIL_RTR3PTR;
2351 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = NIL_RTR0PTR;
2352
2353 PGM_UNLOCK(pVM);
2354 return rc;
2355}
2356
2357
2358/**
 * Performs the lazy mapping of the guest AMD64 PML4 table.
2360 *
2361 * @returns VBox status code.
2362 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2363 * @param ppPml4 Where to return the pointer to the mapping. This will
2364 * always be set.
2365 */
2366int pgmGstLazyMapPml4(PVMCPUCC pVCpu, PX86PML4 *ppPml4)
2367{
2368 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4));
2369 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2370 PGM_LOCK_VOID(pVM);
2371
2372 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_AMD64_PAGE_MASK;
2373 PPGMPAGE pPage;
2374 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
2375 if (RT_SUCCESS(rc))
2376 {
2377 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)ppPml4);
2378 if (RT_SUCCESS(rc))
2379 {
2380# ifdef IN_RING3
2381 pVCpu->pgm.s.pGstAmd64Pml4R0 = NIL_RTR0PTR;
2382 pVCpu->pgm.s.pGstAmd64Pml4R3 = *ppPml4;
2383# else
2384 pVCpu->pgm.s.pGstAmd64Pml4R3 = NIL_RTR3PTR;
2385 pVCpu->pgm.s.pGstAmd64Pml4R0 = *ppPml4;
2386# endif
2387 PGM_UNLOCK(pVM);
2388 return VINF_SUCCESS;
2389 }
2390 }
2391
2392 PGM_UNLOCK(pVM);
2393 *ppPml4 = NULL;
2394 return rc;
2395}
2396
2397
2398#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2399 /**
2400 * Performs the lazy mapping of the guest PML4 table when using EPT paging.
2401 *
2402 * @returns VBox status code.
2403 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2404 * @param ppEptPml4 Where to return the pointer to the mapping. This will
2405 * always be set.
2406 */
2407int pgmGstLazyMapEptPml4(PVMCPUCC pVCpu, PEPTPML4 *ppEptPml4)
2408{
2409 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstEptPml4));
2410 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2411 PGM_LOCK_VOID(pVM);
2412
2413 RTGCPHYS const GCPhysEpt = pVCpu->pgm.s.uEptPtr & EPT_EPTP_PG_MASK;
2414 PPGMPAGE pPage;
2415 int rc = pgmPhysGetPageEx(pVM, GCPhysEpt, &pPage);
2416 if (RT_SUCCESS(rc))
2417 {
2418 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysEpt, (void **)ppEptPml4);
2419 if (RT_SUCCESS(rc))
2420 {
2421# ifdef IN_RING3
2422 pVCpu->pgm.s.pGstEptPml4R0 = NIL_RTR0PTR;
2423 pVCpu->pgm.s.pGstEptPml4R3 = *ppEptPml4;
2424# else
2425 pVCpu->pgm.s.pGstEptPml4R3 = NIL_RTR3PTR;
2426 pVCpu->pgm.s.pGstEptPml4R0 = *ppEptPml4;
2427# endif
2428 PGM_UNLOCK(pVM);
2429 return VINF_SUCCESS;
2430 }
2431 }
2432
2433 PGM_UNLOCK(pVM);
2434 *ppEptPml4 = NULL;
2435 return rc;
2436}
2437#endif
2438
2439
2440/**
2441 * Gets the current CR3 register value for the shadow memory context.
2442 * @returns CR3 value.
2443 * @param pVCpu The cross context virtual CPU structure.
2444 */
2445VMMDECL(RTHCPHYS) PGMGetHyperCR3(PVMCPU pVCpu)
2446{
2447 PPGMPOOLPAGE pPoolPage = pVCpu->pgm.s.CTX_SUFF(pShwPageCR3);
2448 AssertPtrReturn(pPoolPage, NIL_RTHCPHYS);
2449 return pPoolPage->Core.Key;
2450}
2451
2452
2453/**
2454 * Forces lazy remapping of the guest's PAE page-directory structures.
2455 *
2456 * @param pVCpu The cross context virtual CPU structure.
2457 */
2458static void pgmGstFlushPaePdpes(PVMCPU pVCpu)
2459{
2460 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->pgm.s.aGCPhysGstPaePDs); i++)
2461 {
2462 pVCpu->pgm.s.apGstPaePDsR3[i] = 0;
2463 pVCpu->pgm.s.apGstPaePDsR0[i] = 0;
2464 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
2465 }
2466}
2467
2468
2469/**
2470 * Gets the PGM CR3 value masked according to the current guest mode.
2471 *
2472 * @returns The masked PGM CR3 value.
2473 * @param pVCpu The cross context virtual CPU structure.
2474 * @param uCr3 The raw guest CR3 value.
2475 */
2476DECLINLINE(RTGCPHYS) pgmGetGuestMaskedCr3(PVMCPUCC pVCpu, uint64_t uCr3)
2477{
2478 RTGCPHYS GCPhysCR3;
2479 switch (pVCpu->pgm.s.enmGuestMode)
2480 {
2481 case PGMMODE_PAE:
2482 case PGMMODE_PAE_NX:
2483 GCPhysCR3 = (RTGCPHYS)(uCr3 & X86_CR3_PAE_PAGE_MASK);
2484 break;
2485 case PGMMODE_AMD64:
2486 case PGMMODE_AMD64_NX:
2487 GCPhysCR3 = (RTGCPHYS)(uCr3 & X86_CR3_AMD64_PAGE_MASK);
2488 break;
2489#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2490 case PGMMODE_EPT:
2491 GCPhysCR3 = (RTGCPHYS)(uCr3 & X86_CR3_EPT_PAGE_MASK);
2492 break;
2493#endif
2494 default:
2495 GCPhysCR3 = (RTGCPHYS)(uCr3 & X86_CR3_PAGE_MASK);
2496 break;
2497 }
2498 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
2499 return GCPhysCR3;
2500}
2501
2502
2503/**
2504 * Performs and schedules necessary updates following a CR3 load or reload.
2505 *
 * This will normally involve mapping the guest PD or nPDPT.
2507 *
2508 * @returns VBox status code.
2509 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
2510 * safely be ignored and overridden since the FF will be set too then.
2511 * @param pVCpu The cross context virtual CPU structure.
2512 * @param cr3 The new cr3.
2513 * @param fGlobal Indicates whether this is a global flush or not.
2514 * @param fPdpesMapped Whether the PAE PDPEs (and PDPT) have been mapped.
2515 */
2516VMMDECL(int) PGMFlushTLB(PVMCPUCC pVCpu, uint64_t cr3, bool fGlobal, bool fPdpesMapped)
2517{
2518 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLB), a);
2519 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2520
2521 VMCPU_ASSERT_EMT(pVCpu);
2522
2523 /*
 * Always flag the necessary updates; this is required for hardware acceleration.
2525 */
2526 /** @todo optimize this, it shouldn't always be necessary. */
2527 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2528 if (fGlobal)
2529 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2530 LogFlow(("PGMFlushTLB: cr3=%RX64 OldCr3=%RX64 fGlobal=%d\n", cr3, pVCpu->pgm.s.GCPhysCR3, fGlobal));
2531
2532 /*
2533 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
2534 */
2535 int rc = VINF_SUCCESS;
2536 RTGCPHYS const GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
2537 RTGCPHYS const GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, cr3);
2538 if (GCPhysOldCR3 != GCPhysCR3)
2539 {
2540 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2541 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2542 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
2543
2544 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2545 rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3, fPdpesMapped);
2546 if (RT_LIKELY(rc == VINF_SUCCESS))
2547 { }
2548 else
2549 {
2550 AssertMsg(rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
2551 Assert(VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_PGM_SYNC_CR3));
2552 pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3;
2553 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MAP_CR3;
2554 }
2555
2556 if (fGlobal)
2557 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBNewCR3Global));
2558 else
2559 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBNewCR3));
2560 }
2561 else
2562 {
2563#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
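 /* CR3 is unchanged, but reset the pool's dirty page optimization state if it has pages outstanding. */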
2564 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
2565 if (pPool->cDirtyPages)
2566 {
2567 PGM_LOCK_VOID(pVM);
2568 pgmPoolResetDirtyPages(pVM);
2569 PGM_UNLOCK(pVM);
2570 }
2571#endif
2572 if (fGlobal)
2573 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBSameCR3Global));
2574 else
2575 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBSameCR3));
2576
2577 /*
2578 * Flush PAE PDPTEs.
2579 */
2580 if (PGMMODE_IS_PAE(pVCpu->pgm.s.enmGuestMode))
2581 pgmGstFlushPaePdpes(pVCpu);
2582 }
2583
2584 IEMTlbInvalidateAll(pVCpu, false /*fVmm*/);
2585 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLB), a);
2586 return rc;
2587}
2588
2589
2590/**
2591 * Performs and schedules necessary updates following a CR3 load or reload when
2592 * using nested or extended paging.
2593 *
2594 * This API is an alternative to PGMFlushTLB that avoids actually flushing the
2595 * TLB and triggering a SyncCR3.
2596 *
 * This will normally involve mapping the guest PD or nPDPT.
2598 *
2599 * @returns VBox status code.
2600 * @retval VINF_SUCCESS.
2601 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync (not for nested
2602 * paging modes). This can safely be ignored and overridden since the
2603 * FF will be set too then.
2604 * @param pVCpu The cross context virtual CPU structure.
2605 * @param cr3 The new CR3.
2606 * @param fPdpesMapped Whether the PAE PDPEs (and PDPT) have been mapped.
2607 */
2608VMMDECL(int) PGMUpdateCR3(PVMCPUCC pVCpu, uint64_t cr3, bool fPdpesMapped)
2609{
2610 VMCPU_ASSERT_EMT(pVCpu);
2611 LogFlow(("PGMUpdateCR3: cr3=%RX64 OldCr3=%RX64\n", cr3, pVCpu->pgm.s.GCPhysCR3));
2612
2613 /* We assume we're only called in nested paging mode. */
2614 Assert(pVCpu->CTX_SUFF(pVM)->pgm.s.fNestedPaging || pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
2615
2616 /*
2617 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
2618 */
2619 int rc = VINF_SUCCESS;
2620 RTGCPHYS const GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, cr3);
2621 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
2622 {
2623 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2624 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2625 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
2626
2627 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2628 rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3, fPdpesMapped);
2629
2630 AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */ /** @todo this isn't true for the mac, but we need hw to test/fix this. */
2631 }
2632 /*
2633 * Flush PAE PDPTEs.
2634 */
2635 else if (PGMMODE_IS_PAE(pVCpu->pgm.s.enmGuestMode))
2636 pgmGstFlushPaePdpes(pVCpu);
2637
2638 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
2639 return rc;
2640}
2641
2642
2643/**
2644 * Synchronize the paging structures.
2645 *
2646 * This function is called in response to the VM_FF_PGM_SYNC_CR3 and
2647 * VM_FF_PGM_SYNC_CR3_NONGLOBAL. Those two force action flags are set
2648 * in several places, most importantly whenever the CR3 is loaded.
2649 *
2650 * @returns VBox status code. May return VINF_PGM_SYNC_CR3 in RC/R0.
2651 * @retval VERR_PGM_NO_HYPERVISOR_ADDRESS in raw-mode when we're unable to map
2652 * the VMM into guest context.
2653 * @param pVCpu The cross context virtual CPU structure.
2654 * @param cr0 Guest context CR0 register
2655 * @param cr3 Guest context CR3 register
2656 * @param cr4 Guest context CR4 register
2657 * @param fGlobal Including global page directories or not
2658 */
2659VMMDECL(int) PGMSyncCR3(PVMCPUCC pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
2660{
2661 int rc;
2662
2663 VMCPU_ASSERT_EMT(pVCpu);
2664
2665 /*
2666 * The pool may have pending stuff and even require a return to ring-3 to
2667 * clear the whole thing.
2668 */
2669 rc = pgmPoolSyncCR3(pVCpu);
2670 if (rc != VINF_SUCCESS)
2671 return rc;
2672
2673 /*
2674 * We might be called when we shouldn't.
2675 *
2676 * The mode switching will ensure that the PD is resynced after every mode
2677 * switch. So, if we find ourselves here when in protected or real mode
2678 * we can safely clear the FF and return immediately.
2679 */
2680 if (pVCpu->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
2681 {
2682 Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
2683 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2684 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2685 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2686 return VINF_SUCCESS;
2687 }
2688
2689 /* If global pages are not supported, then all flushes are global. */
2690 if (!(cr4 & X86_CR4_PGE))
2691 fGlobal = true;
2692 LogFlow(("PGMSyncCR3: cr0=%RX64 cr3=%RX64 cr4=%RX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
2693 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)));
2694
2695 /*
2696 * Check if we need to finish an aborted MapCR3 call (see PGMFlushTLB).
2697 * This should be done before SyncCR3.
2698 */
2699 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MAP_CR3)
2700 {
2701 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MAP_CR3;
2702
2703 RTGCPHYS const GCPhysCR3Old = pVCpu->pgm.s.GCPhysCR3; NOREF(GCPhysCR3Old);
2704 RTGCPHYS const GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, cr3);
2705 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
2706 {
2707 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2708 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2709 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
2710 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2711 rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3, false /* fPdpesMapped */);
2712 }
2713
2714 /* Make sure we check for pending pgm pool syncs as we clear VMCPU_FF_PGM_SYNC_CR3 later on! */
2715 if ( rc == VINF_PGM_SYNC_CR3
2716 || (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL))
2717 {
2718 Log(("PGMSyncCR3: pending pgm pool sync after MapCR3!\n"));
2719#ifdef IN_RING3
2720 rc = pgmPoolSyncCR3(pVCpu);
2721#else
2722 if (rc == VINF_PGM_SYNC_CR3)
2723 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3Old;
2724 return VINF_PGM_SYNC_CR3;
2725#endif
2726 }
2727 AssertRCReturn(rc, rc);
2728 AssertRCSuccessReturn(rc, VERR_IPE_UNEXPECTED_INFO_STATUS);
2729 }
2730
2731 /*
2732 * Let the 'Bth' function do the work and we'll just keep track of the flags.
2733 */
2734 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
2735
2736 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2737 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2738 AssertReturn(g_aPgmBothModeData[idxBth].pfnSyncCR3, VERR_PGM_MODE_IPE);
2739 rc = g_aPgmBothModeData[idxBth].pfnSyncCR3(pVCpu, cr0, cr3, cr4, fGlobal);
2740
2741 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
2742 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
2743 if (rc == VINF_SUCCESS)
2744 {
2745 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)
2746 {
2747 /* Go back to ring 3 if a pgm pool sync is again pending. */
2748 return VINF_PGM_SYNC_CR3;
2749 }
2750
2751 if (!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
2752 {
2753 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2754 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2755 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2756 }
2757 }
2758
2759 /*
2760 * Now flush the CR3 (guest context).
2761 */
2762 if (rc == VINF_SUCCESS)
2763 PGM_INVL_VCPU_TLBS(pVCpu);
2764 return rc;
2765}
2766
2767
2768/**
2769 * Maps all the PAE PDPE entries.
2770 *
2771 * @returns VBox status code.
2772 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2773 * @param paPaePdpes The new PAE PDPE values.
2774 *
2775 * @remarks This function may be invoked during the process of changing the guest
2776 * paging mode to PAE, hence the guest state (CR0, CR4 etc.) may not
2777 * reflect PAE paging just yet.
2778 */
2779VMM_INT_DECL(int) PGMGstMapPaePdpes(PVMCPUCC pVCpu, PCX86PDPE paPaePdpes)
2780{
2781 Assert(paPaePdpes);
2782 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
2783 {
2784 X86PDPE const PaePdpe = paPaePdpes[i];
2785
2786 /*
 * In some cases (e.g. in SVM with nested paging) the validation of the PAE PDPEs
 * is deferred.[1] Also, different situations require different handling of invalid
2789 * PDPE entries. Here we assume the caller has already validated or doesn't require
2790 * validation of the PDPEs.
2791 *
2792 * [1] -- See AMD spec. 15.25.10 "Legacy PAE Mode".
2793 */
2794 if ((PaePdpe.u & (pVCpu->pgm.s.fGstPaeMbzPdpeMask | X86_PDPE_P)) == X86_PDPE_P)
2795 {
2796 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2797 RTHCPTR HCPtr;
2798 RTGCPHYS const GCPhys = PGM_A20_APPLY(pVCpu, PaePdpe.u & X86_PDPE_PG_MASK);
2799
2800 PGM_LOCK_VOID(pVM);
2801 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
2802 AssertReturnStmt(pPage, PGM_UNLOCK(pVM), VERR_PGM_INVALID_PDPE_ADDR);
2803 int const rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)&HCPtr);
2804 PGM_UNLOCK(pVM);
2805 if (RT_SUCCESS(rc))
2806 {
2807# ifdef IN_RING3
2808 pVCpu->pgm.s.apGstPaePDsR3[i] = (PX86PDPAE)HCPtr;
2809 pVCpu->pgm.s.apGstPaePDsR0[i] = NIL_RTR0PTR;
2810# else
2811 pVCpu->pgm.s.apGstPaePDsR3[i] = NIL_RTR3PTR;
2812 pVCpu->pgm.s.apGstPaePDsR0[i] = (PX86PDPAE)HCPtr;
2813# endif
2814 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = GCPhys;
2815 continue;
2816 }
 AssertMsgFailed(("PGMGstMapPaePdpes: rc=%d GCPhys=%RGp i=%d\n", rc, GCPhys, i));
2818 }
2819 pVCpu->pgm.s.apGstPaePDsR3[i] = 0;
2820 pVCpu->pgm.s.apGstPaePDsR0[i] = 0;
2821 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
2822 }
2823
2824 return VINF_SUCCESS;
2825}
2826
2827
2828/**
2829 * Validates and maps the PDPT and PAE PDPEs referenced by the given CR3.
2830 *
2831 * @returns VBox status code.
2832 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2833 * @param cr3 The guest CR3 value.
2834 *
2835 * @remarks This function may be invoked during the process of changing the guest
2836 * paging mode to PAE but the guest state (CR0, CR4 etc.) may not reflect
2837 * PAE paging just yet.
2838 */
2839VMM_INT_DECL(int) PGMGstMapPaePdpesAtCr3(PVMCPUCC pVCpu, uint64_t cr3)
2840{
2841 /*
2842 * Read the page-directory-pointer table (PDPT) at CR3.
2843 */
2844 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2845 RTGCPHYS GCPhysCR3 = (cr3 & X86_CR3_PAE_PAGE_MASK);
2846 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
2847
2848 PGM_LOCK_VOID(pVM);
2849 PPGMPAGE pPageCR3 = pgmPhysGetPage(pVM, GCPhysCR3);
2850 AssertReturnStmt(pPageCR3, PGM_UNLOCK(pVM), VERR_PGM_INVALID_CR3_ADDR);
2851
2852 X86PDPE aPaePdpes[X86_PG_PAE_PDPE_ENTRIES];
2853 RTHCPTR HCPtrGuestCr3;
2854 int rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPageCR3, GCPhysCR3, (void **)&HCPtrGuestCr3);
2855 PGM_UNLOCK(pVM);
2856 AssertRCReturn(rc, rc);
2857 memcpy(&aPaePdpes[0], HCPtrGuestCr3, sizeof(aPaePdpes));
2858
2859 /*
2860 * Validate the page-directory-pointer table entries (PDPE).
2861 */
2862 if (PGMGstArePaePdpesValid(pVCpu, &aPaePdpes[0]))
2863 {
2864 /*
2865 * Map the PDPT.
2866 * We deliberately don't update PGM's GCPhysCR3 here as it's expected
2867 * that PGMFlushTLB will be called soon and only a change to CR3 then
2868 * will cause the shadow page tables to be updated.
2869 */
2870# ifdef IN_RING3
2871 pVCpu->pgm.s.pGstPaePdptR3 = (PX86PDPT)HCPtrGuestCr3;
2872 pVCpu->pgm.s.pGstPaePdptR0 = NIL_RTR0PTR;
2873# else
2874 pVCpu->pgm.s.pGstPaePdptR3 = NIL_RTR3PTR;
2875 pVCpu->pgm.s.pGstPaePdptR0 = (PX86PDPT)HCPtrGuestCr3;
2876# endif
2877
2878 /*
2879 * Update CPUM.
2880 * We do this prior to mapping the PDPEs to keep the order consistent
2881 * with what's used in HM. In practice, it doesn't really matter.
2882 */
2883 CPUMSetGuestPaePdpes(pVCpu, &aPaePdpes[0]);
2884
2885 /*
2886 * Map the PDPEs.
2887 */
2888 return PGMGstMapPaePdpes(pVCpu, &aPaePdpes[0]);
2889 }
2890 return VERR_PGM_PAE_PDPE_RSVD;
2891}
2892
2893
2894/**
 * Called whenever CR0 or CR4 changes in a way which may affect the paging mode.
2896 *
2897 * @returns VBox status code, with the following informational code for
2898 * VM scheduling.
 * @retval VINF_SUCCESS if there was no change, or it was successfully dealt with.
 * @retval VINF_PGM_CHANGE_MODE if we're in RC and the mode changes. This will
2901 * NOT be returned in ring-3 or ring-0.
2902 * @retval VINF_EM_SUSPEND or VINF_EM_OFF on a fatal runtime error. (R3 only)
2903 *
2904 * @param pVCpu The cross context virtual CPU structure.
2905 * @param cr0 The new cr0.
2906 * @param cr4 The new cr4.
2907 * @param efer The new extended feature enable register.
2908 */
2909VMMDECL(int) PGMChangeMode(PVMCPUCC pVCpu, uint64_t cr0, uint64_t cr4, uint64_t efer)
2910{
2911 VMCPU_ASSERT_EMT(pVCpu);
2912
2913 /*
2914 * Calc the new guest mode.
2915 *
2916 * Note! We check PG before PE and without requiring PE because of the
2917 * special AMD-V paged real mode (APM vol 2, rev 3.28, 15.9).
2918 */
2919 PGMMODE enmGuestMode;
2920 if (cr0 & X86_CR0_PG)
2921 {
2922 if (!(cr4 & X86_CR4_PAE))
2923 {
2924 bool const fPse = !!(cr4 & X86_CR4_PSE);
2925 if (pVCpu->pgm.s.fGst32BitPageSizeExtension != fPse)
2926 Log(("PGMChangeMode: CR4.PSE %d -> %d\n", pVCpu->pgm.s.fGst32BitPageSizeExtension, fPse));
2927 pVCpu->pgm.s.fGst32BitPageSizeExtension = fPse;
2928 enmGuestMode = PGMMODE_32_BIT;
2929 }
2930 else if (!(efer & MSR_K6_EFER_LME))
2931 {
2932 if (!(efer & MSR_K6_EFER_NXE))
2933 enmGuestMode = PGMMODE_PAE;
2934 else
2935 enmGuestMode = PGMMODE_PAE_NX;
2936 }
2937 else
2938 {
2939 if (!(efer & MSR_K6_EFER_NXE))
2940 enmGuestMode = PGMMODE_AMD64;
2941 else
2942 enmGuestMode = PGMMODE_AMD64_NX;
2943 }
2944 }
2945 else if (!(cr0 & X86_CR0_PE))
2946 enmGuestMode = PGMMODE_REAL;
2947 else
2948 enmGuestMode = PGMMODE_PROTECTED;
2949
2950 /*
2951 * Did it change?
2952 */
2953 if (pVCpu->pgm.s.enmGuestMode == enmGuestMode)
2954 return VINF_SUCCESS;
2955
2956 /* Flush the TLB */
2957 PGM_INVL_VCPU_TLBS(pVCpu);
2958 return PGMHCChangeMode(pVCpu->CTX_SUFF(pVM), pVCpu, enmGuestMode);
2959}
2960
2961
2962/**
2963 * Converts a PGMMODE value to a PGM_TYPE_* \#define.
2964 *
2965 * @returns PGM_TYPE_*.
2966 * @param pgmMode The mode value to convert.
2967 */
2968DECLINLINE(unsigned) pgmModeToType(PGMMODE pgmMode)
2969{
2970 switch (pgmMode)
2971 {
2972 case PGMMODE_REAL: return PGM_TYPE_REAL;
2973 case PGMMODE_PROTECTED: return PGM_TYPE_PROT;
2974 case PGMMODE_32_BIT: return PGM_TYPE_32BIT;
2975 case PGMMODE_PAE:
2976 case PGMMODE_PAE_NX: return PGM_TYPE_PAE;
2977 case PGMMODE_AMD64:
2978 case PGMMODE_AMD64_NX: return PGM_TYPE_AMD64;
2979 case PGMMODE_NESTED_32BIT: return PGM_TYPE_NESTED_32BIT;
2980 case PGMMODE_NESTED_PAE: return PGM_TYPE_NESTED_PAE;
2981 case PGMMODE_NESTED_AMD64: return PGM_TYPE_NESTED_AMD64;
2982 case PGMMODE_EPT: return PGM_TYPE_EPT;
2983 case PGMMODE_NONE: return PGM_TYPE_NONE;
2984 default:
2985 AssertFatalMsgFailed(("pgmMode=%d\n", pgmMode));
2986 }
2987}
2988
2989
2990/**
2991 * Calculates the shadow paging mode.
2992 *
2993 * @returns The shadow paging mode.
2994 * @param pVM The cross context VM structure.
2995 * @param enmGuestMode The guest mode.
2996 * @param enmHostMode The host mode.
2997 * @param enmShadowMode The current shadow mode.
2998 */
2999static PGMMODE pgmCalcShadowMode(PVMCC pVM, PGMMODE enmGuestMode, SUPPAGINGMODE enmHostMode, PGMMODE enmShadowMode)
3000{
3001 switch (enmGuestMode)
3002 {
3003 /*
3004 * When switching to real or protected mode we don't change
3005 * anything since it's likely that we'll switch back pretty soon.
3006 *
3007 * During pgmR3InitPaging we'll end up here with PGMMODE_INVALID
 * and this code is supposed to determine which shadow paging mode and
 * switcher to use during init.
3010 */
3011 case PGMMODE_REAL:
3012 case PGMMODE_PROTECTED:
3013 if ( enmShadowMode != PGMMODE_INVALID
3014 && VM_IS_RAW_MODE_ENABLED(pVM) /* always switch in hm and nem modes! */)
3015 break; /* (no change) */
3016
3017 switch (enmHostMode)
3018 {
3019 case SUPPAGINGMODE_32_BIT:
3020 case SUPPAGINGMODE_32_BIT_GLOBAL:
3021 enmShadowMode = PGMMODE_32_BIT;
3022 break;
3023
3024 case SUPPAGINGMODE_PAE:
3025 case SUPPAGINGMODE_PAE_NX:
3026 case SUPPAGINGMODE_PAE_GLOBAL:
3027 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3028 enmShadowMode = PGMMODE_PAE;
3029 break;
3030
3031 case SUPPAGINGMODE_AMD64:
3032 case SUPPAGINGMODE_AMD64_GLOBAL:
3033 case SUPPAGINGMODE_AMD64_NX:
3034 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3035 enmShadowMode = PGMMODE_PAE;
3036 break;
3037
3038 default:
3039 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
3040 }
3041 break;
3042
3043 case PGMMODE_32_BIT:
3044 switch (enmHostMode)
3045 {
3046 case SUPPAGINGMODE_32_BIT:
3047 case SUPPAGINGMODE_32_BIT_GLOBAL:
3048 enmShadowMode = PGMMODE_32_BIT;
3049 break;
3050
3051 case SUPPAGINGMODE_PAE:
3052 case SUPPAGINGMODE_PAE_NX:
3053 case SUPPAGINGMODE_PAE_GLOBAL:
3054 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3055 enmShadowMode = PGMMODE_PAE;
3056 break;
3057
3058 case SUPPAGINGMODE_AMD64:
3059 case SUPPAGINGMODE_AMD64_GLOBAL:
3060 case SUPPAGINGMODE_AMD64_NX:
3061 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3062 enmShadowMode = PGMMODE_PAE;
3063 break;
3064
3065 default:
3066 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
3067 }
3068 break;
3069
3070 case PGMMODE_PAE:
3071 case PGMMODE_PAE_NX: /** @todo This might require more switchers and guest+both modes. */
3072 switch (enmHostMode)
3073 {
3074 case SUPPAGINGMODE_32_BIT:
3075 case SUPPAGINGMODE_32_BIT_GLOBAL:
3076 enmShadowMode = PGMMODE_PAE;
3077 break;
3078
3079 case SUPPAGINGMODE_PAE:
3080 case SUPPAGINGMODE_PAE_NX:
3081 case SUPPAGINGMODE_PAE_GLOBAL:
3082 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3083 enmShadowMode = PGMMODE_PAE;
3084 break;
3085
3086 case SUPPAGINGMODE_AMD64:
3087 case SUPPAGINGMODE_AMD64_GLOBAL:
3088 case SUPPAGINGMODE_AMD64_NX:
3089 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3090 enmShadowMode = PGMMODE_PAE;
3091 break;
3092
3093 default:
3094 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
3095 }
3096 break;
3097
3098 case PGMMODE_AMD64:
3099 case PGMMODE_AMD64_NX:
3100 switch (enmHostMode)
3101 {
3102 case SUPPAGINGMODE_32_BIT:
3103 case SUPPAGINGMODE_32_BIT_GLOBAL:
3104 enmShadowMode = PGMMODE_AMD64;
3105 break;
3106
3107 case SUPPAGINGMODE_PAE:
3108 case SUPPAGINGMODE_PAE_NX:
3109 case SUPPAGINGMODE_PAE_GLOBAL:
3110 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3111 enmShadowMode = PGMMODE_AMD64;
3112 break;
3113
3114 case SUPPAGINGMODE_AMD64:
3115 case SUPPAGINGMODE_AMD64_GLOBAL:
3116 case SUPPAGINGMODE_AMD64_NX:
3117 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3118 enmShadowMode = PGMMODE_AMD64;
3119 break;
3120
3121 default:
3122 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
3123 }
3124 break;
3125
3126 default:
3127 AssertLogRelMsgFailedReturn(("enmGuestMode=%d\n", enmGuestMode), PGMMODE_INVALID);
3128 }
3129
3130 /*
3131 * Override the shadow mode when NEM or nested paging is active.
3132 */
3133 if (VM_IS_NEM_ENABLED(pVM))
3134 {
3135 pVM->pgm.s.fNestedPaging = true;
3136 enmShadowMode = PGMMODE_NONE;
3137 }
3138 else
3139 {
3140 bool fNestedPaging = HMIsNestedPagingActive(pVM);
3141 pVM->pgm.s.fNestedPaging = fNestedPaging;
3142 if (fNestedPaging)
3143 {
3144 if (HMIsVmxActive(pVM))
3145 enmShadowMode = PGMMODE_EPT;
3146 else
3147 {
 /* The nested SVM paging depends on the host paging mode. */
3149 Assert(HMIsSvmActive(pVM));
3150 if ( enmGuestMode == PGMMODE_AMD64
3151 || enmGuestMode == PGMMODE_AMD64_NX)
3152 enmShadowMode = PGMMODE_NESTED_AMD64;
3153 else
3154 switch (pVM->pgm.s.enmHostMode)
3155 {
3156 case SUPPAGINGMODE_32_BIT:
3157 case SUPPAGINGMODE_32_BIT_GLOBAL:
3158 enmShadowMode = PGMMODE_NESTED_32BIT;
3159 break;
3160
3161 case SUPPAGINGMODE_PAE:
3162 case SUPPAGINGMODE_PAE_GLOBAL:
3163 case SUPPAGINGMODE_PAE_NX:
3164 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3165 enmShadowMode = PGMMODE_NESTED_PAE;
3166 break;
3167
3168 case SUPPAGINGMODE_AMD64:
3169 case SUPPAGINGMODE_AMD64_GLOBAL:
3170 case SUPPAGINGMODE_AMD64_NX:
3171 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3172 enmShadowMode = PGMMODE_NESTED_AMD64;
3173 break;
3174
3175 default:
3176 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode), PGMMODE_INVALID);
3177 }
3178 }
3179 }
3180#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
3181 else
3182 {
3183 /* Nested paging is a requirement for nested VT-x. */
3184 AssertLogRelMsgReturn(enmGuestMode != PGMMODE_EPT, ("enmHostMode=%d\n", pVM->pgm.s.enmHostMode), PGMMODE_INVALID);
3185 }
3186#endif
3187 }
3188
3189 return enmShadowMode;
3190}
3191
3192
3193/**
3194 * Performs the actual mode change.
3195 * This is called by PGMChangeMode and pgmR3InitPaging().
3196 *
3197 * @returns VBox status code. May suspend or power off the VM on error, but this
3198 * will trigger using FFs and not informational status codes.
3199 *
3200 * @param pVM The cross context VM structure.
3201 * @param pVCpu The cross context virtual CPU structure.
3202 * @param enmGuestMode The new guest mode. This is assumed to be different from
3203 * the current mode.
3204 */
3205VMM_INT_DECL(int) PGMHCChangeMode(PVMCC pVM, PVMCPUCC pVCpu, PGMMODE enmGuestMode)
3206{
3207 Log(("PGMHCChangeMode: Guest mode: %s -> %s\n", PGMGetModeName(pVCpu->pgm.s.enmGuestMode), PGMGetModeName(enmGuestMode)));
3208 STAM_REL_COUNTER_INC(&pVCpu->pgm.s.cGuestModeChanges);
3209
3210 /*
3211 * Calc the shadow mode and switcher.
3212 */
3213 PGMMODE enmShadowMode = pgmCalcShadowMode(pVM, enmGuestMode, pVM->pgm.s.enmHostMode, pVCpu->pgm.s.enmShadowMode);
3214
3215 /*
3216 * Exit old mode(s).
3217 */
3218 /* shadow */
3219 if (enmShadowMode != pVCpu->pgm.s.enmShadowMode)
3220 {
3221 LogFlow(("PGMHCChangeMode: Shadow mode: %s -> %s\n", PGMGetModeName(pVCpu->pgm.s.enmShadowMode), PGMGetModeName(enmShadowMode)));
3222 uintptr_t idxOldShw = pVCpu->pgm.s.idxShadowModeData;
3223 if ( idxOldShw < RT_ELEMENTS(g_aPgmShadowModeData)
3224 && g_aPgmShadowModeData[idxOldShw].pfnExit)
3225 {
3226 int rc = g_aPgmShadowModeData[idxOldShw].pfnExit(pVCpu);
3227 AssertMsgRCReturn(rc, ("Exit failed for shadow mode %d: %Rrc\n", pVCpu->pgm.s.enmShadowMode, rc), rc);
3228 }
3229 }
3230 else
3231 LogFlow(("PGMHCChangeMode: Shadow mode remains: %s\n", PGMGetModeName(pVCpu->pgm.s.enmShadowMode)));
3232
3233 /* guest */
3234 uintptr_t const idxOldGst = pVCpu->pgm.s.idxGuestModeData;
3235 if ( idxOldGst < RT_ELEMENTS(g_aPgmGuestModeData)
3236 && g_aPgmGuestModeData[idxOldGst].pfnExit)
3237 {
3238 int rc = g_aPgmGuestModeData[idxOldGst].pfnExit(pVCpu);
3239 AssertMsgReturn(RT_SUCCESS(rc), ("Exit failed for guest mode %d: %Rrc\n", pVCpu->pgm.s.enmGuestMode, rc), rc);
3240 }
3241 pVCpu->pgm.s.GCPhysCR3 = NIL_RTGCPHYS;
3242
3243 /*
3244 * Change the paging mode data indexes.
3245 */
3246 uintptr_t idxNewGst = pVCpu->pgm.s.idxGuestModeData = pgmModeToType(enmGuestMode);
3247 AssertReturn(idxNewGst < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
3248 AssertReturn(g_aPgmGuestModeData[idxNewGst].uType == idxNewGst, VERR_PGM_MODE_IPE);
3249 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnGetPage, VERR_PGM_MODE_IPE);
3250 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnModifyPage, VERR_PGM_MODE_IPE);
3251 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnExit, VERR_PGM_MODE_IPE);
3252 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnEnter, VERR_PGM_MODE_IPE);
3253#ifdef IN_RING3
3254 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnRelocate, VERR_PGM_MODE_IPE);
3255#endif
3256
3257 uintptr_t const idxNewShw = pVCpu->pgm.s.idxShadowModeData = pgmModeToType(enmShadowMode);
3258 AssertReturn(idxNewShw < RT_ELEMENTS(g_aPgmShadowModeData), VERR_PGM_MODE_IPE);
3259 AssertReturn(g_aPgmShadowModeData[idxNewShw].uType == idxNewShw, VERR_PGM_MODE_IPE);
3260 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnGetPage, VERR_PGM_MODE_IPE);
3261 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnModifyPage, VERR_PGM_MODE_IPE);
3262 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnExit, VERR_PGM_MODE_IPE);
3263 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnEnter, VERR_PGM_MODE_IPE);
3264#ifdef IN_RING3
3265 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnRelocate, VERR_PGM_MODE_IPE);
3266#endif
3267
3268 uintptr_t const idxNewBth = pVCpu->pgm.s.idxBothModeData = (idxNewShw - PGM_TYPE_FIRST_SHADOW) * PGM_TYPE_END + idxNewGst;
3269 AssertReturn(g_aPgmBothModeData[idxNewBth].uShwType == idxNewShw, VERR_PGM_MODE_IPE);
3270 AssertReturn(g_aPgmBothModeData[idxNewBth].uGstType == idxNewGst, VERR_PGM_MODE_IPE);
3271 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnInvalidatePage, VERR_PGM_MODE_IPE);
3272 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnSyncCR3, VERR_PGM_MODE_IPE);
3273 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnPrefetchPage, VERR_PGM_MODE_IPE);
3274 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnVerifyAccessSyncPage, VERR_PGM_MODE_IPE);
3275 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnMapCR3, VERR_PGM_MODE_IPE);
3276 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnUnmapCR3, VERR_PGM_MODE_IPE);
3277 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnEnter, VERR_PGM_MODE_IPE);
3278#ifdef VBOX_STRICT
3279 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnAssertCR3, VERR_PGM_MODE_IPE);
3280#endif
3281
3282 /*
3283 * Enter new shadow mode (if changed).
3284 */
3285 if (enmShadowMode != pVCpu->pgm.s.enmShadowMode)
3286 {
3287 pVCpu->pgm.s.enmShadowMode = enmShadowMode;
3288 int rc = g_aPgmShadowModeData[idxNewShw].pfnEnter(pVCpu, enmGuestMode >= PGMMODE_AMD64);
3289 AssertLogRelMsgRCReturnStmt(rc, ("Entering enmShadowMode=%s failed: %Rrc\n", PGMGetModeName(enmShadowMode), rc),
3290 pVCpu->pgm.s.enmShadowMode = PGMMODE_INVALID, rc);
3291 }
3292
3293 /*
3294 * Always flag the necessary updates.
3295 */
3296 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3297
3298 /*
3299 * Enter the new guest and shadow+guest modes.
3300 */
3301 /* Calc the new CR3 value. */
3302 RTGCPHYS GCPhysCR3;
3303 switch (enmGuestMode)
3304 {
3305 case PGMMODE_REAL:
3306 case PGMMODE_PROTECTED:
3307 GCPhysCR3 = NIL_RTGCPHYS;
3308 break;
3309
3310 case PGMMODE_32_BIT:
3311 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_PAGE_MASK;
3312 break;
3313
3314 case PGMMODE_PAE_NX:
3315 case PGMMODE_PAE:
3316 if (!pVM->cpum.ro.GuestFeatures.fPae)
3317#ifdef IN_RING3 /** @todo r=bird: wrong place, probably hasn't really worked for a while. */
3318 return VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_FATAL, "PAEmode",
3319 N_("The guest is trying to switch to the PAE mode which is currently disabled by default in VirtualBox. PAE support can be enabled using the VM settings (System/Processor)"));
3320#else
3321 AssertLogRelMsgFailedReturn(("enmGuestMode=%s - Try enable PAE for the guest!\n", PGMGetModeName(enmGuestMode)), VERR_PGM_MODE_IPE);
3322
3323#endif
3324 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_PAE_PAGE_MASK;
3325 break;
3326
3327#ifdef VBOX_WITH_64_BITS_GUESTS
3328 case PGMMODE_AMD64_NX:
3329 case PGMMODE_AMD64:
3330 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_AMD64_PAGE_MASK;
3331 break;
3332#endif
3333#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
3334 case PGMMODE_EPT:
3335 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_EPT_PAGE_MASK;
3336 break;
3337#endif
3338 default:
3339 AssertLogRelMsgFailedReturn(("enmGuestMode=%d\n", enmGuestMode), VERR_PGM_MODE_IPE);
3340 }
3341
3342#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
3343 /* Update the guest SLAT mode if it's a nested-guest. */
3344 if (CPUMIsGuestVmxEptPagingEnabled(pVCpu))
3345 {
3346 if (PGMMODE_WITH_PAGING(enmGuestMode))
3347 pVCpu->pgm.s.enmGuestSlatMode = PGMSLAT_EPT;
3348 else
3349 pVCpu->pgm.s.enmGuestSlatMode = PGMSLAT_DIRECT;
3350 }
3351 else
3352 Assert(pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_DIRECT);
3353#endif
3354
3355 /* Enter the new guest mode. */
3356 pVCpu->pgm.s.enmGuestMode = enmGuestMode;
3357 int rc = g_aPgmGuestModeData[idxNewGst].pfnEnter(pVCpu, GCPhysCR3);
3358 int rc2 = g_aPgmBothModeData[idxNewBth].pfnEnter(pVCpu, GCPhysCR3);
3359
3360 /* Set the new guest CR3. */
3361 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
3362
3363 /* status codes. */
3364 AssertRC(rc);
3365 AssertRC(rc2);
3366 if (RT_SUCCESS(rc))
3367 {
3368 rc = rc2;
3369 if (RT_SUCCESS(rc)) /* no informational status codes. */
3370 rc = VINF_SUCCESS;
3371 }
3372
3373 /*
3374 * Notify HM.
3375 */
3376 HMHCChangedPagingMode(pVM, pVCpu, pVCpu->pgm.s.enmShadowMode, pVCpu->pgm.s.enmGuestMode);
3377 return rc;
3378}
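
/*
 * Editor's sketch of a typical caller-side sequence (hypothetical; the real call
 * sites are PGMChangeMode and pgmR3InitPaging as noted in the comment above):
 *
 *   PGMMODE const enmNewMode = PGMMODE_PAE_NX;              // decided by the caller
 *   if (enmNewMode != PGMGetGuestMode(pVCpu))
 *   {
 *       int rc = PGMHCChangeMode(pVM, pVCpu, enmNewMode);
 *       AssertLogRelMsg(RT_SUCCESS(rc), ("switch to %s failed: %Rrc\n",
 *                                        PGMGetModeName(enmNewMode), rc));
 *   }
 */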
3379
3380
3381/**
3382 * Called by CPUM or REM when CR0.WP changes to 1.
3383 *
3384 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3385 * @thread EMT
3386 */
3387VMMDECL(void) PGMCr0WpEnabled(PVMCPUCC pVCpu)
3388{
3389 /*
3390 * Netware WP0+RO+US hack cleanup when WP0 -> WP1.
3391 *
3392 * Use the counter to judge whether there might be pool pages with active
3393 * hacks in them. If there are, we will be running the risk of messing up
3394 * the guest by allowing it to write to read-only pages. Thus, we have to
3395 * clear the page pool ASAP if there is the slightest chance.
3396 */
3397 if (pVCpu->pgm.s.cNetwareWp0Hacks > 0)
3398 {
3399 Assert(pVCpu->CTX_SUFF(pVM)->cCpus == 1);
3400
3401 Log(("PGMCr0WpEnabled: %llu WP0 hacks active - clearing page pool\n", pVCpu->pgm.s.cNetwareWp0Hacks));
3402 pVCpu->pgm.s.cNetwareWp0Hacks = 0;
3403 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
3404 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3405 }
3406}
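
/*
 * Editor's sketch of the expected call-site shape (hypothetical; the real callers
 * live in CPUM when the guest sets CR0.WP):
 *
 *   if (!(uOldCr0 & X86_CR0_WP) && (uNewCr0 & X86_CR0_WP))
 *       PGMCr0WpEnabled(pVCpu);
 */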
3407
3408
3409/**
3410 * Gets the current guest paging mode.
3411 *
3412 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
3413 *
3414 * @returns The current paging mode.
3415 * @param pVCpu The cross context virtual CPU structure.
3416 */
3417VMMDECL(PGMMODE) PGMGetGuestMode(PVMCPU pVCpu)
3418{
3419 return pVCpu->pgm.s.enmGuestMode;
3420}
3421
3422
3423/**
3424 * Gets the current shadow paging mode.
3425 *
3426 * @returns The current paging mode.
3427 * @param pVCpu The cross context virtual CPU structure.
3428 */
3429VMMDECL(PGMMODE) PGMGetShadowMode(PVMCPU pVCpu)
3430{
3431 return pVCpu->pgm.s.enmShadowMode;
3432}
3433
3434
3435/**
3436 * Gets the current host paging mode.
3437 *
3438 * @returns The current paging mode.
3439 * @param pVM The cross context VM structure.
3440 */
3441VMMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
3442{
3443 switch (pVM->pgm.s.enmHostMode)
3444 {
3445 case SUPPAGINGMODE_32_BIT:
3446 case SUPPAGINGMODE_32_BIT_GLOBAL:
3447 return PGMMODE_32_BIT;
3448
3449 case SUPPAGINGMODE_PAE:
3450 case SUPPAGINGMODE_PAE_GLOBAL:
3451 return PGMMODE_PAE;
3452
3453 case SUPPAGINGMODE_PAE_NX:
3454 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3455 return PGMMODE_PAE_NX;
3456
3457 case SUPPAGINGMODE_AMD64:
3458 case SUPPAGINGMODE_AMD64_GLOBAL:
3459 return PGMMODE_AMD64;
3460
3461 case SUPPAGINGMODE_AMD64_NX:
3462 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3463 return PGMMODE_AMD64_NX;
3464
3465 default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
3466 }
3467
3468 return PGMMODE_INVALID;
3469}
3470
3471
3472/**
3473 * Get mode name.
3474 *
3475 * @returns read-only name string.
3476 * @param enmMode The mode whose name is desired.
3477 */
3478VMMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
3479{
3480 switch (enmMode)
3481 {
3482 case PGMMODE_REAL: return "Real";
3483 case PGMMODE_PROTECTED: return "Protected";
3484 case PGMMODE_32_BIT: return "32-bit";
3485 case PGMMODE_PAE: return "PAE";
3486 case PGMMODE_PAE_NX: return "PAE+NX";
3487 case PGMMODE_AMD64: return "AMD64";
3488 case PGMMODE_AMD64_NX: return "AMD64+NX";
3489 case PGMMODE_NESTED_32BIT: return "Nested-32";
3490 case PGMMODE_NESTED_PAE: return "Nested-PAE";
3491 case PGMMODE_NESTED_AMD64: return "Nested-AMD64";
3492 case PGMMODE_EPT: return "EPT";
3493 case PGMMODE_NONE: return "None";
3494 default: return "unknown mode value";
3495 }
3496}
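
/*
 * Editor's example of combining the mode getters above for diagnostics
 * (a sketch only, not taken from this file):
 *
 *   Log(("pgm: guest=%s shadow=%s host=%s\n",
 *        PGMGetModeName(PGMGetGuestMode(pVCpu)),
 *        PGMGetModeName(PGMGetShadowMode(pVCpu)),
 *        PGMGetModeName(PGMGetHostMode(pVM))));
 */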
3497
3498
3499#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
3500/**
3501 * Gets the SLAT mode name.
3502 *
3503 * @returns The read-only SLAT mode descriptive string.
3504 * @param enmSlatMode The SLAT mode value.
3505 */
3506VMM_INT_DECL(const char *) PGMGetSlatModeName(PGMSLAT enmSlatMode)
3507{
3508 switch (enmSlatMode)
3509 {
3510 case PGMSLAT_DIRECT: return "Direct";
3511 case PGMSLAT_EPT: return "EPT";
3512 case PGMSLAT_32BIT: return "32-bit";
3513 case PGMSLAT_PAE: return "PAE";
3514 case PGMSLAT_AMD64: return "AMD64";
3515 default: return "Unknown";
3516 }
3517}
3518#endif
3519
3520
3521/**
3522 * Gets the physical address represented in the guest CR3 as PGM sees it.
3523 *
3524 * This is mainly for logging and debugging.
3525 *
3526 * @returns PGM's guest CR3 value.
3527 * @param pVCpu The cross context virtual CPU structure.
3528 */
3529VMM_INT_DECL(RTGCPHYS) PGMGetGuestCR3Phys(PVMCPU pVCpu)
3530{
3531 return pVCpu->pgm.s.GCPhysCR3;
3532}
3533
3534
3535
3536/**
3537 * Notification from CPUM that the EFER.NXE bit has changed.
3538 *
3539 * @param pVCpu The cross context virtual CPU structure of the CPU for
3540 * which EFER changed.
3541 * @param fNxe The new NXE state.
3542 */
3543VMM_INT_DECL(void) PGMNotifyNxeChanged(PVMCPU pVCpu, bool fNxe)
3544{
3545/** @todo VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu); */
3546 Log(("PGMNotifyNxeChanged: fNxe=%RTbool\n", fNxe));
3547
3548 pVCpu->pgm.s.fNoExecuteEnabled = fNxe;
3549 if (fNxe)
3550 {
3551 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
3552 pVCpu->pgm.s.fGstPaeMbzPteMask &= ~X86_PTE_PAE_NX;
3553 pVCpu->pgm.s.fGstPaeMbzPdeMask &= ~X86_PDE_PAE_NX;
3554 pVCpu->pgm.s.fGstPaeMbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
3555 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask - N/A */
3556 pVCpu->pgm.s.fGstAmd64MbzPteMask &= ~X86_PTE_PAE_NX;
3557 pVCpu->pgm.s.fGstAmd64MbzPdeMask &= ~X86_PDE_PAE_NX;
3558 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
3559 pVCpu->pgm.s.fGstAmd64MbzPdpeMask &= ~X86_PDPE_LM_NX;
3560 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask &= ~X86_PDPE_LM_NX;
3561 pVCpu->pgm.s.fGstAmd64MbzPml4eMask &= ~X86_PML4E_NX;
3562
3563 pVCpu->pgm.s.fGst64ShadowedPteMask |= X86_PTE_PAE_NX;
3564 pVCpu->pgm.s.fGst64ShadowedPdeMask |= X86_PDE_PAE_NX;
3565 pVCpu->pgm.s.fGst64ShadowedBigPdeMask |= X86_PDE2M_PAE_NX;
3566 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask |= X86_PDE2M_PAE_NX;
3567 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask |= X86_PDPE_LM_NX;
3568 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask |= X86_PML4E_NX;
3569 }
3570 else
3571 {
3572 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
3573 pVCpu->pgm.s.fGstPaeMbzPteMask |= X86_PTE_PAE_NX;
3574 pVCpu->pgm.s.fGstPaeMbzPdeMask |= X86_PDE_PAE_NX;
3575 pVCpu->pgm.s.fGstPaeMbzBigPdeMask |= X86_PDE2M_PAE_NX;
3576 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask -N/A */
3577 pVCpu->pgm.s.fGstAmd64MbzPteMask |= X86_PTE_PAE_NX;
3578 pVCpu->pgm.s.fGstAmd64MbzPdeMask |= X86_PDE_PAE_NX;
3579 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask |= X86_PDE2M_PAE_NX;
3580 pVCpu->pgm.s.fGstAmd64MbzPdpeMask |= X86_PDPE_LM_NX;
3581 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask |= X86_PDPE_LM_NX;
3582 pVCpu->pgm.s.fGstAmd64MbzPml4eMask |= X86_PML4E_NX;
3583
3584 pVCpu->pgm.s.fGst64ShadowedPteMask &= ~X86_PTE_PAE_NX;
3585 pVCpu->pgm.s.fGst64ShadowedPdeMask &= ~X86_PDE_PAE_NX;
3586 pVCpu->pgm.s.fGst64ShadowedBigPdeMask &= ~X86_PDE2M_PAE_NX;
3587 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask &= ~X86_PDE2M_PAE_NX;
3588 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask &= ~X86_PDPE_LM_NX;
3589 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask &= ~X86_PML4E_NX;
3590 }
3591}
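
/*
 * Editor's sketch of how one of these must-be-zero masks is typically consumed
 * during a guest page walk (uGstPaePte is a hypothetical PTE value read from the
 * guest page table; the real checks live in the PGMAllGst/PGMAllBth templates):
 *
 *   bool const fRsvdBitFault = RT_BOOL(uGstPaePte & pVCpu->pgm.s.fGstPaeMbzPteMask);
 *   // true when a reserved bit is set, e.g. the NX bit while EFER.NXE is clear,
 *   // in which case the walk must fail with a reserved-bit page fault.
 */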
3592
3593
3594/**
3595 * Check if any pgm pool pages are marked dirty (not monitored).
3596 *
3597 * @returns true if dirty pages are present, false otherwise.
3598 * @param pVM The cross context VM structure.
3599 */
3600VMMDECL(bool) PGMHasDirtyPages(PVM pVM)
3601{
3602 return pVM->pgm.s.CTX_SUFF(pPool)->cDirtyPages != 0;
3603}
3604
3605
3606/**
3607 * Check if this VCPU currently owns the PGM lock.
3608 *
3609 * @returns true if this VCPU owns the lock, false otherwise.
3610 * @param pVM The cross context VM structure.
3611 */
3612VMMDECL(bool) PGMIsLockOwner(PVMCC pVM)
3613{
3614 return PDMCritSectIsOwner(pVM, &pVM->pgm.s.CritSectX);
3615}
3616
3617
3618/**
3619 * Enable or disable large page usage.
3620 *
3621 * @returns VBox status code.
3622 * @param pVM The cross context VM structure.
3623 * @param fUseLargePages Whether to use large pages.
3624 */
3625VMMDECL(int) PGMSetLargePageUsage(PVMCC pVM, bool fUseLargePages)
3626{
3627 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3628
3629 pVM->pgm.s.fUseLargePages = fUseLargePages;
3630 return VINF_SUCCESS;
3631}
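
/*
 * Editor's example (a sketch; the actual configuration plumbing lives in the
 * ring-3 setup code):
 *
 *   int rc = PGMSetLargePageUsage(pVM, true);   // enable large page backing
 *   AssertRC(rc);
 */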
3632
3633
3634/**
3635 * Acquire the PGM lock.
3636 *
3637 * @returns VBox status code
3638 * @param pVM The cross context VM structure.
3639 * @param fVoid Set if the caller cannot handle failure returns.
3640 * @param SRC_POS The source position of the caller (RT_SRC_POS).
3641 */
3642#if defined(VBOX_STRICT) || defined(DOXYGEN_RUNNING)
3643int pgmLockDebug(PVMCC pVM, bool fVoid, RT_SRC_POS_DECL)
3644#else
3645int pgmLock(PVMCC pVM, bool fVoid)
3646#endif
3647{
3648#if defined(VBOX_STRICT)
3649 int rc = PDMCritSectEnterDebug(pVM, &pVM->pgm.s.CritSectX, VINF_SUCCESS, (uintptr_t)ASMReturnAddress(), RT_SRC_POS_ARGS);
3650#else
3651 int rc = PDMCritSectEnter(pVM, &pVM->pgm.s.CritSectX, VINF_SUCCESS);
3652#endif
3653 if (RT_SUCCESS(rc))
3654 return rc;
3655 if (fVoid)
3656 PDM_CRITSECT_RELEASE_ASSERT_RC(pVM, &pVM->pgm.s.CritSectX, rc);
3657 else
3658 AssertRC(rc);
3659 return rc;
3660}
3661
3662
3663/**
3664 * Release the PGM lock.
3665 *
3667 * @param pVM The cross context VM structure.
3668 */
3669void pgmUnlock(PVMCC pVM)
3670{
3671 uint32_t cDeprecatedPageLocks = pVM->pgm.s.cDeprecatedPageLocks;
3672 pVM->pgm.s.cDeprecatedPageLocks = 0;
3673 int rc = PDMCritSectLeave(pVM, &pVM->pgm.s.CritSectX);
3674 if (rc == VINF_SEM_NESTED)
3675 pVM->pgm.s.cDeprecatedPageLocks = cDeprecatedPageLocks;
3676}
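
/*
 * Editor's note on usage: callers go through the PGM_LOCK_VOID and PGM_UNLOCK
 * wrapper macros rather than calling these directly, as PGMAssertCR3 and
 * PGMSetGuestEptPtr do further down. A minimal sketch:
 *
 *   PGM_LOCK_VOID(pVM);
 *   ... access pVM->pgm.s state ...
 *   PGM_UNLOCK(pVM);
 */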
3677
3678
3679#if !defined(IN_R0) || defined(LOG_ENABLED)
3680
3681/** Format handler for PGMPAGE.
3682 * @copydoc FNRTSTRFORMATTYPE */
3683static DECLCALLBACK(size_t) pgmFormatTypeHandlerPage(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
3684 const char *pszType, void const *pvValue,
3685 int cchWidth, int cchPrecision, unsigned fFlags,
3686 void *pvUser)
3687{
3688 size_t cch;
3689 PCPGMPAGE pPage = (PCPGMPAGE)pvValue;
3690 if (RT_VALID_PTR(pPage))
3691 {
3692 char szTmp[64+80];
3693
3694 cch = 0;
3695
3696 /* The single char state stuff. */
3697 static const char s_achPageStates[4] = { 'Z', 'A', 'W', 'S' };
3698 szTmp[cch++] = s_achPageStates[PGM_PAGE_GET_STATE_NA(pPage)];
3699
3700# define IS_PART_INCLUDED(lvl) ( !(fFlags & RTSTR_F_PRECISION) || cchPrecision == (lvl) || cchPrecision >= (lvl)+10 )
3701 if (IS_PART_INCLUDED(5))
3702 {
3703 static const char s_achHandlerStates[4] = { '-', 't', 'w', 'a' };
3704 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)];
3705 }
3706
3707 /* The type. */
3708 if (IS_PART_INCLUDED(4))
3709 {
3710 szTmp[cch++] = ':';
3711 static const char s_achPageTypes[8][4] = { "INV", "RAM", "MI2", "M2A", "SHA", "ROM", "MIO", "BAD" };
3712 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][0];
3713 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][1];
3714 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][2];
3715 }
3716
3717 /* The numbers. */
3718 if (IS_PART_INCLUDED(3))
3719 {
3720 szTmp[cch++] = ':';
3721 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_HCPHYS_NA(pPage), 16, 12, 0, RTSTR_F_ZEROPAD | RTSTR_F_64BIT);
3722 }
3723
3724 if (IS_PART_INCLUDED(2))
3725 {
3726 szTmp[cch++] = ':';
3727 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_PAGEID(pPage), 16, 7, 0, RTSTR_F_ZEROPAD | RTSTR_F_32BIT);
3728 }
3729
3730 if (IS_PART_INCLUDED(6))
3731 {
3732 szTmp[cch++] = ':';
3733 static const char s_achRefs[4] = { '-', 'U', '!', 'L' };
3734 szTmp[cch++] = s_achRefs[PGM_PAGE_GET_TD_CREFS_NA(pPage)];
3735 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_TD_IDX_NA(pPage), 16, 4, 0, RTSTR_F_ZEROPAD | RTSTR_F_16BIT);
3736 }
3737# undef IS_PART_INCLUDED
3738
3739 cch = pfnOutput(pvArgOutput, szTmp, cch);
3740 }
3741 else
3742 cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmpage-ptr>"));
3743 NOREF(pszType); NOREF(cchWidth); NOREF(pvUser);
3744 return cch;
3745}
3746
3747
3748/** Format handler for PGMRAMRANGE.
3749 * @copydoc FNRTSTRFORMATTYPE */
3750static DECLCALLBACK(size_t) pgmFormatTypeHandlerRamRange(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
3751 const char *pszType, void const *pvValue,
3752 int cchWidth, int cchPrecision, unsigned fFlags,
3753 void *pvUser)
3754{
3755 size_t cch;
3756 PGMRAMRANGE const *pRam = (PGMRAMRANGE const *)pvValue;
3757 if (RT_VALID_PTR(pRam))
3758 {
3759 char szTmp[80];
3760 cch = RTStrPrintf(szTmp, sizeof(szTmp), "%RGp-%RGp", pRam->GCPhys, pRam->GCPhysLast);
3761 cch = pfnOutput(pvArgOutput, szTmp, cch);
3762 }
3763 else
3764 cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmramrange-ptr>"));
3765 NOREF(pszType); NOREF(cchWidth); NOREF(cchPrecision); NOREF(pvUser); NOREF(fFlags);
3766 return cch;
3767}
3768
3769/** Format type handlers to be registered/deregistered. */
3770static const struct
3771{
3772 char szType[24];
3773 PFNRTSTRFORMATTYPE pfnHandler;
3774} g_aPgmFormatTypes[] =
3775{
3776 { "pgmpage", pgmFormatTypeHandlerPage },
3777 { "pgmramrange", pgmFormatTypeHandlerRamRange }
3778};
3779
3780#endif /* !IN_R0 || LOG_ENABLED */
3781
3782/**
3783 * Registers the global string format types.
3784 *
3785 * This should be called at module load time or in some other manner that ensures
3786 * that it's called exactly once.
3787 *
3788 * @returns IPRT status code on RTStrFormatTypeRegister failure.
3789 */
3790VMMDECL(int) PGMRegisterStringFormatTypes(void)
3791{
3792#if !defined(IN_R0) || defined(LOG_ENABLED)
3793 int rc = VINF_SUCCESS;
3794 unsigned i;
3795 for (i = 0; RT_SUCCESS(rc) && i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
3796 {
3797 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
3798# ifdef IN_RING0
3799 if (rc == VERR_ALREADY_EXISTS)
3800 {
3801 /* in case of cleanup failure in ring-0 */
3802 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
3803 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
3804 }
3805# endif
3806 }
3807 if (RT_FAILURE(rc))
3808 while (i-- > 0)
3809 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
3810
3811 return rc;
3812#else
3813 return VINF_SUCCESS;
3814#endif
3815}
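
/*
 * Editor's sketch: once registered, the custom types above are consumed through
 * the standard IPRT %R[type] format specifier, e.g.:
 *
 *   Log(("page %R[pgmpage] in range %R[pgmramrange]\n", pPage, pRam));
 */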
3816
3817
3818/**
3819 * Deregisters the global string format types.
3820 *
3821 * This should be called at module unload time or in some other manner that
3822 * ensures that it's called exactly once.
3823 */
3824VMMDECL(void) PGMDeregisterStringFormatTypes(void)
3825{
3826#if !defined(IN_R0) || defined(LOG_ENABLED)
3827 for (unsigned i = 0; i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
3828 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
3829#endif
3830}
3831
3832
3833#ifdef VBOX_STRICT
3834/**
3835 * Asserts that everything related to the guest CR3 is correctly shadowed.
3836 *
3837 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
3838 * and assert the correctness of the guest CR3 mapping before asserting that the
3839 * shadow page tables are in sync with the guest page tables.
3840 *
3841 * @returns Number of conflicts.
3842 * @param pVM The cross context VM structure.
3843 * @param pVCpu The cross context virtual CPU structure.
3844 * @param cr3 The current guest CR3 register value.
3845 * @param cr4 The current guest CR4 register value.
3846 */
3847VMMDECL(unsigned) PGMAssertCR3(PVMCC pVM, PVMCPUCC pVCpu, uint64_t cr3, uint64_t cr4)
3848{
3849 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
3850
3851 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
3852 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), -VERR_PGM_MODE_IPE);
3853 AssertReturn(g_aPgmBothModeData[idxBth].pfnAssertCR3, -VERR_PGM_MODE_IPE);
3854
3855 PGM_LOCK_VOID(pVM);
3856 unsigned cErrors = g_aPgmBothModeData[idxBth].pfnAssertCR3(pVCpu, cr3, cr4, 0, ~(RTGCPTR)0);
3857 PGM_UNLOCK(pVM);
3858
3859 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
3860 return cErrors;
3861}
3862#endif /* VBOX_STRICT */
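
/*
 * Editor's sketch of a strict-build consistency check (hypothetical call site):
 *
 * #ifdef VBOX_STRICT
 *   unsigned cErrors = PGMAssertCR3(pVM, pVCpu, CPUMGetGuestCR3(pVCpu), CPUMGetGuestCR4(pVCpu));
 *   Assert(!cErrors);
 * #endif
 */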
3863
3864
3865/**
3866 * Updates PGM's copy of the guest's EPT pointer.
3867 *
3868 * @param pVCpu The cross context virtual CPU structure.
3869 * @param uEptPtr The EPT pointer.
3870 *
3871 * @remarks This can be called as part of VM-entry so we might be in the midst of
3872 * switching to VMX non-root mode.
3873 */
3874VMM_INT_DECL(void) PGMSetGuestEptPtr(PVMCPUCC pVCpu, uint64_t uEptPtr)
3875{
3876 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3877 PGM_LOCK_VOID(pVM);
3878 if (pVCpu->pgm.s.uEptPtr != uEptPtr)
3879 {
3880 pVCpu->pgm.s.uEptPtr = uEptPtr;
3881 pVCpu->pgm.s.pGstEptPml4R0 = NIL_RTR0PTR;
3882 pVCpu->pgm.s.pGstEptPml4R3 = NIL_RTR3PTR;
3883 }
3884 PGM_UNLOCK(pVM);
3885}
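
/*
 * Editor's sketch of the nested VT-x entry side (hypothetical; the exact VMCS
 * field accessor is assumed, not taken from this file):
 *
 *   uint64_t const uEptPtr = ...read EPTP from the nested-guest VMCS...;
 *   PGMSetGuestEptPtr(pVCpu, uEptPtr);
 */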
3886