VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp@91634

Last change on this file since 91634 was 91580, checked in by vboxsync, 4 years ago:

VMM: Nested VMX: bugref:10092 Made changes to PGM++ to handle invalid PAE PDPEs being loaded.

/* $Id: PGMAll.cpp 91580 2021-10-06 07:22:04Z vboxsync $ */
/** @file
 * PGM - Page Manager and Monitor - All context code.
 */

/*
 * Copyright (C) 2006-2020 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/selm.h>
#include <VBox/vmm/iem.h>
#include <VBox/vmm/iom.h>
#include <VBox/sup.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/stam.h>
#include <VBox/vmm/trpm.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/hm_vmx.h>
#include "PGMInternal.h"
#include <VBox/vmm/vmcc.h>
#include "PGMInline.h"
#include <iprt/assert.h>
#include <iprt/asm-amd64-x86.h>
#include <iprt/string.h>
#include <VBox/log.h>
#include <VBox/param.h>
#include <VBox/err.h>


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde);
static int pgmShwSyncLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD);
static int pgmShwGetEPTPDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD);

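/*
 * Template code instantiation (explanatory note).
 *
 * PGMAllShw.h, PGMAllGst.h and PGMAllBth.h below are template headers that
 * are included once per shadow/guest paging mode combination.  The
 * PGM_SHW_*, PGM_GST_* and PGM_BTH_* macros select the mode specific type
 * and function names for each instantiation.
 */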
/*
 * Shadow - 32-bit mode
 */
#define PGM_SHW_TYPE PGM_TYPE_32BIT
#define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE PGM_TYPE_REAL
#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
#include "PGMGstDefs.h"
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE PGM_TYPE_PROT
#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
#include "PGMGstDefs.h"
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode */
#define PGM_GST_TYPE PGM_TYPE_32BIT
#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD
#include "PGMGstDefs.h"
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME


/*
 * Shadow - PAE mode
 */
#define PGM_SHW_TYPE PGM_TYPE_PAE
#define PGM_SHW_NAME(name) PGM_SHW_NAME_PAE(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE PGM_TYPE_REAL
#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE PGM_TYPE_PROT
#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode */
#define PGM_GST_TYPE PGM_TYPE_32BIT
#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_FOR_32BIT
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME


/* Guest - PAE mode */
#define PGM_GST_TYPE PGM_TYPE_PAE
#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT
#include "PGMGstDefs.h"
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME


/*
 * Shadow - AMD64 mode
 */
#define PGM_SHW_TYPE PGM_TYPE_AMD64
#define PGM_SHW_NAME(name) PGM_SHW_NAME_AMD64(name)
#include "PGMAllShw.h"

/* Guest - protected mode (only used for AMD-V nested paging in 64 bits mode) */
/** @todo retire this hack. */
#define PGM_GST_TYPE PGM_TYPE_PROT
#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PD_PHYS
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef BTH_PGMPOOLKIND_ROOT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#ifdef VBOX_WITH_64_BITS_GUESTS
/* Guest - AMD64 mode */
# define PGM_GST_TYPE PGM_TYPE_AMD64
# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
# define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_64BIT_PML4
# include "PGMGstDefs.h"
# include "PGMAllGst.h"
# include "PGMAllBth.h"
# undef BTH_PGMPOOLKIND_PT_FOR_BIG
# undef BTH_PGMPOOLKIND_PT_FOR_PT
# undef BTH_PGMPOOLKIND_ROOT
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME
#endif /* VBOX_WITH_64_BITS_GUESTS */

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME


/*
 * Shadow - 32-bit nested paging mode.
 */
#define PGM_SHW_TYPE PGM_TYPE_NESTED_32BIT
#define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED_32BIT(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE PGM_TYPE_REAL
#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_REAL(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE PGM_TYPE_PROT
#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_PROT(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode */
#define PGM_GST_TYPE PGM_TYPE_32BIT
#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_32BIT(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - PAE mode */
#define PGM_GST_TYPE PGM_TYPE_PAE
#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_PAE(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#ifdef VBOX_WITH_64_BITS_GUESTS
/* Guest - AMD64 mode */
# define PGM_GST_TYPE PGM_TYPE_AMD64
# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_AMD64(name)
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME
#endif /* VBOX_WITH_64_BITS_GUESTS */

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME


/*
 * Shadow - PAE nested paging mode.
 */
#define PGM_SHW_TYPE PGM_TYPE_NESTED_PAE
#define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED_PAE(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE PGM_TYPE_REAL
#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_REAL(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE PGM_TYPE_PROT
#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_PROT(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode */
#define PGM_GST_TYPE PGM_TYPE_32BIT
#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_32BIT(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - PAE mode */
#define PGM_GST_TYPE PGM_TYPE_PAE
#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_PAE(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#ifdef VBOX_WITH_64_BITS_GUESTS
/* Guest - AMD64 mode */
# define PGM_GST_TYPE PGM_TYPE_AMD64
# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_AMD64(name)
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME
#endif /* VBOX_WITH_64_BITS_GUESTS */

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME


/*
 * Shadow - AMD64 nested paging mode.
 */
#define PGM_SHW_TYPE PGM_TYPE_NESTED_AMD64
#define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED_AMD64(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE PGM_TYPE_REAL
#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_REAL(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE PGM_TYPE_PROT
#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_PROT(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode */
#define PGM_GST_TYPE PGM_TYPE_32BIT
#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_32BIT(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - PAE mode */
#define PGM_GST_TYPE PGM_TYPE_PAE
#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_PAE(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#ifdef VBOX_WITH_64_BITS_GUESTS
/* Guest - AMD64 mode */
# define PGM_GST_TYPE PGM_TYPE_AMD64
# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_AMD64(name)
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME
#endif /* VBOX_WITH_64_BITS_GUESTS */

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME


/*
 * Shadow - EPT.
 */
#define PGM_SHW_TYPE PGM_TYPE_EPT
#define PGM_SHW_NAME(name) PGM_SHW_NAME_EPT(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE PGM_TYPE_REAL
#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_REAL(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE PGM_TYPE_PROT
#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode */
#define PGM_GST_TYPE PGM_TYPE_32BIT
#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_32BIT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - PAE mode */
#define PGM_GST_TYPE PGM_TYPE_PAE
#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PAE(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#ifdef VBOX_WITH_64_BITS_GUESTS
/* Guest - AMD64 mode */
# define PGM_GST_TYPE PGM_TYPE_AMD64
# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_AMD64(name)
# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
# undef BTH_PGMPOOLKIND_PT_FOR_PT
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME
#endif /* VBOX_WITH_64_BITS_GUESTS */

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME


/*
 * Shadow - NEM / None.
 */
#define PGM_SHW_TYPE PGM_TYPE_NONE
#define PGM_SHW_NAME(name) PGM_SHW_NAME_NONE(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE PGM_TYPE_REAL
#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_REAL(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE PGM_TYPE_PROT
#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_PROT(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode */
#define PGM_GST_TYPE PGM_TYPE_32BIT
#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_32BIT(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - PAE mode */
#define PGM_GST_TYPE PGM_TYPE_PAE
#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_PAE(name)
#include "PGMGstDefs.h"
#include "PGMAllBth.h"
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#ifdef VBOX_WITH_64_BITS_GUESTS
/* Guest - AMD64 mode */
# define PGM_GST_TYPE PGM_TYPE_AMD64
# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
# define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_AMD64(name)
# include "PGMGstDefs.h"
# include "PGMAllBth.h"
# undef PGM_BTH_NAME
# undef PGM_GST_TYPE
# undef PGM_GST_NAME
#endif /* VBOX_WITH_64_BITS_GUESTS */

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME


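/*
 * Mode data tables (explanatory note).
 *
 * The three arrays below hold the guest, shadow and combined (both) mode
 * specific method tables.  At runtime PGM dispatches through them using the
 * mode indexes kept in the VCPU PGM state (e.g. idxShadowModeData and
 * idxBothModeData, used further down), so a paging mode switch only has to
 * update an index instead of a set of function pointers.
 */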
/**
 * Guest mode data array.
 */
PGMMODEDATAGST const g_aPgmGuestModeData[PGM_GUEST_MODE_DATA_ARRAY_SIZE] =
{
    { UINT32_MAX, NULL, NULL, NULL, NULL, NULL }, /* 0 */
    {
        PGM_TYPE_REAL,
        PGM_GST_NAME_REAL(GetPage),
        PGM_GST_NAME_REAL(ModifyPage),
        PGM_GST_NAME_REAL(GetPDE),
        PGM_GST_NAME_REAL(Enter),
        PGM_GST_NAME_REAL(Exit),
#ifdef IN_RING3
        PGM_GST_NAME_REAL(Relocate),
#endif
    },
    {
        PGM_TYPE_PROT,
        PGM_GST_NAME_PROT(GetPage),
        PGM_GST_NAME_PROT(ModifyPage),
        PGM_GST_NAME_PROT(GetPDE),
        PGM_GST_NAME_PROT(Enter),
        PGM_GST_NAME_PROT(Exit),
#ifdef IN_RING3
        PGM_GST_NAME_PROT(Relocate),
#endif
    },
    {
        PGM_TYPE_32BIT,
        PGM_GST_NAME_32BIT(GetPage),
        PGM_GST_NAME_32BIT(ModifyPage),
        PGM_GST_NAME_32BIT(GetPDE),
        PGM_GST_NAME_32BIT(Enter),
        PGM_GST_NAME_32BIT(Exit),
#ifdef IN_RING3
        PGM_GST_NAME_32BIT(Relocate),
#endif
    },
    {
        PGM_TYPE_PAE,
        PGM_GST_NAME_PAE(GetPage),
        PGM_GST_NAME_PAE(ModifyPage),
        PGM_GST_NAME_PAE(GetPDE),
        PGM_GST_NAME_PAE(Enter),
        PGM_GST_NAME_PAE(Exit),
#ifdef IN_RING3
        PGM_GST_NAME_PAE(Relocate),
#endif
    },
#ifdef VBOX_WITH_64_BITS_GUESTS
    {
        PGM_TYPE_AMD64,
        PGM_GST_NAME_AMD64(GetPage),
        PGM_GST_NAME_AMD64(ModifyPage),
        PGM_GST_NAME_AMD64(GetPDE),
        PGM_GST_NAME_AMD64(Enter),
        PGM_GST_NAME_AMD64(Exit),
# ifdef IN_RING3
        PGM_GST_NAME_AMD64(Relocate),
# endif
    },
#endif
};


/**
 * The shadow mode data array.
 */
PGMMODEDATASHW const g_aPgmShadowModeData[PGM_SHADOW_MODE_DATA_ARRAY_SIZE] =
{
    { UINT8_MAX, NULL, NULL, NULL, NULL }, /* 0 */
    { UINT8_MAX, NULL, NULL, NULL, NULL }, /* PGM_TYPE_REAL */
    { UINT8_MAX, NULL, NULL, NULL, NULL }, /* PGM_TYPE_PROT */
    {
        PGM_TYPE_32BIT,
        PGM_SHW_NAME_32BIT(GetPage),
        PGM_SHW_NAME_32BIT(ModifyPage),
        PGM_SHW_NAME_32BIT(Enter),
        PGM_SHW_NAME_32BIT(Exit),
#ifdef IN_RING3
        PGM_SHW_NAME_32BIT(Relocate),
#endif
    },
    {
        PGM_TYPE_PAE,
        PGM_SHW_NAME_PAE(GetPage),
        PGM_SHW_NAME_PAE(ModifyPage),
        PGM_SHW_NAME_PAE(Enter),
        PGM_SHW_NAME_PAE(Exit),
#ifdef IN_RING3
        PGM_SHW_NAME_PAE(Relocate),
#endif
    },
    {
        PGM_TYPE_AMD64,
        PGM_SHW_NAME_AMD64(GetPage),
        PGM_SHW_NAME_AMD64(ModifyPage),
        PGM_SHW_NAME_AMD64(Enter),
        PGM_SHW_NAME_AMD64(Exit),
#ifdef IN_RING3
        PGM_SHW_NAME_AMD64(Relocate),
#endif
    },
    {
        PGM_TYPE_NESTED_32BIT,
        PGM_SHW_NAME_NESTED_32BIT(GetPage),
        PGM_SHW_NAME_NESTED_32BIT(ModifyPage),
        PGM_SHW_NAME_NESTED_32BIT(Enter),
        PGM_SHW_NAME_NESTED_32BIT(Exit),
#ifdef IN_RING3
        PGM_SHW_NAME_NESTED_32BIT(Relocate),
#endif
    },
    {
        PGM_TYPE_NESTED_PAE,
        PGM_SHW_NAME_NESTED_PAE(GetPage),
        PGM_SHW_NAME_NESTED_PAE(ModifyPage),
        PGM_SHW_NAME_NESTED_PAE(Enter),
        PGM_SHW_NAME_NESTED_PAE(Exit),
#ifdef IN_RING3
        PGM_SHW_NAME_NESTED_PAE(Relocate),
#endif
    },
    {
        PGM_TYPE_NESTED_AMD64,
        PGM_SHW_NAME_NESTED_AMD64(GetPage),
        PGM_SHW_NAME_NESTED_AMD64(ModifyPage),
        PGM_SHW_NAME_NESTED_AMD64(Enter),
        PGM_SHW_NAME_NESTED_AMD64(Exit),
#ifdef IN_RING3
        PGM_SHW_NAME_NESTED_AMD64(Relocate),
#endif
    },
    {
        PGM_TYPE_EPT,
        PGM_SHW_NAME_EPT(GetPage),
        PGM_SHW_NAME_EPT(ModifyPage),
        PGM_SHW_NAME_EPT(Enter),
        PGM_SHW_NAME_EPT(Exit),
#ifdef IN_RING3
        PGM_SHW_NAME_EPT(Relocate),
#endif
    },
    {
        PGM_TYPE_NONE,
        PGM_SHW_NAME_NONE(GetPage),
        PGM_SHW_NAME_NONE(ModifyPage),
        PGM_SHW_NAME_NONE(Enter),
        PGM_SHW_NAME_NONE(Exit),
#ifdef IN_RING3
        PGM_SHW_NAME_NONE(Relocate),
#endif
    },
};


/**
 * The guest+shadow mode data array.
 */
PGMMODEDATABTH const g_aPgmBothModeData[PGM_BOTH_MODE_DATA_ARRAY_SIZE] =
{
#if !defined(IN_RING3) && !defined(VBOX_STRICT)
# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
    { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(Trap0eHandler) }

#elif !defined(IN_RING3) && defined(VBOX_STRICT)
# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
    { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(Trap0eHandler), Nm(AssertCR3) }

#elif defined(IN_RING3) && !defined(VBOX_STRICT)
# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL }
# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
    { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), }

#elif defined(IN_RING3) && defined(VBOX_STRICT)
# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
    { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(AssertCR3) }

#else
# error "Misconfig."
#endif

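    /* Note: one block of guest-mode entries per shadow mode follows.  The
       table is indexed via pVCpu->pgm.s.idxBothModeData, which is derived
       from the active shadow and guest paging mode types. */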
    /* 32-bit shadow paging mode: */
    PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
    PGMMODEDATABTH_ENTRY(PGM_TYPE_32BIT, PGM_TYPE_REAL, PGM_BTH_NAME_32BIT_REAL),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_32BIT, PGM_TYPE_PROT, PGM_BTH_NAME_32BIT_PROT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_32BIT, PGM_TYPE_32BIT, PGM_BTH_NAME_32BIT_32BIT),
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NESTED_32BIT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NESTED_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NESTED_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_EPT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NONE - illegal */

    /* PAE shadow paging mode: */
    PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
    PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_REAL, PGM_BTH_NAME_PAE_REAL),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_PROT, PGM_BTH_NAME_PAE_PROT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_32BIT, PGM_BTH_NAME_PAE_32BIT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_PAE, PGM_BTH_NAME_PAE_PAE),
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NESTED_32BIT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NESTED_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NESTED_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_EPT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NONE - illegal */

    /* AMD64 shadow paging mode: */
    PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
    PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_REAL, PGM_BTH_NAME_AMD64_REAL),
    PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_PROT, PGM_BTH_NAME_AMD64_PROT),
    PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_32BIT, PGM_BTH_NAME_AMD64_32BIT),
    PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_PAE, PGM_BTH_NAME_AMD64_PAE),
#ifdef VBOX_WITH_64_BITS_GUESTS
    PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_AMD64, PGM_BTH_NAME_AMD64_AMD64),
#else
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_AMD64 - illegal */
#endif
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NESTED_32BIT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NESTED_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NESTED_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_EPT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NONE - illegal */

    /* 32-bit nested paging mode: */
    PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_REAL, PGM_BTH_NAME_NESTED_32BIT_REAL),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_PROT, PGM_BTH_NAME_NESTED_32BIT_PROT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_32BIT, PGM_BTH_NAME_NESTED_32BIT_32BIT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_PAE, PGM_BTH_NAME_NESTED_32BIT_PAE),
#ifdef VBOX_WITH_64_BITS_GUESTS
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_AMD64, PGM_BTH_NAME_NESTED_32BIT_AMD64),
#else
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_AMD64 - illegal */
#endif
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NESTED_32BIT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NESTED_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NESTED_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_EPT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NONE - illegal */

    /* PAE nested paging mode: */
    PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_REAL, PGM_BTH_NAME_NESTED_PAE_REAL),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_PROT, PGM_BTH_NAME_NESTED_PAE_PROT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_32BIT, PGM_BTH_NAME_NESTED_PAE_32BIT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_PAE, PGM_BTH_NAME_NESTED_PAE_PAE),
#ifdef VBOX_WITH_64_BITS_GUESTS
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_AMD64, PGM_BTH_NAME_NESTED_PAE_AMD64),
#else
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_AMD64 - illegal */
#endif
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NESTED_32BIT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NESTED_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NESTED_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_EPT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NONE - illegal */

    /* AMD64 nested paging mode: */
    PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_REAL, PGM_BTH_NAME_NESTED_AMD64_REAL),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_PROT, PGM_BTH_NAME_NESTED_AMD64_PROT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_32BIT, PGM_BTH_NAME_NESTED_AMD64_32BIT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_PAE, PGM_BTH_NAME_NESTED_AMD64_PAE),
#ifdef VBOX_WITH_64_BITS_GUESTS
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_AMD64, PGM_BTH_NAME_NESTED_AMD64_AMD64),
#else
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_AMD64 - illegal */
#endif
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NESTED_32BIT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NESTED_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NESTED_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_EPT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NONE - illegal */

    /* EPT nested paging mode: */
    PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
    PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_REAL, PGM_BTH_NAME_EPT_REAL),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_PROT, PGM_BTH_NAME_EPT_PROT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_32BIT, PGM_BTH_NAME_EPT_32BIT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_PAE, PGM_BTH_NAME_EPT_PAE),
#ifdef VBOX_WITH_64_BITS_GUESTS
    PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_AMD64, PGM_BTH_NAME_EPT_AMD64),
#else
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_AMD64 - illegal */
#endif
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NESTED_32BIT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NESTED_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NESTED_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_EPT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NONE - illegal */

    /* NONE / NEM: */
    PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_REAL, PGM_BTH_NAME_EPT_REAL),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_PROT, PGM_BTH_NAME_EPT_PROT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_32BIT, PGM_BTH_NAME_EPT_32BIT),
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_PAE, PGM_BTH_NAME_EPT_PAE),
#ifdef VBOX_WITH_64_BITS_GUESTS
    PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_AMD64, PGM_BTH_NAME_EPT_AMD64),
#else
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_AMD64 - illegal */
#endif
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_32BIT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_PAE - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_AMD64 - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_EPT - illegal */
    PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NONE - illegal */


#undef PGMMODEDATABTH_ENTRY
#undef PGMMODEDATABTH_NULL_ENTRY
};


#ifdef IN_RING0
/**
 * #PF Handler.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param pVCpu The cross context virtual CPU structure.
 * @param uErr The trap error code.
 * @param pRegFrame Trap register frame.
 * @param pvFault The fault address.
 */
VMMDECL(int) PGMTrap0eHandler(PVMCPUCC pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
{
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);

    Log(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv eip=%04x:%RGv cr3=%RGp\n", uErr, pvFault, pRegFrame->cs.Sel, (RTGCPTR)pRegFrame->rip, (RTGCPHYS)CPUMGetGuestCR3(pVCpu)));
    STAM_PROFILE_START(&pVCpu->pgm.s.Stats.StatRZTrap0e, a);
    STAM_STATS({ pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = NULL; } );


# ifdef VBOX_WITH_STATISTICS
    /*
     * Error code stats.
     */
    if (uErr & X86_TRAP_PF_US)
    {
        if (!(uErr & X86_TRAP_PF_P))
        {
            if (uErr & X86_TRAP_PF_RW)
                STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSNotPresentWrite);
            else
                STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSNotPresentRead);
        }
        else if (uErr & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSWrite);
        else if (uErr & X86_TRAP_PF_RSVD)
            STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSReserved);
        else if (uErr & X86_TRAP_PF_ID)
            STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSNXE);
        else
            STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSRead);
    }
    else
    {   /* Supervisor */
        if (!(uErr & X86_TRAP_PF_P))
        {
            if (uErr & X86_TRAP_PF_RW)
                STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVNotPresentWrite);
            else
                STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVNotPresentRead);
        }
        else if (uErr & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVWrite);
        else if (uErr & X86_TRAP_PF_ID)
            STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSNXE);
        else if (uErr & X86_TRAP_PF_RSVD)
            STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVReserved);
    }
# endif /* VBOX_WITH_STATISTICS */

    /*
     * Call the worker.
     */
    uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
    AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
    AssertReturn(g_aPgmBothModeData[idxBth].pfnTrap0eHandler, VERR_PGM_MODE_IPE);
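    /* Note: the mode-specific worker may return with the PGM lock taken;
       fLockTaken tells us whether we need to release it here. */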
    bool fLockTaken = false;
    int rc = g_aPgmBothModeData[idxBth].pfnTrap0eHandler(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken);
    if (fLockTaken)
    {
        PGM_LOCK_ASSERT_OWNER(pVM);
        PGM_UNLOCK(pVM);
    }
    LogFlow(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv rc=%Rrc\n", uErr, pvFault, rc));

    /*
     * Return code tweaks.
     */
    if (rc != VINF_SUCCESS)
    {
        if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
            rc = VINF_SUCCESS;

        /* Note: hack alert for difficult to reproduce problem. */
        if (   rc == VERR_PAGE_NOT_PRESENT               /* SMP only ; disassembly might fail. */
            || rc == VERR_PAGE_TABLE_NOT_PRESENT         /* seen with UNI & SMP */
            || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT /* seen with SMP */
            || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)   /* precaution */
        {
            Log(("WARNING: Unexpected VERR_PAGE_TABLE_NOT_PRESENT (%d) for page fault at %RGv error code %x (rip=%RGv)\n", rc, pvFault, uErr, pRegFrame->rip));
            /* Some kind of inconsistency in the SMP case; it's safe to just execute the instruction again; not sure about single VCPU VMs though. */
            rc = VINF_SUCCESS;
        }
    }

    STAM_STATS({ if (rc == VINF_EM_RAW_GUEST_TRAP) STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eGuestPF); });
    STAM_STATS({ if (!pVCpu->pgmr0.s.pStatTrap0eAttributionR0)
                     pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = &pVCpu->pgm.s.Stats.StatRZTrap0eTime2Misc; });
    STAM_PROFILE_STOP_EX(&pVCpu->pgm.s.Stats.StatRZTrap0e, pVCpu->pgmr0.s.pStatTrap0eAttributionR0, a);
    return rc;
}
#endif /* IN_RING0 */


/**
 * Prefetch a page
 *
 * Typically used to sync commonly used pages before entering raw mode
 * after a CR3 reload.
 *
 * @returns VBox status code suitable for scheduling.
 * @retval VINF_SUCCESS on success.
 * @retval VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
 * @param pVCpu The cross context virtual CPU structure.
 * @param GCPtrPage Page to prefetch.
 */
VMMDECL(int) PGMPrefetchPage(PVMCPUCC pVCpu, RTGCPTR GCPtrPage)
{
    STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,Prefetch), a);

    uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
    AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
    AssertReturn(g_aPgmBothModeData[idxBth].pfnPrefetchPage, VERR_PGM_MODE_IPE);
    int rc = g_aPgmBothModeData[idxBth].pfnPrefetchPage(pVCpu, GCPtrPage);

    STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,Prefetch), a);
    AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
    return rc;
}


#ifndef PGM_WITHOUT_MAPPINGS
/**
 * Gets the mapping corresponding to the specified address (if any).
 *
 * @returns Pointer to the mapping.
 * @returns NULL if not found.
 *
 * @param pVM The cross context VM structure.
 * @param GCPtr The guest context pointer.
 */
PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr)
{
    PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
    while (pMapping)
    {
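        /* The mapping list is sorted by address; stop as soon as the address
           falls below the start of the current mapping. */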
        if ((uintptr_t)GCPtr < (uintptr_t)pMapping->GCPtr)
            break;
        if ((uintptr_t)GCPtr - (uintptr_t)pMapping->GCPtr < pMapping->cb)
            return pMapping;
        pMapping = pMapping->CTX_SUFF(pNext);
    }
    return NULL;
}
#endif


/**
 * Verifies a range of pages for read or write access
 *
 * Only checks the guest's page tables
 *
 * @returns VBox status code.
 * @param pVCpu The cross context virtual CPU structure.
 * @param Addr Guest virtual address to check
 * @param cbSize Access size
 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
 * @remarks Currently not in use.
 */
VMMDECL(int) PGMIsValidAccess(PVMCPUCC pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
{
    /*
     * Validate input.
     */
    if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
    {
        AssertMsgFailed(("PGMIsValidAccess: invalid access type %08x\n", fAccess));
        return VERR_INVALID_PARAMETER;
    }

    uint64_t fPage;
    int rc = PGMGstGetPage(pVCpu, (RTGCPTR)Addr, &fPage, NULL);
    if (RT_FAILURE(rc))
    {
        Log(("PGMIsValidAccess: access violation for %RGv rc=%d\n", Addr, rc));
        return VINF_EM_RAW_GUEST_TRAP;
    }

    /*
     * Check if the access would cause a page fault
     *
     * Note that hypervisor page directories are not present in the guest's tables, so this check
     * is sufficient.
     */
    bool fWrite = !!(fAccess & X86_PTE_RW);
    bool fUser  = !!(fAccess & X86_PTE_US);
    if (   !(fPage & X86_PTE_P)
        || (fWrite && !(fPage & X86_PTE_RW))
        || (fUser  && !(fPage & X86_PTE_US)) )
    {
        Log(("PGMIsValidAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPage, fWrite, fUser));
        return VINF_EM_RAW_GUEST_TRAP;
    }
    if (   RT_SUCCESS(rc)
        && PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize))
        return PGMIsValidAccess(pVCpu, Addr + PAGE_SIZE, (cbSize > PAGE_SIZE) ? cbSize - PAGE_SIZE : 1, fAccess);
    return rc;
}


/**
 * Verifies a range of pages for read or write access
 *
 * Supports handling of pages marked for dirty bit tracking and CSAM
 *
 * @returns VBox status code.
 * @param pVCpu The cross context virtual CPU structure.
 * @param Addr Guest virtual address to check
 * @param cbSize Access size
 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
 */
VMMDECL(int) PGMVerifyAccess(PVMCPUCC pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    AssertMsg(!(fAccess & ~(X86_PTE_US | X86_PTE_RW)), ("PGMVerifyAccess: invalid access type %08x\n", fAccess));

    /*
     * Get going.
     */
    uint64_t fPageGst;
    int rc = PGMGstGetPage(pVCpu, (RTGCPTR)Addr, &fPageGst, NULL);
    if (RT_FAILURE(rc))
    {
        Log(("PGMVerifyAccess: access violation for %RGv rc=%d\n", Addr, rc));
        return VINF_EM_RAW_GUEST_TRAP;
    }

    /*
     * Check if the access would cause a page fault
     *
     * Note that hypervisor page directories are not present in the guest's tables, so this check
     * is sufficient.
     */
    const bool fWrite = !!(fAccess & X86_PTE_RW);
    const bool fUser  = !!(fAccess & X86_PTE_US);
    if (   !(fPageGst & X86_PTE_P)
        || (fWrite && !(fPageGst & X86_PTE_RW))
        || (fUser  && !(fPageGst & X86_PTE_US)) )
    {
        Log(("PGMVerifyAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPageGst, fWrite, fUser));
        return VINF_EM_RAW_GUEST_TRAP;
    }

    if (!pVM->pgm.s.fNestedPaging)
    {
        /*
         * Next step is to verify if we protected this page for dirty bit tracking or for CSAM scanning
         */
        rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, NULL, NULL);
        if (   rc == VERR_PAGE_NOT_PRESENT
            || rc == VERR_PAGE_TABLE_NOT_PRESENT)
        {
            /*
             * Page is not present in our page tables.
             * Try to sync it!
             */
            Assert(X86_TRAP_PF_RW == X86_PTE_RW && X86_TRAP_PF_US == X86_PTE_US);
            uint32_t const uErr = fAccess & (X86_TRAP_PF_RW | X86_TRAP_PF_US);
            uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
            AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
            AssertReturn(g_aPgmBothModeData[idxBth].pfnVerifyAccessSyncPage, VERR_PGM_MODE_IPE);
            rc = g_aPgmBothModeData[idxBth].pfnVerifyAccessSyncPage(pVCpu, Addr, fPageGst, uErr);
            if (rc != VINF_SUCCESS)
                return rc;
        }
        else
            AssertMsg(rc == VINF_SUCCESS, ("PGMShwGetPage %RGv failed with %Rrc\n", Addr, rc));
    }

#if 0 /* def VBOX_STRICT; triggers too often now */
    /*
     * This check is a bit paranoid, but useful.
     */
    /* Note! This will assert when writing to monitored pages (a bit annoying actually). */
    uint64_t fPageShw;
    rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, &fPageShw, NULL);
    if (   (rc == VERR_PAGE_NOT_PRESENT || RT_FAILURE(rc))
        || (fWrite && !(fPageShw & X86_PTE_RW))
        || (fUser  && !(fPageShw & X86_PTE_US)) )
    {
        AssertMsgFailed(("Unexpected access violation for %RGv! rc=%Rrc write=%d user=%d\n",
                         Addr, rc, fWrite && !(fPageShw & X86_PTE_RW), fUser && !(fPageShw & X86_PTE_US)));
        return VINF_EM_RAW_GUEST_TRAP;
    }
#endif

    if (   RT_SUCCESS(rc)
        && (   PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize - 1)
            || Addr + cbSize < Addr))
    {
        /* Don't recursively call PGMVerifyAccess as we might run out of stack. */
        for (;;)
        {
            Addr += PAGE_SIZE;
            if (cbSize > PAGE_SIZE)
                cbSize -= PAGE_SIZE;
            else
                cbSize = 1;
            rc = PGMVerifyAccess(pVCpu, Addr, 1, fAccess);
            if (rc != VINF_SUCCESS)
                break;
            if (PAGE_ADDRESS(Addr) == PAGE_ADDRESS(Addr + cbSize - 1))
                break;
        }
    }
    return rc;
}


/**
 * Emulation of the invlpg instruction (HC only actually).
 *
 * @returns Strict VBox status code, special care required.
 * @retval VINF_PGM_SYNC_CR3 - handled.
 * @retval VINF_EM_RAW_EMULATE_INSTR - not handled (RC only).
 * @retval VERR_REM_FLUSHED_PAGES_OVERFLOW - not handled.
 *
 * @param pVCpu The cross context virtual CPU structure.
 * @param GCPtrPage Page to invalidate.
 *
 * @remark ASSUMES the page table entry or page directory is valid. Fairly
 *         safe, but there could be edge cases!
 *
 * @todo Flush page or page directory only if necessary!
 * @todo VBOXSTRICTRC
 */
VMMDECL(int) PGMInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtrPage)
{
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    int rc;
    Log3(("PGMInvalidatePage: GCPtrPage=%RGv\n", GCPtrPage));

    IEMTlbInvalidatePage(pVCpu, GCPtrPage);

    /*
     * Call paging mode specific worker.
     */
    STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,InvalidatePage), a);
    PGM_LOCK_VOID(pVM);

    uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
    AssertReturnStmt(idxBth < RT_ELEMENTS(g_aPgmBothModeData), PGM_UNLOCK(pVM), VERR_PGM_MODE_IPE);
    AssertReturnStmt(g_aPgmBothModeData[idxBth].pfnInvalidatePage, PGM_UNLOCK(pVM), VERR_PGM_MODE_IPE);
    rc = g_aPgmBothModeData[idxBth].pfnInvalidatePage(pVCpu, GCPtrPage);

    PGM_UNLOCK(pVM);
    STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,InvalidatePage), a);

#ifdef IN_RING3
    /*
     * Check if we have a pending update of the CR3 monitoring.
     */
    if (   RT_SUCCESS(rc)
        && (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3))
    {
        pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
        Assert(!pVM->pgm.s.fMappingsFixed); Assert(pgmMapAreMappingsEnabled(pVM));
    }
#endif /* IN_RING3 */

    /* Ignore all irrelevant error codes. */
    if (   rc == VERR_PAGE_NOT_PRESENT
        || rc == VERR_PAGE_TABLE_NOT_PRESENT
        || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT
        || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)
        rc = VINF_SUCCESS;

    return rc;
}


/**
 * Executes an instruction using the interpreter.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param pVM The cross context VM structure.
 * @param pVCpu The cross context virtual CPU structure.
 * @param pRegFrame Register frame.
 * @param pvFault Fault address.
 */
VMMDECL(VBOXSTRICTRC) PGMInterpretInstruction(PVMCC pVM, PVMCPUCC pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
{
    NOREF(pVM);
    VBOXSTRICTRC rc = EMInterpretInstruction(pVCpu, pRegFrame, pvFault);
    if (rc == VERR_EM_INTERPRETER)
        rc = VINF_EM_RAW_EMULATE_INSTR;
    if (rc != VINF_SUCCESS)
        Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%RGv)\n", VBOXSTRICTRC_VAL(rc), pvFault));
    return rc;
}


/**
 * Gets effective page information (from the VMM page directory).
 *
 * @returns VBox status code.
 * @param pVCpu The cross context virtual CPU structure.
 * @param GCPtr Guest Context virtual address of the page.
 * @param pfFlags Where to store the flags. These are X86_PTE_*.
 * @param pHCPhys Where to store the HC physical address of the page.
 *                This is page aligned.
 * @remark You should use PGMMapGetPage() for pages in a mapping.
 */
VMMDECL(int) PGMShwGetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
{
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    PGM_LOCK_VOID(pVM);

    uintptr_t idxShw = pVCpu->pgm.s.idxShadowModeData;
    AssertReturn(idxShw < RT_ELEMENTS(g_aPgmShadowModeData), VERR_PGM_MODE_IPE);
    AssertReturn(g_aPgmShadowModeData[idxShw].pfnGetPage, VERR_PGM_MODE_IPE);
    int rc = g_aPgmShadowModeData[idxShw].pfnGetPage(pVCpu, GCPtr, pfFlags, pHCPhys);

    PGM_UNLOCK(pVM);
    return rc;
}


/**
 * Modify page flags for a range of pages in the shadow context.
 *
 * The existing flags are ANDed with the fMask and ORed with the fFlags.
 *
 * @returns VBox status code.
 * @param pVCpu The cross context virtual CPU structure.
 * @param GCPtr Virtual address of the first page in the range.
 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
 * @param fMask The AND mask - page flags X86_PTE_*.
 *              Be very CAREFUL when ~'ing constants which could be 32-bit!
 * @param fOpFlags A combination of the PGM_MK_PK_XXX flags.
 * @remark You must use PGMMapModifyPage() for pages in a mapping.
 */
DECLINLINE(int) pdmShwModifyPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags)
{
    AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
    Assert(!(fOpFlags & ~(PGM_MK_PG_IS_MMIO2 | PGM_MK_PG_IS_WRITE_FAULT)));

    GCPtr &= PAGE_BASE_GC_MASK; /** @todo this ain't necessary, right... */

    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    PGM_LOCK_VOID(pVM);

    uintptr_t idxShw = pVCpu->pgm.s.idxShadowModeData;
    AssertReturn(idxShw < RT_ELEMENTS(g_aPgmShadowModeData), VERR_PGM_MODE_IPE);
    AssertReturn(g_aPgmShadowModeData[idxShw].pfnModifyPage, VERR_PGM_MODE_IPE);
    int rc = g_aPgmShadowModeData[idxShw].pfnModifyPage(pVCpu, GCPtr, PAGE_SIZE, fFlags, fMask, fOpFlags);

    PGM_UNLOCK(pVM);
    return rc;
}


/**
 * Changing the page flags for a single page in the shadow page tables so as to
 * make it read-only.
 *
 * @returns VBox status code.
 * @param pVCpu The cross context virtual CPU structure.
 * @param GCPtr Virtual address of the first page in the range.
 * @param fOpFlags A combination of the PGM_MK_PK_XXX flags.
 */
VMMDECL(int) PGMShwMakePageReadonly(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
{
    return pdmShwModifyPage(pVCpu, GCPtr, 0, ~(uint64_t)X86_PTE_RW, fOpFlags);
}


/**
 * Changing the page flags for a single page in the shadow page tables so as to
 * make it writable.
 *
 * The caller must know with 101% certainty that the guest page tables map
 * this as writable too. This function will deal with shared, zero and write
 * monitored pages.
 *
 * @returns VBox status code.
 * @param pVCpu The cross context virtual CPU structure.
 * @param GCPtr Virtual address of the first page in the range.
 * @param fOpFlags A combination of the PGM_MK_PK_XXX flags.
 */
VMMDECL(int) PGMShwMakePageWritable(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
{
    if (pVCpu->pgm.s.enmShadowMode != PGMMODE_NONE) /* avoid assertions */
        return pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)0, fOpFlags);
    return VINF_SUCCESS;
}


/**
 * Changing the page flags for a single page in the shadow page tables so as to
 * make it not present.
 *
 * @returns VBox status code.
 * @param pVCpu The cross context virtual CPU structure.
 * @param GCPtr Virtual address of the first page in the range.
 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
 */
VMMDECL(int) PGMShwMakePageNotPresent(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
{
    return pdmShwModifyPage(pVCpu, GCPtr, 0, 0, fOpFlags);
}


/**
 * Changing the page flags for a single page in the shadow page tables so as to
 * make it supervisor and writable.
 *
 * This is for dealing with CR0.WP=0 and readonly user pages.
1397 *
1398 * @returns VBox status code.
1399 * @param pVCpu The cross context virtual CPU structure.
1400 * @param GCPtr Virtual address of the first page in the range.
1401 * @param fBigPage Whether or not this is a big page. If it is, we have to
1402 * change the shadow PDE as well. If it isn't, the caller
1403 * has checked that the shadow PDE doesn't need changing.
1404 * We ASSUME 4KB pages backing the big page here!
1405 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
1406 */
1407int pgmShwMakePageSupervisorAndWritable(PVMCPUCC pVCpu, RTGCPTR GCPtr, bool fBigPage, uint32_t fOpFlags)
1408{
1409 int rc = pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)X86_PTE_US, fOpFlags);
1410 if (rc == VINF_SUCCESS && fBigPage)
1411 {
1412 /* this is a bit ugly... */
1413 switch (pVCpu->pgm.s.enmShadowMode)
1414 {
1415 case PGMMODE_32_BIT:
1416 {
1417 PX86PDE pPde = pgmShwGet32BitPDEPtr(pVCpu, GCPtr);
1418 AssertReturn(pPde, VERR_INTERNAL_ERROR_3);
1419 Log(("pgmShwMakePageSupervisorAndWritable: PDE=%#llx", pPde->u));
1420 pPde->u |= X86_PDE_RW;
1421 Log(("-> PDE=%#llx (32)\n", pPde->u));
1422 break;
1423 }
1424 case PGMMODE_PAE:
1425 case PGMMODE_PAE_NX:
1426 {
1427 PX86PDEPAE pPde = pgmShwGetPaePDEPtr(pVCpu, GCPtr);
1428 AssertReturn(pPde, VERR_INTERNAL_ERROR_3);
1429 Log(("pgmShwMakePageSupervisorAndWritable: PDE=%#llx", pPde->u));
1430 pPde->u |= X86_PDE_RW;
1431 Log(("-> PDE=%#llx (PAE)\n", pPde->u));
1432 break;
1433 }
1434 default:
1435 AssertFailedReturn(VERR_INTERNAL_ERROR_4);
1436 }
1437 }
1438 return rc;
1439}
1440
1441
1442/**
1443 * Gets the shadow page directory for the specified address, PAE.
1444 *
1445 * @returns Pointer to the shadow PD.
1446 * @param pVCpu The cross context virtual CPU structure.
1447 * @param GCPtr The address.
1448 * @param uGstPdpe Guest PDPT entry. Valid.
1449 * @param ppPD Receives address of page directory
1450 */
1451int pgmShwSyncPaePDPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
1452{
1453 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1454 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1455 PPGMPOOLPAGE pShwPage;
1456 int rc;
1457 PGM_LOCK_ASSERT_OWNER(pVM);
1458
1459
1460 /* Allocate page directory if not present. */
1461 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
1462 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
1463 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1464 X86PGPAEUINT const uPdpe = pPdpe->u;
1465 if (uPdpe & (X86_PDPE_P | X86_PDPE_PG_MASK))
1466 {
1467 pShwPage = pgmPoolGetPage(pPool, uPdpe & X86_PDPE_PG_MASK);
1468 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1469 Assert((pPdpe->u & X86_PDPE_PG_MASK) == pShwPage->Core.Key);
1470
1471 pgmPoolCacheUsed(pPool, pShwPage);
1472
1473 /* Update the entry if necessary. */
1474 X86PGPAEUINT const uPdpeNew = pShwPage->Core.Key | (uGstPdpe & (X86_PDPE_P | X86_PDPE_A)) | (uPdpe & PGM_PDPT_FLAGS);
1475 if (uPdpeNew == uPdpe)
1476 { /* likely */ }
1477 else
1478 ASMAtomicWriteU64(&pPdpe->u, uPdpeNew);
1479 }
1480 else
1481 {
1482 RTGCPTR64 GCPdPt;
1483 PGMPOOLKIND enmKind;
1484 if (pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu))
1485 {
1486 /* AMD-V nested paging or real/protected mode without paging. */
1487 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
1488 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
1489 }
1490 else
1491 {
1492 if (CPUMGetGuestCR4(pVCpu) & X86_CR4_PAE)
1493 {
1494 if (!(uGstPdpe & X86_PDPE_P))
1495 {
1496 /* PD not present; guest must reload CR3 to change it.
1497 * No need to monitor anything in this case. */
1498 Assert(VM_IS_RAW_MODE_ENABLED(pVM));
1499 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1500 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
1501 Assert(uGstPdpe & X86_PDPE_P); /* caller should do this already */
1502 }
1503 else
1504 {
1505 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1506 enmKind = PGMPOOLKIND_PAE_PD_FOR_PAE_PD;
1507 }
1508 }
1509 else
1510 {
1511 GCPdPt = CPUMGetGuestCR3(pVCpu);
1512 enmKind = (PGMPOOLKIND)(PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD + iPdPt);
1513 }
1514 }
1515
1516 /* Create a reference back to the PDPT by using the index in its shadow page. */
1517 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1518 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPdPt, false /*fLockPage*/,
1519 &pShwPage);
1520 AssertRCReturn(rc, rc);
1521
1522 /* Hook it up. */
1523 ASMAtomicWriteU64(&pPdpe->u, pShwPage->Core.Key | (uGstPdpe & (X86_PDPE_P | X86_PDPE_A)) | (uPdpe & PGM_PDPT_FLAGS));
1524 }
1525 PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdpe);
1526
1527 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1528 return VINF_SUCCESS;
1529}
1530
1531
1532/**
1533 * Gets the pointer to the shadow page directory entry for an address, PAE.
1534 *
1535 * @returns Pointer to the PDE.
1536 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1537 * @param GCPtr The address.
1538 * @param ppShwPde Receives the address of the pgm pool page for the shadow page directory
1539 */
1540DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde)
1541{
1542 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1543 PGM_LOCK_ASSERT_OWNER(pVM);
1544
1545 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
1546 AssertReturn(pPdpt, VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT); /* can't happen */
1547 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
1548 X86PGPAEUINT const uPdpe = pPdpt->a[iPdPt].u;
1549 if (!(uPdpe & X86_PDPE_P))
1550 {
1551 LogFlow(("pgmShwGetPaePoolPagePD: PD %d not present (%RX64)\n", iPdPt, uPdpe));
1552 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1553 }
1554 AssertMsg(uPdpe & X86_PDPE_PG_MASK, ("GCPtr=%RGv\n", GCPtr));
1555
1556 /* Fetch the pgm pool shadow descriptor. */
1557 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), uPdpe & X86_PDPE_PG_MASK);
1558 AssertReturn(pShwPde, VERR_PGM_POOL_GET_PAGE_FAILED);
1559
1560 *ppShwPde = pShwPde;
1561 return VINF_SUCCESS;
1562}
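
/* Usage sketch (hypothetical caller, assumes the PGM lock is held): the
 * returned pool page is normally mapped to reach the shadow PD itself:
 *
 *     PPGMPOOLPAGE pShwPde;
 *     int rc = pgmShwGetPaePoolPagePD(pVCpu, GCPtr, &pShwPde);
 *     if (rc == VINF_SUCCESS)
 *     {
 *         PX86PDPAE pPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPde);
 *         // pPD->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK] is the PDE.
 *     }
 */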
1563
1564
1565/**
1566 * Syncs the SHADOW page directory pointer for the specified address.
1567 *
1568 * Allocates backing pages in case the PDPT or PML4 entry is missing.
1569 *
1570 * The caller is responsible for making sure the guest has a valid PD before
1571 * calling this function.
1572 *
1573 * @returns VBox status code.
1574 * @param pVCpu The cross context virtual CPU structure.
1575 * @param GCPtr The address.
1576 * @param uGstPml4e Guest PML4 entry (valid).
1577 * @param uGstPdpe Guest PDPT entry (valid).
1578 * @param ppPD Receives the address of the page directory.
1579 */
1580static int pgmShwSyncLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
1581{
1582 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1583 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1584 bool const fNestedPagingOrNoGstPaging = pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu);
1585 int rc;
1586
1587 PGM_LOCK_ASSERT_OWNER(pVM);
1588
1589 /*
1590 * PML4.
1591 */
1592 PPGMPOOLPAGE pShwPage;
1593 {
1594 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1595 PX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
1596 X86PGPAEUINT const uPml4e = pPml4e->u;
1597
1598 /* Allocate page directory pointer table if not present. */
1599 if (uPml4e & (X86_PML4E_P | X86_PML4E_PG_MASK))
1600 {
1601 pShwPage = pgmPoolGetPage(pPool, uPml4e & X86_PML4E_PG_MASK);
1602 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1603
1604 pgmPoolCacheUsed(pPool, pShwPage);
1605
1606 /* Update the entry if needed. */
1607 X86PGPAEUINT const uPml4eNew = pShwPage->Core.Key | (uGstPml4e & pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask)
1608 | (uPml4e & PGM_PML4_FLAGS);
1609 if (uPml4e == uPml4eNew)
1610 { /* likely */ }
1611 else
1612 ASMAtomicWriteU64(&pPml4e->u, uPml4eNew);
1613 }
1614 else
1615 {
1616 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1617
1618 RTGCPTR64 GCPml4;
1619 PGMPOOLKIND enmKind;
1620 if (fNestedPagingOrNoGstPaging)
1621 {
1622 /* AMD-V nested paging or real/protected mode without paging */
1623 GCPml4 = (RTGCPTR64)iPml4 << X86_PML4_SHIFT;
1624 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_PHYS;
1625 }
1626 else
1627 {
1628 GCPml4 = uGstPml4e & X86_PML4E_PG_MASK;
1629 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT;
1630 }
1631
1632 /* Create a reference back to the PML4 by using the index in its shadow page. */
1633 rc = pgmPoolAlloc(pVM, GCPml4, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1634 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, false /*fLockPage*/,
1635 &pShwPage);
1636 AssertRCReturn(rc, rc);
1637
1638 /* Hook it up. */
1639 ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | (uGstPml4e & pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask)
1640 | (uPml4e & PGM_PML4_FLAGS));
1641 }
1642 }
1643
1644 /*
1645 * PDPT.
1646 */
1647 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1648 PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1649 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1650 X86PGPAEUINT const uPdpe = pPdpe->u;
1651
1652 /* Allocate page directory if not present. */
1653 if (uPdpe & (X86_PDPE_P | X86_PDPE_PG_MASK))
1654 {
1655 pShwPage = pgmPoolGetPage(pPool, uPdpe & X86_PDPE_PG_MASK);
1656 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1657
1658 pgmPoolCacheUsed(pPool, pShwPage);
1659
1660 /* Update the entry if needed. */
1661 X86PGPAEUINT const uPdpeNew = pShwPage->Core.Key | (uGstPdpe & pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask)
1662 | (uPdpe & PGM_PDPT_FLAGS);
1663 if (uPdpe == uPdpeNew)
1664 { /* likely */ }
1665 else
1666 ASMAtomicWriteU64(&pPdpe->u, uPdpeNew);
1667 }
1668 else
1669 {
1670 RTGCPTR64 GCPdPt;
1671 PGMPOOLKIND enmKind;
1672 if (fNestedPagingOrNoGstPaging)
1673 {
1674 /* AMD-V nested paging or real/protected mode without paging */
1675 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
1676 enmKind = PGMPOOLKIND_64BIT_PD_FOR_PHYS;
1677 }
1678 else
1679 {
1680 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1681 enmKind = PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD;
1682 }
1683
1684 /* Create a reference back to the PDPT by using the index in its shadow page. */
1685 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1686 pShwPage->idx, iPdPt, false /*fLockPage*/,
1687 &pShwPage);
1688 AssertRCReturn(rc, rc);
1689
1690 /* Hook it up. */
1691 ASMAtomicWriteU64(&pPdpe->u,
1692 pShwPage->Core.Key | (uGstPdpe & pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask) | (uPdpe & PGM_PDPT_FLAGS));
1693 }
1694
1695 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1696 return VINF_SUCCESS;
1697}
1698
1699
1700/**
1701 * Gets the SHADOW page directory pointer for the specified address (long mode).
1702 *
1703 * @returns VBox status code.
1704 * @param pVCpu The cross context virtual CPU structure.
1705 * @param GCPtr The address.
1706 * @param ppPml4e Receives the address of the page map level 4 entry.
1707 * @param ppPdpt Receives the address of the page directory pointer table.
1708 * @param ppPD Receives the address of the page directory.
1709 */
1710DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
1711{
1712 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1713 PGM_LOCK_ASSERT_OWNER(pVM);
1714
1715 /*
1716 * PML4
1717 */
1718 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1719 PCX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
1720 AssertReturn(pPml4e, VERR_PGM_PML4_MAPPING);
1721 if (ppPml4e)
1722 *ppPml4e = (PX86PML4E)pPml4e;
1723 X86PGPAEUINT const uPml4e = pPml4e->u;
1724 Log4(("pgmShwGetLongModePDPtr %RGv (%RHv) %RX64\n", GCPtr, pPml4e, uPml4e));
1725 if (!(uPml4e & X86_PML4E_P)) /** @todo other code checks for a NULL page frame number! */
1726 return VERR_PAGE_MAP_LEVEL4_NOT_PRESENT;
1727
1728 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1729 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, uPml4e & X86_PML4E_PG_MASK);
1730 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1731
1732 /*
1733 * PDPT
1734 */
1735 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1736 PCX86PDPT pPdpt = *ppPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1737 X86PGPAEUINT const uPdpe = pPdpt->a[iPdPt].u;
1738 if (!(uPdpe & X86_PDPE_P)) /** @todo other code checks for a NULL page frame number! */
1739 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1740
1741 pShwPage = pgmPoolGetPage(pPool, uPdpe & X86_PDPE_PG_MASK);
1742 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1743
1744 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1745 Log4(("pgmShwGetLongModePDPtr %RGv -> *ppPD=%p PDE=%p/%RX64\n", GCPtr, *ppPD, &(*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK], (*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK].u));
1746 return VINF_SUCCESS;
1747}
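
/* Usage sketch (hypothetical caller, assumes the PGM lock is held): note
 * that ppPdpt is written unconditionally once the PML4E is present, so it
 * must not be NULL, while ppPml4e may be:
 *
 *     PX86PML4E pPml4e;
 *     PX86PDPT  pPdpt;
 *     PX86PDPAE pPD;
 *     int rc = pgmShwGetLongModePDPtr(pVCpu, GCPtr, &pPml4e, &pPdpt, &pPD);
 *     if (rc == VINF_SUCCESS)
 *     {
 *         X86PDEPAE Pde = pPD->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK];
 *         // ... Pde.u is the shadow PDE for GCPtr ...
 *     }
 */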
1748
1749
1750/**
1751 * Syncs the SHADOW EPT page directory pointer for the specified address. Allocates
1752 * backing pages in case the PDPT or PML4 entry is missing.
1753 *
1754 * @returns VBox status code.
1755 * @param pVCpu The cross context virtual CPU structure.
1756 * @param GCPtr The address.
1757 * @param ppPdpt Receives the address of the PDPT. Optional.
1758 * @param ppPD Receives the address of the page directory.
1759 */
1760static int pgmShwGetEPTPDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD)
1761{
1762 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1763 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1764 int rc;
1765
1766 Assert(pVM->pgm.s.fNestedPaging);
1767 PGM_LOCK_ASSERT_OWNER(pVM);
1768
1769 /*
1770 * PML4 level.
1771 */
1772
1773 PEPTPML4 pPml4 = (PEPTPML4)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1774 Assert(pPml4);
1775
1776 /* Allocate page directory pointer table if not present. */
1777 PPGMPOOLPAGE pShwPage;
1778 {
1779 const unsigned iPml4 = (GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
1780 PEPTPML4E pPml4e = &pPml4->a[iPml4];
1781 EPTPML4E Pml4e;
1782 Pml4e.u = pPml4e->u;
1783 if (!(Pml4e.u & (EPT_E_PG_MASK | EPT_E_READ)))
1784 {
1785 RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT;
1786
1787 rc = pgmPoolAlloc(pVM, GCPml4, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1788 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, false /*fLockPage*/,
1789 &pShwPage);
1790 AssertRCReturn(rc, rc);
1791
1792 /* Hook up the new PDPT now. */
1793 ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1794 }
1795 else
1796 {
1797 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
1798 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1799
1800 pgmPoolCacheUsed(pPool, pShwPage);
1801
1802 /* Hook up the cached PDPT if needed (probably not, given the 512*512 PTs that would need syncing). */
1803 if (Pml4e.u == (pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE))
1804 { }
1805 else
1806 ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1807 }
1808 }
1809
1810 /*
1811 * PDPT level.
1812 */
1813 const unsigned iPdPt = (GCPtr >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
1814 PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1815 PEPTPDPTE pPdpe = &pPdpt->a[iPdPt];
1816
1817 if (ppPdpt)
1818 *ppPdpt = pPdpt;
1819
1820 /* Allocate page directory if not present. */
1821 EPTPDPTE Pdpe;
1822 Pdpe.u = pPdpe->u;
1823 if (!(Pdpe.u & (EPT_E_PG_MASK | EPT_E_READ)))
1824 {
1825 RTGCPTR64 GCPdPt = (RTGCPTR64)iPdPt << EPT_PDPT_SHIFT;
1826 rc = pgmPoolAlloc(pVM, GCPdPt, PGMPOOLKIND_EPT_PD_FOR_PHYS, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1827 pShwPage->idx, iPdPt, false /*fLockPage*/,
1828 &pShwPage);
1829 AssertRCReturn(rc, rc);
1830
1831 /* Hook up the new PD now. */
1832 ASMAtomicWriteU64(&pPdpe->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1833 }
1834 else
1835 {
1836 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & EPT_PDPTE_PG_MASK);
1837 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1838
1839 pgmPoolCacheUsed(pPool, pShwPage);
1840
1841 /* Hook up the cached PD if needed (probably not, given the 512 PTs we may need to sync). */
1842 if (Pdpe.u == (pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE))
1843 { }
1844 else
1845 ASMAtomicWriteU64(&pPdpe->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1846 }
1847
1848 *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1849 return VINF_SUCCESS;
1850}
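
/* Usage sketch (hypothetical caller, assumes nested paging and the PGM
 * lock; EPT_PD_SHIFT and EPT_PD_MASK are assumed from the VT-x headers):
 *
 *     PEPTPD pPD;
 *     int rc = pgmShwGetEPTPDPtr(pVCpu, GCPhysNested, NULL, &pPD); // ppPdpt is optional
 *     if (rc == VINF_SUCCESS)
 *     {
 *         EPTPDE Pde = pPD->a[(GCPhysNested >> EPT_PD_SHIFT) & EPT_PD_MASK];
 *         // ... Pde.u is the shadow EPT PDE ...
 *     }
 */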
1851
1852
1853#ifdef IN_RING0
1854/**
1855 * Synchronizes a range of nested page table entries.
1856 *
1857 * The caller must own the PGM lock.
1858 *
1859 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1860 * @param GCPhys Where to start.
1861 * @param cPages The number of pages whose entries should be synced.
1862 * @param enmShwPagingMode The shadow paging mode (PGMMODE_EPT for VT-x,
1863 * host paging mode for AMD-V).
1864 */
1865int pgmShwSyncNestedPageLocked(PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint32_t cPages, PGMMODE enmShwPagingMode)
1866{
1867 PGM_LOCK_ASSERT_OWNER(pVCpu->CTX_SUFF(pVM));
1868
1869/** @todo r=bird: Gotta love this nested paging hacking we're still carrying with us... (Split PGM_TYPE_NESTED.) */
1870 int rc;
1871 switch (enmShwPagingMode)
1872 {
1873 case PGMMODE_32_BIT:
1874 {
1875 X86PDE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1876 rc = PGM_BTH_NAME_32BIT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1877 break;
1878 }
1879
1880 case PGMMODE_PAE:
1881 case PGMMODE_PAE_NX:
1882 {
1883 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1884 rc = PGM_BTH_NAME_PAE_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1885 break;
1886 }
1887
1888 case PGMMODE_AMD64:
1889 case PGMMODE_AMD64_NX:
1890 {
1891 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1892 rc = PGM_BTH_NAME_AMD64_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1893 break;
1894 }
1895
1896 case PGMMODE_EPT:
1897 {
1898 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1899 rc = PGM_BTH_NAME_EPT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1900 break;
1901 }
1902
1903 default:
1904 AssertMsgFailedReturn(("%d\n", enmShwPagingMode), VERR_IPE_NOT_REACHED_DEFAULT_CASE);
1905 }
1906 return rc;
1907}
1908#endif /* IN_RING0 */
1909
1910
1911/**
1912 * Gets effective Guest OS page information.
1913 *
1914 * When GCPtr is in a big page, the function will return as if it was a normal
1915 * 4KB page. If distinguishing between big and normal pages becomes necessary
1916 * at a later point, a dedicated API will be created for that purpose.
1918 *
1919 * @returns VBox status code.
1920 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1921 * @param GCPtr Guest Context virtual address of the page.
1922 * @param pfFlags Where to store the flags. These are X86_PTE_*, even for big pages.
1923 * @param pGCPhys Where to store the GC physical address of the page.
1924 * This is page aligned. The fact that the
1925 */
1926VMMDECL(int) PGMGstGetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
1927{
1928 VMCPU_ASSERT_EMT(pVCpu);
1929 uintptr_t idx = pVCpu->pgm.s.idxGuestModeData;
1930 AssertReturn(idx < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
1931 AssertReturn(g_aPgmGuestModeData[idx].pfnGetPage, VERR_PGM_MODE_IPE);
1932 return g_aPgmGuestModeData[idx].pfnGetPage(pVCpu, GCPtr, pfFlags, pGCPhys);
1933}
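
/* Usage sketch (hypothetical caller): query the flags and physical address
 * backing a guest-virtual address; either output pointer may be NULL:
 *
 *     uint64_t fFlags;
 *     RTGCPHYS GCPhys;
 *     int rc = PGMGstGetPage(pVCpu, GCPtr, &fFlags, &GCPhys);
 *     if (RT_SUCCESS(rc) && (fFlags & X86_PTE_RW))
 *     {
 *         // GCPhys is the page-aligned physical address of a writable page.
 *     }
 */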
1934
1935
1936/**
1937 * Performs a guest page table walk.
1938 *
1939 * The guest should be in paged protect mode or long mode when making a call to
1940 * this function.
1941 *
1942 * @returns VBox status code.
1943 * @retval VINF_SUCCESS on success.
1944 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
1945 * @retval VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
1946 * not valid, except that enmType is set to PGMPTWALKGSTTYPE_INVALID.
1947 *
1948 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1949 * @param GCPtr The guest virtual address to walk by.
1950 * @param pWalk Where to return the walk result. This is valid for some
1951 * error codes as well.
1952 */
1953int pgmGstPtWalk(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALKGST pWalk)
1954{
1955 VMCPU_ASSERT_EMT(pVCpu);
1956 switch (pVCpu->pgm.s.enmGuestMode)
1957 {
1958 case PGMMODE_32_BIT:
1959 pWalk->enmType = PGMPTWALKGSTTYPE_32BIT;
1960 return PGM_GST_NAME_32BIT(Walk)(pVCpu, GCPtr, &pWalk->u.Legacy);
1961
1962 case PGMMODE_PAE:
1963 case PGMMODE_PAE_NX:
1964 pWalk->enmType = PGMPTWALKGSTTYPE_PAE;
1965 return PGM_GST_NAME_PAE(Walk)(pVCpu, GCPtr, &pWalk->u.Pae);
1966
1967 case PGMMODE_AMD64:
1968 case PGMMODE_AMD64_NX:
1969 pWalk->enmType = PGMPTWALKGSTTYPE_AMD64;
1970 return PGM_GST_NAME_AMD64(Walk)(pVCpu, GCPtr, &pWalk->u.Amd64);
1971
1972 case PGMMODE_REAL:
1973 case PGMMODE_PROTECTED:
1974 pWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
1975 return VERR_PGM_NOT_USED_IN_MODE;
1976
1977 case PGMMODE_NESTED_32BIT:
1978 case PGMMODE_NESTED_PAE:
1979 case PGMMODE_NESTED_AMD64:
1980 case PGMMODE_EPT:
1981 default:
1982 AssertFailed();
1983 pWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
1984 return VERR_PGM_NOT_USED_IN_MODE;
1985 }
1986}
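
/* Usage sketch (hypothetical caller): translate a guest-virtual address;
 * the walk result carries the physical address and the per-level entries:
 *
 *     PGMPTWALKGST Walk;
 *     int rc = pgmGstPtWalk(pVCpu, GCPtr, &Walk);
 *     if (RT_SUCCESS(rc))
 *     {
 *         RTGCPHYS GCPhys = Walk.u.Core.GCPhys; // the translated address
 *     }
 */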
1987
1988
1989/**
1990 * Tries to continue the previous walk.
1991 *
1992 * @note Requires the caller to hold the PGM lock from the first
1993 * pgmGstPtWalk() call to the last pgmGstPtWalkNext() call. Otherwise
1994 * we cannot use the pointers.
1995 *
1996 * @returns VBox status code.
1997 * @retval VINF_SUCCESS on success.
1998 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
1999 * @retval VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
2000 * not valid, except that enmType is set to PGMPTWALKGSTTYPE_INVALID.
2001 *
2002 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2003 * @param GCPtr The guest virtual address to walk by.
2004 * @param pWalk Pointer to the previous walk result and where to return
2005 * the result of this walk. This is valid for some error
2006 * codes as well.
2007 */
2008int pgmGstPtWalkNext(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALKGST pWalk)
2009{
2010 /*
2011 * We can only handle successful walks.
2012 * We also limit ourselves to the next page.
2013 */
2014 if ( pWalk->u.Core.fSucceeded
2015 && GCPtr - pWalk->u.Core.GCPtr == PAGE_SIZE)
2016 {
2017 Assert(pWalk->u.Core.uLevel == 0);
2018 if (pWalk->enmType == PGMPTWALKGSTTYPE_AMD64)
2019 {
2020 /*
2021 * AMD64
2022 */
2023 if (!pWalk->u.Core.fGigantPage && !pWalk->u.Core.fBigPage)
2024 {
2025 /*
2026 * We fall back to full walk if the PDE table changes, if any
2027 * reserved bits are set, or if the effective page access changes.
2028 */
2029 const uint64_t fPteSame = X86_PTE_P | X86_PTE_RW | X86_PTE_US | X86_PTE_PWT
2030 | X86_PTE_PCD | X86_PTE_A | X86_PTE_PAE_NX;
2031 const uint64_t fPdeSame = X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_PWT
2032 | X86_PDE_PCD | X86_PDE_A | X86_PDE_PAE_NX | X86_PDE_PS;
2033
2034 if ((GCPtr >> X86_PD_PAE_SHIFT) == (pWalk->u.Core.GCPtr >> X86_PD_PAE_SHIFT))
2035 {
2036 if (pWalk->u.Amd64.pPte)
2037 {
2038 X86PTEPAE Pte;
2039 Pte.u = pWalk->u.Amd64.pPte[1].u;
2040 if ( (Pte.u & fPteSame) == (pWalk->u.Amd64.Pte.u & fPteSame)
2041 && !(Pte.u & (pVCpu)->pgm.s.fGstAmd64MbzPteMask))
2042 {
2043
2044 pWalk->u.Core.GCPtr = GCPtr;
2045 pWalk->u.Core.GCPhys = Pte.u & X86_PTE_PAE_PG_MASK;
2046 pWalk->u.Amd64.Pte.u = Pte.u;
2047 pWalk->u.Amd64.pPte++;
2048 return VINF_SUCCESS;
2049 }
2050 }
2051 }
2052 else if ((GCPtr >> X86_PDPT_SHIFT) == (pWalk->u.Core.GCPtr >> X86_PDPT_SHIFT))
2053 {
2054 Assert(!((GCPtr >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK)); /* Must be first PT entry. */
2055 if (pWalk->u.Amd64.pPde)
2056 {
2057 X86PDEPAE Pde;
2058 Pde.u = pWalk->u.Amd64.pPde[1].u;
2059 if ( (Pde.u & fPdeSame) == (pWalk->u.Amd64.Pde.u & fPdeSame)
2060 && !(Pde.u & (pVCpu)->pgm.s.fGstAmd64MbzPdeMask))
2061 {
2062 /* Get the new PTE and check out the first entry. */
2063 int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, PGM_A20_APPLY(pVCpu, (Pde.u & X86_PDE_PAE_PG_MASK)),
2064 &pWalk->u.Amd64.pPt);
2065 if (RT_SUCCESS(rc))
2066 {
2067 pWalk->u.Amd64.pPte = &pWalk->u.Amd64.pPt->a[0];
2068 X86PTEPAE Pte;
2069 Pte.u = pWalk->u.Amd64.pPte->u;
2070 if ( (Pte.u & fPteSame) == (pWalk->u.Amd64.Pte.u & fPteSame)
2071 && !(Pte.u & (pVCpu)->pgm.s.fGstAmd64MbzPteMask))
2072 {
2073 pWalk->u.Core.GCPtr = GCPtr;
2074 pWalk->u.Core.GCPhys = Pte.u & X86_PTE_PAE_PG_MASK;
2075 pWalk->u.Amd64.Pte.u = Pte.u;
2076 pWalk->u.Amd64.Pde.u = Pde.u;
2077 pWalk->u.Amd64.pPde++;
2078 return VINF_SUCCESS;
2079 }
2080 }
2081 }
2082 }
2083 }
2084 }
2085 else if (!pWalk->u.Core.fGigantPage)
2086 {
2087 if ((GCPtr & X86_PAGE_2M_BASE_MASK) == (pWalk->u.Core.GCPtr & X86_PAGE_2M_BASE_MASK))
2088 {
2089 pWalk->u.Core.GCPtr = GCPtr;
2090 pWalk->u.Core.GCPhys += PAGE_SIZE;
2091 return VINF_SUCCESS;
2092 }
2093 }
2094 else
2095 {
2096 if ((GCPtr & X86_PAGE_1G_BASE_MASK) == (pWalk->u.Core.GCPtr & X86_PAGE_1G_BASE_MASK))
2097 {
2098 pWalk->u.Core.GCPtr = GCPtr;
2099 pWalk->u.Core.GCPhys += PAGE_SIZE;
2100 return VINF_SUCCESS;
2101 }
2102 }
2103 }
2104 }
2105 /* Case we don't handle. Do full walk. */
2106 return pgmGstPtWalk(pVCpu, GCPtr, pWalk);
2107}
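
/* Usage sketch (hypothetical caller; cPages is an assumed page count): walk
 * a range one page at a time, holding the PGM lock across all the calls so
 * the cached table pointers stay valid:
 *
 *     PGM_LOCK_VOID(pVM);
 *     PGMPTWALKGST Walk;
 *     int rc = pgmGstPtWalk(pVCpu, GCPtr, &Walk);
 *     while (RT_SUCCESS(rc) && --cPages > 0)
 *     {
 *         GCPtr += PAGE_SIZE;
 *         rc = pgmGstPtWalkNext(pVCpu, GCPtr, &Walk); // falls back to a full walk when it cannot continue
 *     }
 *     PGM_UNLOCK(pVM);
 */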
2108
2109
2110/**
2111 * Checks if the page is present.
2112 *
2113 * @returns true if the page is present.
2114 * @returns false if the page is not present.
2115 * @param pVCpu The cross context virtual CPU structure.
2116 * @param GCPtr Address within the page.
2117 */
2118VMMDECL(bool) PGMGstIsPagePresent(PVMCPUCC pVCpu, RTGCPTR GCPtr)
2119{
2120 VMCPU_ASSERT_EMT(pVCpu);
2121 int rc = PGMGstGetPage(pVCpu, GCPtr, NULL, NULL);
2122 return RT_SUCCESS(rc);
2123}
2124
2125
2126/**
2127 * Sets (replaces) the page flags for a range of pages in the guest's tables.
2128 *
2129 * @returns VBox status code.
2130 * @param pVCpu The cross context virtual CPU structure.
2131 * @param GCPtr The address of the first page.
2132 * @param cb The size of the range in bytes.
2133 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
2134 */
2135VMMDECL(int) PGMGstSetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
2136{
2137 VMCPU_ASSERT_EMT(pVCpu);
2138 return PGMGstModifyPage(pVCpu, GCPtr, cb, fFlags, 0);
2139}
2140
2141
2142/**
2143 * Modify page flags for a range of pages in the guest's tables
2144 *
2145 * The existing flags are ANDed with the fMask and ORed with the fFlags.
2146 *
2147 * @returns VBox status code.
2148 * @param pVCpu The cross context virtual CPU structure.
2149 * @param GCPtr Virtual address of the first page in the range.
2150 * @param cb Size (in bytes) of the range to apply the modification to.
2151 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
2152 * @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
2153 * Be very CAREFUL when ~'ing constants which could be 32-bit!
2154 */
2155VMMDECL(int) PGMGstModifyPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
2156{
2157 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,GstModifyPage), a);
2158 VMCPU_ASSERT_EMT(pVCpu);
2159
2160 /*
2161 * Validate input.
2162 */
2163 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
2164 Assert(cb);
2165
2166 LogFlow(("PGMGstModifyPage %RGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));
2167
2168 /*
2169 * Adjust input.
2170 */
2171 cb += GCPtr & PAGE_OFFSET_MASK;
2172 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
2173 GCPtr = (GCPtr & PAGE_BASE_GC_MASK);
2174
2175 /*
2176 * Call worker.
2177 */
2178 uintptr_t idx = pVCpu->pgm.s.idxGuestModeData;
2179 AssertReturn(idx < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
2180 AssertReturn(g_aPgmGuestModeData[idx].pfnModifyPage, VERR_PGM_MODE_IPE);
2181 int rc = g_aPgmGuestModeData[idx].pfnModifyPage(pVCpu, GCPtr, cb, fFlags, fMask);
2182
2183 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,GstModifyPage), a);
2184 return rc;
2185}
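
/* Usage sketch (hypothetical caller): write-protect a range by clearing the
 * RW bit, using the 64-bit cast the fMask documentation warns about:
 *
 *     int rc = PGMGstModifyPage(pVCpu, GCPtr, cb, 0, ~(uint64_t)X86_PTE_RW);
 *     // fFlags=0 ORs in nothing; fMask clears X86_PTE_RW on every page in the range.
 */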
2186
2187
2188/**
2189 * Checks whether the given PAE PDPEs are potentially valid for the guest.
2190 *
2191 * @returns @c true if the PDPEs are valid, @c false otherwise.
2192 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2193 * @param paPaePdpes The PAE PDPEs to validate.
2194 *
2195 * @remarks This function -only- checks the reserved bits in the PDPE entries.
2196 */
2197VMM_INT_DECL(bool) PGMGstArePaePdpesValid(PVMCPUCC pVCpu, PCX86PDPE paPaePdpes)
2198{
2199 Assert(paPaePdpes);
2200 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
2201 {
2202 X86PDPE const PaePdpe = paPaePdpes[i];
2203 if ( !(PaePdpe.u & X86_PDPE_P)
2204 || !(PaePdpe.u & pVCpu->pgm.s.fGstPaeMbzPdpeMask))
2205 { /* likely */ }
2206 else
2207 return false;
2208 }
2209 return true;
2210}
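
/* Usage sketch (hypothetical caller): validate a set of PDPEs read from
 * guest memory before handing them to the mapping code below:
 *
 *     X86PDPE aPaePdpes[X86_PG_PAE_PDPE_ENTRIES];
 *     // ... fill aPaePdpes from the guest PDPT ...
 *     if (!PGMGstArePaePdpesValid(pVCpu, &aPaePdpes[0]))
 *         return VERR_PGM_PAE_PDPE_RSVD;
 */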
2211
2212
2213/**
2214 * Performs the lazy mapping of the 32-bit guest PD.
2215 *
2216 * @returns VBox status code.
2217 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2218 * @param ppPd Where to return the pointer to the mapping. This is
2219 * always set.
2220 */
2221int pgmGstLazyMap32BitPD(PVMCPUCC pVCpu, PX86PD *ppPd)
2222{
2223 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2224 PGM_LOCK_VOID(pVM);
2225
2226 Assert(!pVCpu->pgm.s.CTX_SUFF(pGst32BitPd));
2227
2228 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAGE_MASK;
2229 PPGMPAGE pPage;
2230 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
2231 if (RT_SUCCESS(rc))
2232 {
2233 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)ppPd);
2234 if (RT_SUCCESS(rc))
2235 {
2236# ifdef IN_RING3
2237 pVCpu->pgm.s.pGst32BitPdR0 = NIL_RTR0PTR;
2238 pVCpu->pgm.s.pGst32BitPdR3 = *ppPd;
2239# else
2240 pVCpu->pgm.s.pGst32BitPdR3 = NIL_RTR0PTR;
2241 pVCpu->pgm.s.pGst32BitPdR0 = *ppPd;
2242# endif
2243 PGM_UNLOCK(pVM);
2244 return VINF_SUCCESS;
2245 }
2246 AssertRC(rc);
2247 }
2248 PGM_UNLOCK(pVM);
2249
2250 *ppPd = NULL;
2251 return rc;
2252}
2253
2254
2255/**
2256 * Performs the lazy mapping of the PAE guest PDPT.
2257 *
2258 * @returns VBox status code.
2259 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2260 * @param ppPdpt Where to return the pointer to the mapping. This is
2261 * always set.
2262 */
2263int pgmGstLazyMapPaePDPT(PVMCPUCC pVCpu, PX86PDPT *ppPdpt)
2264{
2265 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt));
2266 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2267 PGM_LOCK_VOID(pVM);
2268
2269 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAE_PAGE_MASK;
2270 PPGMPAGE pPage;
2271 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
2272 if (RT_SUCCESS(rc))
2273 {
2274 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)ppPdpt);
2275 if (RT_SUCCESS(rc))
2276 {
2277# ifdef IN_RING3
2278 pVCpu->pgm.s.pGstPaePdptR0 = NIL_RTR0PTR;
2279 pVCpu->pgm.s.pGstPaePdptR3 = *ppPdpt;
2280# else
2281 pVCpu->pgm.s.pGstPaePdptR3 = NIL_RTR3PTR;
2282 pVCpu->pgm.s.pGstPaePdptR0 = *ppPdpt;
2283# endif
2284 PGM_UNLOCK(pVM);
2285 return VINF_SUCCESS;
2286 }
2287 AssertRC(rc);
2288 }
2289
2290 PGM_UNLOCK(pVM);
2291 *ppPdpt = NULL;
2292 return rc;
2293}
2294
2295
2296/**
2297 * Performs the lazy mapping / updating of a PAE guest PD.
2298 *
2300 * @returns VBox status code.
2301 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2302 * @param iPdpt Which PD entry to map (0..3).
2303 * @param ppPd Where to return the pointer to the mapping. This is
2304 * always set.
2305 */
2306int pgmGstLazyMapPaePD(PVMCPUCC pVCpu, uint32_t iPdpt, PX86PDPAE *ppPd)
2307{
2308 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2309 PGM_LOCK_VOID(pVM);
2310
2311 PX86PDPT pGuestPDPT = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
2312 Assert(pGuestPDPT);
2313 Assert(pGuestPDPT->a[iPdpt].u & X86_PDPE_P);
2314 RTGCPHYS GCPhys = pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK;
2315 bool const fChanged = pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] != GCPhys;
2316
2317 PPGMPAGE pPage;
2318 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
2319 if (RT_SUCCESS(rc))
2320 {
2321 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)ppPd);
2322 AssertRC(rc);
2323 if (RT_SUCCESS(rc))
2324 {
2325# ifdef IN_RING3
2326 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = NIL_RTR0PTR;
2327 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = *ppPd;
2328# else
2329 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = NIL_RTR3PTR;
2330 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = *ppPd;
2331# endif
2332 if (fChanged)
2333 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = GCPhys;
2334 PGM_UNLOCK(pVM);
2335 return VINF_SUCCESS;
2336 }
2337 }
2338
2339 /* Invalid page or some failure, invalidate the entry. */
2340 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = NIL_RTGCPHYS;
2341 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = NIL_RTR3PTR;
2342 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = NIL_RTR0PTR;
2343
2344 PGM_UNLOCK(pVM);
2345 return rc;
2346}
2347
2348
2349/**
2350 * Performs the lazy mapping of the AMD64 guest PML4 table.
2351 *
2352 * @returns VBox status code.
2353 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2354 * @param ppPml4 Where to return the pointer to the mapping. This will
2355 * always be set.
2356 */
2357int pgmGstLazyMapPml4(PVMCPUCC pVCpu, PX86PML4 *ppPml4)
2358{
2359 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4));
2360 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2361 PGM_LOCK_VOID(pVM);
2362
2363 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_AMD64_PAGE_MASK;
2364 PPGMPAGE pPage;
2365 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
2366 if (RT_SUCCESS(rc))
2367 {
2368 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)ppPml4);
2369 if (RT_SUCCESS(rc))
2370 {
2371# ifdef IN_RING3
2372 pVCpu->pgm.s.pGstAmd64Pml4R0 = NIL_RTR0PTR;
2373 pVCpu->pgm.s.pGstAmd64Pml4R3 = *ppPml4;
2374# else
2375 pVCpu->pgm.s.pGstAmd64Pml4R3 = NIL_RTR3PTR;
2376 pVCpu->pgm.s.pGstAmd64Pml4R0 = *ppPml4;
2377# endif
2378 PGM_UNLOCK(pVM);
2379 return VINF_SUCCESS;
2380 }
2381 }
2382
2383 PGM_UNLOCK(pVM);
2384 *ppPml4 = NULL;
2385 return rc;
2386}
2387
2388
2389/**
2390 * Gets the current CR3 register value for the shadow memory context.
2391 * @returns CR3 value.
2392 * @param pVCpu The cross context virtual CPU structure.
2393 */
2394VMMDECL(RTHCPHYS) PGMGetHyperCR3(PVMCPU pVCpu)
2395{
2396 PPGMPOOLPAGE pPoolPage = pVCpu->pgm.s.CTX_SUFF(pShwPageCR3);
2397 AssertPtrReturn(pPoolPage, NIL_RTHCPHYS);
2398 return pPoolPage->Core.Key;
2399}
2400
2401
2402/**
2403 * Forces lazy remapping of the guest's PAE page-directory structures.
2404 *
2405 * @param pVCpu The cross context virtual CPU structure.
2406 */
2407static void pgmGstFlushPaePdpes(PVMCPU pVCpu)
2408{
2409 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->pgm.s.aGCPhysGstPaePDs); i++)
2410 {
2411 pVCpu->pgm.s.apGstPaePDsR3[i] = 0;
2412 pVCpu->pgm.s.apGstPaePDsR0[i] = 0;
2413 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
2414 }
2415}
2416
2417
2418/**
2419 * Gets the PGM CR3 value masked according to the current guest mode.
2420 *
2421 * @returns The masked PGM CR3 value.
2422 * @param pVCpu The cross context virtual CPU structure.
2423 * @param uCr3 The raw guest CR3 value.
2424 */
2425DECLINLINE(RTGCPHYS) pgmGetGuestMaskedCr3(PVMCPUCC pVCpu, uint64_t uCr3)
2426{
2427 RTGCPHYS GCPhysCR3;
2428 switch (pVCpu->pgm.s.enmGuestMode)
2429 {
2430 case PGMMODE_PAE:
2431 case PGMMODE_PAE_NX:
2432 GCPhysCR3 = (RTGCPHYS)(uCr3 & X86_CR3_PAE_PAGE_MASK);
2433 break;
2434 case PGMMODE_AMD64:
2435 case PGMMODE_AMD64_NX:
2436 GCPhysCR3 = (RTGCPHYS)(uCr3 & X86_CR3_AMD64_PAGE_MASK);
2437 break;
2438 default:
2439 GCPhysCR3 = (RTGCPHYS)(uCr3 & X86_CR3_PAGE_MASK);
2440 break;
2441 }
2442 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
2443 return GCPhysCR3;
2444}
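
/* Worked example (assumes the A20 gate is open, making the A20 masking a
 * no-op): in PAE mode CR3 holds a 32-byte aligned PDPT address, so only
 * bits 31:5 survive the masking:
 *
 *     // uCr3 = 0x12345678 with enmGuestMode = PGMMODE_PAE
 *     // GCPhysCR3 = 0x12345678 & X86_CR3_PAE_PAGE_MASK = 0x12345660
 */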
2445
2446
2447/**
2448 * Performs and schedules necessary updates following a CR3 load or reload.
2449 *
2450 * This will normally involve mapping the guest PD or nPDPT.
2451 *
2452 * @returns VBox status code.
2453 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
2454 * safely be ignored and overridden since the FF will be set too then.
2455 * @param pVCpu The cross context virtual CPU structure.
2456 * @param cr3 The new cr3.
2457 * @param fGlobal Indicates whether this is a global flush or not.
2458 * @param fPdpesMapped Whether the PAE PDPEs (and PDPT) have been mapped.
2459 */
2460VMMDECL(int) PGMFlushTLB(PVMCPUCC pVCpu, uint64_t cr3, bool fGlobal, bool fPdpesMapped)
2461{
2462 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLB), a);
2463 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2464
2465 VMCPU_ASSERT_EMT(pVCpu);
2466
2467 /*
2468 * Always flag the necessary updates; this is required for hardware acceleration.
2469 */
2470 /** @todo optimize this, it shouldn't always be necessary. */
2471 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2472 if (fGlobal)
2473 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2474 LogFlow(("PGMFlushTLB: cr3=%RX64 OldCr3=%RX64 fGlobal=%d\n", cr3, pVCpu->pgm.s.GCPhysCR3, fGlobal));
2475
2476 /*
2477 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
2478 */
2479 int rc = VINF_SUCCESS;
2480 RTGCPHYS const GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
2481 RTGCPHYS const GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, cr3);
2482 if (GCPhysOldCR3 != GCPhysCR3)
2483 {
2484 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2485 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2486 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
2487
2488 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2489 rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3, fPdpesMapped);
2490 if (RT_LIKELY(rc == VINF_SUCCESS))
2491 {
2492 if (pgmMapAreMappingsFloating(pVM))
2493 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
2494 }
2495 else
2496 {
2497 AssertMsg(rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
2498 Assert(VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_PGM_SYNC_CR3));
2499 pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3;
2500 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MAP_CR3;
2501 if (pgmMapAreMappingsFloating(pVM))
2502 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;
2503 }
2504
2505 if (fGlobal)
2506 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBNewCR3Global));
2507 else
2508 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBNewCR3));
2509 }
2510 else
2511 {
2512#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
2513 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
2514 if (pPool->cDirtyPages)
2515 {
2516 PGM_LOCK_VOID(pVM);
2517 pgmPoolResetDirtyPages(pVM);
2518 PGM_UNLOCK(pVM);
2519 }
2520#endif
2521 /*
2522 * Check if we have a pending update of the CR3 monitoring.
2523 */
2524 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
2525 {
2526 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
2527 Assert(!pVM->pgm.s.fMappingsFixed); Assert(pgmMapAreMappingsEnabled(pVM));
2528 }
2529 if (fGlobal)
2530 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBSameCR3Global));
2531 else
2532 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBSameCR3));
2533
2534 /*
2535 * Flush PAE PDPTEs.
2536 */
2537 if (PGMMODE_IS_PAE(pVCpu->pgm.s.enmGuestMode))
2538 pgmGstFlushPaePdpes(pVCpu);
2539 }
2540
2541 IEMTlbInvalidateAll(pVCpu, false /*fVmm*/);
2542 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLB), a);
2543 return rc;
2544}
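
/* Usage sketch (hypothetical MOV CR3 emulation path): global pages survive
 * a CR3 write when CR4.PGE is set, hence the inverted fGlobal argument:
 *
 *     uint64_t const fCr4 = CPUMGetGuestCR4(pVCpu);
 *     int rc = PGMFlushTLB(pVCpu, uNewCr3, !(fCr4 & X86_CR4_PGE), false); // fPdpesMapped=false
 */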
2545
2546
2547/**
2548 * Performs and schedules necessary updates following a CR3 load or reload when
2549 * using nested or extended paging.
2550 *
2551 * This API is an alternative to PGMFlushTLB that avoids actually flushing the
2552 * TLB and triggering a SyncCR3.
2553 *
2554 * This will normally involve mapping the guest PD or nPDPT.
2555 *
2556 * @returns VBox status code.
2557 * @retval VINF_SUCCESS.
2558 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync (not for nested
2559 * paging modes). This can safely be ignored and overridden since the
2560 * FF will be set too then.
2561 * @param pVCpu The cross context virtual CPU structure.
2562 * @param cr3 The new CR3.
2563 * @param fPdpesMapped Whether the PAE PDPEs (and PDPT) have been mapped.
2564 */
2565VMMDECL(int) PGMUpdateCR3(PVMCPUCC pVCpu, uint64_t cr3, bool fPdpesMapped)
2566{
2567 VMCPU_ASSERT_EMT(pVCpu);
2568 LogFlow(("PGMUpdateCR3: cr3=%RX64 OldCr3=%RX64\n", cr3, pVCpu->pgm.s.GCPhysCR3));
2569
2570 /* We assume we're only called in nested paging mode. */
2571 Assert(pVCpu->CTX_SUFF(pVM)->pgm.s.fNestedPaging || pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
2572 Assert(!pgmMapAreMappingsEnabled(pVCpu->CTX_SUFF(pVM)));
2573 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3));
2574
2575 /*
2576 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
2577 */
2578 int rc = VINF_SUCCESS;
2579 RTGCPHYS const GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, cr3);
2580 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
2581 {
2582 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2583 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2584 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
2585
2586 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2587 rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3, fPdpesMapped);
2588
2589 AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */ /** @todo this isn't true for the mac, but we need hw to test/fix this. */
2590 }
2591 /*
2592 * Flush PAE PDPTEs.
2593 */
2594 else if (PGMMODE_IS_PAE(pVCpu->pgm.s.enmGuestMode))
2595 pgmGstFlushPaePdpes(pVCpu);
2596
2597 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
2598 return rc;
2599}
2600
2601
2602/**
2603 * Synchronize the paging structures.
2604 *
2605 * This function is called in response to the VM_FF_PGM_SYNC_CR3 and
2606 * VM_FF_PGM_SYNC_CR3_NONGLOBAL. Those two force action flags are set
2607 * in several places, most importantly whenever the CR3 is loaded.
2608 *
2609 * @returns VBox status code. May return VINF_PGM_SYNC_CR3 in RC/R0.
2610 * @retval VERR_PGM_NO_HYPERVISOR_ADDRESS in raw-mode when we're unable to map
2611 * the VMM into guest context.
2612 * @param pVCpu The cross context virtual CPU structure.
2613 * @param cr0 Guest context CR0 register
2614 * @param cr3 Guest context CR3 register
2615 * @param cr4 Guest context CR4 register
2616 * @param fGlobal Including global page directories or not
2617 */
2618VMMDECL(int) PGMSyncCR3(PVMCPUCC pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
2619{
2620 int rc;
2621
2622 VMCPU_ASSERT_EMT(pVCpu);
2623
2624 /*
2625 * The pool may have pending stuff and even require a return to ring-3 to
2626 * clear the whole thing.
2627 */
2628 rc = pgmPoolSyncCR3(pVCpu);
2629 if (rc != VINF_SUCCESS)
2630 return rc;
2631
2632 /*
2633 * We might be called when we shouldn't.
2634 *
2635 * The mode switching will ensure that the PD is resynced after every mode
2636 * switch. So, if we find ourselves here when in protected or real mode
2637 * we can safely clear the FF and return immediately.
2638 */
2639 if (pVCpu->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
2640 {
2641 Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
2642 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2643 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2644 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2645 return VINF_SUCCESS;
2646 }
2647
2648 /* If global pages are not supported, then all flushes are global. */
2649 if (!(cr4 & X86_CR4_PGE))
2650 fGlobal = true;
2651 LogFlow(("PGMSyncCR3: cr0=%RX64 cr3=%RX64 cr4=%RX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
2652 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)));
2653
2654 /*
2655 * Check if we need to finish an aborted MapCR3 call (see PGMFlushTLB).
2656 * This should be done before SyncCR3.
2657 */
2658 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MAP_CR3)
2659 {
2660 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MAP_CR3;
2661
2662 RTGCPHYS const GCPhysCR3Old = pVCpu->pgm.s.GCPhysCR3; NOREF(GCPhysCR3Old);
2663 RTGCPHYS const GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, cr3);
2664 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
2665 {
2666 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2667 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2668 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
2669 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2670 rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3, false /* fPdpesMapped */);
2671 }
2672
2673 /* Make sure we check for pending pgm pool syncs as we clear VMCPU_FF_PGM_SYNC_CR3 later on! */
2674 if ( rc == VINF_PGM_SYNC_CR3
2675 || (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL))
2676 {
2677 Log(("PGMSyncCR3: pending pgm pool sync after MapCR3!\n"));
2678#ifdef IN_RING3
2679 rc = pgmPoolSyncCR3(pVCpu);
2680#else
2681 if (rc == VINF_PGM_SYNC_CR3)
2682 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3Old;
2683 return VINF_PGM_SYNC_CR3;
2684#endif
2685 }
2686 AssertRCReturn(rc, rc);
2687 AssertRCSuccessReturn(rc, VERR_IPE_UNEXPECTED_INFO_STATUS);
2688 }
2689
2690 /*
2691 * Let the 'Bth' function do the work and we'll just keep track of the flags.
2692 */
2693 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
2694
2695 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2696 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2697 AssertReturn(g_aPgmBothModeData[idxBth].pfnSyncCR3, VERR_PGM_MODE_IPE);
2698 rc = g_aPgmBothModeData[idxBth].pfnSyncCR3(pVCpu, cr0, cr3, cr4, fGlobal);
2699
2700 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
2701 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
2702 if (rc == VINF_SUCCESS)
2703 {
2704 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)
2705 {
2706 /* Go back to ring 3 if a pgm pool sync is again pending. */
2707 return VINF_PGM_SYNC_CR3;
2708 }
2709
2710 if (!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
2711 {
2712 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2713 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2714 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2715 }
2716
2717 /*
2718 * Check if we have a pending update of the CR3 monitoring.
2719 */
2720 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
2721 {
2722 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
2723 Assert(!pVCpu->CTX_SUFF(pVM)->pgm.s.fMappingsFixed);
2724 Assert(pgmMapAreMappingsEnabled(pVCpu->CTX_SUFF(pVM)));
2725 }
2726 }
2727
2728 /*
2729 * Now flush the CR3 (guest context).
2730 */
2731 if (rc == VINF_SUCCESS)
2732 PGM_INVL_VCPU_TLBS(pVCpu);
2733 return rc;
2734}
2735
2736
2737/**
2738 * Maps all the PAE PDPE entries.
2739 *
2740 * @returns VBox status code.
2741 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2742 * @param paPaePdpes The new PAE PDPE values.
2743 *
2744 * @remarks This function may be invoked during the process of changing the guest
2745 * paging mode to PAE, hence the guest state (CR0, CR4 etc.) may not
2746 * reflect PAE paging just yet.
2747 */
2748VMM_INT_DECL(int) PGMGstMapPaePdpes(PVMCPUCC pVCpu, PCX86PDPE paPaePdpes)
2749{
2750 Assert(paPaePdpes);
2751 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
2752 {
2753 X86PDPE const PaePdpe = paPaePdpes[i];
2754
2755 /*
2756 * In some cases (e.g. in SVM with nested paging) the validation of the PAE PDPEs
2757 * is deferred.[1] Also, different situations require different handling of invalid
2758 * PDPE entries. Here we assume the caller has already validated or doesn't require
2759 * validation of the PDPEs.
2760 *
2761 * [1] -- See AMD spec. 15.25.10 "Legacy PAE Mode".
2762 */
2763 if ((PaePdpe.u & (pVCpu->pgm.s.fGstPaeMbzPdpeMask | X86_PDPE_P)) == X86_PDPE_P)
2764 {
2765 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2766 RTHCPTR HCPtr;
2767 RTGCPHYS const GCPhys = PGM_A20_APPLY(pVCpu, PaePdpe.u & X86_PDPE_PG_MASK);
2768
2769 PGM_LOCK_VOID(pVM);
2770 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
2771 AssertReturnStmt(pPage, PGM_UNLOCK(pVM), VERR_PGM_INVALID_PDPE_ADDR);
2772 int const rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)&HCPtr);
2773 PGM_UNLOCK(pVM);
2774 if (RT_SUCCESS(rc))
2775 {
2776# ifdef IN_RING3
2777 pVCpu->pgm.s.apGstPaePDsR3[i] = (PX86PDPAE)HCPtr;
2778 pVCpu->pgm.s.apGstPaePDsR0[i] = NIL_RTR0PTR;
2779# else
2780 pVCpu->pgm.s.apGstPaePDsR3[i] = NIL_RTR3PTR;
2781 pVCpu->pgm.s.apGstPaePDsR0[i] = (PX86PDPAE)HCPtr;
2782# endif
2783 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = GCPhys;
2784 continue;
2785 }
2786 AssertMsgFailed(("PGMGstMapPaePdpes: rc=%d GCPhys=%RGp i=%d\n", rc, GCPhys, i));
2787 }
2788 pVCpu->pgm.s.apGstPaePDsR3[i] = 0;
2789 pVCpu->pgm.s.apGstPaePDsR0[i] = 0;
2790 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
2791 }
2792
2793 return VINF_SUCCESS;
2794}
2795
2796
2797/**
2798 * Validates and maps the PDPT and PAE PDPEs referenced by the given CR3.
2799 *
2800 * @returns VBox status code.
2801 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2802 * @param cr3 The guest CR3 value.
2803 *
2804 * @remarks This function may be invoked during the process of changing the guest
2805 * paging mode to PAE but the guest state (CR0, CR4 etc.) may not reflect
2806 * PAE paging just yet.
2807 */
2808VMM_INT_DECL(int) PGMGstMapPaePdpesAtCr3(PVMCPUCC pVCpu, uint64_t cr3)
2809{
2810 /*
2811 * Read the page-directory-pointer table (PDPT) at CR3.
2812 */
2813 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2814 RTGCPHYS GCPhysCR3 = (cr3 & X86_CR3_PAE_PAGE_MASK);
2815 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
2816
2817 PGM_LOCK_VOID(pVM);
2818 PPGMPAGE pPageCR3 = pgmPhysGetPage(pVM, GCPhysCR3);
2819 AssertReturnStmt(pPageCR3, PGM_UNLOCK(pVM), VERR_PGM_INVALID_CR3_ADDR);
2820
2821 X86PDPE aPaePdpes[X86_PG_PAE_PDPE_ENTRIES];
2822 RTHCPTR HCPtrGuestCr3;
2823 int rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPageCR3, GCPhysCR3, (void **)&HCPtrGuestCr3);
2824 PGM_UNLOCK(pVM);
2825 AssertRCReturn(rc, rc);
2826 memcpy(&aPaePdpes[0], HCPtrGuestCr3, sizeof(aPaePdpes));
2827
2828 /*
2829 * Validate the page-directory-pointer table entries (PDPE).
2830 */
2831 if (PGMGstArePaePdpesValid(pVCpu, &aPaePdpes[0]))
2832 {
2833 /*
2834 * Map the PDPT.
2835 * We deliberately don't update PGM's GCPhysCR3 here as it's expected
2836 * that PGMFlushTLB will be called soon and only a change to CR3 then
2837 * will cause the shadow page tables to be updated.
2838 */
2839# ifdef IN_RING3
2840 pVCpu->pgm.s.pGstPaePdptR3 = (PX86PDPT)HCPtrGuestCr3;
2841 pVCpu->pgm.s.pGstPaePdptR0 = NIL_RTR0PTR;
2842# else
2843 pVCpu->pgm.s.pGstPaePdptR3 = NIL_RTR3PTR;
2844 pVCpu->pgm.s.pGstPaePdptR0 = (PX86PDPT)HCPtrGuestCr3;
2845# endif
2846
2847 /*
2848 * Update CPUM.
2849 * We do this prior to mapping the PDPEs to keep the order consistent
2850 * with what's used in HM. In practice, it doesn't really matter.
2851 */
2852 CPUMSetGuestPaePdpes(pVCpu, &aPaePdpes[0]);
2853
2854 /*
2855 * Map the PDPEs.
2856 */
2857 return PGMGstMapPaePdpes(pVCpu, &aPaePdpes[0]);
2858 }
2859 return VERR_PGM_PAE_PDPE_RSVD;
2860}
2861
2862
2863/**
2864 * Called whenever CR0 or CR4 changes in a way which may affect the paging mode.
2865 *
2866 * @returns VBox status code, with the following informational code for
2867 * VM scheduling.
2868 * @retval VINF_SUCCESS if there was no change, or it was successfully dealt with.
2869 * @retval VINF_PGM_CHANGE_MODE if we're in RC and the mode changes. This will
2870 * NOT be returned in ring-3 or ring-0.
2871 * @retval VINF_EM_SUSPEND or VINF_EM_OFF on a fatal runtime error. (R3 only)
2872 *
2873 * @param pVCpu The cross context virtual CPU structure.
2874 * @param cr0 The new cr0.
2875 * @param cr4 The new cr4.
2876 * @param efer The new extended feature enable register.
2877 */
2878VMMDECL(int) PGMChangeMode(PVMCPUCC pVCpu, uint64_t cr0, uint64_t cr4, uint64_t efer)
2879{
2880 VMCPU_ASSERT_EMT(pVCpu);
2881
2882 /*
2883 * Calc the new guest mode.
2884 *
2885 * Note! We check PG before PE and without requiring PE because of the
2886 * special AMD-V paged real mode (APM vol 2, rev 3.28, 15.9).
2887 */
2888 PGMMODE enmGuestMode;
2889 if (cr0 & X86_CR0_PG)
2890 {
2891 if (!(cr4 & X86_CR4_PAE))
2892 {
2893 bool const fPse = !!(cr4 & X86_CR4_PSE);
2894 if (pVCpu->pgm.s.fGst32BitPageSizeExtension != fPse)
2895 Log(("PGMChangeMode: CR4.PSE %d -> %d\n", pVCpu->pgm.s.fGst32BitPageSizeExtension, fPse));
2896 pVCpu->pgm.s.fGst32BitPageSizeExtension = fPse;
2897 enmGuestMode = PGMMODE_32_BIT;
2898 }
2899 else if (!(efer & MSR_K6_EFER_LME))
2900 {
2901 if (!(efer & MSR_K6_EFER_NXE))
2902 enmGuestMode = PGMMODE_PAE;
2903 else
2904 enmGuestMode = PGMMODE_PAE_NX;
2905 }
2906 else
2907 {
2908 if (!(efer & MSR_K6_EFER_NXE))
2909 enmGuestMode = PGMMODE_AMD64;
2910 else
2911 enmGuestMode = PGMMODE_AMD64_NX;
2912 }
2913 }
2914 else if (!(cr0 & X86_CR0_PE))
2915 enmGuestMode = PGMMODE_REAL;
2916 else
2917 enmGuestMode = PGMMODE_PROTECTED;
2918
2919 /*
2920 * Did it change?
2921 */
2922 if (pVCpu->pgm.s.enmGuestMode == enmGuestMode)
2923 return VINF_SUCCESS;
2924
2925 /* Flush the TLB */
2926 PGM_INVL_VCPU_TLBS(pVCpu);
2927 return PGMHCChangeMode(pVCpu->CTX_SUFF(pVM), pVCpu, enmGuestMode);
2928}
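
/* Usage sketch (hypothetical caller, e.g. after emulating a write to CR0,
 * CR4 or EFER): recompute the paging mode from the current register state:
 *
 *     int rc = PGMChangeMode(pVCpu, CPUMGetGuestCR0(pVCpu),
 *                            CPUMGetGuestCR4(pVCpu), CPUMGetGuestEFER(pVCpu));
 */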
2929
2930
2931/**
2932 * Converts a PGMMODE value to a PGM_TYPE_* \#define.
2933 *
2934 * @returns PGM_TYPE_*.
2935 * @param pgmMode The mode value to convert.
2936 */
2937DECLINLINE(unsigned) pgmModeToType(PGMMODE pgmMode)
2938{
2939 switch (pgmMode)
2940 {
2941 case PGMMODE_REAL: return PGM_TYPE_REAL;
2942 case PGMMODE_PROTECTED: return PGM_TYPE_PROT;
2943 case PGMMODE_32_BIT: return PGM_TYPE_32BIT;
2944 case PGMMODE_PAE:
2945 case PGMMODE_PAE_NX: return PGM_TYPE_PAE;
2946 case PGMMODE_AMD64:
2947 case PGMMODE_AMD64_NX: return PGM_TYPE_AMD64;
2948 case PGMMODE_NESTED_32BIT: return PGM_TYPE_NESTED_32BIT;
2949 case PGMMODE_NESTED_PAE: return PGM_TYPE_NESTED_PAE;
2950 case PGMMODE_NESTED_AMD64: return PGM_TYPE_NESTED_AMD64;
2951 case PGMMODE_EPT: return PGM_TYPE_EPT;
2952 case PGMMODE_NONE: return PGM_TYPE_NONE;
2953 default:
2954 AssertFatalMsgFailed(("pgmMode=%d\n", pgmMode));
2955 }
2956}
2957
2958
2959/**
2960 * Calculates the shadow paging mode.
2961 *
2962 * @returns The shadow paging mode.
2963 * @param pVM The cross context VM structure.
2964 * @param enmGuestMode The guest mode.
2965 * @param enmHostMode The host mode.
2966 * @param enmShadowMode The current shadow mode.
2967 */
2968static PGMMODE pgmCalcShadowMode(PVMCC pVM, PGMMODE enmGuestMode, SUPPAGINGMODE enmHostMode, PGMMODE enmShadowMode)
2969{
2970 switch (enmGuestMode)
2971 {
2972 /*
2973 * When switching to real or protected mode we don't change
2974 * anything since it's likely that we'll switch back pretty soon.
2975 *
2976 * During pgmR3InitPaging we'll end up here with PGMMODE_INVALID,
2977 * and this code is then supposed to determine which shadow paging
2978 * mode and switcher to use during init.
2979 */
2980 case PGMMODE_REAL:
2981 case PGMMODE_PROTECTED:
2982 if ( enmShadowMode != PGMMODE_INVALID
2983 && VM_IS_RAW_MODE_ENABLED(pVM) /* always switch in hm and nem modes! */)
2984 break; /* (no change) */
2985
2986 switch (enmHostMode)
2987 {
2988 case SUPPAGINGMODE_32_BIT:
2989 case SUPPAGINGMODE_32_BIT_GLOBAL:
2990 enmShadowMode = PGMMODE_32_BIT;
2991 break;
2992
2993 case SUPPAGINGMODE_PAE:
2994 case SUPPAGINGMODE_PAE_NX:
2995 case SUPPAGINGMODE_PAE_GLOBAL:
2996 case SUPPAGINGMODE_PAE_GLOBAL_NX:
2997 enmShadowMode = PGMMODE_PAE;
2998 break;
2999
3000 case SUPPAGINGMODE_AMD64:
3001 case SUPPAGINGMODE_AMD64_GLOBAL:
3002 case SUPPAGINGMODE_AMD64_NX:
3003 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3004 enmShadowMode = PGMMODE_PAE;
3005 break;
3006
3007 default:
3008 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
3009 }
3010 break;
3011
3012 case PGMMODE_32_BIT:
3013 switch (enmHostMode)
3014 {
3015 case SUPPAGINGMODE_32_BIT:
3016 case SUPPAGINGMODE_32_BIT_GLOBAL:
3017 enmShadowMode = PGMMODE_32_BIT;
3018 break;
3019
3020 case SUPPAGINGMODE_PAE:
3021 case SUPPAGINGMODE_PAE_NX:
3022 case SUPPAGINGMODE_PAE_GLOBAL:
3023 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3024 enmShadowMode = PGMMODE_PAE;
3025 break;
3026
3027 case SUPPAGINGMODE_AMD64:
3028 case SUPPAGINGMODE_AMD64_GLOBAL:
3029 case SUPPAGINGMODE_AMD64_NX:
3030 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3031 enmShadowMode = PGMMODE_PAE;
3032 break;
3033
3034 default:
3035 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
3036 }
3037 break;
3038
3039 case PGMMODE_PAE:
3040 case PGMMODE_PAE_NX: /** @todo This might require more switchers and guest+both modes. */
3041 switch (enmHostMode)
3042 {
3043 case SUPPAGINGMODE_32_BIT:
3044 case SUPPAGINGMODE_32_BIT_GLOBAL:
3045 enmShadowMode = PGMMODE_PAE;
3046 break;
3047
3048 case SUPPAGINGMODE_PAE:
3049 case SUPPAGINGMODE_PAE_NX:
3050 case SUPPAGINGMODE_PAE_GLOBAL:
3051 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3052 enmShadowMode = PGMMODE_PAE;
3053 break;
3054
3055 case SUPPAGINGMODE_AMD64:
3056 case SUPPAGINGMODE_AMD64_GLOBAL:
3057 case SUPPAGINGMODE_AMD64_NX:
3058 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3059 enmShadowMode = PGMMODE_PAE;
3060 break;
3061
3062 default:
3063 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
3064 }
3065 break;
3066
3067 case PGMMODE_AMD64:
3068 case PGMMODE_AMD64_NX:
3069 switch (enmHostMode)
3070 {
3071 case SUPPAGINGMODE_32_BIT:
3072 case SUPPAGINGMODE_32_BIT_GLOBAL:
3073 enmShadowMode = PGMMODE_AMD64;
3074 break;
3075
3076 case SUPPAGINGMODE_PAE:
3077 case SUPPAGINGMODE_PAE_NX:
3078 case SUPPAGINGMODE_PAE_GLOBAL:
3079 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3080 enmShadowMode = PGMMODE_AMD64;
3081 break;
3082
3083 case SUPPAGINGMODE_AMD64:
3084 case SUPPAGINGMODE_AMD64_GLOBAL:
3085 case SUPPAGINGMODE_AMD64_NX:
3086 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3087 enmShadowMode = PGMMODE_AMD64;
3088 break;
3089
3090 default:
3091 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
3092 }
3093 break;
3094
3095 default:
3096 AssertLogRelMsgFailedReturn(("enmGuestMode=%d\n", enmGuestMode), PGMMODE_INVALID);
3097 }
3098
3099 /*
3100 * Override the shadow mode when NEM or nested paging is active.
3101 */
3102 if (VM_IS_NEM_ENABLED(pVM))
3103 {
3104 pVM->pgm.s.fNestedPaging = true;
3105 enmShadowMode = PGMMODE_NONE;
3106 }
3107 else
3108 {
3109 bool fNestedPaging = HMIsNestedPagingActive(pVM);
3110 pVM->pgm.s.fNestedPaging = fNestedPaging;
3111 if (fNestedPaging)
3112 {
3113 if (HMIsVmxActive(pVM))
3114 enmShadowMode = PGMMODE_EPT;
3115 else
3116 {
3117 /* Nested SVM paging depends on the host paging mode. */
3118 Assert(HMIsSvmActive(pVM));
3119 if ( enmGuestMode == PGMMODE_AMD64
3120 || enmGuestMode == PGMMODE_AMD64_NX)
3121 enmShadowMode = PGMMODE_NESTED_AMD64;
3122 else
3123 switch (pVM->pgm.s.enmHostMode)
3124 {
3125 case SUPPAGINGMODE_32_BIT:
3126 case SUPPAGINGMODE_32_BIT_GLOBAL:
3127 enmShadowMode = PGMMODE_NESTED_32BIT;
3128 break;
3129
3130 case SUPPAGINGMODE_PAE:
3131 case SUPPAGINGMODE_PAE_GLOBAL:
3132 case SUPPAGINGMODE_PAE_NX:
3133 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3134 enmShadowMode = PGMMODE_NESTED_PAE;
3135 break;
3136
3137 case SUPPAGINGMODE_AMD64:
3138 case SUPPAGINGMODE_AMD64_GLOBAL:
3139 case SUPPAGINGMODE_AMD64_NX:
3140 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3141 enmShadowMode = PGMMODE_NESTED_AMD64;
3142 break;
3143
3144 default:
3145 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode), PGMMODE_INVALID);
3146 }
3147 }
3148 }
3149 }
3150
3151 return enmShadowMode;
3152}
3153
3154
3155/**
3156 * Performs the actual mode change.
3157 * This is called by PGMChangeMode and pgmR3InitPaging().
3158 *
3159 * @returns VBox status code. May suspend or power off the VM on error, but this
3160 * will be signalled using FFs rather than informational status codes.
3161 *
3162 * @param pVM The cross context VM structure.
3163 * @param pVCpu The cross context virtual CPU structure.
3164 * @param enmGuestMode The new guest mode. This is assumed to be different from
3165 * the current mode.
3166 */
3167VMM_INT_DECL(int) PGMHCChangeMode(PVMCC pVM, PVMCPUCC pVCpu, PGMMODE enmGuestMode)
3168{
3169 Log(("PGMHCChangeMode: Guest mode: %s -> %s\n", PGMGetModeName(pVCpu->pgm.s.enmGuestMode), PGMGetModeName(enmGuestMode)));
3170 STAM_REL_COUNTER_INC(&pVCpu->pgm.s.cGuestModeChanges);
3171
3172 /*
3173 * Calc the shadow mode and switcher.
3174 */
3175 PGMMODE enmShadowMode = pgmCalcShadowMode(pVM, enmGuestMode, pVM->pgm.s.enmHostMode, pVCpu->pgm.s.enmShadowMode);
3176
3177 /*
3178 * Exit old mode(s).
3179 */
3180 /* shadow */
3181 if (enmShadowMode != pVCpu->pgm.s.enmShadowMode)
3182 {
3183 LogFlow(("PGMHCChangeMode: Shadow mode: %s -> %s\n", PGMGetModeName(pVCpu->pgm.s.enmShadowMode), PGMGetModeName(enmShadowMode)));
3184 uintptr_t idxOldShw = pVCpu->pgm.s.idxShadowModeData;
3185 if ( idxOldShw < RT_ELEMENTS(g_aPgmShadowModeData)
3186 && g_aPgmShadowModeData[idxOldShw].pfnExit)
3187 {
3188 int rc = g_aPgmShadowModeData[idxOldShw].pfnExit(pVCpu);
3189 AssertMsgRCReturn(rc, ("Exit failed for shadow mode %d: %Rrc\n", pVCpu->pgm.s.enmShadowMode, rc), rc);
3190 }
3191 }
3192 else
3193 LogFlow(("PGMHCChangeMode: Shadow mode remains: %s\n", PGMGetModeName(pVCpu->pgm.s.enmShadowMode)));
3194
3195 /* guest */
3196 uintptr_t const idxOldGst = pVCpu->pgm.s.idxGuestModeData;
3197 if ( idxOldGst < RT_ELEMENTS(g_aPgmGuestModeData)
3198 && g_aPgmGuestModeData[idxOldGst].pfnExit)
3199 {
3200 int rc = g_aPgmGuestModeData[idxOldGst].pfnExit(pVCpu);
3201 AssertMsgReturn(RT_SUCCESS(rc), ("Exit failed for guest mode %d: %Rrc\n", pVCpu->pgm.s.enmGuestMode, rc), rc);
3202 }
3203 pVCpu->pgm.s.GCPhysCR3 = NIL_RTGCPHYS;
3204
3205 /*
3206 * Change the paging mode data indexes.
3207 */
3208 uintptr_t idxNewGst = pVCpu->pgm.s.idxGuestModeData = pgmModeToType(enmGuestMode);
3209 AssertReturn(idxNewGst < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
3210 AssertReturn(g_aPgmGuestModeData[idxNewGst].uType == idxNewGst, VERR_PGM_MODE_IPE);
3211 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnGetPage, VERR_PGM_MODE_IPE);
3212 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnModifyPage, VERR_PGM_MODE_IPE);
3213 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnGetPDE, VERR_PGM_MODE_IPE);
3214 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnExit, VERR_PGM_MODE_IPE);
3215 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnEnter, VERR_PGM_MODE_IPE);
3216#ifdef IN_RING3
3217 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnRelocate, VERR_PGM_MODE_IPE);
3218#endif
3219
3220 uintptr_t const idxNewShw = pVCpu->pgm.s.idxShadowModeData = pgmModeToType(enmShadowMode);
3221 AssertReturn(idxNewShw < RT_ELEMENTS(g_aPgmShadowModeData), VERR_PGM_MODE_IPE);
3222 AssertReturn(g_aPgmShadowModeData[idxNewShw].uType == idxNewShw, VERR_PGM_MODE_IPE);
3223 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnGetPage, VERR_PGM_MODE_IPE);
3224 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnModifyPage, VERR_PGM_MODE_IPE);
3225 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnExit, VERR_PGM_MODE_IPE);
3226 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnEnter, VERR_PGM_MODE_IPE);
3227#ifdef IN_RING3
3228 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnRelocate, VERR_PGM_MODE_IPE);
3229#endif
3230
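 /* The both-mode table is a flattened 2D array: one row per shadow mode type
    with PGM_TYPE_END guest mode entries per row, hence the index calculation
    below. */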
3231 uintptr_t const idxNewBth = pVCpu->pgm.s.idxBothModeData = (idxNewShw - PGM_TYPE_FIRST_SHADOW) * PGM_TYPE_END + idxNewGst;
3232 AssertReturn(g_aPgmBothModeData[idxNewBth].uShwType == idxNewShw, VERR_PGM_MODE_IPE);
3233 AssertReturn(g_aPgmBothModeData[idxNewBth].uGstType == idxNewGst, VERR_PGM_MODE_IPE);
3234 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnInvalidatePage, VERR_PGM_MODE_IPE);
3235 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnSyncCR3, VERR_PGM_MODE_IPE);
3236 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnPrefetchPage, VERR_PGM_MODE_IPE);
3237 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnVerifyAccessSyncPage, VERR_PGM_MODE_IPE);
3238 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnMapCR3, VERR_PGM_MODE_IPE);
3239 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnUnmapCR3, VERR_PGM_MODE_IPE);
3240 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnEnter, VERR_PGM_MODE_IPE);
3241#ifdef VBOX_STRICT
3242 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnAssertCR3, VERR_PGM_MODE_IPE);
3243#endif
3244
3245 /*
3246 * Enter new shadow mode (if changed).
3247 */
3248 if (enmShadowMode != pVCpu->pgm.s.enmShadowMode)
3249 {
3250 pVCpu->pgm.s.enmShadowMode = enmShadowMode;
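 /* The boolean argument tells the shadow mode code whether the guest is using
    64-bit (long mode) paging. */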
3251 int rc = g_aPgmShadowModeData[idxNewShw].pfnEnter(pVCpu, enmGuestMode >= PGMMODE_AMD64);
3252 AssertLogRelMsgRCReturnStmt(rc, ("Entering enmShadowMode=%s failed: %Rrc\n", PGMGetModeName(enmShadowMode), rc),
3253 pVCpu->pgm.s.enmShadowMode = PGMMODE_INVALID, rc);
3254 }
3255
3256 /*
3257 * Always flag the necessary updates
3258 */
3259 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3260
3261 /*
3262 * Enter the new guest and shadow+guest modes.
3263 */
3264 /* Calc the new CR3 value. */
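 /* Note: the alignment requirements differ per mode: 32-bit and AMD64 paging
    roots are 4 KB aligned, while the PAE PDPT only needs 32 byte alignment,
    hence the different CR3 masks below. */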
3265 RTGCPHYS GCPhysCR3;
3266 switch (enmGuestMode)
3267 {
3268 case PGMMODE_REAL:
3269 case PGMMODE_PROTECTED:
3270 GCPhysCR3 = NIL_RTGCPHYS;
3271 break;
3272
3273 case PGMMODE_32_BIT:
3274 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_PAGE_MASK;
3275 break;
3276
3277 case PGMMODE_PAE_NX:
3278 case PGMMODE_PAE:
3279 if (!pVM->cpum.ro.GuestFeatures.fPae)
3280#ifdef IN_RING3 /** @todo r=bird: wrong place, probably hasn't really worked for a while. */
3281 return VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_FATAL, "PAEmode",
3282 N_("The guest is trying to switch to the PAE mode which is currently disabled by default in VirtualBox. PAE support can be enabled using the VM settings (System/Processor)"));
3283#else
3284 AssertLogRelMsgFailedReturn(("enmGuestMode=%s - Try enable PAE for the guest!\n", PGMGetModeName(enmGuestMode)), VERR_PGM_MODE_IPE);
3285
3286#endif
3287 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_PAE_PAGE_MASK;
3288 break;
3289
3290#ifdef VBOX_WITH_64_BITS_GUESTS
3291 case PGMMODE_AMD64_NX:
3292 case PGMMODE_AMD64:
3293 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_AMD64_PAGE_MASK;
3294 break;
3295#endif
3296 default:
3297 AssertLogRelMsgFailedReturn(("enmGuestMode=%d\n", enmGuestMode), VERR_PGM_MODE_IPE);
3298 }
3299
3300 /* Enter the new guest mode. */
3301 pVCpu->pgm.s.enmGuestMode = enmGuestMode;
3302 int rc = g_aPgmGuestModeData[idxNewGst].pfnEnter(pVCpu, GCPhysCR3);
3303 int rc2 = g_aPgmBothModeData[idxNewBth].pfnEnter(pVCpu, GCPhysCR3);
3304
3305 /* Set the new guest CR3. */
3306 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
3307
3308 /* Merge the status codes: keep the first failure, collapse informational successes to VINF_SUCCESS. */
3309 AssertRC(rc);
3310 AssertRC(rc2);
3311 if (RT_SUCCESS(rc))
3312 {
3313 rc = rc2;
3314 if (RT_SUCCESS(rc)) /* no informational status codes. */
3315 rc = VINF_SUCCESS;
3316 }
3317
3318 /*
3319 * Notify HM.
3320 */
3321 HMHCChangedPagingMode(pVM, pVCpu, pVCpu->pgm.s.enmShadowMode, pVCpu->pgm.s.enmGuestMode);
3322 return rc;
3323}
3324
3325
3326/**
3327 * Called by CPUM or REM when CR0.WP changes to 1.
3328 *
3329 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3330 * @thread EMT
3331 */
3332VMMDECL(void) PGMCr0WpEnabled(PVMCPUCC pVCpu)
3333{
3334 /*
3335 * Netware WP0+RO+US hack cleanup when WP0 -> WP1.
3336 *
3337 * Use the counter to judge whether there might be pool pages with active
3338 * hacks in them. If there are, we will be running the risk of messing up
3339 * the guest by allowing it to write to read-only pages. Thus, we have to
3340 * clear the page pool ASAP if there is the slightest chance.
3341 */
3342 if (pVCpu->pgm.s.cNetwareWp0Hacks > 0)
3343 {
3344 Assert(pVCpu->CTX_SUFF(pVM)->cCpus == 1);
3345
3346 Log(("PGMCr0WpEnabled: %llu WP0 hacks active - clearing page pool\n", pVCpu->pgm.s.cNetwareWp0Hacks));
3347 pVCpu->pgm.s.cNetwareWp0Hacks = 0;
3348 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
3349 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3350 }
3351}
3352
3353
3354/**
3355 * Gets the current guest paging mode.
3356 *
3357 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
3358 *
3359 * @returns The current paging mode.
3360 * @param pVCpu The cross context virtual CPU structure.
3361 */
3362VMMDECL(PGMMODE) PGMGetGuestMode(PVMCPU pVCpu)
3363{
3364 return pVCpu->pgm.s.enmGuestMode;
3365}
3366
3367
3368/**
3369 * Gets the current shadow paging mode.
3370 *
3371 * @returns The current paging mode.
3372 * @param pVCpu The cross context virtual CPU structure.
3373 */
3374VMMDECL(PGMMODE) PGMGetShadowMode(PVMCPU pVCpu)
3375{
3376 return pVCpu->pgm.s.enmShadowMode;
3377}
3378
3379
3380/**
3381 * Gets the current host paging mode.
3382 *
3383 * @returns The current paging mode.
3384 * @param pVM The cross context VM structure.
3385 */
3386VMMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
3387{
3388 switch (pVM->pgm.s.enmHostMode)
3389 {
3390 case SUPPAGINGMODE_32_BIT:
3391 case SUPPAGINGMODE_32_BIT_GLOBAL:
3392 return PGMMODE_32_BIT;
3393
3394 case SUPPAGINGMODE_PAE:
3395 case SUPPAGINGMODE_PAE_GLOBAL:
3396 return PGMMODE_PAE;
3397
3398 case SUPPAGINGMODE_PAE_NX:
3399 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3400 return PGMMODE_PAE_NX;
3401
3402 case SUPPAGINGMODE_AMD64:
3403 case SUPPAGINGMODE_AMD64_GLOBAL:
3404 return PGMMODE_AMD64;
3405
3406 case SUPPAGINGMODE_AMD64_NX:
3407 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3408 return PGMMODE_AMD64_NX;
3409
3410 default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
3411 }
3412
3413 return PGMMODE_INVALID;
3414}
3415
3416
3417/**
3418 * Get mode name.
3419 *
3420 * @returns read-only name string.
3421 * @param enmMode The mode whose name is desired.
3422 */
3423VMMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
3424{
3425 switch (enmMode)
3426 {
3427 case PGMMODE_REAL: return "Real";
3428 case PGMMODE_PROTECTED: return "Protected";
3429 case PGMMODE_32_BIT: return "32-bit";
3430 case PGMMODE_PAE: return "PAE";
3431 case PGMMODE_PAE_NX: return "PAE+NX";
3432 case PGMMODE_AMD64: return "AMD64";
3433 case PGMMODE_AMD64_NX: return "AMD64+NX";
3434 case PGMMODE_NESTED_32BIT: return "Nested-32";
3435 case PGMMODE_NESTED_PAE: return "Nested-PAE";
3436 case PGMMODE_NESTED_AMD64: return "Nested-AMD64";
3437 case PGMMODE_EPT: return "EPT";
3438 case PGMMODE_NONE: return "None";
3439 default: return "unknown mode value";
3440 }
3441}
3442
3443
3444/**
3445 * Gets the physical address represented in the guest CR3 as PGM sees it.
3446 *
3447 * This is mainly for logging and debugging.
3448 *
3449 * @returns PGM's guest CR3 value.
3450 * @param pVCpu The cross context virtual CPU structure.
3451 */
3452VMM_INT_DECL(RTGCPHYS) PGMGetGuestCR3Phys(PVMCPU pVCpu)
3453{
3454 return pVCpu->pgm.s.GCPhysCR3;
3455}
3456
3457
3458
3459/**
3460 * Notification from CPUM that the EFER.NXE bit has changed.
3461 *
3462 * @param pVCpu The cross context virtual CPU structure of the CPU for
3463 * which EFER changed.
3464 * @param fNxe The new NXE state.
3465 */
3466VMM_INT_DECL(void) PGMNotifyNxeChanged(PVMCPU pVCpu, bool fNxe)
3467{
3468/** @todo VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu); */
3469 Log(("PGMNotifyNxeChanged: fNxe=%RTbool\n", fNxe));
3470
3471 pVCpu->pgm.s.fNoExecuteEnabled = fNxe;
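 /* With NXE enabled the NX bit becomes a valid bit in the guest paging
    structures, so it is cleared from the must-be-zero masks and set in the
    masks of bits copied into the shadow tables; with NXE disabled it is
    reserved again and must not be shadowed. */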
3472 if (fNxe)
3473 {
3474 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
3475 pVCpu->pgm.s.fGstPaeMbzPteMask &= ~X86_PTE_PAE_NX;
3476 pVCpu->pgm.s.fGstPaeMbzPdeMask &= ~X86_PDE_PAE_NX;
3477 pVCpu->pgm.s.fGstPaeMbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
3478 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask - N/A */
3479 pVCpu->pgm.s.fGstAmd64MbzPteMask &= ~X86_PTE_PAE_NX;
3480 pVCpu->pgm.s.fGstAmd64MbzPdeMask &= ~X86_PDE_PAE_NX;
3481 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
3482 pVCpu->pgm.s.fGstAmd64MbzPdpeMask &= ~X86_PDPE_LM_NX;
3483 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask &= ~X86_PDPE_LM_NX;
3484 pVCpu->pgm.s.fGstAmd64MbzPml4eMask &= ~X86_PML4E_NX;
3485
3486 pVCpu->pgm.s.fGst64ShadowedPteMask |= X86_PTE_PAE_NX;
3487 pVCpu->pgm.s.fGst64ShadowedPdeMask |= X86_PDE_PAE_NX;
3488 pVCpu->pgm.s.fGst64ShadowedBigPdeMask |= X86_PDE2M_PAE_NX;
3489 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask |= X86_PDE2M_PAE_NX;
3490 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask |= X86_PDPE_LM_NX;
3491 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask |= X86_PML4E_NX;
3492 }
3493 else
3494 {
3495 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
3496 pVCpu->pgm.s.fGstPaeMbzPteMask |= X86_PTE_PAE_NX;
3497 pVCpu->pgm.s.fGstPaeMbzPdeMask |= X86_PDE_PAE_NX;
3498 pVCpu->pgm.s.fGstPaeMbzBigPdeMask |= X86_PDE2M_PAE_NX;
3499 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask - N/A */
3500 pVCpu->pgm.s.fGstAmd64MbzPteMask |= X86_PTE_PAE_NX;
3501 pVCpu->pgm.s.fGstAmd64MbzPdeMask |= X86_PDE_PAE_NX;
3502 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask |= X86_PDE2M_PAE_NX;
3503 pVCpu->pgm.s.fGstAmd64MbzPdpeMask |= X86_PDPE_LM_NX;
3504 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask |= X86_PDPE_LM_NX;
3505 pVCpu->pgm.s.fGstAmd64MbzPml4eMask |= X86_PML4E_NX;
3506
3507 pVCpu->pgm.s.fGst64ShadowedPteMask &= ~X86_PTE_PAE_NX;
3508 pVCpu->pgm.s.fGst64ShadowedPdeMask &= ~X86_PDE_PAE_NX;
3509 pVCpu->pgm.s.fGst64ShadowedBigPdeMask &= ~X86_PDE2M_PAE_NX;
3510 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask &= ~X86_PDE2M_PAE_NX;
3511 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask &= ~X86_PDPE_LM_NX;
3512 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask &= ~X86_PML4E_NX;
3513 }
3514}
3515
3516
3517/**
3518 * Checks if any PGM pool pages are marked dirty (not write monitored).
3519 *
3520 * @returns true if dirty pages are pending, false if not.
3521 * @param pVM The cross context VM structure.
3522 */
3523VMMDECL(bool) PGMHasDirtyPages(PVM pVM)
3524{
3525 return pVM->pgm.s.CTX_SUFF(pPool)->cDirtyPages != 0;
3526}
3527
3528
3529/**
3530 * Checks whether this VCPU currently owns the PGM lock.
3531 *
3532 * @returns true if owner, false if not.
3533 * @param pVM The cross context VM structure.
3534 */
3535VMMDECL(bool) PGMIsLockOwner(PVMCC pVM)
3536{
3537 return PDMCritSectIsOwner(pVM, &pVM->pgm.s.CritSectX);
3538}
3539
3540
3541/**
3542 * Enables or disables large page usage.
3543 *
3544 * @returns VBox status code.
3545 * @param pVM The cross context VM structure.
3546 * @param fUseLargePages Whether to use large pages.
3547 */
3548VMMDECL(int) PGMSetLargePageUsage(PVMCC pVM, bool fUseLargePages)
3549{
3550 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3551
3552 pVM->pgm.s.fUseLargePages = fUseLargePages;
3553 return VINF_SUCCESS;
3554}
3555
3556
3557/**
3558 * Acquire the PGM lock.
3559 *
3560 * @returns VBox status code
3561 * @param pVM The cross context VM structure.
3562 * @param fVoid Set if the caller cannot handle failure returns.
3563 * @param SRC_POS The source position of the caller (RT_SRC_POS).
3564 */
3565#if defined(VBOX_STRICT) || defined(DOXYGEN_RUNNING)
3566int pgmLockDebug(PVMCC pVM, bool fVoid, RT_SRC_POS_DECL)
3567#else
3568int pgmLock(PVMCC pVM, bool fVoid)
3569#endif
3570{
3571#if defined(VBOX_STRICT)
3572 int rc = PDMCritSectEnterDebug(pVM, &pVM->pgm.s.CritSectX, VINF_SUCCESS, (uintptr_t)ASMReturnAddress(), RT_SRC_POS_ARGS);
3573#else
3574 int rc = PDMCritSectEnter(pVM, &pVM->pgm.s.CritSectX, VINF_SUCCESS);
3575#endif
3576 if (RT_SUCCESS(rc))
3577 return rc;
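 /* For callers that cannot deal with failure, convert the error into a
    release assertion rather than returning it. */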
3578 if (fVoid)
3579 PDM_CRITSECT_RELEASE_ASSERT_RC(pVM, &pVM->pgm.s.CritSectX, rc);
3580 else
3581 AssertRC(rc);
3582 return rc;
3583}
3584
3585
3586/**
3587 * Release the PGM lock.
3588 *
3590 * @param pVM The cross context VM structure.
3591 */
3592void pgmUnlock(PVMCC pVM)
3593{
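 /* Stash and zero the deprecated page lock count before leaving the section;
    if the leave turns out to be nested (VINF_SEM_NESTED) the lock is still
    held and the count is restored. */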
3594 uint32_t cDeprecatedPageLocks = pVM->pgm.s.cDeprecatedPageLocks;
3595 pVM->pgm.s.cDeprecatedPageLocks = 0;
3596 int rc = PDMCritSectLeave(pVM, &pVM->pgm.s.CritSectX);
3597 if (rc == VINF_SEM_NESTED)
3598 pVM->pgm.s.cDeprecatedPageLocks = cDeprecatedPageLocks;
3599}
3600
3601
3602#if !defined(IN_R0) || defined(LOG_ENABLED)
3603
3604/** Format handler for PGMPAGE.
3605 * @copydoc FNRTSTRFORMATTYPE */
3606static DECLCALLBACK(size_t) pgmFormatTypeHandlerPage(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
3607 const char *pszType, void const *pvValue,
3608 int cchWidth, int cchPrecision, unsigned fFlags,
3609 void *pvUser)
3610{
3611 size_t cch;
3612 PCPGMPAGE pPage = (PCPGMPAGE)pvValue;
3613 if (RT_VALID_PTR(pPage))
3614 {
3615 char szTmp[64+80];
3616
3617 cch = 0;
3618
3619 /* The single character page state: Z=zero, A=allocated, W=write monitored, S=shared. */
3620 static const char s_achPageStates[4] = { 'Z', 'A', 'W', 'S' };
3621 szTmp[cch++] = s_achPageStates[PGM_PAGE_GET_STATE_NA(pPage)];
3622
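 /* The format precision selects the parts: no precision prints them all, a
    precision of N prints only the part at level N, and a precision of N+10
    prints the parts at all levels up to and including N. */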
3623# define IS_PART_INCLUDED(lvl) ( !(fFlags & RTSTR_F_PRECISION) || cchPrecision == (lvl) || cchPrecision >= (lvl)+10 )
3624 if (IS_PART_INCLUDED(5))
3625 {
3626 static const char s_achHandlerStates[4] = { '-', 't', 'w', 'a' };
3627 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)];
3628 }
3629
3630 /* The type. */
3631 if (IS_PART_INCLUDED(4))
3632 {
3633 szTmp[cch++] = ':';
3634 static const char s_achPageTypes[8][4] = { "INV", "RAM", "MI2", "M2A", "SHA", "ROM", "MIO", "BAD" };
3635 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][0];
3636 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][1];
3637 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][2];
3638 }
3639
3640 /* The numbers. */
3641 if (IS_PART_INCLUDED(3))
3642 {
3643 szTmp[cch++] = ':';
3644 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_HCPHYS_NA(pPage), 16, 12, 0, RTSTR_F_ZEROPAD | RTSTR_F_64BIT);
3645 }
3646
3647 if (IS_PART_INCLUDED(2))
3648 {
3649 szTmp[cch++] = ':';
3650 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_PAGEID(pPage), 16, 7, 0, RTSTR_F_ZEROPAD | RTSTR_F_32BIT);
3651 }
3652
3653 if (IS_PART_INCLUDED(6))
3654 {
3655 szTmp[cch++] = ':';
3656 static const char s_achRefs[4] = { '-', 'U', '!', 'L' };
3657 szTmp[cch++] = s_achRefs[PGM_PAGE_GET_TD_CREFS_NA(pPage)];
3658 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_TD_IDX_NA(pPage), 16, 4, 0, RTSTR_F_ZEROPAD | RTSTR_F_16BIT);
3659 }
3660# undef IS_PART_INCLUDED
3661
3662 cch = pfnOutput(pvArgOutput, szTmp, cch);
3663 }
3664 else
3665 cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmpage-ptr>"));
3666 NOREF(pszType); NOREF(cchWidth); NOREF(pvUser);
3667 return cch;
3668}
3669
3670
3671/** Format handler for PGMRAMRANGE.
3672 * @copydoc FNRTSTRFORMATTYPE */
3673static DECLCALLBACK(size_t) pgmFormatTypeHandlerRamRange(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
3674 const char *pszType, void const *pvValue,
3675 int cchWidth, int cchPrecision, unsigned fFlags,
3676 void *pvUser)
3677{
3678 size_t cch;
3679 PGMRAMRANGE const *pRam = (PGMRAMRANGE const *)pvValue;
3680 if (RT_VALID_PTR(pRam))
3681 {
3682 char szTmp[80];
3683 cch = RTStrPrintf(szTmp, sizeof(szTmp), "%RGp-%RGp", pRam->GCPhys, pRam->GCPhysLast);
3684 cch = pfnOutput(pvArgOutput, szTmp, cch);
3685 }
3686 else
3687 cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmramrange-ptr>"));
3688 NOREF(pszType); NOREF(cchWidth); NOREF(cchPrecision); NOREF(pvUser); NOREF(fFlags);
3689 return cch;
3690}
3691
3692/** Format type handlers to be registered/deregistered. */
3693static const struct
3694{
3695 char szType[24];
3696 PFNRTSTRFORMATTYPE pfnHandler;
3697} g_aPgmFormatTypes[] =
3698{
3699 { "pgmpage", pgmFormatTypeHandlerPage },
3700 { "pgmramrange", pgmFormatTypeHandlerRamRange }
3701};
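/* Once registered, these handlers back the custom %R[pgmpage] and %R[pgmramrange]
   format specifiers, so a PGMPAGE pointer can be logged with e.g. (illustrative):
   Log(("%R[pgmpage]\n", pPage)); */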
3702
3703#endif /* !IN_R0 || LOG_ENABLED */
3704
3705/**
3706 * Registers the global string format types.
3707 *
3708 * This should be called at module load time or in some other manner that ensures
3709 * that it's called exactly once.
3710 *
3711 * @returns VINF_SUCCESS on success, IPRT status code on RTStrFormatTypeRegister failure.
3712 */
3713VMMDECL(int) PGMRegisterStringFormatTypes(void)
3714{
3715#if !defined(IN_R0) || defined(LOG_ENABLED)
3716 int rc = VINF_SUCCESS;
3717 unsigned i;
3718 for (i = 0; RT_SUCCESS(rc) && i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
3719 {
3720 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
3721# ifdef IN_RING0
3722 if (rc == VERR_ALREADY_EXISTS)
3723 {
3724 /* in case of cleanup failure in ring-0 */
3725 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
3726 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
3727 }
3728# endif
3729 }
3730 if (RT_FAILURE(rc))
3731 while (i-- > 0)
3732 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
3733
3734 return rc;
3735#else
3736 return VINF_SUCCESS;
3737#endif
3738}
3739
3740
3741/**
3742 * Deregisters the global string format types.
3743 *
3744 * This should be called at module unload time or in some other manner that
3745 * ensures that it's called exactly once.
3746 */
3747VMMDECL(void) PGMDeregisterStringFormatTypes(void)
3748{
3749#if !defined(IN_R0) || defined(LOG_ENABLED)
3750 for (unsigned i = 0; i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
3751 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
3752#endif
3753}
3754
3755#ifdef VBOX_STRICT
3756
3757# ifndef PGM_WITHOUT_MAPPINGS
3758/**
3759 * Asserts that there are no mapping conflicts.
3760 *
3761 * @returns Number of conflicts.
3762 * @param pVM The cross context VM structure.
3763 */
3764VMMDECL(unsigned) PGMAssertNoMappingConflicts(PVM pVM)
3765{
3766 unsigned cErrors = 0;
3767
3768 /* Only applies to raw mode -> 1 VCPU */
3769 Assert(pVM->cCpus == 1);
3770 PVMCPU pVCpu = &VMCC_GET_CPU_0(pVM);
3771
3772 /*
3773 * Check for mapping conflicts.
3774 */
3775 for (PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
3776 pMapping;
3777 pMapping = pMapping->CTX_SUFF(pNext))
3778 {
3779 /** @todo This is slow and should be optimized, but since it's just assertions I don't care now. */
3780 for (RTGCPTR GCPtr = pMapping->GCPtr; GCPtr <= pMapping->GCPtrLast; GCPtr += PAGE_SIZE)
3781 {
3782 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)GCPtr, NULL, NULL);
3783 if (rc != VERR_PAGE_TABLE_NOT_PRESENT)
3784 {
3785 AssertMsgFailed(("Conflict at %RGv with %s\n", GCPtr, R3STRING(pMapping->pszDesc)));
3786 cErrors++;
3787 break;
3788 }
3789 }
3790 }
3791
3792 return cErrors;
3793}
3794# endif /* !PGM_WITHOUT_MAPPINGS */
3795
3796
3797/**
3798 * Asserts that everything related to the guest CR3 is correctly shadowed.
3799 *
3800 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
3801 * and assert the correctness of the guest CR3 mapping before asserting that the
3802 * shadow page tables are in sync with the guest page tables.
3803 *
3804 * @returns Number of conflicts.
3805 * @param pVM The cross context VM structure.
3806 * @param pVCpu The cross context virtual CPU structure.
3807 * @param cr3 The current guest CR3 register value.
3808 * @param cr4 The current guest CR4 register value.
3809 */
3810VMMDECL(unsigned) PGMAssertCR3(PVMCC pVM, PVMCPUCC pVCpu, uint64_t cr3, uint64_t cr4)
3811{
3812 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
3813
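 /* Note: as the function returns a conflict count, internal errors are
    reported as negated VBox status codes. */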
3814 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
3815 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), -VERR_PGM_MODE_IPE);
3816 AssertReturn(g_aPgmBothModeData[idxBth].pfnAssertCR3, -VERR_PGM_MODE_IPE);
3817
3818 PGM_LOCK_VOID(pVM);
3819 unsigned cErrors = g_aPgmBothModeData[idxBth].pfnAssertCR3(pVCpu, cr3, cr4, 0, ~(RTGCPTR)0);
3820 PGM_UNLOCK(pVM);
3821
3822 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
3823 return cErrors;
3824}
3825
3826#endif /* VBOX_STRICT */
3827