VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp@103884

Last change on this file since 103884 was 103417, checked in by vboxsync, 10 months ago

Devices/Graphics,VMM: Fix some unused function warnings, bugref:3409

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 152.8 KB
1/* $Id: PGMAll.cpp 103417 2024-02-19 08:44:55Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor - All context code.
4 */
5
6/*
7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_PGM
33#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
34#include <VBox/vmm/pgm.h>
35#include <VBox/vmm/cpum.h>
36#include <VBox/vmm/selm.h>
37#include <VBox/vmm/iem.h>
38#include <VBox/vmm/iom.h>
39#include <VBox/sup.h>
40#include <VBox/vmm/mm.h>
41#include <VBox/vmm/stam.h>
42#include <VBox/vmm/trpm.h>
43#include <VBox/vmm/em.h>
44#include <VBox/vmm/hm.h>
45#include <VBox/vmm/hm_vmx.h>
46#include "PGMInternal.h"
47#include <VBox/vmm/vmcc.h>
48#include "PGMInline.h"
49#include <iprt/assert.h>
50#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
51# include <iprt/asm-amd64-x86.h>
52#endif
53#include <iprt/string.h>
54#include <VBox/log.h>
55#include <VBox/param.h>
56#include <VBox/err.h>
57
58
59/*********************************************************************************************************************************
60* Internal Functions *
61*********************************************************************************************************************************/
62DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
63DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde);
64DECLINLINE(int) pgmGstMapCr3(PVMCPUCC pVCpu, RTGCPHYS GCPhysCr3, PRTHCPTR pHCPtrGuestCr3);
65#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
66static int pgmGstSlatWalk(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, bool fIsLinearAddrValid, RTGCPTR GCPtrNested, PPGMPTWALK pWalk,
67 PPGMPTWALKGST pGstWalk);
68static int pgmGstSlatTranslateCr3(PVMCPUCC pVCpu, uint64_t uCr3, PRTGCPHYS pGCPhysCr3);
69static int pgmShwGetNestedEPTPDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPhysNested, PEPTPDPT *ppPdpt, PEPTPD *ppPD,
70 PPGMPTWALKGST pGstWalkAll);
71#endif
72static int pgmShwSyncLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD);
73static int pgmShwGetEPTPDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD);
74#ifdef PGM_WITH_PAGE_ZEROING_DETECTION
75static bool pgmHandlePageZeroingCode(PVMCPUCC pVCpu, PCPUMCTX pCtx);
76#endif
77
78
79/*
80 * Second level translation - EPT.
81 */
82#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
83# define PGM_SLAT_TYPE PGM_SLAT_TYPE_EPT
84# include "PGMSlatDefs.h"
85# include "PGMAllGstSlatEpt.cpp.h"
86# undef PGM_SLAT_TYPE
87#endif
88
89
90/*
91 * Shadow - 32-bit mode
92 */
93#define PGM_SHW_TYPE PGM_TYPE_32BIT
94#define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
95#include "PGMAllShw.h"
96
97/* Guest - real mode */
98#define PGM_GST_TYPE PGM_TYPE_REAL
99#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
100#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
101#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
102#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
103#include "PGMGstDefs.h"
104#include "PGMAllGst.h"
105#include "PGMAllBth.h"
106#undef BTH_PGMPOOLKIND_PT_FOR_PT
107#undef BTH_PGMPOOLKIND_ROOT
108#undef PGM_BTH_NAME
109#undef PGM_GST_TYPE
110#undef PGM_GST_NAME
111
112/* Guest - protected mode */
113#define PGM_GST_TYPE PGM_TYPE_PROT
114#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
115#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
116#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
117#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
118#include "PGMGstDefs.h"
119#include "PGMAllGst.h"
120#include "PGMAllBth.h"
121#undef BTH_PGMPOOLKIND_PT_FOR_PT
122#undef BTH_PGMPOOLKIND_ROOT
123#undef PGM_BTH_NAME
124#undef PGM_GST_TYPE
125#undef PGM_GST_NAME
126
127/* Guest - 32-bit mode */
128#define PGM_GST_TYPE PGM_TYPE_32BIT
129#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
130#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
131#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
132#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
133#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD
134#include "PGMGstDefs.h"
135#include "PGMAllGst.h"
136#include "PGMAllBth.h"
137#undef BTH_PGMPOOLKIND_PT_FOR_BIG
138#undef BTH_PGMPOOLKIND_PT_FOR_PT
139#undef BTH_PGMPOOLKIND_ROOT
140#undef PGM_BTH_NAME
141#undef PGM_GST_TYPE
142#undef PGM_GST_NAME
143
144#undef PGM_SHW_TYPE
145#undef PGM_SHW_NAME
146
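
/*
 * Illustrative sketch (not taken from the PGM headers; all names below are
 * made up): the blocks above use "include-as-template" - a header body is
 * parameterised through macros such as PGM_SHW_NAME()/PGM_GST_NAME() and
 * re-included once per shadow/guest mode pair to stamp out mode-specific
 * functions with unique names.
 */
#if 0 /* illustration only, not compiled */
/* template.h */
# ifndef TMPL_NAME
#  error "define TMPL_NAME(name) before including template.h"
# endif
static int TMPL_NAME(GetPage)(unsigned uSlot)
{
    return (int)(uSlot & 0x1ff); /* a mode-specific body would go here */
}

/* consumer.c */
# define TMPL_NAME(name) Shw32Bit_##name
# include "template.h"           /* emits Shw32Bit_GetPage() */
# undef  TMPL_NAME
# define TMPL_NAME(name) ShwPae_##name
# include "template.h"           /* emits ShwPae_GetPage() */
# undef  TMPL_NAME
#endif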
147
148/*
149 * Shadow - PAE mode
150 */
151#define PGM_SHW_TYPE PGM_TYPE_PAE
152#define PGM_SHW_NAME(name) PGM_SHW_NAME_PAE(name)
153#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
154#include "PGMAllShw.h"
155
156/* Guest - real mode */
157#define PGM_GST_TYPE PGM_TYPE_REAL
158#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
159#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
160#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
161#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
162#include "PGMGstDefs.h"
163#include "PGMAllBth.h"
164#undef BTH_PGMPOOLKIND_PT_FOR_PT
165#undef BTH_PGMPOOLKIND_ROOT
166#undef PGM_BTH_NAME
167#undef PGM_GST_TYPE
168#undef PGM_GST_NAME
169
170/* Guest - protected mode */
171#define PGM_GST_TYPE PGM_TYPE_PROT
172#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
173#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
174#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
175#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
176#include "PGMGstDefs.h"
177#include "PGMAllBth.h"
178#undef BTH_PGMPOOLKIND_PT_FOR_PT
179#undef BTH_PGMPOOLKIND_ROOT
180#undef PGM_BTH_NAME
181#undef PGM_GST_TYPE
182#undef PGM_GST_NAME
183
184/* Guest - 32-bit mode */
185#define PGM_GST_TYPE PGM_TYPE_32BIT
186#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
187#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
188#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
189#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
190#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_FOR_32BIT
191#include "PGMGstDefs.h"
192#include "PGMAllBth.h"
193#undef BTH_PGMPOOLKIND_PT_FOR_BIG
194#undef BTH_PGMPOOLKIND_PT_FOR_PT
195#undef BTH_PGMPOOLKIND_ROOT
196#undef PGM_BTH_NAME
197#undef PGM_GST_TYPE
198#undef PGM_GST_NAME
199
200
201/* Guest - PAE mode */
202#define PGM_GST_TYPE PGM_TYPE_PAE
203#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
204#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
205#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
206#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
207#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT
208#include "PGMGstDefs.h"
209#include "PGMAllGst.h"
210#include "PGMAllBth.h"
211#undef BTH_PGMPOOLKIND_PT_FOR_BIG
212#undef BTH_PGMPOOLKIND_PT_FOR_PT
213#undef BTH_PGMPOOLKIND_ROOT
214#undef PGM_BTH_NAME
215#undef PGM_GST_TYPE
216#undef PGM_GST_NAME
217
218#undef PGM_SHW_TYPE
219#undef PGM_SHW_NAME
220
221
222/*
223 * Shadow - AMD64 mode
224 */
225#define PGM_SHW_TYPE PGM_TYPE_AMD64
226#define PGM_SHW_NAME(name) PGM_SHW_NAME_AMD64(name)
227#include "PGMAllShw.h"
228
229/* Guest - protected mode (only used for AMD-V nested paging in 64-bit mode) */
230/** @todo retire this hack. */
231#define PGM_GST_TYPE PGM_TYPE_PROT
232#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
233#define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
234#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
235#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PD_PHYS
236#include "PGMGstDefs.h"
237#include "PGMAllBth.h"
238#undef BTH_PGMPOOLKIND_PT_FOR_PT
239#undef BTH_PGMPOOLKIND_ROOT
240#undef PGM_BTH_NAME
241#undef PGM_GST_TYPE
242#undef PGM_GST_NAME
243
244#ifdef VBOX_WITH_64_BITS_GUESTS
245/* Guest - AMD64 mode */
246# define PGM_GST_TYPE PGM_TYPE_AMD64
247# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
248# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
249# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
250# define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
251# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_64BIT_PML4
252# include "PGMGstDefs.h"
253# include "PGMAllGst.h"
254# include "PGMAllBth.h"
255# undef BTH_PGMPOOLKIND_PT_FOR_BIG
256# undef BTH_PGMPOOLKIND_PT_FOR_PT
257# undef BTH_PGMPOOLKIND_ROOT
258# undef PGM_BTH_NAME
259# undef PGM_GST_TYPE
260# undef PGM_GST_NAME
261#endif /* VBOX_WITH_64_BITS_GUESTS */
262
263#undef PGM_SHW_TYPE
264#undef PGM_SHW_NAME
265
266
267/*
268 * Shadow - 32-bit nested paging mode.
269 */
270#define PGM_SHW_TYPE PGM_TYPE_NESTED_32BIT
271#define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED_32BIT(name)
272#include "PGMAllShw.h"
273
274/* Guest - real mode */
275#define PGM_GST_TYPE PGM_TYPE_REAL
276#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
277#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_REAL(name)
278#include "PGMGstDefs.h"
279#include "PGMAllBth.h"
280#undef PGM_BTH_NAME
281#undef PGM_GST_TYPE
282#undef PGM_GST_NAME
283
284/* Guest - protected mode */
285#define PGM_GST_TYPE PGM_TYPE_PROT
286#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
287#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_PROT(name)
288#include "PGMGstDefs.h"
289#include "PGMAllBth.h"
290#undef PGM_BTH_NAME
291#undef PGM_GST_TYPE
292#undef PGM_GST_NAME
293
294/* Guest - 32-bit mode */
295#define PGM_GST_TYPE PGM_TYPE_32BIT
296#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
297#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_32BIT(name)
298#include "PGMGstDefs.h"
299#include "PGMAllBth.h"
300#undef PGM_BTH_NAME
301#undef PGM_GST_TYPE
302#undef PGM_GST_NAME
303
304/* Guest - PAE mode */
305#define PGM_GST_TYPE PGM_TYPE_PAE
306#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
307#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_PAE(name)
308#include "PGMGstDefs.h"
309#include "PGMAllBth.h"
310#undef PGM_BTH_NAME
311#undef PGM_GST_TYPE
312#undef PGM_GST_NAME
313
314#ifdef VBOX_WITH_64_BITS_GUESTS
315/* Guest - AMD64 mode */
316# define PGM_GST_TYPE PGM_TYPE_AMD64
317# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
318# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_AMD64(name)
319# include "PGMGstDefs.h"
320# include "PGMAllBth.h"
321# undef PGM_BTH_NAME
322# undef PGM_GST_TYPE
323# undef PGM_GST_NAME
324#endif /* VBOX_WITH_64_BITS_GUESTS */
325
326#undef PGM_SHW_TYPE
327#undef PGM_SHW_NAME
328
329
330/*
331 * Shadow - PAE nested paging mode.
332 */
333#define PGM_SHW_TYPE PGM_TYPE_NESTED_PAE
334#define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED_PAE(name)
335#include "PGMAllShw.h"
336
337/* Guest - real mode */
338#define PGM_GST_TYPE PGM_TYPE_REAL
339#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
340#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_REAL(name)
341#include "PGMGstDefs.h"
342#include "PGMAllBth.h"
343#undef PGM_BTH_NAME
344#undef PGM_GST_TYPE
345#undef PGM_GST_NAME
346
347/* Guest - protected mode */
348#define PGM_GST_TYPE PGM_TYPE_PROT
349#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
350#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_PROT(name)
351#include "PGMGstDefs.h"
352#include "PGMAllBth.h"
353#undef PGM_BTH_NAME
354#undef PGM_GST_TYPE
355#undef PGM_GST_NAME
356
357/* Guest - 32-bit mode */
358#define PGM_GST_TYPE PGM_TYPE_32BIT
359#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
360#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_32BIT(name)
361#include "PGMGstDefs.h"
362#include "PGMAllBth.h"
363#undef PGM_BTH_NAME
364#undef PGM_GST_TYPE
365#undef PGM_GST_NAME
366
367/* Guest - PAE mode */
368#define PGM_GST_TYPE PGM_TYPE_PAE
369#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
370#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_PAE(name)
371#include "PGMGstDefs.h"
372#include "PGMAllBth.h"
373#undef PGM_BTH_NAME
374#undef PGM_GST_TYPE
375#undef PGM_GST_NAME
376
377#ifdef VBOX_WITH_64_BITS_GUESTS
378/* Guest - AMD64 mode */
379# define PGM_GST_TYPE PGM_TYPE_AMD64
380# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
381# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_AMD64(name)
382# include "PGMGstDefs.h"
383# include "PGMAllBth.h"
384# undef PGM_BTH_NAME
385# undef PGM_GST_TYPE
386# undef PGM_GST_NAME
387#endif /* VBOX_WITH_64_BITS_GUESTS */
388
389#undef PGM_SHW_TYPE
390#undef PGM_SHW_NAME
391
392
393/*
394 * Shadow - AMD64 nested paging mode.
395 */
396#define PGM_SHW_TYPE PGM_TYPE_NESTED_AMD64
397#define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED_AMD64(name)
398#include "PGMAllShw.h"
399
400/* Guest - real mode */
401#define PGM_GST_TYPE PGM_TYPE_REAL
402#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
403#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_REAL(name)
404#include "PGMGstDefs.h"
405#include "PGMAllBth.h"
406#undef PGM_BTH_NAME
407#undef PGM_GST_TYPE
408#undef PGM_GST_NAME
409
410/* Guest - protected mode */
411#define PGM_GST_TYPE PGM_TYPE_PROT
412#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
413#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_PROT(name)
414#include "PGMGstDefs.h"
415#include "PGMAllBth.h"
416#undef PGM_BTH_NAME
417#undef PGM_GST_TYPE
418#undef PGM_GST_NAME
419
420/* Guest - 32-bit mode */
421#define PGM_GST_TYPE PGM_TYPE_32BIT
422#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
423#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_32BIT(name)
424#include "PGMGstDefs.h"
425#include "PGMAllBth.h"
426#undef PGM_BTH_NAME
427#undef PGM_GST_TYPE
428#undef PGM_GST_NAME
429
430/* Guest - PAE mode */
431#define PGM_GST_TYPE PGM_TYPE_PAE
432#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
433#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_PAE(name)
434#include "PGMGstDefs.h"
435#include "PGMAllBth.h"
436#undef PGM_BTH_NAME
437#undef PGM_GST_TYPE
438#undef PGM_GST_NAME
439
440#ifdef VBOX_WITH_64_BITS_GUESTS
441/* Guest - AMD64 mode */
442# define PGM_GST_TYPE PGM_TYPE_AMD64
443# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
444# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_AMD64(name)
445# include "PGMGstDefs.h"
446# include "PGMAllBth.h"
447# undef PGM_BTH_NAME
448# undef PGM_GST_TYPE
449# undef PGM_GST_NAME
450#endif /* VBOX_WITH_64_BITS_GUESTS */
451
452#undef PGM_SHW_TYPE
453#undef PGM_SHW_NAME
454
455
456/*
457 * Shadow - EPT.
458 */
459#define PGM_SHW_TYPE PGM_TYPE_EPT
460#define PGM_SHW_NAME(name) PGM_SHW_NAME_EPT(name)
461#include "PGMAllShw.h"
462
463/* Guest - real mode */
464#define PGM_GST_TYPE PGM_TYPE_REAL
465#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
466#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_REAL(name)
467#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
468#include "PGMGstDefs.h"
469#include "PGMAllBth.h"
470#undef BTH_PGMPOOLKIND_PT_FOR_PT
471#undef PGM_BTH_NAME
472#undef PGM_GST_TYPE
473#undef PGM_GST_NAME
474
475/* Guest - protected mode */
476#define PGM_GST_TYPE PGM_TYPE_PROT
477#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
478#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
479#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
480#include "PGMGstDefs.h"
481#include "PGMAllBth.h"
482#undef BTH_PGMPOOLKIND_PT_FOR_PT
483#undef PGM_BTH_NAME
484#undef PGM_GST_TYPE
485#undef PGM_GST_NAME
486
487/* Guest - 32-bit mode */
488#define PGM_GST_TYPE PGM_TYPE_32BIT
489#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
490#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_32BIT(name)
491#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
492#include "PGMGstDefs.h"
493#include "PGMAllBth.h"
494#undef BTH_PGMPOOLKIND_PT_FOR_PT
495#undef PGM_BTH_NAME
496#undef PGM_GST_TYPE
497#undef PGM_GST_NAME
498
499/* Guest - PAE mode */
500#define PGM_GST_TYPE PGM_TYPE_PAE
501#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
502#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PAE(name)
503#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
504#include "PGMGstDefs.h"
505#include "PGMAllBth.h"
506#undef BTH_PGMPOOLKIND_PT_FOR_PT
507#undef PGM_BTH_NAME
508#undef PGM_GST_TYPE
509#undef PGM_GST_NAME
510
511#ifdef VBOX_WITH_64_BITS_GUESTS
512/* Guest - AMD64 mode */
513# define PGM_GST_TYPE PGM_TYPE_AMD64
514# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
515# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_AMD64(name)
516# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
517# include "PGMGstDefs.h"
518# include "PGMAllBth.h"
519# undef BTH_PGMPOOLKIND_PT_FOR_PT
520# undef PGM_BTH_NAME
521# undef PGM_GST_TYPE
522# undef PGM_GST_NAME
523#endif /* VBOX_WITH_64_BITS_GUESTS */
524
525#undef PGM_SHW_TYPE
526#undef PGM_SHW_NAME
527
528
529/*
530 * Shadow - NEM / None.
531 */
532#define PGM_SHW_TYPE PGM_TYPE_NONE
533#define PGM_SHW_NAME(name) PGM_SHW_NAME_NONE(name)
534#include "PGMAllShw.h"
535
536/* Guest - real mode */
537#define PGM_GST_TYPE PGM_TYPE_REAL
538#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
539#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_REAL(name)
540#include "PGMGstDefs.h"
541#include "PGMAllBth.h"
542#undef PGM_BTH_NAME
543#undef PGM_GST_TYPE
544#undef PGM_GST_NAME
545
546/* Guest - protected mode */
547#define PGM_GST_TYPE PGM_TYPE_PROT
548#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
549#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_PROT(name)
550#include "PGMGstDefs.h"
551#include "PGMAllBth.h"
552#undef PGM_BTH_NAME
553#undef PGM_GST_TYPE
554#undef PGM_GST_NAME
555
556/* Guest - 32-bit mode */
557#define PGM_GST_TYPE PGM_TYPE_32BIT
558#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
559#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_32BIT(name)
560#include "PGMGstDefs.h"
561#include "PGMAllBth.h"
562#undef PGM_BTH_NAME
563#undef PGM_GST_TYPE
564#undef PGM_GST_NAME
565
566/* Guest - PAE mode */
567#define PGM_GST_TYPE PGM_TYPE_PAE
568#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
569#define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_PAE(name)
570#include "PGMGstDefs.h"
571#include "PGMAllBth.h"
572#undef PGM_BTH_NAME
573#undef PGM_GST_TYPE
574#undef PGM_GST_NAME
575
576#ifdef VBOX_WITH_64_BITS_GUESTS
577/* Guest - AMD64 mode */
578# define PGM_GST_TYPE PGM_TYPE_AMD64
579# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
580# define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_AMD64(name)
581# include "PGMGstDefs.h"
582# include "PGMAllBth.h"
583# undef PGM_BTH_NAME
584# undef PGM_GST_TYPE
585# undef PGM_GST_NAME
586#endif /* VBOX_WITH_64_BITS_GUESTS */
587
588#undef PGM_SHW_TYPE
589#undef PGM_SHW_NAME
590
591
592
593/**
594 * Guest mode data array.
595 */
596PGMMODEDATAGST const g_aPgmGuestModeData[PGM_GUEST_MODE_DATA_ARRAY_SIZE] =
597{
598 { UINT32_MAX, NULL, NULL, NULL, NULL }, /* 0 */
599 {
600 PGM_TYPE_REAL,
601 PGM_GST_NAME_REAL(GetPage),
602 PGM_GST_NAME_REAL(ModifyPage),
603 PGM_GST_NAME_REAL(Enter),
604 PGM_GST_NAME_REAL(Exit),
605#ifdef IN_RING3
606 PGM_GST_NAME_REAL(Relocate),
607#endif
608 },
609 {
610 PGM_TYPE_PROT,
611 PGM_GST_NAME_PROT(GetPage),
612 PGM_GST_NAME_PROT(ModifyPage),
613 PGM_GST_NAME_PROT(Enter),
614 PGM_GST_NAME_PROT(Exit),
615#ifdef IN_RING3
616 PGM_GST_NAME_PROT(Relocate),
617#endif
618 },
619 {
620 PGM_TYPE_32BIT,
621 PGM_GST_NAME_32BIT(GetPage),
622 PGM_GST_NAME_32BIT(ModifyPage),
623 PGM_GST_NAME_32BIT(Enter),
624 PGM_GST_NAME_32BIT(Exit),
625#ifdef IN_RING3
626 PGM_GST_NAME_32BIT(Relocate),
627#endif
628 },
629 {
630 PGM_TYPE_PAE,
631 PGM_GST_NAME_PAE(GetPage),
632 PGM_GST_NAME_PAE(ModifyPage),
633 PGM_GST_NAME_PAE(Enter),
634 PGM_GST_NAME_PAE(Exit),
635#ifdef IN_RING3
636 PGM_GST_NAME_PAE(Relocate),
637#endif
638 },
639#ifdef VBOX_WITH_64_BITS_GUESTS
640 {
641 PGM_TYPE_AMD64,
642 PGM_GST_NAME_AMD64(GetPage),
643 PGM_GST_NAME_AMD64(ModifyPage),
644 PGM_GST_NAME_AMD64(Enter),
645 PGM_GST_NAME_AMD64(Exit),
646# ifdef IN_RING3
647 PGM_GST_NAME_AMD64(Relocate),
648# endif
649 },
650#endif
651};
652
653
654/**
655 * The shadow mode data array.
656 */
657PGMMODEDATASHW const g_aPgmShadowModeData[PGM_SHADOW_MODE_DATA_ARRAY_SIZE] =
658{
659 { UINT8_MAX, NULL, NULL, NULL, NULL }, /* 0 */
660 { UINT8_MAX, NULL, NULL, NULL, NULL }, /* PGM_TYPE_REAL */
661 { UINT8_MAX, NULL, NULL, NULL, NULL }, /* PGM_TYPE_PROT */
662 {
663 PGM_TYPE_32BIT,
664 PGM_SHW_NAME_32BIT(GetPage),
665 PGM_SHW_NAME_32BIT(ModifyPage),
666 PGM_SHW_NAME_32BIT(Enter),
667 PGM_SHW_NAME_32BIT(Exit),
668#ifdef IN_RING3
669 PGM_SHW_NAME_32BIT(Relocate),
670#endif
671 },
672 {
673 PGM_TYPE_PAE,
674 PGM_SHW_NAME_PAE(GetPage),
675 PGM_SHW_NAME_PAE(ModifyPage),
676 PGM_SHW_NAME_PAE(Enter),
677 PGM_SHW_NAME_PAE(Exit),
678#ifdef IN_RING3
679 PGM_SHW_NAME_PAE(Relocate),
680#endif
681 },
682 {
683 PGM_TYPE_AMD64,
684 PGM_SHW_NAME_AMD64(GetPage),
685 PGM_SHW_NAME_AMD64(ModifyPage),
686 PGM_SHW_NAME_AMD64(Enter),
687 PGM_SHW_NAME_AMD64(Exit),
688#ifdef IN_RING3
689 PGM_SHW_NAME_AMD64(Relocate),
690#endif
691 },
692 {
693 PGM_TYPE_NESTED_32BIT,
694 PGM_SHW_NAME_NESTED_32BIT(GetPage),
695 PGM_SHW_NAME_NESTED_32BIT(ModifyPage),
696 PGM_SHW_NAME_NESTED_32BIT(Enter),
697 PGM_SHW_NAME_NESTED_32BIT(Exit),
698#ifdef IN_RING3
699 PGM_SHW_NAME_NESTED_32BIT(Relocate),
700#endif
701 },
702 {
703 PGM_TYPE_NESTED_PAE,
704 PGM_SHW_NAME_NESTED_PAE(GetPage),
705 PGM_SHW_NAME_NESTED_PAE(ModifyPage),
706 PGM_SHW_NAME_NESTED_PAE(Enter),
707 PGM_SHW_NAME_NESTED_PAE(Exit),
708#ifdef IN_RING3
709 PGM_SHW_NAME_NESTED_PAE(Relocate),
710#endif
711 },
712 {
713 PGM_TYPE_NESTED_AMD64,
714 PGM_SHW_NAME_NESTED_AMD64(GetPage),
715 PGM_SHW_NAME_NESTED_AMD64(ModifyPage),
716 PGM_SHW_NAME_NESTED_AMD64(Enter),
717 PGM_SHW_NAME_NESTED_AMD64(Exit),
718#ifdef IN_RING3
719 PGM_SHW_NAME_NESTED_AMD64(Relocate),
720#endif
721 },
722 {
723 PGM_TYPE_EPT,
724 PGM_SHW_NAME_EPT(GetPage),
725 PGM_SHW_NAME_EPT(ModifyPage),
726 PGM_SHW_NAME_EPT(Enter),
727 PGM_SHW_NAME_EPT(Exit),
728#ifdef IN_RING3
729 PGM_SHW_NAME_EPT(Relocate),
730#endif
731 },
732 {
733 PGM_TYPE_NONE,
734 PGM_SHW_NAME_NONE(GetPage),
735 PGM_SHW_NAME_NONE(ModifyPage),
736 PGM_SHW_NAME_NONE(Enter),
737 PGM_SHW_NAME_NONE(Exit),
738#ifdef IN_RING3
739 PGM_SHW_NAME_NONE(Relocate),
740#endif
741 },
742};
743
744
745/**
746 * The guest+shadow mode data array.
747 */
748PGMMODEDATABTH const g_aPgmBothModeData[PGM_BOTH_MODE_DATA_ARRAY_SIZE] =
749{
750#if !defined(IN_RING3) && !defined(VBOX_STRICT)
751# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
752# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
753 { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(Trap0eHandler), Nm(NestedTrap0eHandler) }
754
755#elif !defined(IN_RING3) && defined(VBOX_STRICT)
756# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
757# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
758 { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(Trap0eHandler), Nm(NestedTrap0eHandler), Nm(AssertCR3) }
759
760#elif defined(IN_RING3) && !defined(VBOX_STRICT)
761# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL }
762# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
763 { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), }
764
765#elif defined(IN_RING3) && defined(VBOX_STRICT)
766# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
767# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
768 { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(AssertCR3) }
769
770#else
771# error "Misconfig."
772#endif
773
774 /* 32-bit shadow paging mode: */
775 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
776 PGMMODEDATABTH_ENTRY(PGM_TYPE_32BIT, PGM_TYPE_REAL, PGM_BTH_NAME_32BIT_REAL),
777 PGMMODEDATABTH_ENTRY(PGM_TYPE_32BIT, PGM_TYPE_PROT, PGM_BTH_NAME_32BIT_PROT),
778 PGMMODEDATABTH_ENTRY(PGM_TYPE_32BIT, PGM_TYPE_32BIT, PGM_BTH_NAME_32BIT_32BIT),
779 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_PAE - illegal */
780 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_AMD64 - illegal */
781 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NESTED_32BIT - illegal */
782 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NESTED_PAE - illegal */
783 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NESTED_AMD64 - illegal */
784 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_EPT - illegal */
785 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NONE - illegal */
786
787 /* PAE shadow paging mode: */
788 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
789 PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_REAL, PGM_BTH_NAME_PAE_REAL),
790 PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_PROT, PGM_BTH_NAME_PAE_PROT),
791 PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_32BIT, PGM_BTH_NAME_PAE_32BIT),
792 PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_PAE, PGM_BTH_NAME_PAE_PAE),
793 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_AMD64 - illegal */
794 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NESTED_32BIT - illegal */
795 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NESTED_PAE - illegal */
796 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NESTED_AMD64 - illegal */
797 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_EPT - illegal */
798 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NONE - illegal */
799
800 /* AMD64 shadow paging mode: */
801 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
802 PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_REAL, PGM_BTH_NAME_AMD64_REAL),
803 PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_PROT, PGM_BTH_NAME_AMD64_PROT),
804 PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_32BIT, PGM_BTH_NAME_AMD64_32BIT),
805 PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_PAE, PGM_BTH_NAME_AMD64_PAE),
806#ifdef VBOX_WITH_64_BITS_GUESTS
807 PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_AMD64, PGM_BTH_NAME_AMD64_AMD64),
808#else
809 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_AMD64 - illegal */
810#endif
811 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NESTED_32BIT - illegal */
812 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NESTED_PAE - illegal */
813 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NESTED_AMD64 - illegal */
814 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_EPT - illegal */
815 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NONE - illegal */
816
817 /* 32-bit nested paging mode: */
818 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
819 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_REAL, PGM_BTH_NAME_NESTED_32BIT_REAL),
820 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_PROT, PGM_BTH_NAME_NESTED_32BIT_PROT),
821 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_32BIT, PGM_BTH_NAME_NESTED_32BIT_32BIT),
822 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_PAE, PGM_BTH_NAME_NESTED_32BIT_PAE),
823#ifdef VBOX_WITH_64_BITS_GUESTS
824 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_AMD64, PGM_BTH_NAME_NESTED_32BIT_AMD64),
825#else
826 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_AMD64 - illegal */
827#endif
828 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NESTED_32BIT - illegal */
829 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NESTED_PAE - illegal */
830 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NESTED_AMD64 - illegal */
831 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_EPT - illegal */
832 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NONE - illegal */
833
834 /* PAE nested paging mode: */
835 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
836 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_REAL, PGM_BTH_NAME_NESTED_PAE_REAL),
837 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_PROT, PGM_BTH_NAME_NESTED_PAE_PROT),
838 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_32BIT, PGM_BTH_NAME_NESTED_PAE_32BIT),
839 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_PAE, PGM_BTH_NAME_NESTED_PAE_PAE),
840#ifdef VBOX_WITH_64_BITS_GUESTS
841 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_AMD64, PGM_BTH_NAME_NESTED_PAE_AMD64),
842#else
843 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_AMD64 - illegal */
844#endif
845 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NESTED_32BIT - illegal */
846 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NESTED_PAE - illegal */
847 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NESTED_AMD64 - illegal */
848 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_EPT - illegal */
849 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NONE - illegal */
850
851 /* AMD64 nested paging mode: */
852 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
853 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_REAL, PGM_BTH_NAME_NESTED_AMD64_REAL),
854 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_PROT, PGM_BTH_NAME_NESTED_AMD64_PROT),
855 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_32BIT, PGM_BTH_NAME_NESTED_AMD64_32BIT),
856 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_PAE, PGM_BTH_NAME_NESTED_AMD64_PAE),
857#ifdef VBOX_WITH_64_BITS_GUESTS
858 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_AMD64, PGM_BTH_NAME_NESTED_AMD64_AMD64),
859#else
860 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_AMD64 - illegal */
861#endif
862 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NESTED_32BIT - illegal */
863 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NESTED_PAE - illegal */
864 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NESTED_AMD64 - illegal */
865 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_EPT - illegal */
866 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NONE - illegal */
867
868 /* EPT nested paging mode: */
869 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
870 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_REAL, PGM_BTH_NAME_EPT_REAL),
871 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_PROT, PGM_BTH_NAME_EPT_PROT),
872 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_32BIT, PGM_BTH_NAME_EPT_32BIT),
873 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_PAE, PGM_BTH_NAME_EPT_PAE),
874#ifdef VBOX_WITH_64_BITS_GUESTS
875 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_AMD64, PGM_BTH_NAME_EPT_AMD64),
876#else
877 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_AMD64 - illegal */
878#endif
879 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NESTED_32BIT - illegal */
880 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NESTED_PAE - illegal */
881 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NESTED_AMD64 - illegal */
882 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_EPT - illegal */
883 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NONE - illegal */
884
885 /* NONE / NEM: */
886 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
887 PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_REAL, PGM_BTH_NAME_EPT_REAL),
888 PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_PROT, PGM_BTH_NAME_EPT_PROT),
889 PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_32BIT, PGM_BTH_NAME_EPT_32BIT),
890 PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_PAE, PGM_BTH_NAME_EPT_PAE),
891#ifdef VBOX_WITH_64_BITS_GUESTS
892 PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_AMD64, PGM_BTH_NAME_EPT_AMD64),
893#else
894 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_AMD64 - illegal */
895#endif
896 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_32BIT - illegal */
897 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_PAE - illegal */
898 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_AMD64 - illegal */
899 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_EPT - illegal */
900 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NONE - illegal */
901
902
903#undef PGMMODEDATABTH_ENTRY
904#undef PGMMODEDATABTH_NULL_ENTRY
905};
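
/*
 * Illustrative sketch of how a flattened shadow x guest table like the one
 * above is typically indexed (the helper name and the stride constant are
 * assumptions for the illustration; the real index is kept in
 * pVCpu->pgm.s.idxBothModeData and computed by PGM's mode-change code):
 */
#if 0 /* illustration only */
# define EXAMPLE_CGUEST_SLOTS 11 /* null slot + guest paging types per shadow block, as laid out above */
static unsigned exampleCalcBothModeIndex(unsigned idxShadowBlock, unsigned idxGuestSlot)
{
    /* idxShadowBlock: 0 = 32-bit shadow block, 1 = PAE, 2 = AMD64, ...;
       idxGuestSlot:   1 = real, 2 = protected, 3 = 32-bit, ... (0 is the null slot). */
    return idxShadowBlock * EXAMPLE_CGUEST_SLOTS + idxGuestSlot;
}
#endif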
906
907
908/** Mask array used by pgmGetCr3MaskForMode.
909 * X86_CR3_AMD64_PAGE_MASK is used for modes that don't have a CR3 or EPTP. */
910static uint64_t const g_auCr3MaskForMode[PGMMODE_MAX] =
911{
912 /* [PGMMODE_INVALID] = */ X86_CR3_AMD64_PAGE_MASK,
913 /* [PGMMODE_REAL] = */ X86_CR3_AMD64_PAGE_MASK,
914 /* [PGMMODE_PROTECTED] = */ X86_CR3_AMD64_PAGE_MASK,
915 /* [PGMMODE_32_BIT] = */ X86_CR3_PAGE_MASK,
916 /* [PGMMODE_PAE] = */ X86_CR3_PAE_PAGE_MASK,
917 /* [PGMMODE_PAE_NX] = */ X86_CR3_PAE_PAGE_MASK,
918 /* [PGMMODE_AMD64] = */ X86_CR3_AMD64_PAGE_MASK,
919 /* [PGMMODE_AMD64_NX] = */ X86_CR3_AMD64_PAGE_MASK,
920 /* [PGMMODE_NESTED_32BIT] = */ X86_CR3_PAGE_MASK,
921 /* [PGMMODE_NESTED_PAE] = */ X86_CR3_PAE_PAGE_MASK,
922 /* [PGMMODE_NESTED_AMD64] = */ X86_CR3_AMD64_PAGE_MASK,
923 /* [PGMMODE_EPT] = */ X86_CR3_EPT_PAGE_MASK,
924 /* [PGMMODE_NONE] = */ X86_CR3_AMD64_PAGE_MASK,
925};
926
927
928/**
929 * Gets the physical address mask for CR3 in the given paging mode.
930 *
931 * The mask is for eliminating flags and other stuff in CR3/EPTP when
932 * extracting the physical address. It is not for validating whether there are
933 * reserved bits set. PGM ASSUMES that whoever loaded the CR3 value and passed
934 * it to PGM checked for reserved bits, including reserved physical address
935 * bits.
936 *
937 * @returns The CR3 mask.
938 * @param enmMode The paging mode.
939 * @param enmSlatMode The second-level address translation mode.
940 */
941DECLINLINE(uint64_t) pgmGetCr3MaskForMode(PGMMODE enmMode, PGMSLAT enmSlatMode)
942{
943 if (enmSlatMode == PGMSLAT_DIRECT)
944 {
945 Assert(enmMode != PGMMODE_EPT);
946 return g_auCr3MaskForMode[(unsigned)enmMode < (unsigned)PGMMODE_MAX ? enmMode : 0];
947 }
948 Assert(enmSlatMode == PGMSLAT_EPT);
949 return X86_CR3_EPT_PAGE_MASK;
950}
951
952
953/**
954 * Gets the masked CR3 value according to the current guest paging mode.
955 *
956 * See disclaimer in pgmGetCr3MaskForMode.
957 *
958 * @returns The masked PGM CR3 value.
959 * @param pVCpu The cross context virtual CPU structure.
960 * @param uCr3 The raw guest CR3 value.
961 */
962DECLINLINE(RTGCPHYS) pgmGetGuestMaskedCr3(PVMCPUCC pVCpu, uint64_t uCr3)
963{
964 uint64_t const fCr3Mask = pgmGetCr3MaskForMode(pVCpu->pgm.s.enmGuestMode, pVCpu->pgm.s.enmGuestSlatMode);
965 RTGCPHYS GCPhysCR3 = (RTGCPHYS)(uCr3 & fCr3Mask);
966 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
967 return GCPhysCR3;
968}
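
/*
 * Worked example of the masking above as a standalone sketch.  The mask used
 * here is the architectural bits 31:12 of a 32-bit mode CR3, written out as a
 * literal rather than the VBox constant:
 */
#if 0 /* illustration only */
# include <stdint.h>
# include <stdio.h>
int main(void)
{
    uint64_t const uRawCr3  = UINT64_C(0x0012d018); /* page frame 0x0012d000 with PWT+PCD set */
    uint64_t const fCr3Mask = UINT64_C(0xfffff000); /* 32-bit paging: keep bits 31:12 */
    printf("masked CR3 = %#llx\n", (unsigned long long)(uRawCr3 & fCr3Mask)); /* 0x12d000 */
    return 0;
}
#endif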
969
970
971#ifdef IN_RING0
972/**
973 * #PF Handler.
974 *
975 * @returns VBox status code (appropriate for trap handling and GC return).
976 * @param pVCpu The cross context virtual CPU structure.
977 * @param uErr The trap error code.
978 * @param pCtx Pointer to the register context for the CPU.
979 * @param pvFault The fault address.
980 */
981VMMDECL(int) PGMTrap0eHandler(PVMCPUCC pVCpu, RTGCUINT uErr, PCPUMCTX pCtx, RTGCPTR pvFault)
982{
983 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
984
985 Log(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv eip=%04x:%RGv cr3=%RGp\n", uErr, pvFault, pCtx->cs.Sel, (RTGCPTR)pCtx->rip, (RTGCPHYS)CPUMGetGuestCR3(pVCpu)));
986 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.StatRZTrap0e, a);
987 STAM_STATS({ pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = NULL; } );
988
989
990# ifdef VBOX_WITH_STATISTICS
991 /*
992 * Error code stats.
993 */
994 if (uErr & X86_TRAP_PF_US)
995 {
996 if (!(uErr & X86_TRAP_PF_P))
997 {
998 if (uErr & X86_TRAP_PF_RW)
999 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSNotPresentWrite);
1000 else
1001 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSNotPresentRead);
1002 }
1003 else if (uErr & X86_TRAP_PF_RW)
1004 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSWrite);
1005 else if (uErr & X86_TRAP_PF_RSVD)
1006 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSReserved);
1007 else if (uErr & X86_TRAP_PF_ID)
1008 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSNXE);
1009 else
1010 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSRead);
1011 }
1012 else
1013 { /* Supervisor */
1014 if (!(uErr & X86_TRAP_PF_P))
1015 {
1016 if (uErr & X86_TRAP_PF_RW)
1017 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVNotPresentWrite);
1018 else
1019 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVNotPresentRead);
1020 }
1021 else if (uErr & X86_TRAP_PF_RW)
1022 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVWrite);
1023 else if (uErr & X86_TRAP_PF_ID)
1024 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSNXE);
1025 else if (uErr & X86_TRAP_PF_RSVD)
1026 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVReserved);
1027 }
1028# endif /* VBOX_WITH_STATISTICS */
1029
1030 /*
1031 * Call the worker.
1032 */
1033 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
1034 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
1035 AssertReturn(g_aPgmBothModeData[idxBth].pfnTrap0eHandler, VERR_PGM_MODE_IPE);
1036 bool fLockTaken = false;
1037 int rc = g_aPgmBothModeData[idxBth].pfnTrap0eHandler(pVCpu, uErr, pCtx, pvFault, &fLockTaken);
1038 if (fLockTaken)
1039 {
1040 PGM_LOCK_ASSERT_OWNER(pVM);
1041 PGM_UNLOCK(pVM);
1042 }
1043 LogFlow(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv rc=%Rrc\n", uErr, pvFault, rc));
1044
1045 /*
1046 * Return code tweaks.
1047 */
1048 if (rc != VINF_SUCCESS)
1049 {
1050 if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
1051 rc = VINF_SUCCESS;
1052
1053 /* Note: hack alert for difficult to reproduce problem. */
1054 if ( rc == VERR_PAGE_NOT_PRESENT /* SMP only ; disassembly might fail. */
1055 || rc == VERR_PAGE_TABLE_NOT_PRESENT /* seen with UNI & SMP */
1056 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT /* seen with SMP */
1057 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT) /* precaution */
1058 {
1059 Log(("WARNING: Unexpected VERR_PAGE_TABLE_NOT_PRESENT (%d) for page fault at %RGv error code %x (rip=%RGv)\n", rc, pvFault, uErr, pCtx->rip));
1060 /* Some kind of inconsistency in the SMP case; it's safe to just execute the instruction again; not sure about single VCPU VMs though. */
1061 rc = VINF_SUCCESS;
1062 }
1063 }
1064
1065 STAM_STATS({ if (rc == VINF_EM_RAW_GUEST_TRAP) STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eGuestPF); });
1066 STAM_STATS({ if (!pVCpu->pgmr0.s.pStatTrap0eAttributionR0)
1067 pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = &pVCpu->pgm.s.Stats.StatRZTrap0eTime2Misc; });
1068 STAM_PROFILE_STOP_EX(&pVCpu->pgm.s.Stats.StatRZTrap0e, pVCpu->pgmr0.s.pStatTrap0eAttributionR0, a);
1069 return rc;
1070}
1071#endif /* IN_RING0 */
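
/*
 * Illustrative sketch of the page-fault error code bits decoded by the
 * statistics block above.  The bit layout is the architectural x86 #PF error
 * code; the helper itself is a standalone example, not a VBox API:
 */
#if 0 /* illustration only */
# include <stdint.h>
# include <stdio.h>
static void exampleDecodePfErr(uint32_t uErr)
{
    printf("%s %s of a %s page%s%s\n",
           uErr & UINT32_C(0x04) ? "user" : "supervisor",       /* X86_TRAP_PF_US   */
           uErr & UINT32_C(0x02) ? "write" : "read",            /* X86_TRAP_PF_RW   */
           uErr & UINT32_C(0x01) ? "present" : "not-present",   /* X86_TRAP_PF_P    */
           uErr & UINT32_C(0x08) ? ", reserved bit set" : "",   /* X86_TRAP_PF_RSVD */
           uErr & UINT32_C(0x10) ? ", instruction fetch" : ""); /* X86_TRAP_PF_ID   */
}
#endif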
1072
1073
1074/**
1075 * Prefetch a page
1076 *
1077 * Typically used to sync commonly used pages before entering raw mode
1078 * after a CR3 reload.
1079 *
1080 * @returns VBox status code suitable for scheduling.
1081 * @retval VINF_SUCCESS on success.
1082 * @retval VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
1083 * @param pVCpu The cross context virtual CPU structure.
1084 * @param GCPtrPage Page to invalidate.
1085 */
1086VMMDECL(int) PGMPrefetchPage(PVMCPUCC pVCpu, RTGCPTR GCPtrPage)
1087{
1088 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,Prefetch), a);
1089
1090 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
1091 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
1092 AssertReturn(g_aPgmBothModeData[idxBth].pfnPrefetchPage, VERR_PGM_MODE_IPE);
1093 int rc = g_aPgmBothModeData[idxBth].pfnPrefetchPage(pVCpu, GCPtrPage);
1094
1095 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,Prefetch), a);
1096 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
1097 return rc;
1098}
1099
1100
1101/**
1102 * Emulation of the invlpg instruction (HC only actually).
1103 *
1104 * @returns Strict VBox status code, special care required.
1105 * @retval VINF_PGM_SYNC_CR3 - handled.
1106 * @retval VINF_EM_RAW_EMULATE_INSTR - not handled (RC only).
1107 *
1108 * @param pVCpu The cross context virtual CPU structure.
1109 * @param GCPtrPage Page to invalidate.
1110 *
1111 * @remark ASSUMES the page table entry or page directory is valid. Fairly
1112 * safe, but there could be edge cases!
1113 *
1114 * @todo Flush page or page directory only if necessary!
1115 * @todo VBOXSTRICTRC
1116 */
1117VMMDECL(int) PGMInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtrPage)
1118{
1119 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1120 int rc;
1121 Log3(("PGMInvalidatePage: GCPtrPage=%RGv\n", GCPtrPage));
1122
1123 IEMTlbInvalidatePage(pVCpu, GCPtrPage);
1124
1125 /*
1126 * Call paging mode specific worker.
1127 */
1128 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,InvalidatePage), a);
1129 PGM_LOCK_VOID(pVM);
1130
1131 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
1132 AssertReturnStmt(idxBth < RT_ELEMENTS(g_aPgmBothModeData), PGM_UNLOCK(pVM), VERR_PGM_MODE_IPE);
1133 AssertReturnStmt(g_aPgmBothModeData[idxBth].pfnInvalidatePage, PGM_UNLOCK(pVM), VERR_PGM_MODE_IPE);
1134 rc = g_aPgmBothModeData[idxBth].pfnInvalidatePage(pVCpu, GCPtrPage);
1135
1136 PGM_UNLOCK(pVM);
1137 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,InvalidatePage), a);
1138
1139 /* Ignore all irrelevant error codes. */
1140 if ( rc == VERR_PAGE_NOT_PRESENT
1141 || rc == VERR_PAGE_TABLE_NOT_PRESENT
1142 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT
1143 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)
1144 rc = VINF_SUCCESS;
1145
1146 return rc;
1147}
1148
1149
1150/**
1151 * Executes an instruction using the interpreter.
1152 *
1153 * @returns VBox status code (appropriate for trap handling and GC return).
1154 * @param pVCpu The cross context virtual CPU structure.
1155 * @param pvFault Fault address.
1156 */
1157VMMDECL(VBOXSTRICTRC) PGMInterpretInstruction(PVMCPUCC pVCpu, RTGCPTR pvFault)
1158{
1159 RT_NOREF(pvFault);
1160 VBOXSTRICTRC rc = EMInterpretInstruction(pVCpu);
1161 if (rc == VERR_EM_INTERPRETER)
1162 rc = VINF_EM_RAW_EMULATE_INSTR;
1163 if (rc != VINF_SUCCESS)
1164 Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%RGv)\n", VBOXSTRICTRC_VAL(rc), pvFault));
1165 return rc;
1166}
1167
1168
1169/**
1170 * Gets effective page information (from the VMM page directory).
1171 *
1172 * @returns VBox status code.
1173 * @param pVCpu The cross context virtual CPU structure.
1174 * @param GCPtr Guest Context virtual address of the page.
1175 * @param pfFlags Where to store the flags. These are X86_PTE_*.
1176 * @param pHCPhys Where to store the HC physical address of the page.
1177 * This is page aligned.
1178 * @remark You should use PGMMapGetPage() for pages in a mapping.
1179 */
1180VMMDECL(int) PGMShwGetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
1181{
1182 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1183 PGM_LOCK_VOID(pVM);
1184
1185 uintptr_t idxShw = pVCpu->pgm.s.idxShadowModeData;
1186 AssertReturn(idxShw < RT_ELEMENTS(g_aPgmShadowModeData), VERR_PGM_MODE_IPE);
1187 AssertReturn(g_aPgmShadowModeData[idxShw].pfnGetPage, VERR_PGM_MODE_IPE);
1188 int rc = g_aPgmShadowModeData[idxShw].pfnGetPage(pVCpu, GCPtr, pfFlags, pHCPhys);
1189
1190 PGM_UNLOCK(pVM);
1191 return rc;
1192}
1193
1194
1195/**
1196 * Modify page flags for a range of pages in the shadow context.
1197 *
1198 * The existing flags are ANDed with the fMask and ORed with the fFlags.
1199 *
1200 * @returns VBox status code.
1201 * @param pVCpu The cross context virtual CPU structure.
1202 * @param GCPtr Virtual address of the first page in the range.
1203 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
1204 * @param fMask The AND mask - page flags X86_PTE_*.
1205 * Be very CAREFUL when ~'ing constants which could be 32-bit!
1206 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
1207 * @remark You must use PGMMapModifyPage() for pages in a mapping.
1208 */
1209DECLINLINE(int) pdmShwModifyPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags)
1210{
1211 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
1212 Assert(!(fOpFlags & ~(PGM_MK_PG_IS_MMIO2 | PGM_MK_PG_IS_WRITE_FAULT)));
1213
1214 GCPtr &= ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK; /** @todo this ain't necessary, right... */
1215
1216 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1217 PGM_LOCK_VOID(pVM);
1218
1219 uintptr_t idxShw = pVCpu->pgm.s.idxShadowModeData;
1220 AssertReturn(idxShw < RT_ELEMENTS(g_aPgmShadowModeData), VERR_PGM_MODE_IPE);
1221 AssertReturn(g_aPgmShadowModeData[idxShw].pfnModifyPage, VERR_PGM_MODE_IPE);
1222 int rc = g_aPgmShadowModeData[idxShw].pfnModifyPage(pVCpu, GCPtr, GUEST_PAGE_SIZE, fFlags, fMask, fOpFlags);
1223
1224 PGM_UNLOCK(pVM);
1225 return rc;
1226}
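
/*
 * Illustrative sketch of the fFlags/fMask semantics documented above (the
 * existing flags are ANDed with fMask and ORed with fFlags); the PTE value
 * and the RW bit literal are example numbers only:
 */
#if 0 /* illustration only */
# include <stdint.h>
static uint64_t exampleApplyFlags(uint64_t uPte, uint64_t fFlags, uint64_t fMask)
{
    return (uPte & fMask) | fFlags;
}
/* Read-only (cf. PGMShwMakePageReadonly below):  exampleApplyFlags(uPte, 0,   ~(uint64_t)0x2) clears bit 1 (RW). */
/* Writable  (cf. PGMShwMakePageWritable below):  exampleApplyFlags(uPte, 0x2, ~(uint64_t)0)   sets bit 1 (RW).   */
#endif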
1227
1228
1229/**
1230 * Changes the page flags for a single page in the shadow page tables so as to
1231 * make it read-only.
1232 *
1233 * @returns VBox status code.
1234 * @param pVCpu The cross context virtual CPU structure.
1235 * @param GCPtr Virtual address of the first page in the range.
1236 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
1237 */
1238VMMDECL(int) PGMShwMakePageReadonly(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
1239{
1240 return pdmShwModifyPage(pVCpu, GCPtr, 0, ~(uint64_t)X86_PTE_RW, fOpFlags);
1241}
1242
1243
1244/**
1245 * Changes the page flags for a single page in the shadow page tables so as to
1246 * make it writable.
1247 *
1248 * The caller must know with 101% certainty that the guest page tables map this
1249 * as writable too. This function will deal with shared, zero and write-monitored
1250 * pages.
1251 *
1252 * @returns VBox status code.
1253 * @param pVCpu The cross context virtual CPU structure.
1254 * @param GCPtr Virtual address of the first page in the range.
1255 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
1256 */
1257VMMDECL(int) PGMShwMakePageWritable(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
1258{
1259 if (pVCpu->pgm.s.enmShadowMode != PGMMODE_NONE) /* avoid assertions */
1260 return pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)0, fOpFlags);
1261 return VINF_SUCCESS;
1262}
1263
1264
1265/**
1266 * Changes the page flags for a single page in the shadow page tables so as to
1267 * make it not present.
1268 *
1269 * @returns VBox status code.
1270 * @param pVCpu The cross context virtual CPU structure.
1271 * @param GCPtr Virtual address of the first page in the range.
1272 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
1273 */
1274VMMDECL(int) PGMShwMakePageNotPresent(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
1275{
1276 return pdmShwModifyPage(pVCpu, GCPtr, 0, 0, fOpFlags);
1277}
1278
1279
1280/**
1281 * Changes the page flags for a single page in the shadow page tables so as to
1282 * make it supervisor and writable.
1283 *
1284 * This is for dealing with CR0.WP=0 and read-only user pages.
1285 *
1286 * @returns VBox status code.
1287 * @param pVCpu The cross context virtual CPU structure.
1288 * @param GCPtr Virtual address of the first page in the range.
1289 * @param fBigPage Whether or not this is a big page. If it is, we have to
1290 * change the shadow PDE as well. If it isn't, the caller
1291 * has checked that the shadow PDE doesn't need changing.
1292 * We ASSUME 4KB pages backing the big page here!
1293 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
1294 */
1295int pgmShwMakePageSupervisorAndWritable(PVMCPUCC pVCpu, RTGCPTR GCPtr, bool fBigPage, uint32_t fOpFlags)
1296{
1297 int rc = pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)X86_PTE_US, fOpFlags);
1298 if (rc == VINF_SUCCESS && fBigPage)
1299 {
1300 /* this is a bit ugly... */
1301 switch (pVCpu->pgm.s.enmShadowMode)
1302 {
1303 case PGMMODE_32_BIT:
1304 {
1305 PX86PDE pPde = pgmShwGet32BitPDEPtr(pVCpu, GCPtr);
1306 AssertReturn(pPde, VERR_INTERNAL_ERROR_3);
1307 Log(("pgmShwMakePageSupervisorAndWritable: PDE=%#llx", pPde->u));
1308 pPde->u |= X86_PDE_RW;
1309 Log(("-> PDE=%#llx (32)\n", pPde->u));
1310 break;
1311 }
1312 case PGMMODE_PAE:
1313 case PGMMODE_PAE_NX:
1314 {
1315 PX86PDEPAE pPde = pgmShwGetPaePDEPtr(pVCpu, GCPtr);
1316 AssertReturn(pPde, VERR_INTERNAL_ERROR_3);
1317 Log(("pgmShwMakePageSupervisorAndWritable: PDE=%#llx", pPde->u));
1318 pPde->u |= X86_PDE_RW;
1319 Log(("-> PDE=%#llx (PAE)\n", pPde->u));
1320 break;
1321 }
1322 default:
1323 AssertFailedReturn(VERR_INTERNAL_ERROR_4);
1324 }
1325 }
1326 return rc;
1327}
1328
1329
1330/**
1331 * Syncs the shadow page directory for the specified address, PAE, allocating it if not present.
1332 *
1333 * @returns VBox status code.
1334 * @param pVCpu The cross context virtual CPU structure.
1335 * @param GCPtr The address.
1336 * @param uGstPdpe Guest PDPT entry. Valid.
1337 * @param ppPD Receives address of page directory
1338 */
1339int pgmShwSyncPaePDPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
1340{
1341 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1342 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1343 PPGMPOOLPAGE pShwPage;
1344 int rc;
1345 PGM_LOCK_ASSERT_OWNER(pVM);
1346
1347
1348 /* Allocate page directory if not present. */
1349 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
1350 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
1351 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1352 X86PGPAEUINT const uPdpe = pPdpe->u;
1353 if (uPdpe & (X86_PDPE_P | X86_PDPE_PG_MASK))
1354 {
1355 pShwPage = pgmPoolGetPage(pPool, uPdpe & X86_PDPE_PG_MASK);
1356 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1357 Assert((pPdpe->u & X86_PDPE_PG_MASK) == pShwPage->Core.Key);
1358
1359 pgmPoolCacheUsed(pPool, pShwPage);
1360
1361 /* Update the entry if necessary. */
1362 X86PGPAEUINT const uPdpeNew = pShwPage->Core.Key | (uGstPdpe & (X86_PDPE_P | X86_PDPE_A)) | (uPdpe & PGM_PDPT_FLAGS);
1363 if (uPdpeNew == uPdpe)
1364 { /* likely */ }
1365 else
1366 ASMAtomicWriteU64(&pPdpe->u, uPdpeNew);
1367 }
1368 else
1369 {
1370 RTGCPTR64 GCPdPt;
1371 PGMPOOLKIND enmKind;
1372 if (pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu))
1373 {
1374 /* AMD-V nested paging or real/protected mode without paging. */
1375 GCPdPt = GCPtr & ~(RT_BIT_64(X86_PDPT_SHIFT) - 1);
1376 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
1377 }
1378 else if (CPUMGetGuestCR4(pVCpu) & X86_CR4_PAE)
1379 {
1380 if (uGstPdpe & X86_PDPE_P)
1381 {
1382 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1383 enmKind = PGMPOOLKIND_PAE_PD_FOR_PAE_PD;
1384 }
1385 else
1386 {
1387 /* PD not present; guest must reload CR3 to change it.
1388 * No need to monitor anything in this case. */
1389 /** @todo r=bird: WTF is hit?!? */
1390 /*Assert(VM_IS_RAW_MODE_ENABLED(pVM)); - ??? */
1391 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1392 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
1393 Assert(uGstPdpe & X86_PDPE_P); /* caller should do this already */
1394 }
1395 }
1396 else
1397 {
1398 GCPdPt = CPUMGetGuestCR3(pVCpu);
1399 enmKind = (PGMPOOLKIND)(PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD + iPdPt);
1400 }
1401
1402 /* Create a reference back to the PDPT by using the index in its shadow page. */
1403 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1404 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPdPt, false /*fLockPage*/,
1405 &pShwPage);
1406 AssertRCReturn(rc, rc);
1407
1408 /* Hook it up. */
1409 ASMAtomicWriteU64(&pPdpe->u, pShwPage->Core.Key | (uGstPdpe & (X86_PDPE_P | X86_PDPE_A)) | (uPdpe & PGM_PDPT_FLAGS));
1410 }
1411 PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdpe);
1412
1413 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1414 return VINF_SUCCESS;
1415}
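
/*
 * Worked example of the PAE PDPT indexing used above, as a standalone sketch.
 * The shift of 30 and the mask of 3 are the architectural PAE values behind
 * X86_PDPT_SHIFT and X86_PDPT_MASK_PAE (four PDPT entries, 1 GiB each):
 */
#if 0 /* illustration only */
# include <stdint.h>
# include <stdio.h>
int main(void)
{
    uint32_t const GCPtr = UINT32_C(0xc0123456); /* an address in the fourth gigabyte */
    unsigned const iPdPt = (GCPtr >> 30) & 3;    /* -> 3 */
    printf("PDPT index for %#x is %u\n", (unsigned)GCPtr, iPdPt);
    return 0;
}
#endif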
1416
1417
1418/**
1419 * Gets the pointer to the shadow page directory entry for an address, PAE.
1420 *
1421 * @returns VBox status code.
1422 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1423 * @param GCPtr The address.
1424 * @param ppShwPde Receives the address of the pgm pool page for the shadow page directory
1425 */
1426DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde)
1427{
1428 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1429 PGM_LOCK_ASSERT_OWNER(pVM);
1430
1431 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
1432 AssertReturn(pPdpt, VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT); /* can't happen */
1433 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
1434 X86PGPAEUINT const uPdpe = pPdpt->a[iPdPt].u;
1435 if (!(uPdpe & X86_PDPE_P))
1436 {
1437 LogFlow(("pgmShwGetPaePoolPagePD: PD %d not present (%RX64)\n", iPdPt, uPdpe));
1438 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1439 }
1440 AssertMsg(uPdpe & X86_PDPE_PG_MASK, ("GCPtr=%RGv\n", GCPtr));
1441
1442 /* Fetch the pgm pool shadow descriptor. */
1443 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), uPdpe & X86_PDPE_PG_MASK);
1444 AssertReturn(pShwPde, VERR_PGM_POOL_GET_PAGE_FAILED);
1445
1446 *ppShwPde = pShwPde;
1447 return VINF_SUCCESS;
1448}
1449
1450
1451/**
1452 * Syncs the SHADOW page directory pointer for the specified address.
1453 *
1454 * Allocates backing pages in case the PDPT or PML4 entry is missing.
1455 *
1456 * The caller is responsible for making sure the guest has a valid PD before
1457 * calling this function.
1458 *
1459 * @returns VBox status code.
1460 * @param pVCpu The cross context virtual CPU structure.
1461 * @param GCPtr The address.
1462 * @param uGstPml4e Guest PML4 entry (valid).
1463 * @param uGstPdpe Guest PDPT entry (valid).
1464 * @param ppPD Receives address of page directory
1465 */
1466static int pgmShwSyncLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
1467{
1468 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1469 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1470 bool const fNestedPagingOrNoGstPaging = pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu);
1471 int rc;
1472
1473 PGM_LOCK_ASSERT_OWNER(pVM);
1474
1475 /*
1476 * PML4.
1477 */
1478 PPGMPOOLPAGE pShwPage;
1479 {
1480 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1481 PX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
1482 AssertReturn(pPml4e, VERR_PGM_PML4_MAPPING);
1483 X86PGPAEUINT const uPml4e = pPml4e->u;
1484
1485 /* Allocate page directory pointer table if not present. */
1486 if (uPml4e & (X86_PML4E_P | X86_PML4E_PG_MASK))
1487 {
1488 pShwPage = pgmPoolGetPage(pPool, uPml4e & X86_PML4E_PG_MASK);
1489 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1490
1491 pgmPoolCacheUsed(pPool, pShwPage);
1492
1493 /* Update the entry if needed. */
1494 X86PGPAEUINT const uPml4eNew = pShwPage->Core.Key | (uGstPml4e & pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask)
1495 | (uPml4e & PGM_PML4_FLAGS);
1496 if (uPml4e == uPml4eNew)
1497 { /* likely */ }
1498 else
1499 ASMAtomicWriteU64(&pPml4e->u, uPml4eNew);
1500 }
1501 else
1502 {
1503 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1504
1505 RTGCPTR64 GCPml4;
1506 PGMPOOLKIND enmKind;
1507 if (fNestedPagingOrNoGstPaging)
1508 {
1509 /* AMD-V nested paging or real/protected mode without paging */
1510 GCPml4 = (RTGCPTR64)iPml4 << X86_PML4_SHIFT; /** @todo bogus calculation for PML5 */
1511 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_PHYS;
1512 }
1513 else
1514 {
1515 GCPml4 = uGstPml4e & X86_PML4E_PG_MASK;
1516 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT;
1517 }
1518
1519 /* Create a reference back to the PDPT by using the index in its shadow page. */
1520 rc = pgmPoolAlloc(pVM, GCPml4, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1521 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, false /*fLockPage*/,
1522 &pShwPage);
1523 AssertRCReturn(rc, rc);
1524
1525 /* Hook it up. */
1526 ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | (uGstPml4e & pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask)
1527 | (uPml4e & PGM_PML4_FLAGS));
1528 }
1529 }
1530
1531 /*
1532 * PDPT.
1533 */
1534 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1535 PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1536 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1537 X86PGPAEUINT const uPdpe = pPdpe->u;
1538
1539 /* Allocate page directory if not present. */
1540 if (uPdpe & (X86_PDPE_P | X86_PDPE_PG_MASK))
1541 {
1542 pShwPage = pgmPoolGetPage(pPool, uPdpe & X86_PDPE_PG_MASK);
1543 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1544
1545 pgmPoolCacheUsed(pPool, pShwPage);
1546
1547 /* Update the entry if needed. */
1548 X86PGPAEUINT const uPdpeNew = pShwPage->Core.Key | (uGstPdpe & pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask)
1549 | (uPdpe & PGM_PDPT_FLAGS);
1550 if (uPdpe == uPdpeNew)
1551 { /* likely */ }
1552 else
1553 ASMAtomicWriteU64(&pPdpe->u, uPdpeNew);
1554 }
1555 else
1556 {
1557 RTGCPTR64 GCPdPt;
1558 PGMPOOLKIND enmKind;
1559 if (fNestedPagingOrNoGstPaging)
1560 {
1561 /* AMD-V nested paging or real/protected mode without paging */
1562 GCPdPt = GCPtr & ~(RT_BIT_64(iPdPt << X86_PDPT_SHIFT) - 1);
1563 enmKind = PGMPOOLKIND_64BIT_PD_FOR_PHYS;
1564 }
1565 else
1566 {
1567 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1568 enmKind = PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD;
1569 }
1570
1571 /* Create a reference back to the PDPT by using the index in its shadow page. */
1572 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1573 pShwPage->idx, iPdPt, false /*fLockPage*/,
1574 &pShwPage);
1575 AssertRCReturn(rc, rc);
1576
1577 /* Hook it up. */
1578 ASMAtomicWriteU64(&pPdpe->u,
1579 pShwPage->Core.Key | (uGstPdpe & pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask) | (uPdpe & PGM_PDPT_FLAGS));
1580 }
1581
1582 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1583 return VINF_SUCCESS;
1584}
1585
1586
1587/**
1588 * Gets the SHADOW page directory pointer for the specified address (long mode).
1589 *
1590 * @returns VBox status code.
1591 * @param pVCpu The cross context virtual CPU structure.
1592 * @param GCPtr The address.
1593 * @param ppPml4e Receives the address of the page map level 4 entry.
1594 * @param ppPdpt Receives the address of the page directory pointer table.
1595 * @param ppPD Receives the address of the page directory.
1596 */
1597DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
1598{
1599 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1600 PGM_LOCK_ASSERT_OWNER(pVM);
1601
1602 /*
1603 * PML4
1604 */
1605 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1606 PCX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
1607 AssertReturn(pPml4e, VERR_PGM_PML4_MAPPING);
1608 if (ppPml4e)
1609 *ppPml4e = (PX86PML4E)pPml4e;
1610 X86PGPAEUINT const uPml4e = pPml4e->u;
1611 Log4(("pgmShwGetLongModePDPtr %RGv (%RHv) %RX64\n", GCPtr, pPml4e, uPml4e));
1612 if (!(uPml4e & X86_PML4E_P)) /** @todo other code checks for NULL page frame number! */
1613 return VERR_PAGE_MAP_LEVEL4_NOT_PRESENT;
1614
1615 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1616 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, uPml4e & X86_PML4E_PG_MASK);
1617 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1618
1619 /*
1620 * PDPT
1621 */
1622 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1623 PCX86PDPT pPdpt = *ppPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1624 X86PGPAEUINT const uPdpe = pPdpt->a[iPdPt].u;
1625 if (!(uPdpe & X86_PDPE_P)) /** @todo other code checks for NULL page frame number! */
1626 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1627
1628 pShwPage = pgmPoolGetPage(pPool, uPdpe & X86_PDPE_PG_MASK);
1629 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1630
1631 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1632 Log4(("pgmShwGetLongModePDPtr %RGv -> *ppPD=%p PDE=%p/%RX64\n", GCPtr, *ppPD, &(*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK], (*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK].u));
1633 return VINF_SUCCESS;
1634}
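
/*
 * A minimal caller sketch for pgmShwGetLongModePDPtr() above (illustrative
 * only; GCPtr is a placeholder and the PGM lock is assumed to be owned
 * already, as asserted by the function). The returned page directory is
 * indexed with the same X86_PD_PAE_SHIFT/X86_PD_PAE_MASK arithmetic used in
 * the Log4 statement:
 *
 *     PX86PDPT  pPdpt = NULL;
 *     PX86PDPAE pPD   = NULL;
 *     int rc = pgmShwGetLongModePDPtr(pVCpu, GCPtr, NULL, &pPdpt, &pPD);  // NULL: PML4E pointer not needed
 *     if (RT_SUCCESS(rc))
 *     {
 *         const unsigned     iPd  = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
 *         X86PGPAEUINT const uPde = pPD->a[iPd].u;
 *         Log4(("Shadow PDE for %RGv: %RX64\n", GCPtr, uPde));
 *     }
 */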
1635
1636
1637/**
1638 * Syncs the SHADOW EPT page directory pointer for the specified address. Allocates
1639 * backing pages in case the PDPT or PML4 entry is missing.
1640 *
1641 * @returns VBox status code.
1642 * @param pVCpu The cross context virtual CPU structure.
1643 * @param GCPtr The address.
1644 * @param ppPdpt Receives the address of the PDPT.
1645 * @param ppPD Receives the address of the page directory.
1646 */
1647static int pgmShwGetEPTPDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD)
1648{
1649 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1650 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1651 int rc;
1652
1653 Assert(pVM->pgm.s.fNestedPaging);
1654 PGM_LOCK_ASSERT_OWNER(pVM);
1655
1656 /*
1657 * PML4 level.
1658 */
1659 PEPTPML4 pPml4 = (PEPTPML4)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1660 Assert(pPml4);
1661
1662 /* Allocate page directory pointer table if not present. */
1663 PPGMPOOLPAGE pShwPage;
1664 {
1665 const unsigned iPml4 = (GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
1666 PEPTPML4E pPml4e = &pPml4->a[iPml4];
1667 EPTPML4E Pml4e;
1668 Pml4e.u = pPml4e->u;
1669 if (!(Pml4e.u & (EPT_E_PG_MASK | EPT_E_READ)))
1670 {
1671 RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT;
1672 rc = pgmPoolAlloc(pVM, GCPml4, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1673 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, false /*fLockPage*/,
1674 &pShwPage);
1675 AssertRCReturn(rc, rc);
1676
1677 /* Hook up the new PDPT now. */
1678 ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1679 }
1680 else
1681 {
1682 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
1683 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1684
1685 pgmPoolCacheUsed(pPool, pShwPage);
1686
1687 /* Hook up the cached PDPT if needed (probably not given 512*512 PTs to sync). */
1688 if (Pml4e.u == (pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE))
1689 { }
1690 else
1691 ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1692 }
1693 }
1694
1695 /*
1696 * PDPT level.
1697 */
1698 const unsigned iPdPt = (GCPtr >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
1699 PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1700 PEPTPDPTE pPdpe = &pPdpt->a[iPdPt];
1701
1702 if (ppPdpt)
1703 *ppPdpt = pPdpt;
1704
1705 /* Allocate page directory if not present. */
1706 EPTPDPTE Pdpe;
1707 Pdpe.u = pPdpe->u;
1708 if (!(Pdpe.u & (EPT_E_PG_MASK | EPT_E_READ)))
1709 {
1710 RTGCPTR64 const GCPdPt = GCPtr & ~(RT_BIT_64(EPT_PDPT_SHIFT) - 1);
1711 rc = pgmPoolAlloc(pVM, GCPdPt, PGMPOOLKIND_EPT_PD_FOR_PHYS, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1712 pShwPage->idx, iPdPt, false /*fLockPage*/,
1713 &pShwPage);
1714 AssertRCReturn(rc, rc);
1715
1716 /* Hook up the new PD now. */
1717 ASMAtomicWriteU64(&pPdpe->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1718 }
1719 else
1720 {
1721 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & EPT_PDPTE_PG_MASK);
1722 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1723
1724 pgmPoolCacheUsed(pPool, pShwPage);
1725
1726 /* Hook up the cached PD if needed (probably not, given there are 512 PTs we may need to sync). */
1727 if (Pdpe.u == (pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE))
1728 { }
1729 else
1730 ASMAtomicWriteU64(&pPdpe->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1731 }
1732
1733 *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1734 return VINF_SUCCESS;
1735}
1736
1737
1738#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1739/**
1740 * Syncs the SHADOW nested-guest page directory pointer for the specified address.
1741 * Allocates backing pages in case the PDPT or PML4 entry is missing.
1742 *
1743 * @returns VBox status code.
1744 * @param pVCpu The cross context virtual CPU structure.
1745 * @param GCPhysNested The nested-guest physical address.
1746 * @param ppPdpt Where to store the PDPT. Optional, can be NULL.
1747 * @param ppPD Where to store the PD. Optional, can be NULL.
1748 * @param pGstWalkAll The guest walk info.
1749 */
1750static int pgmShwGetNestedEPTPDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPhysNested, PEPTPDPT *ppPdpt, PEPTPD *ppPD,
1751 PPGMPTWALKGST pGstWalkAll)
1752{
1753 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1754 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1755 int rc;
1756
1757 PPGMPOOLPAGE pShwPage;
1758 Assert(pVM->pgm.s.fNestedPaging);
1759 Assert(pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT);
1760 PGM_LOCK_ASSERT_OWNER(pVM);
1761
1762 /*
1763 * PML4 level.
1764 */
1765 {
1766 PEPTPML4 pPml4 = (PEPTPML4)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1767 Assert(pPml4);
1768
1769 /* Allocate page directory pointer table if not present. */
1770 {
1771 uint64_t const fShwFlags = pGstWalkAll->u.Ept.Pml4e.u & pVCpu->pgm.s.fGstEptShadowedPml4eMask;
1772 const unsigned iPml4e = (GCPhysNested >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
1773 PEPTPML4E pPml4e = &pPml4->a[iPml4e];
1774
1775 if (!(pPml4e->u & (EPT_E_PG_MASK | EPT_PRESENT_MASK)))
1776 {
1777 RTGCPHYS const GCPhysPdpt = pGstWalkAll->u.Ept.Pml4e.u & EPT_PML4E_PG_MASK;
1778 rc = pgmPoolAlloc(pVM, GCPhysPdpt, PGMPOOLKIND_EPT_PDPT_FOR_EPT_PDPT, PGMPOOLACCESS_DONTCARE,
1779 PGM_A20_IS_ENABLED(pVCpu), pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4e, false /*fLockPage*/,
1780 &pShwPage);
1781 AssertRCReturn(rc, rc);
1782
1783 /* Hook up the new PDPT now. */
1784 ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | fShwFlags);
1785 }
1786 else
1787 {
1788 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
1789 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1790
1791 pgmPoolCacheUsed(pPool, pShwPage);
1792
1793 /* Hook up the cached PDPT if needed (probably not given 512*512 PTs to sync). */
1794 if (pPml4e->u != (pShwPage->Core.Key | fShwFlags))
1795 ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | fShwFlags);
1796 }
1797 Assert(PGMPOOL_PAGE_IS_NESTED(pShwPage));
1798 Log7Func(("GstPml4e=%RX64 ShwPml4e=%RX64 iPml4e=%u\n", pGstWalkAll->u.Ept.Pml4e.u, pPml4e->u, iPml4e));
1799 }
1800 }
1801
1802 /*
1803 * PDPT level.
1804 */
1805 {
1806 AssertReturn(!(pGstWalkAll->u.Ept.Pdpte.u & EPT_E_LEAF), VERR_NOT_SUPPORTED); /* shadowing 1GB pages not supported yet. */
1807
1808 PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1809 if (ppPdpt)
1810 *ppPdpt = pPdpt;
1811
1812 uint64_t const fShwFlags = pGstWalkAll->u.Ept.Pdpte.u & pVCpu->pgm.s.fGstEptShadowedPdpteMask;
1813 const unsigned iPdPte = (GCPhysNested >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
1814 PEPTPDPTE pPdpte = &pPdpt->a[iPdPte];
1815
1816 if (!(pPdpte->u & (EPT_E_PG_MASK | EPT_PRESENT_MASK)))
1817 {
1818 RTGCPHYS const GCPhysPd = pGstWalkAll->u.Ept.Pdpte.u & EPT_PDPTE_PG_MASK;
1819 rc = pgmPoolAlloc(pVM, GCPhysPd, PGMPOOLKIND_EPT_PD_FOR_EPT_PD, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1820 pShwPage->idx, iPdPte, false /*fLockPage*/, &pShwPage);
1821 AssertRCReturn(rc, rc);
1822
1823 /* Hook up the new PD now. */
1824 ASMAtomicWriteU64(&pPdpte->u, pShwPage->Core.Key | fShwFlags);
1825 }
1826 else
1827 {
1828 pShwPage = pgmPoolGetPage(pPool, pPdpte->u & EPT_PDPTE_PG_MASK);
1829 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1830
1831 pgmPoolCacheUsed(pPool, pShwPage);
1832
1833 /* Hook up the cached PD if needed (probably not, given there are 512 PTs we may need to sync). */
1834 if (pPdpte->u != (pShwPage->Core.Key | fShwFlags))
1835 ASMAtomicWriteU64(&pPdpte->u, pShwPage->Core.Key | fShwFlags);
1836 }
1837 Assert(PGMPOOL_PAGE_IS_NESTED(pShwPage));
1838 Log7Func(("GstPdpte=%RX64 ShwPdpte=%RX64 iPdPte=%u \n", pGstWalkAll->u.Ept.Pdpte.u, pPdpte->u, iPdPte));
1839
1840 *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1841 }
1842
1843 return VINF_SUCCESS;
1844}
1845#endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
1846
1847
1848#ifdef IN_RING0
1849/**
1850 * Synchronizes a range of nested page table entries.
1851 *
1852 * The caller must own the PGM lock.
1853 *
1854 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1855 * @param GCPhys Where to start.
1856 * @param cPages The number of pages whose entries should be synced.
1857 * @param enmShwPagingMode The shadow paging mode (PGMMODE_EPT for VT-x,
1858 * host paging mode for AMD-V).
1859 */
1860int pgmShwSyncNestedPageLocked(PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint32_t cPages, PGMMODE enmShwPagingMode)
1861{
1862 PGM_LOCK_ASSERT_OWNER(pVCpu->CTX_SUFF(pVM));
1863
1864/** @todo r=bird: Gotta love this nested paging hacking we're still carrying with us... (Split PGM_TYPE_NESTED.) */
1865 int rc;
1866 switch (enmShwPagingMode)
1867 {
1868 case PGMMODE_32_BIT:
1869 {
1870 X86PDE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1871 rc = PGM_BTH_NAME_32BIT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1872 break;
1873 }
1874
1875 case PGMMODE_PAE:
1876 case PGMMODE_PAE_NX:
1877 {
1878 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1879 rc = PGM_BTH_NAME_PAE_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1880 break;
1881 }
1882
1883 case PGMMODE_AMD64:
1884 case PGMMODE_AMD64_NX:
1885 {
1886 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1887 rc = PGM_BTH_NAME_AMD64_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1888 break;
1889 }
1890
1891 case PGMMODE_EPT:
1892 {
1893 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1894 rc = PGM_BTH_NAME_EPT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1895 break;
1896 }
1897
1898 default:
1899 AssertMsgFailedReturn(("%d\n", enmShwPagingMode), VERR_IPE_NOT_REACHED_DEFAULT_CASE);
1900 }
1901 return rc;
1902}
1903#endif /* IN_RING0 */
1904
1905
1906/**
1907 * Gets effective Guest OS page information.
1908 *
1909 * When GCPtr is in a big page, the function will return as if it were a normal
1910 * 4KB page. If distinguishing between big and normal pages becomes necessary at
1911 * a later point, a dedicated variant of this API will be created for that
1912 * purpose.
1913 *
1914 * @returns VBox status code.
1915 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1916 * @param GCPtr Guest Context virtual address of the page.
1917 * @param pWalk Where to store the page walk information.
1918 */
1919VMMDECL(int) PGMGstGetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk)
1920{
1921 VMCPU_ASSERT_EMT(pVCpu);
1922 Assert(pWalk);
1923 uintptr_t idx = pVCpu->pgm.s.idxGuestModeData;
1924 AssertReturn(idx < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
1925 AssertReturn(g_aPgmGuestModeData[idx].pfnGetPage, VERR_PGM_MODE_IPE);
1926 return g_aPgmGuestModeData[idx].pfnGetPage(pVCpu, GCPtr, pWalk);
1927}
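
/*
 * An illustrative sketch of translating a guest linear address with
 * PGMGstGetPage() (not part of the original flow; GCPtrGuest is a placeholder,
 * the caller must be the EMT, and logging stands in for real use of the
 * result):
 *
 *     PGMPTWALK Walk;
 *     int rc = PGMGstGetPage(pVCpu, GCPtrGuest, &Walk);
 *     if (RT_SUCCESS(rc))
 *         Log(("%RGv -> %RGp\n", GCPtrGuest, Walk.GCPhys));
 *     else
 *         Log(("%RGv not mapped, rc=%Rrc\n", GCPtrGuest, rc));
 */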
1928
1929
1930/**
1931 * Maps the guest CR3.
1932 *
1933 * @returns VBox status code.
1934 * @param pVCpu The cross context virtual CPU structure.
1935 * @param GCPhysCr3 The guest CR3 value.
1936 * @param pHCPtrGuestCr3 Where to store the mapped memory.
1937 */
1938DECLINLINE(int) pgmGstMapCr3(PVMCPUCC pVCpu, RTGCPHYS GCPhysCr3, PRTHCPTR pHCPtrGuestCr3)
1939{
1940 /** @todo this needs some reworking wrt. locking? */
1941 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1942 PGM_LOCK_VOID(pVM);
1943 PPGMPAGE pPageCr3 = pgmPhysGetPage(pVM, GCPhysCr3);
1944 AssertReturnStmt(pPageCr3, PGM_UNLOCK(pVM), VERR_PGM_INVALID_CR3_ADDR);
1945
1946 RTHCPTR HCPtrGuestCr3;
1947 int rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPageCr3, GCPhysCr3, (void **)&HCPtrGuestCr3);
1948 PGM_UNLOCK(pVM);
1949
1950 *pHCPtrGuestCr3 = HCPtrGuestCr3;
1951 return rc;
1952}
1953
1954
1955#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1956/**
1957 * Unmaps the guest CR3.
1958 *
1959 * @returns VBox status code.
1960 * @param pVCpu The cross context virtual CPU structure.
1961 */
1962DECLINLINE(int) pgmGstUnmapCr3(PVMCPUCC pVCpu)
1963{
1964 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
1965 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
1966 AssertReturn(g_aPgmBothModeData[idxBth].pfnUnmapCR3, VERR_PGM_MODE_IPE);
1967 return g_aPgmBothModeData[idxBth].pfnUnmapCR3(pVCpu);
1968}
1969#endif
1970
1971
1972/**
1973 * Performs a guest page table walk.
1974 *
1975 * The guest should be in paged protect mode or long mode when making a call to
1976 * this function.
1977 *
1978 * @returns VBox status code.
1979 * @retval VINF_SUCCESS on success.
1980 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
1981 * @retval VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
1982 * not valid, except enmType is PGMPTWALKGSTTYPE_INVALID.
1983 *
1984 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1985 * @param GCPtr The guest virtual address to walk by.
1986 * @param pWalk Where to return the walk result. This is valid for some
1987 * error codes as well.
1988 * @param pGstWalk The guest mode specific page walk information.
1989 */
1990int pgmGstPtWalk(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
1991{
1992 VMCPU_ASSERT_EMT(pVCpu);
1993 switch (pVCpu->pgm.s.enmGuestMode)
1994 {
1995 case PGMMODE_32_BIT:
1996 pGstWalk->enmType = PGMPTWALKGSTTYPE_32BIT;
1997 return PGM_GST_NAME_32BIT(Walk)(pVCpu, GCPtr, pWalk, &pGstWalk->u.Legacy);
1998
1999 case PGMMODE_PAE:
2000 case PGMMODE_PAE_NX:
2001 pGstWalk->enmType = PGMPTWALKGSTTYPE_PAE;
2002 return PGM_GST_NAME_PAE(Walk)(pVCpu, GCPtr, pWalk, &pGstWalk->u.Pae);
2003
2004 case PGMMODE_AMD64:
2005 case PGMMODE_AMD64_NX:
2006 pGstWalk->enmType = PGMPTWALKGSTTYPE_AMD64;
2007 return PGM_GST_NAME_AMD64(Walk)(pVCpu, GCPtr, pWalk, &pGstWalk->u.Amd64);
2008
2009 case PGMMODE_REAL:
2010 case PGMMODE_PROTECTED:
2011 pGstWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
2012 return VERR_PGM_NOT_USED_IN_MODE;
2013
2014 case PGMMODE_EPT:
2015 case PGMMODE_NESTED_32BIT:
2016 case PGMMODE_NESTED_PAE:
2017 case PGMMODE_NESTED_AMD64:
2018 default:
2019 AssertFailed();
2020 pGstWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
2021 return VERR_PGM_NOT_USED_IN_MODE;
2022 }
2023}
2024
2025
2026#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2027/**
2028 * Performs a guest second-level address translation (SLAT).
2029 *
2030 * @returns VBox status code.
2031 * @retval VINF_SUCCESS on success.
2032 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
2033 * @retval VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
2034 * not valid, except enmType is PGMPTWALKGSTTYPE_INVALID.
2035 *
2036 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2037 * @param GCPhysNested The nested-guest physical address being translated.
2038 * @param fIsLinearAddrValid Whether the linear address in @a GCPtrNested is valid
2039 * and is the cause of this translation.
2040 * @param GCPtrNested The nested-guest virtual address that initiated the
2041 * SLAT. If none, pass 0 (and not NIL_RTGCPTR).
2042 * @param pWalk Where to return the walk result. This is updated for
2043 * all error codes other than
2044 * VERR_PGM_NOT_USED_IN_MODE.
2045 * @param pGstWalk Where to store the second-level paging-mode specific
2046 * walk info.
2047 */
2048static int pgmGstSlatWalk(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, bool fIsLinearAddrValid, RTGCPTR GCPtrNested,
2049 PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
2050{
2051 /* SLAT mode must be valid at this point as this should only be used -after- we have determined SLAT mode. */
2052 Assert( pVCpu->pgm.s.enmGuestSlatMode != PGMSLAT_DIRECT
2053 && pVCpu->pgm.s.enmGuestSlatMode != PGMSLAT_INVALID);
2054 AssertPtr(pWalk);
2055 AssertPtr(pGstWalk);
2056 switch (pVCpu->pgm.s.enmGuestSlatMode)
2057 {
2058 case PGMSLAT_EPT:
2059 pGstWalk->enmType = PGMPTWALKGSTTYPE_EPT;
2060 return PGM_GST_SLAT_NAME_EPT(Walk)(pVCpu, GCPhysNested, fIsLinearAddrValid, GCPtrNested, pWalk, &pGstWalk->u.Ept);
2061
2062 default:
2063 AssertFailed();
2064 pGstWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
2065 return VERR_PGM_NOT_USED_IN_MODE;
2066 }
2067}
2068#endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
2069
2070
2071/**
2072 * Tries to continue the previous walk.
2073 *
2074 * @note Requires the caller to hold the PGM lock from the first
2075 * pgmGstPtWalk() call to the last pgmGstPtWalkNext() call. Otherwise
2076 * we cannot use the pointers.
2077 *
2078 * @returns VBox status code.
2079 * @retval VINF_SUCCESS on success.
2080 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
2081 * @retval VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
2082 * not valid, except enmType is PGMPTWALKGSTTYPE_INVALID.
2083 *
2084 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2085 * @param GCPtr The guest virtual address to walk by.
2086 * @param pWalk Pointer to the previous walk result and where to return
2087 * the result of this walk. This is valid for some error
2088 * codes as well.
2089 * @param pGstWalk The guest-mode specific walk information.
2090 */
2091int pgmGstPtWalkNext(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
2092{
2093 /*
2094 * We can only handle successful walks.
2095 * We also limit ourselves to the next page.
2096 */
2097 if ( pWalk->fSucceeded
2098 && GCPtr - pWalk->GCPtr == GUEST_PAGE_SIZE)
2099 {
2100 Assert(pWalk->uLevel == 0);
2101 if (pGstWalk->enmType == PGMPTWALKGSTTYPE_AMD64)
2102 {
2103 /*
2104 * AMD64
2105 */
2106 if (!pWalk->fGigantPage && !pWalk->fBigPage)
2107 {
2108 /*
2109 * We fall back to full walk if the PDE table changes, if any
2110 * reserved bits are set, or if the effective page access changes.
2111 */
2112 const uint64_t fPteSame = X86_PTE_P | X86_PTE_RW | X86_PTE_US | X86_PTE_PWT
2113 | X86_PTE_PCD | X86_PTE_A | X86_PTE_PAE_NX;
2114 const uint64_t fPdeSame = X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_PWT
2115 | X86_PDE_PCD | X86_PDE_A | X86_PDE_PAE_NX | X86_PDE_PS;
2116
2117 if ((GCPtr >> X86_PD_PAE_SHIFT) == (pWalk->GCPtr >> X86_PD_PAE_SHIFT))
2118 {
2119 if (pGstWalk->u.Amd64.pPte)
2120 {
2121 X86PTEPAE Pte;
2122 Pte.u = pGstWalk->u.Amd64.pPte[1].u;
2123 if ( (Pte.u & fPteSame) == (pGstWalk->u.Amd64.Pte.u & fPteSame)
2124 && !(Pte.u & (pVCpu)->pgm.s.fGstAmd64MbzPteMask))
2125 {
2126 pWalk->GCPtr = GCPtr;
2127 pWalk->GCPhys = Pte.u & X86_PTE_PAE_PG_MASK;
2128 pGstWalk->u.Amd64.Pte.u = Pte.u;
2129 pGstWalk->u.Amd64.pPte++;
2130 return VINF_SUCCESS;
2131 }
2132 }
2133 }
2134 else if ((GCPtr >> X86_PDPT_SHIFT) == (pWalk->GCPtr >> X86_PDPT_SHIFT))
2135 {
2136 Assert(!((GCPtr >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK)); /* Must be first PT entry. */
2137 if (pGstWalk->u.Amd64.pPde)
2138 {
2139 X86PDEPAE Pde;
2140 Pde.u = pGstWalk->u.Amd64.pPde[1].u;
2141 if ( (Pde.u & fPdeSame) == (pGstWalk->u.Amd64.Pde.u & fPdeSame)
2142 && !(Pde.u & (pVCpu)->pgm.s.fGstAmd64MbzPdeMask))
2143 {
2144 /* Get the new PTE and check out the first entry. */
2145 int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, PGM_A20_APPLY(pVCpu, (Pde.u & X86_PDE_PAE_PG_MASK)),
2146 &pGstWalk->u.Amd64.pPt);
2147 if (RT_SUCCESS(rc))
2148 {
2149 pGstWalk->u.Amd64.pPte = &pGstWalk->u.Amd64.pPt->a[0];
2150 X86PTEPAE Pte;
2151 Pte.u = pGstWalk->u.Amd64.pPte->u;
2152 if ( (Pte.u & fPteSame) == (pGstWalk->u.Amd64.Pte.u & fPteSame)
2153 && !(Pte.u & (pVCpu)->pgm.s.fGstAmd64MbzPteMask))
2154 {
2155 pWalk->GCPtr = GCPtr;
2156 pWalk->GCPhys = Pte.u & X86_PTE_PAE_PG_MASK;
2157 pGstWalk->u.Amd64.Pte.u = Pte.u;
2158 pGstWalk->u.Amd64.Pde.u = Pde.u;
2159 pGstWalk->u.Amd64.pPde++;
2160 return VINF_SUCCESS;
2161 }
2162 }
2163 }
2164 }
2165 }
2166 }
2167 else if (!pWalk->fGigantPage)
2168 {
2169 if ((GCPtr & X86_PAGE_2M_BASE_MASK) == (pWalk->GCPtr & X86_PAGE_2M_BASE_MASK))
2170 {
2171 pWalk->GCPtr = GCPtr;
2172 pWalk->GCPhys += GUEST_PAGE_SIZE;
2173 return VINF_SUCCESS;
2174 }
2175 }
2176 else
2177 {
2178 if ((GCPtr & X86_PAGE_1G_BASE_MASK) == (pWalk->GCPtr & X86_PAGE_1G_BASE_MASK))
2179 {
2180 pWalk->GCPtr = GCPtr;
2181 pWalk->GCPhys += GUEST_PAGE_SIZE;
2182 return VINF_SUCCESS;
2183 }
2184 }
2185 }
2186 }
2187 /* Case we don't handle. Do full walk. */
2188 return pgmGstPtWalk(pVCpu, GCPtr, pWalk, pGstWalk);
2189}
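
/*
 * A sketch of the intended walk / walk-next pattern (illustrative only;
 * GCPtrFirst and cPages are placeholders). Per the note above, the PGM lock
 * must be held from the first pgmGstPtWalk() call to the last
 * pgmGstPtWalkNext() call so the cached table pointers remain valid:
 *
 *     PVMCC pVM = pVCpu->CTX_SUFF(pVM);
 *     PGM_LOCK_VOID(pVM);
 *     PGMPTWALK    Walk;
 *     PGMPTWALKGST GstWalk;
 *     int rc = pgmGstPtWalk(pVCpu, GCPtrFirst, &Walk, &GstWalk);
 *     for (uint32_t iPage = 1; RT_SUCCESS(rc) && iPage < cPages; iPage++)
 *         rc = pgmGstPtWalkNext(pVCpu, GCPtrFirst + iPage * GUEST_PAGE_SIZE, &Walk, &GstWalk);
 *     PGM_UNLOCK(pVM);
 */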
2190
2191
2192/**
2193 * Modify page flags for a range of pages in the guest's tables
2194 *
2195 * The existing flags are ANDed with the fMask and ORed with the fFlags.
2196 *
2197 * @returns VBox status code.
2198 * @param pVCpu The cross context virtual CPU structure.
2199 * @param GCPtr Virtual address of the first page in the range.
2200 * @param cb Size (in bytes) of the range to apply the modification to.
2201 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
2202 * @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
2203 * Be very CAREFUL when ~'ing constants which could be 32-bit!
2204 */
2205VMMDECL(int) PGMGstModifyPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
2206{
2207 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,GstModifyPage), a);
2208 VMCPU_ASSERT_EMT(pVCpu);
2209
2210 /*
2211 * Validate input.
2212 */
2213 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
2214 Assert(cb);
2215
2216 LogFlow(("PGMGstModifyPage %RGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));
2217
2218 /*
2219 * Adjust input.
2220 */
2221 cb += GCPtr & GUEST_PAGE_OFFSET_MASK;
2222 cb = RT_ALIGN_Z(cb, GUEST_PAGE_SIZE);
2223 GCPtr &= ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK;
2224
2225 /*
2226 * Call worker.
2227 */
2228 uintptr_t idx = pVCpu->pgm.s.idxGuestModeData;
2229 AssertReturn(idx < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
2230 AssertReturn(g_aPgmGuestModeData[idx].pfnModifyPage, VERR_PGM_MODE_IPE);
2231 int rc = g_aPgmGuestModeData[idx].pfnModifyPage(pVCpu, GCPtr, cb, fFlags, fMask);
2232
2233 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,GstModifyPage), a);
2234 return rc;
2235}
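
/*
 * A hedged example of the fFlags/fMask contract described above: clearing the
 * guest R/W bit for a single page (illustrative only; GCPtrPage is a
 * placeholder). Note the 64-bit cast before the ~, as the fMask description
 * warns about:
 *
 *     int rc = PGMGstModifyPage(pVCpu, GCPtrPage, GUEST_PAGE_SIZE,
 *                               0,                       // fFlags: nothing to OR in
 *                               ~(uint64_t)X86_PTE_RW);  // fMask:  AND away R/W
 *     AssertRC(rc);
 */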
2236
2237
2238/**
2239 * Checks whether the given PAE PDPEs are potentially valid for the guest.
2240 *
2241 * @returns @c true if the PDPE is valid, @c false otherwise.
2242 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2243 * @param paPaePdpes The PAE PDPEs to validate.
2244 *
2245 * @remarks This function -only- checks the reserved bits in the PDPE entries.
2246 */
2247VMM_INT_DECL(bool) PGMGstArePaePdpesValid(PVMCPUCC pVCpu, PCX86PDPE paPaePdpes)
2248{
2249 Assert(paPaePdpes);
2250 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
2251 {
2252 X86PDPE const PaePdpe = paPaePdpes[i];
2253 if ( !(PaePdpe.u & X86_PDPE_P)
2254 || !(PaePdpe.u & pVCpu->pgm.s.fGstPaeMbzPdpeMask))
2255 { /* likely */ }
2256 else
2257 return false;
2258 }
2259 return true;
2260}
2261
2262
2263/**
2264 * Performs the lazy mapping of the 32-bit guest PD.
2265 *
2266 * @returns VBox status code.
2267 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2268 * @param ppPd Where to return the pointer to the mapping. This is
2269 * always set.
2270 */
2271int pgmGstLazyMap32BitPD(PVMCPUCC pVCpu, PX86PD *ppPd)
2272{
2273 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2274 PGM_LOCK_VOID(pVM);
2275
2276 Assert(!pVCpu->pgm.s.CTX_SUFF(pGst32BitPd));
2277
2278 RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, pVCpu->pgm.s.GCPhysCR3);
2279 PPGMPAGE pPage;
2280 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
2281 if (RT_SUCCESS(rc))
2282 {
2283 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)ppPd);
2284 if (RT_SUCCESS(rc))
2285 {
2286# ifdef IN_RING3
2287 pVCpu->pgm.s.pGst32BitPdR0 = NIL_RTR0PTR;
2288 pVCpu->pgm.s.pGst32BitPdR3 = *ppPd;
2289# else
2290 pVCpu->pgm.s.pGst32BitPdR3 = NIL_RTR0PTR;
2291 pVCpu->pgm.s.pGst32BitPdR0 = *ppPd;
2292# endif
2293 PGM_UNLOCK(pVM);
2294 return VINF_SUCCESS;
2295 }
2296 AssertRC(rc);
2297 }
2298 PGM_UNLOCK(pVM);
2299
2300 *ppPd = NULL;
2301 return rc;
2302}
2303
2304
2305/**
2306 * Performs the lazy mapping of the PAE guest PDPT.
2307 *
2308 * @returns VBox status code.
2309 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2310 * @param ppPdpt Where to return the pointer to the mapping. This is
2311 * always set.
2312 */
2313int pgmGstLazyMapPaePDPT(PVMCPUCC pVCpu, PX86PDPT *ppPdpt)
2314{
2315 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt));
2316 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2317 PGM_LOCK_VOID(pVM);
2318
2319 RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, pVCpu->pgm.s.GCPhysCR3);
2320 PPGMPAGE pPage;
2321 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
2322 if (RT_SUCCESS(rc))
2323 {
2324 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)ppPdpt);
2325 if (RT_SUCCESS(rc))
2326 {
2327# ifdef IN_RING3
2328 pVCpu->pgm.s.pGstPaePdptR0 = NIL_RTR0PTR;
2329 pVCpu->pgm.s.pGstPaePdptR3 = *ppPdpt;
2330# else
2331 pVCpu->pgm.s.pGstPaePdptR3 = NIL_RTR3PTR;
2332 pVCpu->pgm.s.pGstPaePdptR0 = *ppPdpt;
2333# endif
2334 PGM_UNLOCK(pVM);
2335 return VINF_SUCCESS;
2336 }
2337 AssertRC(rc);
2338 }
2339
2340 PGM_UNLOCK(pVM);
2341 *ppPdpt = NULL;
2342 return rc;
2343}
2344
2345
2346/**
2347 * Performs the lazy mapping / updating of a PAE guest PD.
2348 *
2349 * @returns VBox status code.
2351 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2352 * @param iPdpt Which PD entry to map (0..3).
2353 * @param ppPd Where to return the pointer to the mapping. This is
2354 * always set.
2355 */
2356int pgmGstLazyMapPaePD(PVMCPUCC pVCpu, uint32_t iPdpt, PX86PDPAE *ppPd)
2357{
2358 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2359 PGM_LOCK_VOID(pVM);
2360
2361 PX86PDPT pGuestPDPT = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
2362 Assert(pGuestPDPT);
2363 Assert(pGuestPDPT->a[iPdpt].u & X86_PDPE_P);
2364 RTGCPHYS GCPhys = pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK;
2365 bool const fChanged = pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] != GCPhys;
2366
2367 PPGMPAGE pPage;
2368 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
2369 if (RT_SUCCESS(rc))
2370 {
2371 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)ppPd);
2372 AssertRC(rc);
2373 if (RT_SUCCESS(rc))
2374 {
2375# ifdef IN_RING3
2376 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = NIL_RTR0PTR;
2377 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = *ppPd;
2378# else
2379 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = NIL_RTR3PTR;
2380 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = *ppPd;
2381# endif
2382 if (fChanged)
2383 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = GCPhys;
2384 PGM_UNLOCK(pVM);
2385 return VINF_SUCCESS;
2386 }
2387 }
2388
2389 /* Invalid page or some failure, invalidate the entry. */
2390 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = NIL_RTGCPHYS;
2391 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = NIL_RTR3PTR;
2392 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = NIL_RTR0PTR;
2393
2394 PGM_UNLOCK(pVM);
2395 return rc;
2396}
2397
2398
2399/**
2400 * Performs the lazy mapping of the AMD64 guest PML4 table.
2401 *
2402 * @returns VBox status code.
2403 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2404 * @param ppPml4 Where to return the pointer to the mapping. This will
2405 * always be set.
2406 */
2407int pgmGstLazyMapPml4(PVMCPUCC pVCpu, PX86PML4 *ppPml4)
2408{
2409 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4));
2410 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2411 PGM_LOCK_VOID(pVM);
2412
2413 RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, pVCpu->pgm.s.GCPhysCR3);
2414 PPGMPAGE pPage;
2415 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
2416 if (RT_SUCCESS(rc))
2417 {
2418 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)ppPml4);
2419 if (RT_SUCCESS(rc))
2420 {
2421# ifdef IN_RING3
2422 pVCpu->pgm.s.pGstAmd64Pml4R0 = NIL_RTR0PTR;
2423 pVCpu->pgm.s.pGstAmd64Pml4R3 = *ppPml4;
2424# else
2425 pVCpu->pgm.s.pGstAmd64Pml4R3 = NIL_RTR3PTR;
2426 pVCpu->pgm.s.pGstAmd64Pml4R0 = *ppPml4;
2427# endif
2428 PGM_UNLOCK(pVM);
2429 return VINF_SUCCESS;
2430 }
2431 }
2432
2433 PGM_UNLOCK(pVM);
2434 *ppPml4 = NULL;
2435 return rc;
2436}
2437
2438
2439#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2440 /**
2441 * Performs the lazy mapping of the guest PML4 table when using EPT paging.
2442 *
2443 * @returns VBox status code.
2444 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2445 * @param ppEptPml4 Where to return the pointer to the mapping. This will
2446 * always be set.
2447 */
2448int pgmGstLazyMapEptPml4(PVMCPUCC pVCpu, PEPTPML4 *ppEptPml4)
2449{
2450 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstEptPml4));
2451 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2452 PGM_LOCK_VOID(pVM);
2453
2454 RTGCPHYS const GCPhysEpt = pVCpu->pgm.s.uEptPtr & EPT_EPTP_PG_MASK;
2455 PPGMPAGE pPage;
2456 int rc = pgmPhysGetPageEx(pVM, GCPhysEpt, &pPage);
2457 if (RT_SUCCESS(rc))
2458 {
2459 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysEpt, (void **)ppEptPml4);
2460 if (RT_SUCCESS(rc))
2461 {
2462# ifdef IN_RING3
2463 pVCpu->pgm.s.pGstEptPml4R0 = NIL_RTR0PTR;
2464 pVCpu->pgm.s.pGstEptPml4R3 = *ppEptPml4;
2465# else
2466 pVCpu->pgm.s.pGstEptPml4R3 = NIL_RTR3PTR;
2467 pVCpu->pgm.s.pGstEptPml4R0 = *ppEptPml4;
2468# endif
2469 PGM_UNLOCK(pVM);
2470 return VINF_SUCCESS;
2471 }
2472 }
2473
2474 PGM_UNLOCK(pVM);
2475 *ppEptPml4 = NULL;
2476 return rc;
2477}
2478#endif
2479
2480
2481/**
2482 * Gets the current CR3 register value for the shadow memory context.
2483 * @returns CR3 value.
2484 * @param pVCpu The cross context virtual CPU structure.
2485 */
2486VMMDECL(RTHCPHYS) PGMGetHyperCR3(PVMCPU pVCpu)
2487{
2488 PPGMPOOLPAGE pPoolPage = pVCpu->pgm.s.CTX_SUFF(pShwPageCR3);
2489 AssertPtrReturn(pPoolPage, NIL_RTHCPHYS);
2490 return pPoolPage->Core.Key;
2491}
2492
2493
2494/**
2495 * Forces lazy remapping of the guest's PAE page-directory structures.
2496 *
2497 * @param pVCpu The cross context virtual CPU structure.
2498 */
2499static void pgmGstFlushPaePdpes(PVMCPU pVCpu)
2500{
2501 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->pgm.s.aGCPhysGstPaePDs); i++)
2502 {
2503 pVCpu->pgm.s.apGstPaePDsR3[i] = 0;
2504 pVCpu->pgm.s.apGstPaePDsR0[i] = 0;
2505 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
2506 }
2507}
2508
2509
2510#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2511/**
2512 * Performs second-level address translation for the given CR3 and updates the
2513 * nested-guest CR3 when successful.
2514 *
2515 * @returns VBox status code.
2516 * @param pVCpu The cross context virtual CPU structure.
2517 * @param uCr3 The masked nested-guest CR3 value.
2518 * @param pGCPhysCR3 Where to store the translated CR3.
2519 *
2520 * @warning This updates PGMCPU::GCPhysNstGstCR3 when the translation succeeds. Be
2521 * mindful of this in code that's hyper sensitive to the order of
2522 * operations.
2523 */
2524static int pgmGstSlatTranslateCr3(PVMCPUCC pVCpu, uint64_t uCr3, PRTGCPHYS pGCPhysCr3)
2525{
2526 if (uCr3 != pVCpu->pgm.s.GCPhysNstGstCR3)
2527 {
2528 PGMPTWALK Walk;
2529 PGMPTWALKGST GstWalk;
2530 int const rc = pgmGstSlatWalk(pVCpu, uCr3, false /* fIsLinearAddrValid */, 0 /* GCPtrNested */, &Walk, &GstWalk);
2531 if (RT_SUCCESS(rc))
2532 {
2533 /* Update nested-guest CR3. */
2534 pVCpu->pgm.s.GCPhysNstGstCR3 = uCr3;
2535
2536 /* Pass back the translated result. */
2537 *pGCPhysCr3 = Walk.GCPhys;
2538 return VINF_SUCCESS;
2539 }
2540
2541 /* Translation failed. */
2542 *pGCPhysCr3 = NIL_RTGCPHYS;
2543 return rc;
2544 }
2545
2546 /*
2547 * If the nested-guest CR3 has not changed, then the previously
2548 * translated CR3 result (i.e. GCPhysCR3) is passed back.
2549 */
2550 *pGCPhysCr3 = pVCpu->pgm.s.GCPhysCR3;
2551 return VINF_SUCCESS;
2552}
2553#endif
2554
2555
2556/**
2557 * Performs and schedules necessary updates following a CR3 load or reload.
2558 *
2559 * This will normally involve mapping the guest PD or nPDPT
2560 *
2561 * @returns VBox status code.
2562 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
2563 * safely be ignored and overridden since the FF will be set too then.
2564 * @param pVCpu The cross context virtual CPU structure.
2565 * @param cr3 The new cr3.
2566 * @param fGlobal Indicates whether this is a global flush or not.
2567 */
2568VMMDECL(int) PGMFlushTLB(PVMCPUCC pVCpu, uint64_t cr3, bool fGlobal)
2569{
2570 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLB), a);
2571 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2572
2573 VMCPU_ASSERT_EMT(pVCpu);
2574
2575 /*
2576 * Always flag the necessary updates; this is required for hardware acceleration.
2577 */
2578 /** @todo optimize this, it shouldn't always be necessary. */
2579 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2580 if (fGlobal)
2581 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2582
2583 /*
2584 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
2585 */
2586 RTGCPHYS const GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
2587 RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, cr3);
2588#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2589 if ( pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT
2590 && PGMMODE_WITH_PAGING(pVCpu->pgm.s.enmGuestMode))
2591 {
2592 RTGCPHYS GCPhysOut;
2593 int const rc = pgmGstSlatTranslateCr3(pVCpu, GCPhysCR3, &GCPhysOut);
2594 if (RT_SUCCESS(rc))
2595 GCPhysCR3 = GCPhysOut;
2596 else
2597 {
2598 /* CR3 SLAT translation failed but we try to pretend it
2599 succeeded for the reasons mentioned in PGMHCChangeMode(). */
2600 AssertMsgFailed(("SLAT failed for CR3 %#RX64 rc=%Rrc\n", cr3, rc));
2601 int const rc2 = pgmGstUnmapCr3(pVCpu);
2602 pVCpu->pgm.s.GCPhysCR3 = NIL_RTGCPHYS;
2603 pVCpu->pgm.s.GCPhysNstGstCR3 = NIL_RTGCPHYS;
2604 return rc2;
2605 }
2606 }
2607#endif
2608
2609 LogFlowFunc(("cr3=%RX64 old=%RX64 fGlobal=%d\n", cr3, GCPhysOldCR3, fGlobal));
2610 int rc = VINF_SUCCESS;
2611 if (GCPhysOldCR3 != GCPhysCR3)
2612 {
2613 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2614 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2615 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
2616
2617 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2618 rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3);
2619 if (RT_LIKELY(rc == VINF_SUCCESS))
2620 { }
2621 else
2622 {
2623 AssertMsg(rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
2624 Assert(VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_PGM_SYNC_CR3));
2625 pVCpu->pgm.s.CTX_SUFF(fPaePdpesAndCr3Mapped) = false;
2626 pVCpu->pgm.s.GCPhysPaeCR3 = NIL_RTGCPHYS;
2627 pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3;
2628 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MAP_CR3;
2629 }
2630
2631 if (fGlobal)
2632 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBNewCR3Global));
2633 else
2634 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBNewCR3));
2635 }
2636 else
2637 {
2638#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
2639 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
2640 if (pPool->cDirtyPages)
2641 {
2642 PGM_LOCK_VOID(pVM);
2643 pgmPoolResetDirtyPages(pVM);
2644 PGM_UNLOCK(pVM);
2645 }
2646#endif
2647 if (fGlobal)
2648 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBSameCR3Global));
2649 else
2650 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBSameCR3));
2651
2652 /*
2653 * Flush PAE PDPTEs.
2654 */
2655 if (PGMMODE_IS_PAE(pVCpu->pgm.s.enmGuestMode))
2656 pgmGstFlushPaePdpes(pVCpu);
2657 }
2658
2659 IEMTlbInvalidateAll(pVCpu);
2660 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLB), a);
2661 return rc;
2662}
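
/*
 * A condensed caller sketch for PGMFlushTLB() above (illustrative; uNewCr3 and
 * fGlobalFlush are placeholders supplied by whatever CRx emulation invokes it).
 * As the retval note says, VINF_PGM_SYNC_CR3 may safely be ignored because the
 * corresponding force-action flag has been set as well:
 *
 *     int rc = PGMFlushTLB(pVCpu, uNewCr3, fGlobalFlush);
 *     if (rc == VINF_PGM_SYNC_CR3)
 *         rc = VINF_SUCCESS;  // VMCPU_FF_PGM_SYNC_CR3 is set; the sync happens later
 */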
2663
2664
2665/**
2666 * Performs and schedules necessary updates following a CR3 load or reload when
2667 * using nested or extended paging.
2668 *
2669 * This API is an alternative to PGMFlushTLB that avoids actually flushing the
2670 * TLB and triggering a SyncCR3.
2671 *
2672 * This will normally involve mapping the guest PD or nPDPT
2673 *
2674 * @returns VBox status code.
2675 * @retval VINF_SUCCESS.
2676 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync (not for nested
2677 * paging modes). This can safely be ignored and overridden since the
2678 * FF will be set too then.
2679 * @param pVCpu The cross context virtual CPU structure.
2680 * @param cr3 The new CR3.
2681 */
2682VMMDECL(int) PGMUpdateCR3(PVMCPUCC pVCpu, uint64_t cr3)
2683{
2684 VMCPU_ASSERT_EMT(pVCpu);
2685
2686 /* We assume we're only called in nested paging mode. */
2687 Assert(pVCpu->CTX_SUFF(pVM)->pgm.s.fNestedPaging || pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
2688
2689 /*
2690 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
2691 */
2692 RTGCPHYS const GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
2693 RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, cr3);
2694#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2695 if (pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT)
2696 {
2697 RTGCPHYS GCPhysOut;
2698 int const rc = pgmGstSlatTranslateCr3(pVCpu, GCPhysCR3, &GCPhysOut);
2699 if (RT_SUCCESS(rc))
2700 GCPhysCR3 = GCPhysOut;
2701 else
2702 {
2703 /* CR3 SLAT translation failed but we try to pretend it
2704 succeeded for the reasons mentioned in PGMHCChangeMode(). */
2705 Log(("SLAT failed for CR3 %#RX64 rc=%Rrc\n", cr3, rc));
2706 int const rc2 = pgmGstUnmapCr3(pVCpu);
2707 pVCpu->pgm.s.GCPhysCR3 = NIL_RTGCPHYS;
2708 pVCpu->pgm.s.GCPhysNstGstCR3 = NIL_RTGCPHYS;
2709 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
2710 return rc2;
2711 }
2712 }
2713#endif
2714
2715 LogFlowFunc(("cr3=%RX64 old=%RX64\n", cr3, GCPhysOldCR3));
2716 int rc = VINF_SUCCESS;
2717 if (GCPhysOldCR3 != GCPhysCR3)
2718 {
2719 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2720 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2721 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
2722
2723 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2724 rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3);
2725
2726 AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */ /** @todo this isn't true for the mac, but we need hw to test/fix this. */
2727 }
2728 /*
2729 * Flush PAE PDPTEs.
2730 */
2731 else if (PGMMODE_IS_PAE(pVCpu->pgm.s.enmGuestMode))
2732 pgmGstFlushPaePdpes(pVCpu);
2733
2734 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
2735 return rc;
2736}
2737
2738
2739/**
2740 * Synchronize the paging structures.
2741 *
2742 * This function is called in response to the VM_FF_PGM_SYNC_CR3 and
2743 * VM_FF_PGM_SYNC_CR3_NONGLOBAL. Those two force action flags are set
2744 * in several places, most importantly whenever the CR3 is loaded.
2745 *
2746 * @returns VBox status code. May return VINF_PGM_SYNC_CR3 in RC/R0.
2747 * @retval VERR_PGM_NO_HYPERVISOR_ADDRESS in raw-mode when we're unable to map
2748 * the VMM into guest context.
2749 * @param pVCpu The cross context virtual CPU structure.
2750 * @param cr0 Guest context CR0 register
2751 * @param cr3 Guest context CR3 register
2752 * @param cr4 Guest context CR4 register
2753 * @param fGlobal Including global page directories or not
2754 */
2755VMMDECL(int) PGMSyncCR3(PVMCPUCC pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
2756{
2757 int rc;
2758
2759 VMCPU_ASSERT_EMT(pVCpu);
2760
2761 /*
2762 * The pool may have pending stuff and even require a return to ring-3 to
2763 * clear the whole thing.
2764 */
2765 rc = pgmPoolSyncCR3(pVCpu);
2766 if (rc != VINF_SUCCESS)
2767 return rc;
2768
2769 /*
2770 * We might be called when we shouldn't.
2771 *
2772 * The mode switching will ensure that the PD is resynced after every mode
2773 * switch. So, if we find ourselves here when in protected or real mode
2774 * we can safely clear the FF and return immediately.
2775 */
2776 if (pVCpu->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
2777 {
2778 Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
2779 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2780 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2781 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2782 return VINF_SUCCESS;
2783 }
2784
2785 /* If global pages are not supported, then all flushes are global. */
2786 if (!(cr4 & X86_CR4_PGE))
2787 fGlobal = true;
2788 LogFlow(("PGMSyncCR3: cr0=%RX64 cr3=%RX64 cr4=%RX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
2789 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)));
2790
2791 /*
2792 * Check if we need to finish an aborted MapCR3 call (see PGMFlushTLB).
2793 * This should be done before SyncCR3.
2794 */
2795 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MAP_CR3)
2796 {
2797 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MAP_CR3;
2798
2799 RTGCPHYS const GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
2800 RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, cr3);
2801#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2802 if (pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT)
2803 {
2804 RTGCPHYS GCPhysOut;
2805 int rc2 = pgmGstSlatTranslateCr3(pVCpu, GCPhysCR3, &GCPhysOut);
2806 if (RT_SUCCESS(rc2))
2807 GCPhysCR3 = GCPhysOut;
2808 else
2809 {
2810 /* CR3 SLAT translation failed but we try to pretend it
2811 succeeded for the reasons mentioned in PGMHCChangeMode(). */
2812 AssertMsgFailed(("Failed to translate CR3 %#RX64. rc=%Rrc\n", cr3, rc2));
2813 pVCpu->pgm.s.GCPhysCR3 = NIL_RTGCPHYS;
2814 pVCpu->pgm.s.GCPhysNstGstCR3 = NIL_RTGCPHYS;
2815 return rc2;
2816 }
2817 }
2818#endif
2819 Assert(!pVCpu->pgm.s.CTX_SUFF(fPaePdpesAndCr3Mapped));
2820 if (GCPhysOldCR3 != GCPhysCR3)
2821 {
2822 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2823 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2824 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
2825 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2826 rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3);
2827 }
2828
2829 /* Make sure we check for pending pgm pool syncs as we clear VMCPU_FF_PGM_SYNC_CR3 later on! */
2830 if ( rc == VINF_PGM_SYNC_CR3
2831 || (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL))
2832 {
2833 Log(("PGMSyncCR3: pending pgm pool sync after MapCR3!\n"));
2834#ifdef IN_RING3
2835 rc = pgmPoolSyncCR3(pVCpu);
2836#else
2837 if (rc == VINF_PGM_SYNC_CR3)
2838 pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3;
2839 return VINF_PGM_SYNC_CR3;
2840#endif
2841 }
2842 AssertRCReturn(rc, rc);
2843 AssertRCSuccessReturn(rc, VERR_IPE_UNEXPECTED_INFO_STATUS);
2844 }
2845
2846 /*
2847 * Let the 'Bth' function do the work and we'll just keep track of the flags.
2848 */
2849 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
2850
2851 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2852 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2853 AssertReturn(g_aPgmBothModeData[idxBth].pfnSyncCR3, VERR_PGM_MODE_IPE);
2854 rc = g_aPgmBothModeData[idxBth].pfnSyncCR3(pVCpu, cr0, cr3, cr4, fGlobal);
2855
2856 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
2857 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
2858 if (rc == VINF_SUCCESS)
2859 {
2860 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)
2861 {
2862 /* Go back to ring 3 if a pgm pool sync is again pending. */
2863 return VINF_PGM_SYNC_CR3;
2864 }
2865
2866 if (!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
2867 {
2868 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2869 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2870 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2871 }
2872 }
2873
2874 /*
2875 * Now flush the CR3 (guest context).
2876 */
2877 if (rc == VINF_SUCCESS)
2878 PGM_INVL_VCPU_TLBS(pVCpu);
2879 return rc;
2880}
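
/*
 * A sketch of the force-action driven call pattern described above (illustrative
 * only; cr0/cr3/cr4 are assumed to be taken from the guest CPU context by the
 * caller):
 *
 *     if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
 *     {
 *         bool const fGlobal = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
 *         int rc = PGMSyncCR3(pVCpu, cr0, cr3, cr4, fGlobal);
 *         AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
 *     }
 */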
2881
2882
2883/**
2884 * Maps all the PAE PDPE entries.
2885 *
2886 * @returns VBox status code.
2887 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2888 * @param paPaePdpes The new PAE PDPE values.
2889 *
2890 * @remarks This function may be invoked during the process of changing the guest
2891 * paging mode to PAE, hence the guest state (CR0, CR4 etc.) may not
2892 * reflect PAE paging just yet.
2893 */
2894VMM_INT_DECL(int) PGMGstMapPaePdpes(PVMCPUCC pVCpu, PCX86PDPE paPaePdpes)
2895{
2896 Assert(paPaePdpes);
2897 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
2898 {
2899 X86PDPE const PaePdpe = paPaePdpes[i];
2900
2901 /*
2902 * In some cases (e.g. in SVM with nested paging) the validation of the PAE PDPEs
2903 * is deferred.[1] Also, different situations require different handling of invalid
2904 * PDPE entries. Here we assume the caller has already validated or doesn't require
2905 * validation of the PDPEs.
2906 *
2907 * In the case of nested EPT (i.e. for nested-guests), the PAE PDPEs have been
2908 * validated by the VMX transition.
2909 *
2910 * [1] -- See AMD spec. 15.25.10 "Legacy PAE Mode".
2911 */
2912 if ((PaePdpe.u & (pVCpu->pgm.s.fGstPaeMbzPdpeMask | X86_PDPE_P)) == X86_PDPE_P)
2913 {
2914 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2915 RTHCPTR HCPtr;
2916
2917 RTGCPHYS GCPhys;
2918#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2919 if (pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT)
2920 {
2921 PGMPTWALK Walk;
2922 PGMPTWALKGST GstWalk;
2923 RTGCPHYS const GCPhysNested = PaePdpe.u & X86_PDPE_PG_MASK;
2924 int const rc = pgmGstSlatWalk(pVCpu, GCPhysNested, false /* fIsLinearAddrValid */, 0 /* GCPtrNested */,
2925 &Walk, &GstWalk);
2926 if (RT_SUCCESS(rc))
2927 GCPhys = Walk.GCPhys;
2928 else
2929 {
2930 /*
2931 * Second-level address translation of the PAE PDPE has failed but we must -NOT-
2932 * abort and return a failure now. This is because we're called from a Mov CRx
2933 * instruction (or similar operation). Let's just pretend success but flag that
2934 * we need to map this PDPE lazily later.
2935 *
2936 * See Intel spec. 25.3 "Changes to instruction behavior in VMX non-root operation".
2937 * See Intel spec. 28.3.1 "EPT Overview".
2938 */
2939 pVCpu->pgm.s.apGstPaePDsR3[i] = 0;
2940 pVCpu->pgm.s.apGstPaePDsR0[i] = 0;
2941 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
2942 continue;
2943 }
2944 }
2945 else
2946#endif
2947 {
2948 GCPhys = PGM_A20_APPLY(pVCpu, PaePdpe.u & X86_PDPE_PG_MASK);
2949 }
2950
2951 PGM_LOCK_VOID(pVM);
2952 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
2953 AssertReturnStmt(pPage, PGM_UNLOCK(pVM), VERR_PGM_INVALID_PDPE_ADDR);
2954 int const rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)&HCPtr);
2955 PGM_UNLOCK(pVM);
2956 if (RT_SUCCESS(rc))
2957 {
2958#ifdef IN_RING3
2959 pVCpu->pgm.s.apGstPaePDsR3[i] = (PX86PDPAE)HCPtr;
2960 pVCpu->pgm.s.apGstPaePDsR0[i] = NIL_RTR0PTR;
2961#else
2962 pVCpu->pgm.s.apGstPaePDsR3[i] = NIL_RTR3PTR;
2963 pVCpu->pgm.s.apGstPaePDsR0[i] = (PX86PDPAE)HCPtr;
2964#endif
2965 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = GCPhys;
2966 continue;
2967 }
2968 AssertMsgFailed(("PGMPhysMapPaePdpes: rc2=%d GCPhys=%RGp i=%d\n", rc, GCPhys, i));
2969 }
2970 pVCpu->pgm.s.apGstPaePDsR3[i] = 0;
2971 pVCpu->pgm.s.apGstPaePDsR0[i] = 0;
2972 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
2973 }
2974 return VINF_SUCCESS;
2975}
2976
2977
2978/**
2979 * Validates and maps the PDPT and PAE PDPEs referenced by the given CR3.
2980 *
2981 * @returns VBox status code.
2982 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2983 * @param cr3 The guest CR3 value.
2984 *
2985 * @remarks This function may be invoked during the process of changing the guest
2986 * paging mode to PAE but the guest state (CR0, CR4 etc.) may not reflect
2987 * PAE paging just yet.
2988 */
2989VMM_INT_DECL(int) PGMGstMapPaePdpesAtCr3(PVMCPUCC pVCpu, uint64_t cr3)
2990{
2991 /*
2992 * Read the page-directory-pointer table (PDPT) at CR3.
2993 */
2994 RTGCPHYS GCPhysCR3 = (cr3 & X86_CR3_PAE_PAGE_MASK);
2995 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
2996
2997#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2998 if (pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT)
2999 {
3000 RTGCPHYS GCPhysOut;
3001 int const rc = pgmGstSlatTranslateCr3(pVCpu, GCPhysCR3, &GCPhysOut);
3002 if (RT_SUCCESS(rc))
3003 GCPhysCR3 = GCPhysOut;
3004 else
3005 {
3006 Log(("Failed to load CR3 at %#RX64. rc=%Rrc\n", GCPhysCR3, rc));
3007 return rc;
3008 }
3009 }
3010#endif
3011
3012 RTHCPTR HCPtrGuestCr3;
3013 int rc = pgmGstMapCr3(pVCpu, GCPhysCR3, &HCPtrGuestCr3);
3014 if (RT_SUCCESS(rc))
3015 {
3016 /*
3017 * Validate the page-directory-pointer table entries (PDPE).
3018 */
3019 X86PDPE aPaePdpes[X86_PG_PAE_PDPE_ENTRIES];
3020 memcpy(&aPaePdpes[0], HCPtrGuestCr3, sizeof(aPaePdpes));
3021 if (PGMGstArePaePdpesValid(pVCpu, &aPaePdpes[0]))
3022 {
3023 /*
3024 * Map the PDPT.
3025 * We deliberately don't update PGM's GCPhysCR3 here as it's expected
3026 * that PGMFlushTLB will be called soon and only a change to CR3 then
3027 * will cause the shadow page tables to be updated.
3028 */
3029#ifdef IN_RING3
3030 pVCpu->pgm.s.pGstPaePdptR3 = (PX86PDPT)HCPtrGuestCr3;
3031 pVCpu->pgm.s.pGstPaePdptR0 = NIL_RTR0PTR;
3032#else
3033 pVCpu->pgm.s.pGstPaePdptR3 = NIL_RTR3PTR;
3034 pVCpu->pgm.s.pGstPaePdptR0 = (PX86PDPT)HCPtrGuestCr3;
3035#endif
3036
3037 /*
3038 * Update CPUM and map the 4 PAE PDPEs.
3039 */
3040 CPUMSetGuestPaePdpes(pVCpu, &aPaePdpes[0]);
3041 rc = PGMGstMapPaePdpes(pVCpu, &aPaePdpes[0]);
3042 if (RT_SUCCESS(rc))
3043 {
3044#ifdef IN_RING3
3045 pVCpu->pgm.s.fPaePdpesAndCr3MappedR3 = true;
3046 pVCpu->pgm.s.fPaePdpesAndCr3MappedR0 = false;
3047#else
3048 pVCpu->pgm.s.fPaePdpesAndCr3MappedR3 = false;
3049 pVCpu->pgm.s.fPaePdpesAndCr3MappedR0 = true;
3050#endif
3051 pVCpu->pgm.s.GCPhysPaeCR3 = GCPhysCR3;
3052 }
3053 }
3054 else
3055 rc = VERR_PGM_PAE_PDPE_RSVD;
3056 }
3057 return rc;
3058}
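
/*
 * An illustrative call sketch for PGMGstMapPaePdpesAtCr3() (uGuestCr3 is a
 * placeholder; how the caller reacts to reserved-bit violations is left out
 * since that depends on the emulation context):
 *
 *     int rc = PGMGstMapPaePdpesAtCr3(pVCpu, uGuestCr3);
 *     if (rc == VERR_PGM_PAE_PDPE_RSVD)
 *         Log(("Reserved bits set in a PAE PDPE at CR3=%#RX64\n", uGuestCr3));
 */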
3059
3060
3061/**
3062 * Called whenever CR0 or CR4 changes in a way which may affect the paging mode.
3063 *
3064 * @returns VBox status code, with the following informational code for
3065 * VM scheduling.
3066 * @retval VINF_SUCCESS if there was no change, or it was successfully dealt with.
3067 * @retval VINF_EM_SUSPEND or VINF_EM_OFF on a fatal runtime error. (R3 only)
3068 *
3069 * @param pVCpu The cross context virtual CPU structure.
3070 * @param cr0 The new cr0.
3071 * @param cr4 The new cr4.
3072 * @param efer The new extended feature enable register.
3073 * @param fForce Whether to force a mode change.
3074 */
3075VMMDECL(int) PGMChangeMode(PVMCPUCC pVCpu, uint64_t cr0, uint64_t cr4, uint64_t efer, bool fForce)
3076{
3077 VMCPU_ASSERT_EMT(pVCpu);
3078
3079 /*
3080 * Calc the new guest mode.
3081 *
3082 * Note! We check PG before PE and without requiring PE because of the
3083 * special AMD-V paged real mode (APM vol 2, rev 3.28, 15.9).
3084 */
3085 PGMMODE enmGuestMode;
3086 if (cr0 & X86_CR0_PG)
3087 {
3088 if (!(cr4 & X86_CR4_PAE))
3089 {
3090 bool const fPse = !!(cr4 & X86_CR4_PSE);
3091 if (pVCpu->pgm.s.fGst32BitPageSizeExtension != fPse)
3092 Log(("PGMChangeMode: CR4.PSE %d -> %d\n", pVCpu->pgm.s.fGst32BitPageSizeExtension, fPse));
3093 pVCpu->pgm.s.fGst32BitPageSizeExtension = fPse;
3094 enmGuestMode = PGMMODE_32_BIT;
3095 }
3096 else if (!(efer & MSR_K6_EFER_LME))
3097 {
3098 if (!(efer & MSR_K6_EFER_NXE))
3099 enmGuestMode = PGMMODE_PAE;
3100 else
3101 enmGuestMode = PGMMODE_PAE_NX;
3102 }
3103 else
3104 {
3105 if (!(efer & MSR_K6_EFER_NXE))
3106 enmGuestMode = PGMMODE_AMD64;
3107 else
3108 enmGuestMode = PGMMODE_AMD64_NX;
3109 }
3110 }
3111 else if (!(cr0 & X86_CR0_PE))
3112 enmGuestMode = PGMMODE_REAL;
3113 else
3114 enmGuestMode = PGMMODE_PROTECTED;
3115
3116 /*
3117 * Did it change?
3118 */
3119 if ( !fForce
3120 && pVCpu->pgm.s.enmGuestMode == enmGuestMode)
3121 return VINF_SUCCESS;
3122
3123 /* Flush the TLB */
3124 PGM_INVL_VCPU_TLBS(pVCpu);
3125 return PGMHCChangeMode(pVCpu->CTX_SUFF(pVM), pVCpu, enmGuestMode, fForce);
3126}
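
/*
 * A minimal sketch of invoking PGMChangeMode() after a control register update
 * (illustrative; cr0/cr4/efer stand for the new guest values and the caller is
 * assumed to be on the EMT):
 *
 *     int rc = PGMChangeMode(pVCpu, cr0, cr4, efer, false);  // fForce=false
 *     // rc may be an informational scheduling status, see the @retval notes above.
 */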
3127
3128
3129/**
3130 * Converts a PGMMODE value to a PGM_TYPE_* \#define.
3131 *
3132 * @returns PGM_TYPE_*.
3133 * @param pgmMode The mode value to convert.
3134 */
3135DECLINLINE(unsigned) pgmModeToType(PGMMODE pgmMode)
3136{
3137 switch (pgmMode)
3138 {
3139 case PGMMODE_REAL: return PGM_TYPE_REAL;
3140 case PGMMODE_PROTECTED: return PGM_TYPE_PROT;
3141 case PGMMODE_32_BIT: return PGM_TYPE_32BIT;
3142 case PGMMODE_PAE:
3143 case PGMMODE_PAE_NX: return PGM_TYPE_PAE;
3144 case PGMMODE_AMD64:
3145 case PGMMODE_AMD64_NX: return PGM_TYPE_AMD64;
3146 case PGMMODE_NESTED_32BIT: return PGM_TYPE_NESTED_32BIT;
3147 case PGMMODE_NESTED_PAE: return PGM_TYPE_NESTED_PAE;
3148 case PGMMODE_NESTED_AMD64: return PGM_TYPE_NESTED_AMD64;
3149 case PGMMODE_EPT: return PGM_TYPE_EPT;
3150 case PGMMODE_NONE: return PGM_TYPE_NONE;
3151 default:
3152 AssertFatalMsgFailed(("pgmMode=%d\n", pgmMode));
3153 }
3154}
3155
3156
3157/**
3158 * Calculates the shadow paging mode.
3159 *
3160 * @returns The shadow paging mode.
3161 * @param pVM The cross context VM structure.
3162 * @param enmGuestMode The guest mode.
3163 * @param enmHostMode The host mode.
3164 * @param enmShadowMode The current shadow mode.
3165 */
3166static PGMMODE pgmCalcShadowMode(PVMCC pVM, PGMMODE enmGuestMode, SUPPAGINGMODE enmHostMode, PGMMODE enmShadowMode)
3167{
3168 switch (enmGuestMode)
3169 {
3170 case PGMMODE_REAL:
3171 case PGMMODE_PROTECTED:
3172 switch (enmHostMode)
3173 {
3174 case SUPPAGINGMODE_32_BIT:
3175 case SUPPAGINGMODE_32_BIT_GLOBAL:
3176 enmShadowMode = PGMMODE_32_BIT;
3177 break;
3178
3179 case SUPPAGINGMODE_PAE:
3180 case SUPPAGINGMODE_PAE_NX:
3181 case SUPPAGINGMODE_PAE_GLOBAL:
3182 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3183 enmShadowMode = PGMMODE_PAE;
3184 break;
3185
3186 case SUPPAGINGMODE_AMD64:
3187 case SUPPAGINGMODE_AMD64_GLOBAL:
3188 case SUPPAGINGMODE_AMD64_NX:
3189 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3190 enmShadowMode = PGMMODE_PAE;
3191 break;
3192
3193 default:
3194 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
3195 }
3196 break;
3197
3198 case PGMMODE_32_BIT:
3199 switch (enmHostMode)
3200 {
3201 case SUPPAGINGMODE_32_BIT:
3202 case SUPPAGINGMODE_32_BIT_GLOBAL:
3203 enmShadowMode = PGMMODE_32_BIT;
3204 break;
3205
3206 case SUPPAGINGMODE_PAE:
3207 case SUPPAGINGMODE_PAE_NX:
3208 case SUPPAGINGMODE_PAE_GLOBAL:
3209 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3210 enmShadowMode = PGMMODE_PAE;
3211 break;
3212
3213 case SUPPAGINGMODE_AMD64:
3214 case SUPPAGINGMODE_AMD64_GLOBAL:
3215 case SUPPAGINGMODE_AMD64_NX:
3216 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3217 enmShadowMode = PGMMODE_PAE;
3218 break;
3219
3220 default:
3221 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
3222 }
3223 break;
3224
3225 case PGMMODE_PAE:
3226 case PGMMODE_PAE_NX: /** @todo This might require more switchers and guest+both modes. */
3227 switch (enmHostMode)
3228 {
3229 case SUPPAGINGMODE_32_BIT:
3230 case SUPPAGINGMODE_32_BIT_GLOBAL:
3231 enmShadowMode = PGMMODE_PAE;
3232 break;
3233
3234 case SUPPAGINGMODE_PAE:
3235 case SUPPAGINGMODE_PAE_NX:
3236 case SUPPAGINGMODE_PAE_GLOBAL:
3237 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3238 enmShadowMode = PGMMODE_PAE;
3239 break;
3240
3241 case SUPPAGINGMODE_AMD64:
3242 case SUPPAGINGMODE_AMD64_GLOBAL:
3243 case SUPPAGINGMODE_AMD64_NX:
3244 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3245 enmShadowMode = PGMMODE_PAE;
3246 break;
3247
3248 default:
3249 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
3250 }
3251 break;
3252
3253 case PGMMODE_AMD64:
3254 case PGMMODE_AMD64_NX:
3255 switch (enmHostMode)
3256 {
3257 case SUPPAGINGMODE_32_BIT:
3258 case SUPPAGINGMODE_32_BIT_GLOBAL:
3259 enmShadowMode = PGMMODE_AMD64;
3260 break;
3261
3262 case SUPPAGINGMODE_PAE:
3263 case SUPPAGINGMODE_PAE_NX:
3264 case SUPPAGINGMODE_PAE_GLOBAL:
3265 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3266 enmShadowMode = PGMMODE_AMD64;
3267 break;
3268
3269 case SUPPAGINGMODE_AMD64:
3270 case SUPPAGINGMODE_AMD64_GLOBAL:
3271 case SUPPAGINGMODE_AMD64_NX:
3272 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3273 enmShadowMode = PGMMODE_AMD64;
3274 break;
3275
3276 default:
3277 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
3278 }
3279 break;
3280
3281 default:
3282 AssertLogRelMsgFailedReturn(("enmGuestMode=%d\n", enmGuestMode), PGMMODE_INVALID);
3283 }
3284
3285 /*
3286 * Override the shadow mode when NEM, IEM or nested paging is active.
3287 */
3288 if (!VM_IS_HM_ENABLED(pVM))
3289 {
3290 Assert(VM_IS_NEM_ENABLED(pVM) || VM_IS_EXEC_ENGINE_IEM(pVM));
3291 pVM->pgm.s.fNestedPaging = true;
3292 enmShadowMode = PGMMODE_NONE;
3293 }
3294 else
3295 {
3296 bool fNestedPaging = HMIsNestedPagingActive(pVM);
3297 pVM->pgm.s.fNestedPaging = fNestedPaging;
3298 if (fNestedPaging)
3299 {
3300 if (HMIsVmxActive(pVM))
3301 enmShadowMode = PGMMODE_EPT;
3302 else
3303 {
3304 /* The nested SVM paging depends on the host one. */
3305 Assert(HMIsSvmActive(pVM));
3306 if ( enmGuestMode == PGMMODE_AMD64
3307 || enmGuestMode == PGMMODE_AMD64_NX)
3308 enmShadowMode = PGMMODE_NESTED_AMD64;
3309 else
3310 switch (pVM->pgm.s.enmHostMode)
3311 {
3312 case SUPPAGINGMODE_32_BIT:
3313 case SUPPAGINGMODE_32_BIT_GLOBAL:
3314 enmShadowMode = PGMMODE_NESTED_32BIT;
3315 break;
3316
3317 case SUPPAGINGMODE_PAE:
3318 case SUPPAGINGMODE_PAE_GLOBAL:
3319 case SUPPAGINGMODE_PAE_NX:
3320 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3321 enmShadowMode = PGMMODE_NESTED_PAE;
3322 break;
3323
3324 case SUPPAGINGMODE_AMD64:
3325 case SUPPAGINGMODE_AMD64_GLOBAL:
3326 case SUPPAGINGMODE_AMD64_NX:
3327 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3328 enmShadowMode = PGMMODE_NESTED_AMD64;
3329 break;
3330
3331 default:
3332 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode), PGMMODE_INVALID);
3333 }
3334 }
3335 }
3336#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
3337 else
3338 {
3339 /* Nested paging is a requirement for nested VT-x. */
3340 AssertLogRelMsgReturn(enmGuestMode != PGMMODE_EPT, ("enmHostMode=%d\n", pVM->pgm.s.enmHostMode), PGMMODE_INVALID);
3341 }
3342#endif
3343 }
3344
3345 return enmShadowMode;
3346}
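/*
 * Illustrative input/output pairs for pgmCalcShadowMode, written as a hypothetical
 * strict-build self-check. The helper name is invented; the expected values follow the
 * switch above and assume HM is enabled without nested paging, so the override block at
 * the end leaves the calculated mode untouched.
 */
static void examplePgmCheckShadowModeMapping(PVMCC pVM)
{
    /* 32-bit and PAE guests get a PAE shadow on an AMD64 host. */
    Assert(pgmCalcShadowMode(pVM, PGMMODE_32_BIT, SUPPAGINGMODE_AMD64, PGMMODE_INVALID) == PGMMODE_PAE);
    Assert(pgmCalcShadowMode(pVM, PGMMODE_PAE,    SUPPAGINGMODE_AMD64, PGMMODE_INVALID) == PGMMODE_PAE);
    /* A long mode guest needs an AMD64 shadow. */
    Assert(pgmCalcShadowMode(pVM, PGMMODE_AMD64,  SUPPAGINGMODE_AMD64, PGMMODE_INVALID) == PGMMODE_AMD64);
    NOREF(pVM); /* Keep non-strict builds warning-free. */
}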
3347
3348
3349/**
3350 * Performs the actual mode change.
3351 * This is called by PGMChangeMode and pgmR3InitPaging().
3352 *
3353 * @returns VBox status code. May suspend or power off the VM on error, but this
3354 * will trigger using FFs and not informational status codes.
3355 *
3356 * @param pVM The cross context VM structure.
3357 * @param pVCpu The cross context virtual CPU structure.
3358 * @param enmGuestMode The new guest mode. This is assumed to be different from
3359 * the current mode.
3360 * @param fForce Whether to force a shadow paging mode change.
3361 */
3362VMM_INT_DECL(int) PGMHCChangeMode(PVMCC pVM, PVMCPUCC pVCpu, PGMMODE enmGuestMode, bool fForce)
3363{
3364 Log(("PGMHCChangeMode: Guest mode: %s -> %s\n", PGMGetModeName(pVCpu->pgm.s.enmGuestMode), PGMGetModeName(enmGuestMode)));
3365 STAM_REL_COUNTER_INC(&pVCpu->pgm.s.cGuestModeChanges);
3366
3367 /*
3368 * Calc the shadow mode and switcher.
3369 */
3370 PGMMODE const enmShadowMode = pgmCalcShadowMode(pVM, enmGuestMode, pVM->pgm.s.enmHostMode, pVCpu->pgm.s.enmShadowMode);
3371 bool const fShadowModeChanged = enmShadowMode != pVCpu->pgm.s.enmShadowMode || fForce;
3372
3373 /*
3374 * Exit old mode(s).
3375 */
3376 /* shadow */
3377 if (fShadowModeChanged)
3378 {
3379 LogFlow(("PGMHCChangeMode: Shadow mode: %s -> %s\n", PGMGetModeName(pVCpu->pgm.s.enmShadowMode), PGMGetModeName(enmShadowMode)));
3380 uintptr_t idxOldShw = pVCpu->pgm.s.idxShadowModeData;
3381 if ( idxOldShw < RT_ELEMENTS(g_aPgmShadowModeData)
3382 && g_aPgmShadowModeData[idxOldShw].pfnExit)
3383 {
3384 int rc = g_aPgmShadowModeData[idxOldShw].pfnExit(pVCpu);
3385 AssertMsgRCReturn(rc, ("Exit failed for shadow mode %d: %Rrc\n", pVCpu->pgm.s.enmShadowMode, rc), rc);
3386 }
3387 }
3388 else
3389 LogFlow(("PGMHCChangeMode: Shadow mode remains: %s\n", PGMGetModeName(pVCpu->pgm.s.enmShadowMode)));
3390
3391 /* guest */
3392 uintptr_t const idxOldGst = pVCpu->pgm.s.idxGuestModeData;
3393 if ( idxOldGst < RT_ELEMENTS(g_aPgmGuestModeData)
3394 && g_aPgmGuestModeData[idxOldGst].pfnExit)
3395 {
3396 int rc = g_aPgmGuestModeData[idxOldGst].pfnExit(pVCpu);
3397 AssertMsgReturn(RT_SUCCESS(rc), ("Exit failed for guest mode %d: %Rrc\n", pVCpu->pgm.s.enmGuestMode, rc), rc);
3398 }
3399 pVCpu->pgm.s.GCPhysCR3 = NIL_RTGCPHYS;
3400 pVCpu->pgm.s.GCPhysNstGstCR3 = NIL_RTGCPHYS;
3401 pVCpu->pgm.s.GCPhysPaeCR3 = NIL_RTGCPHYS;
3402 Assert(!pVCpu->pgm.s.CTX_SUFF(fPaePdpesAndCr3Mapped));
3403
3404 /*
3405 * Change the paging mode data indexes.
3406 */
3407 uintptr_t idxNewGst = pVCpu->pgm.s.idxGuestModeData = pgmModeToType(enmGuestMode);
3408 AssertReturn(idxNewGst < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
3409 AssertReturn(g_aPgmGuestModeData[idxNewGst].uType == idxNewGst, VERR_PGM_MODE_IPE);
3410 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnGetPage, VERR_PGM_MODE_IPE);
3411 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnModifyPage, VERR_PGM_MODE_IPE);
3412 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnExit, VERR_PGM_MODE_IPE);
3413 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnEnter, VERR_PGM_MODE_IPE);
3414#ifdef IN_RING3
3415 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnRelocate, VERR_PGM_MODE_IPE);
3416#endif
3417
3418 uintptr_t const idxNewShw = pVCpu->pgm.s.idxShadowModeData = pgmModeToType(enmShadowMode);
3419 AssertReturn(idxNewShw < RT_ELEMENTS(g_aPgmShadowModeData), VERR_PGM_MODE_IPE);
3420 AssertReturn(g_aPgmShadowModeData[idxNewShw].uType == idxNewShw, VERR_PGM_MODE_IPE);
3421 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnGetPage, VERR_PGM_MODE_IPE);
3422 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnModifyPage, VERR_PGM_MODE_IPE);
3423 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnExit, VERR_PGM_MODE_IPE);
3424 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnEnter, VERR_PGM_MODE_IPE);
3425#ifdef IN_RING3
3426 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnRelocate, VERR_PGM_MODE_IPE);
3427#endif
3428
3429 uintptr_t const idxNewBth = pVCpu->pgm.s.idxBothModeData = (idxNewShw - PGM_TYPE_FIRST_SHADOW) * PGM_TYPE_END + idxNewGst;
3430 AssertReturn(g_aPgmBothModeData[idxNewBth].uShwType == idxNewShw, VERR_PGM_MODE_IPE);
3431 AssertReturn(g_aPgmBothModeData[idxNewBth].uGstType == idxNewGst, VERR_PGM_MODE_IPE);
3432 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnInvalidatePage, VERR_PGM_MODE_IPE);
3433 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnSyncCR3, VERR_PGM_MODE_IPE);
3434 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnPrefetchPage, VERR_PGM_MODE_IPE);
3435 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnVerifyAccessSyncPage, VERR_PGM_MODE_IPE);
3436 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnMapCR3, VERR_PGM_MODE_IPE);
3437 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnUnmapCR3, VERR_PGM_MODE_IPE);
3438 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnEnter, VERR_PGM_MODE_IPE);
3439#ifdef VBOX_STRICT
3440 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnAssertCR3, VERR_PGM_MODE_IPE);
3441#endif
3442
3443 /*
3444 * Determine SLAT mode -before- entering the new shadow mode!
3445 */
3446 pVCpu->pgm.s.enmGuestSlatMode = !CPUMIsGuestVmxEptPagingEnabled(pVCpu) ? PGMSLAT_DIRECT : PGMSLAT_EPT;
3447
3448 /*
3449 * Enter new shadow mode (if changed).
3450 */
3451 if (fShadowModeChanged)
3452 {
3453 pVCpu->pgm.s.enmShadowMode = enmShadowMode;
3454 int rc = g_aPgmShadowModeData[idxNewShw].pfnEnter(pVCpu);
3455 AssertLogRelMsgRCReturnStmt(rc, ("Entering enmShadowMode=%s failed: %Rrc\n", PGMGetModeName(enmShadowMode), rc),
3456 pVCpu->pgm.s.enmShadowMode = PGMMODE_INVALID, rc);
3457 }
3458
3459 /*
3460 * Always flag the necessary updates
3461 */
3462 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3463
3464 /*
3465 * Enter the new guest and shadow+guest modes.
3466 */
3467 /* Calc the new CR3 value. */
3468 RTGCPHYS GCPhysCR3;
3469 switch (enmGuestMode)
3470 {
3471 case PGMMODE_REAL:
3472 case PGMMODE_PROTECTED:
3473 GCPhysCR3 = NIL_RTGCPHYS;
3474 break;
3475
3476 case PGMMODE_32_BIT:
3477 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_PAGE_MASK;
3478 break;
3479
3480 case PGMMODE_PAE_NX:
3481 case PGMMODE_PAE:
3482 if (!pVM->cpum.ro.GuestFeatures.fPae)
3483#ifdef IN_RING3 /** @todo r=bird: wrong place, probably hasn't really worked for a while. */
3484 return VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_FATAL, "PAEmode",
3485 N_("The guest is trying to switch to the PAE mode which is currently disabled by default in VirtualBox. PAE support can be enabled using the VM settings (System/Processor)"));
3486#else
3487 AssertLogRelMsgFailedReturn(("enmGuestMode=%s - Try enable PAE for the guest!\n", PGMGetModeName(enmGuestMode)), VERR_PGM_MODE_IPE);
3488
3489#endif
3490 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_PAE_PAGE_MASK;
3491 break;
3492
3493#ifdef VBOX_WITH_64_BITS_GUESTS
3494 case PGMMODE_AMD64_NX:
3495 case PGMMODE_AMD64:
3496 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_AMD64_PAGE_MASK;
3497 break;
3498#endif
3499 default:
3500 AssertLogRelMsgFailedReturn(("enmGuestMode=%d\n", enmGuestMode), VERR_PGM_MODE_IPE);
3501 }
3502
3503#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
3504 /*
3505 * If a nested-guest is using EPT paging:
3506 * - Update the second-level address translation (SLAT) mode.
3507 * - Indicate that the CR3 is nested-guest physical address.
3508 */
3509 if (pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT)
3510 {
3511 if (PGMMODE_WITH_PAGING(enmGuestMode))
3512 {
3513 /*
3514 * Translate CR3 to its guest-physical address.
3515 * We don't use pgmGstSlatTranslateCr3() here as we want to update GCPhysNstGstCR3 -after-
3516 * switching modes to keep it consistent with how GCPhysCR3 is updated.
3517 */
3518 PGMPTWALK Walk;
3519 PGMPTWALKGST GstWalk;
3520 int const rc = pgmGstSlatWalk(pVCpu, GCPhysCR3, false /* fIsLinearAddrValid */, 0 /* GCPtrNested */, &Walk,
3521 &GstWalk);
3522 if (RT_SUCCESS(rc))
3523 { /* likely */ }
3524 else
3525 {
3526 /*
3527 * SLAT failed but we avoid reporting this to the caller because the caller
3528 * is not supposed to fail. The only time the caller needs to indicate a
3529 * failure to software is when PAE paging is used by the nested-guest, but
3530 * we handle the PAE case separately (e.g., see VMX transition in IEM).
3531 * In all other cases, the failure will be indicated when CR3 tries to be
3532 * translated on the next linear-address memory access.
3533 * See Intel spec. 27.2.1 "EPT Overview".
3534 */
3535 Log(("SLAT failed for CR3 %#RX64 rc=%Rrc\n", GCPhysCR3, rc));
3536
3537 /* Trying to coax PGM to succeed for the time being... */
3538 Assert(pVCpu->pgm.s.GCPhysCR3 == NIL_RTGCPHYS);
3539 pVCpu->pgm.s.GCPhysNstGstCR3 = GCPhysCR3;
3540 pVCpu->pgm.s.enmGuestMode = enmGuestMode;
3541 HMHCChangedPagingMode(pVM, pVCpu, pVCpu->pgm.s.enmShadowMode, pVCpu->pgm.s.enmGuestMode);
3542 return VINF_SUCCESS;
3543 }
3544 pVCpu->pgm.s.GCPhysNstGstCR3 = GCPhysCR3;
3545 GCPhysCR3 = Walk.GCPhys & X86_CR3_EPT_PAGE_MASK;
3546 }
3547 }
3548 else
3549 Assert(pVCpu->pgm.s.GCPhysNstGstCR3 == NIL_RTGCPHYS);
3550#endif
3551
3552 /*
3553 * Enter the new guest mode.
3554 */
3555 pVCpu->pgm.s.enmGuestMode = enmGuestMode;
3556 int rc = g_aPgmGuestModeData[idxNewGst].pfnEnter(pVCpu, GCPhysCR3);
3557 int rc2 = g_aPgmBothModeData[idxNewBth].pfnEnter(pVCpu, GCPhysCR3);
3558
3559 /* Set the new guest CR3 (and nested-guest CR3). */
3560 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
3561
3562 /* status codes. */
3563 AssertRC(rc);
3564 AssertRC(rc2);
3565 if (RT_SUCCESS(rc))
3566 {
3567 rc = rc2;
3568 if (RT_SUCCESS(rc)) /* no informational status codes. */
3569 rc = VINF_SUCCESS;
3570 }
3571
3572 /*
3573 * Notify HM.
3574 */
3575 HMHCChangedPagingMode(pVM, pVCpu, pVCpu->pgm.s.enmShadowMode, pVCpu->pgm.s.enmGuestMode);
3576 return rc;
3577}
3578
3579
3580/**
3581 * Called by CPUM or REM when CR0.WP changes to 1.
3582 *
3583 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3584 * @thread EMT
3585 */
3586VMMDECL(void) PGMCr0WpEnabled(PVMCPUCC pVCpu)
3587{
3588 /*
3589 * Netware WP0+RO+US hack cleanup when WP0 -> WP1.
3590 *
3591 * Use the counter to judge whether there might be pool pages with active
3592 * hacks in them. If there are, we will be running the risk of messing up
3593 * the guest by allowing it to write to read-only pages. Thus, we have to
3594 * clear the page pool ASAP if there is the slightest chance.
3595 */
3596 if (pVCpu->pgm.s.cNetwareWp0Hacks > 0)
3597 {
3598 Assert(pVCpu->CTX_SUFF(pVM)->cCpus == 1);
3599
3600 Log(("PGMCr0WpEnabled: %llu WP0 hacks active - clearing page pool\n", pVCpu->pgm.s.cNetwareWp0Hacks));
3601 pVCpu->pgm.s.cNetwareWp0Hacks = 0;
3602 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
3603 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3604 }
3605}
3606
3607
3608/**
3609 * Gets the current guest paging mode.
3610 *
3611 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
3612 *
3613 * @returns The current paging mode.
3614 * @param pVCpu The cross context virtual CPU structure.
3615 */
3616VMMDECL(PGMMODE) PGMGetGuestMode(PVMCPU pVCpu)
3617{
3618 return pVCpu->pgm.s.enmGuestMode;
3619}
3620
3621
3622/**
3623 * Gets the current shadow paging mode.
3624 *
3625 * @returns The current paging mode.
3626 * @param pVCpu The cross context virtual CPU structure.
3627 */
3628VMMDECL(PGMMODE) PGMGetShadowMode(PVMCPU pVCpu)
3629{
3630 return pVCpu->pgm.s.enmShadowMode;
3631}
3632
3633
3634/**
3635 * Gets the current host paging mode.
3636 *
3637 * @returns The current paging mode.
3638 * @param pVM The cross context VM structure.
3639 */
3640VMMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
3641{
3642 switch (pVM->pgm.s.enmHostMode)
3643 {
3644 case SUPPAGINGMODE_32_BIT:
3645 case SUPPAGINGMODE_32_BIT_GLOBAL:
3646 return PGMMODE_32_BIT;
3647
3648 case SUPPAGINGMODE_PAE:
3649 case SUPPAGINGMODE_PAE_GLOBAL:
3650 return PGMMODE_PAE;
3651
3652 case SUPPAGINGMODE_PAE_NX:
3653 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3654 return PGMMODE_PAE_NX;
3655
3656 case SUPPAGINGMODE_AMD64:
3657 case SUPPAGINGMODE_AMD64_GLOBAL:
3658 return PGMMODE_AMD64;
3659
3660 case SUPPAGINGMODE_AMD64_NX:
3661 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3662 return PGMMODE_AMD64_NX;
3663
3664 default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
3665 }
3666
3667 return PGMMODE_INVALID;
3668}
3669
3670
3671/**
3672 * Get mode name.
3673 *
3674 * @returns read-only name string.
3675 * @param enmMode The mode whose name is desired.
3676 */
3677VMMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
3678{
3679 switch (enmMode)
3680 {
3681 case PGMMODE_REAL: return "Real";
3682 case PGMMODE_PROTECTED: return "Protected";
3683 case PGMMODE_32_BIT: return "32-bit";
3684 case PGMMODE_PAE: return "PAE";
3685 case PGMMODE_PAE_NX: return "PAE+NX";
3686 case PGMMODE_AMD64: return "AMD64";
3687 case PGMMODE_AMD64_NX: return "AMD64+NX";
3688 case PGMMODE_NESTED_32BIT: return "Nested-32";
3689 case PGMMODE_NESTED_PAE: return "Nested-PAE";
3690 case PGMMODE_NESTED_AMD64: return "Nested-AMD64";
3691 case PGMMODE_EPT: return "EPT";
3692 case PGMMODE_NONE: return "None";
3693 default: return "unknown mode value";
3694 }
3695}
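/*
 * A minimal logging sketch using only accessors declared above (PGMGetGuestMode,
 * PGMGetShadowMode, PGMGetModeName); the wrapper function itself is invented for
 * illustration.
 */
static void examplePgmLogCurrentModes(PVMCPU pVCpu)
{
    Log(("pgm: guest mode %s, shadow mode %s\n",
         PGMGetModeName(PGMGetGuestMode(pVCpu)),
         PGMGetModeName(PGMGetShadowMode(pVCpu))));
    NOREF(pVCpu); /* In builds without logging the parameter would otherwise be unused. */
}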
3696
3697
3698#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
3699/**
3700 * Gets the SLAT mode name.
3701 *
3702 * @returns The read-only SLAT mode descriptive string.
3703 * @param enmSlatMode The SLAT mode value.
3704 */
3705VMM_INT_DECL(const char *) PGMGetSlatModeName(PGMSLAT enmSlatMode)
3706{
3707 switch (enmSlatMode)
3708 {
3709 case PGMSLAT_DIRECT: return "Direct";
3710 case PGMSLAT_EPT: return "EPT";
3711 case PGMSLAT_32BIT: return "32-bit";
3712 case PGMSLAT_PAE: return "PAE";
3713 case PGMSLAT_AMD64: return "AMD64";
3714 default: return "Unknown";
3715 }
3716}
3717#endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
3718
3719
3720/**
3721 * Gets the physical address represented in the guest CR3 as PGM sees it.
3722 *
3723 * This is mainly for logging and debugging.
3724 *
3725 * @returns PGM's guest CR3 value.
3726 * @param pVCpu The cross context virtual CPU structure.
3727 */
3728VMM_INT_DECL(RTGCPHYS) PGMGetGuestCR3Phys(PVMCPU pVCpu)
3729{
3730 return pVCpu->pgm.s.GCPhysCR3;
3731}
3732
3733
3734
3735/**
3736 * Notification from CPUM that the EFER.NXE bit has changed.
3737 *
3738 * @param pVCpu The cross context virtual CPU structure of the CPU for
3739 * which EFER changed.
3740 * @param fNxe The new NXE state.
3741 */
3742VMM_INT_DECL(void) PGMNotifyNxeChanged(PVMCPU pVCpu, bool fNxe)
3743{
3744/** @todo VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu); */
3745 Log(("PGMNotifyNxeChanged: fNxe=%RTbool\n", fNxe));
3746
3747 pVCpu->pgm.s.fNoExecuteEnabled = fNxe;
3748 if (fNxe)
3749 {
3750 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
3751 pVCpu->pgm.s.fGstPaeMbzPteMask &= ~X86_PTE_PAE_NX;
3752 pVCpu->pgm.s.fGstPaeMbzPdeMask &= ~X86_PDE_PAE_NX;
3753 pVCpu->pgm.s.fGstPaeMbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
3754 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask - N/A */
3755 pVCpu->pgm.s.fGstAmd64MbzPteMask &= ~X86_PTE_PAE_NX;
3756 pVCpu->pgm.s.fGstAmd64MbzPdeMask &= ~X86_PDE_PAE_NX;
3757 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
3758 pVCpu->pgm.s.fGstAmd64MbzPdpeMask &= ~X86_PDPE_LM_NX;
3759 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask &= ~X86_PDPE_LM_NX;
3760 pVCpu->pgm.s.fGstAmd64MbzPml4eMask &= ~X86_PML4E_NX;
3761
3762 pVCpu->pgm.s.fGst64ShadowedPteMask |= X86_PTE_PAE_NX;
3763 pVCpu->pgm.s.fGst64ShadowedPdeMask |= X86_PDE_PAE_NX;
3764 pVCpu->pgm.s.fGst64ShadowedBigPdeMask |= X86_PDE2M_PAE_NX;
3765 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask |= X86_PDE2M_PAE_NX;
3766 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask |= X86_PDPE_LM_NX;
3767 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask |= X86_PML4E_NX;
3768 }
3769 else
3770 {
3771 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
3772 pVCpu->pgm.s.fGstPaeMbzPteMask |= X86_PTE_PAE_NX;
3773 pVCpu->pgm.s.fGstPaeMbzPdeMask |= X86_PDE_PAE_NX;
3774 pVCpu->pgm.s.fGstPaeMbzBigPdeMask |= X86_PDE2M_PAE_NX;
3775 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask -N/A */
3776 pVCpu->pgm.s.fGstAmd64MbzPteMask |= X86_PTE_PAE_NX;
3777 pVCpu->pgm.s.fGstAmd64MbzPdeMask |= X86_PDE_PAE_NX;
3778 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask |= X86_PDE2M_PAE_NX;
3779 pVCpu->pgm.s.fGstAmd64MbzPdpeMask |= X86_PDPE_LM_NX;
3780 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask |= X86_PDPE_LM_NX;
3781 pVCpu->pgm.s.fGstAmd64MbzPml4eMask |= X86_PML4E_NX;
3782
3783 pVCpu->pgm.s.fGst64ShadowedPteMask &= ~X86_PTE_PAE_NX;
3784 pVCpu->pgm.s.fGst64ShadowedPdeMask &= ~X86_PDE_PAE_NX;
3785 pVCpu->pgm.s.fGst64ShadowedBigPdeMask &= ~X86_PDE2M_PAE_NX;
3786 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask &= ~X86_PDE2M_PAE_NX;
3787 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask &= ~X86_PDPE_LM_NX;
3788 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask &= ~X86_PML4E_NX;
3789 }
3790}
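/*
 * A minimal sketch of how one of the must-be-zero (MBZ) masks maintained above is
 * typically consumed: any masked bit set in a guest PAE PTE indicates a reserved-bit
 * violation. The helper name is invented; the real checks are performed by the guest
 * paging walkers, not by this illustration.
 */
DECLINLINE(bool) examplePgmIsPaePteReservedBitViolation(PVMCPUCC pVCpu, X86PGPAEUINT uGstPte)
{
    return (uGstPte & pVCpu->pgm.s.fGstPaeMbzPteMask) != 0;
}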
3791
3792
3793/**
3794 * Check if any pgm pool pages are marked dirty (not monitored)
3795 *
3796 * @returns true if any pool pages are dirty, false if not.
3797 * @param pVM The cross context VM structure.
3798 */
3799VMMDECL(bool) PGMHasDirtyPages(PVM pVM)
3800{
3801 return pVM->pgm.s.CTX_SUFF(pPool)->cDirtyPages != 0;
3802}
3803
3804
3805/**
3806 * Check if this VCPU currently owns the PGM lock.
3807 *
3808 * @returns bool owner/not owner
3809 * @param pVM The cross context VM structure.
3810 */
3811VMMDECL(bool) PGMIsLockOwner(PVMCC pVM)
3812{
3813 return PDMCritSectIsOwner(pVM, &pVM->pgm.s.CritSectX);
3814}
3815
3816
3817/**
3818 * Enable or disable large page usage
3819 *
3820 * @returns VBox status code.
3821 * @param pVM The cross context VM structure.
3822 * @param fUseLargePages Use/not use large pages
3823 */
3824VMMDECL(int) PGMSetLargePageUsage(PVMCC pVM, bool fUseLargePages)
3825{
3826 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
3827
3828 pVM->pgm.s.fUseLargePages = fUseLargePages;
3829 return VINF_SUCCESS;
3830}
3831
3832
3833/**
3834 * Acquire the PGM lock.
3835 *
3836 * @returns VBox status code
3837 * @param pVM The cross context VM structure.
3838 * @param fVoid Set if the caller cannot handle failure returns.
3839 * @param SRC_POS The source position of the caller (RT_SRC_POS).
3840 */
3841#if defined(VBOX_STRICT) || defined(DOXYGEN_RUNNING)
3842int pgmLockDebug(PVMCC pVM, bool fVoid, RT_SRC_POS_DECL)
3843#else
3844int pgmLock(PVMCC pVM, bool fVoid)
3845#endif
3846{
3847#if defined(VBOX_STRICT)
3848 int rc = PDMCritSectEnterDebug(pVM, &pVM->pgm.s.CritSectX, VINF_SUCCESS, (uintptr_t)ASMReturnAddress(), RT_SRC_POS_ARGS);
3849#else
3850 int rc = PDMCritSectEnter(pVM, &pVM->pgm.s.CritSectX, VINF_SUCCESS);
3851#endif
3852 if (RT_SUCCESS(rc))
3853 return rc;
3854 if (fVoid)
3855 PDM_CRITSECT_RELEASE_ASSERT_RC(pVM, &pVM->pgm.s.CritSectX, rc);
3856 else
3857 AssertRC(rc);
3858 return rc;
3859}
3860
3861
3862/**
3863 * Release the PGM lock.
3864 *
3865 * @param pVM The cross context VM structure.
3866 */
3867void pgmUnlock(PVMCC pVM)
3868{
3869 uint32_t cDeprecatedPageLocks = pVM->pgm.s.cDeprecatedPageLocks;
3870 pVM->pgm.s.cDeprecatedPageLocks = 0;
3871 int rc = PDMCritSectLeave(pVM, &pVM->pgm.s.CritSectX);
3872 if (rc == VINF_SEM_NESTED)
3873 pVM->pgm.s.cDeprecatedPageLocks = cDeprecatedPageLocks;
3874}
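/*
 * A minimal sketch of the usual locking pattern around PGM state, using the
 * PGM_LOCK_VOID/PGM_UNLOCK macros (see PGMAssertCR3 and PGMSetGuestEptPtr below for
 * in-tree uses). The helper itself is invented; it merely samples the pool dirty count
 * under the lock.
 */
static uint32_t examplePgmReadPoolDirtyCount(PVMCC pVM)
{
    PGM_LOCK_VOID(pVM);
    Assert(PGMIsLockOwner(pVM));
    uint32_t const cDirtyPages = pVM->pgm.s.CTX_SUFF(pPool)->cDirtyPages;
    PGM_UNLOCK(pVM);
    return cDirtyPages;
}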
3875
3876
3877#if !defined(IN_R0) || defined(LOG_ENABLED)
3878
3879/** Format handler for PGMPAGE.
3880 * @copydoc FNRTSTRFORMATTYPE */
3881static DECLCALLBACK(size_t) pgmFormatTypeHandlerPage(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
3882 const char *pszType, void const *pvValue,
3883 int cchWidth, int cchPrecision, unsigned fFlags,
3884 void *pvUser)
3885{
3886 size_t cch;
3887 PCPGMPAGE pPage = (PCPGMPAGE)pvValue;
3888 if (RT_VALID_PTR(pPage))
3889 {
3890 char szTmp[64+80];
3891
3892 cch = 0;
3893
3894 /* The single char state stuff. */
3895 static const char s_achPageStates[4] = { 'Z', 'A', 'W', 'S' };
3896 szTmp[cch++] = s_achPageStates[PGM_PAGE_GET_STATE_NA(pPage)];
3897
3898# define IS_PART_INCLUDED(lvl) ( !(fFlags & RTSTR_F_PRECISION) || cchPrecision == (lvl) || cchPrecision >= (lvl)+10 )
3899 if (IS_PART_INCLUDED(5))
3900 {
3901 static const char s_achHandlerStates[4*2] = { '-', 't', 'w', 'a' , '_', 'T', 'W', 'A' };
3902 szTmp[cch++] = s_achHandlerStates[ PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)
3903 | ((uint8_t)PGM_PAGE_IS_HNDL_PHYS_NOT_IN_HM(pPage) << 2)];
3904 }
3905
3906 /* The type. */
3907 if (IS_PART_INCLUDED(4))
3908 {
3909 szTmp[cch++] = ':';
3910 static const char s_achPageTypes[8][4] = { "INV", "RAM", "MI2", "M2A", "SHA", "ROM", "MIO", "BAD" };
3911 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][0];
3912 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][1];
3913 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][2];
3914 }
3915
3916 /* The numbers. */
3917 if (IS_PART_INCLUDED(3))
3918 {
3919 szTmp[cch++] = ':';
3920 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_HCPHYS_NA(pPage), 16, 12, 0, RTSTR_F_ZEROPAD | RTSTR_F_64BIT);
3921 }
3922
3923 if (IS_PART_INCLUDED(2))
3924 {
3925 szTmp[cch++] = ':';
3926 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_PAGEID(pPage), 16, 7, 0, RTSTR_F_ZEROPAD | RTSTR_F_32BIT);
3927 }
3928
3929 if (IS_PART_INCLUDED(6))
3930 {
3931 szTmp[cch++] = ':';
3932 static const char s_achRefs[4] = { '-', 'U', '!', 'L' };
3933 szTmp[cch++] = s_achRefs[PGM_PAGE_GET_TD_CREFS_NA(pPage)];
3934 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_TD_IDX_NA(pPage), 16, 4, 0, RTSTR_F_ZEROPAD | RTSTR_F_16BIT);
3935 }
3936# undef IS_PART_INCLUDED
3937
3938 cch = pfnOutput(pvArgOutput, szTmp, cch);
3939#if 0
3940 size_t cch2 = 0;
3941 szTmp[cch2++] = '(';
3942 cch2 += RTStrFormatNumber(&szTmp[cch2], (uintptr_t)pPage, 16, 18, 0, RTSTR_F_SPECIAL | RTSTR_F_ZEROPAD | RTSTR_F_64BIT);
3943 szTmp[cch2++] = ')';
3944 szTmp[cch2] = '\0';
3945 cch += pfnOutput(pvArgOutput, szTmp, cch2);
3946#endif
3947 }
3948 else
3949 cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmpage-ptr>"));
3950 NOREF(pszType); NOREF(cchWidth); NOREF(pvUser);
3951 return cch;
3952}
3953
3954
3955/** Format handler for PGMRAMRANGE.
3956 * @copydoc FNRTSTRFORMATTYPE */
3957static DECLCALLBACK(size_t) pgmFormatTypeHandlerRamRange(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
3958 const char *pszType, void const *pvValue,
3959 int cchWidth, int cchPrecision, unsigned fFlags,
3960 void *pvUser)
3961{
3962 size_t cch;
3963 PGMRAMRANGE const *pRam = (PGMRAMRANGE const *)pvValue;
3964 if (RT_VALID_PTR(pRam))
3965 {
3966 char szTmp[80];
3967 cch = RTStrPrintf(szTmp, sizeof(szTmp), "%RGp-%RGp", pRam->GCPhys, pRam->GCPhysLast);
3968 cch = pfnOutput(pvArgOutput, szTmp, cch);
3969 }
3970 else
3971 cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmramrange-ptr>"));
3972 NOREF(pszType); NOREF(cchWidth); NOREF(cchPrecision); NOREF(pvUser); NOREF(fFlags);
3973 return cch;
3974}
3975
3976/** Format type handlers to be registered/deregistered. */
3977static const struct
3978{
3979 char szType[24];
3980 PFNRTSTRFORMATTYPE pfnHandler;
3981} g_aPgmFormatTypes[] =
3982{
3983 { "pgmpage", pgmFormatTypeHandlerPage },
3984 { "pgmramrange", pgmFormatTypeHandlerRamRange }
3985};
3986
3987#endif /* !IN_R0 || LOG_ENABLED */
3988
3989/**
3990 * Registers the global string format types.
3991 *
3992 * This should be called at module load time or in some other manner that ensures
3993 * that it's called exactly one time.
3994 *
3995 * @returns IPRT status code on RTStrFormatTypeRegister failure.
3996 */
3997VMMDECL(int) PGMRegisterStringFormatTypes(void)
3998{
3999#if !defined(IN_R0) || defined(LOG_ENABLED)
4000 int rc = VINF_SUCCESS;
4001 unsigned i;
4002 for (i = 0; RT_SUCCESS(rc) && i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
4003 {
4004 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
4005# ifdef IN_RING0
4006 if (rc == VERR_ALREADY_EXISTS)
4007 {
4008 /* in case of cleanup failure in ring-0 */
4009 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
4010 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
4011 }
4012# endif
4013 }
4014 if (RT_FAILURE(rc))
4015 while (i-- > 0)
4016 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
4017
4018 return rc;
4019#else
4020 return VINF_SUCCESS;
4021#endif
4022}
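/*
 * A minimal logging sketch for the format types registered above. IPRT makes types
 * registered via RTStrFormatTypeRegister available through the %R[type] directive, so
 * PGMPAGE and PGMRAMRANGE pointers can be logged directly. The wrapper function is
 * invented for illustration.
 */
static void examplePgmLogPageAndRange(PCPGMPAGE pPage, PGMRAMRANGE const *pRam)
{
    Log(("page=%R[pgmpage] ram=%R[pgmramrange]\n", pPage, pRam));
    NOREF(pPage); NOREF(pRam); /* Keep builds without logging warning-free. */
}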
4023
4024
4025/**
4026 * Deregisters the global string format types.
4027 *
4028 * This should be called at module unload time or in some other manner that
4029 * ensures that it's called exactly one time.
4030 */
4031VMMDECL(void) PGMDeregisterStringFormatTypes(void)
4032{
4033#if !defined(IN_R0) || defined(LOG_ENABLED)
4034 for (unsigned i = 0; i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
4035 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
4036#endif
4037}
4038
4039
4040#ifdef VBOX_STRICT
4041/**
4042 * Asserts that everything related to the guest CR3 is correctly shadowed.
4043 *
4044 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
4045 * and assert the correctness of the guest CR3 mapping before asserting that the
4046 * shadow page tables are in sync with the guest page tables.
4047 *
4048 * @returns Number of conflicts.
4049 * @param pVM The cross context VM structure.
4050 * @param pVCpu The cross context virtual CPU structure.
4051 * @param cr3 The current guest CR3 register value.
4052 * @param cr4 The current guest CR4 register value.
4053 */
4054VMMDECL(unsigned) PGMAssertCR3(PVMCC pVM, PVMCPUCC pVCpu, uint64_t cr3, uint64_t cr4)
4055{
4056 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
4057
4058 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
4059 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), -VERR_PGM_MODE_IPE);
4060 AssertReturn(g_aPgmBothModeData[idxBth].pfnAssertCR3, -VERR_PGM_MODE_IPE);
4061
4062 PGM_LOCK_VOID(pVM);
4063 unsigned cErrors = g_aPgmBothModeData[idxBth].pfnAssertCR3(pVCpu, cr3, cr4, 0, ~(RTGCPTR)0);
4064 PGM_UNLOCK(pVM);
4065
4066 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
4067 return cErrors;
4068}
4069#endif /* VBOX_STRICT */
4070
4071
4072/**
4073 * Updates PGM's copy of the guest's EPT pointer.
4074 *
4075 * @param pVCpu The cross context virtual CPU structure.
4076 * @param uEptPtr The EPT pointer.
4077 *
4078 * @remarks This can be called as part of VM-entry so we might be in the midst of
4079 * switching to VMX non-root mode.
4080 */
4081VMM_INT_DECL(void) PGMSetGuestEptPtr(PVMCPUCC pVCpu, uint64_t uEptPtr)
4082{
4083 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4084 PGM_LOCK_VOID(pVM);
4085 pVCpu->pgm.s.uEptPtr = uEptPtr;
4086 pVCpu->pgm.s.pGstEptPml4R3 = 0;
4087 pVCpu->pgm.s.pGstEptPml4R0 = 0;
4088 PGM_UNLOCK(pVM);
4089}
4090
4091#ifdef PGM_WITH_PAGE_ZEROING_DETECTION
4092
4093/**
4094 * Helper for checking whether XMM0 is zero, possibly retrieving external state.
4095 */
4096static bool pgmHandlePageZeroingIsXmm0Zero(PVMCPUCC pVCpu, PCPUMCTX pCtx)
4097{
4098 if (pCtx->fExtrn & CPUMCTX_EXTRN_SSE_AVX)
4099 {
4100 int rc = CPUMImportGuestStateOnDemand(pVCpu, CPUMCTX_EXTRN_SSE_AVX);
4101 AssertRCReturn(rc, false);
4102 }
4103 return pCtx->XState.x87.aXMM[0].au64[0] == 0
4104 && pCtx->XState.x87.aXMM[0].au64[1] == 0
4105 && pCtx->XState.x87.aXMM[0].au64[2] == 0
4106 && pCtx->XState.x87.aXMM[0].au64[3] == 0;
4107}
4108
4109
4110/**
4111 * Helper for comparing opcode bytes.
4112 */
4113static bool pgmHandlePageZeroingMatchOpcodes(PVMCPUCC pVCpu, PCPUMCTX pCtx, uint8_t const *pbOpcodes, uint32_t cbOpcodes)
4114{
4115 uint8_t abTmp[64];
4116 AssertMsgReturn(cbOpcodes <= sizeof(abTmp), ("cbOpcodes=%#x\n", cbOpcodes), false);
4117 int rc = PGMPhysSimpleReadGCPtr(pVCpu, abTmp, pCtx->rip + pCtx->cs.u64Base, cbOpcodes);
4118 if (RT_SUCCESS(rc))
4119 return memcmp(abTmp, pbOpcodes, cbOpcodes) == 0;
4120 return false;
4121}
4122
4123
4124/**
4125 * Called on faults on ZERO pages to check if the guest is trying to zero it.
4126 *
4127 * Since it's a waste of time to zero a ZERO page and it will cause an
4128 * unnecessary page allocation, we'd like to detect and avoid this.
4129 * If any known page zeroing code is detected, this function will update the CPU
4130 * state to pretend the page was zeroed by the code.
4131 *
4132 * @returns true if page zeroing code was detected and CPU state updated to skip
4133 * the code.
4134 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
4135 * @param pCtx The guest register context.
4136 */
4137static bool pgmHandlePageZeroingCode(PVMCPUCC pVCpu, PCPUMCTX pCtx)
4138{
4139 CPUMCTX_ASSERT_NOT_EXTRN(pCtx, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER);
4140
4141 /*
4142 * Sort by mode first.
4143 */
4144 if (CPUMIsGuestInLongModeEx(pCtx))
4145 {
4146 if (CPUMIsGuestIn64BitCodeEx(pCtx))
4147 {
4148 /*
4149 * 64-bit code.
4150 */
4151 Log9(("pgmHandlePageZeroingCode: not page zeroing - 64-bit\n"));
4152 }
4153 else if (pCtx->cs.Attr.n.u1DefBig)
4154 Log9(("pgmHandlePageZeroingCode: not page zeroing - 32-bit lm\n"));
4155 else
4156 Log9(("pgmHandlePageZeroingCode: not page zeroing - 16-bit lm\n"));
4157 }
4158 else if (CPUMIsGuestInPagedProtectedModeEx(pCtx))
4159 {
4160 if (pCtx->cs.Attr.n.u1DefBig)
4161 {
4162 /*
4163 * 32-bit paged protected mode code.
4164 */
4165 CPUMCTX_ASSERT_NOT_EXTRN(pCtx, CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX
4166 | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RBP | CPUMCTX_EXTRN_RSI | CPUMCTX_EXTRN_RDI
4167 | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
4168
4169 /* 1. Generic 'rep stosd' detection. */
4170 static uint8_t const s_abRepStosD[] = { 0xf3, 0xab };
4171 if ( pCtx->eax == 0
4172 && pCtx->ecx == X86_PAGE_SIZE / 4
4173 && !(pCtx->edi & X86_PAGE_OFFSET_MASK)
4174 && pgmHandlePageZeroingMatchOpcodes(pVCpu, pCtx, s_abRepStosD, sizeof(s_abRepStosD)))
4175 {
4176 pCtx->ecx = 0;
4177 pCtx->edi += X86_PAGE_SIZE;
4178 Log9(("pgmHandlePageZeroingCode: REP STOSD: eip=%RX32 -> %RX32\n", pCtx->eip, pCtx->eip + sizeof(s_abRepStosD)));
4179 pCtx->eip += sizeof(s_abRepStosD);
4180 return true;
4181 }
4182
4183 /* 2. Windows 2000 sp4 KiXMMIZeroPageNoSave loop code: */
4184 static uint8_t const s_abW2kSp4XmmZero[] =
4185 {
4186 0x0f, 0x2b, 0x01,
4187 0x0f, 0x2b, 0x41, 0x10,
4188 0x0f, 0x2b, 0x41, 0x20,
4189 0x0f, 0x2b, 0x41, 0x30,
4190 0x83, 0xc1, 0x40,
4191 0x48,
4192 0x75, 0xeb,
4193 };
4194 if ( pCtx->eax == 64
4195 && !(pCtx->ecx & X86_PAGE_OFFSET_MASK)
4196 && pgmHandlePageZeroingMatchOpcodes(pVCpu, pCtx, s_abW2kSp4XmmZero, sizeof(s_abW2kSp4XmmZero))
4197 && pgmHandlePageZeroingIsXmm0Zero(pVCpu, pCtx))
4198 {
4199 pCtx->eax = 1;
4200 pCtx->ecx += X86_PAGE_SIZE;
4201 Log9(("pgmHandlePageZeroingCode: w2k sp4 xmm: eip=%RX32 -> %RX32\n",
4202 pCtx->eip, pCtx->eip + sizeof(s_abW2kSp4XmmZero) - 3));
4203 pCtx->eip += sizeof(s_abW2kSp4XmmZero) - 3;
4204 return true;
4205 }
4206 Log9(("pgmHandlePageZeroingCode: not page zeroing - 32-bit\n"));
4207 }
4208 else if (!pCtx->eflags.Bits.u1VM)
4209 Log9(("pgmHandlePageZeroingCode: not page zeroing - 16-bit\n"));
4210 else
4211 Log9(("pgmHandlePageZeroingCode: not page zeroing - v86\n"));
4212 }
4213 return false;
4214}
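/*
 * A minimal sketch (kept inside the same PGM_WITH_PAGE_ZEROING_DETECTION guard) of how a
 * write fault on a shared ZERO page could consult the detection above: when known zeroing
 * code is recognized, the guest state has already been advanced past it and the caller can
 * keep the shared ZERO page instead of allocating a private one. The handler name and its
 * bool contract are assumptions for illustration.
 */
static bool examplePgmTrySkipZeroPageWrite(PVMCPUCC pVCpu, PCPUMCTX pCtx)
{
    if (pgmHandlePageZeroingCode(pVCpu, pCtx))
    {
        Log9(("examplePgmTrySkipZeroPageWrite: zeroing code recognized, keeping the ZERO page\n"));
        return true;    /* No page allocation needed. */
    }
    return false;       /* Let the regular write-fault path allocate and populate a page. */
}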
4215
4216#endif /* PGM_WITH_PAGE_ZEROING_DETECTION */