VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp @ 73199

Last change on this file was r73199, checked in by vboxsync, 7 years ago:

PGM: Working on eliminating PGMMODEDATA and the corresponding PGMCPU section so we can do mode switching in ring-0. This first part deals with guest mode specific pointers. bugref:9044

File size: 102.8 KB
1/* $Id: PGMAll.cpp 73199 2018-07-18 12:13:55Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor - All context code.
4 */
5
6/*
7 * Copyright (C) 2006-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_PGM
23#include <VBox/vmm/pgm.h>
24#include <VBox/vmm/cpum.h>
25#include <VBox/vmm/selm.h>
26#include <VBox/vmm/iem.h>
27#include <VBox/vmm/iom.h>
28#include <VBox/sup.h>
29#include <VBox/vmm/mm.h>
30#include <VBox/vmm/stam.h>
31#include <VBox/vmm/csam.h>
32#include <VBox/vmm/patm.h>
33#include <VBox/vmm/trpm.h>
34#ifdef VBOX_WITH_REM
35# include <VBox/vmm/rem.h>
36#endif
37#include <VBox/vmm/em.h>
38#include <VBox/vmm/hm.h>
39#include <VBox/vmm/hm_vmx.h>
40#include "PGMInternal.h"
41#include <VBox/vmm/vm.h>
42#include "PGMInline.h"
43#include <iprt/assert.h>
44#include <iprt/asm-amd64-x86.h>
45#include <iprt/string.h>
46#include <VBox/log.h>
47#include <VBox/param.h>
48#include <VBox/err.h>
49
50
51/*********************************************************************************************************************************
52* Structures and Typedefs *
53*********************************************************************************************************************************/
54/**
55 * State structure for PGM_GST_NAME(HandlerVirtualUpdate) that's
56 * passed to PGM_GST_NAME(VirtHandlerUpdateOne) during enumeration.
57 */
58typedef struct PGMHVUSTATE
59{
60 /** Pointer to the VM. */
61 PVM pVM;
62 /** Pointer to the VMCPU. */
63 PVMCPU pVCpu;
64 /** The todo flags. */
65 RTUINT fTodo;
66 /** The CR4 register value. */
67 uint32_t cr4;
68} PGMHVUSTATE, *PPGMHVUSTATE;
69
70
71/*********************************************************************************************************************************
72* Internal Functions *
73*********************************************************************************************************************************/
74DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
75DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPU pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde);
76#ifndef IN_RC
77static int pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD);
78static int pgmShwGetEPTPDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD);
79#endif
80
81
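/*
 * Editorial note (not part of the original file): the blocks below instantiate the
 * paging-mode templates.  PGMAllShw.h, PGMAllGst.h and PGMAllBth.h are included once
 * per PGM_SHW_TYPE / PGM_GST_TYPE combination, and the PGM_SHW_NAME / PGM_GST_NAME /
 * PGM_BTH_NAME macros mangle the worker names so each shadow/guest mode pair gets its
 * own set of functions, later dispatched via PGM_SHW_PFN, PGM_BTH_PFN and the
 * g_aPgmGuestModeData table further down in this file.
 */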
82/*
83 * Shadow - 32-bit mode
84 */
85#define PGM_SHW_TYPE PGM_TYPE_32BIT
86#define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
87#include "PGMAllShw.h"
88
89/* Guest - real mode */
90#define PGM_GST_TYPE PGM_TYPE_REAL
91#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
92#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
93#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
94#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
95#include "PGMGstDefs.h"
96#include "PGMAllGst.h"
97#include "PGMAllBth.h"
98#undef BTH_PGMPOOLKIND_PT_FOR_PT
99#undef BTH_PGMPOOLKIND_ROOT
100#undef PGM_BTH_NAME
101#undef PGM_GST_TYPE
102#undef PGM_GST_NAME
103
104/* Guest - protected mode */
105#define PGM_GST_TYPE PGM_TYPE_PROT
106#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
107#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
108#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
109#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
110#include "PGMGstDefs.h"
111#include "PGMAllGst.h"
112#include "PGMAllBth.h"
113#undef BTH_PGMPOOLKIND_PT_FOR_PT
114#undef BTH_PGMPOOLKIND_ROOT
115#undef PGM_BTH_NAME
116#undef PGM_GST_TYPE
117#undef PGM_GST_NAME
118
119/* Guest - 32-bit mode */
120#define PGM_GST_TYPE PGM_TYPE_32BIT
121#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
122#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
123#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
124#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
125#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD
126#include "PGMGstDefs.h"
127#include "PGMAllGst.h"
128#include "PGMAllBth.h"
129#undef BTH_PGMPOOLKIND_PT_FOR_BIG
130#undef BTH_PGMPOOLKIND_PT_FOR_PT
131#undef BTH_PGMPOOLKIND_ROOT
132#undef PGM_BTH_NAME
133#undef PGM_GST_TYPE
134#undef PGM_GST_NAME
135
136#undef PGM_SHW_TYPE
137#undef PGM_SHW_NAME
138
139
140/*
141 * Shadow - PAE mode
142 */
143#define PGM_SHW_TYPE PGM_TYPE_PAE
144#define PGM_SHW_NAME(name) PGM_SHW_NAME_PAE(name)
145#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
146#include "PGMAllShw.h"
147
148/* Guest - real mode */
149#define PGM_GST_TYPE PGM_TYPE_REAL
150#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
151#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
152#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
153#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
154#include "PGMGstDefs.h"
155#include "PGMAllBth.h"
156#undef BTH_PGMPOOLKIND_PT_FOR_PT
157#undef BTH_PGMPOOLKIND_ROOT
158#undef PGM_BTH_NAME
159#undef PGM_GST_TYPE
160#undef PGM_GST_NAME
161
162/* Guest - protected mode */
163#define PGM_GST_TYPE PGM_TYPE_PROT
164#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
165#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
166#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
167#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
168#include "PGMGstDefs.h"
169#include "PGMAllBth.h"
170#undef BTH_PGMPOOLKIND_PT_FOR_PT
171#undef BTH_PGMPOOLKIND_ROOT
172#undef PGM_BTH_NAME
173#undef PGM_GST_TYPE
174#undef PGM_GST_NAME
175
176/* Guest - 32-bit mode */
177#define PGM_GST_TYPE PGM_TYPE_32BIT
178#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
179#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
180#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
181#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
182#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_FOR_32BIT
183#include "PGMGstDefs.h"
184#include "PGMAllBth.h"
185#undef BTH_PGMPOOLKIND_PT_FOR_BIG
186#undef BTH_PGMPOOLKIND_PT_FOR_PT
187#undef BTH_PGMPOOLKIND_ROOT
188#undef PGM_BTH_NAME
189#undef PGM_GST_TYPE
190#undef PGM_GST_NAME
191
192
193/* Guest - PAE mode */
194#define PGM_GST_TYPE PGM_TYPE_PAE
195#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
196#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
197#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
198#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
199#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT
200#include "PGMGstDefs.h"
201#include "PGMAllGst.h"
202#include "PGMAllBth.h"
203#undef BTH_PGMPOOLKIND_PT_FOR_BIG
204#undef BTH_PGMPOOLKIND_PT_FOR_PT
205#undef BTH_PGMPOOLKIND_ROOT
206#undef PGM_BTH_NAME
207#undef PGM_GST_TYPE
208#undef PGM_GST_NAME
209
210#undef PGM_SHW_TYPE
211#undef PGM_SHW_NAME
212
213
214#ifndef IN_RC /* AMD64 implies VT-x/AMD-V */
215/*
216 * Shadow - AMD64 mode
217 */
218# define PGM_SHW_TYPE PGM_TYPE_AMD64
219# define PGM_SHW_NAME(name) PGM_SHW_NAME_AMD64(name)
220# include "PGMAllShw.h"
221
222/* Guest - protected mode (only used for AMD-V nested paging in 64-bit mode) */
223# define PGM_GST_TYPE PGM_TYPE_PROT
224# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
225# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
226# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
227# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PD_PHYS
228# include "PGMGstDefs.h"
229# include "PGMAllBth.h"
230# undef BTH_PGMPOOLKIND_PT_FOR_PT
231# undef BTH_PGMPOOLKIND_ROOT
232# undef PGM_BTH_NAME
233# undef PGM_GST_TYPE
234# undef PGM_GST_NAME
235
236# ifdef VBOX_WITH_64_BITS_GUESTS
237/* Guest - AMD64 mode */
238# define PGM_GST_TYPE PGM_TYPE_AMD64
239# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
240# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
241# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
242# define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
243# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_64BIT_PML4
244# include "PGMGstDefs.h"
245# include "PGMAllGst.h"
246# include "PGMAllBth.h"
247# undef BTH_PGMPOOLKIND_PT_FOR_BIG
248# undef BTH_PGMPOOLKIND_PT_FOR_PT
249# undef BTH_PGMPOOLKIND_ROOT
250# undef PGM_BTH_NAME
251# undef PGM_GST_TYPE
252# undef PGM_GST_NAME
253# endif /* VBOX_WITH_64_BITS_GUESTS */
254
255# undef PGM_SHW_TYPE
256# undef PGM_SHW_NAME
257
258
259/*
260 * Shadow - Nested paging mode
261 */
262# define PGM_SHW_TYPE PGM_TYPE_NESTED
263# define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED(name)
264# include "PGMAllShw.h"
265
266/* Guest - real mode */
267# define PGM_GST_TYPE PGM_TYPE_REAL
268# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
269# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_REAL(name)
270# include "PGMGstDefs.h"
271# include "PGMAllBth.h"
272# undef PGM_BTH_NAME
273# undef PGM_GST_TYPE
274# undef PGM_GST_NAME
275
276/* Guest - protected mode */
277# define PGM_GST_TYPE PGM_TYPE_PROT
278# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
279# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PROT(name)
280# include "PGMGstDefs.h"
281# include "PGMAllBth.h"
282# undef PGM_BTH_NAME
283# undef PGM_GST_TYPE
284# undef PGM_GST_NAME
285
286/* Guest - 32-bit mode */
287# define PGM_GST_TYPE PGM_TYPE_32BIT
288# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
289# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT(name)
290# include "PGMGstDefs.h"
291# include "PGMAllBth.h"
292# undef PGM_BTH_NAME
293# undef PGM_GST_TYPE
294# undef PGM_GST_NAME
295
296/* Guest - PAE mode */
297# define PGM_GST_TYPE PGM_TYPE_PAE
298# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
299# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE(name)
300# include "PGMGstDefs.h"
301# include "PGMAllBth.h"
302# undef PGM_BTH_NAME
303# undef PGM_GST_TYPE
304# undef PGM_GST_NAME
305
306# ifdef VBOX_WITH_64_BITS_GUESTS
307/* Guest - AMD64 mode */
308# define PGM_GST_TYPE PGM_TYPE_AMD64
309# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
310# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64(name)
311# include "PGMGstDefs.h"
312# include "PGMAllBth.h"
313# undef PGM_BTH_NAME
314# undef PGM_GST_TYPE
315# undef PGM_GST_NAME
316# endif /* VBOX_WITH_64_BITS_GUESTS */
317
318# undef PGM_SHW_TYPE
319# undef PGM_SHW_NAME
320
321
322/*
323 * Shadow - EPT
324 */
325# define PGM_SHW_TYPE PGM_TYPE_EPT
326# define PGM_SHW_NAME(name) PGM_SHW_NAME_EPT(name)
327# include "PGMAllShw.h"
328
329/* Guest - real mode */
330# define PGM_GST_TYPE PGM_TYPE_REAL
331# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
332# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_REAL(name)
333# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
334# include "PGMGstDefs.h"
335# include "PGMAllBth.h"
336# undef BTH_PGMPOOLKIND_PT_FOR_PT
337# undef PGM_BTH_NAME
338# undef PGM_GST_TYPE
339# undef PGM_GST_NAME
340
341/* Guest - protected mode */
342# define PGM_GST_TYPE PGM_TYPE_PROT
343# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
344# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
345# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
346# include "PGMGstDefs.h"
347# include "PGMAllBth.h"
348# undef BTH_PGMPOOLKIND_PT_FOR_PT
349# undef PGM_BTH_NAME
350# undef PGM_GST_TYPE
351# undef PGM_GST_NAME
352
353/* Guest - 32-bit mode */
354# define PGM_GST_TYPE PGM_TYPE_32BIT
355# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
356# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_32BIT(name)
357# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
358# include "PGMGstDefs.h"
359# include "PGMAllBth.h"
360# undef BTH_PGMPOOLKIND_PT_FOR_PT
361# undef PGM_BTH_NAME
362# undef PGM_GST_TYPE
363# undef PGM_GST_NAME
364
365/* Guest - PAE mode */
366# define PGM_GST_TYPE PGM_TYPE_PAE
367# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
368# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PAE(name)
369# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
370# include "PGMGstDefs.h"
371# include "PGMAllBth.h"
372# undef BTH_PGMPOOLKIND_PT_FOR_PT
373# undef PGM_BTH_NAME
374# undef PGM_GST_TYPE
375# undef PGM_GST_NAME
376
377# ifdef VBOX_WITH_64_BITS_GUESTS
378/* Guest - AMD64 mode */
379# define PGM_GST_TYPE PGM_TYPE_AMD64
380# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
381# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_AMD64(name)
382# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
383# include "PGMGstDefs.h"
384# include "PGMAllBth.h"
385# undef BTH_PGMPOOLKIND_PT_FOR_PT
386# undef PGM_BTH_NAME
387# undef PGM_GST_TYPE
388# undef PGM_GST_NAME
389# endif /* VBOX_WITH_64_BITS_GUESTS */
390
391# undef PGM_SHW_TYPE
392# undef PGM_SHW_NAME
393
394#endif /* !IN_RC */
395
396
397/**
398 * Guest mode data array.
399 */
400PGMMODEDATAGST const g_aPgmGuestModeData[PGM_GUEST_MODE_DATA_ARRAY_SIZE] =
401{
402 /* NULL entry. */
403 { UINT32_MAX, NULL, NULL, NULL, NULL, NULL },
404 {
405 PGM_TYPE_REAL,
406 PGM_GST_NAME_REAL(GetPage),
407 PGM_GST_NAME_REAL(ModifyPage),
408 PGM_GST_NAME_REAL(GetPDE),
409#ifdef IN_RING3
410 PGM_GST_NAME_REAL(Enter),
411 PGM_GST_NAME_REAL(Exit),
412 PGM_GST_NAME_REAL(Relocate),
413#else
414 NULL, NULL, NULL,
415#endif
416 },
417 {
418 PGM_TYPE_PROT,
419 PGM_GST_NAME_PROT(GetPage),
420 PGM_GST_NAME_PROT(ModifyPage),
421 PGM_GST_NAME_PROT(GetPDE),
422#ifdef IN_RING3
423 PGM_GST_NAME_PROT(Enter),
424 PGM_GST_NAME_PROT(Exit),
425 PGM_GST_NAME_PROT(Relocate),
426#else
427 NULL, NULL, NULL,
428#endif
429 },
430 {
431 PGM_TYPE_32BIT,
432 PGM_GST_NAME_32BIT(GetPage),
433 PGM_GST_NAME_32BIT(ModifyPage),
434 PGM_GST_NAME_32BIT(GetPDE),
435#ifdef IN_RING3
436 PGM_GST_NAME_32BIT(Enter),
437 PGM_GST_NAME_32BIT(Exit),
438 PGM_GST_NAME_32BIT(Relocate),
439#else
440 NULL, NULL, NULL,
441#endif
442 },
443 {
444 PGM_TYPE_PAE,
445 PGM_GST_NAME_PAE(GetPage),
446 PGM_GST_NAME_PAE(ModifyPage),
447 PGM_GST_NAME_PAE(GetPDE),
448#ifdef IN_RING3
449 PGM_GST_NAME_PAE(Enter),
450 PGM_GST_NAME_PAE(Exit),
451 PGM_GST_NAME_PAE(Relocate),
452#else
453 NULL, NULL, NULL,
454#endif
455 },
456#if defined(VBOX_WITH_64_BITS_GUESTS) && !defined(IN_RC)
457 {
458 PGM_TYPE_AMD64,
459 PGM_GST_NAME_AMD64(GetPage),
460 PGM_GST_NAME_AMD64(ModifyPage),
461 PGM_GST_NAME_AMD64(GetPDE),
462# ifdef IN_RING3
463 PGM_GST_NAME_AMD64(Enter),
464 PGM_GST_NAME_AMD64(Exit),
465 PGM_GST_NAME_AMD64(Relocate),
466# else
467 NULL, NULL, NULL,
468# endif
469 },
470#endif
471};
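/*
 * Editorial note (not part of the original file): this table is indexed by
 * pVCpu->pgm.s.idxGuestModeData and gives the context-independent code a single place
 * to look up the guest-mode specific workers; see PGMGstGetPage() and
 * PGMGstModifyPage() below for the bounds-checked dispatch pattern, e.g.
 *   g_aPgmGuestModeData[pVCpu->pgm.s.idxGuestModeData].pfnGetPage(pVCpu, GCPtr, pfFlags, pGCPhys);
 * The Enter/Exit/Relocate members are only available in ring-3 builds and are NULL in
 * the other contexts, as the #ifdefs above show.
 */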
472
473
474#ifndef IN_RING3
475/**
476 * #PF Handler.
477 *
478 * @returns VBox status code (appropriate for trap handling and GC return).
479 * @param pVCpu The cross context virtual CPU structure.
480 * @param uErr The trap error code.
481 * @param pRegFrame Trap register frame.
482 * @param pvFault The fault address.
483 */
484VMMDECL(int) PGMTrap0eHandler(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
485{
486 PVM pVM = pVCpu->CTX_SUFF(pVM);
487
488 Log(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv eip=%04x:%RGv cr3=%RGp\n", uErr, pvFault, pRegFrame->cs.Sel, (RTGCPTR)pRegFrame->rip, (RTGCPHYS)CPUMGetGuestCR3(pVCpu)));
489 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0e, a);
490 STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );
491
492
493#ifdef VBOX_WITH_STATISTICS
494 /*
495 * Error code stats.
496 */
497 if (uErr & X86_TRAP_PF_US)
498 {
499 if (!(uErr & X86_TRAP_PF_P))
500 {
501 if (uErr & X86_TRAP_PF_RW)
502 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentWrite);
503 else
504 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentRead);
505 }
506 else if (uErr & X86_TRAP_PF_RW)
507 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSWrite);
508 else if (uErr & X86_TRAP_PF_RSVD)
509 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSReserved);
510 else if (uErr & X86_TRAP_PF_ID)
511 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNXE);
512 else
513 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSRead);
514 }
515 else
516 { /* Supervisor */
517 if (!(uErr & X86_TRAP_PF_P))
518 {
519 if (uErr & X86_TRAP_PF_RW)
520 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentWrite);
521 else
522 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentRead);
523 }
524 else if (uErr & X86_TRAP_PF_RW)
525 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVWrite);
526 else if (uErr & X86_TRAP_PF_ID)
527 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSNXE);
528 else if (uErr & X86_TRAP_PF_RSVD)
529 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVReserved);
530 }
531#endif /* VBOX_WITH_STATISTICS */
532
533 /*
534 * Call the worker.
535 */
536 bool fLockTaken = false;
537 int rc = PGM_BTH_PFN(Trap0eHandler, pVCpu)(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken);
538 if (fLockTaken)
539 {
540 PGM_LOCK_ASSERT_OWNER(pVM);
541 pgmUnlock(pVM);
542 }
543 LogFlow(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv rc=%Rrc\n", uErr, pvFault, rc));
544
545 /*
546 * Return code tweaks.
547 */
548 if (rc != VINF_SUCCESS)
549 {
550 if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
551 rc = VINF_SUCCESS;
552
553# ifdef IN_RING0
554 /* Note: hack alert for a difficult-to-reproduce problem. */
555 if ( rc == VERR_PAGE_NOT_PRESENT /* SMP only ; disassembly might fail. */
556 || rc == VERR_PAGE_TABLE_NOT_PRESENT /* seen with UNI & SMP */
557 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT /* seen with SMP */
558 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT) /* precaution */
559 {
560 Log(("WARNING: Unexpected VERR_PAGE_TABLE_NOT_PRESENT (%d) for page fault at %RGv error code %x (rip=%RGv)\n", rc, pvFault, uErr, pRegFrame->rip));
561 /* Some kind of inconsistency in the SMP case; it's safe to just execute the instruction again; not sure about single VCPU VMs though. */
562 rc = VINF_SUCCESS;
563 }
564# endif
565 }
566
567 STAM_STATS({ if (rc == VINF_EM_RAW_GUEST_TRAP) STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eGuestPF); });
568 STAM_STATS({ if (!pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution))
569 pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2Misc; });
570 STAM_PROFILE_STOP_EX(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0e, pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution), a);
571 return rc;
572}
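/*
 * Editorial illustration (not part of the original file): a hypothetical, heavily
 * simplified caller of PGMTrap0eHandler().  The function name below is made up for
 * the example; the real callers live in the trap/VM-exit handling code and pass the
 * architectural error code and fault address from the exit/trap frame.
 */
# if 0 /* illustration only */
static int pgmSampleHandleGuestPageFault(PVMCPU pVCpu, uint32_t uErrCode, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrFault)
{
    /* Let PGM try to resolve the fault by syncing the shadow/nested page tables. */
    int rc = PGMTrap0eHandler(pVCpu, uErrCode, pCtxCore, GCPtrFault);
    /* VINF_SUCCESS: the tables were fixed up and the faulting instruction can simply
       be restarted.  Anything else is a status/scheduling hint for the caller. */
    return rc;
}
# endif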
573#endif /* !IN_RING3 */
574
575
576/**
577 * Prefetch a page
578 *
579 * Typically used to sync commonly used pages before entering raw mode
580 * after a CR3 reload.
581 *
582 * @returns VBox status code suitable for scheduling.
583 * @retval VINF_SUCCESS on success.
584 * @retval VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
585 * @param pVCpu The cross context virtual CPU structure.
586 * @param GCPtrPage Page to invalidate.
587 */
588VMMDECL(int) PGMPrefetchPage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
589{
590 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Prefetch), a);
591 int rc = PGM_BTH_PFN(PrefetchPage, pVCpu)(pVCpu, GCPtrPage);
592 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Prefetch), a);
593 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
594 return rc;
595}
596
597
598/**
599 * Gets the mapping corresponding to the specified address (if any).
600 *
601 * @returns Pointer to the mapping.
602 * @returns NULL if no mapping was found.
603 *
604 * @param pVM The cross context VM structure.
605 * @param GCPtr The guest context pointer.
606 */
607PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr)
608{
609 PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
610 while (pMapping)
611 {
612 if ((uintptr_t)GCPtr < (uintptr_t)pMapping->GCPtr)
613 break;
614 if ((uintptr_t)GCPtr - (uintptr_t)pMapping->GCPtr < pMapping->cb)
615 return pMapping;
616 pMapping = pMapping->CTX_SUFF(pNext);
617 }
618 return NULL;
619}
620
621
622/**
623 * Verifies a range of pages for read or write access
624 *
625 * Only checks the guest's page tables
626 *
627 * @returns VBox status code.
628 * @param pVCpu The cross context virtual CPU structure.
629 * @param Addr Guest virtual address to check
630 * @param cbSize Access size
631 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
632 * @remarks Currently not in use.
633 */
634VMMDECL(int) PGMIsValidAccess(PVMCPU pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
635{
636 /*
637 * Validate input.
638 */
639 if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
640 {
641 AssertMsgFailed(("PGMIsValidAccess: invalid access type %08x\n", fAccess));
642 return VERR_INVALID_PARAMETER;
643 }
644
645 uint64_t fPage;
646 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)Addr, &fPage, NULL);
647 if (RT_FAILURE(rc))
648 {
649 Log(("PGMIsValidAccess: access violation for %RGv rc=%d\n", Addr, rc));
650 return VINF_EM_RAW_GUEST_TRAP;
651 }
652
653 /*
654 * Check if the access would cause a page fault
655 *
656 * Note that hypervisor page directories are not present in the guest's tables, so this check
657 * is sufficient.
658 */
659 bool fWrite = !!(fAccess & X86_PTE_RW);
660 bool fUser = !!(fAccess & X86_PTE_US);
661 if ( !(fPage & X86_PTE_P)
662 || (fWrite && !(fPage & X86_PTE_RW))
663 || (fUser && !(fPage & X86_PTE_US)) )
664 {
665 Log(("PGMIsValidAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPage, fWrite, fUser));
666 return VINF_EM_RAW_GUEST_TRAP;
667 }
668 if ( RT_SUCCESS(rc)
669 && PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize))
670 return PGMIsValidAccess(pVCpu, Addr + PAGE_SIZE, (cbSize > PAGE_SIZE) ? cbSize - PAGE_SIZE : 1, fAccess);
671 return rc;
672}
673
674
675/**
676 * Verifies a range of pages for read or write access
677 *
678 * Supports handling of pages marked for dirty bit tracking and CSAM
679 *
680 * @returns VBox status code.
681 * @param pVCpu The cross context virtual CPU structure.
682 * @param Addr Guest virtual address to check
683 * @param cbSize Access size
684 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
685 */
686VMMDECL(int) PGMVerifyAccess(PVMCPU pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
687{
688 PVM pVM = pVCpu->CTX_SUFF(pVM);
689
690 AssertMsg(!(fAccess & ~(X86_PTE_US | X86_PTE_RW)), ("PGMVerifyAccess: invalid access type %08x\n", fAccess));
691
692 /*
693 * Get going.
694 */
695 uint64_t fPageGst;
696 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)Addr, &fPageGst, NULL);
697 if (RT_FAILURE(rc))
698 {
699 Log(("PGMVerifyAccess: access violation for %RGv rc=%d\n", Addr, rc));
700 return VINF_EM_RAW_GUEST_TRAP;
701 }
702
703 /*
704 * Check if the access would cause a page fault
705 *
706 * Note that hypervisor page directories are not present in the guest's tables, so this check
707 * is sufficient.
708 */
709 const bool fWrite = !!(fAccess & X86_PTE_RW);
710 const bool fUser = !!(fAccess & X86_PTE_US);
711 if ( !(fPageGst & X86_PTE_P)
712 || (fWrite && !(fPageGst & X86_PTE_RW))
713 || (fUser && !(fPageGst & X86_PTE_US)) )
714 {
715 Log(("PGMVerifyAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPageGst, fWrite, fUser));
716 return VINF_EM_RAW_GUEST_TRAP;
717 }
718
719 if (!pVM->pgm.s.fNestedPaging)
720 {
721 /*
722 * Next step is to verify if we protected this page for dirty bit tracking or for CSAM scanning
723 */
724 rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, NULL, NULL);
725 if ( rc == VERR_PAGE_NOT_PRESENT
726 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
727 {
728 /*
729 * Page is not present in our page tables.
730 * Try to sync it!
731 */
732 Assert(X86_TRAP_PF_RW == X86_PTE_RW && X86_TRAP_PF_US == X86_PTE_US);
733 uint32_t uErr = fAccess & (X86_TRAP_PF_RW | X86_TRAP_PF_US);
734 rc = PGM_BTH_PFN(VerifyAccessSyncPage, pVCpu)(pVCpu, Addr, fPageGst, uErr);
735 if (rc != VINF_SUCCESS)
736 return rc;
737 }
738 else
739 AssertMsg(rc == VINF_SUCCESS, ("PGMShwGetPage %RGv failed with %Rrc\n", Addr, rc));
740 }
741
742#if 0 /* def VBOX_STRICT; triggers too often now */
743 /*
744 * This check is a bit paranoid, but useful.
745 */
746 /* Note! This will assert when writing to monitored pages (a bit annoying actually). */
747 uint64_t fPageShw;
748 rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, &fPageShw, NULL);
749 if ( (rc == VERR_PAGE_NOT_PRESENT || RT_FAILURE(rc))
750 || (fWrite && !(fPageShw & X86_PTE_RW))
751 || (fUser && !(fPageShw & X86_PTE_US)) )
752 {
753 AssertMsgFailed(("Unexpected access violation for %RGv! rc=%Rrc write=%d user=%d\n",
754 Addr, rc, fWrite && !(fPageShw & X86_PTE_RW), fUser && !(fPageShw & X86_PTE_US)));
755 return VINF_EM_RAW_GUEST_TRAP;
756 }
757#endif
758
759 if ( RT_SUCCESS(rc)
760 && ( PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize - 1)
761 || Addr + cbSize < Addr))
762 {
763 /* Don't recursively call PGMVerifyAccess as we might run out of stack. */
764 for (;;)
765 {
766 Addr += PAGE_SIZE;
767 if (cbSize > PAGE_SIZE)
768 cbSize -= PAGE_SIZE;
769 else
770 cbSize = 1;
771 rc = PGMVerifyAccess(pVCpu, Addr, 1, fAccess);
772 if (rc != VINF_SUCCESS)
773 break;
774 if (PAGE_ADDRESS(Addr) == PAGE_ADDRESS(Addr + cbSize - 1))
775 break;
776 }
777 }
778 return rc;
779}
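/*
 * Editorial illustration (not part of the original file): a hypothetical helper
 * showing how a caller might use PGMVerifyAccess() to check that a guest buffer is
 * writable from user mode before touching it.  The helper name is made up.
 */
#if 0 /* illustration only */
static bool pgmSampleIsUserWritable(PVMCPU pVCpu, RTGCPTR GCPtrBuf, uint32_t cbBuf)
{
    /* X86_PTE_RW requests a write check, X86_PTE_US a user-mode check; the worker
       will also sync pages that are merely missing from the shadow tables. */
    return PGMVerifyAccess(pVCpu, GCPtrBuf, cbBuf, X86_PTE_RW | X86_PTE_US) == VINF_SUCCESS;
}
#endif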
780
781
782/**
783 * Emulation of the invlpg instruction (HC only actually).
784 *
785 * @returns Strict VBox status code, special care required.
786 * @retval VINF_PGM_SYNC_CR3 - handled.
787 * @retval VINF_EM_RAW_EMULATE_INSTR - not handled (RC only).
788 * @retval VERR_REM_FLUSHED_PAGES_OVERFLOW - not handled.
789 *
790 * @param pVCpu The cross context virtual CPU structure.
791 * @param GCPtrPage Page to invalidate.
792 *
793 * @remark ASSUMES the page table entry or page directory is valid. Fairly
794 * safe, but there could be edge cases!
795 *
796 * @todo Flush page or page directory only if necessary!
797 * @todo VBOXSTRICTRC
798 */
799VMMDECL(int) PGMInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
800{
801 PVM pVM = pVCpu->CTX_SUFF(pVM);
802 int rc;
803 Log3(("PGMInvalidatePage: GCPtrPage=%RGv\n", GCPtrPage));
804
805#if !defined(IN_RING3) && defined(VBOX_WITH_REM)
806 /*
807 * Notify the recompiler so it can record this instruction.
808 */
809 REMNotifyInvalidatePage(pVM, GCPtrPage);
810#endif /* !IN_RING3 */
811 IEMTlbInvalidatePage(pVCpu, GCPtrPage);
812
813
814#ifdef IN_RC
815 /*
816 * Check for conflicts and pending CR3 monitoring updates.
817 */
818 if (pgmMapAreMappingsFloating(pVM))
819 {
820 if ( pgmGetMapping(pVM, GCPtrPage)
821 && PGMGstGetPage(pVCpu, GCPtrPage, NULL, NULL) != VERR_PAGE_TABLE_NOT_PRESENT)
822 {
823 LogFlow(("PGMGCInvalidatePage: Conflict!\n"));
824 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
825 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRCInvlPgConflict);
826 return VINF_PGM_SYNC_CR3;
827 }
828
829 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
830 {
831 LogFlow(("PGMGCInvalidatePage: PGM_SYNC_MONITOR_CR3 -> reinterpret instruction in R3\n"));
832 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRCInvlPgSyncMonCR3);
833 return VINF_EM_RAW_EMULATE_INSTR;
834 }
835 }
836#endif /* IN_RC */
837
838 /*
839 * Call paging mode specific worker.
840 */
841 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InvalidatePage), a);
842 pgmLock(pVM);
843 rc = PGM_BTH_PFN(InvalidatePage, pVCpu)(pVCpu, GCPtrPage);
844 pgmUnlock(pVM);
845 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InvalidatePage), a);
846
847#ifdef IN_RING3
848 /*
849 * Check if we have a pending update of the CR3 monitoring.
850 */
851 if ( RT_SUCCESS(rc)
852 && (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3))
853 {
854 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
855 Assert(!pVM->pgm.s.fMappingsFixed); Assert(pgmMapAreMappingsEnabled(pVM));
856 }
857
858# ifdef VBOX_WITH_RAW_MODE
859 /*
860 * Inform CSAM about the flush
861 *
862 * Note: This is to check if monitored pages have been changed; when we implement
863 * callbacks for virtual handlers, this is no longer required.
864 */
865 CSAMR3FlushPage(pVM, GCPtrPage);
866# endif
867#endif /* IN_RING3 */
868
869 /* Ignore all irrelevant error codes. */
870 if ( rc == VERR_PAGE_NOT_PRESENT
871 || rc == VERR_PAGE_TABLE_NOT_PRESENT
872 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT
873 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)
874 rc = VINF_SUCCESS;
875
876 return rc;
877}
878
879
880/**
881 * Executes an instruction using the interpreter.
882 *
883 * @returns VBox status code (appropriate for trap handling and GC return).
884 * @param pVM The cross context VM structure.
885 * @param pVCpu The cross context virtual CPU structure.
886 * @param pRegFrame Register frame.
887 * @param pvFault Fault address.
888 */
889VMMDECL(VBOXSTRICTRC) PGMInterpretInstruction(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
890{
891 NOREF(pVM);
892 VBOXSTRICTRC rc = EMInterpretInstruction(pVCpu, pRegFrame, pvFault);
893 if (rc == VERR_EM_INTERPRETER)
894 rc = VINF_EM_RAW_EMULATE_INSTR;
895 if (rc != VINF_SUCCESS)
896 Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%RGv)\n", VBOXSTRICTRC_VAL(rc), pvFault));
897 return rc;
898}
899
900
901/**
902 * Gets effective page information (from the VMM page directory).
903 *
904 * @returns VBox status code.
905 * @param pVCpu The cross context virtual CPU structure.
906 * @param GCPtr Guest Context virtual address of the page.
907 * @param pfFlags Where to store the flags. These are X86_PTE_*.
908 * @param pHCPhys Where to store the HC physical address of the page.
909 * This is page aligned.
910 * @remark You should use PGMMapGetPage() for pages in a mapping.
911 */
912VMMDECL(int) PGMShwGetPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
913{
914 pgmLock(pVCpu->CTX_SUFF(pVM));
915 int rc = PGM_SHW_PFN(GetPage, pVCpu)(pVCpu, GCPtr, pfFlags, pHCPhys);
916 pgmUnlock(pVCpu->CTX_SUFF(pVM));
917 return rc;
918}
919
920
921/**
922 * Modify page flags for a range of pages in the shadow context.
923 *
924 * The existing flags are ANDed with the fMask and ORed with the fFlags.
925 *
926 * @returns VBox status code.
927 * @param pVCpu The cross context virtual CPU structure.
928 * @param GCPtr Virtual address of the first page in the range.
929 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
930 * @param fMask The AND mask - page flags X86_PTE_*.
931 * Be very CAREFUL when ~'ing constants which could be 32-bit!
932 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
933 * @remark You must use PGMMapModifyPage() for pages in a mapping.
934 */
935DECLINLINE(int) pdmShwModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags)
936{
937 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
938 Assert(!(fOpFlags & ~(PGM_MK_PG_IS_MMIO2 | PGM_MK_PG_IS_WRITE_FAULT)));
939
940 GCPtr &= PAGE_BASE_GC_MASK; /** @todo this ain't necessary, right... */
941
942 PVM pVM = pVCpu->CTX_SUFF(pVM);
943 pgmLock(pVM);
944 int rc = PGM_SHW_PFN(ModifyPage, pVCpu)(pVCpu, GCPtr, PAGE_SIZE, fFlags, fMask, fOpFlags);
945 pgmUnlock(pVM);
946 return rc;
947}
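/*
 * Editorial note (not part of the original file): the wrappers below illustrate the
 * fFlags/fMask convention of pdmShwModifyPage() -- read-only ORs in nothing and ANDs
 * with ~X86_PTE_RW, writable ORs in X86_PTE_RW and ANDs with everything, and
 * not-present ANDs with 0 so the whole entry (including the present bit) is cleared.
 */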
948
949
950/**
951 * Changing the page flags for a single page in the shadow page tables so as to
952 * make it read-only.
953 *
954 * @returns VBox status code.
955 * @param pVCpu The cross context virtual CPU structure.
956 * @param GCPtr Virtual address of the first page in the range.
957 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
958 */
959VMMDECL(int) PGMShwMakePageReadonly(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
960{
961 return pdmShwModifyPage(pVCpu, GCPtr, 0, ~(uint64_t)X86_PTE_RW, fOpFlags);
962}
963
964
965/**
966 * Changing the page flags for a single page in the shadow page tables so as to
967 * make it writable.
968 *
969 * The caller must know with 101% certainty that the guest page tables map this
970 * as writable too. This function will deal with shared, zero and write monitored
971 * pages.
972 *
973 * @returns VBox status code.
974 * @param pVCpu The cross context virtual CPU structure.
975 * @param GCPtr Virtual address of the first page in the range.
976 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
977 */
978VMMDECL(int) PGMShwMakePageWritable(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
979{
980 return pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)0, fOpFlags);
981}
982
983
984/**
985 * Changing the page flags for a single page in the shadow page tables so as to
986 * make it not present.
987 *
988 * @returns VBox status code.
989 * @param pVCpu The cross context virtual CPU structure.
990 * @param GCPtr Virtual address of the first page in the range.
991 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
992 */
993VMMDECL(int) PGMShwMakePageNotPresent(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
994{
995 return pdmShwModifyPage(pVCpu, GCPtr, 0, 0, fOpFlags);
996}
997
998
999/**
1000 * Changing the page flags for a single page in the shadow page tables so as to
1001 * make it supervisor and writable.
1002 *
1003 * This is for dealing with CR0.WP=0 and read-only user pages.
1004 *
1005 * @returns VBox status code.
1006 * @param pVCpu The cross context virtual CPU structure.
1007 * @param GCPtr Virtual address of the first page in the range.
1008 * @param fBigPage Whether or not this is a big page. If it is, we have to
1009 * change the shadow PDE as well. If it isn't, the caller
1010 * has checked that the shadow PDE doesn't need changing.
1011 * We ASSUME 4KB pages backing the big page here!
1012 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
1013 */
1014int pgmShwMakePageSupervisorAndWritable(PVMCPU pVCpu, RTGCPTR GCPtr, bool fBigPage, uint32_t fOpFlags)
1015{
1016 int rc = pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)X86_PTE_US, fOpFlags);
1017 if (rc == VINF_SUCCESS && fBigPage)
1018 {
1019 /* this is a bit ugly... */
1020 switch (pVCpu->pgm.s.enmShadowMode)
1021 {
1022 case PGMMODE_32_BIT:
1023 {
1024 PX86PDE pPde = pgmShwGet32BitPDEPtr(pVCpu, GCPtr);
1025 AssertReturn(pPde, VERR_INTERNAL_ERROR_3);
1026 Log(("pgmShwMakePageSupervisorAndWritable: PDE=%#llx", pPde->u));
1027 pPde->n.u1Write = 1;
1028 Log(("-> PDE=%#llx (32)\n", pPde->u));
1029 break;
1030 }
1031 case PGMMODE_PAE:
1032 case PGMMODE_PAE_NX:
1033 {
1034 PX86PDEPAE pPde = pgmShwGetPaePDEPtr(pVCpu, GCPtr);
1035 AssertReturn(pPde, VERR_INTERNAL_ERROR_3);
1036 Log(("pgmShwMakePageSupervisorAndWritable: PDE=%#llx", pPde->u));
1037 pPde->n.u1Write = 1;
1038 Log(("-> PDE=%#llx (PAE)\n", pPde->u));
1039 break;
1040 }
1041 default:
1042 AssertFailedReturn(VERR_INTERNAL_ERROR_4);
1043 }
1044 }
1045 return rc;
1046}
1047
1048
1049/**
1050 * Gets the shadow page directory for the specified address, PAE.
1051 *
1052 * @returns VBox status code.
1053 * @param pVCpu The cross context virtual CPU structure.
1054 * @param GCPtr The address.
1055 * @param uGstPdpe Guest PDPT entry. Valid.
1056 * @param ppPD Receives address of page directory
1057 */
1058int pgmShwSyncPaePDPtr(PVMCPU pVCpu, RTGCPTR GCPtr, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
1059{
1060 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
1061 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
1062 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1063 PVM pVM = pVCpu->CTX_SUFF(pVM);
1064 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1065 PPGMPOOLPAGE pShwPage;
1066 int rc;
1067
1068 PGM_LOCK_ASSERT_OWNER(pVM);
1069
1070 /* Allocate page directory if not present. */
1071 if ( !pPdpe->n.u1Present
1072 && !(pPdpe->u & X86_PDPE_PG_MASK))
1073 {
1074 RTGCPTR64 GCPdPt;
1075 PGMPOOLKIND enmKind;
1076
1077 if (pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu))
1078 {
1079 /* AMD-V nested paging or real/protected mode without paging. */
1080 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
1081 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
1082 }
1083 else
1084 {
1085 if (CPUMGetGuestCR4(pVCpu) & X86_CR4_PAE)
1086 {
1087 if (!(uGstPdpe & X86_PDPE_P))
1088 {
1089 /* PD not present; guest must reload CR3 to change it.
1090 * No need to monitor anything in this case.
1091 */
1092 Assert(VM_IS_RAW_MODE_ENABLED(pVM));
1093
1094 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1095 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
1096 uGstPdpe |= X86_PDPE_P;
1097 }
1098 else
1099 {
1100 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1101 enmKind = PGMPOOLKIND_PAE_PD_FOR_PAE_PD;
1102 }
1103 }
1104 else
1105 {
1106 GCPdPt = CPUMGetGuestCR3(pVCpu);
1107 enmKind = (PGMPOOLKIND)(PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD + iPdPt);
1108 }
1109 }
1110
1111 /* Create a reference back to the PDPT by using the index in its shadow page. */
1112 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1113 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPdPt, false /*fLockPage*/,
1114 &pShwPage);
1115 AssertRCReturn(rc, rc);
1116
1117 /* The PD was cached or created; hook it up now. */
1118 pPdpe->u |= pShwPage->Core.Key | (uGstPdpe & (X86_PDPE_P | X86_PDPE_A));
1119
1120# if defined(IN_RC)
1121 /*
1122 * In 32-bit PAE mode we *must* invalidate the TLB when changing a
1123 * PDPT entry; the CPU fetches them only during cr3 load, so any
1124 * non-present PDPT will continue to cause page faults.
1125 */
1126 ASMReloadCR3();
1127# endif
1128 PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdpe);
1129 }
1130 else
1131 {
1132 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
1133 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1134 Assert((pPdpe->u & X86_PDPE_PG_MASK) == pShwPage->Core.Key);
1135
1136 pgmPoolCacheUsed(pPool, pShwPage);
1137 }
1138 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1139 return VINF_SUCCESS;
1140}
1141
1142
1143/**
1144 * Gets the pointer to the shadow page directory entry for an address, PAE.
1145 *
1146 * @returns VBox status code.
1147 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1148 * @param GCPtr The address.
1149 * @param ppShwPde Receives the address of the pgm pool page for the shadow page directory
1150 */
1151DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPU pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde)
1152{
1153 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
1154 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
1155 PVM pVM = pVCpu->CTX_SUFF(pVM);
1156
1157 PGM_LOCK_ASSERT_OWNER(pVM);
1158
1159 AssertReturn(pPdpt, VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT); /* can't happen */
1160 if (!pPdpt->a[iPdPt].n.u1Present)
1161 {
1162 LogFlow(("pgmShwGetPaePoolPagePD: PD %d not present (%RX64)\n", iPdPt, pPdpt->a[iPdPt].u));
1163 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1164 }
1165 AssertMsg(pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK, ("GCPtr=%RGv\n", GCPtr));
1166
1167 /* Fetch the pgm pool shadow descriptor. */
1168 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
1169 AssertReturn(pShwPde, VERR_PGM_POOL_GET_PAGE_FAILED);
1170
1171 *ppShwPde = pShwPde;
1172 return VINF_SUCCESS;
1173}
1174
1175#ifndef IN_RC
1176
1177/**
1178 * Syncs the SHADOW page directory pointer for the specified address.
1179 *
1180 * Allocates backing pages in case the PDPT or PML4 entry is missing.
1181 *
1182 * The caller is responsible for making sure the guest has a valid PD before
1183 * calling this function.
1184 *
1185 * @returns VBox status code.
1186 * @param pVCpu The cross context virtual CPU structure.
1187 * @param GCPtr The address.
1188 * @param uGstPml4e Guest PML4 entry (valid).
1189 * @param uGstPdpe Guest PDPT entry (valid).
1190 * @param ppPD Receives address of page directory
1191 */
1192static int pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
1193{
1194 PVM pVM = pVCpu->CTX_SUFF(pVM);
1195 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1196 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1197 PX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
1198 bool fNestedPagingOrNoGstPaging = pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu);
1199 PPGMPOOLPAGE pShwPage;
1200 int rc;
1201
1202 PGM_LOCK_ASSERT_OWNER(pVM);
1203
1204 /* Allocate page directory pointer table if not present. */
1205 if ( !pPml4e->n.u1Present
1206 && !(pPml4e->u & X86_PML4E_PG_MASK))
1207 {
1208 RTGCPTR64 GCPml4;
1209 PGMPOOLKIND enmKind;
1210
1211 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1212
1213 if (fNestedPagingOrNoGstPaging)
1214 {
1215 /* AMD-V nested paging or real/protected mode without paging */
1216 GCPml4 = (RTGCPTR64)iPml4 << X86_PML4_SHIFT;
1217 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_PHYS;
1218 }
1219 else
1220 {
1221 GCPml4 = uGstPml4e & X86_PML4E_PG_MASK;
1222 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT;
1223 }
1224
1225 /* Create a reference back to the PML4 by using the index in its shadow page. */
1226 rc = pgmPoolAlloc(pVM, GCPml4, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1227 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, false /*fLockPage*/,
1228 &pShwPage);
1229 AssertRCReturn(rc, rc);
1230 }
1231 else
1232 {
1233 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
1234 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1235
1236 pgmPoolCacheUsed(pPool, pShwPage);
1237 }
1238 /* The PDPT was cached or created; hook it up now. */
1239 pPml4e->u |= pShwPage->Core.Key | (uGstPml4e & pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask);
1240
1241 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1242 PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1243 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1244
1245 /* Allocate page directory if not present. */
1246 if ( !pPdpe->n.u1Present
1247 && !(pPdpe->u & X86_PDPE_PG_MASK))
1248 {
1249 RTGCPTR64 GCPdPt;
1250 PGMPOOLKIND enmKind;
1251
1252 if (fNestedPagingOrNoGstPaging)
1253 {
1254 /* AMD-V nested paging or real/protected mode without paging */
1255 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
1256 enmKind = PGMPOOLKIND_64BIT_PD_FOR_PHYS;
1257 }
1258 else
1259 {
1260 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1261 enmKind = PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD;
1262 }
1263
1264 /* Create a reference back to the PDPT by using the index in its shadow page. */
1265 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1266 pShwPage->idx, iPdPt, false /*fLockPage*/,
1267 &pShwPage);
1268 AssertRCReturn(rc, rc);
1269 }
1270 else
1271 {
1272 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
1273 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1274
1275 pgmPoolCacheUsed(pPool, pShwPage);
1276 }
1277 /* The PD was cached or created; hook it up now. */
1278 pPdpe->u |= pShwPage->Core.Key | (uGstPdpe & pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask);
1279
1280 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1281 return VINF_SUCCESS;
1282}
1283
1284
1285/**
1286 * Gets the SHADOW page directory pointer for the specified address (long mode).
1287 *
1288 * @returns VBox status code.
1289 * @param pVCpu The cross context virtual CPU structure.
1290 * @param GCPtr The address.
1291 * @param ppPdpt Receives address of pdpt
1292 * @param ppPD Receives address of page directory
1293 */
1294DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
1295{
1296 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1297 PCX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
1298
1299 PGM_LOCK_ASSERT_OWNER(pVCpu->CTX_SUFF(pVM));
1300
1301 AssertReturn(pPml4e, VERR_PGM_PML4_MAPPING);
1302 if (ppPml4e)
1303 *ppPml4e = (PX86PML4E)pPml4e;
1304
1305 Log4(("pgmShwGetLongModePDPtr %RGv (%RHv) %RX64\n", GCPtr, pPml4e, pPml4e->u));
1306
1307 if (!pPml4e->n.u1Present)
1308 return VERR_PAGE_MAP_LEVEL4_NOT_PRESENT;
1309
1310 PVM pVM = pVCpu->CTX_SUFF(pVM);
1311 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1312 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
1313 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1314
1315 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1316 PCX86PDPT pPdpt = *ppPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1317 if (!pPdpt->a[iPdPt].n.u1Present)
1318 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1319
1320 pShwPage = pgmPoolGetPage(pPool, pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
1321 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1322
1323 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1324 Log4(("pgmShwGetLongModePDPtr %RGv -> *ppPD=%p PDE=%p/%RX64\n", GCPtr, *ppPD, &(*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK], (*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK].u));
1325 return VINF_SUCCESS;
1326}
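/*
 * Editorial note (not part of the original file): the index arithmetic above follows
 * the standard 4-level long mode layout -- bits 47:39 of the address select the PML4
 * entry, bits 38:30 the PDPT entry, bits 29:21 the PD entry and bits 20:12 the page
 * table entry, which is what the X86_PML4_SHIFT/X86_PDPT_SHIFT/X86_PD_PAE_SHIFT
 * shifts and the 9-bit masks extract.
 */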
1327
1328
1329/**
1330 * Syncs the SHADOW EPT page directory pointer for the specified address. Allocates
1331 * backing pages in case the PDPT or PML4 entry is missing.
1332 *
1333 * @returns VBox status code.
1334 * @param pVCpu The cross context virtual CPU structure.
1335 * @param GCPtr The address.
1336 * @param ppPdpt Receives address of pdpt
1337 * @param ppPD Receives address of page directory
1338 */
1339static int pgmShwGetEPTPDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD)
1340{
1341 PVM pVM = pVCpu->CTX_SUFF(pVM);
1342 const unsigned iPml4 = (GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
1343 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1344 PEPTPML4 pPml4;
1345 PEPTPML4E pPml4e;
1346 PPGMPOOLPAGE pShwPage;
1347 int rc;
1348
1349 Assert(pVM->pgm.s.fNestedPaging);
1350 PGM_LOCK_ASSERT_OWNER(pVM);
1351
1352 pPml4 = (PEPTPML4)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1353 Assert(pPml4);
1354
1355 /* Allocate page directory pointer table if not present. */
1356 pPml4e = &pPml4->a[iPml4];
1357 if ( !pPml4e->n.u1Present
1358 && !(pPml4e->u & EPT_PML4E_PG_MASK))
1359 {
1360 Assert(!(pPml4e->u & EPT_PML4E_PG_MASK));
1361 RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT;
1362
1363 rc = pgmPoolAlloc(pVM, GCPml4, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1364 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, false /*fLockPage*/,
1365 &pShwPage);
1366 AssertRCReturn(rc, rc);
1367 }
1368 else
1369 {
1370 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
1371 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1372
1373 pgmPoolCacheUsed(pPool, pShwPage);
1374 }
1375 /* The PDPT was cached or created; hook it up now and fill with the default value. */
1376 pPml4e->u = pShwPage->Core.Key;
1377 pPml4e->n.u1Present = 1;
1378 pPml4e->n.u1Write = 1;
1379 pPml4e->n.u1Execute = 1;
1380
1381 const unsigned iPdPt = (GCPtr >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
1382 PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1383 PEPTPDPTE pPdpe = &pPdpt->a[iPdPt];
1384
1385 if (ppPdpt)
1386 *ppPdpt = pPdpt;
1387
1388 /* Allocate page directory if not present. */
1389 if ( !pPdpe->n.u1Present
1390 && !(pPdpe->u & EPT_PDPTE_PG_MASK))
1391 {
1392 RTGCPTR64 GCPdPt = (RTGCPTR64)iPdPt << EPT_PDPT_SHIFT;
1393 rc = pgmPoolAlloc(pVM, GCPdPt, PGMPOOLKIND_EPT_PD_FOR_PHYS, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1394 pShwPage->idx, iPdPt, false /*fLockPage*/,
1395 &pShwPage);
1396 AssertRCReturn(rc, rc);
1397 }
1398 else
1399 {
1400 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & EPT_PDPTE_PG_MASK);
1401 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1402
1403 pgmPoolCacheUsed(pPool, pShwPage);
1404 }
1405 /* The PD was cached or created; hook it up now and fill with the default value. */
1406 pPdpe->u = pShwPage->Core.Key;
1407 pPdpe->n.u1Present = 1;
1408 pPdpe->n.u1Write = 1;
1409 pPdpe->n.u1Execute = 1;
1410
1411 *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1412 return VINF_SUCCESS;
1413}
1414
1415#endif /* !IN_RC */
1416
1417#ifdef IN_RING0
1418/**
1419 * Synchronizes a range of nested page table entries.
1420 *
1421 * The caller must own the PGM lock.
1422 *
1423 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1424 * @param GCPhys Where to start.
1425 * @param cPages The number of pages whose entries should be synced.
1426 * @param enmShwPagingMode The shadow paging mode (PGMMODE_EPT for VT-x,
1427 * host paging mode for AMD-V).
1428 */
1429int pgmShwSyncNestedPageLocked(PVMCPU pVCpu, RTGCPHYS GCPhys, uint32_t cPages, PGMMODE enmShwPagingMode)
1430{
1431 PGM_LOCK_ASSERT_OWNER(pVCpu->CTX_SUFF(pVM));
1432
1433 int rc;
1434 switch (enmShwPagingMode)
1435 {
1436 case PGMMODE_32_BIT:
1437 {
1438 X86PDE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1439 rc = PGM_BTH_NAME_32BIT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1440 break;
1441 }
1442
1443 case PGMMODE_PAE:
1444 case PGMMODE_PAE_NX:
1445 {
1446 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1447 rc = PGM_BTH_NAME_PAE_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1448 break;
1449 }
1450
1451 case PGMMODE_AMD64:
1452 case PGMMODE_AMD64_NX:
1453 {
1454 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1455 rc = PGM_BTH_NAME_AMD64_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1456 break;
1457 }
1458
1459 case PGMMODE_EPT:
1460 {
1461 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1462 rc = PGM_BTH_NAME_EPT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1463 break;
1464 }
1465
1466 default:
1467 AssertMsgFailedReturn(("%d\n", enmShwPagingMode), VERR_IPE_NOT_REACHED_DEFAULT_CASE);
1468 }
1469 return rc;
1470}
1471#endif /* IN_RING0 */
1472
1473
1474/**
1475 * Gets effective Guest OS page information.
1476 *
1477 * When GCPtr is in a big page, the function will return as if it were a normal
1478 * 4KB page. If the need to distinguish between big and normal pages becomes
1479 * necessary at a later point, a dedicated variant of PGMGstGetPage() will be
1480 * created for that purpose.
1481 *
1482 * @returns VBox status code.
1483 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1484 * @param GCPtr Guest Context virtual address of the page.
1485 * @param pfFlags Where to store the flags. These are X86_PTE_*, even for big pages.
1486 * @param pGCPhys Where to store the GC physical address of the page.
1487 * This is page aligned.
1488 */
1489VMMDECL(int) PGMGstGetPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
1490{
1491 VMCPU_ASSERT_EMT(pVCpu);
1492 intptr_t idx = pVCpu->pgm.s.idxGuestModeData;
1493 AssertReturn(idx < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
1494 AssertReturn(g_aPgmGuestModeData[idx].pfnGetPage, VERR_PGM_MODE_IPE);
1495 return g_aPgmGuestModeData[idx].pfnGetPage(pVCpu, GCPtr, pfFlags, pGCPhys);
1496}
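/*
 * Editorial illustration (not part of the original file): a hypothetical helper that
 * uses PGMGstGetPage() to translate a guest linear address and reject pages the guest
 * has not marked writable.  The helper name and the use of VERR_ACCESS_DENIED are
 * assumptions made for the example only.
 */
#if 0 /* illustration only */
static int pgmSampleGstQueryWritablePage(PVMCPU pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
{
    uint64_t fFlags;
    int rc = PGMGstGetPage(pVCpu, GCPtr, &fFlags, pGCPhys); /* *pGCPhys is page aligned on success. */
    if (RT_SUCCESS(rc) && !(fFlags & X86_PTE_RW))
        rc = VERR_ACCESS_DENIED;
    return rc;
}
#endif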
1497
1498
1499/**
1500 * Performs a guest page table walk.
1501 *
1502 * The guest should be in paged protect mode or long mode when making a call to
1503 * this function.
1504 *
1505 * @returns VBox status code.
1506 * @retval VINF_SUCCESS on success.
1507 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
1508 * @retval VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
1509 * not valid, except enmType is PGMPTWALKGSTTYPE_INVALID.
1510 *
1511 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1512 * @param GCPtr The guest virtual address to walk by.
1513 * @param pWalk Where to return the walk result. This is valid for some
1514 * error codes as well.
1515 */
1516int pgmGstPtWalk(PVMCPU pVCpu, RTGCPTR GCPtr, PPGMPTWALKGST pWalk)
1517{
1518 VMCPU_ASSERT_EMT(pVCpu);
1519 switch (pVCpu->pgm.s.enmGuestMode)
1520 {
1521 case PGMMODE_32_BIT:
1522 pWalk->enmType = PGMPTWALKGSTTYPE_32BIT;
1523 return PGM_GST_NAME_32BIT(Walk)(pVCpu, GCPtr, &pWalk->u.Legacy);
1524
1525 case PGMMODE_PAE:
1526 case PGMMODE_PAE_NX:
1527 pWalk->enmType = PGMPTWALKGSTTYPE_PAE;
1528 return PGM_GST_NAME_PAE(Walk)(pVCpu, GCPtr, &pWalk->u.Pae);
1529
1530#if !defined(IN_RC)
1531 case PGMMODE_AMD64:
1532 case PGMMODE_AMD64_NX:
1533 pWalk->enmType = PGMPTWALKGSTTYPE_AMD64;
1534 return PGM_GST_NAME_AMD64(Walk)(pVCpu, GCPtr, &pWalk->u.Amd64);
1535#endif
1536
1537 case PGMMODE_REAL:
1538 case PGMMODE_PROTECTED:
1539 pWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
1540 return VERR_PGM_NOT_USED_IN_MODE;
1541
1542#if defined(IN_RC)
1543 case PGMMODE_AMD64:
1544 case PGMMODE_AMD64_NX:
1545#endif
1546 case PGMMODE_NESTED:
1547 case PGMMODE_EPT:
1548 default:
1549 AssertFailed();
1550 pWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
1551 return VERR_PGM_NOT_USED_IN_MODE;
1552 }
1553}
1554
1555
1556/**
1557 * Tries to continue the previous walk.
1558 *
1559 * @note Requires the caller to hold the PGM lock from the first
1560 * pgmGstPtWalk() call to the last pgmGstPtWalkNext() call. Otherwise
1561 * we cannot use the pointers.
1562 *
1563 * @returns VBox status code.
1564 * @retval VINF_SUCCESS on success.
1565 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
1566 * @retval VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
1567 * not valid, except enmType is PGMPTWALKGSTTYPE_INVALID.
1568 *
1569 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1570 * @param GCPtr The guest virtual address to walk by.
1571 * @param pWalk Pointer to the previous walk result and where to return
1572 * the result of this walk. This is valid for some error
1573 * codes as well.
1574 */
1575int pgmGstPtWalkNext(PVMCPU pVCpu, RTGCPTR GCPtr, PPGMPTWALKGST pWalk)
1576{
1577 /*
1578 * We can only handle successful walks.
1579 * We also limit ourselves to the next page.
1580 */
1581 if ( pWalk->u.Core.fSucceeded
1582 && GCPtr - pWalk->u.Core.GCPtr == PAGE_SIZE)
1583 {
1584 Assert(pWalk->u.Core.uLevel == 0);
1585 if (pWalk->enmType == PGMPTWALKGSTTYPE_AMD64)
1586 {
1587 /*
1588 * AMD64
1589 */
1590 if (!pWalk->u.Core.fGigantPage && !pWalk->u.Core.fBigPage)
1591 {
1592 /*
1593 * We fall back to full walk if the PDE table changes, if any
1594 * reserved bits are set, or if the effective page access changes.
1595 */
1596 const uint64_t fPteSame = X86_PTE_P | X86_PTE_RW | X86_PTE_US | X86_PTE_PWT
1597 | X86_PTE_PCD | X86_PTE_A | X86_PTE_PAE_NX;
1598 const uint64_t fPdeSame = X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_PWT
1599 | X86_PDE_PCD | X86_PDE_A | X86_PDE_PAE_NX | X86_PDE_PS;
1600
1601 if ((GCPtr >> X86_PD_PAE_SHIFT) == (pWalk->u.Core.GCPtr >> X86_PD_PAE_SHIFT))
1602 {
1603 if (pWalk->u.Amd64.pPte)
1604 {
1605 X86PTEPAE Pte;
1606 Pte.u = pWalk->u.Amd64.pPte[1].u;
1607 if ( (Pte.u & fPteSame) == (pWalk->u.Amd64.Pte.u & fPteSame)
1608 && !(Pte.u & (pVCpu)->pgm.s.fGstAmd64MbzPteMask))
1609 {
1610
1611 pWalk->u.Core.GCPtr = GCPtr;
1612 pWalk->u.Core.GCPhys = Pte.u & X86_PTE_PAE_PG_MASK;
1613 pWalk->u.Amd64.Pte.u = Pte.u;
1614 pWalk->u.Amd64.pPte++;
1615 return VINF_SUCCESS;
1616 }
1617 }
1618 }
1619 else if ((GCPtr >> X86_PDPT_SHIFT) == (pWalk->u.Core.GCPtr >> X86_PDPT_SHIFT))
1620 {
1621 Assert(!((GCPtr >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK)); /* Must be first PT entry. */
1622 if (pWalk->u.Amd64.pPde)
1623 {
1624 X86PDEPAE Pde;
1625 Pde.u = pWalk->u.Amd64.pPde[1].u;
1626 if ( (Pde.u & fPdeSame) == (pWalk->u.Amd64.Pde.u & fPdeSame)
1627 && !(Pde.u & (pVCpu)->pgm.s.fGstAmd64MbzPdeMask))
1628 {
1629 /* Get the new PTE and check out the first entry. */
1630 int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, PGM_A20_APPLY(pVCpu, (Pde.u & X86_PDE_PAE_PG_MASK)),
1631 &pWalk->u.Amd64.pPt);
1632 if (RT_SUCCESS(rc))
1633 {
1634 pWalk->u.Amd64.pPte = &pWalk->u.Amd64.pPt->a[0];
1635 X86PTEPAE Pte;
1636 Pte.u = pWalk->u.Amd64.pPte->u;
1637 if ( (Pte.u & fPteSame) == (pWalk->u.Amd64.Pte.u & fPteSame)
1638 && !(Pte.u & (pVCpu)->pgm.s.fGstAmd64MbzPteMask))
1639 {
1640 pWalk->u.Core.GCPtr = GCPtr;
1641 pWalk->u.Core.GCPhys = Pte.u & X86_PTE_PAE_PG_MASK;
1642 pWalk->u.Amd64.Pte.u = Pte.u;
1643 pWalk->u.Amd64.Pde.u = Pde.u;
1644 pWalk->u.Amd64.pPde++;
1645 return VINF_SUCCESS;
1646 }
1647 }
1648 }
1649 }
1650 }
1651 }
1652 else if (!pWalk->u.Core.fGigantPage)
1653 {
1654 if ((GCPtr & X86_PAGE_2M_BASE_MASK) == (pWalk->u.Core.GCPtr & X86_PAGE_2M_BASE_MASK))
1655 {
1656 pWalk->u.Core.GCPtr = GCPtr;
1657 pWalk->u.Core.GCPhys += PAGE_SIZE;
1658 return VINF_SUCCESS;
1659 }
1660 }
1661 else
1662 {
1663 if ((GCPtr & X86_PAGE_1G_BASE_MASK) == (pWalk->u.Core.GCPtr & X86_PAGE_1G_BASE_MASK))
1664 {
1665 pWalk->u.Core.GCPtr = GCPtr;
1666 pWalk->u.Core.GCPhys += PAGE_SIZE;
1667 return VINF_SUCCESS;
1668 }
1669 }
1670 }
1671 }
1672 /* Cases we don't handle; do a full walk. */
1673 return pgmGstPtWalk(pVCpu, GCPtr, pWalk);
1674}
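
/*
 * Illustrative usage sketch, not lifted from any actual caller: walking a
 * contiguous range page by page while holding the PGM lock, as required by
 * the note above.  GCPtrFirst/GCPtrLast are assumed caller-provided.
 *
 *     PGMPTWALKGST Walk;
 *     pgmLock(pVM);
 *     int rc = pgmGstPtWalk(pVCpu, GCPtrFirst, &Walk);
 *     for (RTGCPTR GCPtr = GCPtrFirst + PAGE_SIZE; RT_SUCCESS(rc) && GCPtr <= GCPtrLast; GCPtr += PAGE_SIZE)
 *         rc = pgmGstPtWalkNext(pVCpu, GCPtr, &Walk); /* falls back to a full walk on its own when needed */
 *     pgmUnlock(pVM);
 */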
1675
1676
1677/**
1678 * Checks if the page is present.
1679 *
1680 * @returns true if the page is present.
1681 * @returns false if the page is not present.
1682 * @param pVCpu The cross context virtual CPU structure.
1683 * @param GCPtr Address within the page.
1684 */
1685VMMDECL(bool) PGMGstIsPagePresent(PVMCPU pVCpu, RTGCPTR GCPtr)
1686{
1687 VMCPU_ASSERT_EMT(pVCpu);
1688 int rc = PGMGstGetPage(pVCpu, GCPtr, NULL, NULL);
1689 return RT_SUCCESS(rc);
1690}
1691
1692
1693/**
1694 * Sets (replaces) the page flags for a range of pages in the guest's tables.
1695 *
1696 * @returns VBox status code.
1697 * @param pVCpu The cross context virtual CPU structure.
1698 * @param GCPtr The address of the first page.
1699 * @param cb The size of the range in bytes.
1700 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
1701 */
1702VMMDECL(int) PGMGstSetPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
1703{
1704 VMCPU_ASSERT_EMT(pVCpu);
1705 return PGMGstModifyPage(pVCpu, GCPtr, cb, fFlags, 0);
1706}
1707
1708
1709/**
1710 * Modify page flags for a range of pages in the guest's tables
1711 *
1712 * The existing flags are ANDed with the fMask and ORed with the fFlags.
1713 *
1714 * @returns VBox status code.
1715 * @param pVCpu The cross context virtual CPU structure.
1716 * @param GCPtr Virtual address of the first page in the range.
1717 * @param cb Size (in bytes) of the range to apply the modification to.
1718 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
1719 * @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
1720 * Be very CAREFUL when ~'ing constants which could be 32-bit!
1721 */
1722VMMDECL(int) PGMGstModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
1723{
1724 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,GstModifyPage), a);
1725 VMCPU_ASSERT_EMT(pVCpu);
1726
1727 /*
1728 * Validate input.
1729 */
1730 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
1731 Assert(cb);
1732
1733 LogFlow(("PGMGstModifyPage %RGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));
1734
1735 /*
1736 * Adjust input.
1737 */
1738 cb += GCPtr & PAGE_OFFSET_MASK;
1739 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
1740 GCPtr = (GCPtr & PAGE_BASE_GC_MASK);
1741
1742 /*
1743 * Call worker.
1744 */
1745 intptr_t idx = pVCpu->pgm.s.idxGuestModeData;
1746 AssertReturn(idx < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
1747 AssertReturn(g_aPgmGuestModeData[idx].pfnModifyPage, VERR_PGM_MODE_IPE);
1748 int rc = g_aPgmGuestModeData[idx].pfnModifyPage(pVCpu, GCPtr, cb, fFlags, fMask);
1749
1750 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,GstModifyPage), a);
1751 return rc;
1752}
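
/*
 * Illustrative sketch of the AND/OR semantics and the ~'ing caveat noted
 * above (assuming X86_PTE_RW is a 32-bit constant): write-protect a single
 * guest page without touching its other flags.
 *
 *     // ~X86_PTE_RW would be a 32-bit value and, once widened, would also
 *     // clear the upper flag bits (e.g. the NX bit) from the AND mask.
 *     int rc = PGMGstModifyPage(pVCpu, GCPtr, PAGE_SIZE, 0 /-*fFlags*-/, ~(uint64_t)X86_PTE_RW /-*fMask*-/);
 */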
1753
1754
1755#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1756
1757/**
1758 * Performs the lazy mapping of the 32-bit guest PD.
1759 *
1760 * @returns VBox status code.
1761 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1762 * @param ppPd Where to return the pointer to the mapping. This is
1763 * always set.
1764 */
1765int pgmGstLazyMap32BitPD(PVMCPU pVCpu, PX86PD *ppPd)
1766{
1767 PVM pVM = pVCpu->CTX_SUFF(pVM);
1768 pgmLock(pVM);
1769
1770 Assert(!pVCpu->pgm.s.CTX_SUFF(pGst32BitPd));
1771
1772 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAGE_MASK;
1773 PPGMPAGE pPage;
1774 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
1775 if (RT_SUCCESS(rc))
1776 {
1777 RTHCPTR HCPtrGuestCR3;
1778 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
1779 if (RT_SUCCESS(rc))
1780 {
1781 pVCpu->pgm.s.pGst32BitPdR3 = (R3PTRTYPE(PX86PD))HCPtrGuestCR3;
1782# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1783 pVCpu->pgm.s.pGst32BitPdR0 = (R0PTRTYPE(PX86PD))HCPtrGuestCR3;
1784# endif
1785 *ppPd = (PX86PD)HCPtrGuestCR3;
1786
1787 pgmUnlock(pVM);
1788 return VINF_SUCCESS;
1789 }
1790
1791 AssertRC(rc);
1792 }
1793 pgmUnlock(pVM);
1794
1795 *ppPd = NULL;
1796 return rc;
1797}
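
/*
 * Illustrative sketch of how the lazy mapping is typically consumed (the
 * real accessor lives in PGMInline.h and may differ in detail): try the
 * cached context pointer first and only fall back to the mapper on a miss.
 *
 *     PX86PD pGuestPD = pVCpu->pgm.s.CTX_SUFF(pGst32BitPd);
 *     if (RT_UNLIKELY(!pGuestPD))
 *     {
 *         int rc = pgmGstLazyMap32BitPD(pVCpu, &pGuestPD);
 *         AssertRCReturn(rc, NULL);
 *     }
 */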
1798
1799
1800/**
1801 * Performs the lazy mapping of the PAE guest PDPT.
1802 *
1803 * @returns VBox status code.
1804 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1805 * @param ppPdpt Where to return the pointer to the mapping. This is
1806 * always set.
1807 */
1808int pgmGstLazyMapPaePDPT(PVMCPU pVCpu, PX86PDPT *ppPdpt)
1809{
1810 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt));
1811 PVM pVM = pVCpu->CTX_SUFF(pVM);
1812 pgmLock(pVM);
1813
1814 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAE_PAGE_MASK;
1815 PPGMPAGE pPage;
1816 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
1817 if (RT_SUCCESS(rc))
1818 {
1819 RTHCPTR HCPtrGuestCR3;
1820 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
1821 if (RT_SUCCESS(rc))
1822 {
1823 pVCpu->pgm.s.pGstPaePdptR3 = (R3PTRTYPE(PX86PDPT))HCPtrGuestCR3;
1824# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1825 pVCpu->pgm.s.pGstPaePdptR0 = (R0PTRTYPE(PX86PDPT))HCPtrGuestCR3;
1826# endif
1827 *ppPdpt = (PX86PDPT)HCPtrGuestCR3;
1828
1829 pgmUnlock(pVM);
1830 return VINF_SUCCESS;
1831 }
1832
1833 AssertRC(rc);
1834 }
1835
1836 pgmUnlock(pVM);
1837 *ppPdpt = NULL;
1838 return rc;
1839}
1840
1841
1842/**
1843 * Performs the lazy mapping / updating of a PAE guest PD.
1844 *
1845 * @returns VBox status code.
1847 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1848 * @param iPdpt Which PD entry to map (0..3).
1849 * @param ppPd Where to return the pointer to the mapping. This is
1850 * always set.
1851 */
1852int pgmGstLazyMapPaePD(PVMCPU pVCpu, uint32_t iPdpt, PX86PDPAE *ppPd)
1853{
1854 PVM pVM = pVCpu->CTX_SUFF(pVM);
1855 pgmLock(pVM);
1856
1857 PX86PDPT pGuestPDPT = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
1858 Assert(pGuestPDPT);
1859 Assert(pGuestPDPT->a[iPdpt].n.u1Present);
1860 RTGCPHYS GCPhys = pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK;
1861 bool const fChanged = pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] != GCPhys;
1862
1863 PPGMPAGE pPage;
1864 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
1865 if (RT_SUCCESS(rc))
1866 {
1867 RTRCPTR RCPtr = NIL_RTRCPTR;
1868 RTHCPTR HCPtr = NIL_RTHCPTR;
1869#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1870 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, &HCPtr);
1871 AssertRC(rc);
1872#endif
1873 if (RT_SUCCESS(rc) && fChanged)
1874 {
1875 RCPtr = (RTRCPTR)(RTRCUINTPTR)(pVM->pgm.s.GCPtrCR3Mapping + (1 + iPdpt) * PAGE_SIZE);
1876 rc = PGMMap(pVM, (RTRCUINTPTR)RCPtr, PGM_PAGE_GET_HCPHYS(pPage), PAGE_SIZE, 0);
1877 }
1878 if (RT_SUCCESS(rc))
1879 {
1880 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = (R3PTRTYPE(PX86PDPAE))HCPtr;
1881# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1882 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = (R0PTRTYPE(PX86PDPAE))HCPtr;
1883# endif
1884 if (fChanged)
1885 {
1886 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = GCPhys;
1887 pVCpu->pgm.s.apGstPaePDsRC[iPdpt] = (RCPTRTYPE(PX86PDPAE))RCPtr;
1888 }
1889
1890 *ppPd = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
1891 pgmUnlock(pVM);
1892 return VINF_SUCCESS;
1893 }
1894 }
1895
1896 /* Invalid page or some failure, invalidate the entry. */
1897 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = NIL_RTGCPHYS;
1898 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = 0;
1899# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1900 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = 0;
1901# endif
1902 pVCpu->pgm.s.apGstPaePDsRC[iPdpt] = 0;
1903
1904 pgmUnlock(pVM);
1905 return rc;
1906}
1907
1908#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1909#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1910/**
1911 * Performs the lazy mapping of the AMD64 guest PML4 table.
1912 *
1913 * @returns VBox status code.
1914 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1915 * @param ppPml4 Where to return the pointer to the mapping. This will
1916 * always be set.
1917 */
1918int pgmGstLazyMapPml4(PVMCPU pVCpu, PX86PML4 *ppPml4)
1919{
1920 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4));
1921 PVM pVM = pVCpu->CTX_SUFF(pVM);
1922 pgmLock(pVM);
1923
1924 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_AMD64_PAGE_MASK;
1925 PPGMPAGE pPage;
1926 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
1927 if (RT_SUCCESS(rc))
1928 {
1929 RTHCPTR HCPtrGuestCR3;
1930 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
1931 if (RT_SUCCESS(rc))
1932 {
1933 pVCpu->pgm.s.pGstAmd64Pml4R3 = (R3PTRTYPE(PX86PML4))HCPtrGuestCR3;
1934# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1935 pVCpu->pgm.s.pGstAmd64Pml4R0 = (R0PTRTYPE(PX86PML4))HCPtrGuestCR3;
1936# endif
1937 *ppPml4 = (PX86PML4)HCPtrGuestCR3;
1938
1939 pgmUnlock(pVM);
1940 return VINF_SUCCESS;
1941 }
1942 }
1943
1944 pgmUnlock(pVM);
1945 *ppPml4 = NULL;
1946 return rc;
1947}
1948#endif
1949
1950
1951/**
1952 * Gets the PAE PDPEs values cached by the CPU.
1953 *
1954 * @returns VBox status code.
1955 * @param pVCpu The cross context virtual CPU structure.
1956 * @param paPdpes Where to return the four PDPEs. The array
1957 * pointed to must have 4 entries.
1958 */
1959VMM_INT_DECL(int) PGMGstGetPaePdpes(PVMCPU pVCpu, PX86PDPE paPdpes)
1960{
1961 Assert(pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
1962
1963 paPdpes[0] = pVCpu->pgm.s.aGstPaePdpeRegs[0];
1964 paPdpes[1] = pVCpu->pgm.s.aGstPaePdpeRegs[1];
1965 paPdpes[2] = pVCpu->pgm.s.aGstPaePdpeRegs[2];
1966 paPdpes[3] = pVCpu->pgm.s.aGstPaePdpeRegs[3];
1967 return VINF_SUCCESS;
1968}
1969
1970
1971/**
1972 * Sets the PAE PDPEs values cached by the CPU.
1973 *
1974 * @remarks This must be called *AFTER* PGMUpdateCR3.
1975 *
1976 * @param pVCpu The cross context virtual CPU structure.
1977 * @param paPdpes The four PDPE values. The array pointed to must
1978 * have exactly 4 entries.
1979 *
1980 * @remarks No-long-jump zone!!!
1981 */
1982VMM_INT_DECL(void) PGMGstUpdatePaePdpes(PVMCPU pVCpu, PCX86PDPE paPdpes)
1983{
1984 Assert(pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
1985
1986 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->pgm.s.aGstPaePdpeRegs); i++)
1987 {
1988 if (pVCpu->pgm.s.aGstPaePdpeRegs[i].u != paPdpes[i].u)
1989 {
1990 pVCpu->pgm.s.aGstPaePdpeRegs[i] = paPdpes[i];
1991
1992 /* Force lazy remapping if it changed in any way. */
1993 pVCpu->pgm.s.apGstPaePDsR3[i] = 0;
1994# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1995 pVCpu->pgm.s.apGstPaePDsR0[i] = 0;
1996# endif
1997 pVCpu->pgm.s.apGstPaePDsRC[i] = 0;
1998 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
1999 }
2000 }
2001
2002 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
2003}
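
/*
 * Illustrative pairing sketch (the calling code lives in HM and is only
 * assumed here): with EPT shadow paging and a PAE guest, the four cached
 * PDPEs are fetched for the VMCS and later written back after PGMUpdateCR3
 * so stale PD mappings get remapped lazily.
 *
 *     X86PDPE aPdpes[4];
 *     int rc = PGMGstGetPaePdpes(pVCpu, &aPdpes[0]);   // e.g. to load into the guest PDPTE VMCS fields
 *     ...
 *     PGMGstUpdatePaePdpes(pVCpu, &aPdpes[0]);         // after PGMUpdateCR3, per the @remarks above
 */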
2004
2005
2006/**
2007 * Gets the current CR3 register value for the shadow memory context.
2008 * @returns CR3 value.
2009 * @param pVCpu The cross context virtual CPU structure.
2010 */
2011VMMDECL(RTHCPHYS) PGMGetHyperCR3(PVMCPU pVCpu)
2012{
2013 PPGMPOOLPAGE pPoolPage = pVCpu->pgm.s.CTX_SUFF(pShwPageCR3);
2014 AssertPtrReturn(pPoolPage, 0);
2015 return pPoolPage->Core.Key;
2016}
2017
2018
2019/**
2020 * Gets the current CR3 register value for the nested memory context.
2021 * @returns CR3 value.
2022 * @param pVCpu The cross context virtual CPU structure.
2023 * @param enmShadowMode The shadow paging mode.
2024 */
2025VMMDECL(RTHCPHYS) PGMGetNestedCR3(PVMCPU pVCpu, PGMMODE enmShadowMode)
2026{
2027 NOREF(enmShadowMode);
2028 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
2029 return pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key;
2030}
2031
2032
2033/**
2034 * Gets the current CR3 register value for the HC intermediate memory context.
2035 * @returns CR3 value.
2036 * @param pVM The cross context VM structure.
2037 */
2038VMMDECL(RTHCPHYS) PGMGetInterHCCR3(PVM pVM)
2039{
2040 switch (pVM->pgm.s.enmHostMode)
2041 {
2042 case SUPPAGINGMODE_32_BIT:
2043 case SUPPAGINGMODE_32_BIT_GLOBAL:
2044 return pVM->pgm.s.HCPhysInterPD;
2045
2046 case SUPPAGINGMODE_PAE:
2047 case SUPPAGINGMODE_PAE_GLOBAL:
2048 case SUPPAGINGMODE_PAE_NX:
2049 case SUPPAGINGMODE_PAE_GLOBAL_NX:
2050 return pVM->pgm.s.HCPhysInterPaePDPT;
2051
2052 case SUPPAGINGMODE_AMD64:
2053 case SUPPAGINGMODE_AMD64_GLOBAL:
2054 case SUPPAGINGMODE_AMD64_NX:
2055 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
2056 return pVM->pgm.s.HCPhysInterPaePDPT;
2057
2058 default:
2059 AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode));
2060 return NIL_RTHCPHYS;
2061 }
2062}
2063
2064
2065/**
2066 * Gets the current CR3 register value for the RC intermediate memory context.
2067 * @returns CR3 value.
2068 * @param pVM The cross context VM structure.
2069 * @param pVCpu The cross context virtual CPU structure.
2070 */
2071VMMDECL(RTHCPHYS) PGMGetInterRCCR3(PVM pVM, PVMCPU pVCpu)
2072{
2073 switch (pVCpu->pgm.s.enmShadowMode)
2074 {
2075 case PGMMODE_32_BIT:
2076 return pVM->pgm.s.HCPhysInterPD;
2077
2078 case PGMMODE_PAE:
2079 case PGMMODE_PAE_NX:
2080 return pVM->pgm.s.HCPhysInterPaePDPT;
2081
2082 case PGMMODE_AMD64:
2083 case PGMMODE_AMD64_NX:
2084 return pVM->pgm.s.HCPhysInterPaePML4;
2085
2086 case PGMMODE_EPT:
2087 case PGMMODE_NESTED:
2088 return 0; /* not relevant */
2089
2090 default:
2091 AssertMsgFailed(("enmShadowMode=%d\n", pVCpu->pgm.s.enmShadowMode));
2092 return NIL_RTHCPHYS;
2093 }
2094}
2095
2096
2097/**
2098 * Gets the CR3 register value for the 32-Bit intermediate memory context.
2099 * @returns CR3 value.
2100 * @param pVM The cross context VM structure.
2101 */
2102VMMDECL(RTHCPHYS) PGMGetInter32BitCR3(PVM pVM)
2103{
2104 return pVM->pgm.s.HCPhysInterPD;
2105}
2106
2107
2108/**
2109 * Gets the CR3 register value for the PAE intermediate memory context.
2110 * @returns CR3 value.
2111 * @param pVM The cross context VM structure.
2112 */
2113VMMDECL(RTHCPHYS) PGMGetInterPaeCR3(PVM pVM)
2114{
2115 return pVM->pgm.s.HCPhysInterPaePDPT;
2116}
2117
2118
2119/**
2120 * Gets the CR3 register value for the AMD64 intermediate memory context.
2121 * @returns CR3 value.
2122 * @param pVM The cross context VM structure.
2123 */
2124VMMDECL(RTHCPHYS) PGMGetInterAmd64CR3(PVM pVM)
2125{
2126 return pVM->pgm.s.HCPhysInterPaePML4;
2127}
2128
2129
2130/**
2131 * Performs and schedules necessary updates following a CR3 load or reload.
2132 *
2133 * This will normally involve mapping the guest PD or nPDPT
2134 *
2135 * @returns VBox status code.
2136 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
2137 * safely be ignored and overridden since the FF will be set too then.
2138 * @param pVCpu The cross context virtual CPU structure.
2139 * @param cr3 The new cr3.
2140 * @param fGlobal Indicates whether this is a global flush or not.
2141 */
2142VMMDECL(int) PGMFlushTLB(PVMCPU pVCpu, uint64_t cr3, bool fGlobal)
2143{
2144 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLB), a);
2145 PVM pVM = pVCpu->CTX_SUFF(pVM);
2146
2147 VMCPU_ASSERT_EMT(pVCpu);
2148
2149 /*
2150 * Always flag the necessary updates; this is required for hardware acceleration.
2151 */
2152 /** @todo optimize this, it shouldn't always be necessary. */
2153 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2154 if (fGlobal)
2155 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2156 LogFlow(("PGMFlushTLB: cr3=%RX64 OldCr3=%RX64 fGlobal=%d\n", cr3, pVCpu->pgm.s.GCPhysCR3, fGlobal));
2157
2158 /*
2159 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
2160 */
2161 int rc = VINF_SUCCESS;
2162 RTGCPHYS GCPhysCR3;
2163 switch (pVCpu->pgm.s.enmGuestMode)
2164 {
2165 case PGMMODE_PAE:
2166 case PGMMODE_PAE_NX:
2167 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
2168 break;
2169 case PGMMODE_AMD64:
2170 case PGMMODE_AMD64_NX:
2171 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
2172 break;
2173 default:
2174 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
2175 break;
2176 }
2177 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
2178
2179 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
2180 {
2181 RTGCPHYS GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
2182 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2183 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
2184 if (RT_LIKELY(rc == VINF_SUCCESS))
2185 {
2186 if (pgmMapAreMappingsFloating(pVM))
2187 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
2188 }
2189 else
2190 {
2191 AssertMsg(rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
2192 Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_PGM_SYNC_CR3));
2193 pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3;
2194 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MAP_CR3;
2195 if (pgmMapAreMappingsFloating(pVM))
2196 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;
2197 }
2198
2199 if (fGlobal)
2200 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBNewCR3Global));
2201 else
2202 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBNewCR3));
2203 }
2204 else
2205 {
2206# ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
2207 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
2208 if (pPool->cDirtyPages)
2209 {
2210 pgmLock(pVM);
2211 pgmPoolResetDirtyPages(pVM);
2212 pgmUnlock(pVM);
2213 }
2214# endif
2215 /*
2216 * Check if we have a pending update of the CR3 monitoring.
2217 */
2218 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
2219 {
2220 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
2221 Assert(!pVM->pgm.s.fMappingsFixed); Assert(pgmMapAreMappingsEnabled(pVM));
2222 }
2223 if (fGlobal)
2224 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBSameCR3Global));
2225 else
2226 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBSameCR3));
2227 }
2228
2229 IEMTlbInvalidateAll(pVCpu, false /*fVmm*/);
2230 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLB), a);
2231 return rc;
2232}
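
/*
 * Illustrative caller sketch (the real dispatch lives in CPUM/IEM/HM; the
 * CPUM getter is assumed): a guest MOV CR3 without nested paging forwards
 * the new value here, treating the flush as global when CR4.PGE is clear.
 *
 *     int rc = PGMFlushTLB(pVCpu, uNewCr3, !(CPUMGetGuestCR4(pVCpu) & X86_CR4_PGE) /-* fGlobal *-/);
 *     // VINF_PGM_SYNC_CR3 can be ignored here since the corresponding force-action flag is set too.
 */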
2233
2234
2235/**
2236 * Performs and schedules necessary updates following a CR3 load or reload when
2237 * using nested or extended paging.
2238 *
2239 * This API is an alternative to PGMFlushTLB that avoids actually flushing the
2240 * TLB and triggering a SyncCR3.
2241 *
2242 * This will normally involve mapping the guest PD or nPDPT
2243 *
2244 * @returns VBox status code.
2245 * @retval VINF_SUCCESS.
2246 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync (not for nested
2247 * paging modes). This can safely be ignored and overridden since the
2248 * FF will be set too then.
2249 * @param pVCpu The cross context virtual CPU structure.
2250 * @param cr3 The new cr3.
2251 */
2252VMMDECL(int) PGMUpdateCR3(PVMCPU pVCpu, uint64_t cr3)
2253{
2254 VMCPU_ASSERT_EMT(pVCpu);
2255 LogFlow(("PGMUpdateCR3: cr3=%RX64 OldCr3=%RX64\n", cr3, pVCpu->pgm.s.GCPhysCR3));
2256
2257 /* We assume we're only called in nested paging mode. */
2258 Assert(pVCpu->CTX_SUFF(pVM)->pgm.s.fNestedPaging || pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
2259 Assert(!pgmMapAreMappingsEnabled(pVCpu->CTX_SUFF(pVM)));
2260 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3));
2261
2262 /*
2263 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
2264 */
2265 int rc = VINF_SUCCESS;
2266 RTGCPHYS GCPhysCR3;
2267 switch (pVCpu->pgm.s.enmGuestMode)
2268 {
2269 case PGMMODE_PAE:
2270 case PGMMODE_PAE_NX:
2271 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
2272 break;
2273 case PGMMODE_AMD64:
2274 case PGMMODE_AMD64_NX:
2275 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
2276 break;
2277 default:
2278 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
2279 break;
2280 }
2281 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
2282
2283 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
2284 {
2285 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2286 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
2287 AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */ /** @todo this isn't true for the mac, but we need hw to test/fix this. */
2288 }
2289
2290 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
2291 return rc;
2292}
2293
2294
2295/**
2296 * Synchronize the paging structures.
2297 *
2298 * This function is called in response to the VM_FF_PGM_SYNC_CR3 and
2299 * VM_FF_PGM_SYNC_CR3_NONGLOBAL. Those two force action flags are set
2300 * in several places, most importantly whenever the CR3 is loaded.
2301 *
2302 * @returns VBox status code. May return VINF_PGM_SYNC_CR3 in RC/R0.
2303 * @retval VERR_PGM_NO_HYPERVISOR_ADDRESS in raw-mode when we're unable to map
2304 * the VMM into guest context.
2305 * @param pVCpu The cross context virtual CPU structure.
2306 * @param cr0 Guest context CR0 register
2307 * @param cr3 Guest context CR3 register
2308 * @param cr4 Guest context CR4 register
2309 * @param fGlobal Including global page directories or not
2310 */
2311VMMDECL(int) PGMSyncCR3(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
2312{
2313 int rc;
2314
2315 VMCPU_ASSERT_EMT(pVCpu);
2316
2317 /*
2318 * The pool may have pending stuff and even require a return to ring-3 to
2319 * clear the whole thing.
2320 */
2321 rc = pgmPoolSyncCR3(pVCpu);
2322 if (rc != VINF_SUCCESS)
2323 return rc;
2324
2325 /*
2326 * We might be called when we shouldn't.
2327 *
2328 * The mode switching will ensure that the PD is resynced after every mode
2329 * switch. So, if we find ourselves here when in protected or real mode
2330 * we can safely clear the FF and return immediately.
2331 */
2332 if (pVCpu->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
2333 {
2334 Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
2335 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2336 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2337 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2338 return VINF_SUCCESS;
2339 }
2340
2341 /* If global pages are not supported, then all flushes are global. */
2342 if (!(cr4 & X86_CR4_PGE))
2343 fGlobal = true;
2344 LogFlow(("PGMSyncCR3: cr0=%RX64 cr3=%RX64 cr4=%RX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
2345 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)));
2346
2347 /*
2348 * Check if we need to finish an aborted MapCR3 call (see PGMFlushTLB).
2349 * This should be done before SyncCR3.
2350 */
2351 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MAP_CR3)
2352 {
2353 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MAP_CR3;
2354
2355 RTGCPHYS GCPhysCR3Old = pVCpu->pgm.s.GCPhysCR3; NOREF(GCPhysCR3Old);
2356 RTGCPHYS GCPhysCR3;
2357 switch (pVCpu->pgm.s.enmGuestMode)
2358 {
2359 case PGMMODE_PAE:
2360 case PGMMODE_PAE_NX:
2361 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
2362 break;
2363 case PGMMODE_AMD64:
2364 case PGMMODE_AMD64_NX:
2365 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
2366 break;
2367 default:
2368 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
2369 break;
2370 }
2371 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
2372
2373 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
2374 {
2375 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2376 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
2377 }
2378
2379 /* Make sure we check for pending pgm pool syncs as we clear VMCPU_FF_PGM_SYNC_CR3 later on! */
2380 if ( rc == VINF_PGM_SYNC_CR3
2381 || (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL))
2382 {
2383 Log(("PGMSyncCR3: pending pgm pool sync after MapCR3!\n"));
2384#ifdef IN_RING3
2385 rc = pgmPoolSyncCR3(pVCpu);
2386#else
2387 if (rc == VINF_PGM_SYNC_CR3)
2388 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3Old;
2389 return VINF_PGM_SYNC_CR3;
2390#endif
2391 }
2392 AssertRCReturn(rc, rc);
2393 AssertRCSuccessReturn(rc, VERR_IPE_UNEXPECTED_INFO_STATUS);
2394 }
2395
2396 /*
2397 * Let the 'Bth' function do the work and we'll just keep track of the flags.
2398 */
2399 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
2400 rc = PGM_BTH_PFN(SyncCR3, pVCpu)(pVCpu, cr0, cr3, cr4, fGlobal);
2401 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
2402 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
2403 if (rc == VINF_SUCCESS)
2404 {
2405 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)
2406 {
2407 /* Go back to ring 3 if a pgm pool sync is again pending. */
2408 return VINF_PGM_SYNC_CR3;
2409 }
2410
2411 if (!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
2412 {
2413 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2414 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2415 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2416 }
2417
2418 /*
2419 * Check if we have a pending update of the CR3 monitoring.
2420 */
2421 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
2422 {
2423 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
2424 Assert(!pVCpu->CTX_SUFF(pVM)->pgm.s.fMappingsFixed);
2425 Assert(pgmMapAreMappingsEnabled(pVCpu->CTX_SUFF(pVM)));
2426 }
2427 }
2428
2429 /*
2430 * Now flush the CR3 (guest context).
2431 */
2432 if (rc == VINF_SUCCESS)
2433 PGM_INVL_VCPU_TLBS(pVCpu);
2434 return rc;
2435}
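
/*
 * Illustrative force-action handler sketch (the real one lives in EM; the
 * CPUM getters are assumed): feed the current control registers back in and
 * let the global FF decide the kind of sync.
 *
 *     if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
 *         rc = PGMSyncCR3(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR3(pVCpu), CPUMGetGuestCR4(pVCpu),
 *                         VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3) /-* fGlobal *-/);
 */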
2436
2437
2438/**
2439 * Called whenever CR0 or CR4 changes in a way which may affect the paging mode.
2440 *
2441 * @returns VBox status code, with the following informational code for
2442 * VM scheduling.
2443 * @retval VINF_SUCCESS if there was no change, or the change was successfully dealt with.
2444 * @retval VINF_PGM_CHANGE_MODE if we're in RC or R0 and the mode changes.
2445 * (I.e. not in R3.)
2446 * @retval VINF_EM_SUSPEND or VINF_EM_OFF on a fatal runtime error. (R3 only)
2447 *
2448 * @param pVCpu The cross context virtual CPU structure.
2449 * @param cr0 The new cr0.
2450 * @param cr4 The new cr4.
2451 * @param efer The new extended feature enable register.
2452 */
2453VMMDECL(int) PGMChangeMode(PVMCPU pVCpu, uint64_t cr0, uint64_t cr4, uint64_t efer)
2454{
2455 VMCPU_ASSERT_EMT(pVCpu);
2456
2457 /*
2458 * Calc the new guest mode.
2459 *
2460 * Note! We check PG before PE and without requiring PE because of the
2461 * special AMD-V paged real mode (APM vol 2, rev 3.28, 15.9).
2462 */
2463 PGMMODE enmGuestMode;
2464 if (cr0 & X86_CR0_PG)
2465 {
2466 if (!(cr4 & X86_CR4_PAE))
2467 {
2468 bool const fPse = !!(cr4 & X86_CR4_PSE);
2469 if (pVCpu->pgm.s.fGst32BitPageSizeExtension != fPse)
2470 Log(("PGMChangeMode: CR4.PSE %d -> %d\n", pVCpu->pgm.s.fGst32BitPageSizeExtension, fPse));
2471 pVCpu->pgm.s.fGst32BitPageSizeExtension = fPse;
2472 enmGuestMode = PGMMODE_32_BIT;
2473 }
2474 else if (!(efer & MSR_K6_EFER_LME))
2475 {
2476 if (!(efer & MSR_K6_EFER_NXE))
2477 enmGuestMode = PGMMODE_PAE;
2478 else
2479 enmGuestMode = PGMMODE_PAE_NX;
2480 }
2481 else
2482 {
2483 if (!(efer & MSR_K6_EFER_NXE))
2484 enmGuestMode = PGMMODE_AMD64;
2485 else
2486 enmGuestMode = PGMMODE_AMD64_NX;
2487 }
2488 }
2489 else if (!(cr0 & X86_CR0_PE))
2490 enmGuestMode = PGMMODE_REAL;
2491 else
2492 enmGuestMode = PGMMODE_PROTECTED;
2493
2494 /*
2495 * Did it change?
2496 */
2497 if (pVCpu->pgm.s.enmGuestMode == enmGuestMode)
2498 return VINF_SUCCESS;
2499
2500 /* Flush the TLB */
2501 PGM_INVL_VCPU_TLBS(pVCpu);
2502
2503#ifdef IN_RING3
2504 return PGMR3ChangeMode(pVCpu->CTX_SUFF(pVM), pVCpu, enmGuestMode);
2505#else
2506 LogFlow(("PGMChangeMode: returns VINF_PGM_CHANGE_MODE.\n"));
2507 return VINF_PGM_CHANGE_MODE;
2508#endif
2509}
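
/*
 * Illustrative caller sketch (uNewCr0/uNewCr4/uNewEfer are assumed to come
 * from the CRx/MSR emulation): outside ring-3 the informational status must
 * be propagated so the actual switch happens in PGMR3ChangeMode.
 *
 *     int rc = PGMChangeMode(pVCpu, uNewCr0, uNewCr4, uNewEfer);
 *     if (rc == VINF_PGM_CHANGE_MODE)
 *         return rc; // go to ring-3 for the mode switch
 */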
2510
2511
2512/**
2513 * Called by CPUM or REM when CR0.WP changes to 1.
2514 *
2515 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2516 * @thread EMT
2517 */
2518VMMDECL(void) PGMCr0WpEnabled(PVMCPU pVCpu)
2519{
2520 /*
2521 * Netware WP0+RO+US hack cleanup when WP0 -> WP1.
2522 *
2523 * Use the counter to judge whether there might be pool pages with active
2524 * hacks in them. If there are, we will be running the risk of messing up
2525 * the guest by allowing it to write to read-only pages. Thus, we have to
2526 * clear the page pool ASAP if there is the slightest chance.
2527 */
2528 if (pVCpu->pgm.s.cNetwareWp0Hacks > 0)
2529 {
2530 Assert(pVCpu->CTX_SUFF(pVM)->cCpus == 1);
2531
2532 Log(("PGMCr0WpEnabled: %llu WP0 hacks active - clearing page pool\n", pVCpu->pgm.s.cNetwareWp0Hacks));
2533 pVCpu->pgm.s.cNetwareWp0Hacks = 0;
2534 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
2535 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2536 }
2537}
2538
2539
2540/**
2541 * Gets the current guest paging mode.
2542 *
2543 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
2544 *
2545 * @returns The current paging mode.
2546 * @param pVCpu The cross context virtual CPU structure.
2547 */
2548VMMDECL(PGMMODE) PGMGetGuestMode(PVMCPU pVCpu)
2549{
2550 return pVCpu->pgm.s.enmGuestMode;
2551}
2552
2553
2554/**
2555 * Gets the current shadow paging mode.
2556 *
2557 * @returns The current paging mode.
2558 * @param pVCpu The cross context virtual CPU structure.
2559 */
2560VMMDECL(PGMMODE) PGMGetShadowMode(PVMCPU pVCpu)
2561{
2562 return pVCpu->pgm.s.enmShadowMode;
2563}
2564
2565
2566/**
2567 * Gets the current host paging mode.
2568 *
2569 * @returns The current paging mode.
2570 * @param pVM The cross context VM structure.
2571 */
2572VMMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
2573{
2574 switch (pVM->pgm.s.enmHostMode)
2575 {
2576 case SUPPAGINGMODE_32_BIT:
2577 case SUPPAGINGMODE_32_BIT_GLOBAL:
2578 return PGMMODE_32_BIT;
2579
2580 case SUPPAGINGMODE_PAE:
2581 case SUPPAGINGMODE_PAE_GLOBAL:
2582 return PGMMODE_PAE;
2583
2584 case SUPPAGINGMODE_PAE_NX:
2585 case SUPPAGINGMODE_PAE_GLOBAL_NX:
2586 return PGMMODE_PAE_NX;
2587
2588 case SUPPAGINGMODE_AMD64:
2589 case SUPPAGINGMODE_AMD64_GLOBAL:
2590 return PGMMODE_AMD64;
2591
2592 case SUPPAGINGMODE_AMD64_NX:
2593 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
2594 return PGMMODE_AMD64_NX;
2595
2596 default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
2597 }
2598
2599 return PGMMODE_INVALID;
2600}
2601
2602
2603/**
2604 * Get mode name.
2605 *
2606 * @returns read-only name string.
2607 * @param enmMode The mode whose name is desired.
2608 */
2609VMMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
2610{
2611 switch (enmMode)
2612 {
2613 case PGMMODE_REAL: return "Real";
2614 case PGMMODE_PROTECTED: return "Protected";
2615 case PGMMODE_32_BIT: return "32-bit";
2616 case PGMMODE_PAE: return "PAE";
2617 case PGMMODE_PAE_NX: return "PAE+NX";
2618 case PGMMODE_AMD64: return "AMD64";
2619 case PGMMODE_AMD64_NX: return "AMD64+NX";
2620 case PGMMODE_NESTED: return "Nested";
2621 case PGMMODE_EPT: return "EPT";
2622 default: return "unknown mode value";
2623 }
2624}
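
/*
 * Illustrative sketch combining the mode queries above for logging:
 *
 *     Log(("PGM: guest=%s shadow=%s host=%s\n",
 *          PGMGetModeName(PGMGetGuestMode(pVCpu)),
 *          PGMGetModeName(PGMGetShadowMode(pVCpu)),
 *          PGMGetModeName(PGMGetHostMode(pVCpu->CTX_SUFF(pVM)))));
 */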
2625
2626
2627/**
2628 * Gets the physical address represented in the guest CR3 as PGM sees it.
2629 *
2630 * This is mainly for logging and debugging.
2631 *
2632 * @returns PGM's guest CR3 value.
2633 * @param pVCpu The cross context virtual CPU structure.
2634 */
2635VMM_INT_DECL(RTGCPHYS) PGMGetGuestCR3Phys(PVMCPU pVCpu)
2636{
2637 return pVCpu->pgm.s.GCPhysCR3;
2638}
2639
2640
2641
2642/**
2643 * Notification from CPUM that the EFER.NXE bit has changed.
2644 *
2645 * @param pVCpu The cross context virtual CPU structure of the CPU for
2646 * which EFER changed.
2647 * @param fNxe The new NXE state.
2648 */
2649VMM_INT_DECL(void) PGMNotifyNxeChanged(PVMCPU pVCpu, bool fNxe)
2650{
2651/** @todo VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu); */
2652 Log(("PGMNotifyNxeChanged: fNxe=%RTbool\n", fNxe));
2653
2654 pVCpu->pgm.s.fNoExecuteEnabled = fNxe;
2655 if (fNxe)
2656 {
2657 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
2658 pVCpu->pgm.s.fGstPaeMbzPteMask &= ~X86_PTE_PAE_NX;
2659 pVCpu->pgm.s.fGstPaeMbzPdeMask &= ~X86_PDE_PAE_NX;
2660 pVCpu->pgm.s.fGstPaeMbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
2661 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask - N/A */
2662 pVCpu->pgm.s.fGstAmd64MbzPteMask &= ~X86_PTE_PAE_NX;
2663 pVCpu->pgm.s.fGstAmd64MbzPdeMask &= ~X86_PDE_PAE_NX;
2664 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
2665 pVCpu->pgm.s.fGstAmd64MbzPdpeMask &= ~X86_PDPE_LM_NX;
2666 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask &= ~X86_PDPE_LM_NX;
2667 pVCpu->pgm.s.fGstAmd64MbzPml4eMask &= ~X86_PML4E_NX;
2668
2669 pVCpu->pgm.s.fGst64ShadowedPteMask |= X86_PTE_PAE_NX;
2670 pVCpu->pgm.s.fGst64ShadowedPdeMask |= X86_PDE_PAE_NX;
2671 pVCpu->pgm.s.fGst64ShadowedBigPdeMask |= X86_PDE2M_PAE_NX;
2672 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask |= X86_PDE2M_PAE_NX;
2673 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask |= X86_PDPE_LM_NX;
2674 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask |= X86_PML4E_NX;
2675 }
2676 else
2677 {
2678 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
2679 pVCpu->pgm.s.fGstPaeMbzPteMask |= X86_PTE_PAE_NX;
2680 pVCpu->pgm.s.fGstPaeMbzPdeMask |= X86_PDE_PAE_NX;
2681 pVCpu->pgm.s.fGstPaeMbzBigPdeMask |= X86_PDE2M_PAE_NX;
2682 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask -N/A */
2683 pVCpu->pgm.s.fGstAmd64MbzPteMask |= X86_PTE_PAE_NX;
2684 pVCpu->pgm.s.fGstAmd64MbzPdeMask |= X86_PDE_PAE_NX;
2685 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask |= X86_PDE2M_PAE_NX;
2686 pVCpu->pgm.s.fGstAmd64MbzPdpeMask |= X86_PDPE_LM_NX;
2687 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask |= X86_PDPE_LM_NX;
2688 pVCpu->pgm.s.fGstAmd64MbzPml4eMask |= X86_PML4E_NX;
2689
2690 pVCpu->pgm.s.fGst64ShadowedPteMask &= ~X86_PTE_PAE_NX;
2691 pVCpu->pgm.s.fGst64ShadowedPdeMask &= ~X86_PDE_PAE_NX;
2692 pVCpu->pgm.s.fGst64ShadowedBigPdeMask &= ~X86_PDE2M_PAE_NX;
2693 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask &= ~X86_PDE2M_PAE_NX;
2694 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask &= ~X86_PDPE_LM_NX;
2695 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask &= ~X86_PML4E_NX;
2696 }
2697}
2698
2699
2700/**
2701 * Check if any pgm pool pages are marked dirty (not monitored).
2702 *
2703 * @returns true if there are dirty pages, false if not.
2704 * @param pVM The cross context VM structure.
2705 */
2706VMMDECL(bool) PGMHasDirtyPages(PVM pVM)
2707{
2708 return pVM->pgm.s.CTX_SUFF(pPool)->cDirtyPages != 0;
2709}
2710
2711
2712/**
2713 * Check if this VCPU currently owns the PGM lock.
2714 *
2715 * @returns bool owner/not owner
2716 * @param pVM The cross context VM structure.
2717 */
2718VMMDECL(bool) PGMIsLockOwner(PVM pVM)
2719{
2720 return PDMCritSectIsOwner(&pVM->pgm.s.CritSectX);
2721}
2722
2723
2724/**
2725 * Enable or disable large page usage
2726 *
2727 * @returns VBox status code.
2728 * @param pVM The cross context VM structure.
2729 * @param fUseLargePages Whether to use large pages.
2730 */
2731VMMDECL(int) PGMSetLargePageUsage(PVM pVM, bool fUseLargePages)
2732{
2733 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2734
2735 pVM->fUseLargePages = fUseLargePages;
2736 return VINF_SUCCESS;
2737}
2738
2739
2740/**
2741 * Acquire the PGM lock.
2742 *
2743 * @returns VBox status code
2744 * @param pVM The cross context VM structure.
2745 * @param SRC_POS The source position of the caller (RT_SRC_POS).
2746 */
2747#if (defined(VBOX_STRICT) && defined(IN_RING3)) || defined(DOXYGEN_RUNNING)
2748int pgmLockDebug(PVM pVM, RT_SRC_POS_DECL)
2749#else
2750int pgmLock(PVM pVM)
2751#endif
2752{
2753#if defined(VBOX_STRICT) && defined(IN_RING3)
2754 int rc = PDMCritSectEnterDebug(&pVM->pgm.s.CritSectX, VERR_SEM_BUSY, (uintptr_t)ASMReturnAddress(), RT_SRC_POS_ARGS);
2755#else
2756 int rc = PDMCritSectEnter(&pVM->pgm.s.CritSectX, VERR_SEM_BUSY);
2757#endif
2758#if defined(IN_RC) || defined(IN_RING0)
2759 if (rc == VERR_SEM_BUSY)
2760 rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_LOCK, 0);
2761#endif
2762 AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc));
2763 return rc;
2764}
2765
2766
2767/**
2768 * Release the PGM lock.
2769 *
2770 * @returns VBox status code
2771 * @param pVM The cross context VM structure.
2772 */
2773void pgmUnlock(PVM pVM)
2774{
2775 uint32_t cDeprecatedPageLocks = pVM->pgm.s.cDeprecatedPageLocks;
2776 pVM->pgm.s.cDeprecatedPageLocks = 0;
2777 int rc = PDMCritSectLeave(&pVM->pgm.s.CritSectX);
2778 if (rc == VINF_SEM_NESTED)
2779 pVM->pgm.s.cDeprecatedPageLocks = cDeprecatedPageLocks;
2780}
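
/*
 * Illustrative lock usage sketch (GCPhys is assumed caller-provided): the
 * critical section nests on the same EMT, so helpers may take the lock even
 * when the caller already owns it.
 *
 *     pgmLock(pVM);
 *     PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
 *     // ... inspect or modify pPage while the lock is held ...
 *     pgmUnlock(pVM);
 */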
2781
2782#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
2783
2784/**
2785 * Common worker for pgmRZDynMapGCPageOffInlined and pgmRZDynMapGCPageV2Inlined.
2786 *
2787 * @returns VBox status code.
2788 * @param pVM The cross context VM structure.
2789 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2790 * @param GCPhys The guest physical address of the page to map. The
2791 * offset bits are not ignored.
2792 * @param ppv Where to return the address corresponding to @a GCPhys.
2793 * @param SRC_POS The source position of the caller (RT_SRC_POS).
2794 */
2795int pgmRZDynMapGCPageCommon(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
2796{
2797 pgmLock(pVM);
2798
2799 /*
2800 * Convert it to a writable page and pass it on to the dynamic mapper.
2801 */
2802 int rc;
2803 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
2804 if (RT_LIKELY(pPage))
2805 {
2806 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
2807 if (RT_SUCCESS(rc))
2808 {
2809 void *pv;
2810 rc = pgmRZDynMapHCPageInlined(pVCpu, PGM_PAGE_GET_HCPHYS(pPage), &pv RTLOG_COMMA_SRC_POS_ARGS);
2811 if (RT_SUCCESS(rc))
2812 *ppv = (void *)((uintptr_t)pv | ((uintptr_t)GCPhys & PAGE_OFFSET_MASK));
2813 }
2814 else
2815 AssertRC(rc);
2816 }
2817 else
2818 {
2819 AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
2820 rc = VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2821 }
2822
2823 pgmUnlock(pVM);
2824 return rc;
2825}
2826
2827#endif /* IN_RC || VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
2828#if !defined(IN_R0) || defined(LOG_ENABLED)
2829
2830/** Format handler for PGMPAGE.
2831 * @copydoc FNRTSTRFORMATTYPE */
2832static DECLCALLBACK(size_t) pgmFormatTypeHandlerPage(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
2833 const char *pszType, void const *pvValue,
2834 int cchWidth, int cchPrecision, unsigned fFlags,
2835 void *pvUser)
2836{
2837 size_t cch;
2838 PCPGMPAGE pPage = (PCPGMPAGE)pvValue;
2839 if (RT_VALID_PTR(pPage))
2840 {
2841 char szTmp[64+80];
2842
2843 cch = 0;
2844
2845 /* The single char state stuff. */
2846 static const char s_achPageStates[4] = { 'Z', 'A', 'W', 'S' };
2847 szTmp[cch++] = s_achPageStates[PGM_PAGE_GET_STATE_NA(pPage)];
2848
2849#define IS_PART_INCLUDED(lvl) ( !(fFlags & RTSTR_F_PRECISION) || cchPrecision == (lvl) || cchPrecision >= (lvl)+10 )
2850 if (IS_PART_INCLUDED(5))
2851 {
2852 static const char s_achHandlerStates[4] = { '-', 't', 'w', 'a' };
2853 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)];
2854 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_VIRT_STATE(pPage)];
2855 }
2856
2857 /* The type. */
2858 if (IS_PART_INCLUDED(4))
2859 {
2860 szTmp[cch++] = ':';
2861 static const char s_achPageTypes[8][4] = { "INV", "RAM", "MI2", "M2A", "SHA", "ROM", "MIO", "BAD" };
2862 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][0];
2863 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][1];
2864 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][2];
2865 }
2866
2867 /* The numbers. */
2868 if (IS_PART_INCLUDED(3))
2869 {
2870 szTmp[cch++] = ':';
2871 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_HCPHYS_NA(pPage), 16, 12, 0, RTSTR_F_ZEROPAD | RTSTR_F_64BIT);
2872 }
2873
2874 if (IS_PART_INCLUDED(2))
2875 {
2876 szTmp[cch++] = ':';
2877 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_PAGEID(pPage), 16, 7, 0, RTSTR_F_ZEROPAD | RTSTR_F_32BIT);
2878 }
2879
2880 if (IS_PART_INCLUDED(6))
2881 {
2882 szTmp[cch++] = ':';
2883 static const char s_achRefs[4] = { '-', 'U', '!', 'L' };
2884 szTmp[cch++] = s_achRefs[PGM_PAGE_GET_TD_CREFS_NA(pPage)];
2885 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_TD_IDX_NA(pPage), 16, 4, 0, RTSTR_F_ZEROPAD | RTSTR_F_16BIT);
2886 }
2887#undef IS_PART_INCLUDED
2888
2889 cch = pfnOutput(pvArgOutput, szTmp, cch);
2890 }
2891 else
2892 cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmpage-ptr>"));
2893 NOREF(pszType); NOREF(cchWidth); NOREF(pvUser);
2894 return cch;
2895}
2896
2897
2898/** Format handler for PGMRAMRANGE.
2899 * @copydoc FNRTSTRFORMATTYPE */
2900static DECLCALLBACK(size_t) pgmFormatTypeHandlerRamRange(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
2901 const char *pszType, void const *pvValue,
2902 int cchWidth, int cchPrecision, unsigned fFlags,
2903 void *pvUser)
2904{
2905 size_t cch;
2906 PGMRAMRANGE const *pRam = (PGMRAMRANGE const *)pvValue;
2907 if (VALID_PTR(pRam))
2908 {
2909 char szTmp[80];
2910 cch = RTStrPrintf(szTmp, sizeof(szTmp), "%RGp-%RGp", pRam->GCPhys, pRam->GCPhysLast);
2911 cch = pfnOutput(pvArgOutput, szTmp, cch);
2912 }
2913 else
2914 cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmramrange-ptr>"));
2915 NOREF(pszType); NOREF(cchWidth); NOREF(cchPrecision); NOREF(pvUser); NOREF(fFlags);
2916 return cch;
2917}
2918
2919/** Format type handlers to be registered/deregistered. */
2920static const struct
2921{
2922 char szType[24];
2923 PFNRTSTRFORMATTYPE pfnHandler;
2924} g_aPgmFormatTypes[] =
2925{
2926 { "pgmpage", pgmFormatTypeHandlerPage },
2927 { "pgmramrange", pgmFormatTypeHandlerRamRange }
2928};
2929
2930#endif /* !IN_R0 || LOG_ENABLED */
2931
2932/**
2933 * Registers the global string format types.
2934 *
2935 * This should be called at module load time or in some other manner that ensures
2936 * that it's called exactly once.
2937 *
2938 * @returns IPRT status code on RTStrFormatTypeRegister failure.
2939 */
2940VMMDECL(int) PGMRegisterStringFormatTypes(void)
2941{
2942#if !defined(IN_R0) || defined(LOG_ENABLED)
2943 int rc = VINF_SUCCESS;
2944 unsigned i;
2945 for (i = 0; RT_SUCCESS(rc) && i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
2946 {
2947 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
2948# ifdef IN_RING0
2949 if (rc == VERR_ALREADY_EXISTS)
2950 {
2951 /* in case of cleanup failure in ring-0 */
2952 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2953 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
2954 }
2955# endif
2956 }
2957 if (RT_FAILURE(rc))
2958 while (i-- > 0)
2959 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2960
2961 return rc;
2962#else
2963 return VINF_SUCCESS;
2964#endif
2965}
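
/*
 * Illustrative sketch: once registered, the types are consumed through
 * IPRT's %R[...] custom format extension in log and assertion strings.
 *
 *     Log(("%RGp: %R[pgmpage] in %R[pgmramrange]\n", GCPhys, pPage, pRam));
 */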
2966
2967
2968/**
2969 * Deregisters the global string format types.
2970 *
2971 * This should be called at module unload time or in some other manner that
2972 * ensures that it's called exactly once.
2973 */
2974VMMDECL(void) PGMDeregisterStringFormatTypes(void)
2975{
2976#if !defined(IN_R0) || defined(LOG_ENABLED)
2977 for (unsigned i = 0; i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
2978 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2979#endif
2980}
2981
2982#ifdef VBOX_STRICT
2983
2984/**
2985 * Asserts that there are no mapping conflicts.
2986 *
2987 * @returns Number of conflicts.
2988 * @param pVM The cross context VM structure.
2989 */
2990VMMDECL(unsigned) PGMAssertNoMappingConflicts(PVM pVM)
2991{
2992 unsigned cErrors = 0;
2993
2994 /* Only applies to raw mode -> 1 VCPU */
2995 Assert(pVM->cCpus == 1);
2996 PVMCPU pVCpu = &pVM->aCpus[0];
2997
2998 /*
2999 * Check for mapping conflicts.
3000 */
3001 for (PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
3002 pMapping;
3003 pMapping = pMapping->CTX_SUFF(pNext))
3004 {
3005 /** @todo This is slow and should be optimized, but since it's just assertions I don't care now. */
3006 for (RTGCPTR GCPtr = pMapping->GCPtr;
3007 GCPtr <= pMapping->GCPtrLast;
3008 GCPtr += PAGE_SIZE)
3009 {
3010 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)GCPtr, NULL, NULL);
3011 if (rc != VERR_PAGE_TABLE_NOT_PRESENT)
3012 {
3013 AssertMsgFailed(("Conflict at %RGv with %s\n", GCPtr, R3STRING(pMapping->pszDesc)));
3014 cErrors++;
3015 break;
3016 }
3017 }
3018 }
3019
3020 return cErrors;
3021}
3022
3023
3024/**
3025 * Asserts that everything related to the guest CR3 is correctly shadowed.
3026 *
3027 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
3028 * and assert the correctness of the guest CR3 mapping before asserting that the
3029 * shadow page tables are in sync with the guest page tables.
3030 *
3031 * @returns Number of conflicts.
3032 * @param pVM The cross context VM structure.
3033 * @param pVCpu The cross context virtual CPU structure.
3034 * @param cr3 The current guest CR3 register value.
3035 * @param cr4 The current guest CR4 register value.
3036 */
3037VMMDECL(unsigned) PGMAssertCR3(PVM pVM, PVMCPU pVCpu, uint64_t cr3, uint64_t cr4)
3038{
3039 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
3040 pgmLock(pVM);
3041 unsigned cErrors = PGM_BTH_PFN(AssertCR3, pVCpu)(pVCpu, cr3, cr4, 0, ~(RTGCPTR)0);
3042 pgmUnlock(pVM);
3043 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
3044 return cErrors;
3045}
3046
3047#endif /* VBOX_STRICT */