VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp@ 73125

Last change on this file since 73125 was 73125, checked in by vboxsync, 7 years ago

pgmGstPtWalkNext: Incorrect assertion.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 100.6 KB
1/* $Id: PGMAll.cpp 73125 2018-07-13 14:53:03Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor - All context code.
4 */
5
6/*
7 * Copyright (C) 2006-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_PGM
23#include <VBox/vmm/pgm.h>
24#include <VBox/vmm/cpum.h>
25#include <VBox/vmm/selm.h>
26#include <VBox/vmm/iem.h>
27#include <VBox/vmm/iom.h>
28#include <VBox/sup.h>
29#include <VBox/vmm/mm.h>
30#include <VBox/vmm/stam.h>
31#include <VBox/vmm/csam.h>
32#include <VBox/vmm/patm.h>
33#include <VBox/vmm/trpm.h>
34#ifdef VBOX_WITH_REM
35# include <VBox/vmm/rem.h>
36#endif
37#include <VBox/vmm/em.h>
38#include <VBox/vmm/hm.h>
39#include <VBox/vmm/hm_vmx.h>
40#include "PGMInternal.h"
41#include <VBox/vmm/vm.h>
42#include "PGMInline.h"
43#include <iprt/assert.h>
44#include <iprt/asm-amd64-x86.h>
45#include <iprt/string.h>
46#include <VBox/log.h>
47#include <VBox/param.h>
48#include <VBox/err.h>
49
50
51/*********************************************************************************************************************************
52* Structures and Typedefs *
53*********************************************************************************************************************************/
54/**
55 * State structure for PGM_GST_NAME(HandlerVirtualUpdate) that's
56 * passed to PGM_GST_NAME(VirtHandlerUpdateOne) during enumeration.
57 */
58typedef struct PGMHVUSTATE
59{
60 /** Pointer to the VM. */
61 PVM pVM;
62 /** Pointer to the VMCPU. */
63 PVMCPU pVCpu;
64 /** The todo flags. */
65 RTUINT fTodo;
66 /** The CR4 register value. */
67 uint32_t cr4;
68} PGMHVUSTATE, *PPGMHVUSTATE;
69
70
71/*********************************************************************************************************************************
72* Internal Functions *
73*********************************************************************************************************************************/
74DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
75DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPU pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde);
76#ifndef IN_RC
77static int pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD);
78static int pgmShwGetEPTPDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD);
79#endif
80
81
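/*
 * The includes below instantiate the shadow, guest and combined (both) mode
 * template code (PGMAllShw.h, PGMAllGst.h, PGMAllBth.h) once for every
 * shadow/guest paging mode combination.  The PGM_SHW_NAME, PGM_GST_NAME and
 * PGM_BTH_NAME macros mangle the function names so that each combination
 * gets its own set of mode specific workers.
 */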
82/*
83 * Shadow - 32-bit mode
84 */
85#define PGM_SHW_TYPE PGM_TYPE_32BIT
86#define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
87#include "PGMAllShw.h"
88
89/* Guest - real mode */
90#define PGM_GST_TYPE PGM_TYPE_REAL
91#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
92#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
93#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
94#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
95#include "PGMGstDefs.h"
96#include "PGMAllGst.h"
97#include "PGMAllBth.h"
98#undef BTH_PGMPOOLKIND_PT_FOR_PT
99#undef BTH_PGMPOOLKIND_ROOT
100#undef PGM_BTH_NAME
101#undef PGM_GST_TYPE
102#undef PGM_GST_NAME
103
104/* Guest - protected mode */
105#define PGM_GST_TYPE PGM_TYPE_PROT
106#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
107#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
108#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
109#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
110#include "PGMGstDefs.h"
111#include "PGMAllGst.h"
112#include "PGMAllBth.h"
113#undef BTH_PGMPOOLKIND_PT_FOR_PT
114#undef BTH_PGMPOOLKIND_ROOT
115#undef PGM_BTH_NAME
116#undef PGM_GST_TYPE
117#undef PGM_GST_NAME
118
119/* Guest - 32-bit mode */
120#define PGM_GST_TYPE PGM_TYPE_32BIT
121#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
122#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
123#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
124#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
125#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD
126#include "PGMGstDefs.h"
127#include "PGMAllGst.h"
128#include "PGMAllBth.h"
129#undef BTH_PGMPOOLKIND_PT_FOR_BIG
130#undef BTH_PGMPOOLKIND_PT_FOR_PT
131#undef BTH_PGMPOOLKIND_ROOT
132#undef PGM_BTH_NAME
133#undef PGM_GST_TYPE
134#undef PGM_GST_NAME
135
136#undef PGM_SHW_TYPE
137#undef PGM_SHW_NAME
138
139
140/*
141 * Shadow - PAE mode
142 */
143#define PGM_SHW_TYPE PGM_TYPE_PAE
144#define PGM_SHW_NAME(name) PGM_SHW_NAME_PAE(name)
145#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
146#include "PGMAllShw.h"
147
148/* Guest - real mode */
149#define PGM_GST_TYPE PGM_TYPE_REAL
150#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
151#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
152#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
153#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
154#include "PGMGstDefs.h"
155#include "PGMAllBth.h"
156#undef BTH_PGMPOOLKIND_PT_FOR_PT
157#undef BTH_PGMPOOLKIND_ROOT
158#undef PGM_BTH_NAME
159#undef PGM_GST_TYPE
160#undef PGM_GST_NAME
161
162/* Guest - protected mode */
163#define PGM_GST_TYPE PGM_TYPE_PROT
164#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
165#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
166#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
167#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
168#include "PGMGstDefs.h"
169#include "PGMAllBth.h"
170#undef BTH_PGMPOOLKIND_PT_FOR_PT
171#undef BTH_PGMPOOLKIND_ROOT
172#undef PGM_BTH_NAME
173#undef PGM_GST_TYPE
174#undef PGM_GST_NAME
175
176/* Guest - 32-bit mode */
177#define PGM_GST_TYPE PGM_TYPE_32BIT
178#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
179#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
180#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
181#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
182#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_FOR_32BIT
183#include "PGMGstDefs.h"
184#include "PGMAllBth.h"
185#undef BTH_PGMPOOLKIND_PT_FOR_BIG
186#undef BTH_PGMPOOLKIND_PT_FOR_PT
187#undef BTH_PGMPOOLKIND_ROOT
188#undef PGM_BTH_NAME
189#undef PGM_GST_TYPE
190#undef PGM_GST_NAME
191
192
193/* Guest - PAE mode */
194#define PGM_GST_TYPE PGM_TYPE_PAE
195#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
196#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
197#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
198#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
199#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT
200#include "PGMGstDefs.h"
201#include "PGMAllGst.h"
202#include "PGMAllBth.h"
203#undef BTH_PGMPOOLKIND_PT_FOR_BIG
204#undef BTH_PGMPOOLKIND_PT_FOR_PT
205#undef BTH_PGMPOOLKIND_ROOT
206#undef PGM_BTH_NAME
207#undef PGM_GST_TYPE
208#undef PGM_GST_NAME
209
210#undef PGM_SHW_TYPE
211#undef PGM_SHW_NAME
212
213
214#ifndef IN_RC /* AMD64 implies VT-x/AMD-V */
215/*
216 * Shadow - AMD64 mode
217 */
218# define PGM_SHW_TYPE PGM_TYPE_AMD64
219# define PGM_SHW_NAME(name) PGM_SHW_NAME_AMD64(name)
220# include "PGMAllShw.h"
221
222/* Guest - protected mode (only used for AMD-V nested paging in 64-bit mode) */
223# define PGM_GST_TYPE PGM_TYPE_PROT
224# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
225# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
226# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
227# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PD_PHYS
228# include "PGMGstDefs.h"
229# include "PGMAllBth.h"
230# undef BTH_PGMPOOLKIND_PT_FOR_PT
231# undef BTH_PGMPOOLKIND_ROOT
232# undef PGM_BTH_NAME
233# undef PGM_GST_TYPE
234# undef PGM_GST_NAME
235
236# ifdef VBOX_WITH_64_BITS_GUESTS
237/* Guest - AMD64 mode */
238# define PGM_GST_TYPE PGM_TYPE_AMD64
239# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
240# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
241# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
242# define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
243# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_64BIT_PML4
244# include "PGMGstDefs.h"
245# include "PGMAllGst.h"
246# include "PGMAllBth.h"
247# undef BTH_PGMPOOLKIND_PT_FOR_BIG
248# undef BTH_PGMPOOLKIND_PT_FOR_PT
249# undef BTH_PGMPOOLKIND_ROOT
250# undef PGM_BTH_NAME
251# undef PGM_GST_TYPE
252# undef PGM_GST_NAME
253# endif /* VBOX_WITH_64_BITS_GUESTS */
254
255# undef PGM_SHW_TYPE
256# undef PGM_SHW_NAME
257
258
259/*
260 * Shadow - Nested paging mode
261 */
262# define PGM_SHW_TYPE PGM_TYPE_NESTED
263# define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED(name)
264# include "PGMAllShw.h"
265
266/* Guest - real mode */
267# define PGM_GST_TYPE PGM_TYPE_REAL
268# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
269# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_REAL(name)
270# include "PGMGstDefs.h"
271# include "PGMAllBth.h"
272# undef PGM_BTH_NAME
273# undef PGM_GST_TYPE
274# undef PGM_GST_NAME
275
276/* Guest - protected mode */
277# define PGM_GST_TYPE PGM_TYPE_PROT
278# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
279# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PROT(name)
280# include "PGMGstDefs.h"
281# include "PGMAllBth.h"
282# undef PGM_BTH_NAME
283# undef PGM_GST_TYPE
284# undef PGM_GST_NAME
285
286/* Guest - 32-bit mode */
287# define PGM_GST_TYPE PGM_TYPE_32BIT
288# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
289# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT(name)
290# include "PGMGstDefs.h"
291# include "PGMAllBth.h"
292# undef PGM_BTH_NAME
293# undef PGM_GST_TYPE
294# undef PGM_GST_NAME
295
296/* Guest - PAE mode */
297# define PGM_GST_TYPE PGM_TYPE_PAE
298# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
299# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE(name)
300# include "PGMGstDefs.h"
301# include "PGMAllBth.h"
302# undef PGM_BTH_NAME
303# undef PGM_GST_TYPE
304# undef PGM_GST_NAME
305
306# ifdef VBOX_WITH_64_BITS_GUESTS
307/* Guest - AMD64 mode */
308# define PGM_GST_TYPE PGM_TYPE_AMD64
309# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
310# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64(name)
311# include "PGMGstDefs.h"
312# include "PGMAllBth.h"
313# undef PGM_BTH_NAME
314# undef PGM_GST_TYPE
315# undef PGM_GST_NAME
316# endif /* VBOX_WITH_64_BITS_GUESTS */
317
318# undef PGM_SHW_TYPE
319# undef PGM_SHW_NAME
320
321
322/*
323 * Shadow - EPT
324 */
325# define PGM_SHW_TYPE PGM_TYPE_EPT
326# define PGM_SHW_NAME(name) PGM_SHW_NAME_EPT(name)
327# include "PGMAllShw.h"
328
329/* Guest - real mode */
330# define PGM_GST_TYPE PGM_TYPE_REAL
331# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
332# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_REAL(name)
333# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
334# include "PGMGstDefs.h"
335# include "PGMAllBth.h"
336# undef BTH_PGMPOOLKIND_PT_FOR_PT
337# undef PGM_BTH_NAME
338# undef PGM_GST_TYPE
339# undef PGM_GST_NAME
340
341/* Guest - protected mode */
342# define PGM_GST_TYPE PGM_TYPE_PROT
343# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
344# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
345# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
346# include "PGMGstDefs.h"
347# include "PGMAllBth.h"
348# undef BTH_PGMPOOLKIND_PT_FOR_PT
349# undef PGM_BTH_NAME
350# undef PGM_GST_TYPE
351# undef PGM_GST_NAME
352
353/* Guest - 32-bit mode */
354# define PGM_GST_TYPE PGM_TYPE_32BIT
355# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
356# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_32BIT(name)
357# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
358# include "PGMGstDefs.h"
359# include "PGMAllBth.h"
360# undef BTH_PGMPOOLKIND_PT_FOR_PT
361# undef PGM_BTH_NAME
362# undef PGM_GST_TYPE
363# undef PGM_GST_NAME
364
365/* Guest - PAE mode */
366# define PGM_GST_TYPE PGM_TYPE_PAE
367# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
368# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PAE(name)
369# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
370# include "PGMGstDefs.h"
371# include "PGMAllBth.h"
372# undef BTH_PGMPOOLKIND_PT_FOR_PT
373# undef PGM_BTH_NAME
374# undef PGM_GST_TYPE
375# undef PGM_GST_NAME
376
377# ifdef VBOX_WITH_64_BITS_GUESTS
378/* Guest - AMD64 mode */
379# define PGM_GST_TYPE PGM_TYPE_AMD64
380# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
381# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_AMD64(name)
382# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
383# include "PGMGstDefs.h"
384# include "PGMAllBth.h"
385# undef BTH_PGMPOOLKIND_PT_FOR_PT
386# undef PGM_BTH_NAME
387# undef PGM_GST_TYPE
388# undef PGM_GST_NAME
389# endif /* VBOX_WITH_64_BITS_GUESTS */
390
391# undef PGM_SHW_TYPE
392# undef PGM_SHW_NAME
393
394#endif /* !IN_RC */
395
396
397#ifndef IN_RING3
398/**
399 * #PF Handler.
400 *
401 * @returns VBox status code (appropriate for trap handling and GC return).
402 * @param pVCpu The cross context virtual CPU structure.
403 * @param uErr The trap error code.
404 * @param pRegFrame Trap register frame.
405 * @param pvFault The fault address.
406 */
407VMMDECL(int) PGMTrap0eHandler(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
408{
409 PVM pVM = pVCpu->CTX_SUFF(pVM);
410
411 Log(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv eip=%04x:%RGv cr3=%RGp\n", uErr, pvFault, pRegFrame->cs.Sel, (RTGCPTR)pRegFrame->rip, (RTGCPHYS)CPUMGetGuestCR3(pVCpu)));
412 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0e, a);
413 STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );
414
415
416#ifdef VBOX_WITH_STATISTICS
417 /*
418 * Error code stats.
419 */
420 if (uErr & X86_TRAP_PF_US)
421 {
422 if (!(uErr & X86_TRAP_PF_P))
423 {
424 if (uErr & X86_TRAP_PF_RW)
425 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentWrite);
426 else
427 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentRead);
428 }
429 else if (uErr & X86_TRAP_PF_RW)
430 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSWrite);
431 else if (uErr & X86_TRAP_PF_RSVD)
432 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSReserved);
433 else if (uErr & X86_TRAP_PF_ID)
434 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNXE);
435 else
436 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSRead);
437 }
438 else
439 { /* Supervisor */
440 if (!(uErr & X86_TRAP_PF_P))
441 {
442 if (uErr & X86_TRAP_PF_RW)
443 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentWrite);
444 else
445 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentRead);
446 }
447 else if (uErr & X86_TRAP_PF_RW)
448 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVWrite);
449 else if (uErr & X86_TRAP_PF_ID)
450 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSNXE);
451 else if (uErr & X86_TRAP_PF_RSVD)
452 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVReserved);
453 }
454#endif /* VBOX_WITH_STATISTICS */
455
456 /*
457 * Call the worker.
458 */
459 bool fLockTaken = false;
460 int rc = PGM_BTH_PFN(Trap0eHandler, pVCpu)(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken);
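 /* The worker signals via fLockTaken whether it returned with the PGM lock held; release it here if so. */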
461 if (fLockTaken)
462 {
463 PGM_LOCK_ASSERT_OWNER(pVM);
464 pgmUnlock(pVM);
465 }
466 LogFlow(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv rc=%Rrc\n", uErr, pvFault, rc));
467
468 /*
469 * Return code tweaks.
470 */
471 if (rc != VINF_SUCCESS)
472 {
473 if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
474 rc = VINF_SUCCESS;
475
476# ifdef IN_RING0
477 /* Note: hack alert for difficult to reproduce problem. */
478 if ( rc == VERR_PAGE_NOT_PRESENT /* SMP only ; disassembly might fail. */
479 || rc == VERR_PAGE_TABLE_NOT_PRESENT /* seen with UNI & SMP */
480 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT /* seen with SMP */
481 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT) /* precaution */
482 {
483 Log(("WARNING: Unexpected VERR_PAGE_TABLE_NOT_PRESENT (%d) for page fault at %RGv error code %x (rip=%RGv)\n", rc, pvFault, uErr, pRegFrame->rip));
484 /* Some kind of inconsistency in the SMP case; it's safe to just execute the instruction again; not sure about single VCPU VMs though. */
485 rc = VINF_SUCCESS;
486 }
487# endif
488 }
489
490 STAM_STATS({ if (rc == VINF_EM_RAW_GUEST_TRAP) STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eGuestPF); });
491 STAM_STATS({ if (!pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution))
492 pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2Misc; });
493 STAM_PROFILE_STOP_EX(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0e, pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution), a);
494 return rc;
495}
496#endif /* !IN_RING3 */
497
498
499/**
500 * Prefetch a page
501 *
502 * Typically used to sync commonly used pages before entering raw mode
503 * after a CR3 reload.
504 *
505 * @returns VBox status code suitable for scheduling.
506 * @retval VINF_SUCCESS on success.
507 * @retval VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
508 * @param pVCpu The cross context virtual CPU structure.
509 * @param GCPtrPage Page to invalidate.
510 */
511VMMDECL(int) PGMPrefetchPage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
512{
513 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Prefetch), a);
514 int rc = PGM_BTH_PFN(PrefetchPage, pVCpu)(pVCpu, GCPtrPage);
515 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,Prefetch), a);
516 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
517 return rc;
518}
519
520
521/**
522 * Gets the mapping corresponding to the specified address (if any).
523 *
524 * @returns Pointer to the mapping.
525 * @returns NULL if not found.
526 *
527 * @param pVM The cross context VM structure.
528 * @param GCPtr The guest context pointer.
529 */
530PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr)
531{
532 PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
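 /* The mapping list is sorted by address, so we can stop as soon as we have passed GCPtr. */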
533 while (pMapping)
534 {
535 if ((uintptr_t)GCPtr < (uintptr_t)pMapping->GCPtr)
536 break;
537 if ((uintptr_t)GCPtr - (uintptr_t)pMapping->GCPtr < pMapping->cb)
538 return pMapping;
539 pMapping = pMapping->CTX_SUFF(pNext);
540 }
541 return NULL;
542}
543
544
545/**
546 * Verifies a range of pages for read or write access
547 *
548 * Only checks the guest's page tables
549 *
550 * @returns VBox status code.
551 * @param pVCpu The cross context virtual CPU structure.
552 * @param Addr Guest virtual address to check
553 * @param cbSize Access size
554 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
555 * @remarks Currently not in use.
556 */
557VMMDECL(int) PGMIsValidAccess(PVMCPU pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
558{
559 /*
560 * Validate input.
561 */
562 if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
563 {
564 AssertMsgFailed(("PGMIsValidAccess: invalid access type %08x\n", fAccess));
565 return VERR_INVALID_PARAMETER;
566 }
567
568 uint64_t fPage;
569 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)Addr, &fPage, NULL);
570 if (RT_FAILURE(rc))
571 {
572 Log(("PGMIsValidAccess: access violation for %RGv rc=%d\n", Addr, rc));
573 return VINF_EM_RAW_GUEST_TRAP;
574 }
575
576 /*
577 * Check if the access would cause a page fault
578 *
579 * Note that hypervisor page directories are not present in the guest's tables, so this check
580 * is sufficient.
581 */
582 bool fWrite = !!(fAccess & X86_PTE_RW);
583 bool fUser = !!(fAccess & X86_PTE_US);
584 if ( !(fPage & X86_PTE_P)
585 || (fWrite && !(fPage & X86_PTE_RW))
586 || (fUser && !(fPage & X86_PTE_US)) )
587 {
588 Log(("PGMIsValidAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPage, fWrite, fUser));
589 return VINF_EM_RAW_GUEST_TRAP;
590 }
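 /* If the range crosses a page boundary, verify the remaining page(s) as well. */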
591 if ( RT_SUCCESS(rc)
592 && PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize))
593 return PGMIsValidAccess(pVCpu, Addr + PAGE_SIZE, (cbSize > PAGE_SIZE) ? cbSize - PAGE_SIZE : 1, fAccess);
594 return rc;
595}
596
597
598/**
599 * Verifies a range of pages for read or write access
600 *
601 * Supports handling of pages marked for dirty bit tracking and CSAM
602 *
603 * @returns VBox status code.
604 * @param pVCpu The cross context virtual CPU structure.
605 * @param Addr Guest virtual address to check
606 * @param cbSize Access size
607 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
608 */
609VMMDECL(int) PGMVerifyAccess(PVMCPU pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
610{
611 PVM pVM = pVCpu->CTX_SUFF(pVM);
612
613 AssertMsg(!(fAccess & ~(X86_PTE_US | X86_PTE_RW)), ("PGMVerifyAccess: invalid access type %08x\n", fAccess));
614
615 /*
616 * Get going.
617 */
618 uint64_t fPageGst;
619 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)Addr, &fPageGst, NULL);
620 if (RT_FAILURE(rc))
621 {
622 Log(("PGMVerifyAccess: access violation for %RGv rc=%d\n", Addr, rc));
623 return VINF_EM_RAW_GUEST_TRAP;
624 }
625
626 /*
627 * Check if the access would cause a page fault
628 *
629 * Note that hypervisor page directories are not present in the guest's tables, so this check
630 * is sufficient.
631 */
632 const bool fWrite = !!(fAccess & X86_PTE_RW);
633 const bool fUser = !!(fAccess & X86_PTE_US);
634 if ( !(fPageGst & X86_PTE_P)
635 || (fWrite && !(fPageGst & X86_PTE_RW))
636 || (fUser && !(fPageGst & X86_PTE_US)) )
637 {
638 Log(("PGMVerifyAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPageGst, fWrite, fUser));
639 return VINF_EM_RAW_GUEST_TRAP;
640 }
641
642 if (!pVM->pgm.s.fNestedPaging)
643 {
644 /*
645 * Next step is to verify if we protected this page for dirty bit tracking or for CSAM scanning
646 */
647 rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, NULL, NULL);
648 if ( rc == VERR_PAGE_NOT_PRESENT
649 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
650 {
651 /*
652 * Page is not present in our page tables.
653 * Try to sync it!
654 */
655 Assert(X86_TRAP_PF_RW == X86_PTE_RW && X86_TRAP_PF_US == X86_PTE_US);
656 uint32_t uErr = fAccess & (X86_TRAP_PF_RW | X86_TRAP_PF_US);
657 rc = PGM_BTH_PFN(VerifyAccessSyncPage, pVCpu)(pVCpu, Addr, fPageGst, uErr);
658 if (rc != VINF_SUCCESS)
659 return rc;
660 }
661 else
662 AssertMsg(rc == VINF_SUCCESS, ("PGMShwGetPage %RGv failed with %Rrc\n", Addr, rc));
663 }
664
665#if 0 /* def VBOX_STRICT; triggers too often now */
666 /*
667 * This check is a bit paranoid, but useful.
668 */
669 /* Note! This will assert when writing to monitored pages (a bit annoying actually). */
670 uint64_t fPageShw;
671 rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, &fPageShw, NULL);
672 if ( (rc == VERR_PAGE_NOT_PRESENT || RT_FAILURE(rc))
673 || (fWrite && !(fPageShw & X86_PTE_RW))
674 || (fUser && !(fPageShw & X86_PTE_US)) )
675 {
676 AssertMsgFailed(("Unexpected access violation for %RGv! rc=%Rrc write=%d user=%d\n",
677 Addr, rc, fWrite && !(fPageShw & X86_PTE_RW), fUser && !(fPageShw & X86_PTE_US)));
678 return VINF_EM_RAW_GUEST_TRAP;
679 }
680#endif
681
682 if ( RT_SUCCESS(rc)
683 && ( PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize - 1)
684 || Addr + cbSize < Addr))
685 {
686 /* Don't recursively call PGMVerifyAccess as we might run out of stack. */
687 for (;;)
688 {
689 Addr += PAGE_SIZE;
690 if (cbSize > PAGE_SIZE)
691 cbSize -= PAGE_SIZE;
692 else
693 cbSize = 1;
694 rc = PGMVerifyAccess(pVCpu, Addr, 1, fAccess);
695 if (rc != VINF_SUCCESS)
696 break;
697 if (PAGE_ADDRESS(Addr) == PAGE_ADDRESS(Addr + cbSize - 1))
698 break;
699 }
700 }
701 return rc;
702}
703
704
705/**
706 * Emulation of the invlpg instruction (HC only actually).
707 *
708 * @returns Strict VBox status code, special care required.
709 * @retval VINF_PGM_SYNC_CR3 - handled.
710 * @retval VINF_EM_RAW_EMULATE_INSTR - not handled (RC only).
711 * @retval VERR_REM_FLUSHED_PAGES_OVERFLOW - not handled.
712 *
713 * @param pVCpu The cross context virtual CPU structure.
714 * @param GCPtrPage Page to invalidate.
715 *
716 * @remark ASSUMES the page table entry or page directory is valid. Fairly
717 * safe, but there could be edge cases!
718 *
719 * @todo Flush page or page directory only if necessary!
720 * @todo VBOXSTRICTRC
721 */
722VMMDECL(int) PGMInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
723{
724 PVM pVM = pVCpu->CTX_SUFF(pVM);
725 int rc;
726 Log3(("PGMInvalidatePage: GCPtrPage=%RGv\n", GCPtrPage));
727
728#if !defined(IN_RING3) && defined(VBOX_WITH_REM)
729 /*
730 * Notify the recompiler so it can record this instruction.
731 */
732 REMNotifyInvalidatePage(pVM, GCPtrPage);
733#endif /* !IN_RING3 */
734 IEMTlbInvalidatePage(pVCpu, GCPtrPage);
735
736
737#ifdef IN_RC
738 /*
739 * Check for conflicts and pending CR3 monitoring updates.
740 */
741 if (pgmMapAreMappingsFloating(pVM))
742 {
743 if ( pgmGetMapping(pVM, GCPtrPage)
744 && PGMGstGetPage(pVCpu, GCPtrPage, NULL, NULL) != VERR_PAGE_TABLE_NOT_PRESENT)
745 {
746 LogFlow(("PGMGCInvalidatePage: Conflict!\n"));
747 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
748 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRCInvlPgConflict);
749 return VINF_PGM_SYNC_CR3;
750 }
751
752 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
753 {
754 LogFlow(("PGMGCInvalidatePage: PGM_SYNC_MONITOR_CR3 -> reinterpret instruction in R3\n"));
755 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRCInvlPgSyncMonCR3);
756 return VINF_EM_RAW_EMULATE_INSTR;
757 }
758 }
759#endif /* IN_RC */
760
761 /*
762 * Call paging mode specific worker.
763 */
764 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InvalidatePage), a);
765 pgmLock(pVM);
766 rc = PGM_BTH_PFN(InvalidatePage, pVCpu)(pVCpu, GCPtrPage);
767 pgmUnlock(pVM);
768 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InvalidatePage), a);
769
770#ifdef IN_RING3
771 /*
772 * Check if we have a pending update of the CR3 monitoring.
773 */
774 if ( RT_SUCCESS(rc)
775 && (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3))
776 {
777 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
778 Assert(!pVM->pgm.s.fMappingsFixed); Assert(pgmMapAreMappingsEnabled(pVM));
779 }
780
781# ifdef VBOX_WITH_RAW_MODE
782 /*
783 * Inform CSAM about the flush
784 *
785 * Note: This is to check if monitored pages have been changed; when we implement
786 * callbacks for virtual handlers, this is no longer required.
787 */
788 CSAMR3FlushPage(pVM, GCPtrPage);
789# endif
790#endif /* IN_RING3 */
791
792 /* Ignore all irrelevant error codes. */
793 if ( rc == VERR_PAGE_NOT_PRESENT
794 || rc == VERR_PAGE_TABLE_NOT_PRESENT
795 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT
796 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)
797 rc = VINF_SUCCESS;
798
799 return rc;
800}
801
802
803/**
804 * Executes an instruction using the interpreter.
805 *
806 * @returns VBox status code (appropriate for trap handling and GC return).
807 * @param pVM The cross context VM structure.
808 * @param pVCpu The cross context virtual CPU structure.
809 * @param pRegFrame Register frame.
810 * @param pvFault Fault address.
811 */
812VMMDECL(VBOXSTRICTRC) PGMInterpretInstruction(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
813{
814 NOREF(pVM);
815 VBOXSTRICTRC rc = EMInterpretInstruction(pVCpu, pRegFrame, pvFault);
816 if (rc == VERR_EM_INTERPRETER)
817 rc = VINF_EM_RAW_EMULATE_INSTR;
818 if (rc != VINF_SUCCESS)
819 Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%RGv)\n", VBOXSTRICTRC_VAL(rc), pvFault));
820 return rc;
821}
822
823
824/**
825 * Gets effective page information (from the VMM page directory).
826 *
827 * @returns VBox status code.
828 * @param pVCpu The cross context virtual CPU structure.
829 * @param GCPtr Guest Context virtual address of the page.
830 * @param pfFlags Where to store the flags. These are X86_PTE_*.
831 * @param pHCPhys Where to store the HC physical address of the page.
832 * This is page aligned.
833 * @remark You should use PGMMapGetPage() for pages in a mapping.
834 */
835VMMDECL(int) PGMShwGetPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
836{
837 pgmLock(pVCpu->CTX_SUFF(pVM));
838 int rc = PGM_SHW_PFN(GetPage, pVCpu)(pVCpu, GCPtr, pfFlags, pHCPhys);
839 pgmUnlock(pVCpu->CTX_SUFF(pVM));
840 return rc;
841}
842
843
844/**
845 * Modify page flags for a range of pages in the shadow context.
846 *
847 * The existing flags are ANDed with the fMask and ORed with the fFlags.
848 *
849 * @returns VBox status code.
850 * @param pVCpu The cross context virtual CPU structure.
851 * @param GCPtr Virtual address of the first page in the range.
852 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
853 * @param fMask The AND mask - page flags X86_PTE_*.
854 * Be very CAREFUL when ~'ing constants which could be 32-bit!
855 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
856 * @remark You must use PGMMapModifyPage() for pages in a mapping.
857 */
858DECLINLINE(int) pdmShwModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags)
859{
860 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
861 Assert(!(fOpFlags & ~(PGM_MK_PG_IS_MMIO2 | PGM_MK_PG_IS_WRITE_FAULT)));
862
863 GCPtr &= PAGE_BASE_GC_MASK; /** @todo this ain't necessary, right... */
864
865 PVM pVM = pVCpu->CTX_SUFF(pVM);
866 pgmLock(pVM);
867 int rc = PGM_SHW_PFN(ModifyPage, pVCpu)(pVCpu, GCPtr, PAGE_SIZE, fFlags, fMask, fOpFlags);
868 pgmUnlock(pVM);
869 return rc;
870}
871
872
873/**
874 * Changes the page flags for a single page in the shadow page tables so as to
875 * make it read-only.
876 *
877 * @returns VBox status code.
878 * @param pVCpu The cross context virtual CPU structure.
879 * @param GCPtr Virtual address of the first page in the range.
880 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
881 */
882VMMDECL(int) PGMShwMakePageReadonly(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
883{
884 return pdmShwModifyPage(pVCpu, GCPtr, 0, ~(uint64_t)X86_PTE_RW, fOpFlags);
885}
886
887
888/**
889 * Changes the page flags for a single page in the shadow page tables so as to
890 * make it writable.
891 *
892 * The caller must know with 101% certainty that the guest page tables map this
893 * as writable too. This function will deal with shared, zero and write monitored
894 * pages.
895 *
896 * @returns VBox status code.
897 * @param pVCpu The cross context virtual CPU structure.
898 * @param GCPtr Virtual address of the first page in the range.
899 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
900 */
901VMMDECL(int) PGMShwMakePageWritable(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
902{
903 return pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)0, fOpFlags);
904}
905
906
907/**
908 * Changes the page flags for a single page in the shadow page tables so as to
909 * make it not present.
910 *
911 * @returns VBox status code.
912 * @param pVCpu The cross context virtual CPU structure.
913 * @param GCPtr Virtual address of the first page in the range.
914 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
915 */
916VMMDECL(int) PGMShwMakePageNotPresent(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
917{
918 return pdmShwModifyPage(pVCpu, GCPtr, 0, 0, fOpFlags);
919}
920
921
922/**
923 * Changes the page flags for a single page in the shadow page tables so as to
924 * make it supervisor and writable.
925 *
926 * This is for dealing with CR0.WP=0 and read-only user pages.
927 *
928 * @returns VBox status code.
929 * @param pVCpu The cross context virtual CPU structure.
930 * @param GCPtr Virtual address of the first page in the range.
931 * @param fBigPage Whether or not this is a big page. If it is, we have to
932 * change the shadow PDE as well. If it isn't, the caller
933 * has checked that the shadow PDE doesn't need changing.
934 * We ASSUME 4KB pages backing the big page here!
935 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
936 */
937int pgmShwMakePageSupervisorAndWritable(PVMCPU pVCpu, RTGCPTR GCPtr, bool fBigPage, uint32_t fOpFlags)
938{
939 int rc = pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)X86_PTE_US, fOpFlags);
940 if (rc == VINF_SUCCESS && fBigPage)
941 {
942 /* this is a bit ugly... */
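 /* For big pages the PDE is the leaf entry, so its write bit must be set as well. */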
943 switch (pVCpu->pgm.s.enmShadowMode)
944 {
945 case PGMMODE_32_BIT:
946 {
947 PX86PDE pPde = pgmShwGet32BitPDEPtr(pVCpu, GCPtr);
948 AssertReturn(pPde, VERR_INTERNAL_ERROR_3);
949 Log(("pgmShwMakePageSupervisorAndWritable: PDE=%#llx", pPde->u));
950 pPde->n.u1Write = 1;
951 Log(("-> PDE=%#llx (32)\n", pPde->u));
952 break;
953 }
954 case PGMMODE_PAE:
955 case PGMMODE_PAE_NX:
956 {
957 PX86PDEPAE pPde = pgmShwGetPaePDEPtr(pVCpu, GCPtr);
958 AssertReturn(pPde, VERR_INTERNAL_ERROR_3);
959 Log(("pgmShwMakePageSupervisorAndWritable: PDE=%#llx", pPde->u));
960 pPde->n.u1Write = 1;
961 Log(("-> PDE=%#llx (PAE)\n", pPde->u));
962 break;
963 }
964 default:
965 AssertFailedReturn(VERR_INTERNAL_ERROR_4);
966 }
967 }
968 return rc;
969}
970
971
972/**
973 * Gets the shadow page directory for the specified address, PAE.
974 *
975 * @returns VBox status code.
976 * @param pVCpu The cross context virtual CPU structure.
977 * @param GCPtr The address.
978 * @param uGstPdpe Guest PDPT entry. Valid.
979 * @param ppPD Receives address of page directory
980 */
981int pgmShwSyncPaePDPtr(PVMCPU pVCpu, RTGCPTR GCPtr, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
982{
983 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
984 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
985 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
986 PVM pVM = pVCpu->CTX_SUFF(pVM);
987 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
988 PPGMPOOLPAGE pShwPage;
989 int rc;
990
991 PGM_LOCK_ASSERT_OWNER(pVM);
992
993 /* Allocate page directory if not present. */
994 if ( !pPdpe->n.u1Present
995 && !(pPdpe->u & X86_PDPE_PG_MASK))
996 {
997 RTGCPTR64 GCPdPt;
998 PGMPOOLKIND enmKind;
999
1000 if (pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu))
1001 {
1002 /* AMD-V nested paging or real/protected mode without paging. */
1003 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
1004 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
1005 }
1006 else
1007 {
1008 if (CPUMGetGuestCR4(pVCpu) & X86_CR4_PAE)
1009 {
1010 if (!(uGstPdpe & X86_PDPE_P))
1011 {
1012 /* PD not present; guest must reload CR3 to change it.
1013 * No need to monitor anything in this case.
1014 */
1015 Assert(VM_IS_RAW_MODE_ENABLED(pVM));
1016
1017 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1018 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
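 /* Force the present bit so the shadow PDPT entry hooked up below is marked present. */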
1019 uGstPdpe |= X86_PDPE_P;
1020 }
1021 else
1022 {
1023 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1024 enmKind = PGMPOOLKIND_PAE_PD_FOR_PAE_PD;
1025 }
1026 }
1027 else
1028 {
1029 GCPdPt = CPUMGetGuestCR3(pVCpu);
1030 enmKind = (PGMPOOLKIND)(PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD + iPdPt);
1031 }
1032 }
1033
1034 /* Create a reference back to the PDPT by using the index in its shadow page. */
1035 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1036 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPdPt, false /*fLockPage*/,
1037 &pShwPage);
1038 AssertRCReturn(rc, rc);
1039
1040 /* The PD was cached or created; hook it up now. */
1041 pPdpe->u |= pShwPage->Core.Key | (uGstPdpe & (X86_PDPE_P | X86_PDPE_A));
1042
1043# if defined(IN_RC)
1044 /*
1045 * In 32-bit PAE mode we *must* invalidate the TLB when changing a
1046 * PDPT entry; the CPU fetches them only during cr3 load, so any
1047 * non-present PDPT will continue to cause page faults.
1048 */
1049 ASMReloadCR3();
1050# endif
1051 PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdpe);
1052 }
1053 else
1054 {
1055 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
1056 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1057 Assert((pPdpe->u & X86_PDPE_PG_MASK) == pShwPage->Core.Key);
1058
1059 pgmPoolCacheUsed(pPool, pShwPage);
1060 }
1061 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1062 return VINF_SUCCESS;
1063}
1064
1065
1066/**
1067 * Gets the pointer to the shadow page directory entry for an address, PAE.
1068 *
1069 * @returns VBox status code.
1070 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1071 * @param GCPtr The address.
1072 * @param ppShwPde Receives the address of the pgm pool page for the shadow page directory
1073 */
1074DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPU pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde)
1075{
1076 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
1077 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
1078 PVM pVM = pVCpu->CTX_SUFF(pVM);
1079
1080 PGM_LOCK_ASSERT_OWNER(pVM);
1081
1082 AssertReturn(pPdpt, VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT); /* can't happen */
1083 if (!pPdpt->a[iPdPt].n.u1Present)
1084 {
1085 LogFlow(("pgmShwGetPaePoolPagePD: PD %d not present (%RX64)\n", iPdPt, pPdpt->a[iPdPt].u));
1086 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1087 }
1088 AssertMsg(pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK, ("GCPtr=%RGv\n", GCPtr));
1089
1090 /* Fetch the pgm pool shadow descriptor. */
1091 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
1092 AssertReturn(pShwPde, VERR_PGM_POOL_GET_PAGE_FAILED);
1093
1094 *ppShwPde = pShwPde;
1095 return VINF_SUCCESS;
1096}
1097
1098#ifndef IN_RC
1099
1100/**
1101 * Syncs the SHADOW page directory pointer for the specified address.
1102 *
1103 * Allocates backing pages in case the PDPT or PML4 entry is missing.
1104 *
1105 * The caller is responsible for making sure the guest has a valid PD before
1106 * calling this function.
1107 *
1108 * @returns VBox status code.
1109 * @param pVCpu The cross context virtual CPU structure.
1110 * @param GCPtr The address.
1111 * @param uGstPml4e Guest PML4 entry (valid).
1112 * @param uGstPdpe Guest PDPT entry (valid).
1113 * @param ppPD Receives address of page directory
1114 */
1115static int pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
1116{
1117 PVM pVM = pVCpu->CTX_SUFF(pVM);
1118 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1119 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1120 PX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
1121 bool fNestedPagingOrNoGstPaging = pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu);
1122 PPGMPOOLPAGE pShwPage;
1123 int rc;
1124
1125 PGM_LOCK_ASSERT_OWNER(pVM);
1126
1127 /* Allocate page directory pointer table if not present. */
1128 if ( !pPml4e->n.u1Present
1129 && !(pPml4e->u & X86_PML4E_PG_MASK))
1130 {
1131 RTGCPTR64 GCPml4;
1132 PGMPOOLKIND enmKind;
1133
1134 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1135
1136 if (fNestedPagingOrNoGstPaging)
1137 {
1138 /* AMD-V nested paging or real/protected mode without paging */
1139 GCPml4 = (RTGCPTR64)iPml4 << X86_PML4_SHIFT;
1140 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_PHYS;
1141 }
1142 else
1143 {
1144 GCPml4 = uGstPml4e & X86_PML4E_PG_MASK;
1145 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT;
1146 }
1147
1148 /* Create a reference back to the PDPT by using the index in its shadow page. */
1149 rc = pgmPoolAlloc(pVM, GCPml4, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1150 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, false /*fLockPage*/,
1151 &pShwPage);
1152 AssertRCReturn(rc, rc);
1153 }
1154 else
1155 {
1156 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
1157 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1158
1159 pgmPoolCacheUsed(pPool, pShwPage);
1160 }
1161 /* The PDPT was cached or created; hook it up now. */
1162 pPml4e->u |= pShwPage->Core.Key | (uGstPml4e & pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask);
1163
1164 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1165 PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1166 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1167
1168 /* Allocate page directory if not present. */
1169 if ( !pPdpe->n.u1Present
1170 && !(pPdpe->u & X86_PDPE_PG_MASK))
1171 {
1172 RTGCPTR64 GCPdPt;
1173 PGMPOOLKIND enmKind;
1174
1175 if (fNestedPagingOrNoGstPaging)
1176 {
1177 /* AMD-V nested paging or real/protected mode without paging */
1178 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
1179 enmKind = PGMPOOLKIND_64BIT_PD_FOR_PHYS;
1180 }
1181 else
1182 {
1183 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1184 enmKind = PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD;
1185 }
1186
1187 /* Create a reference back to the PDPT by using the index in its shadow page. */
1188 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1189 pShwPage->idx, iPdPt, false /*fLockPage*/,
1190 &pShwPage);
1191 AssertRCReturn(rc, rc);
1192 }
1193 else
1194 {
1195 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
1196 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1197
1198 pgmPoolCacheUsed(pPool, pShwPage);
1199 }
1200 /* The PD was cached or created; hook it up now. */
1201 pPdpe->u |= pShwPage->Core.Key | (uGstPdpe & pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask);
1202
1203 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1204 return VINF_SUCCESS;
1205}
1206
1207
1208/**
1209 * Gets the SHADOW page directory pointer for the specified address (long mode).
1210 *
1211 * @returns VBox status code.
1212 * @param pVCpu The cross context virtual CPU structure.
1213 * @param GCPtr The address.
 * @param ppPml4e Receives address of the PML4 entry. Optional.
1214 * @param ppPdpt Receives address of pdpt
1215 * @param ppPD Receives address of page directory
1216 */
1217DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
1218{
1219 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1220 PCX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
1221
1222 PGM_LOCK_ASSERT_OWNER(pVCpu->CTX_SUFF(pVM));
1223
1224 AssertReturn(pPml4e, VERR_PGM_PML4_MAPPING);
1225 if (ppPml4e)
1226 *ppPml4e = (PX86PML4E)pPml4e;
1227
1228 Log4(("pgmShwGetLongModePDPtr %RGv (%RHv) %RX64\n", GCPtr, pPml4e, pPml4e->u));
1229
1230 if (!pPml4e->n.u1Present)
1231 return VERR_PAGE_MAP_LEVEL4_NOT_PRESENT;
1232
1233 PVM pVM = pVCpu->CTX_SUFF(pVM);
1234 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1235 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
1236 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1237
1238 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1239 PCX86PDPT pPdpt = *ppPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1240 if (!pPdpt->a[iPdPt].n.u1Present)
1241 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1242
1243 pShwPage = pgmPoolGetPage(pPool, pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
1244 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1245
1246 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1247 Log4(("pgmShwGetLongModePDPtr %RGv -> *ppPD=%p PDE=%p/%RX64\n", GCPtr, *ppPD, &(*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK], (*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK].u));
1248 return VINF_SUCCESS;
1249}
1250
1251
1252/**
1253 * Syncs the SHADOW EPT page directory pointer for the specified address. Allocates
1254 * backing pages in case the PDPT or PML4 entry is missing.
1255 *
1256 * @returns VBox status code.
1257 * @param pVCpu The cross context virtual CPU structure.
1258 * @param GCPtr The address.
1259 * @param ppPdpt Receives address of pdpt
1260 * @param ppPD Receives address of page directory
1261 */
1262static int pgmShwGetEPTPDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD)
1263{
1264 PVM pVM = pVCpu->CTX_SUFF(pVM);
1265 const unsigned iPml4 = (GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
1266 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1267 PEPTPML4 pPml4;
1268 PEPTPML4E pPml4e;
1269 PPGMPOOLPAGE pShwPage;
1270 int rc;
1271
1272 Assert(pVM->pgm.s.fNestedPaging);
1273 PGM_LOCK_ASSERT_OWNER(pVM);
1274
1275 pPml4 = (PEPTPML4)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1276 Assert(pPml4);
1277
1278 /* Allocate page directory pointer table if not present. */
1279 pPml4e = &pPml4->a[iPml4];
1280 if ( !pPml4e->n.u1Present
1281 && !(pPml4e->u & EPT_PML4E_PG_MASK))
1282 {
1283 Assert(!(pPml4e->u & EPT_PML4E_PG_MASK));
1284 RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT;
1285
1286 rc = pgmPoolAlloc(pVM, GCPml4, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1287 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, false /*fLockPage*/,
1288 &pShwPage);
1289 AssertRCReturn(rc, rc);
1290 }
1291 else
1292 {
1293 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
1294 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1295
1296 pgmPoolCacheUsed(pPool, pShwPage);
1297 }
1298 /* The PDPT was cached or created; hook it up now and fill with the default value. */
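 /* Intermediate EPT entries are made fully permissive (read/write/execute); per-page restrictions live in the leaf entries. */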
1299 pPml4e->u = pShwPage->Core.Key;
1300 pPml4e->n.u1Present = 1;
1301 pPml4e->n.u1Write = 1;
1302 pPml4e->n.u1Execute = 1;
1303
1304 const unsigned iPdPt = (GCPtr >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
1305 PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1306 PEPTPDPTE pPdpe = &pPdpt->a[iPdPt];
1307
1308 if (ppPdpt)
1309 *ppPdpt = pPdpt;
1310
1311 /* Allocate page directory if not present. */
1312 if ( !pPdpe->n.u1Present
1313 && !(pPdpe->u & EPT_PDPTE_PG_MASK))
1314 {
1315 RTGCPTR64 GCPdPt = (RTGCPTR64)iPdPt << EPT_PDPT_SHIFT;
1316 rc = pgmPoolAlloc(pVM, GCPdPt, PGMPOOLKIND_EPT_PD_FOR_PHYS, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1317 pShwPage->idx, iPdPt, false /*fLockPage*/,
1318 &pShwPage);
1319 AssertRCReturn(rc, rc);
1320 }
1321 else
1322 {
1323 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & EPT_PDPTE_PG_MASK);
1324 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1325
1326 pgmPoolCacheUsed(pPool, pShwPage);
1327 }
1328 /* The PD was cached or created; hook it up now and fill with the default value. */
1329 pPdpe->u = pShwPage->Core.Key;
1330 pPdpe->n.u1Present = 1;
1331 pPdpe->n.u1Write = 1;
1332 pPdpe->n.u1Execute = 1;
1333
1334 *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1335 return VINF_SUCCESS;
1336}
1337
1338#endif /* IN_RC */
1339
1340#ifdef IN_RING0
1341/**
1342 * Synchronizes a range of nested page table entries.
1343 *
1344 * The caller must own the PGM lock.
1345 *
1346 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1347 * @param GCPhys Where to start.
1348 * @param cPages The number of pages whose entries should be synced.
1349 * @param enmShwPagingMode The shadow paging mode (PGMMODE_EPT for VT-x,
1350 * host paging mode for AMD-V).
1351 */
1352int pgmShwSyncNestedPageLocked(PVMCPU pVCpu, RTGCPHYS GCPhys, uint32_t cPages, PGMMODE enmShwPagingMode)
1353{
1354 PGM_LOCK_ASSERT_OWNER(pVCpu->CTX_SUFF(pVM));
1355
1356 int rc;
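 /* The SyncPage templates expect a guest PDE; with nested paging there is none, so a fully permissive dummy PDE is passed instead. */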
1357 switch (enmShwPagingMode)
1358 {
1359 case PGMMODE_32_BIT:
1360 {
1361 X86PDE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1362 rc = PGM_BTH_NAME_32BIT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1363 break;
1364 }
1365
1366 case PGMMODE_PAE:
1367 case PGMMODE_PAE_NX:
1368 {
1369 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1370 rc = PGM_BTH_NAME_PAE_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1371 break;
1372 }
1373
1374 case PGMMODE_AMD64:
1375 case PGMMODE_AMD64_NX:
1376 {
1377 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1378 rc = PGM_BTH_NAME_AMD64_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1379 break;
1380 }
1381
1382 case PGMMODE_EPT:
1383 {
1384 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1385 rc = PGM_BTH_NAME_EPT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1386 break;
1387 }
1388
1389 default:
1390 AssertMsgFailedReturn(("%d\n", enmShwPagingMode), VERR_IPE_NOT_REACHED_DEFAULT_CASE);
1391 }
1392 return rc;
1393}
1394#endif /* IN_RING0 */
1395
1396
1397/**
1398 * Gets effective Guest OS page information.
1399 *
1400 * When GCPtr is in a big page, the function will return as if it was a normal
1401 * 4KB page. If the need for distinguishing between big and normal page becomes
1402 * necessary at a later point, a PGMGstGetPage() will be created for that
1403 * purpose.
1404 *
1405 * @returns VBox status code.
1406 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1407 * @param GCPtr Guest Context virtual address of the page.
1408 * @param pfFlags Where to store the flags. These are X86_PTE_*, even for big pages.
1409 * @param pGCPhys Where to store the GC physical address of the page.
1410 * This is page aligned.
1411 */
1412VMMDECL(int) PGMGstGetPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
1413{
1414 VMCPU_ASSERT_EMT(pVCpu);
1415 return PGM_GST_PFN(GetPage, pVCpu)(pVCpu, GCPtr, pfFlags, pGCPhys);
1416}
1417
1418
1419/**
1420 * Performs a guest page table walk.
1421 *
1422 * The guest should be in paged protect mode or long mode when making a call to
1423 * this function.
1424 *
1425 * @returns VBox status code.
1426 * @retval VINF_SUCCESS on success.
1427 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
1428 * @retval VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
1429 * not valid, except enmType is PGMPTWALKGSTTYPE_INVALID.
1430 *
1431 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1432 * @param GCPtr The guest virtual address to walk by.
1433 * @param pWalk Where to return the walk result. This is valid for some
1434 * error codes as well.
1435 */
1436int pgmGstPtWalk(PVMCPU pVCpu, RTGCPTR GCPtr, PPGMPTWALKGST pWalk)
1437{
1438 VMCPU_ASSERT_EMT(pVCpu);
1439 switch (pVCpu->pgm.s.enmGuestMode)
1440 {
1441 case PGMMODE_32_BIT:
1442 pWalk->enmType = PGMPTWALKGSTTYPE_32BIT;
1443 return PGM_GST_NAME_32BIT(Walk)(pVCpu, GCPtr, &pWalk->u.Legacy);
1444
1445 case PGMMODE_PAE:
1446 case PGMMODE_PAE_NX:
1447 pWalk->enmType = PGMPTWALKGSTTYPE_PAE;
1448 return PGM_GST_NAME_PAE(Walk)(pVCpu, GCPtr, &pWalk->u.Pae);
1449
1450#if !defined(IN_RC)
1451 case PGMMODE_AMD64:
1452 case PGMMODE_AMD64_NX:
1453 pWalk->enmType = PGMPTWALKGSTTYPE_AMD64;
1454 return PGM_GST_NAME_AMD64(Walk)(pVCpu, GCPtr, &pWalk->u.Amd64);
1455#endif
1456
1457 case PGMMODE_REAL:
1458 case PGMMODE_PROTECTED:
1459 pWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
1460 return VERR_PGM_NOT_USED_IN_MODE;
1461
1462#if defined(IN_RC)
1463 case PGMMODE_AMD64:
1464 case PGMMODE_AMD64_NX:
1465#endif
1466 case PGMMODE_NESTED:
1467 case PGMMODE_EPT:
1468 default:
1469 AssertFailed();
1470 pWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
1471 return VERR_PGM_NOT_USED_IN_MODE;
1472 }
1473}
1474
1475
1476/**
1477 * Tries to continue the previous walk.
1478 *
1479 * @note Requires the caller to hold the PGM lock from the first
1480 * pgmGstPtWalk() call to the last pgmGstPtWalkNext() call. Otherwise
1481 * we cannot use the pointers.
1482 *
1483 * @returns VBox status code.
1484 * @retval VINF_SUCCESS on success.
1485 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
1486 * @retval VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
1487 * not valid, except enmType is PGMPTWALKGSTTYPE_INVALID.
1488 *
1489 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1490 * @param GCPtr The guest virtual address to walk by.
1491 * @param pWalk Pointer to the previous walk result and where to return
1492 * the result of this walk. This is valid for some error
1493 * codes as well.
1494 */
1495int pgmGstPtWalkNext(PVMCPU pVCpu, RTGCPTR GCPtr, PPGMPTWALKGST pWalk)
1496{
1497 /*
1498 * We can only handle successful walks.
1499 * We also limit ourselves to the next page.
1500 */
1501 if ( pWalk->u.Core.fSucceeded
1502 && GCPtr - pWalk->u.Core.GCPtr == PAGE_SIZE)
1503 {
1504 Assert(pWalk->u.Core.uLevel == 0);
1505 if (pWalk->enmType == PGMPTWALKGSTTYPE_AMD64)
1506 {
1507 /*
1508 * AMD64
1509 */
1510 if (!pWalk->u.Core.fGigantPage && !pWalk->u.Core.fBigPage)
1511 {
1512 /*
1513 * We fall back to full walk if the PDE table changes, if any
1514 * reserved bits are set, or if the effective page access changes.
1515 */
1516 const uint64_t fPteSame = X86_PTE_P | X86_PTE_RW | X86_PTE_US | X86_PTE_PWT
1517 | X86_PTE_PCD | X86_PTE_A | X86_PTE_PAE_NX;
1518 const uint64_t fPdeSame = X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_PWT
1519 | X86_PDE_PCD | X86_PDE_A | X86_PDE_PAE_NX | X86_PDE_PS;
1520
1521 if ((GCPtr >> X86_PD_PAE_SHIFT) == (pWalk->u.Core.GCPtr >> X86_PD_PAE_SHIFT))
1522 {
1523 if (pWalk->u.Amd64.pPte)
1524 {
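 /* Same page table as the previous walk, so the next PTE is simply the adjacent entry. */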
1525 X86PTEPAE Pte;
1526 Pte.u = pWalk->u.Amd64.pPte[1].u;
1527 if ( (Pte.u & fPteSame) == (pWalk->u.Amd64.Pte.u & fPteSame)
1528 && !(Pte.u & (pVCpu)->pgm.s.fGstAmd64MbzPteMask))
1529 {
1530
1531 pWalk->u.Core.GCPtr = GCPtr;
1532 pWalk->u.Core.GCPhys = Pte.u & X86_PTE_PAE_PG_MASK;
1533 pWalk->u.Amd64.Pte.u = Pte.u;
1534 pWalk->u.Amd64.pPte++;
1535 return VINF_SUCCESS;
1536 }
1537 }
1538 }
1539 else if ((GCPtr >> X86_PDPT_SHIFT) == (pWalk->u.Core.GCPtr >> X86_PDPT_SHIFT))
1540 {
1541 Assert(!((GCPtr >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK)); /* Must be first PT entry. */
1542 if (pWalk->u.Amd64.pPde)
1543 {
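 /* Crossed into the next page directory entry within the same PD; it sits adjacent to the previous one. */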
1544 X86PDEPAE Pde;
1545 Pde.u = pWalk->u.Amd64.pPde[1].u;
1546 if ( (Pde.u & fPdeSame) == (pWalk->u.Amd64.Pde.u & fPdeSame)
1547 && !(Pde.u & (pVCpu)->pgm.s.fGstAmd64MbzPdeMask))
1548 {
1549 /* Get the new PTE and check out the first entry. */
1550 int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, PGM_A20_APPLY(pVCpu, (Pde.u & X86_PDE_PAE_PG_MASK)),
1551 &pWalk->u.Amd64.pPt);
1552 if (RT_SUCCESS(rc))
1553 {
1554 pWalk->u.Amd64.pPte = &pWalk->u.Amd64.pPt->a[0];
1555 X86PTEPAE Pte;
1556 Pte.u = pWalk->u.Amd64.pPte->u;
1557 if ( (Pte.u & fPteSame) == (pWalk->u.Amd64.Pte.u & fPteSame)
1558 && !(Pte.u & (pVCpu)->pgm.s.fGstAmd64MbzPteMask))
1559 {
1560 pWalk->u.Core.GCPtr = GCPtr;
1561 pWalk->u.Core.GCPhys = Pte.u & X86_PTE_PAE_PG_MASK;
1562 pWalk->u.Amd64.Pte.u = Pte.u;
1563 pWalk->u.Amd64.Pde.u = Pde.u;
1564 pWalk->u.Amd64.pPde++;
1565 return VINF_SUCCESS;
1566 }
1567 }
1568 }
1569 }
1570 }
1571 }
1572 else if (!pWalk->u.Core.fGigantPage)
1573 {
1574 if ((GCPtr & X86_PAGE_2M_BASE_MASK) == (pWalk->u.Core.GCPtr & X86_PAGE_2M_BASE_MASK))
1575 {
1576 pWalk->u.Core.GCPtr = GCPtr;
1577 pWalk->u.Core.GCPhys += PAGE_SIZE;
1578 return VINF_SUCCESS;
1579 }
1580 }
1581 else
1582 {
1583 if ((GCPtr & X86_PAGE_1G_BASE_MASK) == (pWalk->u.Core.GCPtr & X86_PAGE_1G_BASE_MASK))
1584 {
1585 pWalk->u.Core.GCPtr = GCPtr;
1586 pWalk->u.Core.GCPhys += PAGE_SIZE;
1587 return VINF_SUCCESS;
1588 }
1589 }
1590 }
1591 }
1592 /* Case we don't handle. Do full walk. */
1593 return pgmGstPtWalk(pVCpu, GCPtr, pWalk);
1594}
1595
1596
1597/**
1598 * Checks if the page is present.
1599 *
1600 * @returns true if the page is present.
1601 * @returns false if the page is not present.
1602 * @param pVCpu The cross context virtual CPU structure.
1603 * @param GCPtr Address within the page.
1604 */
1605VMMDECL(bool) PGMGstIsPagePresent(PVMCPU pVCpu, RTGCPTR GCPtr)
1606{
1607 VMCPU_ASSERT_EMT(pVCpu);
1608 int rc = PGMGstGetPage(pVCpu, GCPtr, NULL, NULL);
1609 return RT_SUCCESS(rc);
1610}
1611
1612
1613/**
1614 * Sets (replaces) the page flags for a range of pages in the guest's tables.
1615 *
1616 * @returns VBox status code.
1617 * @param pVCpu The cross context virtual CPU structure.
1618 * @param GCPtr The address of the first page.
1619 * @param cb The size of the range in bytes.
1620 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
1621 */
1622VMMDECL(int) PGMGstSetPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
1623{
1624 VMCPU_ASSERT_EMT(pVCpu);
1625 return PGMGstModifyPage(pVCpu, GCPtr, cb, fFlags, 0);
1626}
1627
1628
1629/**
1630 * Modify page flags for a range of pages in the guest's tables
1631 *
1632 * The existing flags are ANDed with the fMask and ORed with the fFlags.
1633 *
1634 * @returns VBox status code.
1635 * @param pVCpu The cross context virtual CPU structure.
1636 * @param GCPtr Virtual address of the first page in the range.
1637 * @param cb Size (in bytes) of the range to apply the modification to.
1638 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
1639 * @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
1640 * Be very CAREFUL when ~'ing constants which could be 32-bit!
1641 */
1642VMMDECL(int) PGMGstModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
1643{
1644 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,GstModifyPage), a);
1645 VMCPU_ASSERT_EMT(pVCpu);
1646
1647 /*
1648 * Validate input.
1649 */
1650 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
1651 Assert(cb);
1652
1653 LogFlow(("PGMGstModifyPage %RGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));
1654
1655 /*
1656 * Adjust input.
1657 */
1658 cb += GCPtr & PAGE_OFFSET_MASK;
1659 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
1660 GCPtr = (GCPtr & PAGE_BASE_GC_MASK);
1661
1662 /*
1663 * Call worker.
1664 */
1665 int rc = PGM_GST_PFN(ModifyPage, pVCpu)(pVCpu, GCPtr, cb, fFlags, fMask);
1666
1667 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,GstModifyPage), a);
1668 return rc;
1669}
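/*
 * Usage sketch (illustrative, not taken from the surrounding code; assumes a
 * valid pVCpu on the EMT and a present guest mapping at GCPtr).  Because the
 * existing flags are ANDed with fMask and ORed with fFlags, write-protecting
 * a single page means clearing X86_PTE_RW via the AND mask; note the 64-bit
 * cast before inverting, as warned in the function documentation:
 *
 *      int rc = PGMGstModifyPage(pVCpu, GCPtr, PAGE_SIZE,
 *                                0,                       // fFlags: no bits to set
 *                                ~(uint64_t)X86_PTE_RW);  // fMask:  clear the R/W bit
 *      AssertRC(rc);
 */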
1670
1671
1672#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1673
1674/**
1675 * Performs the lazy mapping of the 32-bit guest PD.
1676 *
1677 * @returns VBox status code.
1678 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1679 * @param ppPd Where to return the pointer to the mapping. This is
1680 * always set.
1681 */
1682int pgmGstLazyMap32BitPD(PVMCPU pVCpu, PX86PD *ppPd)
1683{
1684 PVM pVM = pVCpu->CTX_SUFF(pVM);
1685 pgmLock(pVM);
1686
1687 Assert(!pVCpu->pgm.s.CTX_SUFF(pGst32BitPd));
1688
1689 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAGE_MASK;
1690 PPGMPAGE pPage;
1691 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
1692 if (RT_SUCCESS(rc))
1693 {
1694 RTHCPTR HCPtrGuestCR3;
1695 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
1696 if (RT_SUCCESS(rc))
1697 {
1698 pVCpu->pgm.s.pGst32BitPdR3 = (R3PTRTYPE(PX86PD))HCPtrGuestCR3;
1699# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1700 pVCpu->pgm.s.pGst32BitPdR0 = (R0PTRTYPE(PX86PD))HCPtrGuestCR3;
1701# endif
1702 *ppPd = (PX86PD)HCPtrGuestCR3;
1703
1704 pgmUnlock(pVM);
1705 return VINF_SUCCESS;
1706 }
1707
1708 AssertRC(rc);
1709 }
1710 pgmUnlock(pVM);
1711
1712 *ppPd = NULL;
1713 return rc;
1714}
1715
1716
1717/**
1718 * Performs the lazy mapping of the PAE guest PDPT.
1719 *
1720 * @returns VBox status code.
1721 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1722 * @param ppPdpt Where to return the pointer to the mapping. This is
1723 * always set.
1724 */
1725int pgmGstLazyMapPaePDPT(PVMCPU pVCpu, PX86PDPT *ppPdpt)
1726{
1727 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt));
1728 PVM pVM = pVCpu->CTX_SUFF(pVM);
1729 pgmLock(pVM);
1730
1731 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAE_PAGE_MASK;
1732 PPGMPAGE pPage;
1733 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
1734 if (RT_SUCCESS(rc))
1735 {
1736 RTHCPTR HCPtrGuestCR3;
1737 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
1738 if (RT_SUCCESS(rc))
1739 {
1740 pVCpu->pgm.s.pGstPaePdptR3 = (R3PTRTYPE(PX86PDPT))HCPtrGuestCR3;
1741# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1742 pVCpu->pgm.s.pGstPaePdptR0 = (R0PTRTYPE(PX86PDPT))HCPtrGuestCR3;
1743# endif
1744 *ppPdpt = (PX86PDPT)HCPtrGuestCR3;
1745
1746 pgmUnlock(pVM);
1747 return VINF_SUCCESS;
1748 }
1749
1750 AssertRC(rc);
1751 }
1752
1753 pgmUnlock(pVM);
1754 *ppPdpt = NULL;
1755 return rc;
1756}
1757
1758
1759/**
1760 * Performs the lazy mapping / updating of a PAE guest PD.
1761 *
1762 * @returns VBox status code.
1764 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1765 * @param iPdpt Which PD entry to map (0..3).
1766 * @param ppPd Where to return the pointer to the mapping. This is
1767 * always set.
1768 */
1769int pgmGstLazyMapPaePD(PVMCPU pVCpu, uint32_t iPdpt, PX86PDPAE *ppPd)
1770{
1771 PVM pVM = pVCpu->CTX_SUFF(pVM);
1772 pgmLock(pVM);
1773
1774 PX86PDPT pGuestPDPT = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
1775 Assert(pGuestPDPT);
1776 Assert(pGuestPDPT->a[iPdpt].n.u1Present);
1777 RTGCPHYS GCPhys = pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK;
1778 bool const fChanged = pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] != GCPhys;
1779
1780 PPGMPAGE pPage;
1781 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
1782 if (RT_SUCCESS(rc))
1783 {
1784 RTRCPTR RCPtr = NIL_RTRCPTR;
1785 RTHCPTR HCPtr = NIL_RTHCPTR;
1786#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1787 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, &HCPtr);
1788 AssertRC(rc);
1789#endif
1790 if (RT_SUCCESS(rc) && fChanged)
1791 {
1792 RCPtr = (RTRCPTR)(RTRCUINTPTR)(pVM->pgm.s.GCPtrCR3Mapping + (1 + iPdpt) * PAGE_SIZE);
1793 rc = PGMMap(pVM, (RTRCUINTPTR)RCPtr, PGM_PAGE_GET_HCPHYS(pPage), PAGE_SIZE, 0);
1794 }
1795 if (RT_SUCCESS(rc))
1796 {
1797 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = (R3PTRTYPE(PX86PDPAE))HCPtr;
1798# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1799 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = (R0PTRTYPE(PX86PDPAE))HCPtr;
1800# endif
1801 if (fChanged)
1802 {
1803 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = GCPhys;
1804 pVCpu->pgm.s.apGstPaePDsRC[iPdpt] = (RCPTRTYPE(PX86PDPAE))RCPtr;
1805 }
1806
1807 *ppPd = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
1808 pgmUnlock(pVM);
1809 return VINF_SUCCESS;
1810 }
1811 }
1812
1813 /* Invalid page or some failure, invalidate the entry. */
1814 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = NIL_RTGCPHYS;
1815 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = 0;
1816# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1817 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = 0;
1818# endif
1819 pVCpu->pgm.s.apGstPaePDsRC[iPdpt] = 0;
1820
1821 pgmUnlock(pVM);
1822 return rc;
1823}
1824
1825#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1826#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1827/**
1828 * Performs the lazy mapping of the AMD64 guest PML4 table.
1829 *
1830 * @returns VBox status code.
1831 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1832 * @param ppPml4 Where to return the pointer to the mapping. This will
1833 * always be set.
1834 */
1835int pgmGstLazyMapPml4(PVMCPU pVCpu, PX86PML4 *ppPml4)
1836{
1837 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4));
1838 PVM pVM = pVCpu->CTX_SUFF(pVM);
1839 pgmLock(pVM);
1840
1841 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_AMD64_PAGE_MASK;
1842 PPGMPAGE pPage;
1843 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
1844 if (RT_SUCCESS(rc))
1845 {
1846 RTHCPTR HCPtrGuestCR3;
1847 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)&HCPtrGuestCR3);
1848 if (RT_SUCCESS(rc))
1849 {
1850 pVCpu->pgm.s.pGstAmd64Pml4R3 = (R3PTRTYPE(PX86PML4))HCPtrGuestCR3;
1851# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1852 pVCpu->pgm.s.pGstAmd64Pml4R0 = (R0PTRTYPE(PX86PML4))HCPtrGuestCR3;
1853# endif
1854 *ppPml4 = (PX86PML4)HCPtrGuestCR3;
1855
1856 pgmUnlock(pVM);
1857 return VINF_SUCCESS;
1858 }
1859 }
1860
1861 pgmUnlock(pVM);
1862 *ppPml4 = NULL;
1863 return rc;
1864}
1865#endif
1866
1867
1868/**
1869 * Gets the PAE PDPEs values cached by the CPU.
1870 *
1871 * @returns VBox status code.
1872 * @param pVCpu The cross context virtual CPU structure.
1873 * @param paPdpes Where to return the four PDPEs. The array
1874 * pointed to must have 4 entries.
1875 */
1876VMM_INT_DECL(int) PGMGstGetPaePdpes(PVMCPU pVCpu, PX86PDPE paPdpes)
1877{
1878 Assert(pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
1879
1880 paPdpes[0] = pVCpu->pgm.s.aGstPaePdpeRegs[0];
1881 paPdpes[1] = pVCpu->pgm.s.aGstPaePdpeRegs[1];
1882 paPdpes[2] = pVCpu->pgm.s.aGstPaePdpeRegs[2];
1883 paPdpes[3] = pVCpu->pgm.s.aGstPaePdpeRegs[3];
1884 return VINF_SUCCESS;
1885}
1886
1887
1888/**
1889 * Sets the PAE PDPEs values cached by the CPU.
1890 *
1891 * @remarks This must be called *AFTER* PGMUpdateCR3.
1892 *
1893 * @param pVCpu The cross context virtual CPU structure.
1894 * @param paPdpes The four PDPE values. The array pointed to must
1895 * have exactly 4 entries.
1896 *
1897 * @remarks No-long-jump zone!!!
1898 */
1899VMM_INT_DECL(void) PGMGstUpdatePaePdpes(PVMCPU pVCpu, PCX86PDPE paPdpes)
1900{
1901 Assert(pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
1902
1903 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->pgm.s.aGstPaePdpeRegs); i++)
1904 {
1905 if (pVCpu->pgm.s.aGstPaePdpeRegs[i].u != paPdpes[i].u)
1906 {
1907 pVCpu->pgm.s.aGstPaePdpeRegs[i] = paPdpes[i];
1908
1909 /* Force lazy remapping if it changed in any way. */
1910 pVCpu->pgm.s.apGstPaePDsR3[i] = 0;
1911# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1912 pVCpu->pgm.s.apGstPaePDsR0[i] = 0;
1913# endif
1914 pVCpu->pgm.s.apGstPaePDsRC[i] = 0;
1915 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
1916 }
1917 }
1918
1919 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
1920}
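/*
 * Ordering sketch (illustrative; aPdpes[], the way it is filled, and uGuestCr3
 * are assumptions, while EPT shadow mode and EMT context are required).  As
 * noted in the remarks, PGMGstUpdatePaePdpes must only be called after
 * PGMUpdateCR3 has remapped the guest CR3:
 *
 *      X86PDPE aPdpes[4];
 *      // ... fill aPdpes[0..3] with the PDPE values the virtual CPU caches ...
 *      PGMUpdateCR3(pVCpu, uGuestCr3);            // remap the guest CR3 first
 *      PGMGstUpdatePaePdpes(pVCpu, &aPdpes[0]);   // then refresh the cached PDPEs
 */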
1921
1922
1923/**
1924 * Gets the current CR3 register value for the shadow memory context.
1925 * @returns CR3 value.
1926 * @param pVCpu The cross context virtual CPU structure.
1927 */
1928VMMDECL(RTHCPHYS) PGMGetHyperCR3(PVMCPU pVCpu)
1929{
1930 PPGMPOOLPAGE pPoolPage = pVCpu->pgm.s.CTX_SUFF(pShwPageCR3);
1931 AssertPtrReturn(pPoolPage, 0);
1932 return pPoolPage->Core.Key;
1933}
1934
1935
1936/**
1937 * Gets the current CR3 register value for the nested memory context.
1938 * @returns CR3 value.
1939 * @param pVCpu The cross context virtual CPU structure.
1940 * @param enmShadowMode The shadow paging mode.
1941 */
1942VMMDECL(RTHCPHYS) PGMGetNestedCR3(PVMCPU pVCpu, PGMMODE enmShadowMode)
1943{
1944 NOREF(enmShadowMode);
1945 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1946 return pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key;
1947}
1948
1949
1950/**
1951 * Gets the current CR3 register value for the HC intermediate memory context.
1952 * @returns CR3 value.
1953 * @param pVM The cross context VM structure.
1954 */
1955VMMDECL(RTHCPHYS) PGMGetInterHCCR3(PVM pVM)
1956{
1957 switch (pVM->pgm.s.enmHostMode)
1958 {
1959 case SUPPAGINGMODE_32_BIT:
1960 case SUPPAGINGMODE_32_BIT_GLOBAL:
1961 return pVM->pgm.s.HCPhysInterPD;
1962
1963 case SUPPAGINGMODE_PAE:
1964 case SUPPAGINGMODE_PAE_GLOBAL:
1965 case SUPPAGINGMODE_PAE_NX:
1966 case SUPPAGINGMODE_PAE_GLOBAL_NX:
1967 return pVM->pgm.s.HCPhysInterPaePDPT;
1968
1969 case SUPPAGINGMODE_AMD64:
1970 case SUPPAGINGMODE_AMD64_GLOBAL:
1971 case SUPPAGINGMODE_AMD64_NX:
1972 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
1973 return pVM->pgm.s.HCPhysInterPaePDPT;
1974
1975 default:
1976 AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode));
1977 return NIL_RTHCPHYS;
1978 }
1979}
1980
1981
1982/**
1983 * Gets the current CR3 register value for the RC intermediate memory context.
1984 * @returns CR3 value.
1985 * @param pVM The cross context VM structure.
1986 * @param pVCpu The cross context virtual CPU structure.
1987 */
1988VMMDECL(RTHCPHYS) PGMGetInterRCCR3(PVM pVM, PVMCPU pVCpu)
1989{
1990 switch (pVCpu->pgm.s.enmShadowMode)
1991 {
1992 case PGMMODE_32_BIT:
1993 return pVM->pgm.s.HCPhysInterPD;
1994
1995 case PGMMODE_PAE:
1996 case PGMMODE_PAE_NX:
1997 return pVM->pgm.s.HCPhysInterPaePDPT;
1998
1999 case PGMMODE_AMD64:
2000 case PGMMODE_AMD64_NX:
2001 return pVM->pgm.s.HCPhysInterPaePML4;
2002
2003 case PGMMODE_EPT:
2004 case PGMMODE_NESTED:
2005 return 0; /* not relevant */
2006
2007 default:
2008 AssertMsgFailed(("enmShadowMode=%d\n", pVCpu->pgm.s.enmShadowMode));
2009 return NIL_RTHCPHYS;
2010 }
2011}
2012
2013
2014/**
2015 * Gets the CR3 register value for the 32-Bit intermediate memory context.
2016 * @returns CR3 value.
2017 * @param pVM The cross context VM structure.
2018 */
2019VMMDECL(RTHCPHYS) PGMGetInter32BitCR3(PVM pVM)
2020{
2021 return pVM->pgm.s.HCPhysInterPD;
2022}
2023
2024
2025/**
2026 * Gets the CR3 register value for the PAE intermediate memory context.
2027 * @returns CR3 value.
2028 * @param pVM The cross context VM structure.
2029 */
2030VMMDECL(RTHCPHYS) PGMGetInterPaeCR3(PVM pVM)
2031{
2032 return pVM->pgm.s.HCPhysInterPaePDPT;
2033}
2034
2035
2036/**
2037 * Gets the CR3 register value for the AMD64 intermediate memory context.
2038 * @returns CR3 value.
2039 * @param pVM The cross context VM structure.
2040 */
2041VMMDECL(RTHCPHYS) PGMGetInterAmd64CR3(PVM pVM)
2042{
2043 return pVM->pgm.s.HCPhysInterPaePML4;
2044}
2045
2046
2047/**
2048 * Performs and schedules necessary updates following a CR3 load or reload.
2049 *
2050 * This will normally involve mapping the guest PD or nPDPT.
2051 *
2052 * @returns VBox status code.
2053 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
2054 * safely be ignored and overridden since the force action flag will be set as well.
2055 * @param pVCpu The cross context virtual CPU structure.
2056 * @param cr3 The new cr3.
2057 * @param fGlobal Indicates whether this is a global flush or not.
2058 */
2059VMMDECL(int) PGMFlushTLB(PVMCPU pVCpu, uint64_t cr3, bool fGlobal)
2060{
2061 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLB), a);
2062 PVM pVM = pVCpu->CTX_SUFF(pVM);
2063
2064 VMCPU_ASSERT_EMT(pVCpu);
2065
2066 /*
2067 * Always flag the necessary updates; this is required for hardware acceleration.
2068 */
2069 /** @todo optimize this, it shouldn't always be necessary. */
2070 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2071 if (fGlobal)
2072 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2073 LogFlow(("PGMFlushTLB: cr3=%RX64 OldCr3=%RX64 fGlobal=%d\n", cr3, pVCpu->pgm.s.GCPhysCR3, fGlobal));
2074
2075 /*
2076 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
2077 */
2078 int rc = VINF_SUCCESS;
2079 RTGCPHYS GCPhysCR3;
2080 switch (pVCpu->pgm.s.enmGuestMode)
2081 {
2082 case PGMMODE_PAE:
2083 case PGMMODE_PAE_NX:
2084 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
2085 break;
2086 case PGMMODE_AMD64:
2087 case PGMMODE_AMD64_NX:
2088 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
2089 break;
2090 default:
2091 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
2092 break;
2093 }
2094 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
2095
2096 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
2097 {
2098 RTGCPHYS GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
2099 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2100 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
2101 if (RT_LIKELY(rc == VINF_SUCCESS))
2102 {
2103 if (pgmMapAreMappingsFloating(pVM))
2104 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
2105 }
2106 else
2107 {
2108 AssertMsg(rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
2109 Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_PGM_SYNC_CR3));
2110 pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3;
2111 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MAP_CR3;
2112 if (pgmMapAreMappingsFloating(pVM))
2113 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;
2114 }
2115
2116 if (fGlobal)
2117 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBNewCR3Global));
2118 else
2119 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBNewCR3));
2120 }
2121 else
2122 {
2123# ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
2124 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
2125 if (pPool->cDirtyPages)
2126 {
2127 pgmLock(pVM);
2128 pgmPoolResetDirtyPages(pVM);
2129 pgmUnlock(pVM);
2130 }
2131# endif
2132 /*
2133 * Check if we have a pending update of the CR3 monitoring.
2134 */
2135 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
2136 {
2137 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
2138 Assert(!pVM->pgm.s.fMappingsFixed); Assert(pgmMapAreMappingsEnabled(pVM));
2139 }
2140 if (fGlobal)
2141 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBSameCR3Global));
2142 else
2143 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLBSameCR3));
2144 }
2145
2146 IEMTlbInvalidateAll(pVCpu, false /*fVmm*/);
2147 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,FlushTLB), a);
2148 return rc;
2149}
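/*
 * Caller sketch (illustrative; uNewCr3 is a hypothetical variable holding the
 * value the guest just loaded into CR3).  VINF_PGM_SYNC_CR3 may be folded into
 * VINF_SUCCESS because the sync force action flags are set as well and will be
 * serviced before guest execution resumes:
 *
 *      int rc = PGMFlushTLB(pVCpu, uNewCr3, false);   // false = not a global flush
 *      if (rc == VINF_PGM_SYNC_CR3)
 *          rc = VINF_SUCCESS;                         // FF is set; SyncCR3 runs later
 *      AssertRC(rc);
 */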
2150
2151
2152/**
2153 * Performs and schedules necessary updates following a CR3 load or reload when
2154 * using nested or extended paging.
2155 *
2156 * This API is an alternative to PGMFlushTLB that avoids actually flushing the
2157 * TLB and triggering a SyncCR3.
2158 *
2159 * This will normally involve mapping the guest PD or nPDPT.
2160 *
2161 * @returns VBox status code.
2162 * @retval VINF_SUCCESS.
2163 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync (not for nested
2164 * paging modes). This can safely be ignored and overridden since the
2165 * force action flag will be set as well.
2166 * @param pVCpu The cross context virtual CPU structure.
2167 * @param cr3 The new cr3.
2168 */
2169VMMDECL(int) PGMUpdateCR3(PVMCPU pVCpu, uint64_t cr3)
2170{
2171 VMCPU_ASSERT_EMT(pVCpu);
2172 LogFlow(("PGMUpdateCR3: cr3=%RX64 OldCr3=%RX64\n", cr3, pVCpu->pgm.s.GCPhysCR3));
2173
2174 /* We assume we're only called in nested paging mode. */
2175 Assert(pVCpu->CTX_SUFF(pVM)->pgm.s.fNestedPaging || pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
2176 Assert(!pgmMapAreMappingsEnabled(pVCpu->CTX_SUFF(pVM)));
2177 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3));
2178
2179 /*
2180 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
2181 */
2182 int rc = VINF_SUCCESS;
2183 RTGCPHYS GCPhysCR3;
2184 switch (pVCpu->pgm.s.enmGuestMode)
2185 {
2186 case PGMMODE_PAE:
2187 case PGMMODE_PAE_NX:
2188 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
2189 break;
2190 case PGMMODE_AMD64:
2191 case PGMMODE_AMD64_NX:
2192 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
2193 break;
2194 default:
2195 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
2196 break;
2197 }
2198 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
2199
2200 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
2201 {
2202 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2203 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
2204 AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */ /** @todo this isn't true for the mac, but we need hw to test/fix this. */
2205 }
2206
2207 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
2208 return rc;
2209}
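/*
 * Selection sketch (illustrative; fNestedPagingActive, uNewCr3 and fGlobalFlush
 * are hypothetical caller-owned values).  With nested or EPT paging the guest
 * CR3 only needs to be remapped, while shadow paging also requires the TLB
 * flush and SyncCR3 handling of PGMFlushTLB:
 *
 *      int rc;
 *      if (fNestedPagingActive)
 *          rc = PGMUpdateCR3(pVCpu, uNewCr3);
 *      else
 *          rc = PGMFlushTLB(pVCpu, uNewCr3, fGlobalFlush);
 */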
2210
2211
2212/**
2213 * Synchronize the paging structures.
2214 *
2215 * This function is called in response to the VMCPU_FF_PGM_SYNC_CR3 and
2216 * VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL force action flags. Those two flags are set
2217 * in several places, most importantly whenever the CR3 is loaded.
2218 *
2219 * @returns VBox status code. May return VINF_PGM_SYNC_CR3 in RC/R0.
2220 * @retval VERR_PGM_NO_HYPERVISOR_ADDRESS in raw-mode when we're unable to map
2221 * the VMM into guest context.
2222 * @param pVCpu The cross context virtual CPU structure.
2223 * @param cr0 Guest context CR0 register
2224 * @param cr3 Guest context CR3 register
2225 * @param cr4 Guest context CR4 register
2226 * @param fGlobal Including global page directories or not
2227 */
2228VMMDECL(int) PGMSyncCR3(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
2229{
2230 int rc;
2231
2232 VMCPU_ASSERT_EMT(pVCpu);
2233
2234 /*
2235 * The pool may have pending stuff and even require a return to ring-3 to
2236 * clear the whole thing.
2237 */
2238 rc = pgmPoolSyncCR3(pVCpu);
2239 if (rc != VINF_SUCCESS)
2240 return rc;
2241
2242 /*
2243 * We might be called when we shouldn't.
2244 *
2245 * The mode switching will ensure that the PD is resynced after every mode
2246 * switch. So, if we find ourselves here when in protected or real mode
2247 * we can safely clear the FF and return immediately.
2248 */
2249 if (pVCpu->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
2250 {
2251 Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
2252 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2253 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2254 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2255 return VINF_SUCCESS;
2256 }
2257
2258 /* If global pages are not supported, then all flushes are global. */
2259 if (!(cr4 & X86_CR4_PGE))
2260 fGlobal = true;
2261 LogFlow(("PGMSyncCR3: cr0=%RX64 cr3=%RX64 cr4=%RX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
2262 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)));
2263
2264 /*
2265 * Check if we need to finish an aborted MapCR3 call (see PGMFlushTLB).
2266 * This should be done before SyncCR3.
2267 */
2268 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MAP_CR3)
2269 {
2270 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MAP_CR3;
2271
2272 RTGCPHYS GCPhysCR3Old = pVCpu->pgm.s.GCPhysCR3; NOREF(GCPhysCR3Old);
2273 RTGCPHYS GCPhysCR3;
2274 switch (pVCpu->pgm.s.enmGuestMode)
2275 {
2276 case PGMMODE_PAE:
2277 case PGMMODE_PAE_NX:
2278 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
2279 break;
2280 case PGMMODE_AMD64:
2281 case PGMMODE_AMD64_NX:
2282 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
2283 break;
2284 default:
2285 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
2286 break;
2287 }
2288 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
2289
2290 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
2291 {
2292 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2293 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
2294 }
2295
2296 /* Make sure we check for pending pgm pool syncs as we clear VMCPU_FF_PGM_SYNC_CR3 later on! */
2297 if ( rc == VINF_PGM_SYNC_CR3
2298 || (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL))
2299 {
2300 Log(("PGMSyncCR3: pending pgm pool sync after MapCR3!\n"));
2301#ifdef IN_RING3
2302 rc = pgmPoolSyncCR3(pVCpu);
2303#else
2304 if (rc == VINF_PGM_SYNC_CR3)
2305 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3Old;
2306 return VINF_PGM_SYNC_CR3;
2307#endif
2308 }
2309 AssertRCReturn(rc, rc);
2310 AssertRCSuccessReturn(rc, VERR_IPE_UNEXPECTED_INFO_STATUS);
2311 }
2312
2313 /*
2314 * Let the 'Bth' function do the work and we'll just keep track of the flags.
2315 */
2316 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
2317 rc = PGM_BTH_PFN(SyncCR3, pVCpu)(pVCpu, cr0, cr3, cr4, fGlobal);
2318 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
2319 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
2320 if (rc == VINF_SUCCESS)
2321 {
2322 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)
2323 {
2324 /* Go back to ring 3 if a pgm pool sync is again pending. */
2325 return VINF_PGM_SYNC_CR3;
2326 }
2327
2328 if (!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
2329 {
2330 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2331 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2332 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2333 }
2334
2335 /*
2336 * Check if we have a pending update of the CR3 monitoring.
2337 */
2338 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
2339 {
2340 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
2341 Assert(!pVCpu->CTX_SUFF(pVM)->pgm.s.fMappingsFixed);
2342 Assert(pgmMapAreMappingsEnabled(pVCpu->CTX_SUFF(pVM)));
2343 }
2344 }
2345
2346 /*
2347 * Now flush the CR3 (guest context).
2348 */
2349 if (rc == VINF_SUCCESS)
2350 PGM_INVL_VCPU_TLBS(pVCpu);
2351 return rc;
2352}
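/*
 * Force-action servicing sketch (illustrative; assumes the CPUM guest register
 * accessors and EMT context).  The sync flags set above are typically checked
 * and serviced before resuming guest execution:
 *
 *      if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3)
 *          || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
 *      {
 *          int rc = PGMSyncCR3(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR3(pVCpu),
 *                              CPUMGetGuestCR4(pVCpu),
 *                              VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
 *          if (RT_FAILURE(rc))
 *              return rc;
 *      }
 */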
2353
2354
2355/**
2356 * Called whenever CR0 or CR4 changes in a way which may affect the paging mode.
2357 *
2358 * @returns VBox status code, with the following informational code for
2359 * VM scheduling.
2360 * @retval VINF_SUCCESS if there was no change, or it was successfully dealt with.
2361 * @retval VINF_PGM_CHANGE_MODE if we're in RC or R0 and the mode changes.
2362 * (I.e. not in R3.)
2363 * @retval VINF_EM_SUSPEND or VINF_EM_OFF on a fatal runtime error. (R3 only)
2364 *
2365 * @param pVCpu The cross context virtual CPU structure.
2366 * @param cr0 The new cr0.
2367 * @param cr4 The new cr4.
2368 * @param efer The new extended feature enable register.
2369 */
2370VMMDECL(int) PGMChangeMode(PVMCPU pVCpu, uint64_t cr0, uint64_t cr4, uint64_t efer)
2371{
2372 VMCPU_ASSERT_EMT(pVCpu);
2373
2374 /*
2375 * Calc the new guest mode.
2376 *
2377 * Note! We check PG before PE and without requiring PE because of the
2378 * special AMD-V paged real mode (APM vol 2, rev 3.28, 15.9).
2379 */
2380 PGMMODE enmGuestMode;
2381 if (cr0 & X86_CR0_PG)
2382 {
2383 if (!(cr4 & X86_CR4_PAE))
2384 {
2385 bool const fPse = !!(cr4 & X86_CR4_PSE);
2386 if (pVCpu->pgm.s.fGst32BitPageSizeExtension != fPse)
2387 Log(("PGMChangeMode: CR4.PSE %d -> %d\n", pVCpu->pgm.s.fGst32BitPageSizeExtension, fPse));
2388 pVCpu->pgm.s.fGst32BitPageSizeExtension = fPse;
2389 enmGuestMode = PGMMODE_32_BIT;
2390 }
2391 else if (!(efer & MSR_K6_EFER_LME))
2392 {
2393 if (!(efer & MSR_K6_EFER_NXE))
2394 enmGuestMode = PGMMODE_PAE;
2395 else
2396 enmGuestMode = PGMMODE_PAE_NX;
2397 }
2398 else
2399 {
2400 if (!(efer & MSR_K6_EFER_NXE))
2401 enmGuestMode = PGMMODE_AMD64;
2402 else
2403 enmGuestMode = PGMMODE_AMD64_NX;
2404 }
2405 }
2406 else if (!(cr0 & X86_CR0_PE))
2407 enmGuestMode = PGMMODE_REAL;
2408 else
2409 enmGuestMode = PGMMODE_PROTECTED;
2410
2411 /*
2412 * Did it change?
2413 */
2414 if (pVCpu->pgm.s.enmGuestMode == enmGuestMode)
2415 return VINF_SUCCESS;
2416
2417 /* Flush the TLB */
2418 PGM_INVL_VCPU_TLBS(pVCpu);
2419
2420#ifdef IN_RING3
2421 return PGMR3ChangeMode(pVCpu->CTX_SUFF(pVM), pVCpu, enmGuestMode);
2422#else
2423 LogFlow(("PGMChangeMode: returns VINF_PGM_CHANGE_MODE.\n"));
2424 return VINF_PGM_CHANGE_MODE;
2425#endif
2426}
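/*
 * Mode-change sketch (illustrative; assumes the CPUM guest register accessors
 * and EMT context).  After the guest touches CR0, CR4 or EFER, the paging mode
 * is recomputed; outside ring-3 the informational VINF_PGM_CHANGE_MODE status
 * is simply propagated so that ring-3 can perform the actual switch:
 *
 *      int rc = PGMChangeMode(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR4(pVCpu),
 *                             CPUMGetGuestEFER(pVCpu));
 *      if (rc == VINF_PGM_CHANGE_MODE)
 *          return rc;          // ring-3 performs PGMR3ChangeMode
 *      AssertRC(rc);
 */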
2427
2428
2429/**
2430 * Called by CPUM or REM when CR0.WP changes to 1.
2431 *
2432 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2433 * @thread EMT
2434 */
2435VMMDECL(void) PGMCr0WpEnabled(PVMCPU pVCpu)
2436{
2437 /*
2438 * Netware WP0+RO+US hack cleanup when WP0 -> WP1.
2439 *
2440 * Use the counter to judge whether there might be pool pages with active
2441 * hacks in them. If there are, we will be running the risk of messing up
2442 * the guest by allowing it to write to read-only pages. Thus, we have to
2443 * clear the page pool ASAP if there is the slightest chance.
2444 */
2445 if (pVCpu->pgm.s.cNetwareWp0Hacks > 0)
2446 {
2447 Assert(pVCpu->CTX_SUFF(pVM)->cCpus == 1);
2448
2449 Log(("PGMCr0WpEnabled: %llu WP0 hacks active - clearing page pool\n", pVCpu->pgm.s.cNetwareWp0Hacks));
2450 pVCpu->pgm.s.cNetwareWp0Hacks = 0;
2451 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
2452 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2453 }
2454}
2455
2456
2457/**
2458 * Gets the current guest paging mode.
2459 *
2460 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
2461 *
2462 * @returns The current paging mode.
2463 * @param pVCpu The cross context virtual CPU structure.
2464 */
2465VMMDECL(PGMMODE) PGMGetGuestMode(PVMCPU pVCpu)
2466{
2467 return pVCpu->pgm.s.enmGuestMode;
2468}
2469
2470
2471/**
2472 * Gets the current shadow paging mode.
2473 *
2474 * @returns The current paging mode.
2475 * @param pVCpu The cross context virtual CPU structure.
2476 */
2477VMMDECL(PGMMODE) PGMGetShadowMode(PVMCPU pVCpu)
2478{
2479 return pVCpu->pgm.s.enmShadowMode;
2480}
2481
2482
2483/**
2484 * Gets the current host paging mode.
2485 *
2486 * @returns The current paging mode.
2487 * @param pVM The cross context VM structure.
2488 */
2489VMMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
2490{
2491 switch (pVM->pgm.s.enmHostMode)
2492 {
2493 case SUPPAGINGMODE_32_BIT:
2494 case SUPPAGINGMODE_32_BIT_GLOBAL:
2495 return PGMMODE_32_BIT;
2496
2497 case SUPPAGINGMODE_PAE:
2498 case SUPPAGINGMODE_PAE_GLOBAL:
2499 return PGMMODE_PAE;
2500
2501 case SUPPAGINGMODE_PAE_NX:
2502 case SUPPAGINGMODE_PAE_GLOBAL_NX:
2503 return PGMMODE_PAE_NX;
2504
2505 case SUPPAGINGMODE_AMD64:
2506 case SUPPAGINGMODE_AMD64_GLOBAL:
2507 return PGMMODE_AMD64;
2508
2509 case SUPPAGINGMODE_AMD64_NX:
2510 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
2511 return PGMMODE_AMD64_NX;
2512
2513 default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
2514 }
2515
2516 return PGMMODE_INVALID;
2517}
2518
2519
2520/**
2521 * Get mode name.
2522 *
2523 * @returns read-only name string.
2524 * @param enmMode The mode whose name is desired.
2525 */
2526VMMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
2527{
2528 switch (enmMode)
2529 {
2530 case PGMMODE_REAL: return "Real";
2531 case PGMMODE_PROTECTED: return "Protected";
2532 case PGMMODE_32_BIT: return "32-bit";
2533 case PGMMODE_PAE: return "PAE";
2534 case PGMMODE_PAE_NX: return "PAE+NX";
2535 case PGMMODE_AMD64: return "AMD64";
2536 case PGMMODE_AMD64_NX: return "AMD64+NX";
2537 case PGMMODE_NESTED: return "Nested";
2538 case PGMMODE_EPT: return "EPT";
2539 default: return "unknown mode value";
2540 }
2541}
2542
2543
2544/**
2545 * Gets the physical address represented in the guest CR3 as PGM sees it.
2546 *
2547 * This is mainly for logging and debugging.
2548 *
2549 * @returns PGM's guest CR3 value.
2550 * @param pVCpu The cross context virtual CPU structure.
2551 */
2552VMM_INT_DECL(RTGCPHYS) PGMGetGuestCR3Phys(PVMCPU pVCpu)
2553{
2554 return pVCpu->pgm.s.GCPhysCR3;
2555}
2556
2557
2558
2559/**
2560 * Notification from CPUM that the EFER.NXE bit has changed.
2561 *
2562 * @param pVCpu The cross context virtual CPU structure of the CPU for
2563 * which EFER changed.
2564 * @param fNxe The new NXE state.
2565 */
2566VMM_INT_DECL(void) PGMNotifyNxeChanged(PVMCPU pVCpu, bool fNxe)
2567{
2568/** @todo VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu); */
2569 Log(("PGMNotifyNxeChanged: fNxe=%RTbool\n", fNxe));
2570
2571 pVCpu->pgm.s.fNoExecuteEnabled = fNxe;
2572 if (fNxe)
2573 {
2574 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
2575 pVCpu->pgm.s.fGstPaeMbzPteMask &= ~X86_PTE_PAE_NX;
2576 pVCpu->pgm.s.fGstPaeMbzPdeMask &= ~X86_PDE_PAE_NX;
2577 pVCpu->pgm.s.fGstPaeMbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
2578 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask - N/A */
2579 pVCpu->pgm.s.fGstAmd64MbzPteMask &= ~X86_PTE_PAE_NX;
2580 pVCpu->pgm.s.fGstAmd64MbzPdeMask &= ~X86_PDE_PAE_NX;
2581 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
2582 pVCpu->pgm.s.fGstAmd64MbzPdpeMask &= ~X86_PDPE_LM_NX;
2583 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask &= ~X86_PDPE_LM_NX;
2584 pVCpu->pgm.s.fGstAmd64MbzPml4eMask &= ~X86_PML4E_NX;
2585
2586 pVCpu->pgm.s.fGst64ShadowedPteMask |= X86_PTE_PAE_NX;
2587 pVCpu->pgm.s.fGst64ShadowedPdeMask |= X86_PDE_PAE_NX;
2588 pVCpu->pgm.s.fGst64ShadowedBigPdeMask |= X86_PDE2M_PAE_NX;
2589 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask |= X86_PDE2M_PAE_NX;
2590 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask |= X86_PDPE_LM_NX;
2591 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask |= X86_PML4E_NX;
2592 }
2593 else
2594 {
2595 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
2596 pVCpu->pgm.s.fGstPaeMbzPteMask |= X86_PTE_PAE_NX;
2597 pVCpu->pgm.s.fGstPaeMbzPdeMask |= X86_PDE_PAE_NX;
2598 pVCpu->pgm.s.fGstPaeMbzBigPdeMask |= X86_PDE2M_PAE_NX;
2599 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask -N/A */
2600 pVCpu->pgm.s.fGstAmd64MbzPteMask |= X86_PTE_PAE_NX;
2601 pVCpu->pgm.s.fGstAmd64MbzPdeMask |= X86_PDE_PAE_NX;
2602 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask |= X86_PDE2M_PAE_NX;
2603 pVCpu->pgm.s.fGstAmd64MbzPdpeMask |= X86_PDPE_LM_NX;
2604 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask |= X86_PDPE_LM_NX;
2605 pVCpu->pgm.s.fGstAmd64MbzPml4eMask |= X86_PML4E_NX;
2606
2607 pVCpu->pgm.s.fGst64ShadowedPteMask &= ~X86_PTE_PAE_NX;
2608 pVCpu->pgm.s.fGst64ShadowedPdeMask &= ~X86_PDE_PAE_NX;
2609 pVCpu->pgm.s.fGst64ShadowedBigPdeMask &= ~X86_PDE2M_PAE_NX;
2610 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask &= ~X86_PDE2M_PAE_NX;
2611 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask &= ~X86_PDPE_LM_NX;
2612 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask &= ~X86_PML4E_NX;
2613 }
2614}
2615
2616
2617/**
2618 * Check if any pgm pool pages are marked dirty (not monitored)
2619 *
2620 * @returns bool dirty/not dirty
2621 * @param pVM The cross context VM structure.
2622 */
2623VMMDECL(bool) PGMHasDirtyPages(PVM pVM)
2624{
2625 return pVM->pgm.s.CTX_SUFF(pPool)->cDirtyPages != 0;
2626}
2627
2628
2629/**
2630 * Check if this VCPU currently owns the PGM lock.
2631 *
2632 * @returns bool owner/not owner
2633 * @param pVM The cross context VM structure.
2634 */
2635VMMDECL(bool) PGMIsLockOwner(PVM pVM)
2636{
2637 return PDMCritSectIsOwner(&pVM->pgm.s.CritSectX);
2638}
2639
2640
2641/**
2642 * Enable or disable large page usage
2643 *
2644 * @returns VBox status code.
2645 * @param pVM The cross context VM structure.
2646 * @param fUseLargePages Use/not use large pages
2647 */
2648VMMDECL(int) PGMSetLargePageUsage(PVM pVM, bool fUseLargePages)
2649{
2650 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2651
2652 pVM->fUseLargePages = fUseLargePages;
2653 return VINF_SUCCESS;
2654}
2655
2656
2657/**
2658 * Acquire the PGM lock.
2659 *
2660 * @returns VBox status code
2661 * @param pVM The cross context VM structure.
2662 * @param SRC_POS The source position of the caller (RT_SRC_POS).
2663 */
2664#if (defined(VBOX_STRICT) && defined(IN_RING3)) || defined(DOXYGEN_RUNNING)
2665int pgmLockDebug(PVM pVM, RT_SRC_POS_DECL)
2666#else
2667int pgmLock(PVM pVM)
2668#endif
2669{
2670#if defined(VBOX_STRICT) && defined(IN_RING3)
2671 int rc = PDMCritSectEnterDebug(&pVM->pgm.s.CritSectX, VERR_SEM_BUSY, (uintptr_t)ASMReturnAddress(), RT_SRC_POS_ARGS);
2672#else
2673 int rc = PDMCritSectEnter(&pVM->pgm.s.CritSectX, VERR_SEM_BUSY);
2674#endif
2675#if defined(IN_RC) || defined(IN_RING0)
2676 if (rc == VERR_SEM_BUSY)
2677 rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_LOCK, 0);
2678#endif
2679 AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc));
2680 return rc;
2681}
2682
2683
2684/**
2685 * Release the PGM lock.
2686 *
2687 * @returns VBox status code
2688 * @param pVM The cross context VM structure.
2689 */
2690void pgmUnlock(PVM pVM)
2691{
2692 uint32_t cDeprecatedPageLocks = pVM->pgm.s.cDeprecatedPageLocks;
2693 pVM->pgm.s.cDeprecatedPageLocks = 0;
2694 int rc = PDMCritSectLeave(&pVM->pgm.s.CritSectX);
2695 if (rc == VINF_SEM_NESTED)
2696 pVM->pgm.s.cDeprecatedPageLocks = cDeprecatedPageLocks;
2697}
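/*
 * Locking sketch (illustrative).  Internal PGM code brackets page and table
 * manipulation with the PGM lock, mirroring the lazy-map helpers above:
 *
 *      pgmLock(pVM);
 *      PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
 *      if (pPage)
 *      {
 *          // ... inspect or modify the page while holding the lock ...
 *      }
 *      pgmUnlock(pVM);
 */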
2698
2699#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
2700
2701/**
2702 * Common worker for pgmRZDynMapGCPageOffInlined and pgmRZDynMapGCPageV2Inlined.
2703 *
2704 * @returns VBox status code.
2705 * @param pVM The cross context VM structure.
2706 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2707 * @param GCPhys The guest physical address of the page to map. The
2708 * offset bits are not ignored.
2709 * @param ppv Where to return the address corresponding to @a GCPhys.
2710 * @param SRC_POS The source position of the caller (RT_SRC_POS).
2711 */
2712int pgmRZDynMapGCPageCommon(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
2713{
2714 pgmLock(pVM);
2715
2716 /*
2717 * Convert it to a writable page and pass it on to the dynamic mapper.
2718 */
2719 int rc;
2720 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
2721 if (RT_LIKELY(pPage))
2722 {
2723 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
2724 if (RT_SUCCESS(rc))
2725 {
2726 void *pv;
2727 rc = pgmRZDynMapHCPageInlined(pVCpu, PGM_PAGE_GET_HCPHYS(pPage), &pv RTLOG_COMMA_SRC_POS_ARGS);
2728 if (RT_SUCCESS(rc))
2729 *ppv = (void *)((uintptr_t)pv | ((uintptr_t)GCPhys & PAGE_OFFSET_MASK));
2730 }
2731 else
2732 AssertRC(rc);
2733 }
2734 else
2735 {
2736 AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
2737 rc = VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2738 }
2739
2740 pgmUnlock(pVM);
2741 return rc;
2742}
2743
2744#endif /* IN_RC || VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
2745#if !defined(IN_R0) || defined(LOG_ENABLED)
2746
2747/** Format handler for PGMPAGE.
2748 * @copydoc FNRTSTRFORMATTYPE */
2749static DECLCALLBACK(size_t) pgmFormatTypeHandlerPage(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
2750 const char *pszType, void const *pvValue,
2751 int cchWidth, int cchPrecision, unsigned fFlags,
2752 void *pvUser)
2753{
2754 size_t cch;
2755 PCPGMPAGE pPage = (PCPGMPAGE)pvValue;
2756 if (RT_VALID_PTR(pPage))
2757 {
2758 char szTmp[64+80];
2759
2760 cch = 0;
2761
2762 /* The single char state stuff. */
2763 static const char s_achPageStates[4] = { 'Z', 'A', 'W', 'S' };
2764 szTmp[cch++] = s_achPageStates[PGM_PAGE_GET_STATE_NA(pPage)];
2765
2766#define IS_PART_INCLUDED(lvl) ( !(fFlags & RTSTR_F_PRECISION) || cchPrecision == (lvl) || cchPrecision >= (lvl)+10 )
2767 if (IS_PART_INCLUDED(5))
2768 {
2769 static const char s_achHandlerStates[4] = { '-', 't', 'w', 'a' };
2770 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)];
2771 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_VIRT_STATE(pPage)];
2772 }
2773
2774 /* The type. */
2775 if (IS_PART_INCLUDED(4))
2776 {
2777 szTmp[cch++] = ':';
2778 static const char s_achPageTypes[8][4] = { "INV", "RAM", "MI2", "M2A", "SHA", "ROM", "MIO", "BAD" };
2779 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][0];
2780 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][1];
2781 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][2];
2782 }
2783
2784 /* The numbers. */
2785 if (IS_PART_INCLUDED(3))
2786 {
2787 szTmp[cch++] = ':';
2788 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_HCPHYS_NA(pPage), 16, 12, 0, RTSTR_F_ZEROPAD | RTSTR_F_64BIT);
2789 }
2790
2791 if (IS_PART_INCLUDED(2))
2792 {
2793 szTmp[cch++] = ':';
2794 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_PAGEID(pPage), 16, 7, 0, RTSTR_F_ZEROPAD | RTSTR_F_32BIT);
2795 }
2796
2797 if (IS_PART_INCLUDED(6))
2798 {
2799 szTmp[cch++] = ':';
2800 static const char s_achRefs[4] = { '-', 'U', '!', 'L' };
2801 szTmp[cch++] = s_achRefs[PGM_PAGE_GET_TD_CREFS_NA(pPage)];
2802 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_TD_IDX_NA(pPage), 16, 4, 0, RTSTR_F_ZEROPAD | RTSTR_F_16BIT);
2803 }
2804#undef IS_PART_INCLUDED
2805
2806 cch = pfnOutput(pvArgOutput, szTmp, cch);
2807 }
2808 else
2809 cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmpage-ptr>"));
2810 NOREF(pszType); NOREF(cchWidth); NOREF(pvUser);
2811 return cch;
2812}
2813
2814
2815/** Format handler for PGMRAMRANGE.
2816 * @copydoc FNRTSTRFORMATTYPE */
2817static DECLCALLBACK(size_t) pgmFormatTypeHandlerRamRange(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
2818 const char *pszType, void const *pvValue,
2819 int cchWidth, int cchPrecision, unsigned fFlags,
2820 void *pvUser)
2821{
2822 size_t cch;
2823 PGMRAMRANGE const *pRam = (PGMRAMRANGE const *)pvValue;
2824 if (VALID_PTR(pRam))
2825 {
2826 char szTmp[80];
2827 cch = RTStrPrintf(szTmp, sizeof(szTmp), "%RGp-%RGp", pRam->GCPhys, pRam->GCPhysLast);
2828 cch = pfnOutput(pvArgOutput, szTmp, cch);
2829 }
2830 else
2831 cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmramrange-ptr>"));
2832 NOREF(pszType); NOREF(cchWidth); NOREF(cchPrecision); NOREF(pvUser); NOREF(fFlags);
2833 return cch;
2834}
2835
2836/** Format type handlers to be registered/deregistered. */
2837static const struct
2838{
2839 char szType[24];
2840 PFNRTSTRFORMATTYPE pfnHandler;
2841} g_aPgmFormatTypes[] =
2842{
2843 { "pgmpage", pgmFormatTypeHandlerPage },
2844 { "pgmramrange", pgmFormatTypeHandlerRamRange }
2845};
2846
2847#endif /* !IN_R0 || LOG_ENABLED */
2848
2849/**
2850 * Registers the global string format types.
2851 *
2852 * This should be called at module load time or in some other manner that ensures
2853 * it's called exactly once.
2854 *
2855 * @returns IPRT status code on RTStrFormatTypeRegister failure.
2856 */
2857VMMDECL(int) PGMRegisterStringFormatTypes(void)
2858{
2859#if !defined(IN_R0) || defined(LOG_ENABLED)
2860 int rc = VINF_SUCCESS;
2861 unsigned i;
2862 for (i = 0; RT_SUCCESS(rc) && i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
2863 {
2864 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
2865# ifdef IN_RING0
2866 if (rc == VERR_ALREADY_EXISTS)
2867 {
2868 /* in case of cleanup failure in ring-0 */
2869 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2870 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
2871 }
2872# endif
2873 }
2874 if (RT_FAILURE(rc))
2875 while (i-- > 0)
2876 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2877
2878 return rc;
2879#else
2880 return VINF_SUCCESS;
2881#endif
2882}
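/*
 * Logging sketch (illustrative; assumes the registered types are referenced
 * through IPRT's %R[type] custom format mechanism):
 *
 *      Log(("page=%R[pgmpage] range=%R[pgmramrange]\n", pPage, pRam));
 */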
2883
2884
2885/**
2886 * Deregisters the global string format types.
2887 *
2888 * This should be called at module unload time or in some other manner that
2889 * ensures it's called exactly once.
2890 */
2891VMMDECL(void) PGMDeregisterStringFormatTypes(void)
2892{
2893#if !defined(IN_R0) || defined(LOG_ENABLED)
2894 for (unsigned i = 0; i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
2895 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2896#endif
2897}
2898
2899#ifdef VBOX_STRICT
2900
2901/**
2902 * Asserts that there are no mapping conflicts.
2903 *
2904 * @returns Number of conflicts.
2905 * @param pVM The cross context VM structure.
2906 */
2907VMMDECL(unsigned) PGMAssertNoMappingConflicts(PVM pVM)
2908{
2909 unsigned cErrors = 0;
2910
2911 /* Only applies to raw mode -> 1 VCPU */
2912 Assert(pVM->cCpus == 1);
2913 PVMCPU pVCpu = &pVM->aCpus[0];
2914
2915 /*
2916 * Check for mapping conflicts.
2917 */
2918 for (PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
2919 pMapping;
2920 pMapping = pMapping->CTX_SUFF(pNext))
2921 {
2922 /** @todo This is slow and should be optimized, but since it's just assertions I don't care now. */
2923 for (RTGCPTR GCPtr = pMapping->GCPtr;
2924 GCPtr <= pMapping->GCPtrLast;
2925 GCPtr += PAGE_SIZE)
2926 {
2927 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)GCPtr, NULL, NULL);
2928 if (rc != VERR_PAGE_TABLE_NOT_PRESENT)
2929 {
2930 AssertMsgFailed(("Conflict at %RGv with %s\n", GCPtr, R3STRING(pMapping->pszDesc)));
2931 cErrors++;
2932 break;
2933 }
2934 }
2935 }
2936
2937 return cErrors;
2938}
2939
2940
2941/**
2942 * Asserts that everything related to the guest CR3 is correctly shadowed.
2943 *
2944 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
2945 * and assert the correctness of the guest CR3 mapping before asserting that the
2946 * shadow page tables are in sync with the guest page tables.
2947 *
2948 * @returns Number of conflicts.
2949 * @param pVM The cross context VM structure.
2950 * @param pVCpu The cross context virtual CPU structure.
2951 * @param cr3 The current guest CR3 register value.
2952 * @param cr4 The current guest CR4 register value.
2953 */
2954VMMDECL(unsigned) PGMAssertCR3(PVM pVM, PVMCPU pVCpu, uint64_t cr3, uint64_t cr4)
2955{
2956 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
2957 pgmLock(pVM);
2958 unsigned cErrors = PGM_BTH_PFN(AssertCR3, pVCpu)(pVCpu, cr3, cr4, 0, ~(RTGCPTR)0);
2959 pgmUnlock(pVM);
2960 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncCR3), a);
2961 return cErrors;
2962}
2963
2964#endif /* VBOX_STRICT */