VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp @ 23085

Last change on this file since 23085 was 23085, checked in by vboxsync, 15 years ago

The VERR_PAGE_TABLE_NOT_PRESENT problem occurs on uniprocessor VMs too. Extend the workaround.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 82.3 KB
1/* $Id: PGMAll.cpp 23085 2009-09-17 11:35:58Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor - All context code.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#define LOG_GROUP LOG_GROUP_PGM
26#include <VBox/pgm.h>
27#include <VBox/cpum.h>
28#include <VBox/selm.h>
29#include <VBox/iom.h>
30#include <VBox/sup.h>
31#include <VBox/mm.h>
32#include <VBox/stam.h>
33#include <VBox/csam.h>
34#include <VBox/patm.h>
35#include <VBox/trpm.h>
36#include <VBox/rem.h>
37#include <VBox/em.h>
38#include <VBox/hwaccm.h>
39#include <VBox/hwacc_vmx.h>
40#include "PGMInternal.h"
41#include <VBox/vm.h>
42#include <iprt/assert.h>
43#include <iprt/asm.h>
44#include <iprt/string.h>
45#include <VBox/log.h>
46#include <VBox/param.h>
47#include <VBox/err.h>
48
49
50/*******************************************************************************
51* Structures and Typedefs *
52*******************************************************************************/
53/**
54 * State structure for PGM_GST_NAME(HandlerVirtualUpdate) that's
55 * passed to PGM_GST_NAME(VirtHandlerUpdateOne) during enumeration.
56 */
57typedef struct PGMHVUSTATE
58{
59 /** The VM handle. */
60 PVM pVM;
61 /** The VMCPU handle. */
62 PVMCPU pVCpu;
63 /** The todo flags. */
64 RTUINT fTodo;
65 /** The CR4 register value. */
66 uint32_t cr4;
67} PGMHVUSTATE, *PPGMHVUSTATE;
68
69
70/*******************************************************************************
71* Internal Functions *
72*******************************************************************************/
73DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
74DECLINLINE(int) pgmShwGetPaePoolPagePD(PPGMCPU pPGM, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde);
75
76/*
77 * Shadow - 32-bit mode
78 */
79#define PGM_SHW_TYPE PGM_TYPE_32BIT
80#define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
81#include "PGMAllShw.h"
82
83/* Guest - real mode */
84#define PGM_GST_TYPE PGM_TYPE_REAL
85#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
86#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
87#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
88#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
89#include "PGMGstDefs.h"
90#include "PGMAllGst.h"
91#include "PGMAllBth.h"
92#undef BTH_PGMPOOLKIND_PT_FOR_PT
93#undef BTH_PGMPOOLKIND_ROOT
94#undef PGM_BTH_NAME
95#undef PGM_GST_TYPE
96#undef PGM_GST_NAME
97
98/* Guest - protected mode */
99#define PGM_GST_TYPE PGM_TYPE_PROT
100#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
101#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
102#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
103#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
104#include "PGMGstDefs.h"
105#include "PGMAllGst.h"
106#include "PGMAllBth.h"
107#undef BTH_PGMPOOLKIND_PT_FOR_PT
108#undef BTH_PGMPOOLKIND_ROOT
109#undef PGM_BTH_NAME
110#undef PGM_GST_TYPE
111#undef PGM_GST_NAME
112
113/* Guest - 32-bit mode */
114#define PGM_GST_TYPE PGM_TYPE_32BIT
115#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
116#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
117#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
118#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
119#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD
120#include "PGMGstDefs.h"
121#include "PGMAllGst.h"
122#include "PGMAllBth.h"
123#undef BTH_PGMPOOLKIND_PT_FOR_BIG
124#undef BTH_PGMPOOLKIND_PT_FOR_PT
125#undef BTH_PGMPOOLKIND_ROOT
126#undef PGM_BTH_NAME
127#undef PGM_GST_TYPE
128#undef PGM_GST_NAME
129
130#undef PGM_SHW_TYPE
131#undef PGM_SHW_NAME
132
133
134/*
135 * Shadow - PAE mode
136 */
137#define PGM_SHW_TYPE PGM_TYPE_PAE
138#define PGM_SHW_NAME(name) PGM_SHW_NAME_PAE(name)
139#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
140#include "PGMAllShw.h"
141
142/* Guest - real mode */
143#define PGM_GST_TYPE PGM_TYPE_REAL
144#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
145#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
146#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
147#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
148#include "PGMGstDefs.h"
149#include "PGMAllBth.h"
150#undef BTH_PGMPOOLKIND_PT_FOR_PT
151#undef BTH_PGMPOOLKIND_ROOT
152#undef PGM_BTH_NAME
153#undef PGM_GST_TYPE
154#undef PGM_GST_NAME
155
156/* Guest - protected mode */
157#define PGM_GST_TYPE PGM_TYPE_PROT
158#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
159#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
160#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
161#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
162#include "PGMGstDefs.h"
163#include "PGMAllBth.h"
164#undef BTH_PGMPOOLKIND_PT_FOR_PT
165#undef BTH_PGMPOOLKIND_ROOT
166#undef PGM_BTH_NAME
167#undef PGM_GST_TYPE
168#undef PGM_GST_NAME
169
170/* Guest - 32-bit mode */
171#define PGM_GST_TYPE PGM_TYPE_32BIT
172#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
173#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
174#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
175#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
176#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_FOR_32BIT
177#include "PGMGstDefs.h"
178#include "PGMAllBth.h"
179#undef BTH_PGMPOOLKIND_PT_FOR_BIG
180#undef BTH_PGMPOOLKIND_PT_FOR_PT
181#undef BTH_PGMPOOLKIND_ROOT
182#undef PGM_BTH_NAME
183#undef PGM_GST_TYPE
184#undef PGM_GST_NAME
185
186
187/* Guest - PAE mode */
188#define PGM_GST_TYPE PGM_TYPE_PAE
189#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
190#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
191#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
192#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
193#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT
194#include "PGMGstDefs.h"
195#include "PGMAllGst.h"
196#include "PGMAllBth.h"
197#undef BTH_PGMPOOLKIND_PT_FOR_BIG
198#undef BTH_PGMPOOLKIND_PT_FOR_PT
199#undef BTH_PGMPOOLKIND_ROOT
200#undef PGM_BTH_NAME
201#undef PGM_GST_TYPE
202#undef PGM_GST_NAME
203
204#undef PGM_SHW_TYPE
205#undef PGM_SHW_NAME
206
207
208#ifndef IN_RC /* AMD64 implies VT-x/AMD-V */
209/*
210 * Shadow - AMD64 mode
211 */
212# define PGM_SHW_TYPE PGM_TYPE_AMD64
213# define PGM_SHW_NAME(name) PGM_SHW_NAME_AMD64(name)
214# include "PGMAllShw.h"
215
216/* Guest - protected mode (only used for AMD-V nested paging in 64 bits mode) */
217# define PGM_GST_TYPE PGM_TYPE_PROT
218# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
219# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
220# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
221# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PD_PHYS
222# include "PGMGstDefs.h"
223# include "PGMAllBth.h"
224# undef BTH_PGMPOOLKIND_PT_FOR_PT
225# undef BTH_PGMPOOLKIND_ROOT
226# undef PGM_BTH_NAME
227# undef PGM_GST_TYPE
228# undef PGM_GST_NAME
229
230# ifdef VBOX_WITH_64_BITS_GUESTS
231/* Guest - AMD64 mode */
232# define PGM_GST_TYPE PGM_TYPE_AMD64
233# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
234# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
235# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
236# define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
237# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_64BIT_PML4
238# include "PGMGstDefs.h"
239# include "PGMAllGst.h"
240# include "PGMAllBth.h"
241# undef BTH_PGMPOOLKIND_PT_FOR_BIG
242# undef BTH_PGMPOOLKIND_PT_FOR_PT
243# undef BTH_PGMPOOLKIND_ROOT
244# undef PGM_BTH_NAME
245# undef PGM_GST_TYPE
246# undef PGM_GST_NAME
247# endif /* VBOX_WITH_64_BITS_GUESTS */
248
249# undef PGM_SHW_TYPE
250# undef PGM_SHW_NAME
251
252
253/*
254 * Shadow - Nested paging mode
255 */
256# define PGM_SHW_TYPE PGM_TYPE_NESTED
257# define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED(name)
258# include "PGMAllShw.h"
259
260/* Guest - real mode */
261# define PGM_GST_TYPE PGM_TYPE_REAL
262# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
263# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_REAL(name)
264# include "PGMGstDefs.h"
265# include "PGMAllBth.h"
266# undef PGM_BTH_NAME
267# undef PGM_GST_TYPE
268# undef PGM_GST_NAME
269
270/* Guest - protected mode */
271# define PGM_GST_TYPE PGM_TYPE_PROT
272# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
273# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PROT(name)
274# include "PGMGstDefs.h"
275# include "PGMAllBth.h"
276# undef PGM_BTH_NAME
277# undef PGM_GST_TYPE
278# undef PGM_GST_NAME
279
280/* Guest - 32-bit mode */
281# define PGM_GST_TYPE PGM_TYPE_32BIT
282# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
283# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT(name)
284# include "PGMGstDefs.h"
285# include "PGMAllBth.h"
286# undef PGM_BTH_NAME
287# undef PGM_GST_TYPE
288# undef PGM_GST_NAME
289
290/* Guest - PAE mode */
291# define PGM_GST_TYPE PGM_TYPE_PAE
292# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
293# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE(name)
294# include "PGMGstDefs.h"
295# include "PGMAllBth.h"
296# undef PGM_BTH_NAME
297# undef PGM_GST_TYPE
298# undef PGM_GST_NAME
299
300# ifdef VBOX_WITH_64_BITS_GUESTS
301/* Guest - AMD64 mode */
302# define PGM_GST_TYPE PGM_TYPE_AMD64
303# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
304# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64(name)
305# include "PGMGstDefs.h"
306# include "PGMAllBth.h"
307# undef PGM_BTH_NAME
308# undef PGM_GST_TYPE
309# undef PGM_GST_NAME
310# endif /* VBOX_WITH_64_BITS_GUESTS */
311
312# undef PGM_SHW_TYPE
313# undef PGM_SHW_NAME
314
315
316/*
317 * Shadow - EPT
318 */
319# define PGM_SHW_TYPE PGM_TYPE_EPT
320# define PGM_SHW_NAME(name) PGM_SHW_NAME_EPT(name)
321# include "PGMAllShw.h"
322
323/* Guest - real mode */
324# define PGM_GST_TYPE PGM_TYPE_REAL
325# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
326# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_REAL(name)
327# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
328# include "PGMGstDefs.h"
329# include "PGMAllBth.h"
330# undef BTH_PGMPOOLKIND_PT_FOR_PT
331# undef PGM_BTH_NAME
332# undef PGM_GST_TYPE
333# undef PGM_GST_NAME
334
335/* Guest - protected mode */
336# define PGM_GST_TYPE PGM_TYPE_PROT
337# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
338# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
339# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
340# include "PGMGstDefs.h"
341# include "PGMAllBth.h"
342# undef BTH_PGMPOOLKIND_PT_FOR_PT
343# undef PGM_BTH_NAME
344# undef PGM_GST_TYPE
345# undef PGM_GST_NAME
346
347/* Guest - 32-bit mode */
348# define PGM_GST_TYPE PGM_TYPE_32BIT
349# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
350# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_32BIT(name)
351# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
352# include "PGMGstDefs.h"
353# include "PGMAllBth.h"
354# undef BTH_PGMPOOLKIND_PT_FOR_PT
355# undef PGM_BTH_NAME
356# undef PGM_GST_TYPE
357# undef PGM_GST_NAME
358
359/* Guest - PAE mode */
360# define PGM_GST_TYPE PGM_TYPE_PAE
361# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
362# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PAE(name)
363# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
364# include "PGMGstDefs.h"
365# include "PGMAllBth.h"
366# undef BTH_PGMPOOLKIND_PT_FOR_PT
367# undef PGM_BTH_NAME
368# undef PGM_GST_TYPE
369# undef PGM_GST_NAME
370
371# ifdef VBOX_WITH_64_BITS_GUESTS
372/* Guest - AMD64 mode */
373# define PGM_GST_TYPE PGM_TYPE_AMD64
374# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
375# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_AMD64(name)
376# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
377# include "PGMGstDefs.h"
378# include "PGMAllBth.h"
379# undef BTH_PGMPOOLKIND_PT_FOR_PT
380# undef PGM_BTH_NAME
381# undef PGM_GST_TYPE
382# undef PGM_GST_NAME
383# endif /* VBOX_WITH_64_BITS_GUESTS */
384
385# undef PGM_SHW_TYPE
386# undef PGM_SHW_NAME
387
388#endif /* !IN_RC */
389
390
391#ifndef IN_RING3
392/**
393 * #PF Handler.
394 *
395 * @returns VBox status code (appropriate for trap handling and GC return).
396 * @param pVCpu VMCPU handle.
397 * @param uErr The trap error code.
398 * @param pRegFrame Trap register frame.
399 * @param pvFault The fault address.
400 */
401VMMDECL(int) PGMTrap0eHandler(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
402{
403 PVM pVM = pVCpu->CTX_SUFF(pVM);
404
405 Log(("PGMTrap0eHandler: uErr=%RGu pvFault=%RGv eip=%04x:%RGv\n", uErr, pvFault, pRegFrame->cs, (RTGCPTR)pRegFrame->rip));
406 STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0e, a);
407 STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );
408
409
410#ifdef VBOX_WITH_STATISTICS
411 /*
412 * Error code stats.
413 */
414 if (uErr & X86_TRAP_PF_US)
415 {
416 if (!(uErr & X86_TRAP_PF_P))
417 {
418 if (uErr & X86_TRAP_PF_RW)
419 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSNotPresentWrite);
420 else
421 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSNotPresentRead);
422 }
423 else if (uErr & X86_TRAP_PF_RW)
424 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSWrite);
425 else if (uErr & X86_TRAP_PF_RSVD)
426 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSReserved);
427 else if (uErr & X86_TRAP_PF_ID)
428 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSNXE);
429 else
430 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSRead);
431 }
432 else
433 { /* Supervisor */
434 if (!(uErr & X86_TRAP_PF_P))
435 {
436 if (uErr & X86_TRAP_PF_RW)
437 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eSVNotPresentWrite);
438 else
439 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eSVNotPresentRead);
440 }
441 else if (uErr & X86_TRAP_PF_RW)
442 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eSVWrite);
443 else if (uErr & X86_TRAP_PF_ID)
444 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eSNXE);
445 else if (uErr & X86_TRAP_PF_RSVD)
446 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eSVReserved);
447 }
448#endif /* VBOX_WITH_STATISTICS */
449
450 /*
451 * Call the worker.
452 */
453 pgmLock(pVM);
454 int rc = PGM_BTH_PFN(Trap0eHandler, pVCpu)(pVCpu, uErr, pRegFrame, pvFault);
455 Assert(PGMIsLockOwner(pVM));
456 pgmUnlock(pVM);
457 if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
458 rc = VINF_SUCCESS;
459
460# ifdef IN_RING0
461 /* Note: hack alert for difficult to reproduce problem. */
462 if (rc == VERR_PAGE_TABLE_NOT_PRESENT)
463 {
464 Log(("WARNING: Unexpected VERR_PAGE_TABLE_NOT_PRESENT for page fault at %RGv error code %x (rip=%RGv)\n", pvFault, uErr, pRegFrame->rip));
465 rc = VINF_SUCCESS;
466 }
467# endif
468
469 STAM_STATS({ if (rc == VINF_EM_RAW_GUEST_TRAP) STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eGuestPF); });
470 STAM_STATS({ if (!pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution))
471 pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2Misc; });
472 STAM_PROFILE_STOP_EX(&pVCpu->pgm.s.StatRZTrap0e, pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution), a);
473 return rc;
474}
475#endif /* !IN_RING3 */
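
/*
 * For reference, the architectural #PF error code bits classified by the
 * statistics block in PGMTrap0eHandler() above (the X86_TRAP_PF_* masks are
 * expected to map onto exactly these bits):
 *
 *     X86_TRAP_PF_P    (bit 0)  0 = page not present, 1 = protection violation
 *     X86_TRAP_PF_RW   (bit 1)  0 = read access,      1 = write access
 *     X86_TRAP_PF_US   (bit 2)  0 = supervisor mode,  1 = user mode
 *     X86_TRAP_PF_RSVD (bit 3)  reserved bit set in a paging structure entry
 *     X86_TRAP_PF_ID   (bit 4)  instruction fetch (requires NX / execute disable)
 */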
476
477
478/**
479 * Prefetch a page
480 *
481 * Typically used to sync commonly used pages before entering raw mode
482 * after a CR3 reload.
483 *
484 * @returns VBox status code suitable for scheduling.
485 * @retval VINF_SUCCESS on success.
486 * @retval VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
487 * @param pVCpu VMCPU handle.
488 * @param GCPtrPage Page to invalidate.
489 */
490VMMDECL(int) PGMPrefetchPage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
491{
492 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,Prefetch), a);
493 int rc = PGM_BTH_PFN(PrefetchPage, pVCpu)(pVCpu, GCPtrPage);
494 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,Prefetch), a);
495 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
496 return rc;
497}
498
499
500/**
501 * Gets the mapping corresponding to the specified address (if any).
502 *
503 * @returns Pointer to the mapping.
504 * @returns NULL if not found.
505 *
506 * @param pVM The virtual machine.
507 * @param GCPtr The guest context pointer.
508 */
509PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr)
510{
511 PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
512 while (pMapping)
513 {
514 if ((uintptr_t)GCPtr < (uintptr_t)pMapping->GCPtr)
515 break;
516 if ((uintptr_t)GCPtr - (uintptr_t)pMapping->GCPtr < pMapping->cb)
517 return pMapping;
518 pMapping = pMapping->CTX_SUFF(pNext);
519 }
520 return NULL;
521}
522
523
524/**
525 * Verifies a range of pages for read or write access
526 *
527 * Only checks the guest's page tables
528 *
529 * @returns VBox status code.
530 * @param pVCpu VMCPU handle.
531 * @param Addr Guest virtual address to check
532 * @param cbSize Access size
533 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
534 * @remarks Currently not in use.
535 */
536VMMDECL(int) PGMIsValidAccess(PVMCPU pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
537{
538 /*
539 * Validate input.
540 */
541 if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
542 {
543 AssertMsgFailed(("PGMIsValidAccess: invalid access type %08x\n", fAccess));
544 return VERR_INVALID_PARAMETER;
545 }
546
547 uint64_t fPage;
548 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)Addr, &fPage, NULL);
549 if (RT_FAILURE(rc))
550 {
551 Log(("PGMIsValidAccess: access violation for %RGv rc=%d\n", Addr, rc));
552 return VINF_EM_RAW_GUEST_TRAP;
553 }
554
555 /*
556 * Check if the access would cause a page fault
557 *
558 * Note that hypervisor page directories are not present in the guest's tables, so this check
559 * is sufficient.
560 */
561 bool fWrite = !!(fAccess & X86_PTE_RW);
562 bool fUser = !!(fAccess & X86_PTE_US);
563 if ( !(fPage & X86_PTE_P)
564 || (fWrite && !(fPage & X86_PTE_RW))
565 || (fUser && !(fPage & X86_PTE_US)) )
566 {
567 Log(("PGMIsValidAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPage, fWrite, fUser));
568 return VINF_EM_RAW_GUEST_TRAP;
569 }
570 if ( RT_SUCCESS(rc)
571 && PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize))
572 return PGMIsValidAccess(pVCpu, Addr + PAGE_SIZE, (cbSize > PAGE_SIZE) ? cbSize - PAGE_SIZE : 1, fAccess);
573 return rc;
574}
575
576
577/**
578 * Verifies a range of pages for read or write access
579 *
580 * Supports handling of pages marked for dirty bit tracking and CSAM
581 *
582 * @returns VBox status code.
583 * @param pVCpu VMCPU handle.
584 * @param Addr Guest virtual address to check
585 * @param cbSize Access size
586 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
587 */
588VMMDECL(int) PGMVerifyAccess(PVMCPU pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
589{
590 PVM pVM = pVCpu->CTX_SUFF(pVM);
591
592 AssertMsg(!(fAccess & ~(X86_PTE_US | X86_PTE_RW)), ("PGMVerifyAccess: invalid access type %08x\n", fAccess));
593
594 /*
595 * Get going.
596 */
597 uint64_t fPageGst;
598 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)Addr, &fPageGst, NULL);
599 if (RT_FAILURE(rc))
600 {
601 Log(("PGMVerifyAccess: access violation for %RGv rc=%d\n", Addr, rc));
602 return VINF_EM_RAW_GUEST_TRAP;
603 }
604
605 /*
606 * Check if the access would cause a page fault
607 *
608 * Note that hypervisor page directories are not present in the guest's tables, so this check
609 * is sufficient.
610 */
611 const bool fWrite = !!(fAccess & X86_PTE_RW);
612 const bool fUser = !!(fAccess & X86_PTE_US);
613 if ( !(fPageGst & X86_PTE_P)
614 || (fWrite && !(fPageGst & X86_PTE_RW))
615 || (fUser && !(fPageGst & X86_PTE_US)) )
616 {
617 Log(("PGMVerifyAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPageGst, fWrite, fUser));
618 return VINF_EM_RAW_GUEST_TRAP;
619 }
620
621 if (!HWACCMIsNestedPagingActive(pVM))
622 {
623 /*
624 * Next step is to verify if we protected this page for dirty bit tracking or for CSAM scanning
625 */
626 rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, NULL, NULL);
627 if ( rc == VERR_PAGE_NOT_PRESENT
628 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
629 {
630 /*
631 * Page is not present in our page tables.
632 * Try to sync it!
633 */
634 Assert(X86_TRAP_PF_RW == X86_PTE_RW && X86_TRAP_PF_US == X86_PTE_US);
635 uint32_t uErr = fAccess & (X86_TRAP_PF_RW | X86_TRAP_PF_US);
636 rc = PGM_BTH_PFN(VerifyAccessSyncPage, pVCpu)(pVCpu, Addr, fPageGst, uErr);
637 if (rc != VINF_SUCCESS)
638 return rc;
639 }
640 else
641 AssertMsg(rc == VINF_SUCCESS, ("PGMShwGetPage %RGv failed with %Rrc\n", Addr, rc));
642 }
643
644#if 0 /* def VBOX_STRICT; triggers too often now */
645 /*
646 * This check is a bit paranoid, but useful.
647 */
648 /** @note this will assert when writing to monitored pages (a bit annoying actually) */
649 uint64_t fPageShw;
650 rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, &fPageShw, NULL);
651 if ( (rc == VERR_PAGE_NOT_PRESENT || RT_FAILURE(rc))
652 || (fWrite && !(fPageShw & X86_PTE_RW))
653 || (fUser && !(fPageShw & X86_PTE_US)) )
654 {
655 AssertMsgFailed(("Unexpected access violation for %RGv! rc=%Rrc write=%d user=%d\n",
656 Addr, rc, fWrite && !(fPageShw & X86_PTE_RW), fUser && !(fPageShw & X86_PTE_US)));
657 return VINF_EM_RAW_GUEST_TRAP;
658 }
659#endif
660
661 if ( RT_SUCCESS(rc)
662 && ( PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize - 1)
663 || Addr + cbSize < Addr))
664 {
665 /* Don't recursively call PGMVerifyAccess as we might run out of stack. */
666 for (;;)
667 {
668 Addr += PAGE_SIZE;
669 if (cbSize > PAGE_SIZE)
670 cbSize -= PAGE_SIZE;
671 else
672 cbSize = 1;
673 rc = PGMVerifyAccess(pVCpu, Addr, 1, fAccess);
674 if (rc != VINF_SUCCESS)
675 break;
676 if (PAGE_ADDRESS(Addr) == PAGE_ADDRESS(Addr + cbSize - 1))
677 break;
678 }
679 }
680 return rc;
681}
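
/*
 * Illustrative usage sketch for PGMVerifyAccess (hypothetical caller and
 * variables GCPtrDst/cbBuf): verify that the guest itself may perform a
 * user-mode write of cbBuf bytes at GCPtrDst before emulating it.
 */
#if 0
    int rc2 = PGMVerifyAccess(pVCpu, GCPtrDst, cbBuf, X86_PTE_RW | X86_PTE_US);
    if (rc2 == VINF_EM_RAW_GUEST_TRAP)
        return rc2;                 /* the access would fault - let the guest take the #PF */
    AssertRCReturn(rc2, rc2);       /* any other failure is unexpected here */
#endif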
682
683
684/**
685 * Emulation of the invlpg instruction (HC only actually).
686 *
687 * @returns VBox status code, special care required.
688 * @retval VINF_PGM_SYNC_CR3 - handled.
689 * @retval VINF_EM_RAW_EMULATE_INSTR - not handled (RC only).
690 * @retval VERR_REM_FLUSHED_PAGES_OVERFLOW - not handled.
691 *
692 * @param pVCpu VMCPU handle.
693 * @param GCPtrPage Page to invalidate.
694 *
695 * @remark ASSUMES the page table entry or page directory is valid. Fairly
696 * safe, but there could be edge cases!
697 *
698 * @todo Flush page or page directory only if necessary!
699 */
700VMMDECL(int) PGMInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
701{
702 PVM pVM = pVCpu->CTX_SUFF(pVM);
703 int rc;
704 Log3(("PGMInvalidatePage: GCPtrPage=%RGv\n", GCPtrPage));
705
706#ifndef IN_RING3
707 /*
708 * Notify the recompiler so it can record this instruction.
709 * Failure happens when it's out of space. We'll return to HC in that case.
710 */
711 rc = REMNotifyInvalidatePage(pVM, GCPtrPage);
712 if (rc != VINF_SUCCESS)
713 return rc;
714#endif /* !IN_RING3 */
715
716
717#ifdef IN_RC
718 /*
719 * Check for conflicts and pending CR3 monitoring updates.
720 */
721 if (!pVM->pgm.s.fMappingsFixed)
722 {
723 if ( pgmGetMapping(pVM, GCPtrPage)
724 && PGMGstGetPage(pVCpu, GCPtrPage, NULL, NULL) != VERR_PAGE_TABLE_NOT_PRESENT)
725 {
726 LogFlow(("PGMGCInvalidatePage: Conflict!\n"));
727 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
728 STAM_COUNTER_INC(&pVM->pgm.s.StatRCInvlPgConflict);
729 return VINF_PGM_SYNC_CR3;
730 }
731
732 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
733 {
734 LogFlow(("PGMGCInvalidatePage: PGM_SYNC_MONITOR_CR3 -> reinterpret instruction in R3\n"));
735 STAM_COUNTER_INC(&pVM->pgm.s.StatRCInvlPgSyncMonCR3);
736 return VINF_EM_RAW_EMULATE_INSTR;
737 }
738 }
739#endif /* IN_RC */
740
741 /*
742 * Call paging mode specific worker.
743 */
744 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePage), a);
745 pgmLock(pVM);
746 rc = PGM_BTH_PFN(InvalidatePage, pVCpu)(pVCpu, GCPtrPage);
747 pgmUnlock(pVM);
748 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePage), a);
749
750#ifdef IN_RING3
751 /*
752 * Check if we have a pending update of the CR3 monitoring.
753 */
754 if ( RT_SUCCESS(rc)
755 && (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3))
756 {
757 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
758 Assert(!pVM->pgm.s.fMappingsFixed);
759 }
760
761 /*
762 * Inform CSAM about the flush
763 *
764 * Note: This is to check if monitored pages have been changed; when we implement
765 * callbacks for virtual handlers, this is no longer required.
766 */
767 CSAMR3FlushPage(pVM, GCPtrPage);
768#endif /* IN_RING3 */
769 return rc;
770}
771
772
773/**
774 * Executes an instruction using the interpreter.
775 *
776 * @returns VBox status code (appropriate for trap handling and GC return).
777 * @param pVM VM handle.
778 * @param pVCpu VMCPU handle.
779 * @param pRegFrame Register frame.
780 * @param pvFault Fault address.
781 */
782VMMDECL(int) PGMInterpretInstruction(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
783{
784 uint32_t cb;
785 int rc = EMInterpretInstruction(pVM, pVCpu, pRegFrame, pvFault, &cb);
786 if (rc == VERR_EM_INTERPRETER)
787 rc = VINF_EM_RAW_EMULATE_INSTR;
788 if (rc != VINF_SUCCESS)
789 Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%RGv)\n", rc, pvFault));
790 return rc;
791}
792
793
794/**
795 * Gets effective page information (from the VMM page directory).
796 *
797 * @returns VBox status.
798 * @param pVCpu VMCPU handle.
799 * @param GCPtr Guest Context virtual address of the page.
800 * @param pfFlags Where to store the flags. These are X86_PTE_*.
801 * @param pHCPhys Where to store the HC physical address of the page.
802 * This is page aligned.
803 * @remark You should use PGMMapGetPage() for pages in a mapping.
804 */
805VMMDECL(int) PGMShwGetPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
806{
807 pgmLock(pVCpu->CTX_SUFF(pVM));
808 int rc = PGM_SHW_PFN(GetPage, pVCpu)(pVCpu, GCPtr, pfFlags, pHCPhys);
809 pgmUnlock(pVCpu->CTX_SUFF(pVM));
810 return rc;
811}
812
813
814/**
815 * Sets (replaces) the page flags for a range of pages in the shadow context.
816 *
817 * @returns VBox status.
818 * @param pVCpu VMCPU handle.
819 * @param GCPtr The address of the first page.
820 * @param cb The size of the range in bytes.
821 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
822 * @remark You must use PGMMapSetPage() for pages in a mapping.
823 */
824VMMDECL(int) PGMShwSetPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
825{
826 return PGMShwModifyPage(pVCpu, GCPtr, cb, fFlags, 0);
827}
828
829
830/**
831 * Modify page flags for a range of pages in the shadow context.
832 *
833 * The existing flags are ANDed with the fMask and ORed with the fFlags.
834 *
835 * @returns VBox status code.
836 * @param pVCpu VMCPU handle.
837 * @param GCPtr Virtual address of the first page in the range.
838 * @param cb Size (in bytes) of the range to apply the modification to.
839 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
840 * @param fMask The AND mask - page flags X86_PTE_*.
841 * Be very CAREFUL when ~'ing constants which could be 32-bit!
842 * @remark You must use PGMMapModifyPage() for pages in a mapping.
843 */
844VMMDECL(int) PGMShwModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
845{
846 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
847 Assert(cb);
848
849 /*
850 * Align the input.
851 */
852 cb += GCPtr & PAGE_OFFSET_MASK;
853 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
854 GCPtr = (GCPtr & PAGE_BASE_GC_MASK); /** @todo this ain't necessary, right... */
855
856 /*
857 * Call worker.
858 */
859 PVM pVM = pVCpu->CTX_SUFF(pVM);
860 pgmLock(pVM);
861 int rc = PGM_SHW_PFN(ModifyPage, pVCpu)(pVCpu, GCPtr, cb, fFlags, fMask);
862 pgmUnlock(pVM);
863 return rc;
864}
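
/*
 * Worked example (hypothetical caller): to write protect a single shadow
 * page while leaving all other flags intact, OR in nothing and AND with
 * everything except X86_PTE_RW.  Note the 64-bit cast before the ~ operator,
 * as cautioned above.
 */
#if 0
    rc = PGMShwModifyPage(pVCpu, GCPtr, PAGE_SIZE, 0 /*fFlags*/, ~(uint64_t)X86_PTE_RW /*fMask*/);
#endif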
865
866/**
867 * Syncs the shadow page directory for the specified address (PAE), allocating it if necessary.
868 *
869 * @returns VBox status code.
870 * @param pVCpu The VMCPU handle.
871 * @param GCPtr The address.
872 * @param pGstPdpe Guest PDPT entry
873 * @param ppPD Receives address of page directory
874 */
875int pgmShwSyncPaePDPtr(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDPE pGstPdpe, PX86PDPAE *ppPD)
876{
877 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
878 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(&pVCpu->pgm.s);
879 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
880 PVM pVM = pVCpu->CTX_SUFF(pVM);
881 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
882 PPGMPOOLPAGE pShwPage;
883 int rc;
884
885 Assert(PGMIsLockOwner(pVM));
886
887 /* Allocate page directory if not present. */
888 if ( !pPdpe->n.u1Present
889 && !(pPdpe->u & X86_PDPE_PG_MASK))
890 {
891 bool fNestedPaging = HWACCMIsNestedPagingActive(pVM);
892 bool fPaging = !!(CPUMGetGuestCR0(pVCpu) & X86_CR0_PG);
893 RTGCPTR64 GCPdPt;
894 PGMPOOLKIND enmKind;
895
896# if defined(IN_RC)
897 /* Make sure the dynamic pPdpe mapping will not be reused during this function. */
898 PGMDynLockHCPage(pVM, (uint8_t *)pPdpe);
899# endif
900
901 if (fNestedPaging || !fPaging)
902 {
903 /* AMD-V nested paging or real/protected mode without paging */
904 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
905 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
906 }
907 else
908 {
909 Assert(pGstPdpe);
910
911 if (CPUMGetGuestCR4(pVCpu) & X86_CR4_PAE)
912 {
913 if (!pGstPdpe->n.u1Present)
914 {
915 /* PD not present; guest must reload CR3 to change it.
916 * No need to monitor anything in this case.
917 */
918 Assert(!HWACCMIsEnabled(pVM));
919
920 GCPdPt = pGstPdpe->u & X86_PDPE_PG_MASK;
921 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
922 pGstPdpe->n.u1Present = 1;
923 }
924 else
925 {
926 GCPdPt = pGstPdpe->u & X86_PDPE_PG_MASK;
927 enmKind = PGMPOOLKIND_PAE_PD_FOR_PAE_PD;
928 }
929 }
930 else
931 {
932 GCPdPt = CPUMGetGuestCR3(pVCpu);
933 enmKind = (PGMPOOLKIND)(PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD + iPdPt);
934 }
935 }
936
937 /* Create a reference back to the PDPT by using the index in its shadow page. */
938 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPdPt, &pShwPage);
939 AssertRCReturn(rc, rc);
940
941 /* The PD was cached or created; hook it up now. */
942 pPdpe->u |= pShwPage->Core.Key
943 | (pGstPdpe->u & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));
944
945# if defined(IN_RC)
946 * In 32-bit PAE mode we *must* invalidate the TLB when changing a PDPT entry; the CPU fetches them only during cr3 load, so any
947 * non-present PDPT will continue to cause page faults.
948 */
949 ASMReloadCR3();
950 PGMDynUnlockHCPage(pVM, (uint8_t *)pPdpe);
951# endif
952 }
953 else
954 {
955 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
956 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
957 Assert((pPdpe->u & X86_PDPE_PG_MASK) == pShwPage->Core.Key);
958
959 pgmPoolCacheUsed(pPool, pShwPage);
960 }
961 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
962 return VINF_SUCCESS;
963}
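
/*
 * Worked example of the PAE index arithmetic used above (architectural
 * address split, shown here for illustration).  For GCPtr = 0xC0123456:
 *
 *     PDPT index (bits 31:30) =  GCPtr >> 30           = 3
 *     PD index   (bits 29:21) = (GCPtr >> 21) & 0x1ff  = 0x000
 *     PT index   (bits 20:12) = (GCPtr >> 12) & 0x1ff  = 0x123
 *     offset     (bits 11:0)  =  GCPtr & 0xfff         = 0x456
 *
 * X86_PDPT_SHIFT and X86_PDPT_MASK_PAE are expected to encode the first line.
 */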
964
965
966/**
967 * Gets the pgm pool page of the shadow page directory for an address, PAE.
968 *
969 * @returns VBox status code.
970 * @param pPGM Pointer to the PGMCPU instance data.
971 * @param GCPtr The address.
972 * @param ppShwPde Receives the address of the pgm pool page for the shadow page directory
973 */
974DECLINLINE(int) pgmShwGetPaePoolPagePD(PPGMCPU pPGM, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde)
975{
976 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
977 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pPGM);
978
979 Assert(PGMIsLockOwner(PGMCPU2VM(pPGM)));
980
981 AssertReturn(pPdpt, VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT); /* can't happen */
982 if (!pPdpt->a[iPdPt].n.u1Present)
983 {
984 LogFlow(("pgmShwGetPaePoolPagePD: PD %d not present (%RX64)\n", iPdPt, pPdpt->a[iPdPt].u));
985 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
986 }
987 AssertMsg(pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK, ("GCPtr=%RGv\n", GCPtr));
988
989 /* Fetch the pgm pool shadow descriptor. */
990 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(PGMCPU2PGM(pPGM)->CTX_SUFF(pPool), pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
991 AssertReturn(pShwPde, VERR_INTERNAL_ERROR);
992
993 *ppShwPde = pShwPde;
994 return VINF_SUCCESS;
995}
996
997#ifndef IN_RC
998
999/**
1000 * Syncs the SHADOW page directory pointer for the specified address.
1001 *
1002 * Allocates backing pages in case the PDPT or PML4 entry is missing.
1003 *
1004 * The caller is responsible for making sure the guest has a valid PD before
1005 * calling this function.
1006 *
1007 * @returns VBox status.
1008 * @param pVCpu VMCPU handle.
1009 * @param GCPtr The address.
1010 * @param pGstPml4e Guest PML4 entry
1011 * @param pGstPdpe Guest PDPT entry
1012 * @param ppPD Receives address of page directory
1013 */
1014int pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E pGstPml4e, PX86PDPE pGstPdpe, PX86PDPAE *ppPD)
1015{
1016 PPGMCPU pPGM = &pVCpu->pgm.s;
1017 PVM pVM = pVCpu->CTX_SUFF(pVM);
1018 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1019 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1020 PX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pPGM, iPml4);
1021 bool fNestedPaging = HWACCMIsNestedPagingActive(pVM);
1022 bool fPaging = !!(CPUMGetGuestCR0(pVCpu) & X86_CR0_PG);
1023 PPGMPOOLPAGE pShwPage;
1024 int rc;
1025
1026 Assert(PGMIsLockOwner(pVM));
1027
1028 /* Allocate page directory pointer table if not present. */
1029 if ( !pPml4e->n.u1Present
1030 && !(pPml4e->u & X86_PML4E_PG_MASK))
1031 {
1032 RTGCPTR64 GCPml4;
1033 PGMPOOLKIND enmKind;
1034
1035 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1036
1037 if (fNestedPaging || !fPaging)
1038 {
1039 /* AMD-V nested paging or real/protected mode without paging */
1040 GCPml4 = (RTGCPTR64)iPml4 << X86_PML4_SHIFT;
1041 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_PHYS;
1042 }
1043 else
1044 {
1045 Assert(pGstPml4e && pGstPdpe);
1046
1047 GCPml4 = pGstPml4e->u & X86_PML4E_PG_MASK;
1048 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT;
1049 }
1050
1051 /* Create a reference back to the PDPT by using the index in its shadow page. */
1052 rc = pgmPoolAlloc(pVM, GCPml4, enmKind, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, &pShwPage);
1053 AssertRCReturn(rc, rc);
1054 }
1055 else
1056 {
1057 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
1058 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1059
1060 pgmPoolCacheUsed(pPool, pShwPage);
1061 }
1062 /* The PDPT was cached or created; hook it up now. */
1063 pPml4e->u |= pShwPage->Core.Key
1064 | (pGstPml4e->u & ~(X86_PML4E_PG_MASK | X86_PML4E_AVL_MASK | X86_PML4E_PCD | X86_PML4E_PWT));
1065
1066 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1067 PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1068 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1069
1070 /* Allocate page directory if not present. */
1071 if ( !pPdpe->n.u1Present
1072 && !(pPdpe->u & X86_PDPE_PG_MASK))
1073 {
1074 RTGCPTR64 GCPdPt;
1075 PGMPOOLKIND enmKind;
1076
1077 if (fNestedPaging || !fPaging)
1078 {
1079 /* AMD-V nested paging or real/protected mode without paging */
1080 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
1081 enmKind = PGMPOOLKIND_64BIT_PD_FOR_PHYS;
1082 }
1083 else
1084 {
1085 Assert(pGstPdpe);
1086
1087 GCPdPt = pGstPdpe->u & X86_PDPE_PG_MASK;
1088 enmKind = PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD;
1089 }
1090
1091 /* Create a reference back to the PDPT by using the index in its shadow page. */
1092 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, pShwPage->idx, iPdPt, &pShwPage);
1093 AssertRCReturn(rc, rc);
1094 }
1095 else
1096 {
1097 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
1098 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1099
1100 pgmPoolCacheUsed(pPool, pShwPage);
1101 }
1102 /* The PD was cached or created; hook it up now. */
1103 pPdpe->u |= pShwPage->Core.Key
1104 | (pGstPdpe->u & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));
1105
1106 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1107 return VINF_SUCCESS;
1108}
1109
1110
1111/**
1112 * Gets the SHADOW page directory pointer for the specified address (long mode).
1113 *
1114 * @returns VBox status.
1115 * @param pVCpu VMCPU handle.
1116 * @param GCPtr The address.
1117 * @param ppPdpt Receives address of pdpt
1118 * @param ppPD Receives address of page directory
1119 */
1120DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
1121{
1122 PPGMCPU pPGM = &pVCpu->pgm.s;
1123 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1124 PCX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pPGM, iPml4);
1125
1126 Assert(PGMIsLockOwner(PGMCPU2VM(pPGM)));
1127
1128 AssertReturn(pPml4e, VERR_INTERNAL_ERROR);
1129 if (ppPml4e)
1130 *ppPml4e = (PX86PML4E)pPml4e;
1131
1132 Log4(("pgmShwGetLongModePDPtr %VGv (%VHv) %RX64\n", GCPtr, pPml4e, pPml4e->u));
1133
1134 if (!pPml4e->n.u1Present)
1135 return VERR_PAGE_MAP_LEVEL4_NOT_PRESENT;
1136
1137 PVM pVM = pVCpu->CTX_SUFF(pVM);
1138 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1139 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
1140 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1141
1142 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1143 PCX86PDPT pPdpt = *ppPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1144 if (!pPdpt->a[iPdPt].n.u1Present)
1145 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1146
1147 pShwPage = pgmPoolGetPage(pPool, pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
1148 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1149
1150 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1151 return VINF_SUCCESS;
1152}
1153
1154
1155/**
1156 * Syncs the SHADOW EPT page directory pointer for the specified address. Allocates
1157 * backing pages in case the PDPT or PML4 entry is missing.
1158 *
1159 * @returns VBox status.
1160 * @param pVCpu VMCPU handle.
1161 * @param GCPtr The address.
1162 * @param ppPdpt Receives address of pdpt
1163 * @param ppPD Receives address of page directory
1164 */
1165int pgmShwGetEPTPDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD)
1166{
1167 PPGMCPU pPGM = &pVCpu->pgm.s;
1168 PVM pVM = pVCpu->CTX_SUFF(pVM);
1169 const unsigned iPml4 = (GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
1170 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1171 PEPTPML4 pPml4;
1172 PEPTPML4E pPml4e;
1173 PPGMPOOLPAGE pShwPage;
1174 int rc;
1175
1176 Assert(HWACCMIsNestedPagingActive(pVM));
1177 Assert(PGMIsLockOwner(pVM));
1178
1179 pPml4 = (PEPTPML4)PGMPOOL_PAGE_2_PTR_BY_PGMCPU(pPGM, pPGM->CTX_SUFF(pShwPageCR3));
1180 Assert(pPml4);
1181
1182 /* Allocate page directory pointer table if not present. */
1183 pPml4e = &pPml4->a[iPml4];
1184 if ( !pPml4e->n.u1Present
1185 && !(pPml4e->u & EPT_PML4E_PG_MASK))
1186 {
1187 Assert(!(pPml4e->u & EPT_PML4E_PG_MASK));
1188 RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT;
1189
1190 rc = pgmPoolAlloc(pVM, GCPml4, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOL_IDX_NESTED_ROOT, iPml4, &pShwPage);
1191 AssertRCReturn(rc, rc);
1192 }
1193 else
1194 {
1195 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
1196 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1197
1198 pgmPoolCacheUsed(pPool, pShwPage);
1199 }
1200 /* The PDPT was cached or created; hook it up now and fill with the default value. */
1201 pPml4e->u = pShwPage->Core.Key;
1202 pPml4e->n.u1Present = 1;
1203 pPml4e->n.u1Write = 1;
1204 pPml4e->n.u1Execute = 1;
1205
1206 const unsigned iPdPt = (GCPtr >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
1207 PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1208 PEPTPDPTE pPdpe = &pPdpt->a[iPdPt];
1209
1210 if (ppPdpt)
1211 *ppPdpt = pPdpt;
1212
1213 /* Allocate page directory if not present. */
1214 if ( !pPdpe->n.u1Present
1215 && !(pPdpe->u & EPT_PDPTE_PG_MASK))
1216 {
1217 RTGCPTR64 GCPdPt = (RTGCPTR64)iPdPt << EPT_PDPT_SHIFT;
1218
1219 rc = pgmPoolAlloc(pVM, GCPdPt, PGMPOOLKIND_64BIT_PD_FOR_PHYS, pShwPage->idx, iPdPt, &pShwPage);
1220 AssertRCReturn(rc, rc);
1221 }
1222 else
1223 {
1224 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & EPT_PDPTE_PG_MASK);
1225 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1226
1227 pgmPoolCacheUsed(pPool, pShwPage);
1228 }
1229 /* The PD was cached or created; hook it up now and fill with the default value. */
1230 pPdpe->u = pShwPage->Core.Key;
1231 pPdpe->n.u1Present = 1;
1232 pPdpe->n.u1Write = 1;
1233 pPdpe->n.u1Execute = 1;
1234
1235 *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1236 return VINF_SUCCESS;
1237}
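
/*
 * Design note (editorial): unlike the legacy/PAE walkers above, EPT entries
 * carry separate present/read, write and execute permission bits, which is
 * why the intermediate PML4 and PDPT entries set up here are granted all
 * three; access restrictions are then enforced solely by the leaf entries
 * filled in later.
 */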
1238
1239#endif /* !IN_RC */
1240
1241/**
1242 * Gets effective Guest OS page information.
1243 *
1244 * When GCPtr is in a big page, the function will return as if it were a normal
1245 * 4KB page. If the need for distinguishing between big and normal pages becomes
1246 * necessary at a later point, a separate variant of PGMGstGetPage() will have to
1247 * be added for that purpose.
1248 *
1249 * @returns VBox status.
1250 * @param pVCpu VMCPU handle.
1251 * @param GCPtr Guest Context virtual address of the page.
1252 * @param pfFlags Where to store the flags. These are X86_PTE_*, even for big pages.
1253 * @param pGCPhys Where to store the GC physical address of the page.
1254 * This is page aligned. The fact that the
1255 */
1256VMMDECL(int) PGMGstGetPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
1257{
1258 return PGM_GST_PFN(GetPage, pVCpu)(pVCpu, GCPtr, pfFlags, pGCPhys);
1259}
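
/*
 * Illustrative usage sketch for PGMGstGetPage (hypothetical caller): resolve
 * a guest linear address and check guest-level writability before touching
 * the backing physical page.
 */
#if 0
    uint64_t fFlags;
    RTGCPHYS GCPhys;
    int rc2 = PGMGstGetPage(pVCpu, GCPtr, &fFlags, &GCPhys);
    if (RT_SUCCESS(rc2) && (fFlags & X86_PTE_RW))
        GCPhys |= GCPtr & PAGE_OFFSET_MASK; /* re-add the byte offset within the page */
#endif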
1260
1261
1262/**
1263 * Checks if the page is present.
1264 *
1265 * @returns true if the page is present.
1266 * @returns false if the page is not present.
1267 * @param pVCpu VMCPU handle.
1268 * @param GCPtr Address within the page.
1269 */
1270VMMDECL(bool) PGMGstIsPagePresent(PVMCPU pVCpu, RTGCPTR GCPtr)
1271{
1272 int rc = PGMGstGetPage(pVCpu, GCPtr, NULL, NULL);
1273 return RT_SUCCESS(rc);
1274}
1275
1276
1277/**
1278 * Sets (replaces) the page flags for a range of pages in the guest's tables.
1279 *
1280 * @returns VBox status.
1281 * @param pVCpu VMCPU handle.
1282 * @param GCPtr The address of the first page.
1283 * @param cb The size of the range in bytes.
1284 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
1285 */
1286VMMDECL(int) PGMGstSetPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
1287{
1288 return PGMGstModifyPage(pVCpu, GCPtr, cb, fFlags, 0);
1289}
1290
1291
1292/**
1293 * Modify page flags for a range of pages in the guest's tables
1294 *
1295 * The existing flags are ANDed with the fMask and ORed with the fFlags.
1296 *
1297 * @returns VBox status code.
1298 * @param pVCpu VMCPU handle.
1299 * @param GCPtr Virtual address of the first page in the range.
1300 * @param cb Size (in bytes) of the range to apply the modification to.
1301 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
1302 * @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
1303 * Be very CAREFUL when ~'ing constants which could be 32-bit!
1304 */
1305VMMDECL(int) PGMGstModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
1306{
1307 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,GstModifyPage), a);
1308
1309 /*
1310 * Validate input.
1311 */
1312 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
1313 Assert(cb);
1314
1315 LogFlow(("PGMGstModifyPage %RGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));
1316
1317 /*
1318 * Adjust input.
1319 */
1320 cb += GCPtr & PAGE_OFFSET_MASK;
1321 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
1322 GCPtr = (GCPtr & PAGE_BASE_GC_MASK);
1323
1324 /*
1325 * Call worker.
1326 */
1327 int rc = PGM_GST_PFN(ModifyPage, pVCpu)(pVCpu, GCPtr, cb, fFlags, fMask);
1328
1329 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,GstModifyPage), a);
1330 return rc;
1331}
1332
1333#ifdef IN_RING3
1334
1335/**
1336 * Performs the lazy mapping of the 32-bit guest PD.
1337 *
1338 * @returns Pointer to the mapping.
1339 * @param pPGM The PGM instance data.
1340 */
1341PX86PD pgmGstLazyMap32BitPD(PPGMCPU pPGM)
1342{
1343 Assert(!pPGM->CTX_SUFF(pGst32BitPd));
1344 PVM pVM = PGMCPU2VM(pPGM);
1345 pgmLock(pVM);
1346
1347 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, pPGM->GCPhysCR3);
1348 AssertReturn(pPage, NULL);
1349
1350 RTHCPTR HCPtrGuestCR3;
1351 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pPGM->GCPhysCR3 & X86_CR3_PAGE_MASK, (void **)&HCPtrGuestCR3);
1352 AssertRCReturn(rc, NULL);
1353
1354 pPGM->pGst32BitPdR3 = (R3PTRTYPE(PX86PD))HCPtrGuestCR3;
1355# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1356 pPGM->pGst32BitPdR0 = (R0PTRTYPE(PX86PD))HCPtrGuestCR3;
1357# endif
1358
1359 pgmUnlock(pVM);
1360 return pPGM->CTX_SUFF(pGst32BitPd);
1361}
1362
1363
1364/**
1365 * Performs the lazy mapping of the PAE guest PDPT.
1366 *
1367 * @returns Pointer to the mapping.
1368 * @param pPGM The PGM instance data.
1369 */
1370PX86PDPT pgmGstLazyMapPaePDPT(PPGMCPU pPGM)
1371{
1372 Assert(!pPGM->CTX_SUFF(pGstPaePdpt));
1373 PVM pVM = PGMCPU2VM(pPGM);
1374 pgmLock(pVM);
1375
1376 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, pPGM->GCPhysCR3);
1377 AssertReturn(pPage, NULL);
1378
1379 RTHCPTR HCPtrGuestCR3;
1380 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pPGM->GCPhysCR3 & X86_CR3_PAE_PAGE_MASK, (void **)&HCPtrGuestCR3); /** @todo r=bird: This GCPhysR3 masking isn't necessary. */
1381 AssertRCReturn(rc, NULL);
1382
1383 pPGM->pGstPaePdptR3 = (R3PTRTYPE(PX86PDPT))HCPtrGuestCR3;
1384# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1385 pPGM->pGstPaePdptR0 = (R0PTRTYPE(PX86PDPT))HCPtrGuestCR3;
1386# endif
1387
1388 pgmUnlock(pVM);
1389 return pPGM->CTX_SUFF(pGstPaePdpt);
1390}
1391
1392#endif /* IN_RING3 */
1393
1394#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1395/**
1396 * Performs the lazy mapping / updating of a PAE guest PD.
1397 *
1398 * @returns Pointer to the mapping.
1399 * @param pPGM The PGM instance data.
1400 * @param iPdpt Which PD entry to map (0..3).
1401 */
1402PX86PDPAE pgmGstLazyMapPaePD(PPGMCPU pPGM, uint32_t iPdpt)
1403{
1404 PVM pVM = PGMCPU2VM(pPGM);
1405 pgmLock(pVM);
1406
1407 PX86PDPT pGuestPDPT = pPGM->CTX_SUFF(pGstPaePdpt);
1408 Assert(pGuestPDPT);
1409 Assert(pGuestPDPT->a[iPdpt].n.u1Present);
1410 RTGCPHYS GCPhys = pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK;
1411 bool const fChanged = pPGM->aGCPhysGstPaePDs[iPdpt] != GCPhys;
1412
1413 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
1414 if (RT_LIKELY(pPage))
1415 {
1416 int rc = VINF_SUCCESS;
1417 RTRCPTR RCPtr = NIL_RTRCPTR;
1418 RTHCPTR HCPtr = NIL_RTHCPTR;
1419#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1420 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &HCPtr);
1421 AssertRC(rc);
1422#endif
1423 if (RT_SUCCESS(rc) && fChanged)
1424 {
1425 RCPtr = (RTRCPTR)(RTRCUINTPTR)(pVM->pgm.s.GCPtrCR3Mapping + (1 + iPdpt) * PAGE_SIZE);
1426 rc = PGMMap(pVM, (RTRCUINTPTR)RCPtr, PGM_PAGE_GET_HCPHYS(pPage), PAGE_SIZE, 0);
1427 }
1428 if (RT_SUCCESS(rc))
1429 {
1430 pPGM->apGstPaePDsR3[iPdpt] = (R3PTRTYPE(PX86PDPAE))HCPtr;
1431# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1432 pPGM->apGstPaePDsR0[iPdpt] = (R0PTRTYPE(PX86PDPAE))HCPtr;
1433# endif
1434 if (fChanged)
1435 {
1436 pPGM->aGCPhysGstPaePDs[iPdpt] = GCPhys;
1437 pPGM->apGstPaePDsRC[iPdpt] = (RCPTRTYPE(PX86PDPAE))RCPtr;
1438 }
1439
1440 pgmUnlock(pVM);
1441 return pPGM->CTX_SUFF(apGstPaePDs)[iPdpt];
1442 }
1443 }
1444
1445 /* Invalid page or some failure, invalidate the entry. */
1446 pPGM->aGCPhysGstPaePDs[iPdpt] = NIL_RTGCPHYS;
1447 pPGM->apGstPaePDsR3[iPdpt] = 0;
1448# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1449 pPGM->apGstPaePDsR0[iPdpt] = 0;
1450# endif
1451 pPGM->apGstPaePDsRC[iPdpt] = 0;
1452
1453 pgmUnlock(pVM);
1454 return NULL;
1455}
1456#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1457
1458
1459#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R3
1460/**
1461 * Performs the lazy mapping of the AMD64 guest PML4 table.
1462 *
1463 * @returns Pointer to the mapping.
1464 * @param pPGM The PGM instance data.
1465 */
1466PX86PML4 pgmGstLazyMapPml4(PPGMCPU pPGM)
1467{
1468 Assert(!pPGM->CTX_SUFF(pGstAmd64Pml4));
1469 PVM pVM = PGMCPU2VM(pPGM);
1470 pgmLock(pVM);
1471
1472 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, pPGM->GCPhysCR3);
1473 AssertReturn(pPage, NULL);
1474
1475 RTHCPTR HCPtrGuestCR3;
1476 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pPGM->GCPhysCR3 & X86_CR3_AMD64_PAGE_MASK, (void **)&HCPtrGuestCR3); /** @todo r=bird: This GCPhysCR3 masking isn't necessary. */
1477 AssertRCReturn(rc, NULL);
1478
1479 pPGM->pGstAmd64Pml4R3 = (R3PTRTYPE(PX86PML4))HCPtrGuestCR3;
1480# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1481 pPGM->pGstAmd64Pml4R0 = (R0PTRTYPE(PX86PML4))HCPtrGuestCR3;
1482# endif
1483
1484 pgmUnlock(pVM);
1485 return pPGM->CTX_SUFF(pGstAmd64Pml4);
1486}
1487#endif /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R3 */
1488
1489
1490/**
1491 * Gets the specified page directory pointer table entry.
1492 *
1493 * @returns PDP entry
1494 * @param pVCpu VMCPU handle.
1495 * @param iPdpt PDPT index
1496 */
1497VMMDECL(X86PDPE) PGMGstGetPaePDPtr(PVMCPU pVCpu, unsigned iPdpt)
1498{
1499 Assert(iPdpt <= 3);
1500 return pgmGstGetPaePDPTPtr(&pVCpu->pgm.s)->a[iPdpt & 3];
1501}
1502
1503
1504/**
1505 * Gets the current CR3 register value for the shadow memory context.
1506 * @returns CR3 value.
1507 * @param pVCpu VMCPU handle.
1508 */
1509VMMDECL(RTHCPHYS) PGMGetHyperCR3(PVMCPU pVCpu)
1510{
1511 PPGMPOOLPAGE pPoolPage = pVCpu->pgm.s.CTX_SUFF(pShwPageCR3);
1512 AssertPtrReturn(pPoolPage, 0);
1513 return pPoolPage->Core.Key;
1514}
1515
1516
1517/**
1518 * Gets the current CR3 register value for the nested memory context.
1519 * @returns CR3 value.
1520 * @param pVCpu VMCPU handle.
1521 */
1522VMMDECL(RTHCPHYS) PGMGetNestedCR3(PVMCPU pVCpu, PGMMODE enmShadowMode)
1523{
1524 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1525 return pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key;
1526}
1527
1528
1529/**
1530 * Gets the current CR3 register value for the HC intermediate memory context.
1531 * @returns CR3 value.
1532 * @param pVM The VM handle.
1533 */
1534VMMDECL(RTHCPHYS) PGMGetInterHCCR3(PVM pVM)
1535{
1536 switch (pVM->pgm.s.enmHostMode)
1537 {
1538 case SUPPAGINGMODE_32_BIT:
1539 case SUPPAGINGMODE_32_BIT_GLOBAL:
1540 return pVM->pgm.s.HCPhysInterPD;
1541
1542 case SUPPAGINGMODE_PAE:
1543 case SUPPAGINGMODE_PAE_GLOBAL:
1544 case SUPPAGINGMODE_PAE_NX:
1545 case SUPPAGINGMODE_PAE_GLOBAL_NX:
1546 return pVM->pgm.s.HCPhysInterPaePDPT;
1547
1548 case SUPPAGINGMODE_AMD64:
1549 case SUPPAGINGMODE_AMD64_GLOBAL:
1550 case SUPPAGINGMODE_AMD64_NX:
1551 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
1552 return pVM->pgm.s.HCPhysInterPaePDPT;
1553
1554 default:
1555 AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode));
1556 return ~0;
1557 }
1558}
1559
1560
1561/**
1562 * Gets the current CR3 register value for the RC intermediate memory context.
1563 * @returns CR3 value.
1564 * @param pVM The VM handle.
1565 * @param pVCpu VMCPU handle.
1566 */
1567VMMDECL(RTHCPHYS) PGMGetInterRCCR3(PVM pVM, PVMCPU pVCpu)
1568{
1569 switch (pVCpu->pgm.s.enmShadowMode)
1570 {
1571 case PGMMODE_32_BIT:
1572 return pVM->pgm.s.HCPhysInterPD;
1573
1574 case PGMMODE_PAE:
1575 case PGMMODE_PAE_NX:
1576 return pVM->pgm.s.HCPhysInterPaePDPT;
1577
1578 case PGMMODE_AMD64:
1579 case PGMMODE_AMD64_NX:
1580 return pVM->pgm.s.HCPhysInterPaePML4;
1581
1582 case PGMMODE_EPT:
1583 case PGMMODE_NESTED:
1584 return 0; /* not relevant */
1585
1586 default:
1587 AssertMsgFailed(("enmShadowMode=%d\n", pVCpu->pgm.s.enmShadowMode));
1588 return ~0;
1589 }
1590}
1591
1592
1593/**
1594 * Gets the CR3 register value for the 32-Bit intermediate memory context.
1595 * @returns CR3 value.
1596 * @param pVM The VM handle.
1597 */
1598VMMDECL(RTHCPHYS) PGMGetInter32BitCR3(PVM pVM)
1599{
1600 return pVM->pgm.s.HCPhysInterPD;
1601}
1602
1603
1604/**
1605 * Gets the CR3 register value for the PAE intermediate memory context.
1606 * @returns CR3 value.
1607 * @param pVM The VM handle.
1608 */
1609VMMDECL(RTHCPHYS) PGMGetInterPaeCR3(PVM pVM)
1610{
1611 return pVM->pgm.s.HCPhysInterPaePDPT;
1612}
1613
1614
1615/**
1616 * Gets the CR3 register value for the AMD64 intermediate memory context.
1617 * @returns CR3 value.
1618 * @param pVM The VM handle.
1619 */
1620VMMDECL(RTHCPHYS) PGMGetInterAmd64CR3(PVM pVM)
1621{
1622 return pVM->pgm.s.HCPhysInterPaePML4;
1623}
1624
1625
1626/**
1627 * Performs and schedules necessary updates following a CR3 load or reload.
1628 *
1629 * This will normally involve mapping the guest PD or nPDPT
1630 *
1631 * @returns VBox status code.
1632 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
1633 * safely be ignored and overridden since the FF will be set too then.
1634 * @param pVCpu VMCPU handle.
1635 * @param cr3 The new cr3.
1636 * @param fGlobal Indicates whether this is a global flush or not.
1637 */
1638VMMDECL(int) PGMFlushTLB(PVMCPU pVCpu, uint64_t cr3, bool fGlobal)
1639{
1640 PVM pVM = pVCpu->CTX_SUFF(pVM);
1641
1642 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLB), a);
1643
1644 /*
1645 * Always flag the necessary updates; necessary for hardware acceleration
1646 */
1647 /** @todo optimize this, it shouldn't always be necessary. */
1648 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
1649 if (fGlobal)
1650 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1651 LogFlow(("PGMFlushTLB: cr3=%RX64 OldCr3=%RX64 fGlobal=%d\n", cr3, pVCpu->pgm.s.GCPhysCR3, fGlobal));
1652
1653 /*
1654 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
1655 */
1656 int rc = VINF_SUCCESS;
1657 RTGCPHYS GCPhysCR3;
1658 switch (pVCpu->pgm.s.enmGuestMode)
1659 {
1660 case PGMMODE_PAE:
1661 case PGMMODE_PAE_NX:
1662 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1663 break;
1664 case PGMMODE_AMD64:
1665 case PGMMODE_AMD64_NX:
1666 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
1667 break;
1668 default:
1669 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1670 break;
1671 }
1672
1673 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
1674 {
1675 RTGCPHYS GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
1676 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
1677 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
1678 if (RT_LIKELY(rc == VINF_SUCCESS))
1679 {
1680 if (!pVM->pgm.s.fMappingsFixed)
1681 {
1682 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1683 }
1684 }
1685 else
1686 {
1687 AssertMsg(rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
1688 Assert(VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_PGM_SYNC_CR3));
1689 pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3;
1690 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MAP_CR3;
1691 if (!pVM->pgm.s.fMappingsFixed)
1692 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;
1693 }
1694
1695 if (fGlobal)
1696 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLBNewCR3Global));
1697 else
1698 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLBNewCR3));
1699 }
1700 else
1701 {
1702# ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
1703 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1704 if (pPool->cDirtyPages)
1705 {
1706 pgmLock(pVM);
1707 pgmPoolResetDirtyPages(pVM);
1708 pgmUnlock(pVM);
1709 }
1710# endif
1711 /*
1712 * Check if we have a pending update of the CR3 monitoring.
1713 */
1714 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
1715 {
1716 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1717 Assert(!pVM->pgm.s.fMappingsFixed);
1718 }
1719 if (fGlobal)
1720 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLBSameCR3Global));
1721 else
1722 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLBSameCR3));
1723 }
1724
1725 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLB), a);
1726 return rc;
1727}
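
/*
 * Note on the CR3 masking switch in PGMFlushTLB() above (architectural
 * background, stated here for clarity): in PAE mode CR3 holds a 32-byte
 * aligned pointer to the PDPT, whereas in 32-bit and long mode it holds a
 * page aligned page directory (32-bit) or PML4 (long mode) address; the
 * X86_CR3_*_PAGE_MASK constants are expected to encode exactly these
 * alignments.
 */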
1728
1729
1730/**
1731 * Performs and schedules necessary updates following a CR3 load or reload when
1732 * using nested or extended paging.
1733 *
1734 * This API is an alternative to PGMFlushTLB that avoids actually flushing the
1735 * TLB and triggering a SyncCR3.
1736 *
1737 * This will normally involve mapping the guest PD or nPDPT
1738 *
1739 * @returns VBox status code.
1740 * @retval VINF_SUCCESS.
1741 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync (only possible
1742 * when not in nested paging mode). This can safely be ignored and
1743 * overridden since the FF will be set too then.
1744 * @param pVCpu VMCPU handle.
1745 * @param cr3 The new cr3.
1746 */
1747VMMDECL(int) PGMUpdateCR3(PVMCPU pVCpu, uint64_t cr3)
1748{
1749 PVM pVM = pVCpu->CTX_SUFF(pVM);
1750
1751 LogFlow(("PGMUpdateCR3: cr3=%RX64 OldCr3=%RX64\n", cr3, pVCpu->pgm.s.GCPhysCR3));
1752
1753 /* We assume we're only called in nested paging mode. */
1754 Assert(pVM->pgm.s.fMappingsFixed);
1755 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3));
1756 Assert(HWACCMIsNestedPagingActive(pVM) || pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
1757
1758 /*
1759 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
1760 */
1761 int rc = VINF_SUCCESS;
1762 RTGCPHYS GCPhysCR3;
1763 switch (pVCpu->pgm.s.enmGuestMode)
1764 {
1765 case PGMMODE_PAE:
1766 case PGMMODE_PAE_NX:
1767 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1768 break;
1769 case PGMMODE_AMD64:
1770 case PGMMODE_AMD64_NX:
1771 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
1772 break;
1773 default:
1774 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1775 break;
1776 }
1777 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
1778 {
1779 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
1780 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
1781 AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */ /** @todo this isn't true for the mac, but we need hw to test/fix this. */
1782 }
1783 return rc;
1784}
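/*
 * Illustrative sketch, not part of the original file: with nested paging (or
 * EPT) active the CR3-write handler only needs to remap the guest structures,
 * so it would call PGMUpdateCR3 instead of PGMFlushTLB.  The uNewCR3 variable
 * is an assumption made for the example.
 */
#if 0 /* example only */
    int rc;
    if (HWACCMIsNestedPagingActive(pVM))
        rc = PGMUpdateCR3(pVCpu, uNewCR3);
    else
        rc = PGMFlushTLB(pVCpu, uNewCR3, !(CPUMGetGuestCR4(pVCpu) & X86_CR4_PGE));
#endif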
1785
1786
1787/**
1788 * Synchronize the paging structures.
1789 *
1790 * This function is called in response to the VMCPU_FF_PGM_SYNC_CR3 and
1791 * VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL. Those two force action flags are set
1792 * in several places, most importantly whenever the CR3 is loaded.
1793 *
1794 * @returns VBox status code.
1795 * @param pVCpu VMCPU handle.
1796 * @param cr0 Guest context CR0 register
1797 * @param cr3 Guest context CR3 register
1798 * @param cr4 Guest context CR4 register
1799 * @param fGlobal Including global page directories or not
1800 */
1801VMMDECL(int) PGMSyncCR3(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
1802{
1803 PVM pVM = pVCpu->CTX_SUFF(pVM);
1804 int rc;
1805
1806#ifdef PGMPOOL_WITH_MONITORING
1807 /*
1808 * The pool may have pending stuff and even require a return to ring-3 to
1809 * clear the whole thing.
1810 */
1811 rc = pgmPoolSyncCR3(pVCpu);
1812 if (rc != VINF_SUCCESS)
1813 return rc;
1814#endif
1815
1816 /*
1817 * We might be called when we shouldn't.
1818 *
1819 * The mode switching will ensure that the PD is resynced
1820 * after every mode switch. So, if we find ourselves here
1821 * when in protected or real mode we can safely disable the
1822 * FF and return immediately.
1823 */
1824 if (pVCpu->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
1825 {
1826 Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
1827 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1828 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
1829 return VINF_SUCCESS;
1830 }
1831
1832 /* If global pages are not enabled, then all flushes are global. */
1833 if (!(cr4 & X86_CR4_PGE))
1834 fGlobal = true;
1835 LogFlow(("PGMSyncCR3: cr0=%RX64 cr3=%RX64 cr4=%RX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
1836 VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)));
1837
1838 /*
1839 * Check if we need to finish an aborted MapCR3 call (see PGMFlushTLB).
1840 * This should be done before SyncCR3.
1841 */
1842 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MAP_CR3)
1843 {
1844 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MAP_CR3;
1845
1846 RTGCPHYS GCPhysCR3Old = pVCpu->pgm.s.GCPhysCR3;
1847 RTGCPHYS GCPhysCR3;
1848 switch (pVCpu->pgm.s.enmGuestMode)
1849 {
1850 case PGMMODE_PAE:
1851 case PGMMODE_PAE_NX:
1852 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1853 break;
1854 case PGMMODE_AMD64:
1855 case PGMMODE_AMD64_NX:
1856 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
1857 break;
1858 default:
1859 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1860 break;
1861 }
1862
1863 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
1864 {
1865 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
1866 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
1867 }
1868#ifdef IN_RING3
1869 if (rc == VINF_PGM_SYNC_CR3)
1870 rc = pgmPoolSyncCR3(pVCpu);
1871#else
1872 if (rc == VINF_PGM_SYNC_CR3)
1873 {
1874 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3Old;
1875 return rc;
1876 }
1877#endif
1878 AssertRCReturn(rc, rc);
1879 AssertRCSuccessReturn(rc, VERR_INTERNAL_ERROR);
1880 }
1881
1882 /*
1883 * Let the 'Bth' function do the work and we'll just keep track of the flags.
1884 */
1885 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
1886 rc = PGM_BTH_PFN(SyncCR3, pVCpu)(pVCpu, cr0, cr3, cr4, fGlobal);
1887 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
1888 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
1889 if (rc == VINF_SUCCESS)
1890 {
1891 if (!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
1892 {
1893 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1894 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
1895 }
1896
1897 /*
1898 * Check if we have a pending update of the CR3 monitoring.
1899 */
1900 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
1901 {
1902 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1903 Assert(!pVM->pgm.s.fMappingsFixed);
1904 }
1905 }
1906
1907 /*
1908 * Now flush the CR3 (guest context).
1909 */
1910 if (rc == VINF_SUCCESS)
1911 PGM_INVL_VCPU_TLBS(pVCpu);
1912 return rc;
1913}
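/*
 * Illustrative sketch, not part of the original file: the execution loop
 * reacts to the two force-action flags by calling PGMSyncCR3 with the current
 * control register values; the surrounding loop structure shown here is an
 * assumption.
 */
#if 0 /* example only */
    if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
    {
        int rc = PGMSyncCR3(pVCpu,
                            CPUMGetGuestCR0(pVCpu),
                            CPUMGetGuestCR3(pVCpu),
                            CPUMGetGuestCR4(pVCpu),
                            VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3) /* fGlobal */);
        if (RT_FAILURE(rc))
            return rc;
    }
#endif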
1914
1915
1916/**
1917 * Called whenever CR0 or CR4 changes in a way which may affect
1918 * the paging mode.
1919 *
1920 * @returns VBox status code, with the following informational code for
1921 * VM scheduling.
1922 * @retval VINF_SUCCESS if there was no change, or it was successfully dealt with.
1923 * @retval VINF_PGM_CHANGE_MODE if we're in RC or R0 and the mode changes.
1924 * (I.e. not in R3.)
1925 * @retval VINF_EM_SUSPEND or VINF_EM_OFF on a fatal runtime error. (R3 only)
1926 *
1927 * @param pVCpu VMCPU handle.
1928 * @param cr0 The new cr0.
1929 * @param cr4 The new cr4.
1930 * @param efer The new extended feature enable register.
1931 */
1932VMMDECL(int) PGMChangeMode(PVMCPU pVCpu, uint64_t cr0, uint64_t cr4, uint64_t efer)
1933{
1934 PVM pVM = pVCpu->CTX_SUFF(pVM);
1935 PGMMODE enmGuestMode;
1936
1937 /*
1938 * Calc the new guest mode.
1939 */
1940 if (!(cr0 & X86_CR0_PE))
1941 enmGuestMode = PGMMODE_REAL;
1942 else if (!(cr0 & X86_CR0_PG))
1943 enmGuestMode = PGMMODE_PROTECTED;
1944 else if (!(cr4 & X86_CR4_PAE))
1945 enmGuestMode = PGMMODE_32_BIT;
1946 else if (!(efer & MSR_K6_EFER_LME))
1947 {
1948 if (!(efer & MSR_K6_EFER_NXE))
1949 enmGuestMode = PGMMODE_PAE;
1950 else
1951 enmGuestMode = PGMMODE_PAE_NX;
1952 }
1953 else
1954 {
1955 if (!(efer & MSR_K6_EFER_NXE))
1956 enmGuestMode = PGMMODE_AMD64;
1957 else
1958 enmGuestMode = PGMMODE_AMD64_NX;
1959 }
1960
1961 /*
1962 * Did it change?
1963 */
1964 if (pVCpu->pgm.s.enmGuestMode == enmGuestMode)
1965 return VINF_SUCCESS;
1966
1967 /* Flush the TLB */
1968 PGM_INVL_VCPU_TLBS(pVCpu);
1969
1970#ifdef IN_RING3
1971 return PGMR3ChangeMode(pVM, pVCpu, enmGuestMode);
1972#else
1973 LogFlow(("PGMChangeMode: returns VINF_PGM_CHANGE_MODE.\n"));
1974 return VINF_PGM_CHANGE_MODE;
1975#endif
1976}
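/*
 * Illustrative sketch, not part of the original file: a handler for guest
 * writes to CR0/CR4/EFER passes the updated values and, in RC/R0, propagates
 * VINF_PGM_CHANGE_MODE so ring-3 can perform the switch via PGMR3ChangeMode.
 */
#if 0 /* example only */
    int rc = PGMChangeMode(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR4(pVCpu), CPUMGetGuestEFER(pVCpu));
    if (rc == VINF_PGM_CHANGE_MODE)
        return rc;  /* handled by the ring-3 execution loop */
    AssertRC(rc);
#endif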
1977
1978
1979/**
1980 * Gets the current guest paging mode.
1981 *
1982 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
1983 *
1984 * @returns The current paging mode.
1985 * @param pVCpu VMCPU handle.
1986 */
1987VMMDECL(PGMMODE) PGMGetGuestMode(PVMCPU pVCpu)
1988{
1989 return pVCpu->pgm.s.enmGuestMode;
1990}
1991
1992
1993/**
1994 * Gets the current shadow paging mode.
1995 *
1996 * @returns The current paging mode.
1997 * @param pVCpu VMCPU handle.
1998 */
1999VMMDECL(PGMMODE) PGMGetShadowMode(PVMCPU pVCpu)
2000{
2001 return pVCpu->pgm.s.enmShadowMode;
2002}
2003
2004/**
2005 * Gets the current host paging mode.
2006 *
2007 * @returns The current paging mode.
2008 * @param pVM The VM handle.
2009 */
2010VMMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
2011{
2012 switch (pVM->pgm.s.enmHostMode)
2013 {
2014 case SUPPAGINGMODE_32_BIT:
2015 case SUPPAGINGMODE_32_BIT_GLOBAL:
2016 return PGMMODE_32_BIT;
2017
2018 case SUPPAGINGMODE_PAE:
2019 case SUPPAGINGMODE_PAE_GLOBAL:
2020 return PGMMODE_PAE;
2021
2022 case SUPPAGINGMODE_PAE_NX:
2023 case SUPPAGINGMODE_PAE_GLOBAL_NX:
2024 return PGMMODE_PAE_NX;
2025
2026 case SUPPAGINGMODE_AMD64:
2027 case SUPPAGINGMODE_AMD64_GLOBAL:
2028 return PGMMODE_AMD64;
2029
2030 case SUPPAGINGMODE_AMD64_NX:
2031 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
2032 return PGMMODE_AMD64_NX;
2033
2034 default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
2035 }
2036
2037 return PGMMODE_INVALID;
2038}
2039
2040
2041/**
2042 * Get mode name.
2043 *
2044 * @returns read-only name string.
2045 * @param enmMode The mode which name is desired.
2046 */
2047VMMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
2048{
2049 switch (enmMode)
2050 {
2051 case PGMMODE_REAL: return "Real";
2052 case PGMMODE_PROTECTED: return "Protected";
2053 case PGMMODE_32_BIT: return "32-bit";
2054 case PGMMODE_PAE: return "PAE";
2055 case PGMMODE_PAE_NX: return "PAE+NX";
2056 case PGMMODE_AMD64: return "AMD64";
2057 case PGMMODE_AMD64_NX: return "AMD64+NX";
2058 case PGMMODE_NESTED: return "Nested";
2059 case PGMMODE_EPT: return "EPT";
2060 default: return "unknown mode value";
2061 }
2062}
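/*
 * Illustrative sketch, not part of the original file: the mode name is mainly
 * useful for logging the guest/shadow/host combination after a mode change.
 */
#if 0 /* example only */
    Log(("PGM: guest=%s shadow=%s host=%s\n",
         PGMGetModeName(PGMGetGuestMode(pVCpu)),
         PGMGetModeName(PGMGetShadowMode(pVCpu)),
         PGMGetModeName(PGMGetHostMode(pVM))));
#endif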
2063
2064
2065/**
2066 * Check if the PGM lock is currently taken.
2067 *
2068 * @returns bool locked/not locked
2069 * @param pVM The VM to operate on.
2070 */
2071VMMDECL(bool) PGMIsLocked(PVM pVM)
2072{
2073 return PDMCritSectIsOwned(&pVM->pgm.s.CritSect);
2074}
2075
2076
2077/**
2078 * Check if this VCPU currently owns the PGM lock.
2079 *
2080 * @returns bool owner/not owner
2081 * @param pVM The VM to operate on.
2082 */
2083VMMDECL(bool) PGMIsLockOwner(PVM pVM)
2084{
2085 return PDMCritSectIsOwner(&pVM->pgm.s.CritSect);
2086}
2087
2088
2089/**
2090 * Acquire the PGM lock.
2091 *
2092 * @returns VBox status code
2093 * @param pVM The VM to operate on.
2094 */
2095int pgmLock(PVM pVM)
2096{
2097 int rc = PDMCritSectEnter(&pVM->pgm.s.CritSect, VERR_SEM_BUSY);
2098#if defined(IN_RC) || defined(IN_RING0)
2099 if (rc == VERR_SEM_BUSY)
2100 rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_LOCK, 0);
2101#endif
2102 AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc));
2103 return rc;
2104}
2105
2106
2107/**
2108 * Release the PGM lock.
2109 *
2110 * @returns VBox status code
2111 * @param pVM The VM to operate on.
2112 */
2113void pgmUnlock(PVM pVM)
2114{
2115 PDMCritSectLeave(&pVM->pgm.s.CritSect);
2116}
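/*
 * Illustrative sketch, not part of the original file: internal callers bracket
 * accesses to shared PGM structures with the lock pair, as PGMFlushTLB does
 * above for the dirty page reset.
 */
#if 0 /* example only */
    pgmLock(pVM);
    /* ... walk or modify structures owned by the PGM critical section ... */
    pgmUnlock(pVM);
#endif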
2117
2118#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
2119
2120/**
2121 * Temporarily maps one guest page specified by GC physical address.
2122 * These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
2123 *
2124 * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
2125 * reused after 8 mappings (or perhaps a few more if you score with the cache).
2126 *
2127 * @returns VBox status.
2128 * @param pVM VM handle.
2129 * @param GCPhys GC Physical address of the page.
2130 * @param ppv Where to store the address of the mapping.
2131 */
2132VMMDECL(int) PGMDynMapGCPage(PVM pVM, RTGCPHYS GCPhys, void **ppv)
2133{
2134 AssertMsg(!(GCPhys & PAGE_OFFSET_MASK), ("GCPhys=%RGp\n", GCPhys));
2135
2136 /*
2137 * Get the ram range.
2138 */
2139 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
2140 while (pRam && GCPhys - pRam->GCPhys >= pRam->cb)
2141 pRam = pRam->CTX_SUFF(pNext);
2142 if (!pRam)
2143 {
2144 AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
2145 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2146 }
2147
2148 /*
2149 * Pass it on to PGMDynMapHCPage.
2150 */
2151 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT]);
2152 //Log(("PGMDynMapGCPage: GCPhys=%RGp HCPhys=%RHp\n", GCPhys, HCPhys));
2153#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
2154 pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
2155#else
2156 PGMDynMapHCPage(pVM, HCPhys, ppv);
2157#endif
2158 return VINF_SUCCESS;
2159}
2160
2161
2162/**
2163 * Temporarily maps one guest page specified by unaligned GC physical address.
2164 * These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
2165 *
2166 * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
2167 * reused after 8 mappings (or perhaps a few more if you score with the cache).
2168 *
2169 * The caller must be aware that only the specified page is mapped and that really bad things
2170 * will happen if writing beyond the page!
2171 *
2172 * @returns VBox status.
2173 * @param pVM VM handle.
2174 * @param GCPhys GC Physical address within the page to be mapped.
2175 * @param ppv Where to store the address of the mapping address corresponding to GCPhys.
2176 */
2177VMMDECL(int) PGMDynMapGCPageOff(PVM pVM, RTGCPHYS GCPhys, void **ppv)
2178{
2179 /*
2180 * Get the ram range.
2181 */
2182 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
2183 while (pRam && GCPhys - pRam->GCPhys >= pRam->cb)
2184 pRam = pRam->CTX_SUFF(pNext);
2185 if (!pRam)
2186 {
2187 AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
2188 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2189 }
2190
2191 /*
2192 * Pass it on to PGMDynMapHCPage.
2193 */
2194 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT]);
2195#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
2196 pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
2197#else
2198 PGMDynMapHCPage(pVM, HCPhys, ppv);
2199#endif
2200 *ppv = (void *)((uintptr_t)*ppv | (GCPhys & PAGE_OFFSET_MASK));
2201 return VINF_SUCCESS;
2202}
2203
2204# ifdef IN_RC
2205
2206/**
2207 * Temporarily maps one host page specified by HC physical address.
2208 *
2209 * Be WARNED that the dynamic page mapping area is small, 16 pages, thus the space is
2210 * reused after 16 mappings (or perhaps a few more if you score with the cache).
2211 *
2212 * @returns VINF_SUCCESS, will bail out to ring-3 on failure.
2213 * @param pVM VM handle.
2214 * @param HCPhys HC Physical address of the page.
2215 * @param ppv Where to store the address of the mapping. This is the
2216 * address of the PAGE not the exact address corresponding
2217 * to HCPhys. Use PGMDynMapHCPageOff if you care for the
2218 * page offset.
2219 */
2220VMMDECL(int) PGMDynMapHCPage(PVM pVM, RTHCPHYS HCPhys, void **ppv)
2221{
2222 AssertMsg(!(HCPhys & PAGE_OFFSET_MASK), ("HCPhys=%RHp\n", HCPhys));
2223
2224 /*
2225 * Check the cache.
2226 */
2227 register unsigned iCache;
2228 for (iCache = 0;iCache < RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache);iCache++)
2229 {
2230 static const uint8_t au8Trans[MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT][RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache)] =
2231 {
2232 { 0, 9, 10, 11, 12, 13, 14, 15},
2233 { 0, 1, 10, 11, 12, 13, 14, 15},
2234 { 0, 1, 2, 11, 12, 13, 14, 15},
2235 { 0, 1, 2, 3, 12, 13, 14, 15},
2236 { 0, 1, 2, 3, 4, 13, 14, 15},
2237 { 0, 1, 2, 3, 4, 5, 14, 15},
2238 { 0, 1, 2, 3, 4, 5, 6, 15},
2239 { 0, 1, 2, 3, 4, 5, 6, 7},
2240 { 8, 1, 2, 3, 4, 5, 6, 7},
2241 { 8, 9, 2, 3, 4, 5, 6, 7},
2242 { 8, 9, 10, 3, 4, 5, 6, 7},
2243 { 8, 9, 10, 11, 4, 5, 6, 7},
2244 { 8, 9, 10, 11, 12, 5, 6, 7},
2245 { 8, 9, 10, 11, 12, 13, 6, 7},
2246 { 8, 9, 10, 11, 12, 13, 14, 7},
2247 { 8, 9, 10, 11, 12, 13, 14, 15},
2248 };
2249 AssertCompile(RT_ELEMENTS(au8Trans) == 16);
2250 AssertCompile(RT_ELEMENTS(au8Trans[0]) == 8);
2251
2252 if (pVM->pgm.s.aHCPhysDynPageMapCache[iCache] == HCPhys)
2253 {
2254 int iPage = au8Trans[pVM->pgm.s.iDynPageMapLast][iCache];
2255
2256 /* The cache can get out of sync with locked entries. (10 locked, 2 overwrites its cache position, last = 11, lookup 2 -> page 10 instead of 2) */
2257 if ((pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage].u & X86_PTE_PG_MASK) == HCPhys)
2258 {
2259 void *pv = pVM->pgm.s.pbDynPageMapBaseGC + (iPage << PAGE_SHIFT);
2260 *ppv = pv;
2261 STAM_COUNTER_INC(&pVM->pgm.s.StatRCDynMapCacheHits);
2262 Log4(("PGMGCDynMapHCPage: HCPhys=%RHp pv=%p iPage=%d iCache=%d\n", HCPhys, pv, iPage, iCache));
2263 return VINF_SUCCESS;
2264 }
2265 LogFlow(("Out of sync entry %d\n", iPage));
2266 }
2267 }
2268 AssertCompile(RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) == 8);
2269 AssertCompile((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) == 16);
2270 STAM_COUNTER_INC(&pVM->pgm.s.StatRCDynMapCacheMisses);
2271
2272 /*
2273 * Update the page tables.
2274 */
2275 unsigned iPage = pVM->pgm.s.iDynPageMapLast;
2276 unsigned i;
2277 for (i = 0; i < (MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT); i++)
2278 {
2279 pVM->pgm.s.iDynPageMapLast = iPage = (iPage + 1) & ((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) - 1);
2280 if (!pVM->pgm.s.aLockedDynPageMapCache[iPage])
2281 break;
2282 iPage++;
2283 }
2284 AssertRelease(i != (MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT));
2285
2286 pVM->pgm.s.aHCPhysDynPageMapCache[iPage & (RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) - 1)] = HCPhys;
2287 pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage].u = (uint32_t)HCPhys | X86_PTE_P | X86_PTE_A | X86_PTE_D;
2288 pVM->pgm.s.paDynPageMapPaePTEsGC[iPage].u = HCPhys | X86_PTE_P | X86_PTE_A | X86_PTE_D;
2289 pVM->pgm.s.aLockedDynPageMapCache[iPage] = 0;
2290
2291 void *pv = pVM->pgm.s.pbDynPageMapBaseGC + (iPage << PAGE_SHIFT);
2292 *ppv = pv;
2293 ASMInvalidatePage(pv);
2294 Log4(("PGMGCDynMapHCPage: HCPhys=%RHp pv=%p iPage=%d\n", HCPhys, pv, iPage));
2295 return VINF_SUCCESS;
2296}
2297
2298
2299/**
2300 * Temporarily lock a dynamic page to prevent it from being reused.
2301 *
2302 * @param pVM VM handle.
2303 * @param GCPage GC address of page
2304 */
2305VMMDECL(void) PGMDynLockHCPage(PVM pVM, RCPTRTYPE(uint8_t *) GCPage)
2306{
2307 unsigned iPage;
2308
2309 Assert(GCPage >= pVM->pgm.s.pbDynPageMapBaseGC && GCPage < (pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE));
2310 iPage = ((uintptr_t)(GCPage - pVM->pgm.s.pbDynPageMapBaseGC)) >> PAGE_SHIFT;
2311 ASMAtomicIncU32(&pVM->pgm.s.aLockedDynPageMapCache[iPage]);
2312 Log4(("PGMDynLockHCPage %RRv iPage=%d\n", GCPage, iPage));
2313}
2314
2315
2316/**
2317 * Unlock a dynamic page
2318 *
2319 * @param pVM VM handle.
2320 * @param GCPage GC address of page
2321 */
2322VMMDECL(void) PGMDynUnlockHCPage(PVM pVM, RCPTRTYPE(uint8_t *) GCPage)
2323{
2324 unsigned iPage;
2325
2326 AssertCompile(RT_ELEMENTS(pVM->pgm.s.aLockedDynPageMapCache) == 2* RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache));
2327 AssertCompileMemberSize(VM, pgm.s.aLockedDynPageMapCache, sizeof(uint32_t) * (MM_HYPER_DYNAMIC_SIZE >> (PAGE_SHIFT)));
2328
2329 Assert(GCPage >= pVM->pgm.s.pbDynPageMapBaseGC && GCPage < (pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE));
2330 iPage = ((uintptr_t)(GCPage - pVM->pgm.s.pbDynPageMapBaseGC)) >> PAGE_SHIFT;
2331 Assert(pVM->pgm.s.aLockedDynPageMapCache[iPage]);
2332 ASMAtomicDecU32(&pVM->pgm.s.aLockedDynPageMapCache[iPage]);
2333 Log4(("PGMDynUnlockHCPage %RRv iPage=%d\n", GCPage, iPage));
2334}
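/*
 * Illustrative sketch, not part of the original file: a raw-mode caller that
 * must keep a dynamic mapping valid across further PGMDynMap* calls locks the
 * page while using it.  GCPhys is an assumption made for the example.
 */
#if 0 /* example only */
    uint8_t *pbPage;
    int rc = PGMDynMapGCPage(pVM, GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK, (void **)&pbPage);
    if (RT_SUCCESS(rc))
    {
        PGMDynLockHCPage(pVM, pbPage);
        /* ... read or write the page through pbPage ... */
        PGMDynUnlockHCPage(pVM, pbPage);
    }
#endif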
2335
2336
2337# ifdef VBOX_STRICT
2338/**
2339 * Check for lock leaks.
2340 *
2341 * @param pVM VM handle.
2342 */
2343VMMDECL(void) PGMDynCheckLocks(PVM pVM)
2344{
2345 for (unsigned i=0;i<RT_ELEMENTS(pVM->pgm.s.aLockedDynPageMapCache);i++)
2346 Assert(!pVM->pgm.s.aLockedDynPageMapCache[i]);
2347}
2348# endif /* VBOX_STRICT */
2349
2350# endif /* IN_RC */
2351#endif /* IN_RC || VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
2352
2353#if !defined(IN_R0) || defined(LOG_ENABLED)
2354
2355/** Format handler for PGMPAGE.
2356 * @copydoc FNRTSTRFORMATTYPE */
2357static DECLCALLBACK(size_t) pgmFormatTypeHandlerPage(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
2358 const char *pszType, void const *pvValue,
2359 int cchWidth, int cchPrecision, unsigned fFlags,
2360 void *pvUser)
2361{
2362 size_t cch;
2363 PCPGMPAGE pPage = (PCPGMPAGE)pvValue;
2364 if (VALID_PTR(pPage))
2365 {
2366 char szTmp[64+80];
2367
2368 cch = 0;
2369
2370 /* The single char state stuff. */
2371 static const char s_achPageStates[4] = { 'Z', 'A', 'W', 'S' };
2372 szTmp[cch++] = s_achPageStates[PGM_PAGE_GET_STATE(pPage)];
2373
2374#define IS_PART_INCLUDED(lvl) ( !(fFlags & RTSTR_F_PRECISION) || cchPrecision == (lvl) || cchPrecision >= (lvl)+10 )
2375 if (IS_PART_INCLUDED(5))
2376 {
2377 static const char s_achHandlerStates[4] = { '-', 't', 'w', 'a' };
2378 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)];
2379 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_VIRT_STATE(pPage)];
2380 }
2381
2382 /* The type. */
2383 if (IS_PART_INCLUDED(4))
2384 {
2385 szTmp[cch++] = ':';
2386 static const char s_achPageTypes[8][4] = { "INV", "RAM", "MI2", "M2A", "SHA", "ROM", "MIO", "BAD" };
2387 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE(pPage)][0];
2388 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE(pPage)][1];
2389 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE(pPage)][2];
2390 }
2391
2392 /* The numbers. */
2393 if (IS_PART_INCLUDED(3))
2394 {
2395 szTmp[cch++] = ':';
2396 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_HCPHYS(pPage), 16, 12, 0, RTSTR_F_ZEROPAD | RTSTR_F_64BIT);
2397 }
2398
2399 if (IS_PART_INCLUDED(2))
2400 {
2401 szTmp[cch++] = ':';
2402 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_PAGEID(pPage), 16, 7, 0, RTSTR_F_ZEROPAD | RTSTR_F_32BIT);
2403 }
2404
2405 if (IS_PART_INCLUDED(6))
2406 {
2407 szTmp[cch++] = ':';
2408 static const char s_achRefs[4] = { '-', 'U', '!', 'L' };
2409 szTmp[cch++] = s_achRefs[PGM_PAGE_GET_TD_CREFS(pPage)];
2410 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_TD_IDX(pPage), 16, 4, 0, RTSTR_F_ZEROPAD | RTSTR_F_16BIT);
2411 }
2412#undef IS_PART_INCLUDED
2413
2414 cch = pfnOutput(pvArgOutput, szTmp, cch);
2415 }
2416 else
2417 cch = pfnOutput(pvArgOutput, "<bad-pgmpage-ptr>", sizeof("<bad-pgmpage-ptr>") - 1);
2418 return cch;
2419}
2420
2421
2422/** Format handler for PGMRAMRANGE.
2423 * @copydoc FNRTSTRFORMATTYPE */
2424static DECLCALLBACK(size_t) pgmFormatTypeHandlerRamRange(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
2425 const char *pszType, void const *pvValue,
2426 int cchWidth, int cchPrecision, unsigned fFlags,
2427 void *pvUser)
2428{
2429 size_t cch;
2430 PGMRAMRANGE const *pRam = (PGMRAMRANGE const *)pvValue;
2431 if (VALID_PTR(pRam))
2432 {
2433 char szTmp[80];
2434 cch = RTStrPrintf(szTmp, sizeof(szTmp), "%RGp-%RGp", pRam->GCPhys, pRam->GCPhysLast);
2435 cch = pfnOutput(pvArgOutput, szTmp, cch);
2436 }
2437 else
2438 cch = pfnOutput(pvArgOutput, "<bad-pgmramrange-ptr>", sizeof("<bad-pgmramrange-ptr>") - 1);
2439 return cch;
2440}
2441
2442/** Format type handlers to be registered/deregistered. */
2443static const struct
2444{
2445 char szType[24];
2446 PFNRTSTRFORMATTYPE pfnHandler;
2447} g_aPgmFormatTypes[] =
2448{
2449 { "pgmpage", pgmFormatTypeHandlerPage },
2450 { "pgmramrange", pgmFormatTypeHandlerRamRange }
2451};
2452
2453#endif /* !IN_R0 || LOG_ENABLED */
2454
2455
2456/**
2457 * Registers the global string format types.
2458 *
2459 * This should be called at module load time or in some other manner that ensures
2460 * that it's called exactly one time.
2461 *
2462 * @returns IPRT status code on RTStrFormatTypeRegister failure.
2463 */
2464VMMDECL(int) PGMRegisterStringFormatTypes(void)
2465{
2466#if !defined(IN_R0) || defined(LOG_ENABLED)
2467 int rc = VINF_SUCCESS;
2468 unsigned i;
2469 for (i = 0; RT_SUCCESS(rc) && i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
2470 {
2471 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
2472# ifdef IN_RING0
2473 if (rc == VERR_ALREADY_EXISTS)
2474 {
2475 /* in case of cleanup failure in ring-0 */
2476 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2477 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
2478 }
2479# endif
2480 }
2481 if (RT_FAILURE(rc))
2482 while (i-- > 0)
2483 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2484
2485 return rc;
2486#else
2487 return VINF_SUCCESS;
2488#endif
2489}
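/*
 * Illustrative sketch, not part of the original file: once registered, the
 * custom types are consumed through IPRT's %R[...] format extension, e.g. in
 * log statements taking a PGMPAGE or PGMRAMRANGE pointer.
 */
#if 0 /* example only */
    Log(("pgm: page %R[pgmpage] in range %R[pgmramrange]\n", pPage, pRam));
#endif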
2490
2491
2492/**
2493 * Deregisters the global string format types.
2494 *
2495 * This should be called at module unload time or in some other manner that
2496 * ensures that it's called exactly one time.
2497 */
2498VMMDECL(void) PGMDeregisterStringFormatTypes(void)
2499{
2500#if !defined(IN_R0) || defined(LOG_ENABLED)
2501 for (unsigned i = 0; i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
2502 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2503#endif
2504}
2505
2506#ifdef VBOX_STRICT
2507
2508/**
2509 * Asserts that there are no mapping conflicts.
2510 *
2511 * @returns Number of conflicts.
2512 * @param pVM The VM Handle.
2513 */
2514VMMDECL(unsigned) PGMAssertNoMappingConflicts(PVM pVM)
2515{
2516 unsigned cErrors = 0;
2517
2518 /* Only applies to raw mode -> 1 VCPU */
2519 Assert(pVM->cCpus == 1);
2520 PVMCPU pVCpu = &pVM->aCpus[0];
2521
2522 /*
2523 * Check for mapping conflicts.
2524 */
2525 for (PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
2526 pMapping;
2527 pMapping = pMapping->CTX_SUFF(pNext))
2528 {
2529 /** @todo This is slow and should be optimized, but since it's just assertions I don't care now. */
2530 for (RTGCPTR GCPtr = pMapping->GCPtr;
2531 GCPtr <= pMapping->GCPtrLast;
2532 GCPtr += PAGE_SIZE)
2533 {
2534 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)GCPtr, NULL, NULL);
2535 if (rc != VERR_PAGE_TABLE_NOT_PRESENT)
2536 {
2537 AssertMsgFailed(("Conflict at %RGv with %s\n", GCPtr, R3STRING(pMapping->pszDesc)));
2538 cErrors++;
2539 break;
2540 }
2541 }
2542 }
2543
2544 return cErrors;
2545}
2546
2547
2548/**
2549 * Asserts that everything related to the guest CR3 is correctly shadowed.
2550 *
2551 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
2552 * and assert the correctness of the guest CR3 mapping before asserting that the
2553 * shadow page tables are in sync with the guest page tables.
2554 *
2555 * @returns Number of conflicts.
2556 * @param pVM The VM Handle.
2557 * @param pVCpu VMCPU handle.
2558 * @param cr3 The current guest CR3 register value.
2559 * @param cr4 The current guest CR4 register value.
2560 */
2561VMMDECL(unsigned) PGMAssertCR3(PVM pVM, PVMCPU pVCpu, uint64_t cr3, uint64_t cr4)
2562{
2563 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
2564 pgmLock(pVM);
2565 unsigned cErrors = PGM_BTH_PFN(AssertCR3, pVCpu)(pVCpu, cr3, cr4, 0, ~(RTGCPTR)0);
2566 pgmUnlock(pVM);
2567 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
2568 return cErrors;
2569}
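/*
 * Illustrative sketch, not part of the original file: strict builds typically
 * call this right after a CR3 sync to verify the shadow state, e.g.:
 */
#if 0 /* example only */
    PGMAssertCR3(pVM, pVCpu, CPUMGetGuestCR3(pVCpu), CPUMGetGuestCR4(pVCpu));
#endif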
2570
2571#endif /* VBOX_STRICT */