VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp@ 20795

Last change on this file was r20795, checked in by vboxsync on 2009-06-22:

VMM: Fixed guest PAE issues on the mac.

1/* $Id: PGMAll.cpp 20795 2009-06-22 18:40:42Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor - All context code.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#define LOG_GROUP LOG_GROUP_PGM
26#include <VBox/pgm.h>
27#include <VBox/cpum.h>
28#include <VBox/selm.h>
29#include <VBox/iom.h>
30#include <VBox/sup.h>
31#include <VBox/mm.h>
32#include <VBox/stam.h>
33#include <VBox/csam.h>
34#include <VBox/patm.h>
35#include <VBox/trpm.h>
36#include <VBox/rem.h>
37#include <VBox/em.h>
38#include <VBox/hwaccm.h>
39#include <VBox/hwacc_vmx.h>
40#include "PGMInternal.h"
41#include <VBox/vm.h>
42#include <iprt/assert.h>
43#include <iprt/asm.h>
44#include <iprt/string.h>
45#include <VBox/log.h>
46#include <VBox/param.h>
47#include <VBox/err.h>
48
49
50/*******************************************************************************
51* Structures and Typedefs *
52*******************************************************************************/
53/**
54 * State structure for PGM_GST_NAME(HandlerVirtualUpdate) that's
55 * passed to PGM_GST_NAME(VirtHandlerUpdateOne) during enumeration.
56 */
57typedef struct PGMHVUSTATE
58{
59 /** The VM handle. */
60 PVM pVM;
61 /** The VMCPU handle. */
62 PVMCPU pVCpu;
63 /** The todo flags. */
64 RTUINT fTodo;
65 /** The CR4 register value. */
66 uint32_t cr4;
67} PGMHVUSTATE, *PPGMHVUSTATE;
68
69
70/*******************************************************************************
71* Internal Functions *
72*******************************************************************************/
73DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
74DECLINLINE(int) pgmShwGetPaePoolPagePD(PPGMCPU pPGM, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde);
75
76/*
77 * Shadow - 32-bit mode
78 */
79#define PGM_SHW_TYPE PGM_TYPE_32BIT
80#define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
81#include "PGMAllShw.h"
82
83/* Guest - real mode */
84#define PGM_GST_TYPE PGM_TYPE_REAL
85#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
86#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
87#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
88#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
89#include "PGMGstDefs.h"
90#include "PGMAllGst.h"
91#include "PGMAllBth.h"
92#undef BTH_PGMPOOLKIND_PT_FOR_PT
93#undef BTH_PGMPOOLKIND_ROOT
94#undef PGM_BTH_NAME
95#undef PGM_GST_TYPE
96#undef PGM_GST_NAME
97
98/* Guest - protected mode */
99#define PGM_GST_TYPE PGM_TYPE_PROT
100#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
101#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
102#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
103#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
104#include "PGMGstDefs.h"
105#include "PGMAllGst.h"
106#include "PGMAllBth.h"
107#undef BTH_PGMPOOLKIND_PT_FOR_PT
108#undef BTH_PGMPOOLKIND_ROOT
109#undef PGM_BTH_NAME
110#undef PGM_GST_TYPE
111#undef PGM_GST_NAME
112
113/* Guest - 32-bit mode */
114#define PGM_GST_TYPE PGM_TYPE_32BIT
115#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
116#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
117#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
118#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
119#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD
120#include "PGMGstDefs.h"
121#include "PGMAllGst.h"
122#include "PGMAllBth.h"
123#undef BTH_PGMPOOLKIND_PT_FOR_BIG
124#undef BTH_PGMPOOLKIND_PT_FOR_PT
125#undef BTH_PGMPOOLKIND_ROOT
126#undef PGM_BTH_NAME
127#undef PGM_GST_TYPE
128#undef PGM_GST_NAME
129
130#undef PGM_SHW_TYPE
131#undef PGM_SHW_NAME
132
133
134/*
135 * Shadow - PAE mode
136 */
137#define PGM_SHW_TYPE PGM_TYPE_PAE
138#define PGM_SHW_NAME(name) PGM_SHW_NAME_PAE(name)
139#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
140#include "PGMAllShw.h"
141
142/* Guest - real mode */
143#define PGM_GST_TYPE PGM_TYPE_REAL
144#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
145#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
146#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
147#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
148#include "PGMGstDefs.h"
149#include "PGMAllBth.h"
150#undef BTH_PGMPOOLKIND_PT_FOR_PT
151#undef BTH_PGMPOOLKIND_ROOT
152#undef PGM_BTH_NAME
153#undef PGM_GST_TYPE
154#undef PGM_GST_NAME
155
156/* Guest - protected mode */
157#define PGM_GST_TYPE PGM_TYPE_PROT
158#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
159#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
160#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
161#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
162#include "PGMGstDefs.h"
163#include "PGMAllBth.h"
164#undef BTH_PGMPOOLKIND_PT_FOR_PT
165#undef BTH_PGMPOOLKIND_ROOT
166#undef PGM_BTH_NAME
167#undef PGM_GST_TYPE
168#undef PGM_GST_NAME
169
170/* Guest - 32-bit mode */
171#define PGM_GST_TYPE PGM_TYPE_32BIT
172#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
173#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
174#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
175#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
176#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_FOR_32BIT
177#include "PGMGstDefs.h"
178#include "PGMAllBth.h"
179#undef BTH_PGMPOOLKIND_PT_FOR_BIG
180#undef BTH_PGMPOOLKIND_PT_FOR_PT
181#undef BTH_PGMPOOLKIND_ROOT
182#undef PGM_BTH_NAME
183#undef PGM_GST_TYPE
184#undef PGM_GST_NAME
185
186
187/* Guest - PAE mode */
188#define PGM_GST_TYPE PGM_TYPE_PAE
189#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
190#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
191#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
192#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
193#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT
194#include "PGMGstDefs.h"
195#include "PGMAllGst.h"
196#include "PGMAllBth.h"
197#undef BTH_PGMPOOLKIND_PT_FOR_BIG
198#undef BTH_PGMPOOLKIND_PT_FOR_PT
199#undef BTH_PGMPOOLKIND_ROOT
200#undef PGM_BTH_NAME
201#undef PGM_GST_TYPE
202#undef PGM_GST_NAME
203
204#undef PGM_SHW_TYPE
205#undef PGM_SHW_NAME
206
207
208#ifndef IN_RC /* AMD64 implies VT-x/AMD-V */
209/*
210 * Shadow - AMD64 mode
211 */
212# define PGM_SHW_TYPE PGM_TYPE_AMD64
213# define PGM_SHW_NAME(name) PGM_SHW_NAME_AMD64(name)
214# include "PGMAllShw.h"
215
216/* Guest - protected mode (only used for AMD-V nested paging in 64 bits mode) */
217# define PGM_GST_TYPE PGM_TYPE_PROT
218# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
219# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
220# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
221# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PD_PHYS
222# include "PGMGstDefs.h"
223# include "PGMAllBth.h"
224# undef BTH_PGMPOOLKIND_PT_FOR_PT
225# undef BTH_PGMPOOLKIND_ROOT
226# undef PGM_BTH_NAME
227# undef PGM_GST_TYPE
228# undef PGM_GST_NAME
229
230# ifdef VBOX_WITH_64_BITS_GUESTS
231/* Guest - AMD64 mode */
232# define PGM_GST_TYPE PGM_TYPE_AMD64
233# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
234# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
235# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
236# define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
237# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_64BIT_PML4
238# include "PGMGstDefs.h"
239# include "PGMAllGst.h"
240# include "PGMAllBth.h"
241# undef BTH_PGMPOOLKIND_PT_FOR_BIG
242# undef BTH_PGMPOOLKIND_PT_FOR_PT
243# undef BTH_PGMPOOLKIND_ROOT
244# undef PGM_BTH_NAME
245# undef PGM_GST_TYPE
246# undef PGM_GST_NAME
247# endif /* VBOX_WITH_64_BITS_GUESTS */
248
249# undef PGM_SHW_TYPE
250# undef PGM_SHW_NAME
251
252
253/*
254 * Shadow - Nested paging mode
255 */
256# define PGM_SHW_TYPE PGM_TYPE_NESTED
257# define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED(name)
258# include "PGMAllShw.h"
259
260/* Guest - real mode */
261# define PGM_GST_TYPE PGM_TYPE_REAL
262# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
263# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_REAL(name)
264# include "PGMGstDefs.h"
265# include "PGMAllBth.h"
266# undef PGM_BTH_NAME
267# undef PGM_GST_TYPE
268# undef PGM_GST_NAME
269
270/* Guest - protected mode */
271# define PGM_GST_TYPE PGM_TYPE_PROT
272# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
273# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PROT(name)
274# include "PGMGstDefs.h"
275# include "PGMAllBth.h"
276# undef PGM_BTH_NAME
277# undef PGM_GST_TYPE
278# undef PGM_GST_NAME
279
280/* Guest - 32-bit mode */
281# define PGM_GST_TYPE PGM_TYPE_32BIT
282# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
283# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT(name)
284# include "PGMGstDefs.h"
285# include "PGMAllBth.h"
286# undef PGM_BTH_NAME
287# undef PGM_GST_TYPE
288# undef PGM_GST_NAME
289
290/* Guest - PAE mode */
291# define PGM_GST_TYPE PGM_TYPE_PAE
292# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
293# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE(name)
294# include "PGMGstDefs.h"
295# include "PGMAllBth.h"
296# undef PGM_BTH_NAME
297# undef PGM_GST_TYPE
298# undef PGM_GST_NAME
299
300# ifdef VBOX_WITH_64_BITS_GUESTS
301/* Guest - AMD64 mode */
302# define PGM_GST_TYPE PGM_TYPE_AMD64
303# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
304# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64(name)
305# include "PGMGstDefs.h"
306# include "PGMAllBth.h"
307# undef PGM_BTH_NAME
308# undef PGM_GST_TYPE
309# undef PGM_GST_NAME
310# endif /* VBOX_WITH_64_BITS_GUESTS */
311
312# undef PGM_SHW_TYPE
313# undef PGM_SHW_NAME
314
315
316/*
317 * Shadow - EPT
318 */
319# define PGM_SHW_TYPE PGM_TYPE_EPT
320# define PGM_SHW_NAME(name) PGM_SHW_NAME_EPT(name)
321# include "PGMAllShw.h"
322
323/* Guest - real mode */
324# define PGM_GST_TYPE PGM_TYPE_REAL
325# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
326# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_REAL(name)
327# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
328# include "PGMGstDefs.h"
329# include "PGMAllBth.h"
330# undef BTH_PGMPOOLKIND_PT_FOR_PT
331# undef PGM_BTH_NAME
332# undef PGM_GST_TYPE
333# undef PGM_GST_NAME
334
335/* Guest - protected mode */
336# define PGM_GST_TYPE PGM_TYPE_PROT
337# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
338# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
339# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
340# include "PGMGstDefs.h"
341# include "PGMAllBth.h"
342# undef BTH_PGMPOOLKIND_PT_FOR_PT
343# undef PGM_BTH_NAME
344# undef PGM_GST_TYPE
345# undef PGM_GST_NAME
346
347/* Guest - 32-bit mode */
348# define PGM_GST_TYPE PGM_TYPE_32BIT
349# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
350# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_32BIT(name)
351# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
352# include "PGMGstDefs.h"
353# include "PGMAllBth.h"
354# undef BTH_PGMPOOLKIND_PT_FOR_PT
355# undef PGM_BTH_NAME
356# undef PGM_GST_TYPE
357# undef PGM_GST_NAME
358
359/* Guest - PAE mode */
360# define PGM_GST_TYPE PGM_TYPE_PAE
361# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
362# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PAE(name)
363# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
364# include "PGMGstDefs.h"
365# include "PGMAllBth.h"
366# undef BTH_PGMPOOLKIND_PT_FOR_PT
367# undef PGM_BTH_NAME
368# undef PGM_GST_TYPE
369# undef PGM_GST_NAME
370
371# ifdef VBOX_WITH_64_BITS_GUESTS
372/* Guest - AMD64 mode */
373# define PGM_GST_TYPE PGM_TYPE_AMD64
374# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
375# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_AMD64(name)
376# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
377# include "PGMGstDefs.h"
378# include "PGMAllBth.h"
379# undef BTH_PGMPOOLKIND_PT_FOR_PT
380# undef PGM_BTH_NAME
381# undef PGM_GST_TYPE
382# undef PGM_GST_NAME
383# endif /* VBOX_WITH_64_BITS_GUESTS */
384
385# undef PGM_SHW_TYPE
386# undef PGM_SHW_NAME
387
388#endif /* !IN_RC */
389
390
391#ifndef IN_RING3
392/**
393 * #PF Handler.
394 *
395 * @returns VBox status code (appropriate for trap handling and GC return).
396 * @param pVCpu VMCPU handle.
397 * @param uErr The trap error code.
398 * @param pRegFrame Trap register frame.
399 * @param pvFault The fault address.
400 */
401VMMDECL(int) PGMTrap0eHandler(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
402{
403 PVM pVM = pVCpu->CTX_SUFF(pVM);
404
405 LogFlow(("PGMTrap0eHandler: uErr=%RGu pvFault=%RGv eip=%04x:%RGv\n", uErr, pvFault, pRegFrame->cs, (RTGCPTR)pRegFrame->rip));
406 STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0e, a);
407 STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );
408
409
410#ifdef VBOX_WITH_STATISTICS
411 /*
412 * Error code stats.
413 */
414 if (uErr & X86_TRAP_PF_US)
415 {
416 if (!(uErr & X86_TRAP_PF_P))
417 {
418 if (uErr & X86_TRAP_PF_RW)
419 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSNotPresentWrite);
420 else
421 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSNotPresentRead);
422 }
423 else if (uErr & X86_TRAP_PF_RW)
424 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSWrite);
425 else if (uErr & X86_TRAP_PF_RSVD)
426 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSReserved);
427 else if (uErr & X86_TRAP_PF_ID)
428 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSNXE);
429 else
430 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSRead);
431 }
432 else
433 { /* Supervisor */
434 if (!(uErr & X86_TRAP_PF_P))
435 {
436 if (uErr & X86_TRAP_PF_RW)
437 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eSVNotPresentWrite);
438 else
439 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eSVNotPresentRead);
440 }
441 else if (uErr & X86_TRAP_PF_RW)
442 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eSVWrite);
443 else if (uErr & X86_TRAP_PF_ID)
444 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eSNXE);
445 else if (uErr & X86_TRAP_PF_RSVD)
446 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eSVReserved);
447 }
448#endif /* VBOX_WITH_STATISTICS */
449
450 /*
451 * Call the worker.
452 */
453 pgmLock(pVM);
454 int rc = PGM_BTH_PFN(Trap0eHandler, pVCpu)(pVCpu, uErr, pRegFrame, pvFault);
455 Assert(PGMIsLockOwner(pVM));
456 pgmUnlock(pVM);
457 if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
458 rc = VINF_SUCCESS;
459 STAM_STATS({ if (rc == VINF_EM_RAW_GUEST_TRAP) STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eGuestPF); });
460 STAM_STATS({ if (!pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution))
461 pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2Misc; });
462 STAM_PROFILE_STOP_EX(&pVCpu->pgm.s.StatRZTrap0e, pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution), a);
463 return rc;
464}
465#endif /* !IN_RING3 */
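/*
 * Editor's note: the sketch below is not part of the original file. It is a
 * minimal illustration of how the X86_TRAP_PF_* error-code bits tested in the
 * statistics block of PGMTrap0eHandler classify a page fault. The helper name
 * is hypothetical and the block is compiled out.
 */
#if 0 /* illustrative sketch only */
static const char *pgmExampleDescribeTrap0eErr(RTGCUINT uErr)
{
    /* User vs. supervisor is carried by X86_TRAP_PF_US; the remaining bits say why. */
    if (!(uErr & X86_TRAP_PF_P))
        return (uErr & X86_TRAP_PF_RW) ? "not-present write" : "not-present read";
    if (uErr & X86_TRAP_PF_RSVD)
        return "reserved bit violation";
    if (uErr & X86_TRAP_PF_ID)
        return "instruction fetch (NX)";
    return (uErr & X86_TRAP_PF_RW) ? "write protection fault" : "read protection fault";
}
#endif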
466
467
468/**
469 * Prefetch a page
470 *
471 * Typically used to sync commonly used pages before entering raw mode
472 * after a CR3 reload.
473 *
474 * @returns VBox status code suitable for scheduling.
475 * @retval VINF_SUCCESS on success.
476 * @retval VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
477 * @param pVCpu VMCPU handle.
478 * @param GCPtrPage Page to invalidate.
479 */
480VMMDECL(int) PGMPrefetchPage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
481{
482 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,Prefetch), a);
483 int rc = PGM_BTH_PFN(PrefetchPage, pVCpu)(pVCpu, GCPtrPage);
484 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,Prefetch), a);
485 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
486 return rc;
487}
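/*
 * Editor's note: illustrative sketch, not part of the original file. It shows
 * the calling convention documented above: VINF_PGM_SYNC_CR3 is not an error,
 * and the caller may leave the resync to the pending CR3 sync force action
 * (assumption for this sketch). The helper name is hypothetical.
 */
#if 0 /* illustrative sketch only */
static int pgmExamplePrefetchBeforeRawMode(PVMCPU pVCpu, RTGCPTR GCPtrPage)
{
    int rc = PGMPrefetchPage(pVCpu, GCPtrPage);
    if (rc == VINF_PGM_SYNC_CR3)
        rc = VINF_SUCCESS; /* out of shadow pages; the CR3 sync will bring the page in later */
    return rc;
}
#endif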
488
489
490/**
491 * Gets the mapping corresponding to the specified address (if any).
492 *
493 * @returns Pointer to the mapping.
494 * @returns NULL if no mapping was found.
495 *
496 * @param pVM The virtual machine.
497 * @param GCPtr The guest context pointer.
498 */
499PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr)
500{
501 PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
502 while (pMapping)
503 {
504 if ((uintptr_t)GCPtr < (uintptr_t)pMapping->GCPtr)
505 break;
506 if ((uintptr_t)GCPtr - (uintptr_t)pMapping->GCPtr < pMapping->cb)
507 return pMapping;
508 pMapping = pMapping->CTX_SUFF(pNext);
509 }
510 return NULL;
511}
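/*
 * Editor's note: illustrative sketch, not part of the original file. The loop
 * above relies on the mapping list being sorted by GCPtr and on unsigned
 * wrap-around, so a single compare answers "is the address inside
 * [pMapping->GCPtr, pMapping->GCPtr + cb)". The helper name is hypothetical.
 */
#if 0 /* illustrative sketch only */
static bool pgmExampleAddrInMapping(PPGMMAPPING pMapping, RTGCPTR GCPtr)
{
    /* Addresses below the mapping wrap to a huge value and fail the compare. */
    return (uintptr_t)GCPtr - (uintptr_t)pMapping->GCPtr < pMapping->cb;
}
#endif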
512
513
514/**
515 * Verifies a range of pages for read or write access
516 *
517 * Only checks the guest's page tables
518 *
519 * @returns VBox status code.
520 * @param pVCpu VMCPU handle.
521 * @param Addr Guest virtual address to check
522 * @param cbSize Access size
523 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
524 * @remarks Currently not in use.
525 */
526VMMDECL(int) PGMIsValidAccess(PVMCPU pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
527{
528 /*
529 * Validate input.
530 */
531 if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
532 {
533 AssertMsgFailed(("PGMIsValidAccess: invalid access type %08x\n", fAccess));
534 return VERR_INVALID_PARAMETER;
535 }
536
537 uint64_t fPage;
538 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)Addr, &fPage, NULL);
539 if (RT_FAILURE(rc))
540 {
541 Log(("PGMIsValidAccess: access violation for %RGv rc=%d\n", Addr, rc));
542 return VINF_EM_RAW_GUEST_TRAP;
543 }
544
545 /*
546 * Check if the access would cause a page fault
547 *
548 * Note that hypervisor page directories are not present in the guest's tables, so this check
549 * is sufficient.
550 */
551 bool fWrite = !!(fAccess & X86_PTE_RW);
552 bool fUser = !!(fAccess & X86_PTE_US);
553 if ( !(fPage & X86_PTE_P)
554 || (fWrite && !(fPage & X86_PTE_RW))
555 || (fUser && !(fPage & X86_PTE_US)) )
556 {
557 Log(("PGMIsValidAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPage, fWrite, fUser));
558 return VINF_EM_RAW_GUEST_TRAP;
559 }
560 if ( RT_SUCCESS(rc)
561 && PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize))
562 return PGMIsValidAccess(pVCpu, Addr + PAGE_SIZE, (cbSize > PAGE_SIZE) ? cbSize - PAGE_SIZE : 1, fAccess);
563 return rc;
564}
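/*
 * Editor's note: illustrative sketch, not part of the original file (the API is
 * noted above as currently unused). It shows how fAccess is composed from
 * X86_PTE_RW / X86_PTE_US to ask whether the guest itself could perform the
 * access. The helper name is hypothetical.
 */
#if 0 /* illustrative sketch only */
static bool pgmExampleGuestUserCanWrite(PVMCPU pVCpu, RTGCPTR GCPtrBuf, uint32_t cbBuf)
{
    /* Checks the guest page tables only; violations come back as VINF_EM_RAW_GUEST_TRAP. */
    return PGMIsValidAccess(pVCpu, GCPtrBuf, cbBuf, X86_PTE_RW | X86_PTE_US) == VINF_SUCCESS;
}
#endif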
565
566
567/**
568 * Verifies a range of pages for read or write access
569 *
570 * Supports handling of pages marked for dirty bit tracking and CSAM
571 *
572 * @returns VBox status code.
573 * @param pVCpu VMCPU handle.
574 * @param Addr Guest virtual address to check
575 * @param cbSize Access size
576 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
577 */
578VMMDECL(int) PGMVerifyAccess(PVMCPU pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
579{
580 PVM pVM = pVCpu->CTX_SUFF(pVM);
581
582 AssertMsg(!(fAccess & ~(X86_PTE_US | X86_PTE_RW)), ("PGMVerifyAccess: invalid access type %08x\n", fAccess));
583
584 /*
585 * Get going.
586 */
587 uint64_t fPageGst;
588 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)Addr, &fPageGst, NULL);
589 if (RT_FAILURE(rc))
590 {
591 Log(("PGMVerifyAccess: access violation for %RGv rc=%d\n", Addr, rc));
592 return VINF_EM_RAW_GUEST_TRAP;
593 }
594
595 /*
596 * Check if the access would cause a page fault
597 *
598 * Note that hypervisor page directories are not present in the guest's tables, so this check
599 * is sufficient.
600 */
601 const bool fWrite = !!(fAccess & X86_PTE_RW);
602 const bool fUser = !!(fAccess & X86_PTE_US);
603 if ( !(fPageGst & X86_PTE_P)
604 || (fWrite && !(fPageGst & X86_PTE_RW))
605 || (fUser && !(fPageGst & X86_PTE_US)) )
606 {
607 Log(("PGMVerifyAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPageGst, fWrite, fUser));
608 return VINF_EM_RAW_GUEST_TRAP;
609 }
610
611 if (!HWACCMIsNestedPagingActive(pVM))
612 {
613 /*
614 * Next step is to verify if we protected this page for dirty bit tracking or for CSAM scanning
615 */
616 rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, NULL, NULL);
617 if ( rc == VERR_PAGE_NOT_PRESENT
618 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
619 {
620 /*
621 * Page is not present in our page tables.
622 * Try to sync it!
623 */
624 Assert(X86_TRAP_PF_RW == X86_PTE_RW && X86_TRAP_PF_US == X86_PTE_US);
625 uint32_t uErr = fAccess & (X86_TRAP_PF_RW | X86_TRAP_PF_US);
626 rc = PGM_BTH_PFN(VerifyAccessSyncPage, pVCpu)(pVCpu, Addr, fPageGst, uErr);
627 if (rc != VINF_SUCCESS)
628 return rc;
629 }
630 else
631 AssertMsg(rc == VINF_SUCCESS, ("PGMShwGetPage %RGv failed with %Rrc\n", Addr, rc));
632 }
633
634#if 0 /* def VBOX_STRICT; triggers too often now */
635 /*
636 * This check is a bit paranoid, but useful.
637 */
638 /** @note this will assert when writing to monitored pages (a bit annoying actually) */
639 uint64_t fPageShw;
640 rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, &fPageShw, NULL);
641 if ( (rc == VERR_PAGE_NOT_PRESENT || RT_FAILURE(rc))
642 || (fWrite && !(fPageShw & X86_PTE_RW))
643 || (fUser && !(fPageShw & X86_PTE_US)) )
644 {
645 AssertMsgFailed(("Unexpected access violation for %RGv! rc=%Rrc write=%d user=%d\n",
646 Addr, rc, fWrite && !(fPageShw & X86_PTE_RW), fUser && !(fPageShw & X86_PTE_US)));
647 return VINF_EM_RAW_GUEST_TRAP;
648 }
649#endif
650
651 if ( RT_SUCCESS(rc)
652 && ( PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize - 1)
653 || Addr + cbSize < Addr))
654 {
655 /* Don't recursively call PGMVerifyAccess as we might run out of stack. */
656 for (;;)
657 {
658 Addr += PAGE_SIZE;
659 if (cbSize > PAGE_SIZE)
660 cbSize -= PAGE_SIZE;
661 else
662 cbSize = 1;
663 rc = PGMVerifyAccess(pVCpu, Addr, 1, fAccess);
664 if (rc != VINF_SUCCESS)
665 break;
666 if (PAGE_ADDRESS(Addr) == PAGE_ADDRESS(Addr + cbSize - 1))
667 break;
668 }
669 }
670 return rc;
671}
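/*
 * Editor's note: illustrative sketch, not part of the original file. Unlike
 * PGMIsValidAccess above, PGMVerifyAccess also resyncs the shadow page when it
 * was write-protected for dirty-bit tracking or CSAM, so it is the variant to
 * use right before actually touching guest memory. The helper name is
 * hypothetical.
 */
#if 0 /* illustrative sketch only */
static int pgmExampleEnsureSupervisorWritable(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t cb)
{
    /* No X86_PTE_US: a supervisor-mode write is being verified. */
    return PGMVerifyAccess(pVCpu, GCPtr, cb, X86_PTE_RW);
}
#endif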
672
673
674/**
675 * Emulation of the invlpg instruction (HC only actually).
676 *
677 * @returns VBox status code, special care required.
678 * @retval VINF_PGM_SYNC_CR3 - handled.
679 * @retval VINF_EM_RAW_EMULATE_INSTR - not handled (RC only).
680 * @retval VERR_REM_FLUSHED_PAGES_OVERFLOW - not handled.
681 *
682 * @param pVCpu VMCPU handle.
683 * @param GCPtrPage Page to invalidate.
684 *
685 * @remark ASSUMES the page table entry or page directory is valid. Fairly
686 * safe, but there could be edge cases!
687 *
688 * @todo Flush page or page directory only if necessary!
689 */
690VMMDECL(int) PGMInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
691{
692 PVM pVM = pVCpu->CTX_SUFF(pVM);
693 int rc;
694 Log3(("PGMInvalidatePage: GCPtrPage=%RGv\n", GCPtrPage));
695
696#ifndef IN_RING3
697 /*
698 * Notify the recompiler so it can record this instruction.
699 * Failure happens when it's out of space. We'll return to HC in that case.
700 */
701 rc = REMNotifyInvalidatePage(pVM, GCPtrPage);
702 if (rc != VINF_SUCCESS)
703 return rc;
704#endif /* !IN_RING3 */
705
706
707#ifdef IN_RC
708 /*
709 * Check for conflicts and pending CR3 monitoring updates.
710 */
711 if (!pVM->pgm.s.fMappingsFixed)
712 {
713 if ( pgmGetMapping(pVM, GCPtrPage)
714 && PGMGstGetPage(pVCpu, GCPtrPage, NULL, NULL) != VERR_PAGE_TABLE_NOT_PRESENT)
715 {
716 LogFlow(("PGMGCInvalidatePage: Conflict!\n"));
717 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
718 STAM_COUNTER_INC(&pVM->pgm.s.StatRCInvlPgConflict);
719 return VINF_PGM_SYNC_CR3;
720 }
721
722 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
723 {
724 LogFlow(("PGMGCInvalidatePage: PGM_SYNC_MONITOR_CR3 -> reinterpret instruction in R3\n"));
725 STAM_COUNTER_INC(&pVM->pgm.s.StatRCInvlPgSyncMonCR3);
726 return VINF_EM_RAW_EMULATE_INSTR;
727 }
728 }
729#endif /* IN_RC */
730
731 /*
732 * Call paging mode specific worker.
733 */
734 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePage), a);
735 pgmLock(pVM);
736 rc = PGM_BTH_PFN(InvalidatePage, pVCpu)(pVCpu, GCPtrPage);
737 pgmUnlock(pVM);
738 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePage), a);
739
740#ifdef IN_RING3
741 /*
742 * Check if we have a pending update of the CR3 monitoring.
743 */
744 if ( RT_SUCCESS(rc)
745 && (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3))
746 {
747 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
748 Assert(!pVM->pgm.s.fMappingsFixed);
749 }
750
751 /*
752 * Inform CSAM about the flush
753 *
754 * Note: This is to check if monitored pages have been changed; when we implement
755 * callbacks for virtual handlers, this is no longer required.
756 */
757 CSAMR3FlushPage(pVM, GCPtrPage);
758#endif /* IN_RING3 */
759 return rc;
760}
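/*
 * Editor's note: illustrative sketch, not part of the original file. It shows
 * the status codes an invlpg emulation path has to be prepared for, per the
 * function documentation above. The helper name is hypothetical.
 */
#if 0 /* illustrative sketch only */
static int pgmExampleEmulateInvlpg(PVMCPU pVCpu, RTGCPTR GCPtrPage)
{
    int rc = PGMInvalidatePage(pVCpu, GCPtrPage);
    /* VINF_PGM_SYNC_CR3: covered by the pending full sync; VINF_EM_RAW_EMULATE_INSTR
       (RC only): redo the instruction in ring-3; failure codes are passed up as-is. */
    return rc;
}
#endif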
761
762
763/**
764 * Executes an instruction using the interpreter.
765 *
766 * @returns VBox status code (appropriate for trap handling and GC return).
767 * @param pVM VM handle.
768 * @param pVCpu VMCPU handle.
769 * @param pRegFrame Register frame.
770 * @param pvFault Fault address.
771 */
772VMMDECL(int) PGMInterpretInstruction(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
773{
774 uint32_t cb;
775 int rc = EMInterpretInstruction(pVM, pVCpu, pRegFrame, pvFault, &cb);
776 if (rc == VERR_EM_INTERPRETER)
777 rc = VINF_EM_RAW_EMULATE_INSTR;
778 if (rc != VINF_SUCCESS)
779 Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%RGv)\n", rc, pvFault));
780 return rc;
781}
782
783
784/**
785 * Gets effective page information (from the VMM page directory).
786 *
787 * @returns VBox status.
788 * @param pVCpu VMCPU handle.
789 * @param GCPtr Guest Context virtual address of the page.
790 * @param pfFlags Where to store the flags. These are X86_PTE_*.
791 * @param pHCPhys Where to store the HC physical address of the page.
792 * This is page aligned.
793 * @remark You should use PGMMapGetPage() for pages in a mapping.
794 */
795VMMDECL(int) PGMShwGetPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
796{
797 pgmLock(pVCpu->CTX_SUFF(pVM));
798 int rc = PGM_SHW_PFN(GetPage, pVCpu)(pVCpu, GCPtr, pfFlags, pHCPhys);
799 pgmUnlock(pVCpu->CTX_SUFF(pVM));
800 return rc;
801}
802
803
804/**
805 * Sets (replaces) the page flags for a range of pages in the shadow context.
806 *
807 * @returns VBox status.
808 * @param pVCpu VMCPU handle.
809 * @param GCPtr The address of the first page.
810 * @param cb The size of the range in bytes.
811 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
812 * @remark You must use PGMMapSetPage() for pages in a mapping.
813 */
814VMMDECL(int) PGMShwSetPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
815{
816 return PGMShwModifyPage(pVCpu, GCPtr, cb, fFlags, 0);
817}
818
819
820/**
821 * Modify page flags for a range of pages in the shadow context.
822 *
823 * The existing flags are ANDed with the fMask and ORed with the fFlags.
824 *
825 * @returns VBox status code.
826 * @param pVCpu VMCPU handle.
827 * @param GCPtr Virtual address of the first page in the range.
828 * @param cb Size (in bytes) of the range to apply the modification to.
829 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
830 * @param fMask The AND mask - page flags X86_PTE_*.
831 * Be very CAREFUL when ~'ing constants which could be 32-bit!
832 * @remark You must use PGMMapModifyPage() for pages in a mapping.
833 */
834VMMDECL(int) PGMShwModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
835{
836 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
837 Assert(cb);
838
839 /*
840 * Align the input.
841 */
842 cb += GCPtr & PAGE_OFFSET_MASK;
843 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
844 GCPtr = (GCPtr & PAGE_BASE_GC_MASK); /** @todo this ain't necessary, right... */
845
846 /*
847 * Call worker.
848 */
849 PVM pVM = pVCpu->CTX_SUFF(pVM);
850 pgmLock(pVM);
851 int rc = PGM_SHW_PFN(ModifyPage, pVCpu)(pVCpu, GCPtr, cb, fFlags, fMask);
852 pgmUnlock(pVM);
853 return rc;
854}
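/*
 * Editor's note: illustrative sketch, not part of the original file. It mirrors
 * the range normalisation performed above so the "align the input" step is easy
 * to follow; the helper name is hypothetical.
 */
#if 0 /* illustrative sketch only */
static size_t pgmExampleCountPagesInRange(RTGCPTR GCPtr, size_t cb)
{
    cb += GCPtr & PAGE_OFFSET_MASK;   /* include the offset into the first page */
    cb  = RT_ALIGN_Z(cb, PAGE_SIZE);  /* round up to whole pages */
    return cb >> PAGE_SHIFT;          /* e.g. GCPtr=0xff0, cb=0x20 -> 2 pages */
}
#endif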
855
856/**
857 * Gets the shadow page directory for the specified address, PAE.
858 *
859 * @returns VBox status code.
860 * @param pVCpu The VMCPU handle.
861 * @param GCPtr The address.
862 * @param pGstPdpe Guest PDPT entry
863 * @param ppPD Receives address of page directory
864 */
865int pgmShwSyncPaePDPtr(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDPE pGstPdpe, PX86PDPAE *ppPD)
866{
867 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
868 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(&pVCpu->pgm.s);
869 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
870 PVM pVM = pVCpu->CTX_SUFF(pVM);
871 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
872 PPGMPOOLPAGE pShwPage;
873 int rc;
874
875 Assert(PGMIsLockOwner(pVM));
876
877 /* Allocate page directory if not present. */
878 if ( !pPdpe->n.u1Present
879 && !(pPdpe->u & X86_PDPE_PG_MASK))
880 {
881 bool fNestedPaging = HWACCMIsNestedPagingActive(pVM);
882 bool fPaging = !!(CPUMGetGuestCR0(pVCpu) & X86_CR0_PG);
883 RTGCPTR64 GCPdPt;
884 PGMPOOLKIND enmKind;
885
886# if defined(IN_RC)
887 /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
888 PGMDynLockHCPage(pVM, (uint8_t *)pPdpe);
889# endif
890
891 if (fNestedPaging || !fPaging)
892 {
893 /* AMD-V nested paging or real/protected mode without paging */
894 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
895 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
896 }
897 else
898 {
899 Assert(pGstPdpe);
900
901 if (CPUMGetGuestCR4(pVCpu) & X86_CR4_PAE)
902 {
903 if (!pGstPdpe->n.u1Present)
904 {
905 /* PD not present; guest must reload CR3 to change it.
906 * No need to monitor anything in this case.
907 */
908 Assert(!HWACCMIsEnabled(pVM));
909
910 GCPdPt = pGstPdpe->u & X86_PDPE_PG_MASK;
911 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
912 pGstPdpe->n.u1Present = 1;
913 }
914 else
915 {
916 GCPdPt = pGstPdpe->u & X86_PDPE_PG_MASK;
917 enmKind = PGMPOOLKIND_PAE_PD_FOR_PAE_PD;
918 }
919 }
920 else
921 {
922 GCPdPt = CPUMGetGuestCR3(pVCpu);
923 enmKind = (PGMPOOLKIND)(PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD + iPdPt);
924 }
925 }
926
927 /* Create a reference back to the PDPT by using the index in its shadow page. */
928 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPdPt, &pShwPage);
929 AssertRCReturn(rc, rc);
930
931 /* The PD was cached or created; hook it up now. */
932 pPdpe->u |= pShwPage->Core.Key
933 | (pGstPdpe->u & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));
934
935# if defined(IN_RC)
936 /* In 32-bit PAE mode we *must* invalidate the TLB when changing a PDPT entry; the CPU fetches them only during cr3 load, so any
937 * non-present PDPT will continue to cause page faults.
938 */
939 ASMReloadCR3();
940 PGMDynUnlockHCPage(pVM, (uint8_t *)pPdpe);
941# endif
942 }
943 else
944 {
945 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
946 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
947 Assert((pPdpe->u & X86_PDPE_PG_MASK) == pShwPage->Core.Key);
948
949 pgmPoolCacheUsed(pPool, pShwPage);
950 }
951 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
952 return VINF_SUCCESS;
953}
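/*
 * Editor's note: illustrative sketch, not part of the original file. It spells
 * out the PAE index derivation used above (2-bit PDPT index, 9-bit PD index);
 * the helper name is hypothetical.
 */
#if 0 /* illustrative sketch only */
static void pgmExamplePaeIndices(RTGCPTR GCPtr, unsigned *piPdpt, unsigned *piPd)
{
    *piPdpt = (GCPtr >> X86_PDPT_SHIFT)   & X86_PDPT_MASK_PAE; /* bits 30-31 */
    *piPd   = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;   /* bits 21-29 */
}
#endif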
954
955
956/**
957 * Gets the pointer to the shadow page directory entry for an address, PAE.
958 *
959 * @returns VBox status code.
960 * @param pPGM Pointer to the PGMCPU instance data.
961 * @param GCPtr The address.
962 * @param ppShwPde Receives the address of the pgm pool page for the shadow page directory
963 */
964DECLINLINE(int) pgmShwGetPaePoolPagePD(PPGMCPU pPGM, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde)
965{
966 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
967 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pPGM);
968
969 Assert(PGMIsLockOwner(PGMCPU2VM(pPGM)));
970
971 AssertReturn(pPdpt, VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT); /* can't happen */
972 if (!pPdpt->a[iPdPt].n.u1Present)
973 {
974 LogFlow(("pgmShwGetPaePoolPagePD: PD %d not present (%RX64)\n", iPdPt, pPdpt->a[iPdPt].u));
975 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
976 }
977 AssertMsg(pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK, ("GCPtr=%RGv\n", GCPtr));
978
979 /* Fetch the pgm pool shadow descriptor. */
980 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(PGMCPU2PGM(pPGM)->CTX_SUFF(pPool), pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
981 AssertReturn(pShwPde, VERR_INTERNAL_ERROR);
982
983 *ppShwPde = pShwPde;
984 return VINF_SUCCESS;
985}
986
987#ifndef IN_RC
988
989/**
990 * Syncs the SHADOW page directory pointer for the specified address.
991 *
992 * Allocates backing pages in case the PDPT or PML4 entry is missing.
993 *
994 * The caller is responsible for making sure the guest has a valid PD before
995 * calling this function.
996 *
997 * @returns VBox status.
998 * @param pVCpu VMCPU handle.
999 * @param GCPtr The address.
1000 * @param pGstPml4e Guest PML4 entry
1001 * @param pGstPdpe Guest PDPT entry
1002 * @param ppPD Receives address of page directory
1003 */
1004int pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E pGstPml4e, PX86PDPE pGstPdpe, PX86PDPAE *ppPD)
1005{
1006 PPGMCPU pPGM = &pVCpu->pgm.s;
1007 PVM pVM = pVCpu->CTX_SUFF(pVM);
1008 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1009 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1010 PX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pPGM, iPml4);
1011 bool fNestedPaging = HWACCMIsNestedPagingActive(pVM);
1012 bool fPaging = !!(CPUMGetGuestCR0(pVCpu) & X86_CR0_PG);
1013 PPGMPOOLPAGE pShwPage;
1014 int rc;
1015
1016 Assert(PGMIsLockOwner(pVM));
1017
1018 /* Allocate page directory pointer table if not present. */
1019 if ( !pPml4e->n.u1Present
1020 && !(pPml4e->u & X86_PML4E_PG_MASK))
1021 {
1022 RTGCPTR64 GCPml4;
1023 PGMPOOLKIND enmKind;
1024
1025 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1026
1027 if (fNestedPaging || !fPaging)
1028 {
1029 /* AMD-V nested paging or real/protected mode without paging */
1030 GCPml4 = (RTGCPTR64)iPml4 << X86_PML4_SHIFT;
1031 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_PHYS;
1032 }
1033 else
1034 {
1035 Assert(pGstPml4e && pGstPdpe);
1036
1037 GCPml4 = pGstPml4e->u & X86_PML4E_PG_MASK;
1038 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT;
1039 }
1040
1041 /* Create a reference back to the PDPT by using the index in its shadow page. */
1042 rc = pgmPoolAlloc(pVM, GCPml4, enmKind, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, &pShwPage);
1043 AssertRCReturn(rc, rc);
1044 }
1045 else
1046 {
1047 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
1048 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1049
1050 pgmPoolCacheUsed(pPool, pShwPage);
1051 }
1052 /* The PDPT was cached or created; hook it up now. */
1053 pPml4e->u |= pShwPage->Core.Key
1054 | (pGstPml4e->u & ~(X86_PML4E_PG_MASK | X86_PML4E_AVL_MASK | X86_PML4E_PCD | X86_PML4E_PWT));
1055
1056 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1057 PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1058 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1059
1060 /* Allocate page directory if not present. */
1061 if ( !pPdpe->n.u1Present
1062 && !(pPdpe->u & X86_PDPE_PG_MASK))
1063 {
1064 RTGCPTR64 GCPdPt;
1065 PGMPOOLKIND enmKind;
1066
1067 if (fNestedPaging || !fPaging)
1068 {
1069 /* AMD-V nested paging or real/protected mode without paging */
1070 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
1071 enmKind = PGMPOOLKIND_64BIT_PD_FOR_PHYS;
1072 }
1073 else
1074 {
1075 Assert(pGstPdpe);
1076
1077 GCPdPt = pGstPdpe->u & X86_PDPE_PG_MASK;
1078 enmKind = PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD;
1079 }
1080
1081 /* Create a reference back to the PDPT by using the index in its shadow page. */
1082 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, pShwPage->idx, iPdPt, &pShwPage);
1083 AssertRCReturn(rc, rc);
1084 }
1085 else
1086 {
1087 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
1088 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1089
1090 pgmPoolCacheUsed(pPool, pShwPage);
1091 }
1092 /* The PD was cached or created; hook it up now. */
1093 pPdpe->u |= pShwPage->Core.Key
1094 | (pGstPdpe->u & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));
1095
1096 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1097 return VINF_SUCCESS;
1098}
1099
1100
1101/**
1102 * Gets the SHADOW page directory pointer for the specified address (long mode).
1103 *
1104 * @returns VBox status.
1105 * @param pVCpu VMCPU handle.
1106 * @param GCPtr The address.
1107 * @param ppPdpt Receives address of pdpt
1108 * @param ppPD Receives address of page directory
1109 */
1110DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
1111{
1112 PPGMCPU pPGM = &pVCpu->pgm.s;
1113 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1114 PCX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pPGM, iPml4);
1115
1116 Assert(PGMIsLockOwner(PGMCPU2VM(pPGM)));
1117
1118 AssertReturn(pPml4e, VERR_INTERNAL_ERROR);
1119 if (ppPml4e)
1120 *ppPml4e = (PX86PML4E)pPml4e;
1121
1122 Log4(("pgmShwGetLongModePDPtr %VGv (%VHv) %RX64\n", GCPtr, pPml4e, pPml4e->u));
1123
1124 if (!pPml4e->n.u1Present)
1125 return VERR_PAGE_MAP_LEVEL4_NOT_PRESENT;
1126
1127 PVM pVM = pVCpu->CTX_SUFF(pVM);
1128 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1129 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
1130 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1131
1132 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1133 PCX86PDPT pPdpt = *ppPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1134 if (!pPdpt->a[iPdPt].n.u1Present)
1135 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1136
1137 pShwPage = pgmPoolGetPage(pPool, pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
1138 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1139
1140 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1141 return VINF_SUCCESS;
1142}
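/*
 * Editor's note: illustrative sketch, not part of the original file. It spells
 * out the long-mode index derivation used by the PML4/PDPT walkers above
 * (9 bits per level); the helper name is hypothetical.
 */
#if 0 /* illustrative sketch only */
static void pgmExampleLongModeIndices(RTGCPTR64 GCPtr, unsigned *piPml4, unsigned *piPdpt)
{
    *piPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;       /* bits 39-47 */
    *piPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64; /* bits 30-38 */
}
#endif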
1143
1144
1145/**
1146 * Syncs the SHADOW EPT page directory pointer for the specified address. Allocates
1147 * backing pages in case the PDPT or PML4 entry is missing.
1148 *
1149 * @returns VBox status.
1150 * @param pVCpu VMCPU handle.
1151 * @param GCPtr The address.
1152 * @param ppPdpt Receives address of pdpt
1153 * @param ppPD Receives address of page directory
1154 */
1155int pgmShwGetEPTPDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD)
1156{
1157 PPGMCPU pPGM = &pVCpu->pgm.s;
1158 PVM pVM = pVCpu->CTX_SUFF(pVM);
1159 const unsigned iPml4 = (GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
1160 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1161 PEPTPML4 pPml4;
1162 PEPTPML4E pPml4e;
1163 PPGMPOOLPAGE pShwPage;
1164 int rc;
1165
1166 Assert(HWACCMIsNestedPagingActive(pVM));
1167 Assert(PGMIsLockOwner(pVM));
1168
1169 pPml4 = (PEPTPML4)PGMPOOL_PAGE_2_PTR_BY_PGMCPU(pPGM, pPGM->CTX_SUFF(pShwPageCR3));
1170 Assert(pPml4);
1171
1172 /* Allocate page directory pointer table if not present. */
1173 pPml4e = &pPml4->a[iPml4];
1174 if ( !pPml4e->n.u1Present
1175 && !(pPml4e->u & EPT_PML4E_PG_MASK))
1176 {
1177 Assert(!(pPml4e->u & EPT_PML4E_PG_MASK));
1178 RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT;
1179
1180 rc = pgmPoolAlloc(pVM, GCPml4, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOL_IDX_NESTED_ROOT, iPml4, &pShwPage);
1181 AssertRCReturn(rc, rc);
1182 }
1183 else
1184 {
1185 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
1186 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1187
1188 pgmPoolCacheUsed(pPool, pShwPage);
1189 }
1190 /* The PDPT was cached or created; hook it up now and fill with the default value. */
1191 pPml4e->u = pShwPage->Core.Key;
1192 pPml4e->n.u1Present = 1;
1193 pPml4e->n.u1Write = 1;
1194 pPml4e->n.u1Execute = 1;
1195
1196 const unsigned iPdPt = (GCPtr >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
1197 PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1198 PEPTPDPTE pPdpe = &pPdpt->a[iPdPt];
1199
1200 if (ppPdpt)
1201 *ppPdpt = pPdpt;
1202
1203 /* Allocate page directory if not present. */
1204 if ( !pPdpe->n.u1Present
1205 && !(pPdpe->u & EPT_PDPTE_PG_MASK))
1206 {
1207 RTGCPTR64 GCPdPt = (RTGCPTR64)iPdPt << EPT_PDPT_SHIFT;
1208
1209 rc = pgmPoolAlloc(pVM, GCPdPt, PGMPOOLKIND_64BIT_PD_FOR_PHYS, pShwPage->idx, iPdPt, &pShwPage);
1210 AssertRCReturn(rc, rc);
1211 }
1212 else
1213 {
1214 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & EPT_PDPTE_PG_MASK);
1215 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1216
1217 pgmPoolCacheUsed(pPool, pShwPage);
1218 }
1219 /* The PD was cached or created; hook it up now and fill with the default value. */
1220 pPdpe->u = pShwPage->Core.Key;
1221 pPdpe->n.u1Present = 1;
1222 pPdpe->n.u1Write = 1;
1223 pPdpe->n.u1Execute = 1;
1224
1225 *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1226 return VINF_SUCCESS;
1227}
1228
1229#endif /* !IN_RC */
1230
1231/**
1232 * Gets effective Guest OS page information.
1233 *
1234 * When GCPtr is in a big page, the function will return as if it was a normal
1235 * 4KB page. If the need for distinguishing between big and normal page becomes
1236 * necessary at a later point, a PGMGstGetPage() will be created for that
1237 * purpose.
1238 *
1239 * @returns VBox status.
1240 * @param pVCpu VMCPU handle.
1241 * @param GCPtr Guest Context virtual address of the page.
1242 * @param pfFlags Where to store the flags. These are X86_PTE_*, even for big pages.
1243 * @param pGCPhys Where to store the GC physical address of the page.
1244 * This is page aligned.
1245 */
1246VMMDECL(int) PGMGstGetPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
1247{
1248 return PGM_GST_PFN(GetPage, pVCpu)(pVCpu, GCPtr, pfFlags, pGCPhys);
1249}
1250
1251
1252/**
1253 * Checks if the page is present.
1254 *
1255 * @returns true if the page is present.
1256 * @returns false if the page is not present.
1257 * @param pVCpu VMCPU handle.
1258 * @param GCPtr Address within the page.
1259 */
1260VMMDECL(bool) PGMGstIsPagePresent(PVMCPU pVCpu, RTGCPTR GCPtr)
1261{
1262 int rc = PGMGstGetPage(pVCpu, GCPtr, NULL, NULL);
1263 return RT_SUCCESS(rc);
1264}
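/*
 * Editor's note: illustrative sketch, not part of the original file. It shows
 * the typical PGMGstGetPage usage pattern: the returned physical address is
 * page aligned, so the caller adds the page offset back. The helper name is
 * hypothetical.
 */
#if 0 /* illustrative sketch only */
static int pgmExampleGstTranslate(PVMCPU pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
{
    uint64_t fFlags;
    int rc = PGMGstGetPage(pVCpu, GCPtr, &fFlags, pGCPhys);
    if (RT_SUCCESS(rc))
        *pGCPhys |= GCPtr & PAGE_OFFSET_MASK; /* fFlags holds the X86_PTE_* attributes */
    return rc;
}
#endif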
1265
1266
1267/**
1268 * Sets (replaces) the page flags for a range of pages in the guest's tables.
1269 *
1270 * @returns VBox status.
1271 * @param pVCpu VMCPU handle.
1272 * @param GCPtr The address of the first page.
1273 * @param cb The size of the range in bytes.
1274 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
1275 */
1276VMMDECL(int) PGMGstSetPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
1277{
1278 return PGMGstModifyPage(pVCpu, GCPtr, cb, fFlags, 0);
1279}
1280
1281
1282/**
1283 * Modify page flags for a range of pages in the guest's tables
1284 *
1285 * The existing flags are ANDed with the fMask and ORed with the fFlags.
1286 *
1287 * @returns VBox status code.
1288 * @param pVCpu VMCPU handle.
1289 * @param GCPtr Virtual address of the first page in the range.
1290 * @param cb Size (in bytes) of the range to apply the modification to.
1291 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
1292 * @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
1293 * Be very CAREFUL when ~'ing constants which could be 32-bit!
1294 */
1295VMMDECL(int) PGMGstModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
1296{
1297 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,GstModifyPage), a);
1298
1299 /*
1300 * Validate input.
1301 */
1302 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
1303 Assert(cb);
1304
1305 LogFlow(("PGMGstModifyPage %RGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));
1306
1307 /*
1308 * Adjust input.
1309 */
1310 cb += GCPtr & PAGE_OFFSET_MASK;
1311 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
1312 GCPtr = (GCPtr & PAGE_BASE_GC_MASK);
1313
1314 /*
1315 * Call worker.
1316 */
1317 int rc = PGM_GST_PFN(ModifyPage, pVCpu)(pVCpu, GCPtr, cb, fFlags, fMask);
1318
1319 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,GstModifyPage), a);
1320 return rc;
1321}
1322
1323#ifdef IN_RING3
1324
1325/**
1326 * Performs the lazy mapping of the 32-bit guest PD.
1327 *
1328 * @returns Pointer to the mapping.
1329 * @param pPGM The PGM instance data.
1330 */
1331PX86PD pgmGstLazyMap32BitPD(PPGMCPU pPGM)
1332{
1333 Assert(!pPGM->CTX_SUFF(pGst32BitPd));
1334 PVM pVM = PGMCPU2VM(pPGM);
1335 pgmLock(pVM);
1336
1337 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, pPGM->GCPhysCR3);
1338 AssertReturn(pPage, NULL);
1339
1340 RTHCPTR HCPtrGuestCR3;
1341 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pPGM->GCPhysCR3 & X86_CR3_PAGE_MASK, (void **)&HCPtrGuestCR3);
1342 AssertRCReturn(rc, NULL);
1343
1344 pPGM->pGst32BitPdR3 = (R3PTRTYPE(PX86PD))HCPtrGuestCR3;
1345# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1346 pPGM->pGst32BitPdR0 = (R0PTRTYPE(PX86PD))HCPtrGuestCR3;
1347# endif
1348
1349 pgmUnlock(pVM);
1350 return pPGM->CTX_SUFF(pGst32BitPd);
1351}
1352
1353
1354/**
1355 * Performs the lazy mapping of the PAE guest PDPT.
1356 *
1357 * @returns Pointer to the mapping.
1358 * @param pPGM The PGM instance data.
1359 */
1360PX86PDPT pgmGstLazyMapPaePDPT(PPGMCPU pPGM)
1361{
1362 Assert(!pPGM->CTX_SUFF(pGstPaePdpt));
1363 PVM pVM = PGMCPU2VM(pPGM);
1364 pgmLock(pVM);
1365
1366 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, pPGM->GCPhysCR3);
1367 AssertReturn(pPage, NULL);
1368
1369 RTHCPTR HCPtrGuestCR3;
1370 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pPGM->GCPhysCR3 & X86_CR3_PAE_PAGE_MASK, (void **)&HCPtrGuestCR3); /** @todo r=bird: This GCPhysR3 masking isn't necessary. */
1371 AssertRCReturn(rc, NULL);
1372
1373 pPGM->pGstPaePdptR3 = (R3PTRTYPE(PX86PDPT))HCPtrGuestCR3;
1374# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1375 pPGM->pGstPaePdptR0 = (R0PTRTYPE(PX86PDPT))HCPtrGuestCR3;
1376# endif
1377
1378 pgmUnlock(pVM);
1379 return pPGM->CTX_SUFF(pGstPaePdpt);
1380}
1381
1382#endif /* IN_RING3 */
1383
1384#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1385/**
1386 * Performs the lazy mapping / updating of a PAE guest PD.
1387 *
1388 * @returns Pointer to the mapping.
1389 * @param pPGM The PGM instance data.
1390 * @param iPdpt Which PD entry to map (0..3).
1391 */
1392PX86PDPAE pgmGstLazyMapPaePD(PPGMCPU pPGM, uint32_t iPdpt)
1393{
1394 PVM pVM = PGMCPU2VM(pPGM);
1395 pgmLock(pVM);
1396
1397 PX86PDPT pGuestPDPT = pPGM->CTX_SUFF(pGstPaePdpt);
1398 Assert(pGuestPDPT);
1399 Assert(pGuestPDPT->a[iPdpt].n.u1Present);
1400 RTGCPHYS GCPhys = pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK;
1401 bool const fChanged = pPGM->aGCPhysGstPaePDs[iPdpt] != GCPhys;
1402
1403 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
1404 if (RT_LIKELY(pPage))
1405 {
1406 int rc = VINF_SUCCESS;
1407 RTRCPTR RCPtr = NIL_RTRCPTR;
1408 RTHCPTR HCPtr = NIL_RTHCPTR;
1409#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1410 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &HCPtr);
1411 AssertRC(rc);
1412#endif
1413 if (RT_SUCCESS(rc) && fChanged)
1414 {
1415 RCPtr = (RTRCPTR)(RTRCUINTPTR)(pVM->pgm.s.GCPtrCR3Mapping + (1 + iPdpt) * PAGE_SIZE);
1416 rc = PGMMap(pVM, (RTRCUINTPTR)RCPtr, PGM_PAGE_GET_HCPHYS(pPage), PAGE_SIZE, 0);
1417 }
1418 if (RT_SUCCESS(rc))
1419 {
1420 pPGM->apGstPaePDsR3[iPdpt] = (R3PTRTYPE(PX86PDPAE))HCPtr;
1421# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1422 pPGM->apGstPaePDsR0[iPdpt] = (R0PTRTYPE(PX86PDPAE))HCPtr;
1423# endif
1424 if (fChanged)
1425 {
1426 pPGM->aGCPhysGstPaePDs[iPdpt] = GCPhys;
1427 pPGM->apGstPaePDsRC[iPdpt] = (RCPTRTYPE(PX86PDPAE))RCPtr;
1428 }
1429
1430 pgmUnlock(pVM);
1431 return pPGM->CTX_SUFF(apGstPaePDs)[iPdpt];
1432 }
1433 }
1434
1435 /* Invalid page or some failure, invalidate the entry. */
1436 pPGM->aGCPhysGstPaePDs[iPdpt] = NIL_RTGCPHYS;
1437 pPGM->apGstPaePDsR3[iPdpt] = 0;
1438# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1439 pPGM->apGstPaePDsR0[iPdpt] = 0;
1440# endif
1441 pPGM->apGstPaePDsRC[iPdpt] = 0;
1442
1443 pgmUnlock(pVM);
1444 return NULL;
1445}
1446#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1447
1448
1449#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R3
1450/**
1451 * Performs the lazy mapping of the AMD64 guest PML4 table.
1452 *
1453 * @returns Pointer to the mapping.
1454 * @param pPGM The PGM instance data.
1455 */
1456PX86PML4 pgmGstLazyMapPml4(PPGMCPU pPGM)
1457{
1458 Assert(!pPGM->CTX_SUFF(pGstAmd64Pml4));
1459 PVM pVM = PGMCPU2VM(pPGM);
1460 pgmLock(pVM);
1461
1462 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, pPGM->GCPhysCR3);
1463 AssertReturn(pPage, NULL);
1464
1465 RTHCPTR HCPtrGuestCR3;
1466 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pPGM->GCPhysCR3 & X86_CR3_AMD64_PAGE_MASK, (void **)&HCPtrGuestCR3); /** @todo r=bird: This GCPhysCR3 masking isn't necessary. */
1467 AssertRCReturn(rc, NULL);
1468
1469 pPGM->pGstAmd64Pml4R3 = (R3PTRTYPE(PX86PML4))HCPtrGuestCR3;
1470# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1471 pPGM->pGstAmd64Pml4R0 = (R0PTRTYPE(PX86PML4))HCPtrGuestCR3;
1472# endif
1473
1474 pgmUnlock(pVM);
1475 return pPGM->CTX_SUFF(pGstAmd64Pml4);
1476}
1477#endif /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R3 */
1478
1479
1480/**
1481 * Gets the specified page directory pointer table entry.
1482 *
1483 * @returns PDP entry
1484 * @param pVCpu VMCPU handle.
1485 * @param iPdpt PDPT index
1486 */
1487VMMDECL(X86PDPE) PGMGstGetPaePDPtr(PVMCPU pVCpu, unsigned iPdpt)
1488{
1489 Assert(iPdpt <= 3);
1490 return pgmGstGetPaePDPTPtr(&pVCpu->pgm.s)->a[iPdpt & 3];
1491}
1492
1493
1494/**
1495 * Gets the current CR3 register value for the shadow memory context.
1496 * @returns CR3 value.
1497 * @param pVCpu VMCPU handle.
1498 */
1499VMMDECL(RTHCPHYS) PGMGetHyperCR3(PVMCPU pVCpu)
1500{
1501 PPGMPOOLPAGE pPoolPage = pVCpu->pgm.s.CTX_SUFF(pShwPageCR3);
1502 AssertPtrReturn(pPoolPage, 0);
1503 return pPoolPage->Core.Key;
1504}
1505
1506
1507/**
1508 * Gets the current CR3 register value for the nested memory context.
1509 * @returns CR3 value.
1510 * @param pVCpu VMCPU handle.
1511 */
1512VMMDECL(RTHCPHYS) PGMGetNestedCR3(PVMCPU pVCpu, PGMMODE enmShadowMode)
1513{
1514 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1515 return pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key;
1516}
1517
1518
1519/**
1520 * Gets the current CR3 register value for the HC intermediate memory context.
1521 * @returns CR3 value.
1522 * @param pVM The VM handle.
1523 */
1524VMMDECL(RTHCPHYS) PGMGetInterHCCR3(PVM pVM)
1525{
1526 switch (pVM->pgm.s.enmHostMode)
1527 {
1528 case SUPPAGINGMODE_32_BIT:
1529 case SUPPAGINGMODE_32_BIT_GLOBAL:
1530 return pVM->pgm.s.HCPhysInterPD;
1531
1532 case SUPPAGINGMODE_PAE:
1533 case SUPPAGINGMODE_PAE_GLOBAL:
1534 case SUPPAGINGMODE_PAE_NX:
1535 case SUPPAGINGMODE_PAE_GLOBAL_NX:
1536 return pVM->pgm.s.HCPhysInterPaePDPT;
1537
1538 case SUPPAGINGMODE_AMD64:
1539 case SUPPAGINGMODE_AMD64_GLOBAL:
1540 case SUPPAGINGMODE_AMD64_NX:
1541 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
1542 return pVM->pgm.s.HCPhysInterPaePDPT;
1543
1544 default:
1545 AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode));
1546 return ~0;
1547 }
1548}
1549
1550
1551/**
1552 * Gets the current CR3 register value for the RC intermediate memory context.
1553 * @returns CR3 value.
1554 * @param pVM The VM handle.
1555 * @param pVCpu VMCPU handle.
1556 */
1557VMMDECL(RTHCPHYS) PGMGetInterRCCR3(PVM pVM, PVMCPU pVCpu)
1558{
1559 switch (pVCpu->pgm.s.enmShadowMode)
1560 {
1561 case PGMMODE_32_BIT:
1562 return pVM->pgm.s.HCPhysInterPD;
1563
1564 case PGMMODE_PAE:
1565 case PGMMODE_PAE_NX:
1566 return pVM->pgm.s.HCPhysInterPaePDPT;
1567
1568 case PGMMODE_AMD64:
1569 case PGMMODE_AMD64_NX:
1570 return pVM->pgm.s.HCPhysInterPaePML4;
1571
1572 case PGMMODE_EPT:
1573 case PGMMODE_NESTED:
1574 return 0; /* not relevant */
1575
1576 default:
1577 AssertMsgFailed(("enmShadowMode=%d\n", pVCpu->pgm.s.enmShadowMode));
1578 return ~0;
1579 }
1580}
1581
1582
1583/**
1584 * Gets the CR3 register value for the 32-Bit intermediate memory context.
1585 * @returns CR3 value.
1586 * @param pVM The VM handle.
1587 */
1588VMMDECL(RTHCPHYS) PGMGetInter32BitCR3(PVM pVM)
1589{
1590 return pVM->pgm.s.HCPhysInterPD;
1591}
1592
1593
1594/**
1595 * Gets the CR3 register value for the PAE intermediate memory context.
1596 * @returns CR3 value.
1597 * @param pVM The VM handle.
1598 */
1599VMMDECL(RTHCPHYS) PGMGetInterPaeCR3(PVM pVM)
1600{
1601 return pVM->pgm.s.HCPhysInterPaePDPT;
1602}
1603
1604
1605/**
1606 * Gets the CR3 register value for the AMD64 intermediate memory context.
1607 * @returns CR3 value.
1608 * @param pVM The VM handle.
1609 */
1610VMMDECL(RTHCPHYS) PGMGetInterAmd64CR3(PVM pVM)
1611{
1612 return pVM->pgm.s.HCPhysInterPaePML4;
1613}
1614
1615
1616/**
1617 * Performs and schedules necessary updates following a CR3 load or reload.
1618 *
1619 * This will normally involve mapping the guest PD or nPDPT.
1620 *
1621 * @returns VBox status code.
1622 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
1623 * safely be ignored and overridden since the FF will be set too then.
1624 * @param pVCpu VMCPU handle.
1625 * @param cr3 The new cr3.
1626 * @param fGlobal Indicates whether this is a global flush or not.
1627 */
1628VMMDECL(int) PGMFlushTLB(PVMCPU pVCpu, uint64_t cr3, bool fGlobal)
1629{
1630 PVM pVM = pVCpu->CTX_SUFF(pVM);
1631
1632 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLB), a);
1633
1634 /*
1635 * Always flag the necessary updates; necessary for hardware acceleration
1636 */
1637 /** @todo optimize this, it shouldn't always be necessary. */
1638 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
1639 if (fGlobal)
1640 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1641 LogFlow(("PGMFlushTLB: cr3=%RX64 OldCr3=%RX64 fGlobal=%d\n", cr3, pVCpu->pgm.s.GCPhysCR3, fGlobal));
1642
1643 /*
1644 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
1645 */
1646 int rc = VINF_SUCCESS;
1647 RTGCPHYS GCPhysCR3;
1648 switch (pVCpu->pgm.s.enmGuestMode)
1649 {
1650 case PGMMODE_PAE:
1651 case PGMMODE_PAE_NX:
1652 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1653 break;
1654 case PGMMODE_AMD64:
1655 case PGMMODE_AMD64_NX:
1656 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
1657 break;
1658 default:
1659 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1660 break;
1661 }
1662
1663 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
1664 {
1665 RTGCPHYS GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
1666 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
1667 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
1668 if (RT_LIKELY(rc == VINF_SUCCESS))
1669 {
1670 if (!pVM->pgm.s.fMappingsFixed)
1671 {
1672 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1673 }
1674 }
1675 else
1676 {
1677 AssertMsg(rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
1678 Assert(VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_PGM_SYNC_CR3));
1679 pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3;
1680 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MAP_CR3;
1681 if (!pVM->pgm.s.fMappingsFixed)
1682 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;
1683 }
1684
1685 if (fGlobal)
1686 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLBNewCR3Global));
1687 else
1688 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLBNewCR3));
1689 }
1690 else
1691 {
1692 /*
1693 * Check if we have a pending update of the CR3 monitoring.
1694 */
1695 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
1696 {
1697 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1698 Assert(!pVM->pgm.s.fMappingsFixed);
1699 }
1700 if (fGlobal)
1701 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLBSameCR3Global));
1702 else
1703 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLBSameCR3));
1704 }
1705
1706 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLB), a);
1707 return rc;
1708}
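/*
 * Editor's note: illustrative sketch, not part of the original file. It mirrors
 * the guest-mode dependent CR3 masking used in PGMFlushTLB, PGMUpdateCR3 and
 * PGMSyncCR3 (a PAE CR3 only requires 32-byte alignment, the others are page
 * aligned); the helper name is hypothetical.
 */
#if 0 /* illustrative sketch only */
static RTGCPHYS pgmExampleMaskCR3(PGMMODE enmGuestMode, uint64_t cr3)
{
    switch (enmGuestMode)
    {
        case PGMMODE_PAE:
        case PGMMODE_PAE_NX:
            return (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
        case PGMMODE_AMD64:
        case PGMMODE_AMD64_NX:
            return (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
        default:
            return (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
    }
}
#endif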
1709
1710
1711/**
1712 * Performs and schedules necessary updates following a CR3 load or reload when
1713 * using nested or extended paging.
1714 *
1715 * This API is an alternative to PGMFlushTLB that avoids actually flushing the
1716 * TLB and triggering a SyncCR3.
1717 *
1718 * This will normally involve mapping the guest PD or nPDPT.
1719 *
1720 * @returns VBox status code.
1721 * @retval VINF_SUCCESS.
1722 * @retval (If applied when not in nested mode: VINF_PGM_SYNC_CR3 if monitoring
1723 * requires a CR3 sync. This can safely be ignored and overridden since
1724 * the FF will be set too then.)
1725 * @param pVCpu VMCPU handle.
1726 * @param cr3 The new cr3.
1727 */
1728VMMDECL(int) PGMUpdateCR3(PVMCPU pVCpu, uint64_t cr3)
1729{
1730 PVM pVM = pVCpu->CTX_SUFF(pVM);
1731
1732 LogFlow(("PGMUpdateCR3: cr3=%RX64 OldCr3=%RX64\n", cr3, pVCpu->pgm.s.GCPhysCR3));
1733
1734 /* We assume we're only called in nested paging mode. */
1735 Assert(pVM->pgm.s.fMappingsFixed);
1736 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3));
1737 Assert(HWACCMIsNestedPagingActive(pVM) || pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
1738
1739 /*
1740 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
1741 */
1742 int rc = VINF_SUCCESS;
1743 RTGCPHYS GCPhysCR3;
1744 switch (pVCpu->pgm.s.enmGuestMode)
1745 {
1746 case PGMMODE_PAE:
1747 case PGMMODE_PAE_NX:
1748 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1749 break;
1750 case PGMMODE_AMD64:
1751 case PGMMODE_AMD64_NX:
1752 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
1753 break;
1754 default:
1755 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1756 break;
1757 }
1758 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
1759 {
1760 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
1761 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
1762 AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */ /** @todo this isn't true for the mac, but we need hw to test/fix this. */
1763 }
1764 return rc;
1765}
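
/*
 * Illustrative usage sketch: how a CR3 write intercept might dispatch between
 * PGMUpdateCR3 (nested/EPT paging) and PGMFlushTLB (shadow paging).  The
 * helper name and the fNestedPaging parameter are hypothetical, and
 * PGMFlushTLB is assumed to take (pVCpu, cr3, fGlobal) as used above.
 */
#if 0 /* illustrative sketch only */
static int pgmSketchHandleCr3Write(PVMCPU pVCpu, uint64_t uNewCr3, bool fNestedPaging)
{
    if (fNestedPaging)
        /* Nothing is shadowed; just refresh the cached guest CR3 mapping. */
        return PGMUpdateCR3(pVCpu, uNewCr3);

    /* Shadow paging: emulate the TLB flushing semantics of a MOV CR3,
       which only flushes non-global entries (hence fGlobal = false). */
    return PGMFlushTLB(pVCpu, uNewCr3, false /*fGlobal*/);
}
#endif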
1766
1767
1768/**
1769 * Synchronize the paging structures.
1770 *
1771 * This function is called in response to the VMCPU_FF_PGM_SYNC_CR3 and
1772 * VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL flags. Those two force action flags are set
1773 * in several places, most importantly whenever the CR3 is loaded.
1774 *
1775 * @returns VBox status code.
1776 * @param pVCpu VMCPU handle.
1777 * @param cr0 Guest context CR0 register.
1778 * @param cr3 Guest context CR3 register.
1779 * @param cr4 Guest context CR4 register.
1780 * @param fGlobal Whether to include global page directories or not.
1781 */
1782VMMDECL(int) PGMSyncCR3(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
1783{
1784 PVM pVM = pVCpu->CTX_SUFF(pVM);
1785 int rc;
1786
1787#ifdef PGMPOOL_WITH_MONITORING
1788 /*
1789 * The pool may have pending stuff and even require a return to ring-3 to
1790 * clear the whole thing.
1791 */
1792 rc = pgmPoolSyncCR3(pVCpu);
1793 if (rc != VINF_SUCCESS)
1794 return rc;
1795#endif
1796
1797 /*
1798 * We might be called when we shouldn't.
1799 *
1800 * The mode switching will ensure that the PD is resynced
1801 * after every mode switch. So, if we find ourselves here
1802 * when in protected or real mode we can safely disable the
1803 * FF and return immediately.
1804 */
1805 if (pVCpu->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
1806 {
1807 Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
1808 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1809 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
1810 return VINF_SUCCESS;
1811 }
1812
1813 /* If global pages are not supported, then all flushes are global. */
1814 if (!(cr4 & X86_CR4_PGE))
1815 fGlobal = true;
1816 LogFlow(("PGMSyncCR3: cr0=%RX64 cr3=%RX64 cr4=%RX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
1817 VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)));
1818
1819 /*
1820 * Check if we need to finish an aborted MapCR3 call (see PGMFlushTLB).
1821 * This should be done before SyncCR3.
1822 */
1823 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MAP_CR3)
1824 {
1825 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MAP_CR3;
1826
1827 RTGCPHYS GCPhysCR3Old = pVCpu->pgm.s.GCPhysCR3;
1828 RTGCPHYS GCPhysCR3;
1829 switch (pVCpu->pgm.s.enmGuestMode)
1830 {
1831 case PGMMODE_PAE:
1832 case PGMMODE_PAE_NX:
1833 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1834 break;
1835 case PGMMODE_AMD64:
1836 case PGMMODE_AMD64_NX:
1837 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
1838 break;
1839 default:
1840 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1841 break;
1842 }
1843
1844 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
1845 {
1846 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
1847 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
1848 }
1849#ifdef IN_RING3
1850 if (rc == VINF_PGM_SYNC_CR3)
1851 rc = pgmPoolSyncCR3(pVCpu);
1852#else
1853 if (rc == VINF_PGM_SYNC_CR3)
1854 {
1855 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3Old;
1856 return rc;
1857 }
1858#endif
1859 AssertRCReturn(rc, rc);
1860 AssertRCSuccessReturn(rc, VERR_INTERNAL_ERROR);
1861 }
1862
1863 /*
1864 * Let the 'Bth' function do the work and we'll just keep track of the flags.
1865 */
1866 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
1867 rc = PGM_BTH_PFN(SyncCR3, pVCpu)(pVCpu, cr0, cr3, cr4, fGlobal);
1868 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
1869 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
1870 if (rc == VINF_SUCCESS)
1871 {
1872 if (!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
1873 {
1874 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1875 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
1876 }
1877
1878 /*
1879 * Check if we have a pending update of the CR3 monitoring.
1880 */
1881 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
1882 {
1883 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1884 Assert(!pVM->pgm.s.fMappingsFixed);
1885 }
1886 }
1887
1888 /*
1889 * Now flush the CR3 (guest context).
1890 */
1891 if (rc == VINF_SUCCESS)
1892 PGM_INVL_VCPU_TLBS(pVCpu);
1893 return rc;
1894}
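
/*
 * Illustrative usage sketch: servicing the sync force-action flags before
 * resuming guest execution.  The helper name is hypothetical and the control
 * register values are passed in; real callers would fetch them from CPUM.
 */
#if 0 /* illustrative sketch only */
static int pgmSketchServiceSyncCR3(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4)
{
    if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
    {
        /* A global sync is only required when VMCPU_FF_PGM_SYNC_CR3 itself is set. */
        bool fGlobal = VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
        int rc = PGMSyncCR3(pVCpu, cr0, cr3, cr4, fGlobal);
        if (rc != VINF_SUCCESS)
            return rc; /* e.g. VINF_PGM_SYNC_CR3: the pool needs ring-3 attention. */
    }
    return VINF_SUCCESS;
}
#endif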
1895
1896
1897/**
1898 * Called whenever CR0 or CR4 changes in a way which may affect
1899 * the paging mode.
1900 *
1901 * @returns VBox status code, with the following informational code for
1902 * VM scheduling.
1903 * @retval VINF_SUCCESS if there was no change, or it was successfully dealt with.
1904 * @retval VINF_PGM_CHANGE_MODE if we're in RC or R0 and the mode changes.
1905 * (I.e. not in R3.)
1906 * @retval VINF_EM_SUSPEND or VINF_EM_OFF on a fatal runtime error. (R3 only)
1907 *
1908 * @param pVCpu VMCPU handle.
1909 * @param cr0 The new cr0.
1910 * @param cr4 The new cr4.
1911 * @param efer The new extended feature enable register.
1912 */
1913VMMDECL(int) PGMChangeMode(PVMCPU pVCpu, uint64_t cr0, uint64_t cr4, uint64_t efer)
1914{
1915 PVM pVM = pVCpu->CTX_SUFF(pVM);
1916 PGMMODE enmGuestMode;
1917
1918 /*
1919 * Calc the new guest mode.
1920 */
1921 if (!(cr0 & X86_CR0_PE))
1922 enmGuestMode = PGMMODE_REAL;
1923 else if (!(cr0 & X86_CR0_PG))
1924 enmGuestMode = PGMMODE_PROTECTED;
1925 else if (!(cr4 & X86_CR4_PAE))
1926 enmGuestMode = PGMMODE_32_BIT;
1927 else if (!(efer & MSR_K6_EFER_LME))
1928 {
1929 if (!(efer & MSR_K6_EFER_NXE))
1930 enmGuestMode = PGMMODE_PAE;
1931 else
1932 enmGuestMode = PGMMODE_PAE_NX;
1933 }
1934 else
1935 {
1936 if (!(efer & MSR_K6_EFER_NXE))
1937 enmGuestMode = PGMMODE_AMD64;
1938 else
1939 enmGuestMode = PGMMODE_AMD64_NX;
1940 }
1941
1942 /*
1943 * Did it change?
1944 */
1945 if (pVCpu->pgm.s.enmGuestMode == enmGuestMode)
1946 return VINF_SUCCESS;
1947
1948 /* Flush the TLB */
1949 PGM_INVL_VCPU_TLBS(pVCpu);
1950
1951#ifdef IN_RING3
1952 return PGMR3ChangeMode(pVM, pVCpu, enmGuestMode);
1953#else
1954 LogFlow(("PGMChangeMode: returns VINF_PGM_CHANGE_MODE.\n"));
1955 return VINF_PGM_CHANGE_MODE;
1956#endif
1957}
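
/*
 * Illustrative usage sketch: notifying PGM after the guest has written CR0,
 * CR4 or EFER.  The helper name is hypothetical; in RC/R0 the informational
 * VINF_PGM_CHANGE_MODE status is simply propagated so ring-3 can perform the
 * actual mode switch.
 */
#if 0 /* illustrative sketch only */
static int pgmSketchAfterControlRegWrite(PVMCPU pVCpu, uint64_t cr0, uint64_t cr4, uint64_t efer)
{
    int rc = PGMChangeMode(pVCpu, cr0, cr4, efer);
    if (rc == VINF_PGM_CHANGE_MODE)
        return rc; /* Reschedule to ring-3 for PGMR3ChangeMode. */
    AssertRC(rc);
    return rc;
}
#endif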
1958
1959
1960/**
1961 * Gets the current guest paging mode.
1962 *
1963 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
1964 *
1965 * @returns The current paging mode.
1966 * @param pVCpu VMCPU handle.
1967 */
1968VMMDECL(PGMMODE) PGMGetGuestMode(PVMCPU pVCpu)
1969{
1970 return pVCpu->pgm.s.enmGuestMode;
1971}
1972
1973
1974/**
1975 * Gets the current shadow paging mode.
1976 *
1977 * @returns The current paging mode.
1978 * @param pVCpu VMCPU handle.
1979 */
1980VMMDECL(PGMMODE) PGMGetShadowMode(PVMCPU pVCpu)
1981{
1982 return pVCpu->pgm.s.enmShadowMode;
1983}
1984
1985/**
1986 * Gets the current host paging mode.
1987 *
1988 * @returns The current paging mode.
1989 * @param pVM The VM handle.
1990 */
1991VMMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
1992{
1993 switch (pVM->pgm.s.enmHostMode)
1994 {
1995 case SUPPAGINGMODE_32_BIT:
1996 case SUPPAGINGMODE_32_BIT_GLOBAL:
1997 return PGMMODE_32_BIT;
1998
1999 case SUPPAGINGMODE_PAE:
2000 case SUPPAGINGMODE_PAE_GLOBAL:
2001 return PGMMODE_PAE;
2002
2003 case SUPPAGINGMODE_PAE_NX:
2004 case SUPPAGINGMODE_PAE_GLOBAL_NX:
2005 return PGMMODE_PAE_NX;
2006
2007 case SUPPAGINGMODE_AMD64:
2008 case SUPPAGINGMODE_AMD64_GLOBAL:
2009 return PGMMODE_AMD64;
2010
2011 case SUPPAGINGMODE_AMD64_NX:
2012 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
2013 return PGMMODE_AMD64_NX;
2014
2015 default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
2016 }
2017
2018 return PGMMODE_INVALID;
2019}
2020
2021
2022/**
2023 * Get mode name.
2024 *
2025 * @returns read-only name string.
2026 * @param enmMode The mode whose name is desired.
2027 */
2028VMMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
2029{
2030 switch (enmMode)
2031 {
2032 case PGMMODE_REAL: return "Real";
2033 case PGMMODE_PROTECTED: return "Protected";
2034 case PGMMODE_32_BIT: return "32-bit";
2035 case PGMMODE_PAE: return "PAE";
2036 case PGMMODE_PAE_NX: return "PAE+NX";
2037 case PGMMODE_AMD64: return "AMD64";
2038 case PGMMODE_AMD64_NX: return "AMD64+NX";
2039 case PGMMODE_NESTED: return "Nested";
2040 case PGMMODE_EPT: return "EPT";
2041 default: return "unknown mode value";
2042 }
2043}
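
/*
 * Illustrative usage sketch: combining the getters above with PGMGetModeName
 * for logging.  The helper name is hypothetical.
 */
#if 0 /* illustrative sketch only */
static void pgmSketchLogModes(PVMCPU pVCpu)
{
    Log(("PGM modes: guest=%s shadow=%s\n",
         PGMGetModeName(PGMGetGuestMode(pVCpu)), PGMGetModeName(PGMGetShadowMode(pVCpu))));
}
#endif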
2044
2045
2046/**
2047 * Check if the PGM lock is currently taken.
2048 *
2049 * @returns bool locked/not locked
2050 * @param pVM The VM to operate on.
2051 */
2052VMMDECL(bool) PGMIsLocked(PVM pVM)
2053{
2054 return PDMCritSectIsOwned(&pVM->pgm.s.CritSect);
2055}
2056
2057
2058/**
2059 * Check if this VCPU currently owns the PGM lock.
2060 *
2061 * @returns bool owner/not owner
2062 * @param pVM The VM to operate on.
2063 */
2064VMMDECL(bool) PGMIsLockOwner(PVM pVM)
2065{
2066 return PDMCritSectIsOwner(&pVM->pgm.s.CritSect);
2067}
2068
2069
2070/**
2071 * Acquire the PGM lock.
2072 *
2073 * @returns VBox status code
2074 * @param pVM The VM to operate on.
2075 */
2076int pgmLock(PVM pVM)
2077{
2078 int rc = PDMCritSectEnter(&pVM->pgm.s.CritSect, VERR_SEM_BUSY);
2079#ifdef IN_RC
2080 if (rc == VERR_SEM_BUSY)
2081 rc = VMMGCCallHost(pVM, VMMCALLHOST_PGM_LOCK, 0);
2082#elif defined(IN_RING0)
2083 if (rc == VERR_SEM_BUSY)
2084 rc = VMMR0CallHost(pVM, VMMCALLHOST_PGM_LOCK, 0);
2085#endif
2086 AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc));
2087 return rc;
2088}
2089
2090
2091/**
2092 * Release the PGM lock.
2093 *
2094 * @returns VBox status code
2095 * @param pVM The VM to operate on.
2096 */
2097void pgmUnlock(PVM pVM)
2098{
2099 PDMCritSectLeave(&pVM->pgm.s.CritSect);
2100}
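
/*
 * Illustrative usage sketch: the usual pattern for code needing the PGM lock.
 * The helper name and the work done inside the critical section are
 * hypothetical.
 */
#if 0 /* illustrative sketch only */
static int pgmSketchDoSomethingLocked(PVM pVM)
{
    int rc = pgmLock(pVM);
    AssertRCReturn(rc, rc);
    Assert(PGMIsLockOwner(pVM));
    /* ... access PGM structures that require the lock ... */
    pgmUnlock(pVM);
    return VINF_SUCCESS;
}
#endif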
2101
2102#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
2103
2104/**
2105 * Temporarily maps one guest page specified by GC physical address.
2106 * These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
2107 *
2108 * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
2109 * reused after 8 mappings (or perhaps a few more if you score with the cache).
2110 *
2111 * @returns VBox status.
2112 * @param pVM VM handle.
2113 * @param GCPhys GC Physical address of the page.
2114 * @param ppv Where to store the address of the mapping.
2115 */
2116VMMDECL(int) PGMDynMapGCPage(PVM pVM, RTGCPHYS GCPhys, void **ppv)
2117{
2118 AssertMsg(!(GCPhys & PAGE_OFFSET_MASK), ("GCPhys=%RGp\n", GCPhys));
2119
2120 /*
2121 * Get the ram range.
2122 */
2123 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
2124 while (pRam && GCPhys - pRam->GCPhys >= pRam->cb)
2125 pRam = pRam->CTX_SUFF(pNext);
2126 if (!pRam)
2127 {
2128 AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
2129 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2130 }
2131
2132 /*
2133 * Pass it on to PGMDynMapHCPage.
2134 */
2135 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT]);
2136 //Log(("PGMDynMapGCPage: GCPhys=%RGp HCPhys=%RHp\n", GCPhys, HCPhys));
2137#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
2138 pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
2139#else
2140 PGMDynMapHCPage(pVM, HCPhys, ppv);
2141#endif
2142 return VINF_SUCCESS;
2143}
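
/*
 * Illustrative usage sketch: temporarily mapping a page-aligned guest
 * physical page and peeking at its first dword.  The helper name is
 * hypothetical; note that the mapping is short-lived because the dynamic
 * mapping area is recycled, so the pointer must not be cached.
 */
#if 0 /* illustrative sketch only */
static uint32_t pgmSketchPeekPageDword(PVM pVM, RTGCPHYS GCPhysPage)
{
    void *pvPage;
    int rc = PGMDynMapGCPage(pVM, GCPhysPage, &pvPage); /* GCPhysPage must be page aligned. */
    if (RT_FAILURE(rc))
        return UINT32_MAX;
    return *(uint32_t *)pvPage;
}
#endif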
2144
2145
2146/**
2147 * Temporarily maps one guest page specified by unaligned GC physical address.
2148 * These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
2149 *
2150 * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
2151 * reused after 8 mappings (or perhaps a few more if you score with the cache).
2152 *
2153 * The caller is aware that only the specified page is mapped and that really bad things
2154 * will happen if writing beyond the page!
2155 *
2156 * @returns VBox status.
2157 * @param pVM VM handle.
2158 * @param GCPhys GC Physical address within the page to be mapped.
2159 * @param ppv Where to store the address of the mapping address corresponding to GCPhys.
2160 */
2161VMMDECL(int) PGMDynMapGCPageOff(PVM pVM, RTGCPHYS GCPhys, void **ppv)
2162{
2163 /*
2164 * Get the ram range.
2165 */
2166 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
2167 while (pRam && GCPhys - pRam->GCPhys >= pRam->cb)
2168 pRam = pRam->CTX_SUFF(pNext);
2169 if (!pRam)
2170 {
2171 AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
2172 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2173 }
2174
2175 /*
2176 * Pass it on to PGMDynMapHCPage.
2177 */
2178 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT]);
2179#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
2180 pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
2181#else
2182 PGMDynMapHCPage(pVM, HCPhys, ppv);
2183#endif
2184 *ppv = (void *)((uintptr_t)*ppv | (GCPhys & PAGE_OFFSET_MASK));
2185 return VINF_SUCCESS;
2186}
2187
2188# ifdef IN_RC
2189
2190/**
2191 * Temporarily maps one host page specified by HC physical address.
2192 *
2193 * Be WARNED that the dynamic page mapping area is small, 16 pages, thus the space is
2194 * reused after 16 mappings (or perhaps a few more if you score with the cache).
2195 *
2196 * @returns VINF_SUCCESS, will bail out to ring-3 on failure.
2197 * @param pVM VM handle.
2198 * @param HCPhys HC Physical address of the page.
2199 * @param ppv Where to store the address of the mapping. This is the
2200 * address of the PAGE not the exact address corresponding
2201 * to HCPhys. Use PGMDynMapHCPageOff if you care for the
2202 * page offset.
2203 */
2204VMMDECL(int) PGMDynMapHCPage(PVM pVM, RTHCPHYS HCPhys, void **ppv)
2205{
2206 AssertMsg(!(HCPhys & PAGE_OFFSET_MASK), ("HCPhys=%RHp\n", HCPhys));
2207
2208 /*
2209 * Check the cache.
2210 */
2211 register unsigned iCache;
2212    for (iCache = 0; iCache < RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache); iCache++)
2213 {
2214 static const uint8_t au8Trans[MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT][RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache)] =
2215 {
2216 { 0, 9, 10, 11, 12, 13, 14, 15},
2217 { 0, 1, 10, 11, 12, 13, 14, 15},
2218 { 0, 1, 2, 11, 12, 13, 14, 15},
2219 { 0, 1, 2, 3, 12, 13, 14, 15},
2220 { 0, 1, 2, 3, 4, 13, 14, 15},
2221 { 0, 1, 2, 3, 4, 5, 14, 15},
2222 { 0, 1, 2, 3, 4, 5, 6, 15},
2223 { 0, 1, 2, 3, 4, 5, 6, 7},
2224 { 8, 1, 2, 3, 4, 5, 6, 7},
2225 { 8, 9, 2, 3, 4, 5, 6, 7},
2226 { 8, 9, 10, 3, 4, 5, 6, 7},
2227 { 8, 9, 10, 11, 4, 5, 6, 7},
2228 { 8, 9, 10, 11, 12, 5, 6, 7},
2229 { 8, 9, 10, 11, 12, 13, 6, 7},
2230 { 8, 9, 10, 11, 12, 13, 14, 7},
2231 { 8, 9, 10, 11, 12, 13, 14, 15},
2232 };
2233 AssertCompile(RT_ELEMENTS(au8Trans) == 16);
2234 AssertCompile(RT_ELEMENTS(au8Trans[0]) == 8);
2235
2236 if (pVM->pgm.s.aHCPhysDynPageMapCache[iCache] == HCPhys)
2237 {
2238 int iPage = au8Trans[pVM->pgm.s.iDynPageMapLast][iCache];
2239
2240 /* The cache can get out of sync with locked entries. (10 locked, 2 overwrites its cache position, last = 11, lookup 2 -> page 10 instead of 2) */
2241 if ((pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage].u & X86_PTE_PG_MASK) == HCPhys)
2242 {
2243 void *pv = pVM->pgm.s.pbDynPageMapBaseGC + (iPage << PAGE_SHIFT);
2244 *ppv = pv;
2245 STAM_COUNTER_INC(&pVM->pgm.s.StatRCDynMapCacheHits);
2246 Log4(("PGMGCDynMapHCPage: HCPhys=%RHp pv=%p iPage=%d iCache=%d\n", HCPhys, pv, iPage, iCache));
2247 return VINF_SUCCESS;
2248 }
2249 LogFlow(("Out of sync entry %d\n", iPage));
2250 }
2251 }
2252 AssertCompile(RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) == 8);
2253 AssertCompile((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) == 16);
2254 STAM_COUNTER_INC(&pVM->pgm.s.StatRCDynMapCacheMisses);
2255
2256 /*
2257 * Update the page tables.
2258 */
2259 unsigned iPage = pVM->pgm.s.iDynPageMapLast;
2260 unsigned i;
2261 for (i = 0; i < (MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT); i++)
2262 {
2263 pVM->pgm.s.iDynPageMapLast = iPage = (iPage + 1) & ((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) - 1);
2264 if (!pVM->pgm.s.aLockedDynPageMapCache[iPage])
2265 break;
2266 iPage++;
2267 }
2268 AssertRelease(i != (MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT));
2269
2270 pVM->pgm.s.aHCPhysDynPageMapCache[iPage & (RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) - 1)] = HCPhys;
2271 pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage].u = (uint32_t)HCPhys | X86_PTE_P | X86_PTE_A | X86_PTE_D;
2272 pVM->pgm.s.paDynPageMapPaePTEsGC[iPage].u = HCPhys | X86_PTE_P | X86_PTE_A | X86_PTE_D;
2273 pVM->pgm.s.aLockedDynPageMapCache[iPage] = 0;
2274
2275 void *pv = pVM->pgm.s.pbDynPageMapBaseGC + (iPage << PAGE_SHIFT);
2276 *ppv = pv;
2277 ASMInvalidatePage(pv);
2278 Log4(("PGMGCDynMapHCPage: HCPhys=%RHp pv=%p iPage=%d\n", HCPhys, pv, iPage));
2279 return VINF_SUCCESS;
2280}
2281
2282
2283/**
2284 * Temporarily lock a dynamic page to prevent it from being reused.
2285 *
2286 * @param pVM VM handle.
2287 * @param GCPage GC address of page
2288 */
2289VMMDECL(void) PGMDynLockHCPage(PVM pVM, RCPTRTYPE(uint8_t *) GCPage)
2290{
2291 unsigned iPage;
2292
2293 Assert(GCPage >= pVM->pgm.s.pbDynPageMapBaseGC && GCPage < (pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE));
2294 iPage = ((uintptr_t)(GCPage - pVM->pgm.s.pbDynPageMapBaseGC)) >> PAGE_SHIFT;
2295 ASMAtomicIncU32(&pVM->pgm.s.aLockedDynPageMapCache[iPage]);
2296 Log4(("PGMDynLockHCPage %RRv iPage=%d\n", GCPage, iPage));
2297}
2298
2299
2300/**
2301 * Unlock a dynamic page
2302 *
2303 * @param pVM VM handle.
2304 * @param GCPage GC address of page
2305 */
2306VMMDECL(void) PGMDynUnlockHCPage(PVM pVM, RCPTRTYPE(uint8_t *) GCPage)
2307{
2308 unsigned iPage;
2309
2310 AssertCompile(RT_ELEMENTS(pVM->pgm.s.aLockedDynPageMapCache) == 2* RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache));
2311 AssertCompileMemberSize(VM, pgm.s.aLockedDynPageMapCache, sizeof(uint32_t) * (MM_HYPER_DYNAMIC_SIZE >> (PAGE_SHIFT)));
2312
2313 Assert(GCPage >= pVM->pgm.s.pbDynPageMapBaseGC && GCPage < (pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE));
2314 iPage = ((uintptr_t)(GCPage - pVM->pgm.s.pbDynPageMapBaseGC)) >> PAGE_SHIFT;
2315 Assert(pVM->pgm.s.aLockedDynPageMapCache[iPage]);
2316 ASMAtomicDecU32(&pVM->pgm.s.aLockedDynPageMapCache[iPage]);
2317 Log4(("PGMDynUnlockHCPage %RRv iPage=%d\n", GCPage, iPage));
2318}
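
/*
 * Illustrative usage sketch: pinning a dynamic mapping while it is in use so
 * that later PGMDynMapHCPage calls cannot recycle the slot.  The helper name
 * is hypothetical; pbPage is assumed to come from one of the PGMDynMap*
 * functions above.
 */
#if 0 /* illustrative sketch only */
static void pgmSketchUsePinnedMapping(PVM pVM, uint8_t *pbPage)
{
    PGMDynLockHCPage(pVM, pbPage);
    /* ... use pbPage, possibly performing further dynamic mappings ... */
    PGMDynUnlockHCPage(pVM, pbPage);
}
#endif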
2319
2320
2321# ifdef VBOX_STRICT
2322/**
2323 * Check for lock leaks.
2324 *
2325 * @param pVM VM handle.
2326 */
2327VMMDECL(void) PGMDynCheckLocks(PVM pVM)
2328{
2329    for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.aLockedDynPageMapCache); i++)
2330 Assert(!pVM->pgm.s.aLockedDynPageMapCache[i]);
2331}
2332# endif /* VBOX_STRICT */
2333
2334# endif /* IN_RC */
2335#endif /* IN_RC || VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
2336
2337#if !defined(IN_R0) || defined(LOG_ENABLED)
2338
2339/** Format handler for PGMPAGE.
2340 * @copydoc FNRTSTRFORMATTYPE */
2341static DECLCALLBACK(size_t) pgmFormatTypeHandlerPage(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
2342 const char *pszType, void const *pvValue,
2343 int cchWidth, int cchPrecision, unsigned fFlags,
2344 void *pvUser)
2345{
2346 size_t cch;
2347 PCPGMPAGE pPage = (PCPGMPAGE)pvValue;
2348 if (VALID_PTR(pPage))
2349 {
2350 char szTmp[64+80];
2351
2352 cch = 0;
2353
2354 /* The single char state stuff. */
2355 static const char s_achPageStates[4] = { 'Z', 'A', 'W', 'S' };
2356 szTmp[cch++] = s_achPageStates[PGM_PAGE_GET_STATE(pPage)];
2357
2358#define IS_PART_INCLUDED(lvl) ( !(fFlags & RTSTR_F_PRECISION) || cchPrecision == (lvl) || cchPrecision >= (lvl)+10 )
2359 if (IS_PART_INCLUDED(5))
2360 {
2361 static const char s_achHandlerStates[4] = { '-', 't', 'w', 'a' };
2362 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)];
2363 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_VIRT_STATE(pPage)];
2364 }
2365
2366 /* The type. */
2367 if (IS_PART_INCLUDED(4))
2368 {
2369 szTmp[cch++] = ':';
2370 static const char s_achPageTypes[8][4] = { "INV", "RAM", "MI2", "M2A", "SHA", "ROM", "MIO", "BAD" };
2371 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE(pPage)][0];
2372 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE(pPage)][1];
2373 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE(pPage)][2];
2374 }
2375
2376 /* The numbers. */
2377 if (IS_PART_INCLUDED(3))
2378 {
2379 szTmp[cch++] = ':';
2380 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_HCPHYS(pPage), 16, 12, 0, RTSTR_F_ZEROPAD | RTSTR_F_64BIT);
2381 }
2382
2383 if (IS_PART_INCLUDED(2))
2384 {
2385 szTmp[cch++] = ':';
2386 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_PAGEID(pPage), 16, 7, 0, RTSTR_F_ZEROPAD | RTSTR_F_32BIT);
2387 }
2388
2389 if (IS_PART_INCLUDED(6))
2390 {
2391 szTmp[cch++] = ':';
2392 static const char s_achRefs[4] = { '-', 'U', '!', 'L' };
2393 szTmp[cch++] = s_achRefs[PGM_PAGE_GET_TD_CREFS(pPage)];
2394 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_TD_IDX(pPage), 16, 4, 0, RTSTR_F_ZEROPAD | RTSTR_F_16BIT);
2395 }
2396#undef IS_PART_INCLUDED
2397
2398 cch = pfnOutput(pvArgOutput, szTmp, cch);
2399 }
2400 else
2401 cch = pfnOutput(pvArgOutput, "<bad-pgmpage-ptr>", sizeof("<bad-pgmpage-ptr>") - 1);
2402 return cch;
2403}
2404
2405
2406/** Format handler for PGMRAMRANGE.
2407 * @copydoc FNRTSTRFORMATTYPE */
2408static DECLCALLBACK(size_t) pgmFormatTypeHandlerRamRange(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
2409 const char *pszType, void const *pvValue,
2410 int cchWidth, int cchPrecision, unsigned fFlags,
2411 void *pvUser)
2412{
2413 size_t cch;
2414 PGMRAMRANGE const *pRam = (PGMRAMRANGE const *)pvValue;
2415 if (VALID_PTR(pRam))
2416 {
2417 char szTmp[80];
2418 cch = RTStrPrintf(szTmp, sizeof(szTmp), "%RGp-%RGp", pRam->GCPhys, pRam->GCPhysLast);
2419 cch = pfnOutput(pvArgOutput, szTmp, cch);
2420 }
2421 else
2422 cch = pfnOutput(pvArgOutput, "<bad-pgmramrange-ptr>", sizeof("<bad-pgmramrange-ptr>") - 1);
2423 return cch;
2424}
2425
2426/** Format type handlers to be registered/deregistered. */
2427static const struct
2428{
2429 char szType[24];
2430 PFNRTSTRFORMATTYPE pfnHandler;
2431} g_aPgmFormatTypes[] =
2432{
2433 { "pgmpage", pgmFormatTypeHandlerPage },
2434 { "pgmramrange", pgmFormatTypeHandlerRamRange }
2435};
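
/*
 * Illustrative usage sketch: once the types above are registered, PGMPAGE and
 * PGMRAMRANGE pointers can be logged with the custom %R[...] specifiers.  The
 * helper name is hypothetical.
 */
#if 0 /* illustrative sketch only */
static void pgmSketchLogPage(PCPGMPAGE pPage, PGMRAMRANGE const *pRam)
{
    Log(("page=%R[pgmpage] range=%R[pgmramrange]\n", pPage, pRam));
}
#endif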
2436
2437#endif /* !IN_R0 || LOG_ENABLED */
2438
2439
2440/**
2441 * Registers the global string format types.
2442 *
2443 * This should be called at module load time or in some other manner that ensures
2444 * that it's called exactly one time.
2445 *
2446 * @returns IPRT status code on RTStrFormatTypeRegister failure.
2447 */
2448VMMDECL(int) PGMRegisterStringFormatTypes(void)
2449{
2450#if !defined(IN_R0) || defined(LOG_ENABLED)
2451 int rc = VINF_SUCCESS;
2452 unsigned i;
2453 for (i = 0; RT_SUCCESS(rc) && i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
2454 {
2455 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
2456# ifdef IN_RING0
2457 if (rc == VERR_ALREADY_EXISTS)
2458 {
2459 /* in case of cleanup failure in ring-0 */
2460 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2461 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
2462 }
2463# endif
2464 }
2465 if (RT_FAILURE(rc))
2466 while (i-- > 0)
2467 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2468
2469 return rc;
2470#else
2471 return VINF_SUCCESS;
2472#endif
2473}
2474
2475
2476/**
2477 * Deregisters the global string format types.
2478 *
2479 * This should be called at module unload time or in some other manner that
2480 * ensures that it's called exactly one time.
2481 */
2482VMMDECL(void) PGMDeregisterStringFormatTypes(void)
2483{
2484#if !defined(IN_R0) || defined(LOG_ENABLED)
2485 for (unsigned i = 0; i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
2486 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2487#endif
2488}
2489
2490#ifdef VBOX_STRICT
2491
2492/**
2493 * Asserts that there are no mapping conflicts.
2494 *
2495 * @returns Number of conflicts.
2496 * @param pVM The VM Handle.
2497 */
2498VMMDECL(unsigned) PGMAssertNoMappingConflicts(PVM pVM)
2499{
2500 unsigned cErrors = 0;
2501
2502    /* Only applies to raw mode -> 1 VCPU. */
2503 Assert(pVM->cCPUs == 1);
2504 PVMCPU pVCpu = &pVM->aCpus[0];
2505
2506 /*
2507 * Check for mapping conflicts.
2508 */
2509 for (PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
2510 pMapping;
2511 pMapping = pMapping->CTX_SUFF(pNext))
2512 {
2513 /** @todo This is slow and should be optimized, but since it's just assertions I don't care now. */
2514 for (RTGCPTR GCPtr = pMapping->GCPtr;
2515 GCPtr <= pMapping->GCPtrLast;
2516 GCPtr += PAGE_SIZE)
2517 {
2518 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)GCPtr, NULL, NULL);
2519 if (rc != VERR_PAGE_TABLE_NOT_PRESENT)
2520 {
2521 AssertMsgFailed(("Conflict at %RGv with %s\n", GCPtr, R3STRING(pMapping->pszDesc)));
2522 cErrors++;
2523 break;
2524 }
2525 }
2526 }
2527
2528 return cErrors;
2529}
2530
2531
2532/**
2533 * Asserts that everything related to the guest CR3 is correctly shadowed.
2534 *
2535 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
2536 * and assert the correctness of the guest CR3 mapping before asserting that the
2537 * shadow page tables are in sync with the guest page tables.
2538 *
2539 * @returns Number of conflicts.
2540 * @param pVM The VM Handle.
2541 * @param pVCpu VMCPU handle.
2542 * @param cr3 The current guest CR3 register value.
2543 * @param cr4 The current guest CR4 register value.
2544 */
2545VMMDECL(unsigned) PGMAssertCR3(PVM pVM, PVMCPU pVCpu, uint64_t cr3, uint64_t cr4)
2546{
2547 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
2548 pgmLock(pVM);
2549 unsigned cErrors = PGM_BTH_PFN(AssertCR3, pVCpu)(pVCpu, cr3, cr4, 0, ~(RTGCPTR)0);
2550 pgmUnlock(pVM);
2551 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
2552 return cErrors;
2553}
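
/*
 * Illustrative usage sketch: strict builds can cross-check the shadow paging
 * structures after a sync.  The helper name is hypothetical; cr3 and cr4 are
 * assumed to hold the current guest values.
 */
#if 0 /* illustrative sketch only */
static void pgmSketchStrictCheckCR3(PVM pVM, PVMCPU pVCpu, uint64_t cr3, uint64_t cr4)
{
    unsigned cErrors = PGMAssertCR3(pVM, pVCpu, cr3, cr4);
    AssertMsg(!cErrors, ("cErrors=%u\n", cErrors));
}
#endif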
2554
2555#endif /* VBOX_STRICT */