VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp @ 20671

Last change on this file was r20671, checked in by vboxsync, 16 years ago:

Bigger lock for the pagefault handler.
Avoid deadlocks when syncing notification handlers with our recompiler.

1/* $Id: PGMAll.cpp 20671 2009-06-17 15:23:14Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor - All context code.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#define LOG_GROUP LOG_GROUP_PGM
26#include <VBox/pgm.h>
27#include <VBox/cpum.h>
28#include <VBox/selm.h>
29#include <VBox/iom.h>
30#include <VBox/sup.h>
31#include <VBox/mm.h>
32#include <VBox/stam.h>
33#include <VBox/csam.h>
34#include <VBox/patm.h>
35#include <VBox/trpm.h>
36#include <VBox/rem.h>
37#include <VBox/em.h>
38#include <VBox/hwaccm.h>
39#include <VBox/hwacc_vmx.h>
40#include "PGMInternal.h"
41#include <VBox/vm.h>
42#include <iprt/assert.h>
43#include <iprt/asm.h>
44#include <iprt/string.h>
45#include <VBox/log.h>
46#include <VBox/param.h>
47#include <VBox/err.h>
48
49
50/*******************************************************************************
51* Structures and Typedefs *
52*******************************************************************************/
53/**
54 * State structure for PGM_GST_NAME(HandlerVirtualUpdate) that's
55 * passed to PGM_GST_NAME(VirtHandlerUpdateOne) during enumeration.
56 */
57typedef struct PGMHVUSTATE
58{
59 /** The VM handle. */
60 PVM pVM;
61 /** The VMCPU handle. */
62 PVMCPU pVCpu;
63 /** The todo flags. */
64 RTUINT fTodo;
65 /** The CR4 register value. */
66 uint32_t cr4;
67} PGMHVUSTATE, *PPGMHVUSTATE;
68
69
70/*******************************************************************************
71* Internal Functions *
72*******************************************************************************/
73DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
74DECLINLINE(int) pgmShwGetPaePoolPagePD(PPGMCPU pPGM, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde);
75
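/*
 * Mode template instantiation: PGMAllShw.h, PGMAllGst.h and PGMAllBth.h below
 * are included once per shadow/guest paging mode combination with different
 * PGM_SHW_TYPE / PGM_GST_TYPE settings. Each pass stamps out one set of
 * mode-specific workers whose names are mangled through the PGM_SHW_NAME,
 * PGM_GST_NAME and PGM_BTH_NAME macros; the results are invoked at run time
 * through the PGM_SHW_PFN, PGM_GST_PFN and PGM_BTH_PFN dispatch macros used
 * further down in this file.
 */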
76/*
77 * Shadow - 32-bit mode
78 */
79#define PGM_SHW_TYPE PGM_TYPE_32BIT
80#define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
81#include "PGMAllShw.h"
82
83/* Guest - real mode */
84#define PGM_GST_TYPE PGM_TYPE_REAL
85#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
86#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
87#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
88#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
89#include "PGMGstDefs.h"
90#include "PGMAllGst.h"
91#include "PGMAllBth.h"
92#undef BTH_PGMPOOLKIND_PT_FOR_PT
93#undef BTH_PGMPOOLKIND_ROOT
94#undef PGM_BTH_NAME
95#undef PGM_GST_TYPE
96#undef PGM_GST_NAME
97
98/* Guest - protected mode */
99#define PGM_GST_TYPE PGM_TYPE_PROT
100#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
101#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
102#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
103#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
104#include "PGMGstDefs.h"
105#include "PGMAllGst.h"
106#include "PGMAllBth.h"
107#undef BTH_PGMPOOLKIND_PT_FOR_PT
108#undef BTH_PGMPOOLKIND_ROOT
109#undef PGM_BTH_NAME
110#undef PGM_GST_TYPE
111#undef PGM_GST_NAME
112
113/* Guest - 32-bit mode */
114#define PGM_GST_TYPE PGM_TYPE_32BIT
115#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
116#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
117#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
118#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
119#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD
120#include "PGMGstDefs.h"
121#include "PGMAllGst.h"
122#include "PGMAllBth.h"
123#undef BTH_PGMPOOLKIND_PT_FOR_BIG
124#undef BTH_PGMPOOLKIND_PT_FOR_PT
125#undef BTH_PGMPOOLKIND_ROOT
126#undef PGM_BTH_NAME
127#undef PGM_GST_TYPE
128#undef PGM_GST_NAME
129
130#undef PGM_SHW_TYPE
131#undef PGM_SHW_NAME
132
133
134/*
135 * Shadow - PAE mode
136 */
137#define PGM_SHW_TYPE PGM_TYPE_PAE
138#define PGM_SHW_NAME(name) PGM_SHW_NAME_PAE(name)
139#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
140#include "PGMAllShw.h"
141
142/* Guest - real mode */
143#define PGM_GST_TYPE PGM_TYPE_REAL
144#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
145#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
146#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
147#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
148#include "PGMGstDefs.h"
149#include "PGMAllBth.h"
150#undef BTH_PGMPOOLKIND_PT_FOR_PT
151#undef BTH_PGMPOOLKIND_ROOT
152#undef PGM_BTH_NAME
153#undef PGM_GST_TYPE
154#undef PGM_GST_NAME
155
156/* Guest - protected mode */
157#define PGM_GST_TYPE PGM_TYPE_PROT
158#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
159#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
160#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
161#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
162#include "PGMGstDefs.h"
163#include "PGMAllBth.h"
164#undef BTH_PGMPOOLKIND_PT_FOR_PT
165#undef BTH_PGMPOOLKIND_ROOT
166#undef PGM_BTH_NAME
167#undef PGM_GST_TYPE
168#undef PGM_GST_NAME
169
170/* Guest - 32-bit mode */
171#define PGM_GST_TYPE PGM_TYPE_32BIT
172#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
173#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
174#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
175#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
176#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_FOR_32BIT
177#include "PGMGstDefs.h"
178#include "PGMAllBth.h"
179#undef BTH_PGMPOOLKIND_PT_FOR_BIG
180#undef BTH_PGMPOOLKIND_PT_FOR_PT
181#undef BTH_PGMPOOLKIND_ROOT
182#undef PGM_BTH_NAME
183#undef PGM_GST_TYPE
184#undef PGM_GST_NAME
185
186
187/* Guest - PAE mode */
188#define PGM_GST_TYPE PGM_TYPE_PAE
189#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
190#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
191#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
192#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
193#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT
194#include "PGMGstDefs.h"
195#include "PGMAllGst.h"
196#include "PGMAllBth.h"
197#undef BTH_PGMPOOLKIND_PT_FOR_BIG
198#undef BTH_PGMPOOLKIND_PT_FOR_PT
199#undef BTH_PGMPOOLKIND_ROOT
200#undef PGM_BTH_NAME
201#undef PGM_GST_TYPE
202#undef PGM_GST_NAME
203
204#undef PGM_SHW_TYPE
205#undef PGM_SHW_NAME
206
207
208#ifndef IN_RC /* AMD64 implies VT-x/AMD-V */
209/*
210 * Shadow - AMD64 mode
211 */
212# define PGM_SHW_TYPE PGM_TYPE_AMD64
213# define PGM_SHW_NAME(name) PGM_SHW_NAME_AMD64(name)
214# include "PGMAllShw.h"
215
216/* Guest - protected mode (only used for AMD-V nested paging in 64-bit mode) */
217# define PGM_GST_TYPE PGM_TYPE_PROT
218# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
219# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
220# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
221# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PD_PHYS
222# include "PGMGstDefs.h"
223# include "PGMAllBth.h"
224# undef BTH_PGMPOOLKIND_PT_FOR_PT
225# undef BTH_PGMPOOLKIND_ROOT
226# undef PGM_BTH_NAME
227# undef PGM_GST_TYPE
228# undef PGM_GST_NAME
229
230# ifdef VBOX_WITH_64_BITS_GUESTS
231/* Guest - AMD64 mode */
232# define PGM_GST_TYPE PGM_TYPE_AMD64
233# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
234# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
235# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
236# define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
237# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_64BIT_PML4
238# include "PGMGstDefs.h"
239# include "PGMAllGst.h"
240# include "PGMAllBth.h"
241# undef BTH_PGMPOOLKIND_PT_FOR_BIG
242# undef BTH_PGMPOOLKIND_PT_FOR_PT
243# undef BTH_PGMPOOLKIND_ROOT
244# undef PGM_BTH_NAME
245# undef PGM_GST_TYPE
246# undef PGM_GST_NAME
247# endif /* VBOX_WITH_64_BITS_GUESTS */
248
249# undef PGM_SHW_TYPE
250# undef PGM_SHW_NAME
251
252
253/*
254 * Shadow - Nested paging mode
255 */
256# define PGM_SHW_TYPE PGM_TYPE_NESTED
257# define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED(name)
258# include "PGMAllShw.h"
259
260/* Guest - real mode */
261# define PGM_GST_TYPE PGM_TYPE_REAL
262# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
263# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_REAL(name)
264# include "PGMGstDefs.h"
265# include "PGMAllBth.h"
266# undef PGM_BTH_NAME
267# undef PGM_GST_TYPE
268# undef PGM_GST_NAME
269
270/* Guest - protected mode */
271# define PGM_GST_TYPE PGM_TYPE_PROT
272# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
273# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PROT(name)
274# include "PGMGstDefs.h"
275# include "PGMAllBth.h"
276# undef PGM_BTH_NAME
277# undef PGM_GST_TYPE
278# undef PGM_GST_NAME
279
280/* Guest - 32-bit mode */
281# define PGM_GST_TYPE PGM_TYPE_32BIT
282# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
283# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT(name)
284# include "PGMGstDefs.h"
285# include "PGMAllBth.h"
286# undef PGM_BTH_NAME
287# undef PGM_GST_TYPE
288# undef PGM_GST_NAME
289
290/* Guest - PAE mode */
291# define PGM_GST_TYPE PGM_TYPE_PAE
292# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
293# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE(name)
294# include "PGMGstDefs.h"
295# include "PGMAllBth.h"
296# undef PGM_BTH_NAME
297# undef PGM_GST_TYPE
298# undef PGM_GST_NAME
299
300# ifdef VBOX_WITH_64_BITS_GUESTS
301/* Guest - AMD64 mode */
302# define PGM_GST_TYPE PGM_TYPE_AMD64
303# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
304# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64(name)
305# include "PGMGstDefs.h"
306# include "PGMAllBth.h"
307# undef PGM_BTH_NAME
308# undef PGM_GST_TYPE
309# undef PGM_GST_NAME
310# endif /* VBOX_WITH_64_BITS_GUESTS */
311
312# undef PGM_SHW_TYPE
313# undef PGM_SHW_NAME
314
315
316/*
317 * Shadow - EPT
318 */
319# define PGM_SHW_TYPE PGM_TYPE_EPT
320# define PGM_SHW_NAME(name) PGM_SHW_NAME_EPT(name)
321# include "PGMAllShw.h"
322
323/* Guest - real mode */
324# define PGM_GST_TYPE PGM_TYPE_REAL
325# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
326# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_REAL(name)
327# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
328# include "PGMGstDefs.h"
329# include "PGMAllBth.h"
330# undef BTH_PGMPOOLKIND_PT_FOR_PT
331# undef PGM_BTH_NAME
332# undef PGM_GST_TYPE
333# undef PGM_GST_NAME
334
335/* Guest - protected mode */
336# define PGM_GST_TYPE PGM_TYPE_PROT
337# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
338# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
339# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
340# include "PGMGstDefs.h"
341# include "PGMAllBth.h"
342# undef BTH_PGMPOOLKIND_PT_FOR_PT
343# undef PGM_BTH_NAME
344# undef PGM_GST_TYPE
345# undef PGM_GST_NAME
346
347/* Guest - 32-bit mode */
348# define PGM_GST_TYPE PGM_TYPE_32BIT
349# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
350# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_32BIT(name)
351# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
352# include "PGMGstDefs.h"
353# include "PGMAllBth.h"
354# undef BTH_PGMPOOLKIND_PT_FOR_PT
355# undef PGM_BTH_NAME
356# undef PGM_GST_TYPE
357# undef PGM_GST_NAME
358
359/* Guest - PAE mode */
360# define PGM_GST_TYPE PGM_TYPE_PAE
361# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
362# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PAE(name)
363# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
364# include "PGMGstDefs.h"
365# include "PGMAllBth.h"
366# undef BTH_PGMPOOLKIND_PT_FOR_PT
367# undef PGM_BTH_NAME
368# undef PGM_GST_TYPE
369# undef PGM_GST_NAME
370
371# ifdef VBOX_WITH_64_BITS_GUESTS
372/* Guest - AMD64 mode */
373# define PGM_GST_TYPE PGM_TYPE_AMD64
374# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
375# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_AMD64(name)
376# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
377# include "PGMGstDefs.h"
378# include "PGMAllBth.h"
379# undef BTH_PGMPOOLKIND_PT_FOR_PT
380# undef PGM_BTH_NAME
381# undef PGM_GST_TYPE
382# undef PGM_GST_NAME
383# endif /* VBOX_WITH_64_BITS_GUESTS */
384
385# undef PGM_SHW_TYPE
386# undef PGM_SHW_NAME
387
388#endif /* !IN_RC */
389
390
391#ifndef IN_RING3
392/**
393 * #PF Handler.
394 *
395 * @returns VBox status code (appropriate for trap handling and GC return).
396 * @param pVCpu VMCPU handle.
397 * @param uErr The trap error code.
398 * @param pRegFrame Trap register frame.
399 * @param pvFault The fault address.
400 */
401VMMDECL(int) PGMTrap0eHandler(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
402{
403 PVM pVM = pVCpu->CTX_SUFF(pVM);
404
405 LogFlow(("PGMTrap0eHandler: uErr=%RGu pvFault=%RGv eip=%04x:%RGv\n", uErr, pvFault, pRegFrame->cs, (RTGCPTR)pRegFrame->rip));
406 STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0e, a);
407 STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );
408
409
410#ifdef VBOX_WITH_STATISTICS
411 /*
412 * Error code stats.
413 */
414 if (uErr & X86_TRAP_PF_US)
415 {
416 if (!(uErr & X86_TRAP_PF_P))
417 {
418 if (uErr & X86_TRAP_PF_RW)
419 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSNotPresentWrite);
420 else
421 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSNotPresentRead);
422 }
423 else if (uErr & X86_TRAP_PF_RW)
424 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSWrite);
425 else if (uErr & X86_TRAP_PF_RSVD)
426 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSReserved);
427 else if (uErr & X86_TRAP_PF_ID)
428 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSNXE);
429 else
430 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSRead);
431 }
432 else
433 { /* Supervisor */
434 if (!(uErr & X86_TRAP_PF_P))
435 {
436 if (uErr & X86_TRAP_PF_RW)
437 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eSVNotPresentWrite);
438 else
439 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eSVNotPresentRead);
440 }
441 else if (uErr & X86_TRAP_PF_RW)
442 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eSVWrite);
443 else if (uErr & X86_TRAP_PF_ID)
444 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eSNXE);
445 else if (uErr & X86_TRAP_PF_RSVD)
446 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eSVReserved);
447 }
448#endif /* VBOX_WITH_STATISTICS */
449
450 /*
451 * Call the worker.
452 */
453 pgmLock(pVM);
454 int rc = PGM_BTH_PFN(Trap0eHandler, pVCpu)(pVCpu, uErr, pRegFrame, pvFault);
455 Assert(PGMIsLockOwner(pVM));
456 pgmUnlock(pVM);
457 if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
458 rc = VINF_SUCCESS;
459 STAM_STATS({ if (rc == VINF_EM_RAW_GUEST_TRAP) STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eGuestPF); });
460 STAM_STATS({ if (!pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution))
461 pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2Misc; });
462 STAM_PROFILE_STOP_EX(&pVCpu->pgm.s.StatRZTrap0e, pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution), a);
463 return rc;
464}
465#endif /* !IN_RING3 */
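/** @par Usage (illustrative sketch; the trap dispatcher supplying uErrCode,
 *       pRegFrame and GCPtrFault is an assumed context, not part of this file):
 * @code
 *     int rc = PGMTrap0eHandler(pVCpu, uErrCode, pRegFrame, GCPtrFault);
 *     if (rc == VINF_SUCCESS)
 *         ;                           // shadow structures synced, resume the guest
 *     else if (rc == VINF_EM_RAW_GUEST_TRAP)
 *         ;                           // genuine guest fault, reflect the #PF to the guest
 *     else
 *         ;                           // e.g. VINF_PGM_SYNC_CR3 or a handler verdict, return to EM
 * @endcode
 */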
466
467
468/**
469 * Prefetch a page
470 *
471 * Typically used to sync commonly used pages before entering raw mode
472 * after a CR3 reload.
473 *
474 * @returns VBox status code suitable for scheduling.
475 * @retval VINF_SUCCESS on success.
476 * @retval VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
477 * @param pVCpu VMCPU handle.
478 * @param GCPtrPage Page to invalidate.
479 */
480VMMDECL(int) PGMPrefetchPage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
481{
482 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,Prefetch), a);
483 int rc = PGM_BTH_PFN(PrefetchPage, pVCpu)(pVCpu, GCPtrPage);
484 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,Prefetch), a);
485 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
486 return rc;
487}
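/** @par Usage (illustrative sketch; the caller context is assumed, not part of
 *       this file):
 * @code
 *     // Warm up a commonly used page before re-entering raw mode after a CR3 reload.
 *     int rc = PGMPrefetchPage(pVCpu, GCPtrPage);
 *     if (rc == VINF_PGM_SYNC_CR3)
 *         ;                           // out of shadow pages; a full SyncCR3 is pending
 * @endcode
 */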
488
489
490/**
491 * Gets the mapping corresponding to the specified address (if any).
492 *
493 * @returns Pointer to the mapping.
494 * @returns NULL if not found.
495 *
496 * @param pVM The virtual machine.
497 * @param GCPtr The guest context pointer.
498 */
499PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr)
500{
501 PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
502 while (pMapping)
503 {
504 if ((uintptr_t)GCPtr < (uintptr_t)pMapping->GCPtr)
505 break;
506 if ((uintptr_t)GCPtr - (uintptr_t)pMapping->GCPtr < pMapping->cb)
507 return pMapping;
508 pMapping = pMapping->CTX_SUFF(pNext);
509 }
510 return NULL;
511}
512
513
514/**
515 * Verifies a range of pages for read or write access
516 *
517 * Only checks the guest's page tables
518 *
519 * @returns VBox status code.
520 * @param pVCpu VMCPU handle.
521 * @param Addr Guest virtual address to check
522 * @param cbSize Access size
523 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
524 * @remarks Currently not in use.
525 */
526VMMDECL(int) PGMIsValidAccess(PVMCPU pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
527{
528 /*
529 * Validate input.
530 */
531 if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
532 {
533 AssertMsgFailed(("PGMIsValidAccess: invalid access type %08x\n", fAccess));
534 return VERR_INVALID_PARAMETER;
535 }
536
537 uint64_t fPage;
538 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)Addr, &fPage, NULL);
539 if (RT_FAILURE(rc))
540 {
541 Log(("PGMIsValidAccess: access violation for %RGv rc=%d\n", Addr, rc));
542 return VINF_EM_RAW_GUEST_TRAP;
543 }
544
545 /*
546 * Check if the access would cause a page fault
547 *
548 * Note that hypervisor page directories are not present in the guest's tables, so this check
549 * is sufficient.
550 */
551 bool fWrite = !!(fAccess & X86_PTE_RW);
552 bool fUser = !!(fAccess & X86_PTE_US);
553 if ( !(fPage & X86_PTE_P)
554 || (fWrite && !(fPage & X86_PTE_RW))
555 || (fUser && !(fPage & X86_PTE_US)) )
556 {
557 Log(("PGMIsValidAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPage, fWrite, fUser));
558 return VINF_EM_RAW_GUEST_TRAP;
559 }
560 if ( RT_SUCCESS(rc)
561 && PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize))
562 return PGMIsValidAccess(pVCpu, Addr + PAGE_SIZE, (cbSize > PAGE_SIZE) ? cbSize - PAGE_SIZE : 1, fAccess);
563 return rc;
564}
565
566
567/**
568 * Verifies a range of pages for read or write access
569 *
570 * Supports handling of pages marked for dirty bit tracking and CSAM
571 *
572 * @returns VBox status code.
573 * @param pVCpu VMCPU handle.
574 * @param Addr Guest virtual address to check
575 * @param cbSize Access size
576 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
577 */
578VMMDECL(int) PGMVerifyAccess(PVMCPU pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
579{
580 PVM pVM = pVCpu->CTX_SUFF(pVM);
581
582 AssertMsg(!(fAccess & ~(X86_PTE_US | X86_PTE_RW)), ("PGMVerifyAccess: invalid access type %08x\n", fAccess));
583
584 /*
585 * Get going.
586 */
587 uint64_t fPageGst;
588 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)Addr, &fPageGst, NULL);
589 if (RT_FAILURE(rc))
590 {
591 Log(("PGMVerifyAccess: access violation for %RGv rc=%d\n", Addr, rc));
592 return VINF_EM_RAW_GUEST_TRAP;
593 }
594
595 /*
596 * Check if the access would cause a page fault
597 *
598 * Note that hypervisor page directories are not present in the guest's tables, so this check
599 * is sufficient.
600 */
601 const bool fWrite = !!(fAccess & X86_PTE_RW);
602 const bool fUser = !!(fAccess & X86_PTE_US);
603 if ( !(fPageGst & X86_PTE_P)
604 || (fWrite && !(fPageGst & X86_PTE_RW))
605 || (fUser && !(fPageGst & X86_PTE_US)) )
606 {
607 Log(("PGMVerifyAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPageGst, fWrite, fUser));
608 return VINF_EM_RAW_GUEST_TRAP;
609 }
610
611 if (!HWACCMIsNestedPagingActive(pVM))
612 {
613 /*
614 * Next step is to verify if we protected this page for dirty bit tracking or for CSAM scanning
615 */
616 rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, NULL, NULL);
617 if ( rc == VERR_PAGE_NOT_PRESENT
618 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
619 {
620 /*
621 * Page is not present in our page tables.
622 * Try to sync it!
623 */
624 Assert(X86_TRAP_PF_RW == X86_PTE_RW && X86_TRAP_PF_US == X86_PTE_US);
625 uint32_t uErr = fAccess & (X86_TRAP_PF_RW | X86_TRAP_PF_US);
626 rc = PGM_BTH_PFN(VerifyAccessSyncPage, pVCpu)(pVCpu, Addr, fPageGst, uErr);
627 if (rc != VINF_SUCCESS)
628 return rc;
629 }
630 else
631 AssertMsg(rc == VINF_SUCCESS, ("PGMShwGetPage %RGv failed with %Rrc\n", Addr, rc));
632 }
633
634#if 0 /* def VBOX_STRICT; triggers too often now */
635 /*
636 * This check is a bit paranoid, but useful.
637 */
638 /** @note this will assert when writing to monitored pages (a bit annoying actually) */
639 uint64_t fPageShw;
640 rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, &fPageShw, NULL);
641 if ( (rc == VERR_PAGE_NOT_PRESENT || RT_FAILURE(rc))
642 || (fWrite && !(fPageShw & X86_PTE_RW))
643 || (fUser && !(fPageShw & X86_PTE_US)) )
644 {
645 AssertMsgFailed(("Unexpected access violation for %RGv! rc=%Rrc write=%d user=%d\n",
646 Addr, rc, fWrite && !(fPageShw & X86_PTE_RW), fUser && !(fPageShw & X86_PTE_US)));
647 return VINF_EM_RAW_GUEST_TRAP;
648 }
649#endif
650
651 if ( RT_SUCCESS(rc)
652 && ( PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize - 1)
653 || Addr + cbSize < Addr))
654 {
655 /* Don't recursively call PGMVerifyAccess as we might run out of stack. */
656 for (;;)
657 {
658 Addr += PAGE_SIZE;
659 if (cbSize > PAGE_SIZE)
660 cbSize -= PAGE_SIZE;
661 else
662 cbSize = 1;
663 rc = PGMVerifyAccess(pVCpu, Addr, 1, fAccess);
664 if (rc != VINF_SUCCESS)
665 break;
666 if (PAGE_ADDRESS(Addr) == PAGE_ADDRESS(Addr + cbSize - 1))
667 break;
668 }
669 }
670 return rc;
671}
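/** @par Usage (illustrative sketch; the emulated write and its buffer address
 *       are assumed, not part of this file):
 * @code
 *     // Check that the guest may write cbToWrite bytes at GCPtrDst from user mode.
 *     int rc = PGMVerifyAccess(pVCpu, GCPtrDst, cbToWrite, X86_PTE_RW | X86_PTE_US);
 *     if (rc == VINF_EM_RAW_GUEST_TRAP)
 *         ;                           // the access would fault; raise #PF instead of emulating
 * @endcode
 */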
672
673
674/**
675 * Emulation of the invlpg instruction (HC only actually).
676 *
677 * @returns VBox status code, special care required.
678 * @retval VINF_PGM_SYNC_CR3 - handled.
679 * @retval VINF_EM_RAW_EMULATE_INSTR - not handled (RC only).
680 * @retval VERR_REM_FLUSHED_PAGES_OVERFLOW - not handled.
681 *
682 * @param pVCpu VMCPU handle.
683 * @param GCPtrPage Page to invalidate.
684 *
685 * @remark ASSUMES the page table entry or page directory is valid. Fairly
686 * safe, but there could be edge cases!
687 *
688 * @todo Flush page or page directory only if necessary!
689 */
690VMMDECL(int) PGMInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
691{
692 PVM pVM = pVCpu->CTX_SUFF(pVM);
693 int rc;
694 Log3(("PGMInvalidatePage: GCPtrPage=%RGv\n", GCPtrPage));
695
696#ifndef IN_RING3
697 /*
698 * Notify the recompiler so it can record this instruction.
699 * Failure happens when it's out of space. We'll return to HC in that case.
700 */
701 rc = REMNotifyInvalidatePage(pVM, GCPtrPage);
702 if (rc != VINF_SUCCESS)
703 return rc;
704#endif /* !IN_RING3 */
705
706
707#ifdef IN_RC
708 /*
709 * Check for conflicts and pending CR3 monitoring updates.
710 */
711 if (!pVM->pgm.s.fMappingsFixed)
712 {
713 if ( pgmGetMapping(pVM, GCPtrPage)
714 && PGMGstGetPage(pVCpu, GCPtrPage, NULL, NULL) != VERR_PAGE_TABLE_NOT_PRESENT)
715 {
716 LogFlow(("PGMGCInvalidatePage: Conflict!\n"));
717 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
718 STAM_COUNTER_INC(&pVM->pgm.s.StatRCInvlPgConflict);
719 return VINF_PGM_SYNC_CR3;
720 }
721
722 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
723 {
724 LogFlow(("PGMGCInvalidatePage: PGM_SYNC_MONITOR_CR3 -> reinterpret instruction in R3\n"));
725 STAM_COUNTER_INC(&pVM->pgm.s.StatRCInvlPgSyncMonCR3);
726 return VINF_EM_RAW_EMULATE_INSTR;
727 }
728 }
729#endif /* IN_RC */
730
731 /*
732 * Call paging mode specific worker.
733 */
734 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePage), a);
735 pgmLock(pVM);
736 rc = PGM_BTH_PFN(InvalidatePage, pVCpu)(pVCpu, GCPtrPage);
737 pgmUnlock(pVM);
738 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePage), a);
739
740#ifdef IN_RING3
741 /*
742 * Check if we have a pending update of the CR3 monitoring.
743 */
744 if ( RT_SUCCESS(rc)
745 && (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3))
746 {
747 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
748 Assert(!pVM->pgm.s.fMappingsFixed);
749 }
750
751 /*
752 * Inform CSAM about the flush
753 *
754 * Note: This is to check if monitored pages have been changed; when we implement
755 * callbacks for virtual handlers, this is no longer required.
756 */
757 CSAMR3FlushPage(pVM, GCPtrPage);
758#endif /* IN_RING3 */
759 return rc;
760}
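/** @par Usage (illustrative sketch; the INVLPG interpreter context is assumed,
 *       not part of this file):
 * @code
 *     // Emulating INVLPG: let PGM drop the shadow mapping of the page.
 *     int rc = PGMInvalidatePage(pVCpu, GCPtrPage);
 *     if (rc == VINF_PGM_SYNC_CR3)
 *         ;                           // handled via a pending full sync; the FF is already set
 *     else if (rc == VINF_EM_RAW_EMULATE_INSTR)
 *         ;                           // RC only: re-execute the instruction in ring-3
 * @endcode
 */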
761
762
763/**
764 * Executes an instruction using the interpreter.
765 *
766 * @returns VBox status code (appropriate for trap handling and GC return).
767 * @param pVM VM handle.
768 * @param pVCpu VMCPU handle.
769 * @param pRegFrame Register frame.
770 * @param pvFault Fault address.
771 */
772VMMDECL(int) PGMInterpretInstruction(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
773{
774 uint32_t cb;
775 int rc = EMInterpretInstruction(pVM, pVCpu, pRegFrame, pvFault, &cb);
776 if (rc == VERR_EM_INTERPRETER)
777 rc = VINF_EM_RAW_EMULATE_INSTR;
778 if (rc != VINF_SUCCESS)
779 Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%RGv)\n", rc, pvFault));
780 return rc;
781}
782
783
784/**
785 * Gets effective page information (from the VMM page directory).
786 *
787 * @returns VBox status.
788 * @param pVCpu VMCPU handle.
789 * @param GCPtr Guest Context virtual address of the page.
790 * @param pfFlags Where to store the flags. These are X86_PTE_*.
791 * @param pHCPhys Where to store the HC physical address of the page.
792 * This is page aligned.
793 * @remark You should use PGMMapGetPage() for pages in a mapping.
794 */
795VMMDECL(int) PGMShwGetPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
796{
797 pgmLock(pVCpu->CTX_SUFF(pVM));
798 int rc = PGM_SHW_PFN(GetPage, pVCpu)(pVCpu, GCPtr, pfFlags, pHCPhys);
799 pgmUnlock(pVCpu->CTX_SUFF(pVM));
800 return rc;
801}
802
803
804/**
805 * Sets (replaces) the page flags for a range of pages in the shadow context.
806 *
807 * @returns VBox status.
808 * @param pVCpu VMCPU handle.
809 * @param GCPtr The address of the first page.
810 * @param cb The size of the range in bytes.
811 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
812 * @remark You must use PGMMapSetPage() for pages in a mapping.
813 */
814VMMDECL(int) PGMShwSetPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
815{
816 return PGMShwModifyPage(pVCpu, GCPtr, cb, fFlags, 0);
817}
818
819
820/**
821 * Modify page flags for a range of pages in the shadow context.
822 *
823 * The existing flags are ANDed with the fMask and ORed with the fFlags.
824 *
825 * @returns VBox status code.
826 * @param pVCpu VMCPU handle.
827 * @param GCPtr Virtual address of the first page in the range.
828 * @param cb Size (in bytes) of the range to apply the modification to.
829 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
830 * @param fMask The AND mask - page flags X86_PTE_*.
831 * Be very CAREFUL when ~'ing constants which could be 32-bit!
832 * @remark You must use PGMMapModifyPage() for pages in a mapping.
833 */
834VMMDECL(int) PGMShwModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
835{
836 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
837 Assert(cb);
838
839 /*
840 * Align the input.
841 */
842 cb += GCPtr & PAGE_OFFSET_MASK;
843 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
844 GCPtr = (GCPtr & PAGE_BASE_GC_MASK); /** @todo this ain't necessary, right... */
845
846 /*
847 * Call worker.
848 */
849 PVM pVM = pVCpu->CTX_SUFF(pVM);
850 pgmLock(pVM);
851 int rc = PGM_SHW_PFN(ModifyPage, pVCpu)(pVCpu, GCPtr, cb, fFlags, fMask);
852 pgmUnlock(pVM);
853 return rc;
854}
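/** @par Usage (illustrative sketch; the range to protect is assumed, not part
 *       of this file):
 * @code
 *     // Write-protect a range in the shadow tables: OR in nothing, AND out R/W.
 *     // Note the 64-bit cast, per the warning about ~'ing 32-bit constants.
 *     int rc = PGMShwModifyPage(pVCpu, GCPtr, cb, 0, ~(uint64_t)X86_PTE_RW);
 * @endcode
 */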
855
856/**
857 * Gets the shadow page directory for the specified address, PAE.
858 *
859 * @returns VBox status code. The shadow PD is returned via ppPD.
860 * @param pVCpu The VMCPU handle.
861 * @param GCPtr The address.
862 * @param pGstPdpe Guest PDPT entry
863 * @param ppPD Receives address of page directory
864 */
865int pgmShwSyncPaePDPtr(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDPE pGstPdpe, PX86PDPAE *ppPD)
866{
867 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
868 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(&pVCpu->pgm.s);
869 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
870 PVM pVM = pVCpu->CTX_SUFF(pVM);
871 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
872 PPGMPOOLPAGE pShwPage;
873 int rc;
874
875 /* Allocate page directory if not present. */
876 if ( !pPdpe->n.u1Present
877 && !(pPdpe->u & X86_PDPE_PG_MASK))
878 {
879 bool fNestedPaging = HWACCMIsNestedPagingActive(pVM);
880 bool fPaging = !!(CPUMGetGuestCR0(pVCpu) & X86_CR0_PG);
881 RTGCPTR64 GCPdPt;
882 PGMPOOLKIND enmKind;
883
884# if defined(IN_RC)
885 /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
886 PGMDynLockHCPage(pVM, (uint8_t *)pPdpe);
887# endif
888
889 if (fNestedPaging || !fPaging)
890 {
891 /* AMD-V nested paging or real/protected mode without paging */
892 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
893 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
894 }
895 else
896 {
897 Assert(pGstPdpe);
898
899 if (CPUMGetGuestCR4(pVCpu) & X86_CR4_PAE)
900 {
901 if (!pGstPdpe->n.u1Present)
902 {
903 /* PD not present; guest must reload CR3 to change it.
904 * No need to monitor anything in this case.
905 */
906 Assert(!HWACCMIsEnabled(pVM));
907
908 GCPdPt = pGstPdpe->u & X86_PDPE_PG_MASK;
909 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
910 pGstPdpe->n.u1Present = 1;
911 }
912 else
913 {
914 GCPdPt = pGstPdpe->u & X86_PDPE_PG_MASK;
915 enmKind = PGMPOOLKIND_PAE_PD_FOR_PAE_PD;
916 }
917 }
918 else
919 {
920 GCPdPt = CPUMGetGuestCR3(pVCpu);
921 enmKind = (PGMPOOLKIND)(PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD + iPdPt);
922 }
923 }
924
925 /* Create a reference back to the PDPT by using the index in its shadow page. */
926 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPdPt, &pShwPage);
927 AssertRCReturn(rc, rc);
928
929 /* The PD was cached or created; hook it up now. */
930 pPdpe->u |= pShwPage->Core.Key
931 | (pGstPdpe->u & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));
932
933# if defined(IN_RC)
934 /* In 32-bit PAE mode we *must* invalidate the TLB when changing a PDPT entry; the CPU fetches them only during cr3 load, so any
935 * non-present PDPT will continue to cause page faults.
936 */
937 ASMReloadCR3();
938 PGMDynUnlockHCPage(pVM, (uint8_t *)pPdpe);
939# endif
940 }
941 else
942 {
943 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
944 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
945 Assert((pPdpe->u & X86_PDPE_PG_MASK) == pShwPage->Core.Key);
946
947 pgmPoolCacheUsed(pPool, pShwPage);
948 }
949 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
950 return VINF_SUCCESS;
951}
952
953
954/**
955 * Gets the pointer to the shadow page directory entry for an address, PAE.
956 *
957 * @returns VBox status code. The pool page of the shadow PD is returned via ppShwPde.
958 * @param pPGM Pointer to the PGMCPU instance data.
959 * @param GCPtr The address.
960 * @param ppShwPde Receives the address of the pgm pool page for the shadow page directory
961 */
962DECLINLINE(int) pgmShwGetPaePoolPagePD(PPGMCPU pPGM, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde)
963{
964 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
965 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pPGM);
966 AssertReturn(pPdpt, VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT); /* can't happen */
967 if (!pPdpt->a[iPdPt].n.u1Present)
968 {
969 LogFlow(("pgmShwGetPaePoolPagePD: PD %d not present (%RX64)\n", iPdPt, pPdpt->a[iPdPt].u));
970 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
971 }
972 AssertMsg(pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK, ("GCPtr=%RGv\n", GCPtr));
973
974 /* Fetch the pgm pool shadow descriptor. */
975 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(PGMCPU2PGM(pPGM)->CTX_SUFF(pPool), pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
976 AssertReturn(pShwPde, VERR_INTERNAL_ERROR);
977
978 *ppShwPde = pShwPde;
979 return VINF_SUCCESS;
980}
981
982#ifndef IN_RC
983
984/**
985 * Syncs the SHADOW page directory pointer for the specified address.
986 *
987 * Allocates backing pages in case the PDPT or PML4 entry is missing.
988 *
989 * The caller is responsible for making sure the guest has a valid PD before
990 * calling this function.
991 *
992 * @returns VBox status.
993 * @param pVCpu VMCPU handle.
994 * @param GCPtr The address.
995 * @param pGstPml4e Guest PML4 entry
996 * @param pGstPdpe Guest PDPT entry
997 * @param ppPD Receives address of page directory
998 */
999int pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E pGstPml4e, PX86PDPE pGstPdpe, PX86PDPAE *ppPD)
1000{
1001 PPGMCPU pPGM = &pVCpu->pgm.s;
1002 PVM pVM = pVCpu->CTX_SUFF(pVM);
1003 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1004 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1005 PX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pPGM, iPml4);
1006 bool fNestedPaging = HWACCMIsNestedPagingActive(pVM);
1007 bool fPaging = !!(CPUMGetGuestCR0(pVCpu) & X86_CR0_PG);
1008 PPGMPOOLPAGE pShwPage;
1009 int rc;
1010
1011 /* Allocate page directory pointer table if not present. */
1012 if ( !pPml4e->n.u1Present
1013 && !(pPml4e->u & X86_PML4E_PG_MASK))
1014 {
1015 RTGCPTR64 GCPml4;
1016 PGMPOOLKIND enmKind;
1017
1018 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1019
1020 if (fNestedPaging || !fPaging)
1021 {
1022 /* AMD-V nested paging or real/protected mode without paging */
1023 GCPml4 = (RTGCPTR64)iPml4 << X86_PML4_SHIFT;
1024 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_PHYS;
1025 }
1026 else
1027 {
1028 Assert(pGstPml4e && pGstPdpe);
1029
1030 GCPml4 = pGstPml4e->u & X86_PML4E_PG_MASK;
1031 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT;
1032 }
1033
1034 /* Create a reference back to the PDPT by using the index in its shadow page. */
1035 rc = pgmPoolAlloc(pVM, GCPml4, enmKind, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, &pShwPage);
1036 AssertRCReturn(rc, rc);
1037 }
1038 else
1039 {
1040 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
1041 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1042
1043 pgmPoolCacheUsed(pPool, pShwPage);
1044 }
1045 /* The PDPT was cached or created; hook it up now. */
1046 pPml4e->u |= pShwPage->Core.Key
1047 | (pGstPml4e->u & ~(X86_PML4E_PG_MASK | X86_PML4E_AVL_MASK | X86_PML4E_PCD | X86_PML4E_PWT));
1048
1049 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1050 PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1051 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1052
1053 /* Allocate page directory if not present. */
1054 if ( !pPdpe->n.u1Present
1055 && !(pPdpe->u & X86_PDPE_PG_MASK))
1056 {
1057 RTGCPTR64 GCPdPt;
1058 PGMPOOLKIND enmKind;
1059
1060 if (fNestedPaging || !fPaging)
1061 {
1062 /* AMD-V nested paging or real/protected mode without paging */
1063 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
1064 enmKind = PGMPOOLKIND_64BIT_PD_FOR_PHYS;
1065 }
1066 else
1067 {
1068 Assert(pGstPdpe);
1069
1070 GCPdPt = pGstPdpe->u & X86_PDPE_PG_MASK;
1071 enmKind = PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD;
1072 }
1073
1074 /* Create a reference back to the PDPT by using the index in its shadow page. */
1075 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, pShwPage->idx, iPdPt, &pShwPage);
1076 AssertRCReturn(rc, rc);
1077 }
1078 else
1079 {
1080 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
1081 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1082
1083 pgmPoolCacheUsed(pPool, pShwPage);
1084 }
1085 /* The PD was cached or created; hook it up now. */
1086 pPdpe->u |= pShwPage->Core.Key
1087 | (pGstPdpe->u & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));
1088
1089 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1090 return VINF_SUCCESS;
1091}
1092
1093
1094/**
1095 * Gets the SHADOW page directory pointer for the specified address (long mode).
1096 *
1097 * @returns VBox status.
1098 * @param pVCpu VMCPU handle.
1099 * @param GCPtr The address.
1100 * @param ppPdpt Receives address of pdpt
1101 * @param ppPD Receives address of page directory
1102 */
1103DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
1104{
1105 PPGMCPU pPGM = &pVCpu->pgm.s;
1106 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1107 PCX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pPGM, iPml4);
1108 AssertReturn(pPml4e, VERR_INTERNAL_ERROR);
1109 if (ppPml4e)
1110 *ppPml4e = (PX86PML4E)pPml4e;
1111
1112 Log4(("pgmShwGetLongModePDPtr %VGv (%VHv) %RX64\n", GCPtr, pPml4e, pPml4e->u));
1113
1114 if (!pPml4e->n.u1Present)
1115 return VERR_PAGE_MAP_LEVEL4_NOT_PRESENT;
1116
1117 PVM pVM = pVCpu->CTX_SUFF(pVM);
1118 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1119 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
1120 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1121
1122 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1123 PCX86PDPT pPdpt = *ppPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1124 if (!pPdpt->a[iPdPt].n.u1Present)
1125 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1126
1127 pShwPage = pgmPoolGetPage(pPool, pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
1128 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1129
1130 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1131 return VINF_SUCCESS;
1132}
1133
1134
1135/**
1136 * Syncs the SHADOW EPT page directory pointer for the specified address. Allocates
1137 * backing pages in case the PDPT or PML4 entry is missing.
1138 *
1139 * @returns VBox status.
1140 * @param pVCpu VMCPU handle.
1141 * @param GCPtr The address.
1142 * @param ppPdpt Receives address of pdpt
1143 * @param ppPD Receives address of page directory
1144 */
1145int pgmShwGetEPTPDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD)
1146{
1147 PPGMCPU pPGM = &pVCpu->pgm.s;
1148 PVM pVM = pVCpu->CTX_SUFF(pVM);
1149 const unsigned iPml4 = (GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
1150 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1151 PEPTPML4 pPml4;
1152 PEPTPML4E pPml4e;
1153 PPGMPOOLPAGE pShwPage;
1154 int rc;
1155
1156 Assert(HWACCMIsNestedPagingActive(pVM));
1157
1158 pPml4 = (PEPTPML4)PGMPOOL_PAGE_2_PTR_BY_PGMCPU(pPGM, pPGM->CTX_SUFF(pShwPageCR3));
1159 Assert(pPml4);
1160
1161 /* Allocate page directory pointer table if not present. */
1162 pPml4e = &pPml4->a[iPml4];
1163 if ( !pPml4e->n.u1Present
1164 && !(pPml4e->u & EPT_PML4E_PG_MASK))
1165 {
1166 Assert(!(pPml4e->u & EPT_PML4E_PG_MASK));
1167 RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT;
1168
1169 rc = pgmPoolAlloc(pVM, GCPml4, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOL_IDX_NESTED_ROOT, iPml4, &pShwPage);
1170 AssertRCReturn(rc, rc);
1171 }
1172 else
1173 {
1174 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
1175 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1176
1177 pgmPoolCacheUsed(pPool, pShwPage);
1178 }
1179 /* The PDPT was cached or created; hook it up now and fill with the default value. */
1180 pPml4e->u = pShwPage->Core.Key;
1181 pPml4e->n.u1Present = 1;
1182 pPml4e->n.u1Write = 1;
1183 pPml4e->n.u1Execute = 1;
1184
1185 const unsigned iPdPt = (GCPtr >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
1186 PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1187 PEPTPDPTE pPdpe = &pPdpt->a[iPdPt];
1188
1189 if (ppPdpt)
1190 *ppPdpt = pPdpt;
1191
1192 /* Allocate page directory if not present. */
1193 if ( !pPdpe->n.u1Present
1194 && !(pPdpe->u & EPT_PDPTE_PG_MASK))
1195 {
1196 RTGCPTR64 GCPdPt = (RTGCPTR64)iPdPt << EPT_PDPT_SHIFT;
1197
1198 rc = pgmPoolAlloc(pVM, GCPdPt, PGMPOOLKIND_64BIT_PD_FOR_PHYS, pShwPage->idx, iPdPt, &pShwPage);
1199 AssertRCReturn(rc, rc);
1200 }
1201 else
1202 {
1203 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & EPT_PDPTE_PG_MASK);
1204 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1205
1206 pgmPoolCacheUsed(pPool, pShwPage);
1207 }
1208 /* The PD was cached or created; hook it up now and fill with the default value. */
1209 pPdpe->u = pShwPage->Core.Key;
1210 pPdpe->n.u1Present = 1;
1211 pPdpe->n.u1Write = 1;
1212 pPdpe->n.u1Execute = 1;
1213
1214 *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1215 return VINF_SUCCESS;
1216}
1217
1218#endif /* !IN_RC */
1219
1220/**
1221 * Gets effective Guest OS page information.
1222 *
1223 * When GCPtr is in a big page, the function will return as if it was a normal
1224 * 4KB page. If the need for distinguishing between big and normal page becomes
1225 * necessary at a later point, a PGMGstGetPage() will be created for that
1226 * purpose.
1227 *
1228 * @returns VBox status.
1229 * @param pVCpu VMCPU handle.
1230 * @param GCPtr Guest Context virtual address of the page.
1231 * @param pfFlags Where to store the flags. These are X86_PTE_*, even for big pages.
1232 * @param pGCPhys Where to store the GC physical address of the page.
1233 * This is page aligned.
1234 */
1235VMMDECL(int) PGMGstGetPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
1236{
1237 return PGM_GST_PFN(GetPage, pVCpu)(pVCpu, GCPtr, pfFlags, pGCPhys);
1238}
1239
1240
1241/**
1242 * Checks if the page is present.
1243 *
1244 * @returns true if the page is present.
1245 * @returns false if the page is not present.
1246 * @param pVCpu VMCPU handle.
1247 * @param GCPtr Address within the page.
1248 */
1249VMMDECL(bool) PGMGstIsPagePresent(PVMCPU pVCpu, RTGCPTR GCPtr)
1250{
1251 int rc = PGMGstGetPage(pVCpu, GCPtr, NULL, NULL);
1252 return RT_SUCCESS(rc);
1253}
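/** @par Usage (illustrative sketch; the caller context is assumed, not part of
 *       this file):
 * @code
 *     // Translate a guest virtual address and check that the guest maps it writable.
 *     uint64_t fFlags;
 *     RTGCPHYS GCPhys;
 *     int rc = PGMGstGetPage(pVCpu, GCPtr, &fFlags, &GCPhys);
 *     if (RT_SUCCESS(rc) && (fFlags & X86_PTE_RW))
 *         ;                           // GCPhys is the page-aligned guest-physical address
 * @endcode
 */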
1254
1255
1256/**
1257 * Sets (replaces) the page flags for a range of pages in the guest's tables.
1258 *
1259 * @returns VBox status.
1260 * @param pVCpu VMCPU handle.
1261 * @param GCPtr The address of the first page.
1262 * @param cb The size of the range in bytes.
1263 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
1264 */
1265VMMDECL(int) PGMGstSetPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
1266{
1267 return PGMGstModifyPage(pVCpu, GCPtr, cb, fFlags, 0);
1268}
1269
1270
1271/**
1272 * Modify page flags for a range of pages in the guest's tables
1273 *
1274 * The existing flags are ANDed with the fMask and ORed with the fFlags.
1275 *
1276 * @returns VBox status code.
1277 * @param pVCpu VMCPU handle.
1278 * @param GCPtr Virtual address of the first page in the range.
1279 * @param cb Size (in bytes) of the range to apply the modification to.
1280 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
1281 * @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
1282 * Be very CAREFUL when ~'ing constants which could be 32-bit!
1283 */
1284VMMDECL(int) PGMGstModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
1285{
1286 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,GstModifyPage), a);
1287
1288 /*
1289 * Validate input.
1290 */
1291 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
1292 Assert(cb);
1293
1294 LogFlow(("PGMGstModifyPage %RGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));
1295
1296 /*
1297 * Adjust input.
1298 */
1299 cb += GCPtr & PAGE_OFFSET_MASK;
1300 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
1301 GCPtr = (GCPtr & PAGE_BASE_GC_MASK);
1302
1303 /*
1304 * Call worker.
1305 */
1306 int rc = PGM_GST_PFN(ModifyPage, pVCpu)(pVCpu, GCPtr, cb, fFlags, fMask);
1307
1308 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,GstModifyPage), a);
1309 return rc;
1310}
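/** @par Usage (illustrative sketch; the range is assumed, not part of this
 *       file):
 * @code
 *     // Mark a range accessed and dirty in the guest tables without touching
 *     // any other bits (all-ones AND mask, A/D bits ORed in).
 *     int rc = PGMGstModifyPage(pVCpu, GCPtr, cb, X86_PTE_A | X86_PTE_D, ~(uint64_t)0);
 * @endcode
 */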
1311
1312#ifdef IN_RING3
1313
1314/**
1315 * Performs the lazy mapping of the 32-bit guest PD.
1316 *
1317 * @returns Pointer to the mapping.
1318 * @param pPGM The PGM instance data.
1319 */
1320PX86PD pgmGstLazyMap32BitPD(PPGMCPU pPGM)
1321{
1322 Assert(!pPGM->CTX_SUFF(pGst32BitPd));
1323 PVM pVM = PGMCPU2VM(pPGM);
1324 pgmLock(pVM);
1325
1326 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, pPGM->GCPhysCR3);
1327 AssertReturn(pPage, NULL);
1328
1329 RTHCPTR HCPtrGuestCR3;
1330 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pPGM->GCPhysCR3 & X86_CR3_PAGE_MASK, (void **)&HCPtrGuestCR3);
1331 AssertRCReturn(rc, NULL);
1332
1333 pPGM->pGst32BitPdR3 = (R3PTRTYPE(PX86PD))HCPtrGuestCR3;
1334# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1335 pPGM->pGst32BitPdR0 = (R0PTRTYPE(PX86PD))HCPtrGuestCR3;
1336# endif
1337
1338 pgmUnlock(pVM);
1339 return pPGM->CTX_SUFF(pGst32BitPd);
1340}
1341
1342
1343/**
1344 * Performs the lazy mapping of the PAE guest PDPT.
1345 *
1346 * @returns Pointer to the mapping.
1347 * @param pPGM The PGM instance data.
1348 */
1349PX86PDPT pgmGstLazyMapPaePDPT(PPGMCPU pPGM)
1350{
1351 Assert(!pPGM->CTX_SUFF(pGstPaePdpt));
1352 PVM pVM = PGMCPU2VM(pPGM);
1353 pgmLock(pVM);
1354
1355 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, pPGM->GCPhysCR3);
1356 AssertReturn(pPage, NULL);
1357
1358 RTHCPTR HCPtrGuestCR3;
1359 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pPGM->GCPhysCR3 & X86_CR3_PAE_PAGE_MASK, (void **)&HCPtrGuestCR3);
1360 AssertRCReturn(rc, NULL);
1361
1362 pPGM->pGstPaePdptR3 = (R3PTRTYPE(PX86PDPT))HCPtrGuestCR3;
1363# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1364 pPGM->pGstPaePdptR0 = (R0PTRTYPE(PX86PDPT))HCPtrGuestCR3;
1365# endif
1366
1367 pgmUnlock(pVM);
1368 return pPGM->CTX_SUFF(pGstPaePdpt);
1369}
1370
1371#endif /* IN_RING3 */
1372
1373#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1374/**
1375 * Performs the lazy mapping / updating of a PAE guest PD.
1376 *
1377 * @returns Pointer to the mapping.
1378 * @param pPGM The PGM instance data.
1379 * @param iPdpt Which PD entry to map (0..3).
1380 */
1381PX86PDPAE pgmGstLazyMapPaePD(PPGMCPU pPGM, uint32_t iPdpt)
1382{
1383 PVM pVM = PGMCPU2VM(pPGM);
1384 pgmLock(pVM);
1385
1386 PX86PDPT pGuestPDPT = pPGM->CTX_SUFF(pGstPaePdpt);
1387 Assert(pGuestPDPT);
1388 Assert(pGuestPDPT->a[iPdpt].n.u1Present);
1389 RTGCPHYS GCPhys = pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK;
1390 bool const fChanged = pPGM->aGCPhysGstPaePDs[iPdpt] != GCPhys;
1391
1392 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
1393 if (RT_LIKELY(pPage))
1394 {
1395 int rc = VINF_SUCCESS;
1396 RTRCPTR RCPtr = NIL_RTRCPTR;
1397 RTHCPTR HCPtr = NIL_RTHCPTR;
1398#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1399 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &HCPtr);
1400 AssertRC(rc);
1401#endif
1402 if (RT_SUCCESS(rc) && fChanged)
1403 {
1404 RCPtr = (RTRCPTR)(RTRCUINTPTR)(pVM->pgm.s.GCPtrCR3Mapping + (1 + iPdpt) * PAGE_SIZE);
1405 rc = PGMMap(pVM, (RTRCUINTPTR)RCPtr, PGM_PAGE_GET_HCPHYS(pPage), PAGE_SIZE, 0);
1406 }
1407 if (RT_SUCCESS(rc))
1408 {
1409 pPGM->apGstPaePDsR3[iPdpt] = (R3PTRTYPE(PX86PDPAE))HCPtr;
1410# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1411 pPGM->apGstPaePDsR0[iPdpt] = (R0PTRTYPE(PX86PDPAE))HCPtr;
1412# endif
1413 if (fChanged)
1414 {
1415 pPGM->aGCPhysGstPaePDs[iPdpt] = GCPhys;
1416 pPGM->apGstPaePDsRC[iPdpt] = (RCPTRTYPE(PX86PDPAE))RCPtr;
1417 }
1418
1419 pgmUnlock(pVM);
1420 return pPGM->CTX_SUFF(apGstPaePDs)[iPdpt];
1421 }
1422 }
1423
1424 /* Invalid page or some failure, invalidate the entry. */
1425 pPGM->aGCPhysGstPaePDs[iPdpt] = NIL_RTGCPHYS;
1426 pPGM->apGstPaePDsR3[iPdpt] = 0;
1427# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1428 pPGM->apGstPaePDsR0[iPdpt] = 0;
1429# endif
1430 pPGM->apGstPaePDsRC[iPdpt] = 0;
1431
1432 pgmUnlock(pVM);
1433 return NULL;
1434}
1435#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1436
1437
1438#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R3
1439/**
1440 * Performs the lazy mapping of the AMD64 guest PML4 table.
1441 *
1442 * @returns Pointer to the mapping.
1443 * @param pPGM The PGM instance data.
1444 */
1445PX86PML4 pgmGstLazyMapPml4(PPGMCPU pPGM)
1446{
1447 Assert(!pPGM->CTX_SUFF(pGstAmd64Pml4));
1448 PVM pVM = PGMCPU2VM(pPGM);
1449 pgmLock(pVM);
1450
1451 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, pPGM->GCPhysCR3);
1452 AssertReturn(pPage, NULL);
1453
1454 RTHCPTR HCPtrGuestCR3;
1455 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pPGM->GCPhysCR3 & X86_CR3_AMD64_PAGE_MASK, (void **)&HCPtrGuestCR3);
1456 AssertRCReturn(rc, NULL);
1457
1458 pPGM->pGstAmd64Pml4R3 = (R3PTRTYPE(PX86PML4))HCPtrGuestCR3;
1459# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1460 pPGM->pGstAmd64Pml4R0 = (R0PTRTYPE(PX86PML4))HCPtrGuestCR3;
1461# endif
1462
1463 pgmUnlock(pVM);
1464 return pPGM->CTX_SUFF(pGstAmd64Pml4);
1465}
1466#endif /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R3 */
1467
1468
1469/**
1470 * Gets the specified page directory pointer table entry.
1471 *
1472 * @returns PDP entry
1473 * @param pVCpu VMCPU handle.
1474 * @param iPdpt PDPT index
1475 */
1476VMMDECL(X86PDPE) PGMGstGetPaePDPtr(PVMCPU pVCpu, unsigned iPdpt)
1477{
1478 Assert(iPdpt <= 3);
1479 return pgmGstGetPaePDPTPtr(&pVCpu->pgm.s)->a[iPdpt & 3];
1480}
1481
1482
1483/**
1484 * Gets the current CR3 register value for the shadow memory context.
1485 * @returns CR3 value.
1486 * @param pVCpu VMCPU handle.
1487 */
1488VMMDECL(RTHCPHYS) PGMGetHyperCR3(PVMCPU pVCpu)
1489{
1490 PPGMPOOLPAGE pPoolPage = pVCpu->pgm.s.CTX_SUFF(pShwPageCR3);
1491 AssertPtrReturn(pPoolPage, 0);
1492 return pPoolPage->Core.Key;
1493}
1494
1495
1496/**
1497 * Gets the current CR3 register value for the nested memory context.
1498 * @returns CR3 value.
1499 * @param pVCpu VMCPU handle.
1500 */
1501VMMDECL(RTHCPHYS) PGMGetNestedCR3(PVMCPU pVCpu, PGMMODE enmShadowMode)
1502{
1503 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1504 return pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key;
1505}
1506
1507
1508/**
1509 * Gets the current CR3 register value for the HC intermediate memory context.
1510 * @returns CR3 value.
1511 * @param pVM The VM handle.
1512 */
1513VMMDECL(RTHCPHYS) PGMGetInterHCCR3(PVM pVM)
1514{
1515 switch (pVM->pgm.s.enmHostMode)
1516 {
1517 case SUPPAGINGMODE_32_BIT:
1518 case SUPPAGINGMODE_32_BIT_GLOBAL:
1519 return pVM->pgm.s.HCPhysInterPD;
1520
1521 case SUPPAGINGMODE_PAE:
1522 case SUPPAGINGMODE_PAE_GLOBAL:
1523 case SUPPAGINGMODE_PAE_NX:
1524 case SUPPAGINGMODE_PAE_GLOBAL_NX:
1525 return pVM->pgm.s.HCPhysInterPaePDPT;
1526
1527 case SUPPAGINGMODE_AMD64:
1528 case SUPPAGINGMODE_AMD64_GLOBAL:
1529 case SUPPAGINGMODE_AMD64_NX:
1530 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
1531 return pVM->pgm.s.HCPhysInterPaePDPT;
1532
1533 default:
1534 AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode));
1535 return ~0;
1536 }
1537}
1538
1539
1540/**
1541 * Gets the current CR3 register value for the RC intermediate memory context.
1542 * @returns CR3 value.
1543 * @param pVM The VM handle.
1544 * @param pVCpu VMCPU handle.
1545 */
1546VMMDECL(RTHCPHYS) PGMGetInterRCCR3(PVM pVM, PVMCPU pVCpu)
1547{
1548 switch (pVCpu->pgm.s.enmShadowMode)
1549 {
1550 case PGMMODE_32_BIT:
1551 return pVM->pgm.s.HCPhysInterPD;
1552
1553 case PGMMODE_PAE:
1554 case PGMMODE_PAE_NX:
1555 return pVM->pgm.s.HCPhysInterPaePDPT;
1556
1557 case PGMMODE_AMD64:
1558 case PGMMODE_AMD64_NX:
1559 return pVM->pgm.s.HCPhysInterPaePML4;
1560
1561 case PGMMODE_EPT:
1562 case PGMMODE_NESTED:
1563 return 0; /* not relevant */
1564
1565 default:
1566 AssertMsgFailed(("enmShadowMode=%d\n", pVCpu->pgm.s.enmShadowMode));
1567 return ~0;
1568 }
1569}
1570
1571
1572/**
1573 * Gets the CR3 register value for the 32-Bit intermediate memory context.
1574 * @returns CR3 value.
1575 * @param pVM The VM handle.
1576 */
1577VMMDECL(RTHCPHYS) PGMGetInter32BitCR3(PVM pVM)
1578{
1579 return pVM->pgm.s.HCPhysInterPD;
1580}
1581
1582
1583/**
1584 * Gets the CR3 register value for the PAE intermediate memory context.
1585 * @returns CR3 value.
1586 * @param pVM The VM handle.
1587 */
1588VMMDECL(RTHCPHYS) PGMGetInterPaeCR3(PVM pVM)
1589{
1590 return pVM->pgm.s.HCPhysInterPaePDPT;
1591}
1592
1593
1594/**
1595 * Gets the CR3 register value for the AMD64 intermediate memory context.
1596 * @returns CR3 value.
1597 * @param pVM The VM handle.
1598 */
1599VMMDECL(RTHCPHYS) PGMGetInterAmd64CR3(PVM pVM)
1600{
1601 return pVM->pgm.s.HCPhysInterPaePML4;
1602}
1603
1604
1605/**
1606 * Performs and schedules necessary updates following a CR3 load or reload.
1607 *
1608 * This will normally involve mapping the guest PD or nPDPT.
1609 *
1610 * @returns VBox status code.
1611 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
1612 * safely be ignored and overridden since the FF will be set too then.
1613 * @param pVCpu VMCPU handle.
1614 * @param cr3 The new cr3.
1615 * @param fGlobal Indicates whether this is a global flush or not.
1616 */
1617VMMDECL(int) PGMFlushTLB(PVMCPU pVCpu, uint64_t cr3, bool fGlobal)
1618{
1619 PVM pVM = pVCpu->CTX_SUFF(pVM);
1620
1621 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLB), a);
1622
1623 /*
1624 * Always flag the necessary updates; necessary for hardware acceleration
1625 */
1626 /** @todo optimize this, it shouldn't always be necessary. */
1627 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
1628 if (fGlobal)
1629 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1630 LogFlow(("PGMFlushTLB: cr3=%RX64 OldCr3=%RX64 fGlobal=%d\n", cr3, pVCpu->pgm.s.GCPhysCR3, fGlobal));
1631
1632 /*
1633 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
1634 */
1635 int rc = VINF_SUCCESS;
1636 RTGCPHYS GCPhysCR3;
1637 switch (pVCpu->pgm.s.enmGuestMode)
1638 {
1639 case PGMMODE_PAE:
1640 case PGMMODE_PAE_NX:
1641 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1642 break;
1643 case PGMMODE_AMD64:
1644 case PGMMODE_AMD64_NX:
1645 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
1646 break;
1647 default:
1648 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1649 break;
1650 }
1651
1652 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
1653 {
1654 RTGCPHYS GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
1655 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
1656 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
1657 if (RT_LIKELY(rc == VINF_SUCCESS))
1658 {
1659 if (!pVM->pgm.s.fMappingsFixed)
1660 {
1661 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1662 }
1663 }
1664 else
1665 {
1666 AssertMsg(rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
1667 Assert(VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_PGM_SYNC_CR3));
1668 pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3;
1669 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MAP_CR3;
1670 if (!pVM->pgm.s.fMappingsFixed)
1671 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;
1672 }
1673
1674 if (fGlobal)
1675 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLBNewCR3Global));
1676 else
1677 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLBNewCR3));
1678 }
1679 else
1680 {
1681 /*
1682 * Check if we have a pending update of the CR3 monitoring.
1683 */
1684 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
1685 {
1686 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1687 Assert(!pVM->pgm.s.fMappingsFixed);
1688 }
1689 if (fGlobal)
1690 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLBSameCR3Global));
1691 else
1692 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLBSameCR3));
1693 }
1694
1695 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLB), a);
1696 return rc;
1697}
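/** @par Usage (illustrative sketch; the MOV CR3 emulation context is assumed,
 *       not part of this file):
 * @code
 *     // A MOV CR3 interpreter hands the new value to PGM; fGlobal is false for
 *     // a plain CR3 reload (non-global flush semantics).
 *     int rc = PGMFlushTLB(pVCpu, uNewCr3, false);   // fGlobal = false
 *     if (rc == VINF_PGM_SYNC_CR3)
 *         ;                           // safe to ignore, the SYNC_CR3 force action flag is set
 * @endcode
 */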
1698
1699
1700/**
1701 * Performs and schedules necessary updates following a CR3 load or reload when
1702 * using nested or extended paging.
1703 *
1704 * This API is an alternative to PGMFlushTLB that avoids actually flushing the
1705 * TLB and triggering a SyncCR3.
1706 *
1707 * This will normally involve mapping the guest PD or nPDPT.
1708 *
1709 * @returns VBox status code.
1710 * @retval VINF_SUCCESS.
1711 * @retval (If applied when not in nested mode: VINF_PGM_SYNC_CR3 if monitoring
1712 * requires a CR3 sync. This can safely be ignored and overridden since
1713 * the FF will be set too then.)
1714 * @param pVCpu VMCPU handle.
1715 * @param cr3 The new cr3.
1716 */
1717VMMDECL(int) PGMUpdateCR3(PVMCPU pVCpu, uint64_t cr3)
1718{
1719 PVM pVM = pVCpu->CTX_SUFF(pVM);
1720
1721 LogFlow(("PGMUpdateCR3: cr3=%RX64 OldCr3=%RX64\n", cr3, pVCpu->pgm.s.GCPhysCR3));
1722
1723 /* We assume we're only called in nested paging mode. */
1724 Assert(pVM->pgm.s.fMappingsFixed);
1725 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3));
1726 Assert(HWACCMIsNestedPagingActive(pVM) || pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
1727
1728 /*
1729 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
1730 */
1731 int rc = VINF_SUCCESS;
1732 RTGCPHYS GCPhysCR3;
1733 switch (pVCpu->pgm.s.enmGuestMode)
1734 {
1735 case PGMMODE_PAE:
1736 case PGMMODE_PAE_NX:
1737 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1738 break;
1739 case PGMMODE_AMD64:
1740 case PGMMODE_AMD64_NX:
1741 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
1742 break;
1743 default:
1744 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1745 break;
1746 }
1747 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
1748 {
1749 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
1750 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
1751 AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */ /** @todo this isn't true for the mac, but we need hw to test/fix this. */
1752 }
1753 return rc;
1754}
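/*
 * The guest-mode dependent CR3 masking above is repeated verbatim in
 * PGMFlushTLB, PGMUpdateCR3 and PGMSyncCR3.  A hypothetical helper could factor
 * it out; pgmGetGuestMaskedCr3 is an illustrative name, not an existing function.
 *
 *     DECLINLINE(RTGCPHYS) pgmGetGuestMaskedCr3(PVMCPU pVCpu, uint64_t cr3)
 *     {
 *         switch (pVCpu->pgm.s.enmGuestMode)
 *         {
 *             case PGMMODE_PAE:
 *             case PGMMODE_PAE_NX:
 *                 return (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
 *             case PGMMODE_AMD64:
 *             case PGMMODE_AMD64_NX:
 *                 return (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
 *             default:
 *                 return (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
 *         }
 *     }
 */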
1755
1756
1757/**
1758 * Synchronize the paging structures.
1759 *
1760 * This function is called in response to the VM_FF_PGM_SYNC_CR3 and
1761 * VM_FF_PGM_SYNC_CR3_NONGLOBAL. Those two force action flags are set
1762 * in several places, most importantly whenever the CR3 is loaded.
1763 *
1764 * @returns VBox status code.
1765 * @param pVCpu VMCPU handle.
1766 * @param cr0 Guest context CR0 register
1767 * @param cr3 Guest context CR3 register
1768 * @param cr4 Guest context CR4 register
1769 * @param fGlobal Including global page directories or not
1770 */
1771VMMDECL(int) PGMSyncCR3(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
1772{
1773 PVM pVM = pVCpu->CTX_SUFF(pVM);
1774 int rc;
1775
1776#ifdef PGMPOOL_WITH_MONITORING
1777 /*
1778 * The pool may have pending stuff and even require a return to ring-3 to
1779 * clear the whole thing.
1780 */
1781 rc = pgmPoolSyncCR3(pVCpu);
1782 if (rc != VINF_SUCCESS)
1783 return rc;
1784#endif
1785
1786 /*
1787 * We might be called when we shouldn't.
1788 *
1789 * The mode switching will ensure that the PD is resynced
1790 * after every mode switch. So, if we find ourselves here
1791 * when in protected or real mode we can safely disable the
1792 * FF and return immediately.
1793 */
1794 if (pVCpu->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
1795 {
1796 Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
1797 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1798 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
1799 return VINF_SUCCESS;
1800 }
1801
1802 /* If global pages are not supported, then all flushes are global. */
1803 if (!(cr4 & X86_CR4_PGE))
1804 fGlobal = true;
1805 LogFlow(("PGMSyncCR3: cr0=%RX64 cr3=%RX64 cr4=%RX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
1806 VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)));
1807
1808 /*
1809 * Check if we need to finish an aborted MapCR3 call (see PGMFlushTLB).
1810 * This should be done before SyncCR3.
1811 */
1812 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MAP_CR3)
1813 {
1814 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MAP_CR3;
1815
1816 RTGCPHYS GCPhysCR3Old = pVCpu->pgm.s.GCPhysCR3;
1817 RTGCPHYS GCPhysCR3;
1818 switch (pVCpu->pgm.s.enmGuestMode)
1819 {
1820 case PGMMODE_PAE:
1821 case PGMMODE_PAE_NX:
1822 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1823 break;
1824 case PGMMODE_AMD64:
1825 case PGMMODE_AMD64_NX:
1826 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
1827 break;
1828 default:
1829 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1830 break;
1831 }
1832
1833 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
1834 {
1835 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
1836 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
1837 }
1838#ifdef IN_RING3
1839 if (rc == VINF_PGM_SYNC_CR3)
1840 rc = pgmPoolSyncCR3(pVCpu);
1841#else
1842 if (rc == VINF_PGM_SYNC_CR3)
1843 {
1844 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3Old;
1845 return rc;
1846 }
1847#endif
1848 AssertRCReturn(rc, rc);
1849 AssertRCSuccessReturn(rc, VERR_INTERNAL_ERROR);
1850 }
1851
1852 /*
1853 * Let the 'Bth' function do the work and we'll just keep track of the flags.
1854 */
1855 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
1856 rc = PGM_BTH_PFN(SyncCR3, pVCpu)(pVCpu, cr0, cr3, cr4, fGlobal);
1857 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
1858 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
1859 if (rc == VINF_SUCCESS)
1860 {
1861 if (!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
1862 {
1863 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1864 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
1865 }
1866
1867 /*
1868 * Check if we have a pending update of the CR3 monitoring.
1869 */
1870 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
1871 {
1872 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1873 Assert(!pVM->pgm.s.fMappingsFixed);
1874 }
1875 }
1876
1877 /*
1878 * Now flush the CR3 (guest context).
1879 */
1880 if (rc == VINF_SUCCESS)
1881 PGM_INVL_VCPU_TLBS(pVCpu);
1882 return rc;
1883}
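/*
 * A minimal sketch of the force-action driven call pattern PGMSyncCR3 is
 * written for.  Illustrative only: the surrounding execution loop and the
 * cr0/cr3/cr4 locals are assumed.
 *
 *     if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
 *     {
 *         // Only sync global pages too when the stronger flag is set.
 *         bool fGlobal = VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
 *         int  rc      = PGMSyncCR3(pVCpu, cr0, cr3, cr4, fGlobal);
 *         if (rc != VINF_SUCCESS)
 *             return rc;   // e.g. VINF_PGM_SYNC_CR3 from the pool, to be handled in ring-3
 *     }
 */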
1884
1885
1886/**
1887 * Called whenever CR0 or CR4 changes in a way which may affect
1888 * the paging mode.
1889 *
1890 * @returns VBox status code, with the following informational code for
1891 * VM scheduling.
1892 * @retval VINF_SUCCESS if there was no change, or it was successfully dealt with.
1893 * @retval VINF_PGM_CHANGE_MODE if we're in RC or R0 and the mode changes.
1894 * (I.e. not in R3.)
1895 * @retval VINF_EM_SUSPEND or VINF_EM_OFF on a fatal runtime error. (R3 only)
1896 *
1897 * @param pVCpu VMCPU handle.
1898 * @param cr0 The new cr0.
1899 * @param cr4 The new cr4.
1900 * @param efer The new extended feature enable register.
1901 */
1902VMMDECL(int) PGMChangeMode(PVMCPU pVCpu, uint64_t cr0, uint64_t cr4, uint64_t efer)
1903{
1904 PVM pVM = pVCpu->CTX_SUFF(pVM);
1905 PGMMODE enmGuestMode;
1906
1907 /*
1908 * Calc the new guest mode.
1909 */
1910 if (!(cr0 & X86_CR0_PE))
1911 enmGuestMode = PGMMODE_REAL;
1912 else if (!(cr0 & X86_CR0_PG))
1913 enmGuestMode = PGMMODE_PROTECTED;
1914 else if (!(cr4 & X86_CR4_PAE))
1915 enmGuestMode = PGMMODE_32_BIT;
1916 else if (!(efer & MSR_K6_EFER_LME))
1917 {
1918 if (!(efer & MSR_K6_EFER_NXE))
1919 enmGuestMode = PGMMODE_PAE;
1920 else
1921 enmGuestMode = PGMMODE_PAE_NX;
1922 }
1923 else
1924 {
1925 if (!(efer & MSR_K6_EFER_NXE))
1926 enmGuestMode = PGMMODE_AMD64;
1927 else
1928 enmGuestMode = PGMMODE_AMD64_NX;
1929 }
1930
1931 /*
1932 * Did it change?
1933 */
1934 if (pVCpu->pgm.s.enmGuestMode == enmGuestMode)
1935 return VINF_SUCCESS;
1936
1937 /* Flush the TLB */
1938 PGM_INVL_VCPU_TLBS(pVCpu);
1939
1940#ifdef IN_RING3
1941 return PGMR3ChangeMode(pVM, pVCpu, enmGuestMode);
1942#else
1943 LogFlow(("PGMChangeMode: returns VINF_PGM_CHANGE_MODE.\n"));
1944 return VINF_PGM_CHANGE_MODE;
1945#endif
1946}
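/*
 * A minimal sketch of how a control-register write emulation might use
 * PGMChangeMode.  Illustrative only: the cr0/cr4/efer locals are assumed to
 * hold the new guest values.
 *
 *     int rc = PGMChangeMode(pVCpu, cr0, cr4, efer);
 *     if (rc == VINF_PGM_CHANGE_MODE)
 *     {
 *         // Only possible in RC/R0: the actual switch (PGMR3ChangeMode) has to
 *         // be done in ring-3, so hand the status back to the caller.
 *         return rc;
 *     }
 *     AssertRC(rc);
 */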
1947
1948
1949/**
1950 * Gets the current guest paging mode.
1951 *
1952 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
1953 *
1954 * @returns The current paging mode.
1955 * @param pVCpu VMCPU handle.
1956 */
1957VMMDECL(PGMMODE) PGMGetGuestMode(PVMCPU pVCpu)
1958{
1959 return pVCpu->pgm.s.enmGuestMode;
1960}
1961
1962
1963/**
1964 * Gets the current shadow paging mode.
1965 *
1966 * @returns The current paging mode.
1967 * @param pVCpu VMCPU handle.
1968 */
1969VMMDECL(PGMMODE) PGMGetShadowMode(PVMCPU pVCpu)
1970{
1971 return pVCpu->pgm.s.enmShadowMode;
1972}
1973
1974/**
1975 * Gets the current host paging mode.
1976 *
1977 * @returns The current paging mode.
1978 * @param pVM The VM handle.
1979 */
1980VMMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
1981{
1982 switch (pVM->pgm.s.enmHostMode)
1983 {
1984 case SUPPAGINGMODE_32_BIT:
1985 case SUPPAGINGMODE_32_BIT_GLOBAL:
1986 return PGMMODE_32_BIT;
1987
1988 case SUPPAGINGMODE_PAE:
1989 case SUPPAGINGMODE_PAE_GLOBAL:
1990 return PGMMODE_PAE;
1991
1992 case SUPPAGINGMODE_PAE_NX:
1993 case SUPPAGINGMODE_PAE_GLOBAL_NX:
1994 return PGMMODE_PAE_NX;
1995
1996 case SUPPAGINGMODE_AMD64:
1997 case SUPPAGINGMODE_AMD64_GLOBAL:
1998 return PGMMODE_AMD64;
1999
2000 case SUPPAGINGMODE_AMD64_NX:
2001 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
2002 return PGMMODE_AMD64_NX;
2003
2004 default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
2005 }
2006
2007 return PGMMODE_INVALID;
2008}
2009
2010
2011/**
2012 * Get mode name.
2013 *
2014 * @returns read-only name string.
2015 * @param enmMode The mode which name is desired.
2016 */
2017VMMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
2018{
2019 switch (enmMode)
2020 {
2021 case PGMMODE_REAL: return "Real";
2022 case PGMMODE_PROTECTED: return "Protected";
2023 case PGMMODE_32_BIT: return "32-bit";
2024 case PGMMODE_PAE: return "PAE";
2025 case PGMMODE_PAE_NX: return "PAE+NX";
2026 case PGMMODE_AMD64: return "AMD64";
2027 case PGMMODE_AMD64_NX: return "AMD64+NX";
2028 case PGMMODE_NESTED: return "Nested";
2029 case PGMMODE_EPT: return "EPT";
2030 default: return "unknown mode value";
2031 }
2032}
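/*
 * A small illustrative use of the mode query helpers above, e.g. for logging:
 *
 *     Log(("PGM: guest mode %s, shadow mode %s, host mode %s\n",
 *          PGMGetModeName(PGMGetGuestMode(pVCpu)),
 *          PGMGetModeName(PGMGetShadowMode(pVCpu)),
 *          PGMGetModeName(PGMGetHostMode(pVM))));
 */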
2033
2034
2035/**
2036 * Check if the PGM lock is currently taken.
2037 *
2038 * @returns bool locked/not locked
2039 * @param pVM The VM to operate on.
2040 */
2041VMMDECL(bool) PGMIsLocked(PVM pVM)
2042{
2043 return PDMCritSectIsOwned(&pVM->pgm.s.CritSect);
2044}
2045
2046
2047/**
2048 * Check if this VCPU currently owns the PGM lock.
2049 *
2050 * @returns bool owner/not owner
2051 * @param pVM The VM to operate on.
2052 */
2053VMMDECL(bool) PGMIsLockOwner(PVM pVM)
2054{
2055 return PDMCritSectIsOwner(&pVM->pgm.s.CritSect);
2056}
2057
2058
2059/**
2060 * Acquire the PGM lock.
2061 *
2062 * @returns VBox status code
2063 * @param pVM The VM to operate on.
2064 */
2065int pgmLock(PVM pVM)
2066{
2067 int rc = PDMCritSectEnter(&pVM->pgm.s.CritSect, VERR_SEM_BUSY);
2068#ifdef IN_RC
2069 if (rc == VERR_SEM_BUSY)
2070 rc = VMMGCCallHost(pVM, VMMCALLHOST_PGM_LOCK, 0);
2071#elif defined(IN_RING0)
2072 if (rc == VERR_SEM_BUSY)
2073 rc = VMMR0CallHost(pVM, VMMCALLHOST_PGM_LOCK, 0);
2074#endif
2075 AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc));
2076 return rc;
2077}
2078
2079
2080/**
2081 * Release the PGM lock.
2082 *
2083 * @returns VBox status code
2084 * @param pVM The VM to operate on.
2085 */
2086void pgmUnlock(PVM pVM)
2087{
2088 PDMCritSectLeave(&pVM->pgm.s.CritSect);
2089}
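/*
 * The internal lock/unlock pair above is used bracket-style around PGM state
 * updates, roughly as sketched below (illustrative only).  Note from the code
 * above that pgmLock may have to call back to ring-3 (VMMCALLHOST_PGM_LOCK)
 * when the critical section is contended in RC/R0.
 *
 *     pgmLock(pVM);
 *     // ... touch pVM->pgm.s / pVCpu->pgm.s state that requires the PGM lock ...
 *     pgmUnlock(pVM);
 */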
2090
2091#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
2092
2093/**
2094 * Temporarily maps one guest page specified by GC physical address.
2095 * These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
2096 *
2097 * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
2098 * reused after 8 mappings (or perhaps a few more if you score with the cache).
2099 *
2100 * @returns VBox status.
2101 * @param pVM VM handle.
2102 * @param GCPhys GC Physical address of the page.
2103 * @param ppv Where to store the address of the mapping.
2104 */
2105VMMDECL(int) PGMDynMapGCPage(PVM pVM, RTGCPHYS GCPhys, void **ppv)
2106{
2107 AssertMsg(!(GCPhys & PAGE_OFFSET_MASK), ("GCPhys=%RGp\n", GCPhys));
2108
2109 /*
2110 * Get the ram range.
2111 */
2112 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
2113 while (pRam && GCPhys - pRam->GCPhys >= pRam->cb)
2114 pRam = pRam->CTX_SUFF(pNext);
2115 if (!pRam)
2116 {
2117 AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
2118 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2119 }
2120
2121 /*
2122 * Pass it on to PGMDynMapHCPage.
2123 */
2124 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT]);
2125 //Log(("PGMDynMapGCPage: GCPhys=%RGp HCPhys=%RHp\n", GCPhys, HCPhys));
2126#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
2127 pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
2128#else
2129 PGMDynMapHCPage(pVM, HCPhys, ppv);
2130#endif
2131 return VINF_SUCCESS;
2132}
2133
2134
2135/**
2136 * Temporarily maps one guest page specified by unaligned GC physical address.
2137 * These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
2138 *
2139 * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
2140 * reused after 8 mappings (or perhaps a few more if you score with the cache).
2141 *
2142 * The caller must be aware that only the specified page is mapped and that really bad
2143 * things will happen if writing beyond the page!
2144 *
2145 * @returns VBox status.
2146 * @param pVM VM handle.
2147 * @param GCPhys GC Physical address within the page to be mapped.
2148 * @param ppv Where to store the address of the mapping address corresponding to GCPhys.
2149 */
2150VMMDECL(int) PGMDynMapGCPageOff(PVM pVM, RTGCPHYS GCPhys, void **ppv)
2151{
2152 /*
2153 * Get the ram range.
2154 */
2155 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
2156 while (pRam && GCPhys - pRam->GCPhys >= pRam->cb)
2157 pRam = pRam->CTX_SUFF(pNext);
2158 if (!pRam)
2159 {
2160 AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
2161 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2162 }
2163
2164 /*
2165 * Pass it on to PGMDynMapHCPage.
2166 */
2167 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT]);
2168#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
2169 pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
2170#else
2171 PGMDynMapHCPage(pVM, HCPhys, ppv);
2172#endif
2173 *ppv = (void *)((uintptr_t)*ppv | (GCPhys & PAGE_OFFSET_MASK));
2174 return VINF_SUCCESS;
2175}
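/*
 * A minimal sketch of reading a few bytes of guest RAM through the dynamic
 * mapping API above.  Illustrative only: the abBuf/cb locals are assumed and
 * error handling is trimmed.
 *
 *     void *pvSrc;
 *     int rc = PGMDynMapGCPageOff(pVM, GCPhys, &pvSrc);
 *     if (RT_SUCCESS(rc))
 *     {
 *         // The mapping never extends beyond the page containing GCPhys.
 *         Assert(cb <= PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK));
 *         memcpy(abBuf, pvSrc, cb);
 *     }
 */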
2176
2177# ifdef IN_RC
2178
2179/**
2180 * Temporarily maps one host page specified by HC physical address.
2181 *
2182 * Be WARNED that the dynamic page mapping area is small, 16 pages, thus the space is
2183 * reused after 16 mappings (or perhaps a few more if you score with the cache).
2184 *
2185 * @returns VINF_SUCCESS, will bail out to ring-3 on failure.
2186 * @param pVM VM handle.
2187 * @param HCPhys HC Physical address of the page.
2188 * @param ppv Where to store the address of the mapping. This is the
2189 * address of the PAGE not the exact address corresponding
2190 * to HCPhys. Use PGMDynMapHCPageOff if you care for the
2191 * page offset.
2192 */
2193VMMDECL(int) PGMDynMapHCPage(PVM pVM, RTHCPHYS HCPhys, void **ppv)
2194{
2195 AssertMsg(!(HCPhys & PAGE_OFFSET_MASK), ("HCPhys=%RHp\n", HCPhys));
2196
2197 /*
2198 * Check the cache.
2199 */
2200 register unsigned iCache;
2201 for (iCache = 0;iCache < RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache);iCache++)
2202 {
2203 static const uint8_t au8Trans[MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT][RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache)] =
2204 {
2205 { 0, 9, 10, 11, 12, 13, 14, 15},
2206 { 0, 1, 10, 11, 12, 13, 14, 15},
2207 { 0, 1, 2, 11, 12, 13, 14, 15},
2208 { 0, 1, 2, 3, 12, 13, 14, 15},
2209 { 0, 1, 2, 3, 4, 13, 14, 15},
2210 { 0, 1, 2, 3, 4, 5, 14, 15},
2211 { 0, 1, 2, 3, 4, 5, 6, 15},
2212 { 0, 1, 2, 3, 4, 5, 6, 7},
2213 { 8, 1, 2, 3, 4, 5, 6, 7},
2214 { 8, 9, 2, 3, 4, 5, 6, 7},
2215 { 8, 9, 10, 3, 4, 5, 6, 7},
2216 { 8, 9, 10, 11, 4, 5, 6, 7},
2217 { 8, 9, 10, 11, 12, 5, 6, 7},
2218 { 8, 9, 10, 11, 12, 13, 6, 7},
2219 { 8, 9, 10, 11, 12, 13, 14, 7},
2220 { 8, 9, 10, 11, 12, 13, 14, 15},
2221 };
2222 AssertCompile(RT_ELEMENTS(au8Trans) == 16);
2223 AssertCompile(RT_ELEMENTS(au8Trans[0]) == 8);
2224
2225 if (pVM->pgm.s.aHCPhysDynPageMapCache[iCache] == HCPhys)
2226 {
2227 int iPage = au8Trans[pVM->pgm.s.iDynPageMapLast][iCache];
2228
2229 /* The cache can get out of sync with locked entries. (10 locked, 2 overwrites its cache position, last = 11, lookup 2 -> page 10 instead of 2) */
2230 if ((pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage].u & X86_PTE_PG_MASK) == HCPhys)
2231 {
2232 void *pv = pVM->pgm.s.pbDynPageMapBaseGC + (iPage << PAGE_SHIFT);
2233 *ppv = pv;
2234 STAM_COUNTER_INC(&pVM->pgm.s.StatRCDynMapCacheHits);
2235 Log4(("PGMGCDynMapHCPage: HCPhys=%RHp pv=%p iPage=%d iCache=%d\n", HCPhys, pv, iPage, iCache));
2236 return VINF_SUCCESS;
2237 }
2238 LogFlow(("Out of sync entry %d\n", iPage));
2239 }
2240 }
2241 AssertCompile(RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) == 8);
2242 AssertCompile((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) == 16);
2243 STAM_COUNTER_INC(&pVM->pgm.s.StatRCDynMapCacheMisses);
2244
2245 /*
2246 * Update the page tables.
2247 */
2248 unsigned iPage = pVM->pgm.s.iDynPageMapLast;
2249 unsigned i;
2250 for (i = 0; i < (MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT); i++)
2251 {
2252 pVM->pgm.s.iDynPageMapLast = iPage = (iPage + 1) & ((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) - 1);
2253 if (!pVM->pgm.s.aLockedDynPageMapCache[iPage])
2254 break;
2255 iPage++;
2256 }
2257 AssertRelease(i != (MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT));
2258
2259 pVM->pgm.s.aHCPhysDynPageMapCache[iPage & (RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) - 1)] = HCPhys;
2260 pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage].u = (uint32_t)HCPhys | X86_PTE_P | X86_PTE_A | X86_PTE_D;
2261 pVM->pgm.s.paDynPageMapPaePTEsGC[iPage].u = HCPhys | X86_PTE_P | X86_PTE_A | X86_PTE_D;
2262 pVM->pgm.s.aLockedDynPageMapCache[iPage] = 0;
2263
2264 void *pv = pVM->pgm.s.pbDynPageMapBaseGC + (iPage << PAGE_SHIFT);
2265 *ppv = pv;
2266 ASMInvalidatePage(pv);
2267 Log4(("PGMGCDynMapHCPage: HCPhys=%RHp pv=%p iPage=%d\n", HCPhys, pv, iPage));
2268 return VINF_SUCCESS;
2269}
2270
2271
2272/**
2273 * Temporarily lock a dynamic page to prevent it from being reused.
2274 *
2275 * @param pVM VM handle.
2276 * @param GCPage GC address of page
2277 */
2278VMMDECL(void) PGMDynLockHCPage(PVM pVM, RCPTRTYPE(uint8_t *) GCPage)
2279{
2280 unsigned iPage;
2281
2282 Assert(GCPage >= pVM->pgm.s.pbDynPageMapBaseGC && GCPage < (pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE));
2283 iPage = ((uintptr_t)(GCPage - pVM->pgm.s.pbDynPageMapBaseGC)) >> PAGE_SHIFT;
2284 ASMAtomicIncU32(&pVM->pgm.s.aLockedDynPageMapCache[iPage]);
2285 Log4(("PGMDynLockHCPage %RRv iPage=%d\n", GCPage, iPage));
2286}
2287
2288
2289/**
2290 * Unlock a dynamic page
2291 *
2292 * @param pVM VM handle.
2293 * @param GCPage GC address of page
2294 */
2295VMMDECL(void) PGMDynUnlockHCPage(PVM pVM, RCPTRTYPE(uint8_t *) GCPage)
2296{
2297 unsigned iPage;
2298
2299 AssertCompile(RT_ELEMENTS(pVM->pgm.s.aLockedDynPageMapCache) == 2* RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache));
2300 AssertCompileMemberSize(VM, pgm.s.aLockedDynPageMapCache, sizeof(uint32_t) * (MM_HYPER_DYNAMIC_SIZE >> (PAGE_SHIFT)));
2301
2302 Assert(GCPage >= pVM->pgm.s.pbDynPageMapBaseGC && GCPage < (pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE));
2303 iPage = ((uintptr_t)(GCPage - pVM->pgm.s.pbDynPageMapBaseGC)) >> PAGE_SHIFT;
2304 Assert(pVM->pgm.s.aLockedDynPageMapCache[iPage]);
2305 ASMAtomicDecU32(&pVM->pgm.s.aLockedDynPageMapCache[iPage]);
2306 Log4(("PGMDynUnlockHCPage %RRv iPage=%d\n", GCPage, iPage));
2307}
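/*
 * Locking keeps a dynamic mapping from being recycled while it is still in use.
 * A hedged sketch of the pairing, assuming a page aligned GCPhys and RC context:
 *
 *     uint8_t *pbPage;
 *     int rc = PGMDynMapGCPage(pVM, GCPhys, (void **)&pbPage);
 *     if (RT_SUCCESS(rc))
 *     {
 *         PGMDynLockHCPage(pVM, pbPage);
 *         // ... code that may itself create more dynamic mappings ...
 *         PGMDynUnlockHCPage(pVM, pbPage);
 *     }
 */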
2308
2309
2310# ifdef VBOX_STRICT
2311/**
2312 * Check for lock leaks.
2313 *
2314 * @param pVM VM handle.
2315 */
2316VMMDECL(void) PGMDynCheckLocks(PVM pVM)
2317{
2318 for (unsigned i=0;i<RT_ELEMENTS(pVM->pgm.s.aLockedDynPageMapCache);i++)
2319 Assert(!pVM->pgm.s.aLockedDynPageMapCache[i]);
2320}
2321# endif /* VBOX_STRICT */
2322
2323# endif /* IN_RC */
2324#endif /* IN_RC || VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
2325
2326#if !defined(IN_R0) || defined(LOG_ENABLED)
2327
2328/** Format handler for PGMPAGE.
2329 * @copydoc FNRTSTRFORMATTYPE */
2330static DECLCALLBACK(size_t) pgmFormatTypeHandlerPage(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
2331 const char *pszType, void const *pvValue,
2332 int cchWidth, int cchPrecision, unsigned fFlags,
2333 void *pvUser)
2334{
2335 size_t cch;
2336 PCPGMPAGE pPage = (PCPGMPAGE)pvValue;
2337 if (VALID_PTR(pPage))
2338 {
2339 char szTmp[64+80];
2340
2341 cch = 0;
2342
2343 /* The single char state stuff. */
2344 static const char s_achPageStates[4] = { 'Z', 'A', 'W', 'S' };
2345 szTmp[cch++] = s_achPageStates[PGM_PAGE_GET_STATE(pPage)];
2346
2347#define IS_PART_INCLUDED(lvl) ( !(fFlags & RTSTR_F_PRECISION) || cchPrecision == (lvl) || cchPrecision >= (lvl)+10 )
2348 if (IS_PART_INCLUDED(5))
2349 {
2350 static const char s_achHandlerStates[4] = { '-', 't', 'w', 'a' };
2351 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)];
2352 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_VIRT_STATE(pPage)];
2353 }
2354
2355 /* The type. */
2356 if (IS_PART_INCLUDED(4))
2357 {
2358 szTmp[cch++] = ':';
2359 static const char s_achPageTypes[8][4] = { "INV", "RAM", "MI2", "M2A", "SHA", "ROM", "MIO", "BAD" };
2360 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE(pPage)][0];
2361 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE(pPage)][1];
2362 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE(pPage)][2];
2363 }
2364
2365 /* The numbers. */
2366 if (IS_PART_INCLUDED(3))
2367 {
2368 szTmp[cch++] = ':';
2369 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_HCPHYS(pPage), 16, 12, 0, RTSTR_F_ZEROPAD | RTSTR_F_64BIT);
2370 }
2371
2372 if (IS_PART_INCLUDED(2))
2373 {
2374 szTmp[cch++] = ':';
2375 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_PAGEID(pPage), 16, 7, 0, RTSTR_F_ZEROPAD | RTSTR_F_32BIT);
2376 }
2377
2378 if (IS_PART_INCLUDED(6))
2379 {
2380 szTmp[cch++] = ':';
2381 static const char s_achRefs[4] = { '-', 'U', '!', 'L' };
2382 szTmp[cch++] = s_achRefs[PGM_PAGE_GET_TD_CREFS(pPage)];
2383 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_TD_IDX(pPage), 16, 4, 0, RTSTR_F_ZEROPAD | RTSTR_F_16BIT);
2384 }
2385#undef IS_PART_INCLUDED
2386
2387 cch = pfnOutput(pvArgOutput, szTmp, cch);
2388 }
2389 else
2390 cch = pfnOutput(pvArgOutput, "<bad-pgmpage-ptr>", sizeof("<bad-pgmpage-ptr>") - 1);
2391 return cch;
2392}
2393
2394
2395/** Format handler for PGMRAMRANGE.
2396 * @copydoc FNRTSTRFORMATTYPE */
2397static DECLCALLBACK(size_t) pgmFormatTypeHandlerRamRange(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
2398 const char *pszType, void const *pvValue,
2399 int cchWidth, int cchPrecision, unsigned fFlags,
2400 void *pvUser)
2401{
2402 size_t cch;
2403 PGMRAMRANGE const *pRam = (PGMRAMRANGE const *)pvValue;
2404 if (VALID_PTR(pRam))
2405 {
2406 char szTmp[80];
2407 cch = RTStrPrintf(szTmp, sizeof(szTmp), "%RGp-%RGp", pRam->GCPhys, pRam->GCPhysLast);
2408 cch = pfnOutput(pvArgOutput, szTmp, cch);
2409 }
2410 else
2411 cch = pfnOutput(pvArgOutput, "<bad-pgmramrange-ptr>", sizeof("<bad-pgmramrange-ptr>") - 1);
2412 return cch;
2413}
2414
2415/** Format type handlers to be registered/deregistered. */
2416static const struct
2417{
2418 char szType[24];
2419 PFNRTSTRFORMATTYPE pfnHandler;
2420} g_aPgmFormatTypes[] =
2421{
2422 { "pgmpage", pgmFormatTypeHandlerPage },
2423 { "pgmramrange", pgmFormatTypeHandlerRamRange }
2424};
2425
2426#endif /* !IN_R0 || LOG_ENABLED */
2427
2428
2429/**
2430 * Registers the global string format types.
2431 *
2432 * This should be called at module load time or in some other manner that ensures
2433 * that it's called exactly one time.
2434 *
2435 * @returns IPRT status code on RTStrFormatTypeRegister failure.
2436 */
2437VMMDECL(int) PGMRegisterStringFormatTypes(void)
2438{
2439#if !defined(IN_R0) || defined(LOG_ENABLED)
2440 int rc = VINF_SUCCESS;
2441 unsigned i;
2442 for (i = 0; RT_SUCCESS(rc) && i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
2443 {
2444 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
2445# ifdef IN_RING0
2446 if (rc == VERR_ALREADY_EXISTS)
2447 {
2448 /* in case of cleanup failure in ring-0 */
2449 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2450 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
2451 }
2452# endif
2453 }
2454 if (RT_FAILURE(rc))
2455 while (i-- > 0)
2456 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2457
2458 return rc;
2459#else
2460 return VINF_SUCCESS;
2461#endif
2462}
2463
2464
2465/**
2466 * Deregisters the global string format types.
2467 *
2468 * This should be called at module unload time or in some other manner that
2469 * ensures that it's called exactly one time.
2470 */
2471VMMDECL(void) PGMDeregisterStringFormatTypes(void)
2472{
2473#if !defined(IN_R0) || defined(LOG_ENABLED)
2474 for (unsigned i = 0; i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
2475 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2476#endif
2477}
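/*
 * Once registered, the custom types above are consumed through the IPRT
 * %R[type] format extension, e.g. (illustrative log statements):
 *
 *     Log(("Page %RGp: %R[pgmpage]\n", GCPhys, pPage));
 *     Log(("Range: %R[pgmramrange]\n", pRam));
 */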
2478
2479#ifdef VBOX_STRICT
2480
2481/**
2482 * Asserts that there are no mapping conflicts.
2483 *
2484 * @returns Number of conflicts.
2485 * @param pVM The VM Handle.
2486 */
2487VMMDECL(unsigned) PGMAssertNoMappingConflicts(PVM pVM)
2488{
2489 unsigned cErrors = 0;
2490
2491 /* Only applies to raw mode -> 1 VCPU */
2492 Assert(pVM->cCPUs == 1);
2493 PVMCPU pVCpu = &pVM->aCpus[0];
2494
2495 /*
2496 * Check for mapping conflicts.
2497 */
2498 for (PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
2499 pMapping;
2500 pMapping = pMapping->CTX_SUFF(pNext))
2501 {
2502 /** @todo This is slow and should be optimized, but since it's just assertions I don't care now. */
2503 for (RTGCPTR GCPtr = pMapping->GCPtr;
2504 GCPtr <= pMapping->GCPtrLast;
2505 GCPtr += PAGE_SIZE)
2506 {
2507 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)GCPtr, NULL, NULL);
2508 if (rc != VERR_PAGE_TABLE_NOT_PRESENT)
2509 {
2510 AssertMsgFailed(("Conflict at %RGv with %s\n", GCPtr, R3STRING(pMapping->pszDesc)));
2511 cErrors++;
2512 break;
2513 }
2514 }
2515 }
2516
2517 return cErrors;
2518}
2519
2520
2521/**
2522 * Asserts that everything related to the guest CR3 is correctly shadowed.
2523 *
2524 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
2525 * and assert the correctness of the guest CR3 mapping before asserting that the
2526 * shadow page tables are in sync with the guest page tables.
2527 *
2528 * @returns Number of conflicts.
2529 * @param pVM The VM Handle.
2530 * @param pVCpu VMCPU handle.
2531 * @param cr3 The current guest CR3 register value.
2532 * @param cr4 The current guest CR4 register value.
2533 */
2534VMMDECL(unsigned) PGMAssertCR3(PVM pVM, PVMCPU pVCpu, uint64_t cr3, uint64_t cr4)
2535{
2536 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
2537 unsigned cErrors = PGM_BTH_PFN(AssertCR3, pVCpu)(pVCpu, cr3, cr4, 0, ~(RTGCPTR)0);
2538 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
2539 return cErrors;
2540}
2541
2542#endif /* VBOX_STRICT */