VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp @ 20088

Last change on this file since 20088 was 20068, checked in by vboxsync, 16 years ago:
"Trying to get rid of annoying assertions"

1/* $Id: PGMAll.cpp 20068 2009-05-27 11:32:49Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor - All context code.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#define LOG_GROUP LOG_GROUP_PGM
26#include <VBox/pgm.h>
27#include <VBox/cpum.h>
28#include <VBox/selm.h>
29#include <VBox/iom.h>
30#include <VBox/sup.h>
31#include <VBox/mm.h>
32#include <VBox/stam.h>
33#include <VBox/csam.h>
34#include <VBox/patm.h>
35#include <VBox/trpm.h>
36#include <VBox/rem.h>
37#include <VBox/em.h>
38#include <VBox/hwaccm.h>
39#include <VBox/hwacc_vmx.h>
40#include "PGMInternal.h"
41#include <VBox/vm.h>
42#include <iprt/assert.h>
43#include <iprt/asm.h>
44#include <iprt/string.h>
45#include <VBox/log.h>
46#include <VBox/param.h>
47#include <VBox/err.h>
48
49
50/*******************************************************************************
51* Structures and Typedefs *
52*******************************************************************************/
53/**
54 * State structure for PGM_GST_NAME(HandlerVirtualUpdate) that's
55 * passed to PGM_GST_NAME(VirtHandlerUpdateOne) during enumeration.
56 */
57typedef struct PGMHVUSTATE
58{
59 /** The VM handle. */
60 PVM pVM;
61 /** The VMCPU handle. */
62 PVMCPU pVCpu;
63 /** The todo flags. */
64 RTUINT fTodo;
65 /** The CR4 register value. */
66 uint32_t cr4;
67} PGMHVUSTATE, *PPGMHVUSTATE;
68
69
70/*******************************************************************************
71* Internal Functions *
72*******************************************************************************/
73DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
74DECLINLINE(int) pgmShwGetPaePoolPagePD(PPGMCPU pPGM, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde);
75
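/*
 * Note on the blocks that follow (explanatory sketch): the shadow, guest and
 * combined ("Bth") worker sets are instantiated by repeatedly including the
 * template headers PGMAllShw.h, PGMAllGst.h and PGMAllBth.h with different
 * PGM_SHW_xxx / PGM_GST_xxx / PGM_BTH_xxx macro settings, one block per
 * shadow/guest paging-mode pair.  The actual name expansions live in
 * PGMInternal.h; conceptually, for the 32-bit shadow / real-mode guest pair:
 *
 *      #define PGM_BTH_NAME(name)   PGM_BTH_NAME_32BIT_REAL(name)
 *      #include "PGMAllBth.h"        // emits PGM_BTH_NAME(Trap0eHandler),
 *                                    // PGM_BTH_NAME(InvalidatePage), ...
 *
 * Callers such as PGMTrap0eHandler below then dispatch through function
 * pointer macros like PGM_BTH_PFN(Trap0eHandler, pVCpu), which select the
 * instantiation matching the VCPU's current shadow+guest mode.
 */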
76/*
77 * Shadow - 32-bit mode
78 */
79#define PGM_SHW_TYPE PGM_TYPE_32BIT
80#define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
81#include "PGMAllShw.h"
82
83/* Guest - real mode */
84#define PGM_GST_TYPE PGM_TYPE_REAL
85#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
86#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
87#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
88#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
89#include "PGMGstDefs.h"
90#include "PGMAllGst.h"
91#include "PGMAllBth.h"
92#undef BTH_PGMPOOLKIND_PT_FOR_PT
93#undef BTH_PGMPOOLKIND_ROOT
94#undef PGM_BTH_NAME
95#undef PGM_GST_TYPE
96#undef PGM_GST_NAME
97
98/* Guest - protected mode */
99#define PGM_GST_TYPE PGM_TYPE_PROT
100#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
101#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
102#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
103#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
104#include "PGMGstDefs.h"
105#include "PGMAllGst.h"
106#include "PGMAllBth.h"
107#undef BTH_PGMPOOLKIND_PT_FOR_PT
108#undef BTH_PGMPOOLKIND_ROOT
109#undef PGM_BTH_NAME
110#undef PGM_GST_TYPE
111#undef PGM_GST_NAME
112
113/* Guest - 32-bit mode */
114#define PGM_GST_TYPE PGM_TYPE_32BIT
115#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
116#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
117#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
118#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
119#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD
120#include "PGMGstDefs.h"
121#include "PGMAllGst.h"
122#include "PGMAllBth.h"
123#undef BTH_PGMPOOLKIND_PT_FOR_BIG
124#undef BTH_PGMPOOLKIND_PT_FOR_PT
125#undef BTH_PGMPOOLKIND_ROOT
126#undef PGM_BTH_NAME
127#undef PGM_GST_TYPE
128#undef PGM_GST_NAME
129
130#undef PGM_SHW_TYPE
131#undef PGM_SHW_NAME
132
133
134/*
135 * Shadow - PAE mode
136 */
137#define PGM_SHW_TYPE PGM_TYPE_PAE
138#define PGM_SHW_NAME(name) PGM_SHW_NAME_PAE(name)
139#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
140#include "PGMAllShw.h"
141
142/* Guest - real mode */
143#define PGM_GST_TYPE PGM_TYPE_REAL
144#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
145#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
146#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
147#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
148#include "PGMGstDefs.h"
149#include "PGMAllBth.h"
150#undef BTH_PGMPOOLKIND_PT_FOR_PT
151#undef BTH_PGMPOOLKIND_ROOT
152#undef PGM_BTH_NAME
153#undef PGM_GST_TYPE
154#undef PGM_GST_NAME
155
156/* Guest - protected mode */
157#define PGM_GST_TYPE PGM_TYPE_PROT
158#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
159#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
160#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
161#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
162#include "PGMGstDefs.h"
163#include "PGMAllBth.h"
164#undef BTH_PGMPOOLKIND_PT_FOR_PT
165#undef BTH_PGMPOOLKIND_ROOT
166#undef PGM_BTH_NAME
167#undef PGM_GST_TYPE
168#undef PGM_GST_NAME
169
170/* Guest - 32-bit mode */
171#define PGM_GST_TYPE PGM_TYPE_32BIT
172#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
173#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
174#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
175#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
176#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_FOR_32BIT
177#include "PGMGstDefs.h"
178#include "PGMAllBth.h"
179#undef BTH_PGMPOOLKIND_PT_FOR_BIG
180#undef BTH_PGMPOOLKIND_PT_FOR_PT
181#undef BTH_PGMPOOLKIND_ROOT
182#undef PGM_BTH_NAME
183#undef PGM_GST_TYPE
184#undef PGM_GST_NAME
185
186
187/* Guest - PAE mode */
188#define PGM_GST_TYPE PGM_TYPE_PAE
189#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
190#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
191#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
192#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
193#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT
194#include "PGMGstDefs.h"
195#include "PGMAllGst.h"
196#include "PGMAllBth.h"
197#undef BTH_PGMPOOLKIND_PT_FOR_BIG
198#undef BTH_PGMPOOLKIND_PT_FOR_PT
199#undef BTH_PGMPOOLKIND_ROOT
200#undef PGM_BTH_NAME
201#undef PGM_GST_TYPE
202#undef PGM_GST_NAME
203
204#undef PGM_SHW_TYPE
205#undef PGM_SHW_NAME
206
207
208#ifndef IN_RC /* AMD64 implies VT-x/AMD-V */
209/*
210 * Shadow - AMD64 mode
211 */
212# define PGM_SHW_TYPE PGM_TYPE_AMD64
213# define PGM_SHW_NAME(name) PGM_SHW_NAME_AMD64(name)
214# include "PGMAllShw.h"
215
216/* Guest - protected mode (only used for AMD-V nested paging in 64-bit mode) */
217# define PGM_GST_TYPE PGM_TYPE_PROT
218# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
219# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
220# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
221# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PD_PHYS
222# include "PGMGstDefs.h"
223# include "PGMAllBth.h"
224# undef BTH_PGMPOOLKIND_PT_FOR_PT
225# undef BTH_PGMPOOLKIND_ROOT
226# undef PGM_BTH_NAME
227# undef PGM_GST_TYPE
228# undef PGM_GST_NAME
229
230# ifdef VBOX_WITH_64_BITS_GUESTS
231/* Guest - AMD64 mode */
232# define PGM_GST_TYPE PGM_TYPE_AMD64
233# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
234# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
235# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
236# define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
237# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_64BIT_PML4
238# include "PGMGstDefs.h"
239# include "PGMAllGst.h"
240# include "PGMAllBth.h"
241# undef BTH_PGMPOOLKIND_PT_FOR_BIG
242# undef BTH_PGMPOOLKIND_PT_FOR_PT
243# undef BTH_PGMPOOLKIND_ROOT
244# undef PGM_BTH_NAME
245# undef PGM_GST_TYPE
246# undef PGM_GST_NAME
247# endif /* VBOX_WITH_64_BITS_GUESTS */
248
249# undef PGM_SHW_TYPE
250# undef PGM_SHW_NAME
251
252
253/*
254 * Shadow - Nested paging mode
255 */
256# define PGM_SHW_TYPE PGM_TYPE_NESTED
257# define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED(name)
258# include "PGMAllShw.h"
259
260/* Guest - real mode */
261# define PGM_GST_TYPE PGM_TYPE_REAL
262# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
263# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_REAL(name)
264# include "PGMGstDefs.h"
265# include "PGMAllBth.h"
266# undef PGM_BTH_NAME
267# undef PGM_GST_TYPE
268# undef PGM_GST_NAME
269
270/* Guest - protected mode */
271# define PGM_GST_TYPE PGM_TYPE_PROT
272# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
273# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PROT(name)
274# include "PGMGstDefs.h"
275# include "PGMAllBth.h"
276# undef PGM_BTH_NAME
277# undef PGM_GST_TYPE
278# undef PGM_GST_NAME
279
280/* Guest - 32-bit mode */
281# define PGM_GST_TYPE PGM_TYPE_32BIT
282# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
283# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT(name)
284# include "PGMGstDefs.h"
285# include "PGMAllBth.h"
286# undef PGM_BTH_NAME
287# undef PGM_GST_TYPE
288# undef PGM_GST_NAME
289
290/* Guest - PAE mode */
291# define PGM_GST_TYPE PGM_TYPE_PAE
292# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
293# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE(name)
294# include "PGMGstDefs.h"
295# include "PGMAllBth.h"
296# undef PGM_BTH_NAME
297# undef PGM_GST_TYPE
298# undef PGM_GST_NAME
299
300# ifdef VBOX_WITH_64_BITS_GUESTS
301/* Guest - AMD64 mode */
302# define PGM_GST_TYPE PGM_TYPE_AMD64
303# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
304# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64(name)
305# include "PGMGstDefs.h"
306# include "PGMAllBth.h"
307# undef PGM_BTH_NAME
308# undef PGM_GST_TYPE
309# undef PGM_GST_NAME
310# endif /* VBOX_WITH_64_BITS_GUESTS */
311
312# undef PGM_SHW_TYPE
313# undef PGM_SHW_NAME
314
315
316/*
317 * Shadow - EPT
318 */
319# define PGM_SHW_TYPE PGM_TYPE_EPT
320# define PGM_SHW_NAME(name) PGM_SHW_NAME_EPT(name)
321# include "PGMAllShw.h"
322
323/* Guest - real mode */
324# define PGM_GST_TYPE PGM_TYPE_REAL
325# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
326# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_REAL(name)
327# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
328# include "PGMGstDefs.h"
329# include "PGMAllBth.h"
330# undef BTH_PGMPOOLKIND_PT_FOR_PT
331# undef PGM_BTH_NAME
332# undef PGM_GST_TYPE
333# undef PGM_GST_NAME
334
335/* Guest - protected mode */
336# define PGM_GST_TYPE PGM_TYPE_PROT
337# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
338# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
339# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
340# include "PGMGstDefs.h"
341# include "PGMAllBth.h"
342# undef BTH_PGMPOOLKIND_PT_FOR_PT
343# undef PGM_BTH_NAME
344# undef PGM_GST_TYPE
345# undef PGM_GST_NAME
346
347/* Guest - 32-bit mode */
348# define PGM_GST_TYPE PGM_TYPE_32BIT
349# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
350# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_32BIT(name)
351# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
352# include "PGMGstDefs.h"
353# include "PGMAllBth.h"
354# undef BTH_PGMPOOLKIND_PT_FOR_PT
355# undef PGM_BTH_NAME
356# undef PGM_GST_TYPE
357# undef PGM_GST_NAME
358
359/* Guest - PAE mode */
360# define PGM_GST_TYPE PGM_TYPE_PAE
361# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
362# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PAE(name)
363# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
364# include "PGMGstDefs.h"
365# include "PGMAllBth.h"
366# undef BTH_PGMPOOLKIND_PT_FOR_PT
367# undef PGM_BTH_NAME
368# undef PGM_GST_TYPE
369# undef PGM_GST_NAME
370
371# ifdef VBOX_WITH_64_BITS_GUESTS
372/* Guest - AMD64 mode */
373# define PGM_GST_TYPE PGM_TYPE_AMD64
374# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
375# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_AMD64(name)
376# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
377# include "PGMGstDefs.h"
378# include "PGMAllBth.h"
379# undef BTH_PGMPOOLKIND_PT_FOR_PT
380# undef PGM_BTH_NAME
381# undef PGM_GST_TYPE
382# undef PGM_GST_NAME
383# endif /* VBOX_WITH_64_BITS_GUESTS */
384
385# undef PGM_SHW_TYPE
386# undef PGM_SHW_NAME
387
388#endif /* !IN_RC */
389
390
391#ifndef IN_RING3
392/**
393 * #PF Handler.
394 *
395 * @returns VBox status code (appropriate for trap handling and GC return).
396 * @param pVCpu VMCPU handle.
397 * @param uErr The trap error code.
398 * @param pRegFrame Trap register frame.
399 * @param pvFault The fault address.
400 */
401VMMDECL(int) PGMTrap0eHandler(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
402{
403 LogFlow(("PGMTrap0eHandler: uErr=%RGu pvFault=%RGv eip=%04x:%RGv\n", uErr, pvFault, pRegFrame->cs, (RTGCPTR)pRegFrame->rip));
404 STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0e, a);
405 STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );
406
407
408#ifdef VBOX_WITH_STATISTICS
409 /*
410 * Error code stats.
411 */
412 if (uErr & X86_TRAP_PF_US)
413 {
414 if (!(uErr & X86_TRAP_PF_P))
415 {
416 if (uErr & X86_TRAP_PF_RW)
417 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSNotPresentWrite);
418 else
419 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSNotPresentRead);
420 }
421 else if (uErr & X86_TRAP_PF_RW)
422 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSWrite);
423 else if (uErr & X86_TRAP_PF_RSVD)
424 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSReserved);
425 else if (uErr & X86_TRAP_PF_ID)
426 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSNXE);
427 else
428 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSRead);
429 }
430 else
431 { /* Supervisor */
432 if (!(uErr & X86_TRAP_PF_P))
433 {
434 if (uErr & X86_TRAP_PF_RW)
435 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eSVNotPresentWrite);
436 else
437 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eSVNotPresentRead);
438 }
439 else if (uErr & X86_TRAP_PF_RW)
440 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eSVWrite);
441 else if (uErr & X86_TRAP_PF_ID)
442 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eSNXE);
443 else if (uErr & X86_TRAP_PF_RSVD)
444 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eSVReserved);
445 }
446#endif /* VBOX_WITH_STATISTICS */
447
448 /*
449 * Call the worker.
450 */
451 int rc = PGM_BTH_PFN(Trap0eHandler, pVCpu)(pVCpu, uErr, pRegFrame, pvFault);
452 if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
453 rc = VINF_SUCCESS;
454 STAM_STATS({ if (rc == VINF_EM_RAW_GUEST_TRAP) STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eGuestPF); });
455 STAM_STATS({ if (!pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution))
456 pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2Misc; });
457 STAM_PROFILE_STOP_EX(&pVCpu->pgm.s.StatRZTrap0e, pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution), a);
458 return rc;
459}
460#endif /* !IN_RING3 */
461
462
463/**
464 * Prefetch a page
465 *
466 * Typically used to sync commonly used pages before entering raw mode
467 * after a CR3 reload.
468 *
469 * @returns VBox status code suitable for scheduling.
470 * @retval VINF_SUCCESS on success.
471 * @retval VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
472 * @param pVCpu VMCPU handle.
473 * @param GCPtrPage Page to prefetch.
474 */
475VMMDECL(int) PGMPrefetchPage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
476{
477 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,Prefetch), a);
478 int rc = PGM_BTH_PFN(PrefetchPage, pVCpu)(pVCpu, GCPtrPage);
479 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,Prefetch), a);
480 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
481 return rc;
482}
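/*
 * Usage sketch (illustrative; assumes a valid pVCpu and register frame in
 * the current context): prefetching the page at the guest RIP before
 * re-entering raw mode and honouring the VINF_PGM_SYNC_CR3 return.
 *
 *      int rc2 = PGMPrefetchPage(pVCpu, (RTGCPTR)pRegFrame->rip);
 *      if (rc2 == VINF_PGM_SYNC_CR3)
 *          ;   // out of shadow pages or similar; let the CR3 sync run first
 */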
483
484
485/**
486 * Gets the mapping corresponding to the specified address (if any).
487 *
488 * @returns Pointer to the mapping.
489 * @returns NULL if not found.
490 *
491 * @param pVM The virtual machine.
492 * @param GCPtr The guest context pointer.
493 */
494PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr)
495{
496 PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
497 while (pMapping)
498 {
499 if ((uintptr_t)GCPtr < (uintptr_t)pMapping->GCPtr)
500 break;
501 if ((uintptr_t)GCPtr - (uintptr_t)pMapping->GCPtr < pMapping->cb)
502 return pMapping;
503 pMapping = pMapping->CTX_SUFF(pNext);
504 }
505 return NULL;
506}
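/*
 * Usage sketch (illustrative): checking whether a guest address collides
 * with one of the hypervisor mappings, as the invlpg path further down does.
 *
 *      PPGMMAPPING pMapping = pgmGetMapping(pVM, GCPtrPage);
 *      if (pMapping)   // GCPtrPage lies within [GCPtr, GCPtr + cb) of the mapping
 *          ;           // conflict; a relocation / resync will be needed
 */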
507
508
509/**
510 * Verifies a range of pages for read or write access
511 *
512 * Only checks the guest's page tables
513 *
514 * @returns VBox status code.
515 * @param pVCpu VMCPU handle.
516 * @param Addr Guest virtual address to check
517 * @param cbSize Access size
518 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
519 * @remarks Currently not in use.
520 */
521VMMDECL(int) PGMIsValidAccess(PVMCPU pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
522{
523 /*
524 * Validate input.
525 */
526 if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
527 {
528 AssertMsgFailed(("PGMIsValidAccess: invalid access type %08x\n", fAccess));
529 return VERR_INVALID_PARAMETER;
530 }
531
532 uint64_t fPage;
533 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)Addr, &fPage, NULL);
534 if (RT_FAILURE(rc))
535 {
536 Log(("PGMIsValidAccess: access violation for %RGv rc=%d\n", Addr, rc));
537 return VINF_EM_RAW_GUEST_TRAP;
538 }
539
540 /*
541 * Check if the access would cause a page fault
542 *
543 * Note that hypervisor page directories are not present in the guest's tables, so this check
544 * is sufficient.
545 */
546 bool fWrite = !!(fAccess & X86_PTE_RW);
547 bool fUser = !!(fAccess & X86_PTE_US);
548 if ( !(fPage & X86_PTE_P)
549 || (fWrite && !(fPage & X86_PTE_RW))
550 || (fUser && !(fPage & X86_PTE_US)) )
551 {
552 Log(("PGMIsValidAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPage, fWrite, fUser));
553 return VINF_EM_RAW_GUEST_TRAP;
554 }
555 if ( RT_SUCCESS(rc)
556 && PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize))
557 return PGMIsValidAccess(pVCpu, Addr + PAGE_SIZE, (cbSize > PAGE_SIZE) ? cbSize - PAGE_SIZE : 1, fAccess);
558 return rc;
559}
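/*
 * Usage sketch (illustrative; fAccess only takes X86_PTE_US and X86_PTE_RW):
 * checking whether a user-mode read of cb bytes at GCPtrSrc would fault,
 * consulting the guest page tables only.
 *
 *      int rc2 = PGMIsValidAccess(pVCpu, GCPtrSrc, cb, X86_PTE_US);
 *      if (rc2 == VINF_EM_RAW_GUEST_TRAP)
 *          ;   // the access would #PF in the guest; forward the trap instead
 */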
560
561
562/**
563 * Verifies a range of pages for read or write access
564 *
565 * Supports handling of pages marked for dirty bit tracking and CSAM
566 *
567 * @returns VBox status code.
568 * @param pVCpu VMCPU handle.
569 * @param Addr Guest virtual address to check
570 * @param cbSize Access size
571 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
572 */
573VMMDECL(int) PGMVerifyAccess(PVMCPU pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
574{
575 PVM pVM = pVCpu->CTX_SUFF(pVM);
576
577 AssertMsg(!(fAccess & ~(X86_PTE_US | X86_PTE_RW)), ("PGMVerifyAccess: invalid access type %08x\n", fAccess));
578
579 /*
580 * Get going.
581 */
582 uint64_t fPageGst;
583 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)Addr, &fPageGst, NULL);
584 if (RT_FAILURE(rc))
585 {
586 Log(("PGMVerifyAccess: access violation for %RGv rc=%d\n", Addr, rc));
587 return VINF_EM_RAW_GUEST_TRAP;
588 }
589
590 /*
591 * Check if the access would cause a page fault
592 *
593 * Note that hypervisor page directories are not present in the guest's tables, so this check
594 * is sufficient.
595 */
596 const bool fWrite = !!(fAccess & X86_PTE_RW);
597 const bool fUser = !!(fAccess & X86_PTE_US);
598 if ( !(fPageGst & X86_PTE_P)
599 || (fWrite && !(fPageGst & X86_PTE_RW))
600 || (fUser && !(fPageGst & X86_PTE_US)) )
601 {
602 Log(("PGMVerifyAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPageGst, fWrite, fUser));
603 return VINF_EM_RAW_GUEST_TRAP;
604 }
605
606 if (!HWACCMIsNestedPagingActive(pVM))
607 {
608 /*
609 * Next step is to verify if we protected this page for dirty bit tracking or for CSAM scanning
610 */
611 rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, NULL, NULL);
612 if ( rc == VERR_PAGE_NOT_PRESENT
613 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
614 {
615 /*
616 * Page is not present in our page tables.
617 * Try to sync it!
618 */
619 Assert(X86_TRAP_PF_RW == X86_PTE_RW && X86_TRAP_PF_US == X86_PTE_US);
620 uint32_t uErr = fAccess & (X86_TRAP_PF_RW | X86_TRAP_PF_US);
621 rc = PGM_BTH_PFN(VerifyAccessSyncPage, pVCpu)(pVCpu, Addr, fPageGst, uErr);
622 if (rc != VINF_SUCCESS)
623 return rc;
624 }
625 else
626 AssertMsg(rc == VINF_SUCCESS, ("PGMShwGetPage %RGv failed with %Rrc\n", Addr, rc));
627 }
628
629#if 0 /* def VBOX_STRICT; triggers too often now */
630 /*
631 * This check is a bit paranoid, but useful.
632 */
633 /** @note this will assert when writing to monitored pages (a bit annoying actually) */
634 uint64_t fPageShw;
635 rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, &fPageShw, NULL);
636 if ( (rc == VERR_PAGE_NOT_PRESENT || RT_FAILURE(rc))
637 || (fWrite && !(fPageShw & X86_PTE_RW))
638 || (fUser && !(fPageShw & X86_PTE_US)) )
639 {
640 AssertMsgFailed(("Unexpected access violation for %RGv! rc=%Rrc write=%d user=%d\n",
641 Addr, rc, fWrite && !(fPageShw & X86_PTE_RW), fUser && !(fPageShw & X86_PTE_US)));
642 return VINF_EM_RAW_GUEST_TRAP;
643 }
644#endif
645
646 if ( RT_SUCCESS(rc)
647 && ( PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize - 1)
648 || Addr + cbSize < Addr))
649 {
650 /* Don't recursively call PGMVerifyAccess as we might run out of stack. */
651 for (;;)
652 {
653 Addr += PAGE_SIZE;
654 if (cbSize > PAGE_SIZE)
655 cbSize -= PAGE_SIZE;
656 else
657 cbSize = 1;
658 rc = PGMVerifyAccess(pVCpu, Addr, 1, fAccess);
659 if (rc != VINF_SUCCESS)
660 break;
661 if (PAGE_ADDRESS(Addr) == PAGE_ADDRESS(Addr + cbSize - 1))
662 break;
663 }
664 }
665 return rc;
666}
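/*
 * Usage sketch (illustrative): verifying a user-mode write before touching
 * guest memory on its behalf.  Unlike PGMIsValidAccess above, this also
 * syncs shadow pages that were write-protected for dirty-bit tracking or
 * CSAM, so a clean VINF_SUCCESS means the write can proceed directly.
 *
 *      int rc2 = PGMVerifyAccess(pVCpu, GCPtrDst, cbWrite, X86_PTE_US | X86_PTE_RW);
 *      if (rc2 != VINF_SUCCESS)
 *          ;   // would fault or needs ring-3 attention; don't write
 */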
667
668
669/**
670 * Emulation of the invlpg instruction (HC only actually).
671 *
672 * @returns VBox status code, special care required.
673 * @retval VINF_PGM_SYNC_CR3 - handled.
674 * @retval VINF_EM_RAW_EMULATE_INSTR - not handled (RC only).
675 * @retval VERR_REM_FLUSHED_PAGES_OVERFLOW - not handled.
676 *
677 * @param pVCpu VMCPU handle.
678 * @param GCPtrPage Page to invalidate.
679 *
680 * @remark ASSUMES the page table entry or page directory is valid. Fairly
681 * safe, but there could be edge cases!
682 *
683 * @todo Flush page or page directory only if necessary!
684 */
685VMMDECL(int) PGMInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
686{
687 PVM pVM = pVCpu->CTX_SUFF(pVM);
688 int rc;
689 Log3(("PGMInvalidatePage: GCPtrPage=%RGv\n", GCPtrPage));
690
691#ifndef IN_RING3
692 /*
693 * Notify the recompiler so it can record this instruction.
694 * Failure happens when it's out of space. We'll return to HC in that case.
695 */
696 rc = REMNotifyInvalidatePage(pVM, GCPtrPage);
697 if (rc != VINF_SUCCESS)
698 return rc;
699#endif /* !IN_RING3 */
700
701
702#ifdef IN_RC
703 /*
704 * Check for conflicts and pending CR3 monitoring updates.
705 */
706 if (!pVM->pgm.s.fMappingsFixed)
707 {
708 if ( pgmGetMapping(pVM, GCPtrPage)
709 && PGMGstGetPage(pVCpu, GCPtrPage, NULL, NULL) != VERR_PAGE_TABLE_NOT_PRESENT)
710 {
711 LogFlow(("PGMGCInvalidatePage: Conflict!\n"));
712 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
713 STAM_COUNTER_INC(&pVM->pgm.s.StatRCInvlPgConflict);
714 return VINF_PGM_SYNC_CR3;
715 }
716
717 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
718 {
719 LogFlow(("PGMGCInvalidatePage: PGM_SYNC_MONITOR_CR3 -> reinterpret instruction in R3\n"));
720 STAM_COUNTER_INC(&pVM->pgm.s.StatRCInvlPgSyncMonCR3);
721 return VINF_EM_RAW_EMULATE_INSTR;
722 }
723 }
724#endif /* IN_RC */
725
726 /*
727 * Call paging mode specific worker.
728 */
729 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePage), a);
730 pgmLock(pVM);
731 rc = PGM_BTH_PFN(InvalidatePage, pVCpu)(pVCpu, GCPtrPage);
732 pgmUnlock(pVM);
733 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePage), a);
734
735#ifdef IN_RING3
736 /*
737 * Check if we have a pending update of the CR3 monitoring.
738 */
739 if ( RT_SUCCESS(rc)
740 && (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3))
741 {
742 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
743 Assert(!pVM->pgm.s.fMappingsFixed);
744 }
745
746 /*
747 * Inform CSAM about the flush
748 *
749 * Note: This is to check if monitored pages have been changed; when we implement
750 * callbacks for virtual handlers, this is no longer required.
751 */
752 CSAMR3FlushPage(pVM, GCPtrPage);
753#endif /* IN_RING3 */
754 return rc;
755}
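/*
 * Usage sketch (illustrative): how an invlpg emulation path would typically
 * drive this function and react to the special status codes listed above.
 *
 *      int rc2 = PGMInvalidatePage(pVCpu, GCPtrPage);
 *      if (rc2 == VINF_PGM_SYNC_CR3)
 *          ;   // a full resync was scheduled (the FF is set); nothing more to do
 *      else if (rc2 == VINF_EM_RAW_EMULATE_INSTR)
 *          ;   // RC only: have ring-3 reinterpret the instruction
 */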
756
757
758/**
759 * Executes an instruction using the interpreter.
760 *
761 * @returns VBox status code (appropriate for trap handling and GC return).
762 * @param pVM VM handle.
763 * @param pVCpu VMCPU handle.
764 * @param pRegFrame Register frame.
765 * @param pvFault Fault address.
766 */
767VMMDECL(int) PGMInterpretInstruction(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
768{
769 uint32_t cb;
770 int rc = EMInterpretInstruction(pVM, pVCpu, pRegFrame, pvFault, &cb);
771 if (rc == VERR_EM_INTERPRETER)
772 rc = VINF_EM_RAW_EMULATE_INSTR;
773 if (rc != VINF_SUCCESS)
774 Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%RGv)\n", rc, pvFault));
775 return rc;
776}
777
778
779/**
780 * Gets effective page information (from the VMM page directory).
781 *
782 * @returns VBox status.
783 * @param pVCpu VMCPU handle.
784 * @param GCPtr Guest Context virtual address of the page.
785 * @param pfFlags Where to store the flags. These are X86_PTE_*.
786 * @param pHCPhys Where to store the HC physical address of the page.
787 * This is page aligned.
788 * @remark You should use PGMMapGetPage() for pages in a mapping.
789 */
790VMMDECL(int) PGMShwGetPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
791{
792 pgmLock(pVCpu->CTX_SUFF(pVM));
793 int rc = PGM_SHW_PFN(GetPage, pVCpu)(pVCpu, GCPtr, pfFlags, pHCPhys);
794 pgmUnlock(pVCpu->CTX_SUFF(pVM));
795 return rc;
796}
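/*
 * Usage sketch (illustrative): querying the shadow mapping of a guest
 * address to get its effective flags and host-physical backing.
 *
 *      uint64_t fShwFlags;
 *      RTHCPHYS HCPhys;
 *      int rc2 = PGMShwGetPage(pVCpu, GCPtr, &fShwFlags, &HCPhys);
 *      if (RT_SUCCESS(rc2) && (fShwFlags & X86_PTE_RW))
 *          ;   // currently mapped writable in the shadow tables
 */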
797
798
799/**
800 * Sets (replaces) the page flags for a range of pages in the shadow context.
801 *
802 * @returns VBox status.
803 * @param pVCpu VMCPU handle.
804 * @param GCPtr The address of the first page.
805 * @param cb The size of the range in bytes.
806 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
807 * @remark You must use PGMMapSetPage() for pages in a mapping.
808 */
809VMMDECL(int) PGMShwSetPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
810{
811 return PGMShwModifyPage(pVCpu, GCPtr, cb, fFlags, 0);
812}
813
814
815/**
816 * Modify page flags for a range of pages in the shadow context.
817 *
818 * The existing flags are ANDed with the fMask and ORed with the fFlags.
819 *
820 * @returns VBox status code.
821 * @param pVCpu VMCPU handle.
822 * @param GCPtr Virtual address of the first page in the range.
823 * @param cb Size (in bytes) of the range to apply the modification to.
824 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
825 * @param fMask The AND mask - page flags X86_PTE_*.
826 * Be very CAREFUL when ~'ing constants which could be 32-bit!
827 * @remark You must use PGMMapModifyPage() for pages in a mapping.
828 */
829VMMDECL(int) PGMShwModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
830{
831 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
832 Assert(cb);
833
834 /*
835 * Align the input.
836 */
837 cb += GCPtr & PAGE_OFFSET_MASK;
838 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
839 GCPtr = (GCPtr & PAGE_BASE_GC_MASK); /** @todo this ain't necessary, right... */
840
841 /*
842 * Call worker.
843 */
844 PVM pVM = pVCpu->CTX_SUFF(pVM);
845 pgmLock(pVM);
846 int rc = PGM_SHW_PFN(ModifyPage, pVCpu)(pVCpu, GCPtr, cb, fFlags, fMask);
847 pgmUnlock(pVM);
848 return rc;
849}
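/*
 * Sketch of the fFlags/fMask semantics (illustrative): the new flags are
 * (old & fMask) | fFlags, so write-protecting and re-enabling writes on a
 * range look like this.  Note the 64-bit casts, per the "Be very CAREFUL
 * when ~'ing" remark above.
 *
 *      // write-protect the range:
 *      rc = PGMShwModifyPage(pVCpu, GCPtr, cb, 0, ~(uint64_t)X86_PTE_RW);
 *      // make it writable again:
 *      rc = PGMShwModifyPage(pVCpu, GCPtr, cb, X86_PTE_RW, ~(uint64_t)0);
 */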
850
851/**
852 * Gets the shadow page directory for the specified address, PAE.
853 *
854 * @returns VBox status code.
855 * @param pVCpu The VMCPU handle.
856 * @param GCPtr The address.
857 * @param pGstPdpe Guest PDPT entry
858 * @param ppPD Receives address of page directory
859 */
860int pgmShwSyncPaePDPtr(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDPE pGstPdpe, PX86PDPAE *ppPD)
861{
862 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
863 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(&pVCpu->pgm.s);
864 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
865 PVM pVM = pVCpu->CTX_SUFF(pVM);
866 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
867 PPGMPOOLPAGE pShwPage;
868 int rc;
869
870 /* Allocate page directory if not present. */
871 if ( !pPdpe->n.u1Present
872 && !(pPdpe->u & X86_PDPE_PG_MASK))
873 {
874 bool fNestedPaging = HWACCMIsNestedPagingActive(pVM);
875 bool fPaging = !!(CPUMGetGuestCR0(pVCpu) & X86_CR0_PG);
876 RTGCPTR64 GCPdPt;
877 PGMPOOLKIND enmKind;
878
879# if defined(IN_RC)
880 /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
881 PGMDynLockHCPage(pVM, (uint8_t *)pPdpe);
882# endif
883
884 if (fNestedPaging || !fPaging)
885 {
886 /* AMD-V nested paging or real/protected mode without paging */
887 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
888 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
889 }
890 else
891 {
892 Assert(pGstPdpe);
893
894 if (CPUMGetGuestCR4(pVCpu) & X86_CR4_PAE)
895 {
896 if (!pGstPdpe->n.u1Present)
897 {
898 /* PD not present; guest must reload CR3 to change it.
899 * No need to monitor anything in this case.
900 */
901 Assert(!HWACCMIsEnabled(pVM));
902
903 GCPdPt = pGstPdpe->u & X86_PDPE_PG_MASK;
904 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
905 pGstPdpe->n.u1Present = 1;
906 }
907 else
908 {
909 GCPdPt = pGstPdpe->u & X86_PDPE_PG_MASK;
910 enmKind = PGMPOOLKIND_PAE_PD_FOR_PAE_PD;
911 }
912 }
913 else
914 {
915 GCPdPt = CPUMGetGuestCR3(pVCpu);
916 enmKind = (PGMPOOLKIND)(PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD + iPdPt);
917 }
918 }
919
920 /* Create a reference back to the PDPT by using the index in its shadow page. */
921 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPdPt, &pShwPage);
922 AssertRCReturn(rc, rc);
923
924 /* The PD was cached or created; hook it up now. */
925 pPdpe->u |= pShwPage->Core.Key
926 | (pGstPdpe->u & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));
927
928# if defined(IN_RC)
929 /* In 32-bit PAE mode we *must* invalidate the TLB when changing a PDPT entry; the CPU fetches them only during cr3 load, so any
930 * non-present PDPT will continue to cause page faults.
931 */
932 ASMReloadCR3();
933 PGMDynUnlockHCPage(pVM, (uint8_t *)pPdpe);
934# endif
935 }
936 else
937 {
938 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
939 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
940 Assert((pPdpe->u & X86_PDPE_PG_MASK) == pShwPage->Core.Key);
941
942 pgmPoolCacheUsed(pPool, pShwPage);
943 }
944 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
945 return VINF_SUCCESS;
946}
947
948
949/**
950 * Gets the pointer to the shadow page directory entry for an address, PAE.
951 *
952 * @returns VBox status code.
953 * @param pPGM Pointer to the PGMCPU instance data.
954 * @param GCPtr The address.
955 * @param ppShwPde Receives the address of the pgm pool page for the shadow page directory
956 */
957DECLINLINE(int) pgmShwGetPaePoolPagePD(PPGMCPU pPGM, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde)
958{
959 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
960 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pPGM);
961 AssertReturn(pPdpt, VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT); /* can't happen */
962 if (!pPdpt->a[iPdPt].n.u1Present)
963 {
964 LogFlow(("pgmShwGetPaePoolPagePD: PD %d not present (%RX64)\n", iPdPt, pPdpt->a[iPdPt].u));
965 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
966 }
967 AssertMsg(pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK, ("GCPtr=%RGv\n", GCPtr));
968
969 /* Fetch the pgm pool shadow descriptor. */
970 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(PGMCPU2PGM(pPGM)->CTX_SUFF(pPool), pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
971 AssertReturn(pShwPde, VERR_INTERNAL_ERROR);
972
973 *ppShwPde = pShwPde;
974 return VINF_SUCCESS;
975}
976
977#ifndef IN_RC
978
979/**
980 * Syncs the SHADOW page directory pointer for the specified address.
981 *
982 * Allocates backing pages in case the PDPT or PML4 entry is missing.
983 *
984 * The caller is responsible for making sure the guest has a valid PD before
985 * calling this function.
986 *
987 * @returns VBox status.
988 * @param pVCpu VMCPU handle.
989 * @param GCPtr The address.
990 * @param pGstPml4e Guest PML4 entry
991 * @param pGstPdpe Guest PDPT entry
992 * @param ppPD Receives address of page directory
993 */
994int pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E pGstPml4e, PX86PDPE pGstPdpe, PX86PDPAE *ppPD)
995{
996 PPGMCPU pPGM = &pVCpu->pgm.s;
997 PVM pVM = pVCpu->CTX_SUFF(pVM);
998 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
999 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1000 PX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pPGM, iPml4);
1001 bool fNestedPaging = HWACCMIsNestedPagingActive(pVM);
1002 bool fPaging = !!(CPUMGetGuestCR0(pVCpu) & X86_CR0_PG);
1003 PPGMPOOLPAGE pShwPage;
1004 int rc;
1005
1006 /* Allocate page directory pointer table if not present. */
1007 if ( !pPml4e->n.u1Present
1008 && !(pPml4e->u & X86_PML4E_PG_MASK))
1009 {
1010 RTGCPTR64 GCPml4;
1011 PGMPOOLKIND enmKind;
1012
1013 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1014
1015 if (fNestedPaging || !fPaging)
1016 {
1017 /* AMD-V nested paging or real/protected mode without paging */
1018 GCPml4 = (RTGCPTR64)iPml4 << X86_PML4_SHIFT;
1019 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_PHYS;
1020 }
1021 else
1022 {
1023 Assert(pGstPml4e && pGstPdpe);
1024
1025 GCPml4 = pGstPml4e->u & X86_PML4E_PG_MASK;
1026 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT;
1027 }
1028
1029 /* Create a reference back to the PDPT by using the index in its shadow page. */
1030 rc = pgmPoolAlloc(pVM, GCPml4, enmKind, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, &pShwPage);
1031 AssertRCReturn(rc, rc);
1032 }
1033 else
1034 {
1035 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
1036 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1037
1038 pgmPoolCacheUsed(pPool, pShwPage);
1039 }
1040 /* The PDPT was cached or created; hook it up now. */
1041 pPml4e->u |= pShwPage->Core.Key
1042 | (pGstPml4e->u & ~(X86_PML4E_PG_MASK | X86_PML4E_AVL_MASK | X86_PML4E_PCD | X86_PML4E_PWT));
1043
1044 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1045 PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1046 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1047
1048 /* Allocate page directory if not present. */
1049 if ( !pPdpe->n.u1Present
1050 && !(pPdpe->u & X86_PDPE_PG_MASK))
1051 {
1052 RTGCPTR64 GCPdPt;
1053 PGMPOOLKIND enmKind;
1054
1055 if (fNestedPaging || !fPaging)
1056 {
1057 /* AMD-V nested paging or real/protected mode without paging */
1058 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
1059 enmKind = PGMPOOLKIND_64BIT_PD_FOR_PHYS;
1060 }
1061 else
1062 {
1063 Assert(pGstPdpe);
1064
1065 GCPdPt = pGstPdpe->u & X86_PDPE_PG_MASK;
1066 enmKind = PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD;
1067 }
1068
1069 /* Create a reference back to the PDPT by using the index in its shadow page. */
1070 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, pShwPage->idx, iPdPt, &pShwPage);
1071 AssertRCReturn(rc, rc);
1072 }
1073 else
1074 {
1075 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
1076 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1077
1078 pgmPoolCacheUsed(pPool, pShwPage);
1079 }
1080 /* The PD was cached or created; hook it up now. */
1081 pPdpe->u |= pShwPage->Core.Key
1082 | (pGstPdpe->u & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));
1083
1084 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1085 return VINF_SUCCESS;
1086}
1087
1088
1089/**
1090 * Gets the SHADOW page directory pointer for the specified address (long mode).
1091 *
1092 * @returns VBox status.
1093 * @param pVCpu VMCPU handle.
1094 * @param GCPtr The address.
1095 * @param ppPdpt Receives address of pdpt
1096 * @param ppPD Receives address of page directory
1097 */
1098DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
1099{
1100 PPGMCPU pPGM = &pVCpu->pgm.s;
1101 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1102 PCX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pPGM, iPml4);
1103 AssertReturn(pPml4e, VERR_INTERNAL_ERROR);
1104 if (ppPml4e)
1105 *ppPml4e = (PX86PML4E)pPml4e;
1106
1107 Log4(("pgmShwGetLongModePDPtr %VGv (%VHv) %RX64\n", GCPtr, pPml4e, pPml4e->u));
1108
1109 if (!pPml4e->n.u1Present)
1110 return VERR_PAGE_MAP_LEVEL4_NOT_PRESENT;
1111
1112 PVM pVM = pVCpu->CTX_SUFF(pVM);
1113 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1114 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
1115 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1116
1117 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1118 PCX86PDPT pPdpt = *ppPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1119 if (!pPdpt->a[iPdPt].n.u1Present)
1120 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1121
1122 pShwPage = pgmPoolGetPage(pPool, pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
1123 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1124
1125 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1126 return VINF_SUCCESS;
1127}
1128
1129
1130/**
1131 * Syncs the SHADOW EPT page directory pointer for the specified address. Allocates
1132 * backing pages in case the PDPT or PML4 entry is missing.
1133 *
1134 * @returns VBox status.
1135 * @param pVCpu VMCPU handle.
1136 * @param GCPtr The address.
1137 * @param ppPdpt Receives address of pdpt
1138 * @param ppPD Receives address of page directory
1139 */
1140int pgmShwGetEPTPDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD)
1141{
1142 PPGMCPU pPGM = &pVCpu->pgm.s;
1143 PVM pVM = pVCpu->CTX_SUFF(pVM);
1144 const unsigned iPml4 = (GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
1145 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1146 PEPTPML4 pPml4;
1147 PEPTPML4E pPml4e;
1148 PPGMPOOLPAGE pShwPage;
1149 int rc;
1150
1151 Assert(HWACCMIsNestedPagingActive(pVM));
1152
1153 pPml4 = (PEPTPML4)PGMPOOL_PAGE_2_PTR_BY_PGMCPU(pPGM, pPGM->CTX_SUFF(pShwPageCR3));
1154 Assert(pPml4);
1155
1156 /* Allocate page directory pointer table if not present. */
1157 pPml4e = &pPml4->a[iPml4];
1158 if ( !pPml4e->n.u1Present
1159 && !(pPml4e->u & EPT_PML4E_PG_MASK))
1160 {
1161 Assert(!(pPml4e->u & EPT_PML4E_PG_MASK));
1162 RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT;
1163
1164 rc = pgmPoolAlloc(pVM, GCPml4, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOL_IDX_NESTED_ROOT, iPml4, &pShwPage);
1165 AssertRCReturn(rc, rc);
1166 }
1167 else
1168 {
1169 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
1170 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1171
1172 pgmPoolCacheUsed(pPool, pShwPage);
1173 }
1174 /* The PDPT was cached or created; hook it up now and fill with the default value. */
1175 pPml4e->u = pShwPage->Core.Key;
1176 pPml4e->n.u1Present = 1;
1177 pPml4e->n.u1Write = 1;
1178 pPml4e->n.u1Execute = 1;
1179
1180 const unsigned iPdPt = (GCPtr >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
1181 PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1182 PEPTPDPTE pPdpe = &pPdpt->a[iPdPt];
1183
1184 if (ppPdpt)
1185 *ppPdpt = pPdpt;
1186
1187 /* Allocate page directory if not present. */
1188 if ( !pPdpe->n.u1Present
1189 && !(pPdpe->u & EPT_PDPTE_PG_MASK))
1190 {
1191 RTGCPTR64 GCPdPt = (RTGCPTR64)iPdPt << EPT_PDPT_SHIFT;
1192
1193 rc = pgmPoolAlloc(pVM, GCPdPt, PGMPOOLKIND_64BIT_PD_FOR_PHYS, pShwPage->idx, iPdPt, &pShwPage);
1194 AssertRCReturn(rc, rc);
1195 }
1196 else
1197 {
1198 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & EPT_PDPTE_PG_MASK);
1199 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1200
1201 pgmPoolCacheUsed(pPool, pShwPage);
1202 }
1203 /* The PD was cached or created; hook it up now and fill with the default value. */
1204 pPdpe->u = pShwPage->Core.Key;
1205 pPdpe->n.u1Present = 1;
1206 pPdpe->n.u1Write = 1;
1207 pPdpe->n.u1Execute = 1;
1208
1209 *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1210 return VINF_SUCCESS;
1211}
1212
1213#endif /* !IN_RC */
1214
1215/**
1216 * Gets effective Guest OS page information.
1217 *
1218 * When GCPtr is in a big page, the function will return as if it was a normal
1219 * 4KB page. If the need for distinguishing between big and normal page becomes
1220 * necessary at a later point, a PGMGstGetPage() will be created for that
1221 * purpose.
1222 *
1223 * @returns VBox status.
1224 * @param pVCpu VMCPU handle.
1225 * @param GCPtr Guest Context virtual address of the page.
1226 * @param pfFlags Where to store the flags. These are X86_PTE_*, even for big pages.
1227 * @param pGCPhys Where to store the GC physical address of the page.
1228 * This is page aligned.
1229 */
1230VMMDECL(int) PGMGstGetPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
1231{
1232 return PGM_GST_PFN(GetPage, pVCpu)(pVCpu, GCPtr, pfFlags, pGCPhys);
1233}
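/*
 * Usage sketch (illustrative): translating a guest-virtual address and
 * checking the effective flags returned for it.
 *
 *      uint64_t fGstFlags;
 *      RTGCPHYS GCPhys;
 *      int rc2 = PGMGstGetPage(pVCpu, GCPtr, &fGstFlags, &GCPhys);
 *      if (RT_SUCCESS(rc2) && (fGstFlags & X86_PTE_RW))
 *          ;   // GCPtr maps to guest-physical GCPhys and is guest-writable
 */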
1234
1235
1236/**
1237 * Checks if the page is present.
1238 *
1239 * @returns true if the page is present.
1240 * @returns false if the page is not present.
1241 * @param pVCpu VMCPU handle.
1242 * @param GCPtr Address within the page.
1243 */
1244VMMDECL(bool) PGMGstIsPagePresent(PVMCPU pVCpu, RTGCPTR GCPtr)
1245{
1246 int rc = PGMGstGetPage(pVCpu, GCPtr, NULL, NULL);
1247 return RT_SUCCESS(rc);
1248}
1249
1250
1251/**
1252 * Sets (replaces) the page flags for a range of pages in the guest's tables.
1253 *
1254 * @returns VBox status.
1255 * @param pVCpu VMCPU handle.
1256 * @param GCPtr The address of the first page.
1257 * @param cb The size of the range in bytes.
1258 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
1259 */
1260VMMDECL(int) PGMGstSetPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
1261{
1262 return PGMGstModifyPage(pVCpu, GCPtr, cb, fFlags, 0);
1263}
1264
1265
1266/**
1267 * Modify page flags for a range of pages in the guest's tables
1268 *
1269 * The existing flags are ANDed with the fMask and ORed with the fFlags.
1270 *
1271 * @returns VBox status code.
1272 * @param pVCpu VMCPU handle.
1273 * @param GCPtr Virtual address of the first page in the range.
1274 * @param cb Size (in bytes) of the range to apply the modification to.
1275 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
1276 * @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
1277 * Be very CAREFUL when ~'ing constants which could be 32-bit!
1278 */
1279VMMDECL(int) PGMGstModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
1280{
1281 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,GstModifyPage), a);
1282
1283 /*
1284 * Validate input.
1285 */
1286 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
1287 Assert(cb);
1288
1289 LogFlow(("PGMGstModifyPage %RGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));
1290
1291 /*
1292 * Adjust input.
1293 */
1294 cb += GCPtr & PAGE_OFFSET_MASK;
1295 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
1296 GCPtr = (GCPtr & PAGE_BASE_GC_MASK);
1297
1298 /*
1299 * Call worker.
1300 */
1301 int rc = PGM_GST_PFN(ModifyPage, pVCpu)(pVCpu, GCPtr, cb, fFlags, fMask);
1302
1303 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,GstModifyPage), a);
1304 return rc;
1305}
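/*
 * Usage sketch (illustrative): marking a guest range accessed and dirty,
 * e.g. after completing a write on the guest's behalf.  An fMask of ~0
 * keeps all existing flags while fFlags ORs in the A and D bits.
 *
 *      rc = PGMGstModifyPage(pVCpu, GCPtr, cb, X86_PTE_A | X86_PTE_D, ~(uint64_t)0);
 */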
1306
1307#ifdef IN_RING3
1308
1309/**
1310 * Performs the lazy mapping of the 32-bit guest PD.
1311 *
1312 * @returns Pointer to the mapping.
1313 * @param pPGM The PGM instance data.
1314 */
1315PX86PD pgmGstLazyMap32BitPD(PPGMCPU pPGM)
1316{
1317 Assert(!pPGM->CTX_SUFF(pGst32BitPd));
1318 PVM pVM = PGMCPU2VM(pPGM);
1319 pgmLock(pVM);
1320
1321 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, pPGM->GCPhysCR3);
1322 AssertReturn(pPage, NULL);
1323
1324 RTHCPTR HCPtrGuestCR3;
1325 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pPGM->GCPhysCR3 & X86_CR3_PAGE_MASK, (void **)&HCPtrGuestCR3);
1326 AssertRCReturn(rc, NULL);
1327
1328 pPGM->pGst32BitPdR3 = (R3PTRTYPE(PX86PD))HCPtrGuestCR3;
1329# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1330 pPGM->pGst32BitPdR0 = (R0PTRTYPE(PX86PD))HCPtrGuestCR3;
1331# endif
1332
1333 pgmUnlock(pVM);
1334 return pPGM->CTX_SUFF(pGst32BitPd);
1335}
1336
1337
1338/**
1339 * Performs the lazy mapping of the PAE guest PDPT.
1340 *
1341 * @returns Pointer to the mapping.
1342 * @param pPGM The PGM instance data.
1343 */
1344PX86PDPT pgmGstLazyMapPaePDPT(PPGMCPU pPGM)
1345{
1346 Assert(!pPGM->CTX_SUFF(pGstPaePdpt));
1347 PVM pVM = PGMCPU2VM(pPGM);
1348 pgmLock(pVM);
1349
1350 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, pPGM->GCPhysCR3);
1351 AssertReturn(pPage, NULL);
1352
1353 RTHCPTR HCPtrGuestCR3;
1354 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pPGM->GCPhysCR3 & X86_CR3_PAE_PAGE_MASK, (void **)&HCPtrGuestCR3);
1355 AssertRCReturn(rc, NULL);
1356
1357 pPGM->pGstPaePdptR3 = (R3PTRTYPE(PX86PDPT))HCPtrGuestCR3;
1358# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1359 pPGM->pGstPaePdptR0 = (R0PTRTYPE(PX86PDPT))HCPtrGuestCR3;
1360# endif
1361
1362 pgmUnlock(pVM);
1363 return pPGM->CTX_SUFF(pGstPaePdpt);
1364}
1365
1366#endif /* IN_RING3 */
1367
1368#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1369/**
1370 * Performs the lazy mapping / updating of a PAE guest PD.
1371 *
1372 * @returns Pointer to the mapping.
1373 * @param pPGM The PGM instance data.
1374 * @param iPdpt Which PD entry to map (0..3).
1375 */
1376PX86PDPAE pgmGstLazyMapPaePD(PPGMCPU pPGM, uint32_t iPdpt)
1377{
1378 PVM pVM = PGMCPU2VM(pPGM);
1379 pgmLock(pVM);
1380
1381 PX86PDPT pGuestPDPT = pPGM->CTX_SUFF(pGstPaePdpt);
1382 Assert(pGuestPDPT);
1383 Assert(pGuestPDPT->a[iPdpt].n.u1Present);
1384 RTGCPHYS GCPhys = pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK;
1385 bool const fChanged = pPGM->aGCPhysGstPaePDs[iPdpt] != GCPhys;
1386
1387 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
1388 if (RT_LIKELY(pPage))
1389 {
1390 int rc = VINF_SUCCESS;
1391 RTRCPTR RCPtr = NIL_RTRCPTR;
1392 RTHCPTR HCPtr = NIL_RTHCPTR;
1393#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1394 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &HCPtr);
1395 AssertRC(rc);
1396#endif
1397 if (RT_SUCCESS(rc) && fChanged)
1398 {
1399 RCPtr = (RTRCPTR)(RTRCUINTPTR)(pVM->pgm.s.GCPtrCR3Mapping + (1 + iPdpt) * PAGE_SIZE);
1400 rc = PGMMap(pVM, (RTRCUINTPTR)RCPtr, PGM_PAGE_GET_HCPHYS(pPage), PAGE_SIZE, 0);
1401 }
1402 if (RT_SUCCESS(rc))
1403 {
1404 pPGM->apGstPaePDsR3[iPdpt] = (R3PTRTYPE(PX86PDPAE))HCPtr;
1405# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1406 pPGM->apGstPaePDsR0[iPdpt] = (R0PTRTYPE(PX86PDPAE))HCPtr;
1407# endif
1408 if (fChanged)
1409 {
1410 pPGM->aGCPhysGstPaePDs[iPdpt] = GCPhys;
1411 pPGM->apGstPaePDsRC[iPdpt] = (RCPTRTYPE(PX86PDPAE))RCPtr;
1412 }
1413
1414 pgmUnlock(pVM);
1415 return pPGM->CTX_SUFF(apGstPaePDs)[iPdpt];
1416 }
1417 }
1418
1419 /* Invalid page or some failure, invalidate the entry. */
1420 pPGM->aGCPhysGstPaePDs[iPdpt] = NIL_RTGCPHYS;
1421 pPGM->apGstPaePDsR3[iPdpt] = 0;
1422# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1423 pPGM->apGstPaePDsR0[iPdpt] = 0;
1424# endif
1425 pPGM->apGstPaePDsRC[iPdpt] = 0;
1426
1427 pgmUnlock(pVM);
1428 return NULL;
1429}
1430#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1431
1432
1433#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R3
1434/**
1435 * Performs the lazy mapping of the 32-bit guest PD.
1436 *
1437 * @returns Pointer to the mapping.
1438 * @param pPGM The PGM instance data.
1439 */
1440PX86PML4 pgmGstLazyMapPml4(PPGMCPU pPGM)
1441{
1442 Assert(!pPGM->CTX_SUFF(pGstAmd64Pml4));
1443 PVM pVM = PGMCPU2VM(pPGM);
1444 pgmLock(pVM);
1445
1446 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, pPGM->GCPhysCR3);
1447 AssertReturn(pPage, NULL);
1448
1449 RTHCPTR HCPtrGuestCR3;
1450 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pPGM->GCPhysCR3 & X86_CR3_AMD64_PAGE_MASK, (void **)&HCPtrGuestCR3);
1451 AssertRCReturn(rc, NULL);
1452
1453 pPGM->pGstAmd64Pml4R3 = (R3PTRTYPE(PX86PML4))HCPtrGuestCR3;
1454# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1455 pPGM->pGstAmd64Pml4R0 = (R0PTRTYPE(PX86PML4))HCPtrGuestCR3;
1456# endif
1457
1458 pgmUnlock(pVM);
1459 return pPGM->CTX_SUFF(pGstAmd64Pml4);
1460}
1461#endif /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R3 */
1462
1463
1464/**
1465 * Gets the specified page directory pointer table entry.
1466 *
1467 * @returns PDP entry
1468 * @param pVCpu VMCPU handle.
1469 * @param iPdpt PDPT index
1470 */
1471VMMDECL(X86PDPE) PGMGstGetPaePDPtr(PVMCPU pVCpu, unsigned iPdpt)
1472{
1473 Assert(iPdpt <= 3);
1474 return pgmGstGetPaePDPTPtr(&pVCpu->pgm.s)->a[iPdpt & 3];
1475}
1476
1477
1478/**
1479 * Gets the current CR3 register value for the shadow memory context.
1480 * @returns CR3 value.
1481 * @param pVCpu VMCPU handle.
1482 */
1483VMMDECL(RTHCPHYS) PGMGetHyperCR3(PVMCPU pVCpu)
1484{
1485 PPGMPOOLPAGE pPoolPage = pVCpu->pgm.s.CTX_SUFF(pShwPageCR3);
1486 AssertPtrReturn(pPoolPage, 0);
1487 return pPoolPage->Core.Key;
1488}
1489
1490
1491/**
1492 * Gets the current CR3 register value for the nested memory context.
1493 * @returns CR3 value.
1494 * @param pVCpu VMCPU handle.
1495 */
1496VMMDECL(RTHCPHYS) PGMGetNestedCR3(PVMCPU pVCpu, PGMMODE enmShadowMode)
1497{
1498 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1499 return pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key;
1500}
1501
1502
1503/**
1504 * Gets the current CR3 register value for the HC intermediate memory context.
1505 * @returns CR3 value.
1506 * @param pVM The VM handle.
1507 */
1508VMMDECL(RTHCPHYS) PGMGetInterHCCR3(PVM pVM)
1509{
1510 switch (pVM->pgm.s.enmHostMode)
1511 {
1512 case SUPPAGINGMODE_32_BIT:
1513 case SUPPAGINGMODE_32_BIT_GLOBAL:
1514 return pVM->pgm.s.HCPhysInterPD;
1515
1516 case SUPPAGINGMODE_PAE:
1517 case SUPPAGINGMODE_PAE_GLOBAL:
1518 case SUPPAGINGMODE_PAE_NX:
1519 case SUPPAGINGMODE_PAE_GLOBAL_NX:
1520 return pVM->pgm.s.HCPhysInterPaePDPT;
1521
1522 case SUPPAGINGMODE_AMD64:
1523 case SUPPAGINGMODE_AMD64_GLOBAL:
1524 case SUPPAGINGMODE_AMD64_NX:
1525 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
1526 return pVM->pgm.s.HCPhysInterPaePDPT;
1527
1528 default:
1529 AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode));
1530 return ~0;
1531 }
1532}
1533
1534
1535/**
1536 * Gets the current CR3 register value for the RC intermediate memory context.
1537 * @returns CR3 value.
1538 * @param pVM The VM handle.
1539 * @param pVCpu VMCPU handle.
1540 */
1541VMMDECL(RTHCPHYS) PGMGetInterRCCR3(PVM pVM, PVMCPU pVCpu)
1542{
1543 switch (pVCpu->pgm.s.enmShadowMode)
1544 {
1545 case PGMMODE_32_BIT:
1546 return pVM->pgm.s.HCPhysInterPD;
1547
1548 case PGMMODE_PAE:
1549 case PGMMODE_PAE_NX:
1550 return pVM->pgm.s.HCPhysInterPaePDPT;
1551
1552 case PGMMODE_AMD64:
1553 case PGMMODE_AMD64_NX:
1554 return pVM->pgm.s.HCPhysInterPaePML4;
1555
1556 case PGMMODE_EPT:
1557 case PGMMODE_NESTED:
1558 return 0; /* not relevant */
1559
1560 default:
1561 AssertMsgFailed(("enmShadowMode=%d\n", pVCpu->pgm.s.enmShadowMode));
1562 return ~0;
1563 }
1564}
1565
1566
1567/**
1568 * Gets the CR3 register value for the 32-Bit intermediate memory context.
1569 * @returns CR3 value.
1570 * @param pVM The VM handle.
1571 */
1572VMMDECL(RTHCPHYS) PGMGetInter32BitCR3(PVM pVM)
1573{
1574 return pVM->pgm.s.HCPhysInterPD;
1575}
1576
1577
1578/**
1579 * Gets the CR3 register value for the PAE intermediate memory context.
1580 * @returns CR3 value.
1581 * @param pVM The VM handle.
1582 */
1583VMMDECL(RTHCPHYS) PGMGetInterPaeCR3(PVM pVM)
1584{
1585 return pVM->pgm.s.HCPhysInterPaePDPT;
1586}
1587
1588
1589/**
1590 * Gets the CR3 register value for the AMD64 intermediate memory context.
1591 * @returns CR3 value.
1592 * @param pVM The VM handle.
1593 */
1594VMMDECL(RTHCPHYS) PGMGetInterAmd64CR3(PVM pVM)
1595{
1596 return pVM->pgm.s.HCPhysInterPaePML4;
1597}
1598
1599
1600/**
1601 * Performs and schedules necessary updates following a CR3 load or reload.
1602 *
1603 * This will normally involve mapping the guest PD or nPDPT
1604 *
1605 * @returns VBox status code.
1606 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
1607 * safely be ignored and overridden since the FF will be set too then.
1608 * @param pVCpu VMCPU handle.
1609 * @param cr3 The new cr3.
1610 * @param fGlobal Indicates whether this is a global flush or not.
1611 */
1612VMMDECL(int) PGMFlushTLB(PVMCPU pVCpu, uint64_t cr3, bool fGlobal)
1613{
1614 PVM pVM = pVCpu->CTX_SUFF(pVM);
1615
1616 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLB), a);
1617
1618 /*
1619 * Always flag the necessary updates; necessary for hardware acceleration
1620 */
1621 /** @todo optimize this, it shouldn't always be necessary. */
1622 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
1623 if (fGlobal)
1624 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1625 LogFlow(("PGMFlushTLB: cr3=%RX64 OldCr3=%RX64 fGlobal=%d\n", cr3, pVCpu->pgm.s.GCPhysCR3, fGlobal));
1626
1627 /*
1628 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
1629 */
1630 int rc = VINF_SUCCESS;
1631 RTGCPHYS GCPhysCR3;
1632 switch (pVCpu->pgm.s.enmGuestMode)
1633 {
1634 case PGMMODE_PAE:
1635 case PGMMODE_PAE_NX:
1636 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1637 break;
1638 case PGMMODE_AMD64:
1639 case PGMMODE_AMD64_NX:
1640 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
1641 break;
1642 default:
1643 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1644 break;
1645 }
1646
1647 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
1648 {
1649 RTGCPHYS GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
1650 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
1651 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
1652 if (RT_LIKELY(rc == VINF_SUCCESS))
1653 {
1654 if (!pVM->pgm.s.fMappingsFixed)
1655 {
1656 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1657 }
1658 }
1659 else
1660 {
1661 AssertMsg(rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
1662 Assert(VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_PGM_SYNC_CR3));
1663 pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3;
1664 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MAP_CR3;
1665 if (!pVM->pgm.s.fMappingsFixed)
1666 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;
1667 }
1668
1669 if (fGlobal)
1670 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLBNewCR3Global));
1671 else
1672 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLBNewCR3));
1673 }
1674 else
1675 {
1676 /*
1677 * Check if we have a pending update of the CR3 monitoring.
1678 */
1679 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
1680 {
1681 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1682 Assert(!pVM->pgm.s.fMappingsFixed);
1683 }
1684 if (fGlobal)
1685 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLBSameCR3Global));
1686 else
1687 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLBSameCR3));
1688 }
1689
1690 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLB), a);
1691 return rc;
1692}
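/*
 * Usage sketch (illustrative): how a MOV CR3 emulation path would typically
 * call this.  A plain CR3 load does not flush global TLB entries, so the
 * flush is only made global when CR4.PGE is clear.
 *
 *      uint64_t cr4 = CPUMGetGuestCR4(pVCpu);
 *      rc = PGMFlushTLB(pVCpu, uNewCr3, !(cr4 & X86_CR4_PGE));
 *      // VINF_PGM_SYNC_CR3 may be ignored here: the sync FF has been set too.
 */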
1693
1694
1695/**
1696 * Performs and schedules necessary updates following a CR3 load or reload when
1697 * using nested or extended paging.
1698 *
1699 * This API is an alternative to PGMFlushTLB that avoids actually flushing the
1700 * TLB and triggering a SyncCR3.
1701 *
1702 * This will normally involve mapping the guest PD or nPDPT
1703 *
1704 * @returns VBox status code.
1705 * @retval VINF_SUCCESS.
1706 * @retval (If applied when not in nested mode: VINF_PGM_SYNC_CR3 if monitoring
1707 * requires a CR3 sync. This can safely be ignored and overridden since
1708 * the FF will be set too then.)
1709 * @param pVCpu VMCPU handle.
1710 * @param cr3 The new cr3.
1711 */
1712VMMDECL(int) PGMUpdateCR3(PVMCPU pVCpu, uint64_t cr3)
1713{
1714 PVM pVM = pVCpu->CTX_SUFF(pVM);
1715
1716 LogFlow(("PGMUpdateCR3: cr3=%RX64 OldCr3=%RX64\n", cr3, pVCpu->pgm.s.GCPhysCR3));
1717
1718 /* We assume we're only called in nested paging mode. */
1719 Assert(pVM->pgm.s.fMappingsFixed);
1720 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3));
1721 Assert(HWACCMIsNestedPagingActive(pVM) || pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
1722
1723 /*
1724 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
1725 */
1726 int rc = VINF_SUCCESS;
1727 RTGCPHYS GCPhysCR3;
1728 switch (pVCpu->pgm.s.enmGuestMode)
1729 {
1730 case PGMMODE_PAE:
1731 case PGMMODE_PAE_NX:
1732 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1733 break;
1734 case PGMMODE_AMD64:
1735 case PGMMODE_AMD64_NX:
1736 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
1737 break;
1738 default:
1739 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1740 break;
1741 }
1742 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
1743 {
1744 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
1745 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
1746 AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */ /** @todo this isn't true for the mac, but we need hw to test/fix this. */
1747 }
1748 return rc;
1749}
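
/*
 * Example (illustrative sketch only): with nested or EPT paging the shadow
 * structures do not track the guest page tables, so a guest CR3 load only
 * needs the cheap PGMUpdateCR3 call instead of PGMFlushTLB. The mode check
 * and the variable names below are assumptions about how a caller might pick
 * between the two.
 *
 * @code
 *      int rc;
 *      if (HWACCMIsNestedPagingActive(pVM))
 *          rc = PGMUpdateCR3(pVCpu, uNewCr3);              // uNewCr3: assumed to be in scope
 *      else
 *          rc = PGMFlushTLB(pVCpu, uNewCr3, fGlobalFlush); // fGlobalFlush: assumed to be in scope
 *      AssertRC(rc);
 * @endcode
 */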
1750
1751
1752/**
1753 * Synchronize the paging structures.
1754 *
1755 * This function is called in response to the VMCPU_FF_PGM_SYNC_CR3 and
1756 * VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL force action flags. Those two flags are set
1757 * in several places, most importantly whenever the CR3 is loaded.
1758 *
1759 * @returns VBox status code.
1760 * @param pVCpu VMCPU handle.
1761 * @param cr0 Guest context CR0 register
1762 * @param cr3 Guest context CR3 register
1763 * @param cr4 Guest context CR4 register
1764 * @param fGlobal Including global page directories or not
1765 */
1766VMMDECL(int) PGMSyncCR3(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
1767{
1768 PVM pVM = pVCpu->CTX_SUFF(pVM);
1769 int rc;
1770
1771#ifdef PGMPOOL_WITH_MONITORING
1772 /*
1773 * The pool may have pending stuff and even require a return to ring-3 to
1774 * clear the whole thing.
1775 */
1776 rc = pgmPoolSyncCR3(pVM);
1777 if (rc != VINF_SUCCESS)
1778 return rc;
1779#endif
1780
1781 /*
1782 * We might be called when we shouldn't.
1783 *
1784 * The mode switching will ensure that the PD is resynced
1785 * after every mode switch. So, if we find ourselves here
1786 * when in protected or real mode we can safely disable the
1787 * FF and return immediately.
1788 */
1789 if (pVCpu->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
1790 {
1791 Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
1792 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1793 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
1794 return VINF_SUCCESS;
1795 }
1796
1797 /* If global pages are not supported, then all flushes are global. */
1798 if (!(cr4 & X86_CR4_PGE))
1799 fGlobal = true;
1800 LogFlow(("PGMSyncCR3: cr0=%RX64 cr3=%RX64 cr4=%RX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
1801 VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)));
1802
1803 /*
1804 * Check if we need to finish an aborted MapCR3 call (see PGMFlushTLB).
1805 * This should be done before SyncCR3.
1806 */
1807 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MAP_CR3)
1808 {
1809 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MAP_CR3;
1810
1811 RTGCPHYS GCPhysCR3Old = pVCpu->pgm.s.GCPhysCR3;
1812 RTGCPHYS GCPhysCR3;
1813 switch (pVCpu->pgm.s.enmGuestMode)
1814 {
1815 case PGMMODE_PAE:
1816 case PGMMODE_PAE_NX:
1817 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1818 break;
1819 case PGMMODE_AMD64:
1820 case PGMMODE_AMD64_NX:
1821 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
1822 break;
1823 default:
1824 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1825 break;
1826 }
1827
1828 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
1829 {
1830 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
1831 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
1832 }
1833#ifdef IN_RING3
1834 if (rc == VINF_PGM_SYNC_CR3)
1835 rc = pgmPoolSyncCR3(pVM);
1836#else
1837 if (rc == VINF_PGM_SYNC_CR3)
1838 {
1839 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3Old;
1840 return rc;
1841 }
1842#endif
1843 AssertRCReturn(rc, rc);
1844 AssertRCSuccessReturn(rc, VERR_INTERNAL_ERROR);
1845 }
1846
1847 /*
1848 * Let the 'Bth' function do the work and we'll just keep track of the flags.
1849 */
1850 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
1851 rc = PGM_BTH_PFN(SyncCR3, pVCpu)(pVCpu, cr0, cr3, cr4, fGlobal);
1852 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
1853 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
1854 if (rc == VINF_SUCCESS)
1855 {
1856 if (!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
1857 {
1858 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1859 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
1860 }
1861
1862 /*
1863 * Check if we have a pending update of the CR3 monitoring.
1864 */
1865 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
1866 {
1867 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1868 Assert(!pVM->pgm.s.fMappingsFixed);
1869 }
1870 }
1871
1872 /*
1873 * Now flush the CR3 (guest context).
1874 */
1875 if (rc == VINF_SUCCESS)
1876 PGM_INVL_VCPU_TLBS(pVCpu);
1877 return rc;
1878}
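
/*
 * Example (illustrative sketch only): how an execution loop might react to the
 * sync force action flags before resuming guest execution. The guest register
 * variables are assumptions; they would normally be fetched from CPUM.
 *
 * @code
 *      if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
 *      {
 *          bool const fGlobal = VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
 *          int rc = PGMSyncCR3(pVCpu, uGuestCr0, uGuestCr3, uGuestCr4, fGlobal); // uGuest*: assumed
 *          if (rc == VINF_PGM_SYNC_CR3)
 *              return rc;              // let ring-3 finish the job (pool flush and friends).
 *          AssertRCReturn(rc, rc);
 *      }
 * @endcode
 */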
1879
1880
1881/**
1882 * Called whenever CR0 or CR4 is updated in a way which may change
1883 * the paging mode.
1884 *
1885 * @returns VBox status code, with the following informational code for
1886 * VM scheduling.
1887 * @retval VINF_SUCCESS if there was no change, or it was successfully dealt with.
1888 * @retval VINF_PGM_CHANGE_MODE if we're in RC or R0 and the mode changes.
1889 * (I.e. not in R3.)
1890 * @retval VINF_EM_SUSPEND or VINF_EM_OFF on a fatal runtime error. (R3 only)
1891 *
1892 * @param pVCpu VMCPU handle.
1893 * @param cr0 The new cr0.
1894 * @param cr4 The new cr4.
1895 * @param efer The new extended feature enable register.
1896 */
1897VMMDECL(int) PGMChangeMode(PVMCPU pVCpu, uint64_t cr0, uint64_t cr4, uint64_t efer)
1898{
1899 PVM pVM = pVCpu->CTX_SUFF(pVM);
1900 PGMMODE enmGuestMode;
1901
1902 /*
1903 * Calc the new guest mode.
1904 */
1905 if (!(cr0 & X86_CR0_PE))
1906 enmGuestMode = PGMMODE_REAL;
1907 else if (!(cr0 & X86_CR0_PG))
1908 enmGuestMode = PGMMODE_PROTECTED;
1909 else if (!(cr4 & X86_CR4_PAE))
1910 enmGuestMode = PGMMODE_32_BIT;
1911 else if (!(efer & MSR_K6_EFER_LME))
1912 {
1913 if (!(efer & MSR_K6_EFER_NXE))
1914 enmGuestMode = PGMMODE_PAE;
1915 else
1916 enmGuestMode = PGMMODE_PAE_NX;
1917 }
1918 else
1919 {
1920 if (!(efer & MSR_K6_EFER_NXE))
1921 enmGuestMode = PGMMODE_AMD64;
1922 else
1923 enmGuestMode = PGMMODE_AMD64_NX;
1924 }
1925
1926 /*
1927 * Did it change?
1928 */
1929 if (pVCpu->pgm.s.enmGuestMode == enmGuestMode)
1930 return VINF_SUCCESS;
1931
1932 /* Flush the TLB */
1933 PGM_INVL_VCPU_TLBS(pVCpu);
1934
1935#ifdef IN_RING3
1936 return PGMR3ChangeMode(pVM, pVCpu, enmGuestMode);
1937#else
1938 LogFlow(("PGMChangeMode: returns VINF_PGM_CHANGE_MODE.\n"));
1939 return VINF_PGM_CHANGE_MODE;
1940#endif
1941}
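
/*
 * Example (illustrative sketch only): a CR0/CR4/EFER write handler calling
 * PGMChangeMode and honouring the scheduling status it may return outside
 * ring-3. The uNew* variables are assumed to hold the new guest values.
 *
 * @code
 *      int rc = PGMChangeMode(pVCpu, uNewCr0, uNewCr4, uNewEfer); // uNew*: assumed to be in scope
 *      if (rc == VINF_PGM_CHANGE_MODE)
 *          return rc;                  // reschedule to ring-3 so PGMR3ChangeMode can do the switch.
 *      AssertRCReturn(rc, rc);
 * @endcode
 */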
1942
1943
1944/**
1945 * Gets the current guest paging mode.
1946 *
1947 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
1948 *
1949 * @returns The current paging mode.
1950 * @param pVCpu VMCPU handle.
1951 */
1952VMMDECL(PGMMODE) PGMGetGuestMode(PVMCPU pVCpu)
1953{
1954 return pVCpu->pgm.s.enmGuestMode;
1955}
1956
1957
1958/**
1959 * Gets the current shadow paging mode.
1960 *
1961 * @returns The current paging mode.
1962 * @param pVCpu VMCPU handle.
1963 */
1964VMMDECL(PGMMODE) PGMGetShadowMode(PVMCPU pVCpu)
1965{
1966 return pVCpu->pgm.s.enmShadowMode;
1967}
1968
1969/**
1970 * Gets the current host paging mode.
1971 *
1972 * @returns The current paging mode.
1973 * @param pVM The VM handle.
1974 */
1975VMMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
1976{
1977 switch (pVM->pgm.s.enmHostMode)
1978 {
1979 case SUPPAGINGMODE_32_BIT:
1980 case SUPPAGINGMODE_32_BIT_GLOBAL:
1981 return PGMMODE_32_BIT;
1982
1983 case SUPPAGINGMODE_PAE:
1984 case SUPPAGINGMODE_PAE_GLOBAL:
1985 return PGMMODE_PAE;
1986
1987 case SUPPAGINGMODE_PAE_NX:
1988 case SUPPAGINGMODE_PAE_GLOBAL_NX:
1989 return PGMMODE_PAE_NX;
1990
1991 case SUPPAGINGMODE_AMD64:
1992 case SUPPAGINGMODE_AMD64_GLOBAL:
1993 return PGMMODE_AMD64;
1994
1995 case SUPPAGINGMODE_AMD64_NX:
1996 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
1997 return PGMMODE_AMD64_NX;
1998
1999 default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
2000 }
2001
2002 return PGMMODE_INVALID;
2003}
2004
2005
2006/**
2007 * Get mode name.
2008 *
2009 * @returns read-only name string.
2010 * @param enmMode The mode whose name is desired.
2011 */
2012VMMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
2013{
2014 switch (enmMode)
2015 {
2016 case PGMMODE_REAL: return "Real";
2017 case PGMMODE_PROTECTED: return "Protected";
2018 case PGMMODE_32_BIT: return "32-bit";
2019 case PGMMODE_PAE: return "PAE";
2020 case PGMMODE_PAE_NX: return "PAE+NX";
2021 case PGMMODE_AMD64: return "AMD64";
2022 case PGMMODE_AMD64_NX: return "AMD64+NX";
2023 case PGMMODE_NESTED: return "Nested";
2024 case PGMMODE_EPT: return "EPT";
2025 default: return "unknown mode value";
2026 }
2027}
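
/*
 * Example (illustrative sketch only): combining the mode getters above with
 * PGMGetModeName for release logging.
 *
 * @code
 *      LogRel(("PGM: guest=%s shadow=%s host=%s\n",
 *              PGMGetModeName(PGMGetGuestMode(pVCpu)),
 *              PGMGetModeName(PGMGetShadowMode(pVCpu)),
 *              PGMGetModeName(PGMGetHostMode(pVM))));
 * @endcode
 */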
2028
2029
2030/**
2031 * Check if the PGM lock is currently taken.
2032 *
2033 * @returns bool locked/not locked
2034 * @param pVM The VM to operate on.
2035 */
2036VMMDECL(bool) PGMIsLocked(PVM pVM)
2037{
2038 return PDMCritSectIsOwned(&pVM->pgm.s.CritSect);
2039}
2040
2041
2042/**
2043 * Check if this VCPU currently owns the PGM lock.
2044 *
2045 * @returns bool owner/not owner
2046 * @param pVM The VM to operate on.
2047 */
2048VMMDECL(bool) PGMIsLockOwner(PVM pVM)
2049{
2050 return PDMCritSectIsOwner(&pVM->pgm.s.CritSect);
2051}
2052
2053
2054/**
2055 * Acquire the PGM lock.
2056 *
2057 * @returns VBox status code
2058 * @param pVM The VM to operate on.
2059 */
2060int pgmLock(PVM pVM)
2061{
2062 int rc = PDMCritSectEnter(&pVM->pgm.s.CritSect, VERR_SEM_BUSY);
2063#ifdef IN_RC
2064 if (rc == VERR_SEM_BUSY)
2065 rc = VMMGCCallHost(pVM, VMMCALLHOST_PGM_LOCK, 0);
2066#elif defined(IN_RING0)
2067 if (rc == VERR_SEM_BUSY)
2068 rc = VMMR0CallHost(pVM, VMMCALLHOST_PGM_LOCK, 0);
2069#endif
2070 AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc));
2071 return rc;
2072}
2073
2074
2075/**
2076 * Release the PGM lock.
2077 *
2078 * @returns VBox status code
2079 * @param pVM The VM to operate on.
2080 */
2081void pgmUnlock(PVM pVM)
2082{
2083 PDMCritSectLeave(&pVM->pgm.s.CritSect);
2084}
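
/*
 * Example (illustrative sketch only): the usual locking pattern around code
 * that touches shared PGM state. pgmSomeInternalOperation is a made-up
 * placeholder, not a real function.
 *
 * @code
 *      int rc = pgmLock(pVM);
 *      AssertRCReturn(rc, rc);
 *      Assert(PGMIsLockOwner(pVM));
 *      rc = pgmSomeInternalOperation(pVM); // placeholder for the work needing the lock
 *      pgmUnlock(pVM);
 *      return rc;
 * @endcode
 */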
2085
2086#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
2087
2088/**
2089 * Temporarily maps one guest page specified by GC physical address.
2090 * These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
2091 *
2092 * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
2093 * reused after 8 mappings (or perhaps a few more if you score with the cache).
2094 *
2095 * @returns VBox status.
2096 * @param pVM VM handle.
2097 * @param GCPhys GC Physical address of the page.
2098 * @param ppv Where to store the address of the mapping.
2099 */
2100VMMDECL(int) PGMDynMapGCPage(PVM pVM, RTGCPHYS GCPhys, void **ppv)
2101{
2102 AssertMsg(!(GCPhys & PAGE_OFFSET_MASK), ("GCPhys=%RGp\n", GCPhys));
2103
2104 /*
2105 * Get the ram range.
2106 */
2107 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
2108 while (pRam && GCPhys - pRam->GCPhys >= pRam->cb)
2109 pRam = pRam->CTX_SUFF(pNext);
2110 if (!pRam)
2111 {
2112 AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
2113 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2114 }
2115
2116 /*
2117 * Pass it on to PGMDynMapHCPage.
2118 */
2119 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT]);
2120 //Log(("PGMDynMapGCPage: GCPhys=%RGp HCPhys=%RHp\n", GCPhys, HCPhys));
2121#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
2122 pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
2123#else
2124 PGMDynMapHCPage(pVM, HCPhys, ppv);
2125#endif
2126 return VINF_SUCCESS;
2127}
2128
2129
2130/**
2131 * Temporarily maps one guest page specified by unaligned GC physical address.
2132 * These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
2133 *
2134 * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
2135 * reused after 8 mappings (or perhaps a few more if you score with the cache).
2136 *
2137 * The caller is aware that only the specified page is mapped and that really bad things
2138 * will happen if writing beyond the page!
2139 *
2140 * @returns VBox status.
2141 * @param pVM VM handle.
2142 * @param GCPhys GC Physical address within the page to be mapped.
2143 * @param ppv Where to store the mapping address corresponding to GCPhys.
2144 */
2145VMMDECL(int) PGMDynMapGCPageOff(PVM pVM, RTGCPHYS GCPhys, void **ppv)
2146{
2147 /*
2148 * Get the ram range.
2149 */
2150 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
2151 while (pRam && GCPhys - pRam->GCPhys >= pRam->cb)
2152 pRam = pRam->CTX_SUFF(pNext);
2153 if (!pRam)
2154 {
2155 AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
2156 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2157 }
2158
2159 /*
2160 * Pass it on to PGMDynMapHCPage.
2161 */
2162 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT]);
2163#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
2164 pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
2165#else
2166 PGMDynMapHCPage(pVM, HCPhys, ppv);
2167#endif
2168 *ppv = (void *)((uintptr_t)*ppv | (GCPhys & PAGE_OFFSET_MASK));
2169 return VINF_SUCCESS;
2170}
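
/*
 * Example (illustrative sketch only): peeking at a few bytes of guest memory
 * through the dynamic mapping area. Only the addressed page is mapped, so the
 * access must not cross a page boundary. GCPhys is assumed to be in scope.
 *
 * @code
 *      void *pv;
 *      int rc = PGMDynMapGCPageOff(pVM, GCPhys, &pv);
 *      if (RT_SUCCESS(rc))
 *      {
 *          uint32_t u32 = *(uint32_t const *)pv;   // caller must ensure GCPhys+3 stays within the page
 *          Log(("read %#x at %RGp\n", u32, GCPhys));
 *      }
 * @endcode
 */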
2171
2172# ifdef IN_RC
2173
2174/**
2175 * Temporarily maps one host page specified by HC physical address.
2176 *
2177 * Be WARNED that the dynamic page mapping area is small, 16 pages, thus the space is
2178 * reused after 16 mappings (or perhaps a few more if you score with the cache).
2179 *
2180 * @returns VINF_SUCCESS, will bail out to ring-3 on failure.
2181 * @param pVM VM handle.
2182 * @param HCPhys HC Physical address of the page.
2183 * @param ppv Where to store the address of the mapping. This is the
2184 * address of the PAGE not the exact address corresponding
2185 * to HCPhys. Use PGMDynMapHCPageOff if you care for the
2186 * page offset.
2187 */
2188VMMDECL(int) PGMDynMapHCPage(PVM pVM, RTHCPHYS HCPhys, void **ppv)
2189{
2190 AssertMsg(!(HCPhys & PAGE_OFFSET_MASK), ("HCPhys=%RHp\n", HCPhys));
2191
2192 /*
2193 * Check the cache.
2194 */
2195 register unsigned iCache;
2196 for (iCache = 0;iCache < RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache);iCache++)
2197 {
2198 static const uint8_t au8Trans[MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT][RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache)] =
2199 {
2200 { 0, 9, 10, 11, 12, 13, 14, 15},
2201 { 0, 1, 10, 11, 12, 13, 14, 15},
2202 { 0, 1, 2, 11, 12, 13, 14, 15},
2203 { 0, 1, 2, 3, 12, 13, 14, 15},
2204 { 0, 1, 2, 3, 4, 13, 14, 15},
2205 { 0, 1, 2, 3, 4, 5, 14, 15},
2206 { 0, 1, 2, 3, 4, 5, 6, 15},
2207 { 0, 1, 2, 3, 4, 5, 6, 7},
2208 { 8, 1, 2, 3, 4, 5, 6, 7},
2209 { 8, 9, 2, 3, 4, 5, 6, 7},
2210 { 8, 9, 10, 3, 4, 5, 6, 7},
2211 { 8, 9, 10, 11, 4, 5, 6, 7},
2212 { 8, 9, 10, 11, 12, 5, 6, 7},
2213 { 8, 9, 10, 11, 12, 13, 6, 7},
2214 { 8, 9, 10, 11, 12, 13, 14, 7},
2215 { 8, 9, 10, 11, 12, 13, 14, 15},
2216 };
2217 AssertCompile(RT_ELEMENTS(au8Trans) == 16);
2218 AssertCompile(RT_ELEMENTS(au8Trans[0]) == 8);
2219
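 /* Worked example of the au8Trans lookup: the 16 dynamic mapping pages share
    the 8 cache slots pairwise (page i and page i + 8 both use slot i & 7).
    Given the most recently allocated page (iDynPageMapLast, the row) and the
    cache slot being probed (iCache, the column), the table yields whichever
    of the two candidate pages was assigned to that slot more recently; e.g.
    with iDynPageMapLast = 11, slot 2 resolves to page 10 and slot 4 to page 4. */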
2220 if (pVM->pgm.s.aHCPhysDynPageMapCache[iCache] == HCPhys)
2221 {
2222 int iPage = au8Trans[pVM->pgm.s.iDynPageMapLast][iCache];
2223
2224 /* The cache can get out of sync with locked entries. (10 locked, 2 overwrites its cache position, last = 11, lookup 2 -> page 10 instead of 2) */
2225 if ((pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage].u & X86_PTE_PG_MASK) == HCPhys)
2226 {
2227 void *pv = pVM->pgm.s.pbDynPageMapBaseGC + (iPage << PAGE_SHIFT);
2228 *ppv = pv;
2229 STAM_COUNTER_INC(&pVM->pgm.s.StatRCDynMapCacheHits);
2230 Log4(("PGMGCDynMapHCPage: HCPhys=%RHp pv=%p iPage=%d iCache=%d\n", HCPhys, pv, iPage, iCache));
2231 return VINF_SUCCESS;
2232 }
2233 else
2234 LogFlow(("Out of sync entry %d\n", iPage));
2235 }
2236 }
2237 AssertCompile(RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) == 8);
2238 AssertCompile((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) == 16);
2239 STAM_COUNTER_INC(&pVM->pgm.s.StatRCDynMapCacheMisses);
2240
2241 /*
2242 * Update the page tables.
2243 */
2244 register unsigned iPage = pVM->pgm.s.iDynPageMapLast;
2245 unsigned i;
2246 for (i=0;i<(MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT);i++)
2247 {
2248 pVM->pgm.s.iDynPageMapLast = iPage = (iPage + 1) & ((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) - 1);
2249 if (!pVM->pgm.s.aLockedDynPageMapCache[iPage])
2250 break;
2251 iPage++;
2252 }
2253 AssertRelease(i != (MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT));
2254
2255 pVM->pgm.s.aHCPhysDynPageMapCache[iPage & (RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) - 1)] = HCPhys;
2256 pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage].u = (uint32_t)HCPhys | X86_PTE_P | X86_PTE_A | X86_PTE_D;
2257 pVM->pgm.s.paDynPageMapPaePTEsGC[iPage].u = HCPhys | X86_PTE_P | X86_PTE_A | X86_PTE_D;
2258 pVM->pgm.s.aLockedDynPageMapCache[iPage] = 0;
2259
2260 void *pv = pVM->pgm.s.pbDynPageMapBaseGC + (iPage << PAGE_SHIFT);
2261 *ppv = pv;
2262 ASMInvalidatePage(pv);
2263 Log4(("PGMGCDynMapHCPage: HCPhys=%RHp pv=%p iPage=%d\n", HCPhys, pv, iPage));
2264 return VINF_SUCCESS;
2265}
2266
2267
2268/**
2269 * Temporarily lock a dynamic page to prevent it from being reused.
2270 *
2271 * @param pVM VM handle.
2272 * @param GCPage GC address of page
2273 */
2274VMMDECL(void) PGMDynLockHCPage(PVM pVM, RCPTRTYPE(uint8_t *) GCPage)
2275{
2276 unsigned iPage;
2277
2278 Assert(GCPage >= pVM->pgm.s.pbDynPageMapBaseGC && GCPage < (pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE));
2279 iPage = ((uintptr_t)(GCPage - pVM->pgm.s.pbDynPageMapBaseGC)) >> PAGE_SHIFT;
2280 ASMAtomicIncU32(&pVM->pgm.s.aLockedDynPageMapCache[iPage]);
2281 Log4(("PGMDynLockHCPage %RRv iPage=%d\n", GCPage, iPage));
2282}
2283
2284
2285/**
2286 * Unlock a dynamic page
2287 *
2288 * @param pVM VM handle.
2289 * @param GCPage GC address of page
2290 */
2291VMMDECL(void) PGMDynUnlockHCPage(PVM pVM, RCPTRTYPE(uint8_t *) GCPage)
2292{
2293 unsigned iPage;
2294
2295 AssertCompile(RT_ELEMENTS(pVM->pgm.s.aLockedDynPageMapCache) == 2* RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache));
2296 AssertCompileMemberSize(VM, pgm.s.aLockedDynPageMapCache, sizeof(uint32_t) * (MM_HYPER_DYNAMIC_SIZE >> (PAGE_SHIFT)));
2297
2298 Assert(GCPage >= pVM->pgm.s.pbDynPageMapBaseGC && GCPage < (pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE));
2299 iPage = ((uintptr_t)(GCPage - pVM->pgm.s.pbDynPageMapBaseGC)) >> PAGE_SHIFT;
2300 Assert(pVM->pgm.s.aLockedDynPageMapCache[iPage]);
2301 ASMAtomicDecU32(&pVM->pgm.s.aLockedDynPageMapCache[iPage]);
2302 Log4(("PGMDynUnlockHCPage %RRv iPage=%d\n", GCPage, iPage));
2303}
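
/*
 * Example (illustrative sketch only, raw-mode context): keeping a dynamic
 * mapping alive across code that may itself create new dynamic mappings.
 * GCPhys is assumed to be in scope.
 *
 * @code
 *      void *pv;
 *      int rc = PGMDynMapGCPage(pVM, GCPhys, &pv);
 *      AssertRCReturn(rc, rc);
 *      PGMDynLockHCPage(pVM, (uint8_t *)pv);   // pin the slot so it isn't recycled
 *      // ... work that may map further pages ...
 *      PGMDynUnlockHCPage(pVM, (uint8_t *)pv);
 * @endcode
 */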
2304
2305
2306# ifdef VBOX_STRICT
2307/**
2308 * Check for lock leaks.
2309 *
2310 * @param pVM VM handle.
2311 */
2312VMMDECL(void) PGMDynCheckLocks(PVM pVM)
2313{
2314 for (unsigned i=0;i<RT_ELEMENTS(pVM->pgm.s.aLockedDynPageMapCache);i++)
2315 Assert(!pVM->pgm.s.aLockedDynPageMapCache[i]);
2316}
2317# endif /* VBOX_STRICT */
2318
2319# endif /* IN_RC */
2320#endif /* IN_RC || VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
2321
2322#if !defined(IN_R0) || defined(LOG_ENABLED)
2323
2324/** Format handler for PGMPAGE.
2325 * @copydoc FNRTSTRFORMATTYPE */
2326static DECLCALLBACK(size_t) pgmFormatTypeHandlerPage(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
2327 const char *pszType, void const *pvValue,
2328 int cchWidth, int cchPrecision, unsigned fFlags,
2329 void *pvUser)
2330{
2331 size_t cch;
2332 PCPGMPAGE pPage = (PCPGMPAGE)pvValue;
2333 if (VALID_PTR(pPage))
2334 {
2335 char szTmp[64+80];
2336
2337 cch = 0;
2338
2339 /* The single char state stuff. */
2340 static const char s_achPageStates[4] = { 'Z', 'A', 'W', 'S' };
2341 szTmp[cch++] = s_achPageStates[PGM_PAGE_GET_STATE(pPage)];
2342
2343#define IS_PART_INCLUDED(lvl) ( !(fFlags & RTSTR_F_PRECISION) || cchPrecision == (lvl) || cchPrecision >= (lvl)+10 )
2344 if (IS_PART_INCLUDED(5))
2345 {
2346 static const char s_achHandlerStates[4] = { '-', 't', 'w', 'a' };
2347 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)];
2348 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_VIRT_STATE(pPage)];
2349 }
2350
2351 /* The type. */
2352 if (IS_PART_INCLUDED(4))
2353 {
2354 szTmp[cch++] = ':';
2355 static const char s_achPageTypes[8][4] = { "INV", "RAM", "MI2", "M2A", "SHA", "ROM", "MIO", "BAD" };
2356 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE(pPage)][0];
2357 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE(pPage)][1];
2358 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE(pPage)][2];
2359 }
2360
2361 /* The numbers. */
2362 if (IS_PART_INCLUDED(3))
2363 {
2364 szTmp[cch++] = ':';
2365 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_HCPHYS(pPage), 16, 12, 0, RTSTR_F_ZEROPAD | RTSTR_F_64BIT);
2366 }
2367
2368 if (IS_PART_INCLUDED(2))
2369 {
2370 szTmp[cch++] = ':';
2371 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_PAGEID(pPage), 16, 7, 0, RTSTR_F_ZEROPAD | RTSTR_F_32BIT);
2372 }
2373
2374 if (IS_PART_INCLUDED(6))
2375 {
2376 szTmp[cch++] = ':';
2377 static const char s_achRefs[4] = { '-', 'U', '!', 'L' };
2378 szTmp[cch++] = s_achRefs[PGM_PAGE_GET_TD_CREFS(pPage)];
2379 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_TD_IDX(pPage), 16, 4, 0, RTSTR_F_ZEROPAD | RTSTR_F_16BIT);
2380 }
2381#undef IS_PART_INCLUDED
2382
2383 cch = pfnOutput(pvArgOutput, szTmp, cch);
2384 }
2385 else
2386 cch = pfnOutput(pvArgOutput, "<bad-pgmpage-ptr>", sizeof("<bad-pgmpage-ptr>") - 1);
2387 return cch;
2388}
2389
2390
2391/** Format handler for PGMRAMRANGE.
2392 * @copydoc FNRTSTRFORMATTYPE */
2393static DECLCALLBACK(size_t) pgmFormatTypeHandlerRamRange(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
2394 const char *pszType, void const *pvValue,
2395 int cchWidth, int cchPrecision, unsigned fFlags,
2396 void *pvUser)
2397{
2398 size_t cch;
2399 PGMRAMRANGE const *pRam = (PGMRAMRANGE const *)pvValue;
2400 if (VALID_PTR(pRam))
2401 {
2402 char szTmp[80];
2403 cch = RTStrPrintf(szTmp, sizeof(szTmp), "%RGp-%RGp", pRam->GCPhys, pRam->GCPhysLast);
2404 cch = pfnOutput(pvArgOutput, szTmp, cch);
2405 }
2406 else
2407 cch = pfnOutput(pvArgOutput, "<bad-pgmramrange-ptr>", sizeof("<bad-pgmramrange-ptr>") - 1);
2408 return cch;
2409}
2410
2411/** Format type handlers to be registered/deregistered. */
2412static const struct
2413{
2414 char szType[24];
2415 PFNRTSTRFORMATTYPE pfnHandler;
2416} g_aPgmFormatTypes[] =
2417{
2418 { "pgmpage", pgmFormatTypeHandlerPage },
2419 { "pgmramrange", pgmFormatTypeHandlerRamRange }
2420};
2421
2422#endif /* !IN_R0 || LOG_ENABLED */
2423
2424
2425/**
2426 * Registers the global string format types.
2427 *
2428 * This should be called at module load time or in some other manner that ensures
2429 * that it's called exactly once.
2430 *
2431 * @returns IPRT status code on RTStrFormatTypeRegister failure.
2432 */
2433VMMDECL(int) PGMRegisterStringFormatTypes(void)
2434{
2435#if !defined(IN_R0) || defined(LOG_ENABLED)
2436 int rc = VINF_SUCCESS;
2437 unsigned i;
2438 for (i = 0; RT_SUCCESS(rc) && i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
2439 {
2440 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
2441# ifdef IN_RING0
2442 if (rc == VERR_ALREADY_EXISTS)
2443 {
2444 /* in case of cleanup failure in ring-0 */
2445 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2446 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
2447 }
2448# endif
2449 }
2450 if (RT_FAILURE(rc))
2451 while (i-- > 0)
2452 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2453
2454 return rc;
2455#else
2456 return VINF_SUCCESS;
2457#endif
2458}
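
/*
 * Example (illustrative sketch only): once the types are registered, PGMPAGE
 * and PGMRAMRANGE pointers can be logged via the %R[...] custom format
 * mechanism. pPage and pRam are assumed to be valid pointers in scope.
 *
 * @code
 *      Log(("pPage=%R[pgmpage] in range %R[pgmramrange]\n", pPage, pRam));
 * @endcode
 */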
2459
2460
2461/**
2462 * Deregisters the global string format types.
2463 *
2464 * This should be called at module unload time or in some other manner that
2465 * ensures that it's called exactly once.
2466 */
2467VMMDECL(void) PGMDeregisterStringFormatTypes(void)
2468{
2469#if !defined(IN_R0) || defined(LOG_ENABLED)
2470 for (unsigned i = 0; i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
2471 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2472#endif
2473}
2474
2475#ifdef VBOX_STRICT
2476
2477/**
2478 * Asserts that there are no mapping conflicts.
2479 *
2480 * @returns Number of conflicts.
2481 * @param pVM The VM Handle.
2482 */
2483VMMDECL(unsigned) PGMAssertNoMappingConflicts(PVM pVM)
2484{
2485 unsigned cErrors = 0;
2486
2487 /* Only applies to raw mode -> 1 VCPU */
2488 Assert(pVM->cCPUs == 1);
2489 PVMCPU pVCpu = &pVM->aCpus[0];
2490
2491 /*
2492 * Check for mapping conflicts.
2493 */
2494 for (PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
2495 pMapping;
2496 pMapping = pMapping->CTX_SUFF(pNext))
2497 {
2498 /** @todo This is slow and should be optimized, but since it's just assertions I don't care now. */
2499 for (RTGCPTR GCPtr = pMapping->GCPtr;
2500 GCPtr <= pMapping->GCPtrLast;
2501 GCPtr += PAGE_SIZE)
2502 {
2503 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)GCPtr, NULL, NULL);
2504 if (rc != VERR_PAGE_TABLE_NOT_PRESENT)
2505 {
2506 AssertMsgFailed(("Conflict at %RGv with %s\n", GCPtr, R3STRING(pMapping->pszDesc)));
2507 cErrors++;
2508 break;
2509 }
2510 }
2511 }
2512
2513 return cErrors;
2514}
2515
2516
2517/**
2518 * Asserts that everything related to the guest CR3 is correctly shadowed.
2519 *
2520 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
2521 * and assert the correctness of the guest CR3 mapping before asserting that the
2522 * shadow page tables are in sync with the guest page tables.
2523 *
2524 * @returns Number of conflicts.
2525 * @param pVM The VM Handle.
2526 * @param pVCpu VMCPU handle.
2527 * @param cr3 The current guest CR3 register value.
2528 * @param cr4 The current guest CR4 register value.
2529 */
2530VMMDECL(unsigned) PGMAssertCR3(PVM pVM, PVMCPU pVCpu, uint64_t cr3, uint64_t cr4)
2531{
2532 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
2533 unsigned cErrors = PGM_BTH_PFN(AssertCR3, pVCpu)(pVCpu, cr3, cr4, 0, ~(RTGCPTR)0);
2534 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
2535 return cErrors;
2536}
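
/*
 * Example (illustrative sketch only): strict builds can drop a check like the
 * one below into paths that have just reloaded or synced CR3. The uGuest*
 * variables are assumed to hold the current guest register values.
 *
 * @code
 *  #ifdef VBOX_STRICT
 *      unsigned cErrors = PGMAssertCR3(pVM, pVCpu, uGuestCr3, uGuestCr4); // uGuest*: assumed
 *      AssertMsg(!cErrors, ("cErrors=%u\n", cErrors));
 *  #endif
 * @endcode
 */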
2537
2538#endif /* VBOX_STRICT */