VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp @ 16665

Last change on this file since 16665 was 16626, checked in by vboxsync, 16 years ago

VBOX_WITH_PGMPOOL_PAGING_ONLY: Deal with split PDs in pae/32 bit case.

1/* $Id: PGMAll.cpp 16626 2009-02-10 12:41:48Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor - All context code.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#define LOG_GROUP LOG_GROUP_PGM
26#include <VBox/pgm.h>
27#include <VBox/cpum.h>
28#include <VBox/selm.h>
29#include <VBox/iom.h>
30#include <VBox/sup.h>
31#include <VBox/mm.h>
32#include <VBox/stam.h>
33#include <VBox/csam.h>
34#include <VBox/patm.h>
35#include <VBox/trpm.h>
36#include <VBox/rem.h>
37#include <VBox/em.h>
38#include <VBox/hwaccm.h>
39#include <VBox/hwacc_vmx.h>
40#include "PGMInternal.h"
41#include <VBox/vm.h>
42#include <iprt/assert.h>
43#include <iprt/asm.h>
44#include <iprt/string.h>
45#include <VBox/log.h>
46#include <VBox/param.h>
47#include <VBox/err.h>
48
49
50/*******************************************************************************
51* Structures and Typedefs *
52*******************************************************************************/
53/**
54 * State structure for PGM_GST_NAME(HandlerVirtualUpdate) that's
55 * passed to PGM_GST_NAME(VirtHandlerUpdateOne) during enumeration.
56 */
57typedef struct PGMHVUSTATE
58{
59 /** The VM handle. */
60 PVM pVM;
61 /** The todo flags. */
62 RTUINT fTodo;
63 /** The CR4 register value. */
64 uint32_t cr4;
65} PGMHVUSTATE, *PPGMHVUSTATE;
66
67
68/*******************************************************************************
69* Internal Functions *
70*******************************************************************************/
71DECLINLINE(int) pgmShwGetLongModePDPtr(PVM pVM, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
72DECLINLINE(int) pgmShwSyncLongModePDPtr(PVM pVM, RTGCPTR64 GCPtr, PX86PML4E pGstPml4e, PX86PDPE pGstPdpe, PX86PDPAE *ppPD);
73DECLINLINE(int) pgmShwGetEPTPDPtr(PVM pVM, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD);
74DECLINLINE(int) pgmShwGetPAEPDPtr(PVM pVM, RTGCPTR GCPtr, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
75#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
76DECLINLINE(int) pgmShwGetPaePoolPagePD(PPGM pPGM, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde);
77DECLINLINE(int) pgmShwSyncPaePDPtr(PVM pVM, RTGCPTR GCPtr, PX86PDPE pGstPdpe, PX86PDPAE *ppPD);
78#endif
79
80/*
81 * Shadow - 32-bit mode
82 */
83#define PGM_SHW_TYPE PGM_TYPE_32BIT
84#define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
85#include "PGMAllShw.h"
86
87/* Guest - real mode */
88#define PGM_GST_TYPE PGM_TYPE_REAL
89#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
90#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
91#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
92#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
93#include "PGMAllGst.h"
94#include "PGMAllBth.h"
95#undef BTH_PGMPOOLKIND_PT_FOR_PT
96#undef BTH_PGMPOOLKIND_ROOT
97#undef PGM_BTH_NAME
98#undef PGM_GST_TYPE
99#undef PGM_GST_NAME
100
101/* Guest - protected mode */
102#define PGM_GST_TYPE PGM_TYPE_PROT
103#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
104#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
105#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
106#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
107#include "PGMAllGst.h"
108#include "PGMAllBth.h"
109#undef BTH_PGMPOOLKIND_PT_FOR_PT
110#undef BTH_PGMPOOLKIND_ROOT
111#undef PGM_BTH_NAME
112#undef PGM_GST_TYPE
113#undef PGM_GST_NAME
114
115/* Guest - 32-bit mode */
116#define PGM_GST_TYPE PGM_TYPE_32BIT
117#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
118#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
119#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
120#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
121#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD
122#include "PGMAllGst.h"
123#include "PGMAllBth.h"
124#undef BTH_PGMPOOLKIND_PT_FOR_BIG
125#undef BTH_PGMPOOLKIND_PT_FOR_PT
126#undef BTH_PGMPOOLKIND_ROOT
127#undef PGM_BTH_NAME
128#undef PGM_GST_TYPE
129#undef PGM_GST_NAME
130
131#undef PGM_SHW_TYPE
132#undef PGM_SHW_NAME
133
134
135/*
136 * Shadow - PAE mode
137 */
138#define PGM_SHW_TYPE PGM_TYPE_PAE
139#define PGM_SHW_NAME(name) PGM_SHW_NAME_PAE(name)
140#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
141#include "PGMAllShw.h"
142
143/* Guest - real mode */
144#define PGM_GST_TYPE PGM_TYPE_REAL
145#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
146#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
147#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
148#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
149#include "PGMAllBth.h"
150#undef BTH_PGMPOOLKIND_PT_FOR_PT
151#undef BTH_PGMPOOLKIND_ROOT
152#undef PGM_BTH_NAME
153#undef PGM_GST_TYPE
154#undef PGM_GST_NAME
155
156/* Guest - protected mode */
157#define PGM_GST_TYPE PGM_TYPE_PROT
158#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
159#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
160#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
161#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
162#include "PGMAllBth.h"
163#undef BTH_PGMPOOLKIND_PT_FOR_PT
164#undef BTH_PGMPOOLKIND_ROOT
165#undef PGM_BTH_NAME
166#undef PGM_GST_TYPE
167#undef PGM_GST_NAME
168
169/* Guest - 32-bit mode */
170#define PGM_GST_TYPE PGM_TYPE_32BIT
171#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
172#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
173#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
174#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
175#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_FOR_32BIT
176#include "PGMAllBth.h"
177#undef BTH_PGMPOOLKIND_PT_FOR_BIG
178#undef BTH_PGMPOOLKIND_PT_FOR_PT
179#undef BTH_PGMPOOLKIND_ROOT
180#undef PGM_BTH_NAME
181#undef PGM_GST_TYPE
182#undef PGM_GST_NAME
183
184
185/* Guest - PAE mode */
186#define PGM_GST_TYPE PGM_TYPE_PAE
187#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
188#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
189#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
190#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
191#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT
192#include "PGMAllGst.h"
193#include "PGMAllBth.h"
194#undef BTH_PGMPOOLKIND_PT_FOR_BIG
195#undef BTH_PGMPOOLKIND_PT_FOR_PT
196#undef BTH_PGMPOOLKIND_ROOT
197#undef PGM_BTH_NAME
198#undef PGM_GST_TYPE
199#undef PGM_GST_NAME
200
201#undef PGM_SHW_TYPE
202#undef PGM_SHW_NAME
203
204
205#ifndef IN_RC /* AMD64 implies VT-x/AMD-V */
206/*
207 * Shadow - AMD64 mode
208 */
209# define PGM_SHW_TYPE PGM_TYPE_AMD64
210# define PGM_SHW_NAME(name) PGM_SHW_NAME_AMD64(name)
211# include "PGMAllShw.h"
212
213/* Guest - protected mode (only used for AMD-V nested paging in 64 bits mode) */
214# define PGM_GST_TYPE PGM_TYPE_PROT
215# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
216# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
217# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
218# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PD_PHYS
219# include "PGMAllBth.h"
220# undef BTH_PGMPOOLKIND_PT_FOR_PT
221# undef BTH_PGMPOOLKIND_ROOT
222# undef PGM_BTH_NAME
223# undef PGM_GST_TYPE
224# undef PGM_GST_NAME
225
226# ifdef VBOX_WITH_64_BITS_GUESTS
227/* Guest - AMD64 mode */
228# define PGM_GST_TYPE PGM_TYPE_AMD64
229# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
230# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
231# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
232# define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
233# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_64BIT_PML4
234# include "PGMAllGst.h"
235# include "PGMAllBth.h"
236# undef BTH_PGMPOOLKIND_PT_FOR_BIG
237# undef BTH_PGMPOOLKIND_PT_FOR_PT
238# undef BTH_PGMPOOLKIND_ROOT
239# undef PGM_BTH_NAME
240# undef PGM_GST_TYPE
241# undef PGM_GST_NAME
242# endif /* VBOX_WITH_64_BITS_GUESTS */
243
244# undef PGM_SHW_TYPE
245# undef PGM_SHW_NAME
246
247
248/*
249 * Shadow - Nested paging mode
250 */
251# define PGM_SHW_TYPE PGM_TYPE_NESTED
252# define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED(name)
253# include "PGMAllShw.h"
254
255/* Guest - real mode */
256# define PGM_GST_TYPE PGM_TYPE_REAL
257# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
258# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_REAL(name)
259# include "PGMAllBth.h"
260# undef PGM_BTH_NAME
261# undef PGM_GST_TYPE
262# undef PGM_GST_NAME
263
264/* Guest - protected mode */
265# define PGM_GST_TYPE PGM_TYPE_PROT
266# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
267# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PROT(name)
268# include "PGMAllBth.h"
269# undef PGM_BTH_NAME
270# undef PGM_GST_TYPE
271# undef PGM_GST_NAME
272
273/* Guest - 32-bit mode */
274# define PGM_GST_TYPE PGM_TYPE_32BIT
275# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
276# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT(name)
277# include "PGMAllBth.h"
278# undef PGM_BTH_NAME
279# undef PGM_GST_TYPE
280# undef PGM_GST_NAME
281
282/* Guest - PAE mode */
283# define PGM_GST_TYPE PGM_TYPE_PAE
284# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
285# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE(name)
286# include "PGMAllBth.h"
287# undef PGM_BTH_NAME
288# undef PGM_GST_TYPE
289# undef PGM_GST_NAME
290
291# ifdef VBOX_WITH_64_BITS_GUESTS
292/* Guest - AMD64 mode */
293# define PGM_GST_TYPE PGM_TYPE_AMD64
294# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
295# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64(name)
296# include "PGMAllBth.h"
297# undef PGM_BTH_NAME
298# undef PGM_GST_TYPE
299# undef PGM_GST_NAME
300# endif /* VBOX_WITH_64_BITS_GUESTS */
301
302# undef PGM_SHW_TYPE
303# undef PGM_SHW_NAME
304
305
306/*
307 * Shadow - EPT
308 */
309# define PGM_SHW_TYPE PGM_TYPE_EPT
310# define PGM_SHW_NAME(name) PGM_SHW_NAME_EPT(name)
311# include "PGMAllShw.h"
312
313/* Guest - real mode */
314# define PGM_GST_TYPE PGM_TYPE_REAL
315# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
316# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_REAL(name)
317# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
318# include "PGMAllBth.h"
319# undef BTH_PGMPOOLKIND_PT_FOR_PT
320# undef PGM_BTH_NAME
321# undef PGM_GST_TYPE
322# undef PGM_GST_NAME
323
324/* Guest - protected mode */
325# define PGM_GST_TYPE PGM_TYPE_PROT
326# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
327# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
328# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
329# include "PGMAllBth.h"
330# undef BTH_PGMPOOLKIND_PT_FOR_PT
331# undef PGM_BTH_NAME
332# undef PGM_GST_TYPE
333# undef PGM_GST_NAME
334
335/* Guest - 32-bit mode */
336# define PGM_GST_TYPE PGM_TYPE_32BIT
337# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
338# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_32BIT(name)
339# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
340# include "PGMAllBth.h"
341# undef BTH_PGMPOOLKIND_PT_FOR_PT
342# undef PGM_BTH_NAME
343# undef PGM_GST_TYPE
344# undef PGM_GST_NAME
345
346/* Guest - PAE mode */
347# define PGM_GST_TYPE PGM_TYPE_PAE
348# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
349# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PAE(name)
350# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
351# include "PGMAllBth.h"
352# undef BTH_PGMPOOLKIND_PT_FOR_PT
353# undef PGM_BTH_NAME
354# undef PGM_GST_TYPE
355# undef PGM_GST_NAME
356
357# ifdef VBOX_WITH_64_BITS_GUESTS
358/* Guest - AMD64 mode */
359# define PGM_GST_TYPE PGM_TYPE_AMD64
360# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
361# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_AMD64(name)
362# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
363# include "PGMAllBth.h"
364# undef BTH_PGMPOOLKIND_PT_FOR_PT
365# undef PGM_BTH_NAME
366# undef PGM_GST_TYPE
367# undef PGM_GST_NAME
368# endif /* VBOX_WITH_64_BITS_GUESTS */
369
370# undef PGM_SHW_TYPE
371# undef PGM_SHW_NAME
372
373#endif /* !IN_RC */
374
375
376#ifndef IN_RING3
377/**
378 * #PF Handler.
379 *
380 * @returns VBox status code (appropriate for trap handling and GC return).
381 * @param pVM VM Handle.
382 * @param uErr The trap error code.
383 * @param pRegFrame Trap register frame.
384 * @param pvFault The fault address.
385 */
386VMMDECL(int) PGMTrap0eHandler(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
387{
388 LogFlow(("PGMTrap0eHandler: uErr=%RGu pvFault=%RGv eip=%RGv\n", uErr, pvFault, (RTGCPTR)pRegFrame->rip));
389 STAM_PROFILE_START(&pVM->pgm.s.StatRZTrap0e, a);
390 STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );
391
392
393#ifdef VBOX_WITH_STATISTICS
394 /*
395 * Error code stats.
396 */
397 if (uErr & X86_TRAP_PF_US)
398 {
399 if (!(uErr & X86_TRAP_PF_P))
400 {
401 if (uErr & X86_TRAP_PF_RW)
402 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSNotPresentWrite);
403 else
404 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSNotPresentRead);
405 }
406 else if (uErr & X86_TRAP_PF_RW)
407 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSWrite);
408 else if (uErr & X86_TRAP_PF_RSVD)
409 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSReserved);
410 else if (uErr & X86_TRAP_PF_ID)
411 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSNXE);
412 else
413 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSRead);
414 }
415 else
416 { /* Supervisor */
417 if (!(uErr & X86_TRAP_PF_P))
418 {
419 if (uErr & X86_TRAP_PF_RW)
420 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eSVNotPresentWrite);
421 else
422 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eSVNotPresentRead);
423 }
424 else if (uErr & X86_TRAP_PF_RW)
425 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eSVWrite);
426 else if (uErr & X86_TRAP_PF_ID)
427 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eSNXE);
428 else if (uErr & X86_TRAP_PF_RSVD)
429 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eSVReserved);
430 }
431#endif /* VBOX_WITH_STATISTICS */
432
433 /*
434 * Call the worker.
435 */
436 int rc = PGM_BTH_PFN(Trap0eHandler, pVM)(pVM, uErr, pRegFrame, pvFault);
437 if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
438 rc = VINF_SUCCESS;
439 STAM_STATS({ if (rc == VINF_EM_RAW_GUEST_TRAP) STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eGuestPF); });
440 STAM_STATS({ if (!pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution))
441 pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatRZTrap0eTime2Misc; });
442 STAM_PROFILE_STOP_EX(&pVM->pgm.s.StatRZTrap0e, pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution), a);
443 return rc;
444}
445#endif /* !IN_RING3 */
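
/*
 * Usage sketch (hypothetical caller, not part of this file): a raw-mode/ring-0
 * trap dispatcher handing a guest #PF to PGMTrap0eHandler. uErr, pRegFrame and
 * pvFault are assumed to come straight from the trap frame / CR2.
 *
 *     int rc = PGMTrap0eHandler(pVM, uErr, pRegFrame, pvFault);
 *     if (rc == VINF_SUCCESS)
 *         return rc;                      // shadow tables fixed up, resume the guest
 *     if (rc == VINF_EM_RAW_GUEST_TRAP)
 *         return rc;                      // reflect the #PF to the guest (dispatcher's job)
 *     return rc;                          // other scheduling codes go back to EM
 */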
446
447
448/**
449 * Prefetch a page
450 *
451 * Typically used to sync commonly used pages before entering raw mode
452 * after a CR3 reload.
453 *
454 * @returns VBox status code suitable for scheduling.
455 * @retval VINF_SUCCESS on success.
456 * @retval VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
457 * @param pVM VM handle.
458 * @param GCPtrPage Page to prefetch.
459 */
460VMMDECL(int) PGMPrefetchPage(PVM pVM, RTGCPTR GCPtrPage)
461{
462 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,Prefetch), a);
463 int rc = PGM_BTH_PFN(PrefetchPage, pVM)(pVM, GCPtrPage);
464 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,Prefetch), a);
465 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
466 return rc;
467}
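
/*
 * Usage sketch (hypothetical, not from this file): pre-syncing a hot page,
 * e.g. the page the guest will execute from next, before re-entering raw mode
 * after a CR3 reload. pCtx is an assumed guest-CPU context.
 *
 *     int rc = PGMPrefetchPage(pVM, (RTGCPTR)pCtx->rip);
 *     if (rc == VINF_PGM_SYNC_CR3)
 *         rc = VINF_SUCCESS;  // out of shadow pages; the SyncCR3 force-action flag covers it
 */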
468
469
470/**
471 * Gets the mapping corresponding to the specified address (if any).
472 *
473 * @returns Pointer to the mapping.
474 * @returns NULL if not found.
475 *
476 * @param pVM The virtual machine.
477 * @param GCPtr The guest context pointer.
478 */
479PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr)
480{
481 PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
482 while (pMapping)
483 {
484 if ((uintptr_t)GCPtr < (uintptr_t)pMapping->GCPtr)
485 break;
486 if ((uintptr_t)GCPtr - (uintptr_t)pMapping->GCPtr < pMapping->cb)
487 return pMapping;
488 pMapping = pMapping->CTX_SUFF(pNext);
489 }
490 return NULL;
491}
492
493
494/**
495 * Verifies a range of pages for read or write access
496 *
497 * Only checks the guest's page tables
498 *
499 * @returns VBox status code.
500 * @param pVM VM handle.
501 * @param Addr Guest virtual address to check
502 * @param cbSize Access size
503 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
504 * @remarks Currently not in use.
505 */
506VMMDECL(int) PGMIsValidAccess(PVM pVM, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
507{
508 /*
509 * Validate input.
510 */
511 if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
512 {
513 AssertMsgFailed(("PGMIsValidAccess: invalid access type %08x\n", fAccess));
514 return VERR_INVALID_PARAMETER;
515 }
516
517 uint64_t fPage;
518 int rc = PGMGstGetPage(pVM, (RTGCPTR)Addr, &fPage, NULL);
519 if (RT_FAILURE(rc))
520 {
521 Log(("PGMIsValidAccess: access violation for %RGv rc=%d\n", Addr, rc));
522 return VINF_EM_RAW_GUEST_TRAP;
523 }
524
525 /*
526 * Check if the access would cause a page fault
527 *
528 * Note that hypervisor page directories are not present in the guest's tables, so this check
529 * is sufficient.
530 */
531 bool fWrite = !!(fAccess & X86_PTE_RW);
532 bool fUser = !!(fAccess & X86_PTE_US);
533 if ( !(fPage & X86_PTE_P)
534 || (fWrite && !(fPage & X86_PTE_RW))
535 || (fUser && !(fPage & X86_PTE_US)) )
536 {
537 Log(("PGMIsValidAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPage, fWrite, fUser));
538 return VINF_EM_RAW_GUEST_TRAP;
539 }
540 if ( RT_SUCCESS(rc)
541 && PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize))
542 return PGMIsValidAccess(pVM, Addr + PAGE_SIZE, (cbSize > PAGE_SIZE) ? cbSize - PAGE_SIZE : 1, fAccess);
543 return rc;
544}
545
546
547/**
548 * Verifies a range of pages for read or write access
549 *
550 * Supports handling of pages marked for dirty bit tracking and CSAM
551 *
552 * @returns VBox status code.
553 * @param pVM VM handle.
554 * @param Addr Guest virtual address to check
555 * @param cbSize Access size
556 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
557 */
558VMMDECL(int) PGMVerifyAccess(PVM pVM, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
559{
560 AssertMsg(!(fAccess & ~(X86_PTE_US | X86_PTE_RW)), ("PGMVerifyAccess: invalid access type %08x\n", fAccess));
561
562 /*
563 * Get going.
564 */
565 uint64_t fPageGst;
566 int rc = PGMGstGetPage(pVM, (RTGCPTR)Addr, &fPageGst, NULL);
567 if (RT_FAILURE(rc))
568 {
569 Log(("PGMVerifyAccess: access violation for %RGv rc=%d\n", Addr, rc));
570 return VINF_EM_RAW_GUEST_TRAP;
571 }
572
573 /*
574 * Check if the access would cause a page fault
575 *
576 * Note that hypervisor page directories are not present in the guest's tables, so this check
577 * is sufficient.
578 */
579 const bool fWrite = !!(fAccess & X86_PTE_RW);
580 const bool fUser = !!(fAccess & X86_PTE_US);
581 if ( !(fPageGst & X86_PTE_P)
582 || (fWrite && !(fPageGst & X86_PTE_RW))
583 || (fUser && !(fPageGst & X86_PTE_US)) )
584 {
585 Log(("PGMVerifyAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPageGst, fWrite, fUser));
586 return VINF_EM_RAW_GUEST_TRAP;
587 }
588
589 if (!HWACCMIsNestedPagingActive(pVM))
590 {
591 /*
592 * Next step is to verify if we protected this page for dirty bit tracking or for CSAM scanning
593 */
594 rc = PGMShwGetPage(pVM, (RTGCPTR)Addr, NULL, NULL);
595 if ( rc == VERR_PAGE_NOT_PRESENT
596 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
597 {
598 /*
599 * Page is not present in our page tables.
600 * Try to sync it!
601 */
602 Assert(X86_TRAP_PF_RW == X86_PTE_RW && X86_TRAP_PF_US == X86_PTE_US);
603 uint32_t uErr = fAccess & (X86_TRAP_PF_RW | X86_TRAP_PF_US);
604 rc = PGM_BTH_PFN(VerifyAccessSyncPage, pVM)(pVM, Addr, fPageGst, uErr);
605 if (rc != VINF_SUCCESS)
606 return rc;
607 }
608 else
609 AssertMsg(rc == VINF_SUCCESS, ("PGMShwGetPage %RGv failed with %Rrc\n", Addr, rc));
610 }
611
612#if 0 /* def VBOX_STRICT; triggers too often now */
613 /*
614 * This check is a bit paranoid, but useful.
615 */
616 /** @note this will assert when writing to monitored pages (a bit annoying actually) */
617 uint64_t fPageShw;
618 rc = PGMShwGetPage(pVM, (RTGCPTR)Addr, &fPageShw, NULL);
619 if ( (rc == VERR_PAGE_NOT_PRESENT || RT_FAILURE(rc))
620 || (fWrite && !(fPageShw & X86_PTE_RW))
621 || (fUser && !(fPageShw & X86_PTE_US)) )
622 {
623 AssertMsgFailed(("Unexpected access violation for %RGv! rc=%Rrc write=%d user=%d\n",
624 Addr, rc, fWrite && !(fPageShw & X86_PTE_RW), fUser && !(fPageShw & X86_PTE_US)));
625 return VINF_EM_RAW_GUEST_TRAP;
626 }
627#endif
628
629 if ( RT_SUCCESS(rc)
630 && ( PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize - 1)
631 || Addr + cbSize < Addr))
632 {
633 /* Don't recursively call PGMVerifyAccess as we might run out of stack. */
634 for (;;)
635 {
636 Addr += PAGE_SIZE;
637 if (cbSize > PAGE_SIZE)
638 cbSize -= PAGE_SIZE;
639 else
640 cbSize = 1;
641 rc = PGMVerifyAccess(pVM, Addr, 1, fAccess);
642 if (rc != VINF_SUCCESS)
643 break;
644 if (PAGE_ADDRESS(Addr) == PAGE_ADDRESS(Addr + cbSize - 1))
645 break;
646 }
647 }
648 return rc;
649}
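
/*
 * Usage sketch (hypothetical, not from this file): validating a guest
 * user-mode buffer before writing to it, e.g. from instruction emulation.
 * GCPtrBuf and cbBuf are assumed inputs.
 *
 *     int rc = PGMVerifyAccess(pVM, GCPtrBuf, cbBuf, X86_PTE_RW | X86_PTE_US);
 *     if (rc == VINF_EM_RAW_GUEST_TRAP)
 *         return rc;          // the access would fault; let the guest take the #PF
 *     AssertRC(rc);
 */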
650
651
652/**
653 * Emulation of the invlpg instruction (HC only actually).
654 *
655 * @returns VBox status code, special care required.
656 * @retval VINF_PGM_SYNC_CR3 - handled.
657 * @retval VINF_EM_RAW_EMULATE_INSTR - not handled (RC only).
658 * @retval VERR_REM_FLUSHED_PAGES_OVERFLOW - not handled.
659 *
660 * @param pVM VM handle.
661 * @param GCPtrPage Page to invalidate.
662 *
663 * @remark ASSUMES the page table entry or page directory is valid. Fairly
664 * safe, but there could be edge cases!
665 *
666 * @todo Flush page or page directory only if necessary!
667 */
668VMMDECL(int) PGMInvalidatePage(PVM pVM, RTGCPTR GCPtrPage)
669{
670 int rc;
671 Log3(("PGMInvalidatePage: GCPtrPage=%RGv\n", GCPtrPage));
672
673#ifndef IN_RING3
674 /*
675 * Notify the recompiler so it can record this instruction.
676 * Failure happens when it's out of space. We'll return to HC in that case.
677 */
678 rc = REMNotifyInvalidatePage(pVM, GCPtrPage);
679 if (rc != VINF_SUCCESS)
680 return rc;
681#endif /* !IN_RING3 */
682
683
684#ifdef IN_RC
685 /*
686 * Check for conflicts and pending CR3 monitoring updates.
687 */
688 if (!pVM->pgm.s.fMappingsFixed)
689 {
690 if ( pgmGetMapping(pVM, GCPtrPage)
691 && PGMGstGetPage(pVM, GCPtrPage, NULL, NULL) != VERR_PAGE_TABLE_NOT_PRESENT)
692 {
693 LogFlow(("PGMGCInvalidatePage: Conflict!\n"));
694 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
695 STAM_COUNTER_INC(&pVM->pgm.s.StatRCInvlPgConflict);
696 return VINF_PGM_SYNC_CR3;
697 }
698
699 if (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
700 {
701 LogFlow(("PGMGCInvalidatePage: PGM_SYNC_MONITOR_CR3 -> reinterpret instruction in R3\n"));
702 STAM_COUNTER_INC(&pVM->pgm.s.StatRCInvlPgSyncMonCR3);
703 return VINF_EM_RAW_EMULATE_INSTR;
704 }
705 }
706#endif /* IN_RC */
707
708 /*
709 * Call paging mode specific worker.
710 */
711 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePage), a);
712 rc = PGM_BTH_PFN(InvalidatePage, pVM)(pVM, GCPtrPage);
713 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePage), a);
714
715#ifdef IN_RING3
716 /*
717 * Check if we have a pending update of the CR3 monitoring.
718 */
719 if ( RT_SUCCESS(rc)
720 && (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3))
721 {
722 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
723 Assert(!pVM->pgm.s.fMappingsFixed);
724 Assert(pVM->pgm.s.GCPhysCR3 == pVM->pgm.s.GCPhysGstCR3Monitored);
725#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
726 rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, pVM->pgm.s.GCPhysCR3);
727#endif
728 }
729
730 /*
731 * Inform CSAM about the flush
732 *
733 * Note: This is to check if monitored pages have been changed; when we implement
734 * callbacks for virtual handlers, this is no longer required.
735 */
736 CSAMR3FlushPage(pVM, GCPtrPage);
737#endif /* IN_RING3 */
738 return rc;
739}
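
/*
 * Usage sketch (hypothetical, not from this file): how an INVLPG emulation
 * path might use PGMInvalidatePage. GCPtrPage is the operand of the
 * interpreted INVLPG instruction.
 *
 *     int rc = PGMInvalidatePage(pVM, GCPtrPage);
 *     if (rc == VINF_PGM_SYNC_CR3)
 *         rc = VINF_SUCCESS;              // full sync scheduled via the FF, nothing more to do
 *     else if (rc == VINF_EM_RAW_EMULATE_INSTR)
 *         return rc;                      // RC only: let ring-3 redo the instruction
 */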
740
741
742/**
743 * Executes an instruction using the interpreter.
744 *
745 * @returns VBox status code (appropriate for trap handling and GC return).
746 * @param pVM VM handle.
747 * @param pRegFrame Register frame.
748 * @param pvFault Fault address.
749 */
750VMMDECL(int) PGMInterpretInstruction(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
751{
752 uint32_t cb;
753 int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);
754 if (rc == VERR_EM_INTERPRETER)
755 rc = VINF_EM_RAW_EMULATE_INSTR;
756 if (rc != VINF_SUCCESS)
757 Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%RGv)\n", rc, pvFault));
758 return rc;
759}
760
761
762/**
763 * Gets effective page information (from the VMM page directory).
764 *
765 * @returns VBox status.
766 * @param pVM VM Handle.
767 * @param GCPtr Guest Context virtual address of the page.
768 * @param pfFlags Where to store the flags. These are X86_PTE_*.
769 * @param pHCPhys Where to store the HC physical address of the page.
770 * This is page aligned.
771 * @remark You should use PGMMapGetPage() for pages in a mapping.
772 */
773VMMDECL(int) PGMShwGetPage(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
774{
775 return PGM_SHW_PFN(GetPage,pVM)(pVM, GCPtr, pfFlags, pHCPhys);
776}
777
778
779/**
780 * Sets (replaces) the page flags for a range of pages in the shadow context.
781 *
782 * @returns VBox status.
783 * @param pVM VM handle.
784 * @param GCPtr The address of the first page.
785 * @param cb The size of the range in bytes.
786 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
787 * @remark You must use PGMMapSetPage() for pages in a mapping.
788 */
789VMMDECL(int) PGMShwSetPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
790{
791 return PGMShwModifyPage(pVM, GCPtr, cb, fFlags, 0);
792}
793
794
795/**
796 * Modify page flags for a range of pages in the shadow context.
797 *
798 * The existing flags are ANDed with the fMask and ORed with the fFlags.
799 *
800 * @returns VBox status code.
801 * @param pVM VM handle.
802 * @param GCPtr Virtual address of the first page in the range.
803 * @param cb Size (in bytes) of the range to apply the modification to.
804 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
805 * @param fMask The AND mask - page flags X86_PTE_*.
806 * Be very CAREFUL when ~'ing constants which could be 32-bit!
807 * @remark You must use PGMMapModifyPage() for pages in a mapping.
808 */
809VMMDECL(int) PGMShwModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
810{
811 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
812 Assert(cb);
813
814 /*
815 * Align the input.
816 */
817 cb += GCPtr & PAGE_OFFSET_MASK;
818 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
819 GCPtr = (GCPtr & PAGE_BASE_GC_MASK); /** @todo this ain't necessary, right... */
820
821 /*
822 * Call worker.
823 */
824 return PGM_SHW_PFN(ModifyPage, pVM)(pVM, GCPtr, cb, fFlags, fMask);
825}
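
/*
 * Worked example (hypothetical, not from this file): write-protecting a range
 * in the shadow tables with PGMShwModifyPage. Note the 64-bit cast before the
 * ~ operator, per the warning above about ~'ing constants that could be 32-bit.
 *
 *     // Clear X86_PTE_RW over cb bytes starting at GCPtr, leaving other flags intact.
 *     int rc = PGMShwModifyPage(pVM, GCPtr, cb, 0, ~(uint64_t)X86_PTE_RW);
 *     AssertRC(rc);
 */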
826
827
828/**
829 * Gets the SHADOW page directory pointer for the specified address.
830 *
831 * @returns VBox status.
832 * @param pVM VM handle.
833 * @param GCPtr The address.
834 * @param ppPdpt Receives address of pdpt
835 * @param ppPD Receives address of page directory
836 * @remarks Unused.
837 */
838DECLINLINE(int) pgmShwGetPAEPDPtr(PVM pVM, RTGCPTR GCPtr, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
839{
840 PPGM pPGM = &pVM->pgm.s;
841 PPGMPOOL pPool = pPGM->CTX_SUFF(pPool);
842 PPGMPOOLPAGE pShwPage;
843
844 Assert(!HWACCMIsNestedPagingActive(pVM));
845
846 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
847 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
848 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
849
850 *ppPdpt = pPdpt;
851 if (!pPdpe->n.u1Present)
852 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
853
854 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
855 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
856
857 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
858 return VINF_SUCCESS;
859}
860
861#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
862
863/**
864 * Gets the shadow page directory for the specified address, PAE.
865 *
866 * @returns VBox status code.
867 * @param pVM VM handle.
868 * @param GCPtr The address.
869 * @param pGstPdpe Guest PDPT entry
870 * @param ppPD Receives address of page directory
871 */
872DECLINLINE(int) pgmShwSyncPaePDPtr(PVM pVM, RTGCPTR GCPtr, PX86PDPE pGstPdpe, PX86PDPAE *ppPD)
873{
874 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
875 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
876 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
877 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
878 PPGMPOOLPAGE pShwPage;
879 int rc;
880
881 /* Allocate page directory if not present. */
882 if ( !pPdpe->n.u1Present
883 && !(pPdpe->u & X86_PDPE_PG_MASK))
884 {
885 bool fNestedPaging = HWACCMIsNestedPagingActive(pVM);
886 bool fPaging = !!(CPUMGetGuestCR0(pVM) & X86_CR0_PG);
887 RTGCPTR64 GCPdPt;
888 PGMPOOLKIND enmKind;
889
890 if (fNestedPaging || !fPaging)
891 {
892 /* AMD-V nested paging or real/protected mode without paging */
893 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
894 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
895 }
896 else
897 {
898 Assert(pGstPdpe);
899
900 if (CPUMGetGuestCR4(pVM) & X86_CR4_PAE)
901 {
902 GCPdPt = pGstPdpe->u & X86_PDPE_PG_MASK;
903 enmKind = PGMPOOLKIND_PAE_PD_FOR_PAE_PD;
904 }
905 else
906 {
907 GCPdPt = CPUMGetGuestCR3(pVM);
908 enmKind = (PGMPOOLKIND)(PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD + iPdPt);
909 }
910 }
911
912 /* Create a reference back to the PDPT by using the index in its shadow page. */
913 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPdPt, &pShwPage);
914 if (rc == VERR_PGM_POOL_FLUSHED)
915 {
916 Log(("pgmShwSyncPaePDPtr: PGM pool flushed -> signal sync cr3\n"));
917 Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
918 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
919 return VINF_PGM_SYNC_CR3;
920 }
921 AssertRCReturn(rc, rc);
922 }
923 else
924 {
925 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
926 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
927 }
928 /* The PD was cached or created; hook it up now. */
929 pPdpe->u |= pShwPage->Core.Key
930 | (pGstPdpe->u & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));
931
932 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
933 return VINF_SUCCESS;
934}
935
936/**
937 * Gets the pointer to the shadow page directory entry for an address, PAE.
938 *
939 * @returns VBox status code.
940 * @param pPGM Pointer to the PGM instance data.
941 * @param GCPtr The address.
942 * @param ppShwPde Receives the address of the pgm pool page for the shadow page directory
943 */
944DECLINLINE(int) pgmShwGetPaePoolPagePD(PPGM pPGM, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde)
945{
946 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
947 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pPGM);
948 AssertReturn(pPdpt, VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT); /* can't happen */
949 if (!pPdpt->a[iPdPt].n.u1Present)
950 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
951
952 /* Fetch the pgm pool shadow descriptor. */
953 PPGMPOOLPAGE pShwPde = pgmPoolGetPageByHCPhys(PGM2VM(pPGM), pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
954 AssertReturn(pShwPde, VERR_INTERNAL_ERROR);
955
956 *ppShwPde = pShwPde;
957 return VINF_SUCCESS;
958}
959#endif
960
961#ifndef IN_RC
962
963/**
964 * Syncs the SHADOW page directory pointer for the specified address.
965 *
966 * Allocates backing pages in case the PDPT or PML4 entry is missing.
967 *
968 * The caller is responsible for making sure the guest has a valid PD before
969 * calling this function.
970 *
971 * @returns VBox status.
972 * @param pVM VM handle.
973 * @param GCPtr The address.
974 * @param pGstPml4e Guest PML4 entry
975 * @param pGstPdpe Guest PDPT entry
976 * @param ppPD Receives address of page directory
977 */
978DECLINLINE(int) pgmShwSyncLongModePDPtr(PVM pVM, RTGCPTR64 GCPtr, PX86PML4E pGstPml4e, PX86PDPE pGstPdpe, PX86PDPAE *ppPD)
979{
980 PPGM pPGM = &pVM->pgm.s;
981 PPGMPOOL pPool = pPGM->CTX_SUFF(pPool);
982 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
983 PX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pPGM, iPml4);
984 bool fNestedPaging = HWACCMIsNestedPagingActive(pVM);
985#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
986 bool fPaging = !!(CPUMGetGuestCR0(pVM) & X86_CR0_PG);
987#endif
988 PPGMPOOLPAGE pShwPage;
989 int rc;
990
991 /* Allocate page directory pointer table if not present. */
992 if ( !pPml4e->n.u1Present
993 && !(pPml4e->u & X86_PML4E_PG_MASK))
994 {
995#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
996 RTGCPTR64 GCPml4;
997 PGMPOOLKIND enmKind;
998
999 Assert(pVM->pgm.s.CTX_SUFF(pShwPageCR3));
1000
1001 if (fNestedPaging || !fPaging)
1002 {
1003 /* AMD-V nested paging or real/protected mode without paging */
1004 GCPml4 = (RTGCPTR64)iPml4 << X86_PML4_SHIFT;
1005 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_PHYS;
1006 }
1007 else
1008 {
1009 Assert(pGstPml4e && pGstPdpe);
1010
1011 GCPml4 = pGstPml4e->u & X86_PML4E_PG_MASK;
1012 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT;
1013 }
1014
1015 /* Create a reference back to the PDPT by using the index in its shadow page. */
1016 rc = pgmPoolAlloc(pVM, GCPml4, enmKind, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, &pShwPage);
1017#else
1018 if (!fNestedPaging)
1019 {
1020 Assert(pGstPml4e && pGstPdpe);
1021 Assert(pVM->pgm.s.CTX_SUFF(pShwPageCR3));
1022
1023 rc = pgmPoolAlloc(pVM, pGstPml4e->u & X86_PML4E_PG_MASK,
1024 PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, &pShwPage);
1025 }
1026 else
1027 {
1028 /* AMD-V nested paging. (Intel EPT never comes here) */
1029 RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT;
1030 rc = pgmPoolAlloc(pVM, GCPml4 + RT_BIT_64(63) /* hack: make the address unique */,
1031 PGMPOOLKIND_64BIT_PDPT_FOR_PHYS, PGMPOOL_IDX_NESTED_ROOT, iPml4, &pShwPage);
1032 }
1033#endif
1034 if (rc == VERR_PGM_POOL_FLUSHED)
1035 {
1036 Log(("PGMShwSyncLongModePDPtr: PGM pool flushed (1) -> signal sync cr3\n"));
1037 Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
1038 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
1039 return VINF_PGM_SYNC_CR3;
1040 }
1041 AssertRCReturn(rc, rc);
1042 }
1043 else
1044 {
1045 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
1046 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1047 }
1048 /* The PDPT was cached or created; hook it up now. */
1049 pPml4e->u |= pShwPage->Core.Key
1050 | (pGstPml4e->u & ~(X86_PML4E_PG_MASK | X86_PML4E_AVL_MASK | X86_PML4E_PCD | X86_PML4E_PWT));
1051
1052 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1053 PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1054 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1055
1056 /* Allocate page directory if not present. */
1057 if ( !pPdpe->n.u1Present
1058 && !(pPdpe->u & X86_PDPE_PG_MASK))
1059 {
1060#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
1061 RTGCPTR64 GCPdPt;
1062 PGMPOOLKIND enmKind;
1063
1064 if (fNestedPaging || !fPaging)
1065 {
1066 /* AMD-V nested paging or real/protected mode without paging */
1067 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
1068 enmKind = PGMPOOLKIND_64BIT_PD_FOR_PHYS;
1069 }
1070 else
1071 {
1072 Assert(pGstPdpe);
1073
1074 GCPdPt = pGstPdpe->u & X86_PDPE_PG_MASK;
1075 enmKind = PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD;
1076 }
1077
1078 /* Create a reference back to the PDPT by using the index in its shadow page. */
1079 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPdPt, &pShwPage);
1080#else
1081 if (!fNestedPaging)
1082 {
1083 Assert(pGstPml4e && pGstPdpe);
1084 Assert(!(pPdpe->u & X86_PDPE_PG_MASK));
1085 /* Create a reference back to the PDPT by using the index in its shadow page. */
1086 rc = pgmPoolAlloc(pVM, pGstPdpe->u & X86_PDPE_PG_MASK, PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD, pShwPage->idx, iPdPt, &pShwPage);
1087 }
1088 else
1089 {
1090 /* AMD-V nested paging. (Intel EPT never comes here) */
1091 RTGCPTR64 GCPdPt = (RTGCPTR64)iPdPt << EPT_PDPT_SHIFT;
1092
1093 rc = pgmPoolAlloc(pVM, GCPdPt + RT_BIT_64(62) /* hack: make the address unique */, PGMPOOLKIND_64BIT_PD_FOR_PHYS, pShwPage->idx, iPdPt, &pShwPage);
1094 }
1095#endif
1096 if (rc == VERR_PGM_POOL_FLUSHED)
1097 {
1098 Log(("PGMShwSyncLongModePDPtr: PGM pool flushed (2) -> signal sync cr3\n"));
1099 Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
1100 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
1101 return VINF_PGM_SYNC_CR3;
1102 }
1103 AssertRCReturn(rc, rc);
1104 }
1105 else
1106 {
1107 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
1108 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1109 }
1110 /* The PD was cached or created; hook it up now. */
1111 pPdpe->u |= pShwPage->Core.Key
1112 | (pGstPdpe->u & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));
1113
1114 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1115 return VINF_SUCCESS;
1116}
1117
1118
1119/**
1120 * Gets the SHADOW page directory pointer for the specified address (long mode).
1121 *
1122 * @returns VBox status.
1123 * @param pVM VM handle.
1124 * @param GCPtr The address.
 * @param ppPml4e Receives address of the PML4 entry (optional).
1125 * @param ppPdpt Receives address of pdpt
1126 * @param ppPD Receives address of page directory
1127 */
1128DECLINLINE(int) pgmShwGetLongModePDPtr(PVM pVM, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
1129{
1130 PPGM pPGM = &pVM->pgm.s;
1131 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1132 PCX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pPGM, iPml4);
1133 AssertReturn(pPml4e, VERR_INTERNAL_ERROR);
1134 if (ppPml4e)
1135 *ppPml4e = (PX86PML4E)pPml4e;
1136 if (!pPml4e->n.u1Present)
1137 return VERR_PAGE_MAP_LEVEL4_NOT_PRESENT;
1138
1139 PPGMPOOL pPool = pPGM->CTX_SUFF(pPool);
1140 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
1141 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1142
1143 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1144 PCX86PDPT pPdpt = *ppPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1145 if (!pPdpt->a[iPdPt].n.u1Present)
1146 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1147
1148 pShwPage = pgmPoolGetPage(pPool, pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
1149 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1150
1151 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1152 return VINF_SUCCESS;
1153}
1154
1155
1156/**
1157 * Syncs the SHADOW EPT page directory pointer for the specified address. Allocates
1158 * backing pages in case the PDPT or PML4 entry is missing.
1159 *
1160 * @returns VBox status.
1161 * @param pVM VM handle.
1162 * @param GCPtr The address.
1163 * @param ppPdpt Receives address of pdpt
1164 * @param ppPD Receives address of page directory
1165 */
1166DECLINLINE(int) pgmShwGetEPTPDPtr(PVM pVM, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD)
1167{
1168 PPGM pPGM = &pVM->pgm.s;
1169 const unsigned iPml4 = (GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
1170 PPGMPOOL pPool = pPGM->CTX_SUFF(pPool);
1171 PEPTPML4 pPml4;
1172 PEPTPML4E pPml4e;
1173 PPGMPOOLPAGE pShwPage;
1174 int rc;
1175
1176 Assert(HWACCMIsNestedPagingActive(pVM));
1177
1178# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1179 rc = PGM_HCPHYS_2_PTR(pVM, pPGM->HCPhysShwNestedRoot, &pPml4);
1180 AssertRCReturn(rc, rc);
1181# else
1182 pPml4 = (PEPTPML4)pPGM->CTX_SUFF(pShwNestedRoot);
1183# endif
1184 Assert(pPml4);
1185
1186 /* Allocate page directory pointer table if not present. */
1187 pPml4e = &pPml4->a[iPml4];
1188 if ( !pPml4e->n.u1Present
1189 && !(pPml4e->u & EPT_PML4E_PG_MASK))
1190 {
1191 Assert(!(pPml4e->u & EPT_PML4E_PG_MASK));
1192 RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT;
1193
1194#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
1195 rc = pgmPoolAlloc(pVM, GCPml4, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOL_IDX_NESTED_ROOT, iPml4, &pShwPage);
1196#else
1197 rc = pgmPoolAlloc(pVM, GCPml4 + RT_BIT_64(63) /* hack: make the address unique */, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOL_IDX_NESTED_ROOT, iPml4, &pShwPage);
1198#endif
1199 if (rc == VERR_PGM_POOL_FLUSHED)
1200 {
1201 Log(("PGMShwSyncEPTPDPtr: PGM pool flushed (1) -> signal sync cr3\n"));
1202 Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
1203 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
1204 return VINF_PGM_SYNC_CR3;
1205 }
1206 AssertRCReturn(rc, rc);
1207 }
1208 else
1209 {
1210 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
1211 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1212 }
1213 /* The PDPT was cached or created; hook it up now and fill with the default value. */
1214 pPml4e->u = pShwPage->Core.Key;
1215 pPml4e->n.u1Present = 1;
1216 pPml4e->n.u1Write = 1;
1217 pPml4e->n.u1Execute = 1;
1218
1219 const unsigned iPdPt = (GCPtr >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
1220 PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1221 PEPTPDPTE pPdpe = &pPdpt->a[iPdPt];
1222
1223 if (ppPdpt)
1224 *ppPdpt = pPdpt;
1225
1226 /* Allocate page directory if not present. */
1227 if ( !pPdpe->n.u1Present
1228 && !(pPdpe->u & EPT_PDPTE_PG_MASK))
1229 {
1230 RTGCPTR64 GCPdPt = (RTGCPTR64)iPdPt << EPT_PDPT_SHIFT;
1231
1232#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
1233 rc = pgmPoolAlloc(pVM, GCPdPt, PGMPOOLKIND_64BIT_PD_FOR_PHYS, pShwPage->idx, iPdPt, &pShwPage);
1234#else
1235 rc = pgmPoolAlloc(pVM, GCPdPt + RT_BIT_64(62) /* hack: make the address unique */, PGMPOOLKIND_64BIT_PD_FOR_PHYS, pShwPage->idx, iPdPt, &pShwPage);
1236#endif
1237 if (rc == VERR_PGM_POOL_FLUSHED)
1238 {
1239 Log(("PGMShwSyncEPTPDPtr: PGM pool flushed (2) -> signal sync cr3\n"));
1240 Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
1241 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
1242 return VINF_PGM_SYNC_CR3;
1243 }
1244 AssertRCReturn(rc, rc);
1245 }
1246 else
1247 {
1248 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & EPT_PDPTE_PG_MASK);
1249 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1250 }
1251 /* The PD was cached or created; hook it up now and fill with the default value. */
1252 pPdpe->u = pShwPage->Core.Key;
1253 pPdpe->n.u1Present = 1;
1254 pPdpe->n.u1Write = 1;
1255 pPdpe->n.u1Execute = 1;
1256
1257 *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1258 return VINF_SUCCESS;
1259}
1260
1261#endif /* !IN_RC */
1262
1263/**
1264 * Gets effective Guest OS page information.
1265 *
1266 * When GCPtr is in a big page, the function will return as if it were a normal
1267 * 4KB page. If the need for distinguishing between big and normal pages becomes
1268 * necessary at a later point, a variant of PGMGstGetPage() will be created for
1269 * that purpose.
1270 *
1271 * @returns VBox status.
1272 * @param pVM VM Handle.
1273 * @param GCPtr Guest Context virtual address of the page.
1274 * @param pfFlags Where to store the flags. These are X86_PTE_*, even for big pages.
1275 * @param pGCPhys Where to store the GC physical address of the page.
1276 * This is page aligned.
1277 */
1278VMMDECL(int) PGMGstGetPage(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
1279{
1280 return PGM_GST_PFN(GetPage,pVM)(pVM, GCPtr, pfFlags, pGCPhys);
1281}
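
/*
 * Usage sketch (hypothetical, not from this file): translating a guest
 * virtual address and checking its attributes with PGMGstGetPage.
 *
 *     uint64_t fFlags;
 *     RTGCPHYS GCPhys;
 *     int rc = PGMGstGetPage(pVM, GCPtr, &fFlags, &GCPhys);
 *     if (RT_SUCCESS(rc) && (fFlags & X86_PTE_P) && (fFlags & X86_PTE_RW))
 *         // GCPhys is the page-aligned guest-physical address; add the offset as needed.
 *         GCPhys |= GCPtr & PAGE_OFFSET_MASK;
 */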
1282
1283
1284/**
1285 * Checks if the page is present.
1286 *
1287 * @returns true if the page is present.
1288 * @returns false if the page is not present.
1289 * @param pVM The VM handle.
1290 * @param GCPtr Address within the page.
1291 */
1292VMMDECL(bool) PGMGstIsPagePresent(PVM pVM, RTGCPTR GCPtr)
1293{
1294 int rc = PGMGstGetPage(pVM, GCPtr, NULL, NULL);
1295 return RT_SUCCESS(rc);
1296}
1297
1298
1299/**
1300 * Sets (replaces) the page flags for a range of pages in the guest's tables.
1301 *
1302 * @returns VBox status.
1303 * @param pVM VM handle.
1304 * @param GCPtr The address of the first page.
1305 * @param cb The size of the range in bytes.
1306 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
1307 */
1308VMMDECL(int) PGMGstSetPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
1309{
1310 return PGMGstModifyPage(pVM, GCPtr, cb, fFlags, 0);
1311}
1312
1313
1314/**
1315 * Modify page flags for a range of pages in the guest's tables
1316 *
1317 * The existing flags are ANDed with the fMask and ORed with the fFlags.
1318 *
1319 * @returns VBox status code.
1320 * @param pVM VM handle.
1321 * @param GCPtr Virtual address of the first page in the range.
1322 * @param cb Size (in bytes) of the range to apply the modification to.
1323 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
1324 * @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
1325 * Be very CAREFUL when ~'ing constants which could be 32-bit!
1326 */
1327VMMDECL(int) PGMGstModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
1328{
1329 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,GstModifyPage), a);
1330
1331 /*
1332 * Validate input.
1333 */
1334 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
1335 Assert(cb);
1336
1337 LogFlow(("PGMGstModifyPage %RGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));
1338
1339 /*
1340 * Adjust input.
1341 */
1342 cb += GCPtr & PAGE_OFFSET_MASK;
1343 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
1344 GCPtr = (GCPtr & PAGE_BASE_GC_MASK);
1345
1346 /*
1347 * Call worker.
1348 */
1349 int rc = PGM_GST_PFN(ModifyPage, pVM)(pVM, GCPtr, cb, fFlags, fMask);
1350
1351 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,GstModifyPage), a);
1352 return rc;
1353}
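
/*
 * Worked example (hypothetical, not from this file): marking a range accessed
 * and dirty in the guest's own page tables with PGMGstModifyPage. The AND mask
 * keeps every existing flag; the OR mask adds A and D.
 *
 *     int rc = PGMGstModifyPage(pVM, GCPtr, cb, X86_PTE_A | X86_PTE_D, UINT64_MAX);
 *     AssertRC(rc);
 */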
1354
1355
1356/**
1357 * Gets the specified page directory pointer table entry.
1358 *
1359 * @returns PDP entry
1360 * @param pVM The VM handle.
1361 * @param iPdpt PDPT index.
1362 */
1363VMMDECL(X86PDPE) PGMGstGetPaePDPtr(PVM pVM, unsigned iPdpt)
1364{
1365 Assert(iPdpt <= 3);
1366 return pgmGstGetPaePDPTPtr(&pVM->pgm.s)->a[iPdpt & 3];
1367}
1368
1369
1370/**
1371 * Gets the current CR3 register value for the shadow memory context.
1372 * @returns CR3 value.
1373 * @param pVM The VM handle.
1374 */
1375VMMDECL(RTHCPHYS) PGMGetHyperCR3(PVM pVM)
1376{
1377#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
1378 return pVM->pgm.s.HCPhysShwCR3;
1379#else
1380 PGMMODE enmShadowMode = pVM->pgm.s.enmShadowMode;
1381 switch (enmShadowMode)
1382 {
1383 case PGMMODE_32_BIT:
1384 return pVM->pgm.s.HCPhysShw32BitPD;
1385
1386 case PGMMODE_PAE:
1387 case PGMMODE_PAE_NX:
1388 return pVM->pgm.s.HCPhysShwPaePdpt;
1389
1390 case PGMMODE_AMD64:
1391 case PGMMODE_AMD64_NX:
1392 return pVM->pgm.s.HCPhysShwCR3;
1393
1394 case PGMMODE_EPT:
1395 return pVM->pgm.s.HCPhysShwNestedRoot;
1396
1397 case PGMMODE_NESTED:
1398 return PGMGetNestedCR3(pVM, PGMGetHostMode(pVM));
1399
1400 default:
1401 AssertMsgFailed(("enmShadowMode=%d\n", enmShadowMode));
1402 return ~0;
1403 }
1404#endif
1405}
1406
1407
1408/**
1409 * Gets the current CR3 register value for the nested memory context.
1410 * @returns CR3 value.
1411 * @param pVM The VM handle.
1412 */
1413VMMDECL(RTHCPHYS) PGMGetNestedCR3(PVM pVM, PGMMODE enmShadowMode)
1414{
1415#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
1416 return pVM->pgm.s.HCPhysShwCR3;
1417#else
1418 switch (enmShadowMode)
1419 {
1420 case PGMMODE_32_BIT:
1421 return pVM->pgm.s.HCPhysShw32BitPD;
1422
1423 case PGMMODE_PAE:
1424 case PGMMODE_PAE_NX:
1425 return pVM->pgm.s.HCPhysShwPaePdpt;
1426
1427 case PGMMODE_AMD64:
1428 case PGMMODE_AMD64_NX:
1429 return pVM->pgm.s.HCPhysShwCR3;
1430
1431 default:
1432 AssertMsgFailed(("enmShadowMode=%d\n", enmShadowMode));
1433 return ~0;
1434 }
1435#endif
1436}
1437
1438
1439/**
1440 * Gets the current CR3 register value for the EPT paging memory context.
1441 * @returns CR3 value.
1442 * @param pVM The VM handle.
1443 */
1444VMMDECL(RTHCPHYS) PGMGetEPTCR3(PVM pVM)
1445{
1446 return pVM->pgm.s.HCPhysShwNestedRoot;
1447}
1448
1449
1450/**
1451 * Gets the CR3 register value for the 32-Bit shadow memory context.
1452 * @returns CR3 value.
1453 * @param pVM The VM handle.
1454 */
1455VMMDECL(RTHCPHYS) PGMGetHyper32BitCR3(PVM pVM)
1456{
1457#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
1458 return pVM->pgm.s.HCPhysShwCR3;
1459#else
1460 return pVM->pgm.s.HCPhysShw32BitPD;
1461#endif
1462}
1463
1464
1465/**
1466 * Gets the CR3 register value for the PAE shadow memory context.
1467 * @returns CR3 value.
1468 * @param pVM The VM handle.
1469 */
1470VMMDECL(RTHCPHYS) PGMGetHyperPaeCR3(PVM pVM)
1471{
1472#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
1473 return pVM->pgm.s.HCPhysShwCR3;
1474#else
1475 return pVM->pgm.s.HCPhysShwPaePdpt;
1476#endif
1477}
1478
1479
1480/**
1481 * Gets the CR3 register value for the AMD64 shadow memory context.
1482 * @returns CR3 value.
1483 * @param pVM The VM handle.
1484 */
1485VMMDECL(RTHCPHYS) PGMGetHyperAmd64CR3(PVM pVM)
1486{
1487 return pVM->pgm.s.HCPhysShwCR3;
1488}
1489
1490/**
1491 * Gets the current CR3 register value for the HC intermediate memory context.
1492 * @returns CR3 value.
1493 * @param pVM The VM handle.
1494 */
1495VMMDECL(RTHCPHYS) PGMGetInterHCCR3(PVM pVM)
1496{
1497 switch (pVM->pgm.s.enmHostMode)
1498 {
1499 case SUPPAGINGMODE_32_BIT:
1500 case SUPPAGINGMODE_32_BIT_GLOBAL:
1501 return pVM->pgm.s.HCPhysInterPD;
1502
1503 case SUPPAGINGMODE_PAE:
1504 case SUPPAGINGMODE_PAE_GLOBAL:
1505 case SUPPAGINGMODE_PAE_NX:
1506 case SUPPAGINGMODE_PAE_GLOBAL_NX:
1507 return pVM->pgm.s.HCPhysInterPaePDPT;
1508
1509 case SUPPAGINGMODE_AMD64:
1510 case SUPPAGINGMODE_AMD64_GLOBAL:
1511 case SUPPAGINGMODE_AMD64_NX:
1512 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
1513 return pVM->pgm.s.HCPhysInterPaePDPT;
1514
1515 default:
1516 AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode));
1517 return ~0;
1518 }
1519}
1520
1521
1522/**
1523 * Gets the current CR3 register value for the RC intermediate memory context.
1524 * @returns CR3 value.
1525 * @param pVM The VM handle.
1526 */
1527VMMDECL(RTHCPHYS) PGMGetInterRCCR3(PVM pVM)
1528{
1529 switch (pVM->pgm.s.enmShadowMode)
1530 {
1531 case PGMMODE_32_BIT:
1532 return pVM->pgm.s.HCPhysInterPD;
1533
1534 case PGMMODE_PAE:
1535 case PGMMODE_PAE_NX:
1536 return pVM->pgm.s.HCPhysInterPaePDPT;
1537
1538 case PGMMODE_AMD64:
1539 case PGMMODE_AMD64_NX:
1540 return pVM->pgm.s.HCPhysInterPaePML4;
1541
1542 case PGMMODE_EPT:
1543 case PGMMODE_NESTED:
1544 return 0; /* not relevant */
1545
1546 default:
1547 AssertMsgFailed(("enmShadowMode=%d\n", pVM->pgm.s.enmShadowMode));
1548 return ~0;
1549 }
1550}
1551
1552
1553/**
1554 * Gets the CR3 register value for the 32-Bit intermediate memory context.
1555 * @returns CR3 value.
1556 * @param pVM The VM handle.
1557 */
1558VMMDECL(RTHCPHYS) PGMGetInter32BitCR3(PVM pVM)
1559{
1560 return pVM->pgm.s.HCPhysInterPD;
1561}
1562
1563
1564/**
1565 * Gets the CR3 register value for the PAE intermediate memory context.
1566 * @returns CR3 value.
1567 * @param pVM The VM handle.
1568 */
1569VMMDECL(RTHCPHYS) PGMGetInterPaeCR3(PVM pVM)
1570{
1571 return pVM->pgm.s.HCPhysInterPaePDPT;
1572}
1573
1574
1575/**
1576 * Gets the CR3 register value for the AMD64 intermediate memory context.
1577 * @returns CR3 value.
1578 * @param pVM The VM handle.
1579 */
1580VMMDECL(RTHCPHYS) PGMGetInterAmd64CR3(PVM pVM)
1581{
1582 return pVM->pgm.s.HCPhysInterPaePML4;
1583}
1584
1585
1586/**
1587 * Performs and schedules necessary updates following a CR3 load or reload.
1588 *
1589 * This will normally involve mapping the guest PD or nPDPT
1590 *
1591 * @returns VBox status code.
1592 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
1593 * safely be ignored and overridden since the FF will be set too then.
1594 * @param pVM VM handle.
1595 * @param cr3 The new cr3.
1596 * @param fGlobal Indicates whether this is a global flush or not.
1597 */
1598VMMDECL(int) PGMFlushTLB(PVM pVM, uint64_t cr3, bool fGlobal)
1599{
1600 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLB), a);
1601
1602 /*
1603 * Always flag the necessary updates; necessary for hardware acceleration
1604 */
1605 /** @todo optimize this, it shouldn't always be necessary. */
1606 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
1607 if (fGlobal)
1608 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
1609 LogFlow(("PGMFlushTLB: cr3=%RX64 OldCr3=%RX64 fGlobal=%d\n", cr3, pVM->pgm.s.GCPhysCR3, fGlobal));
1610
1611 /*
1612 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
1613 */
1614 int rc = VINF_SUCCESS;
1615 RTGCPHYS GCPhysCR3;
1616 if ( pVM->pgm.s.enmGuestMode == PGMMODE_PAE
1617 || pVM->pgm.s.enmGuestMode == PGMMODE_PAE_NX
1618 || pVM->pgm.s.enmGuestMode == PGMMODE_AMD64
1619 || pVM->pgm.s.enmGuestMode == PGMMODE_AMD64_NX)
1620 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1621 else
1622 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1623 if (pVM->pgm.s.GCPhysCR3 != GCPhysCR3)
1624 {
1625#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
1626 /* Unmap the old CR3 value before activating the new one. */
1627 rc = PGM_BTH_PFN(UnmapCR3, pVM)(pVM);
1628 AssertRC(rc);
1629#endif
1630 RTGCPHYS GCPhysOldCR3 = pVM->pgm.s.GCPhysCR3;
1631 pVM->pgm.s.GCPhysCR3 = GCPhysCR3;
1632 rc = PGM_BTH_PFN(MapCR3, pVM)(pVM, GCPhysCR3);
1633 if (RT_LIKELY(rc == VINF_SUCCESS))
1634 {
1635 if (!pVM->pgm.s.fMappingsFixed)
1636 {
1637 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1638#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
1639 rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, GCPhysCR3);
1640#endif
1641 }
1642 }
1643 else
1644 {
1645 AssertMsg(rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
1646 Assert(VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL | VM_FF_PGM_SYNC_CR3));
1647 pVM->pgm.s.GCPhysCR3 = GCPhysOldCR3;
1648 pVM->pgm.s.fSyncFlags |= PGM_SYNC_MAP_CR3;
1649 if (!pVM->pgm.s.fMappingsFixed)
1650 pVM->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;
1651 }
1652
1653 if (fGlobal)
1654 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLBNewCR3Global));
1655 else
1656 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLBNewCR3));
1657 }
1658 else
1659 {
1660 /*
1661 * Check if we have a pending update of the CR3 monitoring.
1662 */
1663 if (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
1664 {
1665 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1666 Assert(!pVM->pgm.s.fMappingsFixed);
1667#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
1668 rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, GCPhysCR3);
1669#endif
1670 }
1671 if (fGlobal)
1672 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLBSameCR3Global));
1673 else
1674 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLBSameCR3));
1675 }
1676
1677 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLB), a);
1678 return rc;
1679}
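
/*
 * Usage sketch (hypothetical, not from this file): a MOV CR3 emulation path
 * calling PGMFlushTLB. Whether the flush is treated as global typically depends
 * on CR4.PGE; the exact caller-side policy shown here is an assumption.
 *
 *     bool fGlobal = !(CPUMGetGuestCR4(pVM) & X86_CR4_PGE);
 *     int rc = PGMFlushTLB(pVM, uNewCr3, fGlobal);
 *     // VINF_PGM_SYNC_CR3 may be returned; it is safe to ignore because the
 *     // corresponding force-action flag has already been set.
 */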
1680
1681
1682/**
1683 * Performs and schedules necessary updates following a CR3 load or reload when
1684 * using nested or extended paging.
1685 *
1686 * This API is an alternative to PGMFlushTLB that avoids actually flushing the
1687 * TLB and triggering a SyncCR3.
1688 *
1689 * This will normally involve mapping the guest PD or nPDPT
1690 *
1691 * @returns VBox status code.
1692 * @retval VINF_SUCCESS.
1693 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync (only possible when
1694 * not in nested mode). This can safely be ignored and overridden since the
1695 * FF will be set too then.
1696 * @param pVM VM handle.
1697 * @param cr3 The new cr3.
1698 */
1699VMMDECL(int) PGMUpdateCR3(PVM pVM, uint64_t cr3)
1700{
1701 LogFlow(("PGMUpdateCR3: cr3=%RX64 OldCr3=%RX64\n", cr3, pVM->pgm.s.GCPhysCR3));
1702
1703 /* We assume we're only called in nested paging mode. */
1704 Assert(pVM->pgm.s.fMappingsFixed);
1705 Assert(!(pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3));
1706 Assert(pVM->pgm.s.enmShadowMode == PGMMODE_NESTED || pVM->pgm.s.enmShadowMode == PGMMODE_EPT);
1707
1708 /*
1709 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
1710 */
1711 int rc = VINF_SUCCESS;
1712 RTGCPHYS GCPhysCR3;
1713 if ( pVM->pgm.s.enmGuestMode == PGMMODE_PAE
1714 || pVM->pgm.s.enmGuestMode == PGMMODE_PAE_NX
1715 || pVM->pgm.s.enmGuestMode == PGMMODE_AMD64
1716 || pVM->pgm.s.enmGuestMode == PGMMODE_AMD64_NX)
1717 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1718 else
1719 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1720 if (pVM->pgm.s.GCPhysCR3 != GCPhysCR3)
1721 {
1722 pVM->pgm.s.GCPhysCR3 = GCPhysCR3;
1723 rc = PGM_BTH_PFN(MapCR3, pVM)(pVM, GCPhysCR3);
1724 AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */
1725 }
1726 return rc;
1727}
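
/*
 * Usage sketch (illustrative only, not part of the original source; the
 * surrounding handler and the uNewCr3 variable are hypothetical): a caller on
 * a nested-paging / EPT CR3 load path might forward the new value like this.
 *
 *     int rc2 = PGMUpdateCR3(pVM, uNewCr3);
 *     AssertRC(rc2); // VINF_PGM_SYNC_CR3 is not expected with nested paging.
 */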
1728
1729
1730/**
1731 * Synchronize the paging structures.
1732 *
1733 * This function is called in response to the VM_FF_PGM_SYNC_CR3 and
1734 * VM_FF_PGM_SYNC_CR3_NON_GLOBAL flags. These two force-action flags are set
1735 * in several places, most importantly whenever the CR3 is loaded.
1736 *
1737 * @returns VBox status code.
1738 * @param pVM The virtual machine.
1739 * @param cr0 Guest context CR0 register
1740 * @param cr3 Guest context CR3 register
1741 * @param cr4 Guest context CR4 register
1742 * @param fGlobal Including global page directories or not
1743 */
1744VMMDECL(int) PGMSyncCR3(PVM pVM, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
1745{
1746 int rc;
1747
1748 /*
1749 * We might be called when we shouldn't.
1750 *
1751 * The mode switching will ensure that the PD is resynced
1752 * after every mode switch. So, if we find ourselves here
1753 * when in protected or real mode we can safely disable the
1754 * FF and return immediately.
1755 */
1756 if (pVM->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
1757 {
1758 Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
1759 VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3);
1760 VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
1761 return VINF_SUCCESS;
1762 }
1763
1764 /* If global pages are not supported, then all flushes are global. */
1765 if (!(cr4 & X86_CR4_PGE))
1766 fGlobal = true;
1767 LogFlow(("PGMSyncCR3: cr0=%RX64 cr3=%RX64 cr4=%RX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
1768 VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3), VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL)));
1769
1770#ifdef PGMPOOL_WITH_MONITORING
1771 /*
1772 * The pool may have pending stuff and even require a return to ring-3 to
1773 * clear the whole thing.
1774 */
1775 rc = pgmPoolSyncCR3(pVM);
1776 if (rc != VINF_SUCCESS)
1777 return rc;
1778#endif
1779
1780 /*
1781 * Check if we need to finish an aborted MapCR3 call (see PGMFlushTLB).
1782 * This should be done before SyncCR3.
1783 */
1784 if (pVM->pgm.s.fSyncFlags & PGM_SYNC_MAP_CR3)
1785 {
1786 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MAP_CR3;
1787
1788 RTGCPHYS GCPhysCR3Old = pVM->pgm.s.GCPhysCR3;
1789 RTGCPHYS GCPhysCR3;
1790 if ( pVM->pgm.s.enmGuestMode == PGMMODE_PAE
1791 || pVM->pgm.s.enmGuestMode == PGMMODE_PAE_NX
1792 || pVM->pgm.s.enmGuestMode == PGMMODE_AMD64
1793 || pVM->pgm.s.enmGuestMode == PGMMODE_AMD64_NX)
1794 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1795 else
1796 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1797
1798#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
1799 if (pVM->pgm.s.GCPhysCR3 != GCPhysCR3)
1800 {
1801 /* Unmap the old CR3 value before activating the new one. */
1802 rc = PGM_BTH_PFN(UnmapCR3, pVM)(pVM);
1803 AssertRC(rc);
1804 }
1805#endif
1806
1807 pVM->pgm.s.GCPhysCR3 = GCPhysCR3;
1808 rc = PGM_BTH_PFN(MapCR3, pVM)(pVM, GCPhysCR3);
1809#ifdef IN_RING3
1810 if (rc == VINF_PGM_SYNC_CR3)
1811 rc = pgmPoolSyncCR3(pVM);
1812#else
1813 if (rc == VINF_PGM_SYNC_CR3)
1814 {
1815 pVM->pgm.s.GCPhysCR3 = GCPhysCR3Old;
1816 return rc;
1817 }
1818#endif
1819 AssertRCReturn(rc, rc);
1820 AssertRCSuccessReturn(rc, VERR_INTERNAL_ERROR);
1821 }
1822
1823 /*
1824 * Let the 'Bth' function do the work and we'll just keep track of the flags.
1825 */
1826 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
1827 rc = PGM_BTH_PFN(SyncCR3, pVM)(pVM, cr0, cr3, cr4, fGlobal);
1828 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
1829 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
1830 if (rc == VINF_SUCCESS)
1831 {
1832 if (!(pVM->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
1833 {
1834 VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3);
1835 VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
1836 }
1837
1838 /*
1839 * Check if we have a pending update of the CR3 monitoring.
1840 */
1841 if (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
1842 {
1843 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1844 Assert(!pVM->pgm.s.fMappingsFixed);
1845 Assert(pVM->pgm.s.GCPhysCR3 == pVM->pgm.s.GCPhysGstCR3Monitored);
1846#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
1847 rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, pVM->pgm.s.GCPhysCR3);
1848#endif
1849 }
1850 }
1851
1852 /*
1853 * Now flush the CR3 (guest context).
1854 */
1855 if (rc == VINF_SUCCESS)
1856 PGM_INVL_GUEST_TLBS();
1857 return rc;
1858}
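
/*
 * Usage sketch (illustrative only, not part of the original source; cr0, cr3
 * and cr4 are assumed to hold the current guest register values): the
 * execution loop typically checks the force-action flags and resyncs before
 * resuming guest code, roughly along these lines.
 *
 *     if (VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL))
 *     {
 *         int rc2 = PGMSyncCR3(pVM, cr0, cr3, cr4, VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
 *         if (RT_FAILURE(rc2))
 *             return rc2;
 *     }
 */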
1859
1860
1861/**
1862 * Called whenever CR0, CR4 or EFER changes in a way which may affect
1863 * the paging mode.
1864 *
1865 * @returns VBox status code fit for scheduling in GC and R0.
1866 * @retval VINF_SUCCESS if there was no change, or it was successfully dealt with.
1867 * @retval VINF_PGM_CHANGE_MODE if we're in GC or R0 and the mode changes.
1868 * @param pVM VM handle.
1869 * @param cr0 The new cr0.
1870 * @param cr4 The new cr4.
1871 * @param efer The new extended feature enable register.
1872 */
1873VMMDECL(int) PGMChangeMode(PVM pVM, uint64_t cr0, uint64_t cr4, uint64_t efer)
1874{
1875 PGMMODE enmGuestMode;
1876
1877 /*
1878 * Calc the new guest mode.
1879 */
1880 if (!(cr0 & X86_CR0_PE))
1881 enmGuestMode = PGMMODE_REAL;
1882 else if (!(cr0 & X86_CR0_PG))
1883 enmGuestMode = PGMMODE_PROTECTED;
1884 else if (!(cr4 & X86_CR4_PAE))
1885 enmGuestMode = PGMMODE_32_BIT;
1886 else if (!(efer & MSR_K6_EFER_LME))
1887 {
1888 if (!(efer & MSR_K6_EFER_NXE))
1889 enmGuestMode = PGMMODE_PAE;
1890 else
1891 enmGuestMode = PGMMODE_PAE_NX;
1892 }
1893 else
1894 {
1895 if (!(efer & MSR_K6_EFER_NXE))
1896 enmGuestMode = PGMMODE_AMD64;
1897 else
1898 enmGuestMode = PGMMODE_AMD64_NX;
1899 }
1900
1901 /*
1902 * Did it change?
1903 */
1904 if (pVM->pgm.s.enmGuestMode == enmGuestMode)
1905 return VINF_SUCCESS;
1906
1907 /* Flush the TLB */
1908 PGM_INVL_GUEST_TLBS();
1909
1910#ifdef IN_RING3
1911 return PGMR3ChangeMode(pVM, enmGuestMode);
1912#else
1913 LogFlow(("PGMChangeMode: returns VINF_PGM_CHANGE_MODE.\n"));
1914 return VINF_PGM_CHANGE_MODE;
1915#endif
1916}
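
/*
 * Usage sketch (illustrative only, not part of the original source; the
 * uNewCr0/uNewCr4/uNewEfer variables are hypothetical): a MOV CRx or EFER
 * write handler would typically pass the updated values along and propagate a
 * VINF_PGM_CHANGE_MODE status so ring-3 can perform the actual mode switch.
 *
 *     int rc2 = PGMChangeMode(pVM, uNewCr0, uNewCr4, uNewEfer);
 *     if (rc2 == VINF_PGM_CHANGE_MODE)
 *         return rc2; // reschedule to ring-3; PGMR3ChangeMode does the work there.
 */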
1917
1918
1919/**
1920 * Gets the current guest paging mode.
1921 *
1922 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
1923 *
1924 * @returns The current paging mode.
1925 * @param pVM The VM handle.
1926 */
1927VMMDECL(PGMMODE) PGMGetGuestMode(PVM pVM)
1928{
1929 return pVM->pgm.s.enmGuestMode;
1930}
1931
1932
1933/**
1934 * Gets the current shadow paging mode.
1935 *
1936 * @returns The current paging mode.
1937 * @param pVM The VM handle.
1938 */
1939VMMDECL(PGMMODE) PGMGetShadowMode(PVM pVM)
1940{
1941 return pVM->pgm.s.enmShadowMode;
1942}
1943
1944/**
1945 * Gets the current host paging mode.
1946 *
1947 * @returns The current paging mode.
1948 * @param pVM The VM handle.
1949 */
1950VMMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
1951{
1952 switch (pVM->pgm.s.enmHostMode)
1953 {
1954 case SUPPAGINGMODE_32_BIT:
1955 case SUPPAGINGMODE_32_BIT_GLOBAL:
1956 return PGMMODE_32_BIT;
1957
1958 case SUPPAGINGMODE_PAE:
1959 case SUPPAGINGMODE_PAE_GLOBAL:
1960 return PGMMODE_PAE;
1961
1962 case SUPPAGINGMODE_PAE_NX:
1963 case SUPPAGINGMODE_PAE_GLOBAL_NX:
1964 return PGMMODE_PAE_NX;
1965
1966 case SUPPAGINGMODE_AMD64:
1967 case SUPPAGINGMODE_AMD64_GLOBAL:
1968 return PGMMODE_AMD64;
1969
1970 case SUPPAGINGMODE_AMD64_NX:
1971 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
1972 return PGMMODE_AMD64_NX;
1973
1974 default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
1975 }
1976
1977 return PGMMODE_INVALID;
1978}
1979
1980
1981/**
1982 * Get mode name.
1983 *
1984 * @returns read-only name string.
1985 * @param enmMode The mode which name is desired.
1986 */
1987VMMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
1988{
1989 switch (enmMode)
1990 {
1991 case PGMMODE_REAL: return "Real";
1992 case PGMMODE_PROTECTED: return "Protected";
1993 case PGMMODE_32_BIT: return "32-bit";
1994 case PGMMODE_PAE: return "PAE";
1995 case PGMMODE_PAE_NX: return "PAE+NX";
1996 case PGMMODE_AMD64: return "AMD64";
1997 case PGMMODE_AMD64_NX: return "AMD64+NX";
1998 case PGMMODE_NESTED: return "Nested";
1999 case PGMMODE_EPT: return "EPT";
2000 default: return "unknown mode value";
2001 }
2002}
2003
2004
2005/**
2006 * Acquire the PGM lock.
2007 *
2008 * @returns VBox status code
2009 * @param pVM The VM to operate on.
2010 */
2011int pgmLock(PVM pVM)
2012{
2013 int rc = PDMCritSectEnter(&pVM->pgm.s.CritSect, VERR_SEM_BUSY);
2014#ifdef IN_RC
2015 if (rc == VERR_SEM_BUSY)
2016 rc = VMMGCCallHost(pVM, VMMCALLHOST_PGM_LOCK, 0);
2017#elif defined(IN_RING0)
2018 if (rc == VERR_SEM_BUSY)
2019 rc = VMMR0CallHost(pVM, VMMCALLHOST_PGM_LOCK, 0);
2020#endif
2021 AssertRC(rc);
2022 return rc;
2023}
2024
2025
2026/**
2027 * Release the PGM lock.
2028 *
2030 * @param pVM The VM to operate on.
2031 */
2032void pgmUnlock(PVM pVM)
2033{
2034 PDMCritSectLeave(&pVM->pgm.s.CritSect);
2035}
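
/*
 * Usage sketch (illustrative only, not part of the original source; the work
 * in the middle is a placeholder): internal callers bracket updates of shared
 * PGM state with the lock pair.
 *
 *     int rc2 = pgmLock(pVM);
 *     AssertRCReturn(rc2, rc2);
 *     // ... touch shared PGM structures here ...
 *     pgmUnlock(pVM);
 */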
2036
2037#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
2038
2039/**
2040 * Temporarily maps one guest page specified by GC physical address.
2041 * These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
2042 *
2043 * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
2044 * reused after 8 mappings (or perhaps a few more if you score with the cache).
2045 *
2046 * @returns VBox status.
2047 * @param pVM VM handle.
2048 * @param GCPhys GC Physical address of the page.
2049 * @param ppv Where to store the address of the mapping.
2050 */
2051VMMDECL(int) PGMDynMapGCPage(PVM pVM, RTGCPHYS GCPhys, void **ppv)
2052{
2053 AssertMsg(!(GCPhys & PAGE_OFFSET_MASK), ("GCPhys=%RGp\n", GCPhys));
2054
2055 /*
2056 * Get the ram range.
2057 */
2058 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
2059 while (pRam && GCPhys - pRam->GCPhys >= pRam->cb)
2060 pRam = pRam->CTX_SUFF(pNext);
2061 if (!pRam)
2062 {
2063 AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
2064 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2065 }
2066
2067 /*
2068 * Pass it on to PGMDynMapHCPage.
2069 */
2070 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT]);
2071 //Log(("PGMDynMapGCPage: GCPhys=%RGp HCPhys=%RHp\n", GCPhys, HCPhys));
2072#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
2073 pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
2074#else
2075 PGMDynMapHCPage(pVM, HCPhys, ppv);
2076#endif
2077 return VINF_SUCCESS;
2078}
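
/*
 * Usage sketch (illustrative only, not part of the original source; GCPhysPage
 * and abBuf are hypothetical): mapping a page-aligned guest physical address
 * and copying its contents out. Only a handful of dynamic mapping slots
 * exist, so use the pointer before requesting further mappings.
 *
 *     void *pvPage;
 *     int rc2 = PGMDynMapGCPage(pVM, GCPhysPage & ~(RTGCPHYS)PAGE_OFFSET_MASK, &pvPage);
 *     if (RT_SUCCESS(rc2))
 *         memcpy(abBuf, pvPage, PAGE_SIZE);
 */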
2079
2080
2081/**
2082 * Temporarily maps one guest page specified by an unaligned GC physical address.
2083 * These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
2084 *
2085 * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
2086 * reused after 8 mappings (or perhaps a few more if you score with the cache).
2087 *
2088 * The caller is aware that only the specified page is mapped and that really bad things
2089 * will happen if writing beyond the page!
2090 *
2091 * @returns VBox status.
2092 * @param pVM VM handle.
2093 * @param GCPhys GC Physical address within the page to be mapped.
2094 * @param ppv Where to store the mapping address corresponding to GCPhys.
2095 */
2096VMMDECL(int) PGMDynMapGCPageOff(PVM pVM, RTGCPHYS GCPhys, void **ppv)
2097{
2098 /*
2099 * Get the ram range.
2100 */
2101 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
2102 while (pRam && GCPhys - pRam->GCPhys >= pRam->cb)
2103 pRam = pRam->CTX_SUFF(pNext);
2104 if (!pRam)
2105 {
2106 AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
2107 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2108 }
2109
2110 /*
2111 * Pass it on to PGMDynMapHCPage.
2112 */
2113 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT]);
2114#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
2115 pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
2116#else
2117 PGMDynMapHCPage(pVM, HCPhys, ppv);
2118#endif
2119 *ppv = (void *)((uintptr_t)*ppv | (GCPhys & PAGE_OFFSET_MASK));
2120 return VINF_SUCCESS;
2121}
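
/*
 * Usage sketch (illustrative only, not part of the original source;
 * GCPhysUnaligned is hypothetical): unlike PGMDynMapGCPage, the returned
 * pointer already includes the page offset, so an unaligned guest address can
 * be dereferenced directly as long as the access stays within the page.
 *
 *     uint32_t *pu32;
 *     int rc2 = PGMDynMapGCPageOff(pVM, GCPhysUnaligned, (void **)&pu32);
 *     if (RT_SUCCESS(rc2))
 *         LogFlow(("value=%#x\n", *pu32));
 */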
2122
2123
2124# ifdef IN_RC
2125/**
2126 * Temporarily maps one host page specified by HC physical address.
2127 *
2128 * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
2129 * reused after 8 mappings (or perhaps a few more if you score with the cache).
2130 *
2131 * @returns VINF_SUCCESS, will bail out to ring-3 on failure.
2132 * @param pVM VM handle.
2133 * @param HCPhys HC Physical address of the page.
2134 * @param ppv Where to store the address of the mapping. This is the
2135 * address of the PAGE not the exact address corresponding
2136 * to HCPhys. Use PGMDynMapHCPageOff if you care for the
2137 * page offset.
2138 */
2139VMMDECL(int) PGMDynMapHCPage(PVM pVM, RTHCPHYS HCPhys, void **ppv)
2140{
2141 AssertMsg(!(HCPhys & PAGE_OFFSET_MASK), ("HCPhys=%RHp\n", HCPhys));
2142
2143 /*
2144 * Check the cache.
2145 */
2146 register unsigned iCache;
2147 if ( pVM->pgm.s.aHCPhysDynPageMapCache[iCache = 0] == HCPhys
2148 || pVM->pgm.s.aHCPhysDynPageMapCache[iCache = 1] == HCPhys
2149 || pVM->pgm.s.aHCPhysDynPageMapCache[iCache = 2] == HCPhys
2150 || pVM->pgm.s.aHCPhysDynPageMapCache[iCache = 3] == HCPhys)
2151 {
2152 static const uint8_t au8Trans[MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT][RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache)] =
2153 {
2154 { 0, 5, 6, 7 },
2155 { 0, 1, 6, 7 },
2156 { 0, 1, 2, 7 },
2157 { 0, 1, 2, 3 },
2158 { 4, 1, 2, 3 },
2159 { 4, 5, 2, 3 },
2160 { 4, 5, 6, 3 },
2161 { 4, 5, 6, 7 },
2162 };
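 /* au8Trans gives, for the last allocated slot (row) and the cache entry
  * that hit (column), the page slot which most recently cached that
  * HCPhys: slots i and i + 4 share cache entry i, and slots are handed
  * out round-robin, hence the pattern above. */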
2163 Assert(RT_ELEMENTS(au8Trans) == 8);
2164 Assert(RT_ELEMENTS(au8Trans[0]) == 4);
2165 int iPage = au8Trans[pVM->pgm.s.iDynPageMapLast][iCache];
2166 void *pv = pVM->pgm.s.pbDynPageMapBaseGC + (iPage << PAGE_SHIFT);
2167 *ppv = pv;
2168 STAM_COUNTER_INC(&pVM->pgm.s.StatRCDynMapCacheHits);
2169 //Log(("PGMGCDynMapHCPage: HCPhys=%RHp pv=%p iPage=%d iCache=%d\n", HCPhys, pv, iPage, iCache));
2170 return VINF_SUCCESS;
2171 }
2172 Assert(RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) == 4);
2173 STAM_COUNTER_INC(&pVM->pgm.s.StatRCDynMapCacheMisses);
2174
2175 /*
2176 * Update the page tables.
2177 */
2178 register unsigned iPage = pVM->pgm.s.iDynPageMapLast;
2179 pVM->pgm.s.iDynPageMapLast = iPage = (iPage + 1) & ((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) - 1);
2180 Assert((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) == 8);
2181
2182 pVM->pgm.s.aHCPhysDynPageMapCache[iPage & (RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) - 1)] = HCPhys;
2183 pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage].u = (uint32_t)HCPhys | X86_PTE_P | X86_PTE_A | X86_PTE_D;
2184 pVM->pgm.s.paDynPageMapPaePTEsGC[iPage].u = HCPhys | X86_PTE_P | X86_PTE_A | X86_PTE_D;
2185
2186 void *pv = pVM->pgm.s.pbDynPageMapBaseGC + (iPage << PAGE_SHIFT);
2187 *ppv = pv;
2188 ASMInvalidatePage(pv);
2189 Log4(("PGMGCDynMapHCPage: HCPhys=%RHp pv=%p iPage=%d\n", HCPhys, pv, iPage));
2190 return VINF_SUCCESS;
2191}
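
/*
 * Usage sketch (illustrative only, not part of the original source; HCPhysPage
 * and abBuf are hypothetical): in RC a host-physical page is mapped the same
 * way; since only eight slots exist and they are recycled round-robin, copy
 * the data out before requesting more mappings.
 *
 *     void *pv;
 *     int rc2 = PGMDynMapHCPage(pVM, HCPhysPage & ~(RTHCPHYS)PAGE_OFFSET_MASK, &pv);
 *     if (RT_SUCCESS(rc2))
 *         memcpy(abBuf, pv, PAGE_SIZE);
 */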
2192# endif /* IN_RC */
2193
2194#endif /* IN_RC || VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
2195#ifdef VBOX_STRICT
2196
2197/**
2198 * Asserts that there are no mapping conflicts.
2199 *
2200 * @returns Number of conflicts.
2201 * @param pVM The VM Handle.
2202 */
2203VMMDECL(unsigned) PGMAssertNoMappingConflicts(PVM pVM)
2204{
2205 unsigned cErrors = 0;
2206
2207 /*
2208 * Check for mapping conflicts.
2209 */
2210 for (PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
2211 pMapping;
2212 pMapping = pMapping->CTX_SUFF(pNext))
2213 {
2214 /** @todo This is slow and should be optimized, but since it's just assertions I don't care now. */
2215 for (RTGCPTR GCPtr = pMapping->GCPtr;
2216 GCPtr <= pMapping->GCPtrLast;
2217 GCPtr += PAGE_SIZE)
2218 {
2219 int rc = PGMGstGetPage(pVM, (RTGCPTR)GCPtr, NULL, NULL);
2220 if (rc != VERR_PAGE_TABLE_NOT_PRESENT)
2221 {
2222 AssertMsgFailed(("Conflict at %RGv with %s\n", GCPtr, R3STRING(pMapping->pszDesc)));
2223 cErrors++;
2224 break;
2225 }
2226 }
2227 }
2228
2229 return cErrors;
2230}
2231
2232
2233/**
2234 * Asserts that everything related to the guest CR3 is correctly shadowed.
2235 *
2236 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
2237 * and assert the correctness of the guest CR3 mapping before asserting that the
2238 * shadow page tables are in sync with the guest page tables.
2239 *
2240 * @returns Number of conflicts.
2241 * @param pVM The VM Handle.
2242 * @param cr3 The current guest CR3 register value.
2243 * @param cr4 The current guest CR4 register value.
2244 */
2245VMMDECL(unsigned) PGMAssertCR3(PVM pVM, uint64_t cr3, uint64_t cr4)
2246{
2247 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
2248 unsigned cErrors = PGM_BTH_PFN(AssertCR3, pVM)(pVM, cr3, cr4, 0, ~(RTGCPTR)0);
2249 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
2250 return cErrors;
2252}
2253
2254#endif /* VBOX_STRICT */