VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp@16462

Last change on this file since 16462 was 16428, checked in by vboxsync, 16 years ago

VBOX_WITH_PGMPOOL_PAGING_ONLY: cleaned up

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 70.5 KB
1/* $Id: PGMAll.cpp 16428 2009-01-30 16:49:19Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor - All context code.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#define LOG_GROUP LOG_GROUP_PGM
26#include <VBox/pgm.h>
27#include <VBox/cpum.h>
28#include <VBox/selm.h>
29#include <VBox/iom.h>
30#include <VBox/sup.h>
31#include <VBox/mm.h>
32#include <VBox/stam.h>
33#include <VBox/csam.h>
34#include <VBox/patm.h>
35#include <VBox/trpm.h>
36#include <VBox/rem.h>
37#include <VBox/em.h>
38#include <VBox/hwaccm.h>
39#include <VBox/hwacc_vmx.h>
40#include "PGMInternal.h"
41#include <VBox/vm.h>
42#include <iprt/assert.h>
43#include <iprt/asm.h>
44#include <iprt/string.h>
45#include <VBox/log.h>
46#include <VBox/param.h>
47#include <VBox/err.h>
48
49
50/*******************************************************************************
51* Structures and Typedefs *
52*******************************************************************************/
53/**
54 * State structure for PGM_GST_NAME(HandlerVirtualUpdate) that's
55 * passed to PGM_GST_NAME(VirtHandlerUpdateOne) during enumeration.
56 */
57typedef struct PGMHVUSTATE
58{
59 /** The VM handle. */
60 PVM pVM;
61 /** The todo flags. */
62 RTUINT fTodo;
63 /** The CR4 register value. */
64 uint32_t cr4;
65} PGMHVUSTATE, *PPGMHVUSTATE;
66
67
68/*******************************************************************************
69* Internal Functions *
70*******************************************************************************/
71DECLINLINE(int) pgmShwGetLongModePDPtr(PVM pVM, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
72DECLINLINE(int) pgmShwSyncLongModePDPtr(PVM pVM, RTGCPTR64 GCPtr, PX86PML4E pGstPml4e, PX86PDPE pGstPdpe, PX86PDPAE *ppPD);
73DECLINLINE(int) pgmShwGetEPTPDPtr(PVM pVM, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD);
74DECLINLINE(int) pgmShwGetPAEPDPtr(PVM pVM, RTGCPTR GCPtr, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
75#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
76DECLINLINE(int) pgmShwGetPaePoolPagePD(PPGM pPGM, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde);
77DECLINLINE(int) pgmShwSyncPaePDPtr(PVM pVM, RTGCPTR GCPtr, PX86PDPE pGstPdpe, PX86PDPAE *ppPD);
78#endif
79
80/*
81 * Shadow - 32-bit mode
82 */
83#define PGM_SHW_TYPE PGM_TYPE_32BIT
84#define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
85#include "PGMAllShw.h"
86
87/* Guest - real mode */
88#define PGM_GST_TYPE PGM_TYPE_REAL
89#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
90#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
91#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
92#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
93#include "PGMAllGst.h"
94#include "PGMAllBth.h"
95#undef BTH_PGMPOOLKIND_PT_FOR_PT
96#undef BTH_PGMPOOLKIND_ROOT
97#undef PGM_BTH_NAME
98#undef PGM_GST_TYPE
99#undef PGM_GST_NAME
100
101/* Guest - protected mode */
102#define PGM_GST_TYPE PGM_TYPE_PROT
103#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
104#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
105#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
106#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
107#include "PGMAllGst.h"
108#include "PGMAllBth.h"
109#undef BTH_PGMPOOLKIND_PT_FOR_PT
110#undef BTH_PGMPOOLKIND_ROOT
111#undef PGM_BTH_NAME
112#undef PGM_GST_TYPE
113#undef PGM_GST_NAME
114
115/* Guest - 32-bit mode */
116#define PGM_GST_TYPE PGM_TYPE_32BIT
117#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
118#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
119#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
120#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
121#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD
122#include "PGMAllGst.h"
123#include "PGMAllBth.h"
124#undef BTH_PGMPOOLKIND_PT_FOR_BIG
125#undef BTH_PGMPOOLKIND_PT_FOR_PT
126#undef BTH_PGMPOOLKIND_ROOT
127#undef PGM_BTH_NAME
128#undef PGM_GST_TYPE
129#undef PGM_GST_NAME
130
131#undef PGM_SHW_TYPE
132#undef PGM_SHW_NAME
133
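/*
 * Editor's note (not part of the original file): the instantiation blocks above
 * and below compile the same template headers (PGMAllShw.h, PGMAllGst.h,
 * PGMAllBth.h) once per shadow/guest paging-mode combination by redefining the
 * name-mangling macros before each #include.  A minimal sketch of the scheme,
 * using hypothetical macro bodies (the real ones live in PGMInternal.h):
 *
 * @code
 *  // Hypothetical illustration only:
 *  #define PGM_SHW_NAME_32BIT(name)    pgmShw32Bit##name
 *  #define PGM_SHW_NAME(name)          PGM_SHW_NAME_32BIT(name)
 *  // PGMAllShw.h defines its functions as PGM_SHW_NAME(GetPage) etc., which
 *  // the preprocessor expands to pgmShw32BitGetPage for this instantiation.
 *  #include "PGMAllShw.h"
 *  #undef  PGM_SHW_NAME
 *  #undef  PGM_SHW_NAME_32BIT
 * @endcode
 */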
134
135/*
136 * Shadow - PAE mode
137 */
138#define PGM_SHW_TYPE PGM_TYPE_PAE
139#define PGM_SHW_NAME(name) PGM_SHW_NAME_PAE(name)
140#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
141#include "PGMAllShw.h"
142
143/* Guest - real mode */
144#define PGM_GST_TYPE PGM_TYPE_REAL
145#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
146#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
147#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
148#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
149#include "PGMAllBth.h"
150#undef BTH_PGMPOOLKIND_PT_FOR_PT
151#undef BTH_PGMPOOLKIND_ROOT
152#undef PGM_BTH_NAME
153#undef PGM_GST_TYPE
154#undef PGM_GST_NAME
155
156/* Guest - protected mode */
157#define PGM_GST_TYPE PGM_TYPE_PROT
158#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
159#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
160#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
161#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
162#include "PGMAllBth.h"
163#undef BTH_PGMPOOLKIND_PT_FOR_PT
164#undef BTH_PGMPOOLKIND_ROOT
165#undef PGM_BTH_NAME
166#undef PGM_GST_TYPE
167#undef PGM_GST_NAME
168
169/* Guest - 32-bit mode */
170#define PGM_GST_TYPE PGM_TYPE_32BIT
171#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
172#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
173#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
174#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
175#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_FOR_32BIT
176#include "PGMAllBth.h"
177#undef BTH_PGMPOOLKIND_PT_FOR_BIG
178#undef BTH_PGMPOOLKIND_PT_FOR_PT
179#undef BTH_PGMPOOLKIND_ROOT
180#undef PGM_BTH_NAME
181#undef PGM_GST_TYPE
182#undef PGM_GST_NAME
183
184
185/* Guest - PAE mode */
186#define PGM_GST_TYPE PGM_TYPE_PAE
187#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
188#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
189#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
190#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
191#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT
192#include "PGMAllGst.h"
193#include "PGMAllBth.h"
194#undef BTH_PGMPOOLKIND_PT_FOR_BIG
195#undef BTH_PGMPOOLKIND_PT_FOR_PT
196#undef BTH_PGMPOOLKIND_ROOT
197#undef PGM_BTH_NAME
198#undef PGM_GST_TYPE
199#undef PGM_GST_NAME
200
201#undef PGM_SHW_TYPE
202#undef PGM_SHW_NAME
203
204
205#ifndef IN_RC /* AMD64 implies VT-x/AMD-V */
206/*
207 * Shadow - AMD64 mode
208 */
209# define PGM_SHW_TYPE PGM_TYPE_AMD64
210# define PGM_SHW_NAME(name) PGM_SHW_NAME_AMD64(name)
211# include "PGMAllShw.h"
212
213/* Guest - protected mode (only used for AMD-V nested paging in 64 bits mode) */
214# define PGM_GST_TYPE PGM_TYPE_PROT
215# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
216# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
217# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
218# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PD_PHYS
219# include "PGMAllBth.h"
220# undef BTH_PGMPOOLKIND_PT_FOR_PT
221# undef BTH_PGMPOOLKIND_ROOT
222# undef PGM_BTH_NAME
223# undef PGM_GST_TYPE
224# undef PGM_GST_NAME
225
226# ifdef VBOX_WITH_64_BITS_GUESTS
227/* Guest - AMD64 mode */
228# define PGM_GST_TYPE PGM_TYPE_AMD64
229# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
230# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
231# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
232# define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
233# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_64BIT_PML4
234# include "PGMAllGst.h"
235# include "PGMAllBth.h"
236# undef BTH_PGMPOOLKIND_PT_FOR_BIG
237# undef BTH_PGMPOOLKIND_PT_FOR_PT
238# undef BTH_PGMPOOLKIND_ROOT
239# undef PGM_BTH_NAME
240# undef PGM_GST_TYPE
241# undef PGM_GST_NAME
242# endif /* VBOX_WITH_64_BITS_GUESTS */
243
244# undef PGM_SHW_TYPE
245# undef PGM_SHW_NAME
246
247
248/*
249 * Shadow - Nested paging mode
250 */
251# define PGM_SHW_TYPE PGM_TYPE_NESTED
252# define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED(name)
253# include "PGMAllShw.h"
254
255/* Guest - real mode */
256# define PGM_GST_TYPE PGM_TYPE_REAL
257# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
258# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_REAL(name)
259# include "PGMAllBth.h"
260# undef PGM_BTH_NAME
261# undef PGM_GST_TYPE
262# undef PGM_GST_NAME
263
264/* Guest - protected mode */
265# define PGM_GST_TYPE PGM_TYPE_PROT
266# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
267# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PROT(name)
268# include "PGMAllBth.h"
269# undef PGM_BTH_NAME
270# undef PGM_GST_TYPE
271# undef PGM_GST_NAME
272
273/* Guest - 32-bit mode */
274# define PGM_GST_TYPE PGM_TYPE_32BIT
275# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
276# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT(name)
277# include "PGMAllBth.h"
278# undef PGM_BTH_NAME
279# undef PGM_GST_TYPE
280# undef PGM_GST_NAME
281
282/* Guest - PAE mode */
283# define PGM_GST_TYPE PGM_TYPE_PAE
284# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
285# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE(name)
286# include "PGMAllBth.h"
287# undef PGM_BTH_NAME
288# undef PGM_GST_TYPE
289# undef PGM_GST_NAME
290
291# ifdef VBOX_WITH_64_BITS_GUESTS
292/* Guest - AMD64 mode */
293# define PGM_GST_TYPE PGM_TYPE_AMD64
294# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
295# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64(name)
296# include "PGMAllBth.h"
297# undef PGM_BTH_NAME
298# undef PGM_GST_TYPE
299# undef PGM_GST_NAME
300# endif /* VBOX_WITH_64_BITS_GUESTS */
301
302# undef PGM_SHW_TYPE
303# undef PGM_SHW_NAME
304
305
306/*
307 * Shadow - EPT
308 */
309# define PGM_SHW_TYPE PGM_TYPE_EPT
310# define PGM_SHW_NAME(name) PGM_SHW_NAME_EPT(name)
311# include "PGMAllShw.h"
312
313/* Guest - real mode */
314# define PGM_GST_TYPE PGM_TYPE_REAL
315# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
316# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_REAL(name)
317# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
318# include "PGMAllBth.h"
319# undef BTH_PGMPOOLKIND_PT_FOR_PT
320# undef PGM_BTH_NAME
321# undef PGM_GST_TYPE
322# undef PGM_GST_NAME
323
324/* Guest - protected mode */
325# define PGM_GST_TYPE PGM_TYPE_PROT
326# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
327# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
328# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
329# include "PGMAllBth.h"
330# undef BTH_PGMPOOLKIND_PT_FOR_PT
331# undef PGM_BTH_NAME
332# undef PGM_GST_TYPE
333# undef PGM_GST_NAME
334
335/* Guest - 32-bit mode */
336# define PGM_GST_TYPE PGM_TYPE_32BIT
337# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
338# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_32BIT(name)
339# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
340# include "PGMAllBth.h"
341# undef BTH_PGMPOOLKIND_PT_FOR_PT
342# undef PGM_BTH_NAME
343# undef PGM_GST_TYPE
344# undef PGM_GST_NAME
345
346/* Guest - PAE mode */
347# define PGM_GST_TYPE PGM_TYPE_PAE
348# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
349# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PAE(name)
350# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
351# include "PGMAllBth.h"
352# undef BTH_PGMPOOLKIND_PT_FOR_PT
353# undef PGM_BTH_NAME
354# undef PGM_GST_TYPE
355# undef PGM_GST_NAME
356
357# ifdef VBOX_WITH_64_BITS_GUESTS
358/* Guest - AMD64 mode */
359# define PGM_GST_TYPE PGM_TYPE_AMD64
360# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
361# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_AMD64(name)
362# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
363# include "PGMAllBth.h"
364# undef BTH_PGMPOOLKIND_PT_FOR_PT
365# undef PGM_BTH_NAME
366# undef PGM_GST_TYPE
367# undef PGM_GST_NAME
368# endif /* VBOX_WITH_64_BITS_GUESTS */
369
370# undef PGM_SHW_TYPE
371# undef PGM_SHW_NAME
372
373#endif /* !IN_RC */
374
375
376#ifndef IN_RING3
377/**
378 * #PF Handler.
379 *
380 * @returns VBox status code (appropriate for trap handling and GC return).
381 * @param pVM VM Handle.
382 * @param uErr The trap error code.
383 * @param pRegFrame Trap register frame.
384 * @param pvFault The fault address.
385 */
386VMMDECL(int) PGMTrap0eHandler(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
387{
388 LogFlow(("PGMTrap0eHandler: uErr=%RGu pvFault=%RGv eip=%RGv\n", uErr, pvFault, (RTGCPTR)pRegFrame->rip));
389 STAM_PROFILE_START(&pVM->pgm.s.StatRZTrap0e, a);
390 STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );
391
392
393#ifdef VBOX_WITH_STATISTICS
394 /*
395 * Error code stats.
396 */
397 if (uErr & X86_TRAP_PF_US)
398 {
399 if (!(uErr & X86_TRAP_PF_P))
400 {
401 if (uErr & X86_TRAP_PF_RW)
402 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSNotPresentWrite);
403 else
404 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSNotPresentRead);
405 }
406 else if (uErr & X86_TRAP_PF_RW)
407 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSWrite);
408 else if (uErr & X86_TRAP_PF_RSVD)
409 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSReserved);
410 else if (uErr & X86_TRAP_PF_ID)
411 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSNXE);
412 else
413 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSRead);
414 }
415 else
416 { /* Supervisor */
417 if (!(uErr & X86_TRAP_PF_P))
418 {
419 if (uErr & X86_TRAP_PF_RW)
420 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eSVNotPresentWrite);
421 else
422 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eSVNotPresentRead);
423 }
424 else if (uErr & X86_TRAP_PF_RW)
425 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eSVWrite);
426 else if (uErr & X86_TRAP_PF_ID)
427 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eSNXE);
428 else if (uErr & X86_TRAP_PF_RSVD)
429 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eSVReserved);
430 }
431#endif /* VBOX_WITH_STATISTICS */
432
433 /*
434 * Call the worker.
435 */
436 int rc = PGM_BTH_PFN(Trap0eHandler, pVM)(pVM, uErr, pRegFrame, pvFault);
437 if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
438 rc = VINF_SUCCESS;
439 STAM_STATS({ if (rc == VINF_EM_RAW_GUEST_TRAP) STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eGuestPF); });
440 STAM_STATS({ if (!pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution))
441 pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatRZTrap0eTime2Misc; });
442 STAM_PROFILE_STOP_EX(&pVM->pgm.s.StatRZTrap0e, pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution), a);
443 return rc;
444}
445#endif /* !IN_RING3 */
446
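/*
 * Editor's example (not part of the original file): a minimal, hypothetical
 * call site for PGMTrap0eHandler.  The dispatcher name and surrounding trap
 * plumbing are made up; only the PGMTrap0eHandler signature and the
 * VINF_EM_RAW_GUEST_TRAP status come from the code above.
 *
 * @code
 *  static int pgmExampleDispatchPageFault(PVM pVM, PCPUMCTXCORE pRegFrame,
 *                                         RTGCUINT uErrCode, RTGCPTR GCPtrFault)
 *  {
 *      int rc = PGMTrap0eHandler(pVM, uErrCode, pRegFrame, GCPtrFault);
 *      if (rc == VINF_EM_RAW_GUEST_TRAP)
 *          Log(("Example: #PF at %RGv belongs to the guest\n", GCPtrFault));
 *      return rc; // pass the scheduling/status code straight back to the caller
 *  }
 * @endcode
 */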
447
448/**
449 * Prefetch a page
450 *
451 * Typically used to sync commonly used pages before entering raw mode
452 * after a CR3 reload.
453 *
454 * @returns VBox status code suitable for scheduling.
455 * @retval VINF_SUCCESS on success.
456 * @retval VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
457 * @param pVM VM handle.
458 * @param GCPtrPage Page to prefetch.
459 */
460VMMDECL(int) PGMPrefetchPage(PVM pVM, RTGCPTR GCPtrPage)
461{
462 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,Prefetch), a);
463 int rc = PGM_BTH_PFN(PrefetchPage, pVM)(pVM, GCPtrPage);
464 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,Prefetch), a);
465 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
466 return rc;
467}
468
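/*
 * Editor's example (not part of the original file): a hypothetical sketch of
 * prefetching the page under the guest instruction pointer before re-entering
 * raw mode; pCtxCore stands in for a caller-provided register frame.
 *
 * @code
 *  static int pgmExamplePrefetchRip(PVM pVM, PCPUMCTXCORE pCtxCore)
 *  {
 *      int rc = PGMPrefetchPage(pVM, (RTGCPTR)pCtxCore->rip);
 *      if (rc == VINF_PGM_SYNC_CR3)
 *          Log(("Example: out of shadow pages, a CR3 sync is pending\n"));
 *      return rc;
 *  }
 * @endcode
 */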
469
470/**
471 * Gets the mapping corresponding to the specified address (if any).
472 *
473 * @returns Pointer to the mapping.
474 * @returns NULL if not found.
475 *
476 * @param pVM The virtual machine.
477 * @param GCPtr The guest context pointer.
478 */
479PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr)
480{
481 PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
482 while (pMapping)
483 {
484 if ((uintptr_t)GCPtr < (uintptr_t)pMapping->GCPtr)
485 break;
486 if ((uintptr_t)GCPtr - (uintptr_t)pMapping->GCPtr < pMapping->cb)
487 return pMapping;
488 pMapping = pMapping->CTX_SUFF(pNext);
489 }
490 return NULL;
491}
492
493
494/**
495 * Verifies a range of pages for read or write access
496 *
497 * Only checks the guest's page tables
498 *
499 * @returns VBox status code.
500 * @param pVM VM handle.
501 * @param Addr Guest virtual address to check
502 * @param cbSize Access size
503 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
504 * @remarks Currently not in use.
505 */
506VMMDECL(int) PGMIsValidAccess(PVM pVM, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
507{
508 /*
509 * Validate input.
510 */
511 if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
512 {
513 AssertMsgFailed(("PGMIsValidAccess: invalid access type %08x\n", fAccess));
514 return VERR_INVALID_PARAMETER;
515 }
516
517 uint64_t fPage;
518 int rc = PGMGstGetPage(pVM, (RTGCPTR)Addr, &fPage, NULL);
519 if (RT_FAILURE(rc))
520 {
521 Log(("PGMIsValidAccess: access violation for %RGv rc=%d\n", Addr, rc));
522 return VINF_EM_RAW_GUEST_TRAP;
523 }
524
525 /*
526 * Check if the access would cause a page fault
527 *
528 * Note that hypervisor page directories are not present in the guest's tables, so this check
529 * is sufficient.
530 */
531 bool fWrite = !!(fAccess & X86_PTE_RW);
532 bool fUser = !!(fAccess & X86_PTE_US);
533 if ( !(fPage & X86_PTE_P)
534 || (fWrite && !(fPage & X86_PTE_RW))
535 || (fUser && !(fPage & X86_PTE_US)) )
536 {
537 Log(("PGMIsValidAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPage, fWrite, fUser));
538 return VINF_EM_RAW_GUEST_TRAP;
539 }
540 if ( RT_SUCCESS(rc)
541 && PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize))
542 return PGMIsValidAccess(pVM, Addr + PAGE_SIZE, (cbSize > PAGE_SIZE) ? cbSize - PAGE_SIZE : 1, fAccess);
543 return rc;
544}
545
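/*
 * Editor's example (not part of the original file): checking whether the guest
 * could write a buffer from user mode without faulting, consulting only the
 * guest page tables.  Function name and parameters are illustrative.
 *
 * @code
 *  static bool pgmExampleIsUserWritable(PVM pVM, RTGCPTR GCPtrBuf, uint32_t cbBuf)
 *  {
 *      int rc = PGMIsValidAccess(pVM, GCPtrBuf, cbBuf, X86_PTE_RW | X86_PTE_US);
 *      return rc == VINF_SUCCESS; // VINF_EM_RAW_GUEST_TRAP means the access would fault
 *  }
 * @endcode
 */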
546
547/**
548 * Verifies a range of pages for read or write access
549 *
550 * Supports handling of pages marked for dirty bit tracking and CSAM
551 *
552 * @returns VBox status code.
553 * @param pVM VM handle.
554 * @param Addr Guest virtual address to check
555 * @param cbSize Access size
556 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
557 */
558VMMDECL(int) PGMVerifyAccess(PVM pVM, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
559{
560 AssertMsg(!(fAccess & ~(X86_PTE_US | X86_PTE_RW)), ("PGMVerifyAccess: invalid access type %08x\n", fAccess));
561
562 /*
563 * Get going.
564 */
565 uint64_t fPageGst;
566 int rc = PGMGstGetPage(pVM, (RTGCPTR)Addr, &fPageGst, NULL);
567 if (RT_FAILURE(rc))
568 {
569 Log(("PGMVerifyAccess: access violation for %RGv rc=%d\n", Addr, rc));
570 return VINF_EM_RAW_GUEST_TRAP;
571 }
572
573 /*
574 * Check if the access would cause a page fault
575 *
576 * Note that hypervisor page directories are not present in the guest's tables, so this check
577 * is sufficient.
578 */
579 const bool fWrite = !!(fAccess & X86_PTE_RW);
580 const bool fUser = !!(fAccess & X86_PTE_US);
581 if ( !(fPageGst & X86_PTE_P)
582 || (fWrite && !(fPageGst & X86_PTE_RW))
583 || (fUser && !(fPageGst & X86_PTE_US)) )
584 {
585 Log(("PGMVerifyAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPageGst, fWrite, fUser));
586 return VINF_EM_RAW_GUEST_TRAP;
587 }
588
589 if (!HWACCMIsNestedPagingActive(pVM))
590 {
591 /*
592 * Next step is to verify if we protected this page for dirty bit tracking or for CSAM scanning
593 */
594 rc = PGMShwGetPage(pVM, (RTGCPTR)Addr, NULL, NULL);
595 if ( rc == VERR_PAGE_NOT_PRESENT
596 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
597 {
598 /*
599 * Page is not present in our page tables.
600 * Try to sync it!
601 */
602 Assert(X86_TRAP_PF_RW == X86_PTE_RW && X86_TRAP_PF_US == X86_PTE_US);
603 uint32_t uErr = fAccess & (X86_TRAP_PF_RW | X86_TRAP_PF_US);
604 rc = PGM_BTH_PFN(VerifyAccessSyncPage, pVM)(pVM, Addr, fPageGst, uErr);
605 if (rc != VINF_SUCCESS)
606 return rc;
607 }
608 else
609 AssertMsg(rc == VINF_SUCCESS, ("PGMShwGetPage %RGv failed with %Rrc\n", Addr, rc));
610 }
611
612#if 0 /* def VBOX_STRICT; triggers too often now */
613 /*
614 * This check is a bit paranoid, but useful.
615 */
616 /** @note this will assert when writing to monitored pages (a bit annoying actually) */
617 uint64_t fPageShw;
618 rc = PGMShwGetPage(pVM, (RTGCPTR)Addr, &fPageShw, NULL);
619 if ( (rc == VERR_PAGE_NOT_PRESENT || RT_FAILURE(rc))
620 || (fWrite && !(fPageShw & X86_PTE_RW))
621 || (fUser && !(fPageShw & X86_PTE_US)) )
622 {
623 AssertMsgFailed(("Unexpected access violation for %RGv! rc=%Rrc write=%d user=%d\n",
624 Addr, rc, fWrite && !(fPageShw & X86_PTE_RW), fUser && !(fPageShw & X86_PTE_US)));
625 return VINF_EM_RAW_GUEST_TRAP;
626 }
627#endif
628
629 if ( RT_SUCCESS(rc)
630 && ( PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize - 1)
631 || Addr + cbSize < Addr))
632 {
633 /* Don't recursively call PGMVerifyAccess as we might run out of stack. */
634 for (;;)
635 {
636 Addr += PAGE_SIZE;
637 if (cbSize > PAGE_SIZE)
638 cbSize -= PAGE_SIZE;
639 else
640 cbSize = 1;
641 rc = PGMVerifyAccess(pVM, Addr, 1, fAccess);
642 if (rc != VINF_SUCCESS)
643 break;
644 if (PAGE_ADDRESS(Addr) == PAGE_ADDRESS(Addr + cbSize - 1))
645 break;
646 }
647 }
648 return rc;
649}
650
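/*
 * Editor's example (not part of the original file): unlike PGMIsValidAccess,
 * PGMVerifyAccess also resyncs pages that were write-protected for dirty bit
 * tracking or CSAM, so it is the variant to use before the VMM itself touches
 * guest memory.  A hypothetical wrapper:
 *
 * @code
 *  static int pgmExampleEnsureWritable(PVM pVM, RTGCPTR GCPtrDst, uint32_t cbWrite)
 *  {
 *      // Supervisor write; OR in X86_PTE_US to model a user-mode access instead.
 *      return PGMVerifyAccess(pVM, GCPtrDst, cbWrite, X86_PTE_RW);
 *  }
 * @endcode
 */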
651
652/**
653 * Emulation of the invlpg instruction (HC only actually).
654 *
655 * @returns VBox status code, special care required.
656 * @retval VINF_PGM_SYNC_CR3 - handled.
657 * @retval VINF_EM_RAW_EMULATE_INSTR - not handled (RC only).
658 * @retval VERR_REM_FLUSHED_PAGES_OVERFLOW - not handled.
659 *
660 * @param pVM VM handle.
661 * @param GCPtrPage Page to invalidate.
662 *
663 * @remark ASSUMES the page table entry or page directory is valid. Fairly
664 * safe, but there could be edge cases!
665 *
666 * @todo Flush page or page directory only if necessary!
667 */
668VMMDECL(int) PGMInvalidatePage(PVM pVM, RTGCPTR GCPtrPage)
669{
670 int rc;
671 Log3(("PGMInvalidatePage: GCPtrPage=%RGv\n", GCPtrPage));
672
673#ifndef IN_RING3
674 /*
675 * Notify the recompiler so it can record this instruction.
676 * Failure happens when it's out of space. We'll return to HC in that case.
677 */
678 rc = REMNotifyInvalidatePage(pVM, GCPtrPage);
679 if (rc != VINF_SUCCESS)
680 return rc;
681#endif /* !IN_RING3 */
682
683
684#ifdef IN_RC
685 /*
686 * Check for conflicts and pending CR3 monitoring updates.
687 */
688 if (!pVM->pgm.s.fMappingsFixed)
689 {
690 if ( pgmGetMapping(pVM, GCPtrPage)
691 && PGMGstGetPage(pVM, GCPtrPage, NULL, NULL) != VERR_PAGE_TABLE_NOT_PRESENT)
692 {
693 LogFlow(("PGMGCInvalidatePage: Conflict!\n"));
694 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
695 STAM_COUNTER_INC(&pVM->pgm.s.StatRCInvlPgConflict);
696 return VINF_PGM_SYNC_CR3;
697 }
698
699 if (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
700 {
701 LogFlow(("PGMGCInvalidatePage: PGM_SYNC_MONITOR_CR3 -> reinterpret instruction in R3\n"));
702 STAM_COUNTER_INC(&pVM->pgm.s.StatRCInvlPgSyncMonCR3);
703 return VINF_EM_RAW_EMULATE_INSTR;
704 }
705 }
706#endif /* IN_RC */
707
708 /*
709 * Call paging mode specific worker.
710 */
711 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePage), a);
712 rc = PGM_BTH_PFN(InvalidatePage, pVM)(pVM, GCPtrPage);
713 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePage), a);
714
715#ifdef IN_RING3
716 /*
717 * Check if we have a pending update of the CR3 monitoring.
718 */
719 if ( RT_SUCCESS(rc)
720 && (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3))
721 {
722 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
723 Assert(!pVM->pgm.s.fMappingsFixed);
724 Assert(pVM->pgm.s.GCPhysCR3 == pVM->pgm.s.GCPhysGstCR3Monitored);
725#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
726 rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, pVM->pgm.s.GCPhysCR3);
727#endif
728 }
729
730 /*
731 * Inform CSAM about the flush
732 *
733 * Note: This is to check if monitored pages have been changed; when we implement
734 * callbacks for virtual handlers, this is no longer required.
735 */
736 CSAMR3FlushPage(pVM, GCPtrPage);
737#endif /* IN_RING3 */
738 return rc;
739}
740
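/*
 * Editor's example (not part of the original file): a hypothetical INVLPG
 * emulation path.  Instruction decoding is assumed to have happened already;
 * only PGMInvalidatePage and its special status codes come from the code above.
 *
 * @code
 *  static int pgmExampleEmulateInvlpg(PVM pVM, RTGCPTR GCPtrPage)
 *  {
 *      int rc = PGMInvalidatePage(pVM, GCPtrPage);
 *      if (rc == VINF_PGM_SYNC_CR3)
 *          Log(("Example: invlpg escalated to a CR3 sync, FF already set\n"));
 *      return rc; // VINF_EM_RAW_EMULATE_INSTR etc. are simply passed on
 *  }
 * @endcode
 */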
741
742/**
743 * Executes an instruction using the interpreter.
744 *
745 * @returns VBox status code (appropriate for trap handling and GC return).
746 * @param pVM VM handle.
747 * @param pRegFrame Register frame.
748 * @param pvFault Fault address.
749 */
750VMMDECL(int) PGMInterpretInstruction(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
751{
752 uint32_t cb;
753 int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);
754 if (rc == VERR_EM_INTERPRETER)
755 rc = VINF_EM_RAW_EMULATE_INSTR;
756 if (rc != VINF_SUCCESS)
757 Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%RGv)\n", rc, pvFault));
758 return rc;
759}
760
761
762/**
763 * Gets effective page information (from the VMM page directory).
764 *
765 * @returns VBox status.
766 * @param pVM VM Handle.
767 * @param GCPtr Guest Context virtual address of the page.
768 * @param pfFlags Where to store the flags. These are X86_PTE_*.
769 * @param pHCPhys Where to store the HC physical address of the page.
770 * This is page aligned.
771 * @remark You should use PGMMapGetPage() for pages in a mapping.
772 */
773VMMDECL(int) PGMShwGetPage(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
774{
775 return PGM_SHW_PFN(GetPage,pVM)(pVM, GCPtr, pfFlags, pHCPhys);
776}
777
778
779/**
780 * Sets (replaces) the page flags for a range of pages in the shadow context.
781 *
782 * @returns VBox status.
783 * @param pVM VM handle.
784 * @param GCPtr The address of the first page.
785 * @param cb The size of the range in bytes.
786 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
787 * @remark You must use PGMMapSetPage() for pages in a mapping.
788 */
789VMMDECL(int) PGMShwSetPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
790{
791 return PGMShwModifyPage(pVM, GCPtr, cb, fFlags, 0);
792}
793
794
795/**
796 * Modify page flags for a range of pages in the shadow context.
797 *
798 * The existing flags are ANDed with the fMask and ORed with the fFlags.
799 *
800 * @returns VBox status code.
801 * @param pVM VM handle.
802 * @param GCPtr Virtual address of the first page in the range.
803 * @param cb Size (in bytes) of the range to apply the modification to.
804 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
805 * @param fMask The AND mask - page flags X86_PTE_*.
806 * Be very CAREFUL when ~'ing constants which could be 32-bit!
807 * @remark You must use PGMMapModifyPage() for pages in a mapping.
808 */
809VMMDECL(int) PGMShwModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
810{
811 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
812 Assert(cb);
813
814 /*
815 * Align the input.
816 */
817 cb += GCPtr & PAGE_OFFSET_MASK;
818 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
819 GCPtr = (GCPtr & PAGE_BASE_GC_MASK); /** @todo this ain't necessary, right... */
820
821 /*
822 * Call worker.
823 */
824 return PGM_SHW_PFN(ModifyPage, pVM)(pVM, GCPtr, cb, fFlags, fMask);
825}
826
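/*
 * Editor's example (not part of the original file): write-protecting a range in
 * the shadow page tables by clearing X86_PTE_RW.  The new flags are computed as
 * (old & fMask) | fFlags; note the 64-bit ~ so the mask is not truncated.
 *
 * @code
 *  static int pgmExampleShwWriteProtect(PVM pVM, RTGCPTR GCPtr, size_t cb)
 *  {
 *      return PGMShwModifyPage(pVM, GCPtr, cb, 0, ~(uint64_t)X86_PTE_RW);
 *  }
 * @endcode
 */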
827
828/**
829 * Gets the SHADOW page directory pointer for the specified address.
830 *
831 * @returns VBox status.
832 * @param pVM VM handle.
833 * @param GCPtr The address.
834 * @param ppPdpt Receives address of pdpt
835 * @param ppPD Receives address of page directory
836 * @remarks Unused.
837 */
838DECLINLINE(int) pgmShwGetPAEPDPtr(PVM pVM, RTGCPTR GCPtr, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
839{
840 PPGM pPGM = &pVM->pgm.s;
841 PPGMPOOL pPool = pPGM->CTX_SUFF(pPool);
842 PPGMPOOLPAGE pShwPage;
843
844 Assert(!HWACCMIsNestedPagingActive(pVM));
845
846 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
847 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
848 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
849
850 *ppPdpt = pPdpt;
851 if (!pPdpe->n.u1Present)
852 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
853
854 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
855 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
856
857 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
858 return VINF_SUCCESS;
859}
860
861#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
862
863/**
864 * Syncs (gets or allocates) the shadow page directory for the specified address, PAE.
865 *
866 * @returns VBox status code.
867 * @param pVM VM handle.
868 * @param GCPtr The address.
869 * @param pGstPdpe Guest PDPT entry
870 * @param ppPD Receives address of page directory
871 */
872DECLINLINE(int) pgmShwSyncPaePDPtr(PVM pVM, RTGCPTR GCPtr, PX86PDPE pGstPdpe, PX86PDPAE *ppPD)
873{
874 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
875 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
876 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
877 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
878 bool fNestedPaging = HWACCMIsNestedPagingActive(pVM);
879 bool fPaging = !!(CPUMGetGuestCR0(pVM) & X86_CR0_PG);
880 PPGMPOOLPAGE pShwPage;
881 int rc;
882
883 /* Allocate page directory if not present. */
884 if ( !pPdpe->n.u1Present
885 && !(pPdpe->u & X86_PDPE_PG_MASK))
886 {
887 if (!fNestedPaging)
888 {
889 Assert(pGstPdpe);
890 Assert(!(pPdpe->u & X86_PDPE_PG_MASK));
891 /* Create a reference back to the PDPT by using the index in its shadow page. */
892 rc = pgmPoolAlloc(pVM, pGstPdpe->u & X86_PDPE_PG_MASK, PGMPOOLKIND_PAE_PD_FOR_PAE_PD, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPdPt, &pShwPage);
893 }
894 else
895 {
896 /* AMD-V nested paging or real/protected mode without paging */
897 RTGCPTR64 GCPdPt = (RTGCPTR64)iPdPt << EPT_PDPT_SHIFT;
898
899 rc = pgmPoolAlloc(pVM, GCPdPt + RT_BIT_64(62) /* hack: make the address unique */, PGMPOOLKIND_PAE_PD_PHYS, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPdPt, &pShwPage);
900 }
901
902 if (rc == VERR_PGM_POOL_FLUSHED)
903 {
904 Log(("pgmShwSyncPaePDPtr: PGM pool flushed -> signal sync cr3\n"));
905 Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
906 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
907 return VINF_PGM_SYNC_CR3;
908 }
909 AssertRCReturn(rc, rc);
910 }
911 else
912 {
913 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
914 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
915 }
916 /* The PD was cached or created; hook it up now. */
917 pPdpe->u |= pShwPage->Core.Key
918 | (pGstPdpe->u & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));
919
920 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
921 return VINF_SUCCESS;
922}
923
924/**
925 * Gets the PGM pool page for the shadow page directory covering an address, PAE.
926 *
927 * @returns VBox status code.
928 * @param pPGM Pointer to the PGM instance data.
929 * @param GCPtr The address.
930 * @param ppShwPde Receives the address of the pgm pool page for the shadow page directory
931 */
932DECLINLINE(int) pgmShwGetPaePoolPagePD(PPGM pPGM, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde)
933{
934 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
935 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pPGM);
936 AssertReturn(pPdpt, VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT); /* can't happen */
937 if (!pPdpt->a[iPdPt].n.u1Present)
938 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
939
940 /* Fetch the pgm pool shadow descriptor. */
941 PPGMPOOLPAGE pShwPde = pgmPoolGetPageByHCPhys(PGM2VM(pPGM), pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
942 AssertReturn(pShwPde, VERR_INTERNAL_ERROR);
943
944 *ppShwPde = pShwPde;
945 return VINF_SUCCESS;
946}
947#endif
948
949#ifndef IN_RC
950
951/**
952 * Syncs the SHADOW page directory pointer for the specified address.
953 *
954 * Allocates backing pages in case the PDPT or PML4 entry is missing.
955 *
956 * The caller is responsible for making sure the guest has a valid PD before
957 * calling this function.
958 *
959 * @returns VBox status.
960 * @param pVM VM handle.
961 * @param GCPtr The address.
962 * @param pGstPml4e Guest PML4 entry
963 * @param pGstPdpe Guest PDPT entry
964 * @param ppPD Receives address of page directory
965 */
966DECLINLINE(int) pgmShwSyncLongModePDPtr(PVM pVM, RTGCPTR64 GCPtr, PX86PML4E pGstPml4e, PX86PDPE pGstPdpe, PX86PDPAE *ppPD)
967{
968 PPGM pPGM = &pVM->pgm.s;
969 PPGMPOOL pPool = pPGM->CTX_SUFF(pPool);
970 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
971 PX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pPGM, iPml4);
972 bool fNestedPaging = HWACCMIsNestedPagingActive(pVM);
973 PPGMPOOLPAGE pShwPage;
974 int rc;
975
976 /* Allocate page directory pointer table if not present. */
977 if ( !pPml4e->n.u1Present
978 && !(pPml4e->u & X86_PML4E_PG_MASK))
979 {
980 Assert(!(pPml4e->u & X86_PML4E_PG_MASK));
981 if (!fNestedPaging)
982 {
983 Assert(pGstPml4e && pGstPdpe);
984 Assert(pVM->pgm.s.CTX_SUFF(pShwPageCR3));
985
986 rc = pgmPoolAlloc(pVM, pGstPml4e->u & X86_PML4E_PG_MASK,
987 PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, &pShwPage);
988 }
989 else
990 {
991 /* AMD-V nested paging. (Intel EPT never comes here) */
992 RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT;
993 rc = pgmPoolAlloc(pVM, GCPml4 + RT_BIT_64(63) /* hack: make the address unique */,
994 PGMPOOLKIND_64BIT_PDPT_FOR_PHYS, PGMPOOL_IDX_NESTED_ROOT, iPml4, &pShwPage);
995 }
996
997 if (rc == VERR_PGM_POOL_FLUSHED)
998 {
999 Log(("PGMShwSyncLongModePDPtr: PGM pool flushed (1) -> signal sync cr3\n"));
1000 Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
1001 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
1002 return VINF_PGM_SYNC_CR3;
1003 }
1004 AssertRCReturn(rc, rc);
1005 }
1006 else
1007 {
1008 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
1009 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1010 }
1011 /* The PDPT was cached or created; hook it up now. */
1012 pPml4e->u |= pShwPage->Core.Key
1013 | (pGstPml4e->u & ~(X86_PML4E_PG_MASK | X86_PML4E_AVL_MASK | X86_PML4E_PCD | X86_PML4E_PWT));
1014
1015 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1016 PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1017 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1018
1019 /* Allocate page directory if not present. */
1020 if ( !pPdpe->n.u1Present
1021 && !(pPdpe->u & X86_PDPE_PG_MASK))
1022 {
1023 if (!fNestedPaging)
1024 {
1025 Assert(pGstPml4e && pGstPdpe);
1026 Assert(!(pPdpe->u & X86_PDPE_PG_MASK));
1027 /* Create a reference back to the PDPT by using the index in its shadow page. */
1028 rc = pgmPoolAlloc(pVM, pGstPdpe->u & X86_PDPE_PG_MASK, PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD, pShwPage->idx, iPdPt, &pShwPage);
1029 }
1030 else
1031 {
1032 /* AMD-V nested paging. (Intel EPT never comes here) */
1033 RTGCPTR64 GCPdPt = (RTGCPTR64)iPdPt << EPT_PDPT_SHIFT;
1034
1035 rc = pgmPoolAlloc(pVM, GCPdPt + RT_BIT_64(62) /* hack: make the address unique */, PGMPOOLKIND_64BIT_PD_FOR_PHYS, pShwPage->idx, iPdPt, &pShwPage);
1036 }
1037
1038 if (rc == VERR_PGM_POOL_FLUSHED)
1039 {
1040 Log(("PGMShwSyncLongModePDPtr: PGM pool flushed (2) -> signal sync cr3\n"));
1041 Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
1042 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
1043 return VINF_PGM_SYNC_CR3;
1044 }
1045 AssertRCReturn(rc, rc);
1046 }
1047 else
1048 {
1049 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
1050 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1051 }
1052 /* The PD was cached or created; hook it up now. */
1053 pPdpe->u |= pShwPage->Core.Key
1054 | (pGstPdpe->u & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));
1055
1056 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1057 return VINF_SUCCESS;
1058}
1059
1060
1061/**
1062 * Gets the SHADOW page directory pointer for the specified address (long mode).
1063 *
1064 * @returns VBox status.
1065 * @param pVM VM handle.
1066 * @param GCPtr The address.
1067 * @param ppPdpt Receives address of pdpt
1068 * @param ppPD Receives address of page directory
1069 */
1070DECLINLINE(int) pgmShwGetLongModePDPtr(PVM pVM, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
1071{
1072 PPGM pPGM = &pVM->pgm.s;
1073 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1074 PCX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pPGM, iPml4);
1075 AssertReturn(pPml4e, VERR_INTERNAL_ERROR);
1076 if (ppPml4e)
1077 *ppPml4e = (PX86PML4E)pPml4e;
1078 if (!pPml4e->n.u1Present)
1079 return VERR_PAGE_MAP_LEVEL4_NOT_PRESENT;
1080
1081 PPGMPOOL pPool = pPGM->CTX_SUFF(pPool);
1082 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
1083 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1084
1085 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1086 PCX86PDPT pPdpt = *ppPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1087 if (!pPdpt->a[iPdPt].n.u1Present)
1088 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1089
1090 pShwPage = pgmPoolGetPage(pPool, pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
1091 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1092
1093 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1094 return VINF_SUCCESS;
1095}
1096
1097
1098/**
1099 * Syncs the SHADOW EPT page directory pointer for the specified address. Allocates
1100 * backing pages in case the PDPT or PML4 entry is missing.
1101 *
1102 * @returns VBox status.
1103 * @param pVM VM handle.
1104 * @param GCPtr The address.
1105 * @param ppPdpt Receives address of pdpt
1106 * @param ppPD Receives address of page directory
1107 */
1108DECLINLINE(int) pgmShwGetEPTPDPtr(PVM pVM, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD)
1109{
1110 PPGM pPGM = &pVM->pgm.s;
1111 const unsigned iPml4 = (GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
1112 PPGMPOOL pPool = pPGM->CTX_SUFF(pPool);
1113 PEPTPML4 pPml4;
1114 PEPTPML4E pPml4e;
1115 PPGMPOOLPAGE pShwPage;
1116 int rc;
1117
1118 Assert(HWACCMIsNestedPagingActive(pVM));
1119
1120# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1121 rc = PGM_HCPHYS_2_PTR(pVM, pPGM->HCPhysShwNestedRoot, &pPml4);
1122 AssertRCReturn(rc, rc);
1123# else
1124 pPml4 = (PEPTPML4)pPGM->CTX_SUFF(pShwNestedRoot);
1125# endif
1126 Assert(pPml4);
1127
1128 /* Allocate page directory pointer table if not present. */
1129 pPml4e = &pPml4->a[iPml4];
1130 if ( !pPml4e->n.u1Present
1131 && !(pPml4e->u & EPT_PML4E_PG_MASK))
1132 {
1133 Assert(!(pPml4e->u & EPT_PML4E_PG_MASK));
1134 RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT;
1135
1136 rc = pgmPoolAlloc(pVM, GCPml4 + RT_BIT_64(63) /* hack: make the address unique */, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOL_IDX_NESTED_ROOT, iPml4, &pShwPage);
1137 if (rc == VERR_PGM_POOL_FLUSHED)
1138 {
1139 Log(("PGMShwSyncEPTPDPtr: PGM pool flushed (1) -> signal sync cr3\n"));
1140 Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
1141 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
1142 return VINF_PGM_SYNC_CR3;
1143 }
1144 AssertRCReturn(rc, rc);
1145 }
1146 else
1147 {
1148 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
1149 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1150 }
1151 /* The PDPT was cached or created; hook it up now and fill with the default value. */
1152 pPml4e->u = pShwPage->Core.Key;
1153 pPml4e->n.u1Present = 1;
1154 pPml4e->n.u1Write = 1;
1155 pPml4e->n.u1Execute = 1;
1156
1157 const unsigned iPdPt = (GCPtr >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
1158 PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1159 PEPTPDPTE pPdpe = &pPdpt->a[iPdPt];
1160
1161 if (ppPdpt)
1162 *ppPdpt = pPdpt;
1163
1164 /* Allocate page directory if not present. */
1165 if ( !pPdpe->n.u1Present
1166 && !(pPdpe->u & EPT_PDPTE_PG_MASK))
1167 {
1168 RTGCPTR64 GCPdPt = (RTGCPTR64)iPdPt << EPT_PDPT_SHIFT;
1169
1170 rc = pgmPoolAlloc(pVM, GCPdPt + RT_BIT_64(62) /* hack: make the address unique */, PGMPOOLKIND_64BIT_PD_FOR_PHYS, pShwPage->idx, iPdPt, &pShwPage);
1171 if (rc == VERR_PGM_POOL_FLUSHED)
1172 {
1173 Log(("PGMShwSyncEPTPDPtr: PGM pool flushed (2) -> signal sync cr3\n"));
1174 Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
1175 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
1176 return VINF_PGM_SYNC_CR3;
1177 }
1178 AssertRCReturn(rc, rc);
1179 }
1180 else
1181 {
1182 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & EPT_PDPTE_PG_MASK);
1183 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1184 }
1185 /* The PD was cached or created; hook it up now and fill with the default value. */
1186 pPdpe->u = pShwPage->Core.Key;
1187 pPdpe->n.u1Present = 1;
1188 pPdpe->n.u1Write = 1;
1189 pPdpe->n.u1Execute = 1;
1190
1191 *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1192 return VINF_SUCCESS;
1193}
1194
1195#endif /* !IN_RC */
1196
1197/**
1198 * Gets effective Guest OS page information.
1199 *
1200 * When GCPtr is in a big page, the function will return as if it was a normal
1201 * 4KB page. If the need for distinguishing between big and normal page becomes
1202 * necessary at a later point, a PGMGstGetPage() will be created for that
1203 * purpose.
1204 *
1205 * @returns VBox status.
1206 * @param pVM VM Handle.
1207 * @param GCPtr Guest Context virtual address of the page.
1208 * @param pfFlags Where to store the flags. These are X86_PTE_*, even for big pages.
1209 * @param pGCPhys Where to store the GC physical address of the page.
1210 * This is page aligned.
1211 */
1212VMMDECL(int) PGMGstGetPage(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
1213{
1214 return PGM_GST_PFN(GetPage,pVM)(pVM, GCPtr, pfFlags, pGCPhys);
1215}
1216
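/*
 * Editor's example (not part of the original file): translating a guest virtual
 * address through the guest page tables and inspecting the returned X86_PTE_*
 * flags.  Function name is illustrative.
 *
 * @code
 *  static int pgmExampleGstQuery(PVM pVM, RTGCPTR GCPtr)
 *  {
 *      uint64_t fFlags;
 *      RTGCPHYS GCPhys;
 *      int rc = PGMGstGetPage(pVM, GCPtr, &fFlags, &GCPhys);
 *      if (RT_SUCCESS(rc))
 *          Log(("Example: %RGv -> %RGp fFlags=%#llx writable=%d\n",
 *               GCPtr, GCPhys, fFlags, !!(fFlags & X86_PTE_RW)));
 *      return rc;
 *  }
 * @endcode
 */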
1217
1218/**
1219 * Checks if the page is present.
1220 *
1221 * @returns true if the page is present.
1222 * @returns false if the page is not present.
1223 * @param pVM The VM handle.
1224 * @param GCPtr Address within the page.
1225 */
1226VMMDECL(bool) PGMGstIsPagePresent(PVM pVM, RTGCPTR GCPtr)
1227{
1228 int rc = PGMGstGetPage(pVM, GCPtr, NULL, NULL);
1229 return RT_SUCCESS(rc);
1230}
1231
1232
1233/**
1234 * Sets (replaces) the page flags for a range of pages in the guest's tables.
1235 *
1236 * @returns VBox status.
1237 * @param pVM VM handle.
1238 * @param GCPtr The address of the first page.
1239 * @param cb The size of the range in bytes.
1240 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
1241 */
1242VMMDECL(int) PGMGstSetPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
1243{
1244 return PGMGstModifyPage(pVM, GCPtr, cb, fFlags, 0);
1245}
1246
1247
1248/**
1249 * Modify page flags for a range of pages in the guest's tables
1250 *
1251 * The existing flags are ANDed with the fMask and ORed with the fFlags.
1252 *
1253 * @returns VBox status code.
1254 * @param pVM VM handle.
1255 * @param GCPtr Virtual address of the first page in the range.
1256 * @param cb Size (in bytes) of the range to apply the modification to.
1257 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
1258 * @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
1259 * Be very CAREFUL when ~'ing constants which could be 32-bit!
1260 */
1261VMMDECL(int) PGMGstModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
1262{
1263 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,GstModifyPage), a);
1264
1265 /*
1266 * Validate input.
1267 */
1268 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
1269 Assert(cb);
1270
1271 LogFlow(("PGMGstModifyPage %RGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));
1272
1273 /*
1274 * Adjust input.
1275 */
1276 cb += GCPtr & PAGE_OFFSET_MASK;
1277 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
1278 GCPtr = (GCPtr & PAGE_BASE_GC_MASK);
1279
1280 /*
1281 * Call worker.
1282 */
1283 int rc = PGM_GST_PFN(ModifyPage, pVM)(pVM, GCPtr, cb, fFlags, fMask);
1284
1285 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,GstModifyPage), a);
1286 return rc;
1287}
1288
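/*
 * Editor's example (not part of the original file): marking a range accessed and
 * dirty in the guest page tables while leaving all other flag bits untouched
 * (fMask keeps everything, fFlags ORs in A and D).
 *
 * @code
 *  static int pgmExampleGstSetAccessedAndDirty(PVM pVM, RTGCPTR GCPtr, size_t cb)
 *  {
 *      return PGMGstModifyPage(pVM, GCPtr, cb, X86_PTE_A | X86_PTE_D, ~(uint64_t)0);
 *  }
 * @endcode
 */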
1289
1290/**
1291 * Gets the specified page directory pointer table entry.
1292 *
1293 * @returns PDP entry
1294 * @param pVM The VM handle.
1295 * @param iPdpt PDPT index
1296 */
1297VMMDECL(X86PDPE) PGMGstGetPaePDPtr(PVM pVM, unsigned iPdpt)
1298{
1299 Assert(iPdpt <= 3);
1300 return pgmGstGetPaePDPTPtr(&pVM->pgm.s)->a[iPdpt & 3];
1301}
1302
1303
1304/**
1305 * Gets the current CR3 register value for the shadow memory context.
1306 * @returns CR3 value.
1307 * @param pVM The VM handle.
1308 */
1309VMMDECL(RTHCPHYS) PGMGetHyperCR3(PVM pVM)
1310{
1311#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
1312 return pVM->pgm.s.HCPhysShwCR3;
1313#else
1314 PGMMODE enmShadowMode = pVM->pgm.s.enmShadowMode;
1315 switch (enmShadowMode)
1316 {
1317 case PGMMODE_32_BIT:
1318 return pVM->pgm.s.HCPhysShw32BitPD;
1319
1320 case PGMMODE_PAE:
1321 case PGMMODE_PAE_NX:
1322 return pVM->pgm.s.HCPhysShwPaePdpt;
1323
1324 case PGMMODE_AMD64:
1325 case PGMMODE_AMD64_NX:
1326 return pVM->pgm.s.HCPhysShwCR3;
1327
1328 case PGMMODE_EPT:
1329 return pVM->pgm.s.HCPhysShwNestedRoot;
1330
1331 case PGMMODE_NESTED:
1332 return PGMGetNestedCR3(pVM, PGMGetHostMode(pVM));
1333
1334 default:
1335 AssertMsgFailed(("enmShadowMode=%d\n", enmShadowMode));
1336 return ~0;
1337 }
1338#endif
1339}
1340
1341
1342/**
1343 * Gets the current CR3 register value for the nested memory context.
1344 * @returns CR3 value.
1345 * @param pVM The VM handle.
1346 */
1347VMMDECL(RTHCPHYS) PGMGetNestedCR3(PVM pVM, PGMMODE enmShadowMode)
1348{
1349#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
1350 return pVM->pgm.s.HCPhysShwCR3;
1351#else
1352 switch (enmShadowMode)
1353 {
1354 case PGMMODE_32_BIT:
1355 return pVM->pgm.s.HCPhysShw32BitPD;
1356
1357 case PGMMODE_PAE:
1358 case PGMMODE_PAE_NX:
1359 return pVM->pgm.s.HCPhysShwPaePdpt;
1360
1361 case PGMMODE_AMD64:
1362 case PGMMODE_AMD64_NX:
1363 return pVM->pgm.s.HCPhysShwCR3;
1364
1365 default:
1366 AssertMsgFailed(("enmShadowMode=%d\n", enmShadowMode));
1367 return ~0;
1368 }
1369#endif
1370}
1371
1372
1373/**
1374 * Gets the current CR3 register value for the EPT paging memory context.
1375 * @returns CR3 value.
1376 * @param pVM The VM handle.
1377 */
1378VMMDECL(RTHCPHYS) PGMGetEPTCR3(PVM pVM)
1379{
1380 return pVM->pgm.s.HCPhysShwNestedRoot;
1381}
1382
1383
1384/**
1385 * Gets the CR3 register value for the 32-Bit shadow memory context.
1386 * @returns CR3 value.
1387 * @param pVM The VM handle.
1388 */
1389VMMDECL(RTHCPHYS) PGMGetHyper32BitCR3(PVM pVM)
1390{
1391#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
1392 return pVM->pgm.s.HCPhysShwCR3;
1393#else
1394 return pVM->pgm.s.HCPhysShw32BitPD;
1395#endif
1396}
1397
1398
1399/**
1400 * Gets the CR3 register value for the PAE shadow memory context.
1401 * @returns CR3 value.
1402 * @param pVM The VM handle.
1403 */
1404VMMDECL(RTHCPHYS) PGMGetHyperPaeCR3(PVM pVM)
1405{
1406#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
1407 return pVM->pgm.s.HCPhysShwCR3;
1408#else
1409 return pVM->pgm.s.HCPhysShwPaePdpt;
1410#endif
1411}
1412
1413
1414/**
1415 * Gets the CR3 register value for the AMD64 shadow memory context.
1416 * @returns CR3 value.
1417 * @param pVM The VM handle.
1418 */
1419VMMDECL(RTHCPHYS) PGMGetHyperAmd64CR3(PVM pVM)
1420{
1421 return pVM->pgm.s.HCPhysShwCR3;
1422}
1423
1424/**
1425 * Gets the current CR3 register value for the HC intermediate memory context.
1426 * @returns CR3 value.
1427 * @param pVM The VM handle.
1428 */
1429VMMDECL(RTHCPHYS) PGMGetInterHCCR3(PVM pVM)
1430{
1431 switch (pVM->pgm.s.enmHostMode)
1432 {
1433 case SUPPAGINGMODE_32_BIT:
1434 case SUPPAGINGMODE_32_BIT_GLOBAL:
1435 return pVM->pgm.s.HCPhysInterPD;
1436
1437 case SUPPAGINGMODE_PAE:
1438 case SUPPAGINGMODE_PAE_GLOBAL:
1439 case SUPPAGINGMODE_PAE_NX:
1440 case SUPPAGINGMODE_PAE_GLOBAL_NX:
1441 return pVM->pgm.s.HCPhysInterPaePDPT;
1442
1443 case SUPPAGINGMODE_AMD64:
1444 case SUPPAGINGMODE_AMD64_GLOBAL:
1445 case SUPPAGINGMODE_AMD64_NX:
1446 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
1447 return pVM->pgm.s.HCPhysInterPaePDPT;
1448
1449 default:
1450 AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode));
1451 return ~0;
1452 }
1453}
1454
1455
1456/**
1457 * Gets the current CR3 register value for the RC intermediate memory context.
1458 * @returns CR3 value.
1459 * @param pVM The VM handle.
1460 */
1461VMMDECL(RTHCPHYS) PGMGetInterRCCR3(PVM pVM)
1462{
1463 switch (pVM->pgm.s.enmShadowMode)
1464 {
1465 case PGMMODE_32_BIT:
1466 return pVM->pgm.s.HCPhysInterPD;
1467
1468 case PGMMODE_PAE:
1469 case PGMMODE_PAE_NX:
1470 return pVM->pgm.s.HCPhysInterPaePDPT;
1471
1472 case PGMMODE_AMD64:
1473 case PGMMODE_AMD64_NX:
1474 return pVM->pgm.s.HCPhysInterPaePML4;
1475
1476 case PGMMODE_EPT:
1477 case PGMMODE_NESTED:
1478 return 0; /* not relevant */
1479
1480 default:
1481 AssertMsgFailed(("enmShadowMode=%d\n", pVM->pgm.s.enmShadowMode));
1482 return ~0;
1483 }
1484}
1485
1486
1487/**
1488 * Gets the CR3 register value for the 32-Bit intermediate memory context.
1489 * @returns CR3 value.
1490 * @param pVM The VM handle.
1491 */
1492VMMDECL(RTHCPHYS) PGMGetInter32BitCR3(PVM pVM)
1493{
1494 return pVM->pgm.s.HCPhysInterPD;
1495}
1496
1497
1498/**
1499 * Gets the CR3 register value for the PAE intermediate memory context.
1500 * @returns CR3 value.
1501 * @param pVM The VM handle.
1502 */
1503VMMDECL(RTHCPHYS) PGMGetInterPaeCR3(PVM pVM)
1504{
1505 return pVM->pgm.s.HCPhysInterPaePDPT;
1506}
1507
1508
1509/**
1510 * Gets the CR3 register value for the AMD64 intermediate memory context.
1511 * @returns CR3 value.
1512 * @param pVM The VM handle.
1513 */
1514VMMDECL(RTHCPHYS) PGMGetInterAmd64CR3(PVM pVM)
1515{
1516 return pVM->pgm.s.HCPhysInterPaePML4;
1517}
1518
1519
1520/**
1521 * Performs and schedules necessary updates following a CR3 load or reload.
1522 *
1523 * This will normally involve mapping the guest PD or nPDPT
1524 *
1525 * @returns VBox status code.
1526 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
1527 * safely be ignored and overridden since the FF will be set too then.
1528 * @param pVM VM handle.
1529 * @param cr3 The new cr3.
1530 * @param fGlobal Indicates whether this is a global flush or not.
1531 */
1532VMMDECL(int) PGMFlushTLB(PVM pVM, uint64_t cr3, bool fGlobal)
1533{
1534 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLB), a);
1535
1536 /*
1537 * Always flag the necessary updates; necessary for hardware acceleration
1538 */
1539 /** @todo optimize this, it shouldn't always be necessary. */
1540 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
1541 if (fGlobal)
1542 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
1543 LogFlow(("PGMFlushTLB: cr3=%RX64 OldCr3=%RX64 fGlobal=%d\n", cr3, pVM->pgm.s.GCPhysCR3, fGlobal));
1544
1545 /*
1546 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
1547 */
1548 int rc = VINF_SUCCESS;
1549 RTGCPHYS GCPhysCR3;
1550 if ( pVM->pgm.s.enmGuestMode == PGMMODE_PAE
1551 || pVM->pgm.s.enmGuestMode == PGMMODE_PAE_NX
1552 || pVM->pgm.s.enmGuestMode == PGMMODE_AMD64
1553 || pVM->pgm.s.enmGuestMode == PGMMODE_AMD64_NX)
1554 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1555 else
1556 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1557 if (pVM->pgm.s.GCPhysCR3 != GCPhysCR3)
1558 {
1559#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
1560 /* Unmap the old CR3 value before activating the new one. */
1561 rc = PGM_BTH_PFN(UnmapCR3, pVM)(pVM);
1562 AssertRC(rc);
1563#endif
1564 RTGCPHYS GCPhysOldCR3 = pVM->pgm.s.GCPhysCR3;
1565 pVM->pgm.s.GCPhysCR3 = GCPhysCR3;
1566 rc = PGM_BTH_PFN(MapCR3, pVM)(pVM, GCPhysCR3);
1567 if (RT_LIKELY(rc == VINF_SUCCESS))
1568 {
1569 if (!pVM->pgm.s.fMappingsFixed)
1570 {
1571 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1572#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
1573 rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, GCPhysCR3);
1574#endif
1575 }
1576 }
1577 else
1578 {
1579 AssertMsg(rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
1580 Assert(VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL | VM_FF_PGM_SYNC_CR3));
1581 pVM->pgm.s.GCPhysCR3 = GCPhysOldCR3;
1582 pVM->pgm.s.fSyncFlags |= PGM_SYNC_MAP_CR3;
1583 if (!pVM->pgm.s.fMappingsFixed)
1584 pVM->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;
1585 }
1586
1587 if (fGlobal)
1588 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLBNewCR3Global));
1589 else
1590 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLBNewCR3));
1591 }
1592 else
1593 {
1594 /*
1595 * Check if we have a pending update of the CR3 monitoring.
1596 */
1597 if (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
1598 {
1599 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1600 Assert(!pVM->pgm.s.fMappingsFixed);
1601#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
1602 rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, GCPhysCR3);
1603#endif
1604 }
1605 if (fGlobal)
1606 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLBSameCR3Global));
1607 else
1608 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLBSameCR3));
1609 }
1610
1611 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLB), a);
1612 return rc;
1613}
1614
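/*
 * Editor's example (not part of the original file): a hypothetical MOV CR3
 * emulation path.  Whether the flush is global depends on CR4.PGE; with global
 * pages disabled every CR3 load must also flush global entries, matching the
 * convention used by PGMSyncCR3 below.
 *
 * @code
 *  static int pgmExampleLoadCR3(PVM pVM, uint64_t uNewCr3, uint64_t cr4)
 *  {
 *      bool const fGlobal = !(cr4 & X86_CR4_PGE);
 *      return PGMFlushTLB(pVM, uNewCr3, fGlobal);
 *  }
 * @endcode
 */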
1615
1616/**
1617 * Performs and schedules necessary updates following a CR3 load or reload when
1618 * using nested or extended paging.
1619 *
1620 * This API is an alternative to PGMFlushTLB that avoids actually flushing the
1621 * TLB and triggering a SyncCR3.
1622 *
1623 * This will normally involve mapping the guest PD or nPDPT
1624 *
1625 * @returns VBox status code.
1626 * @retval VINF_SUCCESS.
1627 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync (only applies when
1628 * not in nested mode). This can safely be ignored and overridden since
1629 * the FF will be set too then.
1630 * @param pVM VM handle.
1631 * @param cr3 The new cr3.
1632 */
1633VMMDECL(int) PGMUpdateCR3(PVM pVM, uint64_t cr3)
1634{
1635 LogFlow(("PGMUpdateCR3: cr3=%RX64 OldCr3=%RX64\n", cr3, pVM->pgm.s.GCPhysCR3));
1636
1637 /* We assume we're only called in nested paging mode. */
1638 Assert(pVM->pgm.s.fMappingsFixed);
1639 Assert(!(pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3));
1640 Assert(pVM->pgm.s.enmShadowMode == PGMMODE_NESTED || pVM->pgm.s.enmShadowMode == PGMMODE_EPT);
1641
1642 /*
1643 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
1644 */
1645 int rc = VINF_SUCCESS;
1646 RTGCPHYS GCPhysCR3;
1647 if ( pVM->pgm.s.enmGuestMode == PGMMODE_PAE
1648 || pVM->pgm.s.enmGuestMode == PGMMODE_PAE_NX
1649 || pVM->pgm.s.enmGuestMode == PGMMODE_AMD64
1650 || pVM->pgm.s.enmGuestMode == PGMMODE_AMD64_NX)
1651 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1652 else
1653 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1654 if (pVM->pgm.s.GCPhysCR3 != GCPhysCR3)
1655 {
1656 pVM->pgm.s.GCPhysCR3 = GCPhysCR3;
1657 rc = PGM_BTH_PFN(MapCR3, pVM)(pVM, GCPhysCR3);
1658 AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */
1659 }
1660 return rc;
1661}
1662
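/*
 * Editor's example (not part of the original file): with nested paging or EPT the
 * guest CR3 is only tracked, not shadowed, so a guest CR3 load can use this
 * cheaper entry point instead of PGMFlushTLB.  Hypothetical wrapper:
 *
 * @code
 *  static int pgmExampleNestedLoadCR3(PVM pVM, uint64_t uNewCr3)
 *  {
 *      // Only valid while the shadow mode is PGMMODE_NESTED or PGMMODE_EPT.
 *      return PGMUpdateCR3(pVM, uNewCr3);
 *  }
 * @endcode
 */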
1663
1664/**
1665 * Synchronize the paging structures.
1666 *
1667 * This function is called in response to the VM_FF_PGM_SYNC_CR3 and
1668 * VM_FF_PGM_SYNC_CR3_NON_GLOBAL. Those two force action flags are set
1669 * in several places, most importantly whenever the CR3 is loaded.
1670 *
1671 * @returns VBox status code.
1672 * @param pVM The virtual machine.
1673 * @param cr0 Guest context CR0 register
1674 * @param cr3 Guest context CR3 register
1675 * @param cr4 Guest context CR4 register
1676 * @param fGlobal Including global page directories or not
1677 */
1678VMMDECL(int) PGMSyncCR3(PVM pVM, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
1679{
1680 int rc;
1681
1682 /*
1683 * We might be called when we shouldn't.
1684 *
1685 * The mode switching will ensure that the PD is resynced
1686 * after every mode switch. So, if we find ourselves here
1687 * when in protected or real mode we can safely disable the
1688 * FF and return immediately.
1689 */
1690 if (pVM->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
1691 {
1692 Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
1693 VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3);
1694 VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
1695 return VINF_SUCCESS;
1696 }
1697
1698 /* If global pages are not supported, then all flushes are global. */
1699 if (!(cr4 & X86_CR4_PGE))
1700 fGlobal = true;
1701 LogFlow(("PGMSyncCR3: cr0=%RX64 cr3=%RX64 cr4=%RX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
1702 VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3), VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL)));
1703
1704#ifdef PGMPOOL_WITH_MONITORING
1705 /*
1706 * The pool may have pending stuff and even require a return to ring-3 to
1707 * clear the whole thing.
1708 */
1709 rc = pgmPoolSyncCR3(pVM);
1710 if (rc != VINF_SUCCESS)
1711 return rc;
1712#endif
1713
1714 /*
1715 * Check if we need to finish an aborted MapCR3 call (see PGMFlushTLB).
1716 * This should be done before SyncCR3.
1717 */
1718 if (pVM->pgm.s.fSyncFlags & PGM_SYNC_MAP_CR3)
1719 {
1720 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MAP_CR3;
1721
1722 RTGCPHYS GCPhysCR3Old = pVM->pgm.s.GCPhysCR3;
1723 RTGCPHYS GCPhysCR3;
1724 if ( pVM->pgm.s.enmGuestMode == PGMMODE_PAE
1725 || pVM->pgm.s.enmGuestMode == PGMMODE_PAE_NX
1726 || pVM->pgm.s.enmGuestMode == PGMMODE_AMD64
1727 || pVM->pgm.s.enmGuestMode == PGMMODE_AMD64_NX)
1728 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1729 else
1730 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1731
1732#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
1733 if (pVM->pgm.s.GCPhysCR3 != GCPhysCR3)
1734 {
1735 /* Unmap the old CR3 value before activating the new one. */
1736 rc = PGM_BTH_PFN(UnmapCR3, pVM)(pVM);
1737 AssertRC(rc);
1738 }
1739#endif
1740
1741 pVM->pgm.s.GCPhysCR3 = GCPhysCR3;
1742 rc = PGM_BTH_PFN(MapCR3, pVM)(pVM, GCPhysCR3);
1743#ifdef IN_RING3
1744 if (rc == VINF_PGM_SYNC_CR3)
1745 rc = pgmPoolSyncCR3(pVM);
1746#else
1747 if (rc == VINF_PGM_SYNC_CR3)
1748 {
1749 pVM->pgm.s.GCPhysCR3 = GCPhysCR3Old;
1750 return rc;
1751 }
1752#endif
1753 AssertRCReturn(rc, rc);
1754 AssertRCSuccessReturn(rc, VERR_INTERNAL_ERROR);
1755 }
1756
1757 /*
1758 * Let the 'Bth' function do the work and we'll just keep track of the flags.
1759 */
1760 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
1761 rc = PGM_BTH_PFN(SyncCR3, pVM)(pVM, cr0, cr3, cr4, fGlobal);
1762 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
1763 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
1764 if (rc == VINF_SUCCESS)
1765 {
1766 if (!(pVM->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
1767 {
1768 VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3);
1769 VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
1770 }
1771
1772 /*
1773 * Check if we have a pending update of the CR3 monitoring.
1774 */
1775 if (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
1776 {
1777 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1778 Assert(!pVM->pgm.s.fMappingsFixed);
1779 Assert(pVM->pgm.s.GCPhysCR3 == pVM->pgm.s.GCPhysGstCR3Monitored);
1780#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
1781 rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, pVM->pgm.s.GCPhysCR3);
1782#endif
1783 }
1784 }
1785
1786 /*
1787 * Now flush the guest TLBs (guest context).
1788 */
1789 if (rc == VINF_SUCCESS)
1790 PGM_INVL_GUEST_TLBS();
1791 return rc;
1792}
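
/*
 * Illustrative usage sketch (disabled): how a caller that notices one of the
 * VM_FF_PGM_SYNC_CR3* force action flags might gather the guest control
 * registers and invoke the sync.  The CPUM accessors and the helper name are
 * assumptions made for illustration only.
 */
#if 0
static int pgmSketchHandleSyncCr3(PVM pVM, bool fGlobal)
{
    uint64_t cr0 = CPUMGetGuestCR0(pVM);
    uint64_t cr3 = CPUMGetGuestCR3(pVM);
    uint64_t cr4 = CPUMGetGuestCR4(pVM);
    /* On success PGMSyncCR3 normally clears the force action flags itself. */
    return PGMSyncCR3(pVM, cr0, cr3, cr4, fGlobal);
}
#endif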
1793
1794
1795/**
1796 * Called whenever CR0 or CR4 changes in a way which may affect
1797 * the paging mode.
1798 *
1799 * @returns VBox status code fit for scheduling in GC and R0.
1800 * @retval VINF_SUCCESS if there was no change, or the change was successfully dealt with.
1801 * @retval VINF_PGM_CHANGE_MODE if we're in GC or R0 and the mode changes.
1802 * @param pVM VM handle.
1803 * @param cr0 The new cr0.
1804 * @param cr4 The new cr4.
1805 * @param efer The new extended feature enable register.
1806 */
1807VMMDECL(int) PGMChangeMode(PVM pVM, uint64_t cr0, uint64_t cr4, uint64_t efer)
1808{
1809 PGMMODE enmGuestMode;
1810
1811 /*
1812 * Calc the new guest mode.
1813 */
1814 if (!(cr0 & X86_CR0_PE))
1815 enmGuestMode = PGMMODE_REAL;
1816 else if (!(cr0 & X86_CR0_PG))
1817 enmGuestMode = PGMMODE_PROTECTED;
1818 else if (!(cr4 & X86_CR4_PAE))
1819 enmGuestMode = PGMMODE_32_BIT;
1820 else if (!(efer & MSR_K6_EFER_LME))
1821 {
1822 if (!(efer & MSR_K6_EFER_NXE))
1823 enmGuestMode = PGMMODE_PAE;
1824 else
1825 enmGuestMode = PGMMODE_PAE_NX;
1826 }
1827 else
1828 {
1829 if (!(efer & MSR_K6_EFER_NXE))
1830 enmGuestMode = PGMMODE_AMD64;
1831 else
1832 enmGuestMode = PGMMODE_AMD64_NX;
1833 }
1834
1835 /*
1836 * Did it change?
1837 */
1838 if (pVM->pgm.s.enmGuestMode == enmGuestMode)
1839 return VINF_SUCCESS;
1840
1841 /* Flush the TLB */
1842 PGM_INVL_GUEST_TLBS();
1843
1844#ifdef IN_RING3
1845 return PGMR3ChangeMode(pVM, enmGuestMode);
1846#else
1847 LogFlow(("PGMChangeMode: returns VINF_PGM_CHANGE_MODE.\n"));
1848 return VINF_PGM_CHANGE_MODE;
1849#endif
1850}
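
/*
 * Worked example of the mode calculation above: with CR0.PE=1, CR0.PG=1,
 * CR4.PAE=1, EFER.LME=1 and EFER.NXE=0 the guest mode becomes PGMMODE_AMD64;
 * setting EFER.NXE=1 yields PGMMODE_AMD64_NX instead.  As computed here,
 * clearing CR4.PAE always yields PGMMODE_32_BIT because PAE is tested before
 * LME.
 */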
1851
1852
1853/**
1854 * Gets the current guest paging mode.
1855 *
1856 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
1857 *
1858 * @returns The current paging mode.
1859 * @param pVM The VM handle.
1860 */
1861VMMDECL(PGMMODE) PGMGetGuestMode(PVM pVM)
1862{
1863 return pVM->pgm.s.enmGuestMode;
1864}
1865
1866
1867/**
1868 * Gets the current shadow paging mode.
1869 *
1870 * @returns The current paging mode.
1871 * @param pVM The VM handle.
1872 */
1873VMMDECL(PGMMODE) PGMGetShadowMode(PVM pVM)
1874{
1875 return pVM->pgm.s.enmShadowMode;
1876}
1877
1878/**
1879 * Gets the current host paging mode.
1880 *
1881 * @returns The current paging mode.
1882 * @param pVM The VM handle.
1883 */
1884VMMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
1885{
1886 switch (pVM->pgm.s.enmHostMode)
1887 {
1888 case SUPPAGINGMODE_32_BIT:
1889 case SUPPAGINGMODE_32_BIT_GLOBAL:
1890 return PGMMODE_32_BIT;
1891
1892 case SUPPAGINGMODE_PAE:
1893 case SUPPAGINGMODE_PAE_GLOBAL:
1894 return PGMMODE_PAE;
1895
1896 case SUPPAGINGMODE_PAE_NX:
1897 case SUPPAGINGMODE_PAE_GLOBAL_NX:
1898 return PGMMODE_PAE_NX;
1899
1900 case SUPPAGINGMODE_AMD64:
1901 case SUPPAGINGMODE_AMD64_GLOBAL:
1902 return PGMMODE_AMD64;
1903
1904 case SUPPAGINGMODE_AMD64_NX:
1905 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
1906 return PGMMODE_AMD64_NX;
1907
1908 default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
1909 }
1910
1911 return PGMMODE_INVALID;
1912}
1913
1914
1915/**
1916 * Get mode name.
1917 *
1918 * @returns read-only name string.
1919 * @param enmMode The mode whose name is desired.
1920 */
1921VMMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
1922{
1923 switch (enmMode)
1924 {
1925 case PGMMODE_REAL: return "Real";
1926 case PGMMODE_PROTECTED: return "Protected";
1927 case PGMMODE_32_BIT: return "32-bit";
1928 case PGMMODE_PAE: return "PAE";
1929 case PGMMODE_PAE_NX: return "PAE+NX";
1930 case PGMMODE_AMD64: return "AMD64";
1931 case PGMMODE_AMD64_NX: return "AMD64+NX";
1932 case PGMMODE_NESTED: return "Nested";
1933 case PGMMODE_EPT: return "EPT";
1934 default: return "unknown mode value";
1935 }
1936}
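
/*
 * Illustrative usage sketch (disabled): logging the three paging modes side by
 * side using the query functions above.  The helper name is an assumption made
 * for illustration only.
 */
#if 0
static void pgmSketchLogModes(PVM pVM)
{
    LogRel(("PGM: guest=%s shadow=%s host=%s\n",
            PGMGetModeName(PGMGetGuestMode(pVM)),
            PGMGetModeName(PGMGetShadowMode(pVM)),
            PGMGetModeName(PGMGetHostMode(pVM))));
}
#endif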
1937
1938
1939/**
1940 * Acquire the PGM lock.
1941 *
1942 * @returns VBox status code
1943 * @param pVM The VM to operate on.
1944 */
1945int pgmLock(PVM pVM)
1946{
1947 int rc = PDMCritSectEnter(&pVM->pgm.s.CritSect, VERR_SEM_BUSY);
1948#ifdef IN_RC
1949 if (rc == VERR_SEM_BUSY)
1950 rc = VMMGCCallHost(pVM, VMMCALLHOST_PGM_LOCK, 0);
1951#elif defined(IN_RING0)
1952 if (rc == VERR_SEM_BUSY)
1953 rc = VMMR0CallHost(pVM, VMMCALLHOST_PGM_LOCK, 0);
1954#endif
1955 AssertRC(rc);
1956 return rc;
1957}
1958
1959
1960/**
1961 * Release the PGM lock.
1962 *
1964 * @param pVM The VM to operate on.
1965 */
1966void pgmUnlock(PVM pVM)
1967{
1968 PDMCritSectLeave(&pVM->pgm.s.CritSect);
1969}
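
/*
 * Illustrative usage sketch (disabled): the expected pairing of pgmLock and
 * pgmUnlock around code that touches shared PGM state.  The helper name and
 * the elided body are assumptions made for illustration only.
 */
#if 0
static int pgmSketchLockedOperation(PVM pVM)
{
    int rc = pgmLock(pVM);
    AssertRCReturn(rc, rc);

    /* ... touch PGM structures that require the lock here ... */

    pgmUnlock(pVM);
    return VINF_SUCCESS;
}
#endif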
1970
1971#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1972
1973/**
1974 * Temporarily maps one guest page specified by GC physical address.
1975 * These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
1976 *
1977 * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
1978 * reused after 8 mappings (or perhaps a few more if you score with the cache).
1979 *
1980 * @returns VBox status.
1981 * @param pVM VM handle.
1982 * @param GCPhys GC Physical address of the page.
1983 * @param ppv Where to store the address of the mapping.
1984 */
1985VMMDECL(int) PGMDynMapGCPage(PVM pVM, RTGCPHYS GCPhys, void **ppv)
1986{
1987 AssertMsg(!(GCPhys & PAGE_OFFSET_MASK), ("GCPhys=%RGp\n", GCPhys));
1988
1989 /*
1990 * Get the ram range.
1991 */
1992 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
1993 while (pRam && GCPhys - pRam->GCPhys >= pRam->cb)
1994 pRam = pRam->CTX_SUFF(pNext);
1995 if (!pRam)
1996 {
1997 AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
1998 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
1999 }
2000
2001 /*
2002 * Pass it on to PGMDynMapHCPage.
2003 */
2004 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT]);
2005 //Log(("PGMDynMapGCPage: GCPhys=%RGp HCPhys=%RHp\n", GCPhys, HCPhys));
2006#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
2007 pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
2008#else
2009 PGMDynMapHCPage(pVM, HCPhys, ppv);
2010#endif
2011 return VINF_SUCCESS;
2012}
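
/*
 * Illustrative usage sketch (disabled): reading a dword from an arbitrary
 * guest physical address via the short-lived dynamic mapping.  The helper name
 * is an assumption made for illustration only; note the page-aligned address
 * passed to PGMDynMapGCPage and the offset added back afterwards.
 */
#if 0
static int pgmSketchReadGuestDword(PVM pVM, RTGCPHYS GCPhys, uint32_t *pu32)
{
    void *pv;
    int rc = PGMDynMapGCPage(pVM, GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK, &pv);
    if (RT_SUCCESS(rc))
        *pu32 = *(uint32_t *)((uintptr_t)pv + (GCPhys & PAGE_OFFSET_MASK));
    return rc;
}
#endif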
2013
2014
2015/**
2016 * Temporarily maps one guest page specified by unaligned GC physical address.
2017 * These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
2018 *
2019 * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
2020 * reused after 8 mappings (or perhaps a few more if you score with the cache).
2021 *
2022 * The caller must be aware that only the specified page is mapped and that really bad
2023 * things will happen if it writes beyond the end of that page!
2024 *
2025 * @returns VBox status.
2026 * @param pVM VM handle.
2027 * @param GCPhys GC Physical address within the page to be mapped.
2028 * @param ppv Where to store the mapping address corresponding to GCPhys.
2029 */
2030VMMDECL(int) PGMDynMapGCPageOff(PVM pVM, RTGCPHYS GCPhys, void **ppv)
2031{
2032 /*
2033 * Get the ram range.
2034 */
2035 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
2036 while (pRam && GCPhys - pRam->GCPhys >= pRam->cb)
2037 pRam = pRam->CTX_SUFF(pNext);
2038 if (!pRam)
2039 {
2040 AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
2041 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2042 }
2043
2044 /*
2045 * Pass it on to PGMDynMapHCPage.
2046 */
2047 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT]);
2048#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
2049 pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
2050#else
2051 PGMDynMapHCPage(pVM, HCPhys, ppv);
2052#endif
2053 *ppv = (void *)((uintptr_t)*ppv | (GCPhys & PAGE_OFFSET_MASK));
2054 return VINF_SUCCESS;
2055}
2056
2057
2058# ifdef IN_RC
2059/**
2060 * Temporarily maps one host page specified by HC physical address.
2061 *
2062 * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
2063 * reused after 8 mappings (or perhaps a few more if you score with the cache).
2064 *
2065 * @returns VINF_SUCCESS, will bail out to ring-3 on failure.
2066 * @param pVM VM handle.
2067 * @param HCPhys HC Physical address of the page.
2068 * @param ppv Where to store the address of the mapping. This is the
2069 * address of the PAGE not the exact address corresponding
2070 * to HCPhys. Use PGMDynMapHCPageOff if you care for the
2071 * page offset.
2072 */
2073VMMDECL(int) PGMDynMapHCPage(PVM pVM, RTHCPHYS HCPhys, void **ppv)
2074{
2075 AssertMsg(!(HCPhys & PAGE_OFFSET_MASK), ("HCPhys=%RHp\n", HCPhys));
2076
2077 /*
2078 * Check the cache.
2079 */
2080 register unsigned iCache;
2081 if ( pVM->pgm.s.aHCPhysDynPageMapCache[iCache = 0] == HCPhys
2082 || pVM->pgm.s.aHCPhysDynPageMapCache[iCache = 1] == HCPhys
2083 || pVM->pgm.s.aHCPhysDynPageMapCache[iCache = 2] == HCPhys
2084 || pVM->pgm.s.aHCPhysDynPageMapCache[iCache = 3] == HCPhys)
2085 {
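        /*
         * The 4-entry cache covers all 8 dynamic mapping pages: slot i is
         * refilled by either page i or page i + 4 (the update path below
         * stores into aHCPhysDynPageMapCache[iPage & 3]).  Given the most
         * recently allocated page (iDynPageMapLast), au8Trans resolves which
         * of the two candidates actually holds the cached mapping.
         */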
2086 static const uint8_t au8Trans[MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT][RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache)] =
2087 {
2088 { 0, 5, 6, 7 },
2089 { 0, 1, 6, 7 },
2090 { 0, 1, 2, 7 },
2091 { 0, 1, 2, 3 },
2092 { 4, 1, 2, 3 },
2093 { 4, 5, 2, 3 },
2094 { 4, 5, 6, 3 },
2095 { 4, 5, 6, 7 },
2096 };
2097 Assert(RT_ELEMENTS(au8Trans) == 8);
2098 Assert(RT_ELEMENTS(au8Trans[0]) == 4);
2099 int iPage = au8Trans[pVM->pgm.s.iDynPageMapLast][iCache];
2100 void *pv = pVM->pgm.s.pbDynPageMapBaseGC + (iPage << PAGE_SHIFT);
2101 *ppv = pv;
2102 STAM_COUNTER_INC(&pVM->pgm.s.StatRCDynMapCacheHits);
2103 //Log(("PGMGCDynMapHCPage: HCPhys=%RHp pv=%p iPage=%d iCache=%d\n", HCPhys, pv, iPage, iCache));
2104 return VINF_SUCCESS;
2105 }
2106 Assert(RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) == 4);
2107 STAM_COUNTER_INC(&pVM->pgm.s.StatRCDynMapCacheMisses);
2108
2109 /*
2110 * Update the page tables.
2111 */
2112 register unsigned iPage = pVM->pgm.s.iDynPageMapLast;
2113 pVM->pgm.s.iDynPageMapLast = iPage = (iPage + 1) & ((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) - 1);
2114 Assert((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) == 8);
2115
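    /*
     * Keep both PTE formats in sync: the dynamic mapping area must resolve
     * correctly whether the hypervisor context is running on 32-bit or PAE
     * shadow paging.
     */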
2116 pVM->pgm.s.aHCPhysDynPageMapCache[iPage & (RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) - 1)] = HCPhys;
2117 pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage].u = (uint32_t)HCPhys | X86_PTE_P | X86_PTE_A | X86_PTE_D;
2118 pVM->pgm.s.paDynPageMapPaePTEsGC[iPage].u = HCPhys | X86_PTE_P | X86_PTE_A | X86_PTE_D;
2119
2120 void *pv = pVM->pgm.s.pbDynPageMapBaseGC + (iPage << PAGE_SHIFT);
2121 *ppv = pv;
2122 ASMInvalidatePage(pv);
2123 Log4(("PGMGCDynMapHCPage: HCPhys=%RHp pv=%p iPage=%d\n", HCPhys, pv, iPage));
2124 return VINF_SUCCESS;
2125}
2126# endif /* IN_RC */
2127
2128#endif /* IN_RC || VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
2129#ifdef VBOX_STRICT
2130
2131/**
2132 * Asserts that there are no mapping conflicts.
2133 *
2134 * @returns Number of conflicts.
2135 * @param pVM The VM Handle.
2136 */
2137VMMDECL(unsigned) PGMAssertNoMappingConflicts(PVM pVM)
2138{
2139 unsigned cErrors = 0;
2140
2141 /*
2142 * Check for mapping conflicts.
2143 */
2144 for (PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
2145 pMapping;
2146 pMapping = pMapping->CTX_SUFF(pNext))
2147 {
2148 /** @todo This is slow and should be optimized, but since it's just assertions I don't care now. */
2149 for (RTGCPTR GCPtr = pMapping->GCPtr;
2150 GCPtr <= pMapping->GCPtrLast;
2151 GCPtr += PAGE_SIZE)
2152 {
2153 int rc = PGMGstGetPage(pVM, (RTGCPTR)GCPtr, NULL, NULL);
2154 if (rc != VERR_PAGE_TABLE_NOT_PRESENT)
2155 {
2156 AssertMsgFailed(("Conflict at %RGv with %s\n", GCPtr, R3STRING(pMapping->pszDesc)));
2157 cErrors++;
2158 break;
2159 }
2160 }
2161 }
2162
2163 return cErrors;
2164}
2165
2166
2167/**
2168 * Asserts that everything related to the guest CR3 is correctly shadowed.
2169 *
2170 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
2171 * and assert the correctness of the guest CR3 mapping before asserting that the
2172 * shadow page tables are in sync with the guest page tables.
2173 *
2174 * @returns Number of conflicts.
2175 * @param pVM The VM Handle.
2176 * @param cr3 The current guest CR3 register value.
2177 * @param cr4 The current guest CR4 register value.
2178 */
2179VMMDECL(unsigned) PGMAssertCR3(PVM pVM, uint64_t cr3, uint64_t cr4)
2180{
2181 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
2182 unsigned cErrors = PGM_BTH_PFN(AssertCR3, pVM)(pVM, cr3, cr4, 0, ~(RTGCPTR)0);
2183 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
2184 return cErrors;
2186}
2187
2188#endif /* VBOX_STRICT */