VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp @ 16177

Last change on this file since 16177 was 16172, checked in by vboxsync, 16 years ago

Moved amd64 paging data to unified shadow paging section.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 67.6 KB
1/* $Id: PGMAll.cpp 16172 2009-01-22 15:09:31Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor - All context code.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#define LOG_GROUP LOG_GROUP_PGM
26#include <VBox/pgm.h>
27#include <VBox/cpum.h>
28#include <VBox/selm.h>
29#include <VBox/iom.h>
30#include <VBox/sup.h>
31#include <VBox/mm.h>
32#include <VBox/stam.h>
33#include <VBox/csam.h>
34#include <VBox/patm.h>
35#include <VBox/trpm.h>
36#include <VBox/rem.h>
37#include <VBox/em.h>
38#include <VBox/hwaccm.h>
39#include <VBox/hwacc_vmx.h>
40#include "PGMInternal.h"
41#include <VBox/vm.h>
42#include <iprt/assert.h>
43#include <iprt/asm.h>
44#include <iprt/string.h>
45#include <VBox/log.h>
46#include <VBox/param.h>
47#include <VBox/err.h>
48
49
50/*******************************************************************************
51* Structures and Typedefs *
52*******************************************************************************/
53/**
54 * State structure for PGM_GST_NAME(HandlerVirtualUpdate) that's
55 * passed to PGM_GST_NAME(VirtHandlerUpdateOne) during enumeration.
56 */
57typedef struct PGMHVUSTATE
58{
59 /** The VM handle. */
60 PVM pVM;
61 /** The todo flags. */
62 RTUINT fTodo;
63 /** The CR4 register value. */
64 uint32_t cr4;
65} PGMHVUSTATE, *PPGMHVUSTATE;
66
67
68/*******************************************************************************
69* Internal Functions *
70*******************************************************************************/
71DECLINLINE(int) pgmShwGetLongModePDPtr(PVM pVM, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
72DECLINLINE(int) pgmShwSyncLongModePDPtr(PVM pVM, RTGCPTR64 GCPtr, PX86PML4E pGstPml4e, PX86PDPE pGstPdpe, PX86PDPAE *ppPD);
73DECLINLINE(int) pgmShwGetEPTPDPtr(PVM pVM, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD);
74DECLINLINE(int) pgmShwSyncPAEPDPtr(PVM pVM, RTGCPTR GCPtr, PX86PDPE pGstPdpe, PX86PDPAE *ppPD);
75DECLINLINE(int) pgmShwGetPAEPDPtr(PVM pVM, RTGCPTR GCPtr, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
76
77
78/*
79 * Shadow - 32-bit mode
80 */
81#define PGM_SHW_TYPE PGM_TYPE_32BIT
82#define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
83#include "PGMAllShw.h"
84
85/* Guest - real mode */
86#define PGM_GST_TYPE PGM_TYPE_REAL
87#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
88#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
89#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
90#include "PGMAllGst.h"
91#include "PGMAllBth.h"
92#undef BTH_PGMPOOLKIND_PT_FOR_PT
93#undef PGM_BTH_NAME
94#undef PGM_GST_TYPE
95#undef PGM_GST_NAME
96
97/* Guest - protected mode */
98#define PGM_GST_TYPE PGM_TYPE_PROT
99#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
100#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
101#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
102#include "PGMAllGst.h"
103#include "PGMAllBth.h"
104#undef BTH_PGMPOOLKIND_PT_FOR_PT
105#undef PGM_BTH_NAME
106#undef PGM_GST_TYPE
107#undef PGM_GST_NAME
108
109/* Guest - 32-bit mode */
110#define PGM_GST_TYPE PGM_TYPE_32BIT
111#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
112#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
113#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
114#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
115#include "PGMAllGst.h"
116#include "PGMAllBth.h"
117#undef BTH_PGMPOOLKIND_PT_FOR_BIG
118#undef BTH_PGMPOOLKIND_PT_FOR_PT
119#undef PGM_BTH_NAME
120#undef PGM_GST_TYPE
121#undef PGM_GST_NAME
122
123#undef PGM_SHW_TYPE
124#undef PGM_SHW_NAME
125
126
127/*
128 * Shadow - PAE mode
129 */
130#define PGM_SHW_TYPE PGM_TYPE_PAE
131#define PGM_SHW_NAME(name) PGM_SHW_NAME_PAE(name)
132#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
133#include "PGMAllShw.h"
134
135/* Guest - real mode */
136#define PGM_GST_TYPE PGM_TYPE_REAL
137#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
138#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
139#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
140#include "PGMAllBth.h"
141#undef BTH_PGMPOOLKIND_PT_FOR_PT
142#undef PGM_BTH_NAME
143#undef PGM_GST_TYPE
144#undef PGM_GST_NAME
145
146/* Guest - protected mode */
147#define PGM_GST_TYPE PGM_TYPE_PROT
148#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
149#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
150#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
151#include "PGMAllBth.h"
152#undef BTH_PGMPOOLKIND_PT_FOR_PT
153#undef PGM_BTH_NAME
154#undef PGM_GST_TYPE
155#undef PGM_GST_NAME
156
157/* Guest - 32-bit mode */
158#define PGM_GST_TYPE PGM_TYPE_32BIT
159#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
160#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
161#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
162#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
163#include "PGMAllBth.h"
164#undef BTH_PGMPOOLKIND_PT_FOR_BIG
165#undef BTH_PGMPOOLKIND_PT_FOR_PT
166#undef PGM_BTH_NAME
167#undef PGM_GST_TYPE
168#undef PGM_GST_NAME
169
170
171/* Guest - PAE mode */
172#define PGM_GST_TYPE PGM_TYPE_PAE
173#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
174#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
175#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
176#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
177#include "PGMAllGst.h"
178#include "PGMAllBth.h"
179#undef BTH_PGMPOOLKIND_PT_FOR_BIG
180#undef BTH_PGMPOOLKIND_PT_FOR_PT
181#undef PGM_BTH_NAME
182#undef PGM_GST_TYPE
183#undef PGM_GST_NAME
184
185#undef PGM_SHW_TYPE
186#undef PGM_SHW_NAME
187
188
189#ifndef IN_RC /* AMD64 implies VT-x/AMD-V */
190/*
191 * Shadow - AMD64 mode
192 */
193# define PGM_SHW_TYPE PGM_TYPE_AMD64
194# define PGM_SHW_NAME(name) PGM_SHW_NAME_AMD64(name)
195# include "PGMAllShw.h"
196
197/* Guest - protected mode */
198# define PGM_GST_TYPE PGM_TYPE_PROT
199# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
200# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
201# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
202# include "PGMAllBth.h"
203# undef BTH_PGMPOOLKIND_PT_FOR_PT
204# undef PGM_BTH_NAME
205# undef PGM_GST_TYPE
206# undef PGM_GST_NAME
207
208# ifdef VBOX_WITH_64_BITS_GUESTS
209/* Guest - AMD64 mode */
210# define PGM_GST_TYPE PGM_TYPE_AMD64
211# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
212# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
213# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
214# define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
215# include "PGMAllGst.h"
216# include "PGMAllBth.h"
217# undef BTH_PGMPOOLKIND_PT_FOR_BIG
218# undef BTH_PGMPOOLKIND_PT_FOR_PT
219# undef PGM_BTH_NAME
220# undef PGM_GST_TYPE
221# undef PGM_GST_NAME
222# endif /* VBOX_WITH_64_BITS_GUESTS */
223
224# undef PGM_SHW_TYPE
225# undef PGM_SHW_NAME
226
227
228/*
229 * Shadow - Nested paging mode
230 */
231# define PGM_SHW_TYPE PGM_TYPE_NESTED
232# define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED(name)
233# include "PGMAllShw.h"
234
235/* Guest - real mode */
236# define PGM_GST_TYPE PGM_TYPE_REAL
237# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
238# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_REAL(name)
239# include "PGMAllBth.h"
240# undef PGM_BTH_NAME
241# undef PGM_GST_TYPE
242# undef PGM_GST_NAME
243
244/* Guest - protected mode */
245# define PGM_GST_TYPE PGM_TYPE_PROT
246# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
247# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PROT(name)
248# include "PGMAllBth.h"
249# undef PGM_BTH_NAME
250# undef PGM_GST_TYPE
251# undef PGM_GST_NAME
252
253/* Guest - 32-bit mode */
254# define PGM_GST_TYPE PGM_TYPE_32BIT
255# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
256# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT(name)
257# include "PGMAllBth.h"
258# undef PGM_BTH_NAME
259# undef PGM_GST_TYPE
260# undef PGM_GST_NAME
261
262/* Guest - PAE mode */
263# define PGM_GST_TYPE PGM_TYPE_PAE
264# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
265# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE(name)
266# include "PGMAllBth.h"
267# undef PGM_BTH_NAME
268# undef PGM_GST_TYPE
269# undef PGM_GST_NAME
270
271# ifdef VBOX_WITH_64_BITS_GUESTS
272/* Guest - AMD64 mode */
273# define PGM_GST_TYPE PGM_TYPE_AMD64
274# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
275# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64(name)
276# include "PGMAllBth.h"
277# undef PGM_BTH_NAME
278# undef PGM_GST_TYPE
279# undef PGM_GST_NAME
280# endif /* VBOX_WITH_64_BITS_GUESTS */
281
282# undef PGM_SHW_TYPE
283# undef PGM_SHW_NAME
284
285
286/*
287 * Shadow - EPT
288 */
289# define PGM_SHW_TYPE PGM_TYPE_EPT
290# define PGM_SHW_NAME(name) PGM_SHW_NAME_EPT(name)
291# include "PGMAllShw.h"
292
293/* Guest - real mode */
294# define PGM_GST_TYPE PGM_TYPE_REAL
295# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
296# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_REAL(name)
297# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
298# include "PGMAllBth.h"
299# undef BTH_PGMPOOLKIND_PT_FOR_PT
300# undef PGM_BTH_NAME
301# undef PGM_GST_TYPE
302# undef PGM_GST_NAME
303
304/* Guest - protected mode */
305# define PGM_GST_TYPE PGM_TYPE_PROT
306# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
307# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
308# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
309# include "PGMAllBth.h"
310# undef BTH_PGMPOOLKIND_PT_FOR_PT
311# undef PGM_BTH_NAME
312# undef PGM_GST_TYPE
313# undef PGM_GST_NAME
314
315/* Guest - 32-bit mode */
316# define PGM_GST_TYPE PGM_TYPE_32BIT
317# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
318# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_32BIT(name)
319# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
320# include "PGMAllBth.h"
321# undef BTH_PGMPOOLKIND_PT_FOR_PT
322# undef PGM_BTH_NAME
323# undef PGM_GST_TYPE
324# undef PGM_GST_NAME
325
326/* Guest - PAE mode */
327# define PGM_GST_TYPE PGM_TYPE_PAE
328# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
329# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PAE(name)
330# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
331# include "PGMAllBth.h"
332# undef BTH_PGMPOOLKIND_PT_FOR_PT
333# undef PGM_BTH_NAME
334# undef PGM_GST_TYPE
335# undef PGM_GST_NAME
336
337# ifdef VBOX_WITH_64_BITS_GUESTS
338/* Guest - AMD64 mode */
339# define PGM_GST_TYPE PGM_TYPE_AMD64
340# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
341# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_AMD64(name)
342# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
343# include "PGMAllBth.h"
344# undef BTH_PGMPOOLKIND_PT_FOR_PT
345# undef PGM_BTH_NAME
346# undef PGM_GST_TYPE
347# undef PGM_GST_NAME
348# endif /* VBOX_WITH_64_BITS_GUESTS */
349
350# undef PGM_SHW_TYPE
351# undef PGM_SHW_NAME
352
353#endif /* !IN_RC */
354
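/*
 * Editorial illustration (not part of the original source): the repeated
 * inclusion of PGMAllShw.h, PGMAllGst.h and PGMAllBth.h above is a poor-man's
 * template mechanism.  Every shadow/guest mode pair re-expands the same
 * function bodies under mode-specific names, e.g. roughly:
 *
 *     #define PGM_SHW_TYPE        PGM_TYPE_32BIT
 *     #define PGM_GST_TYPE        PGM_TYPE_REAL
 *     #define PGM_BTH_NAME(name)  PGM_BTH_NAME_32BIT_REAL(name)
 *     #include "PGMAllBth.h"      // emits the ...32BitReal... instances
 *
 * The exact name prefixes come from the name macros in PGMInternal.h (assumed
 * here, not shown in this file).  The PGM_SHW_PFN / PGM_GST_PFN / PGM_BTH_PFN
 * macros used further down dispatch through mode-specific function pointers
 * that are set up when the paging mode changes, so callers such as
 * PGMTrap0eHandler() always reach the instance matching the current modes.
 */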
355
356#ifndef IN_RING3
357/**
358 * #PF Handler.
359 *
360 * @returns VBox status code (appropriate for trap handling and GC return).
361 * @param pVM VM Handle.
362 * @param uErr The trap error code.
363 * @param pRegFrame Trap register frame.
364 * @param pvFault The fault address.
365 */
366VMMDECL(int) PGMTrap0eHandler(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
367{
368 LogFlow(("PGMTrap0eHandler: uErr=%RGu pvFault=%RGv eip=%RGv\n", uErr, pvFault, (RTGCPTR)pRegFrame->rip));
369 STAM_PROFILE_START(&pVM->pgm.s.StatRZTrap0e, a);
370 STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );
371
372
373#ifdef VBOX_WITH_STATISTICS
374 /*
375 * Error code stats.
376 */
377 if (uErr & X86_TRAP_PF_US)
378 {
379 if (!(uErr & X86_TRAP_PF_P))
380 {
381 if (uErr & X86_TRAP_PF_RW)
382 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSNotPresentWrite);
383 else
384 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSNotPresentRead);
385 }
386 else if (uErr & X86_TRAP_PF_RW)
387 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSWrite);
388 else if (uErr & X86_TRAP_PF_RSVD)
389 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSReserved);
390 else if (uErr & X86_TRAP_PF_ID)
391 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSNXE);
392 else
393 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSRead);
394 }
395 else
396 { /* Supervisor */
397 if (!(uErr & X86_TRAP_PF_P))
398 {
399 if (uErr & X86_TRAP_PF_RW)
400 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eSVNotPresentWrite);
401 else
402 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eSVNotPresentRead);
403 }
404 else if (uErr & X86_TRAP_PF_RW)
405 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eSVWrite);
406 else if (uErr & X86_TRAP_PF_ID)
407 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eSNXE);
408 else if (uErr & X86_TRAP_PF_RSVD)
409 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eSVReserved);
410 }
411#endif /* VBOX_WITH_STATISTICS */
412
413 /*
414 * Call the worker.
415 */
416 int rc = PGM_BTH_PFN(Trap0eHandler, pVM)(pVM, uErr, pRegFrame, pvFault);
417 if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
418 rc = VINF_SUCCESS;
419 STAM_STATS({ if (rc == VINF_EM_RAW_GUEST_TRAP) STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eGuestPF); });
420 STAM_STATS({ if (!pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution))
421 pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatRZTrap0eTime2Misc; });
422 STAM_PROFILE_STOP_EX(&pVM->pgm.s.StatRZTrap0e, pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution), a);
423 return rc;
424}
425#endif /* !IN_RING3 */
426
427
428/**
429 * Prefetch a page
430 *
431 * Typically used to sync commonly used pages before entering raw mode
432 * after a CR3 reload.
433 *
434 * @returns VBox status code suitable for scheduling.
435 * @retval VINF_SUCCESS on success.
436 * @retval VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
437 * @param pVM VM handle.
438 * @param GCPtrPage Page to prefetch.
439 */
440VMMDECL(int) PGMPrefetchPage(PVM pVM, RTGCPTR GCPtrPage)
441{
442 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,Prefetch), a);
443 int rc = PGM_BTH_PFN(PrefetchPage, pVM)(pVM, GCPtrPage);
444 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,Prefetch), a);
445 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
446 return rc;
447}
448
449
450/**
451 * Gets the mapping corresponding to the specified address (if any).
452 *
453 * @returns Pointer to the mapping.
454 * @returns NULL if not found.
455 *
456 * @param pVM The virtual machine.
457 * @param GCPtr The guest context pointer.
458 */
459PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr)
460{
461 PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
462 while (pMapping)
463 {
464 if ((uintptr_t)GCPtr < (uintptr_t)pMapping->GCPtr)
465 break;
466 if ((uintptr_t)GCPtr - (uintptr_t)pMapping->GCPtr < pMapping->cb)
467 return pMapping;
468 pMapping = pMapping->CTX_SUFF(pNext);
469 }
470 return NULL;
471}
472
473
474/**
475 * Verifies a range of pages for read or write access
476 *
477 * Only checks the guest's page tables
478 *
479 * @returns VBox status code.
480 * @param pVM VM handle.
481 * @param Addr Guest virtual address to check
482 * @param cbSize Access size
483 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
484 * @remarks Currently not in use.
485 */
486VMMDECL(int) PGMIsValidAccess(PVM pVM, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
487{
488 /*
489 * Validate input.
490 */
491 if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
492 {
493 AssertMsgFailed(("PGMIsValidAccess: invalid access type %08x\n", fAccess));
494 return VERR_INVALID_PARAMETER;
495 }
496
497 uint64_t fPage;
498 int rc = PGMGstGetPage(pVM, (RTGCPTR)Addr, &fPage, NULL);
499 if (RT_FAILURE(rc))
500 {
501 Log(("PGMIsValidAccess: access violation for %RGv rc=%d\n", Addr, rc));
502 return VINF_EM_RAW_GUEST_TRAP;
503 }
504
505 /*
506 * Check if the access would cause a page fault
507 *
508 * Note that hypervisor page directories are not present in the guest's tables, so this check
509 * is sufficient.
510 */
511 bool fWrite = !!(fAccess & X86_PTE_RW);
512 bool fUser = !!(fAccess & X86_PTE_US);
513 if ( !(fPage & X86_PTE_P)
514 || (fWrite && !(fPage & X86_PTE_RW))
515 || (fUser && !(fPage & X86_PTE_US)) )
516 {
517 Log(("PGMIsValidAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPage, fWrite, fUser));
518 return VINF_EM_RAW_GUEST_TRAP;
519 }
520 if ( RT_SUCCESS(rc)
521 && PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize))
522 return PGMIsValidAccess(pVM, Addr + PAGE_SIZE, (cbSize > PAGE_SIZE) ? cbSize - PAGE_SIZE : 1, fAccess);
523 return rc;
524}
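
/*
 * Editorial illustration (not part of the original source): a hypothetical
 * caller that wants to know whether the guest could write a user-mode buffer
 * without faulting would pass the X86_PTE_* access bits directly:
 *
 *     // GCPtrBuf/cbBuf are whatever guest buffer is being checked (assumed names)
 *     int rc = PGMIsValidAccess(pVM, GCPtrBuf, cbBuf, X86_PTE_US | X86_PTE_RW);
 *     if (rc == VINF_EM_RAW_GUEST_TRAP)
 *         ; // the access would #PF in the guest
 *
 * Note that only the guest page tables are consulted and the function recurses
 * page by page when the range crosses a page boundary.
 */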
525
526
527/**
528 * Verifies a range of pages for read or write access
529 *
530 * Supports handling of pages marked for dirty bit tracking and CSAM
531 *
532 * @returns VBox status code.
533 * @param pVM VM handle.
534 * @param Addr Guest virtual address to check
535 * @param cbSize Access size
536 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
537 */
538VMMDECL(int) PGMVerifyAccess(PVM pVM, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
539{
540 AssertMsg(!(fAccess & ~(X86_PTE_US | X86_PTE_RW)), ("PGMVerifyAccess: invalid access type %08x\n", fAccess));
541
542 /*
543 * Get going.
544 */
545 uint64_t fPageGst;
546 int rc = PGMGstGetPage(pVM, (RTGCPTR)Addr, &fPageGst, NULL);
547 if (RT_FAILURE(rc))
548 {
549 Log(("PGMVerifyAccess: access violation for %RGv rc=%d\n", Addr, rc));
550 return VINF_EM_RAW_GUEST_TRAP;
551 }
552
553 /*
554 * Check if the access would cause a page fault
555 *
556 * Note that hypervisor page directories are not present in the guest's tables, so this check
557 * is sufficient.
558 */
559 const bool fWrite = !!(fAccess & X86_PTE_RW);
560 const bool fUser = !!(fAccess & X86_PTE_US);
561 if ( !(fPageGst & X86_PTE_P)
562 || (fWrite && !(fPageGst & X86_PTE_RW))
563 || (fUser && !(fPageGst & X86_PTE_US)) )
564 {
565 Log(("PGMVerifyAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPageGst, fWrite, fUser));
566 return VINF_EM_RAW_GUEST_TRAP;
567 }
568
569 if (!HWACCMIsNestedPagingActive(pVM))
570 {
571 /*
572 * Next step is to verify if we protected this page for dirty bit tracking or for CSAM scanning
573 */
574 rc = PGMShwGetPage(pVM, (RTGCPTR)Addr, NULL, NULL);
575 if ( rc == VERR_PAGE_NOT_PRESENT
576 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
577 {
578 /*
579 * Page is not present in our page tables.
580 * Try to sync it!
581 */
582 Assert(X86_TRAP_PF_RW == X86_PTE_RW && X86_TRAP_PF_US == X86_PTE_US);
583 uint32_t uErr = fAccess & (X86_TRAP_PF_RW | X86_TRAP_PF_US);
584 rc = PGM_BTH_PFN(VerifyAccessSyncPage, pVM)(pVM, Addr, fPageGst, uErr);
585 if (rc != VINF_SUCCESS)
586 return rc;
587 }
588 else
589 AssertMsg(rc == VINF_SUCCESS, ("PGMShwGetPage %RGv failed with %Rrc\n", Addr, rc));
590 }
591
592#if 0 /* def VBOX_STRICT; triggers too often now */
593 /*
594 * This check is a bit paranoid, but useful.
595 */
596 /** @note this will assert when writing to monitored pages (a bit annoying actually) */
597 uint64_t fPageShw;
598 rc = PGMShwGetPage(pVM, (RTGCPTR)Addr, &fPageShw, NULL);
599 if ( (rc == VERR_PAGE_NOT_PRESENT || RT_FAILURE(rc))
600 || (fWrite && !(fPageShw & X86_PTE_RW))
601 || (fUser && !(fPageShw & X86_PTE_US)) )
602 {
603 AssertMsgFailed(("Unexpected access violation for %RGv! rc=%Rrc write=%d user=%d\n",
604 Addr, rc, fWrite && !(fPageShw & X86_PTE_RW), fUser && !(fPageShw & X86_PTE_US)));
605 return VINF_EM_RAW_GUEST_TRAP;
606 }
607#endif
608
609 if ( RT_SUCCESS(rc)
610 && ( PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize - 1)
611 || Addr + cbSize < Addr))
612 {
613 /* Don't recursively call PGMVerifyAccess as we might run out of stack. */
614 for (;;)
615 {
616 Addr += PAGE_SIZE;
617 if (cbSize > PAGE_SIZE)
618 cbSize -= PAGE_SIZE;
619 else
620 cbSize = 1;
621 rc = PGMVerifyAccess(pVM, Addr, 1, fAccess);
622 if (rc != VINF_SUCCESS)
623 break;
624 if (PAGE_ADDRESS(Addr) == PAGE_ADDRESS(Addr + cbSize - 1))
625 break;
626 }
627 }
628 return rc;
629}
630
631
632/**
633 * Emulation of the invlpg instruction (HC only actually).
634 *
635 * @returns VBox status code, special care required.
636 * @retval VINF_PGM_SYNC_CR3 - handled.
637 * @retval VINF_EM_RAW_EMULATE_INSTR - not handled (RC only).
638 * @retval VERR_REM_FLUSHED_PAGES_OVERFLOW - not handled.
639 *
640 * @param pVM VM handle.
641 * @param GCPtrPage Page to invalidate.
642 *
643 * @remark ASSUMES the page table entry or page directory is valid. Fairly
644 * safe, but there could be edge cases!
645 *
646 * @todo Flush page or page directory only if necessary!
647 */
648VMMDECL(int) PGMInvalidatePage(PVM pVM, RTGCPTR GCPtrPage)
649{
650 int rc;
651 Log3(("PGMInvalidatePage: GCPtrPage=%RGv\n", GCPtrPage));
652
653#ifndef IN_RING3
654 /*
655 * Notify the recompiler so it can record this instruction.
656 * Failure happens when it's out of space. We'll return to HC in that case.
657 */
658 rc = REMNotifyInvalidatePage(pVM, GCPtrPage);
659 if (rc != VINF_SUCCESS)
660 return rc;
661#endif /* !IN_RING3 */
662
663
664#ifdef IN_RC
665 /*
666 * Check for conflicts and pending CR3 monitoring updates.
667 */
668 if (!pVM->pgm.s.fMappingsFixed)
669 {
670 if ( pgmGetMapping(pVM, GCPtrPage)
671 && PGMGstGetPage(pVM, GCPtrPage, NULL, NULL) != VERR_PAGE_TABLE_NOT_PRESENT)
672 {
673 LogFlow(("PGMGCInvalidatePage: Conflict!\n"));
674 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
675 STAM_COUNTER_INC(&pVM->pgm.s.StatRCInvlPgConflict);
676 return VINF_PGM_SYNC_CR3;
677 }
678
679 if (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
680 {
681 LogFlow(("PGMGCInvalidatePage: PGM_SYNC_MONITOR_CR3 -> reinterpret instruction in R3\n"));
682 STAM_COUNTER_INC(&pVM->pgm.s.StatRCInvlPgSyncMonCR3);
683 return VINF_EM_RAW_EMULATE_INSTR;
684 }
685 }
686#endif /* IN_RC */
687
688 /*
689 * Call paging mode specific worker.
690 */
691 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePage), a);
692 rc = PGM_BTH_PFN(InvalidatePage, pVM)(pVM, GCPtrPage);
693 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePage), a);
694
695#ifdef IN_RING3
696 /*
697 * Check if we have a pending update of the CR3 monitoring.
698 */
699 if ( RT_SUCCESS(rc)
700 && (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3))
701 {
702 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
703 Assert(!pVM->pgm.s.fMappingsFixed);
704 Assert(pVM->pgm.s.GCPhysCR3 == pVM->pgm.s.GCPhysGstCR3Monitored);
705 rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, pVM->pgm.s.GCPhysCR3);
706 }
707
708 /*
709 * Inform CSAM about the flush
710 *
711 * Note: This is to check if monitored pages have been changed; when we implement
712 * callbacks for virtual handlers, this will no longer be required.
713 */
714 CSAMR3FlushPage(pVM, GCPtrPage);
715#endif /* IN_RING3 */
716 return rc;
717}
718
719
720/**
721 * Executes an instruction using the interpreter.
722 *
723 * @returns VBox status code (appropriate for trap handling and GC return).
724 * @param pVM VM handle.
725 * @param pRegFrame Register frame.
726 * @param pvFault Fault address.
727 */
728VMMDECL(int) PGMInterpretInstruction(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
729{
730 uint32_t cb;
731 int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);
732 if (rc == VERR_EM_INTERPRETER)
733 rc = VINF_EM_RAW_EMULATE_INSTR;
734 if (rc != VINF_SUCCESS)
735 Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%RGv)\n", rc, pvFault));
736 return rc;
737}
738
739
740/**
741 * Gets effective page information (from the VMM page directory).
742 *
743 * @returns VBox status.
744 * @param pVM VM Handle.
745 * @param GCPtr Guest Context virtual address of the page.
746 * @param pfFlags Where to store the flags. These are X86_PTE_*.
747 * @param pHCPhys Where to store the HC physical address of the page.
748 * This is page aligned.
749 * @remark You should use PGMMapGetPage() for pages in a mapping.
750 */
751VMMDECL(int) PGMShwGetPage(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
752{
753 return PGM_SHW_PFN(GetPage,pVM)(pVM, GCPtr, pfFlags, pHCPhys);
754}
755
756
757/**
758 * Sets (replaces) the page flags for a range of pages in the shadow context.
759 *
760 * @returns VBox status.
761 * @param pVM VM handle.
762 * @param GCPtr The address of the first page.
763 * @param cb The size of the range in bytes.
764 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
765 * @remark You must use PGMMapSetPage() for pages in a mapping.
766 */
767VMMDECL(int) PGMShwSetPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
768{
769 return PGMShwModifyPage(pVM, GCPtr, cb, fFlags, 0);
770}
771
772
773/**
774 * Modify page flags for a range of pages in the shadow context.
775 *
776 * The existing flags are ANDed with the fMask and ORed with the fFlags.
777 *
778 * @returns VBox status code.
779 * @param pVM VM handle.
780 * @param GCPtr Virtual address of the first page in the range.
781 * @param cb Size (in bytes) of the range to apply the modification to.
782 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
783 * @param fMask The AND mask - page flags X86_PTE_*.
784 * Be very CAREFUL when ~'ing constants which could be 32-bit!
785 * @remark You must use PGMMapModifyPage() for pages in a mapping.
786 */
787VMMDECL(int) PGMShwModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
788{
789 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
790 Assert(cb);
791
792 /*
793 * Align the input.
794 */
795 cb += GCPtr & PAGE_OFFSET_MASK;
796 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
797 GCPtr = (GCPtr & PAGE_BASE_GC_MASK); /** @todo this ain't necessary, right... */
798
799 /*
800 * Call worker.
801 */
802 return PGM_SHW_PFN(ModifyPage, pVM)(pVM, GCPtr, cb, fFlags, fMask);
803}
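
/*
 * Editorial illustration (not part of the original source): because the
 * existing flags are ANDed with fMask, clearing a bit requires a full 64-bit
 * complement, as the CAREFUL remark above warns.  A hypothetical caller that
 * write-protects a range could do:
 *
 *     // clear X86_PTE_RW, set nothing
 *     int rc = PGMShwModifyPage(pVM, GCPtr, cb, 0, ~(uint64_t)X86_PTE_RW);
 *
 * Writing ~X86_PTE_RW without the cast would truncate the AND mask to 32 bits
 * and accidentally clear the upper flag bits (such as the NX bit).
 */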
804
805
806/**
807 * Syncs the SHADOW page directory pointer for the specified address.
808 *
809 * Allocates backing pages in case the PDPT entry is missing.
810 *
811 * @returns VBox status.
812 * @param pVM VM handle.
813 * @param GCPtr The address.
814 * @param pGstPdpe Guest PDPT entry
815 * @param ppPD Receives address of page directory
816 * @remarks Unused.
817 */
818DECLINLINE(int) pgmShwSyncPAEPDPtr(PVM pVM, RTGCPTR GCPtr, PX86PDPE pGstPdpe, PX86PDPAE *ppPD)
819{
820 PPGM pPGM = &pVM->pgm.s;
821 PPGMPOOL pPool = pPGM->CTX_SUFF(pPool);
822 PPGMPOOLPAGE pShwPage;
823 int rc;
824
825 Assert(!HWACCMIsNestedPagingActive(pVM));
826
827 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
828 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
829 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
830
831 /* Allocate page directory if not present. */
832 if ( !pPdpe->n.u1Present
833 && !(pPdpe->u & X86_PDPE_PG_MASK))
834 {
835 PX86PDPE pPdptGst = pgmGstGetPaePDPEPtr(pPGM, GCPtr);
836
837 Assert(!(pPdpe->u & X86_PDPE_PG_MASK));
838 /* Create a reference back to the PDPT by using the index in its shadow page. */
839 rc = pgmPoolAlloc(pVM, pPdptGst->u & X86_PDPE_PG_MASK, PGMPOOLKIND_PAE_PD_FOR_PAE_PD, PGMPOOL_IDX_PDPT, iPdPt, &pShwPage);
840 if (rc == VERR_PGM_POOL_FLUSHED)
841 {
842 Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
843 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
844 return VINF_PGM_SYNC_CR3;
845 }
846 AssertRCReturn(rc, rc);
847 }
848 else
849 {
850 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
851 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
852 }
853 /* The PD was cached or created; hook it up now. */
854 pPdpe->u |= pShwPage->Core.Key
855 | (pGstPdpe->u & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));
856
857 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
858 return VINF_SUCCESS;
859}
860
861
862/**
863 * Gets the SHADOW page directory pointer for the specified address.
864 *
865 * @returns VBox status.
866 * @param pVM VM handle.
867 * @param GCPtr The address.
868 * @param ppPdpt Receives address of pdpt
869 * @param ppPD Receives address of page directory
870 * @remarks Unused.
871 */
872DECLINLINE(int) pgmShwGetPAEPDPtr(PVM pVM, RTGCPTR GCPtr, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
873{
874 PPGM pPGM = &pVM->pgm.s;
875 PPGMPOOL pPool = pPGM->CTX_SUFF(pPool);
876 PPGMPOOLPAGE pShwPage;
877
878 Assert(!HWACCMIsNestedPagingActive(pVM));
879
880 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
881 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
882 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
883
884 *ppPdpt = pPdpt;
885 if (!pPdpe->n.u1Present)
886 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
887
888 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
889 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
890
891 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
892 return VINF_SUCCESS;
893}
894
895#ifndef IN_RC
896
897/**
898 * Syncs the SHADOW page directory pointer for the specified address.
899 *
900 * Allocates backing pages in case the PDPT or PML4 entry is missing.
901 *
902 * The caller is responsible for making sure the guest has a valid PD before
903 * calling this function.
904 *
905 * @returns VBox status.
906 * @param pVM VM handle.
907 * @param GCPtr The address.
908 * @param pGstPml4e Guest PML4 entry
909 * @param pGstPdpe Guest PDPT entry
910 * @param ppPD Receives address of page directory
911 */
912DECLINLINE(int) pgmShwSyncLongModePDPtr(PVM pVM, RTGCPTR64 GCPtr, PX86PML4E pGstPml4e, PX86PDPE pGstPdpe, PX86PDPAE *ppPD)
913{
914 PPGM pPGM = &pVM->pgm.s;
915 PPGMPOOL pPool = pPGM->CTX_SUFF(pPool);
916 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
917 PX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pPGM, iPml4);
918 bool fNestedPaging = HWACCMIsNestedPagingActive(pVM);
919 PPGMPOOLPAGE pShwPage;
920 X86PML4E Pml4eGst;
921 int rc;
922
923 /* Allocate page directory pointer table if not present. */
924 if ( !pPml4e->n.u1Present
925 && !(pPml4e->u & X86_PML4E_PG_MASK))
926 {
927 Assert(!(pPml4e->u & X86_PML4E_PG_MASK));
928 if (!fNestedPaging)
929 {
930 /** @todo why are we looking up the guest PML4E here? Isn't pGstPml4e
931 * trustworthy? (Remove pgmGstGetLongModePML4E if pGstPml4e and pGstPdpe
932 * are fine.) */
933 Assert(pVM->pgm.s.CTX_SUFF(pShwPageCR3));
934 Pml4eGst = pgmGstGetLongModePML4E(&pVM->pgm.s, iPml4);
935
936 rc = pgmPoolAlloc(pVM, Pml4eGst.u & X86_PML4E_PG_MASK,
937 PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, &pShwPage);
938 }
939 else
940 {
941 RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT;
942 rc = pgmPoolAlloc(pVM, GCPml4 + RT_BIT_64(63) /* hack: make the address unique */,
943 PGMPOOLKIND_64BIT_PDPT_FOR_PHYS, PGMPOOL_IDX_NESTED_ROOT, iPml4, &pShwPage);
944 }
945
946 if (rc == VERR_PGM_POOL_FLUSHED)
947 {
948 Log(("PGMShwSyncLongModePDPtr: PGM pool flushed (1) -> signal sync cr3\n"));
949 Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
950 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
951 return VINF_PGM_SYNC_CR3;
952 }
953 AssertRCReturn(rc, rc);
954 }
955 else
956 {
957 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
958 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
959 }
960 /* The PDPT was cached or created; hook it up now. */
961 pPml4e->u |= pShwPage->Core.Key
962 | (pGstPml4e->u & ~(X86_PML4E_PG_MASK | X86_PML4E_AVL_MASK | X86_PML4E_PCD | X86_PML4E_PWT));
963
964 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
965 PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
966 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
967
968 /* Allocate page directory if not present. */
969 if ( !pPdpe->n.u1Present
970 && !(pPdpe->u & X86_PDPE_PG_MASK))
971 {
972 if (!fNestedPaging)
973 {
974 /** @todo why are we looking up the guest PDPTE here? Isn't pGstPdpe
975 * trustworthy? */
976 Pml4eGst = pgmGstGetLongModePML4E(&pVM->pgm.s, iPml4);
977 PX86PDPT pPdptGst;
978 rc = PGM_GCPHYS_2_PTR(pVM, Pml4eGst.u & X86_PML4E_PG_MASK, &pPdptGst);
979 AssertRCReturn(rc, rc);
980
981 Assert(!(pPdpe->u & X86_PDPE_PG_MASK));
982 /* Create a reference back to the PDPT by using the index in its shadow page. */
983 rc = pgmPoolAlloc(pVM, pPdptGst->a[iPdPt].u & X86_PDPE_PG_MASK, PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD, pShwPage->idx, iPdPt, &pShwPage);
984 }
985 else
986 {
987 RTGCPTR64 GCPdPt = (RTGCPTR64)iPdPt << EPT_PDPT_SHIFT;
988
989 rc = pgmPoolAlloc(pVM, GCPdPt + RT_BIT_64(62) /* hack: make the address unique */, PGMPOOLKIND_64BIT_PD_FOR_PHYS, pShwPage->idx, iPdPt, &pShwPage);
990 }
991
992 if (rc == VERR_PGM_POOL_FLUSHED)
993 {
994 Log(("PGMShwSyncLongModePDPtr: PGM pool flushed (2) -> signal sync cr3\n"));
995 Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
996 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
997 return VINF_PGM_SYNC_CR3;
998 }
999 AssertRCReturn(rc, rc);
1000 }
1001 else
1002 {
1003 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
1004 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1005 }
1006 /* The PD was cached or created; hook it up now. */
1007 pPdpe->u |= pShwPage->Core.Key
1008 | (pGstPdpe->u & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));
1009
1010 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1011 return VINF_SUCCESS;
1012}
1013
1014
1015/**
1016 * Gets the SHADOW page directory pointer for the specified address (long mode).
1017 *
1018 * @returns VBox status.
1019 * @param pVM VM handle.
1020 * @param GCPtr The address.
1021 * @param ppPdpt Receives address of pdpt
1022 * @param ppPD Receives address of page directory
1023 */
1024DECLINLINE(int) pgmShwGetLongModePDPtr(PVM pVM, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
1025{
1026 PPGM pPGM = &pVM->pgm.s;
1027 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1028 PCX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pPGM, iPml4);
1029 AssertReturn(pPml4e, VERR_INTERNAL_ERROR);
1030 if (ppPml4e)
1031 *ppPml4e = (PX86PML4E)pPml4e;
1032 if (!pPml4e->n.u1Present)
1033 return VERR_PAGE_MAP_LEVEL4_NOT_PRESENT;
1034
1035 PPGMPOOL pPool = pPGM->CTX_SUFF(pPool);
1036 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
1037 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1038
1039 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1040 PCX86PDPT pPdpt = *ppPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1041 if (!pPdpt->a[iPdPt].n.u1Present)
1042 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1043
1044 pShwPage = pgmPoolGetPage(pPool, pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
1045 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1046
1047 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1048 return VINF_SUCCESS;
1049}
1050
1051
1052/**
1053 * Syncs the SHADOW EPT page directory pointer for the specified address. Allocates
1054 * backing pages in case the PDPT or PML4 entry is missing.
1055 *
1056 * @returns VBox status.
1057 * @param pVM VM handle.
1058 * @param GCPtr The address.
1059 * @param ppPdpt Receives address of pdpt
1060 * @param ppPD Receives address of page directory
1061 */
1062DECLINLINE(int) pgmShwGetEPTPDPtr(PVM pVM, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD)
1063{
1064 PPGM pPGM = &pVM->pgm.s;
1065 const unsigned iPml4 = (GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
1066 PPGMPOOL pPool = pPGM->CTX_SUFF(pPool);
1067 PEPTPML4 pPml4;
1068 PEPTPML4E pPml4e;
1069 PPGMPOOLPAGE pShwPage;
1070 int rc;
1071
1072 Assert(HWACCMIsNestedPagingActive(pVM));
1073
1074# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1075 rc = PGM_HCPHYS_2_PTR(pVM, pPGM->HCPhysShwNestedRoot, &pPml4);
1076 AssertRCReturn(rc, rc);
1077# else
1078 pPml4 = (PEPTPML4)pPGM->CTX_SUFF(pShwNestedRoot);
1079# endif
1080 Assert(pPml4);
1081
1082 /* Allocate page directory pointer table if not present. */
1083 pPml4e = &pPml4->a[iPml4];
1084 if ( !pPml4e->n.u1Present
1085 && !(pPml4e->u & EPT_PML4E_PG_MASK))
1086 {
1087 Assert(!(pPml4e->u & EPT_PML4E_PG_MASK));
1088 RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT;
1089
1090 rc = pgmPoolAlloc(pVM, GCPml4 + RT_BIT_64(63) /* hack: make the address unique */, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOL_IDX_NESTED_ROOT, iPml4, &pShwPage);
1091 if (rc == VERR_PGM_POOL_FLUSHED)
1092 {
1093 Log(("PGMShwSyncEPTPDPtr: PGM pool flushed (1) -> signal sync cr3\n"));
1094 Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
1095 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
1096 return VINF_PGM_SYNC_CR3;
1097 }
1098 AssertRCReturn(rc, rc);
1099 }
1100 else
1101 {
1102 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
1103 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1104 }
1105 /* The PDPT was cached or created; hook it up now and fill with the default value. */
1106 pPml4e->u = pShwPage->Core.Key;
1107 pPml4e->n.u1Present = 1;
1108 pPml4e->n.u1Write = 1;
1109 pPml4e->n.u1Execute = 1;
1110
1111 const unsigned iPdPt = (GCPtr >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
1112 PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1113 PEPTPDPTE pPdpe = &pPdpt->a[iPdPt];
1114
1115 if (ppPdpt)
1116 *ppPdpt = pPdpt;
1117
1118 /* Allocate page directory if not present. */
1119 if ( !pPdpe->n.u1Present
1120 && !(pPdpe->u & EPT_PDPTE_PG_MASK))
1121 {
1122 RTGCPTR64 GCPdPt = (RTGCPTR64)iPdPt << EPT_PDPT_SHIFT;
1123
1124 rc = pgmPoolAlloc(pVM, GCPdPt + RT_BIT_64(62) /* hack: make the address unique */, PGMPOOLKIND_64BIT_PD_FOR_PHYS, pShwPage->idx, iPdPt, &pShwPage);
1125 if (rc == VERR_PGM_POOL_FLUSHED)
1126 {
1127 Log(("PGMShwSyncEPTPDPtr: PGM pool flushed (2) -> signal sync cr3\n"));
1128 Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
1129 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
1130 return VINF_PGM_SYNC_CR3;
1131 }
1132 AssertRCReturn(rc, rc);
1133 }
1134 else
1135 {
1136 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & EPT_PDPTE_PG_MASK);
1137 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1138 }
1139 /* The PD was cached or created; hook it up now and fill with the default value. */
1140 pPdpe->u = pShwPage->Core.Key;
1141 pPdpe->n.u1Present = 1;
1142 pPdpe->n.u1Write = 1;
1143 pPdpe->n.u1Execute = 1;
1144
1145 *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1146 return VINF_SUCCESS;
1147}
1148
1149#endif /* !IN_RC */
1150
1151/**
1152 * Gets effective Guest OS page information.
1153 *
1154 * When GCPtr is in a big page, the function will return as if it was a normal
1155 * 4KB page. If distinguishing between big and normal pages becomes necessary
1156 * at a later point, a dedicated variant of PGMGstGetPage() can be added for
1157 * that purpose.
1158 *
1159 * @returns VBox status.
1160 * @param pVM VM Handle.
1161 * @param GCPtr Guest Context virtual address of the page.
1162 * @param pfFlags Where to store the flags. These are X86_PTE_*, even for big pages.
1163 * @param pGCPhys Where to store the GC physical address of the page.
1164 * This is page aligned. The fact that the
1165 */
1166VMMDECL(int) PGMGstGetPage(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
1167{
1168 return PGM_GST_PFN(GetPage,pVM)(pVM, GCPtr, pfFlags, pGCPhys);
1169}
1170
1171
1172/**
1173 * Checks if the page is present.
1174 *
1175 * @returns true if the page is present.
1176 * @returns false if the page is not present.
1177 * @param pVM The VM handle.
1178 * @param GCPtr Address within the page.
1179 */
1180VMMDECL(bool) PGMGstIsPagePresent(PVM pVM, RTGCPTR GCPtr)
1181{
1182 int rc = PGMGstGetPage(pVM, GCPtr, NULL, NULL);
1183 return RT_SUCCESS(rc);
1184}
1185
1186
1187/**
1188 * Sets (replaces) the page flags for a range of pages in the guest's tables.
1189 *
1190 * @returns VBox status.
1191 * @param pVM VM handle.
1192 * @param GCPtr The address of the first page.
1193 * @param cb The size of the range in bytes.
1194 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
1195 */
1196VMMDECL(int) PGMGstSetPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
1197{
1198 return PGMGstModifyPage(pVM, GCPtr, cb, fFlags, 0);
1199}
1200
1201
1202/**
1203 * Modify page flags for a range of pages in the guest's tables
1204 *
1205 * The existing flags are ANDed with the fMask and ORed with the fFlags.
1206 *
1207 * @returns VBox status code.
1208 * @param pVM VM handle.
1209 * @param GCPtr Virtual address of the first page in the range.
1210 * @param cb Size (in bytes) of the range to apply the modification to.
1211 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
1212 * @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
1213 * Be very CAREFUL when ~'ing constants which could be 32-bit!
1214 */
1215VMMDECL(int) PGMGstModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
1216{
1217 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,GstModifyPage), a);
1218
1219 /*
1220 * Validate input.
1221 */
1222 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
1223 Assert(cb);
1224
1225 LogFlow(("PGMGstModifyPage %RGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));
1226
1227 /*
1228 * Adjust input.
1229 */
1230 cb += GCPtr & PAGE_OFFSET_MASK;
1231 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
1232 GCPtr = (GCPtr & PAGE_BASE_GC_MASK);
1233
1234 /*
1235 * Call worker.
1236 */
1237 int rc = PGM_GST_PFN(ModifyPage, pVM)(pVM, GCPtr, cb, fFlags, fMask);
1238
1239 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,GstModifyPage), a);
1240 return rc;
1241}
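
/*
 * Editorial illustration (not part of the original source): a hypothetical
 * caller that wants to mark a guest page accessed and dirty (e.g. after an
 * emulated write) would OR the bits in while keeping all existing flags:
 *
 *     int rc = PGMGstModifyPage(pVM, GCPtr, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)0);
 *
 * An fMask of ~(uint64_t)0 preserves every existing flag; fFlags adds A and D.
 */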
1242
1243
1244/**
1245 * Gets the specified page directory pointer table entry.
1246 *
1247 * @returns PDP entry
1248 * @param pPGM Pointer to the PGM instance data.
1249 * @param iPdpt PDPT index
1250 */
1251VMMDECL(X86PDPE) PGMGstGetPaePDPtr(PVM pVM, unsigned iPdpt)
1252{
1253 Assert(iPdpt <= 3);
1254 return pgmGstGetPaePDPTPtr(&pVM->pgm.s)->a[iPdpt & 3];
1255}
1256
1257
1258/**
1259 * Gets the current CR3 register value for the shadow memory context.
1260 * @returns CR3 value.
1261 * @param pVM The VM handle.
1262 */
1263VMMDECL(RTHCPHYS) PGMGetHyperCR3(PVM pVM)
1264{
1265 PGMMODE enmShadowMode = pVM->pgm.s.enmShadowMode;
1266 switch (enmShadowMode)
1267 {
1268 case PGMMODE_32_BIT:
1269 return pVM->pgm.s.HCPhysShw32BitPD;
1270
1271 case PGMMODE_PAE:
1272 case PGMMODE_PAE_NX:
1273 return pVM->pgm.s.HCPhysShwPaePdpt;
1274
1275 case PGMMODE_AMD64:
1276 case PGMMODE_AMD64_NX:
1277 return pVM->pgm.s.HCPhysShwCR3;
1278
1279 case PGMMODE_EPT:
1280 return pVM->pgm.s.HCPhysShwNestedRoot;
1281
1282 case PGMMODE_NESTED:
1283 return PGMGetNestedCR3(pVM, PGMGetHostMode(pVM));
1284
1285 default:
1286 AssertMsgFailed(("enmShadowMode=%d\n", enmShadowMode));
1287 return ~0;
1288 }
1289}
1290
1291
1292/**
1293 * Gets the current CR3 register value for the nested memory context.
1294 * @returns CR3 value.
1295 * @param pVM The VM handle.
1296 */
1297VMMDECL(RTHCPHYS) PGMGetNestedCR3(PVM pVM, PGMMODE enmShadowMode)
1298{
1299 switch (enmShadowMode)
1300 {
1301 case PGMMODE_32_BIT:
1302 return pVM->pgm.s.HCPhysShw32BitPD;
1303
1304 case PGMMODE_PAE:
1305 case PGMMODE_PAE_NX:
1306 return pVM->pgm.s.HCPhysShwPaePdpt;
1307
1308 case PGMMODE_AMD64:
1309 case PGMMODE_AMD64_NX:
1310 return pVM->pgm.s.HCPhysShwCR3;
1311
1312 default:
1313 AssertMsgFailed(("enmShadowMode=%d\n", enmShadowMode));
1314 return ~0;
1315 }
1316}
1317
1318
1319/**
1320 * Gets the current CR3 register value for the EPT paging memory context.
1321 * @returns CR3 value.
1322 * @param pVM The VM handle.
1323 */
1324VMMDECL(RTHCPHYS) PGMGetEPTCR3(PVM pVM)
1325{
1326 return pVM->pgm.s.HCPhysShwNestedRoot;
1327}
1328
1329
1330/**
1331 * Gets the CR3 register value for the 32-Bit shadow memory context.
1332 * @returns CR3 value.
1333 * @param pVM The VM handle.
1334 */
1335VMMDECL(RTHCPHYS) PGMGetHyper32BitCR3(PVM pVM)
1336{
1337 return pVM->pgm.s.HCPhysShw32BitPD;
1338}
1339
1340
1341/**
1342 * Gets the CR3 register value for the PAE shadow memory context.
1343 * @returns CR3 value.
1344 * @param pVM The VM handle.
1345 */
1346VMMDECL(RTHCPHYS) PGMGetHyperPaeCR3(PVM pVM)
1347{
1348 return pVM->pgm.s.HCPhysShwPaePdpt;
1349}
1350
1351
1352/**
1353 * Gets the CR3 register value for the AMD64 shadow memory context.
1354 * @returns CR3 value.
1355 * @param pVM The VM handle.
1356 */
1357VMMDECL(RTHCPHYS) PGMGetHyperAmd64CR3(PVM pVM)
1358{
1359 return pVM->pgm.s.HCPhysShwCR3;
1360}
1361
1362
1363/**
1364 * Gets the current CR3 register value for the HC intermediate memory context.
1365 * @returns CR3 value.
1366 * @param pVM The VM handle.
1367 */
1368VMMDECL(RTHCPHYS) PGMGetInterHCCR3(PVM pVM)
1369{
1370 switch (pVM->pgm.s.enmHostMode)
1371 {
1372 case SUPPAGINGMODE_32_BIT:
1373 case SUPPAGINGMODE_32_BIT_GLOBAL:
1374 return pVM->pgm.s.HCPhysInterPD;
1375
1376 case SUPPAGINGMODE_PAE:
1377 case SUPPAGINGMODE_PAE_GLOBAL:
1378 case SUPPAGINGMODE_PAE_NX:
1379 case SUPPAGINGMODE_PAE_GLOBAL_NX:
1380 return pVM->pgm.s.HCPhysInterPaePDPT;
1381
1382 case SUPPAGINGMODE_AMD64:
1383 case SUPPAGINGMODE_AMD64_GLOBAL:
1384 case SUPPAGINGMODE_AMD64_NX:
1385 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
1386 return pVM->pgm.s.HCPhysInterPaePDPT;
1387
1388 default:
1389 AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode));
1390 return ~0;
1391 }
1392}
1393
1394
1395/**
1396 * Gets the current CR3 register value for the RC intermediate memory context.
1397 * @returns CR3 value.
1398 * @param pVM The VM handle.
1399 */
1400VMMDECL(RTHCPHYS) PGMGetInterRCCR3(PVM pVM)
1401{
1402 switch (pVM->pgm.s.enmShadowMode)
1403 {
1404 case PGMMODE_32_BIT:
1405 return pVM->pgm.s.HCPhysInterPD;
1406
1407 case PGMMODE_PAE:
1408 case PGMMODE_PAE_NX:
1409 return pVM->pgm.s.HCPhysInterPaePDPT;
1410
1411 case PGMMODE_AMD64:
1412 case PGMMODE_AMD64_NX:
1413 return pVM->pgm.s.HCPhysInterPaePML4;
1414
1415 case PGMMODE_EPT:
1416 case PGMMODE_NESTED:
1417 return 0; /* not relevant */
1418
1419 default:
1420 AssertMsgFailed(("enmShadowMode=%d\n", pVM->pgm.s.enmShadowMode));
1421 return ~0;
1422 }
1423}
1424
1425
1426/**
1427 * Gets the CR3 register value for the 32-Bit intermediate memory context.
1428 * @returns CR3 value.
1429 * @param pVM The VM handle.
1430 */
1431VMMDECL(RTHCPHYS) PGMGetInter32BitCR3(PVM pVM)
1432{
1433 return pVM->pgm.s.HCPhysInterPD;
1434}
1435
1436
1437/**
1438 * Gets the CR3 register value for the PAE intermediate memory context.
1439 * @returns CR3 value.
1440 * @param pVM The VM handle.
1441 */
1442VMMDECL(RTHCPHYS) PGMGetInterPaeCR3(PVM pVM)
1443{
1444 return pVM->pgm.s.HCPhysInterPaePDPT;
1445}
1446
1447
1448/**
1449 * Gets the CR3 register value for the AMD64 intermediate memory context.
1450 * @returns CR3 value.
1451 * @param pVM The VM handle.
1452 */
1453VMMDECL(RTHCPHYS) PGMGetInterAmd64CR3(PVM pVM)
1454{
1455 return pVM->pgm.s.HCPhysInterPaePML4;
1456}
1457
1458
1459/**
1460 * Performs and schedules necessary updates following a CR3 load or reload.
1461 *
1462 * This will normally involve mapping the guest PD or nPDPT
1463 *
1464 * @returns VBox status code.
1465 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
1466 * safely be ignored and overridden since the FF will be set too then.
1467 * @param pVM VM handle.
1468 * @param cr3 The new cr3.
1469 * @param fGlobal Indicates whether this is a global flush or not.
1470 */
1471VMMDECL(int) PGMFlushTLB(PVM pVM, uint64_t cr3, bool fGlobal)
1472{
1473 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLB), a);
1474
1475 /*
1476 * Always flag the necessary updates; this is required for hardware acceleration.
1477 */
1478 /** @todo optimize this, it shouldn't always be necessary. */
1479 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
1480 if (fGlobal)
1481 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
1482 LogFlow(("PGMFlushTLB: cr3=%RX64 OldCr3=%RX64 fGlobal=%d\n", cr3, pVM->pgm.s.GCPhysCR3, fGlobal));
1483
1484 /*
1485 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
1486 */
1487 int rc = VINF_SUCCESS;
1488 RTGCPHYS GCPhysCR3;
1489 if ( pVM->pgm.s.enmGuestMode == PGMMODE_PAE
1490 || pVM->pgm.s.enmGuestMode == PGMMODE_PAE_NX
1491 || pVM->pgm.s.enmGuestMode == PGMMODE_AMD64
1492 || pVM->pgm.s.enmGuestMode == PGMMODE_AMD64_NX)
1493 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1494 else
1495 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1496 if (pVM->pgm.s.GCPhysCR3 != GCPhysCR3)
1497 {
1498 RTGCPHYS GCPhysOldCR3 = pVM->pgm.s.GCPhysCR3;
1499 pVM->pgm.s.GCPhysCR3 = GCPhysCR3;
1500 rc = PGM_GST_PFN(MapCR3, pVM)(pVM, GCPhysCR3);
1501 if (RT_LIKELY(rc == VINF_SUCCESS))
1502 {
1503 if (!pVM->pgm.s.fMappingsFixed)
1504 {
1505 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1506 rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, GCPhysCR3);
1507 }
1508 }
1509 else
1510 {
1511 AssertMsg(rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
1512 Assert(VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL | VM_FF_PGM_SYNC_CR3));
1513 pVM->pgm.s.GCPhysCR3 = GCPhysOldCR3;
1514 pVM->pgm.s.fSyncFlags |= PGM_SYNC_MAP_CR3;
1515 if (!pVM->pgm.s.fMappingsFixed)
1516 pVM->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;
1517 }
1518
1519 if (fGlobal)
1520 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLBNewCR3Global));
1521 else
1522 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLBNewCR3));
1523 }
1524 else
1525 {
1526 /*
1527 * Check if we have a pending update of the CR3 monitoring.
1528 */
1529 if (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
1530 {
1531 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1532 Assert(!pVM->pgm.s.fMappingsFixed);
1533 rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, GCPhysCR3);
1534 }
1535 if (fGlobal)
1536 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLBSameCR3Global));
1537 else
1538 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLBSameCR3));
1539 }
1540
1541 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLB), a);
1542 return rc;
1543}
1544
1545
1546/**
1547 * Performs and schedules necessary updates following a CR3 load or reload when
1548 * using nested or extended paging.
1549 *
1550 * This API is an alternative to PGMFlushTLB that avoids actually flushing the
1551 * TLB and triggering a SyncCR3.
1552 *
1553 * This will normally involve mapping the guest PD or nPDPT
1554 *
1555 * @returns VBox status code.
1556 * @retval VINF_SUCCESS.
1557 * @retval (If applied when not in nested mode: VINF_PGM_SYNC_CR3 if monitoring
1558 * requires a CR3 sync. This can safely be ignored and overridden since
1559 * the FF will be set too then.)
1560 * @param pVM VM handle.
1561 * @param cr3 The new cr3.
1562 */
1563VMMDECL(int) PGMUpdateCR3(PVM pVM, uint64_t cr3)
1564{
1565 LogFlow(("PGMUpdateCR3: cr3=%RX64 OldCr3=%RX64\n", cr3, pVM->pgm.s.GCPhysCR3));
1566
1567 /* We assume we're only called in nested paging mode. */
1568 Assert(pVM->pgm.s.fMappingsFixed);
1569 Assert(!(pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3));
1570 Assert(pVM->pgm.s.enmShadowMode == PGMMODE_NESTED || pVM->pgm.s.enmShadowMode == PGMMODE_EPT);
1571
1572 /*
1573 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
1574 */
1575 int rc = VINF_SUCCESS;
1576 RTGCPHYS GCPhysCR3;
1577 if ( pVM->pgm.s.enmGuestMode == PGMMODE_PAE
1578 || pVM->pgm.s.enmGuestMode == PGMMODE_PAE_NX
1579 || pVM->pgm.s.enmGuestMode == PGMMODE_AMD64
1580 || pVM->pgm.s.enmGuestMode == PGMMODE_AMD64_NX)
1581 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1582 else
1583 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1584 if (pVM->pgm.s.GCPhysCR3 != GCPhysCR3)
1585 {
1586 pVM->pgm.s.GCPhysCR3 = GCPhysCR3;
1587 rc = PGM_GST_PFN(MapCR3, pVM)(pVM, GCPhysCR3);
1588 AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */
1589 }
1590 return rc;
1591}
1592
1593
1594/**
1595 * Synchronize the paging structures.
1596 *
1597 * This function is called in response to the VM_FF_PGM_SYNC_CR3 and
1598 * VM_FF_PGM_SYNC_CR3_NON_GLOBAL. Those two force action flags are set
1599 * in several places, most importantly whenever the CR3 is loaded.
1600 *
1601 * @returns VBox status code.
1602 * @param pVM The virtual machine.
1603 * @param cr0 Guest context CR0 register
1604 * @param cr3 Guest context CR3 register
1605 * @param cr4 Guest context CR4 register
1606 * @param fGlobal Including global page directories or not
1607 */
1608VMMDECL(int) PGMSyncCR3(PVM pVM, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
1609{
1610 int rc;
1611
1612 /*
1613 * We might be called when we shouldn't.
1614 *
1615 * The mode switching will ensure that the PD is resynced
1616 * after every mode switch. So, if we find ourselves here
1617 * when in protected or real mode we can safely disable the
1618 * FF and return immediately.
1619 */
1620 if (pVM->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
1621 {
1622 Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
1623 VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3);
1624 VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
1625 return VINF_SUCCESS;
1626 }
1627
1628 /* If global pages are not supported, then all flushes are global. */
1629 if (!(cr4 & X86_CR4_PGE))
1630 fGlobal = true;
1631 LogFlow(("PGMSyncCR3: cr0=%RX64 cr3=%RX64 cr4=%RX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
1632 VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3), VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL)));
1633
1634#ifdef PGMPOOL_WITH_MONITORING
1635 /*
1636 * The pool may have pending stuff and even require a return to ring-3 to
1637 * clear the whole thing.
1638 */
1639 rc = pgmPoolSyncCR3(pVM);
1640 if (rc != VINF_SUCCESS)
1641 return rc;
1642#endif
1643
1644 /*
1645 * Check if we need to finish an aborted MapCR3 call (see PGMFlushTLB).
1646 * This should be done before SyncCR3.
1647 */
1648 if (pVM->pgm.s.fSyncFlags & PGM_SYNC_MAP_CR3)
1649 {
1650 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MAP_CR3;
1651
1652 RTGCPHYS GCPhysCR3Old = pVM->pgm.s.GCPhysCR3;
1653 RTGCPHYS GCPhysCR3;
1654 if ( pVM->pgm.s.enmGuestMode == PGMMODE_PAE
1655 || pVM->pgm.s.enmGuestMode == PGMMODE_PAE_NX
1656 || pVM->pgm.s.enmGuestMode == PGMMODE_AMD64
1657 || pVM->pgm.s.enmGuestMode == PGMMODE_AMD64_NX)
1658 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1659 else
1660 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1661 pVM->pgm.s.GCPhysCR3 = GCPhysCR3;
1662 rc = PGM_GST_PFN(MapCR3, pVM)(pVM, GCPhysCR3);
1663#ifdef IN_RING3
1664 if (rc == VINF_PGM_SYNC_CR3)
1665 rc = pgmPoolSyncCR3(pVM);
1666#else
1667 if (rc == VINF_PGM_SYNC_CR3)
1668 {
1669 pVM->pgm.s.GCPhysCR3 = GCPhysCR3Old;
1670 return rc;
1671 }
1672#endif
1673 AssertRCReturn(rc, rc);
1674 AssertRCSuccessReturn(rc, VERR_INTERNAL_ERROR);
1675 }
1676
1677 /*
1678 * Let the 'Bth' function do the work and we'll just keep track of the flags.
1679 */
1680 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
1681 rc = PGM_BTH_PFN(SyncCR3, pVM)(pVM, cr0, cr3, cr4, fGlobal);
1682 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
1683 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
1684 if (rc == VINF_SUCCESS)
1685 {
1686 if (!(pVM->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
1687 {
1688 VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3);
1689 VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
1690 }
1691
1692 /*
1693 * Check if we have a pending update of the CR3 monitoring.
1694 */
1695 if (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
1696 {
1697 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1698 Assert(!pVM->pgm.s.fMappingsFixed);
1699 Assert(pVM->pgm.s.GCPhysCR3 == pVM->pgm.s.GCPhysGstCR3Monitored);
1700 rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, pVM->pgm.s.GCPhysCR3);
1701 }
1702 }
1703
1704 /*
1705 * Now flush the CR3 (guest context).
1706 */
1707 if (rc == VINF_SUCCESS)
1708 PGM_INVL_GUEST_TLBS();
1709 return rc;
1710}
1711
1712
1713/**
1714 * Called whenever CR0 or CR4 changes in a way which may affect
1715 * the paging mode.
1716 *
1717 * @returns VBox status code fit for scheduling in GC and R0.
1718 * @retval VINF_SUCCESS if there was no change, or it was successfully dealt with.
1719 * @retval VINF_PGM_CHANGE_MODE if we're in GC or R0 and the mode changes.
1720 * @param pVM VM handle.
1721 * @param cr0 The new cr0.
1722 * @param cr4 The new cr4.
1723 * @param efer The new extended feature enable register.
1724 */
1725VMMDECL(int) PGMChangeMode(PVM pVM, uint64_t cr0, uint64_t cr4, uint64_t efer)
1726{
1727 PGMMODE enmGuestMode;
1728
1729 /*
1730 * Calc the new guest mode.
1731 */
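    /* The checks below mirror the CPU's own precedence: CR0.PE, then CR0.PG,
       then CR4.PAE, then EFER.LME; EFER.NXE only selects the +NX variant. */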
1732 if (!(cr0 & X86_CR0_PE))
1733 enmGuestMode = PGMMODE_REAL;
1734 else if (!(cr0 & X86_CR0_PG))
1735 enmGuestMode = PGMMODE_PROTECTED;
1736 else if (!(cr4 & X86_CR4_PAE))
1737 enmGuestMode = PGMMODE_32_BIT;
1738 else if (!(efer & MSR_K6_EFER_LME))
1739 {
1740 if (!(efer & MSR_K6_EFER_NXE))
1741 enmGuestMode = PGMMODE_PAE;
1742 else
1743 enmGuestMode = PGMMODE_PAE_NX;
1744 }
1745 else
1746 {
1747 if (!(efer & MSR_K6_EFER_NXE))
1748 enmGuestMode = PGMMODE_AMD64;
1749 else
1750 enmGuestMode = PGMMODE_AMD64_NX;
1751 }
1752
1753 /*
1754 * Did it change?
1755 */
1756 if (pVM->pgm.s.enmGuestMode == enmGuestMode)
1757 return VINF_SUCCESS;
1758
1759 /* Flush the TLB */
1760 PGM_INVL_GUEST_TLBS();
1761
1762#ifdef IN_RING3
1763 return PGMR3ChangeMode(pVM, enmGuestMode);
1764#else
1765 LogFlow(("PGMChangeMode: returns VINF_PGM_CHANGE_MODE.\n"));
1766 return VINF_PGM_CHANGE_MODE;
1767#endif
1768}
1769
1770
1771/**
1772 * Gets the current guest paging mode.
1773 *
1774 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
1775 *
1776 * @returns The current paging mode.
1777 * @param pVM The VM handle.
1778 */
1779VMMDECL(PGMMODE) PGMGetGuestMode(PVM pVM)
1780{
1781 return pVM->pgm.s.enmGuestMode;
1782}
1783
1784
1785/**
1786 * Gets the current shadow paging mode.
1787 *
1788 * @returns The current paging mode.
1789 * @param pVM The VM handle.
1790 */
1791VMMDECL(PGMMODE) PGMGetShadowMode(PVM pVM)
1792{
1793 return pVM->pgm.s.enmShadowMode;
1794}
1795
1796/**
1797 * Gets the current host paging mode.
1798 *
1799 * @returns The current paging mode.
1800 * @param pVM The VM handle.
1801 */
1802VMMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
1803{
1804 switch (pVM->pgm.s.enmHostMode)
1805 {
1806 case SUPPAGINGMODE_32_BIT:
1807 case SUPPAGINGMODE_32_BIT_GLOBAL:
1808 return PGMMODE_32_BIT;
1809
1810 case SUPPAGINGMODE_PAE:
1811 case SUPPAGINGMODE_PAE_GLOBAL:
1812 return PGMMODE_PAE;
1813
1814 case SUPPAGINGMODE_PAE_NX:
1815 case SUPPAGINGMODE_PAE_GLOBAL_NX:
1816 return PGMMODE_PAE_NX;
1817
1818 case SUPPAGINGMODE_AMD64:
1819 case SUPPAGINGMODE_AMD64_GLOBAL:
1820 return PGMMODE_AMD64;
1821
1822 case SUPPAGINGMODE_AMD64_NX:
1823 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
1824 return PGMMODE_AMD64_NX;
1825
1826 default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
1827 }
1828
1829 return PGMMODE_INVALID;
1830}
1831
1832
1833/**
1834 * Get mode name.
1835 *
1836 * @returns read-only name string.
1837 * @param enmMode The mode whose name is desired.
1838 */
1839VMMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
1840{
1841 switch (enmMode)
1842 {
1843 case PGMMODE_REAL: return "Real";
1844 case PGMMODE_PROTECTED: return "Protected";
1845 case PGMMODE_32_BIT: return "32-bit";
1846 case PGMMODE_PAE: return "PAE";
1847 case PGMMODE_PAE_NX: return "PAE+NX";
1848 case PGMMODE_AMD64: return "AMD64";
1849 case PGMMODE_AMD64_NX: return "AMD64+NX";
1850 case PGMMODE_NESTED: return "Nested";
1851 case PGMMODE_EPT: return "EPT";
1852 default: return "unknown mode value";
1853 }
1854}
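/* Usage sketch (not part of the original source; assumes a valid pVM in scope):
 * the getters above combined with PGMGetModeName() make for compact logging of
 * the current paging setup:
 *     Log(("PGM: guest=%s shadow=%s host=%s\n",
 *          PGMGetModeName(PGMGetGuestMode(pVM)),
 *          PGMGetModeName(PGMGetShadowMode(pVM)),
 *          PGMGetModeName(PGMGetHostMode(pVM))));
 */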
1855
1856
1857/**
1858 * Acquire the PGM lock.
1859 *
1860 * @returns VBox status code
1861 * @param pVM The VM to operate on.
1862 */
1863int pgmLock(PVM pVM)
1864{
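    /* Try to take the critical section; in RC and R0 a busy lock cannot simply
       be waited on here, so we fall back to ring-3 (VMMCALLHOST_PGM_LOCK) and
       acquire it there. */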
1865 int rc = PDMCritSectEnter(&pVM->pgm.s.CritSect, VERR_SEM_BUSY);
1866#ifdef IN_RC
1867 if (rc == VERR_SEM_BUSY)
1868 rc = VMMGCCallHost(pVM, VMMCALLHOST_PGM_LOCK, 0);
1869#elif defined(IN_RING0)
1870 if (rc == VERR_SEM_BUSY)
1871 rc = VMMR0CallHost(pVM, VMMCALLHOST_PGM_LOCK, 0);
1872#endif
1873 AssertRC(rc);
1874 return rc;
1875}
1876
1877
1878/**
1879 * Release the PGM lock.
1880 *
1881 * @returns VBox status code
1882 * @param pVM The VM to operate on.
1883 */
1884void pgmUnlock(PVM pVM)
1885{
1886 PDMCritSectLeave(&pVM->pgm.s.CritSect);
1887}
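/* Usage sketch (not part of the original source): the lock must bracket any
 * access to shared PGM state and be released on every exit path:
 *     int rc = pgmLock(pVM);
 *     AssertRCReturn(rc, rc);
 *     ... read or update pVM->pgm.s ...
 *     pgmUnlock(pVM);
 */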
1888
1889#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1890
1891/**
1892 * Temporarily maps one guest page specified by GC physical address.
1893 * These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
1894 *
1895 * Be WARNED that the dynamic page mapping area is small (8 pages), so the space is
1896 * reused after 8 mappings (or perhaps a few more if you score with the cache).
1897 *
1898 * @returns VBox status.
1899 * @param pVM VM handle.
1900 * @param GCPhys GC Physical address of the page.
1901 * @param ppv Where to store the address of the mapping.
1902 */
1903VMMDECL(int) PGMDynMapGCPage(PVM pVM, RTGCPHYS GCPhys, void **ppv)
1904{
1905 AssertMsg(!(GCPhys & PAGE_OFFSET_MASK), ("GCPhys=%RGp\n", GCPhys));
1906
1907 /*
1908 * Get the ram range.
1909 */
1910 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
1911 while (pRam && GCPhys - pRam->GCPhys >= pRam->cb)
1912 pRam = pRam->CTX_SUFF(pNext);
1913 if (!pRam)
1914 {
1915 AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
1916 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
1917 }
1918
1919 /*
1920 * Pass it on to PGMDynMapHCPage.
1921 */
1922 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT]);
1923 //Log(("PGMDynMapGCPage: GCPhys=%RGp HCPhys=%RHp\n", GCPhys, HCPhys));
1924#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1925 pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
1926#else
1927 PGMDynMapHCPage(pVM, HCPhys, ppv);
1928#endif
1929 return VINF_SUCCESS;
1930}
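/* Usage sketch (not part of the original source): map a guest page and copy it
 * out; abBuf is a hypothetical caller-provided buffer. The mapping is only
 * guaranteed until the dynamic slot is recycled, so use it immediately:
 *     void *pv;
 *     int   rc = PGMDynMapGCPage(pVM, GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK, &pv);
 *     if (RT_SUCCESS(rc))
 *         memcpy(abBuf, pv, PAGE_SIZE);
 */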
1931
1932
1933/**
1934 * Temporarily maps one guest page specified by unaligned GC physical address.
1935 * These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
1936 *
1937 * Be WARNED that the dynamic page mapping area is small (8 pages), so the space is
1938 * reused after 8 mappings (or perhaps a few more if you score with the cache).
1939 *
1940 * The caller must be aware that only the specified page is mapped and that really bad things
1941 * will happen when writing beyond the page!
1942 *
1943 * @returns VBox status.
1944 * @param pVM VM handle.
1945 * @param GCPhys GC Physical address within the page to be mapped.
1946 * @param ppv Where to store the mapping address corresponding to GCPhys.
1947 */
1948VMMDECL(int) PGMDynMapGCPageOff(PVM pVM, RTGCPHYS GCPhys, void **ppv)
1949{
1950 /*
1951 * Get the ram range.
1952 */
1953 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
1954 while (pRam && GCPhys - pRam->GCPhys >= pRam->cb)
1955 pRam = pRam->CTX_SUFF(pNext);
1956 if (!pRam)
1957 {
1958 AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
1959 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
1960 }
1961
1962 /*
1963 * Pass it on to PGMDynMapHCPage.
1964 */
1965 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT]);
1966#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1967 pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
1968#else
1969 PGMDynMapHCPage(pVM, HCPhys, ppv);
1970#endif
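    /* Add the byte offset within the page back in so the caller gets an address
       corresponding to GCPhys itself rather than to the page base. */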
1971 *ppv = (void *)((uintptr_t)*ppv | (GCPhys & PAGE_OFFSET_MASK));
1972 return VINF_SUCCESS;
1973}
1974
1975
1976# ifdef IN_RC
1977/**
1978 * Temporarily maps one host page specified by HC physical address.
1979 *
1980 * Be WARNED that the dynamic page mapping area is small (8 pages), so the space is
1981 * reused after 8 mappings (or perhaps a few more if you score with the cache).
1982 *
1983 * @returns VINF_SUCCESS, will bail out to ring-3 on failure.
1984 * @param pVM VM handle.
1985 * @param HCPhys HC Physical address of the page.
1986 * @param ppv Where to store the address of the mapping. This is the
1987 * address of the PAGE not the exact address corresponding
1988 * to HCPhys. Use PGMDynMapHCPageOff if you care for the
1989 * page offset.
1990 */
1991VMMDECL(int) PGMDynMapHCPage(PVM pVM, RTHCPHYS HCPhys, void **ppv)
1992{
1993 AssertMsg(!(HCPhys & PAGE_OFFSET_MASK), ("HCPhys=%RHp\n", HCPhys));
1994
1995 /*
1996 * Check the cache.
1997 */
1998 register unsigned iCache;
1999 if ( pVM->pgm.s.aHCPhysDynPageMapCache[iCache = 0] == HCPhys
2000 || pVM->pgm.s.aHCPhysDynPageMapCache[iCache = 1] == HCPhys
2001 || pVM->pgm.s.aHCPhysDynPageMapCache[iCache = 2] == HCPhys
2002 || pVM->pgm.s.aHCPhysDynPageMapCache[iCache = 3] == HCPhys)
2003 {
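        /* Each of the 4 cache slots is shared by two of the 8 dynamic pages
           (iPage and iPage + 4); the valid entry is the one assigned most
           recently, which depends on where the round-robin allocator
           (iDynPageMapLast) currently stands. The table below translates
           (iDynPageMapLast, iCache) into that page index. */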
2004 static const uint8_t au8Trans[MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT][RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache)] =
2005 {
2006 { 0, 5, 6, 7 },
2007 { 0, 1, 6, 7 },
2008 { 0, 1, 2, 7 },
2009 { 0, 1, 2, 3 },
2010 { 4, 1, 2, 3 },
2011 { 4, 5, 2, 3 },
2012 { 4, 5, 6, 3 },
2013 { 4, 5, 6, 7 },
2014 };
2015 Assert(RT_ELEMENTS(au8Trans) == 8);
2016 Assert(RT_ELEMENTS(au8Trans[0]) == 4);
2017 int iPage = au8Trans[pVM->pgm.s.iDynPageMapLast][iCache];
2018 void *pv = pVM->pgm.s.pbDynPageMapBaseGC + (iPage << PAGE_SHIFT);
2019 *ppv = pv;
2020 STAM_COUNTER_INC(&pVM->pgm.s.StatRCDynMapCacheHits);
2021 //Log(("PGMGCDynMapHCPage: HCPhys=%RHp pv=%p iPage=%d iCache=%d\n", HCPhys, pv, iPage, iCache));
2022 return VINF_SUCCESS;
2023 }
2024 Assert(RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) == 4);
2025 STAM_COUNTER_INC(&pVM->pgm.s.StatRCDynMapCacheMisses);
2026
2027 /*
2028 * Update the page tables.
2029 */
2030 register unsigned iPage = pVM->pgm.s.iDynPageMapLast;
2031 pVM->pgm.s.iDynPageMapLast = iPage = (iPage + 1) & ((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) - 1);
2032 Assert((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) == 8);
2033
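    /* Record the new mapping in the cache and in both the 32-bit and PAE page
       table entries backing the dynamic mapping area, as the hypervisor context
       may be running with either paging format. */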
2034 pVM->pgm.s.aHCPhysDynPageMapCache[iPage & (RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) - 1)] = HCPhys;
2035 pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage].u = (uint32_t)HCPhys | X86_PTE_P | X86_PTE_A | X86_PTE_D;
2036 pVM->pgm.s.paDynPageMapPaePTEsGC[iPage].u = HCPhys | X86_PTE_P | X86_PTE_A | X86_PTE_D;
2037
2038 void *pv = pVM->pgm.s.pbDynPageMapBaseGC + (iPage << PAGE_SHIFT);
2039 *ppv = pv;
2040 ASMInvalidatePage(pv);
2041 Log4(("PGMGCDynMapHCPage: HCPhys=%RHp pv=%p iPage=%d\n", HCPhys, pv, iPage));
2042 return VINF_SUCCESS;
2043}
2044# endif /* IN_RC */
2045
2046#endif /* IN_RC || VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
2047#ifdef VBOX_STRICT
2048
2049/**
2050 * Asserts that there are no mapping conflicts.
2051 *
2052 * @returns Number of conflicts.
2053 * @param pVM The VM Handle.
2054 */
2055VMMDECL(unsigned) PGMAssertNoMappingConflicts(PVM pVM)
2056{
2057 unsigned cErrors = 0;
2058
2059 /*
2060 * Check for mapping conflicts.
2061 */
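    /* A conflict means the guest has page tables covering a virtual address
       range that one of the hypervisor mappings occupies. */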
2062 for (PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
2063 pMapping;
2064 pMapping = pMapping->CTX_SUFF(pNext))
2065 {
2066 /** @todo This is slow and should be optimized, but since it's just assertions I don't care now. */
2067 for (RTGCPTR GCPtr = pMapping->GCPtr;
2068 GCPtr <= pMapping->GCPtrLast;
2069 GCPtr += PAGE_SIZE)
2070 {
2071 int rc = PGMGstGetPage(pVM, (RTGCPTR)GCPtr, NULL, NULL);
2072 if (rc != VERR_PAGE_TABLE_NOT_PRESENT)
2073 {
2074 AssertMsgFailed(("Conflict at %RGv with %s\n", GCPtr, R3STRING(pMapping->pszDesc)));
2075 cErrors++;
2076 break;
2077 }
2078 }
2079 }
2080
2081 return cErrors;
2082}
2083
2084
2085/**
2086 * Asserts that everything related to the guest CR3 is correctly shadowed.
2087 *
2088 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
2089 * and assert the correctness of the guest CR3 mapping before asserting that the
2090 * shadow page tables are in sync with the guest page tables.
2091 *
2092 * @returns Number of conflicts.
2093 * @param pVM The VM Handle.
2094 * @param cr3 The current guest CR3 register value.
2095 * @param cr4 The current guest CR4 register value.
2096 */
2097VMMDECL(unsigned) PGMAssertCR3(PVM pVM, uint64_t cr3, uint64_t cr4)
2098{
2099 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
2100 unsigned cErrors = PGM_BTH_PFN(AssertCR3, pVM)(pVM, cr3, cr4, 0, ~(RTGCPTR)0);
2101 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
2102 return cErrors;
2104}
2105
2106#endif /* VBOX_STRICT */