VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp@17586

Last change on this file since 17586 was 17586, checked in by vboxsync, 16 years ago

Removed all dead non-VBOX_WITH_PGMPOOL_PAGING_ONLY code.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 77.5 KB
1/* $Id: PGMAll.cpp 17586 2009-03-09 15:28:25Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor - All context code.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#define LOG_GROUP LOG_GROUP_PGM
26#include <VBox/pgm.h>
27#include <VBox/cpum.h>
28#include <VBox/selm.h>
29#include <VBox/iom.h>
30#include <VBox/sup.h>
31#include <VBox/mm.h>
32#include <VBox/stam.h>
33#include <VBox/csam.h>
34#include <VBox/patm.h>
35#include <VBox/trpm.h>
36#include <VBox/rem.h>
37#include <VBox/em.h>
38#include <VBox/hwaccm.h>
39#include <VBox/hwacc_vmx.h>
40#include "PGMInternal.h"
41#include <VBox/vm.h>
42#include <iprt/assert.h>
43#include <iprt/asm.h>
44#include <iprt/string.h>
45#include <VBox/log.h>
46#include <VBox/param.h>
47#include <VBox/err.h>
48
49
50/*******************************************************************************
51* Structures and Typedefs *
52*******************************************************************************/
53/**
54 * State structure for PGM_GST_NAME(HandlerVirtualUpdate) that's
55 * passed to PGM_GST_NAME(VirtHandlerUpdateOne) during enumeration.
56 */
57typedef struct PGMHVUSTATE
58{
59 /** The VM handle. */
60 PVM pVM;
61 /** The todo flags. */
62 RTUINT fTodo;
63 /** The CR4 register value. */
64 uint32_t cr4;
65} PGMHVUSTATE, *PPGMHVUSTATE;
66
67
68/*******************************************************************************
69* Internal Functions *
70*******************************************************************************/
71DECLINLINE(int) pgmShwGetLongModePDPtr(PVM pVM, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
72DECLINLINE(int) pgmShwGetPAEPDPtr(PVM pVM, RTGCPTR GCPtr, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
73DECLINLINE(int) pgmShwGetPaePoolPagePD(PPGM pPGM, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde);
74
75/*
76 * Shadow - 32-bit mode
77 */
78#define PGM_SHW_TYPE PGM_TYPE_32BIT
79#define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
80#include "PGMAllShw.h"
81
82/* Guest - real mode */
83#define PGM_GST_TYPE PGM_TYPE_REAL
84#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
85#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
86#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
87#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
88#include "PGMGstDefs.h"
89#include "PGMAllGst.h"
90#include "PGMAllBth.h"
91#undef BTH_PGMPOOLKIND_PT_FOR_PT
92#undef BTH_PGMPOOLKIND_ROOT
93#undef PGM_BTH_NAME
94#undef PGM_GST_TYPE
95#undef PGM_GST_NAME
96
97/* Guest - protected mode */
98#define PGM_GST_TYPE PGM_TYPE_PROT
99#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
100#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
101#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
102#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
103#include "PGMGstDefs.h"
104#include "PGMAllGst.h"
105#include "PGMAllBth.h"
106#undef BTH_PGMPOOLKIND_PT_FOR_PT
107#undef BTH_PGMPOOLKIND_ROOT
108#undef PGM_BTH_NAME
109#undef PGM_GST_TYPE
110#undef PGM_GST_NAME
111
112/* Guest - 32-bit mode */
113#define PGM_GST_TYPE PGM_TYPE_32BIT
114#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
115#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
116#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
117#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
118#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD
119#include "PGMGstDefs.h"
120#include "PGMAllGst.h"
121#include "PGMAllBth.h"
122#undef BTH_PGMPOOLKIND_PT_FOR_BIG
123#undef BTH_PGMPOOLKIND_PT_FOR_PT
124#undef BTH_PGMPOOLKIND_ROOT
125#undef PGM_BTH_NAME
126#undef PGM_GST_TYPE
127#undef PGM_GST_NAME
128
129#undef PGM_SHW_TYPE
130#undef PGM_SHW_NAME
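
/* Note added for orientation (not part of the original source): the blocks
 * above and below instantiate the PGM template code once per shadow/guest
 * mode pair. Each pass defines PGM_SHW_TYPE/PGM_GST_TYPE and the name
 * mangling macros, then re-includes PGMAllShw.h, PGMAllGst.h and
 * PGMAllBth.h so that a single generic implementation yields a separate
 * set of functions (e.g. a Trap0eHandler) for every combination. The
 * PGM_BTH_PFN()/PGM_SHW_PFN()/PGM_GST_PFN() dispatch macros used later in
 * this file select the instance matching the current paging mode. */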
131
132
133/*
134 * Shadow - PAE mode
135 */
136#define PGM_SHW_TYPE PGM_TYPE_PAE
137#define PGM_SHW_NAME(name) PGM_SHW_NAME_PAE(name)
138#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
139#include "PGMAllShw.h"
140
141/* Guest - real mode */
142#define PGM_GST_TYPE PGM_TYPE_REAL
143#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
144#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
145#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
146#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
147#include "PGMGstDefs.h"
148#include "PGMAllBth.h"
149#undef BTH_PGMPOOLKIND_PT_FOR_PT
150#undef BTH_PGMPOOLKIND_ROOT
151#undef PGM_BTH_NAME
152#undef PGM_GST_TYPE
153#undef PGM_GST_NAME
154
155/* Guest - protected mode */
156#define PGM_GST_TYPE PGM_TYPE_PROT
157#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
158#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
159#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
160#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
161#include "PGMGstDefs.h"
162#include "PGMAllBth.h"
163#undef BTH_PGMPOOLKIND_PT_FOR_PT
164#undef BTH_PGMPOOLKIND_ROOT
165#undef PGM_BTH_NAME
166#undef PGM_GST_TYPE
167#undef PGM_GST_NAME
168
169/* Guest - 32-bit mode */
170#define PGM_GST_TYPE PGM_TYPE_32BIT
171#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
172#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
173#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
174#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
175#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_FOR_32BIT
176#include "PGMGstDefs.h"
177#include "PGMAllBth.h"
178#undef BTH_PGMPOOLKIND_PT_FOR_BIG
179#undef BTH_PGMPOOLKIND_PT_FOR_PT
180#undef BTH_PGMPOOLKIND_ROOT
181#undef PGM_BTH_NAME
182#undef PGM_GST_TYPE
183#undef PGM_GST_NAME
184
185
186/* Guest - PAE mode */
187#define PGM_GST_TYPE PGM_TYPE_PAE
188#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
189#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
190#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
191#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
192#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT
193#include "PGMGstDefs.h"
194#include "PGMAllGst.h"
195#include "PGMAllBth.h"
196#undef BTH_PGMPOOLKIND_PT_FOR_BIG
197#undef BTH_PGMPOOLKIND_PT_FOR_PT
198#undef BTH_PGMPOOLKIND_ROOT
199#undef PGM_BTH_NAME
200#undef PGM_GST_TYPE
201#undef PGM_GST_NAME
202
203#undef PGM_SHW_TYPE
204#undef PGM_SHW_NAME
205
206
207#ifndef IN_RC /* AMD64 implies VT-x/AMD-V */
208/*
209 * Shadow - AMD64 mode
210 */
211# define PGM_SHW_TYPE PGM_TYPE_AMD64
212# define PGM_SHW_NAME(name) PGM_SHW_NAME_AMD64(name)
213# include "PGMAllShw.h"
214
215/* Guest - protected mode (only used for AMD-V nested paging in 64 bits mode) */
216# define PGM_GST_TYPE PGM_TYPE_PROT
217# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
218# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
219# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
220# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PD_PHYS
221# include "PGMGstDefs.h"
222# include "PGMAllBth.h"
223# undef BTH_PGMPOOLKIND_PT_FOR_PT
224# undef BTH_PGMPOOLKIND_ROOT
225# undef PGM_BTH_NAME
226# undef PGM_GST_TYPE
227# undef PGM_GST_NAME
228
229# ifdef VBOX_WITH_64_BITS_GUESTS
230/* Guest - AMD64 mode */
231# define PGM_GST_TYPE PGM_TYPE_AMD64
232# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
233# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
234# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
235# define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
236# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_64BIT_PML4
237# include "PGMGstDefs.h"
238# include "PGMAllGst.h"
239# include "PGMAllBth.h"
240# undef BTH_PGMPOOLKIND_PT_FOR_BIG
241# undef BTH_PGMPOOLKIND_PT_FOR_PT
242# undef BTH_PGMPOOLKIND_ROOT
243# undef PGM_BTH_NAME
244# undef PGM_GST_TYPE
245# undef PGM_GST_NAME
246# endif /* VBOX_WITH_64_BITS_GUESTS */
247
248# undef PGM_SHW_TYPE
249# undef PGM_SHW_NAME
250
251
252/*
253 * Shadow - Nested paging mode
254 */
255# define PGM_SHW_TYPE PGM_TYPE_NESTED
256# define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED(name)
257# include "PGMAllShw.h"
258
259/* Guest - real mode */
260# define PGM_GST_TYPE PGM_TYPE_REAL
261# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
262# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_REAL(name)
263# include "PGMGstDefs.h"
264# include "PGMAllBth.h"
265# undef PGM_BTH_NAME
266# undef PGM_GST_TYPE
267# undef PGM_GST_NAME
268
269/* Guest - protected mode */
270# define PGM_GST_TYPE PGM_TYPE_PROT
271# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
272# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PROT(name)
273# include "PGMGstDefs.h"
274# include "PGMAllBth.h"
275# undef PGM_BTH_NAME
276# undef PGM_GST_TYPE
277# undef PGM_GST_NAME
278
279/* Guest - 32-bit mode */
280# define PGM_GST_TYPE PGM_TYPE_32BIT
281# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
282# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT(name)
283# include "PGMGstDefs.h"
284# include "PGMAllBth.h"
285# undef PGM_BTH_NAME
286# undef PGM_GST_TYPE
287# undef PGM_GST_NAME
288
289/* Guest - PAE mode */
290# define PGM_GST_TYPE PGM_TYPE_PAE
291# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
292# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE(name)
293# include "PGMGstDefs.h"
294# include "PGMAllBth.h"
295# undef PGM_BTH_NAME
296# undef PGM_GST_TYPE
297# undef PGM_GST_NAME
298
299# ifdef VBOX_WITH_64_BITS_GUESTS
300/* Guest - AMD64 mode */
301# define PGM_GST_TYPE PGM_TYPE_AMD64
302# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
303# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64(name)
304# include "PGMGstDefs.h"
305# include "PGMAllBth.h"
306# undef PGM_BTH_NAME
307# undef PGM_GST_TYPE
308# undef PGM_GST_NAME
309# endif /* VBOX_WITH_64_BITS_GUESTS */
310
311# undef PGM_SHW_TYPE
312# undef PGM_SHW_NAME
313
314
315/*
316 * Shadow - EPT
317 */
318# define PGM_SHW_TYPE PGM_TYPE_EPT
319# define PGM_SHW_NAME(name) PGM_SHW_NAME_EPT(name)
320# include "PGMAllShw.h"
321
322/* Guest - real mode */
323# define PGM_GST_TYPE PGM_TYPE_REAL
324# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
325# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_REAL(name)
326# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
327# include "PGMGstDefs.h"
328# include "PGMAllBth.h"
329# undef BTH_PGMPOOLKIND_PT_FOR_PT
330# undef PGM_BTH_NAME
331# undef PGM_GST_TYPE
332# undef PGM_GST_NAME
333
334/* Guest - protected mode */
335# define PGM_GST_TYPE PGM_TYPE_PROT
336# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
337# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
338# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
339# include "PGMGstDefs.h"
340# include "PGMAllBth.h"
341# undef BTH_PGMPOOLKIND_PT_FOR_PT
342# undef PGM_BTH_NAME
343# undef PGM_GST_TYPE
344# undef PGM_GST_NAME
345
346/* Guest - 32-bit mode */
347# define PGM_GST_TYPE PGM_TYPE_32BIT
348# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
349# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_32BIT(name)
350# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
351# include "PGMGstDefs.h"
352# include "PGMAllBth.h"
353# undef BTH_PGMPOOLKIND_PT_FOR_PT
354# undef PGM_BTH_NAME
355# undef PGM_GST_TYPE
356# undef PGM_GST_NAME
357
358/* Guest - PAE mode */
359# define PGM_GST_TYPE PGM_TYPE_PAE
360# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
361# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PAE(name)
362# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
363# include "PGMGstDefs.h"
364# include "PGMAllBth.h"
365# undef BTH_PGMPOOLKIND_PT_FOR_PT
366# undef PGM_BTH_NAME
367# undef PGM_GST_TYPE
368# undef PGM_GST_NAME
369
370# ifdef VBOX_WITH_64_BITS_GUESTS
371/* Guest - AMD64 mode */
372# define PGM_GST_TYPE PGM_TYPE_AMD64
373# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
374# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_AMD64(name)
375# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
376# include "PGMGstDefs.h"
377# include "PGMAllBth.h"
378# undef BTH_PGMPOOLKIND_PT_FOR_PT
379# undef PGM_BTH_NAME
380# undef PGM_GST_TYPE
381# undef PGM_GST_NAME
382# endif /* VBOX_WITH_64_BITS_GUESTS */
383
384# undef PGM_SHW_TYPE
385# undef PGM_SHW_NAME
386
387#endif /* !IN_RC */
388
389
390#ifndef IN_RING3
391/**
392 * #PF Handler.
393 *
394 * @returns VBox status code (appropriate for trap handling and GC return).
395 * @param pVM VM Handle.
396 * @param uErr The trap error code.
397 * @param pRegFrame Trap register frame.
398 * @param pvFault The fault address.
399 */
400VMMDECL(int) PGMTrap0eHandler(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
401{
402 LogFlow(("PGMTrap0eHandler: uErr=%RGu pvFault=%RGv eip=%RGv\n", uErr, pvFault, (RTGCPTR)pRegFrame->rip));
403 STAM_PROFILE_START(&pVM->pgm.s.StatRZTrap0e, a);
404 STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );
405
406
407#ifdef VBOX_WITH_STATISTICS
408 /*
409 * Error code stats.
410 */
411 if (uErr & X86_TRAP_PF_US)
412 {
413 if (!(uErr & X86_TRAP_PF_P))
414 {
415 if (uErr & X86_TRAP_PF_RW)
416 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSNotPresentWrite);
417 else
418 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSNotPresentRead);
419 }
420 else if (uErr & X86_TRAP_PF_RW)
421 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSWrite);
422 else if (uErr & X86_TRAP_PF_RSVD)
423 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSReserved);
424 else if (uErr & X86_TRAP_PF_ID)
425 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSNXE);
426 else
427 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSRead);
428 }
429 else
430 { /* Supervisor */
431 if (!(uErr & X86_TRAP_PF_P))
432 {
433 if (uErr & X86_TRAP_PF_RW)
434 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eSVNotPresentWrite);
435 else
436 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eSVNotPresentRead);
437 }
438 else if (uErr & X86_TRAP_PF_RW)
439 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eSVWrite);
440 else if (uErr & X86_TRAP_PF_ID)
441 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eSNXE);
442 else if (uErr & X86_TRAP_PF_RSVD)
443 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eSVReserved);
444 }
445#endif /* VBOX_WITH_STATISTICS */
446
447 /*
448 * Call the worker.
449 */
450 int rc = PGM_BTH_PFN(Trap0eHandler, pVM)(pVM, uErr, pRegFrame, pvFault);
451 if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
452 rc = VINF_SUCCESS;
453 STAM_STATS({ if (rc == VINF_EM_RAW_GUEST_TRAP) STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eGuestPF); });
454 STAM_STATS({ if (!pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution))
455 pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatRZTrap0eTime2Misc; });
456 STAM_PROFILE_STOP_EX(&pVM->pgm.s.StatRZTrap0e, pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution), a);
457 return rc;
458}
459#endif /* !IN_RING3 */
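
/* Note added for clarity (not part of the original source): the
 * X86_TRAP_PF_* bits tested in the statistics block above are the
 * architectural #PF error code bits: P (clear for not-present faults),
 * RW (set for writes), US (set for user-mode accesses), RSVD (reserved
 * bit violation) and ID (instruction fetch on NX-capable CPUs). */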
460
461
462/**
463 * Prefetch a page
464 *
465 * Typically used to sync commonly used pages before entering raw mode
466 * after a CR3 reload.
467 *
468 * @returns VBox status code suitable for scheduling.
469 * @retval VINF_SUCCESS on success.
470 * @retval VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
471 * @param pVM VM handle.
472 * @param GCPtrPage Page to invalidate.
473 */
474VMMDECL(int) PGMPrefetchPage(PVM pVM, RTGCPTR GCPtrPage)
475{
476 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,Prefetch), a);
477 int rc = PGM_BTH_PFN(PrefetchPage, pVM)(pVM, GCPtrPage);
478 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,Prefetch), a);
479 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
480 return rc;
481}
482
483
484/**
485 * Gets the mapping corresponding to the specified address (if any).
486 *
487 * @returns Pointer to the mapping.
488 * @returns NULL if not found.
489 *
490 * @param pVM The virtual machine.
491 * @param GCPtr The guest context pointer.
492 */
493PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr)
494{
495 PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
496 while (pMapping)
497 {
498 if ((uintptr_t)GCPtr < (uintptr_t)pMapping->GCPtr)
499 break;
500 if ((uintptr_t)GCPtr - (uintptr_t)pMapping->GCPtr < pMapping->cb)
501 return pMapping;
502 pMapping = pMapping->CTX_SUFF(pNext);
503 }
504 return NULL;
505}
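
/* Note added for clarity (not part of the original source): the mappings
 * list walked above is kept sorted by GCPtr in ascending order, which is
 * what makes the early 'break' correct -- once GCPtr is below the current
 * mapping's base, no later entry can contain it. */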
506
507
508/**
509 * Verifies a range of pages for read or write access
510 *
511 * Only checks the guest's page tables
512 *
513 * @returns VBox status code.
514 * @param pVM VM handle.
515 * @param Addr Guest virtual address to check
516 * @param cbSize Access size
517 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
518 * @remarks Currently not in use.
519 */
520VMMDECL(int) PGMIsValidAccess(PVM pVM, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
521{
522 /*
523 * Validate input.
524 */
525 if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
526 {
527 AssertMsgFailed(("PGMIsValidAccess: invalid access type %08x\n", fAccess));
528 return VERR_INVALID_PARAMETER;
529 }
530
531 uint64_t fPage;
532 int rc = PGMGstGetPage(pVM, (RTGCPTR)Addr, &fPage, NULL);
533 if (RT_FAILURE(rc))
534 {
535 Log(("PGMIsValidAccess: access violation for %RGv rc=%d\n", Addr, rc));
536 return VINF_EM_RAW_GUEST_TRAP;
537 }
538
539 /*
540 * Check if the access would cause a page fault
541 *
542 * Note that hypervisor page directories are not present in the guest's tables, so this check
543 * is sufficient.
544 */
545 bool fWrite = !!(fAccess & X86_PTE_RW);
546 bool fUser = !!(fAccess & X86_PTE_US);
547 if ( !(fPage & X86_PTE_P)
548 || (fWrite && !(fPage & X86_PTE_RW))
549 || (fUser && !(fPage & X86_PTE_US)) )
550 {
551 Log(("PGMIsValidAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPage, fWrite, fUser));
552 return VINF_EM_RAW_GUEST_TRAP;
553 }
554 if ( RT_SUCCESS(rc)
555 && PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize))
556 return PGMIsValidAccess(pVM, Addr + PAGE_SIZE, (cbSize > PAGE_SIZE) ? cbSize - PAGE_SIZE : 1, fAccess);
557 return rc;
558}
559
560
561/**
562 * Verifies a range of pages for read or write access
563 *
564 * Supports handling of pages marked for dirty bit tracking and CSAM
565 *
566 * @returns VBox status code.
567 * @param pVM VM handle.
568 * @param Addr Guest virtual address to check
569 * @param cbSize Access size
570 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
571 */
572VMMDECL(int) PGMVerifyAccess(PVM pVM, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
573{
574 AssertMsg(!(fAccess & ~(X86_PTE_US | X86_PTE_RW)), ("PGMVerifyAccess: invalid access type %08x\n", fAccess));
575
576 /*
577 * Get going.
578 */
579 uint64_t fPageGst;
580 int rc = PGMGstGetPage(pVM, (RTGCPTR)Addr, &fPageGst, NULL);
581 if (RT_FAILURE(rc))
582 {
583 Log(("PGMVerifyAccess: access violation for %RGv rc=%d\n", Addr, rc));
584 return VINF_EM_RAW_GUEST_TRAP;
585 }
586
587 /*
588 * Check if the access would cause a page fault
589 *
590 * Note that hypervisor page directories are not present in the guest's tables, so this check
591 * is sufficient.
592 */
593 const bool fWrite = !!(fAccess & X86_PTE_RW);
594 const bool fUser = !!(fAccess & X86_PTE_US);
595 if ( !(fPageGst & X86_PTE_P)
596 || (fWrite && !(fPageGst & X86_PTE_RW))
597 || (fUser && !(fPageGst & X86_PTE_US)) )
598 {
599 Log(("PGMVerifyAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPageGst, fWrite, fUser));
600 return VINF_EM_RAW_GUEST_TRAP;
601 }
602
603 if (!HWACCMIsNestedPagingActive(pVM))
604 {
605 /*
606 * Next step is to verify if we protected this page for dirty bit tracking or for CSAM scanning
607 */
608 rc = PGMShwGetPage(pVM, (RTGCPTR)Addr, NULL, NULL);
609 if ( rc == VERR_PAGE_NOT_PRESENT
610 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
611 {
612 /*
613 * Page is not present in our page tables.
614 * Try to sync it!
615 */
616 Assert(X86_TRAP_PF_RW == X86_PTE_RW && X86_TRAP_PF_US == X86_PTE_US);
617 uint32_t uErr = fAccess & (X86_TRAP_PF_RW | X86_TRAP_PF_US);
618 rc = PGM_BTH_PFN(VerifyAccessSyncPage, pVM)(pVM, Addr, fPageGst, uErr);
619 if (rc != VINF_SUCCESS)
620 return rc;
621 }
622 else
623 AssertMsg(rc == VINF_SUCCESS, ("PGMShwGetPage %RGv failed with %Rrc\n", Addr, rc));
624 }
625
626#if 0 /* def VBOX_STRICT; triggers too often now */
627 /*
628 * This check is a bit paranoid, but useful.
629 */
630 /** @note this will assert when writing to monitored pages (a bit annoying actually) */
631 uint64_t fPageShw;
632 rc = PGMShwGetPage(pVM, (RTGCPTR)Addr, &fPageShw, NULL);
633 if ( (rc == VERR_PAGE_NOT_PRESENT || RT_FAILURE(rc))
634 || (fWrite && !(fPageShw & X86_PTE_RW))
635 || (fUser && !(fPageShw & X86_PTE_US)) )
636 {
637 AssertMsgFailed(("Unexpected access violation for %RGv! rc=%Rrc write=%d user=%d\n",
638 Addr, rc, fWrite && !(fPageShw & X86_PTE_RW), fUser && !(fPageShw & X86_PTE_US)));
639 return VINF_EM_RAW_GUEST_TRAP;
640 }
641#endif
642
643 if ( RT_SUCCESS(rc)
644 && ( PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize - 1)
645 || Addr + cbSize < Addr))
646 {
647 /* Don't recursively call PGMVerifyAccess as we might run out of stack. */
648 for (;;)
649 {
650 Addr += PAGE_SIZE;
651 if (cbSize > PAGE_SIZE)
652 cbSize -= PAGE_SIZE;
653 else
654 cbSize = 1;
655 rc = PGMVerifyAccess(pVM, Addr, 1, fAccess);
656 if (rc != VINF_SUCCESS)
657 break;
658 if (PAGE_ADDRESS(Addr) == PAGE_ADDRESS(Addr + cbSize - 1))
659 break;
660 }
661 }
662 return rc;
663}
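
#if 0 /* Illustrative sketch, not part of the original source: a typical
       * way a caller might verify that the guest can perform a user-mode
       * write to a buffer before touching it on the guest's behalf. The
       * helper name is hypothetical. */
static int pgmExampleCheckUserWrite(PVM pVM, RTGCPTR GCPtrBuf, uint32_t cbBuf)
{
    int rc = PGMVerifyAccess(pVM, GCPtrBuf, cbBuf, X86_PTE_RW | X86_PTE_US);
    if (rc == VINF_EM_RAW_GUEST_TRAP)
        Log(("Guest would fault on %RGv; let the guest handle the #PF.\n", GCPtrBuf));
    return rc;
}
#endif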
664
665
666/**
667 * Emulation of the invlpg instruction (HC only actually).
668 *
669 * @returns VBox status code, special care required.
670 * @retval VINF_PGM_SYNC_CR3 - handled.
671 * @retval VINF_EM_RAW_EMULATE_INSTR - not handled (RC only).
672 * @retval VERR_REM_FLUSHED_PAGES_OVERFLOW - not handled.
673 *
674 * @param pVM VM handle.
675 * @param GCPtrPage Page to invalidate.
676 *
677 * @remark ASSUMES the page table entry or page directory is valid. Fairly
678 * safe, but there could be edge cases!
679 *
680 * @todo Flush page or page directory only if necessary!
681 */
682VMMDECL(int) PGMInvalidatePage(PVM pVM, RTGCPTR GCPtrPage)
683{
684 int rc;
685 Log3(("PGMInvalidatePage: GCPtrPage=%RGv\n", GCPtrPage));
686
687#ifndef IN_RING3
688 /*
689 * Notify the recompiler so it can record this instruction.
690 * Failure happens when it's out of space. We'll return to HC in that case.
691 */
692 rc = REMNotifyInvalidatePage(pVM, GCPtrPage);
693 if (rc != VINF_SUCCESS)
694 return rc;
695#endif /* !IN_RING3 */
696
697
698#ifdef IN_RC
699 /*
700 * Check for conflicts and pending CR3 monitoring updates.
701 */
702 if (!pVM->pgm.s.fMappingsFixed)
703 {
704 if ( pgmGetMapping(pVM, GCPtrPage)
705 && PGMGstGetPage(pVM, GCPtrPage, NULL, NULL) != VERR_PAGE_TABLE_NOT_PRESENT)
706 {
707 LogFlow(("PGMGCInvalidatePage: Conflict!\n"));
708 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
709 STAM_COUNTER_INC(&pVM->pgm.s.StatRCInvlPgConflict);
710 return VINF_PGM_SYNC_CR3;
711 }
712
713 if (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
714 {
715 LogFlow(("PGMGCInvalidatePage: PGM_SYNC_MONITOR_CR3 -> reinterpret instruction in R3\n"));
716 STAM_COUNTER_INC(&pVM->pgm.s.StatRCInvlPgSyncMonCR3);
717 return VINF_EM_RAW_EMULATE_INSTR;
718 }
719 }
720#endif /* IN_RC */
721
722 /*
723 * Call paging mode specific worker.
724 */
725 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePage), a);
726 rc = PGM_BTH_PFN(InvalidatePage, pVM)(pVM, GCPtrPage);
727 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePage), a);
728
729#ifdef IN_RING3
730 /*
731 * Check if we have a pending update of the CR3 monitoring.
732 */
733 if ( RT_SUCCESS(rc)
734 && (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3))
735 {
736 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
737 Assert(!pVM->pgm.s.fMappingsFixed);
738 }
739
740 /*
741 * Inform CSAM about the flush
742 *
743 * Note: This is to check if monitored pages have been changed; when we implement
744 * callbacks for virtual handlers, this will no longer be required.
745 */
746 CSAMR3FlushPage(pVM, GCPtrPage);
747#endif /* IN_RING3 */
748 return rc;
749}
750
751
752/**
753 * Executes an instruction using the interpreter.
754 *
755 * @returns VBox status code (appropriate for trap handling and GC return).
756 * @param pVM VM handle.
757 * @param pRegFrame Register frame.
758 * @param pvFault Fault address.
759 */
760VMMDECL(int) PGMInterpretInstruction(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
761{
762 uint32_t cb;
763 int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);
764 if (rc == VERR_EM_INTERPRETER)
765 rc = VINF_EM_RAW_EMULATE_INSTR;
766 if (rc != VINF_SUCCESS)
767 Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%RGv)\n", rc, pvFault));
768 return rc;
769}
770
771
772/**
773 * Gets effective page information (from the VMM page directory).
774 *
775 * @returns VBox status.
776 * @param pVM VM Handle.
777 * @param GCPtr Guest Context virtual address of the page.
778 * @param pfFlags Where to store the flags. These are X86_PTE_*.
779 * @param pHCPhys Where to store the HC physical address of the page.
780 * This is page aligned.
781 * @remark You should use PGMMapGetPage() for pages in a mapping.
782 */
783VMMDECL(int) PGMShwGetPage(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
784{
785 return PGM_SHW_PFN(GetPage,pVM)(pVM, GCPtr, pfFlags, pHCPhys);
786}
787
788
789/**
790 * Sets (replaces) the page flags for a range of pages in the shadow context.
791 *
792 * @returns VBox status.
793 * @param pVM VM handle.
794 * @param GCPtr The address of the first page.
795 * @param cb The size of the range in bytes.
796 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
797 * @remark You must use PGMMapSetPage() for pages in a mapping.
798 */
799VMMDECL(int) PGMShwSetPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
800{
801 return PGMShwModifyPage(pVM, GCPtr, cb, fFlags, 0);
802}
803
804
805/**
806 * Modify page flags for a range of pages in the shadow context.
807 *
808 * The existing flags are ANDed with the fMask and ORed with the fFlags.
809 *
810 * @returns VBox status code.
811 * @param pVM VM handle.
812 * @param GCPtr Virtual address of the first page in the range.
813 * @param cb Size (in bytes) of the range to apply the modification to.
814 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
815 * @param fMask The AND mask - page flags X86_PTE_*.
816 * Be very CAREFUL when ~'ing constants which could be 32-bit!
817 * @remark You must use PGMMapModifyPage() for pages in a mapping.
818 */
819VMMDECL(int) PGMShwModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
820{
821 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
822 Assert(cb);
823
824 /*
825 * Align the input.
826 */
827 cb += GCPtr & PAGE_OFFSET_MASK;
828 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
829 GCPtr = (GCPtr & PAGE_BASE_GC_MASK); /** @todo this ain't necessary, right... */
830
831 /*
832 * Call worker.
833 */
834 return PGM_SHW_PFN(ModifyPage, pVM)(pVM, GCPtr, cb, fFlags, fMask);
835}
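
#if 0 /* Illustrative sketch, not part of the original source: how the
       * fFlags/fMask pair is intended to be used. New PTE flags are
       * computed as (old & fMask) | fFlags, so making a range read-only
       * means clearing X86_PTE_RW in the AND mask. Note the uint64_t cast
       * before ~'ing -- the 32-bit pitfall the remark above warns about.
       * The helper name is hypothetical. */
static int pgmExampleMakeRangeReadOnly(PVM pVM, RTGCPTR GCPtr, size_t cb)
{
    return PGMShwModifyPage(pVM, GCPtr, cb, 0 /*fFlags*/, ~(uint64_t)X86_PTE_RW /*fMask*/);
}
#endif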
836
837
838/**
839 * Gets the SHADOW page directory pointer for the specified address.
840 *
841 * @returns VBox status.
842 * @param pVM VM handle.
843 * @param GCPtr The address.
844 * @param ppPdpt Receives address of pdpt
845 * @param ppPD Receives address of page directory
846 * @remarks Unused.
847 */
848DECLINLINE(int) pgmShwGetPAEPDPtr(PVM pVM, RTGCPTR GCPtr, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
849{
850 PPGM pPGM = &pVM->pgm.s;
851 PPGMPOOL pPool = pPGM->CTX_SUFF(pPool);
852 PPGMPOOLPAGE pShwPage;
853
854 Assert(!HWACCMIsNestedPagingActive(pVM));
855
856 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
857 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
858 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
859
860 *ppPdpt = pPdpt;
861 if (!pPdpe->n.u1Present)
862 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
863
864 Assert(pPdpe->u & X86_PDPE_PG_MASK);
865 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
866 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
867
868 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
869 return VINF_SUCCESS;
870}
871
872/**
873 * Syncs the SHADOW page directory pointer for the specified address, PAE.
874 *
875 * @returns VBox status.
876 * @param pVM VM handle.
877 * @param GCPtr The address.
878 * @param pGstPdpe Guest PDPT entry
879 * @param ppPD Receives address of page directory
880 */
881int pgmShwSyncPaePDPtr(PVM pVM, RTGCPTR GCPtr, PX86PDPE pGstPdpe, PX86PDPAE *ppPD)
882{
883 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
884 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
885 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
886 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
887 PPGMPOOLPAGE pShwPage;
888 int rc;
889
890 /* Allocate page directory if not present. */
891 if ( !pPdpe->n.u1Present
892 && !(pPdpe->u & X86_PDPE_PG_MASK))
893 {
894 bool fNestedPaging = HWACCMIsNestedPagingActive(pVM);
895 bool fPaging = !!(CPUMGetGuestCR0(pVM) & X86_CR0_PG);
896 RTGCPTR64 GCPdPt;
897 PGMPOOLKIND enmKind;
898
899# if defined(IN_RC)
900 /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
901 PGMDynLockHCPage(pVM, (uint8_t *)pPdpe);
902# endif
903
904 if (fNestedPaging || !fPaging)
905 {
906 /* AMD-V nested paging or real/protected mode without paging */
907 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
908 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
909 }
910 else
911 {
912 Assert(pGstPdpe);
913
914 if (CPUMGetGuestCR4(pVM) & X86_CR4_PAE)
915 {
916 if (!pGstPdpe->n.u1Present)
917 {
918 /* PD not present; guest must reload CR3 to change it.
919 * No need to monitor anything in this case.
920 */
921 Assert(!HWACCMIsEnabled(pVM));
922
923 GCPdPt = pGstPdpe->u & X86_PDPE_PG_MASK;
924 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
925 pGstPdpe->n.u1Present = 1;
926 }
927 else
928 {
929 GCPdPt = pGstPdpe->u & X86_PDPE_PG_MASK;
930 enmKind = PGMPOOLKIND_PAE_PD_FOR_PAE_PD;
931 }
932 }
933 else
934 {
935 GCPdPt = CPUMGetGuestCR3(pVM);
936 enmKind = (PGMPOOLKIND)(PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD + iPdPt);
937 }
938 }
939
940 /* Create a reference back to the PDPT by using the index in its shadow page. */
941 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPdPt, &pShwPage);
942 if (rc == VERR_PGM_POOL_FLUSHED)
943 {
944 Log(("pgmShwSyncPaePDPtr: PGM pool flushed -> signal sync cr3\n"));
945 Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
946 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
947# if defined(IN_RC)
948 PGMDynUnlockHCPage(pVM, (uint8_t *)pPdpe);
949# endif
950 return VINF_PGM_SYNC_CR3;
951 }
952 AssertRCReturn(rc, rc);
953
954 /* The PD was cached or created; hook it up now. */
955 pPdpe->u |= pShwPage->Core.Key
956 | (pGstPdpe->u & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));
957
958# if defined(IN_RC)
959 /* In 32-bit PAE mode we *must* invalidate the TLB when changing a PDPT entry; the CPU fetches them only during cr3 load, so any
960 * non-present PDPT will continue to cause page faults.
961 */
962 ASMReloadCR3();
963 PGMDynUnlockHCPage(pVM, (uint8_t *)pPdpe);
964# endif
965 }
966 else
967 {
968 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
969 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
970
971 Assert((pPdpe->u & X86_PDPE_PG_MASK) == pShwPage->Core.Key);
972 }
973 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
974 return VINF_SUCCESS;
975}
976
977
978/**
979 * Gets the pgm pool page for the shadow page directory of an address, PAE.
980 *
981 * @returns VBox status.
982 * @param pPGM Pointer to the PGM instance data.
983 * @param GCPtr The address.
984 * @param ppShwPde Receives the address of the pgm pool page for the shadow page directory
985 */
986DECLINLINE(int) pgmShwGetPaePoolPagePD(PPGM pPGM, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde)
987{
988 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
989 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pPGM);
990 AssertReturn(pPdpt, VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT); /* can't happen */
991 if (!pPdpt->a[iPdPt].n.u1Present)
992 {
993 LogFlow(("pgmShwGetPaePoolPagePD: PD %d not present (%RX64)\n", iPdPt, pPdpt->a[iPdPt].u));
994 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
995 }
996
997 /* Fetch the pgm pool shadow descriptor. */
998 PPGMPOOLPAGE pShwPde = pgmPoolGetPageByHCPhys(PGM2VM(pPGM), pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
999 AssertReturn(pShwPde, VERR_INTERNAL_ERROR);
1000
1001 *ppShwPde = pShwPde;
1002 return VINF_SUCCESS;
1003}
1004
1005#ifndef IN_RC
1006
1007/**
1008 * Syncs the SHADOW page directory pointer for the specified address.
1009 *
1010 * Allocates backing pages in case the PDPT or PML4 entry is missing.
1011 *
1012 * The caller is responsible for making sure the guest has a valid PD before
1013 * calling this function.
1014 *
1015 * @returns VBox status.
1016 * @param pVM VM handle.
1017 * @param GCPtr The address.
1018 * @param pGstPml4e Guest PML4 entry
1019 * @param pGstPdpe Guest PDPT entry
1020 * @param ppPD Receives address of page directory
1021 */
1022int pgmShwSyncLongModePDPtr(PVM pVM, RTGCPTR64 GCPtr, PX86PML4E pGstPml4e, PX86PDPE pGstPdpe, PX86PDPAE *ppPD)
1023{
1024 PPGM pPGM = &pVM->pgm.s;
1025 PPGMPOOL pPool = pPGM->CTX_SUFF(pPool);
1026 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1027 PX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pPGM, iPml4);
1028 bool fNestedPaging = HWACCMIsNestedPagingActive(pVM);
1029 bool fPaging = !!(CPUMGetGuestCR0(pVM) & X86_CR0_PG);
1030 PPGMPOOLPAGE pShwPage;
1031 int rc;
1032
1033 /* Allocate page directory pointer table if not present. */
1034 if ( !pPml4e->n.u1Present
1035 && !(pPml4e->u & X86_PML4E_PG_MASK))
1036 {
1037 RTGCPTR64 GCPml4;
1038 PGMPOOLKIND enmKind;
1039
1040 Assert(pVM->pgm.s.CTX_SUFF(pShwPageCR3));
1041
1042 if (fNestedPaging || !fPaging)
1043 {
1044 /* AMD-V nested paging or real/protected mode without paging */
1045 GCPml4 = (RTGCPTR64)iPml4 << X86_PML4_SHIFT;
1046 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_PHYS;
1047 }
1048 else
1049 {
1050 Assert(pGstPml4e && pGstPdpe);
1051
1052 GCPml4 = pGstPml4e->u & X86_PML4E_PG_MASK;
1053 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT;
1054 }
1055
1056 /* Create a reference back to the PDPT by using the index in its shadow page. */
1057 rc = pgmPoolAlloc(pVM, GCPml4, enmKind, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, &pShwPage);
1058 if (rc == VERR_PGM_POOL_FLUSHED)
1059 {
1060 Log(("PGMShwSyncLongModePDPtr: PGM pool flushed (1) -> signal sync cr3\n"));
1061 Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
1062 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
1063 return VINF_PGM_SYNC_CR3;
1064 }
1065 AssertRCReturn(rc, rc);
1066 }
1067 else
1068 {
1069 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
1070 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1071 }
1072 /* The PDPT was cached or created; hook it up now. */
1073 pPml4e->u |= pShwPage->Core.Key
1074 | (pGstPml4e->u & ~(X86_PML4E_PG_MASK | X86_PML4E_AVL_MASK | X86_PML4E_PCD | X86_PML4E_PWT));
1075
1076 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1077 PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1078 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1079
1080 /* Allocate page directory if not present. */
1081 if ( !pPdpe->n.u1Present
1082 && !(pPdpe->u & X86_PDPE_PG_MASK))
1083 {
1084 RTGCPTR64 GCPdPt;
1085 PGMPOOLKIND enmKind;
1086
1087 if (fNestedPaging || !fPaging)
1088 {
1089 /* AMD-V nested paging or real/protected mode without paging */
1090 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
1091 enmKind = PGMPOOLKIND_64BIT_PD_FOR_PHYS;
1092 }
1093 else
1094 {
1095 Assert(pGstPdpe);
1096
1097 GCPdPt = pGstPdpe->u & X86_PDPE_PG_MASK;
1098 enmKind = PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD;
1099 }
1100
1101 /* Create a reference back to the PDPT by using the index in its shadow page. */
1102 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, pShwPage->idx, iPdPt, &pShwPage);
1103 if (rc == VERR_PGM_POOL_FLUSHED)
1104 {
1105 Log(("PGMShwSyncLongModePDPtr: PGM pool flushed (2) -> signal sync cr3\n"));
1106 Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
1107 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
1108 return VINF_PGM_SYNC_CR3;
1109 }
1110 AssertRCReturn(rc, rc);
1111 }
1112 else
1113 {
1114 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
1115 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1116 }
1117 /* The PD was cached or created; hook it up now. */
1118 pPdpe->u |= pShwPage->Core.Key
1119 | (pGstPdpe->u & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));
1120
1121 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1122 return VINF_SUCCESS;
1123}
1124
1125
1126/**
1127 * Gets the SHADOW page directory pointer for the specified address (long mode).
1128 *
1129 * @returns VBox status.
1130 * @param pVM VM handle.
1131 * @param GCPtr The address.
1132 * @param ppPml4e Receives address of the PML4 entry (optional)
 * @param ppPdpt Receives address of pdpt
1133 * @param ppPD Receives address of page directory
1134 */
1135DECLINLINE(int) pgmShwGetLongModePDPtr(PVM pVM, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
1136{
1137 PPGM pPGM = &pVM->pgm.s;
1138 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1139 PCX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pPGM, iPml4);
1140 AssertReturn(pPml4e, VERR_INTERNAL_ERROR);
1141 if (ppPml4e)
1142 *ppPml4e = (PX86PML4E)pPml4e;
1143 if (!pPml4e->n.u1Present)
1144 return VERR_PAGE_MAP_LEVEL4_NOT_PRESENT;
1145
1146 PPGMPOOL pPool = pPGM->CTX_SUFF(pPool);
1147 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
1148 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1149
1150 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1151 PCX86PDPT pPdpt = *ppPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1152 if (!pPdpt->a[iPdPt].n.u1Present)
1153 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1154
1155 pShwPage = pgmPoolGetPage(pPool, pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
1156 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1157
1158 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1159 return VINF_SUCCESS;
1160}
1161
1162
1163/**
1164 * Syncs the SHADOW EPT page directory pointer for the specified address. Allocates
1165 * backing pages in case the PDPT or PML4 entry is missing.
1166 *
1167 * @returns VBox status.
1168 * @param pVM VM handle.
1169 * @param GCPtr The address.
1170 * @param ppPdpt Receives address of pdpt
1171 * @param ppPD Receives address of page directory
1172 */
1173int pgmShwGetEPTPDPtr(PVM pVM, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD)
1174{
1175 PPGM pPGM = &pVM->pgm.s;
1176 const unsigned iPml4 = (GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
1177 PPGMPOOL pPool = pPGM->CTX_SUFF(pPool);
1178 PEPTPML4 pPml4;
1179 PEPTPML4E pPml4e;
1180 PPGMPOOLPAGE pShwPage;
1181 int rc;
1182
1183 Assert(HWACCMIsNestedPagingActive(pVM));
1184
1185 pPml4 = (PEPTPML4)PGMPOOL_PAGE_2_PTR_BY_PGM(pPGM, pPGM->CTX_SUFF(pShwPageCR3));
1186 Assert(pPml4);
1187
1188 /* Allocate page directory pointer table if not present. */
1189 pPml4e = &pPml4->a[iPml4];
1190 if ( !pPml4e->n.u1Present
1191 && !(pPml4e->u & EPT_PML4E_PG_MASK))
1192 {
1193 Assert(!(pPml4e->u & EPT_PML4E_PG_MASK));
1194 RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT;
1195
1196 rc = pgmPoolAlloc(pVM, GCPml4, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOL_IDX_NESTED_ROOT, iPml4, &pShwPage);
1197 if (rc == VERR_PGM_POOL_FLUSHED)
1198 {
1199 Log(("PGMShwSyncEPTPDPtr: PGM pool flushed (1) -> signal sync cr3\n"));
1200 Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
1201 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
1202 return VINF_PGM_SYNC_CR3;
1203 }
1204 AssertRCReturn(rc, rc);
1205 }
1206 else
1207 {
1208 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
1209 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1210 }
1211 /* The PDPT was cached or created; hook it up now and fill with the default value. */
1212 pPml4e->u = pShwPage->Core.Key;
1213 pPml4e->n.u1Present = 1;
1214 pPml4e->n.u1Write = 1;
1215 pPml4e->n.u1Execute = 1;
1216
1217 const unsigned iPdPt = (GCPtr >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
1218 PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1219 PEPTPDPTE pPdpe = &pPdpt->a[iPdPt];
1220
1221 if (ppPdpt)
1222 *ppPdpt = pPdpt;
1223
1224 /* Allocate page directory if not present. */
1225 if ( !pPdpe->n.u1Present
1226 && !(pPdpe->u & EPT_PDPTE_PG_MASK))
1227 {
1228 RTGCPTR64 GCPdPt = (RTGCPTR64)iPdPt << EPT_PDPT_SHIFT;
1229
1230 rc = pgmPoolAlloc(pVM, GCPdPt, PGMPOOLKIND_64BIT_PD_FOR_PHYS, pShwPage->idx, iPdPt, &pShwPage);
1231 if (rc == VERR_PGM_POOL_FLUSHED)
1232 {
1233 Log(("PGMShwSyncEPTPDPtr: PGM pool flushed (2) -> signal sync cr3\n"));
1234 Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
1235 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
1236 return VINF_PGM_SYNC_CR3;
1237 }
1238 AssertRCReturn(rc, rc);
1239 }
1240 else
1241 {
1242 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & EPT_PDPTE_PG_MASK);
1243 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1244 }
1245 /* The PD was cached or created; hook it up now and fill with the default value. */
1246 pPdpe->u = pShwPage->Core.Key;
1247 pPdpe->n.u1Present = 1;
1248 pPdpe->n.u1Write = 1;
1249 pPdpe->n.u1Execute = 1;
1250
1251 *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1252 return VINF_SUCCESS;
1253}
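
/* Note added for clarity (not part of the original source): the EPT PML4
 * and PDPT entries created above are made present, writable and executable
 * unconditionally; effective access rights are enforced at the final page
 * table level, so permissive intermediate levels are safe and avoid
 * spurious EPT violations. */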
1254
1255#endif /* !IN_RC */
1256
1257/**
1258 * Gets effective Guest OS page information.
1259 *
1260 * When GCPtr is in a big page, the function will return as if it was a normal
1261 * 4KB page. If distinguishing between big and normal pages becomes
1262 * necessary at a later point, a new PGMGstGetPage() variant will be created for that
1263 * purpose.
1264 *
1265 * @returns VBox status.
1266 * @param pVM VM Handle.
1267 * @param GCPtr Guest Context virtual address of the page.
1268 * @param pfFlags Where to store the flags. These are X86_PTE_*, even for big pages.
1269 * @param pGCPhys Where to store the GC physical address of the page.
1270 * This is page aligned.
1271 */
1272VMMDECL(int) PGMGstGetPage(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
1273{
1274 return PGM_GST_PFN(GetPage,pVM)(pVM, GCPtr, pfFlags, pGCPhys);
1275}
1276
1277
1278/**
1279 * Checks if the page is present.
1280 *
1281 * @returns true if the page is present.
1282 * @returns false if the page is not present.
1283 * @param pVM The VM handle.
1284 * @param GCPtr Address within the page.
1285 */
1286VMMDECL(bool) PGMGstIsPagePresent(PVM pVM, RTGCPTR GCPtr)
1287{
1288 int rc = PGMGstGetPage(pVM, GCPtr, NULL, NULL);
1289 return RT_SUCCESS(rc);
1290}
1291
1292
1293/**
1294 * Sets (replaces) the page flags for a range of pages in the guest's tables.
1295 *
1296 * @returns VBox status.
1297 * @param pVM VM handle.
1298 * @param GCPtr The address of the first page.
1299 * @param cb The size of the range in bytes.
1300 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
1301 */
1302VMMDECL(int) PGMGstSetPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
1303{
1304 return PGMGstModifyPage(pVM, GCPtr, cb, fFlags, 0);
1305}
1306
1307
1308/**
1309 * Modify page flags for a range of pages in the guest's tables
1310 *
1311 * The existing flags are ANDed with the fMask and ORed with the fFlags.
1312 *
1313 * @returns VBox status code.
1314 * @param pVM VM handle.
1315 * @param GCPtr Virtual address of the first page in the range.
1316 * @param cb Size (in bytes) of the range to apply the modification to.
1317 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
1318 * @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
1319 * Be very CAREFUL when ~'ing constants which could be 32-bit!
1320 */
1321VMMDECL(int) PGMGstModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
1322{
1323 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,GstModifyPage), a);
1324
1325 /*
1326 * Validate input.
1327 */
1328 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
1329 Assert(cb);
1330
1331 LogFlow(("PGMGstModifyPage %RGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));
1332
1333 /*
1334 * Adjust input.
1335 */
1336 cb += GCPtr & PAGE_OFFSET_MASK;
1337 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
1338 GCPtr = (GCPtr & PAGE_BASE_GC_MASK);
1339
1340 /*
1341 * Call worker.
1342 */
1343 int rc = PGM_GST_PFN(ModifyPage, pVM)(pVM, GCPtr, cb, fFlags, fMask);
1344
1345 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,GstModifyPage), a);
1346 return rc;
1347}
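
#if 0 /* Illustrative sketch, not part of the original source: marking a
       * range accessed and dirty in the guest tables while preserving all
       * other flags (an all-ones AND mask keeps everything; the OR mask
       * adds A and D). The helper name is hypothetical. */
static int pgmExampleMarkAccessedDirty(PVM pVM, RTGCPTR GCPtr, size_t cb)
{
    return PGMGstModifyPage(pVM, GCPtr, cb, X86_PTE_A | X86_PTE_D /*fFlags*/, UINT64_MAX /*fMask*/);
}
#endif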
1348
1349
1350/**
1351 * Gets the specified page directory pointer table entry.
1352 *
1353 * @returns PDP entry
1354 * @param pVM The VM handle.
1355 * @param iPdpt PDPT index
1356 */
1357VMMDECL(X86PDPE) PGMGstGetPaePDPtr(PVM pVM, unsigned iPdpt)
1358{
1359 Assert(iPdpt <= 3);
1360 return pgmGstGetPaePDPTPtr(&pVM->pgm.s)->a[iPdpt & 3];
1361}
1362
1363
1364/**
1365 * Gets the current CR3 register value for the shadow memory context.
1366 * @returns CR3 value.
1367 * @param pVM The VM handle.
1368 */
1369VMMDECL(RTHCPHYS) PGMGetHyperCR3(PVM pVM)
1370{
1371 Assert(pVM->pgm.s.CTX_SUFF(pShwPageCR3));
1372 return pVM->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key;
1373}
1374
1375
1376/**
1377 * Gets the current CR3 register value for the nested memory context.
1378 * @returns CR3 value.
1379 * @param pVM The VM handle.
 * @param enmShadowMode The shadow paging mode (currently not used here).
1380 */
1381VMMDECL(RTHCPHYS) PGMGetNestedCR3(PVM pVM, PGMMODE enmShadowMode)
1382{
1383 Assert(pVM->pgm.s.CTX_SUFF(pShwPageCR3));
1384 return pVM->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key;
1385}
1386
1387
1388/**
1389 * Gets the CR3 register value for the 32-Bit shadow memory context.
1390 * @returns CR3 value.
1391 * @param pVM The VM handle.
1392 */
1393VMMDECL(RTHCPHYS) PGMGetHyper32BitCR3(PVM pVM)
1394{
1395 Assert(pVM->pgm.s.CTX_SUFF(pShwPageCR3));
1396 return pVM->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key;
1397}
1398
1399
1400/**
1401 * Gets the CR3 register value for the PAE shadow memory context.
1402 * @returns CR3 value.
1403 * @param pVM The VM handle.
1404 */
1405VMMDECL(RTHCPHYS) PGMGetHyperPaeCR3(PVM pVM)
1406{
1407 Assert(pVM->pgm.s.CTX_SUFF(pShwPageCR3));
1408 return pVM->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key;
1409}
1410
1411
1412/**
1413 * Gets the CR3 register value for the AMD64 shadow memory context.
1414 * @returns CR3 value.
1415 * @param pVM The VM handle.
1416 */
1417VMMDECL(RTHCPHYS) PGMGetHyperAmd64CR3(PVM pVM)
1418{
1419 Assert(pVM->pgm.s.CTX_SUFF(pShwPageCR3));
1420 return pVM->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key;
1421}
1422
1423
1424/**
1425 * Gets the current CR3 register value for the HC intermediate memory context.
1426 * @returns CR3 value.
1427 * @param pVM The VM handle.
1428 */
1429VMMDECL(RTHCPHYS) PGMGetInterHCCR3(PVM pVM)
1430{
1431 switch (pVM->pgm.s.enmHostMode)
1432 {
1433 case SUPPAGINGMODE_32_BIT:
1434 case SUPPAGINGMODE_32_BIT_GLOBAL:
1435 return pVM->pgm.s.HCPhysInterPD;
1436
1437 case SUPPAGINGMODE_PAE:
1438 case SUPPAGINGMODE_PAE_GLOBAL:
1439 case SUPPAGINGMODE_PAE_NX:
1440 case SUPPAGINGMODE_PAE_GLOBAL_NX:
1441 return pVM->pgm.s.HCPhysInterPaePDPT;
1442
1443 case SUPPAGINGMODE_AMD64:
1444 case SUPPAGINGMODE_AMD64_GLOBAL:
1445 case SUPPAGINGMODE_AMD64_NX:
1446 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
1447 return pVM->pgm.s.HCPhysInterPaePDPT;
1448
1449 default:
1450 AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode));
1451 return ~0;
1452 }
1453}
1454
1455
1456/**
1457 * Gets the current CR3 register value for the RC intermediate memory context.
1458 * @returns CR3 value.
1459 * @param pVM The VM handle.
1460 */
1461VMMDECL(RTHCPHYS) PGMGetInterRCCR3(PVM pVM)
1462{
1463 switch (pVM->pgm.s.enmShadowMode)
1464 {
1465 case PGMMODE_32_BIT:
1466 return pVM->pgm.s.HCPhysInterPD;
1467
1468 case PGMMODE_PAE:
1469 case PGMMODE_PAE_NX:
1470 return pVM->pgm.s.HCPhysInterPaePDPT;
1471
1472 case PGMMODE_AMD64:
1473 case PGMMODE_AMD64_NX:
1474 return pVM->pgm.s.HCPhysInterPaePML4;
1475
1476 case PGMMODE_EPT:
1477 case PGMMODE_NESTED:
1478 return 0; /* not relevant */
1479
1480 default:
1481 AssertMsgFailed(("enmShadowMode=%d\n", pVM->pgm.s.enmShadowMode));
1482 return ~0;
1483 }
1484}
1485
1486
1487/**
1488 * Gets the CR3 register value for the 32-Bit intermediate memory context.
1489 * @returns CR3 value.
1490 * @param pVM The VM handle.
1491 */
1492VMMDECL(RTHCPHYS) PGMGetInter32BitCR3(PVM pVM)
1493{
1494 return pVM->pgm.s.HCPhysInterPD;
1495}
1496
1497
1498/**
1499 * Gets the CR3 register value for the PAE intermediate memory context.
1500 * @returns CR3 value.
1501 * @param pVM The VM handle.
1502 */
1503VMMDECL(RTHCPHYS) PGMGetInterPaeCR3(PVM pVM)
1504{
1505 return pVM->pgm.s.HCPhysInterPaePDPT;
1506}
1507
1508
1509/**
1510 * Gets the CR3 register value for the AMD64 intermediate memory context.
1511 * @returns CR3 value.
1512 * @param pVM The VM handle.
1513 */
1514VMMDECL(RTHCPHYS) PGMGetInterAmd64CR3(PVM pVM)
1515{
1516 return pVM->pgm.s.HCPhysInterPaePML4;
1517}
1518
1519
1520/**
1521 * Performs and schedules necessary updates following a CR3 load or reload.
1522 *
1523 * This will normally involve mapping the guest PD or nPDPT
1524 *
1525 * @returns VBox status code.
1526 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
1527 * safely be ignored and overridden since the FF will be set too then.
1528 * @param pVM VM handle.
1529 * @param cr3 The new cr3.
1530 * @param fGlobal Indicates whether this is a global flush or not.
1531 */
1532VMMDECL(int) PGMFlushTLB(PVM pVM, uint64_t cr3, bool fGlobal)
1533{
1534 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLB), a);
1535
1536 /*
1537 * Always flag the necessary updates; this is required for hardware acceleration.
1538 */
1539 /** @todo optimize this, it shouldn't always be necessary. */
1540 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
1541 if (fGlobal)
1542 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
1543 LogFlow(("PGMFlushTLB: cr3=%RX64 OldCr3=%RX64 fGlobal=%d\n", cr3, pVM->pgm.s.GCPhysCR3, fGlobal));
1544
1545 /*
1546 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
1547 */
1548 int rc = VINF_SUCCESS;
1549 RTGCPHYS GCPhysCR3;
1550 switch (pVM->pgm.s.enmGuestMode)
1551 {
1552 case PGMMODE_PAE:
1553 case PGMMODE_PAE_NX:
1554 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1555 break;
1556 case PGMMODE_AMD64:
1557 case PGMMODE_AMD64_NX:
1558 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
1559 break;
1560 default:
1561 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1562 break;
1563 }
1564
1565 if (pVM->pgm.s.GCPhysCR3 != GCPhysCR3)
1566 {
1567 RTGCPHYS GCPhysOldCR3 = pVM->pgm.s.GCPhysCR3;
1568 pVM->pgm.s.GCPhysCR3 = GCPhysCR3;
1569 rc = PGM_BTH_PFN(MapCR3, pVM)(pVM, GCPhysCR3);
1570 if (RT_LIKELY(rc == VINF_SUCCESS))
1571 {
1572 if (!pVM->pgm.s.fMappingsFixed)
1573 {
1574 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1575 }
1576 }
1577 else
1578 {
1579 AssertMsg(rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
1580 Assert(VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL | VM_FF_PGM_SYNC_CR3));
1581 pVM->pgm.s.GCPhysCR3 = GCPhysOldCR3;
1582 pVM->pgm.s.fSyncFlags |= PGM_SYNC_MAP_CR3;
1583 if (!pVM->pgm.s.fMappingsFixed)
1584 pVM->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;
1585 }
1586
1587 if (fGlobal)
1588 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLBNewCR3Global));
1589 else
1590 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLBNewCR3));
1591 }
1592 else
1593 {
1594 /*
1595 * Check if we have a pending update of the CR3 monitoring.
1596 */
1597 if (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
1598 {
1599 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1600 Assert(!pVM->pgm.s.fMappingsFixed);
1601 }
1602 if (fGlobal)
1603 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLBSameCR3Global));
1604 else
1605 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLBSameCR3));
1606 }
1607
1608 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLB), a);
1609 return rc;
1610}
1611
1612
1613/**
1614 * Performs and schedules necessary updates following a CR3 load or reload when
1615 * using nested or extended paging.
1616 *
1617 * This API is an alternative to PGMFlushTLB that avoids actually flushing the
1618 * TLB and triggering a SyncCR3.
1619 *
1620 * This will normally involve mapping the guest PD or nPDPT
1621 *
1622 * @returns VBox status code.
1623 * @retval VINF_SUCCESS.
1624 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync (only when
1625 * not in nested mode). This can safely be ignored and overridden
1626 * since the FF will be set too then.
1627 * @param pVM VM handle.
1628 * @param cr3 The new cr3.
1629 */
1630VMMDECL(int) PGMUpdateCR3(PVM pVM, uint64_t cr3)
1631{
1632 LogFlow(("PGMUpdateCR3: cr3=%RX64 OldCr3=%RX64\n", cr3, pVM->pgm.s.GCPhysCR3));
1633
1634 /* We assume we're only called in nested paging mode. */
1635 Assert(pVM->pgm.s.fMappingsFixed);
1636 Assert(!(pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3));
1637 Assert(pVM->pgm.s.enmShadowMode == PGMMODE_NESTED || pVM->pgm.s.enmShadowMode == PGMMODE_EPT);
1638
1639 /*
1640 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
1641 */
1642 int rc = VINF_SUCCESS;
1643 RTGCPHYS GCPhysCR3;
1644 switch (pVM->pgm.s.enmGuestMode)
1645 {
1646 case PGMMODE_PAE:
1647 case PGMMODE_PAE_NX:
1648 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1649 break;
1650 case PGMMODE_AMD64:
1651 case PGMMODE_AMD64_NX:
1652 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
1653 break;
1654 default:
1655 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1656 break;
1657 }
1658 if (pVM->pgm.s.GCPhysCR3 != GCPhysCR3)
1659 {
1660 pVM->pgm.s.GCPhysCR3 = GCPhysCR3;
1661 rc = PGM_BTH_PFN(MapCR3, pVM)(pVM, GCPhysCR3);
1662 AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */ /** @todo this isn't true for the mac, but we need hw to test/fix this. */
1663 }
1664 return rc;
1665}
1666
1667
1668/**
1669 * Synchronize the paging structures.
1670 *
1671 * This function is called in response to the VM_FF_PGM_SYNC_CR3 and
1672 * VM_FF_PGM_SYNC_CR3_NONGLOBAL. Those two force action flags are set
1673 * in several places, most importantly whenever the CR3 is loaded.
1674 *
1675 * @returns VBox status code.
1676 * @param pVM The virtual machine.
1677 * @param cr0 Guest context CR0 register
1678 * @param cr3 Guest context CR3 register
1679 * @param cr4 Guest context CR4 register
1680 * @param fGlobal Including global page directories or not
1681 */
1682VMMDECL(int) PGMSyncCR3(PVM pVM, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
1683{
1684 int rc;
1685
1686 /*
1687 * We might be called when we shouldn't.
1688 *
1689 * The mode switching will ensure that the PD is resynced
1690 * after every mode switch. So, if we find ourselves here
1691 * when in protected or real mode we can safely disable the
1692 * FF and return immediately.
1693 */
1694 if (pVM->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
1695 {
1696 Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
1697 VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3);
1698 VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
1699 return VINF_SUCCESS;
1700 }
1701
1702 /* If global pages are not supported, then all flushes are global. */
1703 if (!(cr4 & X86_CR4_PGE))
1704 fGlobal = true;
1705 LogFlow(("PGMSyncCR3: cr0=%RX64 cr3=%RX64 cr4=%RX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
1706 VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3), VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL)));
1707
1708#ifdef PGMPOOL_WITH_MONITORING
1709 /*
1710 * The pool may have pending stuff and even require a return to ring-3 to
1711 * clear the whole thing.
1712 */
1713 rc = pgmPoolSyncCR3(pVM);
1714 if (rc != VINF_SUCCESS)
1715 return rc;
1716#endif
1717
1718 /*
1719 * Check if we need to finish an aborted MapCR3 call (see PGMFlushTLB).
1720 * This should be done before SyncCR3.
1721 */
1722 if (pVM->pgm.s.fSyncFlags & PGM_SYNC_MAP_CR3)
1723 {
1724 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MAP_CR3;
1725
1726 RTGCPHYS GCPhysCR3Old = pVM->pgm.s.GCPhysCR3;
1727 RTGCPHYS GCPhysCR3;
1728 switch (pVM->pgm.s.enmGuestMode)
1729 {
1730 case PGMMODE_PAE:
1731 case PGMMODE_PAE_NX:
1732 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1733 break;
1734 case PGMMODE_AMD64:
1735 case PGMMODE_AMD64_NX:
1736 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
1737 break;
1738 default:
1739 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1740 break;
1741 }
1742
1743 if (pVM->pgm.s.GCPhysCR3 != GCPhysCR3)
1744 {
1745 pVM->pgm.s.GCPhysCR3 = GCPhysCR3;
1746 rc = PGM_BTH_PFN(MapCR3, pVM)(pVM, GCPhysCR3);
1747 }
1748#ifdef IN_RING3
1749 if (rc == VINF_PGM_SYNC_CR3)
1750 rc = pgmPoolSyncCR3(pVM);
1751#else
1752 if (rc == VINF_PGM_SYNC_CR3)
1753 {
1754 pVM->pgm.s.GCPhysCR3 = GCPhysCR3Old;
1755 return rc;
1756 }
1757#endif
1758 AssertRCReturn(rc, rc);
1759 AssertRCSuccessReturn(rc, VERR_INTERNAL_ERROR);
1760 }
1761
1762 /*
1763 * Let the 'Bth' function do the work and we'll just keep track of the flags.
1764 */
1765 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
1766 rc = PGM_BTH_PFN(SyncCR3, pVM)(pVM, cr0, cr3, cr4, fGlobal);
1767 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
1768 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
1769 if (rc == VINF_SUCCESS)
1770 {
1771 if (!(pVM->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
1772 {
1773 VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3);
1774 VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
1775 }
1776
1777 /*
1778 * Check if we have a pending update of the CR3 monitoring.
1779 */
1780 if (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
1781 {
1782 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1783 Assert(!pVM->pgm.s.fMappingsFixed);
1784 }
1785 }
1786
1787 /*
1788 * Now flush the CR3 (guest context).
1789 */
1790 if (rc == VINF_SUCCESS)
1791 PGM_INVL_GUEST_TLBS();
1792 return rc;
1793}
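
/*
 * Usage sketch (hypothetical caller): servicing the sync force action flags
 * from an EM-style loop. Assumes the CPUMGetGuestCR0/CR3/CR4 getters; only
 * PGMSyncCR3() and the VM_FF_* flags are from this file.
 */
#if 0
static int emSketchServiceSyncCR3(PVM pVM)
{
    /* VM_FF_PGM_SYNC_CR3 requests a global resync; if only the NON_GLOBAL
       flag is pending, global pages may be skipped (fGlobal = false). */
    bool fGlobal = VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3);
    return PGMSyncCR3(pVM, CPUMGetGuestCR0(pVM), CPUMGetGuestCR3(pVM),
                      CPUMGetGuestCR4(pVM), fGlobal);
}
#endif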
1794
1795
1796/**
1797 * Called whenever CR0 or CR4 changes in a way which may affect
1798 * the paging mode.
1799 *
1800 * @returns VBox status code fit for scheduling in GC and R0.
1801 * @retval VINF_SUCCESS if there was no change, or it was successfully dealt with.
1802 * @retval VINF_PGM_CHANGE_MODE if we're in GC or R0 and the mode changes.
1803 * @param pVM VM handle.
1804 * @param cr0 The new cr0.
1805 * @param cr4 The new cr4.
1806 * @param efer The new extended feature enable register.
1807 */
1808VMMDECL(int) PGMChangeMode(PVM pVM, uint64_t cr0, uint64_t cr4, uint64_t efer)
1809{
1810 PGMMODE enmGuestMode;
1811
1812 /*
1813 * Calc the new guest mode.
1814 */
1815 if (!(cr0 & X86_CR0_PE))
1816 enmGuestMode = PGMMODE_REAL;
1817 else if (!(cr0 & X86_CR0_PG))
1818 enmGuestMode = PGMMODE_PROTECTED;
1819 else if (!(cr4 & X86_CR4_PAE))
1820 enmGuestMode = PGMMODE_32_BIT;
1821 else if (!(efer & MSR_K6_EFER_LME))
1822 {
1823 if (!(efer & MSR_K6_EFER_NXE))
1824 enmGuestMode = PGMMODE_PAE;
1825 else
1826 enmGuestMode = PGMMODE_PAE_NX;
1827 }
1828 else
1829 {
1830 if (!(efer & MSR_K6_EFER_NXE))
1831 enmGuestMode = PGMMODE_AMD64;
1832 else
1833 enmGuestMode = PGMMODE_AMD64_NX;
1834 }
1835
1836 /*
1837 * Did it change?
1838 */
1839 if (pVM->pgm.s.enmGuestMode == enmGuestMode)
1840 return VINF_SUCCESS;
1841
1842 /* Flush the TLB */
1843 PGM_INVL_GUEST_TLBS();
1844
1845#ifdef IN_RING3
1846 return PGMR3ChangeMode(pVM, enmGuestMode);
1847#else
1848 LogFlow(("PGMChangeMode: returns VINF_PGM_CHANGE_MODE.\n"));
1849 return VINF_PGM_CHANGE_MODE;
1850#endif
1851}
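
/*
 * Usage sketch (hypothetical): a CRx-write interpreter calling PGMChangeMode
 * and honouring the ring-3 reschedule. The CPUMGetGuestCR0/EFER getters are
 * assumed; the VINF_PGM_CHANGE_MODE handling mirrors the #else branch above.
 */
#if 0
static int emSketchAfterCR4Write(PVM pVM, uint64_t uNewCr4)
{
    int rc = PGMChangeMode(pVM, CPUMGetGuestCR0(pVM), uNewCr4, CPUMGetGuestEFER(pVM));
    AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_CHANGE_MODE, ("%Rrc\n", rc));
    /* VINF_PGM_CHANGE_MODE must be propagated so ring-3 can run PGMR3ChangeMode(). */
    return rc;
}
#endif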
1852
1853
1854/**
1855 * Gets the current guest paging mode.
1856 *
1857 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
1858 *
1859 * @returns The current paging mode.
1860 * @param pVM The VM handle.
1861 */
1862VMMDECL(PGMMODE) PGMGetGuestMode(PVM pVM)
1863{
1864 return pVM->pgm.s.enmGuestMode;
1865}
1866
1867
1868/**
1869 * Gets the current shadow paging mode.
1870 *
1871 * @returns The current paging mode.
1872 * @param pVM The VM handle.
1873 */
1874VMMDECL(PGMMODE) PGMGetShadowMode(PVM pVM)
1875{
1876 return pVM->pgm.s.enmShadowMode;
1877}
1878
1879/**
1880 * Gets the current host paging mode.
1881 *
1882 * @returns The current paging mode.
1883 * @param pVM The VM handle.
1884 */
1885VMMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
1886{
1887 switch (pVM->pgm.s.enmHostMode)
1888 {
1889 case SUPPAGINGMODE_32_BIT:
1890 case SUPPAGINGMODE_32_BIT_GLOBAL:
1891 return PGMMODE_32_BIT;
1892
1893 case SUPPAGINGMODE_PAE:
1894 case SUPPAGINGMODE_PAE_GLOBAL:
1895 return PGMMODE_PAE;
1896
1897 case SUPPAGINGMODE_PAE_NX:
1898 case SUPPAGINGMODE_PAE_GLOBAL_NX:
1899 return PGMMODE_PAE_NX;
1900
1901 case SUPPAGINGMODE_AMD64:
1902 case SUPPAGINGMODE_AMD64_GLOBAL:
1903 return PGMMODE_AMD64;
1904
1905 case SUPPAGINGMODE_AMD64_NX:
1906 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
1907 return PGMMODE_AMD64_NX;
1908
1909 default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
1910 }
1911
1912 return PGMMODE_INVALID;
1913}
1914
1915
1916/**
1917 * Get mode name.
1918 *
1919 * @returns read-only name string.
1920 * @param enmMode The mode whose name is desired.
1921 */
1922VMMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
1923{
1924 switch (enmMode)
1925 {
1926 case PGMMODE_REAL: return "Real";
1927 case PGMMODE_PROTECTED: return "Protected";
1928 case PGMMODE_32_BIT: return "32-bit";
1929 case PGMMODE_PAE: return "PAE";
1930 case PGMMODE_PAE_NX: return "PAE+NX";
1931 case PGMMODE_AMD64: return "AMD64";
1932 case PGMMODE_AMD64_NX: return "AMD64+NX";
1933 case PGMMODE_NESTED: return "Nested";
1934 case PGMMODE_EPT: return "EPT";
1935 default: return "unknown mode value";
1936 }
1937}
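
/*
 * Usage sketch: combining the mode getters above with PGMGetModeName() for
 * release logging. The wrapper name is made up; the calls are all from this
 * file.
 */
#if 0
static void pgmSketchLogModes(PVM pVM)
{
    LogRel(("PGM: guest=%s shadow=%s host=%s\n",
            PGMGetModeName(PGMGetGuestMode(pVM)),
            PGMGetModeName(PGMGetShadowMode(pVM)),
            PGMGetModeName(PGMGetHostMode(pVM))));
}
#endif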
1938
1939
1940/**
1941 * Acquire the PGM lock.
1942 *
1943 * @returns VBox status code
1944 * @param pVM The VM to operate on.
1945 */
1946int pgmLock(PVM pVM)
1947{
1948 int rc = PDMCritSectEnter(&pVM->pgm.s.CritSect, VERR_SEM_BUSY);
1949#ifdef IN_RC
1950 if (rc == VERR_SEM_BUSY)
1951 rc = VMMGCCallHost(pVM, VMMCALLHOST_PGM_LOCK, 0);
1952#elif defined(IN_RING0)
1953 if (rc == VERR_SEM_BUSY)
1954 rc = VMMR0CallHost(pVM, VMMCALLHOST_PGM_LOCK, 0);
1955#endif
1956 AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc));
1957 return rc;
1958}
1959
1960
1961/**
1962 * Release the PGM lock.
1963 *
1965 * @param pVM The VM to operate on.
1966 */
1967void pgmUnlock(PVM pVM)
1968{
1969 PDMCritSectLeave(&pVM->pgm.s.CritSect);
1970}
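
/*
 * Usage sketch: the intended pgmLock/pgmUnlock bracket around PGM state
 * access. Purely illustrative; the body comment marks assumed caller code.
 */
#if 0
static void pgmSketchLockedAccess(PVM pVM)
{
    int rc = pgmLock(pVM);
    AssertRCReturnVoid(rc);
    /* ... access PGM state that must not race other contexts here ... */
    pgmUnlock(pVM);
}
#endif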
1971
1972#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1973
1974/**
1975 * Temporarily maps one guest page specified by GC physical address.
1976 * These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
1977 *
1978 * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
1979 * reused after 8 mappings (or perhaps a few more if you score with the cache).
1980 *
1981 * @returns VBox status.
1982 * @param pVM VM handle.
1983 * @param GCPhys GC Physical address of the page.
1984 * @param ppv Where to store the address of the mapping.
1985 */
1986VMMDECL(int) PGMDynMapGCPage(PVM pVM, RTGCPHYS GCPhys, void **ppv)
1987{
1988 AssertMsg(!(GCPhys & PAGE_OFFSET_MASK), ("GCPhys=%RGp\n", GCPhys));
1989
1990 /*
1991 * Get the ram range.
1992 */
1993 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
1994 while (pRam && GCPhys - pRam->GCPhys >= pRam->cb)
1995 pRam = pRam->CTX_SUFF(pNext);
1996 if (!pRam)
1997 {
1998 AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
1999 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2000 }
2001
2002 /*
2003 * Pass it on to PGMDynMapHCPage.
2004 */
2005 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT]);
2006 //Log(("PGMDynMapGCPage: GCPhys=%RGp HCPhys=%RHp\n", GCPhys, HCPhys));
2007#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
2008 pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
2009#else
2010 PGMDynMapHCPage(pVM, HCPhys, ppv);
2011#endif
2012 return VINF_SUCCESS;
2013}
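
/*
 * Usage sketch (hypothetical helper): reading from a guest page via the
 * transient mapping. Remember the 8-page window: use the pointer right away
 * and don't cache it across further PGMDynMap* calls.
 */
#if 0
static uint32_t pgmSketchReadGCPage(PVM pVM, RTGCPHYS GCPhysPage)
{
    void *pv;
    int rc = PGMDynMapGCPage(pVM, GCPhysPage & ~(RTGCPHYS)PAGE_OFFSET_MASK, &pv);
    AssertRCReturn(rc, 0);
    return *(uint32_t *)pv; /* first dword of the page */
}
#endif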
2014
2015
2016/**
2017 * Temporarily maps one guest page specified by unaligned GC physical address.
2018 * These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
2019 *
2020 * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
2021 * reused after 8 mappings (or perhaps a few more if you score with the cache).
2022 *
2023 * The caller must be aware that only the specified page is mapped and that really
2024 * bad things will happen when writing beyond the page boundary!
2025 *
2026 * @returns VBox status.
2027 * @param pVM VM handle.
2028 * @param GCPhys GC Physical address within the page to be mapped.
2029 * @param ppv Where to store the mapping address corresponding to GCPhys.
2030 */
2031VMMDECL(int) PGMDynMapGCPageOff(PVM pVM, RTGCPHYS GCPhys, void **ppv)
2032{
2033 /*
2034 * Get the ram range.
2035 */
2036 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
2037 while (pRam && GCPhys - pRam->GCPhys >= pRam->cb)
2038 pRam = pRam->CTX_SUFF(pNext);
2039 if (!pRam)
2040 {
2041 AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
2042 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2043 }
2044
2045 /*
2046 * Pass it on to PGMDynMapHCPage.
2047 */
2048 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT]);
2049#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
2050 pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
2051#else
2052 PGMDynMapHCPage(pVM, HCPhys, ppv);
2053#endif
2054 *ppv = (void *)((uintptr_t)*ppv | (GCPhys & PAGE_OFFSET_MASK));
2055 return VINF_SUCCESS;
2056}
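
/*
 * Usage sketch for the unaligned variant: the returned pointer already
 * carries the page offset, and reads must not cross the page boundary.
 * The helper name is made up.
 */
#if 0
static uint16_t pgmSketchReadU16(PVM pVM, RTGCPHYS GCPhys)
{
    void *pv;
    Assert((GCPhys & PAGE_OFFSET_MASK) <= PAGE_SIZE - sizeof(uint16_t));
    int rc = PGMDynMapGCPageOff(pVM, GCPhys, &pv);
    AssertRCReturn(rc, 0);
    return *(uint16_t *)pv;
}
#endif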
2057
2058# ifdef IN_RC
2059
2060/**
2061 * Temporarily maps one host page specified by HC physical address.
2062 *
2063 * Be WARNED that the dynamic page mapping area is small, 16 pages, thus the space is
2064 * reused after 16 mappings (or perhaps a few more if you score with the cache).
2065 *
2066 * @returns VINF_SUCCESS, will bail out to ring-3 on failure.
2067 * @param pVM VM handle.
2068 * @param HCPhys HC Physical address of the page.
2069 * @param ppv Where to store the address of the mapping. This is the
2070 * address of the PAGE not the exact address corresponding
2071 * to HCPhys. Use PGMDynMapHCPageOff if you care for the
2072 * page offset.
2073 */
2074VMMDECL(int) PGMDynMapHCPage(PVM pVM, RTHCPHYS HCPhys, void **ppv)
2075{
2076 AssertMsg(!(HCPhys & PAGE_OFFSET_MASK), ("HCPhys=%RHp\n", HCPhys));
2077
2078 /*
2079 * Check the cache.
2080 */
2081 register unsigned iCache;
2082 for (iCache = 0; iCache < RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache); iCache++)
2083 {
2084 static const uint8_t au8Trans[MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT][RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache)] =
2085 {
2086 { 0, 9, 10, 11, 12, 13, 14, 15},
2087 { 0, 1, 10, 11, 12, 13, 14, 15},
2088 { 0, 1, 2, 11, 12, 13, 14, 15},
2089 { 0, 1, 2, 3, 12, 13, 14, 15},
2090 { 0, 1, 2, 3, 4, 13, 14, 15},
2091 { 0, 1, 2, 3, 4, 5, 14, 15},
2092 { 0, 1, 2, 3, 4, 5, 6, 15},
2093 { 0, 1, 2, 3, 4, 5, 6, 7},
2094 { 8, 1, 2, 3, 4, 5, 6, 7},
2095 { 8, 9, 2, 3, 4, 5, 6, 7},
2096 { 8, 9, 10, 3, 4, 5, 6, 7},
2097 { 8, 9, 10, 11, 4, 5, 6, 7},
2098 { 8, 9, 10, 11, 12, 5, 6, 7},
2099 { 8, 9, 10, 11, 12, 13, 6, 7},
2100 { 8, 9, 10, 11, 12, 13, 14, 7},
2101 { 8, 9, 10, 11, 12, 13, 14, 15},
2102 };
2103 AssertCompile(RT_ELEMENTS(au8Trans) == 16);
2104 AssertCompile(RT_ELEMENTS(au8Trans[0]) == 8);
2105
2106 if (pVM->pgm.s.aHCPhysDynPageMapCache[iCache] == HCPhys)
2107 {
2108 int iPage = au8Trans[pVM->pgm.s.iDynPageMapLast][iCache];
2109
2110 /* The cache can get out of sync with locked entries, so double-check the PTE. (E.g. page 10 is locked, page 2 overwrites its cache position, last = 11; a lookup of 2 would then yield page 10 instead of 2.) */
2111 if ((pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage].u & X86_PTE_PG_MASK) == HCPhys)
2112 {
2113 void *pv = pVM->pgm.s.pbDynPageMapBaseGC + (iPage << PAGE_SHIFT);
2114 *ppv = pv;
2115 STAM_COUNTER_INC(&pVM->pgm.s.StatRCDynMapCacheHits);
2116 Log4(("PGMDynMapHCPage: HCPhys=%RHp pv=%p iPage=%d iCache=%d\n", HCPhys, pv, iPage, iCache));
2117 return VINF_SUCCESS;
2118 }
2119 }
2120 }
2121 AssertCompile(RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) == 8);
2122 AssertCompile((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) == 16);
2123 STAM_COUNTER_INC(&pVM->pgm.s.StatRCDynMapCacheMisses);
2124
2125 /*
2126 * Update the page tables.
2127 */
2128 register unsigned iPage = pVM->pgm.s.iDynPageMapLast;
2129 unsigned i;
2130 for (i = 0; i < (MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT); i++)
2131 {
2132 pVM->pgm.s.iDynPageMapLast = iPage = (iPage + 1) & ((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) - 1);
2133 if (!pVM->pgm.s.aLockedDynPageMapCache[iPage])
2134 break;
2135 /* no extra increment here: iPage is advanced (mod table size) at the top, a second bump would probe only every other slot */
2136 }
2137 AssertRelease(i != (MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT));
2138
2139 pVM->pgm.s.aHCPhysDynPageMapCache[iPage & (RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) - 1)] = HCPhys;
2140 pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage].u = (uint32_t)HCPhys | X86_PTE_P | X86_PTE_A | X86_PTE_D;
2141 pVM->pgm.s.paDynPageMapPaePTEsGC[iPage].u = HCPhys | X86_PTE_P | X86_PTE_A | X86_PTE_D;
2142 pVM->pgm.s.aLockedDynPageMapCache[iPage] = 0;
2143
2144 void *pv = pVM->pgm.s.pbDynPageMapBaseGC + (iPage << PAGE_SHIFT);
2145 *ppv = pv;
2146 ASMInvalidatePage(pv);
2147 Log4(("PGMDynMapHCPage: HCPhys=%RHp pv=%p iPage=%d\n", HCPhys, pv, iPage));
2148 return VINF_SUCCESS;
2149}
2150
2151
2152/**
2153 * Temporarily lock a dynamic page to prevent it from being reused.
2154 *
2155 * @param pVM VM handle.
2156 * @param GCPage GC address of page
2157 */
2158VMMDECL(void) PGMDynLockHCPage(PVM pVM, RCPTRTYPE(uint8_t *) GCPage)
2159{
2160 unsigned iPage;
2161
2162 Assert(GCPage >= pVM->pgm.s.pbDynPageMapBaseGC && GCPage < (pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE));
2163 iPage = ((uintptr_t)(GCPage - pVM->pgm.s.pbDynPageMapBaseGC)) >> PAGE_SHIFT;
2164 ASMAtomicIncU32(&pVM->pgm.s.aLockedDynPageMapCache[iPage]);
2165}
2166
2167
2168/**
2169 * Unlock a dynamic page
2170 *
2171 * @param pVM VM handle.
2172 * @param GCPage GC address of page
2173 */
2174VMMDECL(void) PGMDynUnlockHCPage(PVM pVM, RCPTRTYPE(uint8_t *) GCPage)
2175{
2176 unsigned iPage;
2177
2178 AssertCompile(RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) == RT_ELEMENTS(pVM->pgm.s.aLockedDynPageMapCache));
2179
2180 Assert(GCPage >= pVM->pgm.s.pbDynPageMapBaseGC && GCPage < (pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE));
2181 iPage = ((uintptr_t)(GCPage - pVM->pgm.s.pbDynPageMapBaseGC)) >> PAGE_SHIFT;
2182 Assert(pVM->pgm.s.aLockedDynPageMapCache[iPage]);
2183 ASMAtomicDecU32(&pVM->pgm.s.aLockedDynPageMapCache[iPage]);
2184}
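
/*
 * Usage sketch (RC context assumed): pinning a dynamic mapping so it survives
 * further PGMDynMapHCPage() calls, then releasing it. Illustrative only.
 */
#if 0
static void pgmSketchPinnedMapping(PVM pVM, RTHCPHYS HCPhys)
{
    void *pv;
    if (RT_SUCCESS(PGMDynMapHCPage(pVM, HCPhys, &pv)))
    {
        PGMDynLockHCPage(pVM, (uint8_t *)pv);
        /* ... code that may itself map more dynamic pages ... */
        PGMDynUnlockHCPage(pVM, (uint8_t *)pv);
    }
}
#endif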
2185
2186
2187# ifdef VBOX_STRICT
2188/**
2189 * Check for lock leaks.
2190 *
2191 * @param pVM VM handle.
2192 */
2193VMMDECL(void) PGMDynCheckLocks(PVM pVM)
2194{
2195 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.aLockedDynPageMapCache); i++)
2196 Assert(!pVM->pgm.s.aLockedDynPageMapCache[i]);
2197}
2198# endif /* VBOX_STRICT */
2199
2200# endif /* IN_RC */
2201#endif /* IN_RC || VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
2202
2203#if !defined(IN_R0) || defined(LOG_ENABLED)
2204
2205/** Format handler for PGMPAGE.
2206 * @copydoc FNRTSTRFORMATTYPE */
2207static DECLCALLBACK(size_t) pgmFormatTypeHandlerPage(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
2208 const char *pszType, void const *pvValue,
2209 int cchWidth, int cchPrecision, unsigned fFlags,
2210 void *pvUser)
2211{
2212 size_t cch;
2213 PCPGMPAGE pPage = (PCPGMPAGE)pvValue;
2214 if (VALID_PTR(pPage))
2215 {
2216 char szTmp[64+80];
2217
2218 cch = 0;
2219
2220 /* The single char state stuff. */
2221 static const char s_achPageStates[4] = { 'Z', 'A', 'W', 'S' };
2222 szTmp[cch++] = s_achPageStates[PGM_PAGE_GET_STATE(pPage)];
2223
2224#define IS_PART_INCLUDED(lvl) ( !(fFlags & RTSTR_F_PRECISION) || cchPrecision == (lvl) || cchPrecision >= (lvl)+10 )
2225 if (IS_PART_INCLUDED(5))
2226 {
2227 static const char s_achHandlerStates[4] = { '-', 't', 'w', 'a' };
2228 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)];
2229 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_VIRT_STATE(pPage)];
2230 }
2231
2232 /* The type. */
2233 if (IS_PART_INCLUDED(4))
2234 {
2235 szTmp[cch++] = ':';
2236 static const char s_achPageTypes[8][4] = { "INV", "RAM", "MI2", "M2A", "SHA", "ROM", "MIO", "BAD" };
2237 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE(pPage)][0];
2238 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE(pPage)][1];
2239 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE(pPage)][2];
2240 }
2241
2242 /* The numbers. */
2243 if (IS_PART_INCLUDED(3))
2244 {
2245 szTmp[cch++] = ':';
2246 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_HCPHYS(pPage), 16, 12, 0, RTSTR_F_ZEROPAD | RTSTR_F_64BIT);
2247 }
2248
2249 if (IS_PART_INCLUDED(2))
2250 {
2251 szTmp[cch++] = ':';
2252 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_PAGEID(pPage), 16, 7, 0, RTSTR_F_ZEROPAD | RTSTR_F_32BIT);
2253 }
2254
2255 if (IS_PART_INCLUDED(6))
2256 {
2257 szTmp[cch++] = ':';
2258 static const char s_achRefs[4] = { '-', 'U', '!', 'L' };
2259 szTmp[cch++] = s_achRefs[PGM_PAGE_GET_TD_CREFS(pPage)];
2260 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_TD_IDX(pPage), 16, 4, 0, RTSTR_F_ZEROPAD | RTSTR_F_16BIT);
2261 }
2262#undef IS_PART_INCLUDED
2263
2264 cch = pfnOutput(pvArgOutput, szTmp, cch);
2265 }
2266 else
2267 cch = pfnOutput(pvArgOutput, "<bad-pgmpage-ptr>", sizeof("<bad-pgmpage-ptr>") - 1);
2268 return cch;
2269}
2270
2271
2272/** Format handler for PGMRAMRANGE.
2273 * @copydoc FNRTSTRFORMATTYPE */
2274static DECLCALLBACK(size_t) pgmFormatTypeHandlerRamRange(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
2275 const char *pszType, void const *pvValue,
2276 int cchWidth, int cchPrecision, unsigned fFlags,
2277 void *pvUser)
2278{
2279 size_t cch;
2280 PGMRAMRANGE const *pRam = (PGMRAMRANGE const *)pvValue;
2281 if (VALID_PTR(pRam))
2282 {
2283 char szTmp[80];
2284 cch = RTStrPrintf(szTmp, sizeof(szTmp), "%RGp-%RGp", pRam->GCPhys, pRam->GCPhysLast);
2285 cch = pfnOutput(pvArgOutput, szTmp, cch);
2286 }
2287 else
2288 cch = pfnOutput(pvArgOutput, "<bad-pgmramrange-ptr>", sizeof("<bad-pgmramrange-ptr>") - 1);
2289 return cch;
2290}
2291
2292/** Format type handlers to be registered/deregistered. */
2293static const struct
2294{
2295 char szType[24];
2296 PFNRTSTRFORMATTYPE pfnHandler;
2297} g_aPgmFormatTypes[] =
2298{
2299 { "pgmpage", pgmFormatTypeHandlerPage },
2300 { "pgmramrange", pgmFormatTypeHandlerRamRange }
2301};
2302
2303#endif /* !IN_R0 || LOG_ENABLED */
2304
2305
2306/**
2307 * Registers the global string format types.
2308 *
2309 * This should be called at module load time or in some other manner that ensures
2310 * that it's called exactly once.
2311 *
2312 * @returns VINF_SUCCESS on success, IPRT status code on RTStrFormatTypeRegister failure.
2313 */
2314VMMDECL(int) PGMRegisterStringFormatTypes(void)
2315{
2316#if !defined(IN_R0) || defined(LOG_ENABLED)
2317 int rc = VINF_SUCCESS;
2318 unsigned i;
2319 for (i = 0; RT_SUCCESS(rc) && i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
2320 {
2321 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
2322# ifdef IN_RING0
2323 if (rc == VERR_ALREADY_EXISTS)
2324 {
2325 /* in case of cleanup failure in ring-0 */
2326 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2327 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
2328 }
2329# endif
2330 }
2331 if (RT_FAILURE(rc))
2332 while (i-- > 0)
2333 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2334
2335 return rc;
2336#else
2337 return VINF_SUCCESS;
2338#endif
2339}
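
/*
 * Usage sketch: once registered, the custom types are consumed through the
 * IPRT %R[...] format directive; the surrounding function is made up.
 */
#if 0
static void pgmSketchLogPage(PCPGMPAGE pPage, PPGMRAMRANGE pRam)
{
    Log(("pgm: %R[pgmpage] in range %R[pgmramrange]\n", pPage, pRam));
}
#endif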
2340
2341
2342/**
2343 * Deregisters the global string format types.
2344 *
2345 * This should be called at module unload time or in some other manner that
2346 * ensures that it's called exactly once.
2347 */
2348VMMDECL(void) PGMDeregisterStringFormatTypes(void)
2349{
2350#if !defined(IN_R0) || defined(LOG_ENABLED)
2351 for (unsigned i = 0; i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
2352 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2353#endif
2354}
2355
2356#ifdef VBOX_STRICT
2357
2358/**
2359 * Asserts that there are no mapping conflicts.
2360 *
2361 * @returns Number of conflicts.
2362 * @param pVM The VM Handle.
2363 */
2364VMMDECL(unsigned) PGMAssertNoMappingConflicts(PVM pVM)
2365{
2366 unsigned cErrors = 0;
2367
2368 /*
2369 * Check for mapping conflicts.
2370 */
2371 for (PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
2372 pMapping;
2373 pMapping = pMapping->CTX_SUFF(pNext))
2374 {
2375 /** @todo This is slow and should be optimized, but since it's just assertions I don't care now. */
2376 for (RTGCPTR GCPtr = pMapping->GCPtr;
2377 GCPtr <= pMapping->GCPtrLast;
2378 GCPtr += PAGE_SIZE)
2379 {
2380 int rc = PGMGstGetPage(pVM, (RTGCPTR)GCPtr, NULL, NULL);
2381 if (rc != VERR_PAGE_TABLE_NOT_PRESENT)
2382 {
2383 AssertMsgFailed(("Conflict at %RGv with %s\n", GCPtr, R3STRING(pMapping->pszDesc)));
2384 cErrors++;
2385 break;
2386 }
2387 }
2388 }
2389
2390 return cErrors;
2391}
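
/*
 * Usage sketch: a strict-build self-check after fiddling with mappings.
 * NOREF() keeps non-asserting builds quiet; the wrapper name is made up.
 */
#if 0
static void pgmSketchStrictMappingCheck(PVM pVM)
{
    unsigned cErrors = PGMAssertNoMappingConflicts(pVM);
    Assert(cErrors == 0); NOREF(cErrors);
}
#endif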
2392
2393
2394/**
2395 * Asserts that everything related to the guest CR3 is correctly shadowed.
2396 *
2397 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
2398 * and assert the correctness of the guest CR3 mapping before asserting that the
2399 * shadow page tables are in sync with the guest page tables.
2400 *
2401 * @returns Number of conflicts.
2402 * @param pVM The VM Handle.
2403 * @param cr3 The current guest CR3 register value.
2404 * @param cr4 The current guest CR4 register value.
2405 */
2406VMMDECL(unsigned) PGMAssertCR3(PVM pVM, uint64_t cr3, uint64_t cr4)
2407{
2408 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
2409 unsigned cErrors = PGM_BTH_PFN(AssertCR3, pVM)(pVM, cr3, cr4, 0, ~(RTGCPTR)0);
2410 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
2411 return cErrors;
2412}
2413
2414#endif /* VBOX_STRICT */