VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp@13099

Last change on this file since 13099 was 13099, checked in by vboxsync, 16 years ago

PGM: Count real guest page faults.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 63.4 KB
1/* $Id: PGMAll.cpp 13099 2008-10-08 17:22:23Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor - All context code.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#define LOG_GROUP LOG_GROUP_PGM
26#include <VBox/pgm.h>
27#include <VBox/cpum.h>
28#include <VBox/selm.h>
29#include <VBox/iom.h>
30#include <VBox/sup.h>
31#include <VBox/mm.h>
32#include <VBox/stam.h>
33#include <VBox/csam.h>
34#include <VBox/patm.h>
35#include <VBox/trpm.h>
36#include <VBox/rem.h>
37#include <VBox/em.h>
38#include <VBox/hwaccm.h>
39#include <VBox/hwacc_vmx.h>
40#include "PGMInternal.h"
41#include <VBox/vm.h>
42#include <iprt/assert.h>
43#include <iprt/asm.h>
44#include <iprt/string.h>
45#include <VBox/log.h>
46#include <VBox/param.h>
47#include <VBox/err.h>
48
49
50/*******************************************************************************
51* Structures and Typedefs *
52*******************************************************************************/
53/**
54 * State structure for PGM_GST_NAME(HandlerVirtualUpdate) that's
55 * passed to PGM_GST_NAME(VirtHandlerUpdateOne) during enumeration.
56 */
57typedef struct PGMHVUSTATE
58{
59 /** The VM handle. */
60 PVM pVM;
61 /** The todo flags. */
62 RTUINT fTodo;
63 /** The CR4 register value. */
64 uint32_t cr4;
65} PGMHVUSTATE, *PPGMHVUSTATE;
66
67
68/*******************************************************************************
69* Internal Functions *
70*******************************************************************************/
71
72/*
73 * Shadow - 32-bit mode
74 */
75#define PGM_SHW_TYPE PGM_TYPE_32BIT
76#define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
77#include "PGMAllShw.h"
78
79/* Guest - real mode */
80#define PGM_GST_TYPE PGM_TYPE_REAL
81#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
82#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
83#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
84#include "PGMAllGst.h"
85#include "PGMAllBth.h"
86#undef BTH_PGMPOOLKIND_PT_FOR_PT
87#undef PGM_BTH_NAME
88#undef PGM_GST_TYPE
89#undef PGM_GST_NAME
90
91/* Guest - protected mode */
92#define PGM_GST_TYPE PGM_TYPE_PROT
93#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
94#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
95#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
96#include "PGMAllGst.h"
97#include "PGMAllBth.h"
98#undef BTH_PGMPOOLKIND_PT_FOR_PT
99#undef PGM_BTH_NAME
100#undef PGM_GST_TYPE
101#undef PGM_GST_NAME
102
103/* Guest - 32-bit mode */
104#define PGM_GST_TYPE PGM_TYPE_32BIT
105#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
106#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
107#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
108#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
109#include "PGMAllGst.h"
110#include "PGMAllBth.h"
111#undef BTH_PGMPOOLKIND_PT_FOR_BIG
112#undef BTH_PGMPOOLKIND_PT_FOR_PT
113#undef PGM_BTH_NAME
114#undef PGM_GST_TYPE
115#undef PGM_GST_NAME
116
117#undef PGM_SHW_TYPE
118#undef PGM_SHW_NAME
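/*
 * Illustrative sketch (hypothetical names, not part of the original file):
 * the blocks in this file use the "C templates via macros and re-inclusion"
 * technique; each shadow/guest mode pair re-includes PGMAllShw.h, PGMAllGst.h
 * and PGMAllBth.h with different name-mangling macros. A minimal standalone
 * analogue of the technique:
 */
#if 0 /* sketch only */
/* impl.h -- included once per instantiation */
int NAME(sum)(const int *pa, unsigned c)
{
    int s = 0;
    for (unsigned i = 0; i < c; i++)
        s += pa[i];
    return s;
}

/* user.c -- instantiate the "template" twice */
#define NAME(x) variantA_##x
#include "impl.h"
#undef  NAME
#define NAME(x) variantB_##x
#include "impl.h"
#undef  NAME
#endif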
119
120
121/*
122 * Shadow - PAE mode
123 */
124#define PGM_SHW_TYPE PGM_TYPE_PAE
125#define PGM_SHW_NAME(name) PGM_SHW_NAME_PAE(name)
126#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
127#include "PGMAllShw.h"
128
129/* Guest - real mode */
130#define PGM_GST_TYPE PGM_TYPE_REAL
131#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
132#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
133#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
134#include "PGMAllBth.h"
135#undef BTH_PGMPOOLKIND_PT_FOR_PT
136#undef PGM_BTH_NAME
137#undef PGM_GST_TYPE
138#undef PGM_GST_NAME
139
140/* Guest - protected mode */
141#define PGM_GST_TYPE PGM_TYPE_PROT
142#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
143#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
144#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
145#include "PGMAllBth.h"
146#undef BTH_PGMPOOLKIND_PT_FOR_PT
147#undef PGM_BTH_NAME
148#undef PGM_GST_TYPE
149#undef PGM_GST_NAME
150
151/* Guest - 32-bit mode */
152#define PGM_GST_TYPE PGM_TYPE_32BIT
153#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
154#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
155#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
156#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
157#include "PGMAllBth.h"
158#undef BTH_PGMPOOLKIND_PT_FOR_BIG
159#undef BTH_PGMPOOLKIND_PT_FOR_PT
160#undef PGM_BTH_NAME
161#undef PGM_GST_TYPE
162#undef PGM_GST_NAME
163
164
165/* Guest - PAE mode */
166#define PGM_GST_TYPE PGM_TYPE_PAE
167#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
168#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
169#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
170#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
171#include "PGMAllGst.h"
172#include "PGMAllBth.h"
173#undef BTH_PGMPOOLKIND_PT_FOR_BIG
174#undef BTH_PGMPOOLKIND_PT_FOR_PT
175#undef PGM_BTH_NAME
176#undef PGM_GST_TYPE
177#undef PGM_GST_NAME
178
179#undef PGM_SHW_TYPE
180#undef PGM_SHW_NAME
181
182
183#ifndef IN_GC /* AMD64 implies VT-x/AMD-V */
184/*
185 * Shadow - AMD64 mode
186 */
187#define PGM_SHW_TYPE PGM_TYPE_AMD64
188#define PGM_SHW_NAME(name) PGM_SHW_NAME_AMD64(name)
189#include "PGMAllShw.h"
190
191/* Guest - protected mode */
192#define PGM_GST_TYPE PGM_TYPE_PROT
193#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
194#define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
195#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
196#include "PGMAllBth.h"
197#undef BTH_PGMPOOLKIND_PT_FOR_PT
198#undef PGM_BTH_NAME
199#undef PGM_GST_TYPE
200#undef PGM_GST_NAME
201
202/* Guest - AMD64 mode */
203#define PGM_GST_TYPE PGM_TYPE_AMD64
204#define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
205#define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
206#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
207#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
208#include "PGMAllGst.h"
209#include "PGMAllBth.h"
210#undef BTH_PGMPOOLKIND_PT_FOR_BIG
211#undef BTH_PGMPOOLKIND_PT_FOR_PT
212#undef PGM_BTH_NAME
213#undef PGM_GST_TYPE
214#undef PGM_GST_NAME
215
216#undef PGM_SHW_TYPE
217#undef PGM_SHW_NAME
218
219/*
220 * Shadow - Nested paging mode
221 */
222#define PGM_SHW_TYPE PGM_TYPE_NESTED
223#define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED(name)
224#include "PGMAllShw.h"
225
226/* Guest - real mode */
227#define PGM_GST_TYPE PGM_TYPE_REAL
228#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
229#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_REAL(name)
230#include "PGMAllBth.h"
231#undef PGM_BTH_NAME
232#undef PGM_GST_TYPE
233#undef PGM_GST_NAME
234
235/* Guest - protected mode */
236#define PGM_GST_TYPE PGM_TYPE_PROT
237#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
238#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PROT(name)
239#include "PGMAllBth.h"
240#undef PGM_BTH_NAME
241#undef PGM_GST_TYPE
242#undef PGM_GST_NAME
243
244/* Guest - 32-bit mode */
245#define PGM_GST_TYPE PGM_TYPE_32BIT
246#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
247#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT(name)
248#include "PGMAllBth.h"
249#undef PGM_BTH_NAME
250#undef PGM_GST_TYPE
251#undef PGM_GST_NAME
252
253/* Guest - PAE mode */
254#define PGM_GST_TYPE PGM_TYPE_PAE
255#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
256#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE(name)
257#include "PGMAllBth.h"
258#undef PGM_BTH_NAME
259#undef PGM_GST_TYPE
260#undef PGM_GST_NAME
261
262/* Guest - AMD64 mode */
263#define PGM_GST_TYPE PGM_TYPE_AMD64
264#define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
265#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64(name)
266#include "PGMAllBth.h"
267#undef PGM_BTH_NAME
268#undef PGM_GST_TYPE
269#undef PGM_GST_NAME
270
271#undef PGM_SHW_TYPE
272#undef PGM_SHW_NAME
273
274/*
275 * Shadow - EPT
276 */
277#define PGM_SHW_TYPE PGM_TYPE_EPT
278#define PGM_SHW_NAME(name) PGM_SHW_NAME_EPT(name)
279#include "PGMAllShw.h"
280
281/* Guest - real mode */
282#define PGM_GST_TYPE PGM_TYPE_REAL
283#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
284#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_REAL(name)
285#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
286#include "PGMAllBth.h"
287#undef BTH_PGMPOOLKIND_PT_FOR_PT
288#undef PGM_BTH_NAME
289#undef PGM_GST_TYPE
290#undef PGM_GST_NAME
291
292/* Guest - protected mode */
293#define PGM_GST_TYPE PGM_TYPE_PROT
294#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
295#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
296#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
297#include "PGMAllBth.h"
298#undef BTH_PGMPOOLKIND_PT_FOR_PT
299#undef PGM_BTH_NAME
300#undef PGM_GST_TYPE
301#undef PGM_GST_NAME
302
303/* Guest - 32-bit mode */
304#define PGM_GST_TYPE PGM_TYPE_32BIT
305#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
306#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_32BIT(name)
307#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
308#include "PGMAllBth.h"
309#undef BTH_PGMPOOLKIND_PT_FOR_PT
310#undef PGM_BTH_NAME
311#undef PGM_GST_TYPE
312#undef PGM_GST_NAME
313
314/* Guest - PAE mode */
315#define PGM_GST_TYPE PGM_TYPE_PAE
316#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
317#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PAE(name)
318#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
319#include "PGMAllBth.h"
320#undef BTH_PGMPOOLKIND_PT_FOR_PT
321#undef PGM_BTH_NAME
322#undef PGM_GST_TYPE
323#undef PGM_GST_NAME
324
325/* Guest - AMD64 mode */
326#define PGM_GST_TYPE PGM_TYPE_AMD64
327#define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
328#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_AMD64(name)
329#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
330#include "PGMAllBth.h"
331#undef BTH_PGMPOOLKIND_PT_FOR_PT
332#undef PGM_BTH_NAME
333#undef PGM_GST_TYPE
334#undef PGM_GST_NAME
335
336#undef PGM_SHW_TYPE
337#undef PGM_SHW_NAME
338
339#endif /* !IN_GC */
340
341
342#ifndef IN_RING3
343/**
344 * #PF Handler.
345 *
346 * @returns VBox status code (appropriate for trap handling and GC return).
347 * @param pVM VM Handle.
348 * @param uErr The trap error code.
349 * @param pRegFrame Trap register frame.
350 * @param pvFault The fault address.
351 */
352VMMDECL(int) PGMTrap0eHandler(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
353{
354 LogFlow(("PGMTrap0eHandler: uErr=%RGu pvFault=%VGv eip=%VGv\n", uErr, pvFault, pRegFrame->rip));
355 STAM_PROFILE_START(&pVM->pgm.s.StatRZTrap0e, a);
356 STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );
357
358
359#ifdef VBOX_WITH_STATISTICS
360 /*
361 * Error code stats.
362 */
363 if (uErr & X86_TRAP_PF_US)
364 {
365 if (!(uErr & X86_TRAP_PF_P))
366 {
367 if (uErr & X86_TRAP_PF_RW)
368 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSNotPresentWrite);
369 else
370 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSNotPresentRead);
371 }
372 else if (uErr & X86_TRAP_PF_RW)
373 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSWrite);
374 else if (uErr & X86_TRAP_PF_RSVD)
375 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSReserved);
376 else if (uErr & X86_TRAP_PF_ID)
377 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSNXE);
378 else
379 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSRead);
380 }
381 else
382 { /* Supervisor */
383 if (!(uErr & X86_TRAP_PF_P))
384 {
385 if (uErr & X86_TRAP_PF_RW)
386 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eSVNotPresentWrite);
387 else
388 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eSVNotPresentRead);
389 }
390 else if (uErr & X86_TRAP_PF_RW)
391 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eSVWrite);
392 else if (uErr & X86_TRAP_PF_ID)
393 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eSNXE);
394 else if (uErr & X86_TRAP_PF_RSVD)
395 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eSVReserved);
396 }
397#endif
398
399 /*
400 * Call the worker.
401 */
402 int rc = PGM_BTH_PFN(Trap0eHandler, pVM)(pVM, uErr, pRegFrame, pvFault);
403 if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
404 rc = VINF_SUCCESS;
405 STAM_STATS({ if (rc == VINF_EM_RAW_GUEST_TRAP) STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eGuestPF); });
406 STAM_STATS({ if (!pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution))
407 pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatRZTrap0eTime2Misc; });
408 STAM_PROFILE_STOP_EX(&pVM->pgm.s.StatRZTrap0e, pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution), a);
409 return rc;
410}
411#endif /* !IN_RING3 */
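/*
 * Illustrative sketch (not from the original source): the statistics block
 * above keys off the standard x86 #PF error code bits. A minimal decoder
 * using the same X86_TRAP_PF_* flags; pgmExampleLogPfErr is a hypothetical
 * helper, not a VBox API:
 */
#if 0 /* sketch only */
static void pgmExampleLogPfErr(RTGCUINT uErr)
{
    Log(("#PF: %s %s of a %s page%s%s\n",
         uErr & X86_TRAP_PF_US   ? "user"  : "supervisor",
         uErr & X86_TRAP_PF_RW   ? "write" : "read",
         uErr & X86_TRAP_PF_P    ? "present" : "not-present",
         uErr & X86_TRAP_PF_RSVD ? ", reserved bit set" : "",
         uErr & X86_TRAP_PF_ID   ? ", instruction fetch" : ""));
}
#endif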
412
413
414/**
415 * Prefetch a page
416 *
417 * Typically used to sync commonly used pages before entering raw mode
418 * after a CR3 reload.
419 *
420 * @returns VBox status code suitable for scheduling.
421 * @retval VINF_SUCCESS on success.
422 * @retval VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
423 * @param pVM VM handle.
424 * @param GCPtrPage Page to prefetch.
425 */
426VMMDECL(int) PGMPrefetchPage(PVM pVM, RTGCPTR GCPtrPage)
427{
428 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,Prefetch), a);
429 int rc = PGM_BTH_PFN(PrefetchPage, pVM)(pVM, (RTGCUINTPTR)GCPtrPage);
430 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,Prefetch), a);
431 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || VBOX_FAILURE(rc), ("rc=%Vrc\n", rc));
432 return rc;
433}
434
435
436/**
437 * Gets the mapping corresponding to the specified address (if any).
438 *
439 * @returns Pointer to the mapping.
440 * @returns NULL if not found.
441 *
442 * @param pVM The virtual machine.
443 * @param GCPtr The guest context pointer.
444 */
445PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr)
446{
447 PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
448 while (pMapping)
449 {
450 if ((uintptr_t)GCPtr < (uintptr_t)pMapping->GCPtr)
451 break;
452 if ((uintptr_t)GCPtr - (uintptr_t)pMapping->GCPtr < pMapping->cb)
453 return pMapping;
454 pMapping = pMapping->CTX_SUFF(pNext);
455 }
456 return NULL;
457}
458
459
460/**
461 * Verifies a range of pages for read or write access
462 *
463 * Only checks the guest's page tables
464 *
465 * @returns VBox status code.
466 * @param pVM VM handle.
467 * @param Addr Guest virtual address to check
468 * @param cbSize Access size
469 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
470 */
471VMMDECL(int) PGMIsValidAccess(PVM pVM, RTGCUINTPTR Addr, uint32_t cbSize, uint32_t fAccess)
472{
473 /*
474 * Validate input.
475 */
476 if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
477 {
478 AssertMsgFailed(("PGMIsValidAccess: invalid access type %08x\n", fAccess));
479 return VERR_INVALID_PARAMETER;
480 }
481
482 uint64_t fPage;
483 int rc = PGMGstGetPage(pVM, (RTGCPTR)Addr, &fPage, NULL);
484 if (VBOX_FAILURE(rc))
485 {
486 Log(("PGMIsValidAccess: access violation for %VGv rc=%d\n", Addr, rc));
487 return VINF_EM_RAW_GUEST_TRAP;
488 }
489
490 /*
491 * Check if the access would cause a page fault
492 *
493 * Note that hypervisor page directories are not present in the guest's tables, so this check
494 * is sufficient.
495 */
496 bool fWrite = !!(fAccess & X86_PTE_RW);
497 bool fUser = !!(fAccess & X86_PTE_US);
498 if ( !(fPage & X86_PTE_P)
499 || (fWrite && !(fPage & X86_PTE_RW))
500 || (fUser && !(fPage & X86_PTE_US)) )
501 {
502 Log(("PGMIsValidAccess: access violation for %VGv attr %#llx vs %d:%d\n", Addr, fPage, fWrite, fUser));
503 return VINF_EM_RAW_GUEST_TRAP;
504 }
505 if ( VBOX_SUCCESS(rc)
506 && PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize))
507 return PGMIsValidAccess(pVM, Addr + PAGE_SIZE, (cbSize > PAGE_SIZE) ? cbSize - PAGE_SIZE : 1, fAccess);
508 return rc;
509}
510
511
512/**
513 * Verifies a range of pages for read or write access
514 *
515 * Supports handling of pages marked for dirty bit tracking and CSAM
516 *
517 * @returns VBox status code.
518 * @param pVM VM handle.
519 * @param Addr Guest virtual address to check
520 * @param cbSize Access size
521 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
522 */
523VMMDECL(int) PGMVerifyAccess(PVM pVM, RTGCUINTPTR Addr, uint32_t cbSize, uint32_t fAccess)
524{
525 /*
526 * Validate input.
527 */
528 if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
529 {
530 AssertMsgFailed(("PGMVerifyAccess: invalid access type %08x\n", fAccess));
531 return VERR_INVALID_PARAMETER;
532 }
533
534 uint64_t fPageGst;
535 int rc = PGMGstGetPage(pVM, (RTGCPTR)Addr, &fPageGst, NULL);
536 if (VBOX_FAILURE(rc))
537 {
538 Log(("PGMVerifyAccess: access violation for %VGv rc=%d\n", Addr, rc));
539 return VINF_EM_RAW_GUEST_TRAP;
540 }
541
542 /*
543 * Check if the access would cause a page fault
544 *
545 * Note that hypervisor page directories are not present in the guest's tables, so this check
546 * is sufficient.
547 */
548 const bool fWrite = !!(fAccess & X86_PTE_RW);
549 const bool fUser = !!(fAccess & X86_PTE_US);
550 if ( !(fPageGst & X86_PTE_P)
551 || (fWrite && !(fPageGst & X86_PTE_RW))
552 || (fUser && !(fPageGst & X86_PTE_US)) )
553 {
554 Log(("PGMVerifyAccess: access violation for %VGv attr %#llx vs %d:%d\n", Addr, fPageGst, fWrite, fUser));
555 return VINF_EM_RAW_GUEST_TRAP;
556 }
557
558 if (!HWACCMIsNestedPagingActive(pVM))
559 {
560 /*
561 * Next step is to verify if we protected this page for dirty bit tracking or for CSAM scanning
562 */
563 rc = PGMShwGetPage(pVM, (RTGCPTR)Addr, NULL, NULL);
564 if ( rc == VERR_PAGE_NOT_PRESENT
565 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
566 {
567 /*
568 * Page is not present in our page tables.
569 * Try to sync it!
570 */
571 Assert(X86_TRAP_PF_RW == X86_PTE_RW && X86_TRAP_PF_US == X86_PTE_US);
572 uint32_t uErr = fAccess & (X86_TRAP_PF_RW | X86_TRAP_PF_US);
573 rc = PGM_BTH_PFN(VerifyAccessSyncPage, pVM)(pVM, Addr, fPageGst, uErr);
574 if (rc != VINF_SUCCESS)
575 return rc;
576 }
577 else
578 AssertMsg(rc == VINF_SUCCESS, ("PGMShwGetPage %VGv failed with %Vrc\n", Addr, rc));
579 }
580
581#if 0 /* def VBOX_STRICT; triggers too often now */
582 /*
583 * This check is a bit paranoid, but useful.
584 */
585 /** @note this will assert when writing to monitored pages (a bit annoying actually) */
586 uint64_t fPageShw;
587 rc = PGMShwGetPage(pVM, (RTGCPTR)Addr, &fPageShw, NULL);
588 if ( (rc == VERR_PAGE_NOT_PRESENT || VBOX_FAILURE(rc))
589 || (fWrite && !(fPageShw & X86_PTE_RW))
590 || (fUser && !(fPageShw & X86_PTE_US)) )
591 {
592 AssertMsgFailed(("Unexpected access violation for %VGv! rc=%Vrc write=%d user=%d\n",
593 Addr, rc, fWrite && !(fPageShw & X86_PTE_RW), fUser && !(fPageShw & X86_PTE_US)));
594 return VINF_EM_RAW_GUEST_TRAP;
595 }
596#endif
597
598 if ( VBOX_SUCCESS(rc)
599 && ( PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize - 1)
600 || Addr + cbSize < Addr))
601 {
602 /* Don't recursively call PGMVerifyAccess as we might run out of stack. */
603 for (;;)
604 {
605 Addr += PAGE_SIZE;
606 if (cbSize > PAGE_SIZE)
607 cbSize -= PAGE_SIZE;
608 else
609 cbSize = 1;
610 rc = PGMVerifyAccess(pVM, Addr, 1, fAccess);
611 if (rc != VINF_SUCCESS)
612 break;
613 if (PAGE_ADDRESS(Addr) == PAGE_ADDRESS(Addr + cbSize - 1))
614 break;
615 }
616 }
617 return rc;
618}
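/*
 * Illustrative usage sketch (not from the original source): a caller
 * validating a guest buffer for a user-mode write before touching it might
 * do something like the following; GCPtrBuf and cbBuf are hypothetical.
 */
#if 0 /* sketch only */
    int rc2 = PGMVerifyAccess(pVM, (RTGCUINTPTR)GCPtrBuf, cbBuf, X86_PTE_RW | X86_PTE_US);
    if (rc2 != VINF_SUCCESS)
        return rc2; /* typically VINF_EM_RAW_GUEST_TRAP; let the guest take the fault */
#endif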
619
620
621#ifndef IN_GC
622/**
623 * Emulation of the invlpg instruction (HC only actually).
624 *
625 * @returns VBox status code.
626 * @param pVM VM handle.
627 * @param GCPtrPage Page to invalidate.
628 * @remark ASSUMES the page table entry or page directory is
629 * valid. Fairly safe, but there could be edge cases!
630 * @todo Flush page or page directory only if necessary!
631 */
632VMMDECL(int) PGMInvalidatePage(PVM pVM, RTGCPTR GCPtrPage)
633{
634 int rc;
635
636 Log3(("PGMInvalidatePage: GCPtrPage=%VGv\n", GCPtrPage));
637
638 /** @todo merge PGMGCInvalidatePage with this one */
639
640#ifndef IN_RING3
641 /*
642 * Notify the recompiler so it can record this instruction.
643 * Failure happens when it's out of space. We'll return to HC in that case.
644 */
645 rc = REMNotifyInvalidatePage(pVM, GCPtrPage);
646 if (VBOX_FAILURE(rc))
647 return rc;
648#endif
649
650 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePage), a);
651 rc = PGM_BTH_PFN(InvalidatePage, pVM)(pVM, GCPtrPage);
652 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePage), a);
653
654#ifndef IN_RING0
655 /*
656 * Check if we have a pending update of the CR3 monitoring.
657 */
658 if ( VBOX_SUCCESS(rc)
659 && (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3))
660 {
661 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
662 Assert(!pVM->pgm.s.fMappingsFixed);
663 Assert(pVM->pgm.s.GCPhysCR3 == pVM->pgm.s.GCPhysGstCR3Monitored);
664 rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, pVM->pgm.s.GCPhysCR3);
665 }
666#endif
667
668#ifdef IN_RING3
669 /*
670 * Inform CSAM about the flush
671 */
672 /** @note this is to check if monitored pages have been changed; when we implement callbacks for virtual handlers, this is no longer required. */
673 CSAMR3FlushPage(pVM, GCPtrPage);
674#endif
675 return rc;
676}
677#endif
678
679
680/**
681 * Executes an instruction using the interpreter.
682 *
683 * @returns VBox status code (appropriate for trap handling and GC return).
684 * @param pVM VM handle.
685 * @param pRegFrame Register frame.
686 * @param pvFault Fault address.
687 */
688VMMDECL(int) PGMInterpretInstruction(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
689{
690 uint32_t cb;
691 int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);
692 if (rc == VERR_EM_INTERPRETER)
693 rc = VINF_EM_RAW_EMULATE_INSTR;
694 if (rc != VINF_SUCCESS)
695 Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%VGv)\n", rc, pvFault));
696 return rc;
697}
698
699
700/**
701 * Gets effective page information (from the VMM page directory).
702 *
703 * @returns VBox status.
704 * @param pVM VM Handle.
705 * @param GCPtr Guest Context virtual address of the page.
706 * @param pfFlags Where to store the flags. These are X86_PTE_*.
707 * @param pHCPhys Where to store the HC physical address of the page.
708 * This is page aligned.
709 * @remark You should use PGMMapGetPage() for pages in a mapping.
710 */
711VMMDECL(int) PGMShwGetPage(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
712{
713 return PGM_SHW_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtr, pfFlags, pHCPhys);
714}
715
716
717/**
718 * Sets (replaces) the page flags for a range of pages in the shadow context.
719 *
720 * @returns VBox status.
721 * @param pVM VM handle.
722 * @param GCPtr The address of the first page.
723 * @param cb The size of the range in bytes.
724 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
725 * @remark You must use PGMMapSetPage() for pages in a mapping.
726 */
727VMMDECL(int) PGMShwSetPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
728{
729 return PGMShwModifyPage(pVM, GCPtr, cb, fFlags, 0);
730}
731
732
733/**
734 * Modify page flags for a range of pages in the shadow context.
735 *
736 * The existing flags are ANDed with the fMask and ORed with the fFlags.
737 *
738 * @returns VBox status code.
739 * @param pVM VM handle.
740 * @param GCPtr Virtual address of the first page in the range.
741 * @param cb Size (in bytes) of the range to apply the modification to.
742 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
743 * @param fMask The AND mask - page flags X86_PTE_*.
744 * Be very CAREFUL when ~'ing constants which could be 32-bit!
745 * @remark You must use PGMMapModifyPage() for pages in a mapping.
746 */
747VMMDECL(int) PGMShwModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
748{
749 /*
750 * Validate input.
751 */
752 if (fFlags & X86_PTE_PAE_PG_MASK)
753 {
754 AssertMsgFailed(("fFlags=%#llx\n", fFlags));
755 return VERR_INVALID_PARAMETER;
756 }
757 if (!cb)
758 {
759 AssertFailed();
760 return VERR_INVALID_PARAMETER;
761 }
762
763 /*
764 * Align the input.
765 */
766 cb += (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
767 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
768 GCPtr = (RTGCPTR)((RTGCUINTPTR)GCPtr & PAGE_BASE_GC_MASK); /** @todo this ain't necessary, right... */
769
770 /*
771 * Call worker.
772 */
773 return PGM_SHW_PFN(ModifyPage, pVM)(pVM, (RTGCUINTPTR)GCPtr, cb, fFlags, fMask);
774}
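/*
 * Illustrative sketch (not from the original source): given the fFlags/fMask
 * semantics above, write-protecting a single shadow page could look like
 * this. The 64-bit cast guards against ~'ing a 32-bit constant, which could
 * otherwise clear the upper half of PAE/AMD64 entries (the "CAREFUL" remark
 * in the doc comment above).
 */
#if 0 /* sketch only */
    rc = PGMShwModifyPage(pVM, GCPtrPage, PAGE_SIZE,
                          0                     /* no flags to set */,
                          ~(uint64_t)X86_PTE_RW /* clear the write bit */);
#endif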
775
776/**
777 * Syncs the SHADOW page directory pointer for the specified address. Allocates
778 * backing pages in case the PDPT entry is missing.
779 *
780 * @returns VBox status.
781 * @param pVM VM handle.
782 * @param GCPtr The address.
783 * @param pGstPdpe Guest PDPT entry
784 * @param ppPD Receives address of page directory
785 */
786VMMDECL(int) PGMShwSyncPAEPDPtr(PVM pVM, RTGCUINTPTR GCPtr, PX86PDPE pGstPdpe, PX86PDPAE *ppPD)
787{
788 PPGM pPGM = &pVM->pgm.s;
789 PPGMPOOL pPool = pPGM->CTX_SUFF(pPool);
790 PPGMPOOLPAGE pShwPage;
791 int rc;
792
793 Assert(!HWACCMIsNestedPagingActive(pVM));
794
795 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
796 PX86PDPT pPdpt = pVM->pgm.s.CTXMID(p,PaePDPT);
797 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
798
799 /* Allocate page directory if not present. */
800 if ( !pPdpe->n.u1Present
801 && !(pPdpe->u & X86_PDPE_PG_MASK))
802 {
803 PX86PDPE pPdptGst = &CTXSUFF(pPGM->pGstPaePDPT)->a[iPdPt];
804
805 Assert(!(pPdpe->u & X86_PDPE_PG_MASK));
806 /* Create a reference back to the PDPT by using the index in its shadow page. */
807 rc = pgmPoolAlloc(pVM, pPdptGst->u & X86_PDPE_PG_MASK, PGMPOOLKIND_PAE_PD_FOR_PAE_PD, PGMPOOL_IDX_PDPT, iPdPt, &pShwPage);
808 if (rc == VERR_PGM_POOL_FLUSHED)
809 {
810 Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
811 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
812 return VINF_PGM_SYNC_CR3;
813 }
814 AssertRCReturn(rc, rc);
815 }
816 else
817 {
818 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
819 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
820 }
821 /* The PD was cached or created; hook it up now. */
822 pPdpe->u |= pShwPage->Core.Key
823 | (pGstPdpe->u & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));
824
825 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
826 return VINF_SUCCESS;
827}
828
829/**
830 * Gets the SHADOW page directory pointer for the specified address.
831 *
832 * @returns VBox status.
833 * @param pVM VM handle.
834 * @param GCPtr The address.
835 * @param ppPdpt Receives address of pdpt
836 * @param ppPD Receives address of page directory
837 */
838VMMDECL(int) PGMShwGetPAEPDPtr(PVM pVM, RTGCUINTPTR GCPtr, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
839{
840 PPGM pPGM = &pVM->pgm.s;
841 PPGMPOOL pPool = pPGM->CTX_SUFF(pPool);
842 PPGMPOOLPAGE pShwPage;
843
844 Assert(!HWACCMIsNestedPagingActive(pVM));
845
846 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
847 PX86PDPT pPdpt = pVM->pgm.s.CTXMID(p,PaePDPT);
848 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
849
850 *ppPdpt = pPdpt;
851 if (!pPdpe->n.u1Present)
852 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
853
854 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
855 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
856
857 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
858 return VINF_SUCCESS;
859}
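/*
 * Worked example (illustrative): in PAE mode a 32-bit linear address splits
 * into a 2-bit PDPT index, a 9-bit PD index, a 9-bit PT index and a 12-bit
 * page offset. E.g. GCPtr = 0xC0000000 gives iPdPt = (0xC0000000 >> 30) & 3
 * = 3, i.e. the last of the four PDPT entries handled above.
 */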
860
861#ifndef IN_GC
862/**
863 * Syncs the SHADOW page directory pointer for the specified address. Allocates
864 * backing pages in case the PDPT or PML4 entry is missing.
865 *
866 * @returns VBox status.
867 * @param pVM VM handle.
868 * @param GCPtr The address.
869 * @param pGstPml4e Guest PML4 entry
870 * @param pGstPdpe Guest PDPT entry
871 * @param ppPD Receives address of page directory
872 */
873VMMDECL(int) PGMShwSyncLongModePDPtr(PVM pVM, RTGCUINTPTR64 GCPtr, PX86PML4E pGstPml4e, PX86PDPE pGstPdpe, PX86PDPAE *ppPD)
874{
875 PPGM pPGM = &pVM->pgm.s;
876 const unsigned iPml4e = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
877 PPGMPOOL pPool = pPGM->CTX_SUFF(pPool);
878 PX86PML4E pPml4e;
879 PPGMPOOLPAGE pShwPage;
880 int rc;
881 bool fNestedPaging = HWACCMIsNestedPagingActive(pVM);
882
883 Assert(pVM->pgm.s.pHCPaePML4);
884
885 /* Allocate page directory pointer table if not present. */
886 pPml4e = &pPGM->pHCPaePML4->a[iPml4e];
887 if ( !pPml4e->n.u1Present
888 && !(pPml4e->u & X86_PML4E_PG_MASK))
889 {
890 Assert(!(pPml4e->u & X86_PML4E_PG_MASK));
891
892 if (!fNestedPaging)
893 {
894 Assert(pVM->pgm.s.pHCShwAmd64CR3);
895 Assert(pPGM->pGstPaePML4HC);
896
897 PX86PML4E pPml4eGst = &pPGM->pGstPaePML4HC->a[iPml4e];
898
899 rc = pgmPoolAlloc(pVM, pPml4eGst->u & X86_PML4E_PG_MASK, PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT, pVM->pgm.s.pHCShwAmd64CR3->idx, iPml4e, &pShwPage);
900 }
901 else
902 rc = pgmPoolAlloc(pVM, GCPtr + RT_BIT_64(63) /* hack: make the address unique */, PGMPOOLKIND_64BIT_PDPT_FOR_PHYS, PGMPOOL_IDX_NESTED_ROOT, iPml4e, &pShwPage);
903
904 if (rc == VERR_PGM_POOL_FLUSHED)
905 {
906 Log(("PGMShwSyncLongModePDPtr: PGM pool flushed (1) -> signal sync cr3\n"));
907 Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
908 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
909 return VINF_PGM_SYNC_CR3;
910 }
911 AssertRCReturn(rc, rc);
912 }
913 else
914 {
915 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
916 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
917 }
918 /* The PDPT was cached or created; hook it up now. */
919 pPml4e->u |= pShwPage->Core.Key
920 | (pGstPml4e->u & ~(X86_PML4E_PG_MASK | X86_PML4E_AVL_MASK | X86_PML4E_PCD | X86_PML4E_PWT));
921
922 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
923 PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
924 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
925
926 /* Allocate page directory if not present. */
927 if ( !pPdpe->n.u1Present
928 && !(pPdpe->u & X86_PDPE_PG_MASK))
929 {
930 if (!fNestedPaging)
931 {
932 Assert(pPGM->pGstPaePML4HC);
933
934 PX86PML4E pPml4eGst = &pPGM->pGstPaePML4HC->a[iPml4e];
935 PX86PDPT pPdptGst;
936 rc = PGM_GCPHYS_2_PTR(pVM, pPml4eGst->u & X86_PML4E_PG_MASK, &pPdptGst);
937 AssertRCReturn(rc, rc);
938
939 Assert(!(pPdpe->u & X86_PDPE_PG_MASK));
940 /* Create a reference back to the PDPT by using the index in its shadow page. */
941 rc = pgmPoolAlloc(pVM, pPdptGst->a[iPdPt].u & X86_PDPE_PG_MASK, PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD, pShwPage->idx, iPdPt, &pShwPage);
942 }
943 else
944 rc = pgmPoolAlloc(pVM, GCPtr + RT_BIT_64(62) /* hack: make the address unique */, PGMPOOLKIND_64BIT_PD_FOR_PHYS, pShwPage->idx, iPdPt, &pShwPage);
945
946 if (rc == VERR_PGM_POOL_FLUSHED)
947 {
948 Log(("PGMShwSyncLongModePDPtr: PGM pool flushed (2) -> signal sync cr3\n"));
949 Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
950 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
951 return VINF_PGM_SYNC_CR3;
952 }
953 AssertRCReturn(rc, rc);
954 }
955 else
956 {
957 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
958 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
959 }
960 /* The PD was cached or created; hook it up now. */
961 pPdpe->u |= pShwPage->Core.Key
962 | (pGstPdpe->u & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));
963
964 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
965 return VINF_SUCCESS;
966}
967
968/**
969 * Gets the SHADOW page directory pointer for the specified address.
970 *
971 * @returns VBox status.
972 * @param pVM VM handle.
973 * @param GCPtr The address.
974 * @param ppPdpt Receives address of pdpt
975 * @param ppPD Receives address of page directory
976 */
977VMMDECL(int) PGMShwGetLongModePDPtr(PVM pVM, RTGCUINTPTR64 GCPtr, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
978{
979 PPGM pPGM = &pVM->pgm.s;
980 const unsigned iPml4e = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
981 PPGMPOOL pPool = pPGM->CTX_SUFF(pPool);
982 PX86PML4E pPml4e;
983 PPGMPOOLPAGE pShwPage;
984
985 AssertReturn(pVM->pgm.s.pHCPaePML4, VERR_INTERNAL_ERROR);
986
987 pPml4e = &pPGM->pHCPaePML4->a[iPml4e];
988 if (!pPml4e->n.u1Present)
989 return VERR_PAGE_MAP_LEVEL4_NOT_PRESENT;
990
991 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
992 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
993
994 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
995 PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
996 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
997
998 *ppPdpt = pPdpt;
999 if (!pPdpe->n.u1Present)
1000 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1001
1002 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
1003 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1004
1005 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1006 return VINF_SUCCESS;
1007}
1008
1009/**
1010 * Syncs the SHADOW EPT page directory pointer for the specified address. Allocates
1011 * backing pages in case the PDPT or PML4 entry is missing.
1012 *
1013 * @returns VBox status.
1014 * @param pVM VM handle.
1015 * @param GCPtr The address.
1016 * @param ppPdpt Receives address of pdpt
1017 * @param ppPD Receives address of page directory
1018 */
1019VMMDECL(int) PGMShwGetEPTPDPtr(PVM pVM, RTGCUINTPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD)
1020{
1021 PPGM pPGM = &pVM->pgm.s;
1022 const unsigned iPml4e = (GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
1023 PPGMPOOL pPool = pPGM->CTX_SUFF(pPool);
1024 PEPTPML4 pPml4 = (PEPTPML4)pPGM->pHCNestedRoot;
1025 PEPTPML4E pPml4e;
1026 PPGMPOOLPAGE pShwPage;
1027 int rc;
1028
1029 Assert(HWACCMIsNestedPagingActive(pVM));
1030 Assert(pPml4);
1031
1032 /* Allocate page directory pointer table if not present. */
1033 pPml4e = &pPml4->a[iPml4e];
1034 if ( !pPml4e->n.u1Present
1035 && !(pPml4e->u & EPT_PML4E_PG_MASK))
1036 {
1037 Assert(!(pPml4e->u & EPT_PML4E_PG_MASK));
1038
1039 rc = pgmPoolAlloc(pVM, (GCPtr & EPT_PML4E_PG_MASK) + RT_BIT_64(63) /* hack: make the address unique */, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOL_IDX_NESTED_ROOT, iPml4e, &pShwPage);
1040 if (rc == VERR_PGM_POOL_FLUSHED)
1041 {
1042 Log(("PGMShwSyncEPTPDPtr: PGM pool flushed (1) -> signal sync cr3\n"));
1043 Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
1044 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
1045 return VINF_PGM_SYNC_CR3;
1046 }
1047 AssertRCReturn(rc, rc);
1048 }
1049 else
1050 {
1051 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
1052 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1053 }
1054 /* The PDPT was cached or created; hook it up now and fill with the default value. */
1055 pPml4e->u = pShwPage->Core.Key;
1056 pPml4e->n.u1Present = 1;
1057 pPml4e->n.u1Write = 1;
1058 pPml4e->n.u1Execute = 1;
1059
1060 const unsigned iPdPt = (GCPtr >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
1061 PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1062 PEPTPDPTE pPdpe = &pPdpt->a[iPdPt];
1063
1064 if (ppPdpt)
1065 *ppPdpt = pPdpt;
1066
1067 /* Allocate page directory if not present. */
1068 if ( !pPdpe->n.u1Present
1069 && !(pPdpe->u & EPT_PDPTE_PG_MASK))
1070 {
1071 rc = pgmPoolAlloc(pVM, (GCPtr & EPT_PDPTE_PG_MASK) + RT_BIT_64(62) /* hack: make the address unique */, PGMPOOLKIND_64BIT_PD_FOR_PHYS, pShwPage->idx, iPdPt, &pShwPage);
1072 if (rc == VERR_PGM_POOL_FLUSHED)
1073 {
1074 Log(("PGMShwSyncEPTPDPtr: PGM pool flushed (2) -> signal sync cr3\n"));
1075 Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
1076 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
1077 return VINF_PGM_SYNC_CR3;
1078 }
1079 AssertRCReturn(rc, rc);
1080 }
1081 else
1082 {
1083 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
1084 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1085 }
1086 /* The PD was cached or created; hook it up now and fill with the default value. */
1087 pPdpe->u = pShwPage->Core.Key;
1088 pPdpe->n.u1Present = 1;
1089 pPdpe->n.u1Write = 1;
1090 pPdpe->n.u1Execute = 1;
1091
1092 *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1093 return VINF_SUCCESS;
1094}
1095
1096#endif
1097
1098/**
1099 * Gets effective Guest OS page information.
1100 *
1101 * When GCPtr is in a big page, the function will return as if it was a normal
1102 * 4KB page. If the need to distinguish between big and normal pages arises
1103 * at a later point, a dedicated variant of PGMGstGetPage() will be created
1104 * for that purpose.
1105 *
1106 * @returns VBox status.
1107 * @param pVM VM Handle.
1108 * @param GCPtr Guest Context virtual address of the page.
1109 * @param pfFlags Where to store the flags. These are X86_PTE_*, even for big pages.
1110 * @param pGCPhys Where to store the GC physical address of the page.
1111 * This is page aligned.
1112 */
1113VMMDECL(int) PGMGstGetPage(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
1114{
1115 return PGM_GST_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtr, pfFlags, pGCPhys);
1116}
1117
1118
1119/**
1120 * Checks if the page is present.
1121 *
1122 * @returns true if the page is present.
1123 * @returns false if the page is not present.
1124 * @param pVM The VM handle.
1125 * @param GCPtr Address within the page.
1126 */
1127VMMDECL(bool) PGMGstIsPagePresent(PVM pVM, RTGCPTR GCPtr)
1128{
1129 int rc = PGMGstGetPage(pVM, GCPtr, NULL, NULL);
1130 return VBOX_SUCCESS(rc);
1131}
1132
1133
1134/**
1135 * Sets (replaces) the page flags for a range of pages in the guest's tables.
1136 *
1137 * @returns VBox status.
1138 * @param pVM VM handle.
1139 * @param GCPtr The address of the first page.
1140 * @param cb The size of the range in bytes.
1141 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
1142 */
1143VMMDECL(int) PGMGstSetPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
1144{
1145 return PGMGstModifyPage(pVM, GCPtr, cb, fFlags, 0);
1146}
1147
1148
1149/**
1150 * Modify page flags for a range of pages in the guest's tables
1151 *
1152 * The existing flags are ANDed with the fMask and ORed with the fFlags.
1153 *
1154 * @returns VBox status code.
1155 * @param pVM VM handle.
1156 * @param GCPtr Virtual address of the first page in the range.
1157 * @param cb Size (in bytes) of the range to apply the modification to.
1158 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
1159 * @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
1160 * Be very CAREFUL when ~'ing constants which could be 32-bit!
1161 */
1162VMMDECL(int) PGMGstModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
1163{
1164 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,GstModifyPage), a);
1165
1166 /*
1167 * Validate input.
1168 */
1169 if (fFlags & X86_PTE_PAE_PG_MASK)
1170 {
1171 AssertMsgFailed(("fFlags=%#llx\n", fFlags));
1172 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,GstModifyPage), a);
1173 return VERR_INVALID_PARAMETER;
1174 }
1175
1176 if (!cb)
1177 {
1178 AssertFailed();
1179 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,GstModifyPage), a);
1180 return VERR_INVALID_PARAMETER;
1181 }
1182
1183 LogFlow(("PGMGstModifyPage %VGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));
1184
1185 /*
1186 * Adjust input.
1187 */
1188 cb += (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
1189 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
1190 GCPtr = (RTGCPTR)((RTGCUINTPTR)GCPtr & PAGE_BASE_GC_MASK);
1191
1192 /*
1193 * Call worker.
1194 */
1195 int rc = PGM_GST_PFN(ModifyPage, pVM)(pVM, (RTGCUINTPTR)GCPtr, cb, fFlags, fMask);
1196
1197 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,GstModifyPage), a);
1198 return rc;
1199}
1200
1201
1202/**
1203 * Gets the current CR3 register value for the shadow memory context.
1204 * @returns CR3 value.
1205 * @param pVM The VM handle.
1206 */
1207VMMDECL(RTHCPHYS) PGMGetHyperCR3(PVM pVM)
1208{
1209 PGMMODE enmShadowMode = pVM->pgm.s.enmShadowMode;
1210 switch (enmShadowMode)
1211 {
1212 case PGMMODE_32_BIT:
1213 return pVM->pgm.s.HCPhys32BitPD;
1214
1215 case PGMMODE_PAE:
1216 case PGMMODE_PAE_NX:
1217 return pVM->pgm.s.HCPhysPaePDPT;
1218
1219 case PGMMODE_AMD64:
1220 case PGMMODE_AMD64_NX:
1221 return pVM->pgm.s.HCPhysPaePML4;
1222
1223 case PGMMODE_EPT:
1224 return pVM->pgm.s.HCPhysNestedRoot;
1225
1226 case PGMMODE_NESTED:
1227 return PGMGetNestedCR3(pVM, PGMGetHostMode(pVM));
1228
1229 default:
1230 AssertMsgFailed(("enmShadowMode=%d\n", enmShadowMode));
1231 return ~0;
1232 }
1233}
1234
1235/**
1236 * Gets the current CR3 register value for the nested memory context.
1237 * @returns CR3 value.
1238 * @param pVM The VM handle.
1239 */
1240VMMDECL(RTHCPHYS) PGMGetNestedCR3(PVM pVM, PGMMODE enmShadowMode)
1241{
1242 switch (enmShadowMode)
1243 {
1244 case PGMMODE_32_BIT:
1245 return pVM->pgm.s.HCPhys32BitPD;
1246
1247 case PGMMODE_PAE:
1248 case PGMMODE_PAE_NX:
1249 return pVM->pgm.s.HCPhysPaePDPT;
1250
1251 case PGMMODE_AMD64:
1252 case PGMMODE_AMD64_NX:
1253 return pVM->pgm.s.HCPhysPaePML4;
1254
1255 default:
1256 AssertMsgFailed(("enmShadowMode=%d\n", enmShadowMode));
1257 return ~0;
1258 }
1259}
1260
1261
1262/**
1263 * Gets the CR3 register value for the 32-Bit shadow memory context.
1264 * @returns CR3 value.
1265 * @param pVM The VM handle.
1266 */
1267VMMDECL(RTHCPHYS) PGMGetHyper32BitCR3(PVM pVM)
1268{
1269 return pVM->pgm.s.HCPhys32BitPD;
1270}
1271
1272
1273/**
1274 * Gets the CR3 register value for the PAE shadow memory context.
1275 * @returns CR3 value.
1276 * @param pVM The VM handle.
1277 */
1278VMMDECL(RTHCPHYS) PGMGetHyperPaeCR3(PVM pVM)
1279{
1280 return pVM->pgm.s.HCPhysPaePDPT;
1281}
1282
1283
1284/**
1285 * Gets the CR3 register value for the AMD64 shadow memory context.
1286 * @returns CR3 value.
1287 * @param pVM The VM handle.
1288 */
1289VMMDECL(RTHCPHYS) PGMGetHyperAmd64CR3(PVM pVM)
1290{
1291 return pVM->pgm.s.HCPhysPaePML4;
1292}
1293
1294
1295/**
1296 * Gets the current CR3 register value for the HC intermediate memory context.
1297 * @returns CR3 value.
1298 * @param pVM The VM handle.
1299 */
1300VMMDECL(RTHCPHYS) PGMGetInterHCCR3(PVM pVM)
1301{
1302 switch (pVM->pgm.s.enmHostMode)
1303 {
1304 case SUPPAGINGMODE_32_BIT:
1305 case SUPPAGINGMODE_32_BIT_GLOBAL:
1306 return pVM->pgm.s.HCPhysInterPD;
1307
1308 case SUPPAGINGMODE_PAE:
1309 case SUPPAGINGMODE_PAE_GLOBAL:
1310 case SUPPAGINGMODE_PAE_NX:
1311 case SUPPAGINGMODE_PAE_GLOBAL_NX:
1312 return pVM->pgm.s.HCPhysInterPaePDPT;
1313
1314 case SUPPAGINGMODE_AMD64:
1315 case SUPPAGINGMODE_AMD64_GLOBAL:
1316 case SUPPAGINGMODE_AMD64_NX:
1317 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
1318 return pVM->pgm.s.HCPhysInterPaePDPT;
1319
1320 default:
1321 AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode));
1322 return ~0;
1323 }
1324}
1325
1326
1327/**
1328 * Gets the current CR3 register value for the GC intermediate memory context.
1329 * @returns CR3 value.
1330 * @param pVM The VM handle.
1331 */
1332VMMDECL(RTHCPHYS) PGMGetInterGCCR3(PVM pVM)
1333{
1334 switch (pVM->pgm.s.enmShadowMode)
1335 {
1336 case PGMMODE_32_BIT:
1337 return pVM->pgm.s.HCPhysInterPD;
1338
1339 case PGMMODE_PAE:
1340 case PGMMODE_PAE_NX:
1341 return pVM->pgm.s.HCPhysInterPaePDPT;
1342
1343 case PGMMODE_AMD64:
1344 case PGMMODE_AMD64_NX:
1345 return pVM->pgm.s.HCPhysInterPaePML4;
1346
1347 case PGMMODE_EPT:
1348 case PGMMODE_NESTED:
1349 return 0; /* not relevant */
1350
1351 default:
1352 AssertMsgFailed(("enmShadowMode=%d\n", pVM->pgm.s.enmShadowMode));
1353 return ~0;
1354 }
1355}
1356
1357
1358/**
1359 * Gets the CR3 register value for the 32-Bit intermediate memory context.
1360 * @returns CR3 value.
1361 * @param pVM The VM handle.
1362 */
1363VMMDECL(RTHCPHYS) PGMGetInter32BitCR3(PVM pVM)
1364{
1365 return pVM->pgm.s.HCPhysInterPD;
1366}
1367
1368
1369/**
1370 * Gets the CR3 register value for the PAE intermediate memory context.
1371 * @returns CR3 value.
1372 * @param pVM The VM handle.
1373 */
1374VMMDECL(RTHCPHYS) PGMGetInterPaeCR3(PVM pVM)
1375{
1376 return pVM->pgm.s.HCPhysInterPaePDPT;
1377}
1378
1379
1380/**
1381 * Gets the CR3 register value for the AMD64 intermediate memory context.
1382 * @returns CR3 value.
1383 * @param pVM The VM handle.
1384 */
1385VMMDECL(RTHCPHYS) PGMGetInterAmd64CR3(PVM pVM)
1386{
1387 return pVM->pgm.s.HCPhysInterPaePML4;
1388}
1389
1390
1391/**
1392 * Performs and schedules necessary updates following a CR3 load or reload.
1393 *
1394 * This will normally involve mapping the guest PD or nPDPT
1395 *
1396 * @returns VBox status code.
1397 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
1398 * safely be ignored and overridden since the FF will be set too then.
1399 * @param pVM VM handle.
1400 * @param cr3 The new cr3.
1401 * @param fGlobal Indicates whether this is a global flush or not.
1402 */
1403VMMDECL(int) PGMFlushTLB(PVM pVM, uint64_t cr3, bool fGlobal)
1404{
1405 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLB), a);
1406
1407 /*
1408 * Always flag the necessary updates; necessary for hardware acceleration
1409 */
1410 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
1411 if (fGlobal)
1412 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
1413 LogFlow(("PGMFlushTLB: cr3=%VX64 OldCr3=%VX64 fGlobal=%d\n", cr3, pVM->pgm.s.GCPhysCR3, fGlobal));
1414
1415 /*
1416 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
1417 */
1418 int rc = VINF_SUCCESS;
1419 RTGCPHYS GCPhysCR3;
1420 if ( pVM->pgm.s.enmGuestMode == PGMMODE_PAE
1421 || pVM->pgm.s.enmGuestMode == PGMMODE_PAE_NX
1422 || pVM->pgm.s.enmGuestMode == PGMMODE_AMD64
1423 || pVM->pgm.s.enmGuestMode == PGMMODE_AMD64_NX)
1424 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1425 else
1426 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1427 if (pVM->pgm.s.GCPhysCR3 != GCPhysCR3)
1428 {
1429 pVM->pgm.s.GCPhysCR3 = GCPhysCR3;
1430 rc = PGM_GST_PFN(MapCR3, pVM)(pVM, GCPhysCR3);
1431 if (VBOX_SUCCESS(rc) && !pVM->pgm.s.fMappingsFixed)
1432 {
1433 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1434 rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, GCPhysCR3);
1435 }
1436 if (fGlobal)
1437 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLBNewCR3Global));
1438 else
1439 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLBNewCR3));
1440 }
1441 else
1442 {
1443 /*
1444 * Check if we have a pending update of the CR3 monitoring.
1445 */
1446 if (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
1447 {
1448 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1449 Assert(!pVM->pgm.s.fMappingsFixed);
1450 rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, GCPhysCR3);
1451 }
1452 if (fGlobal)
1453 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLBSameCR3Global));
1454 else
1455 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLBSameCR3));
1456 }
1457
1458 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLB), a);
1459 return rc;
1460}
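/*
 * Worked example (illustrative, assuming the usual x86 CR3 alignment rules):
 * in PAE mode CR3 points to a 32-byte aligned PDPT, so e.g. cr3=0x00fee0b8
 * yields GCPhysCR3=0x00fee0a0 above, whereas in 32-bit (non-PAE) mode the
 * same value is masked down to the 4KB page boundary, 0x00fee000.
 */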
1461
1462/**
1463 * Performs and schedules necessary updates following a CR3 load or reload,
1464 * without actually flushing the TLB as with PGMFlushTLB.
1465 *
1466 * This will normally involve mapping the guest PD or nPDPT
1467 *
1468 * @returns VBox status code.
1469 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
1470 * safely be ignored and overridden since the FF will be set too then.
1471 * @param pVM VM handle.
1472 * @param cr3 The new cr3.
1473 */
1474VMMDECL(int) PGMUpdateCR3(PVM pVM, uint64_t cr3)
1475{
1476 LogFlow(("PGMUpdateCR3: cr3=%VX64 OldCr3=%VX64\n", cr3, pVM->pgm.s.GCPhysCR3));
1477
1478 /* We assume we're only called in nested paging mode. */
1479 Assert(pVM->pgm.s.fMappingsFixed);
1480 Assert(!(pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3));
1481 Assert(pVM->pgm.s.enmShadowMode == PGMMODE_NESTED || pVM->pgm.s.enmShadowMode == PGMMODE_EPT);
1482
1483 /*
1484 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
1485 */
1486 int rc = VINF_SUCCESS;
1487 RTGCPHYS GCPhysCR3;
1488 if ( pVM->pgm.s.enmGuestMode == PGMMODE_PAE
1489 || pVM->pgm.s.enmGuestMode == PGMMODE_PAE_NX
1490 || pVM->pgm.s.enmGuestMode == PGMMODE_AMD64
1491 || pVM->pgm.s.enmGuestMode == PGMMODE_AMD64_NX)
1492 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1493 else
1494 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1495 if (pVM->pgm.s.GCPhysCR3 != GCPhysCR3)
1496 {
1497 pVM->pgm.s.GCPhysCR3 = GCPhysCR3;
1498 rc = PGM_GST_PFN(MapCR3, pVM)(pVM, GCPhysCR3);
1499 }
1500 AssertRC(rc);
1501 return rc;
1502}
1503
1504/**
1505 * Synchronize the paging structures.
1506 *
1507 * This function is called in response to the VM_FF_PGM_SYNC_CR3 and
1508 * VM_FF_PGM_SYNC_CR3_NON_GLOBAL. Those two force action flags are set
1509 * in several places, most importantly whenever the CR3 is loaded.
1510 *
1511 * @returns VBox status code.
1512 * @param pVM The virtual machine.
1513 * @param cr0 Guest context CR0 register
1514 * @param cr3 Guest context CR3 register
1515 * @param cr4 Guest context CR4 register
1516 * @param fGlobal Including global page directories or not
1517 */
1518VMMDECL(int) PGMSyncCR3(PVM pVM, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
1519{
1520 /*
1521 * We might be called when we shouldn't.
1522 *
1523 * The mode switching will ensure that the PD is resynced
1524 * after every mode switch. So, if we find ourselves here
1525 * when in protected or real mode we can safely disable the
1526 * FF and return immediately.
1527 */
1528 if (pVM->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
1529 {
1530 Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
1531 VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3);
1532 VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
1533 return VINF_SUCCESS;
1534 }
1535
1536 /* If global pages are not supported, then all flushes are global */
1537 if (!(cr4 & X86_CR4_PGE))
1538 fGlobal = true;
1539 LogFlow(("PGMSyncCR3: cr0=%VX64 cr3=%VX64 cr4=%VX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
1540 VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3), VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL)));
1541
1542 /*
1543 * Let the 'Bth' function do the work and we'll just keep track of the flags.
1544 */
1545 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
1546 int rc = PGM_BTH_PFN(SyncCR3, pVM)(pVM, cr0, cr3, cr4, fGlobal);
1547 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
1548 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || VBOX_FAILURE(rc), ("rc=%VRc\n", rc));
1549 if (rc == VINF_SUCCESS)
1550 {
1551 if (!(pVM->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
1552 {
1553 VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3);
1554 VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
1555 }
1556
1557 /*
1558 * Check if we have a pending update of the CR3 monitoring.
1559 */
1560 if (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
1561 {
1562 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1563 Assert(!pVM->pgm.s.fMappingsFixed);
1564 Assert(pVM->pgm.s.GCPhysCR3 == pVM->pgm.s.GCPhysGstCR3Monitored);
1565 rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, pVM->pgm.s.GCPhysCR3);
1566 }
1567 }
1568
1569 /*
1570 * Now flush the CR3 (guest context).
1571 */
1572 if (rc == VINF_SUCCESS)
1573 PGM_INVL_GUEST_TLBS();
1574 return rc;
1575}
1576
1577
1578/**
1579 * Called whenever CR0 or CR4 changes in a way which may affect
1580 * the paging mode.
1581 *
1582 * @returns VBox status code fit for scheduling in GC and R0.
1583 * @retval VINF_SUCCESS if there was no change, or it was successfully dealt with.
1584 * @retval VINF_PGM_CHANGE_MODE if we're in GC or R0 and the mode changes.
1585 * @param pVM VM handle.
1586 * @param cr0 The new cr0.
1587 * @param cr4 The new cr4.
1588 * @param efer The new extended feature enable register.
1589 */
1590VMMDECL(int) PGMChangeMode(PVM pVM, uint64_t cr0, uint64_t cr4, uint64_t efer)
1591{
1592 PGMMODE enmGuestMode;
1593
1594 /*
1595 * Calc the new guest mode.
1596 */
1597 if (!(cr0 & X86_CR0_PE))
1598 enmGuestMode = PGMMODE_REAL;
1599 else if (!(cr0 & X86_CR0_PG))
1600 enmGuestMode = PGMMODE_PROTECTED;
1601 else if (!(cr4 & X86_CR4_PAE))
1602 enmGuestMode = PGMMODE_32_BIT;
1603 else if (!(efer & MSR_K6_EFER_LME))
1604 {
1605 if (!(efer & MSR_K6_EFER_NXE))
1606 enmGuestMode = PGMMODE_PAE;
1607 else
1608 enmGuestMode = PGMMODE_PAE_NX;
1609 }
1610 else
1611 {
1612 if (!(efer & MSR_K6_EFER_NXE))
1613 enmGuestMode = PGMMODE_AMD64;
1614 else
1615 enmGuestMode = PGMMODE_AMD64_NX;
1616 }
1617
1618 /*
1619 * Did it change?
1620 */
1621 if (pVM->pgm.s.enmGuestMode == enmGuestMode)
1622 return VINF_SUCCESS;
1623
1624 /* Flush the TLB */
1625 PGM_INVL_GUEST_TLBS();
1626
1627#ifdef IN_RING3
1628 return PGMR3ChangeMode(pVM, enmGuestMode);
1629#else
1630 Log(("PGMChangeMode: returns VINF_PGM_CHANGE_MODE.\n"));
1631 return VINF_PGM_CHANGE_MODE;
1632#endif
1633}
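/*
 * Worked example (illustrative): with cr0.PE=1, cr0.PG=1, cr4.PAE=1,
 * EFER.LME=1 and EFER.NXE=1 the code above selects PGMMODE_AMD64_NX;
 * clearing EFER.LME with the same registers yields PGMMODE_PAE_NX, and
 * clearing cr0.PG drops all the way back to PGMMODE_PROTECTED.
 */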
1634
1635
1636/**
1637 * Gets the current guest paging mode.
1638 *
1639 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
1640 *
1641 * @returns The current paging mode.
1642 * @param pVM The VM handle.
1643 */
1644VMMDECL(PGMMODE) PGMGetGuestMode(PVM pVM)
1645{
1646 return pVM->pgm.s.enmGuestMode;
1647}
1648
1649
1650/**
1651 * Gets the current shadow paging mode.
1652 *
1653 * @returns The current paging mode.
1654 * @param pVM The VM handle.
1655 */
1656VMMDECL(PGMMODE) PGMGetShadowMode(PVM pVM)
1657{
1658 return pVM->pgm.s.enmShadowMode;
1659}
1660
1661/**
1662 * Gets the current host paging mode.
1663 *
1664 * @returns The current paging mode.
1665 * @param pVM The VM handle.
1666 */
1667VMMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
1668{
1669 switch (pVM->pgm.s.enmHostMode)
1670 {
1671 case SUPPAGINGMODE_32_BIT:
1672 case SUPPAGINGMODE_32_BIT_GLOBAL:
1673 return PGMMODE_32_BIT;
1674
1675 case SUPPAGINGMODE_PAE:
1676 case SUPPAGINGMODE_PAE_GLOBAL:
1677 return PGMMODE_PAE;
1678
1679 case SUPPAGINGMODE_PAE_NX:
1680 case SUPPAGINGMODE_PAE_GLOBAL_NX:
1681 return PGMMODE_PAE_NX;
1682
1683 case SUPPAGINGMODE_AMD64:
1684 case SUPPAGINGMODE_AMD64_GLOBAL:
1685 return PGMMODE_AMD64;
1686
1687 case SUPPAGINGMODE_AMD64_NX:
1688 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
1689 return PGMMODE_AMD64_NX;
1690
1691 default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
1692 }
1693
1694 return PGMMODE_INVALID;
1695}
1696
1697
1698/**
1699 * Get mode name.
1700 *
1701 * @returns read-only name string.
1702 * @param enmMode The mode which name is desired.
1703 */
1704VMMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
1705{
1706 switch (enmMode)
1707 {
1708 case PGMMODE_REAL: return "Real";
1709 case PGMMODE_PROTECTED: return "Protected";
1710 case PGMMODE_32_BIT: return "32-bit";
1711 case PGMMODE_PAE: return "PAE";
1712 case PGMMODE_PAE_NX: return "PAE+NX";
1713 case PGMMODE_AMD64: return "AMD64";
1714 case PGMMODE_AMD64_NX: return "AMD64+NX";
1715 case PGMMODE_NESTED: return "Nested";
1716 case PGMMODE_EPT: return "EPT";
1717 default: return "unknown mode value";
1718 }
1719}
1720
1721
1722/**
1723 * Acquire the PGM lock.
1724 *
1725 * @returns VBox status code
1726 * @param pVM The VM to operate on.
1727 */
1728int pgmLock(PVM pVM)
1729{
1730 int rc = PDMCritSectEnter(&pVM->pgm.s.CritSect, VERR_SEM_BUSY);
1731#ifdef IN_GC
1732 if (rc == VERR_SEM_BUSY)
1733 rc = VMMGCCallHost(pVM, VMMCALLHOST_PGM_LOCK, 0);
1734#elif defined(IN_RING0)
1735 if (rc == VERR_SEM_BUSY)
1736 rc = VMMR0CallHost(pVM, VMMCALLHOST_PGM_LOCK, 0);
1737#endif
1738 AssertRC(rc);
1739 return rc;
1740}
1741
1742
1743/**
1744 * Release the PGM lock.
1745 *
1746 * @returns VBox status code
1747 * @param pVM The VM to operate on.
1748 */
1749void pgmUnlock(PVM pVM)
1750{
1751 PDMCritSectLeave(&pVM->pgm.s.CritSect);
1752}
1753
1754#if defined(IN_GC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1755
1756/**
1757 * Temporarily maps one guest page specified by GC physical address.
1758 * These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
1759 *
1760 * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
1761 * reused after 8 mappings (or perhaps a few more if you score with the cache).
1762 *
1763 * @returns VBox status.
1764 * @param pVM VM handle.
1765 * @param GCPhys GC Physical address of the page.
1766 * @param ppv Where to store the address of the mapping.
1767 */
1768VMMDECL(int) PGMDynMapGCPage(PVM pVM, RTGCPHYS GCPhys, void **ppv)
1769{
1770 AssertMsg(!(GCPhys & PAGE_OFFSET_MASK), ("GCPhys=%RGp\n", GCPhys));
1771
1772 /*
1773 * Get the ram range.
1774 */
1775 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
1776 while (pRam && GCPhys - pRam->GCPhys >= pRam->cb)
1777 pRam = pRam->CTX_SUFF(pNext);
1778 if (!pRam)
1779 {
1780 AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
1781 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
1782 }
1783
1784 /*
1785 * Pass it on to PGMDynMapHCPage.
1786 */
1787 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT]);
1788 //Log(("PGMDynMapGCPage: GCPhys=%VGp HCPhys=%VHp\n", GCPhys, HCPhys));
1789 return PGMDynMapHCPage(pVM, HCPhys, ppv);
1790}
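
/*
 * A usage sketch, assuming a page-aligned GCPhys backed by RAM (not MMIO) and
 * a raw-mode / ring-0 context where the dynamic mapping area is available;
 * note that the mapping is only good until the 8-page area wraps around.
 *
 * @code
 *     void *pvPage;
 *     int rc = PGMDynMapGCPage(pVM, GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK, &pvPage);
 *     if (RT_SUCCESS(rc))
 *         Log(("first dword of %RGp: %#x\n", GCPhys, *(uint32_t *)pvPage)); // copy out before the slot is reused
 * @endcode
 */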
1791
1792
1793/**
1794 * Temporarily maps one guest page specified by unaligned GC physical address.
1795 * These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
1796 *
1797 * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
1798 * reused after 8 mappings (or perhaps a few more if you score with the cache).
1799 *
1800 * The caller must be aware that only the specified page is mapped, and that really bad
1801 * things will happen when writing beyond the end of that page!
1802 *
1803 * @returns VBox status.
1804 * @param pVM VM handle.
1805 * @param GCPhys GC Physical address within the page to be mapped.
1806 * @param ppv Where to store the mapping address corresponding to GCPhys.
1807 */
1808VMMDECL(int) PGMDynMapGCPageOff(PVM pVM, RTGCPHYS GCPhys, void **ppv)
1809{
1810 /*
1811 * Get the ram range.
1812 */
1813 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
1814 while (pRam && GCPhys - pRam->GCPhys >= pRam->cb)
1815 pRam = pRam->CTX_SUFF(pNext);
1816 if (!pRam)
1817 {
1818 AssertMsgFailed(("Invalid physical address %VGp!\n", GCPhys));
1819 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
1820 }
1821
1822 /*
1823 * Pass it on to PGMDynMapHCPage.
1824 */
1825 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT]);
1826 int rc = PGMDynMapHCPage(pVM, HCPhys, ppv);
1827 if (RT_SUCCESS(rc))
1828 *ppv = (void *)((uintptr_t)*ppv | (GCPhys & PAGE_OFFSET_MASK));
1829 return rc;
1830}
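
/*
 * A sketch of the unaligned variant, assuming the data read does not cross the
 * page boundary (per the warning above, that is the caller's responsibility);
 * only the single page containing GCPhys is mapped.
 *
 * @code
 *     uint64_t *pu64;
 *     int rc = PGMDynMapGCPageOff(pVM, GCPhys, (void **)&pu64);
 *     if (RT_SUCCESS(rc))
 *         Log(("qword at %RGp: %#RX64\n", GCPhys, *pu64));
 * @endcode
 */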
1831
1832
1833/**
1834 * Temporarily maps one host page specified by HC physical address.
1835 *
1836 * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
1837 * reused after 8 mappings (or perhaps a few more if you score with the cache).
1838 *
1839 * @returns VBox status.
1840 * @param pVM VM handle.
1841 * @param HCPhys HC Physical address of the page.
1842 * @param ppv Where to store the address of the mapping. This is the
1843 * address of the PAGE, not the exact address corresponding
1844 * to HCPhys. Use PGMDynMapHCPageOff if you care about the
1845 * page offset.
1846 */
1847VMMDECL(int) PGMDynMapHCPage(PVM pVM, RTHCPHYS HCPhys, void **ppv)
1848{
1849 AssertMsg(!(HCPhys & PAGE_OFFSET_MASK), ("HCPhys=%RHp\n", HCPhys));
1850# ifdef IN_GC
1851
1852 /*
1853 * Check the cache.
1854 */
1855 register unsigned iCache;
1856 if ( pVM->pgm.s.aHCPhysDynPageMapCache[iCache = 0] == HCPhys
1857 || pVM->pgm.s.aHCPhysDynPageMapCache[iCache = 1] == HCPhys
1858 || pVM->pgm.s.aHCPhysDynPageMapCache[iCache = 2] == HCPhys
1859 || pVM->pgm.s.aHCPhysDynPageMapCache[iCache = 3] == HCPhys)
1860 {
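        /* The cache above has 4 entries while the mapping area has 8 pages and is
           filled round-robin with cache slot = page index & 3 (see the update code
           further down). Given the most recently allocated page (iDynPageMapLast),
           this table recovers, for each cache slot, which of the 8 mapping pages
           most recently went into that slot. */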
1861 static const uint8_t au8Trans[MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT][RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache)] =
1862 {
1863 { 0, 5, 6, 7 },
1864 { 0, 1, 6, 7 },
1865 { 0, 1, 2, 7 },
1866 { 0, 1, 2, 3 },
1867 { 4, 1, 2, 3 },
1868 { 4, 5, 2, 3 },
1869 { 4, 5, 6, 3 },
1870 { 4, 5, 6, 7 },
1871 };
1872 Assert(RT_ELEMENTS(au8Trans) == 8);
1873 Assert(RT_ELEMENTS(au8Trans[0]) == 4);
1874 int iPage = au8Trans[pVM->pgm.s.iDynPageMapLast][iCache];
1875 void *pv = pVM->pgm.s.pbDynPageMapBaseGC + (iPage << PAGE_SHIFT);
1876 *ppv = pv;
1877 STAM_COUNTER_INC(&pVM->pgm.s.StatRCDynMapCacheHits);
1878 //Log(("PGMGCDynMapHCPage: HCPhys=%VHp pv=%VGv iPage=%d iCache=%d\n", HCPhys, pv, iPage, iCache));
1879 return VINF_SUCCESS;
1880 }
1881 Assert(RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) == 4);
1882 STAM_COUNTER_INC(&pVM->pgm.s.StatRCDynMapCacheMisses);
1883
1884 /*
1885 * Update the page tables.
1886 */
1887 register unsigned iPage = pVM->pgm.s.iDynPageMapLast;
1888 pVM->pgm.s.iDynPageMapLast = iPage = (iPage + 1) & ((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) - 1);
1889 Assert((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) == 8);
1890
1891 pVM->pgm.s.aHCPhysDynPageMapCache[iPage & (RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) - 1)] = HCPhys;
1892 pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage].u = (uint32_t)HCPhys | X86_PTE_P | X86_PTE_A | X86_PTE_D;
1893 pVM->pgm.s.paDynPageMapPaePTEsGC[iPage].u = HCPhys | X86_PTE_P | X86_PTE_A | X86_PTE_D;
1894
1895 void *pv = pVM->pgm.s.pbDynPageMapBaseGC + (iPage << PAGE_SHIFT);
1896 *ppv = pv;
1897 ASMInvalidatePage(pv);
1898 Log4(("PGMGCDynMapHCPage: HCPhys=%VHp pv=%VGv iPage=%d\n", HCPhys, pv, iPage));
1899 return VINF_SUCCESS;
1900
1901#else /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1902 AssertFailed();
1903 return VERR_NOT_IMPLEMENTED;
1904#endif /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1905}
1906
1907
1908/**
1909 * Temporarily maps one host page specified by HC physical address, returning
1910 * pointer within the page.
1911 *
1912 * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
1913 * reused after 8 mappings (or perhaps a few more if you score with the cache).
1914 *
1915 * @returns VBox status.
1916 * @param pVM VM handle.
1917 * @param HCPhys HC Physical address of the page.
1918 * @param ppv Where to store the address corresponding to HCPhys.
1919 */
1920VMMDECL(int) PGMDynMapHCPageOff(PVM pVM, RTHCPHYS HCPhys, void **ppv)
1921{
1922 int rc = PGMDynMapHCPage(pVM, HCPhys & ~(RTHCPHYS)PAGE_OFFSET_MASK, ppv);
1923 if (RT_SUCCESS(rc))
1924 *ppv = (void *)((uintptr_t)*ppv | (HCPhys & PAGE_OFFSET_MASK));
1925 return rc;
1926}
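
/*
 * A sketch pairing the two HC variants, assuming HCPhys was obtained from a
 * PGMPAGE (e.g. via PGM_PAGE_GET_HCPHYS); the *Off variant simply re-applies
 * the page offset to the page-aligned mapping returned by PGMDynMapHCPage.
 *
 * @code
 *     void *pv;
 *     int rc = PGMDynMapHCPageOff(pVM, HCPhys, &pv); // pv points at the exact byte for HCPhys
 *     AssertRC(rc);
 * @endcode
 */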
1927
1928#endif /* IN_GC || VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1929
1930#ifdef VBOX_STRICT
1931
1932/**
1933 * Asserts that there are no mapping conflicts.
1934 *
1935 * @returns Number of conflicts.
1936 * @param pVM The VM Handle.
1937 */
1938VMMDECL(unsigned) PGMAssertNoMappingConflicts(PVM pVM)
1939{
1940 unsigned cErrors = 0;
1941
1942 /*
1943 * Check for mapping conflicts.
1944 */
1945 for (PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
1946 pMapping;
1947 pMapping = pMapping->CTX_SUFF(pNext))
1948 {
1949 /** @todo This is slow and should be optimized, but since it's just assertions I don't care now. */
1950 for (RTGCUINTPTR GCPtr = (RTGCUINTPTR)pMapping->GCPtr;
1951 GCPtr <= (RTGCUINTPTR)pMapping->GCPtrLast;
1952 GCPtr += PAGE_SIZE)
1953 {
1954 int rc = PGMGstGetPage(pVM, (RTGCPTR)GCPtr, NULL, NULL);
1955 if (rc != VERR_PAGE_TABLE_NOT_PRESENT)
1956 {
1957 AssertMsgFailed(("Conflict at %VGv with %s\n", GCPtr, R3STRING(pMapping->pszDesc)));
1958 cErrors++;
1959 break;
1960 }
1961 }
1962 }
1963
1964 return cErrors;
1965}
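
/*
 * A sketch of how this assertion helper might be wired into a strict-build
 * check, assuming a convenient call site after mappings have been relocated;
 * the return value is a count, so any non-zero result indicates a conflict.
 *
 * @code
 *     unsigned cConflicts = PGMAssertNoMappingConflicts(pVM);
 *     AssertMsg(!cConflicts, ("%u mapping conflicts detected\n", cConflicts));
 * @endcode
 */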
1966
1967
1968/**
1969 * Asserts that everything related to the guest CR3 is correctly shadowed.
1970 *
1971 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
1972 * and assert the correctness of the guest CR3 mapping before asserting that the
1973 * shadow page tables are in sync with the guest page tables.
1974 *
1975 * @returns Number of conflicts.
1976 * @param pVM The VM Handle.
1977 * @param cr3 The current guest CR3 register value.
1978 * @param cr4 The current guest CR4 register value.
1979 */
1980VMMDECL(unsigned) PGMAssertCR3(PVM pVM, uint64_t cr3, uint64_t cr4)
1981{
1982 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
1983 unsigned cErrors = PGM_BTH_PFN(AssertCR3, pVM)(pVM, cr3, cr4, 0, ~(RTGCUINTPTR)0);
1984 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
1985 return cErrors;
1987}
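
/*
 * A sketch of a strict-build CR3 consistency check, assuming the current guest
 * control registers are fetched through the CPUM getters declared in
 * VBox/cpum.h (included above).
 *
 * @code
 *     unsigned cErrs = PGMAssertCR3(pVM, CPUMGetGuestCR3(pVM), CPUMGetGuestCR4(pVM));
 *     Assert(cErrs == 0);
 * @endcode
 */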
1988
1989#endif /* VBOX_STRICT */