VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp @ 13038

Last change on this file since 13038 was 13019, checked in by vboxsync, 16 years ago

#1865: PGM ...

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 56.7 KB
1/* $Id: PGMAll.cpp 13019 2008-10-06 16:21:01Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor - All context code.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#define LOG_GROUP LOG_GROUP_PGM
26#include <VBox/pgm.h>
27#include <VBox/cpum.h>
28#include <VBox/selm.h>
29#include <VBox/iom.h>
30#include <VBox/sup.h>
31#include <VBox/mm.h>
32#include <VBox/stam.h>
33#include <VBox/csam.h>
34#include <VBox/patm.h>
35#include <VBox/trpm.h>
36#include <VBox/rem.h>
37#include <VBox/em.h>
38#include <VBox/hwaccm.h>
39#include <VBox/hwacc_vmx.h>
40#include "PGMInternal.h"
41#include <VBox/vm.h>
42#include <iprt/assert.h>
43#include <iprt/asm.h>
44#include <iprt/string.h>
45#include <VBox/log.h>
46#include <VBox/param.h>
47#include <VBox/err.h>
48
49
50/*******************************************************************************
51* Structures and Typedefs *
52*******************************************************************************/
53/**
54 * State structure for PGM_GST_NAME(HandlerVirtualUpdate) that's
55 * passed to PGM_GST_NAME(VirtHandlerUpdateOne) during enumeration.
56 */
57typedef struct PGMHVUSTATE
58{
59 /** The VM handle. */
60 PVM pVM;
61 /** The todo flags. */
62 RTUINT fTodo;
63 /** The CR4 register value. */
64 uint32_t cr4;
65} PGMHVUSTATE, *PPGMHVUSTATE;
66
67
68/*******************************************************************************
69* Internal Functions *
70*******************************************************************************/
71
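/*
 * Mode template instantiation: PGMAllShw.h (shadow), PGMAllGst.h (guest) and
 * PGMAllBth.h (combined shadow+guest) are included once for each paging mode
 * below, with PGM_SHW_TYPE, PGM_GST_TYPE and the PGM_*_NAME() macros defined
 * first.  Each inclusion therefore emits a separate set of worker functions
 * (e.g. PGM_BTH_NAME_PAE_32BIT(Trap0eHandler)) for that shadow/guest mode
 * combination, which the PGM_SHW_PFN/PGM_GST_PFN/PGM_BTH_PFN macros used
 * further down dispatch to for the currently active modes.
 */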
72/*
73 * Shadow - 32-bit mode
74 */
75#define PGM_SHW_TYPE PGM_TYPE_32BIT
76#define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
77#include "PGMAllShw.h"
78
79/* Guest - real mode */
80#define PGM_GST_TYPE PGM_TYPE_REAL
81#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
82#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
83#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
84#include "PGMAllGst.h"
85#include "PGMAllBth.h"
86#undef BTH_PGMPOOLKIND_PT_FOR_PT
87#undef PGM_BTH_NAME
88#undef PGM_GST_TYPE
89#undef PGM_GST_NAME
90
91/* Guest - protected mode */
92#define PGM_GST_TYPE PGM_TYPE_PROT
93#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
94#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
95#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
96#include "PGMAllGst.h"
97#include "PGMAllBth.h"
98#undef BTH_PGMPOOLKIND_PT_FOR_PT
99#undef PGM_BTH_NAME
100#undef PGM_GST_TYPE
101#undef PGM_GST_NAME
102
103/* Guest - 32-bit mode */
104#define PGM_GST_TYPE PGM_TYPE_32BIT
105#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
106#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
107#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
108#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
109#include "PGMAllGst.h"
110#include "PGMAllBth.h"
111#undef BTH_PGMPOOLKIND_PT_FOR_BIG
112#undef BTH_PGMPOOLKIND_PT_FOR_PT
113#undef PGM_BTH_NAME
114#undef PGM_GST_TYPE
115#undef PGM_GST_NAME
116
117#undef PGM_SHW_TYPE
118#undef PGM_SHW_NAME
119
120
121/*
122 * Shadow - PAE mode
123 */
124#define PGM_SHW_TYPE PGM_TYPE_PAE
125#define PGM_SHW_NAME(name) PGM_SHW_NAME_PAE(name)
126#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
127#include "PGMAllShw.h"
128
129/* Guest - real mode */
130#define PGM_GST_TYPE PGM_TYPE_REAL
131#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
132#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
133#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
134#include "PGMAllBth.h"
135#undef BTH_PGMPOOLKIND_PT_FOR_PT
136#undef PGM_BTH_NAME
137#undef PGM_GST_TYPE
138#undef PGM_GST_NAME
139
140/* Guest - protected mode */
141#define PGM_GST_TYPE PGM_TYPE_PROT
142#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
143#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
144#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
145#include "PGMAllBth.h"
146#undef BTH_PGMPOOLKIND_PT_FOR_PT
147#undef PGM_BTH_NAME
148#undef PGM_GST_TYPE
149#undef PGM_GST_NAME
150
151/* Guest - 32-bit mode */
152#define PGM_GST_TYPE PGM_TYPE_32BIT
153#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
154#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
155#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
156#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
157#include "PGMAllBth.h"
158#undef BTH_PGMPOOLKIND_PT_FOR_BIG
159#undef BTH_PGMPOOLKIND_PT_FOR_PT
160#undef PGM_BTH_NAME
161#undef PGM_GST_TYPE
162#undef PGM_GST_NAME
163
164
165/* Guest - PAE mode */
166#define PGM_GST_TYPE PGM_TYPE_PAE
167#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
168#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
169#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
170#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
171#include "PGMAllGst.h"
172#include "PGMAllBth.h"
173#undef BTH_PGMPOOLKIND_PT_FOR_BIG
174#undef BTH_PGMPOOLKIND_PT_FOR_PT
175#undef PGM_BTH_NAME
176#undef PGM_GST_TYPE
177#undef PGM_GST_NAME
178
179#undef PGM_SHW_TYPE
180#undef PGM_SHW_NAME
181
182
183#ifndef IN_GC /* AMD64 implies VT-x/AMD-V */
184/*
185 * Shadow - AMD64 mode
186 */
187#define PGM_SHW_TYPE PGM_TYPE_AMD64
188#define PGM_SHW_NAME(name) PGM_SHW_NAME_AMD64(name)
189#include "PGMAllShw.h"
190
191/* Guest - protected mode */
192#define PGM_GST_TYPE PGM_TYPE_PROT
193#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
194#define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
195#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
196#include "PGMAllBth.h"
197#undef BTH_PGMPOOLKIND_PT_FOR_PT
198#undef PGM_BTH_NAME
199#undef PGM_GST_TYPE
200#undef PGM_GST_NAME
201
202/* Guest - AMD64 mode */
203#define PGM_GST_TYPE PGM_TYPE_AMD64
204#define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
205#define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
206#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
207#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
208#include "PGMAllGst.h"
209#include "PGMAllBth.h"
210#undef BTH_PGMPOOLKIND_PT_FOR_BIG
211#undef BTH_PGMPOOLKIND_PT_FOR_PT
212#undef PGM_BTH_NAME
213#undef PGM_GST_TYPE
214#undef PGM_GST_NAME
215
216#undef PGM_SHW_TYPE
217#undef PGM_SHW_NAME
218
219/*
220 * Shadow - Nested paging mode
221 */
222#define PGM_SHW_TYPE PGM_TYPE_NESTED
223#define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED(name)
224#include "PGMAllShw.h"
225
226/* Guest - real mode */
227#define PGM_GST_TYPE PGM_TYPE_REAL
228#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
229#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_REAL(name)
230#include "PGMAllBth.h"
231#undef PGM_BTH_NAME
232#undef PGM_GST_TYPE
233#undef PGM_GST_NAME
234
235/* Guest - protected mode */
236#define PGM_GST_TYPE PGM_TYPE_PROT
237#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
238#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PROT(name)
239#include "PGMAllBth.h"
240#undef PGM_BTH_NAME
241#undef PGM_GST_TYPE
242#undef PGM_GST_NAME
243
244/* Guest - 32-bit mode */
245#define PGM_GST_TYPE PGM_TYPE_32BIT
246#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
247#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT(name)
248#include "PGMAllBth.h"
249#undef PGM_BTH_NAME
250#undef PGM_GST_TYPE
251#undef PGM_GST_NAME
252
253/* Guest - PAE mode */
254#define PGM_GST_TYPE PGM_TYPE_PAE
255#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
256#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE(name)
257#include "PGMAllBth.h"
258#undef PGM_BTH_NAME
259#undef PGM_GST_TYPE
260#undef PGM_GST_NAME
261
262/* Guest - AMD64 mode */
263#define PGM_GST_TYPE PGM_TYPE_AMD64
264#define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
265#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64(name)
266#include "PGMAllBth.h"
267#undef PGM_BTH_NAME
268#undef PGM_GST_TYPE
269#undef PGM_GST_NAME
270
271#undef PGM_SHW_TYPE
272#undef PGM_SHW_NAME
273
274/*
275 * Shadow - EPT
276 */
277#define PGM_SHW_TYPE PGM_TYPE_EPT
278#define PGM_SHW_NAME(name) PGM_SHW_NAME_EPT(name)
279#include "PGMAllShw.h"
280
281/* Guest - real mode */
282#define PGM_GST_TYPE PGM_TYPE_REAL
283#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
284#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_REAL(name)
285#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
286#include "PGMAllBth.h"
287#undef BTH_PGMPOOLKIND_PT_FOR_PT
288#undef PGM_BTH_NAME
289#undef PGM_GST_TYPE
290#undef PGM_GST_NAME
291
292/* Guest - protected mode */
293#define PGM_GST_TYPE PGM_TYPE_PROT
294#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
295#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
296#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
297#include "PGMAllBth.h"
298#undef BTH_PGMPOOLKIND_PT_FOR_PT
299#undef PGM_BTH_NAME
300#undef PGM_GST_TYPE
301#undef PGM_GST_NAME
302
303/* Guest - 32-bit mode */
304#define PGM_GST_TYPE PGM_TYPE_32BIT
305#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
306#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_32BIT(name)
307#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
308#include "PGMAllBth.h"
309#undef BTH_PGMPOOLKIND_PT_FOR_PT
310#undef PGM_BTH_NAME
311#undef PGM_GST_TYPE
312#undef PGM_GST_NAME
313
314/* Guest - PAE mode */
315#define PGM_GST_TYPE PGM_TYPE_PAE
316#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
317#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PAE(name)
318#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
319#include "PGMAllBth.h"
320#undef BTH_PGMPOOLKIND_PT_FOR_PT
321#undef PGM_BTH_NAME
322#undef PGM_GST_TYPE
323#undef PGM_GST_NAME
324
325/* Guest - AMD64 mode */
326#define PGM_GST_TYPE PGM_TYPE_AMD64
327#define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
328#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_AMD64(name)
329#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
330#include "PGMAllBth.h"
331#undef BTH_PGMPOOLKIND_PT_FOR_PT
332#undef PGM_BTH_NAME
333#undef PGM_GST_TYPE
334#undef PGM_GST_NAME
335
336#undef PGM_SHW_TYPE
337#undef PGM_SHW_NAME
338
339#endif /* !IN_GC */
340
341/**
342 * #PF Handler.
343 *
344 * @returns VBox status code (appropriate for trap handling and GC return).
345 * @param pVM VM Handle.
346 * @param uErr The trap error code.
347 * @param pRegFrame Trap register frame.
348 * @param pvFault The fault address.
349 */
350VMMDECL(int) PGMTrap0eHandler(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
351{
352 LogFlow(("PGMTrap0eHandler: uErr=%RGu pvFault=%VGv eip=%VGv\n", uErr, pvFault, pRegFrame->rip));
353 STAM_PROFILE_START(&pVM->pgm.s.StatGCTrap0e, a);
354 STAM_STATS({ pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution) = NULL; } );
355
356
357#ifdef VBOX_WITH_STATISTICS
358 /*
359 * Error code stats.
360 */
361 if (uErr & X86_TRAP_PF_US)
362 {
363 if (!(uErr & X86_TRAP_PF_P))
364 {
365 if (uErr & X86_TRAP_PF_RW)
366 STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUSNotPresentWrite);
367 else
368 STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUSNotPresentRead);
369 }
370 else if (uErr & X86_TRAP_PF_RW)
371 STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUSWrite);
372 else if (uErr & X86_TRAP_PF_RSVD)
373 STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUSReserved);
374 else if (uErr & X86_TRAP_PF_ID)
375 STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUSNXE);
376 else
377 STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUSRead);
378 }
379 else
380 { /* Supervisor */
381 if (!(uErr & X86_TRAP_PF_P))
382 {
383 if (uErr & X86_TRAP_PF_RW)
384 STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eSVNotPresentWrite);
385 else
386 STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eSVNotPresentRead);
387 }
388 else if (uErr & X86_TRAP_PF_RW)
389 STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eSVWrite);
390 else if (uErr & X86_TRAP_PF_ID)
391 STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eSNXE);
392 else if (uErr & X86_TRAP_PF_RSVD)
393 STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eSVReserved);
394 }
395#endif /* VBOX_WITH_STATISTICS */
396
397 /*
398 * Call the worker.
399 */
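    /* PGM_BTH_PFN() picks the Trap0eHandler instance instantiated above for the current shadow/guest paging mode pair. */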
400 int rc = PGM_BTH_PFN(Trap0eHandler, pVM)(pVM, uErr, pRegFrame, pvFault);
401 if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
402 rc = VINF_SUCCESS;
403 STAM_STATS({ if (!pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution))
404 pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatTrap0eMisc; });
405 STAM_PROFILE_STOP_EX(&pVM->pgm.s.StatGCTrap0e, pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution), a);
406 return rc;
407}
408
409/**
410 * Prefetch a page
411 *
412 * Typically used to sync commonly used pages before entering raw mode
413 * after a CR3 reload.
414 *
415 * @returns VBox status code suitable for scheduling.
416 * @retval VINF_SUCCESS on success.
417 * @retval VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
418 * @param pVM VM handle.
419 * @param GCPtrPage Page to invalidate.
420 */
421VMMDECL(int) PGMPrefetchPage(PVM pVM, RTGCPTR GCPtrPage)
422{
423 STAM_PROFILE_START(&pVM->pgm.s.StatHCPrefetch, a);
424 int rc = PGM_BTH_PFN(PrefetchPage, pVM)(pVM, (RTGCUINTPTR)GCPtrPage);
425 STAM_PROFILE_STOP(&pVM->pgm.s.StatHCPrefetch, a);
426 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || VBOX_FAILURE(rc), ("rc=%Vrc\n", rc));
427 return rc;
428}
429
430
431/**
432 * Gets the mapping corresponding to the specified address (if any).
433 *
434 * @returns Pointer to the mapping.
435 * @returns NULL if no mapping corresponds to the address.
436 *
437 * @param pVM The virtual machine.
438 * @param GCPtr The guest context pointer.
439 */
440PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr)
441{
442 PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
443 while (pMapping)
444 {
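        /* The mapping list is sorted by address, so we can stop as soon as we have passed GCPtr. */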
445 if ((uintptr_t)GCPtr < (uintptr_t)pMapping->GCPtr)
446 break;
447 if ((uintptr_t)GCPtr - (uintptr_t)pMapping->GCPtr < pMapping->cb)
448 {
449 STAM_COUNTER_INC(&pVM->pgm.s.StatGCSyncPTConflict);
450 return pMapping;
451 }
452 pMapping = pMapping->CTX_SUFF(pNext);
453 }
454 return NULL;
455}
456
457
458/**
459 * Verifies a range of pages for read or write access
460 *
461 * Only checks the guest's page tables
462 *
463 * @returns VBox status code.
464 * @param pVM VM handle.
465 * @param Addr Guest virtual address to check
466 * @param cbSize Access size
467 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
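 * e.g. X86_PTE_RW | X86_PTE_US for a user-mode write check.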
468 */
469VMMDECL(int) PGMIsValidAccess(PVM pVM, RTGCUINTPTR Addr, uint32_t cbSize, uint32_t fAccess)
470{
471 /*
472 * Validate input.
473 */
474 if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
475 {
476 AssertMsgFailed(("PGMIsValidAccess: invalid access type %08x\n", fAccess));
477 return VERR_INVALID_PARAMETER;
478 }
479
480 uint64_t fPage;
481 int rc = PGMGstGetPage(pVM, (RTGCPTR)Addr, &fPage, NULL);
482 if (VBOX_FAILURE(rc))
483 {
484 Log(("PGMIsValidAccess: access violation for %VGv rc=%d\n", Addr, rc));
485 return VINF_EM_RAW_GUEST_TRAP;
486 }
487
488 /*
489 * Check if the access would cause a page fault
490 *
491 * Note that hypervisor page directories are not present in the guest's tables, so this check
492 * is sufficient.
493 */
494 bool fWrite = !!(fAccess & X86_PTE_RW);
495 bool fUser = !!(fAccess & X86_PTE_US);
496 if ( !(fPage & X86_PTE_P)
497 || (fWrite && !(fPage & X86_PTE_RW))
498 || (fUser && !(fPage & X86_PTE_US)) )
499 {
500 Log(("PGMIsValidAccess: access violation for %VGv attr %#llx vs %d:%d\n", Addr, fPage, fWrite, fUser));
501 return VINF_EM_RAW_GUEST_TRAP;
502 }
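    /* If the range crosses a page boundary, check the next page too. */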
503 if ( VBOX_SUCCESS(rc)
504 && PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize))
505 return PGMIsValidAccess(pVM, Addr + PAGE_SIZE, (cbSize > PAGE_SIZE) ? cbSize - PAGE_SIZE : 1, fAccess);
506 return rc;
507}
508
509
510/**
511 * Verifies a range of pages for read or write access
512 *
513 * Supports handling of pages marked for dirty bit tracking and CSAM
514 *
515 * @returns VBox status code.
516 * @param pVM VM handle.
517 * @param Addr Guest virtual address to check
518 * @param cbSize Access size
519 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
520 */
521VMMDECL(int) PGMVerifyAccess(PVM pVM, RTGCUINTPTR Addr, uint32_t cbSize, uint32_t fAccess)
522{
523 /*
524 * Validate input.
525 */
526 if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
527 {
528 AssertMsgFailed(("PGMVerifyAccess: invalid access type %08x\n", fAccess));
529 return VERR_INVALID_PARAMETER;
530 }
531
532 uint64_t fPageGst;
533 int rc = PGMGstGetPage(pVM, (RTGCPTR)Addr, &fPageGst, NULL);
534 if (VBOX_FAILURE(rc))
535 {
536 Log(("PGMVerifyAccess: access violation for %VGv rc=%d\n", Addr, rc));
537 return VINF_EM_RAW_GUEST_TRAP;
538 }
539
540 /*
541 * Check if the access would cause a page fault
542 *
543 * Note that hypervisor page directories are not present in the guest's tables, so this check
544 * is sufficient.
545 */
546 const bool fWrite = !!(fAccess & X86_PTE_RW);
547 const bool fUser = !!(fAccess & X86_PTE_US);
548 if ( !(fPageGst & X86_PTE_P)
549 || (fWrite && !(fPageGst & X86_PTE_RW))
550 || (fUser && !(fPageGst & X86_PTE_US)) )
551 {
552 Log(("PGMVerifyAccess: access violation for %VGv attr %#llx vs %d:%d\n", Addr, fPageGst, fWrite, fUser));
553 return VINF_EM_RAW_GUEST_TRAP;
554 }
555
556 if (!HWACCMIsNestedPagingActive(pVM))
557 {
558 /*
559 * Next step is to verify if we protected this page for dirty bit tracking or for CSAM scanning
560 */
561 rc = PGMShwGetPage(pVM, (RTGCPTR)Addr, NULL, NULL);
562 if ( rc == VERR_PAGE_NOT_PRESENT
563 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
564 {
565 /*
566 * Page is not present in our page tables.
567 * Try to sync it!
568 */
569 Assert(X86_TRAP_PF_RW == X86_PTE_RW && X86_TRAP_PF_US == X86_PTE_US);
570 uint32_t uErr = fAccess & (X86_TRAP_PF_RW | X86_TRAP_PF_US);
571 rc = PGM_BTH_PFN(VerifyAccessSyncPage, pVM)(pVM, Addr, fPageGst, uErr);
572 if (rc != VINF_SUCCESS)
573 return rc;
574 }
575 else
576 AssertMsg(rc == VINF_SUCCESS, ("PGMShwGetPage %VGv failed with %Vrc\n", Addr, rc));
577 }
578
579#if 0 /* def VBOX_STRICT; triggers too often now */
580 /*
581 * This check is a bit paranoid, but useful.
582 */
583 /** @note this will assert when writing to monitored pages (a bit annoying actually) */
584 uint64_t fPageShw;
585 rc = PGMShwGetPage(pVM, (RTGCPTR)Addr, &fPageShw, NULL);
586 if ( (rc == VERR_PAGE_NOT_PRESENT || VBOX_FAILURE(rc))
587 || (fWrite && !(fPageShw & X86_PTE_RW))
588 || (fUser && !(fPageShw & X86_PTE_US)) )
589 {
590 AssertMsgFailed(("Unexpected access violation for %VGv! rc=%Vrc write=%d user=%d\n",
591 Addr, rc, fWrite && !(fPageShw & X86_PTE_RW), fUser && !(fPageShw & X86_PTE_US)));
592 return VINF_EM_RAW_GUEST_TRAP;
593 }
594#endif
595
596 if ( VBOX_SUCCESS(rc)
597 && ( PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize - 1)
598 || Addr + cbSize < Addr))
599 {
600 /* Don't recursively call PGMVerifyAccess as we might run out of stack. */
601 for (;;)
602 {
603 Addr += PAGE_SIZE;
604 if (cbSize > PAGE_SIZE)
605 cbSize -= PAGE_SIZE;
606 else
607 cbSize = 1;
608 rc = PGMVerifyAccess(pVM, Addr, 1, fAccess);
609 if (rc != VINF_SUCCESS)
610 break;
611 if (PAGE_ADDRESS(Addr) == PAGE_ADDRESS(Addr + cbSize - 1))
612 break;
613 }
614 }
615 return rc;
616}
617
618
619#ifndef IN_GC
620/**
621 * Emulation of the invlpg instruction (HC only actually).
622 *
623 * @returns VBox status code.
624 * @param pVM VM handle.
625 * @param GCPtrPage Page to invalidate.
626 * @remark ASSUMES the page table entry or page directory is
627 * valid. Fairly safe, but there could be edge cases!
628 * @todo Flush page or page directory only if necessary!
629 */
630VMMDECL(int) PGMInvalidatePage(PVM pVM, RTGCPTR GCPtrPage)
631{
632 int rc;
633
634 Log3(("PGMInvalidatePage: GCPtrPage=%VGv\n", GCPtrPage));
635
636 /** @todo merge PGMGCInvalidatePage with this one */
637
638#ifndef IN_RING3
639 /*
640 * Notify the recompiler so it can record this instruction.
641 * Failure happens when it's out of space. We'll return to HC in that case.
642 */
643 rc = REMNotifyInvalidatePage(pVM, GCPtrPage);
644 if (VBOX_FAILURE(rc))
645 return rc;
646#endif
647
648 STAM_PROFILE_START(&CTXMID(pVM->pgm.s.Stat,InvalidatePage), a);
649 rc = PGM_BTH_PFN(InvalidatePage, pVM)(pVM, GCPtrPage);
650 STAM_PROFILE_STOP(&CTXMID(pVM->pgm.s.Stat,InvalidatePage), a);
651
652#ifndef IN_RING0
653 /*
654 * Check if we have a pending update of the CR3 monitoring.
655 */
656 if ( VBOX_SUCCESS(rc)
657 && (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3))
658 {
659 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
660 Assert(!pVM->pgm.s.fMappingsFixed);
661 Assert(pVM->pgm.s.GCPhysCR3 == pVM->pgm.s.GCPhysGstCR3Monitored);
662 rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, pVM->pgm.s.GCPhysCR3);
663 }
664#endif
665
666#ifdef IN_RING3
667 /*
668 * Inform CSAM about the flush
669 */
670 /** @note this is to check if monitored pages have been changed; when we implement callbacks for virtual handlers, this is no longer required. */
671 CSAMR3FlushPage(pVM, GCPtrPage);
672#endif
673 return rc;
674}
675#endif /* !IN_GC */
676
677
678/**
679 * Executes an instruction using the interpreter.
680 *
681 * @returns VBox status code (appropriate for trap handling and GC return).
682 * @param pVM VM handle.
683 * @param pRegFrame Register frame.
684 * @param pvFault Fault address.
685 */
686VMMDECL(int) PGMInterpretInstruction(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
687{
688 uint32_t cb;
689 int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);
690 if (rc == VERR_EM_INTERPRETER)
691 rc = VINF_EM_RAW_EMULATE_INSTR;
692 if (rc != VINF_SUCCESS)
693 Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%VGv)\n", rc, pvFault));
694 return rc;
695}
696
697
698/**
699 * Gets effective page information (from the VMM page directory).
700 *
701 * @returns VBox status.
702 * @param pVM VM Handle.
703 * @param GCPtr Guest Context virtual address of the page.
704 * @param pfFlags Where to store the flags. These are X86_PTE_*.
705 * @param pHCPhys Where to store the HC physical address of the page.
706 * This is page aligned.
707 * @remark You should use PGMMapGetPage() for pages in a mapping.
708 */
709VMMDECL(int) PGMShwGetPage(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
710{
711 return PGM_SHW_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtr, pfFlags, pHCPhys);
712}
713
714
715/**
716 * Sets (replaces) the page flags for a range of pages in the shadow context.
717 *
718 * @returns VBox status.
719 * @param pVM VM handle.
720 * @param GCPtr The address of the first page.
721 * @param cb The size of the range in bytes.
722 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
723 * @remark You must use PGMMapSetPage() for pages in a mapping.
724 */
725VMMDECL(int) PGMShwSetPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
726{
727 return PGMShwModifyPage(pVM, GCPtr, cb, fFlags, 0);
728}
729
730
731/**
732 * Modify page flags for a range of pages in the shadow context.
733 *
734 * The existing flags are ANDed with the fMask and ORed with the fFlags.
735 *
736 * @returns VBox status code.
737 * @param pVM VM handle.
738 * @param GCPtr Virtual address of the first page in the range.
739 * @param cb Size (in bytes) of the range to apply the modification to.
740 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
741 * @param fMask The AND mask - page flags X86_PTE_*.
742 * Be very CAREFUL when ~'ing constants which could be 32-bit!
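 * E.g. to clear the RW bit pass fFlags=0 and fMask=~(uint64_t)X86_PTE_RW;
 * a plain ~X86_PTE_RW is only 32-bit wide and would mask off the upper flag bits as well.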
743 * @remark You must use PGMMapModifyPage() for pages in a mapping.
744 */
745VMMDECL(int) PGMShwModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
746{
747 /*
748 * Validate input.
749 */
750 if (fFlags & X86_PTE_PAE_PG_MASK)
751 {
752 AssertMsgFailed(("fFlags=%#llx\n", fFlags));
753 return VERR_INVALID_PARAMETER;
754 }
755 if (!cb)
756 {
757 AssertFailed();
758 return VERR_INVALID_PARAMETER;
759 }
760
761 /*
762 * Align the input.
763 */
764 cb += (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
765 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
766 GCPtr = (RTGCPTR)((RTGCUINTPTR)GCPtr & PAGE_BASE_GC_MASK); /** @todo this ain't necessary, right... */
767
768 /*
769 * Call worker.
770 */
771 return PGM_SHW_PFN(ModifyPage, pVM)(pVM, (RTGCUINTPTR)GCPtr, cb, fFlags, fMask);
772}
773
774/**
775 * Syncs the SHADOW page directory pointer for the specified address. Allocates
776 * backing pages in case the PDPT entry is missing.
777 *
778 * @returns VBox status.
779 * @param pVM VM handle.
780 * @param GCPtr The address.
781 * @param pGstPdpe Guest PDPT entry
782 * @param ppPD Receives address of page directory
783 */
784VMMDECL(int) PGMShwSyncPAEPDPtr(PVM pVM, RTGCUINTPTR GCPtr, PX86PDPE pGstPdpe, PX86PDPAE *ppPD)
785{
786 PPGM pPGM = &pVM->pgm.s;
787 PPGMPOOL pPool = pPGM->CTXSUFF(pPool);
788 PPGMPOOLPAGE pShwPage;
789 int rc;
790
791 Assert(!HWACCMIsNestedPagingActive(pVM));
792
793 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
794 PX86PDPT pPdpt = pVM->pgm.s.CTXMID(p,PaePDPT);
795 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
796
797 /* Allocate page directory if not present. */
798 if ( !pPdpe->n.u1Present
799 && !(pPdpe->u & X86_PDPE_PG_MASK))
800 {
801 PX86PDPE pPdptGst = &CTXSUFF(pPGM->pGstPaePDPT)->a[iPdPt];
802
803 Assert(!(pPdpe->u & X86_PDPE_PG_MASK));
804 /* Create a reference back to the PDPT by using the index in its shadow page. */
805 rc = pgmPoolAlloc(pVM, pPdptGst->u & X86_PDPE_PG_MASK, PGMPOOLKIND_PAE_PD_FOR_PAE_PD, PGMPOOL_IDX_PDPT, iPdPt, &pShwPage);
806 if (rc == VERR_PGM_POOL_FLUSHED)
807 {
808 Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
809 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
810 return VINF_PGM_SYNC_CR3;
811 }
812 AssertRCReturn(rc, rc);
813 }
814 else
815 {
816 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
817 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
818 }
819 /* The PD was cached or created; hook it up now. */
820 pPdpe->u |= pShwPage->Core.Key
821 | (pGstPdpe->u & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));
822
823 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
824 return VINF_SUCCESS;
825}
826
827/**
828 * Gets the SHADOW page directory pointer for the specified address.
829 *
830 * @returns VBox status.
831 * @param pVM VM handle.
832 * @param GCPtr The address.
833 * @param ppPdpt Receives address of pdpt
834 * @param ppPD Receives address of page directory
835 */
836VMMDECL(int) PGMShwGetPAEPDPtr(PVM pVM, RTGCUINTPTR GCPtr, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
837{
838 PPGM pPGM = &pVM->pgm.s;
839 PPGMPOOL pPool = pPGM->CTXSUFF(pPool);
840 PPGMPOOLPAGE pShwPage;
841
842 Assert(!HWACCMIsNestedPagingActive(pVM));
843
844 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
845 PX86PDPT pPdpt = pVM->pgm.s.CTXMID(p,PaePDPT);
846 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
847
848 *ppPdpt = pPdpt;
849 if (!pPdpe->n.u1Present)
850 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
851
852 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
853 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
854
855 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
856 return VINF_SUCCESS;
857}
858
859#ifndef IN_GC
860/**
861 * Syncs the SHADOW page directory pointer for the specified address. Allocates
862 * backing pages in case the PDPT or PML4 entry is missing.
863 *
864 * @returns VBox status.
865 * @param pVM VM handle.
866 * @param GCPtr The address.
867 * @param pGstPml4e Guest PML4 entry
868 * @param pGstPdpe Guest PDPT entry
869 * @param ppPD Receives address of page directory
870 */
871VMMDECL(int) PGMShwSyncLongModePDPtr(PVM pVM, RTGCUINTPTR64 GCPtr, PX86PML4E pGstPml4e, PX86PDPE pGstPdpe, PX86PDPAE *ppPD)
872{
873 PPGM pPGM = &pVM->pgm.s;
874 const unsigned iPml4e = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
875 PPGMPOOL pPool = pPGM->CTXSUFF(pPool);
876 PX86PML4E pPml4e;
877 PPGMPOOLPAGE pShwPage;
878 int rc;
879 bool fNestedPaging = HWACCMIsNestedPagingActive(pVM);
880
881 Assert(pVM->pgm.s.pHCPaePML4);
882
883 /* Allocate page directory pointer table if not present. */
884 pPml4e = &pPGM->pHCPaePML4->a[iPml4e];
885 if ( !pPml4e->n.u1Present
886 && !(pPml4e->u & X86_PML4E_PG_MASK))
887 {
888 Assert(!(pPml4e->u & X86_PML4E_PG_MASK));
889
890 if (!fNestedPaging)
891 {
892 Assert(pVM->pgm.s.pHCShwAmd64CR3);
893 Assert(pPGM->pGstPaePML4HC);
894
895 PX86PML4E pPml4eGst = &pPGM->pGstPaePML4HC->a[iPml4e];
896
897 rc = pgmPoolAlloc(pVM, pPml4eGst->u & X86_PML4E_PG_MASK, PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT, pVM->pgm.s.pHCShwAmd64CR3->idx, iPml4e, &pShwPage);
898 }
899 else
900 rc = pgmPoolAlloc(pVM, GCPtr + RT_BIT_64(63) /* hack: make the address unique */, PGMPOOLKIND_64BIT_PDPT_FOR_PHYS, PGMPOOL_IDX_NESTED_ROOT, iPml4e, &pShwPage);
901
902 if (rc == VERR_PGM_POOL_FLUSHED)
903 {
904 Log(("PGMShwSyncLongModePDPtr: PGM pool flushed (1) -> signal sync cr3\n"));
905 Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
906 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
907 return VINF_PGM_SYNC_CR3;
908 }
909 AssertRCReturn(rc, rc);
910 }
911 else
912 {
913 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
914 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
915 }
916 /* The PDPT was cached or created; hook it up now. */
917 pPml4e->u |= pShwPage->Core.Key
918 | (pGstPml4e->u & ~(X86_PML4E_PG_MASK | X86_PML4E_AVL_MASK | X86_PML4E_PCD | X86_PML4E_PWT));
919
920 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
921 PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
922 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
923
924 /* Allocate page directory if not present. */
925 if ( !pPdpe->n.u1Present
926 && !(pPdpe->u & X86_PDPE_PG_MASK))
927 {
928 if (!fNestedPaging)
929 {
930 Assert(pPGM->pGstPaePML4HC);
931
932 PX86PML4E pPml4eGst = &pPGM->pGstPaePML4HC->a[iPml4e];
933 PX86PDPT pPdptGst;
934 rc = PGM_GCPHYS_2_PTR(pVM, pPml4eGst->u & X86_PML4E_PG_MASK, &pPdptGst);
935 AssertRCReturn(rc, rc);
936
937 Assert(!(pPdpe->u & X86_PDPE_PG_MASK));
938 /* Create a reference back to the PDPT by using the index in its shadow page. */
939 rc = pgmPoolAlloc(pVM, pPdptGst->a[iPdPt].u & X86_PDPE_PG_MASK, PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD, pShwPage->idx, iPdPt, &pShwPage);
940 }
941 else
942 rc = pgmPoolAlloc(pVM, GCPtr + RT_BIT_64(62) /* hack: make the address unique */, PGMPOOLKIND_64BIT_PD_FOR_PHYS, pShwPage->idx, iPdPt, &pShwPage);
943
944 if (rc == VERR_PGM_POOL_FLUSHED)
945 {
946 Log(("PGMShwSyncLongModePDPtr: PGM pool flushed (2) -> signal sync cr3\n"));
947 Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
948 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
949 return VINF_PGM_SYNC_CR3;
950 }
951 AssertRCReturn(rc, rc);
952 }
953 else
954 {
955 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
956 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
957 }
958 /* The PD was cached or created; hook it up now. */
959 pPdpe->u |= pShwPage->Core.Key
960 | (pGstPdpe->u & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));
961
962 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
963 return VINF_SUCCESS;
964}
965
966/**
967 * Gets the SHADOW page directory pointer for the specified address.
968 *
969 * @returns VBox status.
970 * @param pVM VM handle.
971 * @param GCPtr The address.
972 * @param ppPdpt Receives address of pdpt
973 * @param ppPD Receives address of page directory
974 */
975VMMDECL(int) PGMShwGetLongModePDPtr(PVM pVM, RTGCUINTPTR64 GCPtr, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
976{
977 PPGM pPGM = &pVM->pgm.s;
978 const unsigned iPml4e = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
979 PPGMPOOL pPool = pPGM->CTXSUFF(pPool);
980 PX86PML4E pPml4e;
981 PPGMPOOLPAGE pShwPage;
982
983 AssertReturn(pVM->pgm.s.pHCPaePML4, VERR_INTERNAL_ERROR);
984
985 pPml4e = &pPGM->pHCPaePML4->a[iPml4e];
986 if (!pPml4e->n.u1Present)
987 return VERR_PAGE_MAP_LEVEL4_NOT_PRESENT;
988
989 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
990 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
991
992 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
993 PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
994 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
995
996 *ppPdpt = pPdpt;
997 if (!pPdpe->n.u1Present)
998 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
999
1000 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
1001 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1002
1003 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1004 return VINF_SUCCESS;
1005}
1006
1007/**
1008 * Syncs the SHADOW EPT page directory pointer for the specified address. Allocates
1009 * backing pages in case the PDPT or PML4 entry is missing.
1010 *
1011 * @returns VBox status.
1012 * @param pVM VM handle.
1013 * @param GCPtr The address.
1014 * @param ppPdpt Receives address of pdpt
1015 * @param ppPD Receives address of page directory
1016 */
1017VMMDECL(int) PGMShwGetEPTPDPtr(PVM pVM, RTGCUINTPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD)
1018{
1019 PPGM pPGM = &pVM->pgm.s;
1020 const unsigned iPml4e = (GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
1021 PPGMPOOL pPool = pPGM->CTXSUFF(pPool);
1022 PEPTPML4 pPml4 = (PEPTPML4)pPGM->pHCNestedRoot;
1023 PEPTPML4E pPml4e;
1024 PPGMPOOLPAGE pShwPage;
1025 int rc;
1026
1027 Assert(HWACCMIsNestedPagingActive(pVM));
1028 Assert(pPml4);
1029
1030 /* Allocate page directory pointer table if not present. */
1031 pPml4e = &pPml4->a[iPml4e];
1032 if ( !pPml4e->n.u1Present
1033 && !(pPml4e->u & EPT_PML4E_PG_MASK))
1034 {
1035 Assert(!(pPml4e->u & EPT_PML4E_PG_MASK));
1036
1037 rc = pgmPoolAlloc(pVM, (GCPtr & EPT_PML4E_PG_MASK) + RT_BIT_64(63) /* hack: make the address unique */, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOL_IDX_NESTED_ROOT, iPml4e, &pShwPage);
1038 if (rc == VERR_PGM_POOL_FLUSHED)
1039 {
1040 Log(("PGMShwSyncEPTPDPtr: PGM pool flushed (1) -> signal sync cr3\n"));
1041 Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
1042 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
1043 return VINF_PGM_SYNC_CR3;
1044 }
1045 AssertRCReturn(rc, rc);
1046 }
1047 else
1048 {
1049 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
1050 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1051 }
1052 /* The PDPT was cached or created; hook it up now and fill with the default value. */
1053 pPml4e->u = pShwPage->Core.Key;
1054 pPml4e->n.u1Present = 1;
1055 pPml4e->n.u1Write = 1;
1056 pPml4e->n.u1Execute = 1;
1057
1058 const unsigned iPdPt = (GCPtr >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
1059 PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1060 PEPTPDPTE pPdpe = &pPdpt->a[iPdPt];
1061
1062 if (ppPdpt)
1063 *ppPdpt = pPdpt;
1064
1065 /* Allocate page directory if not present. */
1066 if ( !pPdpe->n.u1Present
1067 && !(pPdpe->u & EPT_PDPTE_PG_MASK))
1068 {
1069 rc = pgmPoolAlloc(pVM, (GCPtr & EPT_PDPTE_PG_MASK) + RT_BIT_64(62) /* hack: make the address unique */, PGMPOOLKIND_64BIT_PD_FOR_PHYS, pShwPage->idx, iPdPt, &pShwPage);
1070 if (rc == VERR_PGM_POOL_FLUSHED)
1071 {
1072 Log(("PGMShwSyncEPTPDPtr: PGM pool flushed (2) -> signal sync cr3\n"));
1073 Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
1074 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
1075 return VINF_PGM_SYNC_CR3;
1076 }
1077 AssertRCReturn(rc, rc);
1078 }
1079 else
1080 {
1081 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
1082 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1083 }
1084 /* The PD was cached or created; hook it up now and fill with the default value. */
1085 pPdpe->u = pShwPage->Core.Key;
1086 pPdpe->n.u1Present = 1;
1087 pPdpe->n.u1Write = 1;
1088 pPdpe->n.u1Execute = 1;
1089
1090 *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1091 return VINF_SUCCESS;
1092}
1093
1094#endif /* !IN_GC */
1095
1096/**
1097 * Gets effective Guest OS page information.
1098 *
1099 * When GCPtr is in a big page, the function will return as if it was a normal
1100 * 4KB page. If the need to distinguish between big and normal pages arises
1101 * at a later point, a separate variant of PGMGstGetPage() can be created
1102 * for that purpose.
1103 *
1104 * @returns VBox status.
1105 * @param pVM VM Handle.
1106 * @param GCPtr Guest Context virtual address of the page.
1107 * @param pfFlags Where to store the flags. These are X86_PTE_*, even for big pages.
1108 * @param pGCPhys Where to store the GC physical address of the page.
1109 * This is page aligned. The fact that the
1110 */
1111VMMDECL(int) PGMGstGetPage(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
1112{
1113 return PGM_GST_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtr, pfFlags, pGCPhys);
1114}
1115
1116
1117/**
1118 * Checks if the page is present.
1119 *
1120 * @returns true if the page is present.
1121 * @returns false if the page is not present.
1122 * @param pVM The VM handle.
1123 * @param GCPtr Address within the page.
1124 */
1125VMMDECL(bool) PGMGstIsPagePresent(PVM pVM, RTGCPTR GCPtr)
1126{
1127 int rc = PGMGstGetPage(pVM, GCPtr, NULL, NULL);
1128 return VBOX_SUCCESS(rc);
1129}
1130
1131
1132/**
1133 * Sets (replaces) the page flags for a range of pages in the guest's tables.
1134 *
1135 * @returns VBox status.
1136 * @param pVM VM handle.
1137 * @param GCPtr The address of the first page.
1138 * @param cb The size of the range in bytes.
1139 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
1140 */
1141VMMDECL(int) PGMGstSetPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
1142{
1143 return PGMGstModifyPage(pVM, GCPtr, cb, fFlags, 0);
1144}
1145
1146
1147/**
1148 * Modify page flags for a range of pages in the guest's tables
1149 *
1150 * The existing flags are ANDed with the fMask and ORed with the fFlags.
1151 *
1152 * @returns VBox status code.
1153 * @param pVM VM handle.
1154 * @param GCPtr Virtual address of the first page in the range.
1155 * @param cb Size (in bytes) of the range to apply the modification to.
1156 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
1157 * @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
1158 * Be very CAREFUL when ~'ing constants which could be 32-bit!
1159 */
1160VMMDECL(int) PGMGstModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
1161{
1162 STAM_PROFILE_START(&CTXMID(pVM->pgm.s.Stat,GstModifyPage), a);
1163
1164 /*
1165 * Validate input.
1166 */
1167 if (fFlags & X86_PTE_PAE_PG_MASK)
1168 {
1169 AssertMsgFailed(("fFlags=%#llx\n", fFlags));
1170 STAM_PROFILE_STOP(&CTXMID(pVM->pgm.s.Stat,GstModifyPage), a);
1171 return VERR_INVALID_PARAMETER;
1172 }
1173
1174 if (!cb)
1175 {
1176 AssertFailed();
1177 STAM_PROFILE_STOP(&CTXMID(pVM->pgm.s.Stat,GstModifyPage), a);
1178 return VERR_INVALID_PARAMETER;
1179 }
1180
1181 LogFlow(("PGMGstModifyPage %VGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));
1182
1183 /*
1184 * Adjust input.
1185 */
1186 cb += (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
1187 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
1188 GCPtr = (RTGCPTR)((RTGCUINTPTR)GCPtr & PAGE_BASE_GC_MASK);
1189
1190 /*
1191 * Call worker.
1192 */
1193 int rc = PGM_GST_PFN(ModifyPage, pVM)(pVM, (RTGCUINTPTR)GCPtr, cb, fFlags, fMask);
1194
1195 STAM_PROFILE_STOP(&CTXMID(pVM->pgm.s.Stat,GstModifyPage), a);
1196 return rc;
1197}
1198
1199
1200/**
1201 * Gets the current CR3 register value for the shadow memory context.
1202 * @returns CR3 value.
1203 * @param pVM The VM handle.
1204 */
1205VMMDECL(RTHCPHYS) PGMGetHyperCR3(PVM pVM)
1206{
1207 PGMMODE enmShadowMode = pVM->pgm.s.enmShadowMode;
1208 switch (enmShadowMode)
1209 {
1210 case PGMMODE_32_BIT:
1211 return pVM->pgm.s.HCPhys32BitPD;
1212
1213 case PGMMODE_PAE:
1214 case PGMMODE_PAE_NX:
1215 return pVM->pgm.s.HCPhysPaePDPT;
1216
1217 case PGMMODE_AMD64:
1218 case PGMMODE_AMD64_NX:
1219 return pVM->pgm.s.HCPhysPaePML4;
1220
1221 case PGMMODE_EPT:
1222 return pVM->pgm.s.HCPhysNestedRoot;
1223
1224 case PGMMODE_NESTED:
1225 return PGMGetNestedCR3(pVM, PGMGetHostMode(pVM));
1226
1227 default:
1228 AssertMsgFailed(("enmShadowMode=%d\n", enmShadowMode));
1229 return ~0;
1230 }
1231}
1232
1233/**
1234 * Gets the current CR3 register value for the nested memory context.
1235 * @returns CR3 value.
1236 * @param pVM The VM handle.
 * @param enmShadowMode The shadow paging mode to get the CR3 for.
1237 */
1238VMMDECL(RTHCPHYS) PGMGetNestedCR3(PVM pVM, PGMMODE enmShadowMode)
1239{
1240 switch (enmShadowMode)
1241 {
1242 case PGMMODE_32_BIT:
1243 return pVM->pgm.s.HCPhys32BitPD;
1244
1245 case PGMMODE_PAE:
1246 case PGMMODE_PAE_NX:
1247 return pVM->pgm.s.HCPhysPaePDPT;
1248
1249 case PGMMODE_AMD64:
1250 case PGMMODE_AMD64_NX:
1251 return pVM->pgm.s.HCPhysPaePML4;
1252
1253 default:
1254 AssertMsgFailed(("enmShadowMode=%d\n", enmShadowMode));
1255 return ~0;
1256 }
1257}
1258
1259
1260/**
1261 * Gets the CR3 register value for the 32-Bit shadow memory context.
1262 * @returns CR3 value.
1263 * @param pVM The VM handle.
1264 */
1265VMMDECL(RTHCPHYS) PGMGetHyper32BitCR3(PVM pVM)
1266{
1267 return pVM->pgm.s.HCPhys32BitPD;
1268}
1269
1270
1271/**
1272 * Gets the CR3 register value for the PAE shadow memory context.
1273 * @returns CR3 value.
1274 * @param pVM The VM handle.
1275 */
1276VMMDECL(RTHCPHYS) PGMGetHyperPaeCR3(PVM pVM)
1277{
1278 return pVM->pgm.s.HCPhysPaePDPT;
1279}
1280
1281
1282/**
1283 * Gets the CR3 register value for the AMD64 shadow memory context.
1284 * @returns CR3 value.
1285 * @param pVM The VM handle.
1286 */
1287VMMDECL(RTHCPHYS) PGMGetHyperAmd64CR3(PVM pVM)
1288{
1289 return pVM->pgm.s.HCPhysPaePML4;
1290}
1291
1292
1293/**
1294 * Gets the current CR3 register value for the HC intermediate memory context.
1295 * @returns CR3 value.
1296 * @param pVM The VM handle.
1297 */
1298VMMDECL(RTHCPHYS) PGMGetInterHCCR3(PVM pVM)
1299{
1300 switch (pVM->pgm.s.enmHostMode)
1301 {
1302 case SUPPAGINGMODE_32_BIT:
1303 case SUPPAGINGMODE_32_BIT_GLOBAL:
1304 return pVM->pgm.s.HCPhysInterPD;
1305
1306 case SUPPAGINGMODE_PAE:
1307 case SUPPAGINGMODE_PAE_GLOBAL:
1308 case SUPPAGINGMODE_PAE_NX:
1309 case SUPPAGINGMODE_PAE_GLOBAL_NX:
1310 return pVM->pgm.s.HCPhysInterPaePDPT;
1311
1312 case SUPPAGINGMODE_AMD64:
1313 case SUPPAGINGMODE_AMD64_GLOBAL:
1314 case SUPPAGINGMODE_AMD64_NX:
1315 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
1316 return pVM->pgm.s.HCPhysInterPaePDPT;
1317
1318 default:
1319 AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode));
1320 return ~0;
1321 }
1322}
1323
1324
1325/**
1326 * Gets the current CR3 register value for the GC intermediate memory context.
1327 * @returns CR3 value.
1328 * @param pVM The VM handle.
1329 */
1330VMMDECL(RTHCPHYS) PGMGetInterGCCR3(PVM pVM)
1331{
1332 switch (pVM->pgm.s.enmShadowMode)
1333 {
1334 case PGMMODE_32_BIT:
1335 return pVM->pgm.s.HCPhysInterPD;
1336
1337 case PGMMODE_PAE:
1338 case PGMMODE_PAE_NX:
1339 return pVM->pgm.s.HCPhysInterPaePDPT;
1340
1341 case PGMMODE_AMD64:
1342 case PGMMODE_AMD64_NX:
1343 return pVM->pgm.s.HCPhysInterPaePML4;
1344
1345 case PGMMODE_EPT:
1346 case PGMMODE_NESTED:
1347 return 0; /* not relevant */
1348
1349 default:
1350 AssertMsgFailed(("enmShadowMode=%d\n", pVM->pgm.s.enmShadowMode));
1351 return ~0;
1352 }
1353}
1354
1355
1356/**
1357 * Gets the CR3 register value for the 32-Bit intermediate memory context.
1358 * @returns CR3 value.
1359 * @param pVM The VM handle.
1360 */
1361VMMDECL(RTHCPHYS) PGMGetInter32BitCR3(PVM pVM)
1362{
1363 return pVM->pgm.s.HCPhysInterPD;
1364}
1365
1366
1367/**
1368 * Gets the CR3 register value for the PAE intermediate memory context.
1369 * @returns CR3 value.
1370 * @param pVM The VM handle.
1371 */
1372VMMDECL(RTHCPHYS) PGMGetInterPaeCR3(PVM pVM)
1373{
1374 return pVM->pgm.s.HCPhysInterPaePDPT;
1375}
1376
1377
1378/**
1379 * Gets the CR3 register value for the AMD64 intermediate memory context.
1380 * @returns CR3 value.
1381 * @param pVM The VM handle.
1382 */
1383VMMDECL(RTHCPHYS) PGMGetInterAmd64CR3(PVM pVM)
1384{
1385 return pVM->pgm.s.HCPhysInterPaePML4;
1386}
1387
1388
1389/**
1390 * Performs and schedules necessary updates following a CR3 load or reload.
1391 *
1392 * This will normally involve mapping the guest PD or nPDPT.
1393 *
1394 * @returns VBox status code.
1395 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
1396 * safely be ignored and overridden since the FF will be set too then.
1397 * @param pVM VM handle.
1398 * @param cr3 The new cr3.
1399 * @param fGlobal Indicates whether this is a global flush or not.
1400 */
1401VMMDECL(int) PGMFlushTLB(PVM pVM, uint64_t cr3, bool fGlobal)
1402{
1403 STAM_PROFILE_START(&pVM->pgm.s.StatFlushTLB, a);
1404
1405 /*
1406 * Always flag the necessary updates; necessary for hardware acceleration
1407 */
1408 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
1409 if (fGlobal)
1410 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
1411 LogFlow(("PGMFlushTLB: cr3=%VX64 OldCr3=%VX64 fGlobal=%d\n", cr3, pVM->pgm.s.GCPhysCR3, fGlobal));
1412
1413 /*
1414 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
1415 */
1416 int rc = VINF_SUCCESS;
1417 RTGCPHYS GCPhysCR3;
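    /* PAE and long mode guests use a different CR3 alignment, hence the different mask. */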
1418 if ( pVM->pgm.s.enmGuestMode == PGMMODE_PAE
1419 || pVM->pgm.s.enmGuestMode == PGMMODE_PAE_NX
1420 || pVM->pgm.s.enmGuestMode == PGMMODE_AMD64
1421 || pVM->pgm.s.enmGuestMode == PGMMODE_AMD64_NX)
1422 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1423 else
1424 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1425 if (pVM->pgm.s.GCPhysCR3 != GCPhysCR3)
1426 {
1427 pVM->pgm.s.GCPhysCR3 = GCPhysCR3;
1428 rc = PGM_GST_PFN(MapCR3, pVM)(pVM, GCPhysCR3);
1429 if (VBOX_SUCCESS(rc) && !pVM->pgm.s.fMappingsFixed)
1430 {
1431 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1432 rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, GCPhysCR3);
1433 }
1434 if (fGlobal)
1435 STAM_COUNTER_INC(&pVM->pgm.s.StatFlushTLBNewCR3Global);
1436 else
1437 STAM_COUNTER_INC(&pVM->pgm.s.StatFlushTLBNewCR3);
1438 }
1439 else
1440 {
1441 /*
1442 * Check if we have a pending update of the CR3 monitoring.
1443 */
1444 if (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
1445 {
1446 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1447 Assert(!pVM->pgm.s.fMappingsFixed);
1448 rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, GCPhysCR3);
1449 }
1450 if (fGlobal)
1451 STAM_COUNTER_INC(&pVM->pgm.s.StatFlushTLBSameCR3Global);
1452 else
1453 STAM_COUNTER_INC(&pVM->pgm.s.StatFlushTLBSameCR3);
1454 }
1455
1456 STAM_PROFILE_STOP(&pVM->pgm.s.StatFlushTLB, a);
1457 return rc;
1458}
1459
1460/**
1461 * Performs and schedules necessary updates following a CR3 load or reload,
1462 * without actually flushing the TLB as with PGMFlushTLB.
1463 *
1464 * This will normally involve mapping the guest PD or nPDPT.
1465 *
1466 * @returns VBox status code.
1467 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
1468 * safely be ignored and overridden since the FF will be set too then.
1469 * @param pVM VM handle.
1470 * @param cr3 The new cr3.
1471 */
1472VMMDECL(int) PGMUpdateCR3(PVM pVM, uint64_t cr3)
1473{
1474 LogFlow(("PGMUpdateCR3: cr3=%VX64 OldCr3=%VX64\n", cr3, pVM->pgm.s.GCPhysCR3));
1475
1476 /* We assume we're only called in nested paging mode. */
1477 Assert(pVM->pgm.s.fMappingsFixed);
1478 Assert(!(pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3));
1479 Assert(pVM->pgm.s.enmShadowMode == PGMMODE_NESTED || pVM->pgm.s.enmShadowMode == PGMMODE_EPT);
1480
1481 /*
1482 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
1483 */
1484 int rc = VINF_SUCCESS;
1485 RTGCPHYS GCPhysCR3;
1486 if ( pVM->pgm.s.enmGuestMode == PGMMODE_PAE
1487 || pVM->pgm.s.enmGuestMode == PGMMODE_PAE_NX
1488 || pVM->pgm.s.enmGuestMode == PGMMODE_AMD64
1489 || pVM->pgm.s.enmGuestMode == PGMMODE_AMD64_NX)
1490 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1491 else
1492 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1493 if (pVM->pgm.s.GCPhysCR3 != GCPhysCR3)
1494 {
1495 pVM->pgm.s.GCPhysCR3 = GCPhysCR3;
1496 rc = PGM_GST_PFN(MapCR3, pVM)(pVM, GCPhysCR3);
1497 }
1498 AssertRC(rc);
1499 return rc;
1500}
1501
1502/**
1503 * Synchronize the paging structures.
1504 *
1505 * This function is called in response to the VM_FF_PGM_SYNC_CR3 and
1506 * VM_FF_PGM_SYNC_CR3_NON_GLOBAL. Those two force action flags are set
1507 * in several places, most importantly whenever the CR3 is loaded.
1508 *
1509 * @returns VBox status code.
1510 * @param pVM The virtual machine.
1511 * @param cr0 Guest context CR0 register
1512 * @param cr3 Guest context CR3 register
1513 * @param cr4 Guest context CR4 register
1514 * @param fGlobal Including global page directories or not
1515 */
1516VMMDECL(int) PGMSyncCR3(PVM pVM, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
1517{
1518 /*
1519 * We might be called when we shouldn't.
1520 *
1521 * The mode switching will ensure that the PD is resynced
1522 * after every mode switch. So, if we find ourselves here
1523 * when in protected or real mode we can safely disable the
1524 * FF and return immediately.
1525 */
1526 if (pVM->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
1527 {
1528 Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
1529 VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3);
1530 VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
1531 return VINF_SUCCESS;
1532 }
1533
1534 /* If global pages are not supported, then all flushes are global */
1535 if (!(cr4 & X86_CR4_PGE))
1536 fGlobal = true;
1537 LogFlow(("PGMSyncCR3: cr0=%VX64 cr3=%VX64 cr4=%VX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
1538 VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3), VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL)));
1539
1540 /*
1541 * Let the 'Bth' function do the work and we'll just keep track of the flags.
1542 */
1543 STAM_PROFILE_START(&pVM->pgm.s.CTXMID(Stat,SyncCR3), a);
1544 int rc = PGM_BTH_PFN(SyncCR3, pVM)(pVM, cr0, cr3, cr4, fGlobal);
1545 STAM_PROFILE_STOP(&pVM->pgm.s.CTXMID(Stat,SyncCR3), a);
1546 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || VBOX_FAILURE(rc), ("rc=%Vrc\n", rc));
1547 if (rc == VINF_SUCCESS)
1548 {
1549 if (!(pVM->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
1550 {
1551 VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3);
1552 VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
1553 }
1554
1555 /*
1556 * Check if we have a pending update of the CR3 monitoring.
1557 */
1558 if (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
1559 {
1560 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1561 Assert(!pVM->pgm.s.fMappingsFixed);
1562 Assert(pVM->pgm.s.GCPhysCR3 == pVM->pgm.s.GCPhysGstCR3Monitored);
1563 rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, pVM->pgm.s.GCPhysCR3);
1564 }
1565 }
1566
1567 /*
1568 * Now flush the CR3 (guest context).
1569 */
1570 if (rc == VINF_SUCCESS)
1571 PGM_INVL_GUEST_TLBS();
1572 return rc;
1573}
1574
1575
1576/**
1577 * Called whenever CR0 or CR4 changes in a way which may
1578 * affect the paging mode.
1579 *
1580 * @returns VBox status code fit for scheduling in GC and R0.
1581 * @retval VINF_SUCCESS if there was no change, or it was successfully dealt with.
1582 * @retval VINF_PGM_CHANGE_MODE if we're in GC or R0 and the mode changes.
1583 * @param pVM VM handle.
1584 * @param cr0 The new cr0.
1585 * @param cr4 The new cr4.
1586 * @param efer The new extended feature enable register.
1587 */
1588VMMDECL(int) PGMChangeMode(PVM pVM, uint64_t cr0, uint64_t cr4, uint64_t efer)
1589{
1590 PGMMODE enmGuestMode;
1591
1592 /*
1593 * Calc the new guest mode.
1594 */
1595 if (!(cr0 & X86_CR0_PE))
1596 enmGuestMode = PGMMODE_REAL;
1597 else if (!(cr0 & X86_CR0_PG))
1598 enmGuestMode = PGMMODE_PROTECTED;
1599 else if (!(cr4 & X86_CR4_PAE))
1600 enmGuestMode = PGMMODE_32_BIT;
1601 else if (!(efer & MSR_K6_EFER_LME))
1602 {
1603 if (!(efer & MSR_K6_EFER_NXE))
1604 enmGuestMode = PGMMODE_PAE;
1605 else
1606 enmGuestMode = PGMMODE_PAE_NX;
1607 }
1608 else
1609 {
1610 if (!(efer & MSR_K6_EFER_NXE))
1611 enmGuestMode = PGMMODE_AMD64;
1612 else
1613 enmGuestMode = PGMMODE_AMD64_NX;
1614 }
1615
1616 /*
1617 * Did it change?
1618 */
1619 if (pVM->pgm.s.enmGuestMode == enmGuestMode)
1620 return VINF_SUCCESS;
1621
1622 /* Flush the TLB */
1623 PGM_INVL_GUEST_TLBS();
1624
1625#ifdef IN_RING3
1626 return PGMR3ChangeMode(pVM, enmGuestMode);
1627#else
1628 Log(("PGMChangeMode: returns VINF_PGM_CHANGE_MODE.\n"));
1629 return VINF_PGM_CHANGE_MODE;
1630#endif
1631}
1632
1633
1634/**
1635 * Gets the current guest paging mode.
1636 *
1637 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
1638 *
1639 * @returns The current paging mode.
1640 * @param pVM The VM handle.
1641 */
1642VMMDECL(PGMMODE) PGMGetGuestMode(PVM pVM)
1643{
1644 return pVM->pgm.s.enmGuestMode;
1645}
1646
1647
1648/**
1649 * Gets the current shadow paging mode.
1650 *
1651 * @returns The current paging mode.
1652 * @param pVM The VM handle.
1653 */
1654VMMDECL(PGMMODE) PGMGetShadowMode(PVM pVM)
1655{
1656 return pVM->pgm.s.enmShadowMode;
1657}
1658
1659/**
1660 * Gets the current host paging mode.
1661 *
1662 * @returns The current paging mode.
1663 * @param pVM The VM handle.
1664 */
1665VMMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
1666{
1667 switch (pVM->pgm.s.enmHostMode)
1668 {
1669 case SUPPAGINGMODE_32_BIT:
1670 case SUPPAGINGMODE_32_BIT_GLOBAL:
1671 return PGMMODE_32_BIT;
1672
1673 case SUPPAGINGMODE_PAE:
1674 case SUPPAGINGMODE_PAE_GLOBAL:
1675 return PGMMODE_PAE;
1676
1677 case SUPPAGINGMODE_PAE_NX:
1678 case SUPPAGINGMODE_PAE_GLOBAL_NX:
1679 return PGMMODE_PAE_NX;
1680
1681 case SUPPAGINGMODE_AMD64:
1682 case SUPPAGINGMODE_AMD64_GLOBAL:
1683 return PGMMODE_AMD64;
1684
1685 case SUPPAGINGMODE_AMD64_NX:
1686 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
1687 return PGMMODE_AMD64_NX;
1688
1689 default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
1690 }
1691
1692 return PGMMODE_INVALID;
1693}
1694
1695
1696/**
1697 * Get mode name.
1698 *
1699 * @returns read-only name string.
1700 * @param enmMode The mode which name is desired.
1701 */
1702VMMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
1703{
1704 switch (enmMode)
1705 {
1706 case PGMMODE_REAL: return "Real";
1707 case PGMMODE_PROTECTED: return "Protected";
1708 case PGMMODE_32_BIT: return "32-bit";
1709 case PGMMODE_PAE: return "PAE";
1710 case PGMMODE_PAE_NX: return "PAE+NX";
1711 case PGMMODE_AMD64: return "AMD64";
1712 case PGMMODE_AMD64_NX: return "AMD64+NX";
1713 case PGMMODE_NESTED: return "Nested";
1714 case PGMMODE_EPT: return "EPT";
1715 default: return "unknown mode value";
1716 }
1717}
1718
1719
1720/**
1721 * Acquire the PGM lock.
1722 *
1723 * @returns VBox status code
1724 * @param pVM The VM to operate on.
1725 */
1726int pgmLock(PVM pVM)
1727{
1728 int rc = PDMCritSectEnter(&pVM->pgm.s.CritSect, VERR_SEM_BUSY);
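    /* In GC and ring-0 we cannot block on the critical section, so on contention we ask ring-3 to take the lock for us. */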
1729#ifdef IN_GC
1730 if (rc == VERR_SEM_BUSY)
1731 rc = VMMGCCallHost(pVM, VMMCALLHOST_PGM_LOCK, 0);
1732#elif defined(IN_RING0)
1733 if (rc == VERR_SEM_BUSY)
1734 rc = VMMR0CallHost(pVM, VMMCALLHOST_PGM_LOCK, 0);
1735#endif
1736 AssertRC(rc);
1737 return rc;
1738}
1739
1740
1741/**
1742 * Release the PGM lock.
1743 *
1744 * @returns VBox status code
1745 * @param pVM The VM to operate on.
1746 */
1747void pgmUnlock(PVM pVM)
1748{
1749 PDMCritSectLeave(&pVM->pgm.s.CritSect);
1750}
1751
1752
1753#ifdef VBOX_STRICT
1754
1755/**
1756 * Asserts that there are no mapping conflicts.
1757 *
1758 * @returns Number of conflicts.
1759 * @param pVM The VM Handle.
1760 */
1761VMMDECL(unsigned) PGMAssertNoMappingConflicts(PVM pVM)
1762{
1763 unsigned cErrors = 0;
1764
1765 /*
1766 * Check for mapping conflicts.
1767 */
1768 for (PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
1769 pMapping;
1770 pMapping = pMapping->CTX_SUFF(pNext))
1771 {
1772 /** @todo This is slow and should be optimized, but since it's just assertions I don't care now. */
1773 for (RTGCUINTPTR GCPtr = (RTGCUINTPTR)pMapping->GCPtr;
1774 GCPtr <= (RTGCUINTPTR)pMapping->GCPtrLast;
1775 GCPtr += PAGE_SIZE)
1776 {
1777 int rc = PGMGstGetPage(pVM, (RTGCPTR)GCPtr, NULL, NULL);
1778 if (rc != VERR_PAGE_TABLE_NOT_PRESENT)
1779 {
1780 AssertMsgFailed(("Conflict at %VGv with %s\n", GCPtr, R3STRING(pMapping->pszDesc)));
1781 cErrors++;
1782 break;
1783 }
1784 }
1785 }
1786
1787 return cErrors;
1788}
1789
1790
1791/**
1792 * Asserts that everything related to the guest CR3 is correctly shadowed.
1793 *
1794 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
1795 * and assert the correctness of the guest CR3 mapping before asserting that the
1796 * shadow page tables is in sync with the guest page tables.
1797 *
1798 * @returns Number of conflicts.
1799 * @param pVM The VM Handle.
1800 * @param cr3 The current guest CR3 register value.
1801 * @param cr4 The current guest CR4 register value.
1802 */
1803VMMDECL(unsigned) PGMAssertCR3(PVM pVM, uint64_t cr3, uint64_t cr4)
1804{
1805 STAM_PROFILE_START(&pVM->pgm.s.CTXMID(Stat,SyncCR3), a);
1806 unsigned cErrors = PGM_BTH_PFN(AssertCR3, pVM)(pVM, cr3, cr4, 0, ~(RTGCUINTPTR)0);
1807 STAM_PROFILE_STOP(&pVM->pgm.s.CTXMID(Stat,SyncCR3), a);
1808 return cErrors;
1810}
1811
1812#endif /* VBOX_STRICT */