VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp@13186

Last change on this file since 13186 was 13122, checked in by vboxsync, 16 years ago

Corrected return value checks for PGMShwGetEPTPDPtr.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 63.7 KB
1/* $Id: PGMAll.cpp 13122 2008-10-09 11:22:53Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor - All context code.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#define LOG_GROUP LOG_GROUP_PGM
26#include <VBox/pgm.h>
27#include <VBox/cpum.h>
28#include <VBox/selm.h>
29#include <VBox/iom.h>
30#include <VBox/sup.h>
31#include <VBox/mm.h>
32#include <VBox/stam.h>
33#include <VBox/csam.h>
34#include <VBox/patm.h>
35#include <VBox/trpm.h>
36#include <VBox/rem.h>
37#include <VBox/em.h>
38#include <VBox/hwaccm.h>
39#include <VBox/hwacc_vmx.h>
40#include "PGMInternal.h"
41#include <VBox/vm.h>
42#include <iprt/assert.h>
43#include <iprt/asm.h>
44#include <iprt/string.h>
45#include <VBox/log.h>
46#include <VBox/param.h>
47#include <VBox/err.h>
48
49
50/*******************************************************************************
51* Structures and Typedefs *
52*******************************************************************************/
53/**
54 * State structure for PGM_GST_NAME(HandlerVirtualUpdate) that's
55 * passed to PGM_GST_NAME(VirtHandlerUpdateOne) during enumeration.
56 */
57typedef struct PGMHVUSTATE
58{
59 /** The VM handle. */
60 PVM pVM;
61 /** The todo flags. */
62 RTUINT fTodo;
63 /** The CR4 register value. */
64 uint32_t cr4;
65} PGMHVUSTATE, *PPGMHVUSTATE;
66
67
68/*******************************************************************************
69* Internal Functions *
70*******************************************************************************/
71
72/*
73 * Shadow - 32-bit mode
74 */
75#define PGM_SHW_TYPE PGM_TYPE_32BIT
76#define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
77#include "PGMAllShw.h"
78
79/* Guest - real mode */
80#define PGM_GST_TYPE PGM_TYPE_REAL
81#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
82#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
83#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
84#include "PGMAllGst.h"
85#include "PGMAllBth.h"
86#undef BTH_PGMPOOLKIND_PT_FOR_PT
87#undef PGM_BTH_NAME
88#undef PGM_GST_TYPE
89#undef PGM_GST_NAME
90
91/* Guest - protected mode */
92#define PGM_GST_TYPE PGM_TYPE_PROT
93#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
94#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
95#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
96#include "PGMAllGst.h"
97#include "PGMAllBth.h"
98#undef BTH_PGMPOOLKIND_PT_FOR_PT
99#undef PGM_BTH_NAME
100#undef PGM_GST_TYPE
101#undef PGM_GST_NAME
102
103/* Guest - 32-bit mode */
104#define PGM_GST_TYPE PGM_TYPE_32BIT
105#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
106#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
107#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
108#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
109#include "PGMAllGst.h"
110#include "PGMAllBth.h"
111#undef BTH_PGMPOOLKIND_PT_FOR_BIG
112#undef BTH_PGMPOOLKIND_PT_FOR_PT
113#undef PGM_BTH_NAME
114#undef PGM_GST_TYPE
115#undef PGM_GST_NAME
116
117#undef PGM_SHW_TYPE
118#undef PGM_SHW_NAME
119
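/*
 * A note on the pattern used above and repeated for the other shadow modes below:
 * PGMAllShw.h, PGMAllGst.h and PGMAllBth.h are textual templates. Each inclusion
 * stamps out a mode-specific copy of the worker functions, with the names produced
 * by the PGM_SHW_NAME / PGM_GST_NAME / PGM_BTH_NAME macros in effect at that point.
 * A minimal sketch of the idea, using illustrative names rather than the real PGM
 * macros:
 *
 *     // template.h
 *     int TMPL_NAME(GetPage)(int iPage)
 *     {
 *         return iPage;                   // mode-specific body
 *     }
 *
 *     // instantiation site
 *     #define TMPL_NAME(name) Mode32Bit##name
 *     #include "template.h"               // defines Mode32BitGetPage()
 *     #undef  TMPL_NAME
 */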
120
121/*
122 * Shadow - PAE mode
123 */
124#define PGM_SHW_TYPE PGM_TYPE_PAE
125#define PGM_SHW_NAME(name) PGM_SHW_NAME_PAE(name)
126#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
127#include "PGMAllShw.h"
128
129/* Guest - real mode */
130#define PGM_GST_TYPE PGM_TYPE_REAL
131#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
132#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
133#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
134#include "PGMAllBth.h"
135#undef BTH_PGMPOOLKIND_PT_FOR_PT
136#undef PGM_BTH_NAME
137#undef PGM_GST_TYPE
138#undef PGM_GST_NAME
139
140/* Guest - protected mode */
141#define PGM_GST_TYPE PGM_TYPE_PROT
142#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
143#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
144#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
145#include "PGMAllBth.h"
146#undef BTH_PGMPOOLKIND_PT_FOR_PT
147#undef PGM_BTH_NAME
148#undef PGM_GST_TYPE
149#undef PGM_GST_NAME
150
151/* Guest - 32-bit mode */
152#define PGM_GST_TYPE PGM_TYPE_32BIT
153#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
154#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
155#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
156#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
157#include "PGMAllBth.h"
158#undef BTH_PGMPOOLKIND_PT_FOR_BIG
159#undef BTH_PGMPOOLKIND_PT_FOR_PT
160#undef PGM_BTH_NAME
161#undef PGM_GST_TYPE
162#undef PGM_GST_NAME
163
164
165/* Guest - PAE mode */
166#define PGM_GST_TYPE PGM_TYPE_PAE
167#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
168#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
169#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
170#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
171#include "PGMAllGst.h"
172#include "PGMAllBth.h"
173#undef BTH_PGMPOOLKIND_PT_FOR_BIG
174#undef BTH_PGMPOOLKIND_PT_FOR_PT
175#undef PGM_BTH_NAME
176#undef PGM_GST_TYPE
177#undef PGM_GST_NAME
178
179#undef PGM_SHW_TYPE
180#undef PGM_SHW_NAME
181
182
183#ifndef IN_GC /* AMD64 implies VT-x/AMD-V */
184/*
185 * Shadow - AMD64 mode
186 */
187#define PGM_SHW_TYPE PGM_TYPE_AMD64
188#define PGM_SHW_NAME(name) PGM_SHW_NAME_AMD64(name)
189#include "PGMAllShw.h"
190
191/* Guest - protected mode */
192#define PGM_GST_TYPE PGM_TYPE_PROT
193#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
194#define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
195#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
196#include "PGMAllBth.h"
197#undef BTH_PGMPOOLKIND_PT_FOR_PT
198#undef PGM_BTH_NAME
199#undef PGM_GST_TYPE
200#undef PGM_GST_NAME
201
202#ifdef VBOX_WITH_64_BITS_GUESTS
203/* Guest - AMD64 mode */
204#define PGM_GST_TYPE PGM_TYPE_AMD64
205#define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
206#define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
207#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
208#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
209#include "PGMAllGst.h"
210#include "PGMAllBth.h"
211#undef BTH_PGMPOOLKIND_PT_FOR_BIG
212#undef BTH_PGMPOOLKIND_PT_FOR_PT
213#undef PGM_BTH_NAME
214#undef PGM_GST_TYPE
215#undef PGM_GST_NAME
216#endif
217
218#undef PGM_SHW_TYPE
219#undef PGM_SHW_NAME
220
221/*
222 * Shadow - Nested paging mode
223 */
224#define PGM_SHW_TYPE PGM_TYPE_NESTED
225#define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED(name)
226#include "PGMAllShw.h"
227
228/* Guest - real mode */
229#define PGM_GST_TYPE PGM_TYPE_REAL
230#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
231#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_REAL(name)
232#include "PGMAllBth.h"
233#undef PGM_BTH_NAME
234#undef PGM_GST_TYPE
235#undef PGM_GST_NAME
236
237/* Guest - protected mode */
238#define PGM_GST_TYPE PGM_TYPE_PROT
239#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
240#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PROT(name)
241#include "PGMAllBth.h"
242#undef PGM_BTH_NAME
243#undef PGM_GST_TYPE
244#undef PGM_GST_NAME
245
246/* Guest - 32-bit mode */
247#define PGM_GST_TYPE PGM_TYPE_32BIT
248#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
249#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT(name)
250#include "PGMAllBth.h"
251#undef PGM_BTH_NAME
252#undef PGM_GST_TYPE
253#undef PGM_GST_NAME
254
255/* Guest - PAE mode */
256#define PGM_GST_TYPE PGM_TYPE_PAE
257#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
258#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE(name)
259#include "PGMAllBth.h"
260#undef PGM_BTH_NAME
261#undef PGM_GST_TYPE
262#undef PGM_GST_NAME
263
264#ifdef VBOX_WITH_64_BITS_GUESTS
265/* Guest - AMD64 mode */
266#define PGM_GST_TYPE PGM_TYPE_AMD64
267#define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
268#define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64(name)
269#include "PGMAllBth.h"
270#undef PGM_BTH_NAME
271#undef PGM_GST_TYPE
272#undef PGM_GST_NAME
273#endif
274
275#undef PGM_SHW_TYPE
276#undef PGM_SHW_NAME
277
278/*
279 * Shadow - EPT
280 */
281#define PGM_SHW_TYPE PGM_TYPE_EPT
282#define PGM_SHW_NAME(name) PGM_SHW_NAME_EPT(name)
283#include "PGMAllShw.h"
284
285/* Guest - real mode */
286#define PGM_GST_TYPE PGM_TYPE_REAL
287#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
288#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_REAL(name)
289#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
290#include "PGMAllBth.h"
291#undef BTH_PGMPOOLKIND_PT_FOR_PT
292#undef PGM_BTH_NAME
293#undef PGM_GST_TYPE
294#undef PGM_GST_NAME
295
296/* Guest - protected mode */
297#define PGM_GST_TYPE PGM_TYPE_PROT
298#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
299#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
300#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
301#include "PGMAllBth.h"
302#undef BTH_PGMPOOLKIND_PT_FOR_PT
303#undef PGM_BTH_NAME
304#undef PGM_GST_TYPE
305#undef PGM_GST_NAME
306
307/* Guest - 32-bit mode */
308#define PGM_GST_TYPE PGM_TYPE_32BIT
309#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
310#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_32BIT(name)
311#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
312#include "PGMAllBth.h"
313#undef BTH_PGMPOOLKIND_PT_FOR_PT
314#undef PGM_BTH_NAME
315#undef PGM_GST_TYPE
316#undef PGM_GST_NAME
317
318/* Guest - PAE mode */
319#define PGM_GST_TYPE PGM_TYPE_PAE
320#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
321#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PAE(name)
322#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
323#include "PGMAllBth.h"
324#undef BTH_PGMPOOLKIND_PT_FOR_PT
325#undef PGM_BTH_NAME
326#undef PGM_GST_TYPE
327#undef PGM_GST_NAME
328
329#ifdef VBOX_WITH_64_BITS_GUESTS
330/* Guest - AMD64 mode */
331#define PGM_GST_TYPE PGM_TYPE_AMD64
332#define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
333#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_AMD64(name)
334#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
335#include "PGMAllBth.h"
336#undef BTH_PGMPOOLKIND_PT_FOR_PT
337#undef PGM_BTH_NAME
338#undef PGM_GST_TYPE
339#undef PGM_GST_NAME
340#endif
341
342#undef PGM_SHW_TYPE
343#undef PGM_SHW_NAME
344
345#endif /* !IN_GC */
346
347
348#ifndef IN_RING3
349/**
350 * #PF Handler.
351 *
352 * @returns VBox status code (appropriate for trap handling and GC return).
353 * @param pVM VM Handle.
354 * @param uErr The trap error code.
355 * @param pRegFrame Trap register frame.
356 * @param pvFault The fault address.
357 */
358VMMDECL(int) PGMTrap0eHandler(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
359{
360 LogFlow(("PGMTrap0eHandler: uErr=%RGu pvFault=%VGv eip=%VGv\n", uErr, pvFault, pRegFrame->rip));
361 STAM_PROFILE_START(&pVM->pgm.s.StatRZTrap0e, a);
362 STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );
363
364
365#ifdef VBOX_WITH_STATISTICS
366 /*
367 * Error code stats.
368 */
369 if (uErr & X86_TRAP_PF_US)
370 {
371 if (!(uErr & X86_TRAP_PF_P))
372 {
373 if (uErr & X86_TRAP_PF_RW)
374 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSNotPresentWrite);
375 else
376 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSNotPresentRead);
377 }
378 else if (uErr & X86_TRAP_PF_RW)
379 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSWrite);
380 else if (uErr & X86_TRAP_PF_RSVD)
381 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSReserved);
382 else if (uErr & X86_TRAP_PF_ID)
383 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSNXE);
384 else
385 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eUSRead);
386 }
387 else
388 { /* Supervisor */
389 if (!(uErr & X86_TRAP_PF_P))
390 {
391 if (uErr & X86_TRAP_PF_RW)
392 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eSVNotPresentWrite);
393 else
394 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eSVNotPresentRead);
395 }
396 else if (uErr & X86_TRAP_PF_RW)
397 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eSVWrite);
398 else if (uErr & X86_TRAP_PF_ID)
399 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eSNXE);
400 else if (uErr & X86_TRAP_PF_RSVD)
401 STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eSVReserved);
402 }
403#endif
404
405 /*
406 * Call the worker.
407 */
408 int rc = PGM_BTH_PFN(Trap0eHandler, pVM)(pVM, uErr, pRegFrame, pvFault);
409 if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
410 rc = VINF_SUCCESS;
411 STAM_STATS({ if (rc == VINF_EM_RAW_GUEST_TRAP) STAM_COUNTER_INC(&pVM->pgm.s.StatRZTrap0eGuestPF); });
412 STAM_STATS({ if (!pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution))
413 pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatRZTrap0eTime2Misc; });
414 STAM_PROFILE_STOP_EX(&pVM->pgm.s.StatRZTrap0e, pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution), a);
415 return rc;
416}
417#endif /* !IN_RING3 */
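
/*
 * Worked example of the error code decoding in the statistics block above: a #PF
 * with uErr = 0x07 has X86_TRAP_PF_P, X86_TRAP_PF_RW and X86_TRAP_PF_US set, i.e. a
 * user-mode write that hit a protection violation, so it would be counted in
 * StatRZTrap0eUSWrite; clearing the P bit (uErr = 0x06) would move it to
 * StatRZTrap0eUSNotPresentWrite.
 */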
418
419
420/**
421 * Prefetch a page
422 *
423 * Typically used to sync commonly used pages before entering raw mode
424 * after a CR3 reload.
425 *
426 * @returns VBox status code suitable for scheduling.
427 * @retval VINF_SUCCESS on success.
428 * @retval VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
429 * @param pVM VM handle.
430 * @param GCPtrPage Page to prefetch.
431 */
432VMMDECL(int) PGMPrefetchPage(PVM pVM, RTGCPTR GCPtrPage)
433{
434 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,Prefetch), a);
435 int rc = PGM_BTH_PFN(PrefetchPage, pVM)(pVM, (RTGCUINTPTR)GCPtrPage);
436 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,Prefetch), a);
437 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || VBOX_FAILURE(rc), ("rc=%Vrc\n", rc));
438 return rc;
439}
440
441
442/**
443 * Gets the mapping corresponding to the specified address (if any).
444 *
445 * @returns Pointer to the mapping.
446 * @returns NULL if not found.
447 *
448 * @param pVM The virtual machine.
449 * @param GCPtr The guest context pointer.
450 */
451PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr)
452{
453 PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
454 while (pMapping)
455 {
456 if ((uintptr_t)GCPtr < (uintptr_t)pMapping->GCPtr)
457 break;
458 if ((uintptr_t)GCPtr - (uintptr_t)pMapping->GCPtr < pMapping->cb)
459 return pMapping;
460 pMapping = pMapping->CTX_SUFF(pNext);
461 }
462 return NULL;
463}
464
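/*
 * Note on the walk above: it relies on the mapping list being sorted by GCPtr in
 * ascending order, which is what allows the early break as soon as GCPtr falls
 * below the start of the current mapping.
 */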
465
466/**
467 * Verifies a range of pages for read or write access
468 *
469 * Only checks the guest's page tables
470 *
471 * @returns VBox status code.
472 * @param pVM VM handle.
473 * @param Addr Guest virtual address to check
474 * @param cbSize Access size
475 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
476 */
477VMMDECL(int) PGMIsValidAccess(PVM pVM, RTGCUINTPTR Addr, uint32_t cbSize, uint32_t fAccess)
478{
479 /*
480 * Validate input.
481 */
482 if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
483 {
484 AssertMsgFailed(("PGMIsValidAccess: invalid access type %08x\n", fAccess));
485 return VERR_INVALID_PARAMETER;
486 }
487
488 uint64_t fPage;
489 int rc = PGMGstGetPage(pVM, (RTGCPTR)Addr, &fPage, NULL);
490 if (VBOX_FAILURE(rc))
491 {
492 Log(("PGMIsValidAccess: access violation for %VGv rc=%d\n", Addr, rc));
493 return VINF_EM_RAW_GUEST_TRAP;
494 }
495
496 /*
497 * Check if the access would cause a page fault
498 *
499 * Note that hypervisor page directories are not present in the guest's tables, so this check
500 * is sufficient.
501 */
502 bool fWrite = !!(fAccess & X86_PTE_RW);
503 bool fUser = !!(fAccess & X86_PTE_US);
504 if ( !(fPage & X86_PTE_P)
505 || (fWrite && !(fPage & X86_PTE_RW))
506 || (fUser && !(fPage & X86_PTE_US)) )
507 {
508 Log(("PGMIsValidAccess: access violation for %VGv attr %#llx vs %d:%d\n", Addr, fPage, fWrite, fUser));
509 return VINF_EM_RAW_GUEST_TRAP;
510 }
511 if ( VBOX_SUCCESS(rc)
512 && PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize))
513 return PGMIsValidAccess(pVM, Addr + PAGE_SIZE, (cbSize > PAGE_SIZE) ? cbSize - PAGE_SIZE : 1, fAccess);
514 return rc;
515}
516
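/*
 * A minimal usage sketch for PGMIsValidAccess (hypothetical caller, illustration
 * only): checking whether the guest could write a small structure that may straddle
 * a page boundary. X86_PTE_RW requests a write check; adding X86_PTE_US would check
 * the access as a user-mode access instead of a supervisor one.
 */
#if 0
static int pgmSampleCheckGuestWrite(PVM pVM, RTGCUINTPTR GCPtrStruct, uint32_t cbStruct)
{
    int rc = PGMIsValidAccess(pVM, GCPtrStruct, cbStruct, X86_PTE_RW);
    if (rc == VINF_EM_RAW_GUEST_TRAP)
        Log(("guest write to %VGv would fault\n", GCPtrStruct));
    return rc;
}
#endif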
517
518/**
519 * Verifies a range of pages for read or write access
520 *
521 * Supports handling of pages marked for dirty bit tracking and CSAM
522 *
523 * @returns VBox status code.
524 * @param pVM VM handle.
525 * @param Addr Guest virtual address to check
526 * @param cbSize Access size
527 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
528 */
529VMMDECL(int) PGMVerifyAccess(PVM pVM, RTGCUINTPTR Addr, uint32_t cbSize, uint32_t fAccess)
530{
531 /*
532 * Validate input.
533 */
534 if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
535 {
536 AssertMsgFailed(("PGMVerifyAccess: invalid access type %08x\n", fAccess));
537 return VERR_INVALID_PARAMETER;
538 }
539
540 uint64_t fPageGst;
541 int rc = PGMGstGetPage(pVM, (RTGCPTR)Addr, &fPageGst, NULL);
542 if (VBOX_FAILURE(rc))
543 {
544 Log(("PGMVerifyAccess: access violation for %VGv rc=%d\n", Addr, rc));
545 return VINF_EM_RAW_GUEST_TRAP;
546 }
547
548 /*
549 * Check if the access would cause a page fault
550 *
551 * Note that hypervisor page directories are not present in the guest's tables, so this check
552 * is sufficient.
553 */
554 const bool fWrite = !!(fAccess & X86_PTE_RW);
555 const bool fUser = !!(fAccess & X86_PTE_US);
556 if ( !(fPageGst & X86_PTE_P)
557 || (fWrite && !(fPageGst & X86_PTE_RW))
558 || (fUser && !(fPageGst & X86_PTE_US)) )
559 {
560 Log(("PGMVerifyAccess: access violation for %VGv attr %#llx vs %d:%d\n", Addr, fPageGst, fWrite, fUser));
561 return VINF_EM_RAW_GUEST_TRAP;
562 }
563
564 if (!HWACCMIsNestedPagingActive(pVM))
565 {
566 /*
567 * Next step is to verify if we protected this page for dirty bit tracking or for CSAM scanning
568 */
569 rc = PGMShwGetPage(pVM, (RTGCPTR)Addr, NULL, NULL);
570 if ( rc == VERR_PAGE_NOT_PRESENT
571 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
572 {
573 /*
574 * Page is not present in our page tables.
575 * Try to sync it!
576 */
577 Assert(X86_TRAP_PF_RW == X86_PTE_RW && X86_TRAP_PF_US == X86_PTE_US);
578 uint32_t uErr = fAccess & (X86_TRAP_PF_RW | X86_TRAP_PF_US);
579 rc = PGM_BTH_PFN(VerifyAccessSyncPage, pVM)(pVM, Addr, fPageGst, uErr);
580 if (rc != VINF_SUCCESS)
581 return rc;
582 }
583 else
584 AssertMsg(rc == VINF_SUCCESS, ("PGMShwGetPage %VGv failed with %Vrc\n", Addr, rc));
585 }
586
587#if 0 /* def VBOX_STRICT; triggers too often now */
588 /*
589 * This check is a bit paranoid, but useful.
590 */
591 /** @note this will assert when writing to monitored pages (a bit annoying actually) */
592 uint64_t fPageShw;
593 rc = PGMShwGetPage(pVM, (RTGCPTR)Addr, &fPageShw, NULL);
594 if ( (rc == VERR_PAGE_NOT_PRESENT || VBOX_FAILURE(rc))
595 || (fWrite && !(fPageShw & X86_PTE_RW))
596 || (fUser && !(fPageShw & X86_PTE_US)) )
597 {
598 AssertMsgFailed(("Unexpected access violation for %VGv! rc=%Vrc write=%d user=%d\n",
599 Addr, rc, fWrite && !(fPageShw & X86_PTE_RW), fUser && !(fPageShw & X86_PTE_US)));
600 return VINF_EM_RAW_GUEST_TRAP;
601 }
602#endif
603
604 if ( VBOX_SUCCESS(rc)
605 && ( PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize - 1)
606 || Addr + cbSize < Addr))
607 {
608 /* Don't recursively call PGMVerifyAccess as we might run out of stack. */
609 for (;;)
610 {
611 Addr += PAGE_SIZE;
612 if (cbSize > PAGE_SIZE)
613 cbSize -= PAGE_SIZE;
614 else
615 cbSize = 1;
616 rc = PGMVerifyAccess(pVM, Addr, 1, fAccess);
617 if (rc != VINF_SUCCESS)
618 break;
619 if (PAGE_ADDRESS(Addr) == PAGE_ADDRESS(Addr + cbSize - 1))
620 break;
621 }
622 }
623 return rc;
624}
625
626
627#ifndef IN_GC
628/**
629 * Emulation of the invlpg instruction (HC only actually).
630 *
631 * @returns VBox status code.
632 * @param pVM VM handle.
633 * @param GCPtrPage Page to invalidate.
634 * @remark ASSUMES the page table entry or page directory is
635 * valid. Fairly safe, but there could be edge cases!
636 * @todo Flush page or page directory only if necessary!
637 */
638VMMDECL(int) PGMInvalidatePage(PVM pVM, RTGCPTR GCPtrPage)
639{
640 int rc;
641
642 Log3(("PGMInvalidatePage: GCPtrPage=%VGv\n", GCPtrPage));
643
644 /** @todo merge PGMGCInvalidatePage with this one */
645
646#ifndef IN_RING3
647 /*
648 * Notify the recompiler so it can record this instruction.
649 * Failure happens when it's out of space. We'll return to HC in that case.
650 */
651 rc = REMNotifyInvalidatePage(pVM, GCPtrPage);
652 if (VBOX_FAILURE(rc))
653 return rc;
654#endif
655
656 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePage), a);
657 rc = PGM_BTH_PFN(InvalidatePage, pVM)(pVM, GCPtrPage);
658 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePage), a);
659
660#ifndef IN_RING0
661 /*
662 * Check if we have a pending update of the CR3 monitoring.
663 */
664 if ( VBOX_SUCCESS(rc)
665 && (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3))
666 {
667 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
668 Assert(!pVM->pgm.s.fMappingsFixed);
669 Assert(pVM->pgm.s.GCPhysCR3 == pVM->pgm.s.GCPhysGstCR3Monitored);
670 rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, pVM->pgm.s.GCPhysCR3);
671 }
672#endif
673
674#ifdef IN_RING3
675 /*
676 * Inform CSAM about the flush
677 */
678 /** @note this is to check if monitored pages have been changed; when we implement callbacks for virtual handlers, this is no longer required. */
679 CSAMR3FlushPage(pVM, GCPtrPage);
680#endif
681 return rc;
682}
683#endif
684
685
686/**
687 * Executes an instruction using the interpreter.
688 *
689 * @returns VBox status code (appropriate for trap handling and GC return).
690 * @param pVM VM handle.
691 * @param pRegFrame Register frame.
692 * @param pvFault Fault address.
693 */
694VMMDECL(int) PGMInterpretInstruction(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
695{
696 uint32_t cb;
697 int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);
698 if (rc == VERR_EM_INTERPRETER)
699 rc = VINF_EM_RAW_EMULATE_INSTR;
700 if (rc != VINF_SUCCESS)
701 Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%VGv)\n", rc, pvFault));
702 return rc;
703}
704
705
706/**
707 * Gets effective page information (from the VMM page directory).
708 *
709 * @returns VBox status.
710 * @param pVM VM Handle.
711 * @param GCPtr Guest Context virtual address of the page.
712 * @param pfFlags Where to store the flags. These are X86_PTE_*.
713 * @param pHCPhys Where to store the HC physical address of the page.
714 * This is page aligned.
715 * @remark You should use PGMMapGetPage() for pages in a mapping.
716 */
717VMMDECL(int) PGMShwGetPage(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
718{
719 return PGM_SHW_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtr, pfFlags, pHCPhys);
720}
721
722
723/**
724 * Sets (replaces) the page flags for a range of pages in the shadow context.
725 *
726 * @returns VBox status.
727 * @param pVM VM handle.
728 * @param GCPtr The address of the first page.
729 * @param cb The size of the range in bytes.
730 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
731 * @remark You must use PGMMapSetPage() for pages in a mapping.
732 */
733VMMDECL(int) PGMShwSetPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
734{
735 return PGMShwModifyPage(pVM, GCPtr, cb, fFlags, 0);
736}
737
738
739/**
740 * Modify page flags for a range of pages in the shadow context.
741 *
742 * The existing flags are ANDed with the fMask and ORed with the fFlags.
743 *
744 * @returns VBox status code.
745 * @param pVM VM handle.
746 * @param GCPtr Virtual address of the first page in the range.
747 * @param cb Size (in bytes) of the range to apply the modification to.
748 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
749 * @param fMask The AND mask - page flags X86_PTE_*.
750 * Be very CAREFUL when ~'ing constants which could be 32-bit!
751 * @remark You must use PGMMapModifyPage() for pages in a mapping.
752 */
753VMMDECL(int) PGMShwModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
754{
755 /*
756 * Validate input.
757 */
758 if (fFlags & X86_PTE_PAE_PG_MASK)
759 {
760 AssertMsgFailed(("fFlags=%#llx\n", fFlags));
761 return VERR_INVALID_PARAMETER;
762 }
763 if (!cb)
764 {
765 AssertFailed();
766 return VERR_INVALID_PARAMETER;
767 }
768
769 /*
770 * Align the input.
771 */
772 cb += (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
773 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
774 GCPtr = (RTGCPTR)((RTGCUINTPTR)GCPtr & PAGE_BASE_GC_MASK); /** @todo this ain't necessary, right... */
775
776 /*
777 * Call worker.
778 */
779 return PGM_SHW_PFN(ModifyPage, pVM)(pVM, (RTGCUINTPTR)GCPtr, cb, fFlags, fMask);
780}
781
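/*
 * Worked example of the size/address adjustment above (illustrative numbers):
 * GCPtr = 0x10ff8 with cb = 0x10 touches two pages. cb += GCPtr & PAGE_OFFSET_MASK
 * gives 0x1008, RT_ALIGN_Z rounds that up to 0x2000, and masking GCPtr with
 * PAGE_BASE_GC_MASK yields 0x10000 - so the worker ends up modifying both pages
 * covered by the original byte range.
 */
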
782/**
783 * Syncs the SHADOW page directory pointer for the specified address. Allocates
784 * backing pages in case the PDPT entry is missing.
785 *
786 * @returns VBox status.
787 * @param pVM VM handle.
788 * @param GCPtr The address.
789 * @param pGstPdpe Guest PDPT entry
790 * @param ppPD Receives address of page directory
791 */
792VMMDECL(int) PGMShwSyncPAEPDPtr(PVM pVM, RTGCUINTPTR GCPtr, PX86PDPE pGstPdpe, PX86PDPAE *ppPD)
793{
794 PPGM pPGM = &pVM->pgm.s;
795 PPGMPOOL pPool = pPGM->CTX_SUFF(pPool);
796 PPGMPOOLPAGE pShwPage;
797 int rc;
798
799 Assert(!HWACCMIsNestedPagingActive(pVM));
800
801 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
802 PX86PDPT pPdpt = pVM->pgm.s.CTXMID(p,PaePDPT);
803 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
804
805 /* Allocate page directory if not present. */
806 if ( !pPdpe->n.u1Present
807 && !(pPdpe->u & X86_PDPE_PG_MASK))
808 {
809 PX86PDPE pPdptGst = &CTXSUFF(pPGM->pGstPaePDPT)->a[iPdPt];
810
811 Assert(!(pPdpe->u & X86_PDPE_PG_MASK));
812 /* Create a reference back to the PDPT by using the index in its shadow page. */
813 rc = pgmPoolAlloc(pVM, pPdptGst->u & X86_PDPE_PG_MASK, PGMPOOLKIND_PAE_PD_FOR_PAE_PD, PGMPOOL_IDX_PDPT, iPdPt, &pShwPage);
814 if (rc == VERR_PGM_POOL_FLUSHED)
815 {
816 Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
817 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
818 return VINF_PGM_SYNC_CR3;
819 }
820 AssertRCReturn(rc, rc);
821 }
822 else
823 {
824 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
825 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
826 }
827 /* The PD was cached or created; hook it up now. */
828 pPdpe->u |= pShwPage->Core.Key
829 | (pGstPdpe->u & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));
830
831 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
832 return VINF_SUCCESS;
833}
834
835/**
836 * Gets the SHADOW page directory pointer for the specified address.
837 *
838 * @returns VBox status.
839 * @param pVM VM handle.
840 * @param GCPtr The address.
841 * @param ppPdpt Receives address of pdpt
842 * @param ppPD Receives address of page directory
843 */
844VMMDECL(int) PGMShwGetPAEPDPtr(PVM pVM, RTGCUINTPTR GCPtr, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
845{
846 PPGM pPGM = &pVM->pgm.s;
847 PPGMPOOL pPool = pPGM->CTX_SUFF(pPool);
848 PPGMPOOLPAGE pShwPage;
849
850 Assert(!HWACCMIsNestedPagingActive(pVM));
851
852 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
853 PX86PDPT pPdpt = pVM->pgm.s.CTXMID(p,PaePDPT);
854 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
855
856 *ppPdpt = pPdpt;
857 if (!pPdpe->n.u1Present)
858 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
859
860 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
861 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
862
863 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
864 return VINF_SUCCESS;
865}
866
867#ifndef IN_GC
868/**
869 * Syncs the SHADOW page directory pointer for the specified address. Allocates
870 * backing pages in case the PDPT or PML4 entry is missing.
871 *
872 * @returns VBox status.
873 * @param pVM VM handle.
874 * @param GCPtr The address.
875 * @param pGstPml4e Guest PML4 entry
876 * @param pGstPdpe Guest PDPT entry
877 * @param ppPD Receives address of page directory
878 */
879VMMDECL(int) PGMShwSyncLongModePDPtr(PVM pVM, RTGCUINTPTR64 GCPtr, PX86PML4E pGstPml4e, PX86PDPE pGstPdpe, PX86PDPAE *ppPD)
880{
881 PPGM pPGM = &pVM->pgm.s;
882 const unsigned iPml4e = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
883 PPGMPOOL pPool = pPGM->CTX_SUFF(pPool);
884 PX86PML4E pPml4e;
885 PPGMPOOLPAGE pShwPage;
886 int rc;
887 bool fNestedPaging = HWACCMIsNestedPagingActive(pVM);
888
889 Assert(pVM->pgm.s.pHCPaePML4);
890
891 /* Allocate page directory pointer table if not present. */
892 pPml4e = &pPGM->pHCPaePML4->a[iPml4e];
893 if ( !pPml4e->n.u1Present
894 && !(pPml4e->u & X86_PML4E_PG_MASK))
895 {
896 Assert(!(pPml4e->u & X86_PML4E_PG_MASK));
897
898 if (!fNestedPaging)
899 {
900 Assert(pVM->pgm.s.pHCShwAmd64CR3);
901 Assert(pPGM->pGstPaePML4HC);
902
903 PX86PML4E pPml4eGst = &pPGM->pGstPaePML4HC->a[iPml4e];
904
905 rc = pgmPoolAlloc(pVM, pPml4eGst->u & X86_PML4E_PG_MASK, PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT, pVM->pgm.s.pHCShwAmd64CR3->idx, iPml4e, &pShwPage);
906 }
907 else
908 rc = pgmPoolAlloc(pVM, GCPtr + RT_BIT_64(63) /* hack: make the address unique */, PGMPOOLKIND_64BIT_PDPT_FOR_PHYS, PGMPOOL_IDX_NESTED_ROOT, iPml4e, &pShwPage);
909
910 if (rc == VERR_PGM_POOL_FLUSHED)
911 {
912 Log(("PGMShwSyncLongModePDPtr: PGM pool flushed (1) -> signal sync cr3\n"));
913 Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
914 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
915 return VINF_PGM_SYNC_CR3;
916 }
917 AssertRCReturn(rc, rc);
918 }
919 else
920 {
921 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
922 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
923 }
924 /* The PDPT was cached or created; hook it up now. */
925 pPml4e->u |= pShwPage->Core.Key
926 | (pGstPml4e->u & ~(X86_PML4E_PG_MASK | X86_PML4E_AVL_MASK | X86_PML4E_PCD | X86_PML4E_PWT));
927
928 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
929 PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
930 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
931
932 /* Allocate page directory if not present. */
933 if ( !pPdpe->n.u1Present
934 && !(pPdpe->u & X86_PDPE_PG_MASK))
935 {
936 if (!fNestedPaging)
937 {
938 Assert(pPGM->pGstPaePML4HC);
939
940 PX86PML4E pPml4eGst = &pPGM->pGstPaePML4HC->a[iPml4e];
941 PX86PDPT pPdptGst;
942 rc = PGM_GCPHYS_2_PTR(pVM, pPml4eGst->u & X86_PML4E_PG_MASK, &pPdptGst);
943 AssertRCReturn(rc, rc);
944
945 Assert(!(pPdpe->u & X86_PDPE_PG_MASK));
946 /* Create a reference back to the PDPT by using the index in its shadow page. */
947 rc = pgmPoolAlloc(pVM, pPdptGst->a[iPdPt].u & X86_PDPE_PG_MASK, PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD, pShwPage->idx, iPdPt, &pShwPage);
948 }
949 else
950 rc = pgmPoolAlloc(pVM, GCPtr + RT_BIT_64(62) /* hack: make the address unique */, PGMPOOLKIND_64BIT_PD_FOR_PHYS, pShwPage->idx, iPdPt, &pShwPage);
951
952 if (rc == VERR_PGM_POOL_FLUSHED)
953 {
954 Log(("PGMShwSyncLongModePDPtr: PGM pool flushed (2) -> signal sync cr3\n"));
955 Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
956 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
957 return VINF_PGM_SYNC_CR3;
958 }
959 AssertRCReturn(rc, rc);
960 }
961 else
962 {
963 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
964 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
965 }
966 /* The PD was cached or created; hook it up now. */
967 pPdpe->u |= pShwPage->Core.Key
968 | (pGstPdpe->u & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));
969
970 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
971 return VINF_SUCCESS;
972}
973
974/**
975 * Gets the SHADOW page directory pointer for the specified address.
976 *
977 * @returns VBox status.
978 * @param pVM VM handle.
979 * @param GCPtr The address.
980 * @param ppPdpt Receives address of pdpt
981 * @param ppPD Receives address of page directory
982 */
983VMMDECL(int) PGMShwGetLongModePDPtr(PVM pVM, RTGCUINTPTR64 GCPtr, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
984{
985 PPGM pPGM = &pVM->pgm.s;
986 const unsigned iPml4e = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
987 PPGMPOOL pPool = pPGM->CTX_SUFF(pPool);
988 PX86PML4E pPml4e;
989 PPGMPOOLPAGE pShwPage;
990
991 AssertReturn(pVM->pgm.s.pHCPaePML4, VERR_INTERNAL_ERROR);
992
993 pPml4e = &pPGM->pHCPaePML4->a[iPml4e];
994 if (!pPml4e->n.u1Present)
995 return VERR_PAGE_MAP_LEVEL4_NOT_PRESENT;
996
997 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
998 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
999
1000 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1001 PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1002 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1003
1004 *ppPdpt = pPdpt;
1005 if (!pPdpe->n.u1Present)
1006 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1007
1008 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
1009 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1010
1011 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1012 return VINF_SUCCESS;
1013}
1014
1015/**
1016 * Syncs the SHADOW EPT page directory pointer for the specified address. Allocates
1017 * backing pages in case the PDPT or PML4 entry is missing.
1018 *
1019 * @returns VBox status.
1020 * @param pVM VM handle.
1021 * @param GCPtr The address.
1022 * @param ppPdpt Receives address of pdpt
1023 * @param ppPD Receives address of page directory
1024 */
1025VMMDECL(int) PGMShwGetEPTPDPtr(PVM pVM, RTGCUINTPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD)
1026{
1027 PPGM pPGM = &pVM->pgm.s;
1028 const unsigned iPml4e = (GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
1029 PPGMPOOL pPool = pPGM->CTX_SUFF(pPool);
1030 PEPTPML4 pPml4 = (PEPTPML4)pPGM->pHCNestedRoot;
1031 PEPTPML4E pPml4e;
1032 PPGMPOOLPAGE pShwPage;
1033 int rc;
1034
1035 Assert(HWACCMIsNestedPagingActive(pVM));
1036 Assert(pPml4);
1037
1038 /* Allocate page directory pointer table if not present. */
1039 pPml4e = &pPml4->a[iPml4e];
1040 if ( !pPml4e->n.u1Present
1041 && !(pPml4e->u & EPT_PML4E_PG_MASK))
1042 {
1043 Assert(!(pPml4e->u & EPT_PML4E_PG_MASK));
1044
1045 rc = pgmPoolAlloc(pVM, (GCPtr & EPT_PML4E_PG_MASK) + RT_BIT_64(63) /* hack: make the address unique */, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOL_IDX_NESTED_ROOT, iPml4e, &pShwPage);
1046 if (rc == VERR_PGM_POOL_FLUSHED)
1047 {
1048 Log(("PGMShwSyncEPTPDPtr: PGM pool flushed (1) -> signal sync cr3\n"));
1049 Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
1050 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
1051 return VINF_PGM_SYNC_CR3;
1052 }
1053 AssertRCReturn(rc, rc);
1054 }
1055 else
1056 {
1057 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
1058 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1059 }
1060 /* The PDPT was cached or created; hook it up now and fill with the default value. */
1061 pPml4e->u = pShwPage->Core.Key;
1062 pPml4e->n.u1Present = 1;
1063 pPml4e->n.u1Write = 1;
1064 pPml4e->n.u1Execute = 1;
1065
1066 const unsigned iPdPt = (GCPtr >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
1067 PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1068 PEPTPDPTE pPdpe = &pPdpt->a[iPdPt];
1069
1070 if (ppPdpt)
1071 *ppPdpt = pPdpt;
1072
1073 /* Allocate page directory if not present. */
1074 if ( !pPdpe->n.u1Present
1075 && !(pPdpe->u & EPT_PDPTE_PG_MASK))
1076 {
1077 rc = pgmPoolAlloc(pVM, (GCPtr & EPT_PDPTE_PG_MASK) + RT_BIT_64(62) /* hack: make the address unique */, PGMPOOLKIND_64BIT_PD_FOR_PHYS, pShwPage->idx, iPdPt, &pShwPage);
1078 if (rc == VERR_PGM_POOL_FLUSHED)
1079 {
1080 Log(("PGMShwSyncEPTPDPtr: PGM pool flushed (2) -> signal sync cr3\n"));
1081 Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
1082 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
1083 return VINF_PGM_SYNC_CR3;
1084 }
1085 AssertRCReturn(rc, rc);
1086 }
1087 else
1088 {
1089 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & EPT_PDPTE_PG_MASK);
1090 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1091 }
1092 /* The PD was cached or created; hook it up now and fill with the default value. */
1093 pPdpe->u = pShwPage->Core.Key;
1094 pPdpe->n.u1Present = 1;
1095 pPdpe->n.u1Write = 1;
1096 pPdpe->n.u1Execute = 1;
1097
1098 *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1099 return VINF_SUCCESS;
1100}
1101
1102#endif
1103
1104/**
1105 * Gets effective Guest OS page information.
1106 *
1107 * When GCPtr is in a big page, the function will return as if it was a normal
1108 * 4KB page. If the need for distinguishing between big and normal page becomes
1109 * necessary at a later point, a PGMGstGetPage() will be created for that
1110 * purpose.
1111 *
1112 * @returns VBox status.
1113 * @param pVM VM Handle.
1114 * @param GCPtr Guest Context virtual address of the page.
1115 * @param pfFlags Where to store the flags. These are X86_PTE_*, even for big pages.
1116 * @param pGCPhys Where to store the GC physical address of the page.
1117 * This is page aligned. The fact that the
1118 */
1119VMMDECL(int) PGMGstGetPage(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
1120{
1121 return PGM_GST_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtr, pfFlags, pGCPhys);
1122}
1123
1124
1125/**
1126 * Checks if the page is present.
1127 *
1128 * @returns true if the page is present.
1129 * @returns false if the page is not present.
1130 * @param pVM The VM handle.
1131 * @param GCPtr Address within the page.
1132 */
1133VMMDECL(bool) PGMGstIsPagePresent(PVM pVM, RTGCPTR GCPtr)
1134{
1135 int rc = PGMGstGetPage(pVM, GCPtr, NULL, NULL);
1136 return VBOX_SUCCESS(rc);
1137}
1138
1139
1140/**
1141 * Sets (replaces) the page flags for a range of pages in the guest's tables.
1142 *
1143 * @returns VBox status.
1144 * @param pVM VM handle.
1145 * @param GCPtr The address of the first page.
1146 * @param cb The size of the range in bytes.
1147 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
1148 */
1149VMMDECL(int) PGMGstSetPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
1150{
1151 return PGMGstModifyPage(pVM, GCPtr, cb, fFlags, 0);
1152}
1153
1154
1155/**
1156 * Modify page flags for a range of pages in the guest's tables
1157 *
1158 * The existing flags are ANDed with the fMask and ORed with the fFlags.
1159 *
1160 * @returns VBox status code.
1161 * @param pVM VM handle.
1162 * @param GCPtr Virtual address of the first page in the range.
1163 * @param cb Size (in bytes) of the range to apply the modification to.
1164 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
1165 * @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
1166 * Be very CAREFUL when ~'ing constants which could be 32-bit!
1167 */
1168VMMDECL(int) PGMGstModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
1169{
1170 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,GstModifyPage), a);
1171
1172 /*
1173 * Validate input.
1174 */
1175 if (fFlags & X86_PTE_PAE_PG_MASK)
1176 {
1177 AssertMsgFailed(("fFlags=%#llx\n", fFlags));
1178 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,GstModifyPage), a);
1179 return VERR_INVALID_PARAMETER;
1180 }
1181
1182 if (!cb)
1183 {
1184 AssertFailed();
1185 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,GstModifyPage), a);
1186 return VERR_INVALID_PARAMETER;
1187 }
1188
1189 LogFlow(("PGMGstModifyPage %VGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));
1190
1191 /*
1192 * Adjust input.
1193 */
1194 cb += (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
1195 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
1196 GCPtr = (RTGCPTR)((RTGCUINTPTR)GCPtr & PAGE_BASE_GC_MASK);
1197
1198 /*
1199 * Call worker.
1200 */
1201 int rc = PGM_GST_PFN(ModifyPage, pVM)(pVM, (RTGCUINTPTR)GCPtr, cb, fFlags, fMask);
1202
1203 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,GstModifyPage), a);
1204 return rc;
1205}
1206
1207
1208/**
1209 * Gets the current CR3 register value for the shadow memory context.
1210 * @returns CR3 value.
1211 * @param pVM The VM handle.
1212 */
1213VMMDECL(RTHCPHYS) PGMGetHyperCR3(PVM pVM)
1214{
1215 PGMMODE enmShadowMode = pVM->pgm.s.enmShadowMode;
1216 switch (enmShadowMode)
1217 {
1218 case PGMMODE_32_BIT:
1219 return pVM->pgm.s.HCPhys32BitPD;
1220
1221 case PGMMODE_PAE:
1222 case PGMMODE_PAE_NX:
1223 return pVM->pgm.s.HCPhysPaePDPT;
1224
1225 case PGMMODE_AMD64:
1226 case PGMMODE_AMD64_NX:
1227 return pVM->pgm.s.HCPhysPaePML4;
1228
1229 case PGMMODE_EPT:
1230 return pVM->pgm.s.HCPhysNestedRoot;
1231
1232 case PGMMODE_NESTED:
1233 return PGMGetNestedCR3(pVM, PGMGetHostMode(pVM));
1234
1235 default:
1236 AssertMsgFailed(("enmShadowMode=%d\n", enmShadowMode));
1237 return ~0;
1238 }
1239}
1240
1241/**
1242 * Gets the current CR3 register value for the nested memory context.
1243 * @returns CR3 value.
1244 * @param pVM The VM handle.
 * @param enmShadowMode The shadow paging mode.
1245 */
1246VMMDECL(RTHCPHYS) PGMGetNestedCR3(PVM pVM, PGMMODE enmShadowMode)
1247{
1248 switch (enmShadowMode)
1249 {
1250 case PGMMODE_32_BIT:
1251 return pVM->pgm.s.HCPhys32BitPD;
1252
1253 case PGMMODE_PAE:
1254 case PGMMODE_PAE_NX:
1255 return pVM->pgm.s.HCPhysPaePDPT;
1256
1257 case PGMMODE_AMD64:
1258 case PGMMODE_AMD64_NX:
1259 return pVM->pgm.s.HCPhysPaePML4;
1260
1261 default:
1262 AssertMsgFailed(("enmShadowMode=%d\n", enmShadowMode));
1263 return ~0;
1264 }
1265}
1266
1267/**
1268 * Gets the current CR3 register value for the EPT paging memory context.
1269 * @returns CR3 value.
1270 * @param pVM The VM handle.
1271 */
1272VMMDECL(RTHCPHYS) PGMGetEPTCR3(PVM pVM)
1273{
1274 return pVM->pgm.s.HCPhysNestedRoot;
1275}
1276
1277/**
1278 * Gets the CR3 register value for the 32-Bit shadow memory context.
1279 * @returns CR3 value.
1280 * @param pVM The VM handle.
1281 */
1282VMMDECL(RTHCPHYS) PGMGetHyper32BitCR3(PVM pVM)
1283{
1284 return pVM->pgm.s.HCPhys32BitPD;
1285}
1286
1287
1288/**
1289 * Gets the CR3 register value for the PAE shadow memory context.
1290 * @returns CR3 value.
1291 * @param pVM The VM handle.
1292 */
1293VMMDECL(RTHCPHYS) PGMGetHyperPaeCR3(PVM pVM)
1294{
1295 return pVM->pgm.s.HCPhysPaePDPT;
1296}
1297
1298
1299/**
1300 * Gets the CR3 register value for the AMD64 shadow memory context.
1301 * @returns CR3 value.
1302 * @param pVM The VM handle.
1303 */
1304VMMDECL(RTHCPHYS) PGMGetHyperAmd64CR3(PVM pVM)
1305{
1306 return pVM->pgm.s.HCPhysPaePML4;
1307}
1308
1309
1310/**
1311 * Gets the current CR3 register value for the HC intermediate memory context.
1312 * @returns CR3 value.
1313 * @param pVM The VM handle.
1314 */
1315VMMDECL(RTHCPHYS) PGMGetInterHCCR3(PVM pVM)
1316{
1317 switch (pVM->pgm.s.enmHostMode)
1318 {
1319 case SUPPAGINGMODE_32_BIT:
1320 case SUPPAGINGMODE_32_BIT_GLOBAL:
1321 return pVM->pgm.s.HCPhysInterPD;
1322
1323 case SUPPAGINGMODE_PAE:
1324 case SUPPAGINGMODE_PAE_GLOBAL:
1325 case SUPPAGINGMODE_PAE_NX:
1326 case SUPPAGINGMODE_PAE_GLOBAL_NX:
1327 return pVM->pgm.s.HCPhysInterPaePDPT;
1328
1329 case SUPPAGINGMODE_AMD64:
1330 case SUPPAGINGMODE_AMD64_GLOBAL:
1331 case SUPPAGINGMODE_AMD64_NX:
1332 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
1333 return pVM->pgm.s.HCPhysInterPaePDPT;
1334
1335 default:
1336 AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode));
1337 return ~0;
1338 }
1339}
1340
1341
1342/**
1343 * Gets the current CR3 register value for the GC intermediate memory context.
1344 * @returns CR3 value.
1345 * @param pVM The VM handle.
1346 */
1347VMMDECL(RTHCPHYS) PGMGetInterGCCR3(PVM pVM)
1348{
1349 switch (pVM->pgm.s.enmShadowMode)
1350 {
1351 case PGMMODE_32_BIT:
1352 return pVM->pgm.s.HCPhysInterPD;
1353
1354 case PGMMODE_PAE:
1355 case PGMMODE_PAE_NX:
1356 return pVM->pgm.s.HCPhysInterPaePDPT;
1357
1358 case PGMMODE_AMD64:
1359 case PGMMODE_AMD64_NX:
1360 return pVM->pgm.s.HCPhysInterPaePML4;
1361
1362 case PGMMODE_EPT:
1363 case PGMMODE_NESTED:
1364 return 0; /* not relevant */
1365
1366 default:
1367 AssertMsgFailed(("enmShadowMode=%d\n", pVM->pgm.s.enmShadowMode));
1368 return ~0;
1369 }
1370}
1371
1372
1373/**
1374 * Gets the CR3 register value for the 32-Bit intermediate memory context.
1375 * @returns CR3 value.
1376 * @param pVM The VM handle.
1377 */
1378VMMDECL(RTHCPHYS) PGMGetInter32BitCR3(PVM pVM)
1379{
1380 return pVM->pgm.s.HCPhysInterPD;
1381}
1382
1383
1384/**
1385 * Gets the CR3 register value for the PAE intermediate memory context.
1386 * @returns CR3 value.
1387 * @param pVM The VM handle.
1388 */
1389VMMDECL(RTHCPHYS) PGMGetInterPaeCR3(PVM pVM)
1390{
1391 return pVM->pgm.s.HCPhysInterPaePDPT;
1392}
1393
1394
1395/**
1396 * Gets the CR3 register value for the AMD64 intermediate memory context.
1397 * @returns CR3 value.
1398 * @param pVM The VM handle.
1399 */
1400VMMDECL(RTHCPHYS) PGMGetInterAmd64CR3(PVM pVM)
1401{
1402 return pVM->pgm.s.HCPhysInterPaePML4;
1403}
1404
1405
1406/**
1407 * Performs and schedules necessary updates following a CR3 load or reload.
1408 *
1409 * This will normally involve mapping the guest PD or nPDPT
1410 *
1411 * @returns VBox status code.
1412 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
1413 * safely be ignored and overridden since the FF will be set too then.
1414 * @param pVM VM handle.
1415 * @param cr3 The new cr3.
1416 * @param fGlobal Indicates whether this is a global flush or not.
1417 */
1418VMMDECL(int) PGMFlushTLB(PVM pVM, uint64_t cr3, bool fGlobal)
1419{
1420 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLB), a);
1421
1422 /*
1423 * Always flag the necessary updates; necessary for hardware acceleration
1424 */
1425 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
1426 if (fGlobal)
1427 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
1428 LogFlow(("PGMFlushTLB: cr3=%VX64 OldCr3=%VX64 fGlobal=%d\n", cr3, pVM->pgm.s.GCPhysCR3, fGlobal));
1429
1430 /*
1431 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
1432 */
1433 int rc = VINF_SUCCESS;
1434 RTGCPHYS GCPhysCR3;
1435 if ( pVM->pgm.s.enmGuestMode == PGMMODE_PAE
1436 || pVM->pgm.s.enmGuestMode == PGMMODE_PAE_NX
1437 || pVM->pgm.s.enmGuestMode == PGMMODE_AMD64
1438 || pVM->pgm.s.enmGuestMode == PGMMODE_AMD64_NX)
1439 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1440 else
1441 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1442 if (pVM->pgm.s.GCPhysCR3 != GCPhysCR3)
1443 {
1444 pVM->pgm.s.GCPhysCR3 = GCPhysCR3;
1445 rc = PGM_GST_PFN(MapCR3, pVM)(pVM, GCPhysCR3);
1446 if (VBOX_SUCCESS(rc) && !pVM->pgm.s.fMappingsFixed)
1447 {
1448 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1449 rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, GCPhysCR3);
1450 }
1451 if (fGlobal)
1452 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLBNewCR3Global));
1453 else
1454 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLBNewCR3));
1455 }
1456 else
1457 {
1458 /*
1459 * Check if we have a pending update of the CR3 monitoring.
1460 */
1461 if (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
1462 {
1463 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1464 Assert(!pVM->pgm.s.fMappingsFixed);
1465 rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, GCPhysCR3);
1466 }
1467 if (fGlobal)
1468 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLBSameCR3Global));
1469 else
1470 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLBSameCR3));
1471 }
1472
1473 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,FlushTLB), a);
1474 return rc;
1475}
1476
1477/**
1478 * Performs and schedules necessary updates following a CR3 load or reload,
1479 * without actually flushing the TLB as with PGMFlushTLB.
1480 *
1481 * This will normally involve mapping the guest PD or nPDPT
1482 *
1483 * @returns VBox status code.
1484 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
1485 * safely be ignored and overridden since the FF will be set too then.
1486 * @param pVM VM handle.
1487 * @param cr3 The new cr3.
1488 */
1489VMMDECL(int) PGMUpdateCR3(PVM pVM, uint64_t cr3)
1490{
1491 LogFlow(("PGMUpdateCR3: cr3=%VX64 OldCr3=%VX64\n", cr3, pVM->pgm.s.GCPhysCR3));
1492
1493 /* We assume we're only called in nested paging mode. */
1494 Assert(pVM->pgm.s.fMappingsFixed);
1495 Assert(!(pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3));
1496 Assert(pVM->pgm.s.enmShadowMode == PGMMODE_NESTED || pVM->pgm.s.enmShadowMode == PGMMODE_EPT);
1497
1498 /*
1499 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
1500 */
1501 int rc = VINF_SUCCESS;
1502 RTGCPHYS GCPhysCR3;
1503 if ( pVM->pgm.s.enmGuestMode == PGMMODE_PAE
1504 || pVM->pgm.s.enmGuestMode == PGMMODE_PAE_NX
1505 || pVM->pgm.s.enmGuestMode == PGMMODE_AMD64
1506 || pVM->pgm.s.enmGuestMode == PGMMODE_AMD64_NX)
1507 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1508 else
1509 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1510 if (pVM->pgm.s.GCPhysCR3 != GCPhysCR3)
1511 {
1512 pVM->pgm.s.GCPhysCR3 = GCPhysCR3;
1513 rc = PGM_GST_PFN(MapCR3, pVM)(pVM, GCPhysCR3);
1514 }
1515 AssertRC(rc);
1516 return rc;
1517}
1518
1519/**
1520 * Synchronize the paging structures.
1521 *
1522 * This function is called in response to the VM_FF_PGM_SYNC_CR3 and
1523 * VM_FF_PGM_SYNC_CR3_NON_GLOBAL. Those two force action flags are set
1524 * in several places, most importantly whenever the CR3 is loaded.
1525 *
1526 * @returns VBox status code.
1527 * @param pVM The virtual machine.
1528 * @param cr0 Guest context CR0 register
1529 * @param cr3 Guest context CR3 register
1530 * @param cr4 Guest context CR4 register
1531 * @param fGlobal Including global page directories or not
1532 */
1533VMMDECL(int) PGMSyncCR3(PVM pVM, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
1534{
1535 /*
1536 * We might be called when we shouldn't.
1537 *
1538 * The mode switching will ensure that the PD is resynced
1539 * after every mode switch. So, if we find ourselves here
1540 * when in protected or real mode we can safely disable the
1541 * FF and return immediately.
1542 */
1543 if (pVM->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
1544 {
1545 Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
1546 VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3);
1547 VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
1548 return VINF_SUCCESS;
1549 }
1550
1551 /* If global pages are not supported, then all flushes are global */
1552 if (!(cr4 & X86_CR4_PGE))
1553 fGlobal = true;
1554 LogFlow(("PGMSyncCR3: cr0=%VX64 cr3=%VX64 cr4=%VX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
1555 VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3), VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL)));
1556
1557 /*
1558 * Let the 'Bth' function do the work and we'll just keep track of the flags.
1559 */
1560 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
1561 int rc = PGM_BTH_PFN(SyncCR3, pVM)(pVM, cr0, cr3, cr4, fGlobal);
1562 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
1563 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || VBOX_FAILURE(rc), ("rc=%Vrc\n", rc));
1564 if (rc == VINF_SUCCESS)
1565 {
1566 if (!(pVM->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
1567 {
1568 VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3);
1569 VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
1570 }
1571
1572 /*
1573 * Check if we have a pending update of the CR3 monitoring.
1574 */
1575 if (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
1576 {
1577 pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1578 Assert(!pVM->pgm.s.fMappingsFixed);
1579 Assert(pVM->pgm.s.GCPhysCR3 == pVM->pgm.s.GCPhysGstCR3Monitored);
1580 rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, pVM->pgm.s.GCPhysCR3);
1581 }
1582 }
1583
1584 /*
1585 * Now flush the CR3 (guest context).
1586 */
1587 if (rc == VINF_SUCCESS)
1588 PGM_INVL_GUEST_TLBS();
1589 return rc;
1590}
1591
1592
1593/**
1594 * Called whenever CR0 or CR4 changes in a way which may change
1595 * the paging mode.
1596 *
1597 * @returns VBox status code fit for scheduling in GC and R0.
1598 * @retval VINF_SUCCESS if there was no change, or it was successfully dealt with.
1599 * @retval VINF_PGM_CHANGE_MODE if we're in GC or R0 and the mode changes.
1600 * @param pVM VM handle.
1601 * @param cr0 The new cr0.
1602 * @param cr4 The new cr4.
1603 * @param efer The new extended feature enable register.
1604 */
1605VMMDECL(int) PGMChangeMode(PVM pVM, uint64_t cr0, uint64_t cr4, uint64_t efer)
1606{
1607 PGMMODE enmGuestMode;
1608
1609 /*
1610 * Calc the new guest mode.
1611 */
1612 if (!(cr0 & X86_CR0_PE))
1613 enmGuestMode = PGMMODE_REAL;
1614 else if (!(cr0 & X86_CR0_PG))
1615 enmGuestMode = PGMMODE_PROTECTED;
1616 else if (!(cr4 & X86_CR4_PAE))
1617 enmGuestMode = PGMMODE_32_BIT;
1618 else if (!(efer & MSR_K6_EFER_LME))
1619 {
1620 if (!(efer & MSR_K6_EFER_NXE))
1621 enmGuestMode = PGMMODE_PAE;
1622 else
1623 enmGuestMode = PGMMODE_PAE_NX;
1624 }
1625 else
1626 {
1627 if (!(efer & MSR_K6_EFER_NXE))
1628 enmGuestMode = PGMMODE_AMD64;
1629 else
1630 enmGuestMode = PGMMODE_AMD64_NX;
1631 }
1632
1633 /*
1634 * Did it change?
1635 */
1636 if (pVM->pgm.s.enmGuestMode == enmGuestMode)
1637 return VINF_SUCCESS;
1638
1639 /* Flush the TLB */
1640 PGM_INVL_GUEST_TLBS();
1641
1642#ifdef IN_RING3
1643 return PGMR3ChangeMode(pVM, enmGuestMode);
1644#else
1645 Log(("PGMChangeMode: returns VINF_PGM_CHANGE_MODE.\n"));
1646 return VINF_PGM_CHANGE_MODE;
1647#endif
1648}
1649
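/*
 * Worked example of the mode calculation above (architectural control register
 * bits): CR0.PE=1, CR0.PG=1, CR4.PAE=1, EFER.LME=1, EFER.NXE=0 yields PGMMODE_AMD64,
 * and setting EFER.NXE switches that to PGMMODE_AMD64_NX. Clearing CR0.PG with the
 * same CR4/EFER values drops straight to PGMMODE_PROTECTED, since the PAE/LME bits
 * are only considered once paging is enabled.
 */
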
1650
1651/**
1652 * Gets the current guest paging mode.
1653 *
1654 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
1655 *
1656 * @returns The current paging mode.
1657 * @param pVM The VM handle.
1658 */
1659VMMDECL(PGMMODE) PGMGetGuestMode(PVM pVM)
1660{
1661 return pVM->pgm.s.enmGuestMode;
1662}
1663
1664
1665/**
1666 * Gets the current shadow paging mode.
1667 *
1668 * @returns The current paging mode.
1669 * @param pVM The VM handle.
1670 */
1671VMMDECL(PGMMODE) PGMGetShadowMode(PVM pVM)
1672{
1673 return pVM->pgm.s.enmShadowMode;
1674}
1675
1676/**
1677 * Gets the current host paging mode.
1678 *
1679 * @returns The current paging mode.
1680 * @param pVM The VM handle.
1681 */
1682VMMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
1683{
1684 switch (pVM->pgm.s.enmHostMode)
1685 {
1686 case SUPPAGINGMODE_32_BIT:
1687 case SUPPAGINGMODE_32_BIT_GLOBAL:
1688 return PGMMODE_32_BIT;
1689
1690 case SUPPAGINGMODE_PAE:
1691 case SUPPAGINGMODE_PAE_GLOBAL:
1692 return PGMMODE_PAE;
1693
1694 case SUPPAGINGMODE_PAE_NX:
1695 case SUPPAGINGMODE_PAE_GLOBAL_NX:
1696 return PGMMODE_PAE_NX;
1697
1698 case SUPPAGINGMODE_AMD64:
1699 case SUPPAGINGMODE_AMD64_GLOBAL:
1700 return PGMMODE_AMD64;
1701
1702 case SUPPAGINGMODE_AMD64_NX:
1703 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
1704 return PGMMODE_AMD64_NX;
1705
1706 default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
1707 }
1708
1709 return PGMMODE_INVALID;
1710}
1711
1712
1713/**
1714 * Get mode name.
1715 *
1716 * @returns read-only name string.
1717 * @param enmMode The mode whose name is desired.
1718 */
1719VMMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
1720{
1721 switch (enmMode)
1722 {
1723 case PGMMODE_REAL: return "Real";
1724 case PGMMODE_PROTECTED: return "Protected";
1725 case PGMMODE_32_BIT: return "32-bit";
1726 case PGMMODE_PAE: return "PAE";
1727 case PGMMODE_PAE_NX: return "PAE+NX";
1728 case PGMMODE_AMD64: return "AMD64";
1729 case PGMMODE_AMD64_NX: return "AMD64+NX";
1730 case PGMMODE_NESTED: return "Nested";
1731 case PGMMODE_EPT: return "EPT";
1732 default: return "unknown mode value";
1733 }
1734}
1735
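/*
 * A minimal usage sketch (illustration only): PGMGetModeName() pairs naturally with
 * the mode query functions above when logging, e.g.:
 *
 *     Log(("guest=%s shadow=%s host=%s\n", PGMGetModeName(PGMGetGuestMode(pVM)),
 *          PGMGetModeName(PGMGetShadowMode(pVM)), PGMGetModeName(PGMGetHostMode(pVM))));
 */
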
1736
1737/**
1738 * Acquire the PGM lock.
1739 *
1740 * @returns VBox status code
1741 * @param pVM The VM to operate on.
1742 */
1743int pgmLock(PVM pVM)
1744{
1745 int rc = PDMCritSectEnter(&pVM->pgm.s.CritSect, VERR_SEM_BUSY);
1746#ifdef IN_GC
1747 if (rc == VERR_SEM_BUSY)
1748 rc = VMMGCCallHost(pVM, VMMCALLHOST_PGM_LOCK, 0);
1749#elif defined(IN_RING0)
1750 if (rc == VERR_SEM_BUSY)
1751 rc = VMMR0CallHost(pVM, VMMCALLHOST_PGM_LOCK, 0);
1752#endif
1753 AssertRC(rc);
1754 return rc;
1755}
1756
1757
1758/**
1759 * Release the PGM lock.
1760 *
1761 * @returns VBox status code
1762 * @param pVM The VM to operate on.
1763 */
1764void pgmUnlock(PVM pVM)
1765{
1766 PDMCritSectLeave(&pVM->pgm.s.CritSect);
1767}
1768
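/*
 * A minimal usage sketch for the lock pair above (hypothetical helper, illustration
 * only): pgmLock() and pgmUnlock() bracket code that touches PGM state shared
 * between contexts. Since pgmLock() can return a failure status, it is worth
 * checking before touching the guarded state.
 */
#if 0
static int pgmSampleLockedOperation(PVM pVM)
{
    int rc = pgmLock(pVM);
    if (RT_SUCCESS(rc))
    {
        /* ... read or update pVM->pgm.s here ... */
        pgmUnlock(pVM);
    }
    return rc;
}
#endif
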
1769#if defined(IN_GC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1770
1771/**
1772 * Temporarily maps one guest page specified by GC physical address.
1773 * These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
1774 *
1775 * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
1776 * reused after 8 mappings (or perhaps a few more if you score with the cache).
1777 *
1778 * @returns VBox status.
1779 * @param pVM VM handle.
1780 * @param GCPhys GC Physical address of the page.
1781 * @param ppv Where to store the address of the mapping.
1782 */
1783VMMDECL(int) PGMDynMapGCPage(PVM pVM, RTGCPHYS GCPhys, void **ppv)
1784{
1785 AssertMsg(!(GCPhys & PAGE_OFFSET_MASK), ("GCPhys=%RGp\n", GCPhys));
1786
1787 /*
1788 * Get the ram range.
1789 */
1790 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
1791 while (pRam && GCPhys - pRam->GCPhys >= pRam->cb)
1792 pRam = pRam->CTX_SUFF(pNext);
1793 if (!pRam)
1794 {
1795 AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
1796 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
1797 }
1798
1799 /*
1800 * Pass it on to PGMDynMapHCPage.
1801 */
1802 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT]);
1803 //Log(("PGMDynMapGCPage: GCPhys=%VGp HCPhys=%VHp\n", GCPhys, HCPhys));
1804 return PGMDynMapHCPage(pVM, HCPhys, ppv);
1805}
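

/**
 * Example (sketch, not part of the original file): mapping a page-aligned guest
 * physical address and reading the first dword through the temporary mapping.
 * Remember the 8 page limit documented above: don't keep the pointer around
 * across further PGMDynMap* calls. pgmSampleReadGCDword is a hypothetical name.
 */
int pgmSampleReadGCDword(PVM pVM, RTGCPHYS GCPhys, uint32_t *pu32)
{
    void *pvPage;
    int rc = PGMDynMapGCPage(pVM, GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK, &pvPage);
    if (RT_SUCCESS(rc))
        *pu32 = *(uint32_t *)pvPage;
    return rc;
}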
1806
1807
1808/**
1809 * Temporarily maps one guest page specified by unaligned GC physical address.
1810 * These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
1811 *
1812 * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
1813 * reused after 8 mappings (or perhaps a few more if you score with the cache).
1814 *
1815 * The caller is aware that only the specified page is mapped and that really bad things
1816 * will happen if writing beyond the page!
1817 *
1818 * @returns VBox status.
1819 * @param pVM VM handle.
1820 * @param GCPhys GC Physical address within the page to be mapped.
1821 * @param ppv Where to store the mapping address corresponding to GCPhys.
1822 */
1823VMMDECL(int) PGMDynMapGCPageOff(PVM pVM, RTGCPHYS GCPhys, void **ppv)
1824{
1825 /*
1826 * Get the ram range.
1827 */
1828 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
1829 while (pRam && GCPhys - pRam->GCPhys >= pRam->cb)
1830 pRam = pRam->CTX_SUFF(pNext);
1831 if (!pRam)
1832 {
1833 AssertMsgFailed(("Invalid physical address %VGp!\n", GCPhys));
1834 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
1835 }
1836
1837 /*
1838 * Pass it on to PGMDynMapHCPage.
1839 */
1840 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT]);
1841 int rc = PGMDynMapHCPage(pVM, HCPhys, ppv);
1842 if (RT_SUCCESS(rc))
1843 *ppv = (void *)((uintptr_t)*ppv | (GCPhys & PAGE_OFFSET_MASK));
1844 return rc;
1845}
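

/**
 * Example (sketch, not part of the original file): reading a dword that lives at
 * an arbitrary offset inside a guest page. The caller must make sure the access
 * does not cross the page boundary, as warned above. pgmSampleReadGCDwordOff is
 * a hypothetical name.
 */
int pgmSampleReadGCDwordOff(PVM pVM, RTGCPHYS GCPhys, uint32_t *pu32)
{
    Assert((GCPhys & PAGE_OFFSET_MASK) <= PAGE_SIZE - sizeof(uint32_t));
    void *pv;
    int rc = PGMDynMapGCPageOff(pVM, GCPhys, &pv);
    if (RT_SUCCESS(rc))
        *pu32 = *(uint32_t *)pv;
    return rc;
}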
1846
1847
1848/**
1849 * Temporarily maps one host page specified by HC physical address.
1850 *
1851 * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
1852 * reused after 8 mappings (or perhaps a few more if you score with the cache).
1853 *
1854 * @returns VBox status.
1855 * @param pVM VM handle.
1856 * @param HCPhys HC Physical address of the page.
1857 * @param ppv Where to store the address of the mapping. This is the
1858 * address of the PAGE not the exact address corresponding
1859 * to HCPhys. Use PGMDynMapHCPageOff if you care for the
1860 * page offset.
1861 */
1862VMMDECL(int) PGMDynMapHCPage(PVM pVM, RTHCPHYS HCPhys, void **ppv)
1863{
1864 AssertMsg(!(HCPhys & PAGE_OFFSET_MASK), ("HCPhys=%RHp\n", HCPhys));
1865# ifdef IN_GC
1866
1867 /*
1868 * Check the cache.
1869 */
1870 register unsigned iCache;
1871 if ( pVM->pgm.s.aHCPhysDynPageMapCache[iCache = 0] == HCPhys
1872 || pVM->pgm.s.aHCPhysDynPageMapCache[iCache = 1] == HCPhys
1873 || pVM->pgm.s.aHCPhysDynPageMapCache[iCache = 2] == HCPhys
1874 || pVM->pgm.s.aHCPhysDynPageMapCache[iCache = 3] == HCPhys)
1875 {
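        /* Each cache slot c is shared by mapping pages c and c + 4.  This table
           translates (last allocated page, matching cache slot) into the page
           that most recently wrote that slot, i.e. the page whose PTE currently
           holds the cached HCPhys. */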
1876 static const uint8_t au8Trans[MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT][RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache)] =
1877 {
1878 { 0, 5, 6, 7 },
1879 { 0, 1, 6, 7 },
1880 { 0, 1, 2, 7 },
1881 { 0, 1, 2, 3 },
1882 { 4, 1, 2, 3 },
1883 { 4, 5, 2, 3 },
1884 { 4, 5, 6, 3 },
1885 { 4, 5, 6, 7 },
1886 };
1887 Assert(RT_ELEMENTS(au8Trans) == 8);
1888 Assert(RT_ELEMENTS(au8Trans[0]) == 4);
1889 int iPage = au8Trans[pVM->pgm.s.iDynPageMapLast][iCache];
1890 void *pv = pVM->pgm.s.pbDynPageMapBaseGC + (iPage << PAGE_SHIFT);
1891 *ppv = pv;
1892 STAM_COUNTER_INC(&pVM->pgm.s.StatRCDynMapCacheHits);
1893 //Log(("PGMGCDynMapHCPage: HCPhys=%VHp pv=%VGv iPage=%d iCache=%d\n", HCPhys, pv, iPage, iCache));
1894 return VINF_SUCCESS;
1895 }
1896 Assert(RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) == 4);
1897 STAM_COUNTER_INC(&pVM->pgm.s.StatRCDynMapCacheMisses);
1898
1899 /*
1900 * Update the page tables.
1901 */
1902 register unsigned iPage = pVM->pgm.s.iDynPageMapLast;
1903 pVM->pgm.s.iDynPageMapLast = iPage = (iPage + 1) & ((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) - 1);
1904 Assert((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) == 8);
1905
1906 pVM->pgm.s.aHCPhysDynPageMapCache[iPage & (RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) - 1)] = HCPhys;
1907 pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage].u = (uint32_t)HCPhys | X86_PTE_P | X86_PTE_A | X86_PTE_D;
1908 pVM->pgm.s.paDynPageMapPaePTEsGC[iPage].u = HCPhys | X86_PTE_P | X86_PTE_A | X86_PTE_D;
1909
1910 void *pv = pVM->pgm.s.pbDynPageMapBaseGC + (iPage << PAGE_SHIFT);
1911 *ppv = pv;
1912 ASMInvalidatePage(pv);
1913 Log4(("PGMGCDynMapHCPage: HCPhys=%VHp pv=%VGv iPage=%d\n", HCPhys, pv, iPage));
1914 return VINF_SUCCESS;
1915
1916#else /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1917 AssertFailed();
1918 return VERR_NOT_IMPLEMENTED;
1919#endif /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1920}
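

/**
 * Example (sketch, not part of the original file): temporarily mapping a host
 * page and clearing it. The mapping is only guaranteed to stay usable until
 * roughly 8 further PGMDynMap* calls have been made, so the pointer must be
 * used immediately. pgmSampleZeroHCPage is a hypothetical name.
 */
int pgmSampleZeroHCPage(PVM pVM, RTHCPHYS HCPhys)
{
    void *pvPage;
    int rc = PGMDynMapHCPage(pVM, HCPhys & ~(RTHCPHYS)PAGE_OFFSET_MASK, &pvPage);
    if (RT_SUCCESS(rc))
        ASMMemZeroPage(pvPage);
    return rc;
}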
1921
1922
1923/**
1924 * Temporarily maps one host page specified by HC physical address, returning
1925 * pointer within the page.
1926 *
1927 * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
1928 * reused after 8 mappings (or perhaps a few more if you score with the cache).
1929 *
1930 * @returns VBox status.
1931 * @param pVM VM handle.
1932 * @param HCPhys HC Physical address of the page.
1933 * @param ppv Where to store the address corresponding to HCPhys.
1934 */
1935VMMDECL(int) PGMDynMapHCPageOff(PVM pVM, RTHCPHYS HCPhys, void **ppv)
1936{
1937 int rc = PGMDynMapHCPage(pVM, HCPhys & ~(RTHCPHYS)PAGE_OFFSET_MASK, ppv);
1938 if (RT_SUCCESS(rc))
1939 *ppv = (void *)((uintptr_t)*ppv | (HCPhys & PAGE_OFFSET_MASK));
1940 return rc;
1941}
1942
1943#endif /* IN_GC || VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1944
1945#ifdef VBOX_STRICT
1946
1947/**
1948 * Asserts that there are no mapping conflicts.
1949 *
1950 * @returns Number of conflicts.
1951 * @param pVM The VM Handle.
1952 */
1953VMMDECL(unsigned) PGMAssertNoMappingConflicts(PVM pVM)
1954{
1955 unsigned cErrors = 0;
1956
1957 /*
1958 * Check for mapping conflicts.
1959 */
1960 for (PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
1961 pMapping;
1962 pMapping = pMapping->CTX_SUFF(pNext))
1963 {
1964 /** @todo This is slow and should be optimized, but since it's just assertions I don't care now. */
1965 for (RTGCUINTPTR GCPtr = (RTGCUINTPTR)pMapping->GCPtr;
1966 GCPtr <= (RTGCUINTPTR)pMapping->GCPtrLast;
1967 GCPtr += PAGE_SIZE)
1968 {
1969 int rc = PGMGstGetPage(pVM, (RTGCPTR)GCPtr, NULL, NULL);
1970 if (rc != VERR_PAGE_TABLE_NOT_PRESENT)
1971 {
1972 AssertMsgFailed(("Conflict at %VGv with %s\n", GCPtr, R3STRING(pMapping->pszDesc)));
1973 cErrors++;
1974 break;
1975 }
1976 }
1977 }
1978
1979 return cErrors;
1980}
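

/**
 * Example (sketch, not part of the original file): a strict-build check that a
 * caller might run after adding or relocating hypervisor mappings. The call
 * site and the name pgmSampleCheckMappings are hypothetical.
 */
void pgmSampleCheckMappings(PVM pVM)
{
    unsigned cConflicts = PGMAssertNoMappingConflicts(pVM);
    AssertMsg(cConflicts == 0, ("%u mapping conflicts detected!\n", cConflicts));
}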
1981
1982
1983/**
1984 * Asserts that everything related to the guest CR3 is correctly shadowed.
1985 *
1986 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
1987 * and assert the correctness of the guest CR3 mapping before asserting that the
1988 * shadow page tables are in sync with the guest page tables.
1989 *
1990 * @returns Number of errors.
1991 * @param pVM The VM Handle.
1992 * @param cr3 The current guest CR3 register value.
1993 * @param cr4 The current guest CR4 register value.
1994 */
1995VMMDECL(unsigned) PGMAssertCR3(PVM pVM, uint64_t cr3, uint64_t cr4)
1996{
1997 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
1998 unsigned cErrors = PGM_BTH_PFN(AssertCR3, pVM)(pVM, cr3, cr4, 0, ~(RTGCUINTPTR)0);
1999 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
2000 return cErrors;
2002}
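

/**
 * Example (sketch, not part of the original file): verifying the shadow page
 * tables against the guest's after a guest CR3/CR4 change in a strict build.
 * The cr3/cr4 values are assumed to come from the caller (e.g. whoever observed
 * the change); pgmSampleCheckCR3 is a hypothetical name.
 */
void pgmSampleCheckCR3(PVM pVM, uint64_t cr3, uint64_t cr4)
{
    unsigned cErrs = PGMAssertCR3(pVM, cr3, cr4);
    AssertMsg(cErrs == 0, ("PGMAssertCR3 found %u error(s)!\n", cErrs));
}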
2003
2004#endif /* VBOX_STRICT */