VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp@ 9021

Last change on this file since 9021 was 9021, checked in by vboxsync on 2008-05-21

Nested paging updates. Extra paging mode added to prevent illegal changes to the shadow page table.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 43.8 KB
/* $Id: PGMAll.cpp 9021 2008-05-21 14:38:13Z vboxsync $ */
/** @file
 * PGM - Page Manager and Monitor - All context code.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#include <VBox/pgm.h>
#include <VBox/cpum.h>
#include <VBox/selm.h>
#include <VBox/iom.h>
#include <VBox/sup.h>
#include <VBox/mm.h>
#include <VBox/stam.h>
#include <VBox/csam.h>
#include <VBox/patm.h>
#include <VBox/trpm.h>
#include <VBox/rem.h>
#include <VBox/em.h>
#include <VBox/hwaccm.h>
#include "PGMInternal.h"
#include <VBox/vm.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/string.h>
#include <VBox/log.h>
#include <VBox/param.h>
#include <VBox/err.h>


/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
/**
 * State structure for PGM_GST_NAME(HandlerVirtualUpdate) that's
 * passed to PGM_GST_NAME(VirtHandlerUpdateOne) during enumeration.
 */
typedef struct PGMHVUSTATE
{
    /** The VM handle. */
    PVM         pVM;
    /** The todo flags. */
    RTUINT      fTodo;
    /** The CR4 register value. */
    uint32_t    cr4;
} PGMHVUSTATE,  *PPGMHVUSTATE;

/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/

/*
 * Shadow - 32-bit mode
 */
#define PGM_SHW_TYPE                PGM_TYPE_32BIT
#define PGM_SHW_NAME(name)          PGM_SHW_NAME_32BIT(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE                PGM_TYPE_REAL
#define PGM_GST_NAME(name)          PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_32BIT_REAL(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_32BIT_PT_FOR_PHYS
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE                PGM_TYPE_PROT
#define PGM_GST_NAME(name)          PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_32BIT_PROT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_32BIT_PT_FOR_PHYS
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode */
#define PGM_GST_TYPE                PGM_TYPE_32BIT
#define PGM_GST_NAME(name)          PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_32BIT_32BIT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG  PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME


/*
 * Shadow - PAE mode
 */
#define PGM_SHW_TYPE                PGM_TYPE_PAE
#define PGM_SHW_NAME(name)          PGM_SHW_NAME_PAE(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_PAE_REAL(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE                PGM_TYPE_REAL
#define PGM_GST_NAME(name)          PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_PAE_REAL(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_PHYS
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE                PGM_TYPE_PROT
#define PGM_GST_NAME(name)          PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_PAE_PROT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_PHYS
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode */
#define PGM_GST_TYPE                PGM_TYPE_32BIT
#define PGM_GST_NAME(name)          PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_PAE_32BIT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG  PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME


/* Guest - PAE mode */
#define PGM_GST_TYPE                PGM_TYPE_PAE
#define PGM_GST_NAME(name)          PGM_GST_NAME_PAE(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_PAE_PAE(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_PAE_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG  PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME


#ifndef IN_GC /* AMD64 implies VT-x/AMD-V */
/*
 * Shadow - AMD64 mode
 */
#define PGM_SHW_TYPE                PGM_TYPE_AMD64
#define PGM_SHW_NAME(name)          PGM_SHW_NAME_AMD64(name)
#include "PGMAllShw.h"

/* Guest - protected mode */
#define PGM_GST_TYPE                PGM_TYPE_PROT
#define PGM_GST_NAME(name)          PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_AMD64_PROT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_PHYS
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - AMD64 mode */
#define PGM_GST_TYPE                PGM_TYPE_AMD64
#define PGM_GST_NAME(name)          PGM_GST_NAME_AMD64(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_AMD64_AMD64(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_PAE_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG  PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME

/*
 * Shadow - Nested paging mode
 */
#define PGM_SHW_TYPE                PGM_TYPE_NESTED
#define PGM_SHW_NAME(name)          PGM_SHW_NAME_NESTED(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE                PGM_TYPE_REAL
#define PGM_GST_NAME(name)          PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_NESTED_REAL(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_PHYS
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE                PGM_TYPE_PROT
#define PGM_GST_NAME(name)          PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_NESTED_PROT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_PHYS
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode */
#define PGM_GST_TYPE                PGM_TYPE_32BIT
#define PGM_GST_NAME(name)          PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_NESTED_32BIT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG  PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - PAE mode */
#define PGM_GST_TYPE                PGM_TYPE_PAE
#define PGM_GST_NAME(name)          PGM_GST_NAME_PAE(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_NESTED_PAE(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_PAE_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG  PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - AMD64 mode */
#define PGM_GST_TYPE                PGM_TYPE_AMD64
#define PGM_GST_NAME(name)          PGM_GST_NAME_AMD64(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_NESTED_AMD64(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_PAE_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG  PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME

#endif
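
/*
 * A note on the include blocks above: this is the C "poor man's template"
 * pattern. PGMAllShw.h, PGMAllGst.h and PGMAllBth.h contain function bodies
 * whose names are wrapped in PGM_SHW_NAME()/PGM_GST_NAME()/PGM_BTH_NAME(),
 * so re-including them with different name macros stamps out one copy of
 * the code per shadow/guest mode pair. An illustrative expansion (a sketch;
 * the real name macros live in PGMInternal.h):
 *
 *     #define PGM_BTH_NAME(name)  PGM_BTH_NAME_PAE_32BIT(name)
 *     // A function written as PGM_BTH_NAME(SyncCR3)(...) in PGMAllBth.h
 *     // now compiles as the PAE-shadow/32-bit-guest instance of SyncCR3.
 */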


/**
 * #PF Handler.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         VM Handle.
 * @param   uErr        The trap error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address.
 */
PGMDECL(int) PGMTrap0eHandler(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
{
    LogFlow(("PGMTrap0eHandler: uErr=%#x pvFault=%VGv eip=%VGv\n", uErr, pvFault, pRegFrame->eip));
    STAM_PROFILE_START(&pVM->pgm.s.StatGCTrap0e, a);
    STAM_STATS({ pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution) = NULL; } );


#ifdef VBOX_WITH_STATISTICS
    /*
     * Error code stats.
     */
    if (uErr & X86_TRAP_PF_US)
    {
        if (!(uErr & X86_TRAP_PF_P))
        {
            if (uErr & X86_TRAP_PF_RW)
                STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUSNotPresentWrite);
            else
                STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUSNotPresentRead);
        }
        else if (uErr & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUSWrite);
        else if (uErr & X86_TRAP_PF_RSVD)
            STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUSReserved);
        else if (uErr & X86_TRAP_PF_ID)
            STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUSNXE);
        else
            STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUSRead);
    }
    else
    {   /* Supervisor */
        if (!(uErr & X86_TRAP_PF_P))
        {
            if (uErr & X86_TRAP_PF_RW)
                STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eSVNotPresentWrite);
            else
                STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eSVNotPresentRead);
        }
        else if (uErr & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eSVWrite);
        else if (uErr & X86_TRAP_PF_ID)
            STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eSNXE);
        else if (uErr & X86_TRAP_PF_RSVD)
            STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eSVReserved);
    }
#endif

    /*
     * Call the worker.
     */
    int rc = PGM_BTH_PFN(Trap0eHandler, pVM)(pVM, uErr, pRegFrame, pvFault);
    if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
        rc = VINF_SUCCESS;
    STAM_STATS({ if (!pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution))
                    pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatTrap0eMisc; });
    STAM_PROFILE_STOP_EX(&pVM->pgm.s.StatGCTrap0e, pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution), a);
    return rc;
}
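
/*
 * For reference, the uErr tests above decode the architectural #PF error
 * code bits (the X86_TRAP_PF_* defines carry these values):
 *
 *     bit 0  X86_TRAP_PF_P     0 = page not present, 1 = protection violation
 *     bit 1  X86_TRAP_PF_RW    0 = read access,      1 = write access
 *     bit 2  X86_TRAP_PF_US    0 = supervisor mode,  1 = user mode
 *     bit 3  X86_TRAP_PF_RSVD  a reserved bit was set in a paging entry
 *     bit 4  X86_TRAP_PF_ID    instruction fetch (NX/no-execute faults)
 *
 * E.g. a user-mode write to a not-present page arrives with
 * uErr == (X86_TRAP_PF_US | X86_TRAP_PF_RW) and X86_TRAP_PF_P clear.
 */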

/**
 * Prefetch a page
 *
 * Typically used to sync commonly used pages before entering raw mode
 * after a CR3 reload.
 *
 * @returns VBox status code suitable for scheduling.
 * @retval  VINF_SUCCESS on success.
 * @retval  VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
 * @param   pVM         VM handle.
 * @param   GCPtrPage   Page to prefetch.
 */
PGMDECL(int) PGMPrefetchPage(PVM pVM, RTGCPTR GCPtrPage)
{
    STAM_PROFILE_START(&pVM->pgm.s.StatHCPrefetch, a);
    int rc = PGM_BTH_PFN(PrefetchPage, pVM)(pVM, (RTGCUINTPTR)GCPtrPage);
    STAM_PROFILE_STOP(&pVM->pgm.s.StatHCPrefetch, a);
    AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || VBOX_FAILURE(rc), ("rc=%Vrc\n", rc));
    return rc;
}


/**
 * Gets the mapping corresponding to the specified address (if any).
 *
 * @returns Pointer to the mapping.
 * @returns NULL if not found.
 *
 * @param   pVM     The virtual machine.
 * @param   GCPtr   The guest context pointer.
 */
PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr)
{
    PPGMMAPPING pMapping = CTXALLSUFF(pVM->pgm.s.pMappings);
    while (pMapping)
    {
        if ((uintptr_t)GCPtr < (uintptr_t)pMapping->GCPtr)
            break;
        if ((uintptr_t)GCPtr - (uintptr_t)pMapping->GCPtr < pMapping->cb)
        {
            STAM_COUNTER_INC(&pVM->pgm.s.StatGCSyncPTConflict);
            return pMapping;
        }
        pMapping = CTXALLSUFF(pMapping->pNext);
    }
    return NULL;
}


/**
 * Verifies a range of pages for read or write access
 *
 * Only checks the guest's page tables
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   Addr        Guest virtual address to check
 * @param   cbSize      Access size
 * @param   fAccess     Access type (r/w, user/supervisor (X86_PTE_*))
 */
PGMDECL(int) PGMIsValidAccess(PVM pVM, RTGCUINTPTR Addr, uint32_t cbSize, uint32_t fAccess)
{
    /*
     * Validate input.
     */
    if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
    {
        AssertMsgFailed(("PGMIsValidAccess: invalid access type %08x\n", fAccess));
        return VERR_INVALID_PARAMETER;
    }

    uint64_t fPage;
    int rc = PGMGstGetPage(pVM, (RTGCPTR)Addr, &fPage, NULL);
    if (VBOX_FAILURE(rc))
    {
        Log(("PGMIsValidAccess: access violation for %VGv rc=%d\n", Addr, rc));
        return VINF_EM_RAW_GUEST_TRAP;
    }

    /*
     * Check if the access would cause a page fault
     *
     * Note that hypervisor page directories are not present in the guest's tables, so this check
     * is sufficient.
     */
    bool fWrite = !!(fAccess & X86_PTE_RW);
    bool fUser  = !!(fAccess & X86_PTE_US);
    if (   !(fPage & X86_PTE_P)
        || (fWrite && !(fPage & X86_PTE_RW))
        || (fUser  && !(fPage & X86_PTE_US)) )
    {
        Log(("PGMIsValidAccess: access violation for %VGv attr %#llx vs %d:%d\n", Addr, fPage, fWrite, fUser));
        return VINF_EM_RAW_GUEST_TRAP;
    }
    if (    VBOX_SUCCESS(rc)
        &&  PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize))
        return PGMIsValidAccess(pVM, Addr + PAGE_SIZE, (cbSize > PAGE_SIZE) ? cbSize - PAGE_SIZE : 1, fAccess);
    return rc;
}
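
/*
 * Worked example of the cross-page recursion above: Addr=0x1ffe, cbSize=4
 * spans the pages at 0x1000 and 0x2000. After the first page checks out,
 * the tail call runs with Addr=0x2ffe and cbSize=1; that address lies in
 * the 0x2000 page, so the second (and last) page is verified too.
 */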


/**
 * Verifies a range of pages for read or write access
 *
 * Supports handling of pages marked for dirty bit tracking and CSAM
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   Addr        Guest virtual address to check
 * @param   cbSize      Access size
 * @param   fAccess     Access type (r/w, user/supervisor (X86_PTE_*))
 */
PGMDECL(int) PGMVerifyAccess(PVM pVM, RTGCUINTPTR Addr, uint32_t cbSize, uint32_t fAccess)
{
    /*
     * Validate input.
     */
    if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
    {
        AssertMsgFailed(("PGMVerifyAccess: invalid access type %08x\n", fAccess));
        return VERR_INVALID_PARAMETER;
    }

    uint64_t fPageGst;
    int rc = PGMGstGetPage(pVM, (RTGCPTR)Addr, &fPageGst, NULL);
    if (VBOX_FAILURE(rc))
    {
        Log(("PGMVerifyAccess: access violation for %VGv rc=%d\n", Addr, rc));
        return VINF_EM_RAW_GUEST_TRAP;
    }

    /*
     * Check if the access would cause a page fault
     *
     * Note that hypervisor page directories are not present in the guest's tables, so this check
     * is sufficient.
     */
    const bool fWrite = !!(fAccess & X86_PTE_RW);
    const bool fUser  = !!(fAccess & X86_PTE_US);
    if (   !(fPageGst & X86_PTE_P)
        || (fWrite && !(fPageGst & X86_PTE_RW))
        || (fUser  && !(fPageGst & X86_PTE_US)) )
    {
        Log(("PGMVerifyAccess: access violation for %VGv attr %#llx vs %d:%d\n", Addr, fPageGst, fWrite, fUser));
        return VINF_EM_RAW_GUEST_TRAP;
    }

    if (!HWACCMIsNestedPagingActive(pVM))
    {
        /*
         * Next step is to verify if we protected this page for dirty bit tracking or for CSAM scanning
         */
        rc = PGMShwGetPage(pVM, (RTGCPTR)Addr, NULL, NULL);
        if (    rc == VERR_PAGE_NOT_PRESENT
            ||  rc == VERR_PAGE_TABLE_NOT_PRESENT)
        {
            /*
             * Page is not present in our page tables.
             * Try to sync it!
             */
            Assert(X86_TRAP_PF_RW == X86_PTE_RW && X86_TRAP_PF_US == X86_PTE_US);
            uint32_t uErr = fAccess & (X86_TRAP_PF_RW | X86_TRAP_PF_US);
            rc = PGM_BTH_PFN(VerifyAccessSyncPage, pVM)(pVM, Addr, fPageGst, uErr);
            if (rc != VINF_SUCCESS)
                return rc;
        }
        else
            AssertMsg(rc == VINF_SUCCESS, ("PGMShwGetPage %VGv failed with %Vrc\n", Addr, rc));
    }

#if 0 /* def VBOX_STRICT; triggers too often now */
    /*
     * This check is a bit paranoid, but useful.
     */
    /** @note this will assert when writing to monitored pages (a bit annoying actually) */
    uint64_t fPageShw;
    rc = PGMShwGetPage(pVM, (RTGCPTR)Addr, &fPageShw, NULL);
    if (    (rc == VERR_PAGE_NOT_PRESENT || VBOX_FAILURE(rc))
        ||  (fWrite && !(fPageShw & X86_PTE_RW))
        ||  (fUser  && !(fPageShw & X86_PTE_US)) )
    {
        AssertMsgFailed(("Unexpected access violation for %VGv! rc=%Vrc write=%d user=%d\n",
                         Addr, rc, fWrite && !(fPageShw & X86_PTE_RW), fUser && !(fPageShw & X86_PTE_US)));
        return VINF_EM_RAW_GUEST_TRAP;
    }
#endif

    if (    VBOX_SUCCESS(rc)
        &&  (   PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize - 1)
             || Addr + cbSize < Addr))
    {
        /* Don't recursively call PGMVerifyAccess as we might run out of stack. */
        for (;;)
        {
            Addr += PAGE_SIZE;
            if (cbSize > PAGE_SIZE)
                cbSize -= PAGE_SIZE;
            else
                cbSize = 1;
            rc = PGMVerifyAccess(pVM, Addr, 1, fAccess);
            if (rc != VINF_SUCCESS)
                break;
            if (PAGE_ADDRESS(Addr) == PAGE_ADDRESS(Addr + cbSize - 1))
                break;
        }
    }
    return rc;
}
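
/*
 * Usage sketch (hypothetical caller, not part of this file): an instruction
 * emulator validating a guest buffer before touching it might do:
 *
 *     int rc = PGMVerifyAccess(pVM, (RTGCUINTPTR)GCPtrDst, cbToWrite,
 *                              X86_PTE_US | X86_PTE_RW);
 *     if (rc == VINF_EM_RAW_GUEST_TRAP)
 *         // reflect a #PF to the guest rather than completing the write
 *     else if (VBOX_SUCCESS(rc))
 *         // safe to write; dirty-bit/CSAM protection was resolved above
 */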


#ifndef IN_GC
/**
 * Emulation of the invlpg instruction (HC only actually).
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   GCPtrPage   Page to invalidate.
 * @remark  ASSUMES the page table entry or page directory is
 *          valid. Fairly safe, but there could be edge cases!
 * @todo    Flush page or page directory only if necessary!
 */
PGMDECL(int) PGMInvalidatePage(PVM pVM, RTGCPTR GCPtrPage)
{
    int rc;

    LogFlow(("PGMInvalidatePage: GCPtrPage=%VGv\n", GCPtrPage));

    /** @todo merge PGMGCInvalidatePage with this one */

#ifndef IN_RING3
    /*
     * Notify the recompiler so it can record this instruction.
     * Failure happens when it's out of space. We'll return to HC in that case.
     */
    rc = REMNotifyInvalidatePage(pVM, GCPtrPage);
    if (VBOX_FAILURE(rc))
        return rc;
#endif

    STAM_PROFILE_START(&CTXMID(pVM->pgm.s.Stat,InvalidatePage), a);
    rc = PGM_BTH_PFN(InvalidatePage, pVM)(pVM, GCPtrPage);
    STAM_PROFILE_STOP(&CTXMID(pVM->pgm.s.Stat,InvalidatePage), a);

#ifndef IN_RING0
    /*
     * Check if we have a pending update of the CR3 monitoring.
     */
    if (    VBOX_SUCCESS(rc)
        &&  (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3))
    {
        pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
        Assert(!pVM->pgm.s.fMappingsFixed);
        Assert(pVM->pgm.s.GCPhysCR3 == pVM->pgm.s.GCPhysGstCR3Monitored);
        rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, pVM->pgm.s.GCPhysCR3);
    }
#endif

#ifdef IN_RING3
    /*
     * Inform CSAM about the flush
     */
    /** @note this is to check if monitored pages have been changed; when we implement callbacks for virtual handlers, this is no longer required. */
    CSAMR3FlushPage(pVM, GCPtrPage);
#endif
    return rc;
}
#endif


/**
 * Executes an instruction using the interpreter.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         VM handle.
 * @param   pRegFrame   Register frame.
 * @param   pvFault     Fault address.
 */
PGMDECL(int) PGMInterpretInstruction(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
{
    uint32_t cb;
    int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);
    if (rc == VERR_EM_INTERPRETER)
        rc = VINF_EM_RAW_EMULATE_INSTR;
    if (rc != VINF_SUCCESS)
        Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%VGv)\n", rc, pvFault));
    return rc;
}


/**
 * Gets effective page information (from the VMM page directory).
 *
 * @returns VBox status.
 * @param   pVM         VM Handle.
 * @param   GCPtr       Guest Context virtual address of the page.
 * @param   pfFlags     Where to store the flags. These are X86_PTE_*.
 * @param   pHCPhys     Where to store the HC physical address of the page.
 *                      This is page aligned.
 * @remark  You should use PGMMapGetPage() for pages in a mapping.
 */
PGMDECL(int) PGMShwGetPage(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
{
    return PGM_SHW_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtr, pfFlags, pHCPhys);
}


/**
 * Sets (replaces) the page flags for a range of pages in the shadow context.
 *
 * @returns VBox status.
 * @param   pVM         VM handle.
 * @param   GCPtr       The address of the first page.
 * @param   cb          The size of the range in bytes.
 * @param   fFlags      Page flags X86_PTE_*, excluding the page mask of course.
 * @remark  You must use PGMMapSetPage() for pages in a mapping.
 */
PGMDECL(int) PGMShwSetPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
{
    return PGMShwModifyPage(pVM, GCPtr, cb, fFlags, 0);
}


/**
 * Modify page flags for a range of pages in the shadow context.
 *
 * The existing flags are ANDed with the fMask and ORed with the fFlags.
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   GCPtr       Virtual address of the first page in the range.
 * @param   cb          Size (in bytes) of the range to apply the modification to.
 * @param   fFlags      The OR  mask - page flags X86_PTE_*, excluding the page mask of course.
 * @param   fMask       The AND mask - page flags X86_PTE_*.
 *                      Be very CAREFUL when ~'ing constants which could be 32-bit!
 * @remark  You must use PGMMapModifyPage() for pages in a mapping.
 */
PGMDECL(int) PGMShwModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
{
    /*
     * Validate input.
     */
    if (fFlags & X86_PTE_PAE_PG_MASK)
    {
        AssertMsgFailed(("fFlags=%#llx\n", fFlags));
        return VERR_INVALID_PARAMETER;
    }
    if (!cb)
    {
        AssertFailed();
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Align the input.
     */
    cb     += (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
    cb      = RT_ALIGN_Z(cb, PAGE_SIZE);
    GCPtr   = (RTGCPTR)((RTGCUINTPTR)GCPtr & PAGE_BASE_GC_MASK); /** @todo this ain't necessary, right... */

    /*
     * Call worker.
     */
    return PGM_SHW_PFN(ModifyPage, pVM)(pVM, (RTGCUINTPTR)GCPtr, cb, fFlags, fMask);
}
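
/*
 * Example of the fFlags/fMask convention (new = (old & fMask) | fFlags):
 * to make a range read-only in the shadow tables, clear X86_PTE_RW and
 * set nothing:
 *
 *     rc = PGMShwModifyPage(pVM, GCPtr, cb, 0, ~(uint64_t)X86_PTE_RW);
 *
 * The 64-bit cast matters: ~X86_PTE_RW evaluated as a 32-bit constant
 * would also clear the upper PAE/NX bits (the CAREFUL remark above).
 */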

#ifndef IN_GC
/**
 * Gets the SHADOW page directory pointer for the specified address. Allocates
 * backing pages in case the PDPT or page directory is missing.
 *
 * @returns VBox status.
 * @param   pVM     VM handle.
 * @param   GCPtr   The address.
 * @param   ppPD    Receives address of page directory
 */
PGMDECL(int) PGMShwGetLongModePDPtr(PVM pVM, RTGCUINTPTR64 GCPtr, PX86PDPAE *ppPD)
{
    PPGM           pPGM   = &pVM->pgm.s;
    const unsigned iPml4e = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    PPGMPOOL       pPool  = pPGM->CTXSUFF(pPool);
    PX86PML4E      pPml4e;
    PPGMPOOLPAGE   pShwPage;
    int            rc;

    Assert(!HWACCMIsNestedPagingActive(pVM));

    pPml4e = &pPGM->pHCPaePML4->a[iPml4e];
    if (    !pPml4e->n.u1Present
        &&  !(pPml4e->u & X86_PML4E_PG_MASK))
    {
        PX86PML4E pPml4eGst = &pPGM->pGstPaePML4HC->a[iPml4e];

        Assert(!(pPml4e->u & X86_PML4E_PG_MASK));
        rc = pgmPoolAlloc(pVM, pPml4eGst->u & X86_PML4E_PG_MASK, PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT, PGMPOOL_IDX_PML4, iPml4e, &pShwPage);
        if (rc == VERR_PGM_POOL_FLUSHED)
            return VINF_PGM_SYNC_CR3;

        AssertRCReturn(rc, rc);

        /* The PDPT was cached or created; hook it up now. */
        pPml4e->u |= pShwPage->Core.Key;
    }
    else
    {
        pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
        AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
    }

    const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
    PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
    PX86PDPE pPdpe = &pPdpt->a[iPdPt];

    if (    !pPdpe->n.u1Present
        &&  !(pPdpe->u & X86_PDPE_PG_MASK))
    {
        PX86PML4E pPml4eGst = &pPGM->pGstPaePML4HC->a[iPml4e];
        PX86PDPT  pPdptGst;
        rc = PGM_GCPHYS_2_PTR(pVM, pPml4eGst->u & X86_PML4E_PG_MASK, &pPdptGst);
        AssertRCReturn(rc, rc);

        Assert(!(pPdpe->u & X86_PDPE_PG_MASK));
        rc = pgmPoolAlloc(pVM, pPdptGst->a[iPdPt].u & X86_PDPE_PG_MASK, PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD, PGMPOOL_IDX_PDPT, iPdPt, &pShwPage);
        if (rc == VERR_PGM_POOL_FLUSHED)
            return VINF_PGM_SYNC_CR3;

        AssertRCReturn(rc, rc);

        /* The PD was cached or created; hook it up now. */
        pPdpe->u |= pShwPage->Core.Key;
    }
    else
    {
        pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
        AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
    }

    *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
    return VINF_SUCCESS;
}
#endif
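
/*
 * For reference, the table-walk indexing used above follows the standard
 * AMD64 4-level layout (shift/mask values per the architecture):
 *
 *     iPml4e = (GCPtr >> 39) & 0x1ff;   // X86_PML4_SHIFT / X86_PML4_MASK
 *     iPdPt  = (GCPtr >> 30) & 0x1ff;   // X86_PDPT_SHIFT / X86_PDPT_MASK_AMD64
 *
 * The page-directory and page-table levels below these are handled by the
 * mode-specific code included at the top of this file.
 */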

/**
 * Gets effective Guest OS page information.
 *
 * When GCPtr is in a big page, the function will return as if it was a normal
 * 4KB page. If the need for distinguishing between big and normal page becomes
 * necessary at a later point, a separate API will be created for that purpose.
 *
 * @returns VBox status.
 * @param   pVM         VM Handle.
 * @param   GCPtr       Guest Context virtual address of the page.
 * @param   pfFlags     Where to store the flags. These are X86_PTE_*, even for big pages.
 * @param   pGCPhys     Where to store the GC physical address of the page.
 *                      This is page aligned.
 */
PGMDECL(int) PGMGstGetPage(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
{
    return PGM_GST_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtr, pfFlags, pGCPhys);
}


/**
 * Checks if the page is present.
 *
 * @returns true if the page is present.
 * @returns false if the page is not present.
 * @param   pVM     The VM handle.
 * @param   GCPtr   Address within the page.
 */
PGMDECL(bool) PGMGstIsPagePresent(PVM pVM, RTGCPTR GCPtr)
{
    int rc = PGMGstGetPage(pVM, GCPtr, NULL, NULL);
    return VBOX_SUCCESS(rc);
}


/**
 * Sets (replaces) the page flags for a range of pages in the guest's tables.
 *
 * @returns VBox status.
 * @param   pVM     VM handle.
 * @param   GCPtr   The address of the first page.
 * @param   cb      The size of the range in bytes.
 * @param   fFlags  Page flags X86_PTE_*, excluding the page mask of course.
 */
PGMDECL(int) PGMGstSetPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
{
    return PGMGstModifyPage(pVM, GCPtr, cb, fFlags, 0);
}


/**
 * Modify page flags for a range of pages in the guest's tables
 *
 * The existing flags are ANDed with the fMask and ORed with the fFlags.
 *
 * @returns VBox status code.
 * @param   pVM     VM handle.
 * @param   GCPtr   Virtual address of the first page in the range.
 * @param   cb      Size (in bytes) of the range to apply the modification to.
 * @param   fFlags  The OR  mask - page flags X86_PTE_*, excluding the page mask of course.
 * @param   fMask   The AND mask - page flags X86_PTE_*, excluding the page mask of course.
 *                  Be very CAREFUL when ~'ing constants which could be 32-bit!
 */
PGMDECL(int) PGMGstModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
{
    STAM_PROFILE_START(&CTXMID(pVM->pgm.s.Stat,GstModifyPage), a);

    /*
     * Validate input.
     */
    if (fFlags & X86_PTE_PAE_PG_MASK)
    {
        AssertMsgFailed(("fFlags=%#llx\n", fFlags));
        STAM_PROFILE_STOP(&CTXMID(pVM->pgm.s.Stat,GstModifyPage), a);
        return VERR_INVALID_PARAMETER;
    }

    if (!cb)
    {
        AssertFailed();
        STAM_PROFILE_STOP(&CTXMID(pVM->pgm.s.Stat,GstModifyPage), a);
        return VERR_INVALID_PARAMETER;
    }

    LogFlow(("PGMGstModifyPage %VGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));

    /*
     * Adjust input.
     */
    cb     += (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
    cb      = RT_ALIGN_Z(cb, PAGE_SIZE);
    GCPtr   = (RTGCPTR)((RTGCUINTPTR)GCPtr & PAGE_BASE_GC_MASK);

    /*
     * Call worker.
     */
    int rc = PGM_GST_PFN(ModifyPage, pVM)(pVM, (RTGCUINTPTR)GCPtr, cb, fFlags, fMask);

    STAM_PROFILE_STOP(&CTXMID(pVM->pgm.s.Stat,GstModifyPage), a);
    return rc;
}


/**
 * Gets the current CR3 register value for the shadow memory context.
 * @returns CR3 value.
 * @param   pVM     The VM handle.
 */
PGMDECL(uint32_t) PGMGetHyperCR3(PVM pVM)
{
    switch (pVM->pgm.s.enmShadowMode)
    {
        case PGMMODE_32_BIT:
            return pVM->pgm.s.HCPhys32BitPD;

        case PGMMODE_PAE:
        case PGMMODE_PAE_NX:
            return pVM->pgm.s.HCPhysPaePDPT;

        case PGMMODE_AMD64:
        case PGMMODE_AMD64_NX:
            return pVM->pgm.s.HCPhysPaePML4;

        default:
            AssertMsgFailed(("enmShadowMode=%d\n", pVM->pgm.s.enmShadowMode));
            return ~0;
    }
}


/**
 * Gets the CR3 register value for the 32-Bit shadow memory context.
 * @returns CR3 value.
 * @param   pVM     The VM handle.
 */
PGMDECL(uint32_t) PGMGetHyper32BitCR3(PVM pVM)
{
    return pVM->pgm.s.HCPhys32BitPD;
}


/**
 * Gets the CR3 register value for the PAE shadow memory context.
 * @returns CR3 value.
 * @param   pVM     The VM handle.
 */
PGMDECL(uint32_t) PGMGetHyperPaeCR3(PVM pVM)
{
    return pVM->pgm.s.HCPhysPaePDPT;
}


/**
 * Gets the CR3 register value for the AMD64 shadow memory context.
 * @returns CR3 value.
 * @param   pVM     The VM handle.
 */
PGMDECL(uint32_t) PGMGetHyperAmd64CR3(PVM pVM)
{
    return pVM->pgm.s.HCPhysPaePML4;
}


/**
 * Gets the current CR3 register value for the HC intermediate memory context.
 * @returns CR3 value.
 * @param   pVM     The VM handle.
 */
PGMDECL(uint32_t) PGMGetInterHCCR3(PVM pVM)
{
    switch (pVM->pgm.s.enmHostMode)
    {
        case SUPPAGINGMODE_32_BIT:
        case SUPPAGINGMODE_32_BIT_GLOBAL:
            return pVM->pgm.s.HCPhysInterPD;

        case SUPPAGINGMODE_PAE:
        case SUPPAGINGMODE_PAE_GLOBAL:
        case SUPPAGINGMODE_PAE_NX:
        case SUPPAGINGMODE_PAE_GLOBAL_NX:
            return pVM->pgm.s.HCPhysInterPaePDPT;

        case SUPPAGINGMODE_AMD64:
        case SUPPAGINGMODE_AMD64_GLOBAL:
        case SUPPAGINGMODE_AMD64_NX:
        case SUPPAGINGMODE_AMD64_GLOBAL_NX:
            return pVM->pgm.s.HCPhysInterPaePDPT;

        default:
            AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode));
            return ~0;
    }
}


/**
 * Gets the current CR3 register value for the GC intermediate memory context.
 * @returns CR3 value.
 * @param   pVM     The VM handle.
 */
PGMDECL(uint32_t) PGMGetInterGCCR3(PVM pVM)
{
    switch (pVM->pgm.s.enmShadowMode)
    {
        case PGMMODE_32_BIT:
            return pVM->pgm.s.HCPhysInterPD;

        case PGMMODE_PAE:
        case PGMMODE_PAE_NX:
            return pVM->pgm.s.HCPhysInterPaePDPT;

        case PGMMODE_AMD64:
        case PGMMODE_AMD64_NX:
            return pVM->pgm.s.HCPhysInterPaePML4;

        default:
            AssertMsgFailed(("enmShadowMode=%d\n", pVM->pgm.s.enmShadowMode));
            return ~0;
    }
}


/**
 * Gets the CR3 register value for the 32-Bit intermediate memory context.
 * @returns CR3 value.
 * @param   pVM     The VM handle.
 */
PGMDECL(uint32_t) PGMGetInter32BitCR3(PVM pVM)
{
    return pVM->pgm.s.HCPhysInterPD;
}


/**
 * Gets the CR3 register value for the PAE intermediate memory context.
 * @returns CR3 value.
 * @param   pVM     The VM handle.
 */
PGMDECL(uint32_t) PGMGetInterPaeCR3(PVM pVM)
{
    return pVM->pgm.s.HCPhysInterPaePDPT;
}


/**
 * Gets the CR3 register value for the AMD64 intermediate memory context.
 * @returns CR3 value.
 * @param   pVM     The VM handle.
 */
PGMDECL(uint32_t) PGMGetInterAmd64CR3(PVM pVM)
{
    return pVM->pgm.s.HCPhysInterPaePML4;
}


/**
 * Performs and schedules necessary updates following a CR3 load or reload.
 *
 * This will normally involve mapping the guest PD or nPDPT
 *
 * @returns VBox status code.
 * @retval  VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
 *          safely be ignored and overridden since the FF will be set too then.
 * @param   pVM         VM handle.
 * @param   cr3         The new cr3.
 * @param   fGlobal     Indicates whether this is a global flush or not.
 */
PGMDECL(int) PGMFlushTLB(PVM pVM, uint64_t cr3, bool fGlobal)
{
    STAM_PROFILE_START(&pVM->pgm.s.StatFlushTLB, a);

    /*
     * Always flag the necessary updates; necessary for hardware acceleration
     */
    VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
    if (fGlobal)
        VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
    LogFlow(("PGMFlushTLB: cr3=%VX64 OldCr3=%VX64 fGlobal=%d\n", cr3, pVM->pgm.s.GCPhysCR3, fGlobal));

    /*
     * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
     */
    int rc = VINF_SUCCESS;
    RTGCPHYS GCPhysCR3;
    if (    pVM->pgm.s.enmGuestMode == PGMMODE_PAE
        ||  pVM->pgm.s.enmGuestMode == PGMMODE_PAE_NX
        ||  pVM->pgm.s.enmGuestMode == PGMMODE_AMD64
        ||  pVM->pgm.s.enmGuestMode == PGMMODE_AMD64_NX)
        GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
    else
        GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
    if (pVM->pgm.s.GCPhysCR3 != GCPhysCR3)
    {
        pVM->pgm.s.GCPhysCR3 = GCPhysCR3;
        rc = PGM_GST_PFN(MapCR3, pVM)(pVM, GCPhysCR3);
        if (VBOX_SUCCESS(rc) && !pVM->pgm.s.fMappingsFixed)
        {
            pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
            rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, GCPhysCR3);
        }
        if (fGlobal)
            STAM_COUNTER_INC(&pVM->pgm.s.StatFlushTLBNewCR3Global);
        else
            STAM_COUNTER_INC(&pVM->pgm.s.StatFlushTLBNewCR3);
    }
    else
    {
        /*
         * Check if we have a pending update of the CR3 monitoring.
         */
        if (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
        {
            pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
            Assert(!pVM->pgm.s.fMappingsFixed);
            rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, GCPhysCR3);
        }
        if (fGlobal)
            STAM_COUNTER_INC(&pVM->pgm.s.StatFlushTLBSameCR3Global);
        else
            STAM_COUNTER_INC(&pVM->pgm.s.StatFlushTLBSameCR3);
    }

    STAM_PROFILE_STOP(&pVM->pgm.s.StatFlushTLB, a);
    return rc;
}
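
/*
 * Reference for the CR3 masking above (architectural values):
 *
 *     X86_CR3_PAGE_MASK       0xfffff000   32-bit paging: the PD is 4KB aligned
 *     X86_CR3_PAE_PAGE_MASK   0xffffffe0   PAE: the PDPT is 32-byte aligned
 *
 * E.g. under PAE a guest cr3 of 0x12345678 yields GCPhysCR3 = 0x12345660.
 */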


/**
 * Synchronize the paging structures.
 *
 * This function is called in response to the VM_FF_PGM_SYNC_CR3 and
 * VM_FF_PGM_SYNC_CR3_NON_GLOBAL force action flags, which are set
 * in several places, most importantly whenever the CR3 is loaded.
 *
 * @returns VBox status code.
 * @param   pVM         The virtual machine.
 * @param   cr0         Guest context CR0 register
 * @param   cr3         Guest context CR3 register
 * @param   cr4         Guest context CR4 register
 * @param   fGlobal     Including global page directories or not
 */
PGMDECL(int) PGMSyncCR3(PVM pVM, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
{
    /*
     * We might be called when we shouldn't.
     *
     * The mode switching will ensure that the PD is resynced
     * after every mode switch. So, if we find ourselves here
     * when in protected or real mode we can safely disable the
     * FF and return immediately.
     */
    if (pVM->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
    {
        Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
        VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3);
        VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
        return VINF_SUCCESS;
    }

    /* If global pages are not supported, then all flushes are global */
    if (!(cr4 & X86_CR4_PGE))
        fGlobal = true;
    LogFlow(("PGMSyncCR3: cr0=%VX64 cr3=%VX64 cr4=%VX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
             VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3), VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL)));

    /*
     * Let the 'Bth' function do the work and we'll just keep track of the flags.
     */
    STAM_PROFILE_START(&pVM->pgm.s.CTXMID(Stat,SyncCR3), a);
    int rc = PGM_BTH_PFN(SyncCR3, pVM)(pVM, cr0, cr3, cr4, fGlobal);
    STAM_PROFILE_STOP(&pVM->pgm.s.CTXMID(Stat,SyncCR3), a);
    AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || VBOX_FAILURE(rc), ("rc=%Vrc\n", rc));
    if (rc == VINF_SUCCESS)
    {
        if (!(pVM->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
        {
            VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3);
            VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
        }

        /*
         * Check if we have a pending update of the CR3 monitoring.
         */
        if (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
        {
            pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
            Assert(!pVM->pgm.s.fMappingsFixed);
            Assert(pVM->pgm.s.GCPhysCR3 == pVM->pgm.s.GCPhysGstCR3Monitored);
            rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, pVM->pgm.s.GCPhysCR3);
        }
    }

    /*
     * Now flush the CR3 (guest context).
     */
    if (rc == VINF_SUCCESS)
        PGM_INVL_GUEST_TLBS();
    return rc;
}


/**
 * Called whenever CR0 or CR4 changes in a way which may affect the paging mode.
 *
 * @returns VBox status code fit for scheduling in GC and R0.
 * @retval  VINF_SUCCESS if there was no change, or it was successfully dealt with.
 * @retval  VINF_PGM_CHANGE_MODE if we're in GC or R0 and the mode changes.
 * @param   pVM     VM handle.
 * @param   cr0     The new cr0.
 * @param   cr4     The new cr4.
 * @param   efer    The new extended feature enable register.
 */
PGMDECL(int) PGMChangeMode(PVM pVM, uint64_t cr0, uint64_t cr4, uint64_t efer)
{
    PGMMODE enmGuestMode;

    /*
     * Calc the new guest mode.
     */
    if (!(cr0 & X86_CR0_PE))
        enmGuestMode = PGMMODE_REAL;
    else if (!(cr0 & X86_CR0_PG))
        enmGuestMode = PGMMODE_PROTECTED;
    else if (!(cr4 & X86_CR4_PAE))
        enmGuestMode = PGMMODE_32_BIT;
    else if (!(efer & MSR_K6_EFER_LME))
    {
        if (!(efer & MSR_K6_EFER_NXE))
            enmGuestMode = PGMMODE_PAE;
        else
            enmGuestMode = PGMMODE_PAE_NX;
    }
    else
    {
        if (!(efer & MSR_K6_EFER_NXE))
            enmGuestMode = PGMMODE_AMD64;
        else
            enmGuestMode = PGMMODE_AMD64_NX;
    }

    /*
     * Did it change?
     */
    if (pVM->pgm.s.enmGuestMode == enmGuestMode)
        return VINF_SUCCESS;
#ifdef IN_RING3
    return pgmR3ChangeMode(pVM, enmGuestMode);
#else
    Log(("PGMChangeMode: returns VINF_PGM_CHANGE_MODE.\n"));
    return VINF_PGM_CHANGE_MODE;
#endif
}
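
/*
 * Summary of the mode computation above (architectural control bits):
 *
 *     CR0.PE  CR0.PG  CR4.PAE  EFER.LME  EFER.NXE  ->  enmGuestMode
 *       0       -       -        -         -           PGMMODE_REAL
 *       1       0       -        -         -           PGMMODE_PROTECTED
 *       1       1       0        -         -           PGMMODE_32_BIT
 *       1       1       1        0         0           PGMMODE_PAE
 *       1       1       1        0         1           PGMMODE_PAE_NX
 *       1       1       1        1         0           PGMMODE_AMD64
 *       1       1       1        1         1           PGMMODE_AMD64_NX
 */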


/**
 * Gets the current guest paging mode.
 *
 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
 *
 * @returns The current paging mode.
 * @param   pVM     The VM handle.
 */
PGMDECL(PGMMODE) PGMGetGuestMode(PVM pVM)
{
    return pVM->pgm.s.enmGuestMode;
}


/**
 * Gets the current shadow paging mode.
 *
 * @returns The current paging mode.
 * @param   pVM     The VM handle.
 */
PGMDECL(PGMMODE) PGMGetShadowMode(PVM pVM)
{
    return pVM->pgm.s.enmShadowMode;
}


/**
 * Gets the current host paging mode.
 *
 * @returns The current paging mode.
 * @param   pVM     The VM handle.
 */
PGMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
{
    switch (pVM->pgm.s.enmHostMode)
    {
        case SUPPAGINGMODE_32_BIT:
        case SUPPAGINGMODE_32_BIT_GLOBAL:
            return PGMMODE_32_BIT;

        case SUPPAGINGMODE_PAE:
        case SUPPAGINGMODE_PAE_GLOBAL:
            return PGMMODE_PAE;

        case SUPPAGINGMODE_PAE_NX:
        case SUPPAGINGMODE_PAE_GLOBAL_NX:
            return PGMMODE_PAE_NX;

        case SUPPAGINGMODE_AMD64:
        case SUPPAGINGMODE_AMD64_GLOBAL:
            return PGMMODE_AMD64;

        case SUPPAGINGMODE_AMD64_NX:
        case SUPPAGINGMODE_AMD64_GLOBAL_NX:
            return PGMMODE_AMD64_NX;

        default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
    }

    return PGMMODE_INVALID;
}


/**
 * Get mode name.
 *
 * @returns read-only name string.
 * @param   enmMode     The mode which name is desired.
 */
PGMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
{
    switch (enmMode)
    {
        case PGMMODE_REAL:      return "real";
        case PGMMODE_PROTECTED: return "protected";
        case PGMMODE_32_BIT:    return "32-bit";
        case PGMMODE_PAE:       return "PAE";
        case PGMMODE_PAE_NX:    return "PAE+NX";
        case PGMMODE_AMD64:     return "AMD64";
        case PGMMODE_AMD64_NX:  return "AMD64+NX";
        default:                return "unknown mode value";
    }
}


/**
 * Acquire the PGM lock.
 *
 * @returns VBox status code
 * @param   pVM     The VM to operate on.
 */
int pgmLock(PVM pVM)
{
    int rc = PDMCritSectEnter(&pVM->pgm.s.CritSect, VERR_SEM_BUSY);
#ifdef IN_GC
    if (rc == VERR_SEM_BUSY)
        rc = VMMGCCallHost(pVM, VMMCALLHOST_PGM_LOCK, 0);
#elif defined(IN_RING0)
    if (rc == VERR_SEM_BUSY)
        rc = VMMR0CallHost(pVM, VMMCALLHOST_PGM_LOCK, 0);
#endif
    AssertRC(rc);
    return rc;
}
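
/*
 * The VERR_SEM_BUSY fallback above is the usual raw-mode/ring-0 pattern:
 * those contexts must not block, so a contended lock is bounced to ring-3
 * via the call-host interface, which can sleep on the critical section
 * and then resume the interrupted operation.
 */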


/**
 * Release the PGM lock.
 *
 * @param   pVM     The VM to operate on.
 */
void pgmUnlock(PVM pVM)
{
    PDMCritSectLeave(&pVM->pgm.s.CritSect);
}


#ifdef VBOX_STRICT

/**
 * Asserts that there are no mapping conflicts.
 *
 * @returns Number of conflicts.
 * @param   pVM     The VM Handle.
 */
PGMDECL(unsigned) PGMAssertNoMappingConflicts(PVM pVM)
{
    unsigned cErrors = 0;

    /*
     * Check for mapping conflicts.
     */
    for (PPGMMAPPING pMapping = CTXALLSUFF(pVM->pgm.s.pMappings);
         pMapping;
         pMapping = CTXALLSUFF(pMapping->pNext))
    {
        /** @todo This is slow and should be optimized, but since it's just assertions I don't care now. */
        for (RTGCUINTPTR GCPtr = (RTGCUINTPTR)pMapping->GCPtr;
             GCPtr <= (RTGCUINTPTR)pMapping->GCPtrLast;
             GCPtr += PAGE_SIZE)
        {
            int rc = PGMGstGetPage(pVM, (RTGCPTR)GCPtr, NULL, NULL);
            if (rc != VERR_PAGE_TABLE_NOT_PRESENT)
            {
                AssertMsgFailed(("Conflict at %VGv with %s\n", GCPtr, HCSTRING(pMapping->pszDesc)));
                cErrors++;
                break;
            }
        }
    }

    return cErrors;
}


/**
 * Asserts that everything related to the guest CR3 is correctly shadowed.
 *
 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
 * and assert the correctness of the guest CR3 mapping before asserting that the
 * shadow page tables are in sync with the guest page tables.
 *
 * @returns Number of conflicts.
 * @param   pVM     The VM Handle.
 * @param   cr3     The current guest CR3 register value.
 * @param   cr4     The current guest CR4 register value.
 */
PGMDECL(unsigned) PGMAssertCR3(PVM pVM, uint64_t cr3, uint64_t cr4)
{
    STAM_PROFILE_START(&pVM->pgm.s.CTXMID(Stat,SyncCR3), a);
    unsigned cErrors = PGM_BTH_PFN(AssertCR3, pVM)(pVM, cr3, cr4, 0, ~(RTGCUINTPTR)0);
    STAM_PROFILE_STOP(&pVM->pgm.s.CTXMID(Stat,SyncCR3), a);
    return cErrors;
}

#endif /* VBOX_STRICT */