VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp@8984

Last change on this file since 8984 was 8965, checked in by vboxsync, 17 years ago

Nested paging updates

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id

File size: 40.9 KB
/* $Id: PGMAll.cpp 8965 2008-05-20 15:41:55Z vboxsync $ */
/** @file
 * PGM - Page Manager and Monitor - All context code.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#include <VBox/pgm.h>
#include <VBox/cpum.h>
#include <VBox/selm.h>
#include <VBox/iom.h>
#include <VBox/sup.h>
#include <VBox/mm.h>
#include <VBox/stam.h>
#include <VBox/csam.h>
#include <VBox/patm.h>
#include <VBox/trpm.h>
#include <VBox/rem.h>
#include <VBox/em.h>
#include <VBox/hwaccm.h>
#include "PGMInternal.h"
#include <VBox/vm.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/string.h>
#include <VBox/log.h>
#include <VBox/param.h>
#include <VBox/err.h>


/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
/**
 * State structure for PGM_GST_NAME(HandlerVirtualUpdate) that's
 * passed to PGM_GST_NAME(VirtHandlerUpdateOne) during enumeration.
 */
typedef struct PGMHVUSTATE
{
    /** The VM handle. */
    PVM         pVM;
    /** The todo flags. */
    RTUINT      fTodo;
    /** The CR4 register value. */
    uint32_t    cr4;
} PGMHVUSTATE, *PPGMHVUSTATE;


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/

#if 1 ///@todo ndef RT_ARCH_AMD64
/*
 * Shadow - 32-bit mode
 */
#define PGM_SHW_TYPE                PGM_TYPE_32BIT
#define PGM_SHW_NAME(name)          PGM_SHW_NAME_32BIT(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE                PGM_TYPE_REAL
#define PGM_GST_NAME(name)          PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_32BIT_REAL(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_32BIT_PT_FOR_PHYS
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE                PGM_TYPE_PROT
#define PGM_GST_NAME(name)          PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_32BIT_PROT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_32BIT_PT_FOR_PHYS
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode */
#define PGM_GST_TYPE                PGM_TYPE_32BIT
#define PGM_GST_NAME(name)          PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_32BIT_32BIT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG  PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME
#endif /* !RT_ARCH_AMD64 */


/*
 * Shadow - PAE mode
 */
#define PGM_SHW_TYPE                PGM_TYPE_PAE
#define PGM_SHW_NAME(name)          PGM_SHW_NAME_PAE(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_PAE_REAL(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE                PGM_TYPE_REAL
#define PGM_GST_NAME(name)          PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_PAE_REAL(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_PHYS
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE                PGM_TYPE_PROT
#define PGM_GST_NAME(name)          PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_PAE_PROT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_PHYS
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode */
#define PGM_GST_TYPE                PGM_TYPE_32BIT
#define PGM_GST_NAME(name)          PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_PAE_32BIT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG  PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME


/* Guest - PAE mode */
#define PGM_GST_TYPE                PGM_TYPE_PAE
#define PGM_GST_NAME(name)          PGM_GST_NAME_PAE(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_PAE_PAE(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_PAE_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG  PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME


#ifndef IN_GC /* AMD64 implies VT-x/AMD-V */
/*
 * Shadow - AMD64 mode
 */
#define PGM_SHW_TYPE                PGM_TYPE_AMD64
#define PGM_SHW_NAME(name)          PGM_SHW_NAME_AMD64(name)
#include "PGMAllShw.h"

/* Guest - AMD64 mode */
#define PGM_GST_TYPE                PGM_TYPE_AMD64
#define PGM_GST_NAME(name)          PGM_GST_NAME_AMD64(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_AMD64_AMD64(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_PAE_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG  PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME
#endif

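/*
 * Editor's note (not part of the original file): the blocks above compile
 * PGMAllShw.h / PGMAllGst.h / PGMAllBth.h once per shadow+guest mode pair,
 * mangling function names via token pasting. A sketch of the idea, assuming
 * definitions along the lines of those in PGMInternal.h:
 *
 *     #define PGM_BTH_NAME_PAE_32BIT(name)   pgmBthPae32Bit##name
 *
 * so a template function PGM_BTH_NAME(SyncCR3) in PGMAllBth.h compiles to
 * pgmBthPae32BitSyncCR3 in the PAE-shadow/32-bit-guest instantiation, and
 * PGM_BTH_PFN(SyncCR3, pVM) later dispatches to the instance matching the
 * current mode pair.
 */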

/**
 * #PF Handler.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         VM Handle.
 * @param   uErr        The trap error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address.
 */
PGMDECL(int) PGMTrap0eHandler(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
{
    LogFlow(("PGMTrap0eHandler: uErr=%#x pvFault=%VGv eip=%VGv\n", uErr, pvFault, pRegFrame->eip));
    STAM_PROFILE_START(&pVM->pgm.s.StatGCTrap0e, a);
    STAM_STATS({ pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution) = NULL; } );


#ifdef VBOX_WITH_STATISTICS
    /*
     * Error code stats.
     */
    if (uErr & X86_TRAP_PF_US)
    {
        if (!(uErr & X86_TRAP_PF_P))
        {
            if (uErr & X86_TRAP_PF_RW)
                STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUSNotPresentWrite);
            else
                STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUSNotPresentRead);
        }
        else if (uErr & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUSWrite);
        else if (uErr & X86_TRAP_PF_RSVD)
            STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUSReserved);
        else if (uErr & X86_TRAP_PF_ID)
            STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUSNXE);
        else
            STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUSRead);
    }
    else
    {   /* Supervisor */
        if (!(uErr & X86_TRAP_PF_P))
        {
            if (uErr & X86_TRAP_PF_RW)
                STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eSVNotPresentWrite);
            else
                STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eSVNotPresentRead);
        }
        else if (uErr & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eSVWrite);
        else if (uErr & X86_TRAP_PF_ID)
            STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eSNXE);
        else if (uErr & X86_TRAP_PF_RSVD)
            STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eSVReserved);
    }
#endif

    /*
     * Call the worker.
     */
    int rc = PGM_BTH_PFN(Trap0eHandler, pVM)(pVM, uErr, pRegFrame, pvFault);
    if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
        rc = VINF_SUCCESS;
    STAM_STATS({ if (!pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution))
                    pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatTrap0eMisc; });
    STAM_PROFILE_STOP_EX(&pVM->pgm.s.StatGCTrap0e, pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution), a);
    return rc;
}

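/*
 * Editor's sketch (not part of the original file): the statistics block in
 * PGMTrap0eHandler branches on the architectural #PF error code bits. A
 * hypothetical helper showing the same decoding with the X86_TRAP_PF_* flags:
 */
#if 0 /* illustrative only */
static void pgmExampleDecodePfErr(RTGCUINT uErr)
{
    bool const fUser     = !!(uErr & X86_TRAP_PF_US);   /* user vs supervisor access */
    bool const fPresent  = !!(uErr & X86_TRAP_PF_P);    /* protection violation vs not-present */
    bool const fWrite    = !!(uErr & X86_TRAP_PF_RW);   /* write vs read */
    bool const fReserved = !!(uErr & X86_TRAP_PF_RSVD); /* reserved bit set in a paging entry */
    bool const fExec     = !!(uErr & X86_TRAP_PF_ID);   /* instruction fetch (NX) */
    Log(("#PF: %s %s %s%s%s\n", fUser ? "user" : "supervisor", fWrite ? "write" : "read",
         fPresent ? "protection" : "not-present", fReserved ? " rsvd" : "", fExec ? " exec" : ""));
}
#endif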
/**
 * Prefetch a page.
 *
 * Typically used to sync commonly used pages before entering raw mode
 * after a CR3 reload.
 *
 * @returns VBox status code suitable for scheduling.
 * @retval  VINF_SUCCESS on success.
 * @retval  VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
 * @param   pVM         VM handle.
 * @param   GCPtrPage   Page to prefetch.
 */
PGMDECL(int) PGMPrefetchPage(PVM pVM, RTGCPTR GCPtrPage)
{
    STAM_PROFILE_START(&pVM->pgm.s.StatHCPrefetch, a);
    int rc = PGM_BTH_PFN(PrefetchPage, pVM)(pVM, (RTGCUINTPTR)GCPtrPage);
    STAM_PROFILE_STOP(&pVM->pgm.s.StatHCPrefetch, a);
    AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || VBOX_FAILURE(rc), ("rc=%Vrc\n", rc));
    return rc;
}


/**
 * Gets the mapping corresponding to the specified address (if any).
 *
 * @returns Pointer to the mapping.
 * @returns NULL if not found.
 *
 * @param   pVM     The virtual machine.
 * @param   GCPtr   The guest context pointer.
 */
PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr)
{
    PPGMMAPPING pMapping = CTXALLSUFF(pVM->pgm.s.pMappings);
    while (pMapping)
    {
        if ((uintptr_t)GCPtr < (uintptr_t)pMapping->GCPtr)
            break;
        if ((uintptr_t)GCPtr - (uintptr_t)pMapping->GCPtr < pMapping->cb)
        {
            STAM_COUNTER_INC(&pVM->pgm.s.StatGCSyncPTConflict);
            return pMapping;
        }
        pMapping = CTXALLSUFF(pMapping->pNext);
    }
    return NULL;
}


/**
 * Verifies a range of pages for read or write access.
 *
 * Only checks the guest's page tables.
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   Addr        Guest virtual address to check
 * @param   cbSize      Access size
 * @param   fAccess     Access type (r/w, user/supervisor (X86_PTE_*))
 */
PGMDECL(int) PGMIsValidAccess(PVM pVM, RTGCUINTPTR Addr, uint32_t cbSize, uint32_t fAccess)
{
    /*
     * Validate input.
     */
    if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
    {
        AssertMsgFailed(("PGMIsValidAccess: invalid access type %08x\n", fAccess));
        return VERR_INVALID_PARAMETER;
    }

    uint64_t fPage;
    int rc = PGMGstGetPage(pVM, (RTGCPTR)Addr, &fPage, NULL);
    if (VBOX_FAILURE(rc))
    {
        Log(("PGMIsValidAccess: access violation for %VGv rc=%d\n", Addr, rc));
        return VINF_EM_RAW_GUEST_TRAP;
    }

    /*
     * Check if the access would cause a page fault.
     *
     * Note that hypervisor page directories are not present in the guest's tables, so this check
     * is sufficient.
     */
    bool fWrite = !!(fAccess & X86_PTE_RW);
    bool fUser  = !!(fAccess & X86_PTE_US);
    if (    !(fPage & X86_PTE_P)
        ||  (fWrite && !(fPage & X86_PTE_RW))
        ||  (fUser  && !(fPage & X86_PTE_US)) )
    {
        Log(("PGMIsValidAccess: access violation for %VGv attr %#llx vs %d:%d\n", Addr, fPage, fWrite, fUser));
        return VINF_EM_RAW_GUEST_TRAP;
    }
    if (    VBOX_SUCCESS(rc)
        &&  PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize))
        return PGMIsValidAccess(pVM, Addr + PAGE_SIZE, (cbSize > PAGE_SIZE) ? cbSize - PAGE_SIZE : 1, fAccess);
    return rc;
}

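/*
 * Editor's note (not part of the original file): the tail recursion in
 * PGMIsValidAccess handles ranges straddling a page boundary. For example, a
 * 5-byte access at 0x1ffe touches the pages at 0x1000 and 0x2000: the first
 * page is checked directly, then the function recurses with Addr + PAGE_SIZE
 * and the remaining size (clamped to at least 1 byte) to check the next page.
 */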

/**
 * Verifies a range of pages for read or write access.
 *
 * Supports handling of pages marked for dirty bit tracking and CSAM.
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   Addr        Guest virtual address to check
 * @param   cbSize      Access size
 * @param   fAccess     Access type (r/w, user/supervisor (X86_PTE_*))
 */
PGMDECL(int) PGMVerifyAccess(PVM pVM, RTGCUINTPTR Addr, uint32_t cbSize, uint32_t fAccess)
{
    /*
     * Validate input.
     */
    if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
    {
        AssertMsgFailed(("PGMVerifyAccess: invalid access type %08x\n", fAccess));
        return VERR_INVALID_PARAMETER;
    }

    uint64_t fPageGst;
    int rc = PGMGstGetPage(pVM, (RTGCPTR)Addr, &fPageGst, NULL);
    if (VBOX_FAILURE(rc))
    {
        Log(("PGMVerifyAccess: access violation for %VGv rc=%d\n", Addr, rc));
        return VINF_EM_RAW_GUEST_TRAP;
    }

    /*
     * Check if the access would cause a page fault.
     *
     * Note that hypervisor page directories are not present in the guest's tables, so this check
     * is sufficient.
     */
    const bool fWrite = !!(fAccess & X86_PTE_RW);
    const bool fUser  = !!(fAccess & X86_PTE_US);
    if (    !(fPageGst & X86_PTE_P)
        ||  (fWrite && !(fPageGst & X86_PTE_RW))
        ||  (fUser  && !(fPageGst & X86_PTE_US)) )
    {
        Log(("PGMVerifyAccess: access violation for %VGv attr %#llx vs %d:%d\n", Addr, fPageGst, fWrite, fUser));
        return VINF_EM_RAW_GUEST_TRAP;
    }

    /*
     * Next step is to verify if we protected this page for dirty bit tracking or for CSAM scanning.
     */
    rc = PGMShwGetPage(pVM, (RTGCPTR)Addr, NULL, NULL);
    if (    rc == VERR_PAGE_NOT_PRESENT
        ||  rc == VERR_PAGE_TABLE_NOT_PRESENT)
    {
        /*
         * Page is not present in our page tables.
         * Try to sync it!
         */
        Assert(X86_TRAP_PF_RW == X86_PTE_RW && X86_TRAP_PF_US == X86_PTE_US);
        uint32_t uErr = fAccess & (X86_TRAP_PF_RW | X86_TRAP_PF_US);
        rc = PGM_BTH_PFN(VerifyAccessSyncPage, pVM)(pVM, Addr, fPageGst, uErr);
        if (rc != VINF_SUCCESS)
            return rc;
    }
    else
        AssertMsg(rc == VINF_SUCCESS, ("PGMShwGetPage %VGv failed with %Vrc\n", Addr, rc));

#if 0 /* def VBOX_STRICT; triggers too often now */
    /*
     * This check is a bit paranoid, but useful.
     */
    /** @note this will assert when writing to monitored pages (a bit annoying actually) */
    uint64_t fPageShw;
    rc = PGMShwGetPage(pVM, (RTGCPTR)Addr, &fPageShw, NULL);
    if (    (rc == VERR_PAGE_NOT_PRESENT || VBOX_FAILURE(rc))
        ||  (fWrite && !(fPageShw & X86_PTE_RW))
        ||  (fUser  && !(fPageShw & X86_PTE_US)) )
    {
        AssertMsgFailed(("Unexpected access violation for %VGv! rc=%Vrc write=%d user=%d\n",
                         Addr, rc, fWrite && !(fPageShw & X86_PTE_RW), fUser && !(fPageShw & X86_PTE_US)));
        return VINF_EM_RAW_GUEST_TRAP;
    }
#endif

    if (    VBOX_SUCCESS(rc)
        &&  (   PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize - 1)
             || Addr + cbSize < Addr))
    {
        /* Don't recursively call PGMVerifyAccess as we might run out of stack. */
        for (;;)
        {
            Addr += PAGE_SIZE;
            if (cbSize > PAGE_SIZE)
                cbSize -= PAGE_SIZE;
            else
                cbSize = 1;
            rc = PGMVerifyAccess(pVM, Addr, 1, fAccess);
            if (rc != VINF_SUCCESS)
                break;
            if (PAGE_ADDRESS(Addr) == PAGE_ADDRESS(Addr + cbSize - 1))
                break;
        }
    }
    return rc;
}

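/*
 * Editor's sketch (not part of the original file): a hypothetical caller
 * using PGMVerifyAccess before emulating a guest write, as an instruction
 * interpreter might. VINF_EM_RAW_GUEST_TRAP means the access would fault in
 * the guest, so the #PF should be forwarded rather than the write emulated.
 */
#if 0 /* illustrative only */
static int pgmExampleCheckGuestWrite(PVM pVM, RTGCUINTPTR GCPtrDst, uint32_t cb)
{
    /* Request a user-mode write check; the range may cross page boundaries. */
    int rc = PGMVerifyAccess(pVM, GCPtrDst, cb, X86_PTE_RW | X86_PTE_US);
    if (rc == VINF_SUCCESS)
        return VINF_SUCCESS;        /* safe to emulate the write */
    return rc;                      /* guest trap or scheduling status */
}
#endif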

#ifndef IN_GC
/**
 * Emulation of the invlpg instruction (HC only actually).
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   GCPtrPage   Page to invalidate.
 * @remark  ASSUMES the page table entry or page directory is
 *          valid. Fairly safe, but there could be edge cases!
 * @todo    Flush page or page directory only if necessary!
 */
PGMDECL(int) PGMInvalidatePage(PVM pVM, RTGCPTR GCPtrPage)
{
    int rc;

    LogFlow(("PGMInvalidatePage: GCPtrPage=%VGv\n", GCPtrPage));

    /** @todo merge PGMGCInvalidatePage with this one */

#ifndef IN_RING3
    /*
     * Notify the recompiler so it can record this instruction.
     * Failure happens when it's out of space. We'll return to HC in that case.
     */
    rc = REMNotifyInvalidatePage(pVM, GCPtrPage);
    if (VBOX_FAILURE(rc))
        return rc;
#endif

    STAM_PROFILE_START(&CTXMID(pVM->pgm.s.Stat,InvalidatePage), a);
    rc = PGM_BTH_PFN(InvalidatePage, pVM)(pVM, GCPtrPage);
    STAM_PROFILE_STOP(&CTXMID(pVM->pgm.s.Stat,InvalidatePage), a);

#ifndef IN_RING0
    /*
     * Check if we have a pending update of the CR3 monitoring.
     */
    if (    VBOX_SUCCESS(rc)
        &&  (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3))
    {
        pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
        Assert(!pVM->pgm.s.fMappingsFixed);
        Assert(pVM->pgm.s.GCPhysCR3 == pVM->pgm.s.GCPhysGstCR3Monitored);
        rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, pVM->pgm.s.GCPhysCR3);
    }
#endif

#ifdef IN_RING3
    /*
     * Inform CSAM about the flush.
     */
    /** @note this is to check if monitored pages have been changed; when we implement callbacks for virtual handlers, this is no longer required. */
    CSAMR3FlushPage(pVM, GCPtrPage);
#endif
    return rc;
}
#endif

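/*
 * Editor's sketch (not part of the original file): how an INVLPG emulation
 * might funnel into PGMInvalidatePage above; the operand is a linear address
 * whose shadow paging entry (and, via the REM notification inside the call,
 * the recompiler's view of it) must be dropped. Hypothetical caller.
 */
#if 0 /* illustrative only */
static int pgmExampleEmulateInvlpg(PVM pVM, RTGCPTR GCPtrOperand)
{
    return PGMInvalidatePage(pVM, GCPtrOperand);
}
#endif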

/**
 * Executes an instruction using the interpreter.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         VM handle.
 * @param   pRegFrame   Register frame.
 * @param   pvFault     Fault address.
 */
PGMDECL(int) PGMInterpretInstruction(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
{
    uint32_t cb;
    int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);
    if (rc == VERR_EM_INTERPRETER)
        rc = VINF_EM_RAW_EMULATE_INSTR;
    if (rc != VINF_SUCCESS)
        Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%VGv)\n", rc, pvFault));
    return rc;
}


/**
 * Gets effective page information (from the VMM page directory).
 *
 * @returns VBox status.
 * @param   pVM         VM Handle.
 * @param   GCPtr       Guest Context virtual address of the page.
 * @param   pfFlags     Where to store the flags. These are X86_PTE_*.
 * @param   pHCPhys     Where to store the HC physical address of the page.
 *                      This is page aligned.
 * @remark  You should use PGMMapGetPage() for pages in a mapping.
 */
PGMDECL(int) PGMShwGetPage(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
{
    return PGM_SHW_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtr, pfFlags, pHCPhys);
}


/**
 * Sets (replaces) the page flags for a range of pages in the shadow context.
 *
 * @returns VBox status.
 * @param   pVM         VM handle.
 * @param   GCPtr       The address of the first page.
 * @param   cb          The size of the range in bytes.
 * @param   fFlags      Page flags X86_PTE_*, excluding the page mask of course.
 * @remark  You must use PGMMapSetPage() for pages in a mapping.
 */
PGMDECL(int) PGMShwSetPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
{
    return PGMShwModifyPage(pVM, GCPtr, cb, fFlags, 0);
}


/**
 * Modify page flags for a range of pages in the shadow context.
 *
 * The existing flags are ANDed with the fMask and ORed with the fFlags.
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   GCPtr       Virtual address of the first page in the range.
 * @param   cb          Size (in bytes) of the range to apply the modification to.
 * @param   fFlags      The OR mask - page flags X86_PTE_*, excluding the page mask of course.
 * @param   fMask       The AND mask - page flags X86_PTE_*.
 *                      Be very CAREFUL when ~'ing constants which could be 32-bit!
 * @remark  You must use PGMMapModifyPage() for pages in a mapping.
 */
PGMDECL(int) PGMShwModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
{
    /*
     * Validate input.
     */
    if (fFlags & X86_PTE_PAE_PG_MASK)
    {
        AssertMsgFailed(("fFlags=%#llx\n", fFlags));
        return VERR_INVALID_PARAMETER;
    }
    if (!cb)
    {
        AssertFailed();
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Align the input.
     */
    cb += (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
    cb = RT_ALIGN_Z(cb, PAGE_SIZE);
    GCPtr = (RTGCPTR)((RTGCUINTPTR)GCPtr & PAGE_BASE_GC_MASK); /** @todo this ain't necessary, right... */

    /*
     * Call worker.
     */
    return PGM_SHW_PFN(ModifyPage, pVM)(pVM, (RTGCUINTPTR)GCPtr, cb, fFlags, fMask);
}

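/*
 * Editor's sketch (not part of the original file): the fMask warning above
 * concerns ~'ing 32-bit constants. A hypothetical example write-protecting a
 * single page: the AND mask must be widened to 64 bits before the ~, or the
 * upper flag bits (e.g. the NX bit) would be cleared along with R/W.
 */
#if 0 /* illustrative only */
static int pgmExampleWriteProtectPage(PVM pVM, RTGCPTR GCPtr)
{
    uint64_t const fMask = ~(uint64_t)X86_PTE_RW;   /* keep every flag except R/W */
    return PGMShwModifyPage(pVM, GCPtr, 1 /* rounded up to a whole page */, 0 /* nothing to OR in */, fMask);
}
#endif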
#ifndef IN_GC
/**
 * Gets the SHADOW page directory pointer for the specified address. Allocates
 * backing pages in case the PDPT or page directory is missing.
 *
 * @returns VBox status.
 * @param   pVM         VM handle.
 * @param   GCPtr       The address.
 * @param   ppPD        Receives address of page directory
 */
PGMDECL(int) PGMShwGetLongModePDPtr(PVM pVM, RTGCUINTPTR64 GCPtr, PX86PDPAE *ppPD)
{
    PPGM           pPGM   = &pVM->pgm.s;
    const unsigned iPml4e = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    PPGMPOOL       pPool  = pPGM->CTXSUFF(pPool);
    PX86PML4E      pPml4e;
    PPGMPOOLPAGE   pShwPage;
    int            rc;

    pPml4e = &pPGM->pHCPaePML4->a[iPml4e];
    if (    !pPml4e->n.u1Present
        &&  !(pPml4e->u & X86_PML4E_PG_MASK))
    {
        PX86PML4E pPml4eGst = &pPGM->pGstPaePML4HC->a[iPml4e];

        Assert(!(pPml4e->u & X86_PML4E_PG_MASK));
        rc = pgmPoolAlloc(pVM, pPml4eGst->u & X86_PML4E_PG_MASK, PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT, PGMPOOL_IDX_PML4, iPml4e, &pShwPage);
        if (rc == VERR_PGM_POOL_FLUSHED)
            return VINF_PGM_SYNC_CR3;

        AssertRCReturn(rc, rc);

        /* The PDPT was cached or created; hook it up now. */
        pPml4e->u |= pShwPage->Core.Key;
    }
    else
    {
        pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
        AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
    }

    const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
    PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
    PX86PDPE pPdpe = &pPdpt->a[iPdPt];

    if (    !pPdpe->n.u1Present
        &&  !(pPdpe->u & X86_PDPE_PG_MASK))
    {
        PX86PML4E pPml4eGst = &pPGM->pGstPaePML4HC->a[iPml4e];
        PX86PDPT  pPdptGst;
        rc = PGM_GCPHYS_2_PTR(pVM, pPml4eGst->u & X86_PML4E_PG_MASK, &pPdptGst);
        AssertRCReturn(rc, rc);

        Assert(!(pPdpe->u & X86_PDPE_PG_MASK));
        rc = pgmPoolAlloc(pVM, pPdptGst->a[iPdPt].u & X86_PDPE_PG_MASK, PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD, PGMPOOL_IDX_PDPT, iPdPt, &pShwPage);
        if (rc == VERR_PGM_POOL_FLUSHED)
            return VINF_PGM_SYNC_CR3;

        AssertRCReturn(rc, rc);

        /* The PD was cached or created; hook it up now. */
        pPdpe->u |= pShwPage->Core.Key;
    }
    else
    {
        pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
        AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
    }

    *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
    return VINF_SUCCESS;
}
#endif

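/*
 * Editor's note (not part of the original file): the index math used by the
 * long mode walk above, each level consuming 9 bits of the virtual address:
 *
 *     iPml4e = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;        - bits 47-39
 *     iPdPt  = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;  - bits 38-30
 *
 * The returned page directory is then indexed by bits 29-21, its page table
 * by bits 20-12, and the low 12 bits remain the byte offset into the page.
 */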
/**
 * Gets effective Guest OS page information.
 *
 * When GCPtr is in a big page, the function will return as if it was a normal
 * 4KB page. If the need for distinguishing between big and normal page becomes
 * necessary at a later point, a PGMGstGetPage() will be created for that
 * purpose.
 *
 * @returns VBox status.
 * @param   pVM         VM Handle.
 * @param   GCPtr       Guest Context virtual address of the page.
 * @param   pfFlags     Where to store the flags. These are X86_PTE_*, even for big pages.
 * @param   pGCPhys     Where to store the GC physical address of the page.
 *                      This is page aligned.
 */
PGMDECL(int) PGMGstGetPage(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
{
    return PGM_GST_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtr, pfFlags, pGCPhys);
}


/**
 * Checks if the page is present.
 *
 * @returns true if the page is present.
 * @returns false if the page is not present.
 * @param   pVM         The VM handle.
 * @param   GCPtr       Address within the page.
 */
PGMDECL(bool) PGMGstIsPagePresent(PVM pVM, RTGCPTR GCPtr)
{
    int rc = PGMGstGetPage(pVM, GCPtr, NULL, NULL);
    return VBOX_SUCCESS(rc);
}


/**
 * Sets (replaces) the page flags for a range of pages in the guest's tables.
 *
 * @returns VBox status.
 * @param   pVM         VM handle.
 * @param   GCPtr       The address of the first page.
 * @param   cb          The size of the range in bytes.
 * @param   fFlags      Page flags X86_PTE_*, excluding the page mask of course.
 */
PGMDECL(int) PGMGstSetPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
{
    return PGMGstModifyPage(pVM, GCPtr, cb, fFlags, 0);
}


/**
 * Modify page flags for a range of pages in the guest's tables.
 *
 * The existing flags are ANDed with the fMask and ORed with the fFlags.
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   GCPtr       Virtual address of the first page in the range.
 * @param   cb          Size (in bytes) of the range to apply the modification to.
 * @param   fFlags      The OR mask - page flags X86_PTE_*, excluding the page mask of course.
 * @param   fMask       The AND mask - page flags X86_PTE_*, excluding the page mask of course.
 *                      Be very CAREFUL when ~'ing constants which could be 32-bit!
 */
PGMDECL(int) PGMGstModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
{
    STAM_PROFILE_START(&CTXMID(pVM->pgm.s.Stat,GstModifyPage), a);

    /*
     * Validate input.
     */
    if (fFlags & X86_PTE_PAE_PG_MASK)
    {
        AssertMsgFailed(("fFlags=%#llx\n", fFlags));
        STAM_PROFILE_STOP(&CTXMID(pVM->pgm.s.Stat,GstModifyPage), a);
        return VERR_INVALID_PARAMETER;
    }

    if (!cb)
    {
        AssertFailed();
        STAM_PROFILE_STOP(&CTXMID(pVM->pgm.s.Stat,GstModifyPage), a);
        return VERR_INVALID_PARAMETER;
    }

    LogFlow(("PGMGstModifyPage %VGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));

    /*
     * Adjust input.
     */
    cb += (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
    cb = RT_ALIGN_Z(cb, PAGE_SIZE);
    GCPtr = (RTGCPTR)((RTGCUINTPTR)GCPtr & PAGE_BASE_GC_MASK);

    /*
     * Call worker.
     */
    int rc = PGM_GST_PFN(ModifyPage, pVM)(pVM, (RTGCUINTPTR)GCPtr, cb, fFlags, fMask);

    STAM_PROFILE_STOP(&CTXMID(pVM->pgm.s.Stat,GstModifyPage), a);
    return rc;
}

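/*
 * Editor's sketch (not part of the original file): the OR/AND semantics of
 * the modify APIs, i.e. new = (old & fMask) | fFlags. A hypothetical example
 * marking a guest range accessed and dirty without disturbing other flags:
 */
#if 0 /* illustrative only */
static int pgmExampleMarkAccessedDirty(PVM pVM, RTGCPTR GCPtr, size_t cb)
{
    /* All-ones AND mask keeps every existing flag; then OR in A and D. */
    return PGMGstModifyPage(pVM, GCPtr, cb, X86_PTE_A | X86_PTE_D, ~(uint64_t)0);
}
#endif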

/**
 * Gets the current CR3 register value for the shadow memory context.
 * @returns CR3 value.
 * @param   pVM     The VM handle.
 */
PGMDECL(uint32_t) PGMGetHyperCR3(PVM pVM)
{
    switch (pVM->pgm.s.enmShadowMode)
    {
        case PGMMODE_32_BIT:
            return pVM->pgm.s.HCPhys32BitPD;

        case PGMMODE_PAE:
        case PGMMODE_PAE_NX:
            return pVM->pgm.s.HCPhysPaePDPT;

        case PGMMODE_AMD64:
        case PGMMODE_AMD64_NX:
            return pVM->pgm.s.HCPhysPaePML4;

        default:
            AssertMsgFailed(("enmShadowMode=%d\n", pVM->pgm.s.enmShadowMode));
            return ~0;
    }
}


/**
 * Gets the CR3 register value for the 32-Bit shadow memory context.
 * @returns CR3 value.
 * @param   pVM     The VM handle.
 */
PGMDECL(uint32_t) PGMGetHyper32BitCR3(PVM pVM)
{
    return pVM->pgm.s.HCPhys32BitPD;
}


/**
 * Gets the CR3 register value for the PAE shadow memory context.
 * @returns CR3 value.
 * @param   pVM     The VM handle.
 */
PGMDECL(uint32_t) PGMGetHyperPaeCR3(PVM pVM)
{
    return pVM->pgm.s.HCPhysPaePDPT;
}


/**
 * Gets the CR3 register value for the AMD64 shadow memory context.
 * @returns CR3 value.
 * @param   pVM     The VM handle.
 */
PGMDECL(uint32_t) PGMGetHyperAmd64CR3(PVM pVM)
{
    return pVM->pgm.s.HCPhysPaePML4;
}


/**
 * Gets the current CR3 register value for the HC intermediate memory context.
 * @returns CR3 value.
 * @param   pVM     The VM handle.
 */
PGMDECL(uint32_t) PGMGetInterHCCR3(PVM pVM)
{
    switch (pVM->pgm.s.enmHostMode)
    {
        case SUPPAGINGMODE_32_BIT:
        case SUPPAGINGMODE_32_BIT_GLOBAL:
            return pVM->pgm.s.HCPhysInterPD;

        case SUPPAGINGMODE_PAE:
        case SUPPAGINGMODE_PAE_GLOBAL:
        case SUPPAGINGMODE_PAE_NX:
        case SUPPAGINGMODE_PAE_GLOBAL_NX:
            return pVM->pgm.s.HCPhysInterPaePDPT;

        case SUPPAGINGMODE_AMD64:
        case SUPPAGINGMODE_AMD64_GLOBAL:
        case SUPPAGINGMODE_AMD64_NX:
        case SUPPAGINGMODE_AMD64_GLOBAL_NX:
            return pVM->pgm.s.HCPhysInterPaePML4;

        default:
            AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode));
            return ~0;
    }
}


/**
 * Gets the current CR3 register value for the GC intermediate memory context.
 * @returns CR3 value.
 * @param   pVM     The VM handle.
 */
PGMDECL(uint32_t) PGMGetInterGCCR3(PVM pVM)
{
    switch (pVM->pgm.s.enmShadowMode)
    {
        case PGMMODE_32_BIT:
            return pVM->pgm.s.HCPhysInterPD;

        case PGMMODE_PAE:
        case PGMMODE_PAE_NX:
            return pVM->pgm.s.HCPhysInterPaePDPT;

        case PGMMODE_AMD64:
        case PGMMODE_AMD64_NX:
            return pVM->pgm.s.HCPhysInterPaePML4;

        default:
            AssertMsgFailed(("enmShadowMode=%d\n", pVM->pgm.s.enmShadowMode));
            return ~0;
    }
}


/**
 * Gets the CR3 register value for the 32-Bit intermediate memory context.
 * @returns CR3 value.
 * @param   pVM     The VM handle.
 */
PGMDECL(uint32_t) PGMGetInter32BitCR3(PVM pVM)
{
    return pVM->pgm.s.HCPhysInterPD;
}


/**
 * Gets the CR3 register value for the PAE intermediate memory context.
 * @returns CR3 value.
 * @param   pVM     The VM handle.
 */
PGMDECL(uint32_t) PGMGetInterPaeCR3(PVM pVM)
{
    return pVM->pgm.s.HCPhysInterPaePDPT;
}


/**
 * Gets the CR3 register value for the AMD64 intermediate memory context.
 * @returns CR3 value.
 * @param   pVM     The VM handle.
 */
PGMDECL(uint32_t) PGMGetInterAmd64CR3(PVM pVM)
{
    return pVM->pgm.s.HCPhysInterPaePML4;
}


/**
 * Performs and schedules necessary updates following a CR3 load or reload.
 *
 * This will normally involve mapping the guest PD or nPDPT.
 *
 * @returns VBox status code.
 * @retval  VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
 *          safely be ignored and overridden since the FF will be set too then.
 * @param   pVM         VM handle.
 * @param   cr3         The new cr3.
 * @param   fGlobal     Indicates whether this is a global flush or not.
 */
PGMDECL(int) PGMFlushTLB(PVM pVM, uint64_t cr3, bool fGlobal)
{
    STAM_PROFILE_START(&pVM->pgm.s.StatFlushTLB, a);

    /*
     * Always flag the necessary updates; necessary for hardware acceleration.
     */
    VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
    if (fGlobal)
        VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
    LogFlow(("PGMFlushTLB: cr3=%VX64 OldCr3=%VX64 fGlobal=%d\n", cr3, pVM->pgm.s.GCPhysCR3, fGlobal));

    /*
     * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
     */
    int rc = VINF_SUCCESS;
    RTGCPHYS GCPhysCR3;
    if (    pVM->pgm.s.enmGuestMode == PGMMODE_PAE
        ||  pVM->pgm.s.enmGuestMode == PGMMODE_PAE_NX
        ||  pVM->pgm.s.enmGuestMode == PGMMODE_AMD64
        ||  pVM->pgm.s.enmGuestMode == PGMMODE_AMD64_NX)
        GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
    else
        GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
    if (pVM->pgm.s.GCPhysCR3 != GCPhysCR3)
    {
        pVM->pgm.s.GCPhysCR3 = GCPhysCR3;
        rc = PGM_GST_PFN(MapCR3, pVM)(pVM, GCPhysCR3);
        if (VBOX_SUCCESS(rc) && !pVM->pgm.s.fMappingsFixed)
        {
            pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
            rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, GCPhysCR3);
        }
        if (fGlobal)
            STAM_COUNTER_INC(&pVM->pgm.s.StatFlushTLBNewCR3Global);
        else
            STAM_COUNTER_INC(&pVM->pgm.s.StatFlushTLBNewCR3);
    }
    else
    {
        /*
         * Check if we have a pending update of the CR3 monitoring.
         */
        if (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
        {
            pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
            Assert(!pVM->pgm.s.fMappingsFixed);
            rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, GCPhysCR3);
        }
        if (fGlobal)
            STAM_COUNTER_INC(&pVM->pgm.s.StatFlushTLBSameCR3Global);
        else
            STAM_COUNTER_INC(&pVM->pgm.s.StatFlushTLBSameCR3);
    }

    STAM_PROFILE_STOP(&pVM->pgm.s.StatFlushTLB, a);
    return rc;
}

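/*
 * Editor's sketch (not part of the original file): how a MOV CR3 emulation
 * might use PGMFlushTLB above. Whether the flush is global follows the
 * architecture: with CR4.PGE clear a CR3 write flushes global pages too.
 * Hypothetical caller and parameter names.
 */
#if 0 /* illustrative only */
static int pgmExampleMovToCr3(PVM pVM, uint64_t uNewCr3, uint64_t cr4)
{
    bool const fGlobal = !(cr4 & X86_CR4_PGE);  /* no PGE -> every flush is global */
    return PGMFlushTLB(pVM, uNewCr3, fGlobal);
}
#endif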

/**
 * Synchronize the paging structures.
 *
 * This function is called in response to the VM_FF_PGM_SYNC_CR3 and
 * VM_FF_PGM_SYNC_CR3_NON_GLOBAL force action flags. Those two flags are set
 * in several places, most importantly whenever the CR3 is loaded.
 *
 * @returns VBox status code.
 * @param   pVM         The virtual machine.
 * @param   cr0         Guest context CR0 register
 * @param   cr3         Guest context CR3 register
 * @param   cr4         Guest context CR4 register
 * @param   fGlobal     Including global page directories or not
 */
PGMDECL(int) PGMSyncCR3(PVM pVM, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
{
    /*
     * We might be called when we shouldn't.
     *
     * The mode switching will ensure that the PD is resynced
     * after every mode switch. So, if we find ourselves here
     * when in protected or real mode we can safely disable the
     * FF and return immediately.
     */
    if (pVM->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
    {
        Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
        VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3);
        VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
        return VINF_SUCCESS;
    }

    /* If global pages are not supported, then all flushes are global. */
    if (!(cr4 & X86_CR4_PGE))
        fGlobal = true;
    LogFlow(("PGMSyncCR3: cr0=%VX64 cr3=%VX64 cr4=%VX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
             VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3), VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL)));

    /*
     * Let the 'Bth' function do the work and we'll just keep track of the flags.
     */
    STAM_PROFILE_START(&pVM->pgm.s.CTXMID(Stat,SyncCR3), a);
    int rc = PGM_BTH_PFN(SyncCR3, pVM)(pVM, cr0, cr3, cr4, fGlobal);
    STAM_PROFILE_STOP(&pVM->pgm.s.CTXMID(Stat,SyncCR3), a);
    AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || VBOX_FAILURE(rc), ("rc=%Vrc\n", rc));
    if (rc == VINF_SUCCESS)
    {
        if (!(pVM->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
        {
            VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3);
            VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
        }

        /*
         * Check if we have a pending update of the CR3 monitoring.
         */
        if (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
        {
            pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
            Assert(!pVM->pgm.s.fMappingsFixed);
            Assert(pVM->pgm.s.GCPhysCR3 == pVM->pgm.s.GCPhysGstCR3Monitored);
            rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, pVM->pgm.s.GCPhysCR3);
        }
    }

    /*
     * Now flush the CR3 (guest context).
     */
    if (rc == VINF_SUCCESS)
        PGM_INVL_GUEST_TLBS();
    return rc;
}


/**
 * Called whenever CR0 or CR4 changes in a way which may affect
 * the paging mode.
 *
 * @returns VBox status code fit for scheduling in GC and R0.
 * @retval  VINF_SUCCESS if there was no change, or it was successfully dealt with.
 * @retval  VINF_PGM_CHANGE_MODE if we're in GC or R0 and the mode changes.
 * @param   pVM     VM handle.
 * @param   cr0     The new cr0.
 * @param   cr4     The new cr4.
 * @param   efer    The new extended feature enable register.
 */
PGMDECL(int) PGMChangeMode(PVM pVM, uint64_t cr0, uint64_t cr4, uint64_t efer)
{
    PGMMODE enmGuestMode;

    /*
     * Calc the new guest mode.
     */
    if (!(cr0 & X86_CR0_PE))
        enmGuestMode = PGMMODE_REAL;
    else if (!(cr0 & X86_CR0_PG))
        enmGuestMode = PGMMODE_PROTECTED;
    else if (!(cr4 & X86_CR4_PAE))
        enmGuestMode = PGMMODE_32_BIT;
    else if (!(efer & MSR_K6_EFER_LME))
    {
        if (!(efer & MSR_K6_EFER_NXE))
            enmGuestMode = PGMMODE_PAE;
        else
            enmGuestMode = PGMMODE_PAE_NX;
    }
    else
    {
        if (!(efer & MSR_K6_EFER_NXE))
            enmGuestMode = PGMMODE_AMD64;
        else
            enmGuestMode = PGMMODE_AMD64_NX;
    }

    /*
     * Did it change?
     */
    if (pVM->pgm.s.enmGuestMode == enmGuestMode)
        return VINF_SUCCESS;
#ifdef IN_RING3
    return pgmR3ChangeMode(pVM, enmGuestMode);
#else
    Log(("PGMChangeMode: returns VINF_PGM_CHANGE_MODE.\n"));
    return VINF_PGM_CHANGE_MODE;
#endif
}

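/*
 * Editor's note (not part of the original file): the mode calculation in
 * PGMChangeMode as a table (CR0.PG architecturally requires CR0.PE, and
 * EFER.LME only takes effect once paging is enabled):
 *
 *     CR0.PE  CR0.PG  CR4.PAE  EFER.LME  EFER.NXE  ->  mode
 *       0       -       -         -         -          REAL
 *       1       0       -         -         -          PROTECTED
 *       1       1       0         -         -          32_BIT
 *       1       1       1         0        0 / 1       PAE / PAE_NX
 *       1       1       1         1        0 / 1       AMD64 / AMD64_NX
 */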

/**
 * Gets the current guest paging mode.
 *
 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
 *
 * @returns The current paging mode.
 * @param   pVM     The VM handle.
 */
PGMDECL(PGMMODE) PGMGetGuestMode(PVM pVM)
{
    return pVM->pgm.s.enmGuestMode;
}


/**
 * Gets the current shadow paging mode.
 *
 * @returns The current paging mode.
 * @param   pVM     The VM handle.
 */
PGMDECL(PGMMODE) PGMGetShadowMode(PVM pVM)
{
    return pVM->pgm.s.enmShadowMode;
}

/**
 * Gets the current host paging mode.
 *
 * @returns The current paging mode.
 * @param   pVM     The VM handle.
 */
PGMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
{
    switch (pVM->pgm.s.enmHostMode)
    {
        case SUPPAGINGMODE_32_BIT:
        case SUPPAGINGMODE_32_BIT_GLOBAL:
            return PGMMODE_32_BIT;

        case SUPPAGINGMODE_PAE:
        case SUPPAGINGMODE_PAE_GLOBAL:
            return PGMMODE_PAE;

        case SUPPAGINGMODE_PAE_NX:
        case SUPPAGINGMODE_PAE_GLOBAL_NX:
            return PGMMODE_PAE_NX;

        case SUPPAGINGMODE_AMD64:
        case SUPPAGINGMODE_AMD64_GLOBAL:
            return PGMMODE_AMD64;

        case SUPPAGINGMODE_AMD64_NX:
        case SUPPAGINGMODE_AMD64_GLOBAL_NX:
            return PGMMODE_AMD64_NX;

        default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
    }

    return PGMMODE_INVALID;
}


/**
 * Get mode name.
 *
 * @returns read-only name string.
 * @param   enmMode     The mode which name is desired.
 */
PGMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
{
    switch (enmMode)
    {
        case PGMMODE_REAL:      return "real";
        case PGMMODE_PROTECTED: return "protected";
        case PGMMODE_32_BIT:    return "32-bit";
        case PGMMODE_PAE:       return "PAE";
        case PGMMODE_PAE_NX:    return "PAE+NX";
        case PGMMODE_AMD64:     return "AMD64";
        case PGMMODE_AMD64_NX:  return "AMD64+NX";
        default:                return "unknown mode value";
    }
}


/**
 * Acquire the PGM lock.
 *
 * @returns VBox status code
 * @param   pVM     The VM to operate on.
 */
int pgmLock(PVM pVM)
{
    int rc = PDMCritSectEnter(&pVM->pgm.s.CritSect, VERR_SEM_BUSY);
#ifdef IN_GC
    if (rc == VERR_SEM_BUSY)
        rc = VMMGCCallHost(pVM, VMMCALLHOST_PGM_LOCK, 0);
#elif defined(IN_RING0)
    if (rc == VERR_SEM_BUSY)
        rc = VMMR0CallHost(pVM, VMMCALLHOST_PGM_LOCK, 0);
#endif
    AssertRC(rc);
    return rc;
}


/**
 * Release the PGM lock.
 *
 * @param   pVM     The VM to operate on.
 */
void pgmUnlock(PVM pVM)
{
    PDMCritSectLeave(&pVM->pgm.s.CritSect);
}

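/*
 * Editor's sketch (not part of the original file): the intended pairing of
 * pgmLock/pgmUnlock; note that pgmLock may ring-switch to ring-3 via the
 * call-host path to acquire the critical section when used in GC or R0.
 * Hypothetical caller.
 */
#if 0 /* illustrative only */
static int pgmExampleLockedOperation(PVM pVM)
{
    int rc = pgmLock(pVM);
    if (VBOX_SUCCESS(rc))
    {
        /* ... touch PGM state requiring the lock ... */
        pgmUnlock(pVM);
    }
    return rc;
}
#endif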

#ifdef VBOX_STRICT

/**
 * Asserts that there are no mapping conflicts.
 *
 * @returns Number of conflicts.
 * @param   pVM     The VM Handle.
 */
PGMDECL(unsigned) PGMAssertNoMappingConflicts(PVM pVM)
{
    unsigned cErrors = 0;

    /*
     * Check for mapping conflicts.
     */
    for (PPGMMAPPING pMapping = CTXALLSUFF(pVM->pgm.s.pMappings);
         pMapping;
         pMapping = CTXALLSUFF(pMapping->pNext))
    {
        /** @todo This is slow and should be optimized, but since it's just assertions I don't care now. */
        for (RTGCUINTPTR GCPtr = (RTGCUINTPTR)pMapping->GCPtr;
             GCPtr <= (RTGCUINTPTR)pMapping->GCPtrLast;
             GCPtr += PAGE_SIZE)
        {
            int rc = PGMGstGetPage(pVM, (RTGCPTR)GCPtr, NULL, NULL);
            if (rc != VERR_PAGE_TABLE_NOT_PRESENT)
            {
                AssertMsgFailed(("Conflict at %VGv with %s\n", GCPtr, HCSTRING(pMapping->pszDesc)));
                cErrors++;
                break;
            }
        }
    }

    return cErrors;
}


/**
 * Asserts that everything related to the guest CR3 is correctly shadowed.
 *
 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
 * and assert the correctness of the guest CR3 mapping before asserting that the
 * shadow page tables are in sync with the guest page tables.
 *
 * @returns Number of conflicts.
 * @param   pVM     The VM Handle.
 * @param   cr3     The current guest CR3 register value.
 * @param   cr4     The current guest CR4 register value.
 */
PGMDECL(unsigned) PGMAssertCR3(PVM pVM, uint64_t cr3, uint64_t cr4)
{
    STAM_PROFILE_START(&pVM->pgm.s.CTXMID(Stat,SyncCR3), a);
    unsigned cErrors = PGM_BTH_PFN(AssertCR3, pVM)(pVM, cr3, cr4, 0, ~(RTGCUINTPTR)0);
    STAM_PROFILE_STOP(&pVM->pgm.s.CTXMID(Stat,SyncCR3), a);
    return cErrors;
}

#endif /* VBOX_STRICT */