VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp @ 7689

Last change on this file since 7689 was 7676, checked in by vboxsync on 2008-04-01:

Cleaned up.
AMD64 shadow paging is only valid with AMD64 guest paging. Other combinations removed.
Simplified paging #ifdefs.
/* $Id: PGMAll.cpp 7676 2008-04-01 09:18:10Z vboxsync $ */
/** @file
 * PGM - Page Manager and Monitor - All context code.
 */

/*
 * Copyright (C) 2006-2007 innotek GmbH
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#include <VBox/pgm.h>
#include <VBox/cpum.h>
#include <VBox/selm.h>
#include <VBox/iom.h>
#include <VBox/sup.h>
#include <VBox/mm.h>
#include <VBox/stam.h>
#include <VBox/csam.h>
#include <VBox/patm.h>
#include <VBox/trpm.h>
#include <VBox/rem.h>
#include <VBox/em.h>
#include "PGMInternal.h"
#include <VBox/vm.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/string.h>
#include <VBox/log.h>
#include <VBox/param.h>
#include <VBox/err.h>


/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
/**
 * State structure for PGM_GST_NAME(HandlerVirtualUpdate) that's
 * passed to PGM_GST_NAME(VirtHandlerUpdateOne) during enumeration.
 */
typedef struct PGMHVUSTATE
{
    /** The VM handle. */
    PVM         pVM;
    /** The todo flags. */
    RTUINT      fTodo;
    /** The CR4 register value. */
    uint32_t    cr4;
} PGMHVUSTATE, *PPGMHVUSTATE;


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/

#if 1 ///@todo ndef RT_ARCH_AMD64
/*
 * Shadow - 32-bit mode
 */
#define PGM_SHW_TYPE                PGM_TYPE_32BIT
#define PGM_SHW_NAME(name)          PGM_SHW_NAME_32BIT(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE                PGM_TYPE_REAL
#define PGM_GST_NAME(name)          PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_32BIT_REAL(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_32BIT_PT_FOR_PHYS
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE                PGM_TYPE_PROT
#define PGM_GST_NAME(name)          PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_32BIT_PROT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_32BIT_PT_FOR_PHYS
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode */
#define PGM_GST_TYPE                PGM_TYPE_32BIT
#define PGM_GST_NAME(name)          PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_32BIT_32BIT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG  PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME
#endif /* !RT_ARCH_AMD64 */


/*
 * Shadow - PAE mode
 */
#define PGM_SHW_TYPE                PGM_TYPE_PAE
#define PGM_SHW_NAME(name)          PGM_SHW_NAME_PAE(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_PAE_REAL(name)
#include "PGMAllShw.h"

/* Guest - real mode */
#define PGM_GST_TYPE                PGM_TYPE_REAL
#define PGM_GST_NAME(name)          PGM_GST_NAME_REAL(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_PAE_REAL(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_PHYS
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - protected mode */
#define PGM_GST_TYPE                PGM_TYPE_PROT
#define PGM_GST_NAME(name)          PGM_GST_NAME_PROT(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_PAE_PROT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_PHYS
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

/* Guest - 32-bit mode */
#define PGM_GST_TYPE                PGM_TYPE_32BIT
#define PGM_GST_NAME(name)          PGM_GST_NAME_32BIT(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_PAE_32BIT(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG  PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME


/* Guest - PAE mode */
#define PGM_GST_TYPE                PGM_TYPE_PAE
#define PGM_GST_NAME(name)          PGM_GST_NAME_PAE(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_PAE_PAE(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_PAE_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG  PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME


/*
 * Shadow - AMD64 mode
 */
#define PGM_SHW_TYPE                PGM_TYPE_AMD64
#define PGM_SHW_NAME(name)          PGM_SHW_NAME_AMD64(name)
#include "PGMAllShw.h"

/* Guest - AMD64 mode */
#define PGM_GST_TYPE                PGM_TYPE_AMD64
#define PGM_GST_NAME(name)          PGM_GST_NAME_AMD64(name)
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_AMD64_AMD64(name)
#define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_PAE_PT
#define BTH_PGMPOOLKIND_PT_FOR_BIG  PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
#include "PGMAllGst.h"
#include "PGMAllBth.h"
#undef BTH_PGMPOOLKIND_PT_FOR_BIG
#undef BTH_PGMPOOLKIND_PT_FOR_PT
#undef PGM_BTH_NAME
#undef PGM_GST_TYPE
#undef PGM_GST_NAME

#undef PGM_SHW_TYPE
#undef PGM_SHW_NAME

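
/*
 * Editor's illustration (not part of the original file): the repeated
 * #include blocks above are a template mechanism.  PGMAllShw.h, PGMAllGst.h
 * and PGMAllBth.h each contain one set of function bodies whose names are
 * wrapped in PGM_SHW_NAME()/PGM_GST_NAME()/PGM_BTH_NAME(), so every
 * inclusion stamps out a specialization for one shadow/guest mode pair.
 * Conceptually (the exact name mangling lives in PGMInternal.h; the
 * expansion shown here is only an assumed example):
 *
 *     #define PGM_BTH_NAME(name)  PGM_BTH_NAME_PAE_32BIT(name)
 *     // ...PGMAllBth.h then emits, e.g., the PAE-shadow/32-bit-guest SyncCR3:
 *     PGM_BTH_DECL(int, SyncCR3)(PVM pVM, ...)
 *
 * PGM_BTH_PFN() is later used to dispatch to whichever specialization
 * matches the currently active mode pair.
 */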


/**
 * \#PF Handler.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         VM Handle.
 * @param   uErr        The trap error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address.
 */
PGMDECL(int) PGMTrap0eHandler(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
{
    LogFlow(("PGMTrap0eHandler: uErr=%#x pvFault=%VGv eip=%VGv\n", uErr, pvFault, pRegFrame->eip));
    STAM_PROFILE_START(&pVM->pgm.s.StatGCTrap0e, a);
    STAM_STATS({ pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution) = NULL; } );


#ifdef VBOX_WITH_STATISTICS
    /*
     * Error code stats.
     */
    if (uErr & X86_TRAP_PF_US)
    {
        if (!(uErr & X86_TRAP_PF_P))
        {
            if (uErr & X86_TRAP_PF_RW)
                STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUSNotPresentWrite);
            else
                STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUSNotPresentRead);
        }
        else if (uErr & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUSWrite);
        else if (uErr & X86_TRAP_PF_RSVD)
            STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUSReserved);
        else if (uErr & X86_TRAP_PF_ID)
            STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUSNXE);
        else
            STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUSRead);
    }
    else
    {   /* Supervisor */
        if (!(uErr & X86_TRAP_PF_P))
        {
            if (uErr & X86_TRAP_PF_RW)
                STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eSVNotPresentWrite);
            else
                STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eSVNotPresentRead);
        }
        else if (uErr & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eSVWrite);
        else if (uErr & X86_TRAP_PF_ID)
            STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eSNXE);
        else if (uErr & X86_TRAP_PF_RSVD)
            STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eSVReserved);
    }
#endif

    /*
     * Call the worker.
     */
    int rc = PGM_BTH_PFN(Trap0eHandler, pVM)(pVM, uErr, pRegFrame, pvFault);
    if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
        rc = VINF_SUCCESS;
    STAM_STATS({ if (!pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution))
                    pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatTrap0eMisc; });
    STAM_PROFILE_STOP_EX(&pVM->pgm.s.StatGCTrap0e, pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution), a);
    return rc;
}
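
/*
 * Editor's sketch (assumed usage, not part of the original file): a trap
 * dispatcher would forward a #PF here with the CPU-provided error code and
 * the faulting address read from CR2.  The names below are illustrative only.
 */
#if 0 /* example only */
static int exampleDispatchPageFault(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCUINT uErr, RTGCPTR GCPtrFaultCR2)
{
    int rc = PGMTrap0eHandler(pVM, uErr, pRegFrame, GCPtrFaultCR2);
    /* VINF_SUCCESS means the fault was resolved (e.g. a lazily synced page);
       any other status is scheduled back to the caller/EM. */
    return rc;
}
#endif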


/**
 * Prefetch a page.
 *
 * Typically used to sync commonly used pages before entering raw mode
 * after a CR3 reload.
 *
 * @returns VBox status code suitable for scheduling.
 * @retval  VINF_SUCCESS on success.
 * @retval  VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
 * @param   pVM         VM handle.
 * @param   GCPtrPage   Page to prefetch.
 */
PGMDECL(int) PGMPrefetchPage(PVM pVM, RTGCPTR GCPtrPage)
{
    STAM_PROFILE_START(&pVM->pgm.s.StatHCPrefetch, a);
    int rc = PGM_BTH_PFN(PrefetchPage, pVM)(pVM, (RTGCUINTPTR)GCPtrPage);
    STAM_PROFILE_STOP(&pVM->pgm.s.StatHCPrefetch, a);
    AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || VBOX_FAILURE(rc), ("rc=%Vrc\n", rc));
    return rc;
}
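
/*
 * Editor's sketch (assumed, not part of the original file): prefetching a
 * small range page by page before re-entering raw mode.  A VINF_PGM_SYNC_CR3
 * return simply means a full sync is pending, so stopping early is fine.
 */
#if 0 /* example only */
static int examplePrefetchRange(PVM pVM, RTGCUINTPTR GCPtrFirst, RTGCUINTPTR GCPtrLast)
{
    int rc = VINF_SUCCESS;
    for (RTGCUINTPTR GCPtr = GCPtrFirst & PAGE_BASE_GC_MASK;
         GCPtr <= (GCPtrLast & PAGE_BASE_GC_MASK) && rc == VINF_SUCCESS;
         GCPtr += PAGE_SIZE)
        rc = PGMPrefetchPage(pVM, (RTGCPTR)GCPtr);
    return rc;
}
#endif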


/**
 * Gets the mapping corresponding to the specified address (if any).
 *
 * @returns Pointer to the mapping.
 * @returns NULL if not found.
 *
 * @param   pVM     The virtual machine.
 * @param   GCPtr   The guest context pointer.
 */
PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr)
{
    PPGMMAPPING pMapping = CTXALLSUFF(pVM->pgm.s.pMappings);
    while (pMapping)
    {
        if ((uintptr_t)GCPtr < (uintptr_t)pMapping->GCPtr)
            break;
        if ((uintptr_t)GCPtr - (uintptr_t)pMapping->GCPtr < pMapping->cb)
        {
            STAM_COUNTER_INC(&pVM->pgm.s.StatGCSyncPTConflict);
            return pMapping;
        }
        pMapping = CTXALLSUFF(pMapping->pNext);
    }
    return NULL;
}


/**
 * Verifies a range of pages for read or write access.
 *
 * Only checks the guest's page tables.
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   Addr        Guest virtual address to check.
 * @param   cbSize      Access size.
 * @param   fAccess     Access type (r/w, user/supervisor (X86_PTE_*)).
 */
PGMDECL(int) PGMIsValidAccess(PVM pVM, RTGCUINTPTR Addr, uint32_t cbSize, uint32_t fAccess)
{
    /*
     * Validate input.
     */
    if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
    {
        AssertMsgFailed(("PGMIsValidAccess: invalid access type %08x\n", fAccess));
        return VERR_INVALID_PARAMETER;
    }

    uint64_t fPage;
    int rc = PGMGstGetPage(pVM, (RTGCPTR)Addr, &fPage, NULL);
    if (VBOX_FAILURE(rc))
    {
        Log(("PGMIsValidAccess: access violation for %VGv rc=%d\n", Addr, rc));
        return VINF_EM_RAW_GUEST_TRAP;
    }

    /*
     * Check if the access would cause a page fault.
     *
     * Note that hypervisor page directories are not present in the guest's tables, so this check
     * is sufficient.
     */
    bool fWrite = !!(fAccess & X86_PTE_RW);
    bool fUser  = !!(fAccess & X86_PTE_US);
    if (    !(fPage & X86_PTE_P)
        ||  (fWrite && !(fPage & X86_PTE_RW))
        ||  (fUser  && !(fPage & X86_PTE_US)) )
    {
        Log(("PGMIsValidAccess: access violation for %VGv attr %#llx vs %d:%d\n", Addr, fPage, fWrite, fUser));
        return VINF_EM_RAW_GUEST_TRAP;
    }
    if (    VBOX_SUCCESS(rc)
        &&  PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize))
        return PGMIsValidAccess(pVM, Addr + PAGE_SIZE, (cbSize > PAGE_SIZE) ? cbSize - PAGE_SIZE : 1, fAccess);
    return rc;
}
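
/*
 * Editor's sketch (assumed, not part of the original file): answering
 * "would this user-mode write fault in the guest's own tables?" with
 * PGMIsValidAccess.  The function name is illustrative.
 */
#if 0 /* example only */
static bool exampleGuestUserCanWrite(PVM pVM, RTGCUINTPTR GCPtr, uint32_t cb)
{
    /* Checks the guest tables only; the shadow tables are not consulted. */
    return PGMIsValidAccess(pVM, GCPtr, cb, X86_PTE_US | X86_PTE_RW) == VINF_SUCCESS;
}
#endif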


/**
 * Verifies a range of pages for read or write access.
 *
 * Supports handling of pages marked for dirty bit tracking and CSAM.
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   Addr        Guest virtual address to check.
 * @param   cbSize      Access size.
 * @param   fAccess     Access type (r/w, user/supervisor (X86_PTE_*)).
 */
PGMDECL(int) PGMVerifyAccess(PVM pVM, RTGCUINTPTR Addr, uint32_t cbSize, uint32_t fAccess)
{
    /*
     * Validate input.
     */
    if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
    {
        AssertMsgFailed(("PGMVerifyAccess: invalid access type %08x\n", fAccess));
        return VERR_INVALID_PARAMETER;
    }

    uint64_t fPageGst;
    int rc = PGMGstGetPage(pVM, (RTGCPTR)Addr, &fPageGst, NULL);
    if (VBOX_FAILURE(rc))
    {
        Log(("PGMVerifyAccess: access violation for %VGv rc=%d\n", Addr, rc));
        return VINF_EM_RAW_GUEST_TRAP;
    }

    /*
     * Check if the access would cause a page fault.
     *
     * Note that hypervisor page directories are not present in the guest's tables, so this check
     * is sufficient.
     */
    const bool fWrite = !!(fAccess & X86_PTE_RW);
    const bool fUser  = !!(fAccess & X86_PTE_US);
    if (    !(fPageGst & X86_PTE_P)
        ||  (fWrite && !(fPageGst & X86_PTE_RW))
        ||  (fUser  && !(fPageGst & X86_PTE_US)) )
    {
        Log(("PGMVerifyAccess: access violation for %VGv attr %#llx vs %d:%d\n", Addr, fPageGst, fWrite, fUser));
        return VINF_EM_RAW_GUEST_TRAP;
    }

    /*
     * Next step is to verify if we protected this page for dirty bit tracking or for CSAM scanning.
     */
    rc = PGMShwGetPage(pVM, (RTGCPTR)Addr, NULL, NULL);
    if (    rc == VERR_PAGE_NOT_PRESENT
        ||  rc == VERR_PAGE_TABLE_NOT_PRESENT)
    {
        /*
         * Page is not present in our page tables.
         * Try to sync it!
         */
        Assert(X86_TRAP_PF_RW == X86_PTE_RW && X86_TRAP_PF_US == X86_PTE_US);
        uint32_t uErr = fAccess & (X86_TRAP_PF_RW | X86_TRAP_PF_US);
        rc = PGM_BTH_PFN(VerifyAccessSyncPage, pVM)(pVM, Addr, fPageGst, uErr);
        if (rc != VINF_SUCCESS)
            return rc;
    }
    else
        AssertMsg(rc == VINF_SUCCESS, ("PGMShwGetPage %VGv failed with %Vrc\n", Addr, rc));

#if 0 /* def VBOX_STRICT; triggers too often now */
    /*
     * This check is a bit paranoid, but useful.
     */
    /** @note this will assert when writing to monitored pages (a bit annoying actually) */
    uint64_t fPageShw;
    rc = PGMShwGetPage(pVM, (RTGCPTR)Addr, &fPageShw, NULL);
    if (    (rc == VERR_PAGE_NOT_PRESENT || VBOX_FAILURE(rc))
        ||  (fWrite && !(fPageShw & X86_PTE_RW))
        ||  (fUser  && !(fPageShw & X86_PTE_US)) )
    {
        AssertMsgFailed(("Unexpected access violation for %VGv! rc=%Vrc write=%d user=%d\n",
                         Addr, rc, fWrite && !(fPageShw & X86_PTE_RW), fUser && !(fPageShw & X86_PTE_US)));
        return VINF_EM_RAW_GUEST_TRAP;
    }
#endif

    if (    VBOX_SUCCESS(rc)
        &&  (   PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize - 1)
             || Addr + cbSize < Addr))
    {
        /* Don't recursively call PGMVerifyAccess as we might run out of stack. */
        for (;;)
        {
            Addr += PAGE_SIZE;
            if (cbSize > PAGE_SIZE)
                cbSize -= PAGE_SIZE;
            else
                cbSize = 1;
            rc = PGMVerifyAccess(pVM, Addr, 1, fAccess);
            if (rc != VINF_SUCCESS)
                break;
            if (PAGE_ADDRESS(Addr) == PAGE_ADDRESS(Addr + cbSize - 1))
                break;
        }
    }
    return rc;
}


#ifndef IN_GC
/**
 * Emulation of the invlpg instruction (HC only actually).
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   GCPtrPage   Page to invalidate.
 * @remark  ASSUMES the page table entry or page directory is
 *          valid. Fairly safe, but there could be edge cases!
 * @todo    Flush page or page directory only if necessary!
 */
PGMDECL(int) PGMInvalidatePage(PVM pVM, RTGCPTR GCPtrPage)
{
    int rc;

    LogFlow(("PGMInvalidatePage: GCPtrPage=%VGv\n", GCPtrPage));

    /** @todo merge PGMGCInvalidatePage with this one */

#ifndef IN_RING3
    /*
     * Notify the recompiler so it can record this instruction.
     * Failure happens when it's out of space. We'll return to HC in that case.
     */
    rc = REMNotifyInvalidatePage(pVM, GCPtrPage);
    if (VBOX_FAILURE(rc))
        return rc;
#endif

    STAM_PROFILE_START(&CTXMID(pVM->pgm.s.Stat,InvalidatePage), a);
    rc = PGM_BTH_PFN(InvalidatePage, pVM)(pVM, GCPtrPage);
    STAM_PROFILE_STOP(&CTXMID(pVM->pgm.s.Stat,InvalidatePage), a);

#ifndef IN_RING0
    /*
     * Check if we have a pending update of the CR3 monitoring.
     */
    if (    VBOX_SUCCESS(rc)
        &&  (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3))
    {
        pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
        Assert(!pVM->pgm.s.fMappingsFixed);
        Assert(pVM->pgm.s.GCPhysCR3 == pVM->pgm.s.GCPhysGstCR3Monitored);
        rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, pVM->pgm.s.GCPhysCR3);
    }
#endif

#ifdef IN_RING3
    /*
     * Inform CSAM about the flush.
     */
    /** @note this is to check if monitored pages have been changed; when we implement callbacks for virtual handlers, this is no longer required. */
    CSAMR3FlushPage(pVM, GCPtrPage);
#endif
    return rc;
}
#endif /* !IN_GC */


/**
 * Executes an instruction using the interpreter.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         VM handle.
 * @param   pRegFrame   Register frame.
 * @param   pvFault     Fault address.
 */
PGMDECL(int) PGMInterpretInstruction(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
{
    uint32_t cb;
    int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);
    if (rc == VERR_EM_INTERPRETER)
        rc = VINF_EM_RAW_EMULATE_INSTR;
    if (rc != VINF_SUCCESS)
        Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%VGv)\n", rc, pvFault));
    return rc;
}


/**
 * Gets effective page information (from the VMM page directory).
 *
 * @returns VBox status.
 * @param   pVM         VM Handle.
 * @param   GCPtr       Guest Context virtual address of the page.
 * @param   pfFlags     Where to store the flags. These are X86_PTE_*.
 * @param   pHCPhys     Where to store the HC physical address of the page.
 *                      This is page aligned.
 * @remark  You should use PGMMapGetPage() for pages in a mapping.
 */
PGMDECL(int) PGMShwGetPage(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
{
    return PGM_SHW_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtr, pfFlags, pHCPhys);
}


/**
 * Sets (replaces) the page flags for a range of pages in the shadow context.
 *
 * @returns VBox status.
 * @param   pVM         VM handle.
 * @param   GCPtr       The address of the first page.
 * @param   cb          The size of the range in bytes.
 * @param   fFlags      Page flags X86_PTE_*, excluding the page mask of course.
 * @remark  You must use PGMMapSetPage() for pages in a mapping.
 */
PGMDECL(int) PGMShwSetPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
{
    return PGMShwModifyPage(pVM, GCPtr, cb, fFlags, 0);
}


/**
 * Modify page flags for a range of pages in the shadow context.
 *
 * The existing flags are ANDed with the fMask and ORed with the fFlags.
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   GCPtr       Virtual address of the first page in the range.
 * @param   cb          Size (in bytes) of the range to apply the modification to.
 * @param   fFlags      The OR  mask - page flags X86_PTE_*, excluding the page mask of course.
 * @param   fMask       The AND mask - page flags X86_PTE_*.
 *                      Be very CAREFUL when ~'ing constants which could be 32-bit!
 * @remark  You must use PGMMapModifyPage() for pages in a mapping.
 */
PGMDECL(int) PGMShwModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
{
    /*
     * Validate input.
     */
    if (fFlags & X86_PTE_PAE_PG_MASK)
    {
        AssertMsgFailed(("fFlags=%#llx\n", fFlags));
        return VERR_INVALID_PARAMETER;
    }
    if (!cb)
    {
        AssertFailed();
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Align the input.
     */
    cb += (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
    cb = RT_ALIGN_Z(cb, PAGE_SIZE);
    GCPtr = (RTGCPTR)((RTGCUINTPTR)GCPtr & PAGE_BASE_GC_MASK); /** @todo this ain't necessary, right... */

    /*
     * Call worker.
     */
    return PGM_SHW_PFN(ModifyPage, pVM)(pVM, (RTGCUINTPTR)GCPtr, cb, fFlags, fMask);
}
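
/*
 * Editor's sketch (assumed, not part of the original file): how the OR/AND
 * semantics compose.  Each PTE in the range effectively becomes
 * (fOld & fMask) | fFlags, so write-protecting a shadow range ORs in nothing
 * and ANDs out X86_PTE_RW.  Note the 64-bit cast before ~, as the doc
 * comment above warns.
 */
#if 0 /* example only */
static int exampleWriteProtectShadowRange(PVM pVM, RTGCPTR GCPtr, size_t cb)
{
    return PGMShwModifyPage(pVM, GCPtr, cb, 0 /*fFlags*/, ~(uint64_t)X86_PTE_RW /*fMask*/);
}
#endif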


/**
 * Gets effective Guest OS page information.
 *
 * When GCPtr is in a big page, the function will return as if it was a normal
 * 4KB page. If the need for distinguishing between big and normal page becomes
 * necessary at a later point, a PGMGstGetPage() will be created for that
 * purpose.
 *
 * @returns VBox status.
 * @param   pVM         VM Handle.
 * @param   GCPtr       Guest Context virtual address of the page.
 * @param   pfFlags     Where to store the flags. These are X86_PTE_*, even for big pages.
 * @param   pGCPhys     Where to store the GC physical address of the page.
 *                      This is page aligned.
 */
PGMDECL(int) PGMGstGetPage(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
{
    return PGM_GST_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtr, pfFlags, pGCPhys);
}


/**
 * Checks if the page is present.
 *
 * @returns true if the page is present.
 * @returns false if the page is not present.
 * @param   pVM         The VM handle.
 * @param   GCPtr       Address within the page.
 */
PGMDECL(bool) PGMGstIsPagePresent(PVM pVM, RTGCPTR GCPtr)
{
    int rc = PGMGstGetPage(pVM, GCPtr, NULL, NULL);
    return VBOX_SUCCESS(rc);
}


/**
 * Sets (replaces) the page flags for a range of pages in the guest's tables.
 *
 * @returns VBox status.
 * @param   pVM         VM handle.
 * @param   GCPtr       The address of the first page.
 * @param   cb          The size of the range in bytes.
 * @param   fFlags      Page flags X86_PTE_*, excluding the page mask of course.
 */
PGMDECL(int) PGMGstSetPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
{
    return PGMGstModifyPage(pVM, GCPtr, cb, fFlags, 0);
}


/**
 * Modify page flags for a range of pages in the guest's tables.
 *
 * The existing flags are ANDed with the fMask and ORed with the fFlags.
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   GCPtr       Virtual address of the first page in the range.
 * @param   cb          Size (in bytes) of the range to apply the modification to.
 * @param   fFlags      The OR  mask - page flags X86_PTE_*, excluding the page mask of course.
 * @param   fMask       The AND mask - page flags X86_PTE_*, excluding the page mask of course.
 *                      Be very CAREFUL when ~'ing constants which could be 32-bit!
 */
PGMDECL(int) PGMGstModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
{
    STAM_PROFILE_START(&CTXMID(pVM->pgm.s.Stat,GstModifyPage), a);

    /*
     * Validate input.
     */
    if (fFlags & X86_PTE_PAE_PG_MASK)
    {
        AssertMsgFailed(("fFlags=%#llx\n", fFlags));
        STAM_PROFILE_STOP(&CTXMID(pVM->pgm.s.Stat,GstModifyPage), a);
        return VERR_INVALID_PARAMETER;
    }

    if (!cb)
    {
        AssertFailed();
        STAM_PROFILE_STOP(&CTXMID(pVM->pgm.s.Stat,GstModifyPage), a);
        return VERR_INVALID_PARAMETER;
    }

    LogFlow(("PGMGstModifyPage %VGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));

    /*
     * Adjust input.
     */
    cb += (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
    cb = RT_ALIGN_Z(cb, PAGE_SIZE);
    GCPtr = (RTGCPTR)((RTGCUINTPTR)GCPtr & PAGE_BASE_GC_MASK);

    /*
     * Call worker.
     */
    int rc = PGM_GST_PFN(ModifyPage, pVM)(pVM, (RTGCUINTPTR)GCPtr, cb, fFlags, fMask);

    STAM_PROFILE_STOP(&CTXMID(pVM->pgm.s.Stat,GstModifyPage), a);
    return rc;
}
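
/*
 * Editor's sketch (assumed, not part of the original file): using the same
 * OR/AND semantics on the guest tables to mark a range accessed and dirty
 * while leaving every other flag alone (the fMask keeps everything).
 */
#if 0 /* example only */
static int exampleMarkGuestRangeAccessedDirty(PVM pVM, RTGCPTR GCPtr, size_t cb)
{
    return PGMGstModifyPage(pVM, GCPtr, cb, X86_PTE_A | X86_PTE_D /*fFlags*/, ~(uint64_t)0 /*fMask*/);
}
#endif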


/**
 * Gets the current CR3 register value for the shadow memory context.
 * @returns CR3 value.
 * @param   pVM         The VM handle.
 */
PGMDECL(uint32_t) PGMGetHyperCR3(PVM pVM)
{
    switch (pVM->pgm.s.enmShadowMode)
    {
        case PGMMODE_32_BIT:
            return pVM->pgm.s.HCPhys32BitPD;

        case PGMMODE_PAE:
        case PGMMODE_PAE_NX:
            return pVM->pgm.s.HCPhysPaePDPTR;

        case PGMMODE_AMD64:
        case PGMMODE_AMD64_NX:
            return pVM->pgm.s.HCPhysPaePML4;

        default:
            AssertMsgFailed(("enmShadowMode=%d\n", pVM->pgm.s.enmShadowMode));
            return ~0;
    }
}


/**
 * Gets the CR3 register value for the 32-Bit shadow memory context.
 * @returns CR3 value.
 * @param   pVM         The VM handle.
 */
PGMDECL(uint32_t) PGMGetHyper32BitCR3(PVM pVM)
{
    return pVM->pgm.s.HCPhys32BitPD;
}


/**
 * Gets the CR3 register value for the PAE shadow memory context.
 * @returns CR3 value.
 * @param   pVM         The VM handle.
 */
PGMDECL(uint32_t) PGMGetHyperPaeCR3(PVM pVM)
{
    return pVM->pgm.s.HCPhysPaePDPTR;
}


/**
 * Gets the CR3 register value for the AMD64 shadow memory context.
 * @returns CR3 value.
 * @param   pVM         The VM handle.
 */
PGMDECL(uint32_t) PGMGetHyperAmd64CR3(PVM pVM)
{
    return pVM->pgm.s.HCPhysPaePML4;
}


/**
 * Gets the current CR3 register value for the HC intermediate memory context.
 * @returns CR3 value.
 * @param   pVM         The VM handle.
 */
PGMDECL(uint32_t) PGMGetInterHCCR3(PVM pVM)
{
    switch (pVM->pgm.s.enmHostMode)
    {
        case SUPPAGINGMODE_32_BIT:
        case SUPPAGINGMODE_32_BIT_GLOBAL:
            return pVM->pgm.s.HCPhysInterPD;

        case SUPPAGINGMODE_PAE:
        case SUPPAGINGMODE_PAE_GLOBAL:
        case SUPPAGINGMODE_PAE_NX:
        case SUPPAGINGMODE_PAE_GLOBAL_NX:
            return pVM->pgm.s.HCPhysInterPaePDPTR;

        case SUPPAGINGMODE_AMD64:
        case SUPPAGINGMODE_AMD64_GLOBAL:
        case SUPPAGINGMODE_AMD64_NX:
        case SUPPAGINGMODE_AMD64_GLOBAL_NX:
            return pVM->pgm.s.HCPhysInterPaePDPTR;

        default:
            AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode));
            return ~0;
    }
}


/**
 * Gets the current CR3 register value for the GC intermediate memory context.
 * @returns CR3 value.
 * @param   pVM         The VM handle.
 */
PGMDECL(uint32_t) PGMGetInterGCCR3(PVM pVM)
{
    switch (pVM->pgm.s.enmShadowMode)
    {
        case PGMMODE_32_BIT:
            return pVM->pgm.s.HCPhysInterPD;

        case PGMMODE_PAE:
        case PGMMODE_PAE_NX:
            return pVM->pgm.s.HCPhysInterPaePDPTR;

        case PGMMODE_AMD64:
        case PGMMODE_AMD64_NX:
            return pVM->pgm.s.HCPhysInterPaePML4;

        default:
            AssertMsgFailed(("enmShadowMode=%d\n", pVM->pgm.s.enmShadowMode));
            return ~0;
    }
}


/**
 * Gets the CR3 register value for the 32-Bit intermediate memory context.
 * @returns CR3 value.
 * @param   pVM         The VM handle.
 */
PGMDECL(uint32_t) PGMGetInter32BitCR3(PVM pVM)
{
    return pVM->pgm.s.HCPhysInterPD;
}


/**
 * Gets the CR3 register value for the PAE intermediate memory context.
 * @returns CR3 value.
 * @param   pVM         The VM handle.
 */
PGMDECL(uint32_t) PGMGetInterPaeCR3(PVM pVM)
{
    return pVM->pgm.s.HCPhysInterPaePDPTR;
}


/**
 * Gets the CR3 register value for the AMD64 intermediate memory context.
 * @returns CR3 value.
 * @param   pVM         The VM handle.
 */
PGMDECL(uint32_t) PGMGetInterAmd64CR3(PVM pVM)
{
    return pVM->pgm.s.HCPhysInterPaePML4;
}


/**
 * Performs and schedules necessary updates following a CR3 load or reload.
 *
 * This will normally involve mapping the guest PD or nPDPTR.
 *
 * @returns VBox status code.
 * @retval  VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
 *          safely be ignored and overridden since the FF will be set too then.
 * @param   pVM         VM handle.
 * @param   cr3         The new cr3.
 * @param   fGlobal     Indicates whether this is a global flush or not.
 */
PGMDECL(int) PGMFlushTLB(PVM pVM, uint32_t cr3, bool fGlobal)
{
    /*
     * Always flag the necessary updates; this is required for hardware acceleration.
     */
    VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
    if (fGlobal)
        VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);

    /*
     * When in real or protected mode there is no TLB flushing, but
     * we may still be called because of REM not caring/knowing this.
     * REM is simple and we wish to keep it that way.
     */
    if (pVM->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
        return VINF_SUCCESS;
    LogFlow(("PGMFlushTLB: cr3=%#x OldCr3=%#x fGlobal=%d\n", cr3, pVM->pgm.s.GCPhysCR3, fGlobal));
    STAM_PROFILE_START(&pVM->pgm.s.StatFlushTLB, a);

    /*
     * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
     */
    int rc = VINF_SUCCESS;
    RTGCPHYS GCPhysCR3;
    if (    pVM->pgm.s.enmGuestMode == PGMMODE_PAE
        ||  pVM->pgm.s.enmGuestMode == PGMMODE_PAE_NX
        ||  pVM->pgm.s.enmGuestMode == PGMMODE_AMD64
        ||  pVM->pgm.s.enmGuestMode == PGMMODE_AMD64_NX)
        GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
    else
        GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
    if (pVM->pgm.s.GCPhysCR3 != GCPhysCR3)
    {
        pVM->pgm.s.GCPhysCR3 = GCPhysCR3;
        rc = PGM_GST_PFN(MapCR3, pVM)(pVM, GCPhysCR3);
        if (VBOX_SUCCESS(rc) && !pVM->pgm.s.fMappingsFixed)
        {
            pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
            rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, GCPhysCR3);
        }
        if (fGlobal)
            STAM_COUNTER_INC(&pVM->pgm.s.StatFlushTLBNewCR3Global);
        else
            STAM_COUNTER_INC(&pVM->pgm.s.StatFlushTLBNewCR3);
    }
    else
    {
        /*
         * Check if we have a pending update of the CR3 monitoring.
         */
        if (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
        {
            pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
            Assert(!pVM->pgm.s.fMappingsFixed);
            rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, GCPhysCR3);
        }
        if (fGlobal)
            STAM_COUNTER_INC(&pVM->pgm.s.StatFlushTLBSameCR3Global);
        else
            STAM_COUNTER_INC(&pVM->pgm.s.StatFlushTLBSameCR3);
    }

    STAM_PROFILE_STOP(&pVM->pgm.s.StatFlushTLB, a);
    return rc;
}
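
/*
 * Editor's sketch (assumed, not part of the original file): a MOV-to-CR3
 * emulation path would call PGMFlushTLB along these lines.  Mirroring the
 * logic in PGMSyncCR3 below, a plain CR3 load is a non-global flush unless
 * CR4.PGE is clear, in which case everything is flushed anyway.
 */
#if 0 /* example only */
static int exampleEmulateMovToCr3(PVM pVM, uint32_t cr3, uint32_t cr4)
{
    bool fGlobal = !(cr4 & X86_CR4_PGE); /* assumed policy, for illustration */
    return PGMFlushTLB(pVM, cr3, fGlobal);
}
#endif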


/**
 * Synchronize the paging structures.
 *
 * This function is called in response to the VM_FF_PGM_SYNC_CR3 and
 * VM_FF_PGM_SYNC_CR3_NON_GLOBAL force action flags. Those two flags are
 * set in several places, most importantly whenever the CR3 is loaded.
 *
 * @returns VBox status code.
 * @param   pVM         The virtual machine.
 * @param   cr0         Guest context CR0 register.
 * @param   cr3         Guest context CR3 register.
 * @param   cr4         Guest context CR4 register.
 * @param   fGlobal     Including global page directories or not.
 */
PGMDECL(int) PGMSyncCR3(PVM pVM, uint32_t cr0, uint32_t cr3, uint32_t cr4, bool fGlobal)
{
    /*
     * We might be called when we shouldn't.
     *
     * The mode switching will ensure that the PD is resynced
     * after every mode switch. So, if we find ourselves here
     * when in protected or real mode we can safely disable the
     * FF and return immediately.
     */
    if (pVM->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
    {
        Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
        VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3);
        VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
        return VINF_SUCCESS;
    }

    /* If global pages are not supported, then all flushes are global. */
    if (!(cr4 & X86_CR4_PGE))
        fGlobal = true;
    LogFlow(("PGMSyncCR3: cr0=%08x cr3=%08x cr4=%08x fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
             VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3), VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL)));

    /*
     * Let the 'Bth' function do the work and we'll just keep track of the flags.
     */
    STAM_PROFILE_START(&pVM->pgm.s.CTXMID(Stat,SyncCR3), a);
    int rc = PGM_BTH_PFN(SyncCR3, pVM)(pVM, cr0, cr3, cr4, fGlobal);
    STAM_PROFILE_STOP(&pVM->pgm.s.CTXMID(Stat,SyncCR3), a);
    AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || VBOX_FAILURE(rc), ("rc=%VRc\n", rc));
    if (rc == VINF_SUCCESS)
    {
        if (!(pVM->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
        {
            VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3);
            VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
        }

        /*
         * Check if we have a pending update of the CR3 monitoring.
         */
        if (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
        {
            pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
            Assert(!pVM->pgm.s.fMappingsFixed);
            Assert(pVM->pgm.s.GCPhysCR3 == pVM->pgm.s.GCPhysGstCR3Monitored);
            rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, pVM->pgm.s.GCPhysCR3);
        }
    }

    /*
     * Now flush the CR3 (guest context).
     */
    if (rc == VINF_SUCCESS)
        PGM_INVL_GUEST_TLBS();
    return rc;
}


/**
 * Called whenever CR0, CR4 or EFER changes in a way which may affect
 * the paging mode.
 *
 * @returns VBox status code fit for scheduling in GC and R0.
 * @retval  VINF_SUCCESS if there was no change, or it was successfully dealt with.
 * @retval  VINF_PGM_CHANGE_MODE if we're in GC or R0 and the mode changes.
 * @param   pVM         VM handle.
 * @param   cr0         The new cr0.
 * @param   cr4         The new cr4.
 * @param   efer        The new extended feature enable register.
 */
PGMDECL(int) PGMChangeMode(PVM pVM, uint32_t cr0, uint32_t cr4, uint64_t efer)
{
    PGMMODE enmGuestMode;

    /*
     * Calc the new guest mode.
     */
    if (!(cr0 & X86_CR0_PE))
        enmGuestMode = PGMMODE_REAL;
    else if (!(cr0 & X86_CR0_PG))
        enmGuestMode = PGMMODE_PROTECTED;
    else if (!(cr4 & X86_CR4_PAE))
        enmGuestMode = PGMMODE_32_BIT;
    else if (!(efer & MSR_K6_EFER_LME))
    {
        if (!(efer & MSR_K6_EFER_NXE))
            enmGuestMode = PGMMODE_PAE;
        else
            enmGuestMode = PGMMODE_PAE_NX;
    }
    else
    {
        if (!(efer & MSR_K6_EFER_NXE))
            enmGuestMode = PGMMODE_AMD64;
        else
            enmGuestMode = PGMMODE_AMD64_NX;
    }

    /*
     * Did it change?
     */
    if (pVM->pgm.s.enmGuestMode == enmGuestMode)
        return VINF_SUCCESS;
#ifdef IN_RING3
    return pgmR3ChangeMode(pVM, enmGuestMode);
#else
    Log(("PGMChangeMode: returns VINF_PGM_CHANGE_MODE.\n"));
    return VINF_PGM_CHANGE_MODE;
#endif
}
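
/*
 * Editor's illustration (not part of the original file): the decision table
 * the code above implements.  PE/PG come from cr0, PAE from cr4, LME/NXE
 * from EFER:
 *
 *   PE=0             -> PGMMODE_REAL
 *   PE=1, PG=0       -> PGMMODE_PROTECTED
 *   PG=1, PAE=0      -> PGMMODE_32_BIT
 *   PAE=1, LME=0     -> PGMMODE_PAE    (+_NX when NXE=1)
 *   PAE=1, LME=1     -> PGMMODE_AMD64  (+_NX when NXE=1)
 *
 * An assumed WRMSR(EFER)/MOV CRx emulation path would follow up like this:
 */
#if 0 /* example only */
static int exampleAfterControlRegWrite(PVM pVM, uint32_t cr0, uint32_t cr4, uint64_t efer)
{
    int rc = PGMChangeMode(pVM, cr0, cr4, efer);
    /* In GC/R0, VINF_PGM_CHANGE_MODE means ring-3 must complete the switch. */
    return rc;
}
#endif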


/**
 * Gets the current guest paging mode.
 *
 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
 *
 * @returns The current paging mode.
 * @param   pVM         The VM handle.
 */
PGMDECL(PGMMODE) PGMGetGuestMode(PVM pVM)
{
    return pVM->pgm.s.enmGuestMode;
}


/**
 * Gets the current shadow paging mode.
 *
 * @returns The current paging mode.
 * @param   pVM         The VM handle.
 */
PGMDECL(PGMMODE) PGMGetShadowMode(PVM pVM)
{
    return pVM->pgm.s.enmShadowMode;
}


/**
 * Get mode name.
 *
 * @returns read-only name string.
 * @param   enmMode     The mode whose name is desired.
 */
PGMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
{
    switch (enmMode)
    {
        case PGMMODE_REAL:      return "real";
        case PGMMODE_PROTECTED: return "protected";
        case PGMMODE_32_BIT:    return "32-bit";
        case PGMMODE_PAE:       return "PAE";
        case PGMMODE_PAE_NX:    return "PAE+NX";
        case PGMMODE_AMD64:     return "AMD64";
        case PGMMODE_AMD64_NX:  return "AMD64+NX";
        default:                return "unknown mode value";
    }
}
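
/*
 * Editor's sketch (assumed, not part of the original file): the three
 * getters above combine naturally for diagnostic logging.
 */
#if 0 /* example only */
static void exampleLogPagingModes(PVM pVM)
{
    Log(("PGM: guest mode %s, shadow mode %s\n",
         PGMGetModeName(PGMGetGuestMode(pVM)),
         PGMGetModeName(PGMGetShadowMode(pVM))));
}
#endif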


/**
 * Acquire the PGM lock.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
int pgmLock(PVM pVM)
{
    int rc = PDMCritSectEnter(&pVM->pgm.s.CritSect, VERR_SEM_BUSY);
#ifdef IN_GC
    if (rc == VERR_SEM_BUSY)
        rc = VMMGCCallHost(pVM, VMMCALLHOST_PGM_LOCK, 0);
#elif defined(IN_RING0)
    if (rc == VERR_SEM_BUSY)
        rc = VMMR0CallHost(pVM, VMMCALLHOST_PGM_LOCK, 0);
#endif
    AssertRC(rc);
    return rc;
}


/**
 * Release the PGM lock.
 *
 * @param   pVM     The VM to operate on.
 */
void pgmUnlock(PVM pVM)
{
    PDMCritSectLeave(&pVM->pgm.s.CritSect);
}
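
/*
 * Editor's sketch (assumed, not part of the original file): the usual
 * acquire/release pattern around PGM state.  In GC/R0 the enter call falls
 * back to a ring-3 host call instead of blocking, as shown above.
 */
#if 0 /* example only */
static int exampleLockedOperation(PVM pVM)
{
    int rc = pgmLock(pVM);
    if (VBOX_SUCCESS(rc))
    {
        /* ... manipulate PGM structures here ... */
        pgmUnlock(pVM);
    }
    return rc;
}
#endif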


#ifdef VBOX_STRICT

/**
 * Asserts that there are no mapping conflicts.
 *
 * @returns Number of conflicts.
 * @param   pVM     The VM Handle.
 */
PGMDECL(unsigned) PGMAssertNoMappingConflicts(PVM pVM)
{
    unsigned cErrors = 0;

    /*
     * Check for mapping conflicts.
     */
    for (PPGMMAPPING pMapping = CTXALLSUFF(pVM->pgm.s.pMappings);
         pMapping;
         pMapping = CTXALLSUFF(pMapping->pNext))
    {
        /** @todo This is slow and should be optimized, but since it's just assertions I don't care now. */
        for (RTGCUINTPTR GCPtr = (RTGCUINTPTR)pMapping->GCPtr;
             GCPtr <= (RTGCUINTPTR)pMapping->GCPtrLast;
             GCPtr += PAGE_SIZE)
        {
            int rc = PGMGstGetPage(pVM, (RTGCPTR)GCPtr, NULL, NULL);
            if (rc != VERR_PAGE_TABLE_NOT_PRESENT)
            {
                AssertMsgFailed(("Conflict at %VGv with %s\n", GCPtr, HCSTRING(pMapping->pszDesc)));
                cErrors++;
                break;
            }
        }
    }

    return cErrors;
}


/**
 * Asserts that everything related to the guest CR3 is correctly shadowed.
 *
 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
 * and assert the correctness of the guest CR3 mapping before asserting that the
 * shadow page tables are in sync with the guest page tables.
 *
 * @returns Number of conflicts.
 * @param   pVM     The VM Handle.
 * @param   cr3     The current guest CR3 register value.
 * @param   cr4     The current guest CR4 register value.
 */
PGMDECL(unsigned) PGMAssertCR3(PVM pVM, uint32_t cr3, uint32_t cr4)
{
    STAM_PROFILE_START(&pVM->pgm.s.CTXMID(Stat,SyncCR3), a);
    unsigned cErrors = PGM_BTH_PFN(AssertCR3, pVM)(pVM, cr3, cr4, 0, ~(RTGCUINTPTR)0);
    STAM_PROFILE_STOP(&pVM->pgm.s.CTXMID(Stat,SyncCR3), a);
    return cErrors;
}

#endif /* VBOX_STRICT */