VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp@ 9001

Last change on this file since 9001 was 9001, checked in by vboxsync, 17 years ago

Enabled the PGMMODE_PROTECTED (Guest) & PGMMODE_AMD64 (shadow) combination again.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 41.2 KB
Line 
1/* $Id: PGMAll.cpp 9001 2008-05-21 09:14:26Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor - All context code.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#define LOG_GROUP LOG_GROUP_PGM
26#include <VBox/pgm.h>
27#include <VBox/cpum.h>
28#include <VBox/selm.h>
29#include <VBox/iom.h>
30#include <VBox/sup.h>
31#include <VBox/mm.h>
32#include <VBox/stam.h>
33#include <VBox/csam.h>
34#include <VBox/patm.h>
35#include <VBox/trpm.h>
36#include <VBox/rem.h>
37#include <VBox/em.h>
38#include <VBox/hwaccm.h>
39#include "PGMInternal.h"
40#include <VBox/vm.h>
41#include <iprt/assert.h>
42#include <iprt/asm.h>
43#include <iprt/string.h>
44#include <VBox/log.h>
45#include <VBox/param.h>
46#include <VBox/err.h>
47
48
49/*******************************************************************************
50* Structures and Typedefs *
51*******************************************************************************/
/**
 * State structure for PGM_GST_NAME(HandlerVirtualUpdate) that's
 * passed to PGM_GST_NAME(VirtHandlerUpdateOne) during enumeration.
 */
typedef struct PGMHVUSTATE
{
    /** The VM handle. */
    PVM         pVM;
    /** The todo flags. */
    RTUINT      fTodo;
    /** The CR4 register value. */
    uint32_t    cr4;
} PGMHVUSTATE,  *PPGMHVUSTATE;
65
66
67/*******************************************************************************
68* Internal Functions *
69*******************************************************************************/
70
71/*
72 * Shadow - 32-bit mode
73 */
74#define PGM_SHW_TYPE PGM_TYPE_32BIT
75#define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
76#include "PGMAllShw.h"
77
78/* Guest - real mode */
79#define PGM_GST_TYPE PGM_TYPE_REAL
80#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
81#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
82#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
83#include "PGMAllGst.h"
84#include "PGMAllBth.h"
85#undef BTH_PGMPOOLKIND_PT_FOR_PT
86#undef PGM_BTH_NAME
87#undef PGM_GST_TYPE
88#undef PGM_GST_NAME
89
90/* Guest - protected mode */
91#define PGM_GST_TYPE PGM_TYPE_PROT
92#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
93#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
94#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
95#include "PGMAllGst.h"
96#include "PGMAllBth.h"
97#undef BTH_PGMPOOLKIND_PT_FOR_PT
98#undef PGM_BTH_NAME
99#undef PGM_GST_TYPE
100#undef PGM_GST_NAME
101
102/* Guest - 32-bit mode */
103#define PGM_GST_TYPE PGM_TYPE_32BIT
104#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
105#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
106#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
107#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
108#include "PGMAllGst.h"
109#include "PGMAllBth.h"
110#undef BTH_PGMPOOLKIND_PT_FOR_BIG
111#undef BTH_PGMPOOLKIND_PT_FOR_PT
112#undef PGM_BTH_NAME
113#undef PGM_GST_TYPE
114#undef PGM_GST_NAME
115
116#undef PGM_SHW_TYPE
117#undef PGM_SHW_NAME
118
119
120/*
121 * Shadow - PAE mode
122 */
123#define PGM_SHW_TYPE PGM_TYPE_PAE
124#define PGM_SHW_NAME(name) PGM_SHW_NAME_PAE(name)
125#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
126#include "PGMAllShw.h"
127
128/* Guest - real mode */
129#define PGM_GST_TYPE PGM_TYPE_REAL
130#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
131#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
132#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
133#include "PGMAllBth.h"
134#undef BTH_PGMPOOLKIND_PT_FOR_PT
135#undef PGM_BTH_NAME
136#undef PGM_GST_TYPE
137#undef PGM_GST_NAME
138
139/* Guest - protected mode */
140#define PGM_GST_TYPE PGM_TYPE_PROT
141#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
142#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
143#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
144#include "PGMAllBth.h"
145#undef BTH_PGMPOOLKIND_PT_FOR_PT
146#undef PGM_BTH_NAME
147#undef PGM_GST_TYPE
148#undef PGM_GST_NAME
149
150/* Guest - 32-bit mode */
151#define PGM_GST_TYPE PGM_TYPE_32BIT
152#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
153#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
154#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
155#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
156#include "PGMAllBth.h"
157#undef BTH_PGMPOOLKIND_PT_FOR_BIG
158#undef BTH_PGMPOOLKIND_PT_FOR_PT
159#undef PGM_BTH_NAME
160#undef PGM_GST_TYPE
161#undef PGM_GST_NAME
162
163
164/* Guest - PAE mode */
165#define PGM_GST_TYPE PGM_TYPE_PAE
166#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
167#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
168#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
169#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
170#include "PGMAllGst.h"
171#include "PGMAllBth.h"
172#undef BTH_PGMPOOLKIND_PT_FOR_BIG
173#undef BTH_PGMPOOLKIND_PT_FOR_PT
174#undef PGM_BTH_NAME
175#undef PGM_GST_TYPE
176#undef PGM_GST_NAME
177
178#undef PGM_SHW_TYPE
179#undef PGM_SHW_NAME
180
181
182#ifndef IN_GC /* AMD64 implies VT-x/AMD-V */
183/*
184 * Shadow - AMD64 mode
185 */
186#define PGM_SHW_TYPE PGM_TYPE_AMD64
187#define PGM_SHW_NAME(name) PGM_SHW_NAME_AMD64(name)
188#include "PGMAllShw.h"
189
190/* Guest - protected mode */
191#define PGM_GST_TYPE PGM_TYPE_PROT
192#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
193#define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
194#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
195#include "PGMAllBth.h"
196#undef BTH_PGMPOOLKIND_PT_FOR_PT
197#undef PGM_BTH_NAME
198#undef PGM_GST_TYPE
199#undef PGM_GST_NAME
200
201/* Guest - AMD64 mode */
202#define PGM_GST_TYPE PGM_TYPE_AMD64
203#define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
204#define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
205#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
206#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
207#include "PGMAllGst.h"
208#include "PGMAllBth.h"
209#undef BTH_PGMPOOLKIND_PT_FOR_BIG
210#undef BTH_PGMPOOLKIND_PT_FOR_PT
211#undef PGM_BTH_NAME
212#undef PGM_GST_TYPE
213#undef PGM_GST_NAME
214
215#undef PGM_SHW_TYPE
216#undef PGM_SHW_NAME
217#endif
218
219
/**
 * #PF Handler.
 *
 * Guest page-fault entry point: records error-code statistics and dispatches
 * the fault to the worker for the current shadow+guest paging mode pair.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         VM Handle.
 * @param   uErr        The trap error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address.
 */
PGMDECL(int) PGMTrap0eHandler(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
{
    LogFlow(("PGMTrap0eHandler: uErr=%#x pvFault=%VGv eip=%VGv\n", uErr, pvFault, pRegFrame->eip));
    STAM_PROFILE_START(&pVM->pgm.s.StatGCTrap0e, a);
    /* Reset the attribution pointer; the worker may point it at a more specific counter. */
    STAM_STATS({ pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution) = NULL; } );


#ifdef VBOX_WITH_STATISTICS
    /*
     * Error code stats.
     */
    if (uErr & X86_TRAP_PF_US)
    {
        if (!(uErr & X86_TRAP_PF_P))
        {
            if (uErr & X86_TRAP_PF_RW)
                STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUSNotPresentWrite);
            else
                STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUSNotPresentRead);
        }
        else if (uErr & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUSWrite);
        else if (uErr & X86_TRAP_PF_RSVD)
            STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUSReserved);
        else if (uErr & X86_TRAP_PF_ID)
            STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUSNXE);
        else
            STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eUSRead);
    }
    else
    {   /* Supervisor */
        if (!(uErr & X86_TRAP_PF_P))
        {
            if (uErr & X86_TRAP_PF_RW)
                STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eSVNotPresentWrite);
            else
                STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eSVNotPresentRead);
        }
        else if (uErr & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eSVWrite);
        else if (uErr & X86_TRAP_PF_ID)
            STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eSNXE);
        else if (uErr & X86_TRAP_PF_RSVD)
            STAM_COUNTER_INC(&pVM->pgm.s.StatGCTrap0eSVReserved);
        /* NOTE(review): unlike the user branch, a present supervisor read fault with no
           RW/ID/RSVD bit is not counted at all here — confirm whether that's intentional. */
    }
#endif

    /*
     * Call the worker.
     */
    int rc = PGM_BTH_PFN(Trap0eHandler, pVM)(pVM, uErr, pRegFrame, pvFault);
    if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
        rc = VINF_SUCCESS;
    /* If the worker didn't attribute the time to anything, charge it to the misc bucket. */
    STAM_STATS({ if (!pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution))
                    pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatTrap0eMisc; });
    STAM_PROFILE_STOP_EX(&pVM->pgm.s.StatGCTrap0e, pVM->pgm.s.CTXSUFF(pStatTrap0eAttribution), a);
    return rc;
}
287
288/**
289 * Prefetch a page
290 *
291 * Typically used to sync commonly used pages before entering raw mode
292 * after a CR3 reload.
293 *
294 * @returns VBox status code suitable for scheduling.
295 * @retval VINF_SUCCESS on success.
296 * @retval VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
297 * @param pVM VM handle.
298 * @param GCPtrPage Page to invalidate.
299 */
300PGMDECL(int) PGMPrefetchPage(PVM pVM, RTGCPTR GCPtrPage)
301{
302 STAM_PROFILE_START(&pVM->pgm.s.StatHCPrefetch, a);
303 int rc = PGM_BTH_PFN(PrefetchPage, pVM)(pVM, (RTGCUINTPTR)GCPtrPage);
304 STAM_PROFILE_STOP(&pVM->pgm.s.StatHCPrefetch, a);
305 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || VBOX_FAILURE(rc), ("rc=%Vrc\n", rc));
306 return rc;
307}
308
309
310/**
311 * Gets the mapping corresponding to the specified address (if any).
312 *
313 * @returns Pointer to the mapping.
314 * @returns NULL if not
315 *
316 * @param pVM The virtual machine.
317 * @param GCPtr The guest context pointer.
318 */
319PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr)
320{
321 PPGMMAPPING pMapping = CTXALLSUFF(pVM->pgm.s.pMappings);
322 while (pMapping)
323 {
324 if ((uintptr_t)GCPtr < (uintptr_t)pMapping->GCPtr)
325 break;
326 if ((uintptr_t)GCPtr - (uintptr_t)pMapping->GCPtr < pMapping->cb)
327 {
328 STAM_COUNTER_INC(&pVM->pgm.s.StatGCSyncPTConflict);
329 return pMapping;
330 }
331 pMapping = CTXALLSUFF(pMapping->pNext);
332 }
333 return NULL;
334}
335
336
337/**
338 * Verifies a range of pages for read or write access
339 *
340 * Only checks the guest's page tables
341 *
342 * @returns VBox status code.
343 * @param pVM VM handle.
344 * @param Addr Guest virtual address to check
345 * @param cbSize Access size
346 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
347 */
348PGMDECL(int) PGMIsValidAccess(PVM pVM, RTGCUINTPTR Addr, uint32_t cbSize, uint32_t fAccess)
349{
350 /*
351 * Validate input.
352 */
353 if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
354 {
355 AssertMsgFailed(("PGMIsValidAccess: invalid access type %08x\n", fAccess));
356 return VERR_INVALID_PARAMETER;
357 }
358
359 uint64_t fPage;
360 int rc = PGMGstGetPage(pVM, (RTGCPTR)Addr, &fPage, NULL);
361 if (VBOX_FAILURE(rc))
362 {
363 Log(("PGMIsValidAccess: access violation for %VGv rc=%d\n", Addr, rc));
364 return VINF_EM_RAW_GUEST_TRAP;
365 }
366
367 /*
368 * Check if the access would cause a page fault
369 *
370 * Note that hypervisor page directories are not present in the guest's tables, so this check
371 * is sufficient.
372 */
373 bool fWrite = !!(fAccess & X86_PTE_RW);
374 bool fUser = !!(fAccess & X86_PTE_US);
375 if ( !(fPage & X86_PTE_P)
376 || (fWrite && !(fPage & X86_PTE_RW))
377 || (fUser && !(fPage & X86_PTE_US)) )
378 {
379 Log(("PGMIsValidAccess: access violation for %VGv attr %#llx vs %d:%d\n", Addr, fPage, fWrite, fUser));
380 return VINF_EM_RAW_GUEST_TRAP;
381 }
382 if ( VBOX_SUCCESS(rc)
383 && PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize))
384 return PGMIsValidAccess(pVM, Addr + PAGE_SIZE, (cbSize > PAGE_SIZE) ? cbSize - PAGE_SIZE : 1, fAccess);
385 return rc;
386}
387
388
/**
 * Verifies a range of pages for read or write access
 *
 * Supports handling of pages marked for dirty bit tracking and CSAM
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   Addr        Guest virtual address to check
 * @param   cbSize      Access size
 * @param   fAccess     Access type (r/w, user/supervisor (X86_PTE_*))
 */
PGMDECL(int) PGMVerifyAccess(PVM pVM, RTGCUINTPTR Addr, uint32_t cbSize, uint32_t fAccess)
{
    /*
     * Validate input.
     */
    if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
    {
        AssertMsgFailed(("PGMVerifyAccess: invalid access type %08x\n", fAccess));
        return VERR_INVALID_PARAMETER;
    }

    /* First consult the guest's own page tables. */
    uint64_t fPageGst;
    int rc = PGMGstGetPage(pVM, (RTGCPTR)Addr, &fPageGst, NULL);
    if (VBOX_FAILURE(rc))
    {
        Log(("PGMVerifyAccess: access violation for %VGv rc=%d\n", Addr, rc));
        return VINF_EM_RAW_GUEST_TRAP;
    }

    /*
     * Check if the access would cause a page fault
     *
     * Note that hypervisor page directories are not present in the guest's tables, so this check
     * is sufficient.
     */
    const bool fWrite = !!(fAccess & X86_PTE_RW);
    const bool fUser  = !!(fAccess & X86_PTE_US);
    if (    !(fPageGst & X86_PTE_P)
        ||  (fWrite && !(fPageGst & X86_PTE_RW))
        ||  (fUser  && !(fPageGst & X86_PTE_US)) )
    {
        Log(("PGMVerifyAccess: access violation for %VGv attr %#llx vs %d:%d\n", Addr, fPageGst, fWrite, fUser));
        return VINF_EM_RAW_GUEST_TRAP;
    }

    /*
     * Next step is to verify if we protected this page for dirty bit tracking or for CSAM scanning
     */
    rc = PGMShwGetPage(pVM, (RTGCPTR)Addr, NULL, NULL);
    if (    rc == VERR_PAGE_NOT_PRESENT
        ||  rc == VERR_PAGE_TABLE_NOT_PRESENT)
    {
        /*
         * Page is not present in our page tables.
         * Try to sync it!
         */
        /* Relies on the PF error-code bits and PTE bits lining up so fAccess can be reused. */
        Assert(X86_TRAP_PF_RW == X86_PTE_RW && X86_TRAP_PF_US == X86_PTE_US);
        uint32_t uErr = fAccess & (X86_TRAP_PF_RW | X86_TRAP_PF_US);
        rc = PGM_BTH_PFN(VerifyAccessSyncPage, pVM)(pVM, Addr, fPageGst, uErr);
        if (rc != VINF_SUCCESS)
            return rc;
    }
    else
        AssertMsg(rc == VINF_SUCCESS, ("PGMShwGetPage %VGv failed with %Vrc\n", Addr, rc));

#if 0 /* def VBOX_STRICT; triggers too often now */
    /*
     * This check is a bit paranoid, but useful.
     */
    /** @note this will assert when writing to monitored pages (a bit annoying actually) */
    uint64_t fPageShw;
    rc = PGMShwGetPage(pVM, (RTGCPTR)Addr, &fPageShw, NULL);
    if (    (rc == VERR_PAGE_NOT_PRESENT || VBOX_FAILURE(rc))
        ||  (fWrite && !(fPageShw & X86_PTE_RW))
        ||  (fUser  && !(fPageShw & X86_PTE_US)) )
    {
        AssertMsgFailed(("Unexpected access violation for %VGv! rc=%Vrc write=%d user=%d\n",
                         Addr, rc, fWrite && !(fPageShw & X86_PTE_RW), fUser && !(fPageShw & X86_PTE_US)));
        return VINF_EM_RAW_GUEST_TRAP;
    }
#endif

    /* Walk any remaining pages in the range (the overflow test catches address wrap). */
    if (    VBOX_SUCCESS(rc)
        &&  (   PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize - 1)
             || Addr + cbSize < Addr))
    {
        /* Don't recursively call PGMVerifyAccess as we might run out of stack. */
        for (;;)
        {
            Addr += PAGE_SIZE;
            if (cbSize > PAGE_SIZE)
                cbSize -= PAGE_SIZE;
            else
                cbSize = 1;
            /* One-page probe; the size-1 argument keeps the callee from iterating itself. */
            rc = PGMVerifyAccess(pVM, Addr, 1, fAccess);
            if (rc != VINF_SUCCESS)
                break;
            if (PAGE_ADDRESS(Addr) == PAGE_ADDRESS(Addr + cbSize - 1))
                break;
        }
    }
    return rc;
}
493
494
#ifndef IN_GC /* Not present in the GC context. */
/**
 * Emulation of the invlpg instruction (HC only actually).
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   GCPtrPage   Page to invalidate.
 * @remark  ASSUMES the page table entry or page directory is
 *          valid. Fairly safe, but there could be edge cases!
 * @todo    Flush page or page directory only if necessary!
 */
PGMDECL(int) PGMInvalidatePage(PVM pVM, RTGCPTR GCPtrPage)
{
    int rc;

    LogFlow(("PGMInvalidatePage: GCPtrPage=%VGv\n", GCPtrPage));

    /** @todo merge PGMGCInvalidatePage with this one */

#ifndef IN_RING3
    /*
     * Notify the recompiler so it can record this instruction.
     * Failure happens when it's out of space. We'll return to HC in that case.
     */
    rc = REMNotifyInvalidatePage(pVM, GCPtrPage);
    if (VBOX_FAILURE(rc))
        return rc;
#endif

    /* Dispatch to the mode-specific worker, timing it with the context-suffixed counter. */
    STAM_PROFILE_START(&CTXMID(pVM->pgm.s.Stat,InvalidatePage), a);
    rc = PGM_BTH_PFN(InvalidatePage, pVM)(pVM, GCPtrPage);
    STAM_PROFILE_STOP(&CTXMID(pVM->pgm.s.Stat,InvalidatePage), a);

#ifndef IN_RING0
    /*
     * Check if we have a pending update of the CR3 monitoring.
     */
    if (    VBOX_SUCCESS(rc)
        &&  (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3))
    {
        pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
        Assert(!pVM->pgm.s.fMappingsFixed);
        Assert(pVM->pgm.s.GCPhysCR3 == pVM->pgm.s.GCPhysGstCR3Monitored);
        rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, pVM->pgm.s.GCPhysCR3);
    }
#endif

#ifdef IN_RING3
    /*
     * Inform CSAM about the flush
     */
    /** @note this is to check if monitored pages have been changed; when we implement callbacks for virtual handlers, this is no longer required. */
    CSAMR3FlushPage(pVM, GCPtrPage);
#endif
    return rc;
}
#endif
552
553
554/**
555 * Executes an instruction using the interpreter.
556 *
557 * @returns VBox status code (appropriate for trap handling and GC return).
558 * @param pVM VM handle.
559 * @param pRegFrame Register frame.
560 * @param pvFault Fault address.
561 */
562PGMDECL(int) PGMInterpretInstruction(PVM pVM, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
563{
564 uint32_t cb;
565 int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);
566 if (rc == VERR_EM_INTERPRETER)
567 rc = VINF_EM_RAW_EMULATE_INSTR;
568 if (rc != VINF_SUCCESS)
569 Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%VGv)\n", rc, pvFault));
570 return rc;
571}
572
573
/**
 * Gets effective page information (from the VMM page directory).
 *
 * @returns VBox status.
 * @param   pVM     VM Handle.
 * @param   GCPtr   Guest Context virtual address of the page.
 * @param   pfFlags Where to store the flags. These are X86_PTE_*.
 * @param   pHCPhys Where to store the HC physical address of the page.
 *                  This is page aligned.
 * @remark  You should use PGMMapGetPage() for pages in a mapping.
 */
PGMDECL(int) PGMShwGetPage(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
{
    /* Thin dispatch to the current shadow paging mode's implementation. */
    return PGM_SHW_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtr, pfFlags, pHCPhys);
}
589
590
/**
 * Sets (replaces) the page flags for a range of pages in the shadow context.
 *
 * @returns VBox status.
 * @param   pVM     VM handle.
 * @param   GCPtr   The address of the first page.
 * @param   cb      The size of the range in bytes.
 * @param   fFlags  Page flags X86_PTE_*, excluding the page mask of course.
 * @remark  You must use PGMMapSetPage() for pages in a mapping.
 */
PGMDECL(int) PGMShwSetPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
{
    /* "Set" is just "modify" with an all-clearing AND mask (0). */
    return PGMShwModifyPage(pVM, GCPtr, cb, fFlags, 0);
}
605
606
607/**
608 * Modify page flags for a range of pages in the shadow context.
609 *
610 * The existing flags are ANDed with the fMask and ORed with the fFlags.
611 *
612 * @returns VBox status code.
613 * @param pVM VM handle.
614 * @param GCPtr Virtual address of the first page in the range.
615 * @param cb Size (in bytes) of the range to apply the modification to.
616 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
617 * @param fMask The AND mask - page flags X86_PTE_*.
618 * Be very CAREFUL when ~'ing constants which could be 32-bit!
619 * @remark You must use PGMMapModifyPage() for pages in a mapping.
620 */
621PGMDECL(int) PGMShwModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
622{
623 /*
624 * Validate input.
625 */
626 if (fFlags & X86_PTE_PAE_PG_MASK)
627 {
628 AssertMsgFailed(("fFlags=%#llx\n", fFlags));
629 return VERR_INVALID_PARAMETER;
630 }
631 if (!cb)
632 {
633 AssertFailed();
634 return VERR_INVALID_PARAMETER;
635 }
636
637 /*
638 * Align the input.
639 */
640 cb += (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
641 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
642 GCPtr = (RTGCPTR)((RTGCUINTPTR)GCPtr & PAGE_BASE_GC_MASK); /** @todo this ain't necessary, right... */
643
644 /*
645 * Call worker.
646 */
647 return PGM_SHW_PFN(ModifyPage, pVM)(pVM, (RTGCUINTPTR)GCPtr, cb, fFlags, fMask);
648}
649
#ifndef IN_GC
/**
 * Gets the SHADOW page directory pointer for the specified address. Allocates
 * backing pages in case the PDPT or page directory is missing.
 *
 * @returns VBox status.
 * @param   pVM     VM handle.
 * @param   GCPtr   The address.
 * @param   ppPD    Receives address of page directory
 */
PGMDECL(int) PGMShwGetLongModePDPtr(PVM pVM, RTGCUINTPTR64 GCPtr, PX86PDPAE *ppPD)
{
    PPGM           pPGM   = &pVM->pgm.s;
    /* PML4 index of the address. */
    const unsigned iPml4e = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    PPGMPOOL       pPool  = pPGM->CTXSUFF(pPool);
    PX86PML4E      pPml4e;
    PPGMPOOLPAGE   pShwPage;
    int            rc;

    /*
     * Level 1: the shadow PML4 entry. Allocate a shadow PDPT from the pool
     * if the entry is completely empty, otherwise look up the existing page.
     */
    pPml4e = &pPGM->pHCPaePML4->a[iPml4e];
    if (    !pPml4e->n.u1Present
        &&  !(pPml4e->u & X86_PML4E_PG_MASK))
    {
        PX86PML4E pPml4eGst = &pPGM->pGstPaePML4HC->a[iPml4e];

        Assert(!(pPml4e->u & X86_PML4E_PG_MASK));
        /* Keyed on the guest PML4E's physical address so the pool can share/cache it. */
        rc = pgmPoolAlloc(pVM, pPml4eGst->u & X86_PML4E_PG_MASK, PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT, PGMPOOL_IDX_PML4, iPml4e, &pShwPage);
        if (rc == VERR_PGM_POOL_FLUSHED)
            return VINF_PGM_SYNC_CR3;  /* pool was flushed; caller must resync. */

        AssertRCReturn(rc, rc);

        /* The PDPT was cached or created; hook it up now. */
        pPml4e->u |= pShwPage->Core.Key;
    }
    else
    {
        pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
        AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
    }

    /*
     * Level 2: the shadow PDPT entry, same allocate-or-lookup dance.
     */
    const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
    PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
    PX86PDPE pPdpe = &pPdpt->a[iPdPt];

    if (    !pPdpe->n.u1Present
        &&  !(pPdpe->u & X86_PDPE_PG_MASK))
    {
        /* Need the guest PDPT to key the shadow PD on the guest PD's physical address. */
        PX86PML4E pPml4eGst = &pPGM->pGstPaePML4HC->a[iPml4e];
        PX86PDPT  pPdptGst;
        rc = PGM_GCPHYS_2_PTR(pVM, pPml4eGst->u & X86_PML4E_PG_MASK, &pPdptGst);
        AssertRCReturn(rc, rc);

        Assert(!(pPdpe->u & X86_PDPE_PG_MASK));
        rc = pgmPoolAlloc(pVM, pPdptGst->a[iPdPt].u & X86_PDPE_PG_MASK, PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD, PGMPOOL_IDX_PDPT, iPdPt, &pShwPage);
        if (rc == VERR_PGM_POOL_FLUSHED)
            return VINF_PGM_SYNC_CR3;

        AssertRCReturn(rc, rc);

        /* The PDPT was cached or created; hook it up now. */
        pPdpe->u |= pShwPage->Core.Key;
    }
    else
    {
        pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
        AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
    }

    /* Hand back the mapped shadow page directory. */
    *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
    return VINF_SUCCESS;
}
#endif
723
/**
 * Gets effective Guest OS page information.
 *
 * When GCPtr is in a big page, the function will return as if it was a normal
 * 4KB page. If the need for distinguishing between big and normal page becomes
 * necessary at a later point, a PGMGstGetPage() will be created for that
 * purpose.
 *
 * @returns VBox status.
 * @param   pVM     VM Handle.
 * @param   GCPtr   Guest Context virtual address of the page.
 * @param   pfFlags Where to store the flags. These are X86_PTE_*, even for big pages.
 * @param   pGCPhys Where to store the GC physical address of the page.
 *                  This is page aligned.
 */
PGMDECL(int) PGMGstGetPage(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
{
    /* Thin dispatch to the current guest paging mode's implementation. */
    return PGM_GST_PFN(GetPage,pVM)(pVM, (RTGCUINTPTR)GCPtr, pfFlags, pGCPhys);
}
743
744
745/**
746 * Checks if the page is present.
747 *
748 * @returns true if the page is present.
749 * @returns false if the page is not present.
750 * @param pVM The VM handle.
751 * @param GCPtr Address within the page.
752 */
753PGMDECL(bool) PGMGstIsPagePresent(PVM pVM, RTGCPTR GCPtr)
754{
755 int rc = PGMGstGetPage(pVM, GCPtr, NULL, NULL);
756 return VBOX_SUCCESS(rc);
757}
758
759
/**
 * Sets (replaces) the page flags for a range of pages in the guest's tables.
 *
 * @returns VBox status.
 * @param   pVM     VM handle.
 * @param   GCPtr   The address of the first page.
 * @param   cb      The size of the range in bytes.
 * @param   fFlags  Page flags X86_PTE_*, excluding the page mask of course.
 */
PGMDECL(int) PGMGstSetPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
{
    /* "Set" is just "modify" with an all-clearing AND mask (0). */
    return PGMGstModifyPage(pVM, GCPtr, cb, fFlags, 0);
}
773
774
/**
 * Modify page flags for a range of pages in the guest's tables
 *
 * The existing flags are ANDed with the fMask and ORed with the fFlags.
 *
 * @returns VBox status code.
 * @param   pVM     VM handle.
 * @param   GCPtr   Virtual address of the first page in the range.
 * @param   cb      Size (in bytes) of the range to apply the modification to.
 * @param   fFlags  The OR mask - page flags X86_PTE_*, excluding the page mask of course.
 * @param   fMask   The AND mask - page flags X86_PTE_*, excluding the page mask of course.
 *                  Be very CAREFUL when ~'ing constants which could be 32-bit!
 */
PGMDECL(int) PGMGstModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
{
    STAM_PROFILE_START(&CTXMID(pVM->pgm.s.Stat,GstModifyPage), a);

    /*
     * Validate input.
     */
    /* The OR mask must not contain physical-address bits. */
    if (fFlags & X86_PTE_PAE_PG_MASK)
    {
        AssertMsgFailed(("fFlags=%#llx\n", fFlags));
        STAM_PROFILE_STOP(&CTXMID(pVM->pgm.s.Stat,GstModifyPage), a);
        return VERR_INVALID_PARAMETER;
    }

    /* An empty range makes no sense. */
    if (!cb)
    {
        AssertFailed();
        STAM_PROFILE_STOP(&CTXMID(pVM->pgm.s.Stat,GstModifyPage), a);
        return VERR_INVALID_PARAMETER;
    }

    LogFlow(("PGMGstModifyPage %VGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));

    /*
     * Adjust input.
     */
    /* Page-align: grow the byte count by the start offset and round up, round the start down. */
    cb += (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
    cb = RT_ALIGN_Z(cb, PAGE_SIZE);
    GCPtr = (RTGCPTR)((RTGCUINTPTR)GCPtr & PAGE_BASE_GC_MASK);

    /*
     * Call worker.
     */
    int rc = PGM_GST_PFN(ModifyPage, pVM)(pVM, (RTGCUINTPTR)GCPtr, cb, fFlags, fMask);

    STAM_PROFILE_STOP(&CTXMID(pVM->pgm.s.Stat,GstModifyPage), a);
    return rc;
}
826
827
828/**
829 * Gets the current CR3 register value for the shadow memory context.
830 * @returns CR3 value.
831 * @param pVM The VM handle.
832 */
833PGMDECL(uint32_t) PGMGetHyperCR3(PVM pVM)
834{
835 switch (pVM->pgm.s.enmShadowMode)
836 {
837 case PGMMODE_32_BIT:
838 return pVM->pgm.s.HCPhys32BitPD;
839
840 case PGMMODE_PAE:
841 case PGMMODE_PAE_NX:
842 return pVM->pgm.s.HCPhysPaePDPT;
843
844 case PGMMODE_AMD64:
845 case PGMMODE_AMD64_NX:
846 return pVM->pgm.s.HCPhysPaePML4;
847
848 default:
849 AssertMsgFailed(("enmShadowMode=%d\n", pVM->pgm.s.enmShadowMode));
850 return ~0;
851 }
852}
853
854
/**
 * Gets the CR3 register value for the 32-Bit shadow memory context.
 * @returns CR3 value (the HCPhys32BitPD root; presumably the HC physical
 *          address of the shadow 32-bit page directory — per the field name).
 * @param   pVM     The VM handle.
 */
PGMDECL(uint32_t) PGMGetHyper32BitCR3(PVM pVM)
{
    return pVM->pgm.s.HCPhys32BitPD;
}
864
865
/**
 * Gets the CR3 register value for the PAE shadow memory context.
 * @returns CR3 value (the HCPhysPaePDPT root; presumably the HC physical
 *          address of the shadow PAE PDPT — per the field name).
 * @param   pVM     The VM handle.
 */
PGMDECL(uint32_t) PGMGetHyperPaeCR3(PVM pVM)
{
    return pVM->pgm.s.HCPhysPaePDPT;
}
875
876
/**
 * Gets the CR3 register value for the AMD64 shadow memory context.
 * @returns CR3 value (the HCPhysPaePML4 root; presumably the HC physical
 *          address of the shadow PML4 — per the field name).
 * @param   pVM     The VM handle.
 */
PGMDECL(uint32_t) PGMGetHyperAmd64CR3(PVM pVM)
{
    return pVM->pgm.s.HCPhysPaePML4;
}
886
887
/**
 * Gets the current CR3 register value for the HC intermediate memory context.
 * @returns CR3 value, or ~0 if the host paging mode is unexpected.
 * @param   pVM     The VM handle.
 */
PGMDECL(uint32_t) PGMGetInterHCCR3(PVM pVM)
{
    switch (pVM->pgm.s.enmHostMode)
    {
        case SUPPAGINGMODE_32_BIT:
        case SUPPAGINGMODE_32_BIT_GLOBAL:
            return pVM->pgm.s.HCPhysInterPD;

        case SUPPAGINGMODE_PAE:
        case SUPPAGINGMODE_PAE_GLOBAL:
        case SUPPAGINGMODE_PAE_NX:
        case SUPPAGINGMODE_PAE_GLOBAL_NX:
            return pVM->pgm.s.HCPhysInterPaePDPT;

        case SUPPAGINGMODE_AMD64:
        case SUPPAGINGMODE_AMD64_GLOBAL:
        case SUPPAGINGMODE_AMD64_NX:
        case SUPPAGINGMODE_AMD64_GLOBAL_NX:
            /* NOTE(review): returns the same PDPT as the PAE cases above, while the GC
               twin (PGMGetInterGCCR3) returns HCPhysInterPaePML4 for AMD64. Possibly a
               copy-paste leftover — confirm which root the AMD64 host switcher expects. */
            return pVM->pgm.s.HCPhysInterPaePDPT;

        default:
            AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode));
            return ~0;
    }
}
918
919
/**
 * Gets the current CR3 register value for the GC intermediate memory context.
 * @returns CR3 value, or ~0 if the shadow mode is unexpected.
 * @param   pVM     The VM handle.
 */
PGMDECL(uint32_t) PGMGetInterGCCR3(PVM pVM)
{
    /* Root of the intermediate context is selected by the shadow paging mode. */
    switch (pVM->pgm.s.enmShadowMode)
    {
        case PGMMODE_32_BIT:
            return pVM->pgm.s.HCPhysInterPD;

        case PGMMODE_PAE:
        case PGMMODE_PAE_NX:
            return pVM->pgm.s.HCPhysInterPaePDPT;

        case PGMMODE_AMD64:
        case PGMMODE_AMD64_NX:
            return pVM->pgm.s.HCPhysInterPaePML4;

        default:
            AssertMsgFailed(("enmShadowMode=%d\n", pVM->pgm.s.enmShadowMode));
            return ~0;
    }
}
945
946
/**
 * Gets the CR3 register value for the 32-Bit intermediate memory context.
 * @returns CR3 value (the HCPhysInterPD root).
 * @param   pVM     The VM handle.
 */
PGMDECL(uint32_t) PGMGetInter32BitCR3(PVM pVM)
{
    return pVM->pgm.s.HCPhysInterPD;
}
956
957
/**
 * Gets the CR3 register value for the PAE intermediate memory context.
 * @returns CR3 value (the HCPhysInterPaePDPT root).
 * @param   pVM     The VM handle.
 */
PGMDECL(uint32_t) PGMGetInterPaeCR3(PVM pVM)
{
    return pVM->pgm.s.HCPhysInterPaePDPT;
}
967
968
/**
 * Gets the CR3 register value for the AMD64 intermediate memory context.
 * @returns CR3 value (the HCPhysInterPaePML4 root).
 * @param   pVM     The VM handle.
 */
PGMDECL(uint32_t) PGMGetInterAmd64CR3(PVM pVM)
{
    return pVM->pgm.s.HCPhysInterPaePML4;
}
978
979
/**
 * Performs and schedules necessary updates following a CR3 load or reload.
 *
 * This will normally involve mapping the guest PD or nPDPT
 *
 * @returns VBox status code.
 * @retval  VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
 *          safely be ignored and overridden since the FF will be set too then.
 * @param   pVM         VM handle.
 * @param   cr3         The new cr3.
 * @param   fGlobal     Indicates whether this is a global flush or not.
 */
PGMDECL(int) PGMFlushTLB(PVM pVM, uint64_t cr3, bool fGlobal)
{
    STAM_PROFILE_START(&pVM->pgm.s.StatFlushTLB, a);

    /*
     * Always flag the necessary updates; necessary for hardware acceleration
     */
    VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
    if (fGlobal)
        VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
    LogFlow(("PGMFlushTLB: cr3=%VX64 OldCr3=%VX64 fGlobal=%d\n", cr3, pVM->pgm.s.GCPhysCR3, fGlobal));

    /*
     * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
     */
    int rc = VINF_SUCCESS;
    RTGCPHYS GCPhysCR3;
    /* PAE/AMD64 guests use the wider PAE CR3 page mask; other modes mask at 4KB. */
    if (    pVM->pgm.s.enmGuestMode == PGMMODE_PAE
        ||  pVM->pgm.s.enmGuestMode == PGMMODE_PAE_NX
        ||  pVM->pgm.s.enmGuestMode == PGMMODE_AMD64
        ||  pVM->pgm.s.enmGuestMode == PGMMODE_AMD64_NX)
        GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
    else
        GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
    if (pVM->pgm.s.GCPhysCR3 != GCPhysCR3)
    {
        /* CR3 really changed: remap it and re-establish write monitoring
           (monitoring is skipped while the mappings are fixed). */
        pVM->pgm.s.GCPhysCR3 = GCPhysCR3;
        rc = PGM_GST_PFN(MapCR3, pVM)(pVM, GCPhysCR3);
        if (VBOX_SUCCESS(rc) && !pVM->pgm.s.fMappingsFixed)
        {
            pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
            rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, GCPhysCR3);
        }
        if (fGlobal)
            STAM_COUNTER_INC(&pVM->pgm.s.StatFlushTLBNewCR3Global);
        else
            STAM_COUNTER_INC(&pVM->pgm.s.StatFlushTLBNewCR3);
    }
    else
    {
        /*
         * Check if we have a pending update of the CR3 monitoring.
         */
        if (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
        {
            pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
            Assert(!pVM->pgm.s.fMappingsFixed);
            rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, GCPhysCR3);
        }
        if (fGlobal)
            STAM_COUNTER_INC(&pVM->pgm.s.StatFlushTLBSameCR3Global);
        else
            STAM_COUNTER_INC(&pVM->pgm.s.StatFlushTLBSameCR3);
    }

    STAM_PROFILE_STOP(&pVM->pgm.s.StatFlushTLB, a);
    return rc;
}
1050
1051
/**
 * Synchronize the paging structures.
 *
 * This function is called in response to the VM_FF_PGM_SYNC_CR3 and
 * VM_FF_PGM_SYNC_CR3_NONGLOBAL. Those two force action flags are set
 * in several places, most importantly whenever the CR3 is loaded.
 *
 * @returns VBox status code.
 * @param   pVM         The virtual machine.
 * @param   cr0         Guest context CR0 register
 * @param   cr3         Guest context CR3 register
 * @param   cr4         Guest context CR4 register
 * @param   fGlobal     Including global page directories or not
 */
PGMDECL(int) PGMSyncCR3(PVM pVM, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
{
    /*
     * We might be called when we shouldn't.
     *
     * The mode switching will ensure that the PD is resynced
     * after every mode switch. So, if we find ourselves here
     * when in protected or real mode we can safely disable the
     * FF and return immediately.
     */
    if (pVM->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
    {
        /* Paging must be off here (PG and PE must not both be set). */
        Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
        VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3);
        VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
        return VINF_SUCCESS;
    }

    /* If global pages are not supported, then all flushes are global */
    if (!(cr4 & X86_CR4_PGE))
        fGlobal = true;
    LogFlow(("PGMSyncCR3: cr0=%VX64 cr3=%VX64 cr4=%VX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
             VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3), VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL)));

    /*
     * Let the 'Bth' (both/shadow+guest) function do the work and we'll just
     * keep track of the flags.
     */
    STAM_PROFILE_START(&pVM->pgm.s.CTXMID(Stat,SyncCR3), a);
    int rc = PGM_BTH_PFN(SyncCR3, pVM)(pVM, cr0, cr3, cr4, fGlobal);
    STAM_PROFILE_STOP(&pVM->pgm.s.CTXMID(Stat,SyncCR3), a);
    AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || VBOX_FAILURE(rc), ("rc=%VRc\n", rc));
    if (rc == VINF_SUCCESS)
    {
        /* Keep the FFs set when PGM_SYNC_ALWAYS is active so we resync on
           every call; otherwise the sync is done and the flags can go. */
        if (!(pVM->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
        {
            VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3);
            VM_FF_CLEAR(pVM, VM_FF_PGM_SYNC_CR3_NON_GLOBAL);
        }

        /*
         * Check if we have a pending update of the CR3 monitoring
         * (deferred while the mappings were fixed).
         */
        if (pVM->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
        {
            pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
            Assert(!pVM->pgm.s.fMappingsFixed);
            Assert(pVM->pgm.s.GCPhysCR3 == pVM->pgm.s.GCPhysGstCR3Monitored);
            rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, pVM->pgm.s.GCPhysCR3);
        }
    }

    /*
     * Now flush the CR3 (guest context).
     */
    if (rc == VINF_SUCCESS)
        PGM_INVL_GUEST_TLBS();
    return rc;
}
1124
1125
1126/**
1127 * Called whenever CR0 or CR4 in a way which may change
1128 * the paging mode.
1129 *
1130 * @returns VBox status code fit for scheduling in GC and R0.
1131 * @retval VINF_SUCCESS if the was no change, or it was successfully dealt with.
1132 * @retval VINF_PGM_CHANGE_MODE if we're in GC or R0 and the mode changes.
1133 * @param pVM VM handle.
1134 * @param cr0 The new cr0.
1135 * @param cr4 The new cr4.
1136 * @param efer The new extended feature enable register.
1137 */
1138PGMDECL(int) PGMChangeMode(PVM pVM, uint64_t cr0, uint64_t cr4, uint64_t efer)
1139{
1140 PGMMODE enmGuestMode;
1141
1142 /*
1143 * Calc the new guest mode.
1144 */
1145 if (!(cr0 & X86_CR0_PE))
1146 enmGuestMode = PGMMODE_REAL;
1147 else if (!(cr0 & X86_CR0_PG))
1148 enmGuestMode = PGMMODE_PROTECTED;
1149 else if (!(cr4 & X86_CR4_PAE))
1150 enmGuestMode = PGMMODE_32_BIT;
1151 else if (!(efer & MSR_K6_EFER_LME))
1152 {
1153 if (!(efer & MSR_K6_EFER_NXE))
1154 enmGuestMode = PGMMODE_PAE;
1155 else
1156 enmGuestMode = PGMMODE_PAE_NX;
1157 }
1158 else
1159 {
1160 if (!(efer & MSR_K6_EFER_NXE))
1161 enmGuestMode = PGMMODE_AMD64;
1162 else
1163 enmGuestMode = PGMMODE_AMD64_NX;
1164 }
1165
1166 /*
1167 * Did it change?
1168 */
1169 if (pVM->pgm.s.enmGuestMode == enmGuestMode)
1170 return VINF_SUCCESS;
1171#ifdef IN_RING3
1172 return pgmR3ChangeMode(pVM, enmGuestMode);
1173#else
1174 Log(("PGMChangeMode: returns VINF_PGM_CHANGE_MODE.\n"));
1175 return VINF_PGM_CHANGE_MODE;
1176#endif
1177}
1178
1179
1180/**
1181 * Gets the current guest paging mode.
1182 *
1183 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
1184 *
1185 * @returns The current paging mode.
1186 * @param pVM The VM handle.
1187 */
1188PGMDECL(PGMMODE) PGMGetGuestMode(PVM pVM)
1189{
1190 return pVM->pgm.s.enmGuestMode;
1191}
1192
1193
1194/**
1195 * Gets the current shadow paging mode.
1196 *
1197 * @returns The current paging mode.
1198 * @param pVM The VM handle.
1199 */
1200PGMDECL(PGMMODE) PGMGetShadowMode(PVM pVM)
1201{
1202 return pVM->pgm.s.enmShadowMode;
1203}
1204
1205/**
1206 * Gets the current host paging mode.
1207 *
1208 * @returns The current paging mode.
1209 * @param pVM The VM handle.
1210 */
1211PGMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
1212{
1213 switch (pVM->pgm.s.enmHostMode)
1214 {
1215 case SUPPAGINGMODE_32_BIT:
1216 case SUPPAGINGMODE_32_BIT_GLOBAL:
1217 return PGMMODE_32_BIT;
1218
1219 case SUPPAGINGMODE_PAE:
1220 case SUPPAGINGMODE_PAE_GLOBAL:
1221 return PGMMODE_PAE;
1222
1223 case SUPPAGINGMODE_PAE_NX:
1224 case SUPPAGINGMODE_PAE_GLOBAL_NX:
1225 return PGMMODE_PAE_NX;
1226
1227 case SUPPAGINGMODE_AMD64:
1228 case SUPPAGINGMODE_AMD64_GLOBAL:
1229 return PGMMODE_AMD64;
1230
1231 case SUPPAGINGMODE_AMD64_NX:
1232 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
1233 return PGMMODE_AMD64_NX;
1234
1235 default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
1236 }
1237
1238 return PGMMODE_INVALID;
1239}
1240
1241
1242/**
1243 * Get mode name.
1244 *
1245 * @returns read-only name string.
1246 * @param enmMode The mode which name is desired.
1247 */
1248PGMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
1249{
1250 switch (enmMode)
1251 {
1252 case PGMMODE_REAL: return "real";
1253 case PGMMODE_PROTECTED: return "protected";
1254 case PGMMODE_32_BIT: return "32-bit";
1255 case PGMMODE_PAE: return "PAE";
1256 case PGMMODE_PAE_NX: return "PAE+NX";
1257 case PGMMODE_AMD64: return "AMD64";
1258 case PGMMODE_AMD64_NX: return "AMD64+NX";
1259 default: return "unknown mode value";
1260 }
1261}
1262
1263
/**
 * Acquire the PGM lock.
 *
 * In GC and R0 the critical section cannot be waited on directly; on
 * contention (VERR_SEM_BUSY) we call back to the ring-3 host to take the
 * lock there.
 *
 * @returns VBox status code
 * @param   pVM     The VM to operate on.
 */
int pgmLock(PVM pVM)
{
    /* Try enter without blocking; VERR_SEM_BUSY is returned on contention. */
    int rc = PDMCritSectEnter(&pVM->pgm.s.CritSect, VERR_SEM_BUSY);
#ifdef IN_GC
    if (rc == VERR_SEM_BUSY)
        rc = VMMGCCallHost(pVM, VMMCALLHOST_PGM_LOCK, 0);
#elif defined(IN_RING0)
    if (rc == VERR_SEM_BUSY)
        rc = VMMR0CallHost(pVM, VMMCALLHOST_PGM_LOCK, 0);
#endif
    AssertRC(rc);
    return rc;
}
1283
1284
/**
 * Release the PGM lock.
 *
 * Counterpart to pgmLock(); simply leaves the PGM critical section.
 *
 * @param   pVM     The VM to operate on.
 */
void pgmUnlock(PVM pVM)
{
    PDMCritSectLeave(&pVM->pgm.s.CritSect);
}
1295
1296
1297#ifdef VBOX_STRICT
1298
1299/**
1300 * Asserts that there are no mapping conflicts.
1301 *
1302 * @returns Number of conflicts.
1303 * @param pVM The VM Handle.
1304 */
1305PGMDECL(unsigned) PGMAssertNoMappingConflicts(PVM pVM)
1306{
1307 unsigned cErrors = 0;
1308
1309 /*
1310 * Check for mapping conflicts.
1311 */
1312 for (PPGMMAPPING pMapping = CTXALLSUFF(pVM->pgm.s.pMappings);
1313 pMapping;
1314 pMapping = CTXALLSUFF(pMapping->pNext))
1315 {
1316 /** @todo This is slow and should be optimized, but since it's just assertions I don't care now. */
1317 for (RTGCUINTPTR GCPtr = (RTGCUINTPTR)pMapping->GCPtr;
1318 GCPtr <= (RTGCUINTPTR)pMapping->GCPtrLast;
1319 GCPtr += PAGE_SIZE)
1320 {
1321 int rc = PGMGstGetPage(pVM, (RTGCPTR)GCPtr, NULL, NULL);
1322 if (rc != VERR_PAGE_TABLE_NOT_PRESENT)
1323 {
1324 AssertMsgFailed(("Conflict at %VGv with %s\n", GCPtr, HCSTRING(pMapping->pszDesc)));
1325 cErrors++;
1326 break;
1327 }
1328 }
1329 }
1330
1331 return cErrors;
1332}
1333
1334
1335/**
1336 * Asserts that everything related to the guest CR3 is correctly shadowed.
1337 *
1338 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
1339 * and assert the correctness of the guest CR3 mapping before asserting that the
1340 * shadow page tables is in sync with the guest page tables.
1341 *
1342 * @returns Number of conflicts.
1343 * @param pVM The VM Handle.
1344 * @param cr3 The current guest CR3 register value.
1345 * @param cr4 The current guest CR4 register value.
1346 */
1347PGMDECL(unsigned) PGMAssertCR3(PVM pVM, uint64_t cr3, uint64_t cr4)
1348{
1349 STAM_PROFILE_START(&pVM->pgm.s.CTXMID(Stat,SyncCR3), a);
1350 unsigned cErrors = PGM_BTH_PFN(AssertCR3, pVM)(pVM, cr3, cr4, 0, ~(RTGCUINTPTR)0);
1351 STAM_PROFILE_STOP(&pVM->pgm.s.CTXMID(Stat,SyncCR3), a);
1352 return cErrors;
1353}
1354
1355#endif /* VBOX_STRICT */
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette