VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/PGMR0.cpp@41741

Last change on this file since 41741 was 39402, checked in by vboxsync, 13 years ago

VMM: don't use generic IPE status codes, use specific ones. Part 1.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 21.8 KB
/* $Id: PGMR0.cpp 39402 2011-11-23 16:25:04Z vboxsync $ */
/** @file
 * PGM - Page Manager and Monitor, Ring-0.
 */

/*
 * Copyright (C) 2007-2011 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#include <VBox/rawpci.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/gmm.h>
#include <VBox/vmm/gvm.h>
#include "PGMInternal.h"
#include <VBox/vmm/vm.h>
#include "PGMInline.h"
#include <VBox/log.h>
#include <VBox/err.h>
#include <iprt/assert.h>
#include <iprt/mem.h>


/*
 * Instantiate the ring-0 header/code templates.
 */
#define PGM_BTH_NAME(name)          PGM_BTH_NAME_32BIT_PROT(name)
#include "PGMR0Bth.h"
#undef PGM_BTH_NAME

#define PGM_BTH_NAME(name)          PGM_BTH_NAME_PAE_PROT(name)
#include "PGMR0Bth.h"
#undef PGM_BTH_NAME

#define PGM_BTH_NAME(name)          PGM_BTH_NAME_AMD64_PROT(name)
#include "PGMR0Bth.h"
#undef PGM_BTH_NAME

#define PGM_BTH_NAME(name)          PGM_BTH_NAME_EPT_PROT(name)
#include "PGMR0Bth.h"
#undef PGM_BTH_NAME
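
/*
 * The block above uses the include-template pattern: PGMR0Bth.h is compiled
 * once per paging mode by redefining the name-mangling macro before each
 * inclusion.  A minimal sketch of the pattern with hypothetical names
 * (worker.h, TMPL_NAME, ModeA/ModeB are illustrations, not VirtualBox
 * identifiers):
 *
 * @code
 *   // worker.h -- hypothetical template header
 *   int TMPL_NAME(DoWork)(int x)
 *   {
 *       return x + 1;
 *   }
 *
 *   // instantiation site
 *   #define TMPL_NAME(name) ModeA_##name
 *   #include "worker.h"                     // emits ModeA_DoWork()
 *   #undef  TMPL_NAME
 *
 *   #define TMPL_NAME(name) ModeB_##name
 *   #include "worker.h"                     // emits ModeB_DoWork()
 *   #undef  TMPL_NAME
 * @endcode
 */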


/**
 * Worker function for PGMR3PhysAllocateHandyPages and pgmPhysEnsureHandyPage.
 *
 * @returns The following VBox status codes.
 * @retval  VINF_SUCCESS on success. FF cleared.
 * @retval  VINF_EM_NO_MEMORY if we're out of memory. The FF is set in this case.
 *
 * @param   pVM         The VM handle.
 * @param   pVCpu       The VMCPU handle.
 *
 * @remarks Must be called from within the PGM critical section. The caller
 *          must clear the new pages.
 */
VMMR0DECL(int) PGMR0PhysAllocateHandyPages(PVM pVM, PVMCPU pVCpu)
{
    PGM_LOCK_ASSERT_OWNER_EX(pVM, pVCpu);

    /*
     * Check for error injection.
     */
    if (RT_UNLIKELY(pVM->pgm.s.fErrInjHandyPages))
        return VERR_NO_MEMORY;

    /*
     * Try allocate a full set of handy pages.
     */
    uint32_t iFirst = pVM->pgm.s.cHandyPages;
    AssertReturn(iFirst <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), VERR_PGM_HANDY_PAGE_IPE);
    uint32_t cPages = RT_ELEMENTS(pVM->pgm.s.aHandyPages) - iFirst;
    if (!cPages)
        return VINF_SUCCESS;
    int rc = GMMR0AllocateHandyPages(pVM, pVCpu->idCpu, cPages, cPages, &pVM->pgm.s.aHandyPages[iFirst]);
    if (RT_SUCCESS(rc))
    {
#ifdef VBOX_STRICT
        for (uint32_t i = 0; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
        {
            Assert(pVM->pgm.s.aHandyPages[i].idPage != NIL_GMM_PAGEID);
            Assert(pVM->pgm.s.aHandyPages[i].idPage <= GMM_PAGEID_LAST);
            Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
            Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys != NIL_RTHCPHYS);
            Assert(!(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
        }
#endif

        pVM->pgm.s.cHandyPages = RT_ELEMENTS(pVM->pgm.s.aHandyPages);
    }
    else if (rc != VERR_GMM_SEED_ME)
    {
        if (   (   rc == VERR_GMM_HIT_GLOBAL_LIMIT
                || rc == VERR_GMM_HIT_VM_ACCOUNT_LIMIT)
            && iFirst < PGM_HANDY_PAGES_MIN)
        {

#ifdef VBOX_STRICT
            /* We're ASSUMING that GMM has updated all the entries before failing us. */
            uint32_t i;
            for (i = iFirst; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
            {
                Assert(pVM->pgm.s.aHandyPages[i].idPage == NIL_GMM_PAGEID);
                Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
                Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys == NIL_RTHCPHYS);
            }
#endif

            /*
             * Reduce the number of pages until we hit the minimum limit.
             */
            do
            {
                cPages >>= 1;
                if (cPages + iFirst < PGM_HANDY_PAGES_MIN)
                    cPages = PGM_HANDY_PAGES_MIN - iFirst;
                rc = GMMR0AllocateHandyPages(pVM, pVCpu->idCpu, 0, cPages, &pVM->pgm.s.aHandyPages[iFirst]);
            } while (   (   rc == VERR_GMM_HIT_GLOBAL_LIMIT
                         || rc == VERR_GMM_HIT_VM_ACCOUNT_LIMIT)
                     && cPages + iFirst > PGM_HANDY_PAGES_MIN);
            if (RT_SUCCESS(rc))
            {
#ifdef VBOX_STRICT
                i = iFirst + cPages;
                while (i-- > 0)
                {
                    Assert(pVM->pgm.s.aHandyPages[i].idPage != NIL_GMM_PAGEID);
                    Assert(pVM->pgm.s.aHandyPages[i].idPage <= GMM_PAGEID_LAST);
                    Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
                    Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys != NIL_RTHCPHYS);
                    Assert(!(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
                }

                for (i = cPages + iFirst; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
                {
                    Assert(pVM->pgm.s.aHandyPages[i].idPage == NIL_GMM_PAGEID);
                    Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
                    Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys == NIL_RTHCPHYS);
                }
#endif

                pVM->pgm.s.cHandyPages = iFirst + cPages;
            }
        }

        if (RT_FAILURE(rc) && rc != VERR_GMM_SEED_ME)
        {
            LogRel(("PGMR0PhysAllocateHandyPages: rc=%Rrc iFirst=%d cPages=%d\n", rc, iFirst, cPages));
            VM_FF_SET(pVM, VM_FF_PGM_NO_MEMORY);
        }
    }


    LogFlow(("PGMR0PhysAllocateHandyPages: cPages=%d rc=%Rrc\n", cPages, rc));
    return rc;
}
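
/*
 * Illustrative caller-side sketch (hypothetical, not taken from the VirtualBox
 * sources).  The contract documented above is that the caller already owns the
 * PGM critical section, clears the newly allocated pages itself, and backs off
 * to ring-3 when no memory is available (VM_FF_PGM_NO_MEMORY gets set on hard
 * failure).
 *
 * @code
 *   pgmLock(pVM);                                      // contract: PGM lock owner
 *   int rc = PGMR0PhysAllocateHandyPages(pVM, pVCpu);
 *   if (RT_SUCCESS(rc))
 *   {
 *       // ... hand out pages from pVM->pgm.s.aHandyPages, zeroing each one ...
 *   }
 *   else
 *   {
 *       // VM_FF_PGM_NO_MEMORY may have been set; return to ring-3 so it can
 *       // free up memory before retrying.
 *   }
 *   pgmUnlock(pVM);
 * @endcode
 */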


/**
 * Worker function for PGMR3PhysAllocateLargeHandyPage
 *
 * @returns The following VBox status codes.
 * @retval  VINF_SUCCESS on success.
 * @retval  VINF_EM_NO_MEMORY if we're out of memory.
 *
 * @param   pVM         The VM handle.
 * @param   pVCpu       The VMCPU handle.
 *
 * @remarks Must be called from within the PGM critical section. The caller
 *          must clear the new pages.
 */
VMMR0DECL(int) PGMR0PhysAllocateLargeHandyPage(PVM pVM, PVMCPU pVCpu)
{
    PGM_LOCK_ASSERT_OWNER_EX(pVM, pVCpu);
    Assert(!pVM->pgm.s.cLargeHandyPages);

    int rc = GMMR0AllocateLargePage(pVM, pVCpu->idCpu, _2M,
                                    &pVM->pgm.s.aLargeHandyPage[0].idPage,
                                    &pVM->pgm.s.aLargeHandyPage[0].HCPhysGCPhys);
    if (RT_SUCCESS(rc))
        pVM->pgm.s.cLargeHandyPages = 1;

    return rc;
}


#ifdef VBOX_WITH_PCI_PASSTHROUGH
/* Interface sketch.  The interface belongs to a global PCI pass-through
   manager.  It shall use the global VM handle, not the user VM handle, to
   store the per-VM info (domain) since that is all ring-0 stuff, thus
   passing pGVM here.  I've tentatively prefixed the functions 'GPciRawR0';
   we can discuss the PciRaw code re-organization when I'm back from
   vacation.

   I've implemented the initial IOMMU set up below.  For things to work
   reliably, we will probably need to add a whole bunch of checks and
   GPciRawR0GuestPageUpdate calls to the PGM code.  For the present,
   assuming nested paging (enforced) and prealloc (enforced), no
   ballooning (check missing), page sharing (check missing) or live
   migration (check missing), it might work fine.  At least if some
   VM power-off hook is present and can tear down the IOMMU page tables. */
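
/*
 * Illustrative call sequence (a sketch only, not actual VirtualBox code; the
 * real consumer of this interface is PGMR0PhysSetupIommu further down).  The
 * intended protocol is to bracket the initial mapping pass with Begin/End,
 * Assign or Unassign each guest page inside that bracket, and report later
 * changes with GPciRawR0GuestPageUpdate.
 *
 * @code
 *   int rc = GPciRawR0GuestPageBeginAssignments(pGVM);
 *   if (RT_SUCCESS(rc))
 *   {
 *       // for each guest page GCPhys with a trusted backing page HCPhys:
 *       rc = GPciRawR0GuestPageAssign(pGVM, GCPhys, HCPhys);
 *       // for each guest page without one:
 *       rc = GPciRawR0GuestPageUnassign(pGVM, GCPhys);
 *
 *       int rc2 = GPciRawR0GuestPageEndAssignments(pGVM);   // called even after failures
 *       if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
 *           rc = rc2;
 *   }
 *
 *   // later, when a mapping changes (ballooning, remapping, ...):
 *   rc = GPciRawR0GuestPageUpdate(pGVM, GCPhys, NIL_RTHCPHYS);
 * @endcode
 */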

/**
 * Tells the global PCI pass-through manager that we are about to set up the
 * guest page to host page mappings for the specified VM.
 *
 * @returns VBox status code.
 *
 * @param   pGVM                The ring-0 VM structure.
 */
VMMR0_INT_DECL(int) GPciRawR0GuestPageBeginAssignments(PGVM pGVM)
{
    NOREF(pGVM);
    return VINF_SUCCESS;
}


/**
 * Assigns a host page mapping for a guest page.
 *
 * This is only used when setting up the mappings, i.e. between
 * GPciRawR0GuestPageBeginAssignments and GPciRawR0GuestPageEndAssignments.
 *
 * @returns VBox status code.
 * @param   pGVM                The ring-0 VM structure.
 * @param   GCPhys              The address of the guest page (page aligned).
 * @param   HCPhys              The address of the host page (page aligned).
 */
VMMR0_INT_DECL(int) GPciRawR0GuestPageAssign(PGVM pGVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys)
{
    AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR_3);
    AssertReturn(!(HCPhys & PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR_3);

    if (pGVM->rawpci.s.pfnContigMemInfo)
        /** @todo what do we do on failure? */
        pGVM->rawpci.s.pfnContigMemInfo(&pGVM->rawpci.s, HCPhys, GCPhys, PAGE_SIZE, PCIRAW_MEMINFO_MAP);

    return VINF_SUCCESS;
}


/**
 * Indicates that the specified guest page doesn't exist or doesn't have a host
 * page mapping we trust PCI pass-through with.
 *
 * This is only used when setting up the mappings, i.e. between
 * GPciRawR0GuestPageBeginAssignments and GPciRawR0GuestPageEndAssignments.
 *
 * @returns VBox status code.
 * @param   pGVM                The ring-0 VM structure.
 * @param   GCPhys              The address of the guest page (page aligned).
 */
VMMR0_INT_DECL(int) GPciRawR0GuestPageUnassign(PGVM pGVM, RTGCPHYS GCPhys)
{
    AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR_3);

    if (pGVM->rawpci.s.pfnContigMemInfo)
        /** @todo what do we do on failure? */
        pGVM->rawpci.s.pfnContigMemInfo(&pGVM->rawpci.s, 0, GCPhys, PAGE_SIZE, PCIRAW_MEMINFO_UNMAP);

    return VINF_SUCCESS;
}


/**
 * Tells the global PCI pass-through manager that we have completed setting up
 * the guest page to host page mappings for the specified VM.
 *
 * This complements GPciRawR0GuestPageBeginAssignments and will be called even
 * if some page assignment failed.
 *
 * @returns VBox status code.
 *
 * @param   pGVM                The ring-0 VM structure.
 */
VMMR0_INT_DECL(int) GPciRawR0GuestPageEndAssignments(PGVM pGVM)
{
    NOREF(pGVM);
    return VINF_SUCCESS;
}


/**
 * Tells the global PCI pass-through manager that a guest page mapping has
 * changed after the initial setup.
 *
 * @returns VBox status code.
 * @param   pGVM                The ring-0 VM structure.
 * @param   GCPhys              The address of the guest page (page aligned).
 * @param   HCPhys              The new host page address or NIL_RTHCPHYS if
 *                              now unassigned.
 */
VMMR0_INT_DECL(int) GPciRawR0GuestPageUpdate(PGVM pGVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys)
{
    AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR_4);
    AssertReturn(!(HCPhys & PAGE_OFFSET_MASK) || HCPhys == NIL_RTHCPHYS, VERR_INTERNAL_ERROR_4);
    NOREF(pGVM);
    return VINF_SUCCESS;
}

#endif /* VBOX_WITH_PCI_PASSTHROUGH */


/**
 * Sets up the IOMMU when a raw PCI device is enabled.
 *
 * @note    This is a hack that will probably be remodelled and refined later!
 *
 * @returns VBox status code.
 *
 * @param   pVM             The VM handle.
 */
VMMR0_INT_DECL(int) PGMR0PhysSetupIommu(PVM pVM)
{
    PGVM pGVM;
    int rc = GVMMR0ByVM(pVM, &pGVM);
    if (RT_FAILURE(rc))
        return rc;

#ifdef VBOX_WITH_PCI_PASSTHROUGH
    if (pVM->pgm.s.fPciPassthrough)
    {
        /*
         * The Simplistic Approach - Enumerate all the pages and tell the
         * IOMMU about each of them.
         */
        pgmLock(pVM);
        rc = GPciRawR0GuestPageBeginAssignments(pGVM);
        if (RT_SUCCESS(rc))
        {
            for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR0; RT_SUCCESS(rc) && pRam; pRam = pRam->pNextR0)
            {
                PPGMPAGE    pPage  = &pRam->aPages[0];
                RTGCPHYS    GCPhys = pRam->GCPhys;
                uint32_t    cLeft  = pRam->cb >> PAGE_SHIFT;
                while (cLeft-- > 0)
                {
                    /* Only expose pages that are 100% safe for now. */
                    if (   PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM
                        && PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED
                        && !PGM_PAGE_HAS_ANY_HANDLERS(pPage))
                        rc = GPciRawR0GuestPageAssign(pGVM, GCPhys, PGM_PAGE_GET_HCPHYS(pPage));
                    else
                        rc = GPciRawR0GuestPageUnassign(pGVM, GCPhys);

                    /* next */
                    pPage++;
                    GCPhys += PAGE_SIZE;
                }
            }

            int rc2 = GPciRawR0GuestPageEndAssignments(pGVM);
            if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
                rc = rc2;
        }
        pgmUnlock(pVM);
    }
    else
#endif
        rc = VERR_NOT_SUPPORTED;
    return rc;
}


/**
 * #PF Handler for nested paging.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM                 VM Handle.
 * @param   pVCpu               VMCPU Handle.
 * @param   enmShwPagingMode    Paging mode for the nested page tables.
 * @param   uErr                The trap error code.
 * @param   pRegFrame           Trap register frame.
 * @param   GCPhysFault         The fault address.
 */
VMMR0DECL(int) PGMR0Trap0eHandlerNestedPaging(PVM pVM, PVMCPU pVCpu, PGMMODE enmShwPagingMode, RTGCUINT uErr,
                                              PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault)
{
    int rc;

    LogFlow(("PGMTrap0eHandler: uErr=%RGx GCPhysFault=%RGp eip=%RGv\n", uErr, GCPhysFault, (RTGCPTR)pRegFrame->rip));
    STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0e, a);
    STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );

    /* AMD uses the host's paging mode; Intel has a single mode (EPT). */
    AssertMsg(   enmShwPagingMode == PGMMODE_32_BIT || enmShwPagingMode == PGMMODE_PAE      || enmShwPagingMode == PGMMODE_PAE_NX
              || enmShwPagingMode == PGMMODE_AMD64  || enmShwPagingMode == PGMMODE_AMD64_NX || enmShwPagingMode == PGMMODE_EPT,
              ("enmShwPagingMode=%d\n", enmShwPagingMode));

    /* Reserved shouldn't end up here. */
    Assert(!(uErr & X86_TRAP_PF_RSVD));

#ifdef VBOX_WITH_STATISTICS
    /*
     * Error code stats.
     */
    if (uErr & X86_TRAP_PF_US)
    {
        if (!(uErr & X86_TRAP_PF_P))
        {
            if (uErr & X86_TRAP_PF_RW)
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentWrite);
            else
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentRead);
        }
        else if (uErr & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSWrite);
        else if (uErr & X86_TRAP_PF_RSVD)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSReserved);
        else if (uErr & X86_TRAP_PF_ID)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNXE);
        else
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSRead);
    }
    else
    {   /* Supervisor */
        if (!(uErr & X86_TRAP_PF_P))
        {
            if (uErr & X86_TRAP_PF_RW)
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentWrite);
            else
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentRead);
        }
        else if (uErr & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVWrite);
        else if (uErr & X86_TRAP_PF_ID)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSNXE);
        else if (uErr & X86_TRAP_PF_RSVD)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVReserved);
    }
#endif

    /*
     * Call the worker.
     *
     * Note! We pretend the guest is in protected mode without paging, so we
     *       can use existing code to build the nested page tables.
     */
    bool fLockTaken = false;
    switch (enmShwPagingMode)
    {
        case PGMMODE_32_BIT:
            rc = PGM_BTH_NAME_32BIT_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
            break;
        case PGMMODE_PAE:
        case PGMMODE_PAE_NX:
            rc = PGM_BTH_NAME_PAE_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
            break;
        case PGMMODE_AMD64:
        case PGMMODE_AMD64_NX:
            rc = PGM_BTH_NAME_AMD64_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
            break;
        case PGMMODE_EPT:
            rc = PGM_BTH_NAME_EPT_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
            break;
        default:
            AssertFailed();
            rc = VERR_INVALID_PARAMETER;
            break;
    }
    if (fLockTaken)
    {
        PGM_LOCK_ASSERT_OWNER(pVM);
        pgmUnlock(pVM);
    }

    if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
        rc = VINF_SUCCESS;
    /* Note: hack alert for difficult to reproduce problem. */
    else if (   rc == VERR_PAGE_NOT_PRESENT                 /* SMP only ; disassembly might fail. */
             || rc == VERR_PAGE_TABLE_NOT_PRESENT           /* seen with UNI & SMP */
             || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT   /* seen with SMP */
             || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)     /* precaution */
    {
        Log(("WARNING: Unexpected VERR_PAGE_TABLE_NOT_PRESENT (%d) for page fault at %RGp error code %x (rip=%RGv)\n", rc, GCPhysFault, uErr, pRegFrame->rip));
        /* Some kind of inconsistency in the SMP case; it's safe to just execute the instruction again; not sure about
           single VCPU VMs though. */
        rc = VINF_SUCCESS;
    }

    STAM_STATS({ if (!pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution))
                    pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2Misc; });
    STAM_PROFILE_STOP_EX(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0e, pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution), a);
    return rc;
}
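
/*
 * Illustrative caller sketch (hypothetical; the real callers live in the
 * hardware-assisted execution code).  On a nested page fault / EPT violation
 * VM-exit the exit handler forwards the error code, register frame and
 * guest-physical fault address, roughly like this:
 *
 * @code
 *   // uErrorCode, pRegFrame and GCPhysFault come from the VM-exit information;
 *   // on AMD-V the shadow paging mode is the host paging mode rather than EPT.
 *   rc = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT,
 *                                       uErrorCode, pRegFrame, GCPhysFault);
 *   // VINF_SUCCESS means the fault was handled and the guest can be resumed.
 * @endcode
 */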


/**
 * #PF Handler for deliberate nested paging misconfiguration (/reserved bit)
 * employed for MMIO pages.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM                 The VM Handle.
 * @param   pVCpu               The current CPU.
 * @param   enmShwPagingMode    Paging mode for the nested page tables.
 * @param   pRegFrame           Trap register frame.
 * @param   GCPhysFault         The fault address.
 * @param   uErr                The error code, UINT32_MAX if not available
 *                              (VT-x).
 */
VMMR0DECL(VBOXSTRICTRC) PGMR0Trap0eHandlerNPMisconfig(PVM pVM, PVMCPU pVCpu, PGMMODE enmShwPagingMode,
                                                      PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, uint32_t uErr)
{
#ifdef PGM_WITH_MMIO_OPTIMIZATIONS
    STAM_PROFILE_START(&pVCpu->CTX_SUFF(pStats)->StatR0NpMiscfg, a);
    VBOXSTRICTRC rc;

    /*
     * Try to look up the all-access physical handler for the address.
     */
    pgmLock(pVM);
    PPGMPHYSHANDLER pHandler = pgmHandlerPhysicalLookup(pVM, GCPhysFault);
    if (RT_LIKELY(pHandler && pHandler->enmType != PGMPHYSHANDLERTYPE_PHYSICAL_WRITE))
    {
        /*
         * If the handler has aliased pages or pages that have been temporarily
         * disabled, we'll have to take a detour to make sure we resync them
         * to avoid lots of unnecessary exits.
         */
        PPGMPAGE pPage;
        if (   (   pHandler->cAliasedPages
                || pHandler->cTmpOffPages)
            && (   (pPage = pgmPhysGetPage(pVM, GCPhysFault)) == NULL
                || PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_DISABLED)
           )
        {
            Log(("PGMR0Trap0eHandlerNPMisconfig: Resyncing aliases / tmp-off page at %RGp (uErr=%#x) %R[pgmpage]\n", GCPhysFault, uErr, pPage));
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0NpMiscfgSyncPage);
            rc = pgmShwSyncNestedPageLocked(pVCpu, GCPhysFault, 1 /*cPages*/, enmShwPagingMode);
            pgmUnlock(pVM);
        }
        else
        {
            if (pHandler->CTX_SUFF(pfnHandler))
            {
                CTX_MID(PFNPGM,PHYSHANDLER) pfnHandler = pHandler->CTX_SUFF(pfnHandler);
                void *pvUser = pHandler->CTX_SUFF(pvUser);
                STAM_PROFILE_START(&pHandler->Stat, h);
                pgmUnlock(pVM);

                Log6(("PGMR0Trap0eHandlerNPMisconfig: calling %p(,%#x,,%RGp,%p)\n", pfnHandler, uErr, GCPhysFault, pvUser));
                rc = pfnHandler(pVM, uErr == UINT32_MAX ? RTGCPTR_MAX : uErr, pRegFrame, GCPhysFault, GCPhysFault, pvUser);

#ifdef VBOX_WITH_STATISTICS
                pgmLock(pVM);
                pHandler = pgmHandlerPhysicalLookup(pVM, GCPhysFault);
                if (pHandler)
                    STAM_PROFILE_STOP(&pHandler->Stat, h);
                pgmUnlock(pVM);
#endif
            }
            else
            {
                pgmUnlock(pVM);
                Log(("PGMR0Trap0eHandlerNPMisconfig: %RGp (uErr=%#x) -> R3\n", GCPhysFault, uErr));
                rc = VINF_EM_RAW_EMULATE_INSTR;
            }
        }
    }
    else
    {
        /*
         * Must be out of sync, so do a SyncPage and restart the instruction.
         *
         * ASSUMES that ALL handlers are page aligned and cover whole pages
         * (assumption asserted in PGMHandlerPhysicalRegisterEx).
         */
        Log(("PGMR0Trap0eHandlerNPMisconfig: Out of sync page at %RGp (uErr=%#x)\n", GCPhysFault, uErr));
        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0NpMiscfgSyncPage);
        rc = pgmShwSyncNestedPageLocked(pVCpu, GCPhysFault, 1 /*cPages*/, enmShwPagingMode);
        pgmUnlock(pVM);
    }

    STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0NpMiscfg, a);
    return rc;

#else
    AssertLogRelFailed();
    return VERR_PGM_NOT_USED_IN_MODE;
#endif
}