VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/PGMR0.cpp@ 57471

Last change on this file since 57471 was 57358, checked in by vboxsync, 9 years ago

*: scm cleanup run.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 23.2 KB
/* $Id: PGMR0.cpp 57358 2015-08-14 15:16:38Z vboxsync $ */
/** @file
 * PGM - Page Manager and Monitor, Ring-0.
 */

/*
 * Copyright (C) 2007-2015 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#include <VBox/rawpci.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/gmm.h>
#include <VBox/vmm/gvm.h>
#include "PGMInternal.h"
#include <VBox/vmm/vm.h>
#include "PGMInline.h"
#include <VBox/log.h>
#include <VBox/err.h>
#include <iprt/assert.h>
#include <iprt/mem.h>


/*
 * Instantiate the ring-0 header/code templates.
 */
#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
#include "PGMR0Bth.h"
#undef PGM_BTH_NAME

#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
#include "PGMR0Bth.h"
#undef PGM_BTH_NAME

#define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
#include "PGMR0Bth.h"
#undef PGM_BTH_NAME

#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
#include "PGMR0Bth.h"
#undef PGM_BTH_NAME

/**
 * Worker function for PGMR3PhysAllocateHandyPages and pgmPhysEnsureHandyPage.
 *
 * @returns The following VBox status codes.
 * @retval  VINF_SUCCESS on success. FF cleared.
 * @retval  VINF_EM_NO_MEMORY if we're out of memory. The FF is set in this case.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 *
 * @remarks Must be called from within the PGM critical section. The caller
 *          must clear the new pages.
 */
VMMR0_INT_DECL(int) PGMR0PhysAllocateHandyPages(PVM pVM, PVMCPU pVCpu)
{
    PGM_LOCK_ASSERT_OWNER_EX(pVM, pVCpu);

    /*
     * Check for error injection.
     */
    if (RT_UNLIKELY(pVM->pgm.s.fErrInjHandyPages))
        return VERR_NO_MEMORY;

    /*
     * Try to allocate a full set of handy pages.
     */
    uint32_t iFirst = pVM->pgm.s.cHandyPages;
    AssertReturn(iFirst <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), VERR_PGM_HANDY_PAGE_IPE);
    uint32_t cPages = RT_ELEMENTS(pVM->pgm.s.aHandyPages) - iFirst;
    if (!cPages)
        return VINF_SUCCESS;
    int rc = GMMR0AllocateHandyPages(pVM, pVCpu->idCpu, cPages, cPages, &pVM->pgm.s.aHandyPages[iFirst]);
    if (RT_SUCCESS(rc))
    {
#ifdef VBOX_STRICT
        for (uint32_t i = 0; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
        {
            Assert(pVM->pgm.s.aHandyPages[i].idPage != NIL_GMM_PAGEID);
            Assert(pVM->pgm.s.aHandyPages[i].idPage <= GMM_PAGEID_LAST);
            Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
            Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys != NIL_RTHCPHYS);
            Assert(!(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
        }
#endif

        pVM->pgm.s.cHandyPages = RT_ELEMENTS(pVM->pgm.s.aHandyPages);
    }
    else if (rc != VERR_GMM_SEED_ME)
    {
        if (   (   rc == VERR_GMM_HIT_GLOBAL_LIMIT
                || rc == VERR_GMM_HIT_VM_ACCOUNT_LIMIT)
            && iFirst < PGM_HANDY_PAGES_MIN)
        {

#ifdef VBOX_STRICT
            /* We're ASSUMING that GMM has updated all the entries before failing us. */
            uint32_t i;
            for (i = iFirst; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
            {
                Assert(pVM->pgm.s.aHandyPages[i].idPage == NIL_GMM_PAGEID);
                Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
                Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys == NIL_RTHCPHYS);
            }
#endif

            /*
             * Reduce the number of pages until we hit the minimum limit.
             */
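            /* Illustrative example (editorial, hypothetical values): with a
               128-entry aHandyPages array, PGM_HANDY_PAGES_MIN = 32 and
               iFirst = 8, a failed 120 page request would be retried with 60,
               then 30, then 24 pages (clamped so that iFirst + cPages equals
               PGM_HANDY_PAGES_MIN), after which the loop gives up. */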
            do
            {
                cPages >>= 1;
                if (cPages + iFirst < PGM_HANDY_PAGES_MIN)
                    cPages = PGM_HANDY_PAGES_MIN - iFirst;
                rc = GMMR0AllocateHandyPages(pVM, pVCpu->idCpu, 0, cPages, &pVM->pgm.s.aHandyPages[iFirst]);
            } while (   (   rc == VERR_GMM_HIT_GLOBAL_LIMIT
                         || rc == VERR_GMM_HIT_VM_ACCOUNT_LIMIT)
                     && cPages + iFirst > PGM_HANDY_PAGES_MIN);
            if (RT_SUCCESS(rc))
            {
#ifdef VBOX_STRICT
                i = iFirst + cPages;
                while (i-- > 0)
                {
                    Assert(pVM->pgm.s.aHandyPages[i].idPage != NIL_GMM_PAGEID);
                    Assert(pVM->pgm.s.aHandyPages[i].idPage <= GMM_PAGEID_LAST);
                    Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
                    Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys != NIL_RTHCPHYS);
                    Assert(!(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
                }

                for (i = cPages + iFirst; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
                {
                    Assert(pVM->pgm.s.aHandyPages[i].idPage == NIL_GMM_PAGEID);
                    Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
                    Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys == NIL_RTHCPHYS);
                }
#endif

                pVM->pgm.s.cHandyPages = iFirst + cPages;
            }
        }

        if (RT_FAILURE(rc) && rc != VERR_GMM_SEED_ME)
        {
            LogRel(("PGMR0PhysAllocateHandyPages: rc=%Rrc iFirst=%d cPages=%d\n", rc, iFirst, cPages));
            VM_FF_SET(pVM, VM_FF_PGM_NO_MEMORY);
        }
    }


    LogFlow(("PGMR0PhysAllocateHandyPages: cPages=%d rc=%Rrc\n", cPages, rc));
    return rc;
}


/**
 * Flushes any changes pending in the handy page array.
 *
 * It is very important that this gets done when page sharing is enabled.
 *
 * @returns The following VBox status codes.
 * @retval  VINF_SUCCESS on success. FF cleared.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 *
 * @remarks Must be called from within the PGM critical section.
 */
VMMR0_INT_DECL(int) PGMR0PhysFlushHandyPages(PVM pVM, PVMCPU pVCpu)
{
    PGM_LOCK_ASSERT_OWNER_EX(pVM, pVCpu);

    /*
     * Figure out how many of the handy page entries have been used and need
     * to be flushed back to GMM.
     */
    uint32_t iFirst = pVM->pgm.s.cHandyPages;
    AssertReturn(iFirst <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), VERR_PGM_HANDY_PAGE_IPE);
    uint32_t cPages = RT_ELEMENTS(pVM->pgm.s.aHandyPages) - iFirst;
    if (!cPages)
        return VINF_SUCCESS;
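    /* Note (editorial): unlike PGMR0PhysAllocateHandyPages above, the call
       below passes zero as the number of pages to allocate and only hands GMM
       the entries at iFirst and above (the ones already given out), so their
       pending updates get recorded - which is what matters for page sharing -
       without any new pages being returned. */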
    int rc = GMMR0AllocateHandyPages(pVM, pVCpu->idCpu, cPages, 0, &pVM->pgm.s.aHandyPages[iFirst]);

    LogFlow(("PGMR0PhysFlushHandyPages: cPages=%d rc=%Rrc\n", cPages, rc));
    return rc;
}


/**
 * Worker function for PGMR3PhysAllocateLargeHandyPage
 *
 * @returns The following VBox status codes.
 * @retval  VINF_SUCCESS on success.
 * @retval  VINF_EM_NO_MEMORY if we're out of memory.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 *
 * @remarks Must be called from within the PGM critical section. The caller
 *          must clear the new pages.
 */
VMMR0_INT_DECL(int) PGMR0PhysAllocateLargeHandyPage(PVM pVM, PVMCPU pVCpu)
{
    PGM_LOCK_ASSERT_OWNER_EX(pVM, pVCpu);
    Assert(!pVM->pgm.s.cLargeHandyPages);

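    /* Note (editorial): _2M requests a single 2 MB large page, i.e. backing
       for 512 contiguous 4 KB guest pages.  Only one large handy page
       descriptor is tracked at a time, hence the assertion above and the
       hard-coded index 0 below. */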
    int rc = GMMR0AllocateLargePage(pVM, pVCpu->idCpu, _2M,
                                    &pVM->pgm.s.aLargeHandyPage[0].idPage,
                                    &pVM->pgm.s.aLargeHandyPage[0].HCPhysGCPhys);
    if (RT_SUCCESS(rc))
        pVM->pgm.s.cLargeHandyPages = 1;

    return rc;
}


#ifdef VBOX_WITH_PCI_PASSTHROUGH
/* Interface sketch.  The interface belongs to a global PCI pass-through
   manager.  It shall use the global VM handle, not the user VM handle to
   store the per-VM info (domain) since that is all ring-0 stuff, thus
   passing pGVM here.  I've tentatively prefixed the functions 'GPciRawR0',
   we can discuss the PciRaw code re-organization when I'm back from
   vacation.

   I've implemented the initial IOMMU set up below.  For things to work
   reliably, we will probably need to add a whole bunch of checks and
   GPciRawR0GuestPageUpdate calls to the PGM code.  For the present,
   assuming nested paging (enforced) and prealloc (enforced), no
   ballooning (check missing), page sharing (check missing) or live
   migration (check missing), it might work fine.  At least if some
   VM power-off hook is present and can tear down the IOMMU page tables. */
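
/* Call protocol implied by the doc comments below and exercised by
   PGMR0PhysSetupIommu (editorial summary):
        GPciRawR0GuestPageBeginAssignments()
            GPciRawR0GuestPageAssign() / GPciRawR0GuestPageUnassign()   - once per guest page
        GPciRawR0GuestPageEndAssignments()
   Later changes to individual mappings are reported one page at a time via
   GPciRawR0GuestPageUpdate(). */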

/**
 * Tells the global PCI pass-through manager that we are about to set up the
 * guest page to host page mappings for the specified VM.
 *
 * @returns VBox status code.
 *
 * @param   pGVM        The ring-0 VM structure.
 */
VMMR0_INT_DECL(int) GPciRawR0GuestPageBeginAssignments(PGVM pGVM)
{
    NOREF(pGVM);
    return VINF_SUCCESS;
}


/**
 * Assigns a host page mapping for a guest page.
 *
 * This is only used when setting up the mappings, i.e. between
 * GPciRawR0GuestPageBeginAssignments and GPciRawR0GuestPageEndAssignments.
 *
 * @returns VBox status code.
 * @param   pGVM        The ring-0 VM structure.
 * @param   GCPhys      The address of the guest page (page aligned).
 * @param   HCPhys      The address of the host page (page aligned).
 */
VMMR0_INT_DECL(int) GPciRawR0GuestPageAssign(PGVM pGVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys)
{
    AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR_3);
    AssertReturn(!(HCPhys & PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR_3);

    if (pGVM->rawpci.s.pfnContigMemInfo)
        /** @todo: what do we do on failure? */
        pGVM->rawpci.s.pfnContigMemInfo(&pGVM->rawpci.s, HCPhys, GCPhys, PAGE_SIZE, PCIRAW_MEMINFO_MAP);

    return VINF_SUCCESS;
}


/**
 * Indicates that the specified guest page either doesn't exist or doesn't
 * have a host page mapping we trust PCI pass-through with.
 *
 * This is only used when setting up the mappings, i.e. between
 * GPciRawR0GuestPageBeginAssignments and GPciRawR0GuestPageEndAssignments.
 *
 * @returns VBox status code.
 * @param   pGVM        The ring-0 VM structure.
 * @param   GCPhys      The address of the guest page (page aligned).
 */
VMMR0_INT_DECL(int) GPciRawR0GuestPageUnassign(PGVM pGVM, RTGCPHYS GCPhys)
{
    AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR_3);

    if (pGVM->rawpci.s.pfnContigMemInfo)
        /** @todo: what do we do on failure? */
        pGVM->rawpci.s.pfnContigMemInfo(&pGVM->rawpci.s, 0, GCPhys, PAGE_SIZE, PCIRAW_MEMINFO_UNMAP);

    return VINF_SUCCESS;
}


/**
 * Tells the global PCI pass-through manager that we have completed setting up
 * the guest page to host page mappings for the specified VM.
 *
 * This complements GPciRawR0GuestPageBeginAssignments and will be called even
 * if some page assignment failed.
 *
 * @returns VBox status code.
 *
 * @param   pGVM        The ring-0 VM structure.
 */
VMMR0_INT_DECL(int) GPciRawR0GuestPageEndAssignments(PGVM pGVM)
{
    NOREF(pGVM);
    return VINF_SUCCESS;
}


/**
 * Tells the global PCI pass-through manager that a guest page mapping has
 * changed after the initial setup.
 *
 * @returns VBox status code.
 * @param   pGVM        The ring-0 VM structure.
 * @param   GCPhys      The address of the guest page (page aligned).
 * @param   HCPhys      The new host page address or NIL_RTHCPHYS if
 *                      now unassigned.
 */
VMMR0_INT_DECL(int) GPciRawR0GuestPageUpdate(PGVM pGVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys)
{
    AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR_4);
    AssertReturn(!(HCPhys & PAGE_OFFSET_MASK) || HCPhys == NIL_RTHCPHYS, VERR_INTERNAL_ERROR_4);
    NOREF(pGVM);
    return VINF_SUCCESS;
}

#endif /* VBOX_WITH_PCI_PASSTHROUGH */


/**
 * Sets up the IOMMU when raw PCI device is enabled.
 *
 * @note    This is a hack that will probably be remodelled and refined later!
 *
 * @returns VBox status code.
 *
 * @param   pVM         Pointer to the VM.
 */
VMMR0_INT_DECL(int) PGMR0PhysSetupIommu(PVM pVM)
{
    PGVM pGVM;
    int rc = GVMMR0ByVM(pVM, &pGVM);
    if (RT_FAILURE(rc))
        return rc;

#ifdef VBOX_WITH_PCI_PASSTHROUGH
    if (pVM->pgm.s.fPciPassthrough)
    {
        /*
         * The Simplistic Approach - Enumerate all the pages and tell the
         * IOMMU about each of them.
         */
        pgmLock(pVM);
        rc = GPciRawR0GuestPageBeginAssignments(pGVM);
        if (RT_SUCCESS(rc))
        {
            for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR0; RT_SUCCESS(rc) && pRam; pRam = pRam->pNextR0)
            {
                PPGMPAGE    pPage  = &pRam->aPages[0];
                RTGCPHYS    GCPhys = pRam->GCPhys;
                uint32_t    cLeft  = pRam->cb >> PAGE_SHIFT;
                while (cLeft-- > 0)
                {
                    /* Only expose pages that are 100% safe for now. */
                    if (   PGM_PAGE_GET_TYPE(pPage)  == PGMPAGETYPE_RAM
                        && PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED
                        && !PGM_PAGE_HAS_ANY_HANDLERS(pPage))
                        rc = GPciRawR0GuestPageAssign(pGVM, GCPhys, PGM_PAGE_GET_HCPHYS(pPage));
                    else
                        rc = GPciRawR0GuestPageUnassign(pGVM, GCPhys);

                    /* next */
                    pPage++;
                    GCPhys += PAGE_SIZE;
                }
            }

            int rc2 = GPciRawR0GuestPageEndAssignments(pGVM);
            if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
                rc = rc2;
        }
        pgmUnlock(pVM);
    }
    else
#endif
        rc = VERR_NOT_SUPPORTED;
    return rc;
}
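
/* Editorial note: with VBOX_WITH_PCI_PASSTHROUGH, the function above only maps
   RAM pages that are in the ALLOCATED state and have no access handlers into
   the IOMMU; everything else (MMIO, ROM, handler-covered pages, and pages in
   other states such as ballooned or shared ones) is explicitly unassigned.
   Without the define, or when the VM is not configured for PCI pass-through,
   it returns VERR_NOT_SUPPORTED. */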


/**
 * #PF Handler for nested paging.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM                 Pointer to the VM.
 * @param   pVCpu               Pointer to the VMCPU.
 * @param   enmShwPagingMode    Paging mode for the nested page tables.
 * @param   uErr                The trap error code.
 * @param   pRegFrame           Trap register frame.
 * @param   GCPhysFault         The fault address.
 */
VMMR0DECL(int) PGMR0Trap0eHandlerNestedPaging(PVM pVM, PVMCPU pVCpu, PGMMODE enmShwPagingMode, RTGCUINT uErr,
                                              PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault)
{
    int rc;

    LogFlow(("PGMTrap0eHandler: uErr=%RGx GCPhysFault=%RGp eip=%RGv\n", uErr, GCPhysFault, (RTGCPTR)pRegFrame->rip));
    STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0e, a);
    STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );

    /* AMD uses the host's paging mode; Intel has a single mode (EPT). */
    AssertMsg(   enmShwPagingMode == PGMMODE_32_BIT || enmShwPagingMode == PGMMODE_PAE      || enmShwPagingMode == PGMMODE_PAE_NX
              || enmShwPagingMode == PGMMODE_AMD64  || enmShwPagingMode == PGMMODE_AMD64_NX || enmShwPagingMode == PGMMODE_EPT,
              ("enmShwPagingMode=%d\n", enmShwPagingMode));

    /* Reserved shouldn't end up here. */
    Assert(!(uErr & X86_TRAP_PF_RSVD));

#ifdef VBOX_WITH_STATISTICS
    /*
     * Error code stats.
     */
    if (uErr & X86_TRAP_PF_US)
    {
        if (!(uErr & X86_TRAP_PF_P))
        {
            if (uErr & X86_TRAP_PF_RW)
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentWrite);
            else
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentRead);
        }
        else if (uErr & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSWrite);
        else if (uErr & X86_TRAP_PF_RSVD)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSReserved);
        else if (uErr & X86_TRAP_PF_ID)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNXE);
        else
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSRead);
    }
    else
    {   /* Supervisor */
        if (!(uErr & X86_TRAP_PF_P))
        {
            if (uErr & X86_TRAP_PF_RW)
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentWrite);
            else
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentRead);
        }
        else if (uErr & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVWrite);
        else if (uErr & X86_TRAP_PF_ID)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSNXE);
        else if (uErr & X86_TRAP_PF_RSVD)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVReserved);
    }
#endif

    /*
     * Call the worker.
     *
     * Note! We pretend the guest is in protected mode without paging, so we
     *       can use existing code to build the nested page tables.
     */
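    /* (Editorial note: because the guest is treated as running without paging,
       the nested/EPT fault address GCPhysFault is passed to the *_PROT
       Trap0eHandler templates below in the position a guest linear address
       would normally occupy, so only the shadow (nested) page tables need to
       be walked and synced.) */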
    bool fLockTaken = false;
    switch (enmShwPagingMode)
    {
        case PGMMODE_32_BIT:
            rc = PGM_BTH_NAME_32BIT_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
            break;
        case PGMMODE_PAE:
        case PGMMODE_PAE_NX:
            rc = PGM_BTH_NAME_PAE_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
            break;
        case PGMMODE_AMD64:
        case PGMMODE_AMD64_NX:
            rc = PGM_BTH_NAME_AMD64_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
            break;
        case PGMMODE_EPT:
            rc = PGM_BTH_NAME_EPT_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
            break;
        default:
            AssertFailed();
            rc = VERR_INVALID_PARAMETER;
            break;
    }
    if (fLockTaken)
    {
        PGM_LOCK_ASSERT_OWNER(pVM);
        pgmUnlock(pVM);
    }

    if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
        rc = VINF_SUCCESS;
    /*
     * Handle the case where we cannot interpret the instruction because we cannot get the guest physical address
     * via its page tables, see @bugref{6043}.
     */
    else if (   rc == VERR_PAGE_NOT_PRESENT                 /* SMP only ; disassembly might fail. */
             || rc == VERR_PAGE_TABLE_NOT_PRESENT           /* seen with UNI & SMP */
             || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT   /* seen with SMP */
             || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)     /* precaution */
    {
        Log(("WARNING: Unexpected VERR_PAGE_TABLE_NOT_PRESENT (%d) for page fault at %RGp error code %x (rip=%RGv)\n", rc, GCPhysFault, uErr, pRegFrame->rip));
        /* Some kind of inconsistency in the SMP case; it's safe to just execute the instruction again; not sure about
           single VCPU VMs though. */
        rc = VINF_SUCCESS;
    }

    STAM_STATS({ if (!pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution))
                    pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2Misc; });
    STAM_PROFILE_STOP_EX(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0e, pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution), a);
    return rc;
}


/**
 * #PF Handler for deliberate nested paging misconfiguration (/reserved bit)
 * employed for MMIO pages.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM                 Pointer to the VM.
 * @param   pVCpu               Pointer to the VMCPU.
 * @param   enmShwPagingMode    Paging mode for the nested page tables.
 * @param   pRegFrame           Trap register frame.
 * @param   GCPhysFault         The fault address.
 * @param   uErr                The error code, UINT32_MAX if not available
 *                              (VT-x).
 */
VMMR0DECL(VBOXSTRICTRC) PGMR0Trap0eHandlerNPMisconfig(PVM pVM, PVMCPU pVCpu, PGMMODE enmShwPagingMode,
                                                      PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, uint32_t uErr)
{
#ifdef PGM_WITH_MMIO_OPTIMIZATIONS
    STAM_PROFILE_START(&pVCpu->CTX_SUFF(pStats)->StatR0NpMiscfg, a);
    VBOXSTRICTRC rc;

    /*
     * Try to look up the all-access physical handler for the address.
     */
    pgmLock(pVM);
    PPGMPHYSHANDLER pHandler = pgmHandlerPhysicalLookup(pVM, GCPhysFault);
    PPGMPHYSHANDLERTYPEINT pHandlerType = RT_LIKELY(pHandler) ? PGMPHYSHANDLER_GET_TYPE(pVM, pHandler) : NULL;
    if (RT_LIKELY(pHandler && pHandlerType->enmKind != PGMPHYSHANDLERKIND_WRITE))
    {
        /*
         * If the handler has aliased pages or pages that have been temporarily
         * disabled, we'll have to take a detour to make sure we resync them
         * to avoid lots of unnecessary exits.
         */
        PPGMPAGE pPage;
        if (   (   pHandler->cAliasedPages
                || pHandler->cTmpOffPages)
            && (   (pPage = pgmPhysGetPage(pVM, GCPhysFault)) == NULL
                || PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_DISABLED)
           )
        {
            Log(("PGMR0Trap0eHandlerNPMisconfig: Resyncing aliases / tmp-off page at %RGp (uErr=%#x) %R[pgmpage]\n", GCPhysFault, uErr, pPage));
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0NpMiscfgSyncPage);
            rc = pgmShwSyncNestedPageLocked(pVCpu, GCPhysFault, 1 /*cPages*/, enmShwPagingMode);
            pgmUnlock(pVM);
        }
        else
        {
            if (pHandlerType->CTX_SUFF(pfnPfHandler))
            {
                void *pvUser = pHandler->CTX_SUFF(pvUser);
                STAM_PROFILE_START(&pHandler->Stat, h);
                pgmUnlock(pVM);

                Log6(("PGMR0Trap0eHandlerNPMisconfig: calling %p(,%#x,,%RGp,%p)\n", pHandlerType->CTX_SUFF(pfnPfHandler), uErr, GCPhysFault, pvUser));
                rc = pHandlerType->CTX_SUFF(pfnPfHandler)(pVM, pVCpu, uErr == UINT32_MAX ? RTGCPTR_MAX : uErr, pRegFrame,
                                                          GCPhysFault, GCPhysFault, pvUser);

#ifdef VBOX_WITH_STATISTICS
                pgmLock(pVM);
                pHandler = pgmHandlerPhysicalLookup(pVM, GCPhysFault);
                if (pHandler)
                    STAM_PROFILE_STOP(&pHandler->Stat, h);
                pgmUnlock(pVM);
#endif
            }
            else
            {
                pgmUnlock(pVM);
                Log(("PGMR0Trap0eHandlerNPMisconfig: %RGp (uErr=%#x) -> R3\n", GCPhysFault, uErr));
                rc = VINF_EM_RAW_EMULATE_INSTR;
            }
        }
    }
    else
    {
        /*
         * Must be out of sync, so do a SyncPage and restart the instruction.
         *
         * ASSUMES that ALL handlers are page aligned and cover whole pages
         * (assumption asserted in PGMHandlerPhysicalRegisterEx).
         */
        Log(("PGMR0Trap0eHandlerNPMisconfig: Out of sync page at %RGp (uErr=%#x)\n", GCPhysFault, uErr));
        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0NpMiscfgSyncPage);
        rc = pgmShwSyncNestedPageLocked(pVCpu, GCPhysFault, 1 /*cPages*/, enmShwPagingMode);
        pgmUnlock(pVM);
    }

    STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0NpMiscfg, a);
    return rc;

#else
    AssertLogRelFailed();
    return VERR_PGM_NOT_USED_IN_MODE;
#endif
}