VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/PGMR0.cpp@ 69713

Last change on this file since 69713 was 69111, checked in by vboxsync, 7 years ago:

(C) year

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 24.5 KB
/* $Id: PGMR0.cpp 69111 2017-10-17 14:26:02Z vboxsync $ */
/** @file
 * PGM - Page Manager and Monitor, Ring-0.
 */

/*
 * Copyright (C) 2007-2017 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#include <VBox/rawpci.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/gmm.h>
#include <VBox/vmm/gvm.h>
#include "PGMInternal.h"
#include <VBox/vmm/vm.h>
#include "PGMInline.h"
#include <VBox/log.h>
#include <VBox/err.h>
#include <iprt/assert.h>
#include <iprt/mem.h>


/*
 * Instantiate the ring-0 header/code templates.
 */
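/* Editorial note: PGMR0Bth.h is the "both mode" (shadow + guest) template.
   Each inclusion below instantiates it for one shadow paging mode (32-bit,
   PAE, AMD64, EPT) with the guest side fixed to protected mode without
   paging, which is the combination used for nested paging; the resulting
   Trap0eHandler workers are dispatched from PGMR0Trap0eHandlerNestedPaging
   further down in this file. */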
#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
#include "PGMR0Bth.h"
#undef PGM_BTH_NAME

#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
#include "PGMR0Bth.h"
#undef PGM_BTH_NAME

#define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
#include "PGMR0Bth.h"
#undef PGM_BTH_NAME

#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
#include "PGMR0Bth.h"
#undef PGM_BTH_NAME


/**
 * Worker function for PGMR3PhysAllocateHandyPages and pgmPhysEnsureHandyPage.
 *
 * @returns The following VBox status codes.
 * @retval  VINF_SUCCESS on success. FF cleared.
 * @retval  VINF_EM_NO_MEMORY if we're out of memory. The FF is set in this case.
 *
 * @param   pGVM    The global (ring-0) VM structure.
 * @param   pVM     The cross context VM structure.
 * @param   idCpu   The ID of the calling EMT.
 *
 * @thread  EMT(idCpu)
 *
 * @remarks Must be called from within the PGM critical section. The caller
 *          must clear the new pages.
 */
VMMR0_INT_DECL(int) PGMR0PhysAllocateHandyPages(PGVM pGVM, PVM pVM, VMCPUID idCpu)
{
    /*
     * Validate inputs.
     */
    AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID); /* caller already checked this, but just to be sure. */
    AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_NOT_OWNER);
    PGM_LOCK_ASSERT_OWNER_EX(pVM, &pVM->aCpus[idCpu]);

    /*
     * Check for error injection.
     */
    if (RT_UNLIKELY(pVM->pgm.s.fErrInjHandyPages))
        return VERR_NO_MEMORY;

    /*
     * Try to allocate a full set of handy pages.
     */
    uint32_t iFirst = pVM->pgm.s.cHandyPages;
    AssertReturn(iFirst <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), VERR_PGM_HANDY_PAGE_IPE);
    uint32_t cPages = RT_ELEMENTS(pVM->pgm.s.aHandyPages) - iFirst;
    if (!cPages)
        return VINF_SUCCESS;
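    /* Editorial note: the two counts passed to GMMR0AllocateHandyPages are, in
       order, the number of entries to update and the number of pages to allocate
       (parameter naming assumed from the GMM interface); here both equal cPages,
       i.e. refill the whole remainder of the array starting at iFirst. */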
    int rc = GMMR0AllocateHandyPages(pGVM, pVM, idCpu, cPages, cPages, &pVM->pgm.s.aHandyPages[iFirst]);
    if (RT_SUCCESS(rc))
    {
#ifdef VBOX_STRICT
        for (uint32_t i = 0; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
        {
            Assert(pVM->pgm.s.aHandyPages[i].idPage != NIL_GMM_PAGEID);
            Assert(pVM->pgm.s.aHandyPages[i].idPage <= GMM_PAGEID_LAST);
            Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
            Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys != NIL_RTHCPHYS);
            Assert(!(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
        }
#endif

        pVM->pgm.s.cHandyPages = RT_ELEMENTS(pVM->pgm.s.aHandyPages);
    }
    else if (rc != VERR_GMM_SEED_ME)
    {
        if (   (   rc == VERR_GMM_HIT_GLOBAL_LIMIT
                || rc == VERR_GMM_HIT_VM_ACCOUNT_LIMIT)
            && iFirst < PGM_HANDY_PAGES_MIN)
        {

#ifdef VBOX_STRICT
            /* We're ASSUMING that GMM has updated all the entries before failing us. */
            uint32_t i;
            for (i = iFirst; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
            {
                Assert(pVM->pgm.s.aHandyPages[i].idPage == NIL_GMM_PAGEID);
                Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
                Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys == NIL_RTHCPHYS);
            }
#endif

            /*
             * Reduce the number of pages until we hit the minimum limit.
             */
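            /* Editorial note: a worked example, assuming the usual 128-entry
               handy page array and a PGM_HANDY_PAGES_MIN of 32 (neither value
               is defined in this file): with iFirst = 0 the loop retries the
               allocation with 64 pages, then 32, and the clamp below keeps the
               request from dropping the total below the minimum. */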
            do
            {
                cPages >>= 1;
                if (cPages + iFirst < PGM_HANDY_PAGES_MIN)
                    cPages = PGM_HANDY_PAGES_MIN - iFirst;
                rc = GMMR0AllocateHandyPages(pGVM, pVM, idCpu, 0, cPages, &pVM->pgm.s.aHandyPages[iFirst]);
            } while (   (   rc == VERR_GMM_HIT_GLOBAL_LIMIT
                         || rc == VERR_GMM_HIT_VM_ACCOUNT_LIMIT)
                     && cPages + iFirst > PGM_HANDY_PAGES_MIN);
            if (RT_SUCCESS(rc))
            {
#ifdef VBOX_STRICT
                i = iFirst + cPages;
                while (i-- > 0)
                {
                    Assert(pVM->pgm.s.aHandyPages[i].idPage != NIL_GMM_PAGEID);
                    Assert(pVM->pgm.s.aHandyPages[i].idPage <= GMM_PAGEID_LAST);
                    Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
                    Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys != NIL_RTHCPHYS);
                    Assert(!(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
                }

                for (i = cPages + iFirst; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
                {
                    Assert(pVM->pgm.s.aHandyPages[i].idPage == NIL_GMM_PAGEID);
                    Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
                    Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys == NIL_RTHCPHYS);
                }
#endif

                pVM->pgm.s.cHandyPages = iFirst + cPages;
            }
        }

        if (RT_FAILURE(rc) && rc != VERR_GMM_SEED_ME)
        {
            LogRel(("PGMR0PhysAllocateHandyPages: rc=%Rrc iFirst=%d cPages=%d\n", rc, iFirst, cPages));
            VM_FF_SET(pVM, VM_FF_PGM_NO_MEMORY);
        }
    }


    LogFlow(("PGMR0PhysAllocateHandyPages: cPages=%d rc=%Rrc\n", cPages, rc));
    return rc;
}


/**
 * Flushes any changes pending in the handy page array.
 *
 * It is very important that this gets done when page sharing is enabled.
 *
 * @returns The following VBox status codes.
 * @retval  VINF_SUCCESS on success. FF cleared.
 *
 * @param   pGVM    The global (ring-0) VM structure.
 * @param   pVM     The cross context VM structure.
 * @param   idCpu   The ID of the calling EMT.
 *
 * @thread  EMT(idCpu)
 *
 * @remarks Must be called from within the PGM critical section.
 */
VMMR0_INT_DECL(int) PGMR0PhysFlushHandyPages(PGVM pGVM, PVM pVM, VMCPUID idCpu)
{
    /*
     * Validate inputs.
     */
    AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID); /* caller already checked this, but just to be sure. */
    AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_NOT_OWNER);
    PGM_LOCK_ASSERT_OWNER_EX(pVM, &pVM->aCpus[idCpu]);

    /*
     * Push the pending entries in the handy page array back to GMM. Note
     * that the allocation count passed to GMMR0AllocateHandyPages is zero,
     * so no new pages are allocated here.
     */
    uint32_t iFirst = pVM->pgm.s.cHandyPages;
    AssertReturn(iFirst <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), VERR_PGM_HANDY_PAGE_IPE);
    uint32_t cPages = RT_ELEMENTS(pVM->pgm.s.aHandyPages) - iFirst;
    if (!cPages)
        return VINF_SUCCESS;
    int rc = GMMR0AllocateHandyPages(pGVM, pVM, idCpu, cPages, 0, &pVM->pgm.s.aHandyPages[iFirst]);

    LogFlow(("PGMR0PhysFlushHandyPages: cPages=%d rc=%Rrc\n", cPages, rc));
    return rc;
}


/**
 * Worker function for PGMR3PhysAllocateLargeHandyPage
 *
 * @returns The following VBox status codes.
 * @retval  VINF_SUCCESS on success.
 * @retval  VINF_EM_NO_MEMORY if we're out of memory.
 *
 * @param   pGVM    The global (ring-0) VM structure.
 * @param   pVM     The cross context VM structure.
 * @param   idCpu   The ID of the calling EMT.
 *
 * @thread  EMT(idCpu)
 *
 * @remarks Must be called from within the PGM critical section. The caller
 *          must clear the new pages.
 */
VMMR0_INT_DECL(int) PGMR0PhysAllocateLargeHandyPage(PGVM pGVM, PVM pVM, VMCPUID idCpu)
{
    /*
     * Validate inputs.
     */
    AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID); /* caller already checked this, but just to be sure. */
    AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_NOT_OWNER);
    PGM_LOCK_ASSERT_OWNER_EX(pVM, &pVM->aCpus[idCpu]);
    Assert(!pVM->pgm.s.cLargeHandyPages);

    /*
     * Allocate a single 2 MB large page and record its page ID and host
     * physical address in the large handy page descriptor.
     */
    int rc = GMMR0AllocateLargePage(pGVM, pVM, idCpu, _2M,
                                    &pVM->pgm.s.aLargeHandyPage[0].idPage,
                                    &pVM->pgm.s.aLargeHandyPage[0].HCPhysGCPhys);
    if (RT_SUCCESS(rc))
        pVM->pgm.s.cLargeHandyPages = 1;

    return rc;
}
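/* Editorial note: presumably the single descriptor in aLargeHandyPage[0] is
   then consumed by the ring-3 caller (PGMR3PhysAllocateLargeHandyPage, named
   in the doc comment above), which maps and clears the 2 MB page as required
   by the @remarks; this is an assumption, not shown in this file. */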


#ifdef VBOX_WITH_PCI_PASSTHROUGH
/* Interface sketch.  The interface belongs to a global PCI pass-through
   manager.  It shall use the global VM handle, not the user VM handle, to
   store the per-VM info (domain), since that is all ring-0 stuff; thus
   passing pGVM here.  I've tentatively prefixed the functions 'GPciRawR0';
   we can discuss the PciRaw code reorganization when I'm back from
   vacation.

   I've implemented the initial IOMMU setup below.  For things to work
   reliably, we will probably need to add a whole bunch of checks and
   GPciRawR0GuestPageUpdate calls to the PGM code.  For the present,
   assuming nested paging (enforced) and prealloc (enforced), no
   ballooning (check missing), page sharing (check missing) or live
   migration (check missing), it might work fine.  At least if some
   VM power-off hook is present and can tear down the IOMMU page tables. */

/**
 * Tells the global PCI pass-through manager that we are about to set up the
 * guest page to host page mappings for the specified VM.
 *
 * @returns VBox status code.
 *
 * @param   pGVM    The ring-0 VM structure.
 */
VMMR0_INT_DECL(int) GPciRawR0GuestPageBeginAssignments(PGVM pGVM)
{
    NOREF(pGVM);
    return VINF_SUCCESS;
}


/**
 * Assigns a host page mapping for a guest page.
 *
 * This is only used when setting up the mappings, i.e. between
 * GPciRawR0GuestPageBeginAssignments and GPciRawR0GuestPageEndAssignments.
 *
 * @returns VBox status code.
 * @param   pGVM    The ring-0 VM structure.
 * @param   GCPhys  The address of the guest page (page aligned).
 * @param   HCPhys  The address of the host page (page aligned).
 */
VMMR0_INT_DECL(int) GPciRawR0GuestPageAssign(PGVM pGVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys)
{
    AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR_3);
    AssertReturn(!(HCPhys & PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR_3);

    if (pGVM->rawpci.s.pfnContigMemInfo)
        /** @todo what do we do on failure? */
        pGVM->rawpci.s.pfnContigMemInfo(&pGVM->rawpci.s, HCPhys, GCPhys, PAGE_SIZE, PCIRAW_MEMINFO_MAP);

    return VINF_SUCCESS;
}


/**
 * Indicates that the specified guest page doesn't exist or doesn't have a host
 * page mapping we trust PCI pass-through with.
 *
 * This is only used when setting up the mappings, i.e. between
 * GPciRawR0GuestPageBeginAssignments and GPciRawR0GuestPageEndAssignments.
 *
 * @returns VBox status code.
 * @param   pGVM    The ring-0 VM structure.
 * @param   GCPhys  The address of the guest page (page aligned).
 */
VMMR0_INT_DECL(int) GPciRawR0GuestPageUnassign(PGVM pGVM, RTGCPHYS GCPhys)
{
    AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR_3);

    if (pGVM->rawpci.s.pfnContigMemInfo)
        /** @todo what do we do on failure? */
        pGVM->rawpci.s.pfnContigMemInfo(&pGVM->rawpci.s, 0, GCPhys, PAGE_SIZE, PCIRAW_MEMINFO_UNMAP);

    return VINF_SUCCESS;
}


/**
 * Tells the global PCI pass-through manager that we have completed setting up
 * the guest page to host page mappings for the specified VM.
 *
 * This complements GPciRawR0GuestPageBeginAssignments and will be called even
 * if some page assignment failed.
 *
 * @returns VBox status code.
 *
 * @param   pGVM    The ring-0 VM structure.
 */
VMMR0_INT_DECL(int) GPciRawR0GuestPageEndAssignments(PGVM pGVM)
{
    NOREF(pGVM);
    return VINF_SUCCESS;
}


/**
 * Tells the global PCI pass-through manager that a guest page mapping has
 * changed after the initial setup.
 *
 * @returns VBox status code.
 * @param   pGVM    The ring-0 VM structure.
 * @param   GCPhys  The address of the guest page (page aligned).
 * @param   HCPhys  The new host page address or NIL_RTHCPHYS if
 *                  now unassigned.
 */
VMMR0_INT_DECL(int) GPciRawR0GuestPageUpdate(PGVM pGVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys)
{
    AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR_4);
    AssertReturn(!(HCPhys & PAGE_OFFSET_MASK) || HCPhys == NIL_RTHCPHYS, VERR_INTERNAL_ERROR_4);
    NOREF(pGVM);
    return VINF_SUCCESS;
}

#endif /* VBOX_WITH_PCI_PASSTHROUGH */
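/* Editorial illustration (not part of the original file): when
   VBOX_WITH_PCI_PASSTHROUGH is defined, the expected call sequence for the
   interface above, as exercised by PGMR0PhysSetupIoMmu below, is:
       GPciRawR0GuestPageBeginAssignments(pGVM);
       ... GPciRawR0GuestPageAssign() / GPciRawR0GuestPageUnassign() per RAM page ...
       GPciRawR0GuestPageEndAssignments(pGVM);
   with GPciRawR0GuestPageUpdate() reserved for changes after the initial setup. */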


/**
 * Sets up the IOMMU when a raw PCI device is enabled.
 *
 * @note    This is a hack that will probably be remodelled and refined later!
 *
 * @returns VBox status code.
 *
 * @param   pGVM    The global (ring-0) VM structure.
 * @param   pVM     The cross context VM structure.
 */
VMMR0_INT_DECL(int) PGMR0PhysSetupIoMmu(PGVM pGVM, PVM pVM)
{
    int rc = GVMMR0ValidateGVMandVM(pGVM, pVM);
    if (RT_FAILURE(rc))
        return rc;

#ifdef VBOX_WITH_PCI_PASSTHROUGH
    if (pVM->pgm.s.fPciPassthrough)
    {
        /*
         * The simplistic approach - enumerate all the pages and tell the
         * IOMMU about each of them.
         */
        pgmLock(pVM);
        rc = GPciRawR0GuestPageBeginAssignments(pGVM);
        if (RT_SUCCESS(rc))
        {
            for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR0; RT_SUCCESS(rc) && pRam; pRam = pRam->pNextR0)
            {
                PPGMPAGE pPage  = &pRam->aPages[0];
                RTGCPHYS GCPhys = pRam->GCPhys;
                uint32_t cLeft  = pRam->cb >> PAGE_SHIFT;
                while (cLeft-- > 0)
                {
                    /* Only expose pages that are 100% safe for now. */
                    if (   PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM
                        && PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED
                        && !PGM_PAGE_HAS_ANY_HANDLERS(pPage))
                        rc = GPciRawR0GuestPageAssign(pGVM, GCPhys, PGM_PAGE_GET_HCPHYS(pPage));
                    else
                        rc = GPciRawR0GuestPageUnassign(pGVM, GCPhys);

                    /* next */
                    pPage++;
                    GCPhys += PAGE_SIZE;
                }
            }

            int rc2 = GPciRawR0GuestPageEndAssignments(pGVM);
            if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
                rc = rc2;
        }
        pgmUnlock(pVM);
    }
    else
#endif
        rc = VERR_NOT_SUPPORTED;
    return rc;
}


/**
 * \#PF Handler for nested paging.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM                 The cross context VM structure.
 * @param   pVCpu               The cross context virtual CPU structure.
 * @param   enmShwPagingMode    Paging mode for the nested page tables.
 * @param   uErr                The trap error code.
 * @param   pRegFrame           Trap register frame.
 * @param   GCPhysFault         The fault address.
 */
VMMR0DECL(int) PGMR0Trap0eHandlerNestedPaging(PVM pVM, PVMCPU pVCpu, PGMMODE enmShwPagingMode, RTGCUINT uErr,
                                              PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault)
{
    int rc;

    LogFlow(("PGMTrap0eHandler: uErr=%RGx GCPhysFault=%RGp eip=%RGv\n", uErr, GCPhysFault, (RTGCPTR)pRegFrame->rip));
    STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0e, a);
    STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );

    /* AMD uses the host's paging mode; Intel has a single mode (EPT). */
    AssertMsg(   enmShwPagingMode == PGMMODE_32_BIT || enmShwPagingMode == PGMMODE_PAE   || enmShwPagingMode == PGMMODE_PAE_NX
              || enmShwPagingMode == PGMMODE_AMD64  || enmShwPagingMode == PGMMODE_AMD64_NX || enmShwPagingMode == PGMMODE_EPT,
              ("enmShwPagingMode=%d\n", enmShwPagingMode));

    /* Reserved shouldn't end up here. */
    Assert(!(uErr & X86_TRAP_PF_RSVD));

#ifdef VBOX_WITH_STATISTICS
    /*
     * Error code stats.
     */
    if (uErr & X86_TRAP_PF_US)
    {
        if (!(uErr & X86_TRAP_PF_P))
        {
            if (uErr & X86_TRAP_PF_RW)
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentWrite);
            else
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentRead);
        }
        else if (uErr & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSWrite);
        else if (uErr & X86_TRAP_PF_RSVD)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSReserved);
        else if (uErr & X86_TRAP_PF_ID)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNXE);
        else
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSRead);
    }
    else
    {   /* Supervisor */
        if (!(uErr & X86_TRAP_PF_P))
        {
            if (uErr & X86_TRAP_PF_RW)
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentWrite);
            else
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentRead);
        }
        else if (uErr & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVWrite);
        else if (uErr & X86_TRAP_PF_ID)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSNXE);
        else if (uErr & X86_TRAP_PF_RSVD)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVReserved);
    }
#endif

    /*
     * Call the worker.
     *
     * Note! We pretend the guest is in protected mode without paging, so we
     *       can use existing code to build the nested page tables.
     */
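    /* Editorial note: each case below dispatches into the corresponding
       "both mode" worker instantiated from PGMR0Bth.h at the top of this
       file (one shadow paging mode paired with the protected-mode,
       no-paging guest mode). */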
    bool fLockTaken = false;
    switch (enmShwPagingMode)
    {
        case PGMMODE_32_BIT:
            rc = PGM_BTH_NAME_32BIT_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
            break;
        case PGMMODE_PAE:
        case PGMMODE_PAE_NX:
            rc = PGM_BTH_NAME_PAE_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
            break;
        case PGMMODE_AMD64:
        case PGMMODE_AMD64_NX:
            rc = PGM_BTH_NAME_AMD64_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
            break;
        case PGMMODE_EPT:
            rc = PGM_BTH_NAME_EPT_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
            break;
        default:
            AssertFailed();
            rc = VERR_INVALID_PARAMETER;
            break;
    }
    if (fLockTaken)
    {
        PGM_LOCK_ASSERT_OWNER(pVM);
        pgmUnlock(pVM);
    }

    if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
        rc = VINF_SUCCESS;
    /*
     * Handle the case where we cannot interpret the instruction because we cannot get the guest physical address
     * via its page tables, see @bugref{6043}.
     */
    else if (   rc == VERR_PAGE_NOT_PRESENT                 /* SMP only; disassembly might fail. */
             || rc == VERR_PAGE_TABLE_NOT_PRESENT           /* seen with UNI & SMP */
             || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT   /* seen with SMP */
             || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)     /* precaution */
    {
        Log(("WARNING: Unexpected VERR_PAGE_TABLE_NOT_PRESENT (%d) for page fault at %RGp error code %x (rip=%RGv)\n", rc, GCPhysFault, uErr, pRegFrame->rip));
        /* Some kind of inconsistency in the SMP case; it's safe to just execute the instruction again; not sure about
           single VCPU VMs though. */
        rc = VINF_SUCCESS;
    }

    STAM_STATS({ if (!pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution))
                     pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2Misc; });
    STAM_PROFILE_STOP_EX(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0e, pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution), a);
    return rc;
}


/**
 * \#PF Handler for deliberate nested paging misconfiguration (reserved bit)
 * employed for MMIO pages.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM                 The cross context VM structure.
 * @param   pVCpu               The cross context virtual CPU structure.
 * @param   enmShwPagingMode    Paging mode for the nested page tables.
 * @param   pRegFrame           Trap register frame.
 * @param   GCPhysFault         The fault address.
 * @param   uErr                The error code, UINT32_MAX if not available
 *                              (VT-x).
 */
VMMR0DECL(VBOXSTRICTRC) PGMR0Trap0eHandlerNPMisconfig(PVM pVM, PVMCPU pVCpu, PGMMODE enmShwPagingMode,
                                                      PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, uint32_t uErr)
{
#ifdef PGM_WITH_MMIO_OPTIMIZATIONS
    STAM_PROFILE_START(&pVCpu->CTX_SUFF(pStats)->StatR0NpMiscfg, a);
    VBOXSTRICTRC rc;

    /*
     * Try to look up the all-access physical handler for the address.
     */
    pgmLock(pVM);
    PPGMPHYSHANDLER pHandler = pgmHandlerPhysicalLookup(pVM, GCPhysFault);
    PPGMPHYSHANDLERTYPEINT pHandlerType = RT_LIKELY(pHandler) ? PGMPHYSHANDLER_GET_TYPE(pVM, pHandler) : NULL;
    if (RT_LIKELY(pHandler && pHandlerType->enmKind != PGMPHYSHANDLERKIND_WRITE))
    {
        /*
         * If the handler has aliased pages or pages that have been temporarily
         * disabled, we'll have to take a detour to make sure we resync them
         * to avoid lots of unnecessary exits.
         */
        PPGMPAGE pPage;
        if (   (   pHandler->cAliasedPages
                || pHandler->cTmpOffPages)
            && (   (pPage = pgmPhysGetPage(pVM, GCPhysFault)) == NULL
                || PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_DISABLED)
           )
        {
            Log(("PGMR0Trap0eHandlerNPMisconfig: Resyncing aliases / tmp-off page at %RGp (uErr=%#x) %R[pgmpage]\n", GCPhysFault, uErr, pPage));
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0NpMiscfgSyncPage);
            rc = pgmShwSyncNestedPageLocked(pVCpu, GCPhysFault, 1 /*cPages*/, enmShwPagingMode);
            pgmUnlock(pVM);
        }
        else
        {
            if (pHandlerType->CTX_SUFF(pfnPfHandler))
            {
                void *pvUser = pHandler->CTX_SUFF(pvUser);
                STAM_PROFILE_START(&pHandler->Stat, h);
                pgmUnlock(pVM);

                Log6(("PGMR0Trap0eHandlerNPMisconfig: calling %p(,%#x,,%RGp,%p)\n", pHandlerType->CTX_SUFF(pfnPfHandler), uErr, GCPhysFault, pvUser));
                rc = pHandlerType->CTX_SUFF(pfnPfHandler)(pVM, pVCpu, uErr == UINT32_MAX ? RTGCPTR_MAX : uErr, pRegFrame,
                                                          GCPhysFault, GCPhysFault, pvUser);

#ifdef VBOX_WITH_STATISTICS
                pgmLock(pVM);
                pHandler = pgmHandlerPhysicalLookup(pVM, GCPhysFault);
                if (pHandler)
                    STAM_PROFILE_STOP(&pHandler->Stat, h);
                pgmUnlock(pVM);
#endif
            }
            else
            {
                pgmUnlock(pVM);
                Log(("PGMR0Trap0eHandlerNPMisconfig: %RGp (uErr=%#x) -> R3\n", GCPhysFault, uErr));
                rc = VINF_EM_RAW_EMULATE_INSTR;
            }
        }
    }
    else
    {
        /*
         * Must be out of sync, so do a SyncPage and restart the instruction.
         *
         * ASSUMES that ALL handlers are page aligned and cover whole pages
         * (assumption asserted in PGMHandlerPhysicalRegisterEx).
         */
        Log(("PGMR0Trap0eHandlerNPMisconfig: Out of sync page at %RGp (uErr=%#x)\n", GCPhysFault, uErr));
        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0NpMiscfgSyncPage);
        rc = pgmShwSyncNestedPageLocked(pVCpu, GCPhysFault, 1 /*cPages*/, enmShwPagingMode);
        pgmUnlock(pVM);
    }

    STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0NpMiscfg, a);
    return rc;

#else
    AssertLogRelFailed();
    return VERR_PGM_NOT_USED_IN_MODE;
#endif
}