VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/PGMR0.cpp@ 75649

Last change on this file since 75649 was 73253, checked in by vboxsync, 6 years ago

PGM,HM: Added todos about cleaning up the nested packing hacks. (bugref:9044)

1/* $Id: PGMR0.cpp 73253 2018-07-19 20:01:45Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Ring-0.
4 */
5
6/*
7 * Copyright (C) 2007-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_PGM
23#include <VBox/rawpci.h>
24#include <VBox/vmm/pgm.h>
25#include <VBox/vmm/gmm.h>
26#include <VBox/vmm/gvm.h>
27#include "PGMInternal.h"
28#include <VBox/vmm/vm.h>
29#include "PGMInline.h"
30#include <VBox/log.h>
31#include <VBox/err.h>
32#include <iprt/assert.h>
33#include <iprt/mem.h>
34
35
36/*
37 * Instantiate the ring-0 header/code templates.
38 */
39/** @todo r=bird: Gotta love this nested paging hacking we're still carrying with us... (Split PGM_TYPE_NESTED.) */
40#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
41#include "PGMR0Bth.h"
42#undef PGM_BTH_NAME
43
44#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
45#include "PGMR0Bth.h"
46#undef PGM_BTH_NAME
47
48#define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
49#include "PGMR0Bth.h"
50#undef PGM_BTH_NAME
51
52#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
53#include "PGMR0Bth.h"
54#undef PGM_BTH_NAME
55
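The four blocks above instantiate one body template (PGMR0Bth.h) per shadow paging mode by redefining the name-mangling macro before each inclusion. Below is a minimal, self-contained model of that multiple-inclusion, token-pasting pattern; every name in it is an illustrative stand-in rather than a VirtualBox API, and the template body is folded into a macro instead of a separate header so the sketch fits in one file.

#include <stdio.h>

#define MAKE_NAME(prefix, name)  prefix##_##name

/* Stand-in for '#include "PGMR0Bth.h"': the shared body is generated once per
   name prefix, just like the Trap0eHandler instances above. */
#define INSTANTIATE_HANDLER(prefix) \
    static int MAKE_NAME(prefix, Trap0eHandler)(unsigned uErr) \
    { \
        printf(#prefix " handler: uErr=%#x\n", uErr); \
        return 0; \
    }

INSTANTIATE_HANDLER(Bth32BitProt)   /* ~ PGM_BTH_NAME_32BIT_PROT */
INSTANTIATE_HANDLER(BthPaeProt)     /* ~ PGM_BTH_NAME_PAE_PROT   */
INSTANTIATE_HANDLER(BthEptProt)     /* ~ PGM_BTH_NAME_EPT_PROT   */

int main(void)
{
    Bth32BitProt_Trap0eHandler(0x02);
    BthPaeProt_Trap0eHandler(0x03);
    BthEptProt_Trap0eHandler(0x04);
    return 0;
}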
56
57/**
58 * Worker function for PGMR3PhysAllocateHandyPages and pgmPhysEnsureHandyPage.
59 *
60 * @returns The following VBox status codes.
61 * @retval VINF_SUCCESS on success. FF cleared.
62 * @retval VINF_EM_NO_MEMORY if we're out of memory. The FF is set in this case.
63 *
64 * @param pGVM The global (ring-0) VM structure.
65 * @param pVM The cross context VM structure.
66 * @param idCpu The ID of the calling EMT.
67 *
68 * @thread EMT(idCpu)
69 *
70 * @remarks Must be called from within the PGM critical section. The caller
71 * must clear the new pages.
72 */
73VMMR0_INT_DECL(int) PGMR0PhysAllocateHandyPages(PGVM pGVM, PVM pVM, VMCPUID idCpu)
74{
75 /*
76 * Validate inputs.
77 */
78 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID); /* caller already checked this, but just to be sure. */
79 AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_NOT_OWNER);
80 PGM_LOCK_ASSERT_OWNER_EX(pVM, &pVM->aCpus[idCpu]);
81
82 /*
83 * Check for error injection.
84 */
85 if (RT_UNLIKELY(pVM->pgm.s.fErrInjHandyPages))
86 return VERR_NO_MEMORY;
87
88 /*
89 * Try to allocate a full set of handy pages.
90 */
91 uint32_t iFirst = pVM->pgm.s.cHandyPages;
92 AssertReturn(iFirst <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), VERR_PGM_HANDY_PAGE_IPE);
93 uint32_t cPages = RT_ELEMENTS(pVM->pgm.s.aHandyPages) - iFirst;
94 if (!cPages)
95 return VINF_SUCCESS;
96 int rc = GMMR0AllocateHandyPages(pGVM, pVM, idCpu, cPages, cPages, &pVM->pgm.s.aHandyPages[iFirst]);
97 if (RT_SUCCESS(rc))
98 {
99#ifdef VBOX_STRICT
100 for (uint32_t i = 0; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
101 {
102 Assert(pVM->pgm.s.aHandyPages[i].idPage != NIL_GMM_PAGEID);
103 Assert(pVM->pgm.s.aHandyPages[i].idPage <= GMM_PAGEID_LAST);
104 Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
105 Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys != NIL_RTHCPHYS);
106 Assert(!(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
107 }
108#endif
109
110 pVM->pgm.s.cHandyPages = RT_ELEMENTS(pVM->pgm.s.aHandyPages);
111 }
112 else if (rc != VERR_GMM_SEED_ME)
113 {
114 if ( ( rc == VERR_GMM_HIT_GLOBAL_LIMIT
115 || rc == VERR_GMM_HIT_VM_ACCOUNT_LIMIT)
116 && iFirst < PGM_HANDY_PAGES_MIN)
117 {
118
119#ifdef VBOX_STRICT
120 /* We're ASSUMING that GMM has updated all the entries before failing us. */
121 uint32_t i;
122 for (i = iFirst; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
123 {
124 Assert(pVM->pgm.s.aHandyPages[i].idPage == NIL_GMM_PAGEID);
125 Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
126 Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys == NIL_RTHCPHYS);
127 }
128#endif
129
130 /*
131 * Reduce the number of pages until we hit the minimum limit.
132 */
133 do
134 {
135 cPages >>= 1;
136 if (cPages + iFirst < PGM_HANDY_PAGES_MIN)
137 cPages = PGM_HANDY_PAGES_MIN - iFirst;
138 rc = GMMR0AllocateHandyPages(pGVM, pVM, idCpu, 0, cPages, &pVM->pgm.s.aHandyPages[iFirst]);
139 } while ( ( rc == VERR_GMM_HIT_GLOBAL_LIMIT
140 || rc == VERR_GMM_HIT_VM_ACCOUNT_LIMIT)
141 && cPages + iFirst > PGM_HANDY_PAGES_MIN);
142 if (RT_SUCCESS(rc))
143 {
144#ifdef VBOX_STRICT
145 i = iFirst + cPages;
146 while (i-- > 0)
147 {
148 Assert(pVM->pgm.s.aHandyPages[i].idPage != NIL_GMM_PAGEID);
149 Assert(pVM->pgm.s.aHandyPages[i].idPage <= GMM_PAGEID_LAST);
150 Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
151 Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys != NIL_RTHCPHYS);
152 Assert(!(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
153 }
154
155 for (i = cPages + iFirst; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
156 {
157 Assert(pVM->pgm.s.aHandyPages[i].idPage == NIL_GMM_PAGEID);
158 Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
159 Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys == NIL_RTHCPHYS);
160 }
161#endif
162
163 pVM->pgm.s.cHandyPages = iFirst + cPages;
164 }
165 }
166
167 if (RT_FAILURE(rc) && rc != VERR_GMM_SEED_ME)
168 {
169 LogRel(("PGMR0PhysAllocateHandyPages: rc=%Rrc iFirst=%d cPages=%d\n", rc, iFirst, cPages));
170 VM_FF_SET(pVM, VM_FF_PGM_NO_MEMORY);
171 }
172 }
173
174
175 LogFlow(("PGMR0PhysAllocateHandyPages: cPages=%d rc=%Rrc\n", cPages, rc));
176 return rc;
177}
178
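When GMMR0AllocateHandyPages fails with a global or per-VM limit, the do/while loop above keeps halving the request while making sure the handy-page array would still reach PGM_HANDY_PAGES_MIN. A standalone sketch of that back-off strategy, with hypothetical constants and a fake allocator standing in for the real GMM call:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HANDY_PAGES_MAX  128   /* stand-in for RT_ELEMENTS(aHandyPages) */
#define HANDY_PAGES_MIN   32   /* stand-in for PGM_HANDY_PAGES_MIN      */

/* Hypothetical allocator: succeeds only when asked for 'limit' pages or fewer. */
static bool tryAllocate(uint32_t cPages, uint32_t limit)
{
    return cPages <= limit;
}

int main(void)
{
    uint32_t iFirst = 10;                        /* pages already on hand      */
    uint32_t cPages = HANDY_PAGES_MAX - iFirst;  /* first ask for the full set */
    uint32_t limit  = 40;                        /* pretend the host is low    */
    bool     fOk    = tryAllocate(cPages, limit);

    /* Same back-off as above: halve the request, but never let the total drop
       below the minimum number of handy pages needed to make progress. */
    while (!fOk && cPages + iFirst > HANDY_PAGES_MIN)
    {
        cPages >>= 1;
        if (cPages + iFirst < HANDY_PAGES_MIN)
            cPages = HANDY_PAGES_MIN - iFirst;
        fOk = tryAllocate(cPages, limit);
    }
    printf("settled on %u page(s): %s\n", cPages, fOk ? "success" : "failure");
    return 0;
}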
179
180/**
181 * Flushes any changes pending in the handy page array.
182 *
183 * It is very important that this gets done when page sharing is enabled.
184 *
185 * @returns The following VBox status codes.
186 * @retval VINF_SUCCESS on success. FF cleared.
187 *
188 * @param pGVM The global (ring-0) VM structure.
189 * @param pVM The cross context VM structure.
190 * @param idCpu The ID of the calling EMT.
191 *
192 * @thread EMT(idCpu)
193 *
194 * @remarks Must be called from within the PGM critical section.
195 */
196VMMR0_INT_DECL(int) PGMR0PhysFlushHandyPages(PGVM pGVM, PVM pVM, VMCPUID idCpu)
197{
198 /*
199 * Validate inputs.
200 */
201 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID); /* caller already checked this, but just to be sure. */
202 AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_NOT_OWNER);
203 PGM_LOCK_ASSERT_OWNER_EX(pVM, &pVM->aCpus[idCpu]);
204
205 /*
206 * Try to allocate a full set of handy pages.
207 */
208 uint32_t iFirst = pVM->pgm.s.cHandyPages;
209 AssertReturn(iFirst <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), VERR_PGM_HANDY_PAGE_IPE);
210 uint32_t cPages = RT_ELEMENTS(pVM->pgm.s.aHandyPages) - iFirst;
211 if (!cPages)
212 return VINF_SUCCESS;
213 int rc = GMMR0AllocateHandyPages(pGVM, pVM, idCpu, cPages, 0, &pVM->pgm.s.aHandyPages[iFirst]);
214
215 LogFlow(("PGMR0PhysFlushHandyPages: cPages=%d rc=%Rrc\n", cPages, rc));
216 return rc;
217}
218
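The only difference from the allocation worker above is which of the two page counts passed to GMMR0AllocateHandyPages is non-zero: the allocation path asks for (cPages, cPages) or (0, cPages), while the flush path passes (cPages, 0). Assuming the first count means "entries to push back to GMM" and the second "fresh pages to allocate" (an assumption here, since the GMM header is not part of this file), a toy model of the two call sites:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for GMMR0AllocateHandyPages. */
static void handyPagesOp(uint32_t cToUpdate, uint32_t cToAlloc)
{
    printf("push %u pending entries to GMM, allocate %u fresh pages\n",
           cToUpdate, cToAlloc);
}

int main(void)
{
    uint32_t cFree = 96;           /* free slots at the end of the handy page array */
    handyPagesOp(cFree, cFree);    /* PGMR0PhysAllocateHandyPages: refill the array  */
    handyPagesOp(cFree, 0);        /* PGMR0PhysFlushHandyPages: flush, no new pages  */
    return 0;
}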
219
220/**
221 * Worker function for PGMR3PhysAllocateLargeHandyPage.
222 *
223 * @returns The following VBox status codes.
224 * @retval VINF_SUCCESS on success.
225 * @retval VINF_EM_NO_MEMORY if we're out of memory.
226 *
227 * @param pGVM The global (ring-0) VM structure.
228 * @param pVM The cross context VM structure.
229 * @param idCpu The ID of the calling EMT.
230 *
231 * @thread EMT(idCpu)
232 *
233 * @remarks Must be called from within the PGM critical section. The caller
234 * must clear the new pages.
235 */
236VMMR0_INT_DECL(int) PGMR0PhysAllocateLargeHandyPage(PGVM pGVM, PVM pVM, VMCPUID idCpu)
237{
238 /*
239 * Validate inputs.
240 */
241 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID); /* caller already checked this, but just to be sure. */
242 AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_NOT_OWNER);
243 PGM_LOCK_ASSERT_OWNER_EX(pVM, &pVM->aCpus[idCpu]);
244 Assert(!pVM->pgm.s.cLargeHandyPages);
245
246 /*
247 * Do the job.
248 */
249 int rc = GMMR0AllocateLargePage(pGVM, pVM, idCpu, _2M,
250 &pVM->pgm.s.aLargeHandyPage[0].idPage,
251 &pVM->pgm.s.aLargeHandyPage[0].HCPhysGCPhys);
252 if (RT_SUCCESS(rc))
253 pVM->pgm.s.cLargeHandyPages = 1;
254
255 return rc;
256}
257
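A successful call stores exactly one 2 MiB (_2M) chunk in aLargeHandyPage, which ring-3 then splits into ordinary guest pages. A short sketch of the arithmetic behind that, assuming the usual 4 KiB base page size; the alignment test reflects the general large-page requirement rather than anything specific to this file:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE   0x1000u      /* 4 KiB small page           */
#define LARGE_PAGE  0x200000u    /* _2M: one 2 MiB large page  */

int main(void)
{
    /* One large-page allocation backs this many ordinary guest pages. */
    printf("%u small pages per large page\n", LARGE_PAGE / PAGE_SIZE);   /* 512 */

    /* Large pages only help when the guest-physical range is 2 MiB aligned. */
    uint64_t GCPhys = 0x40200000u;
    printf("GCPhys %#llx is %s2 MiB aligned\n", (unsigned long long)GCPhys,
           (GCPhys & (LARGE_PAGE - 1)) ? "not " : "");
    return 0;
}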
258
259#ifdef VBOX_WITH_PCI_PASSTHROUGH
260/* Interface sketch. The interface belongs to a global PCI pass-through
261 manager. It shall use the global VM handle, not the user VM handle to
262 store the per-VM info (domain) since that is all ring-0 stuff, thus
263 passing pGVM here. I've tentatively prefixed the functions 'GPciRawR0',
264 we can discuss the PciRaw code reorganization when I'm back from
265 vacation.
266
267 I've implemented the initial IOMMU setup below. For things to work
268 reliably, we will probably need to add a whole bunch of checks and
269 GPciRawR0GuestPageUpdate calls to the PGM code. For the present,
270 assuming nested paging (enforced) and prealloc (enforced), no
271 ballooning (check missing), page sharing (check missing) or live
272 migration (check missing), it might work fine. At least if some
273 VM power-off hook is present and can tear down the IOMMU page tables. */
274
275/**
276 * Tells the global PCI pass-through manager that we are about to set up the
277 * guest page to host page mappings for the specified VM.
278 *
279 * @returns VBox status code.
280 *
281 * @param pGVM The ring-0 VM structure.
282 */
283VMMR0_INT_DECL(int) GPciRawR0GuestPageBeginAssignments(PGVM pGVM)
284{
285 NOREF(pGVM);
286 return VINF_SUCCESS;
287}
288
289
290/**
291 * Assigns a host page mapping for a guest page.
292 *
293 * This is only used when setting up the mappings, i.e. between
294 * GPciRawR0GuestPageBeginAssignments and GPciRawR0GuestPageEndAssignments.
295 *
296 * @returns VBox status code.
297 * @param pGVM The ring-0 VM structure.
298 * @param GCPhys The address of the guest page (page aligned).
299 * @param HCPhys The address of the host page (page aligned).
300 */
301VMMR0_INT_DECL(int) GPciRawR0GuestPageAssign(PGVM pGVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys)
302{
303 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR_3);
304 AssertReturn(!(HCPhys & PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR_3);
305
306 if (pGVM->rawpci.s.pfnContigMemInfo)
307 /** @todo what do we do on failure? */
308 pGVM->rawpci.s.pfnContigMemInfo(&pGVM->rawpci.s, HCPhys, GCPhys, PAGE_SIZE, PCIRAW_MEMINFO_MAP);
309
310 return VINF_SUCCESS;
311}
312
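Both AssertReturn checks above reject any address that has offset bits set. A minimal illustration of that page-alignment test, assuming the x86 4 KiB page size, where PAGE_OFFSET_MASK covers the low 12 bits:

#include <assert.h>
#include <stdint.h>

#define PAGE_SIZE         0x1000u
#define PAGE_OFFSET_MASK  (PAGE_SIZE - 1)   /* 0xfff: offset within the page */

/* An address is page aligned exactly when none of its offset bits are set. */
static int isPageAligned(uint64_t addr)
{
    return (addr & PAGE_OFFSET_MASK) == 0;
}

int main(void)
{
    assert( isPageAligned(0x7f001000));   /* aligned: low 12 bits are zero */
    assert(!isPageAligned(0x7f001080));   /* not aligned: offset 0x080 set */
    return 0;
}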
313
314/**
315 * Indicates that the specified guest page doesn't exist or doesn't have a host
316 * page mapping that we trust PCI pass-through with.
317 *
318 * This is only used when setting up the mappings, i.e. between
319 * GPciRawR0GuestPageBeginAssignments and GPciRawR0GuestPageEndAssignments.
320 *
321 * @returns VBox status code.
322 * @param pGVM The ring-0 VM structure.
323 * @param GCPhys The address of the guest page (page aligned).
325 */
326VMMR0_INT_DECL(int) GPciRawR0GuestPageUnassign(PGVM pGVM, RTGCPHYS GCPhys)
327{
328 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR_3);
329
330 if (pGVM->rawpci.s.pfnContigMemInfo)
331 /** @todo what do we do on failure? */
332 pGVM->rawpci.s.pfnContigMemInfo(&pGVM->rawpci.s, 0, GCPhys, PAGE_SIZE, PCIRAW_MEMINFO_UNMAP);
333
334 return VINF_SUCCESS;
335}
336
337
338/**
339 * Tells the global PCI pass-through manager that we have completed setting up
340 * the guest page to host page mappings for the specified VM.
341 *
342 * This complements GPciRawR0GuestPageBeginAssignments and will be called even
343 * if some page assignment failed.
344 *
345 * @returns VBox status code.
346 *
347 * @param pGVM The ring-0 VM structure.
348 */
349VMMR0_INT_DECL(int) GPciRawR0GuestPageEndAssignments(PGVM pGVM)
350{
351 NOREF(pGVM);
352 return VINF_SUCCESS;
353}
354
355
356/**
357 * Tells the global PCI pass-through manager that a guest page mapping has
358 * changed after the initial setup.
359 *
360 * @returns VBox status code.
361 * @param pGVM The ring-0 VM structure.
362 * @param GCPhys The address of the guest page (page aligned).
363 * @param HCPhys The new host page address or NIL_RTHCPHYS if
364 * now unassigned.
365 */
366VMMR0_INT_DECL(int) GPciRawR0GuestPageUpdate(PGVM pGVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys)
367{
368 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR_4);
369 AssertReturn(!(HCPhys & PAGE_OFFSET_MASK) || HCPhys == NIL_RTHCPHYS, VERR_INTERNAL_ERROR_4);
370 NOREF(pGVM);
371 return VINF_SUCCESS;
372}
373
374#endif /* VBOX_WITH_PCI_PASSTHROUGH */
375
376
377/**
378 * Sets up the IOMMU when raw PCI device is enabled.
379 *
380 * @note This is a hack that will probably be remodelled and refined later!
381 *
382 * @returns VBox status code.
383 *
384 * @param pGVM The global (ring-0) VM structure.
385 * @param pVM The cross context VM structure.
386 */
387VMMR0_INT_DECL(int) PGMR0PhysSetupIoMmu(PGVM pGVM, PVM pVM)
388{
389 int rc = GVMMR0ValidateGVMandVM(pGVM, pVM);
390 if (RT_FAILURE(rc))
391 return rc;
392
393#ifdef VBOX_WITH_PCI_PASSTHROUGH
394 if (pVM->pgm.s.fPciPassthrough)
395 {
396 /*
397 * The Simplistic Approach - Enumerate all the pages and tell the
398 * IOMMU about each of them.
399 */
400 pgmLock(pVM);
401 rc = GPciRawR0GuestPageBeginAssignments(pGVM);
402 if (RT_SUCCESS(rc))
403 {
404 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR0; RT_SUCCESS(rc) && pRam; pRam = pRam->pNextR0)
405 {
406 PPGMPAGE pPage = &pRam->aPages[0];
407 RTGCPHYS GCPhys = pRam->GCPhys;
408 uint32_t cLeft = pRam->cb >> PAGE_SHIFT;
409 while (cLeft-- > 0)
410 {
411 /* Only expose pages that are 100% safe for now. */
412 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM
413 && PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED
414 && !PGM_PAGE_HAS_ANY_HANDLERS(pPage))
415 rc = GPciRawR0GuestPageAssign(pGVM, GCPhys, PGM_PAGE_GET_HCPHYS(pPage));
416 else
417 rc = GPciRawR0GuestPageUnassign(pGVM, GCPhys);
418
419 /* next */
420 pPage++;
421 GCPhys += PAGE_SIZE;
422 }
423 }
424
425 int rc2 = GPciRawR0GuestPageEndAssignments(pGVM);
426 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
427 rc = rc2;
428 }
429 pgmUnlock(pVM);
430 }
431 else
432#endif
433 rc = VERR_NOT_SUPPORTED;
434 return rc;
435}
436
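The enumeration above visits every page of every RAM range: the page count is the range size shifted right by PAGE_SHIFT, and the guest-physical cursor advances by PAGE_SIZE per iteration. A self-contained sketch of that walk over one hypothetical range:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT  12
#define PAGE_SIZE   (1u << PAGE_SHIFT)

/* Hypothetical, much-simplified RAM range: a start address and a byte count. */
typedef struct RAMRANGE
{
    uint64_t GCPhys;   /* guest-physical start, page aligned     */
    uint64_t cb;       /* size in bytes, a multiple of PAGE_SIZE */
} RAMRANGE;

int main(void)
{
    RAMRANGE Ram    = { 0x00100000, 0x00400000 };   /* 4 MiB starting at 1 MiB */
    uint64_t GCPhys = Ram.GCPhys;
    uint32_t cLeft  = (uint32_t)(Ram.cb >> PAGE_SHIFT);
    uint32_t cDone  = 0;

    /* Same shape as the loop above: one iteration per guest page in the range. */
    while (cLeft-- > 0)
    {
        /* the real code decides between GuestPageAssign and GuestPageUnassign here */
        GCPhys += PAGE_SIZE;
        cDone++;
    }
    printf("walked %u pages, cursor now at %#llx\n", cDone, (unsigned long long)GCPhys);
    return 0;
}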
437
438/**
439 * \#PF Handler for nested paging.
440 *
441 * @returns VBox status code (appropriate for trap handling and GC return).
442 * @param pVM The cross context VM structure.
443 * @param pVCpu The cross context virtual CPU structure.
444 * @param enmShwPagingMode Paging mode for the nested page tables.
445 * @param uErr The trap error code.
446 * @param pRegFrame Trap register frame.
447 * @param GCPhysFault The fault address.
448 */
449VMMR0DECL(int) PGMR0Trap0eHandlerNestedPaging(PVM pVM, PVMCPU pVCpu, PGMMODE enmShwPagingMode, RTGCUINT uErr,
450 PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault)
451{
452 int rc;
453
454 LogFlow(("PGMTrap0eHandler: uErr=%RGx GCPhysFault=%RGp eip=%RGv\n", uErr, GCPhysFault, (RTGCPTR)pRegFrame->rip));
455 STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0e, a);
456 STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );
457
458 /* AMD uses the host's paging mode; Intel has a single mode (EPT). */
459 AssertMsg( enmShwPagingMode == PGMMODE_32_BIT || enmShwPagingMode == PGMMODE_PAE || enmShwPagingMode == PGMMODE_PAE_NX
460 || enmShwPagingMode == PGMMODE_AMD64 || enmShwPagingMode == PGMMODE_AMD64_NX || enmShwPagingMode == PGMMODE_EPT,
461 ("enmShwPagingMode=%d\n", enmShwPagingMode));
462
463 /* Reserved shouldn't end up here. */
464 Assert(!(uErr & X86_TRAP_PF_RSVD));
465
466#ifdef VBOX_WITH_STATISTICS
467 /*
468 * Error code stats.
469 */
470 if (uErr & X86_TRAP_PF_US)
471 {
472 if (!(uErr & X86_TRAP_PF_P))
473 {
474 if (uErr & X86_TRAP_PF_RW)
475 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentWrite);
476 else
477 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentRead);
478 }
479 else if (uErr & X86_TRAP_PF_RW)
480 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSWrite);
481 else if (uErr & X86_TRAP_PF_RSVD)
482 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSReserved);
483 else if (uErr & X86_TRAP_PF_ID)
484 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNXE);
485 else
486 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSRead);
487 }
488 else
489 { /* Supervisor */
490 if (!(uErr & X86_TRAP_PF_P))
491 {
492 if (uErr & X86_TRAP_PF_RW)
493 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentWrite);
494 else
495 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentRead);
496 }
497 else if (uErr & X86_TRAP_PF_RW)
498 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVWrite);
499 else if (uErr & X86_TRAP_PF_ID)
500 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSNXE);
501 else if (uErr & X86_TRAP_PF_RSVD)
502 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVReserved);
503 }
504#endif
505
506 /*
507 * Call the worker.
508 *
509 * Note! We pretend the guest is in protected mode without paging, so we
510 * can use existing code to build the nested page tables.
511 */
512/** @todo r=bird: Gotta love this nested paging hacking we're still carrying with us... (Split PGM_TYPE_NESTED.) */
513 bool fLockTaken = false;
514 switch (enmShwPagingMode)
515 {
516 case PGMMODE_32_BIT:
517 rc = PGM_BTH_NAME_32BIT_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
518 break;
519 case PGMMODE_PAE:
520 case PGMMODE_PAE_NX:
521 rc = PGM_BTH_NAME_PAE_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
522 break;
523 case PGMMODE_AMD64:
524 case PGMMODE_AMD64_NX:
525 rc = PGM_BTH_NAME_AMD64_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
526 break;
527 case PGMMODE_EPT:
528 rc = PGM_BTH_NAME_EPT_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
529 break;
530 default:
531 AssertFailed();
532 rc = VERR_INVALID_PARAMETER;
533 break;
534 }
535 if (fLockTaken)
536 {
537 PGM_LOCK_ASSERT_OWNER(pVM);
538 pgmUnlock(pVM);
539 }
540
541 if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
542 rc = VINF_SUCCESS;
543 /*
544 * Handle the case where we cannot interpret the instruction because we cannot get the guest physical address
545 * via its page tables, see @bugref{6043}.
546 */
547 else if ( rc == VERR_PAGE_NOT_PRESENT /* SMP only ; disassembly might fail. */
548 || rc == VERR_PAGE_TABLE_NOT_PRESENT /* seen with UNI & SMP */
549 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT /* seen with SMP */
550 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT) /* precaution */
551 {
552 Log(("WARNING: Unexpected VERR_PAGE_TABLE_NOT_PRESENT (%d) for page fault at %RGp error code %x (rip=%RGv)\n", rc, GCPhysFault, uErr, pRegFrame->rip));
553 /* Some kind of inconsistency in the SMP case; it's safe to just execute the instruction again; not sure about
554 single VCPU VMs though. */
555 rc = VINF_SUCCESS;
556 }
557
558 STAM_STATS({ if (!pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution))
559 pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2Misc; });
560 STAM_PROFILE_STOP_EX(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0e, pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution), a);
561 return rc;
562}
563
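Both the statistics block and the dispatch in the handler above key off the x86 page-fault error code, whose bit layout is architectural. A small decoder can therefore be sketched independently of the VMM; the constants below mirror the X86_TRAP_PF_* flags used in the code:

#include <stdint.h>
#include <stdio.h>

/* x86 #PF error code bits (architectural). */
#define PF_P     0x01   /* 0 = page not present, 1 = protection violation */
#define PF_RW    0x02   /* write access                                   */
#define PF_US    0x04   /* user-mode access                               */
#define PF_RSVD  0x08   /* reserved bit set in a paging structure         */
#define PF_ID    0x10   /* instruction fetch                              */

/* Roughly the same classification the statistics code above performs. */
static const char *classify(uint32_t uErr)
{
    if (!(uErr & PF_P))
        return (uErr & PF_RW) ? "not-present write" : "not-present read";
    if (uErr & PF_RSVD)
        return "reserved bit violation";
    if (uErr & PF_ID)
        return "instruction fetch (NX)";
    return (uErr & PF_RW) ? "write protection fault" : "read protection fault";
}

int main(void)
{
    printf("uErr=%#x -> %s\n", 0x06u, classify(0x06));   /* user, not-present write */
    printf("uErr=%#x -> %s\n", 0x0bu, classify(0x0b));   /* reserved-bit violation  */
    return 0;
}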
564
565/**
566 * \#PF Handler for deliberate nested paging misconfiguration (reserved bit)
567 * employed for MMIO pages.
568 *
569 * @returns VBox status code (appropriate for trap handling and GC return).
570 * @param pVM The cross context VM structure.
571 * @param pVCpu The cross context virtual CPU structure.
572 * @param enmShwPagingMode Paging mode for the nested page tables.
573 * @param pRegFrame Trap register frame.
574 * @param GCPhysFault The fault address.
575 * @param uErr The error code, UINT32_MAX if not available
576 * (VT-x).
577 */
578VMMR0DECL(VBOXSTRICTRC) PGMR0Trap0eHandlerNPMisconfig(PVM pVM, PVMCPU pVCpu, PGMMODE enmShwPagingMode,
579 PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, uint32_t uErr)
580{
581#ifdef PGM_WITH_MMIO_OPTIMIZATIONS
582 STAM_PROFILE_START(&pVCpu->CTX_SUFF(pStats)->StatR0NpMiscfg, a);
583 VBOXSTRICTRC rc;
584
585 /*
586 * Try to look up the all-access physical handler for the address.
587 */
588 pgmLock(pVM);
589 PPGMPHYSHANDLER pHandler = pgmHandlerPhysicalLookup(pVM, GCPhysFault);
590 PPGMPHYSHANDLERTYPEINT pHandlerType = RT_LIKELY(pHandler) ? PGMPHYSHANDLER_GET_TYPE(pVM, pHandler) : NULL;
591 if (RT_LIKELY(pHandler && pHandlerType->enmKind != PGMPHYSHANDLERKIND_WRITE))
592 {
593 /*
594 * If the handler has aliased pages or pages that have been temporarily
595 * disabled, we'll have to take a detour to make sure we resync them
596 * to avoid lots of unnecessary exits.
597 */
598 PPGMPAGE pPage;
599 if ( ( pHandler->cAliasedPages
600 || pHandler->cTmpOffPages)
601 && ( (pPage = pgmPhysGetPage(pVM, GCPhysFault)) == NULL
602 || PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_DISABLED)
603 )
604 {
605 Log(("PGMR0Trap0eHandlerNPMisconfig: Resyncing aliases / tmp-off page at %RGp (uErr=%#x) %R[pgmpage]\n", GCPhysFault, uErr, pPage));
606 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0NpMiscfgSyncPage);
607 rc = pgmShwSyncNestedPageLocked(pVCpu, GCPhysFault, 1 /*cPages*/, enmShwPagingMode);
608 pgmUnlock(pVM);
609 }
610 else
611 {
612 if (pHandlerType->CTX_SUFF(pfnPfHandler))
613 {
614 void *pvUser = pHandler->CTX_SUFF(pvUser);
615 STAM_PROFILE_START(&pHandler->Stat, h);
616 pgmUnlock(pVM);
617
618 Log6(("PGMR0Trap0eHandlerNPMisconfig: calling %p(,%#x,,%RGp,%p)\n", pHandlerType->CTX_SUFF(pfnPfHandler), uErr, GCPhysFault, pvUser));
619 rc = pHandlerType->CTX_SUFF(pfnPfHandler)(pVM, pVCpu, uErr == UINT32_MAX ? RTGCPTR_MAX : uErr, pRegFrame,
620 GCPhysFault, GCPhysFault, pvUser);
621
622#ifdef VBOX_WITH_STATISTICS
623 pgmLock(pVM);
624 pHandler = pgmHandlerPhysicalLookup(pVM, GCPhysFault);
625 if (pHandler)
626 STAM_PROFILE_STOP(&pHandler->Stat, h);
627 pgmUnlock(pVM);
628#endif
629 }
630 else
631 {
632 pgmUnlock(pVM);
633 Log(("PGMR0Trap0eHandlerNPMisconfig: %RGp (uErr=%#x) -> R3\n", GCPhysFault, uErr));
634 rc = VINF_EM_RAW_EMULATE_INSTR;
635 }
636 }
637 }
638 else
639 {
640 /*
641 * Must be out of sync, so do a SyncPage and restart the instruction.
642 *
643 * ASSUMES that ALL handlers are page aligned and cover whole pages
644 * (assumption asserted in PGMHandlerPhysicalRegisterEx).
645 */
646 Log(("PGMR0Trap0eHandlerNPMisconfig: Out of sync page at %RGp (uErr=%#x)\n", GCPhysFault, uErr));
647 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0NpMiscfgSyncPage);
648 rc = pgmShwSyncNestedPageLocked(pVCpu, GCPhysFault, 1 /*cPages*/, enmShwPagingMode);
649 pgmUnlock(pVM);
650 }
651
652 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0NpMiscfg, a);
653 return rc;
654
655#else
656 AssertLogRelFailed();
657 return VERR_PGM_NOT_USED_IN_MODE;
658#endif
659}
660
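The misconfiguration handler first asks PGM whether a physical access handler covers the faulting address and only falls back to a nested-page resync when none is found. A toy version of that range lookup, with a hypothetical handler table and a linear scan standing in for the indexed lookup the real pgmHandlerPhysicalLookup performs:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical, much-simplified physical access handler record. */
typedef struct PHYSHANDLER
{
    uint64_t    GCPhysStart;   /* first byte covered */
    uint64_t    GCPhysLast;    /* last byte covered  */
    const char *pszDesc;
} PHYSHANDLER;

static const PHYSHANDLER g_aHandlers[] =
{
    { 0xfee00000u, 0xfee00fffu, "APIC page"  },
    { 0xe0000000u, 0xe3ffffffu, "VRAM range" },
};

/* Return the handler covering GCPhysFault, or NULL when the page is merely
   out of sync and needs a resync instead. */
static const PHYSHANDLER *lookup(uint64_t GCPhysFault)
{
    for (size_t i = 0; i < sizeof(g_aHandlers) / sizeof(g_aHandlers[0]); i++)
        if (   GCPhysFault >= g_aHandlers[i].GCPhysStart
            && GCPhysFault <= g_aHandlers[i].GCPhysLast)
            return &g_aHandlers[i];
    return NULL;
}

int main(void)
{
    const PHYSHANDLER *pHit = lookup(0xfee00300u);
    printf("fault at 0xfee00300 -> %s\n", pHit ? pHit->pszDesc : "out of sync, resync the page");
    return 0;
}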