VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/PGMR0.cpp@103099

Last change on this file since 103099 was 100966, checked in by vboxsync, 16 months ago

VMM/PGM,IEM: Prepare work for write monitoring page containing recompiled code. bugref:10369

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 54.2 KB
1/* $Id: PGMR0.cpp 100966 2023-08-24 23:23:58Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Ring-0.
4 */
5
6/*
7 * Copyright (C) 2007-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_PGM
33#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
34#include <VBox/rawpci.h>
35#include <VBox/vmm/pgm.h>
36#include <VBox/vmm/iem.h>
37#include <VBox/vmm/gmm.h>
38#include "PGMInternal.h"
39#include <VBox/vmm/pdmdev.h>
40#include <VBox/vmm/vmcc.h>
41#include <VBox/vmm/gvm.h>
42#include "PGMInline.h"
43#include <VBox/log.h>
44#include <VBox/err.h>
45#include <iprt/assert.h>
46#include <iprt/mem.h>
47#include <iprt/memobj.h>
48#include <iprt/process.h>
49#include <iprt/rand.h>
50#include <iprt/string.h>
51#include <iprt/time.h>
52
53
54/*
55 * Instantiate the ring-0 header/code templates.
56 */
57/** @todo r=bird: Gotta love this nested paging hacking we're still carrying with us... (Split PGM_TYPE_NESTED.) */
58#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
59#include "PGMR0Bth.h"
60#undef PGM_BTH_NAME
61
62#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
63#include "PGMR0Bth.h"
64#undef PGM_BTH_NAME
65
66#define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
67#include "PGMR0Bth.h"
68#undef PGM_BTH_NAME
69
70#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
71#include "PGMR0Bth.h"
72#undef PGM_BTH_NAME
73
74
75/**
76 * Initializes the per-VM data for the PGM.
77 *
78 * This is called from under the GVMM lock, so it should do no more than
79 * initialize the data needed for PGMR0CleanupVM and others to work smoothly.
80 *
81 * @returns VBox status code.
82 * @param pGVM Pointer to the global VM structure.
83 * @param hMemObj Handle to the memory object backing pGVM.
84 */
85VMMR0_INT_DECL(int) PGMR0InitPerVMData(PGVM pGVM, RTR0MEMOBJ hMemObj)
86{
87 AssertCompile(sizeof(pGVM->pgm.s) <= sizeof(pGVM->pgm.padding));
88 AssertCompile(sizeof(pGVM->pgmr0.s) <= sizeof(pGVM->pgmr0.padding));
89
90 AssertCompile(RT_ELEMENTS(pGVM->pgmr0.s.ahPoolMemObjs) == RT_ELEMENTS(pGVM->pgmr0.s.ahPoolMapObjs));
91 for (uint32_t i = 0; i < RT_ELEMENTS(pGVM->pgmr0.s.ahPoolMemObjs); i++)
92 {
93 pGVM->pgmr0.s.ahPoolMemObjs[i] = NIL_RTR0MEMOBJ;
94 pGVM->pgmr0.s.ahPoolMapObjs[i] = NIL_RTR0MEMOBJ;
95 }
96 pGVM->pgmr0.s.hPhysHandlerMemObj = NIL_RTR0MEMOBJ;
97 pGVM->pgmr0.s.hPhysHandlerMapObj = NIL_RTR0MEMOBJ;
98
99 /*
100 * Initialize the handler type table with return-to-ring-3 callbacks so we
101 * don't have to do anything special for ring-3-only registrations.
102 *
103 * Note! The random bits of the hType value are mainly there to prevent
104 * trouble with zero-initialized handles w/o needing to sacrifice handle zero.
105 */
106 for (size_t i = 0; i < RT_ELEMENTS(pGVM->pgm.s.aPhysHandlerTypes); i++)
107 {
108 pGVM->pgmr0.s.aPhysHandlerTypes[i].hType = i | (RTRandU64() & ~(uint64_t)PGMPHYSHANDLERTYPE_IDX_MASK);
109 pGVM->pgmr0.s.aPhysHandlerTypes[i].enmKind = PGMPHYSHANDLERKIND_INVALID;
110 pGVM->pgmr0.s.aPhysHandlerTypes[i].pfnHandler = pgmR0HandlerPhysicalHandlerToRing3;
111 pGVM->pgmr0.s.aPhysHandlerTypes[i].pfnPfHandler = pgmR0HandlerPhysicalPfHandlerToRing3;
112
113 pGVM->pgm.s.aPhysHandlerTypes[i].hType = pGVM->pgmr0.s.aPhysHandlerTypes[i].hType;
114 pGVM->pgm.s.aPhysHandlerTypes[i].enmKind = PGMPHYSHANDLERKIND_INVALID;
115 }
116
117 /*
118 * Get the physical address of the ZERO and MMIO-dummy pages.
119 */
120 AssertReturn(((uintptr_t)&pGVM->pgm.s.abZeroPg[0] & HOST_PAGE_OFFSET_MASK) == 0, VERR_INTERNAL_ERROR_2);
121 pGVM->pgm.s.HCPhysZeroPg = RTR0MemObjGetPagePhysAddr(hMemObj, RT_UOFFSETOF_DYN(GVM, pgm.s.abZeroPg) >> HOST_PAGE_SHIFT);
122 AssertReturn(pGVM->pgm.s.HCPhysZeroPg != NIL_RTHCPHYS, VERR_INTERNAL_ERROR_3);
123
124 AssertReturn(((uintptr_t)&pGVM->pgm.s.abMmioPg[0] & HOST_PAGE_OFFSET_MASK) == 0, VERR_INTERNAL_ERROR_2);
125 pGVM->pgm.s.HCPhysMmioPg = RTR0MemObjGetPagePhysAddr(hMemObj, RT_UOFFSETOF_DYN(GVM, pgm.s.abMmioPg) >> HOST_PAGE_SHIFT);
126 AssertReturn(pGVM->pgm.s.HCPhysMmioPg != NIL_RTHCPHYS, VERR_INTERNAL_ERROR_3);
127
128 pGVM->pgm.s.HCPhysInvMmioPg = pGVM->pgm.s.HCPhysMmioPg;
129
130 return RTCritSectInit(&pGVM->pgmr0.s.PoolGrowCritSect);
131}
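/*
 * Illustrative sketch (comment only): with the hType layout established above,
 * the table index sits in the low bits and the random upper bits keep a
 * zero-initialized handle from ever looking valid.  A consumer recovers the
 * entry the same way PGMR0HandlerPhysicalTypeSetUpContext does further down:
 *
 *      uintptr_t const                idxType = hType & PGMPHYSHANDLERTYPE_IDX_MASK;
 *      PPGMPHYSHANDLERTYPEINTR0 const pTypeR0 = &pGVM->pgmr0.s.aPhysHandlerTypes[idxType];
 *      Assert(pTypeR0->hType == hType);
 */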
132
133
134/**
135 * Initialize the per-VM PGM for ring-0.
136 *
137 * @returns VBox status code.
138 * @param pGVM Pointer to the global VM structure.
139 */
140VMMR0_INT_DECL(int) PGMR0InitVM(PGVM pGVM)
141{
142 /*
143 * Set up the ring-0 context for our access handlers.
144 */
145 int rc = PGMR0HandlerPhysicalTypeSetUpContext(pGVM, PGMPHYSHANDLERKIND_WRITE, 0 /*fFlags*/,
146 pgmPhysRomWriteHandler, pgmPhysRomWritePfHandler,
147 "ROM write protection", pGVM->pgm.s.hRomPhysHandlerType);
148 AssertLogRelRCReturn(rc, rc);
149
150 /*
151 * Register the physical access handler doing dirty MMIO2 tracing.
152 */
153 rc = PGMR0HandlerPhysicalTypeSetUpContext(pGVM, PGMPHYSHANDLERKIND_WRITE, PGMPHYSHANDLER_F_KEEP_PGM_LOCK,
154 pgmPhysMmio2WriteHandler, pgmPhysMmio2WritePfHandler,
155 "MMIO2 dirty page tracing", pGVM->pgm.s.hMmio2DirtyPhysHandlerType);
156 AssertLogRelRCReturn(rc, rc);
157
158 /*
159 * The page pool.
160 */
161 return pgmR0PoolInitVM(pGVM);
162}
163
164
165/**
166 * Called at the end of the ring-0 initialization to seal access handler types.
167 *
168 * @param pGVM Pointer to the global VM structure.
169 */
170VMMR0_INT_DECL(void) PGMR0DoneInitVM(PGVM pGVM)
171{
172 /*
173 * Seal all the access handler types. Does both ring-3 and ring-0.
174 *
175 * Note! Since this is a void function and we don't have any ring-0 state
176 * machinery for marking the VM as bogus, this code will just
177 * override corrupted values as best it can.
178 */
179 AssertCompile(RT_ELEMENTS(pGVM->pgmr0.s.aPhysHandlerTypes) == RT_ELEMENTS(pGVM->pgm.s.aPhysHandlerTypes));
180 for (size_t i = 0; i < RT_ELEMENTS(pGVM->pgmr0.s.aPhysHandlerTypes); i++)
181 {
182 PPGMPHYSHANDLERTYPEINTR0 const pTypeR0 = &pGVM->pgmr0.s.aPhysHandlerTypes[i];
183 PPGMPHYSHANDLERTYPEINTR3 const pTypeR3 = &pGVM->pgm.s.aPhysHandlerTypes[i];
184 PGMPHYSHANDLERKIND const enmKindR3 = pTypeR3->enmKind;
185 PGMPHYSHANDLERKIND const enmKindR0 = pTypeR0->enmKind;
186 AssertLogRelMsgStmt(pTypeR0->hType == pTypeR3->hType,
187 ("i=%u %#RX64 vs %#RX64 %s\n", i, pTypeR0->hType, pTypeR3->hType, pTypeR0->pszDesc),
188 pTypeR3->hType = pTypeR0->hType);
189 switch (enmKindR3)
190 {
191 case PGMPHYSHANDLERKIND_ALL:
192 case PGMPHYSHANDLERKIND_MMIO:
193 if ( enmKindR0 == enmKindR3
194 || enmKindR0 == PGMPHYSHANDLERKIND_INVALID)
195 {
196 pTypeR3->fRing0Enabled = enmKindR0 == enmKindR3;
197 pTypeR0->uState = PGM_PAGE_HNDL_PHYS_STATE_ALL;
198 pTypeR3->uState = PGM_PAGE_HNDL_PHYS_STATE_ALL;
199 continue;
200 }
201 break;
202
203 case PGMPHYSHANDLERKIND_WRITE:
204 if ( enmKindR0 == enmKindR3
205 || enmKindR0 == PGMPHYSHANDLERKIND_INVALID)
206 {
207 pTypeR3->fRing0Enabled = enmKindR0 == enmKindR3;
208 pTypeR0->uState = PGM_PAGE_HNDL_PHYS_STATE_WRITE;
209 pTypeR3->uState = PGM_PAGE_HNDL_PHYS_STATE_WRITE;
210 continue;
211 }
212 break;
213
214 default:
215 AssertLogRelMsgFailed(("i=%u enmKindR3=%d\n", i, enmKindR3));
216 RT_FALL_THROUGH();
217 case PGMPHYSHANDLERKIND_INVALID:
218 AssertLogRelMsg(enmKindR0 == PGMPHYSHANDLERKIND_INVALID,
219 ("i=%u enmKind=%d %s\n", i, enmKindR0, pTypeR0->pszDesc));
220 AssertLogRelMsg(pTypeR0->pfnHandler == pgmR0HandlerPhysicalHandlerToRing3,
221 ("i=%u pfnHandler=%p %s\n", i, pTypeR0->pfnHandler, pTypeR0->pszDesc));
222 AssertLogRelMsg(pTypeR0->pfnPfHandler == pgmR0HandlerPhysicalPfHandlerToRing3,
223 ("i=%u pfnPfHandler=%p %s\n", i, pTypeR0->pfnPfHandler, pTypeR0->pszDesc));
224
225 /* Unused or bad ring-3 entry; make it and the ring-0 one harmless. */
226 pTypeR3->enmKind = PGMPHYSHANDLERKIND_END;
227 pTypeR3->fRing0DevInsIdx = false;
228 pTypeR3->fKeepPgmLock = false;
229 pTypeR3->uState = 0;
230 break;
231 }
232 pTypeR3->fRing0Enabled = false;
233
234 /* Make sure the entry is harmless and goes to ring-3. */
235 pTypeR0->enmKind = PGMPHYSHANDLERKIND_END;
236 pTypeR0->pfnHandler = pgmR0HandlerPhysicalHandlerToRing3;
237 pTypeR0->pfnPfHandler = pgmR0HandlerPhysicalPfHandlerToRing3;
238 pTypeR0->fRing0DevInsIdx = false;
239 pTypeR0->fKeepPgmLock = false;
240 pTypeR0->uState = 0;
241 pTypeR0->pszDesc = "invalid";
242 }
243}
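/*
 * Net effect of the sealing above (comment only): fRing0Enabled ends up true
 * only for types whose ring-0 kind matches the ring-3 registration, i.e. where
 * PGMR0HandlerPhysicalTypeSetUpContext was called for the type; everything
 * else keeps the return-to-ring-3 callbacks installed by PGMR0InitPerVMData.
 */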
244
245
246/**
247 * Cleans up any loose ends before the GVM structure is destroyed.
248 */
249VMMR0_INT_DECL(void) PGMR0CleanupVM(PGVM pGVM)
250{
251 for (uint32_t i = 0; i < RT_ELEMENTS(pGVM->pgmr0.s.ahPoolMemObjs); i++)
252 {
253 if (pGVM->pgmr0.s.ahPoolMapObjs[i] != NIL_RTR0MEMOBJ)
254 {
255 int rc = RTR0MemObjFree(pGVM->pgmr0.s.ahPoolMapObjs[i], true /*fFreeMappings*/);
256 AssertRC(rc);
257 pGVM->pgmr0.s.ahPoolMapObjs[i] = NIL_RTR0MEMOBJ;
258 }
259
260 if (pGVM->pgmr0.s.ahPoolMemObjs[i] != NIL_RTR0MEMOBJ)
261 {
262 int rc = RTR0MemObjFree(pGVM->pgmr0.s.ahPoolMemObjs[i], true /*fFreeMappings*/);
263 AssertRC(rc);
264 pGVM->pgmr0.s.ahPoolMemObjs[i] = NIL_RTR0MEMOBJ;
265 }
266 }
267
268 if (pGVM->pgmr0.s.hPhysHandlerMapObj != NIL_RTR0MEMOBJ)
269 {
270 int rc = RTR0MemObjFree(pGVM->pgmr0.s.hPhysHandlerMapObj, true /*fFreeMappings*/);
271 AssertRC(rc);
272 pGVM->pgmr0.s.hPhysHandlerMapObj = NIL_RTR0MEMOBJ;
273 }
274
275 if (pGVM->pgmr0.s.hPhysHandlerMemObj != NIL_RTR0MEMOBJ)
276 {
277 int rc = RTR0MemObjFree(pGVM->pgmr0.s.hPhysHandlerMemObj, true /*fFreeMappings*/);
278 AssertRC(rc);
279 pGVM->pgmr0.s.hPhysHandlerMemObj = NIL_RTR0MEMOBJ;
280 }
281
282 if (RTCritSectIsInitialized(&pGVM->pgmr0.s.PoolGrowCritSect))
283 RTCritSectDelete(&pGVM->pgmr0.s.PoolGrowCritSect);
284}
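/*
 * Note on the teardown order above (comment only): each mapping object
 * (ahPoolMapObjs, hPhysHandlerMapObj) is freed before the memory object it
 * maps, matching the usual pattern of dropping mappings before releasing the
 * backing memory.
 */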
285
286
287/**
288 * Worker function for PGMR3PhysAllocateHandyPages and pgmPhysEnsureHandyPage.
289 *
290 * @returns The following VBox status codes.
291 * @retval VINF_SUCCESS on success. FF cleared.
292 * @retval VINF_EM_NO_MEMORY if we're out of memory. The FF is set in this case.
293 *
294 * @param pGVM The global (ring-0) VM structure.
295 * @param idCpu The ID of the calling EMT.
296 * @param fRing3 Set if the caller is ring-3. Determines whether to
297 * return VINF_EM_NO_MEMORY or not.
298 *
299 * @thread EMT(idCpu)
300 *
301 * @remarks Must be called from within the PGM critical section. The caller
302 * must clear the new pages.
303 */
304int pgmR0PhysAllocateHandyPages(PGVM pGVM, VMCPUID idCpu, bool fRing3)
305{
306 /*
307 * Validate inputs.
308 */
309 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID); /* caller already checked this, but just to be sure. */
310 Assert(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf());
311 PGM_LOCK_ASSERT_OWNER_EX(pGVM, &pGVM->aCpus[idCpu]);
312
313 /*
314 * Check for error injection.
315 */
316 if (RT_LIKELY(!pGVM->pgm.s.fErrInjHandyPages))
317 { /* likely */ }
318 else
319 return VERR_NO_MEMORY;
320
321 /*
322 * Try to allocate a full set of handy pages.
323 */
324 uint32_t const iFirst = pGVM->pgm.s.cHandyPages;
325 AssertMsgReturn(iFirst <= RT_ELEMENTS(pGVM->pgm.s.aHandyPages), ("%#x\n", iFirst), VERR_PGM_HANDY_PAGE_IPE);
326
327 uint32_t const cPages = RT_ELEMENTS(pGVM->pgm.s.aHandyPages) - iFirst;
328 if (!cPages)
329 return VINF_SUCCESS;
330
331 int rc = GMMR0AllocateHandyPages(pGVM, idCpu, cPages, cPages, &pGVM->pgm.s.aHandyPages[iFirst]);
332 if (RT_SUCCESS(rc))
333 {
334 uint32_t const cHandyPages = RT_ELEMENTS(pGVM->pgm.s.aHandyPages); /** @todo allow allocating less... */
335 pGVM->pgm.s.cHandyPages = cHandyPages;
336 VM_FF_CLEAR(pGVM, VM_FF_PGM_NEED_HANDY_PAGES);
337 VM_FF_CLEAR(pGVM, VM_FF_PGM_NO_MEMORY);
338
339#ifdef VBOX_STRICT
340 for (uint32_t i = 0; i < cHandyPages; i++)
341 {
342 Assert(pGVM->pgm.s.aHandyPages[i].idPage != NIL_GMM_PAGEID);
343 Assert(pGVM->pgm.s.aHandyPages[i].idPage <= GMM_PAGEID_LAST);
344 Assert(pGVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
345 Assert(pGVM->pgm.s.aHandyPages[i].HCPhysGCPhys != NIL_GMMPAGEDESC_PHYS);
346 Assert(!(pGVM->pgm.s.aHandyPages[i].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
347 }
348#endif
349
350 /*
351 * Clear the pages.
352 */
353 for (uint32_t iPage = iFirst; iPage < cHandyPages; iPage++)
354 {
355 PGMMPAGEDESC pPage = &pGVM->pgm.s.aHandyPages[iPage];
356 if (!pPage->fZeroed)
357 {
358 void *pv = NULL;
359#ifdef VBOX_WITH_LINEAR_HOST_PHYS_MEM
360 rc = SUPR0HCPhysToVirt(pPage->HCPhysGCPhys, &pv);
361#else
362 rc = GMMR0PageIdToVirt(pGVM, pPage->idPage, &pv);
363#endif
364 AssertMsgRCReturn(rc, ("idPage=%#x HCPhys=%RHp rc=%Rrc\n", pPage->idPage, pPage->HCPhysGCPhys, rc), rc);
365
366 RT_BZERO(pv, GUEST_PAGE_SIZE);
367 pPage->fZeroed = true;
368 }
369#ifdef VBOX_STRICT
370 else
371 {
372 void *pv = NULL;
373# ifdef VBOX_WITH_LINEAR_HOST_PHYS_MEM
374 rc = SUPR0HCPhysToVirt(pPage->HCPhysGCPhys, &pv);
375# else
376 rc = GMMR0PageIdToVirt(pGVM, pPage->idPage, &pv);
377# endif
378 AssertMsgRCReturn(rc, ("idPage=%#x HCPhys=%RHp rc=%Rrc\n", pPage->idPage, pPage->HCPhysGCPhys, rc), rc);
379 AssertReturn(ASMMemIsZero(pv, GUEST_PAGE_SIZE), VERR_PGM_HANDY_PAGE_IPE);
380 }
381#endif
382 Log3(("PGMR0PhysAllocateHandyPages: idPage=%#x HCPhys=%RGp\n", pPage->idPage, pPage->HCPhysGCPhys));
383 }
384 }
385 else
386 {
387 /*
388 * We should never get here unless there is a genuine shortage of
389 * memory (or some internal error). Flag the error so the VM can be
390 * suspended ASAP and the user informed. If we're totally out of
391 * handy pages we will return failure.
392 */
393 /* Report the failure. */
394 LogRel(("PGM: Failed to procure handy pages; rc=%Rrc cHandyPages=%#x\n"
395 " cAllPages=%#x cPrivatePages=%#x cSharedPages=%#x cZeroPages=%#x\n",
396 rc, pGVM->pgm.s.cHandyPages,
397 pGVM->pgm.s.cAllPages, pGVM->pgm.s.cPrivatePages, pGVM->pgm.s.cSharedPages, pGVM->pgm.s.cZeroPages));
398
399 GMMMEMSTATSREQ Stats = { { SUPVMMR0REQHDR_MAGIC, sizeof(Stats) }, 0, 0, 0, 0, 0 };
400 if (RT_SUCCESS(GMMR0QueryMemoryStatsReq(pGVM, idCpu, &Stats)))
401 LogRel(("GMM: Statistics:\n"
402 " Allocated pages: %RX64\n"
403 " Free pages: %RX64\n"
404 " Shared pages: %RX64\n"
405 " Maximum pages: %RX64\n"
406 " Ballooned pages: %RX64\n",
407 Stats.cAllocPages, Stats.cFreePages, Stats.cSharedPages, Stats.cMaxPages, Stats.cBalloonedPages));
408
409 if ( rc != VERR_NO_MEMORY
410 && rc != VERR_NO_PHYS_MEMORY
411 && rc != VERR_LOCK_FAILED)
412 for (uint32_t iPage = 0; iPage < RT_ELEMENTS(pGVM->pgm.s.aHandyPages); iPage++)
413 LogRel(("PGM: aHandyPages[#%#04x] = {.HCPhysGCPhys=%RHp, .idPage=%#08x, .idSharedPage=%#08x}\n",
414 iPage, pGVM->pgm.s.aHandyPages[iPage].HCPhysGCPhys, pGVM->pgm.s.aHandyPages[iPage].idPage,
415 pGVM->pgm.s.aHandyPages[iPage].idSharedPage));
416
417 /* Set the FFs and adjust rc. */
418 VM_FF_SET(pGVM, VM_FF_PGM_NEED_HANDY_PAGES);
419 VM_FF_SET(pGVM, VM_FF_PGM_NO_MEMORY);
420 if (!fRing3)
421 if ( rc == VERR_NO_MEMORY
422 || rc == VERR_NO_PHYS_MEMORY
423 || rc == VERR_LOCK_FAILED
424 || rc == VERR_MAP_FAILED)
425 rc = VINF_EM_NO_MEMORY;
426 }
427
428 LogFlow(("PGMR0PhysAllocateHandyPages: cPages=%d rc=%Rrc\n", cPages, rc));
429 return rc;
430}
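/*
 * Illustrative ring-0 usage sketch (comment only; the real refill logic lives
 * in pgmPhysEnsureHandyPage and may differ in detail).  The caller owns the
 * PGM lock and passes fRing3=false, so a genuine shortage comes back as
 * VINF_EM_NO_MEMORY for the caller to propagate towards ring-3:
 *
 *      PGM_LOCK_ASSERT_OWNER(pGVM);
 *      int const rcRefill = pgmR0PhysAllocateHandyPages(pGVM, idCpu, false);
 *      if (rcRefill != VINF_SUCCESS)
 *          return rcRefill;        (VINF_EM_NO_MEMORY or a hard error)
 */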
431
432
433/**
434 * Worker function for PGMR3PhysAllocateHandyPages / VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES.
435 *
436 * @returns The following VBox status codes.
437 * @retval VINF_SUCCESS on success. FF cleared.
438 * @retval VINF_EM_NO_MEMORY if we're out of memory. The FF is set in this case.
439 *
440 * @param pGVM The global (ring-0) VM structure.
441 * @param idCpu The ID of the calling EMT.
442 *
443 * @thread EMT(idCpu)
444 *
445 * @remarks Must be called from within the PGM critical section. The caller
446 * must clear the new pages.
447 */
448VMMR0_INT_DECL(int) PGMR0PhysAllocateHandyPages(PGVM pGVM, VMCPUID idCpu)
449{
450 /*
451 * Validate inputs.
452 */
453 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID); /* caller already checked this, but just to be sure. */
454 AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_NOT_OWNER);
455
456 /*
457 * Enter the PGM lock and call the worker.
458 */
459 int rc = PGM_LOCK(pGVM);
460 if (RT_SUCCESS(rc))
461 {
462 rc = pgmR0PhysAllocateHandyPages(pGVM, idCpu, true /*fRing3*/);
463 PGM_UNLOCK(pGVM);
464 }
465 return rc;
466}
467
468
469/**
470 * Flushes any changes pending in the handy page array.
471 *
472 * It is very important that this gets done when page sharing is enabled.
473 *
474 * @returns The following VBox status codes.
475 * @retval VINF_SUCCESS on success. FF cleared.
476 *
477 * @param pGVM The global (ring-0) VM structure.
478 * @param idCpu The ID of the calling EMT.
479 *
480 * @thread EMT(idCpu)
481 *
482 * @remarks Must be called from within the PGM critical section.
483 */
484VMMR0_INT_DECL(int) PGMR0PhysFlushHandyPages(PGVM pGVM, VMCPUID idCpu)
485{
486 /*
487 * Validate inputs.
488 */
489 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID); /* caller already checked this, but just to be sure. */
490 AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_NOT_OWNER);
491 PGM_LOCK_ASSERT_OWNER_EX(pGVM, &pGVM->aCpus[idCpu]);
492
493 /*
494 * Push any pending handy page changes to GMM (no new allocation here).
495 */
496 uint32_t iFirst = pGVM->pgm.s.cHandyPages;
497 AssertReturn(iFirst <= RT_ELEMENTS(pGVM->pgm.s.aHandyPages), VERR_PGM_HANDY_PAGE_IPE);
498 uint32_t cPages = RT_ELEMENTS(pGVM->pgm.s.aHandyPages) - iFirst;
499 if (!cPages)
500 return VINF_SUCCESS;
501 int rc = GMMR0AllocateHandyPages(pGVM, idCpu, cPages, 0, &pGVM->pgm.s.aHandyPages[iFirst]);
502
503 LogFlow(("PGMR0PhysFlushHandyPages: cPages=%d rc=%Rrc\n", cPages, rc));
504 return rc;
505}
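/*
 * Note (comment only): unlike pgmR0PhysAllocateHandyPages above, which asks
 * GMM to both update and allocate a full set, the GMMR0AllocateHandyPages call
 * here passes 0 for the allocation count, so it only pushes the already
 * populated descriptors back to GMM without growing the handy set - hence the
 * "flush" in the name.
 */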
506
507
508/**
509 * Allocate a large page at @a GCPhys.
510 *
511 * @returns The following VBox status codes.
512 * @retval VINF_SUCCESS on success.
513 * @retval VINF_EM_NO_MEMORY if we're out of memory.
514 *
515 * @param pGVM The global (ring-0) VM structure.
516 * @param idCpu The ID of the calling EMT.
517 * @param GCPhys The guest physical address of the page.
518 *
519 * @thread EMT(idCpu)
520 *
521 * @remarks Must be called from within the PGM critical section. The caller
522 * must clear the new pages.
523 */
524int pgmR0PhysAllocateLargePage(PGVM pGVM, VMCPUID idCpu, RTGCPHYS GCPhys)
525{
526 STAM_PROFILE_START(&pGVM->pgm.s.Stats.StatLargePageAlloc2, a);
527 PGM_LOCK_ASSERT_OWNER_EX(pGVM, &pGVM->aCpus[idCpu]);
528
529 /*
530 * Allocate a large page.
531 */
532 RTHCPHYS HCPhys = NIL_GMMPAGEDESC_PHYS;
533 uint32_t idPage = NIL_GMM_PAGEID;
534
535 if (true) /** @todo pre-allocate 2-3 pages on the allocation thread. */
536 {
537 uint64_t const nsAllocStart = RTTimeNanoTS();
538 if (nsAllocStart < pGVM->pgm.s.nsLargePageRetry)
539 {
540 LogFlowFunc(("returns VERR_TRY_AGAIN - %RU64 ns left of hold off period\n", pGVM->pgm.s.nsLargePageRetry - nsAllocStart));
541 return VERR_TRY_AGAIN;
542 }
543
544 int const rc = GMMR0AllocateLargePage(pGVM, idCpu, _2M, &idPage, &HCPhys);
545
546 uint64_t const nsAllocEnd = RTTimeNanoTS();
547 uint64_t const cNsElapsed = nsAllocEnd - nsAllocStart;
548 STAM_REL_PROFILE_ADD_PERIOD(&pGVM->pgm.s.StatLargePageAlloc, cNsElapsed);
549 if (cNsElapsed < RT_NS_100MS)
550 pGVM->pgm.s.cLargePageLongAllocRepeats = 0;
551 else
552 {
553 /* If a large page allocation takes more than 100ms, back off for a
554 while so the host OS can reshuffle memory and make some more large
555 pages available. However, if it took over a second, just disable it. */
556 STAM_REL_COUNTER_INC(&pGVM->pgm.s.StatLargePageOverflow);
557 pGVM->pgm.s.cLargePageLongAllocRepeats++;
558 if (cNsElapsed > RT_NS_1SEC)
559 {
560 LogRel(("PGMR0PhysAllocateLargePage: Disabling large pages after %'RU64 ns allocation time.\n", cNsElapsed));
561 PGMSetLargePageUsage(pGVM, false);
562 }
563 else
564 {
565 Log(("PGMR0PhysAllocateLargePage: Suspending large page allocations for %u sec after %'RU64 ns allocation time.\n",
566 30 * pGVM->pgm.s.cLargePageLongAllocRepeats, cNsElapsed));
567 pGVM->pgm.s.nsLargePageRetry = nsAllocEnd + RT_NS_30SEC * pGVM->pgm.s.cLargePageLongAllocRepeats;
568 }
569 }
570
571 if (RT_FAILURE(rc))
572 {
573 Log(("PGMR0PhysAllocateLargePage: Failed: %Rrc\n", rc));
574 STAM_REL_COUNTER_INC(&pGVM->pgm.s.StatLargePageAllocFailed);
575 if (rc == VERR_NOT_SUPPORTED)
576 {
577 LogRel(("PGM: Disabling large pages because of VERR_NOT_SUPPORTED status.\n"));
578 PGMSetLargePageUsage(pGVM, false);
579 }
580 return rc;
581 }
582 }
583
584 STAM_PROFILE_STOP_START(&pGVM->pgm.s.Stats.StatLargePageAlloc2, &pGVM->pgm.s.Stats.StatLargePageSetup, a);
585
586 /*
587 * Enter the pages into PGM.
588 */
589 bool fFlushTLBs = false;
590 VBOXSTRICTRC rc = VINF_SUCCESS;
591 unsigned cLeft = _2M / GUEST_PAGE_SIZE;
592 while (cLeft-- > 0)
593 {
594 PPGMPAGE const pPage = pgmPhysGetPage(pGVM, GCPhys);
595 AssertReturn(pPage && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM && PGM_PAGE_IS_ZERO(pPage), VERR_PGM_UNEXPECTED_PAGE_STATE);
596
597 /* Make sure there are no zero mappings. */
598 uint16_t const u16Tracking = PGM_PAGE_GET_TRACKING(pPage);
599 if (u16Tracking == 0)
600 Assert(PGM_PAGE_GET_PTE_INDEX(pPage) == 0);
601 else
602 {
603 STAM_REL_COUNTER_INC(&pGVM->pgm.s.StatLargePageZeroEvict);
604 VBOXSTRICTRC rc3 = pgmPoolTrackUpdateGCPhys(pGVM, GCPhys, pPage, true /*fFlushPTEs*/, &fFlushTLBs);
605 Log(("PGMR0PhysAllocateLargePage: GCPhys=%RGp: tracking=%#x rc3=%Rrc\n", GCPhys, u16Tracking, VBOXSTRICTRC_VAL(rc3)));
606 if (rc3 != VINF_SUCCESS && rc == VINF_SUCCESS)
607 rc = rc3; /** @todo not perfect... */
608 PGM_PAGE_SET_PTE_INDEX(pGVM, pPage, 0);
609 PGM_PAGE_SET_TRACKING(pGVM, pPage, 0);
610 }
611
612 /* Setup the new page. */
613 PGM_PAGE_SET_HCPHYS(pGVM, pPage, HCPhys);
614 PGM_PAGE_SET_STATE(pGVM, pPage, PGM_PAGE_STATE_ALLOCATED);
615 PGM_PAGE_SET_PDE_TYPE(pGVM, pPage, PGM_PAGE_PDE_TYPE_PDE);
616 PGM_PAGE_SET_PAGEID(pGVM, pPage, idPage);
617 Log3(("PGMR0PhysAllocateLargePage: GCPhys=%RGp: idPage=%#x HCPhys=%RGp (old tracking=%#x)\n",
618 GCPhys, idPage, HCPhys, u16Tracking));
619
620 /* advance */
621 idPage++;
622 HCPhys += GUEST_PAGE_SIZE;
623 GCPhys += GUEST_PAGE_SIZE;
624 }
625
626 STAM_COUNTER_ADD(&pGVM->pgm.s.Stats.StatRZPageReplaceZero, _2M / GUEST_PAGE_SIZE);
627 pGVM->pgm.s.cZeroPages -= _2M / GUEST_PAGE_SIZE;
628 pGVM->pgm.s.cPrivatePages += _2M / GUEST_PAGE_SIZE;
629
630 /*
631 * Flush all TLBs.
632 */
633 if (!fFlushTLBs)
634 { /* likely as we shouldn't normally map zero pages */ }
635 else
636 {
637 STAM_REL_COUNTER_INC(&pGVM->pgm.s.StatLargePageTlbFlush);
638 PGM_INVL_ALL_VCPU_TLBS(pGVM);
639 }
640 /** @todo this is a little expensive (~3000 ticks) since we'll have to
641 * invalidate everything. Add a version to the TLB? */
642 pgmPhysInvalidatePageMapTLB(pGVM);
643 IEMTlbInvalidateAllPhysicalAllCpus(pGVM, idCpu, IEMTLBPHYSFLUSHREASON_ALLOCATED_LARGE);
644
645 STAM_PROFILE_STOP(&pGVM->pgm.s.Stats.StatLargePageSetup, a);
646#if 0 /** @todo returning info statuses here might not be a great idea... */
647 LogFlow(("PGMR0PhysAllocateLargePage: returns %Rrc\n", VBOXSTRICTRC_VAL(rc) ));
648 return VBOXSTRICTRC_TODO(rc);
649#else
650 LogFlow(("PGMR0PhysAllocateLargePage: returns VINF_SUCCESS (rc=%Rrc)\n", VBOXSTRICTRC_VAL(rc) ));
651 return VINF_SUCCESS;
652#endif
653}
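/*
 * Back-off arithmetic used above, spelled out (comment only): an allocation
 * that takes 100ms or more bumps cLargePageLongAllocRepeats and sets
 *
 *      nsLargePageRetry = nsAllocEnd + RT_NS_30SEC * cLargePageLongAllocRepeats
 *
 * so the hold-off grows 30s, 60s, 90s, ... with each slow attempt, while a
 * single allocation exceeding one second disables large pages entirely via
 * PGMSetLargePageUsage(pGVM, false).  The repeat counter is reset as soon as
 * an allocation completes in under 100ms again.
 */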
654
655
656/**
657 * Allocate a large page at @a GCPhys.
658 *
659 * @returns The following VBox status codes.
660 * @retval VINF_SUCCESS on success.
661 * @retval VINF_EM_NO_MEMORY if we're out of memory.
662 *
663 * @param pGVM The global (ring-0) VM structure.
664 * @param idCpu The ID of the calling EMT.
665 * @param GCPhys The guest physical address of the page.
666 *
667 * @thread EMT(idCpu)
668 *
669 * @remarks Must be called from within the PGM critical section. The caller
670 * must clear the new pages.
671 */
672VMMR0_INT_DECL(int) PGMR0PhysAllocateLargePage(PGVM pGVM, VMCPUID idCpu, RTGCPHYS GCPhys)
673{
674 /*
675 * Validate inputs.
676 */
677 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
678 AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_NOT_OWNER);
679
680 int rc = PGM_LOCK(pGVM);
681 AssertRCReturn(rc, rc);
682
683 /* The caller might have done this already, but since we're ring-3 callable we
684 need to make sure everything is fine before starting the allocation here. */
685 for (unsigned i = 0; i < _2M / GUEST_PAGE_SIZE; i++)
686 {
687 PPGMPAGE pPage;
688 rc = pgmPhysGetPageEx(pGVM, GCPhys + i * GUEST_PAGE_SIZE, &pPage);
689 AssertRCReturnStmt(rc, PGM_UNLOCK(pGVM), rc);
690 AssertReturnStmt(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM, PGM_UNLOCK(pGVM), VERR_PGM_PHYS_NOT_RAM);
691 AssertReturnStmt(PGM_PAGE_IS_ZERO(pPage), PGM_UNLOCK(pGVM), VERR_PGM_UNEXPECTED_PAGE_STATE);
692 }
693
694 /*
695 * Call common code.
696 */
697 rc = pgmR0PhysAllocateLargePage(pGVM, idCpu, GCPhys);
698
699 PGM_UNLOCK(pGVM);
700 return rc;
701}
702
703
704/**
705 * Locate an MMIO2 range.
706 *
707 * @returns Pointer to the MMIO2 range.
708 * @param pGVM The global (ring-0) VM structure.
709 * @param pDevIns The device instance owning the region.
710 * @param hMmio2 Handle to look up.
711 */
712DECLINLINE(PPGMREGMMIO2RANGE) pgmR0PhysMmio2Find(PGVM pGVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2)
713{
714 /*
715 * We use the lookup table here as list walking is tedious in ring-0 when using
716 * ring-3 pointers and this probably will require some kind of refactoring anyway.
717 */
718 if (hMmio2 <= RT_ELEMENTS(pGVM->pgm.s.apMmio2RangesR0) && hMmio2 != 0)
719 {
720 PPGMREGMMIO2RANGE pCur = pGVM->pgm.s.apMmio2RangesR0[hMmio2 - 1];
721 if (pCur && pCur->pDevInsR3 == pDevIns->pDevInsForR3)
722 {
723 Assert(pCur->idMmio2 == hMmio2);
724 return pCur;
725 }
726 Assert(!pCur);
727 }
728 return NULL;
729}
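/*
 * Handle convention (comment only): MMIO2 handles are 1-based indices into
 * apMmio2RangesR0, so hMmio2 == 1 resolves to slot 0.  The pDevInsForR3
 * comparison doubles as an ownership check; a handle belonging to a different
 * device instance trips the assertion and yields NULL.
 */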
730
731
732/**
733 * Worker for PDMDEVHLPR0::pfnMmio2SetUpContext.
734 *
735 * @returns VBox status code.
736 * @param pGVM The global (ring-0) VM structure.
737 * @param pDevIns The device instance.
738 * @param hMmio2 The MMIO2 region to map into ring-0 address space.
739 * @param offSub The offset into the region.
740 * @param cbSub The size of the mapping, zero meaning all the rest.
741 * @param ppvMapping Where to return the ring-0 mapping address.
742 */
743VMMR0_INT_DECL(int) PGMR0PhysMMIO2MapKernel(PGVM pGVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2,
744 size_t offSub, size_t cbSub, void **ppvMapping)
745{
746 AssertReturn(!(offSub & HOST_PAGE_OFFSET_MASK), VERR_UNSUPPORTED_ALIGNMENT);
747 AssertReturn(!(cbSub & HOST_PAGE_OFFSET_MASK), VERR_UNSUPPORTED_ALIGNMENT);
748
749 /*
750 * Translate hMmio2 into a range pointer.
751 */
752 PPGMREGMMIO2RANGE pFirstRegMmio = pgmR0PhysMmio2Find(pGVM, pDevIns, hMmio2);
753 AssertReturn(pFirstRegMmio, VERR_NOT_FOUND);
754#ifndef VBOX_WITH_LINEAR_HOST_PHYS_MEM
755 uint8_t * const pvR0 = (uint8_t *)pFirstRegMmio->pvR0;
756#else
757 RTR3PTR const pvR3 = pFirstRegMmio->pvR3;
758#endif
759 RTGCPHYS const cbReal = pFirstRegMmio->cbReal;
760 pFirstRegMmio = NULL;
761 ASMCompilerBarrier();
762
763 AssertReturn(offSub < cbReal, VERR_OUT_OF_RANGE);
764 if (cbSub == 0)
765 cbSub = cbReal - offSub;
766 else
767 AssertReturn(cbSub < cbReal && cbSub + offSub <= cbReal, VERR_OUT_OF_RANGE);
768
769 /*
770 * Do the mapping.
771 */
772#ifndef VBOX_WITH_LINEAR_HOST_PHYS_MEM
773 AssertPtr(pvR0);
774 *ppvMapping = pvR0 + offSub;
775 return VINF_SUCCESS;
776#else
777 return SUPR0PageMapKernel(pGVM->pSession, pvR3, (uint32_t)offSub, (uint32_t)cbSub, 0 /*fFlags*/, ppvMapping);
778#endif
779}
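/*
 * Illustrative caller sketch (comment only; real device code reaches this as
 * the worker behind PDMDEVHLPR0::pfnMmio2SetUpContext rather than calling it
 * directly).  Passing offSub=0 and cbSub=0 requests a mapping of the whole
 * region:
 *
 *      void *pvMmio2 = NULL;
 *      int const rc2 = PGMR0PhysMMIO2MapKernel(pGVM, pDevIns, hMmio2,
 *                                              0, 0, &pvMmio2);
 */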
780
781
782/**
783 * This is called during PGMR3Init to init the physical access handler allocator
784 * and tree.
785 *
786 * @returns VBox status code.
787 * @param pGVM Pointer to the global VM structure.
788 * @param cEntries Desired number of physical access handlers to reserve
789 * space for (will be adjusted).
790 * @thread EMT(0)
791 */
792VMMR0_INT_DECL(int) PGMR0PhysHandlerInitReqHandler(PGVM pGVM, uint32_t cEntries)
793{
794 /*
795 * Validate the input and state.
796 */
797 int rc = GVMMR0ValidateGVMandEMT(pGVM, 0);
798 AssertRCReturn(rc, rc);
799 VM_ASSERT_STATE_RETURN(pGVM, VMSTATE_CREATING, VERR_VM_INVALID_VM_STATE); /** @todo ring-0 safe state check. */
800
801 AssertReturn(pGVM->pgmr0.s.PhysHandlerAllocator.m_paNodes == NULL, VERR_WRONG_ORDER);
802 AssertReturn(pGVM->pgm.s.PhysHandlerAllocator.m_paNodes == NULL, VERR_WRONG_ORDER);
803
804 AssertLogRelMsgReturn(cEntries <= _64K, ("%#x\n", cEntries), VERR_OUT_OF_RANGE);
805
806 /*
807 * Calculate the table size and allocate it.
808 */
809 uint32_t cbTreeAndBitmap = 0;
810 uint32_t const cbTotalAligned = pgmHandlerPhysicalCalcTableSizes(&cEntries, &cbTreeAndBitmap);
811 RTR0MEMOBJ hMemObj = NIL_RTR0MEMOBJ;
812 rc = RTR0MemObjAllocPage(&hMemObj, cbTotalAligned, false);
813 if (RT_SUCCESS(rc))
814 {
815 RTR0MEMOBJ hMapObj = NIL_RTR0MEMOBJ;
816 rc = RTR0MemObjMapUser(&hMapObj, hMemObj, (RTR3PTR)-1, 0, RTMEM_PROT_READ | RTMEM_PROT_WRITE, RTR0ProcHandleSelf());
817 if (RT_SUCCESS(rc))
818 {
819 uint8_t *pb = (uint8_t *)RTR0MemObjAddress(hMemObj);
820 if (!RTR0MemObjWasZeroInitialized(hMemObj))
821 RT_BZERO(pb, cbTotalAligned);
822
823 pGVM->pgmr0.s.PhysHandlerAllocator.initSlabAllocator(cEntries, (PPGMPHYSHANDLER)&pb[cbTreeAndBitmap],
824 (uint64_t *)&pb[sizeof(PGMPHYSHANDLERTREE)]);
825 pGVM->pgmr0.s.pPhysHandlerTree = (PPGMPHYSHANDLERTREE)pb;
826 pGVM->pgmr0.s.pPhysHandlerTree->initWithAllocator(&pGVM->pgmr0.s.PhysHandlerAllocator);
827 pGVM->pgmr0.s.hPhysHandlerMemObj = hMemObj;
828 pGVM->pgmr0.s.hPhysHandlerMapObj = hMapObj;
829
830 AssertCompile(sizeof(pGVM->pgm.s.PhysHandlerAllocator) == sizeof(pGVM->pgmr0.s.PhysHandlerAllocator));
831 RTR3PTR R3Ptr = RTR0MemObjAddressR3(hMapObj);
832 pGVM->pgm.s.pPhysHandlerTree = R3Ptr;
833 pGVM->pgm.s.PhysHandlerAllocator.m_paNodes = R3Ptr + cbTreeAndBitmap;
834 pGVM->pgm.s.PhysHandlerAllocator.m_pbmAlloc = R3Ptr + sizeof(PGMPHYSHANDLERTREE);
835 pGVM->pgm.s.PhysHandlerAllocator.m_cNodes = cEntries;
836 pGVM->pgm.s.PhysHandlerAllocator.m_cErrors = 0;
837 pGVM->pgm.s.PhysHandlerAllocator.m_idxAllocHint = 0;
838 pGVM->pgm.s.PhysHandlerAllocator.m_uPadding = 0;
839 return VINF_SUCCESS;
840 }
841
842 RTR0MemObjFree(hMemObj, true /*fFreeMappings*/);
843 }
844 return rc;
845}
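/*
 * Layout of the single allocation set up above (comment only):
 *
 *      offset 0                            PGMPHYSHANDLERTREE header
 *      offset sizeof(PGMPHYSHANDLERTREE)   allocation bitmap
 *      offset cbTreeAndBitmap              node array (cEntries entries)
 *
 * Ring-3 sees the exact same layout through the RTR0MemObjMapUser mapping,
 * which is why the ring-3 allocator fields are simply pointed at
 * R3Ptr + cbTreeAndBitmap and R3Ptr + sizeof(PGMPHYSHANDLERTREE).
 */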
846
847
848/**
849 * Updates a physical access handler type with ring-0 callback functions.
850 *
851 * The handler type must first have been registered in ring-3.
852 *
853 * @returns VBox status code.
854 * @param pGVM The global (ring-0) VM structure.
855 * @param enmKind The kind of access handler.
856 * @param fFlags PGMPHYSHANDLER_F_XXX
857 * @param pfnHandler Pointer to the ring-0 handler callback.
858 * @param pfnPfHandler Pointer to the ring-0 \#PF handler callback.
859 * Can be NULL (not recommended though).
860 * @param pszDesc The type description.
861 * @param hType The handle to do ring-0 callback registrations for.
862 * @thread EMT(0)
863 */
864VMMR0_INT_DECL(int) PGMR0HandlerPhysicalTypeSetUpContext(PGVM pGVM, PGMPHYSHANDLERKIND enmKind, uint32_t fFlags,
865 PFNPGMPHYSHANDLER pfnHandler, PFNPGMRZPHYSPFHANDLER pfnPfHandler,
866 const char *pszDesc, PGMPHYSHANDLERTYPE hType)
867{
868 /*
869 * Validate input.
870 */
871 AssertPtrReturn(pfnHandler, VERR_INVALID_POINTER);
872 AssertPtrNullReturn(pfnPfHandler, VERR_INVALID_POINTER);
873
874 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
875 AssertReturn( enmKind == PGMPHYSHANDLERKIND_WRITE
876 || enmKind == PGMPHYSHANDLERKIND_ALL
877 || enmKind == PGMPHYSHANDLERKIND_MMIO,
878 VERR_INVALID_PARAMETER);
879 AssertMsgReturn(!(fFlags & ~PGMPHYSHANDLER_F_VALID_MASK), ("%#x\n", fFlags), VERR_INVALID_FLAGS);
880
881 PPGMPHYSHANDLERTYPEINTR0 const pTypeR0 = &pGVM->pgmr0.s.aPhysHandlerTypes[hType & PGMPHYSHANDLERTYPE_IDX_MASK];
882 AssertMsgReturn(hType == pTypeR0->hType, ("%#RX64, expected=%#RX64\n", hType, pTypeR0->hType), VERR_INVALID_HANDLE);
883 AssertCompile(RT_ELEMENTS(pGVM->pgmr0.s.aPhysHandlerTypes) == RT_ELEMENTS(pGVM->pgm.s.aPhysHandlerTypes));
884 AssertCompile(RT_ELEMENTS(pGVM->pgmr0.s.aPhysHandlerTypes) == PGMPHYSHANDLERTYPE_IDX_MASK + 1);
885 AssertReturn(pTypeR0->enmKind == PGMPHYSHANDLERKIND_INVALID, VERR_ALREADY_INITIALIZED);
886
887 int rc = GVMMR0ValidateGVMandEMT(pGVM, 0);
888 AssertRCReturn(rc, rc);
889 VM_ASSERT_STATE_RETURN(pGVM, VMSTATE_CREATING, VERR_VM_INVALID_VM_STATE); /** @todo ring-0 safe state check. */
890
891 PPGMPHYSHANDLERTYPEINTR3 const pTypeR3 = &pGVM->pgm.s.aPhysHandlerTypes[hType & PGMPHYSHANDLERTYPE_IDX_MASK];
892 AssertMsgReturn(pTypeR3->enmKind == enmKind,
893 ("%#x: %d, expected %d\n", hType, pTypeR3->enmKind, enmKind),
894 VERR_INVALID_HANDLE);
895 AssertMsgReturn(pTypeR3->fKeepPgmLock == RT_BOOL(fFlags & PGMPHYSHANDLER_F_KEEP_PGM_LOCK),
896 ("%#x: %d, fFlags=%#x\n", hType, pTypeR3->fKeepPgmLock, fFlags),
897 VERR_INVALID_HANDLE);
898 AssertMsgReturn(pTypeR3->fRing0DevInsIdx == RT_BOOL(fFlags & PGMPHYSHANDLER_F_R0_DEVINS_IDX),
899 ("%#x: %d, fFlags=%#x\n", hType, pTypeR3->fRing0DevInsIdx, fFlags),
900 VERR_INVALID_HANDLE);
901 AssertMsgReturn(pTypeR3->fNotInHm == RT_BOOL(fFlags & PGMPHYSHANDLER_F_NOT_IN_HM),
902 ("%#x: %d, fFlags=%#x\n", hType, pTypeR3->fNotInHm, fFlags),
903 VERR_INVALID_HANDLE);
904
905 /*
906 * Update the entry.
907 */
908 pTypeR0->enmKind = enmKind;
909 pTypeR0->uState = enmKind == PGMPHYSHANDLERKIND_WRITE
910 ? PGM_PAGE_HNDL_PHYS_STATE_WRITE : PGM_PAGE_HNDL_PHYS_STATE_ALL;
911 pTypeR0->fKeepPgmLock = RT_BOOL(fFlags & PGMPHYSHANDLER_F_KEEP_PGM_LOCK);
912 pTypeR0->fRing0DevInsIdx = RT_BOOL(fFlags & PGMPHYSHANDLER_F_R0_DEVINS_IDX);
913 pTypeR0->fNotInHm = RT_BOOL(fFlags & PGMPHYSHANDLER_F_NOT_IN_HM);
914 pTypeR0->pfnHandler = pfnHandler;
915 pTypeR0->pfnPfHandler = pfnPfHandler;
916 pTypeR0->pszDesc = pszDesc;
917
918 pTypeR3->fRing0Enabled = true;
919
920 LogFlow(("PGMR0HandlerPhysicalTypeRegister: hType=%#x: enmKind=%d fFlags=%#x pfnHandler=%p pfnPfHandler=%p pszDesc=%s\n",
921 hType, enmKind, fFlags, pfnHandler, pfnPfHandler, pszDesc));
922 return VINF_SUCCESS;
923}
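/*
 * Usage sketch (comment only), mirroring the ROM write-protection registration
 * in PGMR0InitVM above: the type must already have been registered in ring-3,
 * and this call merely attaches the ring-0 callbacks to it (fFlags=0 here):
 *
 *      rc = PGMR0HandlerPhysicalTypeSetUpContext(pGVM, PGMPHYSHANDLERKIND_WRITE, 0,
 *                                                pgmPhysRomWriteHandler, pgmPhysRomWritePfHandler,
 *                                                "ROM write protection",
 *                                                pGVM->pgm.s.hRomPhysHandlerType);
 */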
924
925
926#ifdef VBOX_WITH_PCI_PASSTHROUGH
927/* Interface sketch. The interface belongs to a global PCI pass-through
928 manager. It shall use the global VM handle, not the user VM handle to
929 store the per-VM info (domain) since that is all ring-0 stuff, thus
930 passing pGVM here. I've tentatively prefixed the functions 'GPciRawR0',
931 we can discuss the PciRaw code re-organization when I'm back from
932 vacation.
933
934 I've implemented the initial IOMMU set up below. For things to work
935 reliably, we will probably need to add a whole bunch of checks and
936 GPciRawR0GuestPageUpdate call to the PGM code. For the present,
937 assuming nested paging (enforced) and prealloc (enforced), no
938 ballooning (check missing), page sharing (check missing) or live
939 migration (check missing), it might work fine. At least if some
940 VM power-off hook is present and can tear down the IOMMU page tables. */
941
942/**
943 * Tells the global PCI pass-through manager that we are about to set up the
944 * guest page to host page mappings for the specified VM.
945 *
946 * @returns VBox status code.
947 *
948 * @param pGVM The ring-0 VM structure.
949 */
950VMMR0_INT_DECL(int) GPciRawR0GuestPageBeginAssignments(PGVM pGVM)
951{
952 NOREF(pGVM);
953 return VINF_SUCCESS;
954}
955
956
957/**
958 * Assigns a host page mapping for a guest page.
959 *
960 * This is only used when setting up the mappings, i.e. between
961 * GPciRawR0GuestPageBeginAssignments and GPciRawR0GuestPageEndAssignments.
962 *
963 * @returns VBox status code.
964 * @param pGVM The ring-0 VM structure.
965 * @param GCPhys The address of the guest page (page aligned).
966 * @param HCPhys The address of the host page (page aligned).
967 */
968VMMR0_INT_DECL(int) GPciRawR0GuestPageAssign(PGVM pGVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys)
969{
970 AssertReturn(!(GCPhys & HOST_PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR_3);
971 AssertReturn(!(HCPhys & HOST_PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR_3);
972
973 if (pGVM->rawpci.s.pfnContigMemInfo)
974 /** @todo what do we do on failure? */
975 pGVM->rawpci.s.pfnContigMemInfo(&pGVM->rawpci.s, HCPhys, GCPhys, HOST_PAGE_SIZE, PCIRAW_MEMINFO_MAP);
976
977 return VINF_SUCCESS;
978}
979
980
981/**
982 * Indicates that the specified guest page doesn't exist or doesn't have a host
983 * page mapping we trust PCI pass-through with.
984 *
985 * This is only used when setting up the mappings, i.e. between
986 * GPciRawR0GuestPageBeginAssignments and GPciRawR0GuestPageEndAssignments.
987 *
988 * @returns VBox status code.
989 * @param pGVM The ring-0 VM structure.
990 * @param GCPhys The address of the guest page (page aligned).
992 */
993VMMR0_INT_DECL(int) GPciRawR0GuestPageUnassign(PGVM pGVM, RTGCPHYS GCPhys)
994{
995 AssertReturn(!(GCPhys & HOST_PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR_3);
996
997 if (pGVM->rawpci.s.pfnContigMemInfo)
998 /** @todo what do we do on failure? */
999 pGVM->rawpci.s.pfnContigMemInfo(&pGVM->rawpci.s, 0, GCPhys, HOST_PAGE_SIZE, PCIRAW_MEMINFO_UNMAP);
1000
1001 return VINF_SUCCESS;
1002}
1003
1004
1005/**
1006 * Tells the global PCI pass-through manager that we have completed setting up
1007 * the guest page to host page mappings for the specified VM.
1008 *
1009 * This complements GPciRawR0GuestPageBeginAssignments and will be called even
1010 * if some page assignment failed.
1011 *
1012 * @returns VBox status code.
1013 *
1014 * @param pGVM The ring-0 VM structure.
1015 */
1016VMMR0_INT_DECL(int) GPciRawR0GuestPageEndAssignments(PGVM pGVM)
1017{
1018 NOREF(pGVM);
1019 return VINF_SUCCESS;
1020}
1021
1022
1023/**
1024 * Tells the global PCI pass-through manager that a guest page mapping has
1025 * changed after the initial setup.
1026 *
1027 * @returns VBox status code.
1028 * @param pGVM The ring-0 VM structure.
1029 * @param GCPhys The address of the guest page (page aligned).
1030 * @param HCPhys The new host page address or NIL_RTHCPHYS if
1031 * now unassigned.
1032 */
1033VMMR0_INT_DECL(int) GPciRawR0GuestPageUpdate(PGVM pGVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys)
1034{
1035 AssertReturn(!(GCPhys & HOST_PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR_4);
1036 AssertReturn(!(HCPhys & HOST_PAGE_OFFSET_MASK) || HCPhys == NIL_RTHCPHYS, VERR_INTERNAL_ERROR_4);
1037 NOREF(pGVM);
1038 return VINF_SUCCESS;
1039}
1040
1041#endif /* VBOX_WITH_PCI_PASSTHROUGH */
1042
1043
1044/**
1045 * Sets up the IOMMU when raw PCI device is enabled.
1046 *
1047 * @note This is a hack that will probably be remodelled and refined later!
1048 *
1049 * @returns VBox status code.
1050 *
1051 * @param pGVM The global (ring-0) VM structure.
1052 */
1053VMMR0_INT_DECL(int) PGMR0PhysSetupIoMmu(PGVM pGVM)
1054{
1055 int rc = GVMMR0ValidateGVM(pGVM);
1056 if (RT_FAILURE(rc))
1057 return rc;
1058
1059#ifdef VBOX_WITH_PCI_PASSTHROUGH
1060 if (pGVM->pgm.s.fPciPassthrough)
1061 {
1062 /*
1063 * The Simplistic Approach - Enumerate all the pages and tell the
1064 * IOMMU about each of them.
1065 */
1066 PGM_LOCK_VOID(pGVM);
1067 rc = GPciRawR0GuestPageBeginAssignments(pGVM);
1068 if (RT_SUCCESS(rc))
1069 {
1070 for (PPGMRAMRANGE pRam = pGVM->pgm.s.pRamRangesXR0; RT_SUCCESS(rc) && pRam; pRam = pRam->pNextR0)
1071 {
1072 PPGMPAGE pPage = &pRam->aPages[0];
1073 RTGCPHYS GCPhys = pRam->GCPhys;
1074 uint32_t cLeft = pRam->cb >> GUEST_PAGE_SHIFT;
1075 while (cLeft-- > 0)
1076 {
1077 /* Only expose pages that are 100% safe for now. */
1078 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM
1079 && PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED
1080 && !PGM_PAGE_HAS_ANY_HANDLERS(pPage))
1081 rc = GPciRawR0GuestPageAssign(pGVM, GCPhys, PGM_PAGE_GET_HCPHYS(pPage));
1082 else
1083 rc = GPciRawR0GuestPageUnassign(pGVM, GCPhys);
1084
1085 /* next */
1086 pPage++;
1087 GCPhys += HOST_PAGE_SIZE;
1088 }
1089 }
1090
1091 int rc2 = GPciRawR0GuestPageEndAssignments(pGVM);
1092 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
1093 rc = rc2;
1094 }
1095 PGM_UNLOCK(pGVM);
1096 }
1097 else
1098#endif
1099 rc = VERR_NOT_SUPPORTED;
1100 return rc;
1101}
1102
1103
1104/**
1105 * \#PF Handler for nested paging.
1106 *
1107 * @returns VBox status code (appropriate for trap handling and GC return).
1108 * @param pGVM The global (ring-0) VM structure.
1109 * @param pGVCpu The global (ring-0) CPU structure of the calling
1110 * EMT.
1111 * @param enmShwPagingMode Paging mode for the nested page tables.
1112 * @param uErr The trap error code.
1113 * @param pCtx Pointer to the register context for the CPU.
1114 * @param GCPhysFault The fault address.
1115 */
1116VMMR0DECL(int) PGMR0Trap0eHandlerNestedPaging(PGVM pGVM, PGVMCPU pGVCpu, PGMMODE enmShwPagingMode, RTGCUINT uErr,
1117 PCPUMCTX pCtx, RTGCPHYS GCPhysFault)
1118{
1119 int rc;
1120
1121 LogFlow(("PGMTrap0eHandler: uErr=%RGx GCPhysFault=%RGp eip=%RGv\n", uErr, GCPhysFault, (RTGCPTR)pCtx->rip));
1122 STAM_PROFILE_START(&pGVCpu->pgm.s.StatRZTrap0e, a);
1123 STAM_STATS({ pGVCpu->pgmr0.s.pStatTrap0eAttributionR0 = NULL; } );
1124
1125 /* AMD uses the host's paging mode; Intel has a single mode (EPT). */
1126 AssertMsg( enmShwPagingMode == PGMMODE_32_BIT || enmShwPagingMode == PGMMODE_PAE || enmShwPagingMode == PGMMODE_PAE_NX
1127 || enmShwPagingMode == PGMMODE_AMD64 || enmShwPagingMode == PGMMODE_AMD64_NX || enmShwPagingMode == PGMMODE_EPT,
1128 ("enmShwPagingMode=%d\n", enmShwPagingMode));
1129
1130 /* Reserved shouldn't end up here. */
1131 Assert(!(uErr & X86_TRAP_PF_RSVD));
1132
1133#ifdef VBOX_WITH_STATISTICS
1134 /*
1135 * Error code stats.
1136 */
1137 if (uErr & X86_TRAP_PF_US)
1138 {
1139 if (!(uErr & X86_TRAP_PF_P))
1140 {
1141 if (uErr & X86_TRAP_PF_RW)
1142 STAM_COUNTER_INC(&pGVCpu->pgm.s.Stats.StatRZTrap0eUSNotPresentWrite);
1143 else
1144 STAM_COUNTER_INC(&pGVCpu->pgm.s.Stats.StatRZTrap0eUSNotPresentRead);
1145 }
1146 else if (uErr & X86_TRAP_PF_RW)
1147 STAM_COUNTER_INC(&pGVCpu->pgm.s.Stats.StatRZTrap0eUSWrite);
1148 else if (uErr & X86_TRAP_PF_RSVD)
1149 STAM_COUNTER_INC(&pGVCpu->pgm.s.Stats.StatRZTrap0eUSReserved);
1150 else if (uErr & X86_TRAP_PF_ID)
1151 STAM_COUNTER_INC(&pGVCpu->pgm.s.Stats.StatRZTrap0eUSNXE);
1152 else
1153 STAM_COUNTER_INC(&pGVCpu->pgm.s.Stats.StatRZTrap0eUSRead);
1154 }
1155 else
1156 { /* Supervisor */
1157 if (!(uErr & X86_TRAP_PF_P))
1158 {
1159 if (uErr & X86_TRAP_PF_RW)
1160 STAM_COUNTER_INC(&pGVCpu->pgm.s.Stats.StatRZTrap0eSVNotPresentWrite);
1161 else
1162 STAM_COUNTER_INC(&pGVCpu->pgm.s.Stats.StatRZTrap0eSVNotPresentRead);
1163 }
1164 else if (uErr & X86_TRAP_PF_RW)
1165 STAM_COUNTER_INC(&pGVCpu->pgm.s.Stats.StatRZTrap0eSVWrite);
1166 else if (uErr & X86_TRAP_PF_ID)
1167 STAM_COUNTER_INC(&pGVCpu->pgm.s.Stats.StatRZTrap0eSNXE);
1168 else if (uErr & X86_TRAP_PF_RSVD)
1169 STAM_COUNTER_INC(&pGVCpu->pgm.s.Stats.StatRZTrap0eSVReserved);
1170 }
1171#endif
1172
1173 /*
1174 * Call the worker.
1175 *
1176 * Note! We pretend the guest is in protected mode without paging, so we
1177 * can use existing code to build the nested page tables.
1178 */
1179/** @todo r=bird: Gotta love this nested paging hacking we're still carrying with us... (Split PGM_TYPE_NESTED.) */
1180 bool fLockTaken = false;
1181 switch (enmShwPagingMode)
1182 {
1183 case PGMMODE_32_BIT:
1184 rc = PGM_BTH_NAME_32BIT_PROT(Trap0eHandler)(pGVCpu, uErr, pCtx, GCPhysFault, &fLockTaken);
1185 break;
1186 case PGMMODE_PAE:
1187 case PGMMODE_PAE_NX:
1188 rc = PGM_BTH_NAME_PAE_PROT(Trap0eHandler)(pGVCpu, uErr, pCtx, GCPhysFault, &fLockTaken);
1189 break;
1190 case PGMMODE_AMD64:
1191 case PGMMODE_AMD64_NX:
1192 rc = PGM_BTH_NAME_AMD64_PROT(Trap0eHandler)(pGVCpu, uErr, pCtx, GCPhysFault, &fLockTaken);
1193 break;
1194 case PGMMODE_EPT:
1195 rc = PGM_BTH_NAME_EPT_PROT(Trap0eHandler)(pGVCpu, uErr, pCtx, GCPhysFault, &fLockTaken);
1196 break;
1197 default:
1198 AssertFailed();
1199 rc = VERR_INVALID_PARAMETER;
1200 break;
1201 }
1202 if (fLockTaken)
1203 {
1204 PGM_LOCK_ASSERT_OWNER(pGVM);
1205 PGM_UNLOCK(pGVM);
1206 }
1207
1208 if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
1209 rc = VINF_SUCCESS;
1210 /*
1211 * Handle the case where we cannot interpret the instruction because we cannot get the guest physical address
1212 * via its page tables, see @bugref{6043}.
1213 */
1214 else if ( rc == VERR_PAGE_NOT_PRESENT /* SMP only ; disassembly might fail. */
1215 || rc == VERR_PAGE_TABLE_NOT_PRESENT /* seen with UNI & SMP */
1216 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT /* seen with SMP */
1217 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT) /* precaution */
1218 {
1219 Log(("WARNING: Unexpected VERR_PAGE_TABLE_NOT_PRESENT (%d) for page fault at %RGp error code %x (rip=%RGv)\n", rc, GCPhysFault, uErr, pCtx->rip));
1220 /* Some kind of inconsistency in the SMP case; it's safe to just execute the instruction again; not sure about
1221 single VCPU VMs though. */
1222 rc = VINF_SUCCESS;
1223 }
1224
1225 STAM_STATS({ if (!pGVCpu->pgmr0.s.pStatTrap0eAttributionR0)
1226 pGVCpu->pgmr0.s.pStatTrap0eAttributionR0 = &pGVCpu->pgm.s.Stats.StatRZTrap0eTime2Misc; });
1227 STAM_PROFILE_STOP_EX(&pGVCpu->pgm.s.Stats.StatRZTrap0e, pGVCpu->pgmr0.s.pStatTrap0eAttributionR0, a);
1228 return rc;
1229}
1230
1231
1232#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1233/**
1234 * Nested \#PF Handler for nested-guest execution using nested paging.
1235 *
1236 * @returns Strict VBox status code (appropriate for trap handling and GC return).
1237 * @param pGVCpu The global (ring-0) CPU structure of the calling EMT.
1238 * @param enmShwPagingMode Paging mode for the nested page tables; only
1239 * PGMMODE_EPT is expected here.
1240 * @param uErr The trap error code.
1241 * @param pCtx Pointer to the register context for the CPU.
1242 * @param GCPhysNestedFault The nested-guest physical address causing the fault.
1243 * @param fIsLinearAddrValid Whether translation of a nested-guest linear address
1244 * caused this fault. If @c false, GCPtrNestedFault
1245 * must be 0.
1246 * @param GCPtrNestedFault The nested-guest linear address that caused this
1247 * fault.
1248 * @param pWalk Where to store the SLAT walk result.
1249 */
1250VMMR0DECL(VBOXSTRICTRC) PGMR0NestedTrap0eHandlerNestedPaging(PGVMCPU pGVCpu, PGMMODE enmShwPagingMode, RTGCUINT uErr,
1251 PCPUMCTX pCtx, RTGCPHYS GCPhysNestedFault,
1252 bool fIsLinearAddrValid, RTGCPTR GCPtrNestedFault, PPGMPTWALK pWalk)
1253{
1254 Assert(enmShwPagingMode == PGMMODE_EPT);
1255 NOREF(enmShwPagingMode);
1256
1257 bool fLockTaken;
1258 VBOXSTRICTRC rcStrict = PGM_BTH_NAME_EPT_PROT(NestedTrap0eHandler)(pGVCpu, uErr, pCtx, GCPhysNestedFault,
1259 fIsLinearAddrValid, GCPtrNestedFault, pWalk, &fLockTaken);
1260 if (fLockTaken)
1261 {
1262 PGM_LOCK_ASSERT_OWNER(pGVCpu->CTX_SUFF(pVM));
1263 PGM_UNLOCK(pGVCpu->CTX_SUFF(pVM));
1264 }
1265 Assert(rcStrict != VINF_PGM_SYNCPAGE_MODIFIED_PDE); /* This rc isn't used with Nested Paging and nested-EPT. */
1266 return rcStrict;
1267}
1268#endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
1269
1270
1271/**
1272 * \#PF Handler for deliberate nested paging misconfiguration (/reserved bit)
1273 * employed for MMIO pages.
1274 *
1275 * @returns VBox status code (appropriate for trap handling and GC return).
1276 * @param pGVM The global (ring-0) VM structure.
1277 * @param pGVCpu The global (ring-0) CPU structure of the calling
1278 * EMT.
1279 * @param enmShwPagingMode Paging mode for the nested page tables.
1280 * @param pCtx Pointer to the register context for the CPU.
1281 * @param GCPhysFault The fault address.
1282 * @param uErr The error code, UINT32_MAX if not available
1283 * (VT-x).
1284 */
1285VMMR0DECL(VBOXSTRICTRC) PGMR0Trap0eHandlerNPMisconfig(PGVM pGVM, PGVMCPU pGVCpu, PGMMODE enmShwPagingMode,
1286 PCPUMCTX pCtx, RTGCPHYS GCPhysFault, uint32_t uErr)
1287{
1288#ifdef PGM_WITH_MMIO_OPTIMIZATIONS
1289 STAM_PROFILE_START(&pGVCpu->CTX_SUFF(pStats)->StatR0NpMiscfg, a);
1290 VBOXSTRICTRC rc;
1291
1292 /*
1293 * Try to look up the all-access physical handler for the address.
1294 */
1295 PGM_LOCK_VOID(pGVM);
1296 PPGMPHYSHANDLER pHandler;
1297 rc = pgmHandlerPhysicalLookup(pGVM, GCPhysFault, &pHandler);
1298 if (RT_SUCCESS(rc))
1299 {
1300 PCPGMPHYSHANDLERTYPEINT pHandlerType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pGVM, pHandler);
1301 if (RT_LIKELY( pHandlerType->enmKind != PGMPHYSHANDLERKIND_WRITE
1302 && !pHandlerType->fNotInHm /*paranoia*/ ))
1303 {
1304 /*
1305 * If the handler has aliased pages or pages that have been temporarily
1306 * disabled, we'll have to take a detour to make sure we resync them
1307 * to avoid lots of unnecessary exits.
1308 */
1309 PPGMPAGE pPage;
1310 if ( ( pHandler->cAliasedPages
1311 || pHandler->cTmpOffPages)
1312 && ( (pPage = pgmPhysGetPage(pGVM, GCPhysFault)) == NULL
1313 || PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_DISABLED)
1314 )
1315 {
1316 Log(("PGMR0Trap0eHandlerNPMisconfig: Resyncing aliases / tmp-off page at %RGp (uErr=%#x) %R[pgmpage]\n", GCPhysFault, uErr, pPage));
1317 STAM_COUNTER_INC(&pGVCpu->pgm.s.Stats.StatR0NpMiscfgSyncPage);
1318 rc = pgmShwSyncNestedPageLocked(pGVCpu, GCPhysFault, 1 /*cPages*/, enmShwPagingMode);
1319 PGM_UNLOCK(pGVM);
1320 }
1321 else
1322 {
1323 if (pHandlerType->pfnPfHandler)
1324 {
1325 uint64_t const uUser = !pHandlerType->fRing0DevInsIdx ? pHandler->uUser
1326 : (uintptr_t)PDMDeviceRing0IdxToInstance(pGVM, pHandler->uUser);
1327 STAM_PROFILE_START(&pHandler->Stat, h);
1328 PGM_UNLOCK(pGVM);
1329
1330 Log6(("PGMR0Trap0eHandlerNPMisconfig: calling %p(,%#x,,%RGp,%p)\n", pHandlerType->pfnPfHandler, uErr, GCPhysFault, uUser));
1331 rc = pHandlerType->pfnPfHandler(pGVM, pGVCpu, uErr == UINT32_MAX ? RTGCPTR_MAX : uErr, pCtx,
1332 GCPhysFault, GCPhysFault, uUser);
1333
1334 STAM_PROFILE_STOP(&pHandler->Stat, h); /* no locking needed, entry is unlikely reused before we get here. */
1335 }
1336 else
1337 {
1338 PGM_UNLOCK(pGVM);
1339 Log(("PGMR0Trap0eHandlerNPMisconfig: %RGp (uErr=%#x) -> R3\n", GCPhysFault, uErr));
1340 rc = VINF_EM_RAW_EMULATE_INSTR;
1341 }
1342 }
1343 STAM_PROFILE_STOP(&pGVCpu->pgm.s.Stats.StatR0NpMiscfg, a);
1344 return rc;
1345 }
1346 }
1347 else
1348 AssertMsgReturn(rc == VERR_NOT_FOUND, ("%Rrc GCPhysFault=%RGp\n", VBOXSTRICTRC_VAL(rc), GCPhysFault), rc);
1349
1350 /*
1351 * Must be out of sync, so do a SyncPage and restart the instruction.
1352 *
1353 * ASSUMES that ALL handlers are page aligned and cover whole pages
1354 * (assumption asserted in PGMHandlerPhysicalRegisterEx).
1355 */
1356 Log(("PGMR0Trap0eHandlerNPMisconfig: Out of sync page at %RGp (uErr=%#x)\n", GCPhysFault, uErr));
1357 STAM_COUNTER_INC(&pGVCpu->pgm.s.Stats.StatR0NpMiscfgSyncPage);
1358 rc = pgmShwSyncNestedPageLocked(pGVCpu, GCPhysFault, 1 /*cPages*/, enmShwPagingMode);
1359 PGM_UNLOCK(pGVM);
1360
1361 STAM_PROFILE_STOP(&pGVCpu->pgm.s.Stats.StatR0NpMiscfg, a);
1362 return rc;
1363
1364#else
1365 AssertLogRelFailed();
1366 return VERR_PGM_NOT_USED_IN_MODE;
1367#endif
1368}
1369