VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp @ 98150

Last change on this file since 98150 was 98103, checked in by vboxsync, 2 years ago:
Copyright year updates by scm.

/* $Id: PGMAllHandler.cpp 98103 2023-01-17 14:15:46Z vboxsync $ */
/** @file
 * PGM - Page Manager / Monitor, Access Handlers.
 */

/*
 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/iem.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/nem.h>
#include <VBox/vmm/stam.h>
#include <VBox/vmm/dbgf.h>
#ifdef IN_RING0
# include <VBox/vmm/pdmdev.h>
#endif
#include "PGMInternal.h"
#include <VBox/vmm/vmcc.h>
#include "PGMInline.h"

#include <VBox/log.h>
#include <iprt/assert.h>
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
# include <iprt/asm-amd64-x86.h>
#endif
#include <iprt/string.h>
#include <VBox/param.h>
#include <VBox/err.h>
#include <VBox/vmm/selm.h>


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
/** Dummy physical access handler type record. */
CTX_SUFF(PGMPHYSHANDLERTYPEINT) const g_pgmHandlerPhysicalDummyType =
{
    /* .hType = */              UINT64_C(0x93b7557e1937aaff),
    /* .enmKind = */            PGMPHYSHANDLERKIND_INVALID,
    /* .uState = */             PGM_PAGE_HNDL_PHYS_STATE_ALL,
    /* .fKeepPgmLock = */       true,
    /* .fRing0DevInsIdx = */    false,
#ifdef IN_RING0
    /* .fNotInHm = */           false,
    /* .pfnHandler = */         pgmR0HandlerPhysicalHandlerToRing3,
    /* .pfnPfHandler = */       pgmR0HandlerPhysicalPfHandlerToRing3,
#elif defined(IN_RING3)
    /* .fRing0Enabled = */      false,
    /* .fNotInHm = */           false,
    /* .pfnHandler = */         pgmR3HandlerPhysicalHandlerInvalid,
#else
# error "unsupported context"
#endif
    /* .pszDesc = */            "dummy"
};


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
static int  pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVMCC pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam,
                                                           void *pvBitmap, uint32_t offBitmap);
static void pgmHandlerPhysicalDeregisterNotifyNEM(PVMCC pVM, PPGMPHYSHANDLER pCur);
static void pgmHandlerPhysicalResetRamFlags(PVMCC pVM, PPGMPHYSHANDLER pCur);


#ifndef IN_RING3

/**
 * @callback_method_impl{FNPGMPHYSHANDLER,
 *      Dummy for forcing ring-3 handling of the access.}
 */
DECLCALLBACK(VBOXSTRICTRC)
pgmR0HandlerPhysicalHandlerToRing3(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
                                   PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, uint64_t uUser)
{
    RT_NOREF(pVM, pVCpu, GCPhys, pvPhys, pvBuf, cbBuf, enmAccessType, enmOrigin, uUser);
    return VINF_EM_RAW_EMULATE_INSTR;
}


/**
 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
 *      Dummy for forcing ring-3 handling of the access.}
 */
DECLCALLBACK(VBOXSTRICTRC)
pgmR0HandlerPhysicalPfHandlerToRing3(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErrorCode, PCPUMCTX pCtx,
                                     RTGCPTR pvFault, RTGCPHYS GCPhysFault, uint64_t uUser)
{
    RT_NOREF(pVM, pVCpu, uErrorCode, pCtx, pvFault, GCPhysFault, uUser);
    return VINF_EM_RAW_EMULATE_INSTR;
}

#endif /* !IN_RING3 */


/**
 * Creates a physical access handler, allocation part.
 *
 * @returns VBox status code.
 * @retval  VERR_OUT_OF_RESOURCES if no more handlers available.
 *
 * @param   pVM             The cross context VM structure.
 * @param   hType           The handler type registration handle.
 * @param   uUser           User argument to the handlers (not pointer).
 * @param   pszDesc         Description of this handler. If NULL, the type
 *                          description will be used instead.
 * @param   ppPhysHandler   Where to return the access handler structure on
 *                          success.
 */
int pgmHandlerPhysicalExCreate(PVMCC pVM, PGMPHYSHANDLERTYPE hType, uint64_t uUser,
                               R3PTRTYPE(const char *) pszDesc, PPGMPHYSHANDLER *ppPhysHandler)
{
    /*
     * Validate input.
     */
    PCPGMPHYSHANDLERTYPEINT const pType = pgmHandlerPhysicalTypeHandleToPtr(pVM, hType);
    AssertReturn(pType, VERR_INVALID_HANDLE);
    AssertReturn(pType->enmKind > PGMPHYSHANDLERKIND_INVALID && pType->enmKind < PGMPHYSHANDLERKIND_END, VERR_INVALID_HANDLE);
    AssertPtr(ppPhysHandler);

    Log(("pgmHandlerPhysicalExCreate: uUser=%#RX64 hType=%#x (%d, %s) pszDesc=%RHv:%s\n",
         uUser, hType, pType->enmKind, pType->pszDesc, pszDesc, R3STRING(pszDesc)));

    /*
     * Allocate and initialize the new entry.
     */
    int rc = PGM_LOCK(pVM);
    AssertRCReturn(rc, rc);

    PPGMPHYSHANDLER pNew = pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator.allocateNode();
    if (pNew)
    {
        pNew->Key           = NIL_RTGCPHYS;
        pNew->KeyLast       = NIL_RTGCPHYS;
        pNew->cPages        = 0;
        pNew->cAliasedPages = 0;
        pNew->cTmpOffPages  = 0;
        pNew->uUser         = uUser;
        pNew->hType         = hType;
        pNew->pszDesc       = pszDesc != NIL_RTR3PTR ? pszDesc
#ifdef IN_RING3
                            : pType->pszDesc;
#else
                            : pVM->pgm.s.aPhysHandlerTypes[hType & PGMPHYSHANDLERTYPE_IDX_MASK].pszDesc;
#endif

        PGM_UNLOCK(pVM);
        *ppPhysHandler = pNew;
        return VINF_SUCCESS;
    }

    PGM_UNLOCK(pVM);
    return VERR_OUT_OF_RESOURCES;
}

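/**
 * Usage sketch for the Ex API above: allocation is deliberately split from
 * tree insertion so a caller can pre-allocate the node before attaching it to
 * a range.  hMyType, uMyUser, GCPhysFirst and cbRegion below are placeholders,
 * not names from this file.
 *
 * @code
 *  PPGMPHYSHANDLER pHandler;
 *  int rc = pgmHandlerPhysicalExCreate(pVM, hMyType, uMyUser, NIL_RTR3PTR, &pHandler);
 *  if (RT_SUCCESS(rc))
 *  {
 *      rc = pgmHandlerPhysicalExRegister(pVM, pHandler, GCPhysFirst, GCPhysFirst + cbRegion - 1);
 *      if (RT_FAILURE(rc))
 *          pgmHandlerPhysicalExDestroy(pVM, pHandler); // don't leak the node on failure
 *  }
 * @endcode
 */
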
/**
 * Duplicates a physical access handler.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS when successfully installed.
 *
 * @param   pVM             The cross context VM structure.
 * @param   pPhysHandlerSrc The source handler to duplicate.
 * @param   ppPhysHandler   Where to return the access handler structure on
 *                          success.
 */
int pgmHandlerPhysicalExDup(PVMCC pVM, PPGMPHYSHANDLER pPhysHandlerSrc, PPGMPHYSHANDLER *ppPhysHandler)
{
    return pgmHandlerPhysicalExCreate(pVM, pPhysHandlerSrc->hType, pPhysHandlerSrc->uUser,
                                      pPhysHandlerSrc->pszDesc, ppPhysHandler);
}


/**
 * Registers an access handler for a physical range.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS when successfully installed.
 * @retval  VINF_PGM_GCPHYS_ALIASED when the shadow PTs could not be fully
 *          updated because the guest page is aliased and/or mapped by
 *          multiple PTs.
 *
 * @param   pVM             The cross context VM structure.
 * @param   pPhysHandler    The physical handler.
 * @param   GCPhys          Start physical address.
 * @param   GCPhysLast      Last physical address (inclusive).
 */
int pgmHandlerPhysicalExRegister(PVMCC pVM, PPGMPHYSHANDLER pPhysHandler, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast)
{
    /*
     * Validate input.
     */
    AssertReturn(pPhysHandler, VERR_INVALID_POINTER);
    PGMPHYSHANDLERTYPE const      hType = pPhysHandler->hType;
    PCPGMPHYSHANDLERTYPEINT const pType = pgmHandlerPhysicalTypeHandleToPtr(pVM, hType);
    AssertReturn(pType, VERR_INVALID_HANDLE);
    AssertReturn(pType->enmKind > PGMPHYSHANDLERKIND_INVALID && pType->enmKind < PGMPHYSHANDLERKIND_END, VERR_INVALID_HANDLE);

    AssertPtr(pPhysHandler);

    Log(("pgmHandlerPhysicalExRegister: GCPhys=%RGp GCPhysLast=%RGp hType=%#x (%d, %s) pszDesc=%RHv:%s\n", GCPhys, GCPhysLast,
         hType, pType->enmKind, pType->pszDesc, pPhysHandler->pszDesc, R3STRING(pPhysHandler->pszDesc)));
    AssertReturn(pPhysHandler->Key == NIL_RTGCPHYS, VERR_WRONG_ORDER);

    AssertMsgReturn(GCPhys < GCPhysLast, ("GCPhys >= GCPhysLast (%#x >= %#x)\n", GCPhys, GCPhysLast), VERR_INVALID_PARAMETER);
    Assert(GCPhysLast - GCPhys < _4G); /* ASSUMPTION in PGMAllPhys.cpp */

    switch (pType->enmKind)
    {
        case PGMPHYSHANDLERKIND_WRITE:
            if (!pType->fNotInHm)
                break;
            RT_FALL_THRU(); /* Simplification: fNotInHm can only be used with full pages */
        case PGMPHYSHANDLERKIND_MMIO:
        case PGMPHYSHANDLERKIND_ALL:
            /* Simplification for PGMPhysRead, PGMR0Trap0eHandlerNPMisconfig and others: Full pages. */
            AssertMsgReturn(!(GCPhys & GUEST_PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_INVALID_PARAMETER);
            AssertMsgReturn((GCPhysLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK, ("%RGp\n", GCPhysLast), VERR_INVALID_PARAMETER);
            break;
        default:
            AssertMsgFailed(("Invalid input enmKind=%d!\n", pType->enmKind));
            return VERR_INVALID_PARAMETER;
    }

    /*
     * We require the range to be within registered ram.
     * There is no apparent need to support ranges which cover more than one ram range.
     */
    PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
    if (   !pRam
        || GCPhysLast > pRam->GCPhysLast)
    {
#ifdef IN_RING3
        DBGFR3Info(pVM->pUVM, "phys", NULL, NULL);
#endif
        AssertMsgFailed(("No RAM range for %RGp-%RGp\n", GCPhys, GCPhysLast));
        return VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
    }
    Assert(GCPhys >= pRam->GCPhys && GCPhys < pRam->GCPhysLast);
    Assert(GCPhysLast <= pRam->GCPhysLast && GCPhysLast >= pRam->GCPhys);

    /*
     * Try insert into list.
     */
    pPhysHandler->Key     = GCPhys;
    pPhysHandler->KeyLast = GCPhysLast;
    pPhysHandler->cPages  = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + GUEST_PAGE_SIZE) >> GUEST_PAGE_SHIFT;

    int rc = PGM_LOCK(pVM);
    if (RT_SUCCESS(rc))
    {
        rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->insert(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, pPhysHandler);
        if (RT_SUCCESS(rc))
        {
            rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pPhysHandler, pRam, NULL /*pvBitmap*/, 0 /*offBitmap*/);
            if (rc == VINF_PGM_SYNC_CR3)
                rc = VINF_PGM_GCPHYS_ALIASED;

#if defined(IN_RING3) || defined(IN_RING0)
            NEMHCNotifyHandlerPhysicalRegister(pVM, pType->enmKind, GCPhys, GCPhysLast - GCPhys + 1);
#endif
            PGM_UNLOCK(pVM);

            if (rc != VINF_SUCCESS)
                Log(("PGMHandlerPhysicalRegisterEx: returns %Rrc (%RGp-%RGp)\n", rc, GCPhys, GCPhysLast));
            return rc;
        }
        PGM_UNLOCK(pVM);
    }

    pPhysHandler->Key     = NIL_RTGCPHYS;
    pPhysHandler->KeyLast = NIL_RTGCPHYS;

    AssertMsgReturn(rc == VERR_ALREADY_EXISTS, ("%Rrc GCPhys=%RGp GCPhysLast=%RGp\n", rc, GCPhys, GCPhysLast), rc);

#if defined(IN_RING3) && defined(VBOX_STRICT)
    DBGFR3Info(pVM->pUVM, "handlers", "phys nostats", NULL);
#endif
    AssertMsgFailed(("Conflict! GCPhys=%RGp GCPhysLast=%RGp pszDesc=%s/%s\n",
                     GCPhys, GCPhysLast, R3STRING(pPhysHandler->pszDesc), R3STRING(pType->pszDesc)));
    return VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
}


/**
 * Registers an access handler for a physical range.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS when successfully installed.
 * @retval  VINF_PGM_GCPHYS_ALIASED when the shadow PTs could not be updated
 *          because the guest page is aliased and/or mapped by multiple PTs. A
 *          CR3 sync has been flagged together with a pool clearing.
 * @retval  VERR_PGM_HANDLER_PHYSICAL_CONFLICT if the range conflicts with an
 *          existing one. A debug assertion is raised.
 *
 * @param   pVM             The cross context VM structure.
 * @param   GCPhys          Start physical address.
 * @param   GCPhysLast      Last physical address (inclusive).
 * @param   hType           The handler type registration handle.
 * @param   uUser           User argument to the handler.
 * @param   pszDesc         Description of this handler. If NULL, the type
 *                          description will be used instead.
 */
VMMDECL(int) PGMHandlerPhysicalRegister(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast, PGMPHYSHANDLERTYPE hType,
                                        uint64_t uUser, R3PTRTYPE(const char *) pszDesc)
{
#ifdef LOG_ENABLED
    PCPGMPHYSHANDLERTYPEINT pType = pgmHandlerPhysicalTypeHandleToPtr(pVM, hType);
    Log(("PGMHandlerPhysicalRegister: GCPhys=%RGp GCPhysLast=%RGp uUser=%#RX64 hType=%#x (%d, %s) pszDesc=%RHv:%s\n",
         GCPhys, GCPhysLast, uUser, hType, pType->enmKind, R3STRING(pType->pszDesc), pszDesc, R3STRING(pszDesc)));
#endif

    PPGMPHYSHANDLER pNew;
    int rc = pgmHandlerPhysicalExCreate(pVM, hType, uUser, pszDesc, &pNew);
    if (RT_SUCCESS(rc))
    {
        rc = pgmHandlerPhysicalExRegister(pVM, pNew, GCPhys, GCPhysLast);
        if (RT_SUCCESS(rc))
            return rc;
        pgmHandlerPhysicalExDestroy(pVM, pNew);
    }
    return rc;
}


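/**
 * Minimal registration sketch: monitor writes to a single, fully aligned
 * guest page.  hMyWriteType is a placeholder for a type handle registered
 * beforehand with kind PGMPHYSHANDLERKIND_WRITE; the address is illustrative.
 *
 * @code
 *  RTGCPHYS const GCPhysFrameBuf = 0x000a0000;
 *  int rc = PGMHandlerPhysicalRegister(pVM, GCPhysFrameBuf, GCPhysFrameBuf + GUEST_PAGE_SIZE - 1,
 *                                      hMyWriteType, 0, "Example write monitor"); // uUser = 0
 *  AssertRC(rc);
 * @endcode
 */
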
/**
 * Sets ram range flags and attempts updating shadow PTs.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS when the shadow PTs were successfully updated.
 * @retval  VINF_PGM_SYNC_CR3 when the shadow PTs could not be updated because
 *          the guest page is aliased and/or mapped by multiple PTs. FFs set.
 * @param   pVM         The cross context VM structure.
 * @param   pCur        The physical handler.
 * @param   pRam        The RAM range.
 * @param   pvBitmap    Dirty bitmap. Optional.
 * @param   offBitmap   Dirty bitmap offset.
 */
static int pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVMCC pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam,
                                                          void *pvBitmap, uint32_t offBitmap)
{
    /*
     * Iterate the guest ram pages updating the flags and flushing PT entries
     * mapping the page.
     */
    bool                          fFlushTLBs = false;
    int                           rc         = VINF_SUCCESS;
    PCPGMPHYSHANDLERTYPEINT const pCurType   = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
    const unsigned                uState     = pCurType->uState;
    uint32_t                      cPages     = pCur->cPages;
    uint32_t                      i          = (pCur->Key - pRam->GCPhys) >> GUEST_PAGE_SHIFT;
    for (;;)
    {
        PPGMPAGE pPage = &pRam->aPages[i];
        AssertMsg(pCurType->enmKind != PGMPHYSHANDLERKIND_MMIO || PGM_PAGE_IS_MMIO(pPage),
                  ("%RGp %R[pgmpage]\n", pRam->GCPhys + (i << GUEST_PAGE_SHIFT), pPage));

        /* Only do upgrades. */
        if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) < uState)
        {
            PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, uState, pCurType->fNotInHm);

            const RTGCPHYS GCPhysPage = pRam->GCPhys + (i << GUEST_PAGE_SHIFT);
            int rc2 = pgmPoolTrackUpdateGCPhys(pVM, GCPhysPage, pPage,
                                               false /* allow updates of PTEs (instead of flushing) */, &fFlushTLBs);
            if (rc2 != VINF_SUCCESS && rc == VINF_SUCCESS)
                rc = rc2;

#ifdef VBOX_WITH_NATIVE_NEM
            /* Tell NEM about the protection update. */
            if (VM_IS_NEM_ENABLED(pVM))
            {
                uint8_t     u2State = PGM_PAGE_GET_NEM_STATE(pPage);
                PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
                NEMHCNotifyPhysPageProtChanged(pVM, GCPhysPage, PGM_PAGE_GET_HCPHYS(pPage),
                                               PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
                                               pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
                PGM_PAGE_SET_NEM_STATE(pPage, u2State);
            }
#endif
            if (pvBitmap)
                ASMBitSet(pvBitmap, offBitmap);
        }

        /* next */
        if (--cPages == 0)
            break;
        i++;
        offBitmap++;
    }

    if (fFlushTLBs)
    {
        PGM_INVL_ALL_VCPU_TLBS(pVM);
        Log(("pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs: flushing guest TLBs; rc=%d\n", rc));
    }
    else
        Log(("pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs: doesn't flush guest TLBs. rc=%Rrc; sync flags=%x VMCPU_FF_PGM_SYNC_CR3=%d\n",
             rc, VMMGetCpu(pVM)->pgm.s.fSyncFlags, VMCPU_FF_IS_SET(VMMGetCpu(pVM), VMCPU_FF_PGM_SYNC_CR3)));

    return rc;
}


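/**
 * Sketch of how the optional dirty bitmap above is consumed: after the call,
 * bit offBitmap + i is set for every page i whose protection had to be
 * re-established (i.e. it was touched since monitoring was last armed).  The
 * scanning loop below is illustrative, not code from this file.
 *
 * @code
 *  for (uint32_t iPage = 0; iPage < pCur->cPages; iPage++)
 *      if (ASMBitTest(pvBitmap, offBitmap + iPage))
 *          Log(("page %#x in the range is dirty\n", iPage));
 * @endcode
 */
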
/**
 * Deregisters a physical page access handler.
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pPhysHandler    The handler to deregister (but not free).
 */
int pgmHandlerPhysicalExDeregister(PVMCC pVM, PPGMPHYSHANDLER pPhysHandler)
{
    LogFlow(("pgmHandlerPhysicalExDeregister: Removing Range %RGp-%RGp %s\n",
             pPhysHandler->Key, pPhysHandler->KeyLast, R3STRING(pPhysHandler->pszDesc)));

    int rc = PGM_LOCK(pVM);
    AssertRCReturn(rc, rc);

    RTGCPHYS const GCPhys = pPhysHandler->Key;
    AssertReturnStmt(GCPhys != NIL_RTGCPHYS, PGM_UNLOCK(pVM), VERR_PGM_HANDLER_NOT_FOUND);

    /*
     * Remove the handler from the tree.
     */

    PPGMPHYSHANDLER pRemoved;
    rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->remove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pRemoved);
    if (RT_SUCCESS(rc))
    {
        if (pRemoved == pPhysHandler)
        {
            /*
             * Clear the page bits, notify the REM about this change and clear
             * the cache.
             */
            pgmHandlerPhysicalResetRamFlags(pVM, pPhysHandler);
            if (VM_IS_NEM_ENABLED(pVM))
                pgmHandlerPhysicalDeregisterNotifyNEM(pVM, pPhysHandler);
            pVM->pgm.s.idxLastPhysHandler = 0;

            pPhysHandler->Key     = NIL_RTGCPHYS;
            pPhysHandler->KeyLast = NIL_RTGCPHYS;

            PGM_UNLOCK(pVM);

            return VINF_SUCCESS;
        }

        /*
         * Both of the failure conditions here are considered internal processing
         * errors because they can only be caused by race conditions or corruption.
         * If we ever need to handle concurrent deregistration, we have to move
         * the NIL_RTGCPHYS check inside the PGM lock.
         */
        pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->insert(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, pRemoved);
    }

    PGM_UNLOCK(pVM);

    if (RT_FAILURE(rc))
        AssertMsgFailed(("Didn't find range starting at %RGp in the tree! rc=%Rrc\n", GCPhys, rc));
    else
        AssertMsgFailed(("Found different handle at %RGp in the tree: got %p instead of %p\n",
                         GCPhys, pRemoved, pPhysHandler));
    return VERR_PGM_HANDLER_IPE_1;
}


/**
 * Destroys (frees) a physical handler.
 *
 * The caller must deregister it before destroying it!
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pHandler    The handler to free. NULL if ignored.
 */
int pgmHandlerPhysicalExDestroy(PVMCC pVM, PPGMPHYSHANDLER pHandler)
{
    if (pHandler)
    {
        AssertPtr(pHandler);
        AssertReturn(pHandler->Key == NIL_RTGCPHYS, VERR_WRONG_ORDER);

        int rc = PGM_LOCK(pVM);
        if (RT_SUCCESS(rc))
        {
            rc = pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator.freeNode(pHandler);
            PGM_UNLOCK(pVM);
        }
        return rc;
    }
    return VINF_SUCCESS;
}


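/**
 * The Ex lifecycle in one place: deregister first (detaches the handler from
 * the tree and resets the page flags), then destroy (frees the node).
 * Destroying a still-registered handler trips the VERR_WRONG_ORDER assertion
 * above.  Sketch:
 *
 * @code
 *  int rc = pgmHandlerPhysicalExDeregister(pVM, pHandler);
 *  if (RT_SUCCESS(rc))
 *      rc = pgmHandlerPhysicalExDestroy(pVM, pHandler);
 * @endcode
 */
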
/**
 * Deregisters a physical page access handler.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      Start physical address.
 */
VMMDECL(int) PGMHandlerPhysicalDeregister(PVMCC pVM, RTGCPHYS GCPhys)
{
    AssertReturn(pVM->VMCC_CTX(pgm).s.pPhysHandlerTree, VERR_PGM_HANDLER_IPE_1);

    /*
     * Find the handler.
     */
    int rc = PGM_LOCK(pVM);
    AssertRCReturn(rc, rc);

    PPGMPHYSHANDLER pRemoved;
    rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->remove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pRemoved);
    if (RT_SUCCESS(rc))
    {
        Assert(pRemoved->Key == GCPhys);
        LogFlow(("PGMHandlerPhysicalDeregister: Removing Range %RGp-%RGp %s\n",
                 pRemoved->Key, pRemoved->KeyLast, R3STRING(pRemoved->pszDesc)));

        /*
         * Clear the page bits, notify the REM about this change and clear
         * the cache.
         */
        pgmHandlerPhysicalResetRamFlags(pVM, pRemoved);
        if (VM_IS_NEM_ENABLED(pVM))
            pgmHandlerPhysicalDeregisterNotifyNEM(pVM, pRemoved);
        pVM->pgm.s.idxLastPhysHandler = 0;

        pRemoved->Key = NIL_RTGCPHYS;
        rc = pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator.freeNode(pRemoved);

        PGM_UNLOCK(pVM);
        return rc;
    }

    PGM_UNLOCK(pVM);

    if (rc == VERR_NOT_FOUND)
    {
        AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys));
        rc = VERR_PGM_HANDLER_NOT_FOUND;
    }
    return rc;
}


/**
 * Shared code with modify.
 */
static void pgmHandlerPhysicalDeregisterNotifyNEM(PVMCC pVM, PPGMPHYSHANDLER pCur)
{
#ifdef VBOX_WITH_NATIVE_NEM
    PCPGMPHYSHANDLERTYPEINT pCurType    = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
    RTGCPHYS                GCPhysStart = pCur->Key;
    RTGCPHYS                GCPhysLast  = pCur->KeyLast;

    /*
     * Page align the range.
     *
     * Since we've reset (recalculated) the physical handler state of all pages
     * we can make use of the page states to figure out whether a page should be
     * included in the REM notification or not.
     */
    if (   (pCur->Key & GUEST_PAGE_OFFSET_MASK)
        || ((pCur->KeyLast + 1) & GUEST_PAGE_OFFSET_MASK))
    {
        Assert(pCurType->enmKind != PGMPHYSHANDLERKIND_MMIO);

        if (GCPhysStart & GUEST_PAGE_OFFSET_MASK)
        {
            PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhysStart);
            if (   pPage
                && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
            {
                RTGCPHYS GCPhys = (GCPhysStart + (GUEST_PAGE_SIZE - 1)) & X86_PTE_PAE_PG_MASK;
                if (   GCPhys > GCPhysLast
                    || GCPhys < GCPhysStart)
                    return;
                GCPhysStart = GCPhys;
            }
            else
                GCPhysStart &= X86_PTE_PAE_PG_MASK;
            Assert(!pPage || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO); /* these are page aligned atm! */
        }

        if (GCPhysLast & GUEST_PAGE_OFFSET_MASK)
        {
            PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhysLast);
            if (   pPage
                && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
            {
                RTGCPHYS GCPhys = (GCPhysLast & X86_PTE_PAE_PG_MASK) - 1;
                if (   GCPhys < GCPhysStart
                    || GCPhys > GCPhysLast)
                    return;
                GCPhysLast = GCPhys;
            }
            else
                GCPhysLast |= GUEST_PAGE_OFFSET_MASK;
            Assert(!pPage || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO); /* these are page aligned atm! */
        }
    }

    /*
     * Tell NEM.
     */
    PPGMRAMRANGE const pRam    = pgmPhysGetRange(pVM, GCPhysStart);
    RTGCPHYS const     cb      = GCPhysLast - GCPhysStart + 1;
    uint8_t            u2State = UINT8_MAX;
    NEMHCNotifyHandlerPhysicalDeregister(pVM, pCurType->enmKind, GCPhysStart, cb,
                                         pRam ? PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysStart) : NULL, &u2State);
    if (u2State != UINT8_MAX && pRam)
        pgmPhysSetNemStateForPages(&pRam->aPages[(GCPhysStart - pRam->GCPhys) >> GUEST_PAGE_SHIFT],
                                   cb >> GUEST_PAGE_SHIFT, u2State);
#else
    RT_NOREF(pVM, pCur);
#endif
}


/**
 * pgmHandlerPhysicalResetRamFlags helper that checks for other handlers on
 * edge pages.
 */
DECLINLINE(void) pgmHandlerPhysicalRecalcPageState(PVMCC pVM, RTGCPHYS GCPhys, bool fAbove, PPGMRAMRANGE *ppRamHint)
{
    /*
     * Look for other handlers.
     */
    unsigned uState = PGM_PAGE_HNDL_PHYS_STATE_NONE;
    for (;;)
    {
        PPGMPHYSHANDLER pCur;
        int rc;
        if (fAbove)
            rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookupMatchingOrAbove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator,
                                                                              GCPhys, &pCur);
        else
            rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookupMatchingOrBelow(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator,
                                                                              GCPhys, &pCur);
        if (rc == VERR_NOT_FOUND)
            break;
        AssertRCBreak(rc);
        if (((fAbove ? pCur->Key : pCur->KeyLast) >> GUEST_PAGE_SHIFT) != (GCPhys >> GUEST_PAGE_SHIFT))
            break;
        PCPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
        uState = RT_MAX(uState, pCurType->uState);

        /* next? */
        RTGCPHYS GCPhysNext = fAbove
                            ? pCur->KeyLast + 1
                            : pCur->Key - 1;
        if ((GCPhysNext >> GUEST_PAGE_SHIFT) != (GCPhys >> GUEST_PAGE_SHIFT))
            break;
        GCPhys = GCPhysNext;
    }

    /*
     * Update if we found something that is a higher priority state than the current.
     * Note! The PGMPHYSHANDLER_F_NOT_IN_HM can be ignored here as it requires whole pages.
     */
    if (uState != PGM_PAGE_HNDL_PHYS_STATE_NONE)
    {
        PPGMPAGE pPage;
        int rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, ppRamHint);
        if (   RT_SUCCESS(rc)
            && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) < uState)
        {
            /* This should normally not be necessary. */
            PGM_PAGE_SET_HNDL_PHYS_STATE_ONLY(pPage, uState);
            bool fFlushTLBs;
            rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhys, pPage, false /*fFlushPTEs*/, &fFlushTLBs);
            if (RT_SUCCESS(rc) && fFlushTLBs)
                PGM_INVL_ALL_VCPU_TLBS(pVM);
            else
                AssertRC(rc);

#ifdef VBOX_WITH_NATIVE_NEM
            /* Tell NEM about the protection update. */
            if (VM_IS_NEM_ENABLED(pVM))
            {
                uint8_t     u2State = PGM_PAGE_GET_NEM_STATE(pPage);
                PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
                NEMHCNotifyPhysPageProtChanged(pVM, GCPhys, PGM_PAGE_GET_HCPHYS(pPage),
                                               PGM_RAMRANGE_CALC_PAGE_R3PTR(*ppRamHint, GCPhys),
                                               pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
                PGM_PAGE_SET_NEM_STATE(pPage, u2State);
            }
#endif
        }
        else
            AssertRC(rc);
    }
}


/**
 * Resets an aliased page.
 *
 * @param   pVM             The cross context VM structure.
 * @param   pPage           The page.
 * @param   GCPhysPage      The page address in case it comes in handy.
 * @param   pRam            The RAM range the page is associated with (for NEM
 *                          notifications).
 * @param   fDoAccounting   Whether to perform accounting. (Only set during
 *                          reset where pgmR3PhysRamReset doesn't have the
 *                          handler structure handy.)
 * @param   fFlushIemTlbs   Whether to perform IEM TLB flushing or not. This
 *                          can be cleared only if the caller does the flushing
 *                          after calling this function.
 */
void pgmHandlerPhysicalResetAliasedPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhysPage, PPGMRAMRANGE pRam,
                                        bool fDoAccounting, bool fFlushIemTlbs)
{
    Assert(   PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO
           || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO);
    Assert(PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
#ifdef VBOX_WITH_NATIVE_NEM
    RTHCPHYS const HCPhysPrev = PGM_PAGE_GET_HCPHYS(pPage);
#endif

    /*
     * Flush any shadow page table references *first*.
     */
    bool fFlushTLBs = false;
    int rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhysPage, pPage, true /*fFlushPTEs*/, &fFlushTLBs);
    AssertLogRelRCReturnVoid(rc);
    HMFlushTlbOnAllVCpus(pVM);

    /*
     * Make it an MMIO/Zero page.
     */
    PGM_PAGE_SET_HCPHYS(pVM, pPage, pVM->pgm.s.HCPhysZeroPg);
    PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_MMIO);
    PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
    PGM_PAGE_SET_PAGEID(pVM, pPage, NIL_GMM_PAGEID);
    PGM_PAGE_SET_HNDL_PHYS_STATE_ONLY(pPage, PGM_PAGE_HNDL_PHYS_STATE_ALL);

    /*
     * Flush its TLB entry.
     */
    pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);
    if (fFlushIemTlbs)
        IEMTlbInvalidateAllPhysicalAllCpus(pVM, NIL_VMCPUID);

    /*
     * Do accounting for pgmR3PhysRamReset.
     */
    if (fDoAccounting)
    {
        PPGMPHYSHANDLER pHandler;
        rc = pgmHandlerPhysicalLookup(pVM, GCPhysPage, &pHandler);
        if (RT_SUCCESS(rc))
        {
            Assert(pHandler->cAliasedPages > 0);
            pHandler->cAliasedPages--;
        }
        else
            AssertMsgFailed(("rc=%Rrc GCPhysPage=%RGp\n", rc, GCPhysPage));
    }

#ifdef VBOX_WITH_NATIVE_NEM
    /*
     * Tell NEM about the protection change.
     */
    if (VM_IS_NEM_ENABLED(pVM))
    {
        uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
        NEMHCNotifyPhysPageChanged(pVM, GCPhysPage, HCPhysPrev, pVM->pgm.s.HCPhysZeroPg,
                                   PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
                                   NEM_PAGE_PROT_NONE, PGMPAGETYPE_MMIO, &u2State);
        PGM_PAGE_SET_NEM_STATE(pPage, u2State);
    }
#else
    RT_NOREF(pRam);
#endif
}


/**
 * Resets ram range flags.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pCur    The physical handler.
 *
 * @remark  We don't start messing with the shadow page tables, as we've
 *          already got code in Trap0e which deals with out of sync handler
 *          flags (originally conceived for global pages).
 */
static void pgmHandlerPhysicalResetRamFlags(PVMCC pVM, PPGMPHYSHANDLER pCur)
{
    /*
     * Iterate the guest ram pages updating the state.
     */
    RTUINT       cPages   = pCur->cPages;
    RTGCPHYS     GCPhys   = pCur->Key;
    PPGMRAMRANGE pRamHint = NULL;
    for (;;)
    {
        PPGMPAGE pPage;
        int rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, &pRamHint);
        if (RT_SUCCESS(rc))
        {
            /* Reset aliased MMIO pages to MMIO, since this aliasing is our business.
               (We don't flip MMIO to RAM though, that's PGMPhys.cpp's job.) */
            bool fNemNotifiedAlready = false;
            if (   PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO
                || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO)
            {
                Assert(pCur->cAliasedPages > 0);
                pgmHandlerPhysicalResetAliasedPage(pVM, pPage, GCPhys, pRamHint, false /*fDoAccounting*/, true /*fFlushIemTlbs*/);
                pCur->cAliasedPages--;
                fNemNotifiedAlready = true;
            }
#ifdef VBOX_STRICT
            PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
            AssertMsg(pCurType && (pCurType->enmKind != PGMPHYSHANDLERKIND_MMIO || PGM_PAGE_IS_MMIO(pPage)),
                      ("%RGp %R[pgmpage]\n", GCPhys, pPage));
#endif
            PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_NONE, false);

#ifdef VBOX_WITH_NATIVE_NEM
            /* Tell NEM about the protection change. */
            if (VM_IS_NEM_ENABLED(pVM) && !fNemNotifiedAlready)
            {
                uint8_t     u2State = PGM_PAGE_GET_NEM_STATE(pPage);
                PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
                NEMHCNotifyPhysPageProtChanged(pVM, GCPhys, PGM_PAGE_GET_HCPHYS(pPage),
                                               PGM_RAMRANGE_CALC_PAGE_R3PTR(pRamHint, GCPhys),
                                               pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
                PGM_PAGE_SET_NEM_STATE(pPage, u2State);
            }
#endif
            RT_NOREF(fNemNotifiedAlready);
        }
        else
            AssertRC(rc);

        /* next */
        if (--cPages == 0)
            break;
        GCPhys += GUEST_PAGE_SIZE;
    }

    pCur->cAliasedPages = 0;
    pCur->cTmpOffPages  = 0;

    /*
     * Check for partial start and end pages.
     */
    if (pCur->Key & GUEST_PAGE_OFFSET_MASK)
        pgmHandlerPhysicalRecalcPageState(pVM, pCur->Key - 1, false /* fAbove */, &pRamHint);
    if ((pCur->KeyLast & GUEST_PAGE_OFFSET_MASK) != GUEST_PAGE_OFFSET_MASK)
        pgmHandlerPhysicalRecalcPageState(pVM, pCur->KeyLast + 1, true /* fAbove */, &pRamHint);
}


#if 0 /* unused */
/**
 * Modify a physical page access handler.
 *
 * Modification can only be done to the range itself, not the type or anything else.
 *
 * @returns VBox status code.
 *          For all return codes other than VERR_PGM_HANDLER_NOT_FOUND and VINF_SUCCESS the range is deregistered
 *          and a new registration must be performed!
 * @param   pVM             The cross context VM structure.
 * @param   GCPhysCurrent   Current location.
 * @param   GCPhys          New location.
 * @param   GCPhysLast      New last location.
 */
VMMDECL(int) PGMHandlerPhysicalModify(PVMCC pVM, RTGCPHYS GCPhysCurrent, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast)
{
    /*
     * Remove it.
     */
    int rc;
    PGM_LOCK_VOID(pVM);
    PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhysCurrent);
    if (pCur)
    {
        /*
         * Clear the ram flags. (We're gonna move or free it!)
         */
        pgmHandlerPhysicalResetRamFlags(pVM, pCur);
        PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
        /** @todo pCurType validation */
        bool const fRestoreAsRAM = pCurType->pfnHandlerR3 /** @todo this isn't entirely correct. */
                                && pCurType->enmKind != PGMPHYSHANDLERKIND_MMIO;

        /*
         * Validate the new range, modify and reinsert.
         */
        if (GCPhysLast >= GCPhys)
        {
            /*
             * We require the range to be within registered ram.
             * There is no apparent need to support ranges which cover more than one ram range.
             */
            PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
            if (   pRam
                && GCPhys <= pRam->GCPhysLast
                && GCPhysLast >= pRam->GCPhys)
            {
                pCur->Core.Key     = GCPhys;
                pCur->Core.KeyLast = GCPhysLast;
                pCur->cPages       = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + 1) >> GUEST_PAGE_SHIFT;

                if (RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pCur->Core))
                {
                    RTGCPHYS const           cb      = GCPhysLast - GCPhys + 1;
                    PGMPHYSHANDLERKIND const enmKind = pCurType->enmKind;

                    /*
                     * Set ram flags, flush shadow PT entries and finally tell REM about this.
                     */
                    rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam, NULL, 0);

                    /** @todo NEM: not sure we need this notification... */
                    NEMHCNotifyHandlerPhysicalModify(pVM, enmKind, GCPhysCurrent, GCPhys, cb, fRestoreAsRAM);

                    PGM_UNLOCK(pVM);

                    PGM_INVL_ALL_VCPU_TLBS(pVM);
                    Log(("PGMHandlerPhysicalModify: GCPhysCurrent=%RGp -> GCPhys=%RGp GCPhysLast=%RGp\n",
                         GCPhysCurrent, GCPhys, GCPhysLast));
                    return VINF_SUCCESS;
                }

                AssertMsgFailed(("Conflict! GCPhys=%RGp GCPhysLast=%RGp\n", GCPhys, GCPhysLast));
                rc = VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
            }
            else
            {
                AssertMsgFailed(("No RAM range for %RGp-%RGp\n", GCPhys, GCPhysLast));
                rc = VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
            }
        }
        else
        {
            AssertMsgFailed(("Invalid range %RGp-%RGp\n", GCPhys, GCPhysLast));
            rc = VERR_INVALID_PARAMETER;
        }

        /*
         * Invalid new location, flush the cache and free it.
         * We've only gotta notify REM and free the memory.
         */
        if (VM_IS_NEM_ENABLED(pVM))
            pgmHandlerPhysicalDeregisterNotifyNEM(pVM, pCur);
        pVM->pgm.s.pLastPhysHandlerR0 = 0;
        pVM->pgm.s.pLastPhysHandlerR3 = 0;
        PGMHandlerPhysicalTypeRelease(pVM, pCur->hType);
        MMHyperFree(pVM, pCur);
    }
    else
    {
        AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhysCurrent));
        rc = VERR_PGM_HANDLER_NOT_FOUND;
    }

    PGM_UNLOCK(pVM);
    return rc;
}
#endif /* unused */


/**
 * Changes the user callback arguments associated with a physical access handler.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      Start physical address of the handler.
 * @param   uUser       User argument to the handlers.
 */
VMMDECL(int) PGMHandlerPhysicalChangeUserArg(PVMCC pVM, RTGCPHYS GCPhys, uint64_t uUser)
{
    /*
     * Find the handler and make the change.
     */
    int rc = PGM_LOCK(pVM);
    AssertRCReturn(rc, rc);

    PPGMPHYSHANDLER pCur;
    rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
    if (RT_SUCCESS(rc))
    {
        Assert(pCur->Key == GCPhys);
        pCur->uUser = uUser;
    }
    else if (rc == VERR_NOT_FOUND)
    {
        AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys));
        rc = VERR_PGM_HANDLER_NOT_FOUND;
    }

    PGM_UNLOCK(pVM);
    return rc;
}

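/**
 * Sketch of retargeting the opaque argument without re-registering the range
 * (uNewUser and GCPhysStartOfRange are placeholders):
 *
 * @code
 *  int rc = PGMHandlerPhysicalChangeUserArg(pVM, GCPhysStartOfRange, uNewUser);
 *  AssertRC(rc);
 * @endcode
 */
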
#if 0 /* unused */

/**
 * Splits a physical access handler in two.
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   GCPhys          Start physical address of the handler.
 * @param   GCPhysSplit     The split address.
 */
VMMDECL(int) PGMHandlerPhysicalSplit(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysSplit)
{
    AssertReturn(GCPhys < GCPhysSplit, VERR_INVALID_PARAMETER);

    /*
     * Do the allocation without owning the lock.
     */
    PPGMPHYSHANDLER pNew;
    int rc = MMHyperAlloc(pVM, sizeof(*pNew), 0, MM_TAG_PGM_HANDLERS, (void **)&pNew);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Get the handler.
     */
    PGM_LOCK_VOID(pVM);
    PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
    if (RT_LIKELY(pCur))
    {
        if (RT_LIKELY(GCPhysSplit <= pCur->Core.KeyLast))
        {
            /*
             * Create new handler node for the 2nd half.
             */
            *pNew = *pCur;
            pNew->Core.Key = GCPhysSplit;
            pNew->cPages   = (pNew->Core.KeyLast - (pNew->Core.Key & X86_PTE_PAE_PG_MASK) + GUEST_PAGE_SIZE) >> GUEST_PAGE_SHIFT;

            pCur->Core.KeyLast = GCPhysSplit - 1;
            pCur->cPages       = (pCur->Core.KeyLast - (pCur->Core.Key & X86_PTE_PAE_PG_MASK) + GUEST_PAGE_SIZE) >> GUEST_PAGE_SHIFT;

            if (RT_LIKELY(RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pNew->Core)))
            {
                LogFlow(("PGMHandlerPhysicalSplit: %RGp-%RGp and %RGp-%RGp\n",
                         pCur->Core.Key, pCur->Core.KeyLast, pNew->Core.Key, pNew->Core.KeyLast));
                PGM_UNLOCK(pVM);
                return VINF_SUCCESS;
            }
            AssertMsgFailed(("whu?\n"));
            rc = VERR_PGM_PHYS_HANDLER_IPE;
        }
        else
        {
            AssertMsgFailed(("outside range: %RGp-%RGp split %RGp\n", pCur->Core.Key, pCur->Core.KeyLast, GCPhysSplit));
            rc = VERR_INVALID_PARAMETER;
        }
    }
    else
    {
        AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys));
        rc = VERR_PGM_HANDLER_NOT_FOUND;
    }
    PGM_UNLOCK(pVM);
    MMHyperFree(pVM, pNew);
    return rc;
}


/**
 * Joins up two adjacent physical access handlers which have the same callbacks.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys1     Start physical address of the first handler.
 * @param   GCPhys2     Start physical address of the second handler.
 */
VMMDECL(int) PGMHandlerPhysicalJoin(PVMCC pVM, RTGCPHYS GCPhys1, RTGCPHYS GCPhys2)
{
    /*
     * Get the handlers.
     */
    int rc;
    PGM_LOCK_VOID(pVM);
    PPGMPHYSHANDLER pCur1 = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys1);
    if (RT_LIKELY(pCur1))
    {
        PPGMPHYSHANDLER pCur2 = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys2);
        if (RT_LIKELY(pCur2))
        {
            /*
             * Make sure that they are adjacent, and that they've got the same callbacks.
             */
            if (RT_LIKELY(pCur1->Core.KeyLast + 1 == pCur2->Core.Key))
            {
                if (RT_LIKELY(pCur1->hType == pCur2->hType))
                {
                    PPGMPHYSHANDLER pCur3 = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys2);
                    if (RT_LIKELY(pCur3 == pCur2))
                    {
                        pCur1->Core.KeyLast = pCur2->Core.KeyLast;
                        pCur1->cPages       = (pCur1->Core.KeyLast - (pCur1->Core.Key & X86_PTE_PAE_PG_MASK) + GUEST_PAGE_SIZE) >> GUEST_PAGE_SHIFT;
                        LogFlow(("PGMHandlerPhysicalJoin: %RGp-%RGp %RGp-%RGp\n",
                                 pCur1->Core.Key, pCur1->Core.KeyLast, pCur2->Core.Key, pCur2->Core.KeyLast));
                        pVM->pgm.s.pLastPhysHandlerR0 = 0;
                        pVM->pgm.s.pLastPhysHandlerR3 = 0;
                        PGMHandlerPhysicalTypeRelease(pVM, pCur2->hType);
                        MMHyperFree(pVM, pCur2);
                        PGM_UNLOCK(pVM);
                        return VINF_SUCCESS;
                    }

                    Assert(pCur3 == pCur2);
                    rc = VERR_PGM_PHYS_HANDLER_IPE;
                }
                else
                {
                    AssertMsgFailed(("mismatching handlers\n"));
                    rc = VERR_ACCESS_DENIED;
                }
            }
            else
            {
                AssertMsgFailed(("not adjacent: %RGp-%RGp %RGp-%RGp\n",
                                 pCur1->Core.Key, pCur1->Core.KeyLast, pCur2->Core.Key, pCur2->Core.KeyLast));
                rc = VERR_INVALID_PARAMETER;
            }
        }
        else
        {
            AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys2));
            rc = VERR_PGM_HANDLER_NOT_FOUND;
        }
    }
    else
    {
        AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys1));
        rc = VERR_PGM_HANDLER_NOT_FOUND;
    }
    PGM_UNLOCK(pVM);
    return rc;
}

#endif /* unused */

/**
 * Resets any modifications to individual pages in a physical page access
 * handler region.
 *
 * This is used in pair with PGMHandlerPhysicalPageTempOff(),
 * PGMHandlerPhysicalPageAliasMmio2() or PGMHandlerPhysicalPageAliasHC().
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   GCPhys  The start address of the handler regions, i.e. what you
 *                  passed to PGMR3HandlerPhysicalRegister(),
 *                  PGMHandlerPhysicalRegisterEx() or
 *                  PGMHandlerPhysicalModify().
 */
VMMDECL(int) PGMHandlerPhysicalReset(PVMCC pVM, RTGCPHYS GCPhys)
{
    LogFlow(("PGMHandlerPhysicalReset GCPhys=%RGp\n", GCPhys));
    int rc = PGM_LOCK(pVM);
    AssertRCReturn(rc, rc);

    /*
     * Find the handler.
     */
    PPGMPHYSHANDLER pCur;
    rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
    if (RT_SUCCESS(rc))
    {
        Assert(pCur->Key == GCPhys);

        /*
         * Validate kind.
         */
        PCPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
        switch (pCurType->enmKind)
        {
            case PGMPHYSHANDLERKIND_WRITE:
            case PGMPHYSHANDLERKIND_ALL:
            case PGMPHYSHANDLERKIND_MMIO: /* NOTE: Only use when clearing MMIO ranges with aliased MMIO2 pages! */
            {
                STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysHandlerReset)); /** @todo move out of switch */
                PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
                Assert(pRam);
                Assert(pRam->GCPhys     <= pCur->Key);
                Assert(pRam->GCPhysLast >= pCur->KeyLast);

                if (pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO)
                {
                    /*
                     * Reset all the PGMPAGETYPE_MMIO2_ALIAS_MMIO pages first and that's it.
                     * This could probably be optimized a bit wrt to flushing, but I'm too lazy
                     * to do that now...
                     */
                    if (pCur->cAliasedPages)
                    {
                        PPGMPAGE pPage        = &pRam->aPages[(pCur->Key - pRam->GCPhys) >> GUEST_PAGE_SHIFT];
                        RTGCPHYS GCPhysPage   = pCur->Key;
                        uint32_t cLeft        = pCur->cPages;
                        bool     fFlushIemTlb = false;
                        while (cLeft-- > 0)
                        {
                            if (   PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO
                                || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO)
                            {
                                fFlushIemTlb |= PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO;
                                Assert(pCur->cAliasedPages > 0);
                                pgmHandlerPhysicalResetAliasedPage(pVM, pPage, GCPhysPage, pRam,
                                                                   false /*fDoAccounting*/, false /*fFlushIemTlbs*/);
                                --pCur->cAliasedPages;
#ifndef VBOX_STRICT
                                if (pCur->cAliasedPages == 0)
                                    break;
#endif
                            }
                            Assert(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO);
                            GCPhysPage += GUEST_PAGE_SIZE;
                            pPage++;
                        }
                        Assert(pCur->cAliasedPages == 0);

                        /*
                         * Flush IEM TLBs in case they contain any references to aliased pages.
                         * This is only necessary for MMIO2 aliases.
                         */
                        if (fFlushIemTlb)
                            IEMTlbInvalidateAllPhysicalAllCpus(pVM, NIL_VMCPUID);
                    }
                }
                else if (pCur->cTmpOffPages > 0)
                {
                    /*
                     * Set the flags and flush shadow PT entries.
                     */
                    rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam, NULL /*pvBitmap*/, 0 /*offBitmap*/);
                }

                pCur->cAliasedPages = 0;
                pCur->cTmpOffPages  = 0;

                rc = VINF_SUCCESS;
                break;
            }

            /*
             * Invalid.
             */
            default:
                AssertMsgFailed(("Invalid type %d/%#x! Corruption!\n", pCurType->enmKind, pCur->hType));
                rc = VERR_PGM_PHYS_HANDLER_IPE;
                break;
        }
    }
    else if (rc == VERR_NOT_FOUND)
    {
        AssertMsgFailed(("Didn't find MMIO Range starting at %#x\n", GCPhys));
        rc = VERR_PGM_HANDLER_NOT_FOUND;
    }

    PGM_UNLOCK(pVM);
    return rc;
}


/**
 * Special version of PGMHandlerPhysicalReset used by MMIO2 w/ dirty page
 * tracking.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      The start address of the handler region.
 * @param   pvBitmap    Dirty bitmap. Caller has cleared this already, only
 *                      dirty bits will be set. Caller also made sure it's big
 *                      enough.
 * @param   offBitmap   Dirty bitmap offset.
 * @remarks Caller must own the PGM critical section.
 */
DECLHIDDEN(int) pgmHandlerPhysicalResetMmio2WithBitmap(PVMCC pVM, RTGCPHYS GCPhys, void *pvBitmap, uint32_t offBitmap)
{
    LogFlow(("pgmHandlerPhysicalResetMmio2WithBitmap GCPhys=%RGp\n", GCPhys));
    PGM_LOCK_ASSERT_OWNER(pVM);

    /*
     * Find the handler.
     */
    PPGMPHYSHANDLER pCur;
    int rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
    if (RT_SUCCESS(rc))
    {
        Assert(pCur->Key == GCPhys);

        /*
         * Validate kind.
         */
        PCPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
        if (   pCurType
            && pCurType->enmKind == PGMPHYSHANDLERKIND_WRITE)
        {
            STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysHandlerReset));

            PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
            Assert(pRam);
            Assert(pRam->GCPhys     <= pCur->Key);
            Assert(pRam->GCPhysLast >= pCur->KeyLast);

            /*
             * Set the flags and flush shadow PT entries.
             */
            if (pCur->cTmpOffPages > 0)
            {
                rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam, pvBitmap, offBitmap);
                pCur->cTmpOffPages = 0;
            }
            else
                rc = VINF_SUCCESS;
        }
        else
        {
            AssertFailed();
            rc = VERR_WRONG_TYPE;
        }
    }
    else if (rc == VERR_NOT_FOUND)
    {
        AssertMsgFailed(("Didn't find MMIO Range starting at %#x\n", GCPhys));
        rc = VERR_PGM_HANDLER_NOT_FOUND;
    }

    return rc;
}


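/**
 * Sketch of the dirty-tracking round trip this helper serves.  The buffer
 * handling shown here is illustrative; ring-3 devices normally reach this
 * through a PGM API wrapper rather than calling it directly.
 *
 * @code
 *  RT_BZERO(pvBitmap, cbBitmap);   // caller must pre-clear the bitmap
 *  PGM_LOCK_VOID(pVM);             // caller must own the PGM critical section
 *  int rc = pgmHandlerPhysicalResetMmio2WithBitmap(pVM, GCPhysRegion, pvBitmap, 0);
 *  PGM_UNLOCK(pVM);
 * @endcode
 */
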
/**
 * Temporarily turns off the access monitoring of a page within a monitored
 * physical write/all page access handler region.
 *
 * Use this when no further \#PFs are required for that page. Be aware that
 * a page directory sync might reset the flags, and turn on access monitoring
 * for the page.
 *
 * The caller must do required page table modifications.
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   GCPhys          The start address of the access handler. This
 *                          must be a fully page aligned range or we risk
 *                          messing up other handlers installed for the
 *                          start and end pages.
 * @param   GCPhysPage      The physical address of the page to turn off
 *                          access monitoring for.
 */
VMMDECL(int) PGMHandlerPhysicalPageTempOff(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage)
{
    LogFlow(("PGMHandlerPhysicalPageTempOff GCPhysPage=%RGp\n", GCPhysPage));
    int rc = PGM_LOCK(pVM);
    AssertRCReturn(rc, rc);

    /*
     * Validate the range.
     */
    PPGMPHYSHANDLER pCur;
    rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
    if (RT_SUCCESS(rc))
    {
        Assert(pCur->Key == GCPhys);
        if (RT_LIKELY(   GCPhysPage >= pCur->Key
                      && GCPhysPage <= pCur->KeyLast))
        {
            Assert(!(pCur->Key & GUEST_PAGE_OFFSET_MASK));
            Assert((pCur->KeyLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK);

            PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
            AssertReturnStmt(   pCurType
                             && (   pCurType->enmKind == PGMPHYSHANDLERKIND_WRITE
                                 || pCurType->enmKind == PGMPHYSHANDLERKIND_ALL),
                             PGM_UNLOCK(pVM), VERR_ACCESS_DENIED);

            /*
             * Change the page status.
             */
            PPGMPAGE     pPage;
            PPGMRAMRANGE pRam;
            rc = pgmPhysGetPageAndRangeEx(pVM, GCPhysPage, &pPage, &pRam);
            AssertReturnStmt(RT_SUCCESS_NP(rc), PGM_UNLOCK(pVM), rc);
            if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_DISABLED)
            {
                PGM_PAGE_SET_HNDL_PHYS_STATE_ONLY(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
                pCur->cTmpOffPages++;

#ifdef VBOX_WITH_NATIVE_NEM
                /* Tell NEM about the protection change (VGA is using this to track dirty pages). */
                if (VM_IS_NEM_ENABLED(pVM))
                {
                    uint8_t     u2State = PGM_PAGE_GET_NEM_STATE(pPage);
                    PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
                    NEMHCNotifyPhysPageProtChanged(pVM, GCPhysPage, PGM_PAGE_GET_HCPHYS(pPage),
                                                   PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
                                                   pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
                    PGM_PAGE_SET_NEM_STATE(pPage, u2State);
                }
#endif
            }
            PGM_UNLOCK(pVM);
            return VINF_SUCCESS;
        }
        PGM_UNLOCK(pVM);
        AssertMsgFailed(("The page %#x is outside the range %#x-%#x\n", GCPhysPage, pCur->Key, pCur->KeyLast));
        return VERR_INVALID_PARAMETER;
    }
    PGM_UNLOCK(pVM);

    if (rc == VERR_NOT_FOUND)
    {
        AssertMsgFailed(("Specified physical handler start address %#x is invalid.\n", GCPhys));
        return VERR_PGM_HANDLER_NOT_FOUND;
    }
    return rc;
}


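/**
 * Sketch of the temp-off pattern (the VGA device uses it for dirty-page
 * tracking, per the NEM note above): drop monitoring for a hot page once its
 * first write has been recorded, then re-arm the whole region in one go with
 * PGMHandlerPhysicalReset().  GCPhysHandlerStart and GCPhysFaultPage are
 * placeholders.
 *
 * @code
 *  // In the write handler, once the page has been marked dirty:
 *  PGMHandlerPhysicalPageTempOff(pVM, GCPhysHandlerStart, GCPhysFaultPage);
 *
 *  // Later, when the dirty state has been consumed:
 *  PGMHandlerPhysicalReset(pVM, GCPhysHandlerStart);
 * @endcode
 */
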
/**
 * Resolves an MMIO2 page.
 *
 * Caller has taken the PGM lock.
 *
 * @returns Pointer to the page if valid, NULL otherwise.
 * @param   pVM             The cross context VM structure.
 * @param   pDevIns         The device owning it.
 * @param   hMmio2          The MMIO2 region.
 * @param   offMmio2Page    The offset into the region.
 */
static PPGMPAGE pgmPhysResolveMmio2PageLocked(PVMCC pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, RTGCPHYS offMmio2Page)
{
    /* Only works if the handle is in the handle table! */
    AssertReturn(hMmio2 != 0, NULL);
    hMmio2--;

    /* Must check the first one for PGMREGMMIO2RANGE_F_FIRST_CHUNK. */
    AssertReturn(hMmio2 < RT_ELEMENTS(pVM->pgm.s.apMmio2RangesR3), NULL);
    PPGMREGMMIO2RANGE pCur = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[hMmio2];
    AssertReturn(pCur, NULL);
    AssertReturn(pCur->fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK, NULL);

    /* Loop thru the sub-ranges till we find the one covering offMmio2Page. */
    for (;;)
    {
#ifdef IN_RING3
        AssertReturn(pCur->pDevInsR3 == pDevIns, NULL);
#else
        AssertReturn(pCur->pDevInsR3 == pDevIns->pDevInsForR3, NULL);
#endif

        /* Does it match the offset? */
        if (offMmio2Page < pCur->cbReal)
            return &pCur->RamRange.aPages[offMmio2Page >> GUEST_PAGE_SHIFT];

        /* Advance if we can. */
        AssertReturn(!(pCur->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK), NULL);
        offMmio2Page -= pCur->cbReal;
        hMmio2++;
        AssertReturn(hMmio2 < RT_ELEMENTS(pVM->pgm.s.apMmio2RangesR3), NULL);
        pCur = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[hMmio2];
        AssertReturn(pCur, NULL);
    }
}


1504/**
1505 * Replaces an MMIO page with an MMIO2 page.
1506 *
1507 * This is a worker for IOMMMIOMapMMIO2Page that works in a similar way to
1508 * PGMHandlerPhysicalPageTempOff but for an MMIO page. Since an MMIO page has no
1509 * backing, the caller must provide a replacement page. For various reasons the
1510 * replacement page must be an MMIO2 page.
1511 *
1512 * The caller must do required page table modifications. You can get away
1513 * without making any modifications since it's an MMIO page, the cost is an extra
1514 * \#PF which will the resync the page.
1515 *
1516 * Call PGMHandlerPhysicalReset() to restore the MMIO page.
1517 *
1518 * The caller may still get handler callback even after this call and must be
1519 * able to deal correctly with such calls. The reason for these callbacks are
1520 * either that we're executing in the recompiler (which doesn't know about this
1521 * arrangement) or that we've been restored from saved state (where we won't
1522 * save the change).
1523 *
1524 * @returns VBox status code.
1525 * @param pVM The cross context VM structure.
1526 * @param GCPhys The start address of the access handler. This
1527 * must be a fully page aligned range or we risk
1528 * messing up other handlers installed for the
1529 * start and end pages.
1530 * @param GCPhysPage The physical address of the page to turn off
1531 * access monitoring for and replace with the MMIO2
1532 * page.
1533 * @param pDevIns The device instance owning @a hMmio2.
1534 * @param hMmio2 Handle to the MMIO2 region containing the page
1535 * to remap in the the MMIO page at @a GCPhys.
1536 * @param offMmio2PageRemap The offset into @a hMmio2 of the MMIO2 page that
1537 * should serve as backing memory.
1538 *
1539 * @remark May cause a page pool flush if used on a page that is already
1540 * aliased.
1541 *
1542 * @note This trick does only work reliably if the two pages are never ever
1543 * mapped in the same page table. If they are the page pool code will
1544 * be confused should either of them be flushed. See the special case
1545 * of zero page aliasing mentioned in #3170.
1546 *
1547 */
1548VMMDECL(int) PGMHandlerPhysicalPageAliasMmio2(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage,
1549 PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, RTGCPHYS offMmio2PageRemap)
1550{
1551#ifdef VBOX_WITH_PGM_NEM_MODE
1552 AssertReturn(!VM_IS_NEM_ENABLED(pVM) || !pVM->pgm.s.fNemMode, VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE);
1553#endif
1554 int rc = PGM_LOCK(pVM);
1555 AssertRCReturn(rc, rc);
1556
1557 /*
1558 * Resolve the MMIO2 reference.
1559 */
1560 PPGMPAGE pPageRemap = pgmPhysResolveMmio2PageLocked(pVM, pDevIns, hMmio2, offMmio2PageRemap);
1561 if (RT_LIKELY(pPageRemap))
1562 AssertMsgReturnStmt(PGM_PAGE_GET_TYPE(pPageRemap) == PGMPAGETYPE_MMIO2,
1563 ("hMmio2=%RU64 offMmio2PageRemap=%RGp %R[pgmpage]\n", hMmio2, offMmio2PageRemap, pPageRemap),
1564 PGM_UNLOCK(pVM), VERR_PGM_PHYS_NOT_MMIO2);
1565 else
1566 {
1567 PGM_UNLOCK(pVM);
1568 return VERR_OUT_OF_RANGE;
1569 }
1570
1571 /*
1572 * Lookup and validate the range.
1573 */
1574 PPGMPHYSHANDLER pCur;
1575 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
1576 if (RT_SUCCESS(rc))
1577 {
1578 Assert(pCur->Key == GCPhys);
1579 if (RT_LIKELY( GCPhysPage >= pCur->Key
1580 && GCPhysPage <= pCur->KeyLast))
1581 {
1582 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
1583 AssertReturnStmt(pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO, PGM_UNLOCK(pVM), VERR_ACCESS_DENIED);
1584 AssertReturnStmt(!(pCur->Key & GUEST_PAGE_OFFSET_MASK), PGM_UNLOCK(pVM), VERR_INVALID_PARAMETER);
1585 AssertReturnStmt((pCur->KeyLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK,
1586 PGM_UNLOCK(pVM), VERR_INVALID_PARAMETER);
1587
1588 /*
1589 * Validate the page.
1590 */
1591 PPGMPAGE pPage;
1592 PPGMRAMRANGE pRam;
1593 rc = pgmPhysGetPageAndRangeEx(pVM, GCPhysPage, &pPage, &pRam);
1594 AssertReturnStmt(RT_SUCCESS_NP(rc), PGM_UNLOCK(pVM), rc);
1595 if (PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO)
1596 {
1597 AssertMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO,
1598 ("GCPhysPage=%RGp %R[pgmpage]\n", GCPhysPage, pPage),
1599 VERR_PGM_PHYS_NOT_MMIO2);
1600 if (PGM_PAGE_GET_HCPHYS(pPage) == PGM_PAGE_GET_HCPHYS(pPageRemap))
1601 {
1602 PGM_UNLOCK(pVM);
1603 return VINF_PGM_HANDLER_ALREADY_ALIASED;
1604 }
1605
1606 /*
1607 * The page is already mapped as some other page, reset it
1608 * to an MMIO/ZERO page before doing the new mapping.
1609 */
1610 Log(("PGMHandlerPhysicalPageAliasMmio2: GCPhysPage=%RGp (%R[pgmpage]; %RHp -> %RHp\n",
1611 GCPhysPage, pPage, PGM_PAGE_GET_HCPHYS(pPage), PGM_PAGE_GET_HCPHYS(pPageRemap)));
1612 pgmHandlerPhysicalResetAliasedPage(pVM, pPage, GCPhysPage, pRam,
1613 false /*fDoAccounting*/, false /*fFlushIemTlbs*/);
1614 pCur->cAliasedPages--;
1615
1616 /* Since this may be present in the TLB and now be wrong, invalid
1617 the guest physical address part of the IEM TLBs. Note, we do
1618 this here as we will not invalid */
1619 IEMTlbInvalidateAllPhysicalAllCpus(pVM, NIL_VMCPUID);
1620 }
1621 Assert(PGM_PAGE_IS_ZERO(pPage));
1622
1623 /*
1624 * Do the actual remapping here.
1625 * This page now serves as an alias for the backing memory specified.
1626 */
1627 LogFlow(("PGMHandlerPhysicalPageAliasMmio2: %RGp (%R[pgmpage]) alias for %RU64/%RGp (%R[pgmpage])\n",
1628 GCPhysPage, pPage, hMmio2, offMmio2PageRemap, pPageRemap ));
1629 PGM_PAGE_SET_HCPHYS(pVM, pPage, PGM_PAGE_GET_HCPHYS(pPageRemap));
1630 PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_MMIO2_ALIAS_MMIO);
1631 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
1632 PGM_PAGE_SET_PAGEID(pVM, pPage, PGM_PAGE_GET_PAGEID(pPageRemap));
1633 PGM_PAGE_SET_HNDL_PHYS_STATE_ONLY(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
1634 pCur->cAliasedPages++;
1635 Assert(pCur->cAliasedPages <= pCur->cPages);
1636
1637 /*
1638 * Flush its TLB entry.
1639 *
1640 * Not calling IEMTlbInvalidateAllPhysicalAllCpus here to conserve
1641              * all the other IEM TLB entries.  When this one is kicked out and
1642              * reloaded, it will be using the MMIO2 alias, but until then we'll
1643 * continue doing MMIO.
1644 */
1645 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);
1646             /** @todo Do some performance checks of calling
1647              *        IEMTlbInvalidateAllPhysicalAllCpus when in IEM mode, to see if it
1648              *        actually makes sense or not.  Screen updates are typically massive
1649              *        and important when this kind of aliasing is used, so it may pay off... */
1650
1651#ifdef VBOX_WITH_NATIVE_NEM
1652 /* Tell NEM about the backing and protection change. */
1653 if (VM_IS_NEM_ENABLED(pVM))
1654 {
1655 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
1656 NEMHCNotifyPhysPageChanged(pVM, GCPhysPage, pVM->pgm.s.HCPhysZeroPg, PGM_PAGE_GET_HCPHYS(pPage),
1657 PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
1658 pgmPhysPageCalcNemProtection(pPage, PGMPAGETYPE_MMIO2_ALIAS_MMIO),
1659 PGMPAGETYPE_MMIO2_ALIAS_MMIO, &u2State);
1660 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
1661 }
1662#endif
1663 LogFlow(("PGMHandlerPhysicalPageAliasMmio2: => %R[pgmpage]\n", pPage));
1664 PGM_UNLOCK(pVM);
1665 return VINF_SUCCESS;
1666 }
1667
1668 PGM_UNLOCK(pVM);
1669         AssertMsgFailed(("The page %RGp is outside the range %RGp-%RGp\n", GCPhysPage, pCur->Key, pCur->KeyLast));
1670 return VERR_INVALID_PARAMETER;
1671 }
1672
1673 PGM_UNLOCK(pVM);
1674 if (rc == VERR_NOT_FOUND)
1675 {
1676         AssertMsgFailed(("Specified physical handler start address %RGp is invalid.\n", GCPhys));
1677 return VERR_PGM_HANDLER_NOT_FOUND;
1678 }
1679 return rc;
1680}
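/*
 * Usage sketch (illustrative only, not part of the original file): a device
 * that registered an MMIO handler at GCPhysMmioBase and owns an MMIO2 region
 * could alias one page of the handler range to its MMIO2 backing roughly as
 * below.  All variable names here are assumptions.
 */
#if 0
    /* hMmio2 comes from the device's MMIO2 registration (e.g. via
       PDMDevHlpMmio2Create); offPageInMmio2 must be page aligned. */
    int rc = PGMHandlerPhysicalPageAliasMmio2(pVM, GCPhysMmioBase, GCPhysPage,
                                              pDevIns, hMmio2, offPageInMmio2);
    AssertRC(rc);
    /* PGMHandlerPhysicalReset(pVM, GCPhysMmioBase) restores the MMIO pages
       and removes all aliases in the range again. */
#endif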
1681
1682
1683/**
1684 * Replaces an MMIO page with an arbitrary HC page in the shadow page tables.
1685 *
1686 * This differs from PGMHandlerPhysicalPageAliasMmio2 in that the page doesn't
1687 * need to be a known MMIO2 page and that only shadow paging may access the
1688 * page. The latter distinction is important because the only use for this
1689 * feature is for mapping the special APIC access page that VT-x uses to detect
1690 * APIC MMIO operations; the page is shared between all guest CPUs and is
1691 * actually not written to. At least not at the moment.
1692 *
1693 * The caller must do the required page table modifications. You can get away
1694 * without making any modifications since it's an MMIO page; the cost is an
1695 * extra \#PF which will then resync the page.
1696 *
1697 * Call PGMHandlerPhysicalReset() to restore the MMIO page.
1698 *
1699 *
1700 * @returns VBox status code.
1701 * @param pVM The cross context VM structure.
1702 * @param GCPhys The start address of the access handler. This
1703 * must be a fully page aligned range or we risk
1704 * messing up other handlers installed for the
1705 * start and end pages.
1706 * @param GCPhysPage The physical address of the page to turn off
1707 * access monitoring for.
1708 * @param HCPhysPageRemap The physical address of the HC page that
1709 * serves as backing memory.
1710 *
1711 * @remark May cause a page pool flush if used on a page that is already
1712 * aliased.
1713 */
1714VMMDECL(int) PGMHandlerPhysicalPageAliasHC(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage, RTHCPHYS HCPhysPageRemap)
1715{
1716/// Assert(!IOMIsLockOwner(pVM)); /* We mustn't own any other locks when calling this */
1717#ifdef VBOX_WITH_PGM_NEM_MODE
1718 AssertReturn(!VM_IS_NEM_ENABLED(pVM) || !pVM->pgm.s.fNemMode, VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE);
1719#endif
1720 int rc = PGM_LOCK(pVM);
1721 AssertRCReturn(rc, rc);
1722
1723 /*
1724 * Lookup and validate the range.
1725 */
1726 PPGMPHYSHANDLER pCur;
1727 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
1728 if (RT_SUCCESS(rc))
1729 {
1730 Assert(pCur->Key == GCPhys);
1731 if (RT_LIKELY( GCPhysPage >= pCur->Key
1732 && GCPhysPage <= pCur->KeyLast))
1733 {
1734 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
1735 AssertReturnStmt(pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO, PGM_UNLOCK(pVM), VERR_ACCESS_DENIED);
1736 AssertReturnStmt(!(pCur->Key & GUEST_PAGE_OFFSET_MASK), PGM_UNLOCK(pVM), VERR_INVALID_PARAMETER);
1737 AssertReturnStmt((pCur->KeyLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK,
1738 PGM_UNLOCK(pVM), VERR_INVALID_PARAMETER);
1739
1740 /*
1741 * Get and validate the pages.
1742 */
1743 PPGMPAGE pPage;
1744 rc = pgmPhysGetPageEx(pVM, GCPhysPage, &pPage);
1745 AssertReturnStmt(RT_SUCCESS_NP(rc), PGM_UNLOCK(pVM), rc);
1746 if (PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO)
1747 {
1748 PGM_UNLOCK(pVM);
1749 AssertMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO,
1750 ("GCPhysPage=%RGp %R[pgmpage]\n", GCPhysPage, pPage),
1751 VERR_PGM_PHYS_NOT_MMIO2);
1752 return VINF_PGM_HANDLER_ALREADY_ALIASED;
1753 }
1754 Assert(PGM_PAGE_IS_ZERO(pPage));
1755
1756 /*
1757 * Do the actual remapping here.
1758 * This page now serves as an alias for the backing memory
1759 * specified as far as shadow paging is concerned.
1760 */
1761 LogFlow(("PGMHandlerPhysicalPageAliasHC: %RGp (%R[pgmpage]) alias for %RHp\n",
1762 GCPhysPage, pPage, HCPhysPageRemap));
1763 PGM_PAGE_SET_HCPHYS(pVM, pPage, HCPhysPageRemap);
1764 PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_SPECIAL_ALIAS_MMIO);
1765 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
1766 PGM_PAGE_SET_PAGEID(pVM, pPage, NIL_GMM_PAGEID);
1767 PGM_PAGE_SET_HNDL_PHYS_STATE_ONLY(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
1768 pCur->cAliasedPages++;
1769 Assert(pCur->cAliasedPages <= pCur->cPages);
1770
1771 /*
1772 * Flush its TLB entry.
1773 *
1774 * Not calling IEMTlbInvalidateAllPhysicalAllCpus here as special
1775 * aliased MMIO pages are handled like MMIO by the IEM TLB.
1776 */
1777 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);
1778
1779#ifdef VBOX_WITH_NATIVE_NEM
1780 /* Tell NEM about the backing and protection change. */
1781 if (VM_IS_NEM_ENABLED(pVM))
1782 {
1783 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhysPage);
1784 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
1785 NEMHCNotifyPhysPageChanged(pVM, GCPhysPage, pVM->pgm.s.HCPhysZeroPg, PGM_PAGE_GET_HCPHYS(pPage),
1786 PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
1787 pgmPhysPageCalcNemProtection(pPage, PGMPAGETYPE_SPECIAL_ALIAS_MMIO),
1788 PGMPAGETYPE_SPECIAL_ALIAS_MMIO, &u2State);
1789 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
1790 }
1791#endif
1792 LogFlow(("PGMHandlerPhysicalPageAliasHC: => %R[pgmpage]\n", pPage));
1793 PGM_UNLOCK(pVM);
1794 return VINF_SUCCESS;
1795 }
1796 PGM_UNLOCK(pVM);
1797         AssertMsgFailed(("The page %RGp is outside the range %RGp-%RGp\n", GCPhysPage, pCur->Key, pCur->KeyLast));
1798 return VERR_INVALID_PARAMETER;
1799 }
1800 PGM_UNLOCK(pVM);
1801
1802 if (rc == VERR_NOT_FOUND)
1803 {
1804         AssertMsgFailed(("Specified physical handler start address %RGp is invalid.\n", GCPhys));
1805 return VERR_PGM_HANDLER_NOT_FOUND;
1806 }
1807 return rc;
1808}
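/*
 * Usage sketch (illustrative only): mapping the shared APIC access page that
 * VT-x uses over a page of the guest's APIC MMIO range might look like this;
 * the variable names are assumptions.
 */
#if 0
    int rc = PGMHandlerPhysicalPageAliasHC(pVM,
                                           GCPhysApicBase,        /* start of the registered MMIO handler range */
                                           GCPhysApicBase,        /* the page to back with the host page */
                                           HCPhysApicAccessPage); /* host physical address of the shared page */
    AssertRC(rc);
    /* PGMHandlerPhysicalReset(pVM, GCPhysApicBase) restores the MMIO page. */
#endif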
1809
1810
1811/**
1812 * Checks if a physical range is handled.
1813 *
1814 * @returns true if the range is handled, false if not.
1815 * @param pVM The cross context VM structure.
1816 * @param GCPhys Start physical address earlier passed to PGMR3HandlerPhysicalRegister().
1817 * @remarks Caller must take the PGM lock...
1818 * @thread EMT.
1819 */
1820VMMDECL(bool) PGMHandlerPhysicalIsRegistered(PVMCC pVM, RTGCPHYS GCPhys)
1821{
1822 /*
1823 * Find the handler.
1824 */
1825 PGM_LOCK_VOID(pVM);
1826 PPGMPHYSHANDLER pCur;
1827 int rc = pgmHandlerPhysicalLookup(pVM, GCPhys, &pCur);
1828 if (RT_SUCCESS(rc))
1829 {
1830#ifdef VBOX_STRICT
1831 Assert(GCPhys >= pCur->Key && GCPhys <= pCur->KeyLast);
1832 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
1833 Assert( pCurType->enmKind == PGMPHYSHANDLERKIND_WRITE
1834 || pCurType->enmKind == PGMPHYSHANDLERKIND_ALL
1835 || pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO);
1836#endif
1837 PGM_UNLOCK(pVM);
1838 return true;
1839 }
1840 PGM_UNLOCK(pVM);
1841 return false;
1842}
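/*
 * Usage sketch (illustrative only): callers typically use this as a sanity
 * check against a range registered earlier, e.g.:
 */
#if 0
    Assert(PGMHandlerPhysicalIsRegistered(pVM, GCPhysMmioBase)); /* GCPhysMmioBase is an assumed name */
#endif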
1843
1844
1845/**
1846 * Checks if it's a disabled all access handler or write access handler at the
1847 * given address.
1848 *
1849 * @returns true if it's an all access handler, false if it's a write access
1850 * handler.
1851 * @param pVM The cross context VM structure.
1852 * @param GCPhys The address of the page with a disabled handler.
1853 *
1854 * @remarks The caller, PGMR3PhysTlbGCPhys2Ptr, must hold the PGM lock.
1855 */
1856bool pgmHandlerPhysicalIsAll(PVMCC pVM, RTGCPHYS GCPhys)
1857{
1858 PGM_LOCK_VOID(pVM);
1859 PPGMPHYSHANDLER pCur;
1860 int rc = pgmHandlerPhysicalLookup(pVM, GCPhys, &pCur);
1861 AssertRCReturnStmt(rc, PGM_UNLOCK(pVM), true);
1862
1863 /* Only whole pages can be disabled. */
1864 Assert( pCur->Key <= (GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK)
1865 && pCur->KeyLast >= (GCPhys | GUEST_PAGE_OFFSET_MASK));
1866
1867 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
1868 Assert( pCurType->enmKind == PGMPHYSHANDLERKIND_WRITE
1869 || pCurType->enmKind == PGMPHYSHANDLERKIND_ALL
1870 || pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO); /* sanity */
1871 bool const fRet = pCurType->enmKind != PGMPHYSHANDLERKIND_WRITE;
1872 PGM_UNLOCK(pVM);
1873 return fRet;
1874}
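/*
 * Usage sketch (illustrative only): a caller holding the PGM lock, such as
 * PGMR3PhysTlbGCPhys2Ptr, can use the result to decide whether reads may
 * bypass the disabled handler:
 */
#if 0
    if (pgmHandlerPhysicalIsAll(pVM, GCPhys))
    {   /* All-access handler: reads must also go through the handler. */ }
    else
    {   /* Write handler only: read access to the page is unmonitored. */ }
#endif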
1875
1876#ifdef VBOX_STRICT
1877
1878/**
1879 * State structure used by the PGMAssertHandlerAndFlagsInSync() function
1880 * and its AVL enumerators.
1881 */
1882typedef struct PGMAHAFIS
1883{
1884 /** The current physical address. */
1885 RTGCPHYS GCPhys;
1886 /** Number of errors. */
1887 unsigned cErrors;
1888 /** Pointer to the VM. */
1889 PVM pVM;
1890} PGMAHAFIS, *PPGMAHAFIS;
1891
1892
1893/**
1894 * Asserts that the handlers+guest-page-tables == ramrange-flags and
1895 * that the physical addresses associated with the handlers are correct.
1896 *
1897 * @returns Number of mismatches.
1898 * @param pVM The cross context VM structure.
1899 */
1900VMMDECL(unsigned) PGMAssertHandlerAndFlagsInSync(PVMCC pVM)
1901{
1902 PPGM pPGM = &pVM->pgm.s;
1903 PGMAHAFIS State;
1904 State.GCPhys = 0;
1905 State.cErrors = 0;
1906 State.pVM = pVM;
1907
1908 PGM_LOCK_ASSERT_OWNER(pVM);
1909
1910 /*
1911 * Check the RAM flags against the handlers.
1912 */
1913 PPGMPHYSHANDLERTREE const pPhysHandlerTree = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree;
1914 for (PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRangesX); pRam; pRam = pRam->CTX_SUFF(pNext))
1915 {
1916 const uint32_t cPages = pRam->cb >> GUEST_PAGE_SHIFT;
1917 for (uint32_t iPage = 0; iPage < cPages; iPage++)
1918 {
1919 PGMPAGE const *pPage = &pRam->aPages[iPage];
1920 if (PGM_PAGE_HAS_ANY_HANDLERS(pPage))
1921 {
1922 State.GCPhys = pRam->GCPhys + (iPage << GUEST_PAGE_SHIFT);
1923
1924 /*
1925 * Physical first - calculate the state based on the handlers
1926 * active on the page, then compare.
1927 */
1928 if (PGM_PAGE_HAS_ANY_PHYSICAL_HANDLERS(pPage))
1929 {
1930 /* the first */
1931 PPGMPHYSHANDLER pPhys;
1932 int rc = pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, State.GCPhys, &pPhys);
1933 if (rc == VERR_NOT_FOUND)
1934 {
1935 rc = pPhysHandlerTree->lookupMatchingOrAbove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator,
1936 State.GCPhys, &pPhys);
1937 if (RT_SUCCESS(rc))
1938 {
1939 Assert(pPhys->Key >= State.GCPhys);
1940 if (pPhys->Key > (State.GCPhys + GUEST_PAGE_SIZE - 1))
1941 pPhys = NULL;
1942 }
1943 else
1944 AssertLogRelMsgReturn(rc == VERR_NOT_FOUND, ("rc=%Rrc GCPhys=%RGp\n", rc, State.GCPhys), 999);
1945 }
1946 else
1947 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("rc=%Rrc GCPhys=%RGp\n", rc, State.GCPhys), 999);
1948
1949 if (pPhys)
1950 {
1951 PCPGMPHYSHANDLERTYPEINT pPhysType = pgmHandlerPhysicalTypeHandleToPtr(pVM, pPhys->hType);
1952 unsigned uState = pPhysType->uState;
1953 bool const fNotInHm = pPhysType->fNotInHm; /* whole pages, so no need to accumulate sub-page configs. */
1954
1955 /* more? */
1956 while (pPhys->KeyLast < (State.GCPhys | GUEST_PAGE_OFFSET_MASK))
1957 {
1958 PPGMPHYSHANDLER pPhys2;
1959 rc = pPhysHandlerTree->lookupMatchingOrAbove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator,
1960 pPhys->KeyLast + 1, &pPhys2);
1961 if (rc == VERR_NOT_FOUND)
1962 break;
1963 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("rc=%Rrc KeyLast+1=%RGp\n", rc, pPhys->KeyLast + 1), 999);
1964 if (pPhys2->Key > (State.GCPhys | GUEST_PAGE_OFFSET_MASK))
1965 break;
1966 PCPGMPHYSHANDLERTYPEINT pPhysType2 = pgmHandlerPhysicalTypeHandleToPtr(pVM, pPhys2->hType);
1967 uState = RT_MAX(uState, pPhysType2->uState);
1968 pPhys = pPhys2;
1969 }
1970
1971                             /* Compare. */
1972 if ( PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != uState
1973 && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_DISABLED)
1974 {
1975 AssertMsgFailed(("ram range vs phys handler flags mismatch. GCPhys=%RGp state=%d expected=%d %s\n",
1976 State.GCPhys, PGM_PAGE_GET_HNDL_PHYS_STATE(pPage), uState, pPhysType->pszDesc));
1977 State.cErrors++;
1978 }
1979 AssertMsgStmt(PGM_PAGE_IS_HNDL_PHYS_NOT_IN_HM(pPage) == fNotInHm,
1980 ("ram range vs phys handler flags mismatch. GCPhys=%RGp fNotInHm=%d, %d %s\n",
1981 State.GCPhys, PGM_PAGE_IS_HNDL_PHYS_NOT_IN_HM(pPage), fNotInHm, pPhysType->pszDesc),
1982 State.cErrors++);
1983 }
1984 else
1985 {
1986 AssertMsgFailed(("ram range vs phys handler mismatch. no handler for GCPhys=%RGp\n", State.GCPhys));
1987 State.cErrors++;
1988 }
1989 }
1990 }
1991 } /* foreach page in ram range. */
1992 } /* foreach ram range. */
1993
1994 /*
1995 * Do the reverse check for physical handlers.
1996 */
1997 /** @todo */
1998
1999 return State.cErrors;
2000}
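/*
 * Usage sketch (illustrative only): strict builds can run the consistency
 * check from a debug path while owning the PGM lock:
 */
# if 0
    PGM_LOCK_VOID(pVM);
    unsigned const cErrors = PGMAssertHandlerAndFlagsInSync(pVM);
    AssertMsg(cErrors == 0, ("%u handler/flag mismatches\n", cErrors));
    PGM_UNLOCK(pVM);
# endif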
2001
2002#endif /* VBOX_STRICT */
2003