source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp@99051

Last change on this file: r99051, checked in by vboxsync on 2023-03-19

VMM: More ARMv8 x86/amd64 separation work, VBoxVMMArm compiles and links now, bugref:10385

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 77.8 KB
1/* $Id: PGMAllHandler.cpp 99051 2023-03-19 16:40:06Z vboxsync $ */
2/** @file
3 * PGM - Page Manager / Monitor, Access Handlers.
4 */
5
6/*
7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_PGM
33#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
34#include <VBox/vmm/dbgf.h>
35#include <VBox/vmm/pgm.h>
36#include <VBox/vmm/iem.h>
37#include <VBox/vmm/iom.h>
38#include <VBox/vmm/mm.h>
39#include <VBox/vmm/em.h>
40#include <VBox/vmm/nem.h>
41#include <VBox/vmm/stam.h>
42#include <VBox/vmm/dbgf.h>
43#ifdef IN_RING0
44# include <VBox/vmm/pdmdev.h>
45#endif
46#include "PGMInternal.h"
47#include <VBox/vmm/vmcc.h>
48#include "PGMInline.h"
49
50#include <VBox/log.h>
51#include <iprt/assert.h>
52#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
53# include <iprt/asm-amd64-x86.h>
54#endif
55#include <iprt/string.h>
56#include <VBox/param.h>
57#include <VBox/err.h>
58#include <VBox/vmm/selm.h>
59
60
61/*********************************************************************************************************************************
62* Global Variables *
63*********************************************************************************************************************************/
64/** Dummy physical access handler type record. */
65CTX_SUFF(PGMPHYSHANDLERTYPEINT) const g_pgmHandlerPhysicalDummyType =
66{
67 /* .hType = */ UINT64_C(0x93b7557e1937aaff),
68 /* .enmKind = */ PGMPHYSHANDLERKIND_INVALID,
69 /* .uState = */ PGM_PAGE_HNDL_PHYS_STATE_ALL,
70 /* .fKeepPgmLock = */ true,
71 /* .fRing0DevInsIdx = */ false,
72#ifdef IN_RING0
73 /* .fNotInHm = */ false,
74 /* .pfnHandler = */ pgmR0HandlerPhysicalHandlerToRing3,
75 /* .pfnPfHandler = */ pgmR0HandlerPhysicalPfHandlerToRing3,
76#elif defined(IN_RING3)
77 /* .fRing0Enabled = */ false,
78 /* .fNotInHm = */ false,
79 /* .pfnHandler = */ pgmR3HandlerPhysicalHandlerInvalid,
80#else
81# error "unsupported context"
82#endif
83 /* .pszDesc = */ "dummy"
84};
85
86
87/*********************************************************************************************************************************
88* Internal Functions *
89*********************************************************************************************************************************/
90static int pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVMCC pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam,
91 void *pvBitmap, uint32_t offBitmap);
92static void pgmHandlerPhysicalDeregisterNotifyNEM(PVMCC pVM, PPGMPHYSHANDLER pCur);
93static void pgmHandlerPhysicalResetRamFlags(PVMCC pVM, PPGMPHYSHANDLER pCur);
94
95
96#ifndef IN_RING3
97
98/**
99 * @callback_method_impl{FNPGMPHYSHANDLER,
100 * Dummy for forcing ring-3 handling of the access.}
101 */
102DECLCALLBACK(VBOXSTRICTRC)
103pgmR0HandlerPhysicalHandlerToRing3(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
104 PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, uint64_t uUser)
105{
106 RT_NOREF(pVM, pVCpu, GCPhys, pvPhys, pvBuf, cbBuf, enmAccessType, enmOrigin, uUser);
107 return VINF_EM_RAW_EMULATE_INSTR;
108}
109
110
111/**
112 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
113 * Dummy for forcing ring-3 handling of the access.}
114 */
115DECLCALLBACK(VBOXSTRICTRC)
116pgmR0HandlerPhysicalPfHandlerToRing3(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErrorCode, PCPUMCTX pCtx,
117 RTGCPTR pvFault, RTGCPHYS GCPhysFault, uint64_t uUser)
118{
119 RT_NOREF(pVM, pVCpu, uErrorCode, pCtx, pvFault, GCPhysFault, uUser);
120 return VINF_EM_RAW_EMULATE_INSTR;
121}
122
123#endif /* !IN_RING3 */
124
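/*
 * For orientation, a device write handler matching the FNPGMPHYSHANDLER signature
 * used above might look like the sketch below: it records which guest page was
 * written and returns VINF_PGM_HANDLER_DO_DEFAULT so PGM carries out the write
 * itself.  The MYDEVDIRTY structure and the use of uUser as a pointer are
 * assumptions made for this sketch only, not anything prescribed by PGM.
 */
#if 0 /* illustrative sketch only */
typedef struct MYDEVDIRTY
{
    /** One bit per guest page in the monitored range (at most 64 pages in this sketch). */
    uint64_t    bmDirty;
    /** First guest physical address of the monitored range. */
    RTGCPHYS    GCPhysFirst;
} MYDEVDIRTY;

/** @callback_method_impl{FNPGMPHYSHANDLER, Example dirty-page tracker (sketch).} */
static DECLCALLBACK(VBOXSTRICTRC)
mydevPhysWriteHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
                      PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, uint64_t uUser)
{
    MYDEVDIRTY *pDirty = (MYDEVDIRTY *)(uintptr_t)uUser;  /* uUser chosen at registration time. */
    Assert(enmAccessType == PGMACCESSTYPE_WRITE);
    ASMBitSet(&pDirty->bmDirty, (int32_t)((GCPhys - pDirty->GCPhysFirst) >> GUEST_PAGE_SHIFT));
    RT_NOREF(pVM, pVCpu, pvPhys, pvBuf, cbBuf, enmAccessType, enmOrigin);
    return VINF_PGM_HANDLER_DO_DEFAULT; /* let PGM perform the actual write */
}
#endif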
125
126/**
127 * Creates a physical access handler, allocation part.
128 *
129 * @returns VBox status code.
130 * @retval VERR_OUT_OF_RESOURCES if no more handlers available.
131 *
132 * @param pVM The cross context VM structure.
133 * @param hType The handler type registration handle.
134 * @param uUser User argument to the handlers (not pointer).
135 * @param pszDesc Description of this handler. If NULL, the type
136 * description will be used instead.
137 * @param ppPhysHandler Where to return the access handler structure on
138 * success.
139 */
140int pgmHandlerPhysicalExCreate(PVMCC pVM, PGMPHYSHANDLERTYPE hType, uint64_t uUser,
141 R3PTRTYPE(const char *) pszDesc, PPGMPHYSHANDLER *ppPhysHandler)
142{
143 /*
144 * Validate input.
145 */
146 PCPGMPHYSHANDLERTYPEINT const pType = pgmHandlerPhysicalTypeHandleToPtr(pVM, hType);
147 AssertReturn(pType, VERR_INVALID_HANDLE);
148 AssertReturn(pType->enmKind > PGMPHYSHANDLERKIND_INVALID && pType->enmKind < PGMPHYSHANDLERKIND_END, VERR_INVALID_HANDLE);
149 AssertPtr(ppPhysHandler);
150
151 Log(("pgmHandlerPhysicalExCreate: uUser=%#RX64 hType=%#x (%d, %s) pszDesc=%RHv:%s\n",
152 uUser, hType, pType->enmKind, pType->pszDesc, pszDesc, R3STRING(pszDesc)));
153
154 /*
155 * Allocate and initialize the new entry.
156 */
157 int rc = PGM_LOCK(pVM);
158 AssertRCReturn(rc, rc);
159
160 PPGMPHYSHANDLER pNew = pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator.allocateNode();
161 if (pNew)
162 {
163 pNew->Key = NIL_RTGCPHYS;
164 pNew->KeyLast = NIL_RTGCPHYS;
165 pNew->cPages = 0;
166 pNew->cAliasedPages = 0;
167 pNew->cTmpOffPages = 0;
168 pNew->uUser = uUser;
169 pNew->hType = hType;
170 pNew->pszDesc = pszDesc != NIL_RTR3PTR ? pszDesc
171#ifdef IN_RING3
172 : pType->pszDesc;
173#else
174 : pVM->pgm.s.aPhysHandlerTypes[hType & PGMPHYSHANDLERTYPE_IDX_MASK].pszDesc;
175#endif
176
177 PGM_UNLOCK(pVM);
178 *ppPhysHandler = pNew;
179 return VINF_SUCCESS;
180 }
181
182 PGM_UNLOCK(pVM);
183 return VERR_OUT_OF_RESOURCES;
184}
185
186
187/**
188 * Duplicates a physical access handler.
189 *
190 * @returns VBox status code.
191 * @retval VINF_SUCCESS when successfully installed.
192 *
193 * @param pVM The cross context VM structure.
194 * @param pPhysHandlerSrc The source handler to duplicate
195 * @param ppPhysHandler Where to return the access handler structure on
196 * success.
197 */
198int pgmHandlerPhysicalExDup(PVMCC pVM, PPGMPHYSHANDLER pPhysHandlerSrc, PPGMPHYSHANDLER *ppPhysHandler)
199{
200 return pgmHandlerPhysicalExCreate(pVM, pPhysHandlerSrc->hType, pPhysHandlerSrc->uUser,
201 pPhysHandlerSrc->pszDesc, ppPhysHandler);
202}
203
204
205/**
206 * Register an access handler for a physical range.
207 *
208 * @returns VBox status code.
209 * @retval VINF_SUCCESS when successfully installed.
210 * @retval VINF_PGM_GCPHYS_ALIASED could be returned.
211 *
212 * @param pVM The cross context VM structure.
213 * @param pPhysHandler The physical handler.
214 * @param GCPhys Start physical address.
215 * @param GCPhysLast Last physical address. (inclusive)
216 */
217int pgmHandlerPhysicalExRegister(PVMCC pVM, PPGMPHYSHANDLER pPhysHandler, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast)
218{
219 /*
220 * Validate input.
221 */
222 AssertReturn(pPhysHandler, VERR_INVALID_POINTER);
223 PGMPHYSHANDLERTYPE const hType = pPhysHandler->hType;
224 PCPGMPHYSHANDLERTYPEINT const pType = pgmHandlerPhysicalTypeHandleToPtr(pVM, hType);
225 AssertReturn(pType, VERR_INVALID_HANDLE);
226 AssertReturn(pType->enmKind > PGMPHYSHANDLERKIND_INVALID && pType->enmKind < PGMPHYSHANDLERKIND_END, VERR_INVALID_HANDLE);
227
228 AssertPtr(pPhysHandler);
229
230 Log(("pgmHandlerPhysicalExRegister: GCPhys=%RGp GCPhysLast=%RGp hType=%#x (%d, %s) pszDesc=%RHv:%s\n", GCPhys, GCPhysLast,
231 hType, pType->enmKind, pType->pszDesc, pPhysHandler->pszDesc, R3STRING(pPhysHandler->pszDesc)));
232 AssertReturn(pPhysHandler->Key == NIL_RTGCPHYS, VERR_WRONG_ORDER);
233
234 AssertMsgReturn(GCPhys < GCPhysLast, ("GCPhys >= GCPhysLast (%#x >= %#x)\n", GCPhys, GCPhysLast), VERR_INVALID_PARAMETER);
235 Assert(GCPhysLast - GCPhys < _4G); /* ASSUMPTION in PGMAllPhys.cpp */
236
237 switch (pType->enmKind)
238 {
239 case PGMPHYSHANDLERKIND_WRITE:
240 if (!pType->fNotInHm)
241 break;
242 RT_FALL_THRU(); /* Simplification: fNotInHm can only be used with full pages */
243 case PGMPHYSHANDLERKIND_MMIO:
244 case PGMPHYSHANDLERKIND_ALL:
245 /* Simplification for PGMPhysRead, PGMR0Trap0eHandlerNPMisconfig and others: Full pages. */
246 AssertMsgReturn(!(GCPhys & GUEST_PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_INVALID_PARAMETER);
247 AssertMsgReturn((GCPhysLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK, ("%RGp\n", GCPhysLast), VERR_INVALID_PARAMETER);
248 break;
249 default:
250 AssertMsgFailed(("Invalid input enmKind=%d!\n", pType->enmKind));
251 return VERR_INVALID_PARAMETER;
252 }
253
254 /*
255 * We require the range to be within registered ram.
256 * There is no apparent need to support ranges which cover more than one ram range.
257 */
258 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
259 if ( !pRam
260 || GCPhysLast > pRam->GCPhysLast)
261 {
262#ifdef IN_RING3
263 DBGFR3Info(pVM->pUVM, "phys", NULL, NULL);
264#endif
265 AssertMsgFailed(("No RAM range for %RGp-%RGp\n", GCPhys, GCPhysLast));
266 return VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
267 }
268 Assert(GCPhys >= pRam->GCPhys && GCPhys < pRam->GCPhysLast);
269 Assert(GCPhysLast <= pRam->GCPhysLast && GCPhysLast >= pRam->GCPhys);
270
271 /*
272 * Try insert into list.
273 */
274 pPhysHandler->Key = GCPhys;
275 pPhysHandler->KeyLast = GCPhysLast;
276 pPhysHandler->cPages = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + GUEST_PAGE_SIZE) >> GUEST_PAGE_SHIFT;
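    /* Worked example (illustrative, assuming 4 KiB guest pages): GCPhys=0x000a0500,
       GCPhysLast=0x000a2fff gives (0xa2fff - 0xa0000 + 0x1000) >> 12 = 3, i.e. the
       three pages at 0xa0000, 0xa1000 and 0xa2000 are counted even though GCPhys
       itself is not page aligned. */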
277
278 int rc = PGM_LOCK(pVM);
279 if (RT_SUCCESS(rc))
280 {
281 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->insert(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, pPhysHandler);
282 if (RT_SUCCESS(rc))
283 {
284 rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pPhysHandler, pRam, NULL /*pvBitmap*/, 0 /*offBitmap*/);
285 if (rc == VINF_PGM_SYNC_CR3)
286 rc = VINF_PGM_GCPHYS_ALIASED;
287
288#if defined(IN_RING3) || defined(IN_RING0)
289 NEMHCNotifyHandlerPhysicalRegister(pVM, pType->enmKind, GCPhys, GCPhysLast - GCPhys + 1);
290#endif
291 PGM_UNLOCK(pVM);
292
293 if (rc != VINF_SUCCESS)
294 Log(("PGMHandlerPhysicalRegisterEx: returns %Rrc (%RGp-%RGp)\n", rc, GCPhys, GCPhysLast));
295 return rc;
296 }
297 PGM_UNLOCK(pVM);
298 }
299
300 pPhysHandler->Key = NIL_RTGCPHYS;
301 pPhysHandler->KeyLast = NIL_RTGCPHYS;
302
303 AssertMsgReturn(rc == VERR_ALREADY_EXISTS, ("%Rrc GCPhys=%RGp GCPhysLast=%RGp\n", rc, GCPhys, GCPhysLast), rc);
304
305#if defined(IN_RING3) && defined(VBOX_STRICT)
306 DBGFR3Info(pVM->pUVM, "handlers", "phys nostats", NULL);
307#endif
308 AssertMsgFailed(("Conflict! GCPhys=%RGp GCPhysLast=%RGp pszDesc=%s/%s\n",
309 GCPhys, GCPhysLast, R3STRING(pPhysHandler->pszDesc), R3STRING(pType->pszDesc)));
310 return VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
311}
312
313
314/**
315 * Register an access handler for a physical range.
316 *
317 * @returns VBox status code.
318 * @retval VINF_SUCCESS when successfully installed.
319 * @retval VINF_PGM_GCPHYS_ALIASED when the shadow PTs couldn't be fully updated because
320 * the guest page is aliased and/or mapped by multiple PTs. A CR3 sync has been
321 * flagged together with a pool clearing.
322 * @retval VERR_PGM_HANDLER_PHYSICAL_CONFLICT if the range conflicts with an existing
323 * one. A debug assertion is raised.
324 *
325 * @param pVM The cross context VM structure.
326 * @param GCPhys Start physical address.
327 * @param GCPhysLast Last physical address. (inclusive)
328 * @param hType The handler type registration handle.
329 * @param uUser User argument to the handler.
330 * @param pszDesc Description of this handler. If NULL, the type
331 * description will be used instead.
332 */
333VMMDECL(int) PGMHandlerPhysicalRegister(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast, PGMPHYSHANDLERTYPE hType,
334 uint64_t uUser, R3PTRTYPE(const char *) pszDesc)
335{
336#ifdef LOG_ENABLED
337 PCPGMPHYSHANDLERTYPEINT pType = pgmHandlerPhysicalTypeHandleToPtr(pVM, hType);
338 Log(("PGMHandlerPhysicalRegister: GCPhys=%RGp GCPhysLast=%RGp uUser=%#RX64 hType=%#x (%d, %s) pszDesc=%RHv:%s\n",
339 GCPhys, GCPhysLast, uUser, hType, pType->enmKind, R3STRING(pType->pszDesc), pszDesc, R3STRING(pszDesc)));
340#endif
341
342 PPGMPHYSHANDLER pNew;
343 int rc = pgmHandlerPhysicalExCreate(pVM, hType, uUser, pszDesc, &pNew);
344 if (RT_SUCCESS(rc))
345 {
346 rc = pgmHandlerPhysicalExRegister(pVM, pNew, GCPhys, GCPhysLast);
347 if (RT_SUCCESS(rc))
348 return rc;
349 pgmHandlerPhysicalExDestroy(pVM, pNew);
350 }
351 return rc;
352}
353
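/*
 * Usage sketch (illustrative, not part of this file): registering a write handler
 * over a page-aligned guest-physical range via the public API above.  The type
 * handle hMyWriteType is assumed to have been obtained from the ring-3 type
 * registration API beforehand, and MYDEVDIRTY is the hypothetical context
 * structure from the handler sketch earlier in this listing.
 */
#if 0 /* illustrative sketch only */
static int mydevInstallWriteHandler(PVMCC pVM, MYDEVDIRTY *pMyDirty, PGMPHYSHANDLERTYPE hMyWriteType)
{
    RTGCPHYS const GCPhysFirst = pMyDirty->GCPhysFirst;         /* page aligned */
    RTGCPHYS const GCPhysLast  = GCPhysFirst + _64K - 1;        /* last byte, inclusive */
    Assert(!(GCPhysFirst & GUEST_PAGE_OFFSET_MASK));
    return PGMHandlerPhysicalRegister(pVM, GCPhysFirst, GCPhysLast, hMyWriteType,
                                      (uintptr_t)pMyDirty /*uUser*/, "MyDev dirty tracking");
}
#endif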
354
355/**
356 * Sets ram range flags and attempts updating shadow PTs.
357 *
358 * @returns VBox status code.
359 * @retval VINF_SUCCESS when the shadow PTs were successfully updated.
360 * @retval VINF_PGM_SYNC_CR3 when the shadow PTs couldn't be fully updated because
361 * the guest page is aliased and/or mapped by multiple PTs. FFs set.
362 * @param pVM The cross context VM structure.
363 * @param pCur The physical handler.
364 * @param pRam The RAM range.
365 * @param pvBitmap Dirty bitmap. Optional.
366 * @param offBitmap Dirty bitmap offset.
367 */
368static int pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVMCC pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam,
369 void *pvBitmap, uint32_t offBitmap)
370{
371 /*
372 * Iterate the guest ram pages updating the flags and flushing PT entries
373 * mapping the page.
374 */
375 bool fFlushTLBs = false;
376 int rc = VINF_SUCCESS;
377 PCPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
378 const unsigned uState = pCurType->uState;
379 uint32_t cPages = pCur->cPages;
380 uint32_t i = (pCur->Key - pRam->GCPhys) >> GUEST_PAGE_SHIFT;
381 for (;;)
382 {
383 PPGMPAGE pPage = &pRam->aPages[i];
384 AssertMsg(pCurType->enmKind != PGMPHYSHANDLERKIND_MMIO || PGM_PAGE_IS_MMIO(pPage),
385 ("%RGp %R[pgmpage]\n", pRam->GCPhys + (i << GUEST_PAGE_SHIFT), pPage));
386
387 /* Only do upgrades. */
388 if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) < uState)
389 {
390 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, uState, pCurType->fNotInHm);
391
392 const RTGCPHYS GCPhysPage = pRam->GCPhys + (i << GUEST_PAGE_SHIFT);
393 int rc2 = pgmPoolTrackUpdateGCPhys(pVM, GCPhysPage, pPage,
394 false /* allow updates of PTEs (instead of flushing) */, &fFlushTLBs);
395 if (rc2 != VINF_SUCCESS && rc == VINF_SUCCESS)
396 rc = rc2;
397
398#ifdef VBOX_WITH_NATIVE_NEM
399 /* Tell NEM about the protection update. */
400 if (VM_IS_NEM_ENABLED(pVM))
401 {
402 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
403 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
404 NEMHCNotifyPhysPageProtChanged(pVM, GCPhysPage, PGM_PAGE_GET_HCPHYS(pPage),
405 PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
406 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
407 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
408 }
409#endif
410 if (pvBitmap)
411 ASMBitSet(pvBitmap, offBitmap);
412 }
413
414 /* next */
415 if (--cPages == 0)
416 break;
417 i++;
418 offBitmap++;
419 }
420
421 if (fFlushTLBs)
422 {
423 PGM_INVL_ALL_VCPU_TLBS(pVM);
424 Log(("pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs: flushing guest TLBs; rc=%d\n", rc));
425 }
426 else
427 Log(("pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs: doesn't flush guest TLBs. rc=%Rrc; sync flags=%x VMCPU_FF_PGM_SYNC_CR3=%d\n", rc, VMMGetCpu(pVM)->pgm.s.fSyncFlags, VMCPU_FF_IS_SET(VMMGetCpu(pVM), VMCPU_FF_PGM_SYNC_CR3)));
428
429 return rc;
430}
431
432
433/**
434 * Deregister a physical page access handler.
435 *
436 * @returns VBox status code.
437 * @param pVM The cross context VM structure.
438 * @param pPhysHandler The handler to deregister (but not free).
439 */
440int pgmHandlerPhysicalExDeregister(PVMCC pVM, PPGMPHYSHANDLER pPhysHandler)
441{
442 LogFlow(("pgmHandlerPhysicalExDeregister: Removing Range %RGp-%RGp %s\n",
443 pPhysHandler->Key, pPhysHandler->KeyLast, R3STRING(pPhysHandler->pszDesc)));
444
445 int rc = PGM_LOCK(pVM);
446 AssertRCReturn(rc, rc);
447
448 RTGCPHYS const GCPhys = pPhysHandler->Key;
449 AssertReturnStmt(GCPhys != NIL_RTGCPHYS, PGM_UNLOCK(pVM), VERR_PGM_HANDLER_NOT_FOUND);
450
451 /*
452 * Remove the handler from the tree.
453 */
454
455 PPGMPHYSHANDLER pRemoved;
456 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->remove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pRemoved);
457 if (RT_SUCCESS(rc))
458 {
459 if (pRemoved == pPhysHandler)
460 {
461 /*
462 * Clear the page bits, notify the REM about this change and clear
463 * the cache.
464 */
465 pgmHandlerPhysicalResetRamFlags(pVM, pPhysHandler);
466 if (VM_IS_NEM_ENABLED(pVM))
467 pgmHandlerPhysicalDeregisterNotifyNEM(pVM, pPhysHandler);
468 pVM->pgm.s.idxLastPhysHandler = 0;
469
470 pPhysHandler->Key = NIL_RTGCPHYS;
471 pPhysHandler->KeyLast = NIL_RTGCPHYS;
472
473 PGM_UNLOCK(pVM);
474
475 return VINF_SUCCESS;
476 }
477
478 /*
479 * Both of the failure conditions here are considered internal processing
480 * errors because they can only be caused by race conditions or corruption.
481 * If we ever need to handle concurrent deregistration, we have to move
482 * the NIL_RTGCPHYS check inside the PGM lock.
483 */
484 pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->insert(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, pRemoved);
485 }
486
487 PGM_UNLOCK(pVM);
488
489 if (RT_FAILURE(rc))
490 AssertMsgFailed(("Didn't find range starting at %RGp in the tree! rc=%Rrc\n", GCPhys, rc));
491 else
492 AssertMsgFailed(("Found different handle at %RGp in the tree: got %p instead of %p\n",
493 GCPhys, pRemoved, pPhysHandler));
494 return VERR_PGM_HANDLER_IPE_1;
495}
496
497
498/**
499 * Destroys (frees) a physical handler.
500 *
501 * The caller must deregister it before destroying it!
502 *
503 * @returns VBox status code.
504 * @param pVM The cross context VM structure.
505 * @param pHandler The handler to free. NULL if ignored.
506 */
507int pgmHandlerPhysicalExDestroy(PVMCC pVM, PPGMPHYSHANDLER pHandler)
508{
509 if (pHandler)
510 {
511 AssertPtr(pHandler);
512 AssertReturn(pHandler->Key == NIL_RTGCPHYS, VERR_WRONG_ORDER);
513
514 int rc = PGM_LOCK(pVM);
515 if (RT_SUCCESS(rc))
516 {
517 rc = pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator.freeNode(pHandler);
518 PGM_UNLOCK(pVM);
519 }
520 return rc;
521 }
522 return VINF_SUCCESS;
523}
524
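/*
 * The pgmHandlerPhysicalEx* functions above form a small lifecycle for internal
 * callers: allocate a handler node, register it over a range, and on teardown
 * deregister it before freeing it.  A condensed sketch (names are illustrative):
 */
#if 0 /* illustrative sketch only */
static int mydevExampleExInstall(PVMCC pVM, PGMPHYSHANDLERTYPE hType, uint64_t uUser,
                                 R3PTRTYPE(const char *) pszDesc, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast)
{
    PPGMPHYSHANDLER pHandler;
    int rc = pgmHandlerPhysicalExCreate(pVM, hType, uUser, pszDesc, &pHandler);
    if (RT_SUCCESS(rc))
    {
        rc = pgmHandlerPhysicalExRegister(pVM, pHandler, GCPhys, GCPhysLast);
        if (RT_FAILURE(rc))
            pgmHandlerPhysicalExDestroy(pVM, pHandler);   /* never registered, so freeing is safe */
    }
    return rc;
}

static int mydevExampleExTeardown(PVMCC pVM, PPGMPHYSHANDLER pHandler)
{
    int rc = pgmHandlerPhysicalExDeregister(pVM, pHandler); /* deregister before destroying */
    if (RT_SUCCESS(rc))
        rc = pgmHandlerPhysicalExDestroy(pVM, pHandler);
    return rc;
}
#endif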
525
526/**
527 * Deregister a physical page access handler.
528 *
529 * @returns VBox status code.
530 * @param pVM The cross context VM structure.
531 * @param GCPhys Start physical address.
532 */
533VMMDECL(int) PGMHandlerPhysicalDeregister(PVMCC pVM, RTGCPHYS GCPhys)
534{
535 AssertReturn(pVM->VMCC_CTX(pgm).s.pPhysHandlerTree, VERR_PGM_HANDLER_IPE_1);
536
537 /*
538 * Find the handler.
539 */
540 int rc = PGM_LOCK(pVM);
541 AssertRCReturn(rc, rc);
542
543 PPGMPHYSHANDLER pRemoved;
544 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->remove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pRemoved);
545 if (RT_SUCCESS(rc))
546 {
547 Assert(pRemoved->Key == GCPhys);
548 LogFlow(("PGMHandlerPhysicalDeregister: Removing Range %RGp-%RGp %s\n",
549 pRemoved->Key, pRemoved->KeyLast, R3STRING(pRemoved->pszDesc)));
550
551 /*
552 * Clear the page bits, notify the REM about this change and clear
553 * the cache.
554 */
555 pgmHandlerPhysicalResetRamFlags(pVM, pRemoved);
556 if (VM_IS_NEM_ENABLED(pVM))
557 pgmHandlerPhysicalDeregisterNotifyNEM(pVM, pRemoved);
558 pVM->pgm.s.idxLastPhysHandler = 0;
559
560 pRemoved->Key = NIL_RTGCPHYS;
561 rc = pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator.freeNode(pRemoved);
562
563 PGM_UNLOCK(pVM);
564 return rc;
565 }
566
567 PGM_UNLOCK(pVM);
568
569 if (rc == VERR_NOT_FOUND)
570 {
571 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys));
572 rc = VERR_PGM_HANDLER_NOT_FOUND;
573 }
574 return rc;
575}
576
577
578/**
579 * Shared code with modify.
580 */
581static void pgmHandlerPhysicalDeregisterNotifyNEM(PVMCC pVM, PPGMPHYSHANDLER pCur)
582{
583#ifdef VBOX_WITH_NATIVE_NEM
584 PCPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
585 RTGCPHYS GCPhysStart = pCur->Key;
586 RTGCPHYS GCPhysLast = pCur->KeyLast;
587
588 /*
589 * Page align the range.
590 *
591 * Since we've reset (recalculated) the physical handler state of all pages
592 * we can make use of the page states to figure out whether a page should be
593 * included in the REM notification or not.
594 */
595 if ( (pCur->Key & GUEST_PAGE_OFFSET_MASK)
596 || ((pCur->KeyLast + 1) & GUEST_PAGE_OFFSET_MASK))
597 {
598 Assert(pCurType->enmKind != PGMPHYSHANDLERKIND_MMIO);
599
600 if (GCPhysStart & GUEST_PAGE_OFFSET_MASK)
601 {
602 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhysStart);
603 if ( pPage
604 && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
605 {
606 RTGCPHYS GCPhys = (GCPhysStart + (GUEST_PAGE_SIZE - 1)) & X86_PTE_PAE_PG_MASK;
607 if ( GCPhys > GCPhysLast
608 || GCPhys < GCPhysStart)
609 return;
610 GCPhysStart = GCPhys;
611 }
612 else
613 GCPhysStart &= X86_PTE_PAE_PG_MASK;
614 Assert(!pPage || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO); /* these are page aligned atm! */
615 }
616
617 if (GCPhysLast & GUEST_PAGE_OFFSET_MASK)
618 {
619 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhysLast);
620 if ( pPage
621 && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
622 {
623 RTGCPHYS GCPhys = (GCPhysLast & X86_PTE_PAE_PG_MASK) - 1;
624 if ( GCPhys < GCPhysStart
625 || GCPhys > GCPhysLast)
626 return;
627 GCPhysLast = GCPhys;
628 }
629 else
630 GCPhysLast |= GUEST_PAGE_OFFSET_MASK;
631 Assert(!pPage || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO); /* these are page aligned atm! */
632 }
633 }
634
635 /*
636 * Tell NEM.
637 */
638 PPGMRAMRANGE const pRam = pgmPhysGetRange(pVM, GCPhysStart);
639 RTGCPHYS const cb = GCPhysLast - GCPhysStart + 1;
640 uint8_t u2State = UINT8_MAX;
641 NEMHCNotifyHandlerPhysicalDeregister(pVM, pCurType->enmKind, GCPhysStart, cb,
642 pRam ? PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysStart) : NULL, &u2State);
643 if (u2State != UINT8_MAX && pRam)
644 pgmPhysSetNemStateForPages(&pRam->aPages[(GCPhysStart - pRam->GCPhys) >> GUEST_PAGE_SHIFT],
645 cb >> GUEST_PAGE_SHIFT, u2State);
646#else
647 RT_NOREF(pVM, pCur);
648#endif
649}
650
651
652/**
653 * pgmHandlerPhysicalResetRamFlags helper that checks for other handlers on
654 * edge pages.
655 */
656DECLINLINE(void) pgmHandlerPhysicalRecalcPageState(PVMCC pVM, RTGCPHYS GCPhys, bool fAbove, PPGMRAMRANGE *ppRamHint)
657{
658 /*
659 * Look for other handlers.
660 */
661 unsigned uState = PGM_PAGE_HNDL_PHYS_STATE_NONE;
662 for (;;)
663 {
664 PPGMPHYSHANDLER pCur;
665 int rc;
666 if (fAbove)
667 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookupMatchingOrAbove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator,
668 GCPhys, &pCur);
669 else
670 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookupMatchingOrBelow(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator,
671 GCPhys, &pCur);
672 if (rc == VERR_NOT_FOUND)
673 break;
674 AssertRCBreak(rc);
675 if (((fAbove ? pCur->Key : pCur->KeyLast) >> GUEST_PAGE_SHIFT) != (GCPhys >> GUEST_PAGE_SHIFT))
676 break;
677 PCPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
678 uState = RT_MAX(uState, pCurType->uState);
679
680 /* next? */
681 RTGCPHYS GCPhysNext = fAbove
682 ? pCur->KeyLast + 1
683 : pCur->Key - 1;
684 if ((GCPhysNext >> GUEST_PAGE_SHIFT) != (GCPhys >> GUEST_PAGE_SHIFT))
685 break;
686 GCPhys = GCPhysNext;
687 }
688
689 /*
690 * Update if we found something that is a higher priority state than the current.
691 * Note! The PGMPHYSHANDLER_F_NOT_IN_HM can be ignored here as it requires whole pages.
692 */
693 if (uState != PGM_PAGE_HNDL_PHYS_STATE_NONE)
694 {
695 PPGMPAGE pPage;
696 int rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, ppRamHint);
697 if ( RT_SUCCESS(rc)
698 && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) < uState)
699 {
700 /* This should normally not be necessary. */
701 PGM_PAGE_SET_HNDL_PHYS_STATE_ONLY(pPage, uState);
702 bool fFlushTLBs;
703 rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhys, pPage, false /*fFlushPTEs*/, &fFlushTLBs);
704 if (RT_SUCCESS(rc) && fFlushTLBs)
705 PGM_INVL_ALL_VCPU_TLBS(pVM);
706 else
707 AssertRC(rc);
708
709#ifdef VBOX_WITH_NATIVE_NEM
710 /* Tell NEM about the protection update. */
711 if (VM_IS_NEM_ENABLED(pVM))
712 {
713 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
714 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
715 NEMHCNotifyPhysPageProtChanged(pVM, GCPhys, PGM_PAGE_GET_HCPHYS(pPage),
716 PGM_RAMRANGE_CALC_PAGE_R3PTR(*ppRamHint, GCPhys),
717 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
718 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
719 }
720#endif
721 }
722 else
723 AssertRC(rc);
724 }
725}
726
727
728/**
729 * Resets an aliased page.
730 *
731 * @param pVM The cross context VM structure.
732 * @param pPage The page.
733 * @param GCPhysPage The page address in case it comes in handy.
734 * @param pRam The RAM range the page is associated with (for NEM
735 * notifications).
736 * @param fDoAccounting Whether to perform accounting. (Only set during
737 * reset where pgmR3PhysRamReset doesn't have the
738 * handler structure handy.)
739 * @param fFlushIemTlbs Whether to perform IEM TLB flushing or not. This
740 * can be cleared only if the caller does the flushing
741 * after calling this function.
742 */
743void pgmHandlerPhysicalResetAliasedPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhysPage, PPGMRAMRANGE pRam,
744 bool fDoAccounting, bool fFlushIemTlbs)
745{
746 Assert( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO
747 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO);
748 Assert(PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
749#ifdef VBOX_WITH_NATIVE_NEM
750 RTHCPHYS const HCPhysPrev = PGM_PAGE_GET_HCPHYS(pPage);
751#endif
752
753 /*
754 * Flush any shadow page table references *first*.
755 */
756 bool fFlushTLBs = false;
757 int rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhysPage, pPage, true /*fFlushPTEs*/, &fFlushTLBs);
758 AssertLogRelRCReturnVoid(rc);
759#if defined(VBOX_VMM_TARGET_ARMV8)
760 AssertReleaseFailed();
761#else
762 HMFlushTlbOnAllVCpus(pVM);
763#endif
764
765 /*
766 * Make it an MMIO/Zero page.
767 */
768 PGM_PAGE_SET_HCPHYS(pVM, pPage, pVM->pgm.s.HCPhysZeroPg);
769 PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_MMIO);
770 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
771 PGM_PAGE_SET_PAGEID(pVM, pPage, NIL_GMM_PAGEID);
772 PGM_PAGE_SET_HNDL_PHYS_STATE_ONLY(pPage, PGM_PAGE_HNDL_PHYS_STATE_ALL);
773
774 /*
775 * Flush its TLB entry.
776 */
777 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);
778 if (fFlushIemTlbs)
779 IEMTlbInvalidateAllPhysicalAllCpus(pVM, NIL_VMCPUID);
780
781 /*
782 * Do accounting for pgmR3PhysRamReset.
783 */
784 if (fDoAccounting)
785 {
786 PPGMPHYSHANDLER pHandler;
787 rc = pgmHandlerPhysicalLookup(pVM, GCPhysPage, &pHandler);
788 if (RT_SUCCESS(rc))
789 {
790 Assert(pHandler->cAliasedPages > 0);
791 pHandler->cAliasedPages--;
792 }
793 else
794 AssertMsgFailed(("rc=%Rrc GCPhysPage=%RGp\n", rc, GCPhysPage));
795 }
796
797#ifdef VBOX_WITH_NATIVE_NEM
798 /*
799 * Tell NEM about the protection change.
800 */
801 if (VM_IS_NEM_ENABLED(pVM))
802 {
803 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
804 NEMHCNotifyPhysPageChanged(pVM, GCPhysPage, HCPhysPrev, pVM->pgm.s.HCPhysZeroPg,
805 PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
806 NEM_PAGE_PROT_NONE, PGMPAGETYPE_MMIO, &u2State);
807 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
808 }
809#else
810 RT_NOREF(pRam);
811#endif
812}
813
814
815/**
816 * Resets ram range flags.
817 *
818 * @returns VBox status code.
819 * @retval VINF_SUCCESS when the shadow PTs were successfully updated.
820 * @param pVM The cross context VM structure.
821 * @param pCur The physical handler.
822 *
823 * @remark We don't start messing with the shadow page tables, as we've
824 * already got code in Trap0e which deals with out of sync handler
825 * flags (originally conceived for global pages).
826 */
827static void pgmHandlerPhysicalResetRamFlags(PVMCC pVM, PPGMPHYSHANDLER pCur)
828{
829 /*
830 * Iterate the guest ram pages updating the state.
831 */
832 RTUINT cPages = pCur->cPages;
833 RTGCPHYS GCPhys = pCur->Key;
834 PPGMRAMRANGE pRamHint = NULL;
835 for (;;)
836 {
837 PPGMPAGE pPage;
838 int rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, &pRamHint);
839 if (RT_SUCCESS(rc))
840 {
841 /* Reset aliased MMIO pages to MMIO, since this aliasing is our business.
842 (We don't flip MMIO to RAM though, that's PGMPhys.cpp's job.) */
843 bool fNemNotifiedAlready = false;
844 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO
845 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO)
846 {
847 Assert(pCur->cAliasedPages > 0);
848 pgmHandlerPhysicalResetAliasedPage(pVM, pPage, GCPhys, pRamHint, false /*fDoAccounting*/, true /*fFlushIemTlbs*/);
849 pCur->cAliasedPages--;
850 fNemNotifiedAlready = true;
851 }
852#ifdef VBOX_STRICT
853 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
854 AssertMsg(pCurType && (pCurType->enmKind != PGMPHYSHANDLERKIND_MMIO || PGM_PAGE_IS_MMIO(pPage)),
855 ("%RGp %R[pgmpage]\n", GCPhys, pPage));
856#endif
857 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_NONE, false);
858
859#ifdef VBOX_WITH_NATIVE_NEM
860 /* Tell NEM about the protection change. */
861 if (VM_IS_NEM_ENABLED(pVM) && !fNemNotifiedAlready)
862 {
863 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
864 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
865 NEMHCNotifyPhysPageProtChanged(pVM, GCPhys, PGM_PAGE_GET_HCPHYS(pPage),
866 PGM_RAMRANGE_CALC_PAGE_R3PTR(pRamHint, GCPhys),
867 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
868 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
869 }
870#endif
871 RT_NOREF(fNemNotifiedAlready);
872 }
873 else
874 AssertRC(rc);
875
876 /* next */
877 if (--cPages == 0)
878 break;
879 GCPhys += GUEST_PAGE_SIZE;
880 }
881
882 pCur->cAliasedPages = 0;
883 pCur->cTmpOffPages = 0;
884
885 /*
886 * Check for partial start and end pages.
887 */
888 if (pCur->Key & GUEST_PAGE_OFFSET_MASK)
889 pgmHandlerPhysicalRecalcPageState(pVM, pCur->Key - 1, false /* fAbove */, &pRamHint);
890 if ((pCur->KeyLast & GUEST_PAGE_OFFSET_MASK) != GUEST_PAGE_OFFSET_MASK)
891 pgmHandlerPhysicalRecalcPageState(pVM, pCur->KeyLast + 1, true /* fAbove */, &pRamHint);
892}
893
894
895#if 0 /* unused */
896/**
897 * Modify a physical page access handler.
898 *
899 * Modification can only be done to the range itself, not the type or anything else.
900 *
901 * @returns VBox status code.
902 * For all return codes other than VERR_PGM_HANDLER_NOT_FOUND and VINF_SUCCESS the range is deregistered
903 * and a new registration must be performed!
904 * @param pVM The cross context VM structure.
905 * @param GCPhysCurrent Current location.
906 * @param GCPhys New location.
907 * @param GCPhysLast New last location.
908 */
909VMMDECL(int) PGMHandlerPhysicalModify(PVMCC pVM, RTGCPHYS GCPhysCurrent, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast)
910{
911 /*
912 * Remove it.
913 */
914 int rc;
915 PGM_LOCK_VOID(pVM);
916 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhysCurrent);
917 if (pCur)
918 {
919 /*
920 * Clear the ram flags. (We're gonna move or free it!)
921 */
922 pgmHandlerPhysicalResetRamFlags(pVM, pCur);
923 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
924 @todo pCurType validation
925 bool const fRestoreAsRAM = pCurType->pfnHandlerR3 /** @todo this isn't entirely correct. */
926 && pCurType->enmKind != PGMPHYSHANDLERKIND_MMIO;
927
928 /*
929 * Validate the new range, modify and reinsert.
930 */
931 if (GCPhysLast >= GCPhys)
932 {
933 /*
934 * We require the range to be within registered ram.
935 * There is no apparent need to support ranges which cover more than one ram range.
936 */
937 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
938 if ( pRam
939 && GCPhys <= pRam->GCPhysLast
940 && GCPhysLast >= pRam->GCPhys)
941 {
942 pCur->Core.Key = GCPhys;
943 pCur->Core.KeyLast = GCPhysLast;
944 pCur->cPages = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + 1) >> GUEST_PAGE_SHIFT;
945
946 if (RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pCur->Core))
947 {
948 RTGCPHYS const cb = GCPhysLast - GCPhys + 1;
949 PGMPHYSHANDLERKIND const enmKind = pCurType->enmKind;
950
951 /*
952 * Set ram flags, flush shadow PT entries and finally tell REM about this.
953 */
954 rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam, NULL, 0);
955
956 /** @todo NEM: not sure we need this notification... */
957 NEMHCNotifyHandlerPhysicalModify(pVM, enmKind, GCPhysCurrent, GCPhys, cb, fRestoreAsRAM);
958
959 PGM_UNLOCK(pVM);
960
961 PGM_INVL_ALL_VCPU_TLBS(pVM);
962 Log(("PGMHandlerPhysicalModify: GCPhysCurrent=%RGp -> GCPhys=%RGp GCPhysLast=%RGp\n",
963 GCPhysCurrent, GCPhys, GCPhysLast));
964 return VINF_SUCCESS;
965 }
966
967 AssertMsgFailed(("Conflict! GCPhys=%RGp GCPhysLast=%RGp\n", GCPhys, GCPhysLast));
968 rc = VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
969 }
970 else
971 {
972 AssertMsgFailed(("No RAM range for %RGp-%RGp\n", GCPhys, GCPhysLast));
973 rc = VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
974 }
975 }
976 else
977 {
978 AssertMsgFailed(("Invalid range %RGp-%RGp\n", GCPhys, GCPhysLast));
979 rc = VERR_INVALID_PARAMETER;
980 }
981
982 /*
983 * Invalid new location, flush the cache and free it.
984 * We've only gotta notify REM and free the memory.
985 */
986 if (VM_IS_NEM_ENABLED(pVM))
987 pgmHandlerPhysicalDeregisterNotifyNEM(pVM, pCur);
988 pVM->pgm.s.pLastPhysHandlerR0 = 0;
989 pVM->pgm.s.pLastPhysHandlerR3 = 0;
990 PGMHandlerPhysicalTypeRelease(pVM, pCur->hType);
991 MMHyperFree(pVM, pCur);
992 }
993 else
994 {
995 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhysCurrent));
996 rc = VERR_PGM_HANDLER_NOT_FOUND;
997 }
998
999 PGM_UNLOCK(pVM);
1000 return rc;
1001}
1002#endif /* unused */
1003
1004
1005/**
1006 * Changes the user callback arguments associated with a physical access handler.
1007 *
1008 * @returns VBox status code.
1009 * @param pVM The cross context VM structure.
1010 * @param GCPhys Start physical address of the handler.
1011 * @param uUser User argument to the handlers.
1012 */
1013VMMDECL(int) PGMHandlerPhysicalChangeUserArg(PVMCC pVM, RTGCPHYS GCPhys, uint64_t uUser)
1014{
1015 /*
1016 * Find the handler and make the change.
1017 */
1018 int rc = PGM_LOCK(pVM);
1019 AssertRCReturn(rc, rc);
1020
1021 PPGMPHYSHANDLER pCur;
1022 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
1023 if (RT_SUCCESS(rc))
1024 {
1025 Assert(pCur->Key == GCPhys);
1026 pCur->uUser = uUser;
1027 }
1028 else if (rc == VERR_NOT_FOUND)
1029 {
1030 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys));
1031 rc = VERR_PGM_HANDLER_NOT_FOUND;
1032 }
1033
1034 PGM_UNLOCK(pVM);
1035 return rc;
1036}
1037
1038#if 0 /* unused */
1039
1040/**
1041 * Splits a physical access handler in two.
1042 *
1043 * @returns VBox status code.
1044 * @param pVM The cross context VM structure.
1045 * @param GCPhys Start physical address of the handler.
1046 * @param GCPhysSplit The split address.
1047 */
1048VMMDECL(int) PGMHandlerPhysicalSplit(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysSplit)
1049{
1050 AssertReturn(GCPhys < GCPhysSplit, VERR_INVALID_PARAMETER);
1051
1052 /*
1053 * Do the allocation without owning the lock.
1054 */
1055 PPGMPHYSHANDLER pNew;
1056 int rc = MMHyperAlloc(pVM, sizeof(*pNew), 0, MM_TAG_PGM_HANDLERS, (void **)&pNew);
1057 if (RT_FAILURE(rc))
1058 return rc;
1059
1060 /*
1061 * Get the handler.
1062 */
1063 PGM_LOCK_VOID(pVM);
1064 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1065 if (RT_LIKELY(pCur))
1066 {
1067 if (RT_LIKELY(GCPhysSplit <= pCur->Core.KeyLast))
1068 {
1069 /*
1070 * Create new handler node for the 2nd half.
1071 */
1072 *pNew = *pCur;
1073 pNew->Core.Key = GCPhysSplit;
1074 pNew->cPages = (pNew->Core.KeyLast - (pNew->Core.Key & X86_PTE_PAE_PG_MASK) + GUEST_PAGE_SIZE) >> GUEST_PAGE_SHIFT;
1075
1076 pCur->Core.KeyLast = GCPhysSplit - 1;
1077 pCur->cPages = (pCur->Core.KeyLast - (pCur->Core.Key & X86_PTE_PAE_PG_MASK) + GUEST_PAGE_SIZE) >> GUEST_PAGE_SHIFT;
1078
1079 if (RT_LIKELY(RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pNew->Core)))
1080 {
1081 LogFlow(("PGMHandlerPhysicalSplit: %RGp-%RGp and %RGp-%RGp\n",
1082 pCur->Core.Key, pCur->Core.KeyLast, pNew->Core.Key, pNew->Core.KeyLast));
1083 PGM_UNLOCK(pVM);
1084 return VINF_SUCCESS;
1085 }
1086 AssertMsgFailed(("whu?\n"));
1087 rc = VERR_PGM_PHYS_HANDLER_IPE;
1088 }
1089 else
1090 {
1091 AssertMsgFailed(("outside range: %RGp-%RGp split %RGp\n", pCur->Core.Key, pCur->Core.KeyLast, GCPhysSplit));
1092 rc = VERR_INVALID_PARAMETER;
1093 }
1094 }
1095 else
1096 {
1097 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys));
1098 rc = VERR_PGM_HANDLER_NOT_FOUND;
1099 }
1100 PGM_UNLOCK(pVM);
1101 MMHyperFree(pVM, pNew);
1102 return rc;
1103}
1104
1105
1106/**
1107 * Joins up two adjacent physical access handlers which have the same callbacks.
1108 *
1109 * @returns VBox status code.
1110 * @param pVM The cross context VM structure.
1111 * @param GCPhys1 Start physical address of the first handler.
1112 * @param GCPhys2 Start physical address of the second handler.
1113 */
1114VMMDECL(int) PGMHandlerPhysicalJoin(PVMCC pVM, RTGCPHYS GCPhys1, RTGCPHYS GCPhys2)
1115{
1116 /*
1117 * Get the handlers.
1118 */
1119 int rc;
1120 PGM_LOCK_VOID(pVM);
1121 PPGMPHYSHANDLER pCur1 = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys1);
1122 if (RT_LIKELY(pCur1))
1123 {
1124 PPGMPHYSHANDLER pCur2 = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys2);
1125 if (RT_LIKELY(pCur2))
1126 {
1127 /*
1128 * Make sure that they are adjacent, and that they've got the same callbacks.
1129 */
1130 if (RT_LIKELY(pCur1->Core.KeyLast + 1 == pCur2->Core.Key))
1131 {
1132 if (RT_LIKELY(pCur1->hType == pCur2->hType))
1133 {
1134 PPGMPHYSHANDLER pCur3 = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys2);
1135 if (RT_LIKELY(pCur3 == pCur2))
1136 {
1137 pCur1->Core.KeyLast = pCur2->Core.KeyLast;
1138 pCur1->cPages = (pCur1->Core.KeyLast - (pCur1->Core.Key & X86_PTE_PAE_PG_MASK) + GUEST_PAGE_SIZE) >> GUEST_PAGE_SHIFT;
1139 LogFlow(("PGMHandlerPhysicalJoin: %RGp-%RGp %RGp-%RGp\n",
1140 pCur1->Core.Key, pCur1->Core.KeyLast, pCur2->Core.Key, pCur2->Core.KeyLast));
1141 pVM->pgm.s.pLastPhysHandlerR0 = 0;
1142 pVM->pgm.s.pLastPhysHandlerR3 = 0;
1143 PGMHandlerPhysicalTypeRelease(pVM, pCur2->hType);
1144 MMHyperFree(pVM, pCur2);
1145 PGM_UNLOCK(pVM);
1146 return VINF_SUCCESS;
1147 }
1148
1149 Assert(pCur3 == pCur2);
1150 rc = VERR_PGM_PHYS_HANDLER_IPE;
1151 }
1152 else
1153 {
1154 AssertMsgFailed(("mismatching handlers\n"));
1155 rc = VERR_ACCESS_DENIED;
1156 }
1157 }
1158 else
1159 {
1160 AssertMsgFailed(("not adjacent: %RGp-%RGp %RGp-%RGp\n",
1161 pCur1->Core.Key, pCur1->Core.KeyLast, pCur2->Core.Key, pCur2->Core.KeyLast));
1162 rc = VERR_INVALID_PARAMETER;
1163 }
1164 }
1165 else
1166 {
1167 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys2));
1168 rc = VERR_PGM_HANDLER_NOT_FOUND;
1169 }
1170 }
1171 else
1172 {
1173 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys1));
1174 rc = VERR_PGM_HANDLER_NOT_FOUND;
1175 }
1176 PGM_UNLOCK(pVM);
1177 return rc;
1178
1179}
1180
1181#endif /* unused */
1182
1183/**
1184 * Resets any modifications to individual pages in a physical page access
1185 * handler region.
1186 *
1187 * This is used together with PGMHandlerPhysicalPageTempOff(),
1188 * PGMHandlerPhysicalPageAliasMmio2() or PGMHandlerPhysicalPageAliasHC().
1189 *
1190 * @returns VBox status code.
1191 * @param pVM The cross context VM structure.
1192 * @param GCPhys The start address of the handler regions, i.e. what you
1193 * passed to PGMR3HandlerPhysicalRegister(),
1194 * PGMHandlerPhysicalRegisterEx() or
1195 * PGMHandlerPhysicalModify().
1196 */
1197VMMDECL(int) PGMHandlerPhysicalReset(PVMCC pVM, RTGCPHYS GCPhys)
1198{
1199 LogFlow(("PGMHandlerPhysicalReset GCPhys=%RGp\n", GCPhys));
1200 int rc = PGM_LOCK(pVM);
1201 AssertRCReturn(rc, rc);
1202
1203 /*
1204 * Find the handler.
1205 */
1206 PPGMPHYSHANDLER pCur;
1207 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
1208 if (RT_SUCCESS(rc))
1209 {
1210 Assert(pCur->Key == GCPhys);
1211
1212 /*
1213 * Validate kind.
1214 */
1215 PCPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
1216 switch (pCurType->enmKind)
1217 {
1218 case PGMPHYSHANDLERKIND_WRITE:
1219 case PGMPHYSHANDLERKIND_ALL:
1220 case PGMPHYSHANDLERKIND_MMIO: /* NOTE: Only use when clearing MMIO ranges with aliased MMIO2 pages! */
1221 {
1222 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysHandlerReset)); /** @todo move out of switch */
1223 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
1224 Assert(pRam);
1225 Assert(pRam->GCPhys <= pCur->Key);
1226 Assert(pRam->GCPhysLast >= pCur->KeyLast);
1227
1228 if (pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO)
1229 {
1230 /*
1231 * Reset all the PGMPAGETYPE_MMIO2_ALIAS_MMIO pages first and that's it.
1232 * This could probably be optimized a bit wrt flushing, but I'm too lazy
1233 * to do that now...
1234 */
1235 if (pCur->cAliasedPages)
1236 {
1237 PPGMPAGE pPage = &pRam->aPages[(pCur->Key - pRam->GCPhys) >> GUEST_PAGE_SHIFT];
1238 RTGCPHYS GCPhysPage = pCur->Key;
1239 uint32_t cLeft = pCur->cPages;
1240 bool fFlushIemTlb = false;
1241 while (cLeft-- > 0)
1242 {
1243 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO
1244 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO)
1245 {
1246 fFlushIemTlb |= PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO;
1247 Assert(pCur->cAliasedPages > 0);
1248 pgmHandlerPhysicalResetAliasedPage(pVM, pPage, GCPhysPage, pRam,
1249 false /*fDoAccounting*/, false /*fFlushIemTlbs*/);
1250 --pCur->cAliasedPages;
1251#ifndef VBOX_STRICT
1252 if (pCur->cAliasedPages == 0)
1253 break;
1254#endif
1255 }
1256 Assert(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO);
1257 GCPhysPage += GUEST_PAGE_SIZE;
1258 pPage++;
1259 }
1260 Assert(pCur->cAliasedPages == 0);
1261
1262 /*
1263 * Flush IEM TLBs in case they contain any references to aliased pages.
1264 * This is only necessary for MMIO2 aliases.
1265 */
1266 if (fFlushIemTlb)
1267 IEMTlbInvalidateAllPhysicalAllCpus(pVM, NIL_VMCPUID);
1268 }
1269 }
1270 else if (pCur->cTmpOffPages > 0)
1271 {
1272 /*
1273 * Set the flags and flush shadow PT entries.
1274 */
1275 rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam, NULL /*pvBitmap*/, 0 /*offBitmap*/);
1276 }
1277
1278 pCur->cAliasedPages = 0;
1279 pCur->cTmpOffPages = 0;
1280
1281 rc = VINF_SUCCESS;
1282 break;
1283 }
1284
1285 /*
1286 * Invalid.
1287 */
1288 default:
1289 AssertMsgFailed(("Invalid type %d/%#x! Corruption!\n", pCurType->enmKind, pCur->hType));
1290 rc = VERR_PGM_PHYS_HANDLER_IPE;
1291 break;
1292 }
1293 }
1294 else if (rc == VERR_NOT_FOUND)
1295 {
1296 AssertMsgFailed(("Didn't find MMIO Range starting at %#x\n", GCPhys));
1297 rc = VERR_PGM_HANDLER_NOT_FOUND;
1298 }
1299
1300 PGM_UNLOCK(pVM);
1301 return rc;
1302}
1303
1304
1305/**
1306 * Special version of PGMHandlerPhysicalReset used by MMIO2 w/ dirty page
1307 * tracking.
1308 *
1309 * @returns VBox status code.
1310 * @param pVM The cross context VM structure.
1311 * @param GCPhys The start address of the handler region.
1312 * @param pvBitmap Dirty bitmap. Caller has cleared this already, only
1313 * dirty bits will be set. Caller also made sure it's big
1314 * enough.
1315 * @param offBitmap Dirty bitmap offset.
1316 * @remarks Caller must own the PGM critical section.
1317 */
1318DECLHIDDEN(int) pgmHandlerPhysicalResetMmio2WithBitmap(PVMCC pVM, RTGCPHYS GCPhys, void *pvBitmap, uint32_t offBitmap)
1319{
1320 LogFlow(("pgmHandlerPhysicalResetMmio2WithBitmap GCPhys=%RGp\n", GCPhys));
1321 PGM_LOCK_ASSERT_OWNER(pVM);
1322
1323 /*
1324 * Find the handler.
1325 */
1326 PPGMPHYSHANDLER pCur;
1327 int rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
1328 if (RT_SUCCESS(rc))
1329 {
1330 Assert(pCur->Key == GCPhys);
1331
1332 /*
1333 * Validate kind.
1334 */
1335 PCPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
1336 if ( pCurType
1337 && pCurType->enmKind == PGMPHYSHANDLERKIND_WRITE)
1338 {
1339 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysHandlerReset));
1340
1341 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
1342 Assert(pRam);
1343 Assert(pRam->GCPhys <= pCur->Key);
1344 Assert(pRam->GCPhysLast >= pCur->KeyLast);
1345
1346 /*
1347 * Set the flags and flush shadow PT entries.
1348 */
1349 if (pCur->cTmpOffPages > 0)
1350 {
1351 rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam, pvBitmap, offBitmap);
1352 pCur->cTmpOffPages = 0;
1353 }
1354 else
1355 rc = VINF_SUCCESS;
1356 }
1357 else
1358 {
1359 AssertFailed();
1360 rc = VERR_WRONG_TYPE;
1361 }
1362 }
1363 else if (rc == VERR_NOT_FOUND)
1364 {
1365 AssertMsgFailed(("Didn't find MMIO Range starting at %#x\n", GCPhys));
1366 rc = VERR_PGM_HANDLER_NOT_FOUND;
1367 }
1368
1369 return rc;
1370}
1371
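/*
 * A simplified sketch of the bitmap contract described above: one bit per guest
 * page in the handler range, cleared by the caller beforehand so only dirty bits
 * get set.  Real callers go through the higher-level MMIO2 dirty-bitmap APIs;
 * this merely illustrates sizing, clearing and lock ownership (all names are
 * illustrative).
 */
#if 0 /* illustrative sketch only */
static int mydevExampleHarvestDirtyBitmap(PVMCC pVM, RTGCPHYS GCPhys, uint32_t cPages)
{
    uint32_t const cbBitmap = RT_ALIGN_32(cPages, 64) / 8;        /* round up to whole uint64_t words */
    uint64_t      *pbmDirty = (uint64_t *)RTMemAllocZ(cbBitmap);  /* cleared: only dirty bits get set */
    if (!pbmDirty)
        return VERR_NO_MEMORY;

    PGM_LOCK_VOID(pVM);                                           /* the function requires the PGM lock */
    int rc = pgmHandlerPhysicalResetMmio2WithBitmap(pVM, GCPhys, pbmDirty, 0 /*offBitmap*/);
    PGM_UNLOCK(pVM);

    /* ... scan pbmDirty and copy out the pages marked dirty ... */
    RTMemFree(pbmDirty);
    return rc;
}
#endif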
1372
1373/**
1374 * Temporarily turns off the access monitoring of a page within a monitored
1375 * physical write/all page access handler region.
1376 *
1377 * Use this when no further \#PFs are required for that page. Be aware that
1378 * a page directory sync might reset the flags, and turn on access monitoring
1379 * for the page.
1380 *
1381 * The caller must do required page table modifications.
1382 *
1383 * @returns VBox status code.
1384 * @param pVM The cross context VM structure.
1385 * @param GCPhys The start address of the access handler. This
1386 * must be a fully page aligned range or we risk
1387 * messing up other handlers installed for the
1388 * start and end pages.
1389 * @param GCPhysPage The physical address of the page to turn off
1390 * access monitoring for.
1391 */
1392VMMDECL(int) PGMHandlerPhysicalPageTempOff(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage)
1393{
1394 LogFlow(("PGMHandlerPhysicalPageTempOff GCPhysPage=%RGp\n", GCPhysPage));
1395 int rc = PGM_LOCK(pVM);
1396 AssertRCReturn(rc, rc);
1397
1398 /*
1399 * Validate the range.
1400 */
1401 PPGMPHYSHANDLER pCur;
1402 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
1403 if (RT_SUCCESS(rc))
1404 {
1405 Assert(pCur->Key == GCPhys);
1406 if (RT_LIKELY( GCPhysPage >= pCur->Key
1407 && GCPhysPage <= pCur->KeyLast))
1408 {
1409 Assert(!(pCur->Key & GUEST_PAGE_OFFSET_MASK));
1410 Assert((pCur->KeyLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK);
1411
1412 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
1413 AssertReturnStmt( pCurType
1414 && ( pCurType->enmKind == PGMPHYSHANDLERKIND_WRITE
1415 || pCurType->enmKind == PGMPHYSHANDLERKIND_ALL),
1416 PGM_UNLOCK(pVM), VERR_ACCESS_DENIED);
1417
1418 /*
1419 * Change the page status.
1420 */
1421 PPGMPAGE pPage;
1422 PPGMRAMRANGE pRam;
1423 rc = pgmPhysGetPageAndRangeEx(pVM, GCPhysPage, &pPage, &pRam);
1424 AssertReturnStmt(RT_SUCCESS_NP(rc), PGM_UNLOCK(pVM), rc);
1425 if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_DISABLED)
1426 {
1427 PGM_PAGE_SET_HNDL_PHYS_STATE_ONLY(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
1428 pCur->cTmpOffPages++;
1429
1430#ifdef VBOX_WITH_NATIVE_NEM
1431 /* Tell NEM about the protection change (VGA is using this to track dirty pages). */
1432 if (VM_IS_NEM_ENABLED(pVM))
1433 {
1434 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
1435 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
1436 NEMHCNotifyPhysPageProtChanged(pVM, GCPhysPage, PGM_PAGE_GET_HCPHYS(pPage),
1437 PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
1438 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
1439 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
1440 }
1441#endif
1442 }
1443 PGM_UNLOCK(pVM);
1444 return VINF_SUCCESS;
1445 }
1446 PGM_UNLOCK(pVM);
1447 AssertMsgFailed(("The page %#x is outside the range %#x-%#x\n", GCPhysPage, pCur->Key, pCur->KeyLast));
1448 return VERR_INVALID_PARAMETER;
1449 }
1450 PGM_UNLOCK(pVM);
1451
1452 if (rc == VERR_NOT_FOUND)
1453 {
1454 AssertMsgFailed(("Specified physical handler start address %#x is invalid.\n", GCPhys));
1455 return VERR_PGM_HANDLER_NOT_FOUND;
1456 }
1457 return rc;
1458}
1459
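/*
 * A sketch of the documented pairing between PGMHandlerPhysicalPageTempOff() and
 * PGMHandlerPhysicalReset(): once a page has been written and the device has taken
 * note, interception of just that page can be switched off until the next reset
 * re-arms the whole range.  GCPhysRegion and GCPhysHotPage are illustrative; the
 * region is assumed to have been registered with a write/all handler earlier.
 */
#if 0 /* illustrative sketch only */
static void mydevExampleTempOffAndRearm(PVMCC pVM, RTGCPHYS GCPhysRegion, RTGCPHYS GCPhysHotPage)
{
    /* After the first write to GCPhysHotPage has been seen and recorded, stop
       intercepting further writes to just that page: */
    int rc = PGMHandlerPhysicalPageTempOff(pVM, GCPhysRegion, GCPhysHotPage);
    AssertRC(rc);

    /* Later, when a new dirty-tracking pass starts, re-arm the whole range: */
    rc = PGMHandlerPhysicalReset(pVM, GCPhysRegion);
    AssertRC(rc);
}
#endif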
1460
1461/**
1462 * Resolves an MMIO2 page.
1463 *
1464 * Caller has taken the PGM lock.
1465 *
1466 * @returns Pointer to the page if valid, NULL otherwise
1467 * @param pVM The cross context VM structure.
1468 * @param pDevIns The device owning it.
1469 * @param hMmio2 The MMIO2 region.
1470 * @param offMmio2Page The offset into the region.
1471 */
1472static PPGMPAGE pgmPhysResolveMmio2PageLocked(PVMCC pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, RTGCPHYS offMmio2Page)
1473{
1474 /* Only works if the handle is in the handle table! */
1475 AssertReturn(hMmio2 != 0, NULL);
1476 hMmio2--;
1477
1478 /* Must check the first one for PGMREGMMIO2RANGE_F_FIRST_CHUNK. */
1479 AssertReturn(hMmio2 < RT_ELEMENTS(pVM->pgm.s.apMmio2RangesR3), NULL);
1480 PPGMREGMMIO2RANGE pCur = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[hMmio2];
1481 AssertReturn(pCur, NULL);
1482 AssertReturn(pCur->fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK, NULL);
1483
1484 /* Loop thru the sub-ranges till we find the one covering offMmio2. */
1485 for (;;)
1486 {
1487#ifdef IN_RING3
1488 AssertReturn(pCur->pDevInsR3 == pDevIns, NULL);
1489#else
1490 AssertReturn(pCur->pDevInsR3 == pDevIns->pDevInsForR3, NULL);
1491#endif
1492
1493 /* Does it match the offset? */
1494 if (offMmio2Page < pCur->cbReal)
1495 return &pCur->RamRange.aPages[offMmio2Page >> GUEST_PAGE_SHIFT];
1496
1497 /* Advance if we can. */
1498 AssertReturn(!(pCur->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK), NULL);
1499 offMmio2Page -= pCur->cbReal;
1500 hMmio2++;
1501 AssertReturn(hMmio2 < RT_ELEMENTS(pVM->pgm.s.apMmio2RangesR3), NULL);
1502 pCur = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[hMmio2];
1503 AssertReturn(pCur, NULL);
1504 }
1505}
1506
1507
1508/**
1509 * Replaces an MMIO page with an MMIO2 page.
1510 *
1511 * This is a worker for IOMMMIOMapMMIO2Page that works in a similar way to
1512 * PGMHandlerPhysicalPageTempOff but for an MMIO page. Since an MMIO page has no
1513 * backing, the caller must provide a replacement page. For various reasons the
1514 * replacement page must be an MMIO2 page.
1515 *
1516 * The caller must do required page table modifications. You can get away
1517 * without making any modifications since it's an MMIO page; the cost is an extra
1518 * \#PF which will then resync the page.
1519 *
1520 * Call PGMHandlerPhysicalReset() to restore the MMIO page.
1521 *
1522 * The caller may still get handler callbacks even after this call and must be
1523 * able to deal correctly with such calls. The reasons for these callbacks are
1524 * either that we're executing in the recompiler (which doesn't know about this
1525 * arrangement) or that we've been restored from saved state (where we won't
1526 * save the change).
1527 *
1528 * @returns VBox status code.
1529 * @param pVM The cross context VM structure.
1530 * @param GCPhys The start address of the access handler. This
1531 * must be a fully page aligned range or we risk
1532 * messing up other handlers installed for the
1533 * start and end pages.
1534 * @param GCPhysPage The physical address of the page to turn off
1535 * access monitoring for and replace with the MMIO2
1536 * page.
1537 * @param pDevIns The device instance owning @a hMmio2.
1538 * @param hMmio2 Handle to the MMIO2 region containing the page
1539 * to remap in the MMIO page at @a GCPhys.
1540 * @param offMmio2PageRemap The offset into @a hMmio2 of the MMIO2 page that
1541 * should serve as backing memory.
1542 *
1543 * @remark May cause a page pool flush if used on a page that is already
1544 * aliased.
1545 *
1546 * @note This trick only works reliably if the two pages are never ever
1547 * mapped in the same page table. If they are the page pool code will
1548 * be confused should either of them be flushed. See the special case
1549 * of zero page aliasing mentioned in #3170.
1550 *
1551 */
1552VMMDECL(int) PGMHandlerPhysicalPageAliasMmio2(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage,
1553 PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, RTGCPHYS offMmio2PageRemap)
1554{
1555#ifdef VBOX_WITH_PGM_NEM_MODE
1556 AssertReturn(!VM_IS_NEM_ENABLED(pVM) || !pVM->pgm.s.fNemMode, VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE);
1557#endif
1558 int rc = PGM_LOCK(pVM);
1559 AssertRCReturn(rc, rc);
1560
1561 /*
1562 * Resolve the MMIO2 reference.
1563 */
1564 PPGMPAGE pPageRemap = pgmPhysResolveMmio2PageLocked(pVM, pDevIns, hMmio2, offMmio2PageRemap);
1565 if (RT_LIKELY(pPageRemap))
1566 AssertMsgReturnStmt(PGM_PAGE_GET_TYPE(pPageRemap) == PGMPAGETYPE_MMIO2,
1567 ("hMmio2=%RU64 offMmio2PageRemap=%RGp %R[pgmpage]\n", hMmio2, offMmio2PageRemap, pPageRemap),
1568 PGM_UNLOCK(pVM), VERR_PGM_PHYS_NOT_MMIO2);
1569 else
1570 {
1571 PGM_UNLOCK(pVM);
1572 return VERR_OUT_OF_RANGE;
1573 }
1574
1575 /*
1576 * Lookup and validate the range.
1577 */
1578 PPGMPHYSHANDLER pCur;
1579 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
1580 if (RT_SUCCESS(rc))
1581 {
1582 Assert(pCur->Key == GCPhys);
1583 if (RT_LIKELY( GCPhysPage >= pCur->Key
1584 && GCPhysPage <= pCur->KeyLast))
1585 {
1586 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
1587 AssertReturnStmt(pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO, PGM_UNLOCK(pVM), VERR_ACCESS_DENIED);
1588 AssertReturnStmt(!(pCur->Key & GUEST_PAGE_OFFSET_MASK), PGM_UNLOCK(pVM), VERR_INVALID_PARAMETER);
1589 AssertReturnStmt((pCur->KeyLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK,
1590 PGM_UNLOCK(pVM), VERR_INVALID_PARAMETER);
1591
1592 /*
1593 * Validate the page.
1594 */
1595 PPGMPAGE pPage;
1596 PPGMRAMRANGE pRam;
1597 rc = pgmPhysGetPageAndRangeEx(pVM, GCPhysPage, &pPage, &pRam);
1598 AssertReturnStmt(RT_SUCCESS_NP(rc), PGM_UNLOCK(pVM), rc);
1599 if (PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO)
1600 {
1601 AssertMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO,
1602 ("GCPhysPage=%RGp %R[pgmpage]\n", GCPhysPage, pPage),
1603 VERR_PGM_PHYS_NOT_MMIO2);
1604 if (PGM_PAGE_GET_HCPHYS(pPage) == PGM_PAGE_GET_HCPHYS(pPageRemap))
1605 {
1606 PGM_UNLOCK(pVM);
1607 return VINF_PGM_HANDLER_ALREADY_ALIASED;
1608 }
1609
1610 /*
1611 * The page is already mapped as some other page, reset it
1612 * to an MMIO/ZERO page before doing the new mapping.
1613 */
1614 Log(("PGMHandlerPhysicalPageAliasMmio2: GCPhysPage=%RGp (%R[pgmpage]; %RHp -> %RHp\n",
1615 GCPhysPage, pPage, PGM_PAGE_GET_HCPHYS(pPage), PGM_PAGE_GET_HCPHYS(pPageRemap)));
1616 pgmHandlerPhysicalResetAliasedPage(pVM, pPage, GCPhysPage, pRam,
1617 false /*fDoAccounting*/, false /*fFlushIemTlbs*/);
1618 pCur->cAliasedPages--;
1619
1620 /* Since this may be present in the TLB and now be wrong, invalidate
1621 the guest physical address part of the IEM TLBs. Note, we do
1622 this here as we will not invalid */
1623 IEMTlbInvalidateAllPhysicalAllCpus(pVM, NIL_VMCPUID);
1624 }
1625 Assert(PGM_PAGE_IS_ZERO(pPage));
1626
1627 /*
1628 * Do the actual remapping here.
1629 * This page now serves as an alias for the backing memory specified.
1630 */
1631 LogFlow(("PGMHandlerPhysicalPageAliasMmio2: %RGp (%R[pgmpage]) alias for %RU64/%RGp (%R[pgmpage])\n",
1632 GCPhysPage, pPage, hMmio2, offMmio2PageRemap, pPageRemap ));
1633 PGM_PAGE_SET_HCPHYS(pVM, pPage, PGM_PAGE_GET_HCPHYS(pPageRemap));
1634 PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_MMIO2_ALIAS_MMIO);
1635 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
1636 PGM_PAGE_SET_PAGEID(pVM, pPage, PGM_PAGE_GET_PAGEID(pPageRemap));
1637 PGM_PAGE_SET_HNDL_PHYS_STATE_ONLY(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
1638 pCur->cAliasedPages++;
1639 Assert(pCur->cAliasedPages <= pCur->cPages);
1640
1641 /*
1642 * Flush its TLB entry.
1643 *
1644 * Not calling IEMTlbInvalidateAllPhysicalAllCpus here to conserve
1645 * all the other IEM TLB entries. When this one is kicked out and
1646 * reloaded, it will be using the MMIO2 alias, but till then we'll
1647 * continue doing MMIO.
1648 */
1649 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);
1650 /** @todo Do some performance checks of calling
1651 * IEMTlbInvalidateAllPhysicalAllCpus when in IEM mode, to see if it
1652 * actually makes sense or not. Screen updates are typically massive
1653 * and important when this kind of aliasing is used, so it may pay off... */
1654
1655#ifdef VBOX_WITH_NATIVE_NEM
1656 /* Tell NEM about the backing and protection change. */
1657 if (VM_IS_NEM_ENABLED(pVM))
1658 {
1659 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
1660 NEMHCNotifyPhysPageChanged(pVM, GCPhysPage, pVM->pgm.s.HCPhysZeroPg, PGM_PAGE_GET_HCPHYS(pPage),
1661 PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
1662 pgmPhysPageCalcNemProtection(pPage, PGMPAGETYPE_MMIO2_ALIAS_MMIO),
1663 PGMPAGETYPE_MMIO2_ALIAS_MMIO, &u2State);
1664 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
1665 }
1666#endif
1667 LogFlow(("PGMHandlerPhysicalPageAliasMmio2: => %R[pgmpage]\n", pPage));
1668 PGM_UNLOCK(pVM);
1669 return VINF_SUCCESS;
1670 }
1671
1672 PGM_UNLOCK(pVM);
1673 AssertMsgFailed(("The page %#x is outside the range %#x-%#x\n", GCPhysPage, pCur->Key, pCur->KeyLast));
1674 return VERR_INVALID_PARAMETER;
1675 }
1676
1677 PGM_UNLOCK(pVM);
1678 if (rc == VERR_NOT_FOUND)
1679 {
1680 AssertMsgFailed(("Specified physical handler start address %#x is invalid.\n", GCPhys));
1681 return VERR_PGM_HANDLER_NOT_FOUND;
1682 }
1683 return rc;
1684}
1685
1686
1687/**
1688 * Replaces an MMIO page with an arbitrary HC page in the shadow page tables.
1689 *
1690 * This differs from PGMHandlerPhysicalPageAliasMmio2 in that the page doesn't
1691 * need to be a known MMIO2 page and that only shadow paging may access the
1692 * page. The latter distinction is important because the only use for this
1693 * feature is for mapping the special APIC access page that VT-x uses to detect
1694 * APIC MMIO operations, the page is shared between all guest CPUs and actually
1695 * not written to. At least at the moment.
1696 *
1697 * The caller must do required page table modifications. You can get away
1698 * without making any modifications since it's an MMIO page, the cost is an extra
1699 * \#PF which will the resync the page.
1700 *
1701 * Call PGMHandlerPhysicalReset() to restore the MMIO page.
1702 *
1703 *
1704 * @returns VBox status code.
1705 * @param pVM The cross context VM structure.
1706 * @param GCPhys The start address of the access handler. This
1707 * must be a fully page aligned range or we risk
1708 * messing up other handlers installed for the
1709 * start and end pages.
1710 * @param GCPhysPage The physical address of the page to turn off
1711 * access monitoring for.
1712 * @param HCPhysPageRemap The physical address of the HC page that
1713 * serves as backing memory.
1714 *
1715 * @remark May cause a page pool flush if used on a page that is already
1716 * aliased.
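 *
 * @par Example
 *      Editor's sketch, not part of the original source: the APIC-access-page
 *      style use described above, with GCPhysApicBase and HCPhysApicAccessPage
 *      being hypothetical values owned by the caller.
 * @code
 *      int rc = PGMHandlerPhysicalPageAliasHC(pVM, GCPhysApicBase, GCPhysApicBase,
 *                                             HCPhysApicAccessPage);
 *      AssertRCReturn(rc, rc);
 *      // PGMHandlerPhysicalReset(pVM, GCPhysApicBase) restores the MMIO page again.
 * @endcode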
1717 */
1718VMMDECL(int) PGMHandlerPhysicalPageAliasHC(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage, RTHCPHYS HCPhysPageRemap)
1719{
1720/// Assert(!IOMIsLockOwner(pVM)); /* We mustn't own any other locks when calling this */
1721#ifdef VBOX_WITH_PGM_NEM_MODE
1722 AssertReturn(!VM_IS_NEM_ENABLED(pVM) || !pVM->pgm.s.fNemMode, VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE);
1723#endif
1724 int rc = PGM_LOCK(pVM);
1725 AssertRCReturn(rc, rc);
1726
1727 /*
1728 * Lookup and validate the range.
1729 */
1730 PPGMPHYSHANDLER pCur;
1731 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
1732 if (RT_SUCCESS(rc))
1733 {
1734 Assert(pCur->Key == GCPhys);
1735 if (RT_LIKELY( GCPhysPage >= pCur->Key
1736 && GCPhysPage <= pCur->KeyLast))
1737 {
1738 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
1739 AssertReturnStmt(pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO, PGM_UNLOCK(pVM), VERR_ACCESS_DENIED);
1740 AssertReturnStmt(!(pCur->Key & GUEST_PAGE_OFFSET_MASK), PGM_UNLOCK(pVM), VERR_INVALID_PARAMETER);
1741 AssertReturnStmt((pCur->KeyLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK,
1742 PGM_UNLOCK(pVM), VERR_INVALID_PARAMETER);
1743
1744 /*
1745 * Get and validate the pages.
1746 */
1747 PPGMPAGE pPage;
1748 rc = pgmPhysGetPageEx(pVM, GCPhysPage, &pPage);
1749 AssertReturnStmt(RT_SUCCESS_NP(rc), PGM_UNLOCK(pVM), rc);
1750 if (PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO)
1751 {
1752 PGM_UNLOCK(pVM);
1753 AssertMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO,
1754 ("GCPhysPage=%RGp %R[pgmpage]\n", GCPhysPage, pPage),
1755 VERR_PGM_PHYS_NOT_MMIO2);
1756 return VINF_PGM_HANDLER_ALREADY_ALIASED;
1757 }
1758 Assert(PGM_PAGE_IS_ZERO(pPage));
1759
1760 /*
1761 * Do the actual remapping here.
1762 * This page now serves as an alias for the backing memory
1763 * specified as far as shadow paging is concerned.
1764 */
1765 LogFlow(("PGMHandlerPhysicalPageAliasHC: %RGp (%R[pgmpage]) alias for %RHp\n",
1766 GCPhysPage, pPage, HCPhysPageRemap));
1767 PGM_PAGE_SET_HCPHYS(pVM, pPage, HCPhysPageRemap);
1768 PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_SPECIAL_ALIAS_MMIO);
1769 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
1770 PGM_PAGE_SET_PAGEID(pVM, pPage, NIL_GMM_PAGEID);
1771 PGM_PAGE_SET_HNDL_PHYS_STATE_ONLY(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
1772 pCur->cAliasedPages++;
1773 Assert(pCur->cAliasedPages <= pCur->cPages);
1774
1775 /*
1776 * Flush its TLB entry.
1777 *
1778 * Not calling IEMTlbInvalidateAllPhysicalAllCpus here as special
1779 * aliased MMIO pages are handled like MMIO by the IEM TLB.
1780 */
1781 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);
1782
1783#ifdef VBOX_WITH_NATIVE_NEM
1784 /* Tell NEM about the backing and protection change. */
1785 if (VM_IS_NEM_ENABLED(pVM))
1786 {
1787 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhysPage);
1788 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
1789 NEMHCNotifyPhysPageChanged(pVM, GCPhysPage, pVM->pgm.s.HCPhysZeroPg, PGM_PAGE_GET_HCPHYS(pPage),
1790 PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
1791 pgmPhysPageCalcNemProtection(pPage, PGMPAGETYPE_SPECIAL_ALIAS_MMIO),
1792 PGMPAGETYPE_SPECIAL_ALIAS_MMIO, &u2State);
1793 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
1794 }
1795#endif
1796 LogFlow(("PGMHandlerPhysicalPageAliasHC: => %R[pgmpage]\n", pPage));
1797 PGM_UNLOCK(pVM);
1798 return VINF_SUCCESS;
1799 }
1800 PGM_UNLOCK(pVM);
1801 AssertMsgFailed(("The page %#x is outside the range %#x-%#x\n", GCPhysPage, pCur->Key, pCur->KeyLast));
1802 return VERR_INVALID_PARAMETER;
1803 }
1804 PGM_UNLOCK(pVM);
1805
1806 if (rc == VERR_NOT_FOUND)
1807 {
1808 AssertMsgFailed(("Specified physical handler start address %#x is invalid.\n", GCPhys));
1809 return VERR_PGM_HANDLER_NOT_FOUND;
1810 }
1811 return rc;
1812}
1813
1814
1815/**
1816 * Checks if a physical range is handled.
1817 *
1818 * @returns boolean
1819 * @param pVM The cross context VM structure.
1820 * @param GCPhys Start physical address earlier passed to PGMR3HandlerPhysicalRegister().
1821 * @remarks Caller must take the PGM lock...
1822 * @thread EMT.
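 *
 * @par Example
 *      Editor's sketch, not part of the original source; GCPhysProbe is a
 *      hypothetical guest physical address supplied by the caller.
 * @code
 *      if (PGMHandlerPhysicalIsRegistered(pVM, GCPhysProbe))
 *          Log(("A physical access handler covers %RGp\n", GCPhysProbe));
 * @endcode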
1823 */
1824VMMDECL(bool) PGMHandlerPhysicalIsRegistered(PVMCC pVM, RTGCPHYS GCPhys)
1825{
1826 /*
1827 * Find the handler.
1828 */
1829 PGM_LOCK_VOID(pVM);
1830 PPGMPHYSHANDLER pCur;
1831 int rc = pgmHandlerPhysicalLookup(pVM, GCPhys, &pCur);
1832 if (RT_SUCCESS(rc))
1833 {
1834#ifdef VBOX_STRICT
1835 Assert(GCPhys >= pCur->Key && GCPhys <= pCur->KeyLast);
1836 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
1837 Assert( pCurType->enmKind == PGMPHYSHANDLERKIND_WRITE
1838 || pCurType->enmKind == PGMPHYSHANDLERKIND_ALL
1839 || pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO);
1840#endif
1841 PGM_UNLOCK(pVM);
1842 return true;
1843 }
1844 PGM_UNLOCK(pVM);
1845 return false;
1846}
1847
1848
1849/**
1850 * Checks if it's a disabled all access handler or write access handler at the
1851 * given address.
1852 *
1853 * @returns true if it's an all access handler, false if it's a write access
1854 * handler.
1855 * @param pVM The cross context VM structure.
1856 * @param GCPhys The address of the page with a disabled handler.
1857 *
1858 * @remarks The caller, PGMR3PhysTlbGCPhys2Ptr, must hold the PGM lock.
1859 */
1860bool pgmHandlerPhysicalIsAll(PVMCC pVM, RTGCPHYS GCPhys)
1861{
1862 PGM_LOCK_VOID(pVM);
1863 PPGMPHYSHANDLER pCur;
1864 int rc = pgmHandlerPhysicalLookup(pVM, GCPhys, &pCur);
1865 AssertRCReturnStmt(rc, PGM_UNLOCK(pVM), true);
1866
1867 /* Only whole pages can be disabled. */
1868 Assert( pCur->Key <= (GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK)
1869 && pCur->KeyLast >= (GCPhys | GUEST_PAGE_OFFSET_MASK));
1870
1871 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
1872 Assert( pCurType->enmKind == PGMPHYSHANDLERKIND_WRITE
1873 || pCurType->enmKind == PGMPHYSHANDLERKIND_ALL
1874 || pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO); /* sanity */
1875 bool const fRet = pCurType->enmKind != PGMPHYSHANDLERKIND_WRITE;
1876 PGM_UNLOCK(pVM);
1877 return fRet;
1878}
1879
1880#ifdef VBOX_STRICT
1881
1882/**
1883 * State structure used by the PGMAssertHandlerAndFlagsInSync() function
1884 * and its AVL enumerators.
1885 */
1886typedef struct PGMAHAFIS
1887{
1888 /** The current physical address. */
1889 RTGCPHYS GCPhys;
1890 /** Number of errors. */
1891 unsigned cErrors;
1892 /** Pointer to the VM. */
1893 PVM pVM;
1894} PGMAHAFIS, *PPGMAHAFIS;
1895
1896
1897/**
1898 * Asserts that the handlers+guest-page-tables == ramrange-flags and
1899 * that the physical addresses associated with virtual handlers are correct.
1900 *
1901 * @returns Number of mismatches.
1902 * @param pVM The cross context VM structure.
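 *
 * @par Example
 *      Editor's sketch, not part of the original source: invoking the check
 *      from a strict build while owning the PGM lock (which the function
 *      asserts).
 * @code
 *      PGM_LOCK_VOID(pVM);
 *      unsigned const cErrors = PGMAssertHandlerAndFlagsInSync(pVM);
 *      AssertMsg(cErrors == 0, ("%u handler/flag mismatches\n", cErrors));
 *      PGM_UNLOCK(pVM);
 * @endcode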
1903 */
1904VMMDECL(unsigned) PGMAssertHandlerAndFlagsInSync(PVMCC pVM)
1905{
1906 PPGM pPGM = &pVM->pgm.s;
1907 PGMAHAFIS State;
1908 State.GCPhys = 0;
1909 State.cErrors = 0;
1910 State.pVM = pVM;
1911
1912 PGM_LOCK_ASSERT_OWNER(pVM);
1913
1914 /*
1915 * Check the RAM flags against the handlers.
1916 */
1917 PPGMPHYSHANDLERTREE const pPhysHandlerTree = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree;
1918 for (PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRangesX); pRam; pRam = pRam->CTX_SUFF(pNext))
1919 {
1920 const uint32_t cPages = pRam->cb >> GUEST_PAGE_SHIFT;
1921 for (uint32_t iPage = 0; iPage < cPages; iPage++)
1922 {
1923 PGMPAGE const *pPage = &pRam->aPages[iPage];
1924 if (PGM_PAGE_HAS_ANY_HANDLERS(pPage))
1925 {
1926 State.GCPhys = pRam->GCPhys + (iPage << GUEST_PAGE_SHIFT);
1927
1928 /*
1929 * Physical first - calculate the state based on the handlers
1930 * active on the page, then compare.
1931 */
1932 if (PGM_PAGE_HAS_ANY_PHYSICAL_HANDLERS(pPage))
1933 {
1934 /* the first */
1935 PPGMPHYSHANDLER pPhys;
1936 int rc = pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, State.GCPhys, &pPhys);
1937 if (rc == VERR_NOT_FOUND)
1938 {
1939 rc = pPhysHandlerTree->lookupMatchingOrAbove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator,
1940 State.GCPhys, &pPhys);
1941 if (RT_SUCCESS(rc))
1942 {
1943 Assert(pPhys->Key >= State.GCPhys);
1944 if (pPhys->Key > (State.GCPhys + GUEST_PAGE_SIZE - 1))
1945 pPhys = NULL;
1946 }
1947 else
1948 AssertLogRelMsgReturn(rc == VERR_NOT_FOUND, ("rc=%Rrc GCPhys=%RGp\n", rc, State.GCPhys), 999);
1949 }
1950 else
1951 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("rc=%Rrc GCPhys=%RGp\n", rc, State.GCPhys), 999);
1952
1953 if (pPhys)
1954 {
1955 PCPGMPHYSHANDLERTYPEINT pPhysType = pgmHandlerPhysicalTypeHandleToPtr(pVM, pPhys->hType);
1956 unsigned uState = pPhysType->uState;
1957 bool const fNotInHm = pPhysType->fNotInHm; /* whole pages, so no need to accumulate sub-page configs. */
1958
1959 /* more? */
1960 while (pPhys->KeyLast < (State.GCPhys | GUEST_PAGE_OFFSET_MASK))
1961 {
1962 PPGMPHYSHANDLER pPhys2;
1963 rc = pPhysHandlerTree->lookupMatchingOrAbove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator,
1964 pPhys->KeyLast + 1, &pPhys2);
1965 if (rc == VERR_NOT_FOUND)
1966 break;
1967 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("rc=%Rrc KeyLast+1=%RGp\n", rc, pPhys->KeyLast + 1), 999);
1968 if (pPhys2->Key > (State.GCPhys | GUEST_PAGE_OFFSET_MASK))
1969 break;
1970 PCPGMPHYSHANDLERTYPEINT pPhysType2 = pgmHandlerPhysicalTypeHandleToPtr(pVM, pPhys2->hType);
1971 uState = RT_MAX(uState, pPhysType2->uState);
1972 pPhys = pPhys2;
1973 }
1974
1975 /* compare.*/
1976 if ( PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != uState
1977 && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_DISABLED)
1978 {
1979 AssertMsgFailed(("ram range vs phys handler flags mismatch. GCPhys=%RGp state=%d expected=%d %s\n",
1980 State.GCPhys, PGM_PAGE_GET_HNDL_PHYS_STATE(pPage), uState, pPhysType->pszDesc));
1981 State.cErrors++;
1982 }
1983 AssertMsgStmt(PGM_PAGE_IS_HNDL_PHYS_NOT_IN_HM(pPage) == fNotInHm,
1984 ("ram range vs phys handler flags mismatch. GCPhys=%RGp fNotInHm=%d, %d %s\n",
1985 State.GCPhys, PGM_PAGE_IS_HNDL_PHYS_NOT_IN_HM(pPage), fNotInHm, pPhysType->pszDesc),
1986 State.cErrors++);
1987 }
1988 else
1989 {
1990 AssertMsgFailed(("ram range vs phys handler mismatch. no handler for GCPhys=%RGp\n", State.GCPhys));
1991 State.cErrors++;
1992 }
1993 }
1994 }
1995 } /* foreach page in ram range. */
1996 } /* foreach ram range. */
1997
1998 /*
1999 * Do the reverse check for physical handlers.
2000 */
2001 /** @todo */
2002
2003 return State.cErrors;
2004}
2005
2006#endif /* VBOX_STRICT */
2007