VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp@17595

Last change on this file since 17595 was 17509, checked in by vboxsync, 16 years ago

PGM: Moved the page pool PT flushing code in the access handler bits to where it belongs and called it pgmPoolTrackFlushGCPhys. Fixed a status code corruption bug in PGMR3PhysTlbGCPhys2Ptr (new phys). Made the lazy zero-page replacement code work in the new code; it's disabled by default because it frequently requires flushing the shadow page pool, as the tracking code assumes the HCPhys of a PGMPAGE is unique and never shared.

1/* $Id: PGMAllHandler.cpp 17509 2009-03-07 01:30:23Z vboxsync $ */
2/** @file
3 * PGM - Page Manager / Monitor, Access Handlers.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_PGM
27#include <VBox/dbgf.h>
28#include <VBox/pgm.h>
29#include <VBox/iom.h>
30#include <VBox/mm.h>
31#include <VBox/em.h>
32#include <VBox/stam.h>
33#include <VBox/rem.h>
36#include "PGMInternal.h"
37#include <VBox/vm.h>
38
39#include <VBox/log.h>
40#include <iprt/assert.h>
41#include <iprt/asm.h>
42#include <iprt/string.h>
43#include <VBox/param.h>
44#include <VBox/err.h>
45#include <VBox/selm.h>
46
47
48/*******************************************************************************
49* Internal Functions *
50*******************************************************************************/
51static int pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVM pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam);
52static void pgmHandlerPhysicalDeregisterNotifyREM(PVM pVM, PPGMPHYSHANDLER pCur);
53static void pgmHandlerPhysicalResetRamFlags(PVM pVM, PPGMPHYSHANDLER pCur);
54
55
56
57/**
58 * Registers an access handler for a physical range.
59 *
60 * @returns VBox status code.
61 * @retval VINF_SUCCESS when successfully installed.
62 * @retval VINF_PGM_GCPHYS_ALIASED when the shadow PTs could not be updated because
63 * the guest page is aliased and/or mapped by multiple PTs. A CR3 sync has been
64 * flagged together with a pool clearing.
65 * @retval VERR_PGM_HANDLER_PHYSICAL_CONFLICT if the range conflicts with an existing
66 * one. A debug assertion is raised.
67 *
68 * @param pVM VM Handle.
69 * @param enmType Handler type. Any of the PGMPHYSHANDLERTYPE_PHYSICAL* enums.
70 * @param GCPhys Start physical address.
71 * @param GCPhysLast Last physical address. (inclusive)
72 * @param pfnHandlerR3 The R3 handler.
73 * @param pvUserR3 User argument to the R3 handler.
74 * @param pfnHandlerR0 The R0 handler.
75 * @param pvUserR0 User argument to the R0 handler.
76 * @param pfnHandlerRC The RC handler.
77 * @param pvUserRC User argument to the RC handler. This can be a value
78 * less than 0x10000 or a (non-null) pointer that is
79 * automatically relocated.
80 * @param pszDesc Pointer to description string. This must not be freed.
81 */
82VMMDECL(int) PGMHandlerPhysicalRegisterEx(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast,
83 R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnHandlerR3, RTR3PTR pvUserR3,
84 R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnHandlerR0, RTR0PTR pvUserR0,
85 RCPTRTYPE(PFNPGMRCPHYSHANDLER) pfnHandlerRC, RTRCPTR pvUserRC,
86 R3PTRTYPE(const char *) pszDesc)
87{
88 Log(("PGMHandlerPhysicalRegisterEx: enmType=%d GCPhys=%RGp GCPhysLast=%RGp pfnHandlerR3=%RHv pvUserR3=%RHv pfnHandlerR0=%RHv pvUserR0=%RHv pfnHandlerRC=%RRv pvUserRC=%RRv pszDesc=%s\n",
89 enmType, GCPhys, GCPhysLast, pfnHandlerR3, pvUserR3, pfnHandlerR0, pvUserR0, pfnHandlerRC, pvUserRC, R3STRING(pszDesc)));
90
91 /*
92 * Validate input.
93 */
94 AssertMsgReturn(GCPhys < GCPhysLast, ("GCPhys >= GCPhysLast (%RGp >= %RGp)\n", GCPhys, GCPhysLast), VERR_INVALID_PARAMETER);
95 switch (enmType)
96 {
97 case PGMPHYSHANDLERTYPE_PHYSICAL_WRITE:
98 break;
99 case PGMPHYSHANDLERTYPE_MMIO:
100 case PGMPHYSHANDLERTYPE_PHYSICAL_ALL:
101 /* Simplification in PGMPhysRead among other places. */
102 AssertMsgReturn(!(GCPhys & PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_INVALID_PARAMETER);
103 AssertMsgReturn((GCPhysLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK, ("%RGp\n", GCPhysLast), VERR_INVALID_PARAMETER);
104 break;
105 default:
106 AssertMsgFailed(("Invalid input enmType=%d!\n", enmType));
107 return VERR_INVALID_PARAMETER;
108 }
109 AssertMsgReturn( (RTRCUINTPTR)pvUserRC < 0x10000
110 || MMHyperR3ToRC(pVM, MMHyperRCToR3(pVM, pvUserRC)) == pvUserRC,
111 ("Not RC pointer! pvUserRC=%RRv\n", pvUserRC),
112 VERR_INVALID_PARAMETER);
113 AssertMsgReturn( (RTR0UINTPTR)pvUserR0 < 0x10000
114 || MMHyperR3ToR0(pVM, MMHyperR0ToR3(pVM, pvUserR0)) == pvUserR0,
115 ("Not R0 pointer! pvUserR0=%RHv\n", pvUserR0),
116 VERR_INVALID_PARAMETER);
117#ifdef VBOX_WITH_NEW_PHYS_CODE
118 AssertPtrReturn(pfnHandlerR3, VERR_INVALID_POINTER);
119 AssertReturn(pfnHandlerR0, VERR_INVALID_PARAMETER);
120 AssertReturn(pfnHandlerRC, VERR_INVALID_PARAMETER);
121#else
122 AssertReturn(pfnHandlerR3 || pfnHandlerR0 || pfnHandlerRC, VERR_INVALID_PARAMETER);
123#endif
124
125 /*
126 * We require the range to be within registered ram.
127 * There is no apparent need to support ranges which cover more than one ram range.
128 */
129 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
130 while (pRam && GCPhys > pRam->GCPhysLast)
131 pRam = pRam->CTX_SUFF(pNext);
132 if ( !pRam
133 || GCPhysLast < pRam->GCPhys
134 || GCPhys > pRam->GCPhysLast)
135 {
136#ifdef IN_RING3
137 DBGFR3Info(pVM, "phys", NULL, NULL);
138#endif
139 AssertMsgFailed(("No RAM range for %RGp-%RGp\n", GCPhys, GCPhysLast));
140 return VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
141 }
142
143 /*
144 * Allocate and initialize the new entry.
145 */
146 PPGMPHYSHANDLER pNew;
147 int rc = MMHyperAlloc(pVM, sizeof(*pNew), 0, MM_TAG_PGM_HANDLERS, (void **)&pNew);
148 if (RT_FAILURE(rc))
149 return rc;
150
151 pNew->Core.Key = GCPhys;
152 pNew->Core.KeyLast = GCPhysLast;
153 pNew->enmType = enmType;
154 pNew->cPages = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
155 pNew->pfnHandlerR3 = pfnHandlerR3;
156 pNew->pvUserR3 = pvUserR3;
157 pNew->pfnHandlerR0 = pfnHandlerR0;
158 pNew->pvUserR0 = pvUserR0;
159 pNew->pfnHandlerRC = pfnHandlerRC;
160 pNew->pvUserRC = pvUserRC;
161 pNew->pszDesc = pszDesc;
162
163 pgmLock(pVM);
164
165 /*
166 * Try insert into list.
167 */
168 if (RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pNew->Core))
169 {
170 rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pNew, pRam);
171 if (rc == VINF_PGM_GCPHYS_ALIASED)
172 {
173 pVM->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
174 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
175 }
176 pVM->pgm.s.fPhysCacheFlushPending = true;
177 HWACCMFlushTLB(pVM);
178#ifndef IN_RING3
179 REMNotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, GCPhysLast - GCPhys + 1, !!pfnHandlerR3);
180#else
181 REMR3NotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, GCPhysLast - GCPhys + 1, !!pfnHandlerR3);
182#endif
183 pgmUnlock(pVM);
184 if (rc != VINF_SUCCESS)
185 Log(("PGMHandlerPhysicalRegisterEx: returns %Rrc (%RGp-%RGp)\n", rc, GCPhys, GCPhysLast));
186 return rc;
187 }
188
189 pgmUnlock(pVM);
190
191#if defined(IN_RING3) && defined(VBOX_STRICT)
192 DBGFR3Info(pVM, "handlers", "phys nostats", NULL);
193#endif
194 AssertMsgFailed(("Conflict! GCPhys=%RGp GCPhysLast=%RGp pszDesc=%s\n", GCPhys, GCPhysLast, pszDesc));
195 MMHyperFree(pVM, pNew);
196 return VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
197}
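
/*
 * Editor's illustrative sketch (not part of the original file): registering a
 * one-page write handler using the signature above. The demoPhysHandler*
 * callback names are hypothetical; remember that pvUserR0/pvUserRC must be
 * values below 0x10000 or hyper heap pointers, as the validation enforces.
 *
 *   int rc = PGMHandlerPhysicalRegisterEx(pVM, PGMPHYSHANDLERTYPE_PHYSICAL_WRITE,
 *                                         GCPhys, GCPhys + PAGE_SIZE - 1,
 *                                         demoPhysHandlerR3, NIL_RTR3PTR,
 *                                         demoPhysHandlerR0, NIL_RTR0PTR,
 *                                         demoPhysHandlerRC, NIL_RTRCPTR,
 *                                         "Demo write handler");
 *   AssertLogRelRC(rc);
 */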
198
199
200/**
201 * Sets ram range flags and attempts to update the shadow PTs.
202 *
203 * @returns VBox status code.
204 * @retval VINF_SUCCESS when the shadow PTs were successfully updated.
205 * @retval VINF_PGM_GCPHYS_ALIASED when the shadow PTs could not be updated because
206 * the guest page is aliased and/or mapped by multiple PTs.
207 * @param pVM The VM handle.
208 * @param pCur The physical handler.
209 * @param pRam The RAM range.
210 */
211static int pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVM pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam)
212{
213 /*
214 * Iterate the guest ram pages updating the flags and flushing PT entries
215 * mapping the page.
216 */
217 bool fFlushTLBs = false;
218 int rc = VINF_SUCCESS;
219 const unsigned uState = pgmHandlerPhysicalCalcState(pCur);
220 RTUINT cPages = pCur->cPages;
221 RTUINT i = (pCur->Core.Key - pRam->GCPhys) >> PAGE_SHIFT;
222 for (;;)
223 {
224#ifndef VBOX_WITH_NEW_PHYS_CODE
225 /* Physical chunk in dynamically allocated range not present? */
226 if (RT_UNLIKELY(!PGM_PAGE_GET_HCPHYS(&pRam->aPages[i])))
227 {
228 RTGCPHYS GCPhys = pRam->GCPhys + (i << PAGE_SHIFT);
229# ifdef IN_RING3
230 int rc2 = pgmr3PhysGrowRange(pVM, GCPhys);
231# else
232 int rc2 = CTXALLMID(VMM, CallHost)(pVM, VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
233# endif
234 if (rc2 != VINF_SUCCESS)
235 return rc2;
236 }
237#endif /* !VBOX_WITH_NEW_PHYS_CODE */
238
239 /* Only do upgrades. */
240 PPGMPAGE pPage = &pRam->aPages[i];
241 if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) < uState)
242 {
243 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, uState);
244 Assert(PGM_PAGE_GET_HCPHYS(pPage));
245
246 int rc2 = pgmPoolTrackFlushGCPhys(pVM, pPage, &fFlushTLBs);
247 if (rc2 != VINF_SUCCESS && rc == VINF_SUCCESS)
248 rc = rc2;
249 }
250
251 /* next */
252 if (--cPages == 0)
253 break;
254 i++;
255 }
256
257 if (fFlushTLBs && rc == VINF_SUCCESS)
258 {
259 PGM_INVL_GUEST_TLBS();
260 Log(("pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs: flushing guest TLBs\n"));
261 }
262 else
263 Log(("pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs: doesn't flush guest TLBs. rc=%Rrc\n", rc));
264 return rc;
265}
266
267
268/**
269 * Deregisters a physical page access handler.
270 *
271 * @returns VBox status code.
272 * @param pVM VM Handle.
273 * @param GCPhys Start physical address.
274 */
275VMMDECL(int) PGMHandlerPhysicalDeregister(PVM pVM, RTGCPHYS GCPhys)
276{
277 /*
278 * Find the handler.
279 */
280 pgmLock(pVM);
281 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
282 if (pCur)
283 {
284 LogFlow(("PGMHandlerPhysicalDeregister: Removing Range %RGp-%RGp %s\n",
285 pCur->Core.Key, pCur->Core.KeyLast, R3STRING(pCur->pszDesc)));
286
287 /*
288 * Clear the page bits and notify the REM about this change.
289 */
290 HWACCMFlushTLB(pVM);
291 pgmHandlerPhysicalResetRamFlags(pVM, pCur);
292 pgmHandlerPhysicalDeregisterNotifyREM(pVM, pCur);
293 pgmUnlock(pVM);
294 MMHyperFree(pVM, pCur);
295 return VINF_SUCCESS;
296 }
297 pgmUnlock(pVM);
298
299 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys));
300 return VERR_PGM_HANDLER_NOT_FOUND;
301}
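
/*
 * Editor's illustrative sketch: a handler is always removed by the exact start
 * address it was registered with; any other address yields
 * VERR_PGM_HANDLER_NOT_FOUND.
 *
 *   int rc = PGMHandlerPhysicalDeregister(pVM, GCPhys);
 *   AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_NOT_FOUND, ("%Rrc\n", rc));
 */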
302
303
304/**
305 * Common worker for PGMHandlerPhysicalDeregister and PGMHandlerPhysicalModify.
306 */
307static void pgmHandlerPhysicalDeregisterNotifyREM(PVM pVM, PPGMPHYSHANDLER pCur)
308{
309 RTGCPHYS GCPhysStart = pCur->Core.Key;
310 RTGCPHYS GCPhysLast = pCur->Core.KeyLast;
311
312 /*
313 * Page align the range.
314 *
315 * Since we've reset (recalculated) the physical handler state of all pages
316 * we can make use of the page states to figure out whether a page should be
317 * included in the REM notification or not.
318 */
319 if ( (pCur->Core.Key & PAGE_OFFSET_MASK)
320 || ((pCur->Core.KeyLast + 1) & PAGE_OFFSET_MASK))
321 {
322 Assert(pCur->enmType != PGMPHYSHANDLERTYPE_MMIO);
323
324 if (GCPhysStart & PAGE_OFFSET_MASK)
325 {
326 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhysStart);
327 if ( pPage
328 && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
329 {
330 RTGCPHYS GCPhys = (GCPhysStart + (PAGE_SIZE - 1)) & X86_PTE_PAE_PG_MASK;
331 if ( GCPhys > GCPhysLast
332 || GCPhys < GCPhysStart)
333 return;
334 GCPhysStart = GCPhys;
335 }
336 else
337 GCPhysStart &= X86_PTE_PAE_PG_MASK;
338 Assert(!pPage || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO); /* these are page aligned atm! */
339 }
340
341 if (GCPhysLast & PAGE_OFFSET_MASK)
342 {
343 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhysLast);
344 if ( pPage
345 && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
346 {
347 RTGCPHYS GCPhys = (GCPhysLast & X86_PTE_PAE_PG_MASK) - 1;
348 if ( GCPhys < GCPhysStart
349 || GCPhys > GCPhysLast)
350 return;
351 GCPhysLast = GCPhys;
352 }
353 else
354 GCPhysLast |= PAGE_OFFSET_MASK;
355 Assert(!pPage || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO); /* these are page aligned atm! */
356 }
357 }
358
359 /*
360 * Tell REM.
361 */
362 const bool fRestoreAsRAM = pCur->pfnHandlerR3
363 && pCur->enmType != PGMPHYSHANDLERTYPE_MMIO; /** @todo this isn't entirely correct. */
364#ifndef IN_RING3
365 REMNotifyHandlerPhysicalDeregister(pVM, pCur->enmType, GCPhysStart, GCPhysLast - GCPhysStart + 1, !!pCur->pfnHandlerR3, fRestoreAsRAM);
366#else
367 REMR3NotifyHandlerPhysicalDeregister(pVM, pCur->enmType, GCPhysStart, GCPhysLast - GCPhysStart + 1, !!pCur->pfnHandlerR3, fRestoreAsRAM);
368#endif
369}
370
371
372/**
373 * pgmHandlerPhysicalResetRamFlags helper that checks for
374 * other handlers on edge pages.
375 */
376DECLINLINE(void) pgmHandlerPhysicalRecalcPageState(PPGM pPGM, RTGCPHYS GCPhys, bool fAbove, PPGMRAMRANGE *ppRamHint)
377{
378 /*
379 * Look for other handlers.
380 */
381 unsigned uState = PGM_PAGE_HNDL_PHYS_STATE_NONE;
382 for (;;)
383 {
384 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pPGM->CTX_SUFF(pTrees)->PhysHandlers, GCPhys, fAbove);
385 if ( !pCur
386 || ((fAbove ? pCur->Core.Key : pCur->Core.KeyLast) >> PAGE_SHIFT) != (GCPhys >> PAGE_SHIFT))
387 break;
388 unsigned uThisState = pgmHandlerPhysicalCalcState(pCur);
389 uState = RT_MAX(uState, uThisState);
390
391 /* next? */
392 RTGCPHYS GCPhysNext = fAbove
393 ? pCur->Core.KeyLast + 1
394 : pCur->Core.Key - 1;
395 if ((GCPhysNext >> PAGE_SHIFT) != (GCPhys >> PAGE_SHIFT))
396 break;
397 GCPhys = GCPhysNext;
398 }
399
400 /*
401 * Update if we found something that is a higher priority
402 * state than the current.
403 */
404 if (uState != PGM_PAGE_HNDL_PHYS_STATE_NONE)
405 {
406 PPGMPAGE pPage;
407 int rc = pgmPhysGetPageWithHintEx(pPGM, GCPhys, &pPage, ppRamHint);
408 if ( RT_SUCCESS(rc)
409 && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) < uState)
410 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, uState);
411 else
412 AssertRC(rc);
413 }
414}
415
416
417/**
418 * Resets ram range flags.
419 *
422 * @param pVM The VM handle.
423 * @param pCur The physical handler.
424 *
425 * @remark We don't start messing with the shadow page tables, as we've already got code
426 * in Trap0e which deals with out of sync handler flags (originally conceived for
427 * global pages).
428 */
429static void pgmHandlerPhysicalResetRamFlags(PVM pVM, PPGMPHYSHANDLER pCur)
430{
431 /*
432 * Iterate the guest ram pages updating the state.
433 */
434 RTUINT cPages = pCur->cPages;
435 RTGCPHYS GCPhys = pCur->Core.Key;
436 PPGMRAMRANGE pRamHint = NULL;
437 PPGM pPGM = &pVM->pgm.s;
438 for (;;)
439 {
440 PPGMPAGE pPage;
441 int rc = pgmPhysGetPageWithHintEx(pPGM, GCPhys, &pPage, &pRamHint);
442 if (RT_SUCCESS(rc))
443 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_NONE);
444 else
445 AssertRC(rc);
446
447 /* next */
448 if (--cPages == 0)
449 break;
450 GCPhys += PAGE_SIZE;
451 }
452
453 /*
454 * Check for partial start and end pages.
455 */
456 if (pCur->Core.Key & PAGE_OFFSET_MASK)
457 pgmHandlerPhysicalRecalcPageState(pPGM, pCur->Core.Key - 1, false /* fAbove */, &pRamHint);
458 if ((pCur->Core.KeyLast & PAGE_OFFSET_MASK) != PAGE_SIZE - 1)
459 pgmHandlerPhysicalRecalcPageState(pPGM, pCur->Core.KeyLast + 1, true /* fAbove */, &pRamHint);
460}
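
/*
 * Editor's worked example (hypothetical addresses): for a handler covering
 * 0x1080-0x2f7f the loop above clears the handler state of the pages at
 * 0x1000 and 0x2000. Because neither end is page aligned, the edges are then
 * rescanned: downwards from 0x107f (fAbove=false) and upwards from 0x2f80
 * (fAbove=true), so any neighbouring handler sharing those edge pages keeps
 * its page state.
 */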
461
462
463/**
464 * Modifies a physical page access handler.
465 *
466 * Modification can only be done to the range itself, not the type or anything else.
467 *
468 * @returns VBox status code.
469 * For all return codes other than VERR_PGM_HANDLER_NOT_FOUND and VINF_SUCCESS the range is deregistered
470 * and a new registration must be performed!
471 * @param pVM VM handle.
472 * @param GCPhysCurrent Current location.
473 * @param GCPhys New location.
474 * @param GCPhysLast New last location.
475 */
476VMMDECL(int) PGMHandlerPhysicalModify(PVM pVM, RTGCPHYS GCPhysCurrent, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast)
477{
478 /*
479 * Remove it.
480 */
481 int rc;
482 pgmLock(pVM);
483 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhysCurrent);
484 if (pCur)
485 {
486 /*
487 * Clear the ram flags. (We're gonna move or free it!)
488 */
489 pgmHandlerPhysicalResetRamFlags(pVM, pCur);
490 const bool fRestoreAsRAM = pCur->pfnHandlerR3
491 && pCur->enmType != PGMPHYSHANDLERTYPE_MMIO; /** @todo this isn't entirely correct. */
492
493 /*
494 * Validate the new range, modify and reinsert.
495 */
496 if (GCPhysLast >= GCPhys)
497 {
498 /*
499 * We require the range to be within registered ram.
500 * There is no apparent need to support ranges which cover more than one ram range.
501 */
502 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
503 while (pRam && GCPhys > pRam->GCPhysLast)
504 pRam = pRam->CTX_SUFF(pNext);
505 if ( pRam
506 && GCPhys <= pRam->GCPhysLast
507 && GCPhysLast >= pRam->GCPhys)
508 {
509 pCur->Core.Key = GCPhys;
510 pCur->Core.KeyLast = GCPhysLast;
511 pCur->cPages = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
512
513 if (RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pCur->Core))
514 {
515 /*
516 * Set ram flags, flush shadow PT entries and finally tell REM about this.
517 */
518 rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam);
519 if (rc == VINF_PGM_GCPHYS_ALIASED)
520 {
521 pVM->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
522 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
523 }
524 pVM->pgm.s.fPhysCacheFlushPending = true;
525
526#ifndef IN_RING3
527 REMNotifyHandlerPhysicalModify(pVM, pCur->enmType, GCPhysCurrent, GCPhys,
528 pCur->Core.KeyLast - GCPhys + 1, !!pCur->pfnHandlerR3, fRestoreAsRAM);
529#else
530 REMR3NotifyHandlerPhysicalModify(pVM, pCur->enmType, GCPhysCurrent, GCPhys,
531 pCur->Core.KeyLast - GCPhys + 1, !!pCur->pfnHandlerR3, fRestoreAsRAM);
532#endif
533 HWACCMFlushTLB(pVM);
534 pgmUnlock(pVM);
535 Log(("PGMHandlerPhysicalModify: GCPhysCurrent=%RGp -> GCPhys=%RGp GCPhysLast=%RGp\n",
536 GCPhysCurrent, GCPhys, GCPhysLast));
537 return VINF_SUCCESS;
538 }
539
540 AssertMsgFailed(("Conflict! GCPhys=%RGp GCPhysLast=%RGp\n", GCPhys, GCPhysLast));
541 rc = VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
542 }
543 else
544 {
545 AssertMsgFailed(("No RAM range for %RGp-%RGp\n", GCPhys, GCPhysLast));
546 rc = VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
547 }
548 }
549 else
550 {
551 AssertMsgFailed(("Invalid range %RGp-%RGp\n", GCPhys, GCPhysLast));
552 rc = VERR_INVALID_PARAMETER;
553 }
554
555 /*
556 * Invalid new location, free it.
557 * We've only gotta notify REM and free the memory.
558 */
559 pgmHandlerPhysicalDeregisterNotifyREM(pVM, pCur);
560 MMHyperFree(pVM, pCur);
561 }
562 else
563 {
564 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhysCurrent));
565 rc = VERR_PGM_HANDLER_NOT_FOUND;
566 }
567
568 pgmUnlock(pVM);
569 return rc;
570}
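
/*
 * Editor's illustrative sketch: moving a registered range to an equally sized
 * new location. Note the doc comment above: on any failure other than
 * VERR_PGM_HANDLER_NOT_FOUND the handler is gone and must be re-registered.
 *
 *   int rc = PGMHandlerPhysicalModify(pVM, GCPhysOld, GCPhysNew, GCPhysNew + cb - 1);
 */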
571
572
573/**
574 * Changes the callbacks associated with a physical access handler.
575 *
576 * @returns VBox status code.
577 * @param pVM VM Handle.
578 * @param GCPhys Start physical address.
579 * @param pfnHandlerR3 The R3 handler.
580 * @param pvUserR3 User argument to the R3 handler.
581 * @param pfnHandlerR0 The R0 handler.
582 * @param pvUserR0 User argument to the R0 handler.
583 * @param pfnHandlerRC The RC handler.
584 * @param pvUserRC User argument to the RC handler. Values larger than or
585 * equal to 0x10000 will be relocated automatically.
586 * @param pszDesc Pointer to description string. This must not be freed.
587 */
588VMMDECL(int) PGMHandlerPhysicalChangeCallbacks(PVM pVM, RTGCPHYS GCPhys,
589 R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnHandlerR3, RTR3PTR pvUserR3,
590 R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnHandlerR0, RTR0PTR pvUserR0,
591 RCPTRTYPE(PFNPGMRCPHYSHANDLER) pfnHandlerRC, RTRCPTR pvUserRC,
592 R3PTRTYPE(const char *) pszDesc)
593{
594 /*
595 * Get the handler.
596 */
597 int rc = VINF_SUCCESS;
598 pgmLock(pVM);
599 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
600 if (pCur)
601 {
602 /*
603 * Change callbacks.
604 */
605 pCur->pfnHandlerR3 = pfnHandlerR3;
606 pCur->pvUserR3 = pvUserR3;
607 pCur->pfnHandlerR0 = pfnHandlerR0;
608 pCur->pvUserR0 = pvUserR0;
609 pCur->pfnHandlerRC = pfnHandlerRC;
610 pCur->pvUserRC = pvUserRC;
611 pCur->pszDesc = pszDesc;
612 }
613 else
614 {
615 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys));
616 rc = VERR_PGM_HANDLER_NOT_FOUND;
617 }
618
619 pgmUnlock(pVM);
620 return rc;
621}
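
/*
 * Editor's illustrative sketch (hypothetical callback names): swapping in new
 * callbacks without touching the range or any page state.
 *
 *   int rc = PGMHandlerPhysicalChangeCallbacks(pVM, GCPhys,
 *                                              demoNewHandlerR3, pvUserR3,
 *                                              demoNewHandlerR0, pvUserR0,
 *                                              demoNewHandlerRC, pvUserRC,
 *                                              "Demo handler (new callbacks)");
 */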
622
623
624/**
625 * Splits a physical access handler in two.
626 *
627 * @returns VBox status code.
628 * @param pVM VM Handle.
629 * @param GCPhys Start physical address of the handler.
630 * @param GCPhysSplit The split address.
631 */
632VMMDECL(int) PGMHandlerPhysicalSplit(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysSplit)
633{
634 AssertReturn(GCPhys < GCPhysSplit, VERR_INVALID_PARAMETER);
635
636 /*
637 * Do the allocation without owning the lock.
638 */
639 PPGMPHYSHANDLER pNew;
640 int rc = MMHyperAlloc(pVM, sizeof(*pNew), 0, MM_TAG_PGM_HANDLERS, (void **)&pNew);
641 if (RT_FAILURE(rc))
642 return rc;
643
644 /*
645 * Get the handler.
646 */
647 pgmLock(pVM);
648 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
649 if (RT_LIKELY(pCur))
650 {
651 if (RT_LIKELY(GCPhysSplit <= pCur->Core.KeyLast))
652 {
653 /*
654 * Create new handler node for the 2nd half.
655 */
656 *pNew = *pCur;
657 pNew->Core.Key = GCPhysSplit;
658 pNew->cPages = (pNew->Core.KeyLast - (pNew->Core.Key & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
659
660 pCur->Core.KeyLast = GCPhysSplit - 1;
661 pCur->cPages = (pCur->Core.KeyLast - (pCur->Core.Key & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
662
663 if (RT_LIKELY(RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pNew->Core)))
664 {
665 LogFlow(("PGMHandlerPhysicalSplit: %RGp-%RGp and %RGp-%RGp\n",
666 pCur->Core.Key, pCur->Core.KeyLast, pNew->Core.Key, pNew->Core.KeyLast));
667 pgmUnlock(pVM);
668 return VINF_SUCCESS;
669 }
670 AssertMsgFailed(("whu?\n"));
671 rc = VERR_INTERNAL_ERROR;
672 }
673 else
674 {
675 AssertMsgFailed(("outside range: %RGp-%RGp split %RGp\n", pCur->Core.Key, pCur->Core.KeyLast, GCPhysSplit));
676 rc = VERR_INVALID_PARAMETER;
677 }
678 }
679 else
680 {
681 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys));
682 rc = VERR_PGM_HANDLER_NOT_FOUND;
683 }
684 pgmUnlock(pVM);
685 MMHyperFree(pVM, pNew);
686 return rc;
687}
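
/*
 * Editor's illustrative sketch (hypothetical addresses): splitting a handler
 * covering 0x1000-0x2fff at 0x2000 leaves 0x1000-0x1fff in place and creates
 * a second node for 0x2000-0x2fff.
 *
 *   int rc = PGMHandlerPhysicalSplit(pVM, 0x1000, 0x2000);
 */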
688
689
690/**
691 * Joins up two adjacent physical access handlers which have the same callbacks.
692 *
693 * @returns VBox status code.
694 * @param pVM VM Handle.
695 * @param GCPhys1 Start physical address of the first handler.
696 * @param GCPhys2 Start physical address of the second handler.
697 */
698VMMDECL(int) PGMHandlerPhysicalJoin(PVM pVM, RTGCPHYS GCPhys1, RTGCPHYS GCPhys2)
699{
700 /*
701 * Get the handlers.
702 */
703 int rc;
704 pgmLock(pVM);
705 PPGMPHYSHANDLER pCur1 = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys1);
706 if (RT_LIKELY(pCur1))
707 {
708 PPGMPHYSHANDLER pCur2 = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys2);
709 if (RT_LIKELY(pCur2))
710 {
711 /*
712 * Make sure that they are adjacent, and that they've got the same callbacks.
713 */
714 if (RT_LIKELY(pCur1->Core.KeyLast + 1 == pCur2->Core.Key))
715 {
716 if (RT_LIKELY( pCur1->pfnHandlerRC == pCur2->pfnHandlerRC
717 && pCur1->pfnHandlerR0 == pCur2->pfnHandlerR0
718 && pCur1->pfnHandlerR3 == pCur2->pfnHandlerR3))
719 {
720 PPGMPHYSHANDLER pCur3 = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys2);
721 if (RT_LIKELY(pCur3 == pCur2))
722 {
723 pCur1->Core.KeyLast = pCur2->Core.KeyLast;
724 pCur1->cPages = (pCur1->Core.KeyLast - (pCur1->Core.Key & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
725 LogFlow(("PGMHandlerPhysicalJoin: %RGp-%RGp %RGp-%RGp\n",
726 pCur1->Core.Key, pCur1->Core.KeyLast, pCur2->Core.Key, pCur2->Core.KeyLast));
727 pgmUnlock(pVM);
728 MMHyperFree(pVM, pCur2);
729 return VINF_SUCCESS;
730 }
731
732 Assert(pCur3 == pCur2);
733 rc = VERR_INTERNAL_ERROR;
734 }
735 else
736 {
737 AssertMsgFailed(("mismatching handlers\n"));
738 rc = VERR_ACCESS_DENIED;
739 }
740 }
741 else
742 {
743 AssertMsgFailed(("not adjacent: %RGp-%RGp %RGp-%RGp\n",
744 pCur1->Core.Key, pCur1->Core.KeyLast, pCur2->Core.Key, pCur2->Core.KeyLast));
745 rc = VERR_INVALID_PARAMETER;
746 }
747 }
748 else
749 {
750 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys2));
751 rc = VERR_PGM_HANDLER_NOT_FOUND;
752 }
753 }
754 else
755 {
756 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys1));
757 rc = VERR_PGM_HANDLER_NOT_FOUND;
758 }
759 pgmUnlock(pVM);
760 return rc;
761
762}
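
/*
 * Editor's illustrative sketch: the inverse of the split example above; the
 * two handlers must be adjacent and share the same R3/R0/RC callbacks.
 *
 *   int rc = PGMHandlerPhysicalJoin(pVM, 0x1000, 0x2000);
 */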
763
764
765/**
766 * Resets any modifications to individual pages in a physical
767 * page access handler region.
768 *
769 * This is used together with PGMHandlerPhysicalPageTempOff().
770 *
771 * @returns VBox status code.
772 * @param pVM VM Handle
773 * @param GCPhys Start physical address earlier passed to PGMR3HandlerPhysicalRegister().
774 */
775VMMDECL(int) PGMHandlerPhysicalReset(PVM pVM, RTGCPHYS GCPhys)
776{
777 pgmLock(pVM);
778
779 /*
780 * Find the handler.
781 */
782 int rc;
783 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
784 if (RT_LIKELY(pCur))
785 {
786 /*
787 * Validate type.
788 */
789 switch (pCur->enmType)
790 {
791 case PGMPHYSHANDLERTYPE_PHYSICAL_WRITE:
792 case PGMPHYSHANDLERTYPE_PHYSICAL_ALL:
793 case PGMPHYSHANDLERTYPE_MMIO: /* @note Only use when clearing aliased mmio ranges! */
794 {
795 /*
796 * Set the flags and flush shadow PT entries.
797 */
798 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PhysHandlerReset));
799 PPGMRAMRANGE pRam = pgmPhysGetRange(&pVM->pgm.s, GCPhys);
800 Assert(pRam);
801 rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam);
802 if (rc == VINF_PGM_GCPHYS_ALIASED)
803 {
804 pVM->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
805 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
806 }
807 pVM->pgm.s.fPhysCacheFlushPending = true;
808 HWACCMFlushTLB(pVM);
809
810 rc = VINF_SUCCESS;
811 break;
812 }
813
814 /*
815 * Invalid.
816 */
817 default:
818 AssertMsgFailed(("Invalid type %d! Corruption!\n", pCur->enmType));
819 rc = VERR_INTERNAL_ERROR;
820 break;
821 }
822 }
823 else
824 {
825 AssertMsgFailed(("Didn't find MMIO Range starting at %#x\n", GCPhys));
826 rc = VERR_PGM_HANDLER_NOT_FOUND;
827 }
828
829 pgmUnlock(pVM);
830 return rc;
831}
832
833
834/**
835 * Temporarily turns off the access monitoring of a page within a monitored
836 * physical write/all page access handler region.
837 *
838 * Use this when no further \#PFs are required for that page. Be aware that
839 * a page directory sync might reset the flags, and turn on access monitoring
840 * for the page.
841 *
842 * The caller must do required page table modifications.
843 *
844 * @returns VBox status code.
845 * @param pVM VM Handle
846 * @param GCPhys Start physical address earlier passed to PGMR3HandlerPhysicalRegister().
847 * This must be a fully page aligned range or we risk messing up other
848 * handlers installed for the start and end pages.
849 * @param GCPhysPage Physical address of the page to turn off access monitoring for.
850 */
851VMMDECL(int) PGMHandlerPhysicalPageTempOff(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage)
852{
853 /*
854 * Validate the range.
855 */
856 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
857 if (RT_LIKELY(pCur))
858 {
859 if (RT_LIKELY( GCPhysPage >= pCur->Core.Key
860 && GCPhysPage <= pCur->Core.KeyLast))
861 {
862 Assert(!(pCur->Core.Key & PAGE_OFFSET_MASK));
863 Assert((pCur->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
864
865 AssertReturn( pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_WRITE
866 || pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_ALL,
867 VERR_ACCESS_DENIED);
868
869 /*
870 * Change the page status.
871 */
872 PPGMPAGE pPage;
873 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysPage, &pPage);
874 AssertRCReturn(rc, rc);
875 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
876#ifndef IN_RC
877 HWACCMInvalidatePhysPage(pVM, GCPhysPage);
878#endif
879 return VINF_SUCCESS;
880 }
881
882 AssertMsgFailed(("The page %#x is outside the range %#x-%#x\n",
883 GCPhysPage, pCur->Core.Key, pCur->Core.KeyLast));
884 return VERR_INVALID_PARAMETER;
885 }
886
887 AssertMsgFailed(("Specified physical handler start address %#x is invalid.\n", GCPhys));
888 return VERR_PGM_HANDLER_NOT_FOUND;
889}
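
/*
 * Editor's illustrative sketch: letting the guest access one page of a
 * monitored region directly and re-arming the whole region later. GCPhys is
 * the page aligned range start passed at registration, GCPhysPage any page
 * within it.
 *
 *   int rc = PGMHandlerPhysicalPageTempOff(pVM, GCPhys, GCPhysPage);
 *   ...
 *   rc = PGMHandlerPhysicalReset(pVM, GCPhys);  // re-enables every page
 */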
890
891
892/**
893 * Temporarily turns off the access monitoring of a page within an MMIO
894 * access handler region and remaps it to another guest physical region.
895 *
896 * Use this when no further \#PFs are required for that page. Be aware that
897 * a page directory sync might reset the flags, and turn on access monitoring
898 * for the page.
899 *
900 * The caller must do required page table modifications.
901 *
902 * @returns VBox status code.
903 * @param pVM VM Handle
904 * @param GCPhys Start physical address earlier passed to PGMR3HandlerPhysicalRegister().
905 * This must be a fully page aligned range or we risk messing up other
906 * handlers installed for the start and end pages.
907 * @param GCPhysPage Physical address of the page to turn off access monitoring for.
908 * @param GCPhysPageRemap Physical address of the page that serves as backing memory.
909 */
910VMMDECL(int) PGMHandlerPhysicalPageAlias(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage, RTGCPHYS GCPhysPageRemap)
911{
912 /*
913 * Validate the range.
914 */
915 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
916 if (RT_LIKELY(pCur))
917 {
918 if (RT_LIKELY( GCPhysPage >= pCur->Core.Key
919 && GCPhysPage <= pCur->Core.KeyLast))
920 {
921 Assert(!(pCur->Core.Key & PAGE_OFFSET_MASK));
922 Assert((pCur->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
923
924 AssertReturn(pCur->enmType == PGMPHYSHANDLERTYPE_MMIO, VERR_ACCESS_DENIED);
925 /** @todo r=bird: This totally breaks the new PGMPAGE management. Will probably
926 * have to require that the current page is the zero page... Require
927 * GCPhysPageRemap to be a MMIO2 page might help matters because those
928 * pages aren't managed dynamically (at least not yet).
929 * VBOX_WITH_NEW_PHYS_CODE TODO!
930 *
931 * A solution to this would be to temporarily change the page into a MMIO2 one
932 * and record that we've changed it. Only the physical page address would
933 * need to be copied over. The aliased page would have to be MMIO2 ofc, since
934 * RAM or ROM pages would require write sharing which is something we don't
935 * intend to implement just yet...
936 */
937
938 /*
939 * Note! This trick only works reliably if the two pages are never ever
940 * mapped in the same page table. If they are the page pool code will
941 * be confused should either of them be flushed. See the special case
942 * of zero page aliasing mentioned in #3170.
943 */
944
945 PPGMPAGE pPageRemap;
946 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysPageRemap, &pPageRemap);
947 AssertRCReturn(rc, rc);
948
949 /*
950 * Change the page status.
951 */
952 PPGMPAGE pPage;
953 rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysPage, &pPage);
954 AssertRCReturn(rc, rc);
955
956 /* Do the actual remapping here. This page now serves as an alias for the backing memory specified. */
957#ifdef VBOX_WITH_NEW_PHYS_CODE
958 AssertReleaseFailed(); /** @todo see todo above! */
959#else
960 pPage->HCPhys = pPageRemap->HCPhys;
961 PGM_PAGE_SET_TRACKING(pPage, 0);
962#endif
963
964 LogFlow(("PGMHandlerPhysicalPageAlias %RGp alias for %RGp (%R[pgmpage]) -> %R[pgmpage]\n",
965 GCPhysPage, GCPhysPageRemap, pPageRemap, pPage));
966 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
967#ifndef IN_RC
968 HWACCMInvalidatePhysPage(pVM, GCPhysPage);
969#endif
970 return VINF_SUCCESS;
971 }
972
973 AssertMsgFailed(("The page %#x is outside the range %#x-%#x\n",
974 GCPhysPage, pCur->Core.Key, pCur->Core.KeyLast));
975 return VERR_INVALID_PARAMETER;
976 }
977
978 AssertMsgFailed(("Specified physical handler start address %#x is invalid.\n", GCPhys));
979 return VERR_PGM_HANDLER_NOT_FOUND;
980}
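
/*
 * Editor's illustrative sketch (hypothetical addresses): aliasing one page of
 * an MMIO handler region to a page with real backing memory (MMIO2), so
 * further guest accesses bypass the #PF handler. See the todos above for the
 * caveats under the new physical memory code.
 *
 *   int rc = PGMHandlerPhysicalPageAlias(pVM, GCPhysMmio, GCPhysMmioPage,
 *                                        GCPhysMmio2Page);
 */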
981
982
983/**
984 * Turns access monitoring of a page within a monitored
985 * physical write/all page access handler region back on.
986 *
987 * The caller must do required page table modifications.
988 *
989 * @returns VBox status code.
990 * @param pVM VM Handle
991 * @param GCPhys Start physical address earlier passed to PGMR3HandlerPhysicalRegister().
992 * This must be a fully page aligned range or we risk messing up other
993 * handlers installed for the start and end pages.
994 * @param GCPhysPage Physical address of the page to turn on access monitoring for.
995 */
996VMMDECL(int) PGMHandlerPhysicalPageReset(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage)
997{
998 /*
999 * Validate the range.
1000 */
1001 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1002 if (RT_LIKELY(pCur))
1003 {
1004 if (RT_LIKELY( GCPhysPage >= pCur->Core.Key
1005 && GCPhysPage <= pCur->Core.KeyLast))
1006 {
1007 Assert(!(pCur->Core.Key & PAGE_OFFSET_MASK));
1008 Assert((pCur->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
1009
1010 AssertReturn( pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_WRITE
1011 || pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_ALL
1012 || pCur->enmType == PGMPHYSHANDLERTYPE_MMIO,
1013 VERR_ACCESS_DENIED);
1014
1015 /*
1016 * Change the page status.
1017 */
1018 PPGMPAGE pPage;
1019 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysPage, &pPage);
1020 AssertRCReturn(rc, rc);
1021 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, pgmHandlerPhysicalCalcState(pCur));
1022
1023#ifndef IN_RC
1024 HWACCMInvalidatePhysPage(pVM, GCPhysPage);
1025#endif
1026 return VINF_SUCCESS;
1027 }
1028
1029 AssertMsgFailed(("The page %#x is outside the range %#x-%#x\n",
1030 GCPhysPage, pCur->Core.Key, pCur->Core.KeyLast));
1031 return VERR_INVALID_PARAMETER;
1032 }
1033
1034 AssertMsgFailed(("Specified physical handler start address %#x is invalid.\n", GCPhys));
1035 return VERR_PGM_HANDLER_NOT_FOUND;
1036}
1037
1038
1039/**
1040 * Checks if a physical range is handled.
1041 *
1042 * @returns boolean
1043 * @param pVM VM Handle.
1044 * @param GCPhys Start physical address earlier passed to PGMR3HandlerPhysicalRegister().
1045 * @remarks Caller must take the PGM lock...
1046 * @threads EMT.
1047 */
1048VMMDECL(bool) PGMHandlerPhysicalIsRegistered(PVM pVM, RTGCPHYS GCPhys)
1049{
1050 /*
1051 * Find the handler.
1052 */
1053 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1054 if (pCur)
1055 {
1056 Assert(GCPhys >= pCur->Core.Key && GCPhys <= pCur->Core.KeyLast);
1057 Assert( pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_WRITE
1058 || pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_ALL
1059 || pCur->enmType == PGMPHYSHANDLERTYPE_MMIO);
1060 return true;
1061 }
1062
1063 return false;
1064}
1065
1066
1067/**
1068 * Checks if it's a disabled all access handler or write access handler at the
1069 * given address.
1070 *
1071 * @returns true if it's an all access handler, false if it's a write access
1072 * handler.
1073 * @param pVM Pointer to the shared VM structure.
1074 * @param GCPhys The address of the page with a disabled handler.
1075 *
1076 * @remarks The caller, PGMR3PhysTlbGCPhys2Ptr, must hold the PGM lock.
1077 */
1078bool pgmHandlerPhysicalIsAll(PVM pVM, RTGCPHYS GCPhys)
1079{
1080 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1081 AssertReturn(pCur, true);
1082 Assert( pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_WRITE
1083 || pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_ALL
1084 || pCur->enmType == PGMPHYSHANDLERTYPE_MMIO); /* sanity */
1085 /* Only whole pages can be disabled. */
1086 Assert( pCur->Core.Key <= (GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK)
1087 && pCur->Core.KeyLast >= (GCPhys | PAGE_OFFSET_MASK));
1088 return pCur->enmType != PGMPHYSHANDLERTYPE_PHYSICAL_WRITE;
1089}
1090
1091
1092/**
1093 * Checks if a particular guest VA is being monitored.
1094 *
1095 * @returns true or false
1096 * @param pVM VM handle.
1097 * @param GCPtr Virtual address.
1098 * @remarks Will acquire the PGM lock.
1099 * @threads Any.
1100 */
1101VMMDECL(bool) PGMHandlerVirtualIsRegistered(PVM pVM, RTGCPTR GCPtr)
1102{
1103 pgmLock(pVM);
1104 PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)RTAvlroGCPtrGet(&pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers, GCPtr);
1105 pgmUnlock(pVM);
1106
1107 return pCur != NULL;
1108}
1109
1110
1111/**
1112 * Searches for a virtual handler with a matching physical address.
1113 *
1114 * @returns VBox status code
1115 * @param pVM The VM handle.
1116 * @param GCPhys GC physical address to search for.
1117 * @param ppVirt Where to store the pointer to the virtual handler structure.
1118 * @param piPage Where to store the index of the cached physical page.
1119 */
1120int pgmHandlerVirtualFindByPhysAddr(PVM pVM, RTGCPHYS GCPhys, PPGMVIRTHANDLER *ppVirt, unsigned *piPage)
1121{
1122 STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,VirtHandlerSearchByPhys), a);
1123 Assert(ppVirt);
1124
1125 PPGMPHYS2VIRTHANDLER pCur;
1126 pCur = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers, GCPhys);
1127 if (pCur)
1128 {
1129 /* found a match! */
1130#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
1131 AssertRelease(pCur->offNextAlias & PGMPHYS2VIRTHANDLER_IS_HEAD);
1132#endif
1133 *ppVirt = (PPGMVIRTHANDLER)((uintptr_t)pCur + pCur->offVirtHandler);
1134 *piPage = pCur - &(*ppVirt)->aPhysToVirt[0];
1135
1136 LogFlow(("PHYS2VIRT: found match for %RGp -> %RGv *piPage=%#x\n", GCPhys, (*ppVirt)->Core.Key, *piPage));
1137 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,VirtHandlerSearchByPhys), a);
1138 return VINF_SUCCESS;
1139 }
1140
1141 *ppVirt = NULL;
1142 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,VirtHandlerSearchByPhys), a);
1143 return VERR_PGM_HANDLER_NOT_FOUND;
1144}
1145
1146
1147/**
1148 * Deals with aliases in phys2virt.
1149 *
1150 * As pointed out by the various todos, this currently only deals with
1151 * aliases where the two ranges match 100%.
1152 *
1153 * @param pVM The VM handle.
1154 * @param pPhys2Virt The node we failed to insert.
1155 */
1156static void pgmHandlerVirtualInsertAliased(PVM pVM, PPGMPHYS2VIRTHANDLER pPhys2Virt)
1157{
1158 /*
1159 * First find the node which is conflicting with us.
1160 */
1161 /** @todo Deal with partial overlapping. (Unlikely situation, so I'm too lazy to do anything about it now.) */
1162 /** @todo check if the current head node covers the ground we do. This is highly unlikely
1163 * and I'm too lazy to implement this now as it will require sorting the list and stuff like that. */
1164 PPGMPHYS2VIRTHANDLER pHead = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key);
1165#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
1166 AssertReleaseMsg(pHead != pPhys2Virt, ("%RGp-%RGp offVirtHandler=%#RX32\n",
1167 pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler));
1168#endif
1169 if (RT_UNLIKELY(!pHead || pHead->Core.KeyLast != pPhys2Virt->Core.KeyLast))
1170 {
1171 /** @todo do something clever here... */
1172 LogRel(("pgmHandlerVirtualInsertAliased: %RGp-%RGp\n", pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast));
1173 pPhys2Virt->offNextAlias = 0;
1174 return;
1175 }
1176
1177 /*
1178 * Insert ourselves as the next node.
1179 */
1180 if (!(pHead->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK))
1181 pPhys2Virt->offNextAlias = PGMPHYS2VIRTHANDLER_IN_TREE;
1182 else
1183 {
1184 PPGMPHYS2VIRTHANDLER pNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pHead + (pHead->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
1185 pPhys2Virt->offNextAlias = ((intptr_t)pNext - (intptr_t)pPhys2Virt)
1186 | PGMPHYS2VIRTHANDLER_IN_TREE;
1187 }
1188 pHead->offNextAlias = ((intptr_t)pPhys2Virt - (intptr_t)pHead)
1189 | (pHead->offNextAlias & ~PGMPHYS2VIRTHANDLER_OFF_MASK);
1190 Log(("pgmHandlerVirtualInsertAliased: %RGp-%RGp offNextAlias=%#RX32\n", pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias));
1191}
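
/*
 * Editor's worked example: offNextAlias packs a self-relative byte offset with
 * two flag bits. With head node H and aliases A1 then A2 inserted by the code
 * above, each new alias is linked in right after the head, giving the chain
 * H -> A2 -> A1:
 *
 *   H.offNextAlias  = (A2 - H)  | IN_TREE | IS_HEAD
 *   A2.offNextAlias = (A1 - A2) | IN_TREE
 *   A1.offNextAlias = IN_TREE   (end of chain)
 */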
1192
1193
1194/**
1195 * Resets one virtual handler range.
1196 *
1197 * This is called by HandlerVirtualUpdate when it has detected some kind of
1198 * problem and has started clearing the virtual handler page states (or
1199 * when there have been registrations/deregistrations). For this reason this
1200 * function will only update the page status if it's lower than desired.
1201 *
1202 * @returns 0
1203 * @param pNode Pointer to a PGMVIRTHANDLER.
1204 * @param pvUser The VM handle.
1205 */
1206DECLCALLBACK(int) pgmHandlerVirtualResetOne(PAVLROGCPTRNODECORE pNode, void *pvUser)
1207{
1208 PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)pNode;
1209 PVM pVM = (PVM)pvUser;
1210
1211 /*
1212 * Iterate the pages and apply the new state.
1213 */
1214 unsigned uState = pgmHandlerVirtualCalcState(pCur);
1215 PPGMRAMRANGE pRamHint = NULL;
1216 RTGCUINTPTR offPage = ((RTGCUINTPTR)pCur->Core.Key & PAGE_OFFSET_MASK);
1217 RTGCUINTPTR cbLeft = pCur->cb;
1218 for (unsigned iPage = 0; iPage < pCur->cPages; iPage++)
1219 {
1220 PPGMPHYS2VIRTHANDLER pPhys2Virt = &pCur->aPhysToVirt[iPage];
1221 if (pPhys2Virt->Core.Key != NIL_RTGCPHYS)
1222 {
1223 /*
1224 * Update the page state wrt virtual handlers.
1225 */
1226 PPGMPAGE pPage;
1227 int rc = pgmPhysGetPageWithHintEx(&pVM->pgm.s, pPhys2Virt->Core.Key, &pPage, &pRamHint);
1228 if ( RT_SUCCESS(rc)
1229 && PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) < uState)
1230 PGM_PAGE_SET_HNDL_VIRT_STATE(pPage, uState);
1231 else
1232 AssertRC(rc);
1233
1234 /*
1235 * Need to insert the page in the Phys2Virt lookup tree?
1236 */
1237 if (pPhys2Virt->Core.KeyLast == NIL_RTGCPHYS)
1238 {
1239#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
1240 AssertRelease(!pPhys2Virt->offNextAlias);
1241#endif
1242 unsigned cbPhys = cbLeft;
1243 if (cbPhys > PAGE_SIZE - offPage)
1244 cbPhys = PAGE_SIZE - offPage;
1245 else
1246 Assert(iPage == pCur->cPages - 1);
1247 pPhys2Virt->Core.KeyLast = pPhys2Virt->Core.Key + cbPhys - 1; /* inclusive */
1248 pPhys2Virt->offNextAlias = PGMPHYS2VIRTHANDLER_IS_HEAD | PGMPHYS2VIRTHANDLER_IN_TREE;
1249 if (!RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers, &pPhys2Virt->Core))
1250 pgmHandlerVirtualInsertAliased(pVM, pPhys2Virt);
1251#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
1252 else
1253 AssertReleaseMsg(RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key) == &pPhys2Virt->Core,
1254 ("%RGp-%RGp offNextAlias=%#RX32\n",
1255 pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias));
1256#endif
1257 Log2(("PHYS2VIRT: Insert physical range %RGp-%RGp offNextAlias=%#RX32 %s\n",
1258 pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias, R3STRING(pCur->pszDesc)));
1259 }
1260 }
1261 cbLeft -= PAGE_SIZE - offPage;
1262 offPage = 0;
1263 }
1264
1265 return 0;
1266}
1267
1268#if defined(VBOX_STRICT) || defined(LOG_ENABLED)
1269
1270/**
1271 * Worker for pgmHandlerVirtualDumpPhysPages.
1272 *
1273 * @returns 0 (continue enumeration).
1274 * @param pNode The virtual handler node.
1275 * @param pvUser User argument, unused.
1276 */
1277static DECLCALLBACK(int) pgmHandlerVirtualDumpPhysPagesCallback(PAVLROGCPHYSNODECORE pNode, void *pvUser)
1278{
1279 PPGMPHYS2VIRTHANDLER pCur = (PPGMPHYS2VIRTHANDLER)pNode;
1280 PPGMVIRTHANDLER pVirt = (PPGMVIRTHANDLER)((uintptr_t)pCur + pCur->offVirtHandler);
1281 Log(("PHYS2VIRT: Range %RGp-%RGp for virtual handler: %s\n", pCur->Core.Key, pCur->Core.KeyLast, pVirt->pszDesc));
1282 return 0;
1283}
1284
1285
1286/**
1287 * Assertion / logging helper for dumping all the
1288 * virtual handlers to the log.
1289 *
1290 * @param pVM Pointer to the shared VM structure.
1291 */
1292void pgmHandlerVirtualDumpPhysPages(PVM pVM)
1293{
1294 RTAvlroGCPhysDoWithAll(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers, true /* from left */,
1295 pgmHandlerVirtualDumpPhysPagesCallback, 0);
1296}
1297
1298#endif /* VBOX_STRICT || LOG_ENABLED */
1299#ifdef VBOX_STRICT
1300
1301/**
1302 * State structure used by the PGMAssertHandlerAndFlagsInSync() function
1303 * and its AVL enumerators.
1304 */
1305typedef struct PGMAHAFIS
1306{
1307 /** The current physical address. */
1308 RTGCPHYS GCPhys;
1309 /** The state we've calculated. */
1310 unsigned uVirtStateFound;
1311 /** The state we're matching up to. */
1312 unsigned uVirtState;
1313 /** Number of errors. */
1314 unsigned cErrors;
1315 /** The VM handle. */
1316 PVM pVM;
1317} PGMAHAFIS, *PPGMAHAFIS;
1318
1319
1320#if 0 /* unused */
1321/**
1322 * Verify virtual handler by matching physical address.
1323 *
1324 * @returns 0
1325 * @param pNode Pointer to a PGMVIRTHANDLER.
1326 * @param pvUser Pointer to user parameter.
1327 */
1328static DECLCALLBACK(int) pgmHandlerVirtualVerifyOneByPhysAddr(PAVLROGCPTRNODECORE pNode, void *pvUser)
1329{
1330 PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)pNode;
1331 PPGMAHAFIS pState = (PPGMAHAFIS)pvUser;
1332
1333 for (unsigned iPage = 0; iPage < pCur->cPages; iPage++)
1334 {
1335 if ((pCur->aPhysToVirt[iPage].Core.Key & X86_PTE_PAE_PG_MASK) == pState->GCPhys)
1336 {
1337 unsigned uState = pgmHandlerVirtualCalcState(pCur);
1338 if (pState->uVirtState < uState)
1339 {
1340 error
1341 }
1342
1343 if (pState->uVirtState == uState)
1344 break; //??
1345 }
1346 }
1347 return 0;
1348}
1349#endif /* unused */
1350
1351
1352/**
1353 * Verify a virtual handler (enumeration callback).
1354 *
1355 * Called by PGMAssertHandlerAndFlagsInSync to check the sanity of all
1356 * the virtual handlers, esp. that the physical addresses match up.
1357 *
1358 * @returns 0
1359 * @param pNode Pointer to a PGMVIRTHANDLER.
1360 * @param pvUser Pointer to a PPGMAHAFIS structure.
1361 */
1362static DECLCALLBACK(int) pgmHandlerVirtualVerifyOne(PAVLROGCPTRNODECORE pNode, void *pvUser)
1363{
1364 PPGMVIRTHANDLER pVirt = (PPGMVIRTHANDLER)pNode;
1365 PPGMAHAFIS pState = (PPGMAHAFIS)pvUser;
1366 PVM pVM = pState->pVM;
1367
1368 /*
1369 * Validate the type and calc state.
1370 */
1371 switch (pVirt->enmType)
1372 {
1373 case PGMVIRTHANDLERTYPE_WRITE:
1374 case PGMVIRTHANDLERTYPE_ALL:
1375 break;
1376 default:
1377 AssertMsgFailed(("unknown/wrong enmType=%d\n", pVirt->enmType));
1378 pState->cErrors++;
1379 return 0;
1380 }
1381 const unsigned uState = pgmHandlerVirtualCalcState(pVirt);
1382
1383 /*
1384 * Check key alignment.
1385 */
1386 if ( (pVirt->aPhysToVirt[0].Core.Key & PAGE_OFFSET_MASK) != ((RTGCUINTPTR)pVirt->Core.Key & PAGE_OFFSET_MASK)
1387 && pVirt->aPhysToVirt[0].Core.Key != NIL_RTGCPHYS)
1388 {
1389 AssertMsgFailed(("virt handler phys has incorrect key! %RGp %RGv %s\n",
1390 pVirt->aPhysToVirt[0].Core.Key, pVirt->Core.Key, R3STRING(pVirt->pszDesc)));
1391 pState->cErrors++;
1392 }
1393
1394 if ( (pVirt->aPhysToVirt[pVirt->cPages - 1].Core.KeyLast & PAGE_OFFSET_MASK) != ((RTGCUINTPTR)pVirt->Core.KeyLast & PAGE_OFFSET_MASK)
1395 && pVirt->aPhysToVirt[pVirt->cPages - 1].Core.Key != NIL_RTGCPHYS)
1396 {
1397 AssertMsgFailed(("virt handler phys has incorrect key! %RGp %RGv %s\n",
1398 pVirt->aPhysToVirt[pVirt->cPages - 1].Core.KeyLast, pVirt->Core.KeyLast, R3STRING(pVirt->pszDesc)));
1399 pState->cErrors++;
1400 }
1401
1402 /*
1403 * Check pages for sanity and state.
1404 */
1405 RTGCUINTPTR GCPtr = (RTGCUINTPTR)pVirt->Core.Key;
1406 for (unsigned iPage = 0; iPage < pVirt->cPages; iPage++, GCPtr += PAGE_SIZE)
1407 {
1408 RTGCPHYS GCPhysGst;
1409 uint64_t fGst;
1410 int rc = PGMGstGetPage(pVM, (RTGCPTR)GCPtr, &fGst, &GCPhysGst);
1411 if ( rc == VERR_PAGE_NOT_PRESENT
1412 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
1413 {
1414 if (pVirt->aPhysToVirt[iPage].Core.Key != NIL_RTGCPHYS)
1415 {
1416 AssertMsgFailed(("virt handler phys out of sync. %RGp GCPhysNew=~0 iPage=%#x %RGv %s\n",
1417 pVirt->aPhysToVirt[iPage].Core.Key, iPage, GCPtr, R3STRING(pVirt->pszDesc)));
1418 pState->cErrors++;
1419 }
1420 continue;
1421 }
1422
1423 AssertRCReturn(rc, 0);
1424 if ((pVirt->aPhysToVirt[iPage].Core.Key & X86_PTE_PAE_PG_MASK) != GCPhysGst)
1425 {
1426 AssertMsgFailed(("virt handler phys out of sync. %RGp GCPhysGst=%RGp iPage=%#x %RGv %s\n",
1427 pVirt->aPhysToVirt[iPage].Core.Key, GCPhysGst, iPage, GCPtr, R3STRING(pVirt->pszDesc)));
1428 pState->cErrors++;
1429 continue;
1430 }
1431
1432 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhysGst);
1433 if (!pPage)
1434 {
1435 AssertMsgFailed(("virt handler getting ram flags. GCPhysGst=%RGp iPage=%#x %RGv %s\n",
1436 GCPhysGst, iPage, GCPtr, R3STRING(pVirt->pszDesc)));
1437 pState->cErrors++;
1438 continue;
1439 }
1440
1441 if (PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) < uState)
1442 {
1443 AssertMsgFailed(("virt handler state mismatch. pPage=%R[pgmpage] GCPhysGst=%RGp iPage=%#x %RGv state=%d expected>=%d %s\n",
1444 pPage, GCPhysGst, iPage, GCPtr, PGM_PAGE_GET_HNDL_VIRT_STATE(pPage), uState, R3STRING(pVirt->pszDesc)));
1445 pState->cErrors++;
1446 continue;
1447 }
1448 } /* for pages in virtual mapping. */
1449
1450 return 0;
1451}
1452
1453
1454/**
1455 * Asserts that the handlers+guest-page-tables == ramrange-flags and
1456 * that the physical addresses associated with virtual handlers are correct.
1457 *
1458 * @returns Number of mismatches.
1459 * @param pVM The VM handle.
1460 */
1461VMMDECL(unsigned) PGMAssertHandlerAndFlagsInSync(PVM pVM)
1462{
1463 PPGM pPGM = &pVM->pgm.s;
1464 PGMAHAFIS State;
1465 State.GCPhys = 0;
1466 State.uVirtState = 0;
1467 State.uVirtStateFound = 0;
1468 State.cErrors = 0;
1469 State.pVM = pVM;
1470
1471 /*
1472 * Check the RAM flags against the handlers.
1473 */
1474 for (PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges); pRam; pRam = pRam->CTX_SUFF(pNext))
1475 {
1476 const unsigned cPages = pRam->cb >> PAGE_SHIFT;
1477 for (unsigned iPage = 0; iPage < cPages; iPage++)
1478 {
1479 PGMPAGE const *pPage = &pRam->aPages[iPage];
1480 if (PGM_PAGE_HAS_ANY_HANDLERS(pPage))
1481 {
1482 State.GCPhys = pRam->GCPhys + (iPage << PAGE_SHIFT);
1483
1484 /*
1485 * Physical first - calculate the state based on the handlers
1486 * active on the page, then compare.
1487 */
1488 if (PGM_PAGE_HAS_ANY_PHYSICAL_HANDLERS(pPage))
1489 {
1490 /* the first */
1491 PPGMPHYSHANDLER pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pPGM->CTX_SUFF(pTrees)->PhysHandlers, State.GCPhys);
1492 if (!pPhys)
1493 {
1494 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pPGM->CTX_SUFF(pTrees)->PhysHandlers, State.GCPhys, true);
1495 if ( pPhys
1496 && pPhys->Core.Key > (State.GCPhys + PAGE_SIZE - 1))
1497 pPhys = NULL;
1498 Assert(!pPhys || pPhys->Core.Key >= State.GCPhys);
1499 }
1500 if (pPhys)
1501 {
1502 unsigned uState = pgmHandlerPhysicalCalcState(pPhys);
1503
1504 /* more? */
1505 while (pPhys->Core.KeyLast < (State.GCPhys | PAGE_OFFSET_MASK))
1506 {
1507 PPGMPHYSHANDLER pPhys2 = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pPGM->CTX_SUFF(pTrees)->PhysHandlers,
1508 pPhys->Core.KeyLast + 1, true);
1509 if ( !pPhys2
1510 || pPhys2->Core.Key > (State.GCPhys | PAGE_OFFSET_MASK))
1511 break;
1512 unsigned uState2 = pgmHandlerPhysicalCalcState(pPhys2);
1513 uState = RT_MAX(uState, uState2);
1514 pPhys = pPhys2;
1515 }
1516
1517 /* compare.*/
1518 if ( PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != uState
1519 && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_DISABLED)
1520 {
1521 AssertMsgFailed(("ram range vs phys handler flags mismatch. GCPhys=%RGp state=%d expected=%d %s\n",
1522 State.GCPhys, PGM_PAGE_GET_HNDL_PHYS_STATE(pPage), uState, pPhys->pszDesc));
1523 State.cErrors++;
1524 }
1525
1526#ifdef IN_RING3
1527 /* validate that REM is handling it. */
1528 if ( !REMR3IsPageAccessHandled(pVM, State.GCPhys)
1529 /* ignore shadowed ROM for the time being. */
1530# ifdef VBOX_WITH_NEW_PHYS_CODE
1531 && PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_ROM_SHADOW
1532# else
1533 && (pPage->HCPhys & (MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO2)) != (MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO2)
1534# endif
1535 )
1536 {
1537 AssertMsgFailed(("ram range vs phys handler REM mismatch. GCPhys=%RGp state=%d %s\n",
1538 State.GCPhys, PGM_PAGE_GET_HNDL_PHYS_STATE(pPage), pPhys->pszDesc));
1539 State.cErrors++;
1540 }
1541#endif
1542 }
1543 else
1544 {
1545 AssertMsgFailed(("ram range vs phys handler mismatch. no handler for GCPhys=%RGp\n", State.GCPhys));
1546 State.cErrors++;
1547 }
1548 }
1549
1550 /*
1551 * Virtual handlers.
1552 */
1553 if (PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage))
1554 {
1555 State.uVirtState = PGM_PAGE_GET_HNDL_VIRT_STATE(pPage);
1556#if 1
1557 /* locate all the matching physical ranges. */
1558 State.uVirtStateFound = PGM_PAGE_HNDL_VIRT_STATE_NONE;
1559 RTGCPHYS GCPhysKey = State.GCPhys;
1560 for (;;)
1561 {
1562 PPGMPHYS2VIRTHANDLER pPhys2Virt = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers,
1563 GCPhysKey, true /* above-or-equal */);
1564 if ( !pPhys2Virt
1565 || (pPhys2Virt->Core.Key & X86_PTE_PAE_PG_MASK) != State.GCPhys)
1566 break;
1567
1568 /* the head */
1569 GCPhysKey = pPhys2Virt->Core.KeyLast;
1570 PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)((uintptr_t)pPhys2Virt + pPhys2Virt->offVirtHandler);
1571 unsigned uState = pgmHandlerVirtualCalcState(pCur);
1572 State.uVirtStateFound = RT_MAX(State.uVirtStateFound, uState);
1573
1574 /* any aliases */
1575 while (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK)
1576 {
1577 pPhys2Virt = (PPGMPHYS2VIRTHANDLER)((uintptr_t)pPhys2Virt + (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
1578 pCur = (PPGMVIRTHANDLER)((uintptr_t)pPhys2Virt + pPhys2Virt->offVirtHandler);
1579 uState = pgmHandlerVirtualCalcState(pCur);
1580 State.uVirtStateFound = RT_MAX(State.uVirtStateFound, uState);
1581 }
1582
1583 /* done? */
1584 if ((GCPhysKey & X86_PTE_PAE_PG_MASK) != State.GCPhys)
1585 break;
1586 }
1587#else
1588 /* very slow */
1589 RTAvlroGCPtrDoWithAll(&pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers, true, pgmHandlerVirtualVerifyOneByPhysAddr, &State);
1590#endif
1591 if (State.uVirtState != State.uVirtStateFound)
1592 {
1593 AssertMsgFailed(("ram range vs virt handler flags mismatch. GCPhys=%RGp uVirtState=%#x uVirtStateFound=%#x\n",
1594 State.GCPhys, State.uVirtState, State.uVirtStateFound));
1595 State.cErrors++;
1596 }
1597 }
1598 }
1599 } /* foreach page in ram range. */
1600 } /* foreach ram range. */
1601
1602 /*
1603 * Check that the physical addresses of the virtual handlers match up
1604 * and that they are otherwise sane.
1605 */
1606 RTAvlroGCPtrDoWithAll(&pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers, true, pgmHandlerVirtualVerifyOne, &State);
1607
1608 /*
1609 * Do the reverse check for physical handlers.
1610 */
1611 /** @todo */
1612
1613 return State.cErrors;
1614}
1615
1616#endif /* VBOX_STRICT */
1617