VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp@18291

Last change on this file since 18291 was 18266, checked in by vboxsync, 16 years ago

PGM: Made PGMR3PhysRomProtect use pgmPoolTrackFlushGCPhys instead of doing a full pool flush for each call (expensive during reset); this fixes a reset assertion in pgmPoolMonitorFlush. Changed pgmPoolTrackFlushGCPhys to return VINF_PGM_SYNC_CR3 and set VM_FF_PGM_SYNCR3 and PGM_SYNC_CLEAR_PGM_POOL instead of leaving this to the caller.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 65.3 KB
/* $Id: PGMAllHandler.cpp 18266 2009-03-25 17:25:53Z vboxsync $ */
/** @file
 * PGM - Page Manager / Monitor, Access Handlers.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#include <VBox/dbgf.h>
#include <VBox/pgm.h>
#include <VBox/iom.h>
#include <VBox/mm.h>
#include <VBox/em.h>
#include <VBox/stam.h>
#include <VBox/rem.h>
36#include "PGMInternal.h"
37#include <VBox/vm.h>
38
39#include <VBox/log.h>
40#include <iprt/assert.h>
41#include <iprt/asm.h>
42#include <iprt/string.h>
43#include <VBox/param.h>
44#include <VBox/err.h>
45#include <VBox/selm.h>
46
47
48/*******************************************************************************
49* Internal Functions *
50*******************************************************************************/
51static int pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVM pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam);
52static void pgmHandlerPhysicalDeregisterNotifyREM(PVM pVM, PPGMPHYSHANDLER pCur);
53static void pgmHandlerPhysicalResetRamFlags(PVM pVM, PPGMPHYSHANDLER pCur);
54
55
56
/**
 * Register an access handler for a physical range.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS when successfully installed.
 * @retval  VINF_PGM_GCPHYS_ALIASED when the shadow PTs could not be updated because
 *          the guest page is aliased and/or mapped by multiple PTs. A CR3 sync has been
 *          flagged together with a pool clearing.
 * @retval  VERR_PGM_HANDLER_PHYSICAL_CONFLICT if the range conflicts with an existing
 *          one. A debug assertion is raised.
 *
 * @param   pVM             VM Handle.
 * @param   enmType         Handler type. Any of the PGMPHYSHANDLERTYPE_PHYSICAL* enums.
 * @param   GCPhys          Start physical address.
 * @param   GCPhysLast      Last physical address. (inclusive)
 * @param   pfnHandlerR3    The R3 handler.
 * @param   pvUserR3        User argument to the R3 handler.
 * @param   pfnHandlerR0    The R0 handler.
 * @param   pvUserR0        User argument to the R0 handler.
 * @param   pfnHandlerRC    The RC handler.
 * @param   pvUserRC        User argument to the RC handler. This can be a value
 *                          less than 0x10000 or a (non-null) pointer that is
 *                          automatically relocated.
 * @param   pszDesc         Pointer to description string. This must not be freed.
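 *
 * @par Example
 *      A minimal usage sketch; the handler callback, device state and
 *      description below are illustrative placeholders, not part of this API:
 * @code
 *      // Watch writes to two pages of device registers (ring-3 callback only).
 *      int rc = PGMHandlerPhysicalRegisterEx(pVM, PGMPHYSHANDLERTYPE_PHYSICAL_WRITE,
 *                                            GCPhysRegs,                        // page aligned start
 *                                            GCPhysRegs + 2 * PAGE_SIZE - 1,    // inclusive last byte
 *                                            myDeviceWriteHandlerR3, pMyDevice, // R3 callback + user arg
 *                                            NIL_RTR0PTR, NIL_RTR0PTR,          // no R0 callback
 *                                            NIL_RTRCPTR, NIL_RTRCPTR,          // no RC callback
 *                                            "My device registers");
 *      AssertRCReturn(rc, rc);
 * @endcode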
 */
VMMDECL(int) PGMHandlerPhysicalRegisterEx(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast,
                                          R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnHandlerR3, RTR3PTR pvUserR3,
                                          R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnHandlerR0, RTR0PTR pvUserR0,
                                          RCPTRTYPE(PFNPGMRCPHYSHANDLER) pfnHandlerRC, RTRCPTR pvUserRC,
                                          R3PTRTYPE(const char *) pszDesc)
{
    Log(("PGMHandlerPhysicalRegisterEx: enmType=%d GCPhys=%RGp GCPhysLast=%RGp pfnHandlerR3=%RHv pvUserR3=%RHv pfnHandlerR0=%RHv pvUserR0=%RHv pfnHandlerGC=%RRv pvUserGC=%RRv pszDesc=%s\n",
         enmType, GCPhys, GCPhysLast, pfnHandlerR3, pvUserR3, pfnHandlerR0, pvUserR0, pfnHandlerRC, pvUserRC, R3STRING(pszDesc)));

    /*
     * Validate input.
     */
    AssertMsgReturn(GCPhys < GCPhysLast, ("GCPhys >= GCPhysLast (%#x >= %#x)\n", GCPhys, GCPhysLast), VERR_INVALID_PARAMETER);
    switch (enmType)
    {
        case PGMPHYSHANDLERTYPE_PHYSICAL_WRITE:
            break;
        case PGMPHYSHANDLERTYPE_MMIO:
        case PGMPHYSHANDLERTYPE_PHYSICAL_ALL:
            /* Simplification in PGMPhysRead among other places. */
            AssertMsgReturn(!(GCPhys & PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_INVALID_PARAMETER);
            AssertMsgReturn((GCPhysLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK, ("%RGp\n", GCPhysLast), VERR_INVALID_PARAMETER);
            break;
        default:
            AssertMsgFailed(("Invalid input enmType=%d!\n", enmType));
            return VERR_INVALID_PARAMETER;
    }
    AssertMsgReturn(    (RTRCUINTPTR)pvUserRC < 0x10000
                    ||  MMHyperR3ToRC(pVM, MMHyperRCToR3(pVM, pvUserRC)) == pvUserRC,
                    ("Not RC pointer! pvUserRC=%RRv\n", pvUserRC),
                    VERR_INVALID_PARAMETER);
    AssertMsgReturn(    (RTR0UINTPTR)pvUserR0 < 0x10000
                    ||  MMHyperR3ToR0(pVM, MMHyperR0ToR3(pVM, pvUserR0)) == pvUserR0,
                    ("Not R0 pointer! pvUserR0=%RHv\n", pvUserR0),
                    VERR_INVALID_PARAMETER);
#ifdef VBOX_WITH_NEW_PHYS_CODE
    AssertPtrReturn(pfnHandlerR3, VERR_INVALID_POINTER);
    AssertReturn(pfnHandlerR0, VERR_INVALID_PARAMETER);
    AssertReturn(pfnHandlerRC, VERR_INVALID_PARAMETER);
#else
    AssertReturn(pfnHandlerR3 || pfnHandlerR0 || pfnHandlerRC, VERR_INVALID_PARAMETER);
#endif

    /*
     * We require the range to be within registered ram.
     * There is no apparent need to support ranges which cover more than one ram range.
     */
    PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
    while (pRam && GCPhys > pRam->GCPhysLast)
        pRam = pRam->CTX_SUFF(pNext);
    if (    !pRam
        ||  GCPhysLast < pRam->GCPhys
        ||  GCPhys > pRam->GCPhysLast)
    {
#ifdef IN_RING3
        DBGFR3Info(pVM, "phys", NULL, NULL);
#endif
        AssertMsgFailed(("No RAM range for %RGp-%RGp\n", GCPhys, GCPhysLast));
        return VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
    }

    /*
     * Allocate and initialize the new entry.
     */
    PPGMPHYSHANDLER pNew;
    int rc = MMHyperAlloc(pVM, sizeof(*pNew), 0, MM_TAG_PGM_HANDLERS, (void **)&pNew);
    if (RT_FAILURE(rc))
        return rc;

    pNew->Core.Key      = GCPhys;
    pNew->Core.KeyLast  = GCPhysLast;
    pNew->enmType       = enmType;
    pNew->cPages        = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
    pNew->pfnHandlerR3  = pfnHandlerR3;
    pNew->pvUserR3      = pvUserR3;
    pNew->pfnHandlerR0  = pfnHandlerR0;
    pNew->pvUserR0      = pvUserR0;
    pNew->pfnHandlerRC  = pfnHandlerRC;
    pNew->pvUserRC      = pvUserRC;
    pNew->pszDesc       = pszDesc;

    pgmLock(pVM);

    /*
     * Try insert into list.
     */
    if (RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pNew->Core))
    {
        rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pNew, pRam);
        if (rc == VINF_PGM_SYNC_CR3)
            rc = VINF_PGM_GCPHYS_ALIASED;
        pVM->pgm.s.fPhysCacheFlushPending = true;
        HWACCMFlushTLB(pVM);
#ifndef IN_RING3
        REMNotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, GCPhysLast - GCPhys + 1, !!pfnHandlerR3);
#else
        REMR3NotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, GCPhysLast - GCPhys + 1, !!pfnHandlerR3);
#endif
        pgmUnlock(pVM);
        if (rc != VINF_SUCCESS)
            Log(("PGMHandlerPhysicalRegisterEx: returns %Rrc (%RGp-%RGp)\n", rc, GCPhys, GCPhysLast));
        return rc;
    }

    pgmUnlock(pVM);

#if defined(IN_RING3) && defined(VBOX_STRICT)
    DBGFR3Info(pVM, "handlers", "phys nostats", NULL);
#endif
    AssertMsgFailed(("Conflict! GCPhys=%RGp GCPhysLast=%RGp pszDesc=%s\n", GCPhys, GCPhysLast, pszDesc));
    MMHyperFree(pVM, pNew);
    return VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
}


/**
 * Sets ram range flags and attempts updating shadow PTs.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS when the shadow PTs were successfully updated.
 * @retval  VINF_PGM_SYNC_CR3 when the shadow PTs could not be updated because
 *          the guest page is aliased and/or mapped by multiple PTs. FFs set.
 * @param   pVM     The VM handle.
 * @param   pCur    The physical handler.
 * @param   pRam    The RAM range.
 */
static int pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVM pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam)
{
    /*
     * Iterate the guest ram pages updating the flags and flushing PT entries
     * mapping the page.
     */
    bool            fFlushTLBs = false;
    int             rc         = VINF_SUCCESS;
    const unsigned  uState     = pgmHandlerPhysicalCalcState(pCur);
    uint32_t        cPages     = pCur->cPages;
    uint32_t        i          = (pCur->Core.Key - pRam->GCPhys) >> PAGE_SHIFT;
    for (;;)
    {
#ifndef VBOX_WITH_NEW_PHYS_CODE
        /* Physical chunk in dynamically allocated range not present? */
        if (RT_UNLIKELY(!PGM_PAGE_GET_HCPHYS(&pRam->aPages[i])))
        {
            RTGCPHYS GCPhys = pRam->GCPhys + (i << PAGE_SHIFT);
# ifdef IN_RING3
            int rc2 = pgmr3PhysGrowRange(pVM, GCPhys);
# else
            int rc2 = CTXALLMID(VMM, CallHost)(pVM, VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
# endif
            if (rc2 != VINF_SUCCESS)
                return rc2;
        }

#endif /* !VBOX_WITH_NEW_PHYS_CODE */
        PPGMPAGE pPage = &pRam->aPages[i];
#ifdef VBOX_WITH_NEW_PHYS_CODE
        AssertMsg(pCur->enmType != PGMPHYSHANDLERTYPE_MMIO || PGM_PAGE_IS_MMIO(pPage),
                  ("%RGp %R[pgmpage]\n", pRam->GCPhys + (i << PAGE_SHIFT), pPage));
#endif

        /* Only do upgrades. */
        if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) < uState)
        {
            PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, uState);
#ifndef VBOX_WITH_NEW_PHYS_CODE
            Assert(PGM_PAGE_GET_HCPHYS(pPage));
#endif

            int rc2 = pgmPoolTrackFlushGCPhys(pVM, pPage, &fFlushTLBs);
            if (rc2 != VINF_SUCCESS && rc == VINF_SUCCESS)
                rc = rc2;
        }

        /* next */
        if (--cPages == 0)
            break;
        i++;
    }

    if (fFlushTLBs && rc == VINF_SUCCESS)
    {
        PGM_INVL_GUEST_TLBS();
        Log(("pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs: flushing guest TLBs\n"));
    }
    else
        Log(("pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs: doesn't flush guest TLBs. rc=%Rrc\n", rc));
    return rc;
}


/**
 * Deregister a physical page access handler.
 *
 * @returns VBox status code.
 * @param   pVM     VM Handle.
 * @param   GCPhys  Start physical address.
 */
VMMDECL(int) PGMHandlerPhysicalDeregister(PVM pVM, RTGCPHYS GCPhys)
{
    /*
     * Find the handler.
     */
    pgmLock(pVM);
    PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
    if (pCur)
    {
        LogFlow(("PGMHandlerPhysicalDeregister: Removing Range %RGp-%RGp %s\n",
                 pCur->Core.Key, pCur->Core.KeyLast, R3STRING(pCur->pszDesc)));

        /*
         * Clear the page bits and notify the REM about this change.
         */
        HWACCMFlushTLB(pVM);
        pgmHandlerPhysicalResetRamFlags(pVM, pCur);
        pgmHandlerPhysicalDeregisterNotifyREM(pVM, pCur);
        pgmUnlock(pVM);
        MMHyperFree(pVM, pCur);
        return VINF_SUCCESS;
    }
    pgmUnlock(pVM);

    AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys));
    return VERR_PGM_HANDLER_NOT_FOUND;
}


/**
 * Shared code with modify.
 */
static void pgmHandlerPhysicalDeregisterNotifyREM(PVM pVM, PPGMPHYSHANDLER pCur)
{
    RTGCPHYS    GCPhysStart = pCur->Core.Key;
    RTGCPHYS    GCPhysLast  = pCur->Core.KeyLast;

    /*
     * Page align the range.
     *
     * Since we've reset (recalculated) the physical handler state of all pages
     * we can make use of the page states to figure out whether a page should be
     * included in the REM notification or not.
     */
    if (    (pCur->Core.Key & PAGE_OFFSET_MASK)
        ||  ((pCur->Core.KeyLast + 1) & PAGE_OFFSET_MASK))
    {
        Assert(pCur->enmType != PGMPHYSHANDLERTYPE_MMIO);

        if (GCPhysStart & PAGE_OFFSET_MASK)
        {
            PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhysStart);
            if (    pPage
                &&  PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
            {
                RTGCPHYS GCPhys = (GCPhysStart + (PAGE_SIZE - 1)) & X86_PTE_PAE_PG_MASK;
                if (    GCPhys > GCPhysLast
                    ||  GCPhys < GCPhysStart)
                    return;
                GCPhysStart = GCPhys;
            }
            else
                GCPhysStart &= X86_PTE_PAE_PG_MASK;
            Assert(!pPage || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO); /* these are page aligned atm! */
        }

        if (GCPhysLast & PAGE_OFFSET_MASK)
        {
            PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhysLast);
            if (    pPage
                &&  PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
            {
                RTGCPHYS GCPhys = (GCPhysLast & X86_PTE_PAE_PG_MASK) - 1;
                if (    GCPhys < GCPhysStart
                    ||  GCPhys > GCPhysLast)
                    return;
                GCPhysLast = GCPhys;
            }
            else
                GCPhysLast |= PAGE_OFFSET_MASK;
            Assert(!pPage || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO); /* these are page aligned atm! */
        }
    }

    /*
     * Tell REM.
     */
    const bool fRestoreAsRAM = pCur->pfnHandlerR3
                            && pCur->enmType != PGMPHYSHANDLERTYPE_MMIO; /** @todo this isn't entirely correct. */
#ifndef IN_RING3
    REMNotifyHandlerPhysicalDeregister(pVM, pCur->enmType, GCPhysStart, GCPhysLast - GCPhysStart + 1, !!pCur->pfnHandlerR3, fRestoreAsRAM);
#else
    REMR3NotifyHandlerPhysicalDeregister(pVM, pCur->enmType, GCPhysStart, GCPhysLast - GCPhysStart + 1, !!pCur->pfnHandlerR3, fRestoreAsRAM);
#endif
}


/**
 * pgmHandlerPhysicalResetRamFlags helper that checks for
 * other handlers on edge pages.
 */
DECLINLINE(void) pgmHandlerPhysicalRecalcPageState(PPGM pPGM, RTGCPHYS GCPhys, bool fAbove, PPGMRAMRANGE *ppRamHint)
{
    /*
     * Look for other handlers.
     */
    unsigned uState = PGM_PAGE_HNDL_PHYS_STATE_NONE;
    for (;;)
    {
        PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pPGM->CTX_SUFF(pTrees)->PhysHandlers, GCPhys, fAbove);
        if (    !pCur
            ||  ((fAbove ? pCur->Core.Key : pCur->Core.KeyLast) >> PAGE_SHIFT) != (GCPhys >> PAGE_SHIFT))
            break;
        unsigned uThisState = pgmHandlerPhysicalCalcState(pCur);
        uState = RT_MAX(uState, uThisState);

        /* next? */
        RTGCPHYS GCPhysNext = fAbove
                            ? pCur->Core.KeyLast + 1
                            : pCur->Core.Key - 1;
        if ((GCPhysNext >> PAGE_SHIFT) != (GCPhys >> PAGE_SHIFT))
            break;
        GCPhys = GCPhysNext;
    }

    /*
     * Update if we found something that is a higher priority
     * state than the current.
     */
    if (uState != PGM_PAGE_HNDL_PHYS_STATE_NONE)
    {
        PPGMPAGE pPage;
        int rc = pgmPhysGetPageWithHintEx(pPGM, GCPhys, &pPage, ppRamHint);
        if (    RT_SUCCESS(rc)
            &&  PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) < uState)
            PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, uState);
        else
            AssertRC(rc);
    }
}


#ifdef VBOX_WITH_NEW_PHYS_CODE
/**
 * Resets an aliased page.
 *
 * @param   pVM         The VM.
 * @param   pPage       The page.
 * @param   GCPhysPage  The page address in case it comes in handy.
 */
void pgmHandlerPhysicalResetAliasedPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhysPage)
{
    Assert(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO);
    Assert(PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_DISABLED);

    /*
     * Flush any shadow page table references *first*.
     */
    bool fFlushTLBs = false;
    int rc = pgmPoolTrackFlushGCPhys(pVM, pPage, &fFlushTLBs);
    AssertLogRelRCReturnVoid(rc);
# ifdef IN_RC
    if (fFlushTLBs && rc != VINF_PGM_SYNC_CR3)
        PGM_INVL_GUEST_TLBS();
# else
    HWACCMFlushTLB(pVM);
# endif
    pVM->pgm.s.fPhysCacheFlushPending = true;

    /*
     * Make it an MMIO/Zero page.
     */
    PGM_PAGE_SET_HCPHYS(pPage, pVM->pgm.s.HCPhysZeroPg);
    PGM_PAGE_SET_TYPE(pPage, PGMPAGETYPE_MMIO);
    PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ZERO);
    PGM_PAGE_SET_PAGEID(pPage, NIL_GMM_PAGEID);
    PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_ALL);

    NOREF(GCPhysPage);
}
#endif


/**
 * Resets ram range flags.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS when the shadow PTs were successfully updated.
 * @param   pVM     The VM handle.
 * @param   pCur    The physical handler.
 *
 * @remark  We don't start messing with the shadow page tables, as we've already got code
 *          in Trap0e which deals with out of sync handler flags (originally conceived for
 *          global pages).
 */
static void pgmHandlerPhysicalResetRamFlags(PVM pVM, PPGMPHYSHANDLER pCur)
{
    /*
     * Iterate the guest ram pages updating the state.
     */
    RTUINT          cPages   = pCur->cPages;
    RTGCPHYS        GCPhys   = pCur->Core.Key;
    PPGMRAMRANGE    pRamHint = NULL;
    PPGM            pPGM     = &pVM->pgm.s;
    for (;;)
    {
        PPGMPAGE pPage;
        int rc = pgmPhysGetPageWithHintEx(pPGM, GCPhys, &pPage, &pRamHint);
        if (RT_SUCCESS(rc))
        {
#ifdef VBOX_WITH_NEW_PHYS_CODE
            /* Reset MMIO2 for MMIO pages to MMIO, since this aliasing is our business.
               (We don't flip MMIO to RAM though, that's PGMPhys.cpp's job.) */
            if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
                pgmHandlerPhysicalResetAliasedPage(pVM, pPage, GCPhys);
            AssertMsg(pCur->enmType != PGMPHYSHANDLERTYPE_MMIO || PGM_PAGE_IS_MMIO(pPage), ("%RGp %R[pgmpage]\n", GCPhys, pPage));
#endif
            PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_NONE);
        }
        else
            AssertRC(rc);

        /* next */
        if (--cPages == 0)
            break;
        GCPhys += PAGE_SIZE;
    }

    /*
     * Check for partial start and end pages.
     */
    if (pCur->Core.Key & PAGE_OFFSET_MASK)
        pgmHandlerPhysicalRecalcPageState(pPGM, pCur->Core.Key - 1, false /* fAbove */, &pRamHint);
    if ((pCur->Core.KeyLast & PAGE_OFFSET_MASK) != PAGE_SIZE - 1)
        pgmHandlerPhysicalRecalcPageState(pPGM, pCur->Core.KeyLast + 1, true /* fAbove */, &pRamHint);
}


/**
 * Modify a physical page access handler.
 *
 * Modification can only be done to the range itself, not the type or anything else.
 *
 * @returns VBox status code.
 *          For all return codes other than VERR_PGM_HANDLER_NOT_FOUND and VINF_SUCCESS the range is deregistered
 *          and a new registration must be performed!
 * @param   pVM             VM handle.
 * @param   GCPhysCurrent   Current location.
 * @param   GCPhys          New location.
 * @param   GCPhysLast      New last location.
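 *
 * @par Example
 *      A sketch of relocating a handler when the guest remaps a BAR; the
 *      address variables are illustrative:
 * @code
 *      // Move the handler from the old BAR address to the new one,
 *      // keeping the same (page aligned) size.
 *      rc = PGMHandlerPhysicalModify(pVM, GCPhysOldBar,
 *                                    GCPhysNewBar, GCPhysNewBar + cbBar - 1);
 *      if (RT_FAILURE(rc) && rc != VERR_PGM_HANDLER_NOT_FOUND)
 *          // The range was deregistered on failure; register it anew.
 *          rc = PGMHandlerPhysicalRegisterEx(pVM, ...);
 * @endcode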
 */
VMMDECL(int) PGMHandlerPhysicalModify(PVM pVM, RTGCPHYS GCPhysCurrent, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast)
{
    /*
     * Remove it.
     */
    int rc;
    pgmLock(pVM);
    PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhysCurrent);
    if (pCur)
    {
        /*
         * Clear the ram flags. (We're gonna move or free it!)
         */
        pgmHandlerPhysicalResetRamFlags(pVM, pCur);
        const bool fRestoreAsRAM = pCur->pfnHandlerR3
                                && pCur->enmType != PGMPHYSHANDLERTYPE_MMIO; /** @todo this isn't entirely correct. */

        /*
         * Validate the new range, modify and reinsert.
         */
        if (GCPhysLast >= GCPhys)
        {
            /*
             * We require the range to be within registered ram.
             * There is no apparent need to support ranges which cover more than one ram range.
             */
            PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
            while (pRam && GCPhys > pRam->GCPhysLast)
                pRam = pRam->CTX_SUFF(pNext);
            if (    pRam
                &&  GCPhys <= pRam->GCPhysLast
                &&  GCPhysLast >= pRam->GCPhys)
            {
                pCur->Core.Key     = GCPhys;
                pCur->Core.KeyLast = GCPhysLast;
                pCur->cPages       = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + 1) >> PAGE_SHIFT;

                if (RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pCur->Core))
                {
                    /*
                     * Set ram flags, flush shadow PT entries and finally tell REM about this.
                     */
                    rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam);
                    pVM->pgm.s.fPhysCacheFlushPending = true;

#ifndef IN_RING3
                    REMNotifyHandlerPhysicalModify(pVM, pCur->enmType, GCPhysCurrent, GCPhys,
                                                   pCur->Core.KeyLast - GCPhys + 1, !!pCur->pfnHandlerR3, fRestoreAsRAM);
#else
                    REMR3NotifyHandlerPhysicalModify(pVM, pCur->enmType, GCPhysCurrent, GCPhys,
                                                     pCur->Core.KeyLast - GCPhys + 1, !!pCur->pfnHandlerR3, fRestoreAsRAM);
#endif
                    HWACCMFlushTLB(pVM);
                    pgmUnlock(pVM);
                    Log(("PGMHandlerPhysicalModify: GCPhysCurrent=%RGp -> GCPhys=%RGp GCPhysLast=%RGp\n",
                         GCPhysCurrent, GCPhys, GCPhysLast));
                    return VINF_SUCCESS;
                }

                AssertMsgFailed(("Conflict! GCPhys=%RGp GCPhysLast=%RGp\n", GCPhys, GCPhysLast));
                rc = VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
            }
            else
            {
                AssertMsgFailed(("No RAM range for %RGp-%RGp\n", GCPhys, GCPhysLast));
                rc = VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
            }
        }
        else
        {
            AssertMsgFailed(("Invalid range %RGp-%RGp\n", GCPhys, GCPhysLast));
            rc = VERR_INVALID_PARAMETER;
        }

        /*
         * Invalid new location, free it.
         * We've only gotta notify REM and free the memory.
         */
        pgmHandlerPhysicalDeregisterNotifyREM(pVM, pCur);
        MMHyperFree(pVM, pCur);
    }
    else
    {
        AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhysCurrent));
        rc = VERR_PGM_HANDLER_NOT_FOUND;
    }

    pgmUnlock(pVM);
    return rc;
}


/**
 * Changes the callbacks associated with a physical access handler.
 *
 * @returns VBox status code.
 * @param   pVM             VM Handle.
 * @param   GCPhys          Start physical address.
 * @param   pfnHandlerR3    The R3 handler.
 * @param   pvUserR3        User argument to the R3 handler.
 * @param   pfnHandlerR0    The R0 handler.
 * @param   pvUserR0        User argument to the R0 handler.
 * @param   pfnHandlerRC    The RC handler.
 * @param   pvUserRC        User argument to the RC handler. Values larger than
 *                          or equal to 0x10000 will be relocated automatically.
 * @param   pszDesc         Pointer to description string. This must not be freed.
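 *
 * @par Example
 *      A sketch of retargeting an existing handler at a new device instance;
 *      the callback and state names are illustrative:
 * @code
 *      // Same range, new user argument and description.
 *      rc = PGMHandlerPhysicalChangeCallbacks(pVM, GCPhysRegs,
 *                                             myDeviceWriteHandlerR3, pNewDevice,
 *                                             NIL_RTR0PTR, NIL_RTR0PTR,
 *                                             NIL_RTRCPTR, NIL_RTRCPTR,
 *                                             "My device registers (instance 2)");
 * @endcode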
 */
VMMDECL(int) PGMHandlerPhysicalChangeCallbacks(PVM pVM, RTGCPHYS GCPhys,
                                               R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnHandlerR3, RTR3PTR pvUserR3,
                                               R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnHandlerR0, RTR0PTR pvUserR0,
                                               RCPTRTYPE(PFNPGMRCPHYSHANDLER) pfnHandlerRC, RTRCPTR pvUserRC,
                                               R3PTRTYPE(const char *) pszDesc)
{
    /*
     * Get the handler.
     */
    int rc = VINF_SUCCESS;
    pgmLock(pVM);
    PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
    if (pCur)
    {
        /*
         * Change callbacks.
         */
        pCur->pfnHandlerR3 = pfnHandlerR3;
        pCur->pvUserR3     = pvUserR3;
        pCur->pfnHandlerR0 = pfnHandlerR0;
        pCur->pvUserR0     = pvUserR0;
        pCur->pfnHandlerRC = pfnHandlerRC;
        pCur->pvUserRC     = pvUserRC;
        pCur->pszDesc      = pszDesc;
    }
    else
    {
        AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys));
        rc = VERR_PGM_HANDLER_NOT_FOUND;
    }

    pgmUnlock(pVM);
    return rc;
}


/**
 * Splits a physical access handler in two.
 *
 * @returns VBox status code.
 * @param   pVM             VM Handle.
 * @param   GCPhys          Start physical address of the handler.
 * @param   GCPhysSplit     The split address.
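 *
 * @par Example
 *      A sketch; the addresses are illustrative. Splitting an 8-page handler
 *      at its 4th page boundary yields the ranges [GCPhys, GCPhysSplit - 1]
 *      and [GCPhysSplit, old last]:
 * @code
 *      rc = PGMHandlerPhysicalSplit(pVM, GCPhys, GCPhys + 4 * PAGE_SIZE);
 * @endcode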
 */
VMMDECL(int) PGMHandlerPhysicalSplit(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysSplit)
{
    AssertReturn(GCPhys < GCPhysSplit, VERR_INVALID_PARAMETER);

    /*
     * Do the allocation without owning the lock.
     */
    PPGMPHYSHANDLER pNew;
    int rc = MMHyperAlloc(pVM, sizeof(*pNew), 0, MM_TAG_PGM_HANDLERS, (void **)&pNew);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Get the handler.
     */
    pgmLock(pVM);
    PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
    if (RT_LIKELY(pCur))
    {
        if (RT_LIKELY(GCPhysSplit <= pCur->Core.KeyLast))
        {
            /*
             * Create new handler node for the 2nd half.
             */
            *pNew = *pCur;
            pNew->Core.Key = GCPhysSplit;
            pNew->cPages   = (pNew->Core.KeyLast - (pNew->Core.Key & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;

            pCur->Core.KeyLast = GCPhysSplit - 1;
            pCur->cPages       = (pCur->Core.KeyLast - (pCur->Core.Key & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;

            if (RT_LIKELY(RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pNew->Core)))
            {
                LogFlow(("PGMHandlerPhysicalSplit: %RGp-%RGp and %RGp-%RGp\n",
                         pCur->Core.Key, pCur->Core.KeyLast, pNew->Core.Key, pNew->Core.KeyLast));
                pgmUnlock(pVM);
                return VINF_SUCCESS;
            }
            AssertMsgFailed(("whu?\n"));
            rc = VERR_INTERNAL_ERROR;
        }
        else
        {
            AssertMsgFailed(("outside range: %RGp-%RGp split %RGp\n", pCur->Core.Key, pCur->Core.KeyLast, GCPhysSplit));
            rc = VERR_INVALID_PARAMETER;
        }
    }
    else
    {
        AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys));
        rc = VERR_PGM_HANDLER_NOT_FOUND;
    }
    pgmUnlock(pVM);
    MMHyperFree(pVM, pNew);
    return rc;
}


/**
 * Joins up two adjacent physical access handlers which have the same callbacks.
 *
 * @returns VBox status code.
 * @param   pVM             VM Handle.
 * @param   GCPhys1         Start physical address of the first handler.
 * @param   GCPhys2         Start physical address of the second handler.
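 *
 * @par Example
 *      A sketch undoing the split shown above; the second start address must
 *      be the byte immediately following the first range (illustrative values):
 * @code
 *      rc = PGMHandlerPhysicalJoin(pVM, GCPhys, GCPhys + 4 * PAGE_SIZE);
 * @endcode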
 */
VMMDECL(int) PGMHandlerPhysicalJoin(PVM pVM, RTGCPHYS GCPhys1, RTGCPHYS GCPhys2)
{
    /*
     * Get the handlers.
     */
    int rc;
    pgmLock(pVM);
    PPGMPHYSHANDLER pCur1 = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys1);
    if (RT_LIKELY(pCur1))
    {
        PPGMPHYSHANDLER pCur2 = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys2);
        if (RT_LIKELY(pCur2))
        {
            /*
             * Make sure that they are adjacent, and that they've got the same callbacks.
             */
            if (RT_LIKELY(pCur1->Core.KeyLast + 1 == pCur2->Core.Key))
            {
                if (RT_LIKELY(    pCur1->pfnHandlerRC == pCur2->pfnHandlerRC
                              &&  pCur1->pfnHandlerR0 == pCur2->pfnHandlerR0
                              &&  pCur1->pfnHandlerR3 == pCur2->pfnHandlerR3))
                {
                    PPGMPHYSHANDLER pCur3 = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys2);
                    if (RT_LIKELY(pCur3 == pCur2))
                    {
                        pCur1->Core.KeyLast = pCur2->Core.KeyLast;
                        pCur1->cPages       = (pCur1->Core.KeyLast - (pCur1->Core.Key & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
                        LogFlow(("PGMHandlerPhysicalJoin: %RGp-%RGp %RGp-%RGp\n",
                                 pCur1->Core.Key, pCur1->Core.KeyLast, pCur2->Core.Key, pCur2->Core.KeyLast));
                        pgmUnlock(pVM);
                        MMHyperFree(pVM, pCur2);
                        return VINF_SUCCESS;
                    }

                    Assert(pCur3 == pCur2);
                    rc = VERR_INTERNAL_ERROR;
                }
                else
                {
                    AssertMsgFailed(("mismatching handlers\n"));
                    rc = VERR_ACCESS_DENIED;
                }
            }
            else
            {
                AssertMsgFailed(("not adjacent: %RGp-%RGp %RGp-%RGp\n",
                                 pCur1->Core.Key, pCur1->Core.KeyLast, pCur2->Core.Key, pCur2->Core.KeyLast));
                rc = VERR_INVALID_PARAMETER;
            }
        }
        else
        {
            AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys2));
            rc = VERR_PGM_HANDLER_NOT_FOUND;
        }
    }
    else
    {
        AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys1));
        rc = VERR_PGM_HANDLER_NOT_FOUND;
    }
    pgmUnlock(pVM);
    return rc;
}


/**
 * Resets any modifications to individual pages in a physical
 * page access handler region.
 *
 * This is used in conjunction with PGMHandlerPhysicalPageTempOff() or
 * PGMHandlerPhysicalPageAlias().
 *
 * @returns VBox status code.
 * @param   pVM         VM Handle
 * @param   GCPhys      The start address of the handler region, i.e. what you
 *                      passed to PGMR3HandlerPhysicalRegister(),
 *                      PGMHandlerPhysicalRegisterEx() or
 *                      PGMHandlerPhysicalModify().
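 *
 * @par Example
 *      A sketch pairing this with PGMHandlerPhysicalPageTempOff(); the
 *      addresses are illustrative:
 * @code
 *      // Stop intercepting one page inside the monitored region...
 *      rc = PGMHandlerPhysicalPageTempOff(pVM, GCPhysRegion, GCPhysRegion + PAGE_SIZE);
 *      // ...and later restore monitoring of the whole region.
 *      rc = PGMHandlerPhysicalReset(pVM, GCPhysRegion);
 * @endcode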
 */
VMMDECL(int) PGMHandlerPhysicalReset(PVM pVM, RTGCPHYS GCPhys)
{
    pgmLock(pVM);

    /*
     * Find the handler.
     */
    int rc;
    PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
    if (RT_LIKELY(pCur))
    {
        /*
         * Validate type.
         */
        switch (pCur->enmType)
        {
            case PGMPHYSHANDLERTYPE_PHYSICAL_WRITE:
            case PGMPHYSHANDLERTYPE_PHYSICAL_ALL:
            case PGMPHYSHANDLERTYPE_MMIO: /* NOTE: Only use when clearing MMIO ranges with aliased MMIO2 pages! */
            {
                STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PhysHandlerReset)); /** @todo move out of switch */
                PPGMRAMRANGE pRam = pgmPhysGetRange(&pVM->pgm.s, GCPhys);
                Assert(pRam);
                Assert(pRam->GCPhys     <= pCur->Core.Key);
                Assert(pRam->GCPhysLast >= pCur->Core.KeyLast);

#ifdef VBOX_WITH_NEW_PHYS_CODE
                if (pCur->enmType == PGMPHYSHANDLERTYPE_MMIO)
                {
                    /*
                     * Reset all the PGMPAGETYPE_MMIO2_ALIAS_MMIO pages first and that's it.
                     * This could probably be optimized a bit wrt flushing, but I'm too lazy
                     * to do that now...
                     */
                    PPGMPAGE pPage = &pRam->aPages[(pCur->Core.Key - pRam->GCPhys) >> PAGE_SHIFT];
                    uint32_t cLeft = pCur->cPages;
                    while (cLeft-- > 0)
                    {
                        if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
                            pgmHandlerPhysicalResetAliasedPage(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)(uintptr_t)(pPage - &pRam->aPages[0]) << PAGE_SHIFT));
                        Assert(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO);
                        pPage++;
                    }
                }
                else
#endif
                {
                    /*
                     * Set the flags and flush shadow PT entries.
                     */
                    rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam);
                    pVM->pgm.s.fPhysCacheFlushPending = true;
                    HWACCMFlushTLB(pVM);
                }

                rc = VINF_SUCCESS;
                break;
            }

            /*
             * Invalid.
             */
            default:
                AssertMsgFailed(("Invalid type %d! Corruption!\n", pCur->enmType));
                rc = VERR_INTERNAL_ERROR;
                break;
        }
    }
    else
    {
        AssertMsgFailed(("Didn't find MMIO Range starting at %#x\n", GCPhys));
        rc = VERR_PGM_HANDLER_NOT_FOUND;
    }

    pgmUnlock(pVM);
    return rc;
}


/**
 * Temporarily turns off the access monitoring of a page within a monitored
 * physical write/all page access handler region.
 *
 * Use this when no further \#PFs are required for that page. Be aware that
 * a page directory sync might reset the flags, and turn on access monitoring
 * for the page.
 *
 * The caller must do required page table modifications.
 *
 * @returns VBox status code.
 * @param   pVM             VM Handle
 * @param   GCPhys          The start address of the access handler. This
 *                          must be a fully page aligned range or we risk
 *                          messing up other handlers installed for the
 *                          start and end pages.
 * @param   GCPhysPage      The physical address of the page to turn off
 *                          access monitoring for.
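 *
 * @par Example
 *      A sketch; the addresses are illustrative. After this call the second
 *      page of the monitored region stops raising \#PFs until the next
 *      reset or page directory sync:
 * @code
 *      rc = PGMHandlerPhysicalPageTempOff(pVM, GCPhysRegion,
 *                                         GCPhysRegion + PAGE_SIZE);
 *      AssertRC(rc);
 * @endcode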
 */
VMMDECL(int) PGMHandlerPhysicalPageTempOff(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage)
{
    /*
     * Validate the range.
     */
    PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
    if (RT_LIKELY(pCur))
    {
        if (RT_LIKELY(    GCPhysPage >= pCur->Core.Key
                      &&  GCPhysPage <= pCur->Core.KeyLast))
        {
            Assert(!(pCur->Core.Key & PAGE_OFFSET_MASK));
            Assert((pCur->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);

            AssertReturn(   pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_WRITE
                         || pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_ALL,
                         VERR_ACCESS_DENIED);

            /*
             * Change the page status.
             */
            PPGMPAGE pPage;
            int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysPage, &pPage);
            AssertRCReturn(rc, rc);
            PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
#ifndef IN_RC
            HWACCMInvalidatePhysPage(pVM, GCPhysPage);
#endif
            return VINF_SUCCESS;
        }

        AssertMsgFailed(("The page %#x is outside the range %#x-%#x\n",
                         GCPhysPage, pCur->Core.Key, pCur->Core.KeyLast));
        return VERR_INVALID_PARAMETER;
    }

    AssertMsgFailed(("Specified physical handler start address %#x is invalid.\n", GCPhys));
    return VERR_PGM_HANDLER_NOT_FOUND;
}


/**
 * Replaces an MMIO page with an MMIO2 page.
 *
 * This is a worker for IOMMMIOMapMMIO2Page that works in a similar way to
 * PGMHandlerPhysicalPageTempOff but for an MMIO page. Since an MMIO page has no
 * backing, the caller must provide a replacement page. For various reasons the
 * replacement page must be an MMIO2 page.
 *
 * The caller must do required page table modifications. You can get away
 * without making any modifications since it's an MMIO page, the cost is an extra
 * \#PF which will then resync the page.
 *
 * Call PGMHandlerPhysicalReset() to restore the MMIO page.
 *
 * The caller may still get handler callbacks even after this call and must be
 * able to deal correctly with such calls. The reason for these callbacks is
 * either that we're executing in the recompiler (which doesn't know about this
 * arrangement) or that we've been restored from saved state (where we won't
 * save the change).
 *
 * @returns VBox status code.
 * @param   pVM                 The VM handle
 * @param   GCPhys              The start address of the access handler. This
 *                              must be a fully page aligned range or we risk
 *                              messing up other handlers installed for the
 *                              start and end pages.
 * @param   GCPhysPage          The physical address of the page to turn off
 *                              access monitoring for.
 * @param   GCPhysPageRemap     The physical address of the MMIO2 page that
 *                              serves as backing memory.
 *
 * @remark  May cause a page pool flush if used on a page that is already
 *          aliased.
 *
 * @note    This trick only works reliably if the two pages are never ever
 *          mapped in the same page table. If they are the page pool code will
 *          be confused should either of them be flushed. See the special case
 *          of zero page aliasing mentioned in #3170.
 *
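 * @par Example
 *      A sketch of aliasing one page of an MMIO range onto MMIO2 backing so
 *      the guest can touch it without exits; the addresses are illustrative:
 * @code
 *      rc = PGMHandlerPhysicalPageAlias(pVM, GCPhysMmio,            // handler start
 *                                       GCPhysMmio + PAGE_SIZE,     // page to alias
 *                                       GCPhysMmio2Backing);        // MMIO2 backing page
 *      // Undo with PGMHandlerPhysicalReset(pVM, GCPhysMmio) when done.
 * @endcode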
 */
VMMDECL(int) PGMHandlerPhysicalPageAlias(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage, RTGCPHYS GCPhysPageRemap)
{
    /*
     * Lookup and validate the range.
     */
    PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
    if (RT_LIKELY(pCur))
    {
        if (RT_LIKELY(    GCPhysPage >= pCur->Core.Key
                      &&  GCPhysPage <= pCur->Core.KeyLast))
        {
            AssertReturn(pCur->enmType == PGMPHYSHANDLERTYPE_MMIO, VERR_ACCESS_DENIED);
            AssertReturn(!(pCur->Core.Key & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
            AssertReturn((pCur->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK, VERR_INVALID_PARAMETER);

            /*
             * Get and validate the two pages.
             */
            PPGMPAGE pPageRemap;
            int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysPageRemap, &pPageRemap);
            AssertRCReturn(rc, rc);
#ifdef VBOX_WITH_NEW_PHYS_CODE
            AssertMsgReturn(PGM_PAGE_GET_TYPE(pPageRemap) == PGMPAGETYPE_MMIO2,
                            ("GCPhysPageRemap=%RGp %R[pgmpage]\n", GCPhysPageRemap, pPageRemap),
                            VERR_PGM_PHYS_NOT_MMIO2);
#endif

            PPGMPAGE pPage;
            rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysPage, &pPage);
            AssertRCReturn(rc, rc);
#ifdef VBOX_WITH_NEW_PHYS_CODE
            if (PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO)
            {
                AssertMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO,
                                ("GCPhysPage=%RGp %R[pgmpage]\n", GCPhysPage, pPage),
                                VERR_PGM_PHYS_NOT_MMIO2);
                if (PGM_PAGE_GET_HCPHYS(pPage) == PGM_PAGE_GET_HCPHYS(pPageRemap))
                    return VINF_PGM_HANDLER_ALREADY_ALIASED;

                /*
                 * The page is already mapped as some other page, reset it
                 * to an MMIO/ZERO page before doing the new mapping.
                 */
                Log(("PGMHandlerPhysicalPageAlias: GCPhysPage=%RGp (%R[pgmpage]; %RHp -> %RHp\n",
                     GCPhysPage, pPage, PGM_PAGE_GET_HCPHYS(pPage), PGM_PAGE_GET_HCPHYS(pPageRemap)));
                pgmHandlerPhysicalResetAliasedPage(pVM, pPage, GCPhysPage);
            }
            Assert(PGM_PAGE_IS_ZERO(pPage));
#endif

            /*
             * Do the actual remapping here.
             * This page now serves as an alias for the backing memory specified.
             */
            LogFlow(("PGMHandlerPhysicalPageAlias: %RGp (%R[pgmpage]) alias for %RGp (%R[pgmpage])\n",
                     GCPhysPage, pPage, GCPhysPageRemap, pPageRemap));
#ifdef VBOX_WITH_NEW_PHYS_CODE
            PGM_PAGE_SET_HCPHYS(pPage, PGM_PAGE_GET_HCPHYS(pPageRemap));
            PGM_PAGE_SET_TYPE(pPage, PGMPAGETYPE_MMIO2_ALIAS_MMIO);
            PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
            PGM_PAGE_SET_PAGEID(pPage, PGM_PAGE_GET_PAGEID(pPageRemap));
            PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
            LogFlow(("PGMHandlerPhysicalPageAlias: => %R[pgmpage]\n", pPage));
#else
            pPage->HCPhys = pPageRemap->HCPhys;
            PGM_PAGE_SET_TRACKING(pPage, 0);
            PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
#endif

#ifndef IN_RC
            HWACCMInvalidatePhysPage(pVM, GCPhysPage);
#endif
            return VINF_SUCCESS;
        }

        AssertMsgFailed(("The page %#x is outside the range %#x-%#x\n",
                         GCPhysPage, pCur->Core.Key, pCur->Core.KeyLast));
        return VERR_INVALID_PARAMETER;
    }

    AssertMsgFailed(("Specified physical handler start address %#x is invalid.\n", GCPhys));
    return VERR_PGM_HANDLER_NOT_FOUND;
}


#if 0 /** @todo delete this. */
/**
 * Turns access monitoring of a page within a monitored
 * physical write/all page access handler region back on.
 *
 * The caller must do required page table modifications.
 *
 * @returns VBox status code.
 * @param   pVM             VM Handle
 * @param   GCPhys          Start physical address earlier passed to PGMR3HandlerPhysicalRegister().
 *                          This must be a fully page aligned range or we risk messing up other
 *                          handlers installed for the start and end pages.
 * @param   GCPhysPage      Physical address of the page to turn on access monitoring for.
 */
VMMDECL(int) PGMHandlerPhysicalPageReset(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage)
{
    /*
     * Validate the range.
     */
    PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
    if (RT_LIKELY(pCur))
    {
        if (RT_LIKELY(    GCPhysPage >= pCur->Core.Key
                      &&  GCPhysPage <= pCur->Core.KeyLast))
        {
            Assert(!(pCur->Core.Key & PAGE_OFFSET_MASK));
            Assert((pCur->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);

            AssertReturn(   pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_WRITE
                         || pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_ALL
                         || pCur->enmType == PGMPHYSHANDLERTYPE_MMIO,
                         VERR_ACCESS_DENIED);

            /*
             * Change the page status.
             */
            PPGMPAGE pPage;
            int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysPage, &pPage);
            AssertRCReturn(rc, rc);
            PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, pgmHandlerPhysicalCalcState(pCur));

#ifndef IN_RC
            HWACCMInvalidatePhysPage(pVM, GCPhysPage);
#endif
            return VINF_SUCCESS;
        }

        AssertMsgFailed(("The page %#x is outside the range %#x-%#x\n",
                         GCPhysPage, pCur->Core.Key, pCur->Core.KeyLast));
        return VERR_INVALID_PARAMETER;
    }

    AssertMsgFailed(("Specified physical handler start address %#x is invalid.\n", GCPhys));
    return VERR_PGM_HANDLER_NOT_FOUND;
}
#endif


/**
 * Checks if a physical range is handled.
 *
 * @returns boolean
 * @param   pVM         VM Handle.
 * @param   GCPhys      Start physical address earlier passed to PGMR3HandlerPhysicalRegister().
 * @remarks Caller must take the PGM lock...
 * @threads EMT.
 */
VMMDECL(bool) PGMHandlerPhysicalIsRegistered(PVM pVM, RTGCPHYS GCPhys)
{
    /*
     * Find the handler.
     */
    PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
    if (pCur)
    {
        Assert(GCPhys >= pCur->Core.Key && GCPhys <= pCur->Core.KeyLast);
        Assert(   pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_WRITE
               || pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_ALL
               || pCur->enmType == PGMPHYSHANDLERTYPE_MMIO);
        return true;
    }

    return false;
}


/**
 * Checks if it's a disabled all access handler or write access handler at the
 * given address.
 *
 * @returns true if it's an all access handler, false if it's a write access
 *          handler.
 * @param   pVM         Pointer to the shared VM structure.
 * @param   GCPhys      The address of the page with a disabled handler.
 *
 * @remarks The caller, PGMR3PhysTlbGCPhys2Ptr, must hold the PGM lock.
 */
bool pgmHandlerPhysicalIsAll(PVM pVM, RTGCPHYS GCPhys)
{
    PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
    AssertReturn(pCur, true);
    Assert(   pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_WRITE
           || pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_ALL
           || pCur->enmType == PGMPHYSHANDLERTYPE_MMIO); /* sanity */
    /* Only whole pages can be disabled. */
    Assert(   pCur->Core.Key <= (GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK)
           && pCur->Core.KeyLast >= (GCPhys | PAGE_OFFSET_MASK));
    return pCur->enmType != PGMPHYSHANDLERTYPE_PHYSICAL_WRITE;
}


/**
 * Check if a particular guest VA is being monitored.
 *
 * @returns true or false
 * @param   pVM         VM handle.
 * @param   GCPtr       Virtual address.
 * @remarks Will acquire the PGM lock.
 * @threads Any.
 */
VMMDECL(bool) PGMHandlerVirtualIsRegistered(PVM pVM, RTGCPTR GCPtr)
{
    pgmLock(pVM);
    PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)RTAvlroGCPtrGet(&pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers, GCPtr);
    pgmUnlock(pVM);

    return pCur != NULL;
}


/**
 * Search for a virtual handler with a matching physical address.
 *
 * @returns VBox status code
 * @param   pVM         The VM handle.
 * @param   GCPhys      GC physical address to search for.
 * @param   ppVirt      Where to store the pointer to the virtual handler structure.
 * @param   piPage      Where to store the pointer to the index of the cached physical page.
 */
int pgmHandlerVirtualFindByPhysAddr(PVM pVM, RTGCPHYS GCPhys, PPGMVIRTHANDLER *ppVirt, unsigned *piPage)
{
    STAM_PROFILE_START(&pVM->pgm.s.CTX_MID_Z(Stat,VirtHandlerSearchByPhys), a);
    Assert(ppVirt);

    PPGMPHYS2VIRTHANDLER pCur;
    pCur = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers, GCPhys);
    if (pCur)
    {
        /* found a match! */
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
        AssertRelease(pCur->offNextAlias & PGMPHYS2VIRTHANDLER_IS_HEAD);
#endif
        *ppVirt = (PPGMVIRTHANDLER)((uintptr_t)pCur + pCur->offVirtHandler);
        *piPage = pCur - &(*ppVirt)->aPhysToVirt[0];

        LogFlow(("PHYS2VIRT: found match for %RGp -> %RGv *piPage=%#x\n", GCPhys, (*ppVirt)->Core.Key, *piPage));
        STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,VirtHandlerSearchByPhys), a);
        return VINF_SUCCESS;
    }

    *ppVirt = NULL;
    STAM_PROFILE_STOP(&pVM->pgm.s.CTX_MID_Z(Stat,VirtHandlerSearchByPhys), a);
    return VERR_PGM_HANDLER_NOT_FOUND;
}


/**
 * Deal with aliases in phys2virt.
 *
 * As pointed out by the various todos, this currently only deals with
 * aliases where the two ranges match 100%.
 *
 * @param   pVM             The VM handle.
 * @param   pPhys2Virt      The node we failed to insert.
 */
static void pgmHandlerVirtualInsertAliased(PVM pVM, PPGMPHYS2VIRTHANDLER pPhys2Virt)
{
    /*
     * First find the node which is conflicting with us.
     */
    /** @todo Deal with partial overlapping. (Unlikely situation, so I'm too lazy to do anything about it now.) */
    /** @todo check if the current head node covers the ground we do. This is highly unlikely
     * and I'm too lazy to implement this now as it will require sorting the list and stuff like that. */
    PPGMPHYS2VIRTHANDLER pHead = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key);
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
    AssertReleaseMsg(pHead != pPhys2Virt, ("%RGp-%RGp offVirtHandler=%#RX32\n",
                     pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler));
#endif
    if (RT_UNLIKELY(!pHead || pHead->Core.KeyLast != pPhys2Virt->Core.KeyLast))
    {
        /** @todo do something clever here... */
        LogRel(("pgmHandlerVirtualInsertAliased: %RGp-%RGp\n", pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast));
        pPhys2Virt->offNextAlias = 0;
        return;
    }

    /*
     * Insert ourselves as the next node.
     */
    if (!(pHead->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK))
        pPhys2Virt->offNextAlias = PGMPHYS2VIRTHANDLER_IN_TREE;
    else
    {
        PPGMPHYS2VIRTHANDLER pNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pHead + (pHead->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
        pPhys2Virt->offNextAlias = ((intptr_t)pNext - (intptr_t)pPhys2Virt)
                                 | PGMPHYS2VIRTHANDLER_IN_TREE;
    }
    pHead->offNextAlias = ((intptr_t)pPhys2Virt - (intptr_t)pHead)
                        | (pHead->offNextAlias & ~PGMPHYS2VIRTHANDLER_OFF_MASK);
    Log(("pgmHandlerVirtualInsertAliased: %RGp-%RGp offNextAlias=%#RX32\n", pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias));
}


/**
 * Resets one virtual handler range.
 *
 * This is called by HandlerVirtualUpdate when it has detected some kind of
 * problem and has started clearing the virtual handler page states (or
 * when there have been registrations/deregistrations). For this reason this
 * function will only update the page status if it's lower than desired.
 *
 * @returns 0
 * @param   pNode   Pointer to a PGMVIRTHANDLER.
 * @param   pvUser  The VM handle.
 */
DECLCALLBACK(int) pgmHandlerVirtualResetOne(PAVLROGCPTRNODECORE pNode, void *pvUser)
{
    PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)pNode;
    PVM             pVM  = (PVM)pvUser;

    /*
     * Iterate the pages and apply the new state.
     */
    unsigned        uState   = pgmHandlerVirtualCalcState(pCur);
    PPGMRAMRANGE    pRamHint = NULL;
    RTGCUINTPTR     offPage  = ((RTGCUINTPTR)pCur->Core.Key & PAGE_OFFSET_MASK);
    RTGCUINTPTR     cbLeft   = pCur->cb;
    for (unsigned iPage = 0; iPage < pCur->cPages; iPage++)
    {
        PPGMPHYS2VIRTHANDLER pPhys2Virt = &pCur->aPhysToVirt[iPage];
        if (pPhys2Virt->Core.Key != NIL_RTGCPHYS)
        {
            /*
             * Update the page state wrt virtual handlers.
             */
            PPGMPAGE pPage;
            int rc = pgmPhysGetPageWithHintEx(&pVM->pgm.s, pPhys2Virt->Core.Key, &pPage, &pRamHint);
            if (    RT_SUCCESS(rc)
                &&  PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) < uState)
                PGM_PAGE_SET_HNDL_VIRT_STATE(pPage, uState);
            else
                AssertRC(rc);

            /*
             * Need to insert the page in the Phys2Virt lookup tree?
             */
            if (pPhys2Virt->Core.KeyLast == NIL_RTGCPHYS)
            {
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
                AssertRelease(!pPhys2Virt->offNextAlias);
#endif
                unsigned cbPhys = cbLeft;
                if (cbPhys > PAGE_SIZE - offPage)
                    cbPhys = PAGE_SIZE - offPage;
                else
                    Assert(iPage == pCur->cPages - 1);
                pPhys2Virt->Core.KeyLast = pPhys2Virt->Core.Key + cbPhys - 1; /* inclusive */
                pPhys2Virt->offNextAlias = PGMPHYS2VIRTHANDLER_IS_HEAD | PGMPHYS2VIRTHANDLER_IN_TREE;
                if (!RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers, &pPhys2Virt->Core))
                    pgmHandlerVirtualInsertAliased(pVM, pPhys2Virt);
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
                else
                    AssertReleaseMsg(RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key) == &pPhys2Virt->Core,
                                     ("%RGp-%RGp offNextAlias=%#RX32\n",
                                      pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias));
#endif
                Log2(("PHYS2VIRT: Insert physical range %RGp-%RGp offNextAlias=%#RX32 %s\n",
                      pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias, R3STRING(pCur->pszDesc)));
            }
        }
        cbLeft -= PAGE_SIZE - offPage;
        offPage = 0;
    }

    return 0;
}

#if defined(VBOX_STRICT) || defined(LOG_ENABLED)

/**
 * Worker for pgmHandlerVirtualDumpPhysPages.
 *
 * @returns 0 (continue enumeration).
 * @param   pNode   The virtual handler node.
 * @param   pvUser  User argument, unused.
 */
static DECLCALLBACK(int) pgmHandlerVirtualDumpPhysPagesCallback(PAVLROGCPHYSNODECORE pNode, void *pvUser)
{
    PPGMPHYS2VIRTHANDLER pCur  = (PPGMPHYS2VIRTHANDLER)pNode;
    PPGMVIRTHANDLER      pVirt = (PPGMVIRTHANDLER)((uintptr_t)pCur + pCur->offVirtHandler);
    Log(("PHYS2VIRT: Range %RGp-%RGp for virtual handler: %s\n", pCur->Core.Key, pCur->Core.KeyLast, pVirt->pszDesc));
    return 0;
}


/**
 * Assertion / logging helper for dumping all the
 * virtual handlers to the log.
 *
 * @param   pVM     Pointer to the shared VM structure.
 */
void pgmHandlerVirtualDumpPhysPages(PVM pVM)
{
    RTAvlroGCPhysDoWithAll(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers, true /* from left */,
                           pgmHandlerVirtualDumpPhysPagesCallback, 0);
}

#endif /* VBOX_STRICT || LOG_ENABLED */
#ifdef VBOX_STRICT

/**
 * State structure used by the PGMAssertHandlerAndFlagsInSync() function
 * and its AVL enumerators.
 */
typedef struct PGMAHAFIS
{
    /** The current physical address. */
    RTGCPHYS    GCPhys;
    /** The state we've calculated. */
    unsigned    uVirtStateFound;
    /** The state we're matching up to. */
    unsigned    uVirtState;
    /** Number of errors. */
    unsigned    cErrors;
    /** The VM handle. */
    PVM         pVM;
} PGMAHAFIS, *PPGMAHAFIS;


#if 0 /* unused */
/**
 * Verify virtual handler by matching physical address.
 *
 * @returns 0
 * @param   pNode   Pointer to a PGMVIRTHANDLER.
 * @param   pvUser  Pointer to user parameter.
 */
static DECLCALLBACK(int) pgmHandlerVirtualVerifyOneByPhysAddr(PAVLROGCPTRNODECORE pNode, void *pvUser)
{
    PPGMVIRTHANDLER pCur   = (PPGMVIRTHANDLER)pNode;
    PPGMAHAFIS      pState = (PPGMAHAFIS)pvUser;

    for (unsigned iPage = 0; iPage < pCur->cPages; iPage++)
    {
        if ((pCur->aPhysToVirt[iPage].Core.Key & X86_PTE_PAE_PG_MASK) == pState->GCPhys)
        {
            unsigned uState = pgmHandlerVirtualCalcState(pCur);
            if (pState->uVirtState < uState)
            {
                error
            }

            if (pState->uVirtState == uState)
                break; //??
        }
    }
    return 0;
}
#endif /* unused */


/**
 * Verify a virtual handler (enumeration callback).
 *
 * Called by PGMAssertHandlerAndFlagsInSync to check the sanity of all
 * the virtual handlers, esp. that the physical addresses match up.
 *
 * @returns 0
 * @param   pNode   Pointer to a PGMVIRTHANDLER.
 * @param   pvUser  Pointer to a PPGMAHAFIS structure.
 */
static DECLCALLBACK(int) pgmHandlerVirtualVerifyOne(PAVLROGCPTRNODECORE pNode, void *pvUser)
{
    PPGMVIRTHANDLER pVirt  = (PPGMVIRTHANDLER)pNode;
    PPGMAHAFIS      pState = (PPGMAHAFIS)pvUser;
    PVM             pVM    = pState->pVM;

    /*
     * Validate the type and calc state.
     */
    switch (pVirt->enmType)
    {
        case PGMVIRTHANDLERTYPE_WRITE:
        case PGMVIRTHANDLERTYPE_ALL:
            break;
        default:
            AssertMsgFailed(("unknown/wrong enmType=%d\n", pVirt->enmType));
            pState->cErrors++;
            return 0;
    }
    const unsigned uState = pgmHandlerVirtualCalcState(pVirt);

    /*
     * Check key alignment.
     */
    if (    (pVirt->aPhysToVirt[0].Core.Key & PAGE_OFFSET_MASK) != ((RTGCUINTPTR)pVirt->Core.Key & PAGE_OFFSET_MASK)
        &&  pVirt->aPhysToVirt[0].Core.Key != NIL_RTGCPHYS)
    {
        AssertMsgFailed(("virt handler phys has incorrect key! %RGp %RGv %s\n",
                         pVirt->aPhysToVirt[0].Core.Key, pVirt->Core.Key, R3STRING(pVirt->pszDesc)));
        pState->cErrors++;
    }

    if (    (pVirt->aPhysToVirt[pVirt->cPages - 1].Core.KeyLast & PAGE_OFFSET_MASK) != ((RTGCUINTPTR)pVirt->Core.KeyLast & PAGE_OFFSET_MASK)
        &&  pVirt->aPhysToVirt[pVirt->cPages - 1].Core.Key != NIL_RTGCPHYS)
    {
        AssertMsgFailed(("virt handler phys has incorrect key! %RGp %RGv %s\n",
                         pVirt->aPhysToVirt[pVirt->cPages - 1].Core.KeyLast, pVirt->Core.KeyLast, R3STRING(pVirt->pszDesc)));
        pState->cErrors++;
    }

    /*
     * Check pages for sanity and state.
     */
    RTGCUINTPTR GCPtr = (RTGCUINTPTR)pVirt->Core.Key;
    for (unsigned iPage = 0; iPage < pVirt->cPages; iPage++, GCPtr += PAGE_SIZE)
    {
        RTGCPHYS GCPhysGst;
        uint64_t fGst;
        int rc = PGMGstGetPage(pVM, (RTGCPTR)GCPtr, &fGst, &GCPhysGst);
        if (    rc == VERR_PAGE_NOT_PRESENT
            ||  rc == VERR_PAGE_TABLE_NOT_PRESENT)
        {
            if (pVirt->aPhysToVirt[iPage].Core.Key != NIL_RTGCPHYS)
            {
                AssertMsgFailed(("virt handler phys out of sync. %RGp GCPhysNew=~0 iPage=%#x %RGv %s\n",
                                 pVirt->aPhysToVirt[iPage].Core.Key, iPage, GCPtr, R3STRING(pVirt->pszDesc)));
                pState->cErrors++;
            }
            continue;
        }

        AssertRCReturn(rc, 0);
        if ((pVirt->aPhysToVirt[iPage].Core.Key & X86_PTE_PAE_PG_MASK) != GCPhysGst)
        {
            AssertMsgFailed(("virt handler phys out of sync. %RGp GCPhysGst=%RGp iPage=%#x %RGv %s\n",
                             pVirt->aPhysToVirt[iPage].Core.Key, GCPhysGst, iPage, GCPtr, R3STRING(pVirt->pszDesc)));
            pState->cErrors++;
            continue;
        }

        PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhysGst);
        if (!pPage)
        {
            AssertMsgFailed(("virt handler getting ram flags. GCPhysGst=%RGp iPage=%#x %RGv %s\n",
                             GCPhysGst, iPage, GCPtr, R3STRING(pVirt->pszDesc)));
            pState->cErrors++;
            continue;
        }

        if (PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) < uState)
        {
            AssertMsgFailed(("virt handler state mismatch. pPage=%R[pgmpage] GCPhysGst=%RGp iPage=%#x %RGv state=%d expected>=%d %s\n",
                             pPage, GCPhysGst, iPage, GCPtr, PGM_PAGE_GET_HNDL_VIRT_STATE(pPage), uState, R3STRING(pVirt->pszDesc)));
            pState->cErrors++;
            continue;
        }
    } /* for pages in virtual mapping. */

    return 0;
}


/**
 * Asserts that the handlers+guest-page-tables == ramrange-flags and
 * that the physical addresses associated with virtual handlers are correct.
 *
 * @returns Number of mismatches.
 * @param   pVM     The VM handle.
 */
VMMDECL(unsigned) PGMAssertHandlerAndFlagsInSync(PVM pVM)
{
    PPGM        pPGM = &pVM->pgm.s;
    PGMAHAFIS   State;
    State.GCPhys = 0;
    State.uVirtState = 0;
    State.uVirtStateFound = 0;
    State.cErrors = 0;
    State.pVM = pVM;

    /*
     * Check the RAM flags against the handlers.
     */
    for (PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges); pRam; pRam = pRam->CTX_SUFF(pNext))
    {
        const unsigned cPages = pRam->cb >> PAGE_SHIFT;
        for (unsigned iPage = 0; iPage < cPages; iPage++)
        {
            PGMPAGE const *pPage = &pRam->aPages[iPage];
            if (PGM_PAGE_HAS_ANY_HANDLERS(pPage))
            {
                State.GCPhys = pRam->GCPhys + (iPage << PAGE_SHIFT);

                /*
                 * Physical first - calculate the state based on the handlers
                 * active on the page, then compare.
                 */
                if (PGM_PAGE_HAS_ANY_PHYSICAL_HANDLERS(pPage))
                {
                    /* the first */
                    PPGMPHYSHANDLER pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pPGM->CTX_SUFF(pTrees)->PhysHandlers, State.GCPhys);
                    if (!pPhys)
                    {
                        pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pPGM->CTX_SUFF(pTrees)->PhysHandlers, State.GCPhys, true);
                        if (    pPhys
                            &&  pPhys->Core.Key > (State.GCPhys + PAGE_SIZE - 1))
                            pPhys = NULL;
                        Assert(!pPhys || pPhys->Core.Key >= State.GCPhys);
                    }
                    if (pPhys)
                    {
                        unsigned uState = pgmHandlerPhysicalCalcState(pPhys);

                        /* more? */
                        while (pPhys->Core.KeyLast < (State.GCPhys | PAGE_OFFSET_MASK))
                        {
                            PPGMPHYSHANDLER pPhys2 = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pPGM->CTX_SUFF(pTrees)->PhysHandlers,
                                                                                              pPhys->Core.KeyLast + 1, true);
                            if (    !pPhys2
                                ||  pPhys2->Core.Key > (State.GCPhys | PAGE_OFFSET_MASK))
                                break;
                            unsigned uState2 = pgmHandlerPhysicalCalcState(pPhys2);
                            uState = RT_MAX(uState, uState2);
                            pPhys = pPhys2;
                        }

                        /* compare. */
                        if (    PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != uState
                            &&  PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_DISABLED)
                        {
                            AssertMsgFailed(("ram range vs phys handler flags mismatch. GCPhys=%RGp state=%d expected=%d %s\n",
                                             State.GCPhys, PGM_PAGE_GET_HNDL_PHYS_STATE(pPage), uState, pPhys->pszDesc));
                            State.cErrors++;
                        }

#ifdef IN_RING3
                        /* validate that REM is handling it. */
                        if (    !REMR3IsPageAccessHandled(pVM, State.GCPhys)
                                /* ignore shadowed ROM for the time being. */
# ifdef VBOX_WITH_NEW_PHYS_CODE
                            &&  PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_ROM_SHADOW
# else
                            &&  (pPage->HCPhys & (MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO2)) != (MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO2)
# endif
                           )
                        {
                            AssertMsgFailed(("ram range vs phys handler REM mismatch. GCPhys=%RGp state=%d %s\n",
                                             State.GCPhys, PGM_PAGE_GET_HNDL_PHYS_STATE(pPage), pPhys->pszDesc));
                            State.cErrors++;
                        }
#endif
                    }
                    else
                    {
                        AssertMsgFailed(("ram range vs phys handler mismatch. no handler for GCPhys=%RGp\n", State.GCPhys));
                        State.cErrors++;
                    }
                }

                /*
                 * Virtual handlers.
                 */
                if (PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage))
                {
                    State.uVirtState = PGM_PAGE_GET_HNDL_VIRT_STATE(pPage);
#if 1
                    /* locate all the matching physical ranges. */
                    State.uVirtStateFound = PGM_PAGE_HNDL_VIRT_STATE_NONE;
                    RTGCPHYS GCPhysKey = State.GCPhys;
                    for (;;)
                    {
                        PPGMPHYS2VIRTHANDLER pPhys2Virt = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers,
                                                                                                        GCPhysKey, true /* above-or-equal */);
                        if (    !pPhys2Virt
                            ||  (pPhys2Virt->Core.Key & X86_PTE_PAE_PG_MASK) != State.GCPhys)
                            break;

                        /* the head */
                        GCPhysKey = pPhys2Virt->Core.KeyLast;
                        PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)((uintptr_t)pPhys2Virt + pPhys2Virt->offVirtHandler);
                        unsigned uState = pgmHandlerVirtualCalcState(pCur);
                        State.uVirtStateFound = RT_MAX(State.uVirtStateFound, uState);

                        /* any aliases */
                        while (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK)
                        {
                            pPhys2Virt = (PPGMPHYS2VIRTHANDLER)((uintptr_t)pPhys2Virt + (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
                            pCur = (PPGMVIRTHANDLER)((uintptr_t)pPhys2Virt + pPhys2Virt->offVirtHandler);
                            uState = pgmHandlerVirtualCalcState(pCur);
                            State.uVirtStateFound = RT_MAX(State.uVirtStateFound, uState);
                        }

                        /* done? */
                        if ((GCPhysKey & X86_PTE_PAE_PG_MASK) != State.GCPhys)
                            break;
                    }
#else
                    /* very slow */
                    RTAvlroGCPtrDoWithAll(&pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers, true, pgmHandlerVirtualVerifyOneByPhysAddr, &State);
#endif
                    if (State.uVirtState != State.uVirtStateFound)
                    {
                        AssertMsgFailed(("ram range vs virt handler flags mismatch. GCPhys=%RGp uVirtState=%#x uVirtStateFound=%#x\n",
                                         State.GCPhys, State.uVirtState, State.uVirtStateFound));
                        State.cErrors++;
                    }
                }
            }
        } /* foreach page in ram range. */
    } /* foreach ram range. */

    /*
     * Check that the physical addresses of the virtual handlers match up
     * and that they are otherwise sane.
     */
    RTAvlroGCPtrDoWithAll(&pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers, true, pgmHandlerVirtualVerifyOne, &State);

    /*
     * Do the reverse check for physical handlers.
     */
    /** @todo */

    return State.cErrors;
}

#endif /* VBOX_STRICT */
