VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp@7932

Last change on this file since 7932 was 7753, checked in by vboxsync, 17 years ago

The PGM bits of the MMIO cleanup.
Moved the parts of PGMR3Reset that deals with RAM (zeroing it) and sketched out the new code there.
Fixed a bug in PGM_PAGE_INIT_ZERO* where the type and state was switched.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 54.7 KB
/* $Id: PGMAllHandler.cpp 7753 2008-04-04 20:35:44Z vboxsync $ */
/** @file
 * PGM - Page Manager / Monitor, Access Handlers.
 */

/*
 * Copyright (C) 2006-2007 innotek GmbH
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#include <VBox/dbgf.h>
#include <VBox/pgm.h>
#include <VBox/iom.h>
#include <VBox/mm.h>
#include <VBox/em.h>
#include <VBox/stam.h>
#include <VBox/rem.h>
#include "PGMInternal.h"
#include <VBox/vm.h>

#include <VBox/log.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/string.h>
#include <VBox/param.h>
#include <VBox/err.h>
#include <VBox/selm.h>


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static int  pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVM pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam);
static void pgmHandlerPhysicalDeregisterNotifyREM(PVM pVM, PPGMPHYSHANDLER pCur);
static void pgmHandlerPhysicalResetRamFlags(PVM pVM, PPGMPHYSHANDLER pCur);



/**
 * Registers an access handler for a physical range.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS when successfully installed.
 * @retval  VINF_PGM_GCPHYS_ALIASED when the shadow PTs could not be updated
 *          because the guest page is aliased and/or mapped by multiple PTs.
 *          A CR3 sync has been flagged together with a pool clearing.
 * @retval  VERR_PGM_HANDLER_PHYSICAL_CONFLICT if the range conflicts with an
 *          existing one. A debug assertion is raised.
 *
 * @param   pVM             VM Handle.
 * @param   enmType         Handler type. Any of the PGMPHYSHANDLERTYPE_PHYSICAL* enums.
 * @param   GCPhys          Start physical address.
 * @param   GCPhysLast      Last physical address. (inclusive)
 * @param   pfnHandlerR3    The R3 handler.
 * @param   pvUserR3        User argument to the R3 handler.
 * @param   pfnHandlerR0    The R0 handler.
 * @param   pvUserR0        User argument to the R0 handler.
 * @param   pfnHandlerGC    The GC handler.
 * @param   pvUserGC        User argument to the GC handler.
 *                          This must be a GC pointer because it will be relocated!
 * @param   pszDesc         Pointer to description string. This must not be freed.
 */
PGMDECL(int) PGMHandlerPhysicalRegisterEx(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast,
                                          R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnHandlerR3, RTR3PTR pvUserR3,
                                          R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnHandlerR0, RTR0PTR pvUserR0,
                                          GCPTRTYPE(PFNPGMGCPHYSHANDLER) pfnHandlerGC, RTGCPTR pvUserGC,
                                          R3PTRTYPE(const char *) pszDesc)
{
    Log(("PGMHandlerPhysicalRegisterEx: enmType=%d GCPhys=%VGp GCPhysLast=%VGp pfnHandlerR3=%VHv pvUserR3=%VHv pfnHandlerR0=%VHv pvUserR0=%VHv pfnHandlerGC=%VGv pvUserGC=%VGv pszDesc=%s\n",
         enmType, GCPhys, GCPhysLast, pfnHandlerR3, pvUserR3, pfnHandlerR0, pvUserR0, pfnHandlerGC, pvUserGC, HCSTRING(pszDesc)));

    /*
     * Validate input.
     */
    if (GCPhys >= GCPhysLast)
    {
        AssertMsgFailed(("GCPhys >= GCPhysLast (%#x >= %#x)\n", GCPhys, GCPhysLast));
        return VERR_INVALID_PARAMETER;
    }
    switch (enmType)
    {
        case PGMPHYSHANDLERTYPE_MMIO:
        case PGMPHYSHANDLERTYPE_PHYSICAL_WRITE:
        case PGMPHYSHANDLERTYPE_PHYSICAL_ALL:
            break;
        default:
            AssertMsgFailed(("Invalid input enmType=%d!\n", enmType));
            return VERR_INVALID_PARAMETER;
    }
    if (    (RTGCUINTPTR)pvUserGC >= 0x10000
        &&  MMHyperHC2GC(pVM, MMHyperGC2HC(pVM, pvUserGC)) != pvUserGC)
    {
        AssertMsgFailed(("Not GC pointer! pvUserGC=%VGv\n", pvUserGC));
        return VERR_INVALID_PARAMETER;
    }
    AssertReturn(pfnHandlerR3 || pfnHandlerR0 || pfnHandlerGC, VERR_INVALID_PARAMETER);

    /*
     * We require the range to be within registered ram.
     * There is no apparent need to support ranges which cover more than one ram range.
     */
    PPGMRAMRANGE pRam = CTXALLSUFF(pVM->pgm.s.pRamRanges);
    while (pRam && GCPhys > pRam->GCPhysLast)
        pRam = CTXALLSUFF(pRam->pNext);
    if (    !pRam
        ||  GCPhysLast < pRam->GCPhys
        ||  GCPhys > pRam->GCPhysLast)
    {
#ifdef IN_RING3
        DBGFR3Info(pVM, "phys", NULL, NULL);
#endif
        AssertMsgFailed(("No RAM range for %VGp-%VGp\n", GCPhys, GCPhysLast));
        return VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
    }

    /*
     * Allocate and initialize the new entry.
     */
    PPGMPHYSHANDLER pNew;
    int rc = MMHyperAlloc(pVM, sizeof(*pNew), 0, MM_TAG_PGM_HANDLERS, (void **)&pNew);
    if (VBOX_FAILURE(rc))
        return rc;

    pNew->Core.Key      = GCPhys;
    pNew->Core.KeyLast  = GCPhysLast;
    pNew->enmType       = enmType;
    pNew->cPages        = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
    pNew->pfnHandlerR3  = pfnHandlerR3;
    pNew->pvUserR3      = pvUserR3;
    pNew->pfnHandlerR0  = pfnHandlerR0;
    pNew->pvUserR0      = pvUserR0;
    pNew->pfnHandlerGC  = pfnHandlerGC;
    pNew->pvUserGC      = pvUserGC;
    pNew->pszDesc       = pszDesc;

    pgmLock(pVM);

    /*
     * Try insert into list.
     */
    if (RTAvlroGCPhysInsert(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, &pNew->Core))
    {
        rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pNew, pRam);
        if (rc == VINF_PGM_GCPHYS_ALIASED)
        {
            pVM->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
            VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
        }
        pVM->pgm.s.fPhysCacheFlushPending = true;
#ifndef IN_RING3
        REMNotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, GCPhysLast - GCPhys + 1, !!pfnHandlerR3);
#else
        REMR3NotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, GCPhysLast - GCPhys + 1, !!pfnHandlerR3);
#endif
        pgmUnlock(pVM);
        if (rc != VINF_SUCCESS)
            Log(("PGMHandlerPhysicalRegisterEx: returns %Vrc (%VGp-%VGp)\n", rc, GCPhys, GCPhysLast));
        return rc;
    }

    pgmUnlock(pVM);

#if defined(IN_RING3) && defined(VBOX_STRICT)
    DBGFR3Info(pVM, "handlers", "phys nostats", NULL);
#endif
    AssertMsgFailed(("Conflict! GCPhys=%VGp GCPhysLast=%VGp pszDesc=%s\n", GCPhys, GCPhysLast, pszDesc));
    MMHyperFree(pVM, pNew);
    return VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
}
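
/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * source): registering a ring-3-only write handler over a hypothetical 8 KB
 * device range. GCPhysDev, pThis and myDevWriteHandlerR3 are assumed names;
 * myDevWriteHandlerR3 is assumed to have the PFNPGMR3PHYSHANDLER signature.
 *
 *      rc = PGMHandlerPhysicalRegisterEx(pVM, PGMPHYSHANDLERTYPE_PHYSICAL_WRITE,
 *                                        GCPhysDev, GCPhysDev + 0x2000 - 1,
 *                                        myDevWriteHandlerR3, pThis,
 *                                        NULL, NIL_RTR0PTR,
 *                                        NULL, NIL_RTGCPTR,
 *                                        "Sketch: device write monitor");
 *      AssertRCReturn(rc, rc);
 */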

/**
 * Sets RAM range flags and attempts to update the shadow page tables.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS when the shadow PTs were successfully updated.
 * @retval  VINF_PGM_GCPHYS_ALIASED when the shadow PTs could not be updated
 *          because the guest page is aliased and/or mapped by multiple PTs.
 * @param   pVM     The VM handle.
 * @param   pCur    The physical handler.
 * @param   pRam    The RAM range.
 */
static int pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVM pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam)
{
    /*
     * Iterate the guest ram pages updating the flags and flushing PT entries
     * mapping the page.
     */
    bool fFlushTLBs = false;
#if defined(PGMPOOL_WITH_GCPHYS_TRACKING) || defined(PGMPOOL_WITH_CACHE)
    int rc = VINF_SUCCESS;
#else
    const int rc = VINF_PGM_GCPHYS_ALIASED;
#endif
    const unsigned uState = pgmHandlerPhysicalCalcState(pCur);
    RTUINT cPages = pCur->cPages;
    RTUINT i = (pCur->Core.Key - pRam->GCPhys) >> PAGE_SHIFT;
    for (;;)
    {
        /* Physical chunk in dynamically allocated range not present? */
        if (RT_UNLIKELY(!PGM_PAGE_GET_HCPHYS(&pRam->aPages[i])))
        {
            RTGCPHYS GCPhys = pRam->GCPhys + (i << PAGE_SHIFT);
#ifdef IN_RING3
            int rc2 = pgmr3PhysGrowRange(pVM, GCPhys);
#else
            int rc2 = CTXALLMID(VMM, CallHost)(pVM, VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
#endif
            if (rc2 != VINF_SUCCESS)
                return rc2;
        }

        /* Only do upgrades. */
        PPGMPAGE pPage = &pRam->aPages[i];
        if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) < uState)
        {
            PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, uState);
            Assert(PGM_PAGE_GET_HCPHYS(pPage));

#ifdef PGMPOOL_WITH_GCPHYS_TRACKING
            /* This code also makes ASSUMPTIONS about the cRefs and stuff. */
            Assert(MM_RAM_FLAGS_IDX_SHIFT < MM_RAM_FLAGS_CREFS_SHIFT);
            const uint16_t u16 = pRam->aPages[i].HCPhys >> MM_RAM_FLAGS_IDX_SHIFT; /** @todo PAGE FLAGS */
            if (u16)
            {
                if ((u16 >> (MM_RAM_FLAGS_CREFS_SHIFT - MM_RAM_FLAGS_IDX_SHIFT)) != MM_RAM_FLAGS_CREFS_PHYSEXT)
                    pgmPoolTrackFlushGCPhysPT(pVM,
                                              pPage,
                                              u16 & MM_RAM_FLAGS_IDX_MASK,
                                              u16 >> (MM_RAM_FLAGS_CREFS_SHIFT - MM_RAM_FLAGS_IDX_SHIFT));
                else if (u16 != ((MM_RAM_FLAGS_CREFS_PHYSEXT << (MM_RAM_FLAGS_CREFS_SHIFT - MM_RAM_FLAGS_IDX_SHIFT)) | MM_RAM_FLAGS_IDX_OVERFLOWED))
                    pgmPoolTrackFlushGCPhysPTs(pVM, pPage, u16 & MM_RAM_FLAGS_IDX_MASK);
                else
                    rc = pgmPoolTrackFlushGCPhysPTsSlow(pVM, pPage);
                fFlushTLBs = true;
            }
#elif defined(PGMPOOL_WITH_CACHE)
            rc = pgmPoolTrackFlushGCPhysPTsSlow(pVM, pPage);
            fFlushTLBs = true;
#endif
        }

        /* next */
        if (--cPages == 0)
            break;
        i++;
    }

    if (fFlushTLBs && rc == VINF_SUCCESS)
    {
        PGM_INVL_GUEST_TLBS();
        Log(("pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs: flushing guest TLBs\n"));
    }
    else
        Log(("pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs: doesn't flush guest TLBs. rc=%Vrc\n", rc));
    return rc;
}
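
/*
 * Editorial note: PGM_PAGE_GET/SET_HNDL_PHYS_STATE cache the highest-priority
 * handler state on each PGMPAGE, which is why the loop above only performs
 * upgrades (the '<' comparison). Downgrades happen via the full recalculation
 * done by pgmHandlerPhysicalResetRamFlags / pgmHandlerPhysicalRecalcPageState
 * below.
 */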

/**
 * Deregisters a physical page access handler.
 *
 * @returns VBox status code.
 * @param   pVM     VM Handle.
 * @param   GCPhys  Start physical address.
 */
PGMDECL(int) PGMHandlerPhysicalDeregister(PVM pVM, RTGCPHYS GCPhys)
{
    /*
     * Find the handler.
     */
    pgmLock(pVM);
    PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys);
    if (pCur)
    {
        LogFlow(("PGMHandlerPhysicalDeregister: Removing Range %#VGp-%#VGp %s\n",
                 pCur->Core.Key, pCur->Core.KeyLast, HCSTRING(pCur->pszDesc)));

        /*
         * Clear the page bits and notify the REM about this change.
         */
        pgmHandlerPhysicalResetRamFlags(pVM, pCur);
        pgmHandlerPhysicalDeregisterNotifyREM(pVM, pCur);
        pgmUnlock(pVM);
        MMHyperFree(pVM, pCur);
        return VINF_SUCCESS;
    }
    pgmUnlock(pVM);

    AssertMsgFailed(("Didn't find range starting at %VGp\n", GCPhys));
    return VERR_PGM_HANDLER_NOT_FOUND;
}

/**
 * Shared code with PGMHandlerPhysicalModify.
 */
static void pgmHandlerPhysicalDeregisterNotifyREM(PVM pVM, PPGMPHYSHANDLER pCur)
{
    RTGCPHYS GCPhysStart = pCur->Core.Key;
    RTGCPHYS GCPhysLast = pCur->Core.KeyLast;

    /*
     * Page align the range.
     *
     * Since we've reset (recalculated) the physical handler state of all pages
     * we can make use of the page states to figure out whether a page should be
     * included in the REM notification or not.
     */
    if (    (pCur->Core.Key & PAGE_OFFSET_MASK)
        ||  ((pCur->Core.KeyLast + 1) & PAGE_OFFSET_MASK))
    {
        Assert(pCur->enmType != PGMPHYSHANDLERTYPE_MMIO);

        if (GCPhysStart & PAGE_OFFSET_MASK)
        {
            PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhysStart);
            if (    pPage
                &&  PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
            {
                RTGCPHYS GCPhys = (GCPhysStart + (PAGE_SIZE - 1)) & X86_PTE_PAE_PG_MASK;
                if (    GCPhys > GCPhysLast
                    ||  GCPhys < GCPhysStart)
                    return;
                GCPhysStart = GCPhys;
            }
            else
                GCPhysStart &= X86_PTE_PAE_PG_MASK;
            Assert(!pPage || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO); /* these are page aligned atm! */
        }

        if (GCPhysLast & PAGE_OFFSET_MASK)
        {
            PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhysLast);
            if (    pPage
                &&  PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
            {
                RTGCPHYS GCPhys = (GCPhysLast & X86_PTE_PAE_PG_MASK) - 1;
                if (    GCPhys < GCPhysStart
                    ||  GCPhys > GCPhysLast)
                    return;
                GCPhysLast = GCPhys;
            }
            else
                GCPhysLast |= PAGE_OFFSET_MASK;
            Assert(!pPage || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO); /* these are page aligned atm! */
        }
    }

    /*
     * Tell REM.
     */
    const bool fRestoreAsRAM = pCur->pfnHandlerR3
                            && pCur->enmType != PGMPHYSHANDLERTYPE_MMIO; /** @todo this isn't entirely correct. */
#ifndef IN_RING3
    REMNotifyHandlerPhysicalDeregister(pVM, pCur->enmType, GCPhysStart, GCPhysLast - GCPhysStart + 1, !!pCur->pfnHandlerR3, fRestoreAsRAM);
#else
    REMR3NotifyHandlerPhysicalDeregister(pVM, pCur->enmType, GCPhysStart, GCPhysLast - GCPhysStart + 1, !!pCur->pfnHandlerR3, fRestoreAsRAM);
#endif
}

/**
 * pgmHandlerPhysicalResetRamFlags helper that checks for
 * other handlers on edge pages.
 */
DECLINLINE(void) pgmHandlerPhysicalRecalcPageState(PPGM pPGM, RTGCPHYS GCPhys, bool fAbove, PPGMRAMRANGE *ppRamHint)
{
    /*
     * Look for other handlers.
     */
    unsigned uState = PGM_PAGE_HNDL_PHYS_STATE_NONE;
    for (;;)
    {
        PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pPGM->CTXSUFF(pTrees)->PhysHandlers, GCPhys, fAbove);
        if (    !pCur
            ||  ((fAbove ? pCur->Core.Key : pCur->Core.KeyLast) >> PAGE_SHIFT) != (GCPhys >> PAGE_SHIFT))
            break;
        unsigned uThisState = pgmHandlerPhysicalCalcState(pCur);
        uState = RT_MAX(uState, uThisState);

        /* next? */
        RTGCPHYS GCPhysNext = fAbove
                            ? pCur->Core.KeyLast + 1
                            : pCur->Core.Key - 1;
        if ((GCPhysNext >> PAGE_SHIFT) != (GCPhys >> PAGE_SHIFT))
            break;
        GCPhys = GCPhysNext;
    }

    /*
     * Update if we found something that is a higher priority
     * state than the current.
     */
    if (uState != PGM_PAGE_HNDL_PHYS_STATE_NONE)
    {
        PPGMPAGE pPage;
        int rc = pgmPhysGetPageWithHintEx(pPGM, GCPhys, &pPage, ppRamHint);
        if (    RT_SUCCESS(rc)
            &&  PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) < uState)
            PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, uState);
        else
            AssertRC(rc);
    }
}

/**
 * Resets RAM range flags.
 *
 * @param   pVM     The VM handle.
 * @param   pCur    The physical handler.
 *
 * @remark  We don't start messing with the shadow page tables, as we've already got code
 *          in Trap0e which deals with out of sync handler flags (originally conceived for
 *          global pages).
 */
static void pgmHandlerPhysicalResetRamFlags(PVM pVM, PPGMPHYSHANDLER pCur)
{
    /*
     * Iterate the guest ram pages updating the state.
     */
    RTUINT cPages = pCur->cPages;
    RTGCPHYS GCPhys = pCur->Core.Key;
    PPGMRAMRANGE pRamHint = NULL;
    PPGM pPGM = &pVM->pgm.s;
    for (;;)
    {
        PPGMPAGE pPage;
        int rc = pgmPhysGetPageWithHintEx(pPGM, GCPhys, &pPage, &pRamHint);
        if (RT_SUCCESS(rc))
            PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_NONE);
        else
            AssertRC(rc);

        /* next */
        if (--cPages == 0)
            break;
        GCPhys += PAGE_SIZE;
    }

    /*
     * Check for partial start and end pages.
     */
    if (pCur->Core.Key & PAGE_OFFSET_MASK)
        pgmHandlerPhysicalRecalcPageState(pPGM, pCur->Core.Key - 1, false /* fAbove */, &pRamHint);
    if ((pCur->Core.KeyLast & PAGE_OFFSET_MASK) != PAGE_SIZE - 1)
        pgmHandlerPhysicalRecalcPageState(pPGM, pCur->Core.KeyLast + 1, true /* fAbove */, &pRamHint);
}

/**
 * Modifies a physical page access handler.
 *
 * Modification can only be done to the range itself, not the type or anything else.
 *
 * @returns VBox status code.
 *          For all return codes other than VERR_PGM_HANDLER_NOT_FOUND and VINF_SUCCESS the range is deregistered
 *          and a new registration must be performed!
 * @param   pVM             VM handle.
 * @param   GCPhysCurrent   Current location.
 * @param   GCPhys          New location.
 * @param   GCPhysLast      New last location.
 */
PGMDECL(int) PGMHandlerPhysicalModify(PVM pVM, RTGCPHYS GCPhysCurrent, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast)
{
    /*
     * Remove it.
     */
    int rc;
    pgmLock(pVM);
    PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhysCurrent);
    if (pCur)
    {
        /*
         * Clear the ram flags. (We're gonna move or free it!)
         */
        pgmHandlerPhysicalResetRamFlags(pVM, pCur);
        const bool fRestoreAsRAM = pCur->pfnHandlerR3
                                && pCur->enmType != PGMPHYSHANDLERTYPE_MMIO; /** @todo this isn't entirely correct. */

        /*
         * Validate the new range, modify and reinsert.
         */
        if (GCPhysLast >= GCPhys)
        {
            /*
             * We require the range to be within registered ram.
             * There is no apparent need to support ranges which cover more than one ram range.
             */
            PPGMRAMRANGE pRam = CTXALLSUFF(pVM->pgm.s.pRamRanges);
            while (pRam && GCPhys > pRam->GCPhysLast)
                pRam = CTXALLSUFF(pRam->pNext);
            if (    pRam
                &&  GCPhys <= pRam->GCPhysLast
                &&  GCPhysLast >= pRam->GCPhys)
            {
                pCur->Core.Key      = GCPhys;
                pCur->Core.KeyLast  = GCPhysLast;
                pCur->cPages        = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;

                if (RTAvlroGCPhysInsert(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, &pCur->Core))
                {
                    /*
                     * Set ram flags, flush shadow PT entries and finally tell REM about this.
                     */
                    rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam);
                    if (rc == VINF_PGM_GCPHYS_ALIASED)
                    {
                        pVM->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
                        VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
                    }
                    pVM->pgm.s.fPhysCacheFlushPending = true;

#ifndef IN_RING3
                    REMNotifyHandlerPhysicalModify(pVM, pCur->enmType, GCPhysCurrent, GCPhys,
                                                   pCur->Core.KeyLast - GCPhys + 1, !!pCur->pfnHandlerR3, fRestoreAsRAM);
#else
                    REMR3NotifyHandlerPhysicalModify(pVM, pCur->enmType, GCPhysCurrent, GCPhys,
                                                     pCur->Core.KeyLast - GCPhys + 1, !!pCur->pfnHandlerR3, fRestoreAsRAM);
#endif
                    pgmUnlock(pVM);
                    Log(("PGMHandlerPhysicalModify: GCPhysCurrent=%VGp -> GCPhys=%VGp GCPhysLast=%VGp\n",
                         GCPhysCurrent, GCPhys, GCPhysLast));
                    return VINF_SUCCESS;
                }

                AssertMsgFailed(("Conflict! GCPhys=%VGp GCPhysLast=%VGp\n", GCPhys, GCPhysLast));
                rc = VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
            }
            else
            {
                AssertMsgFailed(("No RAM range for %VGp-%VGp\n", GCPhys, GCPhysLast));
                rc = VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
            }
        }
        else
        {
            AssertMsgFailed(("Invalid range %VGp-%VGp\n", GCPhys, GCPhysLast));
            rc = VERR_INVALID_PARAMETER;
        }

        /*
         * Invalid new location, free it.
         * We've only gotta notify REM and free the memory.
         */
        pgmHandlerPhysicalDeregisterNotifyREM(pVM, pCur);
        MMHyperFree(pVM, pCur);
    }
    else
    {
        AssertMsgFailed(("Didn't find range starting at %VGp\n", GCPhysCurrent));
        rc = VERR_PGM_HANDLER_NOT_FOUND;
    }

    pgmUnlock(pVM);
    return rc;
}
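
/*
 * Illustrative usage sketch (editorial addition): relocating a monitored
 * range when the guest remaps it. GCPhysOld, GCPhysNew and cb are assumed
 * names. Remember that on any failure other than VERR_PGM_HANDLER_NOT_FOUND
 * the old registration is gone and must be re-established with
 * PGMHandlerPhysicalRegisterEx.
 *
 *      rc = PGMHandlerPhysicalModify(pVM, GCPhysOld, GCPhysNew, GCPhysNew + cb - 1);
 */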

/**
 * Changes the callbacks associated with a physical access handler.
 *
 * @returns VBox status code.
 * @param   pVM             VM Handle.
 * @param   GCPhys          Start physical address.
 * @param   pfnHandlerR3    The R3 handler.
 * @param   pvUserR3        User argument to the R3 handler.
 * @param   pfnHandlerR0    The R0 handler.
 * @param   pvUserR0        User argument to the R0 handler.
 * @param   pfnHandlerGC    The GC handler.
 * @param   pvUserGC        User argument to the GC handler.
 *                          This must be a GC pointer because it will be relocated!
 * @param   pszDesc         Pointer to description string. This must not be freed.
 */
PGMDECL(int) PGMHandlerPhysicalChangeCallbacks(PVM pVM, RTGCPHYS GCPhys,
                                               R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnHandlerR3, RTR3PTR pvUserR3,
                                               R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnHandlerR0, RTR0PTR pvUserR0,
                                               GCPTRTYPE(PFNPGMGCPHYSHANDLER) pfnHandlerGC, RTGCPTR pvUserGC,
                                               R3PTRTYPE(const char *) pszDesc)
{
    /*
     * Get the handler.
     */
    int rc = VINF_SUCCESS;
    pgmLock(pVM);
    PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys);
    if (pCur)
    {
        /*
         * Change callbacks.
         */
        pCur->pfnHandlerR3 = pfnHandlerR3;
        pCur->pvUserR3     = pvUserR3;
        pCur->pfnHandlerR0 = pfnHandlerR0;
        pCur->pvUserR0     = pvUserR0;
        pCur->pfnHandlerGC = pfnHandlerGC;
        pCur->pvUserGC     = pvUserGC;
        pCur->pszDesc      = pszDesc;
    }
    else
    {
        AssertMsgFailed(("Didn't find range starting at %VGp\n", GCPhys));
        rc = VERR_PGM_HANDLER_NOT_FOUND;
    }

    pgmUnlock(pVM);
    return rc;
}

/**
 * Splits a physical access handler in two.
 *
 * @returns VBox status code.
 * @param   pVM             VM Handle.
 * @param   GCPhys          Start physical address of the handler.
 * @param   GCPhysSplit     The split address.
 */
PGMDECL(int) PGMHandlerPhysicalSplit(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysSplit)
{
    AssertReturn(GCPhys < GCPhysSplit, VERR_INVALID_PARAMETER);

    /*
     * Do the allocation without owning the lock.
     */
    PPGMPHYSHANDLER pNew;
    int rc = MMHyperAlloc(pVM, sizeof(*pNew), 0, MM_TAG_PGM_HANDLERS, (void **)&pNew);
    if (VBOX_FAILURE(rc))
        return rc;

    /*
     * Get the handler.
     */
    pgmLock(pVM);
    PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys);
    if (pCur)
    {
        if (GCPhysSplit <= pCur->Core.KeyLast)
        {
            /*
             * Create new handler node for the 2nd half.
             */
            *pNew = *pCur;
            pNew->Core.Key = GCPhysSplit;
            pNew->cPages = (pNew->Core.KeyLast - (pNew->Core.Key & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;

            pCur->Core.KeyLast = GCPhysSplit - 1;
            pCur->cPages = (pCur->Core.KeyLast - (pCur->Core.Key & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;

            if (RTAvlroGCPhysInsert(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, &pNew->Core))
            {
                LogFlow(("PGMHandlerPhysicalSplit: %VGp-%VGp and %VGp-%VGp\n",
                         pCur->Core.Key, pCur->Core.KeyLast, pNew->Core.Key, pNew->Core.KeyLast));
                pgmUnlock(pVM);
                return VINF_SUCCESS;
            }
            AssertMsgFailed(("Reinserting the 2nd half failed!\n"));
            rc = VERR_INTERNAL_ERROR;
        }
        else
        {
            AssertMsgFailed(("outside range: %VGp-%VGp split %VGp\n", pCur->Core.Key, pCur->Core.KeyLast, GCPhysSplit));
            rc = VERR_INVALID_PARAMETER;
        }
    }
    else
    {
        AssertMsgFailed(("Didn't find range starting at %VGp\n", GCPhys));
        rc = VERR_PGM_HANDLER_NOT_FOUND;
    }
    pgmUnlock(pVM);
    MMHyperFree(pVM, pNew);
    return rc;
}
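
/*
 * Illustrative usage sketch (editorial addition): carving the second page out
 * of a larger handler range so it can be given its own callbacks afterwards
 * with PGMHandlerPhysicalChangeCallbacks. GCPhysRange is an assumed name, and
 * the split address must lie within the original range.
 *
 *      rc = PGMHandlerPhysicalSplit(pVM, GCPhysRange, GCPhysRange + PAGE_SIZE);
 */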

/**
 * Joins up two adjacent physical access handlers which have the same callbacks.
 *
 * @returns VBox status code.
 * @param   pVM         VM Handle.
 * @param   GCPhys1     Start physical address of the first handler.
 * @param   GCPhys2     Start physical address of the second handler.
 */
PGMDECL(int) PGMHandlerPhysicalJoin(PVM pVM, RTGCPHYS GCPhys1, RTGCPHYS GCPhys2)
{
    /*
     * Get the handlers.
     */
    int rc;
    pgmLock(pVM);
    PPGMPHYSHANDLER pCur1 = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys1);
    if (pCur1)
    {
        PPGMPHYSHANDLER pCur2 = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys2);
        if (pCur2)
        {
            /*
             * Make sure that they are adjacent, and that they've got the same callbacks.
             */
            if (pCur1->Core.KeyLast + 1 == pCur2->Core.Key)
            {
                if (    pCur1->pfnHandlerGC == pCur2->pfnHandlerGC
                    &&  pCur1->pfnHandlerR0 == pCur2->pfnHandlerR0
                    &&  pCur1->pfnHandlerR3 == pCur2->pfnHandlerR3)
                {
                    PPGMPHYSHANDLER pCur3 = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys2);
                    if (pCur3 == pCur2)
                    {
                        pCur1->Core.KeyLast = pCur2->Core.KeyLast;
                        pCur1->cPages = (pCur1->Core.KeyLast - (pCur1->Core.Key & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
                        LogFlow(("PGMHandlerPhysicalJoin: %VGp-%VGp %VGp-%VGp\n",
                                 pCur1->Core.Key, pCur1->Core.KeyLast, pCur2->Core.Key, pCur2->Core.KeyLast));
                        pgmUnlock(pVM);
                        MMHyperFree(pVM, pCur2);
                        return VINF_SUCCESS;
                    }

                    AssertMsgFailed(("RTAvlroGCPhysRemove returned %p instead of %p!\n", pCur3, pCur2));
                    rc = VERR_INTERNAL_ERROR;
                }
                else
                {
                    AssertMsgFailed(("mismatching handlers\n"));
                    rc = VERR_ACCESS_DENIED;
                }
            }
            else
            {
                AssertMsgFailed(("not adjacent: %VGp-%VGp %VGp-%VGp\n",
                                 pCur1->Core.Key, pCur1->Core.KeyLast, pCur2->Core.Key, pCur2->Core.KeyLast));
                rc = VERR_INVALID_PARAMETER;
            }
        }
        else
        {
            AssertMsgFailed(("Didn't find range starting at %VGp\n", GCPhys2));
            rc = VERR_PGM_HANDLER_NOT_FOUND;
        }
    }
    else
    {
        AssertMsgFailed(("Didn't find range starting at %VGp\n", GCPhys1));
        rc = VERR_PGM_HANDLER_NOT_FOUND;
    }
    pgmUnlock(pVM);
    return rc;
}

/**
 * Resets any modifications to individual pages in a physical
 * page access handler region.
 *
 * This is used in pair with PGMHandlerPhysicalPageTempOff().
 *
 * @returns VBox status code.
 * @param   pVM     VM Handle
 * @param   GCPhys  Start physical address earlier passed to PGMR3HandlerPhysicalRegister().
 */
PGMDECL(int) PGMHandlerPhysicalReset(PVM pVM, RTGCPHYS GCPhys)
{
    pgmLock(pVM);

    /*
     * Find the handler.
     */
    int rc;
    PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys);
    if (pCur)
    {
        /*
         * Validate type.
         */
        switch (pCur->enmType)
        {
            case PGMPHYSHANDLERTYPE_PHYSICAL_WRITE:
            case PGMPHYSHANDLERTYPE_PHYSICAL_ALL:
            {
                /*
                 * Set the flags and flush shadow PT entries.
                 */
                STAM_COUNTER_INC(&pVM->pgm.s.StatHandlePhysicalReset);
                PPGMRAMRANGE pRam = pgmPhysGetRange(&pVM->pgm.s, GCPhys);
                Assert(pRam);
                rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam);
                if (rc == VINF_PGM_GCPHYS_ALIASED)
                {
                    pVM->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
                    VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
                }
                pVM->pgm.s.fPhysCacheFlushPending = true;

                rc = VINF_SUCCESS;
                break;
            }

            /*
             * Invalid.
             */
            case PGMPHYSHANDLERTYPE_MMIO:
                AssertMsgFailed(("Can't reset type %d!\n", pCur->enmType));
                rc = VERR_INTERNAL_ERROR;
                break;

            default:
                AssertMsgFailed(("Invalid type %d! Corruption!\n", pCur->enmType));
                rc = VERR_INTERNAL_ERROR;
                break;
        }
    }
    else
    {
        AssertMsgFailed(("Didn't find MMIO Range starting at %#x\n", GCPhys));
        rc = VERR_PGM_HANDLER_NOT_FOUND;
    }

    pgmUnlock(pVM);
    return rc;
}

/**
 * Temporarily turns off the access monitoring of a page within a monitored
 * physical write/all page access handler region.
 *
 * Use this when no further \#PFs are required for that page. Be aware that
 * a page directory sync might reset the flags, and turn on access monitoring
 * for the page.
 *
 * The caller must do required page table modifications.
 *
 * @returns VBox status code.
 * @param   pVM         VM Handle
 * @param   GCPhys      Start physical address earlier passed to PGMR3HandlerPhysicalRegister().
 *                      This must be a fully page aligned range or we risk messing up other
 *                      handlers installed for the start and end pages.
 * @param   GCPhysPage  Physical address of the page to turn off access monitoring for.
 */
PGMDECL(int) PGMHandlerPhysicalPageTempOff(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage)
{
    /*
     * Validate the range.
     */
    PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys);
    if (pCur)
    {
        if (    GCPhysPage >= pCur->Core.Key
            &&  GCPhysPage <= pCur->Core.KeyLast)
        {
            Assert(!(pCur->Core.Key & PAGE_OFFSET_MASK));
            Assert((pCur->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);

            AssertReturn(   pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_WRITE
                         || pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_ALL,
                         VERR_ACCESS_DENIED);

            /*
             * Change the page status.
             */
            PPGMPAGE pPage;
            int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysPage, &pPage);
            AssertRCReturn(rc, rc);
            PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
            return VINF_SUCCESS;
        }

        AssertMsgFailed(("The page %#x is outside the range %#x-%#x\n",
                         GCPhysPage, pCur->Core.Key, pCur->Core.KeyLast));
        return VERR_INVALID_PARAMETER;
    }

    AssertMsgFailed(("Specified physical handler start address %#x is invalid.\n", GCPhys));
    return VERR_PGM_HANDLER_NOT_FOUND;
}
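
/*
 * Illustrative usage sketch (editorial addition): silencing a hot page and
 * re-arming the whole region later with PGMHandlerPhysicalReset, the function
 * this one is documented to be used in pair with. GCPhysHandler and
 * GCPhysHotPage are assumed names.
 *
 *      rc = PGMHandlerPhysicalPageTempOff(pVM, GCPhysHandler, GCPhysHotPage);
 *      //   ... the guest now accesses that page without faulting ...
 *      rc = PGMHandlerPhysicalReset(pVM, GCPhysHandler);
 */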

/**
 * Turns access monitoring of a page within a monitored
 * physical write/all page access handler region back on.
 *
 * The caller must do required page table modifications.
 *
 * @returns VBox status code.
 * @param   pVM         VM Handle
 * @param   GCPhys      Start physical address earlier passed to PGMR3HandlerPhysicalRegister().
 *                      This must be a fully page aligned range or we risk messing up other
 *                      handlers installed for the start and end pages.
 * @param   GCPhysPage  Physical address of the page to turn on access monitoring for.
 */
PGMDECL(int) PGMHandlerPhysicalPageReset(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage)
{
    /*
     * Validate the range.
     */
    PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys);
    if (pCur)
    {
        if (    GCPhysPage >= pCur->Core.Key
            &&  GCPhysPage <= pCur->Core.KeyLast)
        {
            Assert(!(pCur->Core.Key & PAGE_OFFSET_MASK));
            Assert((pCur->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);

            AssertReturn(   pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_WRITE
                         || pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_ALL,
                         VERR_ACCESS_DENIED);

            /*
             * Change the page status.
             */
            PPGMPAGE pPage;
            int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysPage, &pPage);
            AssertRCReturn(rc, rc);
            PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, pgmHandlerPhysicalCalcState(pCur));
            return VINF_SUCCESS;
        }

        AssertMsgFailed(("The page %#x is outside the range %#x-%#x\n",
                         GCPhysPage, pCur->Core.Key, pCur->Core.KeyLast));
        return VERR_INVALID_PARAMETER;
    }

    AssertMsgFailed(("Specified physical handler start address %#x is invalid.\n", GCPhys));
    return VERR_PGM_HANDLER_NOT_FOUND;
}

/**
 * Checks whether a physical range is handled.
 *
 * @returns true if the address is within a registered handler range, false if not.
 * @param   pVM     VM Handle
 * @param   GCPhys  Start physical address earlier passed to PGMR3HandlerPhysicalRegister().
 */
PGMDECL(bool) PGMHandlerPhysicalIsRegistered(PVM pVM, RTGCPHYS GCPhys)
{
    /*
     * Find the handler.
     */
    PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys);
    if (pCur)
    {
        if (    GCPhys >= pCur->Core.Key
            &&  GCPhys <= pCur->Core.KeyLast)
        {
            Assert(    pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_WRITE
                   ||  pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_ALL
                   ||  pCur->enmType == PGMPHYSHANDLERTYPE_MMIO);
            return true;
        }
    }

    return false;
}
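
/*
 * Illustrative usage sketch (editorial addition): a cheap query before taking
 * a slower handler lookup path; GCPhysFault is an assumed name.
 *
 *      if (PGMHandlerPhysicalIsRegistered(pVM, GCPhysFault))
 *          //  ... dispatch to the handler machinery ...
 */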

/**
 * Searches for a virtual handler with a matching physical address.
 *
 * @returns VBox status code
 * @param   pVM     The VM handle.
 * @param   GCPhys  GC physical address to search for.
 * @param   ppVirt  Where to store the pointer to the virtual handler structure.
 * @param   piPage  Where to store the index of the cached physical page.
 */
int pgmHandlerVirtualFindByPhysAddr(PVM pVM, RTGCPHYS GCPhys, PPGMVIRTHANDLER *ppVirt, unsigned *piPage)
{
    STAM_PROFILE_START(CTXSUFF(&pVM->pgm.s.StatVirtHandleSearchByPhys), a);
    Assert(ppVirt);

    PPGMPHYS2VIRTHANDLER pCur;
    pCur = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysRangeGet(&CTXSUFF(pVM->pgm.s.pTrees)->PhysToVirtHandlers, GCPhys);
    if (pCur)
    {
        /* found a match! */
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
        AssertRelease(pCur->offNextAlias & PGMPHYS2VIRTHANDLER_IS_HEAD);
#endif
        *ppVirt = (PPGMVIRTHANDLER)((uintptr_t)pCur + pCur->offVirtHandler);
        *piPage = pCur - &(*ppVirt)->aPhysToVirt[0];

        LogFlow(("PHYS2VIRT: found match for %VGp -> %VGv *piPage=%#x\n", GCPhys, (*ppVirt)->GCPtr, *piPage));
        STAM_PROFILE_STOP(CTXSUFF(&pVM->pgm.s.StatVirtHandleSearchByPhys), a);
        return VINF_SUCCESS;
    }

    *ppVirt = NULL;
    STAM_PROFILE_STOP(CTXSUFF(&pVM->pgm.s.StatVirtHandleSearchByPhys), a);
    return VERR_PGM_HANDLER_NOT_FOUND;
}


/**
 * Deals with aliases in phys2virt.
 *
 * As pointed out by the various todos, this currently only deals with
 * aliases where the two ranges match 100%.
 *
 * @param   pVM         The VM handle.
 * @param   pPhys2Virt  The node we failed to insert.
 */
static void pgmHandlerVirtualInsertAliased(PVM pVM, PPGMPHYS2VIRTHANDLER pPhys2Virt)
{
    /*
     * First find the node which is conflicting with us.
     */
    /** @todo Deal with partial overlapping. (Unlikely situation, so I'm too lazy to do anything about it now.) */
    /** @todo check if the current head node covers the ground we do. This is highly unlikely
     * and I'm too lazy to implement this now as it will require sorting the list and stuff like that. */
    PPGMPHYS2VIRTHANDLER pHead = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTXSUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key);
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
    AssertReleaseMsg(pHead != pPhys2Virt, ("%VGp-%VGp offVirtHandler=%#RX32\n",
                                           pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler));
#endif
    if (RT_UNLIKELY(!pHead || pHead->Core.KeyLast != pPhys2Virt->Core.KeyLast))
    {
        /** @todo do something clever here... */
        LogRel(("pgmHandlerVirtualInsertAliased: %VGp-%VGp\n", pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast));
        pPhys2Virt->offNextAlias = 0;
        return;
    }

    /*
     * Insert ourselves as the next node.
     */
    if (!(pHead->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK))
        pPhys2Virt->offNextAlias = PGMPHYS2VIRTHANDLER_IN_TREE;
    else
    {
        PPGMPHYS2VIRTHANDLER pNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pHead + (pHead->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
        pPhys2Virt->offNextAlias = ((intptr_t)pNext - (intptr_t)pPhys2Virt)
                                 | PGMPHYS2VIRTHANDLER_IN_TREE;
    }
    pHead->offNextAlias = ((intptr_t)pPhys2Virt - (intptr_t)pHead)
                        | (pHead->offNextAlias & ~PGMPHYS2VIRTHANDLER_OFF_MASK);
    Log(("pgmHandlerVirtualInsertAliased: %VGp-%VGp offNextAlias=%#RX32\n", pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias));
}

/**
 * Resets one virtual handler range.
 *
 * This is called by HandlerVirtualUpdate when it has detected some kind of
 * problem and has started clearing the virtual handler page states (or
 * when there have been registrations/deregistrations). For this reason this
 * function will only update the page status if it's lower than desired.
 *
 * @returns 0
 * @param   pNode   Pointer to a PGMVIRTHANDLER.
 * @param   pvUser  The VM handle.
 */
DECLCALLBACK(int) pgmHandlerVirtualResetOne(PAVLROGCPTRNODECORE pNode, void *pvUser)
{
    PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)pNode;
    PVM pVM = (PVM)pvUser;

    /*
     * Iterate the pages and apply the new state.
     */
    unsigned uState = pgmHandlerVirtualCalcState(pCur);
    PPGMRAMRANGE pRamHint = NULL;
    RTGCUINTPTR offPage = ((RTGCUINTPTR)pCur->GCPtr & PAGE_OFFSET_MASK);
    RTGCUINTPTR cbLeft = pCur->cb;
    for (unsigned iPage = 0; iPage < pCur->cPages; iPage++)
    {
        PPGMPHYS2VIRTHANDLER pPhys2Virt = &pCur->aPhysToVirt[iPage];
        if (pPhys2Virt->Core.Key != NIL_RTGCPHYS)
        {
            /*
             * Update the page state wrt virtual handlers.
             */
            PPGMPAGE pPage;
            int rc = pgmPhysGetPageWithHintEx(&pVM->pgm.s, pPhys2Virt->Core.Key, &pPage, &pRamHint);
            if (    RT_SUCCESS(rc)
                &&  PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) < uState)
                PGM_PAGE_SET_HNDL_VIRT_STATE(pPage, uState);
            else
                AssertRC(rc);

            /*
             * Need to insert the page in the Phys2Virt lookup tree?
             */
            if (pPhys2Virt->Core.KeyLast == NIL_RTGCPHYS)
            {
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
                AssertRelease(!pPhys2Virt->offNextAlias);
#endif
                unsigned cbPhys = cbLeft;
                if (cbPhys > PAGE_SIZE - offPage)
                    cbPhys = PAGE_SIZE - offPage;
                else
                    Assert(iPage == pCur->cPages - 1);
                pPhys2Virt->Core.KeyLast = pPhys2Virt->Core.Key + cbPhys - 1; /* inclusive */
                pPhys2Virt->offNextAlias = PGMPHYS2VIRTHANDLER_IS_HEAD | PGMPHYS2VIRTHANDLER_IN_TREE;
                if (!RTAvlroGCPhysInsert(&pVM->pgm.s.CTXSUFF(pTrees)->PhysToVirtHandlers, &pPhys2Virt->Core))
                    pgmHandlerVirtualInsertAliased(pVM, pPhys2Virt);
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
                else
                    AssertReleaseMsg(RTAvlroGCPhysGet(&pVM->pgm.s.CTXSUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key) == &pPhys2Virt->Core,
                                     ("%VGp-%VGp offNextAlias=%#RX32\n",
                                      pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias));
#endif
                Log2(("PHYS2VIRT: Insert physical range %VGp-%VGp offNextAlias=%#RX32 %s\n",
                      pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias, R3STRING(pCur->pszDesc)));
            }
        }
        cbLeft -= PAGE_SIZE - offPage;
        offPage = 0;
    }

    return 0;
}

#if defined(VBOX_STRICT) || defined(LOG_ENABLED)
/**
 * Worker for pgmHandlerVirtualDumpPhysPages.
 *
 * @returns 0 (continue enumeration).
 * @param   pNode   The virtual handler node.
 * @param   pvUser  User argument, unused.
 */
static DECLCALLBACK(int) pgmHandlerVirtualDumpPhysPagesCallback(PAVLROGCPHYSNODECORE pNode, void *pvUser)
{
    PPGMPHYS2VIRTHANDLER pCur = (PPGMPHYS2VIRTHANDLER)pNode;
    PPGMVIRTHANDLER pVirt = (PPGMVIRTHANDLER)((uintptr_t)pCur + pCur->offVirtHandler);
    Log(("PHYS2VIRT: Range %VGp-%VGp for virtual handler: %s\n", pCur->Core.Key, pCur->Core.KeyLast, pVirt->pszDesc));
    return 0;
}


/**
 * Assertion / logging helper for dumping all the
 * virtual handlers to the log.
 *
 * @param   pVM     Pointer to the shared VM structure.
 */
void pgmHandlerVirtualDumpPhysPages(PVM pVM)
{
    RTAvlroGCPhysDoWithAll(CTXSUFF(&pVM->pgm.s.pTrees)->PhysToVirtHandlers, true /* from left */,
                           pgmHandlerVirtualDumpPhysPagesCallback, 0);
}
#endif /* VBOX_STRICT || LOG_ENABLED */

#ifdef VBOX_STRICT

/**
 * State structure used by the PGMAssertHandlerAndFlagsInSync() function
 * and its AVL enumerators.
 */
typedef struct PGMAHAFIS
{
    /** The current physical address. */
    RTGCPHYS    GCPhys;
    /** The state we've calculated. */
    unsigned    uVirtStateFound;
    /** The state we're matching up to. */
    unsigned    uVirtState;
    /** Number of errors. */
    unsigned    cErrors;
    /** The VM handle. */
    PVM         pVM;
} PGMAHAFIS, *PPGMAHAFIS;


#if 0 /* unused */
/**
 * Verify virtual handler by matching physical address.
 *
 * @returns 0
 * @param   pNode   Pointer to a PGMVIRTHANDLER.
 * @param   pvUser  Pointer to user parameter.
 */
static DECLCALLBACK(int) pgmHandlerVirtualVerifyOneByPhysAddr(PAVLROGCPTRNODECORE pNode, void *pvUser)
{
    PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)pNode;
    PPGMAHAFIS pState = (PPGMAHAFIS)pvUser;

    for (unsigned iPage = 0; iPage < pCur->cPages; iPage++)
    {
        if ((pCur->aPhysToVirt[iPage].Core.Key & X86_PTE_PAE_PG_MASK) == pState->GCPhys)
        {
            unsigned uState = pgmHandlerVirtualCalcState(pCur);
            if (pState->uVirtState < uState)
            {
                error
            }

            if (pState->uVirtState == uState)
                break; //??
        }
    }
    return 0;
}
#endif /* unused */

/**
 * Verify a virtual handler (enumeration callback).
 *
 * Called by PGMAssertHandlerAndFlagsInSync to check the sanity of all
 * the virtual handlers, esp. that the physical addresses match up.
 *
 * @returns 0
 * @param   pNode   Pointer to a PGMVIRTHANDLER.
 * @param   pvUser  Pointer to a PPGMAHAFIS structure.
 */
static DECLCALLBACK(int) pgmHandlerVirtualVerifyOne(PAVLROGCPTRNODECORE pNode, void *pvUser)
{
    PPGMVIRTHANDLER pVirt = (PPGMVIRTHANDLER)pNode;
    PPGMAHAFIS pState = (PPGMAHAFIS)pvUser;
    PVM pVM = pState->pVM;

    /*
     * Validate the type and calc state.
     */
    switch (pVirt->enmType)
    {
        case PGMVIRTHANDLERTYPE_WRITE:
        case PGMVIRTHANDLERTYPE_ALL:
            break;
        default:
            AssertMsgFailed(("unknown/wrong enmType=%d\n", pVirt->enmType));
            pState->cErrors++;
            return 0;
    }
    const unsigned uState = pgmHandlerVirtualCalcState(pVirt);

    /*
     * Check key alignment.
     */
    if (    (pVirt->aPhysToVirt[0].Core.Key & PAGE_OFFSET_MASK) != ((RTGCUINTPTR)pVirt->GCPtr & PAGE_OFFSET_MASK)
        &&  pVirt->aPhysToVirt[0].Core.Key != NIL_RTGCPHYS)
    {
        AssertMsgFailed(("virt handler phys has incorrect key! %VGp %VGv %s\n",
                         pVirt->aPhysToVirt[0].Core.Key, pVirt->GCPtr, HCSTRING(pVirt->pszDesc)));
        pState->cErrors++;
    }

    if (    (pVirt->aPhysToVirt[pVirt->cPages - 1].Core.KeyLast & PAGE_OFFSET_MASK) != ((RTGCUINTPTR)pVirt->GCPtrLast & PAGE_OFFSET_MASK)
        &&  pVirt->aPhysToVirt[pVirt->cPages - 1].Core.Key != NIL_RTGCPHYS)
    {
        AssertMsgFailed(("virt handler phys has incorrect key! %VGp %VGv %s\n",
                         pVirt->aPhysToVirt[pVirt->cPages - 1].Core.KeyLast, pVirt->GCPtrLast, HCSTRING(pVirt->pszDesc)));
        pState->cErrors++;
    }

    /*
     * Check pages for sanity and state.
     */
    RTGCUINTPTR GCPtr = (RTGCUINTPTR)pVirt->GCPtr;
    for (unsigned iPage = 0; iPage < pVirt->cPages; iPage++, GCPtr += PAGE_SIZE)
    {
        RTGCPHYS GCPhysGst;
        uint64_t fGst;
        int rc = PGMGstGetPage(pVM, (RTGCPTR)GCPtr, &fGst, &GCPhysGst);
        if (    rc == VERR_PAGE_NOT_PRESENT
            ||  rc == VERR_PAGE_TABLE_NOT_PRESENT)
        {
            if (pVirt->aPhysToVirt[iPage].Core.Key != NIL_RTGCPHYS)
            {
                AssertMsgFailed(("virt handler phys out of sync. %VGp GCPhysNew=~0 iPage=%#x %VGv %s\n",
                                 pVirt->aPhysToVirt[iPage].Core.Key, iPage, GCPtr, HCSTRING(pVirt->pszDesc)));
                pState->cErrors++;
            }
            continue;
        }

        AssertRCReturn(rc, 0);
        if ((pVirt->aPhysToVirt[iPage].Core.Key & X86_PTE_PAE_PG_MASK) != GCPhysGst)
        {
            AssertMsgFailed(("virt handler phys out of sync. %VGp GCPhysGst=%VGp iPage=%#x %VGv %s\n",
                             pVirt->aPhysToVirt[iPage].Core.Key, GCPhysGst, iPage, GCPtr, HCSTRING(pVirt->pszDesc)));
            pState->cErrors++;
            continue;
        }

        PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhysGst);
        if (!pPage)
        {
            AssertMsgFailed(("virt handler getting ram flags. GCPhysGst=%VGp iPage=%#x %VGv %s\n",
                             GCPhysGst, iPage, GCPtr, HCSTRING(pVirt->pszDesc)));
            pState->cErrors++;
            continue;
        }

        if (PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) < uState)
        {
            AssertMsgFailed(("virt handler state mismatch. HCPhys=%VHp GCPhysGst=%VGp iPage=%#x %VGv state=%d expected>=%d %s\n",
                             pPage->HCPhys, GCPhysGst, iPage, GCPtr, PGM_PAGE_GET_HNDL_VIRT_STATE(pPage), uState, HCSTRING(pVirt->pszDesc)));
            pState->cErrors++;
            continue;
        }
    } /* for pages in virtual mapping. */

    return 0;
}

/**
 * Asserts that the handlers+guest-page-tables == ramrange-flags and
 * that the physical addresses associated with virtual handlers are correct.
 *
 * @returns Number of mismatches.
 * @param   pVM     The VM handle.
 */
PGMDECL(unsigned) PGMAssertHandlerAndFlagsInSync(PVM pVM)
{
    PPGM pPGM = &pVM->pgm.s;
    PGMAHAFIS State;
    State.GCPhys = 0;
    State.uVirtState = 0;
    State.uVirtStateFound = 0;
    State.cErrors = 0;
    State.pVM = pVM;

    /*
     * Check the RAM flags against the handlers.
     */
    for (PPGMRAMRANGE pRam = CTXALLSUFF(pPGM->pRamRanges); pRam; pRam = CTXALLSUFF(pRam->pNext))
    {
        const unsigned cPages = pRam->cb >> PAGE_SHIFT;
        for (unsigned iPage = 0; iPage < cPages; iPage++)
        {
            PGMPAGE const *pPage = &pRam->aPages[iPage];
            if (PGM_PAGE_HAS_ANY_HANDLERS(pPage))
            {
                State.GCPhys = pRam->GCPhys + (iPage << PAGE_SHIFT);

                /*
                 * Physical first - calculate the state based on the handlers
                 * active on the page, then compare.
                 */
                if (PGM_PAGE_HAS_ANY_PHYSICAL_HANDLERS(pPage))
                {
                    /* the first */
                    PPGMPHYSHANDLER pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pPGM->CTXSUFF(pTrees)->PhysHandlers, State.GCPhys);
                    if (!pPhys)
                    {
                        pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pPGM->CTXSUFF(pTrees)->PhysHandlers, State.GCPhys, true);
                        if (    pPhys
                            &&  pPhys->Core.Key > (State.GCPhys + PAGE_SIZE - 1))
                            pPhys = NULL;
                        Assert(!pPhys || pPhys->Core.Key >= State.GCPhys);
                    }
                    if (pPhys)
                    {
                        unsigned uState = pgmHandlerPhysicalCalcState(pPhys);

                        /* more? */
                        while (pPhys->Core.KeyLast < (State.GCPhys | PAGE_OFFSET_MASK))
                        {
                            PPGMPHYSHANDLER pPhys2 = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pPGM->CTXSUFF(pTrees)->PhysHandlers,
                                                                                              pPhys->Core.KeyLast + 1, true);
                            if (    !pPhys2
                                ||  pPhys2->Core.Key > (State.GCPhys | PAGE_OFFSET_MASK))
                                break;
                            unsigned uState2 = pgmHandlerPhysicalCalcState(pPhys2);
                            uState = RT_MAX(uState, uState2);
                            pPhys = pPhys2;
                        }

                        /* compare.*/
                        if (    PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != uState
                            &&  PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_DISABLED)
                        {
                            AssertMsgFailed(("ram range vs phys handler flags mismatch. GCPhys=%RGp state=%d expected=%d %s\n",
                                             State.GCPhys, PGM_PAGE_GET_HNDL_PHYS_STATE(pPage), uState, pPhys->pszDesc));
                            State.cErrors++;
                        }

#ifdef IN_RING3
                        /* validate that REM is handling it. */
                        if (    !REMR3IsPageAccessHandled(pVM, State.GCPhys)
                            /* ignore shadowed ROM for the time being. */ /// @todo PAGE FLAGS
                            &&  (pPage->HCPhys & (MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO2)) != (MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO2))
                        {
                            AssertMsgFailed(("ram range vs phys handler REM mismatch. GCPhys=%RGp state=%d %s\n",
                                             State.GCPhys, PGM_PAGE_GET_HNDL_PHYS_STATE(pPage), pPhys->pszDesc));
                            State.cErrors++;
                        }
#endif
                    }
                    else
                    {
                        AssertMsgFailed(("ram range vs phys handler mismatch. no handler for GCPhys=%RGp\n", State.GCPhys));
                        State.cErrors++;
                    }
                }

                /*
                 * Virtual handlers.
                 */
                if (PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage))
                {
                    State.uVirtState = PGM_PAGE_GET_HNDL_VIRT_STATE(pPage);
#if 1
                    /* locate all the matching physical ranges. */
                    State.uVirtStateFound = PGM_PAGE_HNDL_VIRT_STATE_NONE;
                    RTGCPHYS GCPhysKey = State.GCPhys;
                    for (;;)
                    {
                        PPGMPHYS2VIRTHANDLER pPhys2Virt = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGetBestFit(&CTXSUFF(pVM->pgm.s.pTrees)->PhysToVirtHandlers,
                                                                                                        GCPhysKey, true /* above-or-equal */);
                        if (    !pPhys2Virt
                            ||  (pPhys2Virt->Core.Key & X86_PTE_PAE_PG_MASK) != State.GCPhys)
                            break;

                        /* the head */
                        GCPhysKey = pPhys2Virt->Core.KeyLast;
                        PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)((uintptr_t)pPhys2Virt + pPhys2Virt->offVirtHandler);
                        unsigned uState = pgmHandlerVirtualCalcState(pCur);
                        State.uVirtStateFound = RT_MAX(State.uVirtStateFound, uState);

                        /* any aliases */
                        while (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK)
                        {
                            pPhys2Virt = (PPGMPHYS2VIRTHANDLER)((uintptr_t)pPhys2Virt + (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
                            pCur = (PPGMVIRTHANDLER)((uintptr_t)pPhys2Virt + pPhys2Virt->offVirtHandler);
                            uState = pgmHandlerVirtualCalcState(pCur);
                            State.uVirtStateFound = RT_MAX(State.uVirtStateFound, uState);
                        }

                        /* done? */
                        if ((GCPhysKey & X86_PTE_PAE_PG_MASK) != State.GCPhys)
                            break;
                    }
#else
                    /* very slow */
                    RTAvlroGCPtrDoWithAll(CTXSUFF(&pVM->pgm.s.pTrees)->VirtHandlers, true, pgmHandlerVirtualVerifyOneByPhysAddr, &State);
#endif
                    if (State.uVirtState != State.uVirtStateFound)
                    {
                        AssertMsgFailed(("ram range vs virt handler flags mismatch. GCPhys=%RGp uVirtState=%#x uVirtStateFound=%#x\n",
                                         State.GCPhys, State.uVirtState, State.uVirtStateFound));
                        State.cErrors++;
                    }
                }
            }
        } /* foreach page in ram range. */
    } /* foreach ram range. */

    /*
     * Check that the physical addresses of the virtual handlers match up
     * and that they are otherwise sane.
     */
    RTAvlroGCPtrDoWithAll(CTXSUFF(&pVM->pgm.s.pTrees)->VirtHandlers, true, pgmHandlerVirtualVerifyOne, &State);

    /*
     * Do the reverse check for physical handlers.
     */
    /** @todo */

    return State.cErrors;
}

#endif /* VBOX_STRICT */