VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp@ 6927

Last change on this file since 6927 was 6927, checked in by vboxsync, 17 years ago

Converted MM_RAM_FLAGS_VIRTUAL_HANDLER, MM_RAM_FLAGS_VIRTUAL_WRITE
and MM_RAM_FLAGS_VIRTUAL_ALL into a two bit state variable in PGMPAGE.
I've checked this thrice because, like last time, bugs may have odd
sideeffects and hide for a while before showing up. Hope I got this
right (unlike for phys).

Fixed a regression from the MM_RAM_FLAGS_PHYSICAL in the physical read/write code.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 55.5 KB
Line 
1/* $Id: PGMAllHandler.cpp 6927 2008-02-12 20:44:35Z vboxsync $ */
2/** @file
3 * PGM - Page Manager / Monitor, Access Handlers.
4 */
5
6/*
7 * Copyright (C) 2006-2007 innotek GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_PGM
23#include <VBox/dbgf.h>
24#include <VBox/pgm.h>
25#include <VBox/iom.h>
26#include <VBox/mm.h>
27#include <VBox/em.h>
28#include <VBox/stam.h>
29#include <VBox/rem.h>
30#include <VBox/dbgf.h>
31#include <VBox/rem.h>
32#include "PGMInternal.h"
33#include <VBox/vm.h>
34
35#include <VBox/log.h>
36#include <iprt/assert.h>
37#include <iprt/asm.h>
38#include <iprt/string.h>
39#include <VBox/param.h>
40#include <VBox/err.h>
41#include <VBox/selm.h>
42
43
44/*******************************************************************************
45* Internal Functions *
46*******************************************************************************/
47static int pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVM pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam);
48static void pgmHandlerPhysicalDeregisterNotifyREM(PVM pVM, PPGMPHYSHANDLER pCur);
49static void pgmHandlerPhysicalResetRamFlags(PVM pVM, PPGMPHYSHANDLER pCur);
50
51
52
/**
 * Register an access handler for a physical range.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS when successfully installed.
 * @retval  VINF_PGM_GCPHYS_ALIASED when the shadow PTs could not be updated because
 *          the guest page is aliased or/and mapped by multiple PTs. A CR3 sync has been
 *          flagged together with a pool clearing.
 * @retval  VERR_PGM_HANDLER_PHYSICAL_CONFLICT if the range conflicts with an existing
 *          one. A debug assertion is raised.
 *
 * @param   pVM             VM Handle.
 * @param   enmType         Handler type. Any of the PGMPHYSHANDLERTYPE_PHYSICAL* enums.
 * @param   GCPhys          Start physical address.
 * @param   GCPhysLast      Last physical address. (inclusive)
 * @param   pfnHandlerR3    The R3 handler.
 * @param   pvUserR3        User argument to the R3 handler.
 * @param   pfnHandlerR0    The R0 handler.
 * @param   pvUserR0        User argument to the R0 handler.
 * @param   pfnHandlerGC    The GC handler.
 * @param   pvUserGC        User argument to the GC handler.
 *                          This must be a GC pointer because it will be relocated!
 * @param   pszDesc         Pointer to description string. This must not be freed.
 */
PGMDECL(int) PGMHandlerPhysicalRegisterEx(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast,
                                          R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnHandlerR3, RTR3PTR pvUserR3,
                                          R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnHandlerR0, RTR0PTR pvUserR0,
                                          GCPTRTYPE(PFNPGMGCPHYSHANDLER) pfnHandlerGC, RTGCPTR pvUserGC,
                                          R3PTRTYPE(const char *) pszDesc)
{
    Log(("PGMHandlerPhysicalRegisterEx: enmType=%d GCPhys=%VGp GCPhysLast=%VGp pfnHandlerR3=%VHv pvUserR3=%VHv pfnHandlerR0=%VHv pvUserR0=%VHv pfnHandlerGC=%VGv pvUserGC=%VGv pszDesc=%s\n",
         enmType, GCPhys, GCPhysLast, pfnHandlerR3, pvUserR3, pfnHandlerR0, pvUserR0, pfnHandlerGC, pvUserGC, HCSTRING(pszDesc)));

    /*
     * Validate input.
     */
    if (GCPhys >= GCPhysLast)
    {
        AssertMsgFailed(("GCPhys >= GCPhysLast (%#x >= %#x)\n", GCPhys, GCPhysLast));
        return VERR_INVALID_PARAMETER;
    }
    switch (enmType)
    {
        case PGMPHYSHANDLERTYPE_MMIO:
        case PGMPHYSHANDLERTYPE_PHYSICAL_WRITE:
        case PGMPHYSHANDLERTYPE_PHYSICAL_ALL:
            break;
        default:
            AssertMsgFailed(("Invalid input enmType=%d!\n", enmType));
            return VERR_INVALID_PARAMETER;
    }
    /* pvUserGC values below 64KB are treated as plain cookies and skip the
       GC<->HC round-trip check; anything larger must be a relocatable GC pointer. */
    if (    (RTGCUINTPTR)pvUserGC >= 0x10000
        &&  MMHyperHC2GC(pVM, MMHyperGC2HC(pVM, pvUserGC)) != pvUserGC)
    {
        AssertMsgFailed(("Not GC pointer! pvUserGC=%VGv\n", pvUserGC));
        return VERR_INVALID_PARAMETER;
    }
    AssertReturn(pfnHandlerR3 || pfnHandlerR0 || pfnHandlerGC, VERR_INVALID_PARAMETER);

    /*
     * We require the range to be within registered ram.
     * There is no apparent need to support ranges which cover more than one ram range.
     */
    PPGMRAMRANGE pRam = CTXALLSUFF(pVM->pgm.s.pRamRanges);
    while (pRam && GCPhys > pRam->GCPhysLast)
        pRam = CTXALLSUFF(pRam->pNext);
    if (    !pRam
        ||  GCPhysLast < pRam->GCPhys
        ||  GCPhys > pRam->GCPhysLast)
    {
#ifdef IN_RING3
        /*
         * If this is an MMIO registration, we'll just add a range for it.
         * (Only possible in ring-3; R0/GC callers fail below.)
         */
        if (    enmType == PGMPHYSHANDLERTYPE_MMIO
            &&  (   !pRam
                 || GCPhysLast < pRam->GCPhys)
           )
        {
            size_t cb = GCPhysLast - GCPhys + 1;
            Assert(cb == RT_ALIGN_Z(cb, PAGE_SIZE));
            int rc = PGMR3PhysRegister(pVM, NULL, GCPhys, cb, MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO, NULL, pszDesc);
            if (VBOX_FAILURE(rc))
                return rc;

            /* search again. */
            pRam = CTXALLSUFF(pVM->pgm.s.pRamRanges);
            while (pRam && GCPhys > pRam->GCPhysLast)
                pRam = CTXALLSUFF(pRam->pNext);
        }

        if (    !pRam
            ||  GCPhysLast < pRam->GCPhys
            ||  GCPhys > pRam->GCPhysLast)
#endif /* IN_RING3 */
        {
#ifdef IN_RING3
            DBGFR3Info(pVM, "phys", NULL, NULL);
#endif
            AssertMsgFailed(("No RAM range for %VGp-%VGp\n", GCPhys, GCPhysLast));
            return VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
        }
    }

    /*
     * Allocate and initialize the new entry.
     * Note: cPages rounds up so a range ending mid-page still counts the partial page.
     */
    PPGMPHYSHANDLER pNew;
    int rc = MMHyperAlloc(pVM, sizeof(*pNew), 0, MM_TAG_PGM_HANDLERS, (void **)&pNew);
    if (VBOX_FAILURE(rc))
        return rc;

    pNew->Core.Key      = GCPhys;
    pNew->Core.KeyLast  = GCPhysLast;
    pNew->enmType       = enmType;
    pNew->cPages        = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
    pNew->pfnHandlerR3  = pfnHandlerR3;
    pNew->pvUserR3      = pvUserR3;
    pNew->pfnHandlerR0  = pfnHandlerR0;
    pNew->pvUserR0      = pvUserR0;
    pNew->pfnHandlerGC  = pfnHandlerGC;
    pNew->pvUserGC      = pvUserGC;
    pNew->pszDesc       = pszDesc;

    pgmLock(pVM);

    /*
     * Try insert into list.
     */
    if (RTAvlroGCPhysInsert(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, &pNew->Core))
    {
        rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pNew, pRam);
        if (rc == VINF_PGM_GCPHYS_ALIASED)
        {
            /* Couldn't flush the affected shadow PTs individually; schedule a
               full pool clear + CR3 resync instead. */
            pVM->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
            VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
        }
        pVM->pgm.s.fPhysCacheFlushPending = true;
#ifndef IN_RING3
        REMNotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, GCPhysLast - GCPhys + 1, !!pfnHandlerR3);
#else
        REMR3NotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, GCPhysLast - GCPhys + 1, !!pfnHandlerR3);
#endif
        pgmUnlock(pVM);
        if (rc != VINF_SUCCESS)
            Log(("PGMHandlerPhysicalRegisterEx: returns %Vrc (%VGp-%VGp)\n", rc, GCPhys, GCPhysLast));
        return rc;
    }

    pgmUnlock(pVM);

    /* Insertion failed: the range overlaps an existing handler. */
#if defined(IN_RING3) && defined(VBOX_STRICT)
    DBGFR3Info(pVM, "handlers", "phys nostats", NULL);
#endif
    AssertMsgFailed(("Conflict! GCPhys=%VGp GCPhysLast=%VGp pszDesc=%s\n", GCPhys, GCPhysLast, pszDesc));
    MMHyperFree(pVM, pNew);
    return VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
}
211
212
/**
 * Sets ram range flags and attempts updating shadow PTs.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS when shadow PTs were successfully updated.
 * @retval  VINF_PGM_GCPHYS_ALIASED when the shadow PTs could not be updated because
 *          the guest page is aliased or/and mapped by multiple PTs.
 * @param   pVM     The VM handle.
 * @param   pCur    The physical handler.
 * @param   pRam    The RAM range containing the handler range.
 */
static int pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVM pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam)
{
    /*
     * Iterate the guest ram pages updating the flags and flushing PT entries
     * mapping the page.
     */
    bool fFlushTLBs = false;
#if defined(PGMPOOL_WITH_GCPHYS_TRACKING) || defined(PGMPOOL_WITH_CACHE)
    int rc = VINF_SUCCESS;
#else
    /* Without GCPhys tracking or a pool cache we cannot locate the shadow PTEs
       mapping the page, so always report the aliased status (caller flushes). */
    const int rc = VINF_PGM_GCPHYS_ALIASED;
#endif
    const unsigned uState = pgmHandlerPhysicalCalcState(pCur);
    RTUINT cPages = pCur->cPages;
    RTUINT i = (pCur->Core.Key - pRam->GCPhys) >> PAGE_SHIFT;
    for (;;)
    {
        /* Physical chunk in dynamically allocated range not present? */
        if (RT_UNLIKELY(!PGM_PAGE_GET_HCPHYS(&pRam->aPages[i])))
        {
            RTGCPHYS GCPhys = pRam->GCPhys + (i << PAGE_SHIFT);
#ifdef IN_RING3
            int rc2 = pgmr3PhysGrowRange(pVM, GCPhys);
#else
            int rc2 = CTXALLMID(VMM, CallHost)(pVM, VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
#endif
            if (rc2 != VINF_SUCCESS)
                return rc2;
        }

        /* Only do upgrades. (A page may already carry a stricter state from
           another handler sharing it.) */
        PPGMPAGE pPage = &pRam->aPages[i];
        if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) < uState)
        {
            PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, uState);
            Assert(PGM_PAGE_GET_HCPHYS(pPage));

#ifdef PGMPOOL_WITH_GCPHYS_TRACKING
            /* This code also makes ASSUMPTIONS about the cRefs and stuff. */
            Assert(MM_RAM_FLAGS_IDX_SHIFT < MM_RAM_FLAGS_CREFS_SHIFT);
            const uint16_t u16 = pRam->aPages[i].HCPhys >> MM_RAM_FLAGS_IDX_SHIFT; /** @todo PAGE FLAGS */
            if (u16)
            {
                /* u16 == 0 means no shadow PTE references this page; otherwise
                   flush one PT, all PTs, or fall back to the slow full scan. */
                if ((u16 >> (MM_RAM_FLAGS_CREFS_SHIFT - MM_RAM_FLAGS_IDX_SHIFT)) != MM_RAM_FLAGS_CREFS_PHYSEXT)
                    pgmPoolTrackFlushGCPhysPT(pVM,
                                              pPage,
                                              u16 & MM_RAM_FLAGS_IDX_MASK,
                                              u16 >> (MM_RAM_FLAGS_CREFS_SHIFT - MM_RAM_FLAGS_IDX_SHIFT));
                else if (u16 != ((MM_RAM_FLAGS_CREFS_PHYSEXT << (MM_RAM_FLAGS_CREFS_SHIFT - MM_RAM_FLAGS_IDX_SHIFT)) | MM_RAM_FLAGS_IDX_OVERFLOWED))
                    pgmPoolTrackFlushGCPhysPTs(pVM, pPage, u16 & MM_RAM_FLAGS_IDX_MASK);
                else
                    rc = pgmPoolTrackFlushGCPhysPTsSlow(pVM, pPage);
                fFlushTLBs = true;
            }
#elif defined(PGMPOOL_WITH_CACHE)
            rc = pgmPoolTrackFlushGCPhysPTsSlow(pVM, pPage);
            fFlushTLBs = true;
#endif
        }

        /* next */
        if (--cPages == 0)
            break;
        i++;
    }

    if (fFlushTLBs && rc == VINF_SUCCESS)
    {
        PGM_INVL_GUEST_TLBS();
        Log(("pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs: flushing guest TLBs\n"));
    }
    else
        Log(("pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs: doesn't flush guest TLBs. rc=%Vrc\n", rc));
    return rc;
}
299
300
/**
 * Deregister a physical page access handler.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_PGM_HANDLER_NOT_FOUND if no handler is registered at GCPhys.
 *
 * @param   pVM     VM Handle.
 * @param   GCPhys  Start physical address the handler was registered with.
 */
PGMDECL(int) PGMHandlerPhysicalDeregister(PVM pVM, RTGCPHYS GCPhys)
{
    /*
     * Find and remove the handler node from the tree; the page state reset
     * and REM notification happen under the PGM lock.
     */
    pgmLock(pVM);
    PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys);
    if (pCur)
    {
        LogFlow(("PGMHandlerPhysicalDeregister: Removing Range %#VGp-%#VGp %s\n",
                 pCur->Core.Key, pCur->Core.KeyLast, HCSTRING(pCur->pszDesc)));

        /*
         * Clear the page bits and notify the REM about this change.
         */
        pgmHandlerPhysicalResetRamFlags(pVM, pCur);
        pgmHandlerPhysicalDeregisterNotifyREM(pVM, pCur);
        pgmUnlock(pVM);
        MMHyperFree(pVM, pCur);
        return VINF_SUCCESS;
    }
    pgmUnlock(pVM);

    AssertMsgFailed(("Didn't find range starting at %VGp\n", GCPhys));
    return VERR_PGM_HANDLER_NOT_FOUND;
}
334
335
/**
 * Notifies REM that a physical access handler range is going away.
 *
 * Shared code with modify.
 *
 * @param   pVM     The VM handle.
 * @param   pCur    The handler being deregistered. Its page states must
 *                  already have been reset (see pgmHandlerPhysicalResetRamFlags).
 */
static void pgmHandlerPhysicalDeregisterNotifyREM(PVM pVM, PPGMPHYSHANDLER pCur)
{
    RTGCPHYS GCPhysStart = pCur->Core.Key;
    RTGCPHYS GCPhysLast = pCur->Core.KeyLast;

    /*
     * Page align the range.
     *
     * Since we've reset (recalculated) the physical handler state of all pages
     * we can make use of the page states to figure out whether a page should be
     * included in the REM notification or not.
     */
    if (    (pCur->Core.Key & PAGE_OFFSET_MASK)
        ||  ((pCur->Core.KeyLast + 1) & PAGE_OFFSET_MASK))
    {
        Assert(pCur->enmType != PGMPHYSHANDLERTYPE_MMIO);

        if (GCPhysStart & PAGE_OFFSET_MASK)
        {
            /* If another handler still covers the partial first page, exclude it
               by rounding the start up; otherwise round it down to include it. */
            PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhysStart);
            if (    pPage
                &&  PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
            {
                RTGCPHYS GCPhys = (GCPhysStart + (PAGE_SIZE - 1)) & X86_PTE_PAE_PG_MASK;
                if (    GCPhys > GCPhysLast
                    ||  GCPhys < GCPhysStart)
                    return;             /* nothing left to notify REM about. */
                GCPhysStart = GCPhys;
            }
            else
                GCPhysStart &= X86_PTE_PAE_PG_MASK;
            Assert(!pPage || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO); /* these are page aligned atm! */
        }

        if (GCPhysLast & PAGE_OFFSET_MASK)
        {
            /* Same treatment for a partial last page: shrink if still handled,
               expand to the page boundary if not. */
            PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhysLast);
            if (    pPage
                &&  PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
            {
                RTGCPHYS GCPhys = (GCPhysLast & X86_PTE_PAE_PG_MASK) - 1;
                if (    GCPhys < GCPhysStart
                    ||  GCPhys > GCPhysLast)
                    return;             /* nothing left to notify REM about. */
                GCPhysLast = GCPhys;
            }
            else
                GCPhysLast |= PAGE_OFFSET_MASK;
            Assert(!pPage || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO); /* these are page aligned atm! */
        }
    }

    /*
     * Tell REM.
     */
    const bool fRestoreAsRAM = pCur->pfnHandlerR3
                            && pCur->enmType != PGMPHYSHANDLERTYPE_MMIO; /** @todo this isn't entirely correct. */
#ifndef IN_RING3
    REMNotifyHandlerPhysicalDeregister(pVM, pCur->enmType, GCPhysStart, GCPhysLast - GCPhysStart + 1, !!pCur->pfnHandlerR3, fRestoreAsRAM);
#else
    REMR3NotifyHandlerPhysicalDeregister(pVM, pCur->enmType, GCPhysStart, GCPhysLast - GCPhysStart + 1, !!pCur->pfnHandlerR3, fRestoreAsRAM);
#endif
}
402
403
/**
 * pgmHandlerPhysicalResetRamFlags helper that checks for
 * other handlers on edge pages.
 *
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPhys      Address inside the edge page to recalculate.
 * @param   fAbove      true to scan handlers at/above GCPhys, false for at/below.
 * @param   ppRamHint   In/out RAM range hint for the page lookup.
 */
DECLINLINE(void) pgmHandlerPhysicalRecalcPageState(PPGM pPGM, RTGCPHYS GCPhys, bool fAbove, PPGMRAMRANGE *ppRamHint)
{
    /*
     * Look for other handlers touching the same page, taking the strictest
     * (highest) state found among them.
     */
    unsigned uState = PGM_PAGE_HNDL_PHYS_STATE_NONE;
    for (;;)
    {
        PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pPGM->CTXSUFF(pTrees)->PhysHandlers, GCPhys, fAbove);
        if (    !pCur
            ||  ((fAbove ? pCur->Core.Key : pCur->Core.KeyLast) >> PAGE_SHIFT) != (GCPhys >> PAGE_SHIFT))
            break;                      /* no (further) handler on this page. */
        unsigned uThisState = pgmHandlerPhysicalCalcState(pCur);
        uState = RT_MAX(uState, uThisState);

        /* next? (stop once the walk would leave the page) */
        RTGCPHYS GCPhysNext = fAbove
                            ? pCur->Core.KeyLast + 1
                            : pCur->Core.Key - 1;
        if ((GCPhysNext >> PAGE_SHIFT) != (GCPhys >> PAGE_SHIFT))
            break;
        GCPhys = GCPhysNext;
    }

    /*
     * Update if we found something that is a higher priority
     * state than the current.
     */
    if (uState != PGM_PAGE_HNDL_PHYS_STATE_NONE)
    {
        PPGMPAGE pPage;
        int rc = pgmPhysGetPageWithHintEx(pPGM, GCPhys, &pPage, ppRamHint);
        if (    RT_SUCCESS(rc)
            &&  PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) < uState)
            PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, uState);
        else
            AssertRC(rc);
    }
}
447
448
449/**
450 * Resets ram range flags.
451 *
452 * @returns VBox status code.
453 * @retval VINF_SUCCESS when shadow PTs was successfully updated.
454 * @param pVM The VM handle.
455 * @param pCur The physical handler.
456 *
457 * @remark We don't start messing with the shadow page tables, as we've already got code
458 * in Trap0e which deals with out of sync handler flags (originally conceived for
459 * global pages).
460 */
461static void pgmHandlerPhysicalResetRamFlags(PVM pVM, PPGMPHYSHANDLER pCur)
462{
463 /*
464 * Iterate the guest ram pages updating the state.
465 */
466 RTUINT cPages = pCur->cPages;
467 RTGCPHYS GCPhys = pCur->Core.Key;
468 PPGMRAMRANGE pRamHint = NULL;
469 PPGM pPGM = &pVM->pgm.s;
470 for (;;)
471 {
472 PPGMPAGE pPage;
473 int rc = pgmPhysGetPageWithHintEx(pPGM, GCPhys, &pPage, &pRamHint);
474 if (RT_SUCCESS(rc))
475 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_NONE);
476 else
477 AssertRC(rc);
478
479 /* next */
480 if (--cPages == 0)
481 break;
482 GCPhys += PAGE_SIZE;
483 }
484
485 /*
486 * Check for partial start and end pages.
487 */
488 if (pCur->Core.Key & PAGE_OFFSET_MASK)
489 pgmHandlerPhysicalRecalcPageState(pPGM, pCur->Core.Key - 1, false /* fAbove */, &pRamHint);
490 if ((pCur->Core.KeyLast & PAGE_OFFSET_MASK) != PAGE_SIZE - 1)
491 pgmHandlerPhysicalRecalcPageState(pPGM, pCur->Core.KeyLast + 1, true /* fAbove */, &pRamHint);
492}
493
494
495/**
496 * Modify a physical page access handler.
497 *
498 * Modification can only be done to the range it self, not the type or anything else.
499 *
500 * @returns VBox status code.
501 * For all return codes other than VERR_PGM_HANDLER_NOT_FOUND and VINF_SUCCESS the range is deregistered
502 * and a new registration must be performed!
503 * @param pVM VM handle.
504 * @param GCPhysCurrent Current location.
505 * @param GCPhys New location.
506 * @param GCPhysLast New last location.
507 */
508PGMDECL(int) PGMHandlerPhysicalModify(PVM pVM, RTGCPHYS GCPhysCurrent, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast)
509{
510 /*
511 * Remove it.
512 */
513 int rc;
514 pgmLock(pVM);
515 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhysCurrent);
516 if (pCur)
517 {
518 /*
519 * Clear the ram flags. (We're gonna move or free it!)
520 */
521 pgmHandlerPhysicalResetRamFlags(pVM, pCur);
522 const bool fRestoreAsRAM = pCur->pfnHandlerR3
523 && pCur->enmType != PGMPHYSHANDLERTYPE_MMIO; /** @todo this isn't entirely correct. */
524
525 /*
526 * Validate the new range, modify and reinsert.
527 */
528 if (GCPhysLast >= GCPhys)
529 {
530 /*
531 * We require the range to be within registered ram.
532 * There is no apparent need to support ranges which cover more than one ram range.
533 */
534 PPGMRAMRANGE pRam = CTXALLSUFF(pVM->pgm.s.pRamRanges);
535 while (pRam && GCPhys > pRam->GCPhysLast)
536 pRam = CTXALLSUFF(pRam->pNext);
537 if ( pRam
538 && GCPhys <= pRam->GCPhysLast
539 && GCPhysLast >= pRam->GCPhys)
540 {
541 pCur->Core.Key = GCPhys;
542 pCur->Core.KeyLast = GCPhysLast;
543 pCur->cPages = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + 1) >> PAGE_SHIFT;
544
545 if (RTAvlroGCPhysInsert(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, &pCur->Core))
546 {
547 /*
548 * Set ram flags, flush shadow PT entries and finally tell REM about this.
549 */
550 rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam);
551 if (rc == VINF_PGM_GCPHYS_ALIASED)
552 {
553 pVM->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
554 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
555 }
556 pVM->pgm.s.fPhysCacheFlushPending = true;
557
558#ifndef IN_RING3
559 REMNotifyHandlerPhysicalModify(pVM, pCur->enmType, GCPhysCurrent, GCPhys,
560 pCur->Core.KeyLast - GCPhys + 1, !!pCur->pfnHandlerR3, fRestoreAsRAM);
561#else
562 REMR3NotifyHandlerPhysicalModify(pVM, pCur->enmType, GCPhysCurrent, GCPhys,
563 pCur->Core.KeyLast - GCPhys + 1, !!pCur->pfnHandlerR3, fRestoreAsRAM);
564#endif
565 pgmUnlock(pVM);
566 Log(("PGMHandlerPhysicalModify: GCPhysCurrent=%VGp -> GCPhys=%VGp GCPhysLast=%VGp\n",
567 GCPhysCurrent, GCPhys, GCPhysLast));
568 return VINF_SUCCESS;
569 }
570
571 AssertMsgFailed(("Conflict! GCPhys=%VGp GCPhysLast=%VGp\n", GCPhys, GCPhysLast));
572 rc = VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
573 }
574 else
575 {
576 AssertMsgFailed(("No RAM range for %VGp-%VGp\n", GCPhys, GCPhysLast));
577 rc = VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
578 }
579 }
580 else
581 {
582 AssertMsgFailed(("Invalid range %VGp-%VGp\n", GCPhys, GCPhysLast));
583 rc = VERR_INVALID_PARAMETER;
584 }
585
586 /*
587 * Invalid new location, free it.
588 * We've only gotta notify REM and free the memory.
589 */
590 pgmHandlerPhysicalDeregisterNotifyREM(pVM, pCur);
591 MMHyperFree(pVM, pCur);
592 }
593 else
594 {
595 AssertMsgFailed(("Didn't find range starting at %VGp\n", GCPhysCurrent));
596 rc = VERR_PGM_HANDLER_NOT_FOUND;
597 }
598
599 pgmUnlock(pVM);
600 return rc;
601}
602
603
604/**
605 * Changes the callbacks associated with a physical access handler.
606 *
607 * @returns VBox status code.
608 * @param pVM VM Handle.
609 * @param GCPhys Start physical address.
610 * @param pfnHandlerR3 The R3 handler.
611 * @param pvUserR3 User argument to the R3 handler.
612 * @param pfnHandlerR0 The R0 handler.
613 * @param pvUserR0 User argument to the R0 handler.
614 * @param pfnHandlerGC The GC handler.
615 * @param pvUserGC User argument to the GC handler.
616 * This must be a GC pointer because it will be relocated!
617 * @param pszDesc Pointer to description string. This must not be freed.
618 */
619PGMDECL(int) PGMHandlerPhysicalChangeCallbacks(PVM pVM, RTGCPHYS GCPhys,
620 R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnHandlerR3, RTR3PTR pvUserR3,
621 R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnHandlerR0, RTR0PTR pvUserR0,
622 GCPTRTYPE(PFNPGMGCPHYSHANDLER) pfnHandlerGC, RTGCPTR pvUserGC,
623 R3PTRTYPE(const char *) pszDesc)
624{
625 /*
626 * Get the handler.
627 */
628 int rc = VINF_SUCCESS;
629 pgmLock(pVM);
630 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys);
631 if (pCur)
632 {
633 /*
634 * Change callbacks.
635 */
636 pCur->pfnHandlerR3 = pfnHandlerR3;
637 pCur->pvUserR3 = pvUserR3;
638 pCur->pfnHandlerR0 = pfnHandlerR0;
639 pCur->pvUserR0 = pvUserR0;
640 pCur->pfnHandlerGC = pfnHandlerGC;
641 pCur->pvUserGC = pvUserGC;
642 pCur->pszDesc = pszDesc;
643 }
644 else
645 {
646 AssertMsgFailed(("Didn't find range starting at %VGp\n", GCPhys));
647 rc = VERR_PGM_HANDLER_NOT_FOUND;
648 }
649
650 pgmUnlock(pVM);
651 return rc;
652}
653
654
/**
 * Splits a physical access handler in two.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_PGM_HANDLER_NOT_FOUND if no handler starts at GCPhys.
 * @retval  VERR_INVALID_PARAMETER if GCPhysSplit is outside the handler range.
 *
 * @param   pVM             VM Handle.
 * @param   GCPhys          Start physical address of the handler.
 * @param   GCPhysSplit     The split address; becomes the start of the 2nd half.
 */
PGMDECL(int) PGMHandlerPhysicalSplit(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysSplit)
{
    AssertReturn(GCPhys < GCPhysSplit, VERR_INVALID_PARAMETER);

    /*
     * Do the allocation without owning the lock.
     */
    PPGMPHYSHANDLER pNew;
    int rc = MMHyperAlloc(pVM, sizeof(*pNew), 0, MM_TAG_PGM_HANDLERS, (void **)&pNew);
    if (VBOX_FAILURE(rc))
        return rc;

    /*
     * Get the handler.
     */
    pgmLock(pVM);
    PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys);
    if (pCur)
    {
        if (GCPhysSplit <= pCur->Core.KeyLast)
        {
            /*
             * Create new handler node for the 2nd half; it inherits the
             * callbacks, type and user arguments from the original.
             */
            *pNew = *pCur;
            pNew->Core.Key = GCPhysSplit;
            pNew->cPages = (pNew->Core.KeyLast - (pNew->Core.Key & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;

            pCur->Core.KeyLast = GCPhysSplit - 1;
            pCur->cPages = (pCur->Core.KeyLast - (pCur->Core.Key & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;

            if (RTAvlroGCPhysInsert(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, &pNew->Core))
            {
                LogFlow(("PGMHandlerPhysicalSplit: %VGp-%VGp and %VGp-%VGp\n",
                         pCur->Core.Key, pCur->Core.KeyLast, pNew->Core.Key, pNew->Core.KeyLast));
                pgmUnlock(pVM);
                return VINF_SUCCESS;
            }
            /* NOTE(review): the insert should never fail here since the range was
               just carved out of pCur, but if it does, pCur is left truncated —
               confirm this "can't happen" path is acceptable. */
            AssertMsgFailed(("whu?\n"));
            rc = VERR_INTERNAL_ERROR;
        }
        else
        {
            AssertMsgFailed(("outside range: %VGp-%VGp split %VGp\n", pCur->Core.Key, pCur->Core.KeyLast, GCPhysSplit));
            rc = VERR_INVALID_PARAMETER;
        }
    }
    else
    {
        AssertMsgFailed(("Didn't find range starting at %VGp\n", GCPhys));
        rc = VERR_PGM_HANDLER_NOT_FOUND;
    }
    pgmUnlock(pVM);
    /* Failure paths only: release the unused node allocated up front. */
    MMHyperFree(pVM, pNew);
    return rc;
}
719
720
/**
 * Joins up two adjacent physical access handlers which have the same callbacks.
 *
 * Note that only the handler callback pointers are compared; the pvUser
 * arguments are not, and the joined handler keeps the first handler's.
 *
 * @returns VBox status code.
 * @param   pVM             VM Handle.
 * @param   GCPhys1         Start physical address of the first handler.
 * @param   GCPhys2         Start physical address of the second handler.
 */
PGMDECL(int) PGMHandlerPhysicalJoin(PVM pVM, RTGCPHYS GCPhys1, RTGCPHYS GCPhys2)
{
    /*
     * Get the handlers.
     */
    int rc;
    pgmLock(pVM);
    PPGMPHYSHANDLER pCur1 = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys1);
    if (pCur1)
    {
        PPGMPHYSHANDLER pCur2 = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys2);
        if (pCur2)
        {
            /*
             * Make sure that they are adjacent, and that they've got the same callbacks.
             */
            if (pCur1->Core.KeyLast + 1 == pCur2->Core.Key)
            {
                if (    pCur1->pfnHandlerGC == pCur2->pfnHandlerGC
                    &&  pCur1->pfnHandlerR0 == pCur2->pfnHandlerR0
                    &&  pCur1->pfnHandlerR3 == pCur2->pfnHandlerR3)
                {
                    /* Remove the second node and extend the first to cover it. */
                    PPGMPHYSHANDLER pCur3 = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys2);
                    if (pCur3 == pCur2)
                    {
                        pCur1->Core.KeyLast  = pCur2->Core.KeyLast;
                        pCur1->cPages        = (pCur1->Core.KeyLast - (pCur1->Core.Key & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
                        LogFlow(("PGMHandlerPhysicalJoin: %VGp-%VGp %VGp-%VGp\n",
                                 pCur1->Core.Key, pCur1->Core.KeyLast, pCur2->Core.Key, pCur2->Core.KeyLast));
                        pgmUnlock(pVM);
                        MMHyperFree(pVM, pCur2);
                        return VINF_SUCCESS;
                    }

                    /* Should be impossible: the tree removed a node other than the
                       one we looked up, so this assertion always fires when reached. */
                    Assert(pCur3 == pCur2);
                    rc = VERR_INTERNAL_ERROR;
                }
                else
                {
                    AssertMsgFailed(("mismatching handlers\n"));
                    rc = VERR_ACCESS_DENIED;
                }
            }
            else
            {
                AssertMsgFailed(("not adjacent: %VGp-%VGp %VGp-%VGp\n",
                                 pCur1->Core.Key, pCur1->Core.KeyLast, pCur2->Core.Key, pCur2->Core.KeyLast));
                rc = VERR_INVALID_PARAMETER;
            }
        }
        else
        {
            AssertMsgFailed(("Didn't find range starting at %VGp\n", GCPhys2));
            rc = VERR_PGM_HANDLER_NOT_FOUND;
        }
    }
    else
    {
        AssertMsgFailed(("Didn't find range starting at %VGp\n", GCPhys1));
        rc = VERR_PGM_HANDLER_NOT_FOUND;
    }
    pgmUnlock(pVM);
    return rc;

}
794
795
/**
 * Resets any modifications to individual pages in a physical
 * page access handler region.
 *
 * This is used in pair with PGMHandlerPhysicalPageTempOff().
 *
 * @returns VBox status code.
 * @param   pVM     VM Handle
 * @param   GCPhys  Start physical address earlier passed to PGMR3HandlerPhysicalRegister().
 */
PGMDECL(int) PGMHandlerPhysicalReset(PVM pVM, RTGCPHYS GCPhys)
{
    pgmLock(pVM);

    /*
     * Find the handler.
     */
    int rc;
    PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys);
    if (pCur)
    {
        /*
         * Validate type.
         */
        switch (pCur->enmType)
        {
            case PGMPHYSHANDLERTYPE_PHYSICAL_WRITE:
            case PGMPHYSHANDLERTYPE_PHYSICAL_ALL:
            {
                /*
                 * Set the flags and flush shadow PT entries.
                 */
                STAM_COUNTER_INC(&pVM->pgm.s.StatHandlePhysicalReset);
                PPGMRAMRANGE pRam = pgmPhysGetRange(&pVM->pgm.s, GCPhys);
                Assert(pRam);
                rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam);
                if (rc == VINF_PGM_GCPHYS_ALIASED)
                {
                    pVM->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
                    VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
                }
                pVM->pgm.s.fPhysCacheFlushPending = true;

                /* The aliased status was consumed above (CR3 sync scheduled),
                   so report plain success to the caller. */
                rc = VINF_SUCCESS;
                break;
            }

            /*
             * Invalid.
             */
            case PGMPHYSHANDLERTYPE_MMIO:
                AssertMsgFailed(("Can't reset type %d!\n", pCur->enmType));
                rc = VERR_INTERNAL_ERROR;
                break;

            default:
                AssertMsgFailed(("Invalid type %d! Corruption!\n", pCur->enmType));
                rc = VERR_INTERNAL_ERROR;
                break;
        }
    }
    else
    {
        AssertMsgFailed(("Didn't find MMIO Range starting at %#x\n", GCPhys));
        rc = VERR_PGM_HANDLER_NOT_FOUND;
    }

    pgmUnlock(pVM);
    return rc;
}
866
867
868/**
869 * Temporarily turns off the access monitoring of a page within a monitored
870 * physical write/all page access handler region.
871 *
872 * Use this when no further \#PFs are required for that page. Be aware that
873 * a page directory sync might reset the flags, and turn on access monitoring
874 * for the page.
875 *
876 * The caller must do required page table modifications.
877 *
878 * @returns VBox status code.
879 * @param pVM VM Handle
880 * @param GCPhys Start physical address earlier passed to PGMR3HandlerPhysicalRegister().
881 * This must be a fully page aligned range or we risk messing up other
882 * handlers installed for the start and end pages.
883 * @param GCPhysPage Physical address of the page to turn off access monitoring for.
884 */
885PGMDECL(int) PGMHandlerPhysicalPageTempOff(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage)
886{
887 /*
888 * Validate the range.
889 */
890 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys);
891 if (pCur)
892 {
893 if ( GCPhysPage >= pCur->Core.Key
894 && GCPhysPage <= pCur->Core.KeyLast)
895 {
896 Assert(!(pCur->Core.Key & PAGE_OFFSET_MASK));
897 Assert((pCur->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
898
899 AssertReturn( pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_WRITE
900 || pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_ALL,
901 VERR_ACCESS_DENIED);
902
903 /*
904 * Change the page status.
905 */
906 PPGMPAGE pPage;
907 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysPage, &pPage);
908 AssertRCReturn(rc, rc);
909 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
910 return VINF_SUCCESS;
911 }
912
913 AssertMsgFailed(("The page %#x is outside the range %#x-%#x\n",
914 GCPhysPage, pCur->Core.Key, pCur->Core.KeyLast));
915 return VERR_INVALID_PARAMETER;
916 }
917
918 AssertMsgFailed(("Specified physical handler start address %#x is invalid.\n", GCPhys));
919 return VERR_PGM_HANDLER_NOT_FOUND;
920}
921
922
923/**
924 * Turns access monitoring of a page within a monitored
925 * physical write/all page access handler regio back on.
926 *
927 * The caller must do required page table modifications.
928 *
929 * @returns VBox status code.
930 * @param pVM VM Handle
931 * @param GCPhys Start physical address earlier passed to PGMR3HandlerPhysicalRegister().
932 * This must be a fully page aligned range or we risk messing up other
933 * handlers installed for the start and end pages.
934 * @param GCPhysPage Physical address of the page to turn on access monitoring for.
935 */
936PGMDECL(int) PGMHandlerPhysicalPageReset(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage)
937{
938 /*
939 * Validate the range.
940 */
941 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys);
942 if (pCur)
943 {
944 if ( GCPhysPage >= pCur->Core.Key
945 && GCPhysPage <= pCur->Core.KeyLast)
946 {
947 Assert(!(pCur->Core.Key & PAGE_OFFSET_MASK));
948 Assert((pCur->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
949
950 AssertReturn( pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_WRITE
951 || pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_ALL,
952 VERR_ACCESS_DENIED);
953
954 /*
955 * Change the page status.
956 */
957 PPGMPAGE pPage;
958 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysPage, &pPage);
959 AssertRCReturn(rc, rc);
960 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, pgmHandlerPhysicalCalcState(pCur));
961 return VINF_SUCCESS;
962 }
963
964 AssertMsgFailed(("The page %#x is outside the range %#x-%#x\n",
965 GCPhysPage, pCur->Core.Key, pCur->Core.KeyLast));
966 return VERR_INVALID_PARAMETER;
967 }
968
969 AssertMsgFailed(("Specified physical handler start address %#x is invalid.\n", GCPhys));
970 return VERR_PGM_HANDLER_NOT_FOUND;
971}
972
973
974/**
975 * Checks if a physical range is handled
976 *
977 * @returns boolean
978 * @param pVM VM Handle
979 * @param GCPhys Start physical address earlier passed to PGMR3HandlerPhysicalRegister().
980 */
981PGMDECL(bool) PGMHandlerPhysicalIsRegistered(PVM pVM, RTGCPHYS GCPhys)
982{
983 /*
984 * Find the handler.
985 */
986 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys);
987 if (pCur)
988 {
989 if ( GCPhys >= pCur->Core.Key
990 && GCPhys <= pCur->Core.KeyLast)
991 {
992 Assert( pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_WRITE
993 || pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_ALL
994 || pCur->enmType == PGMPHYSHANDLERTYPE_MMIO);
995 return true;
996 }
997 }
998
999 return false;
1000}
1001
1002
1003/**
1004 * Search for virtual handler with matching physical address
1005 *
1006 * @returns VBox status code
1007 * @param pVM The VM handle.
1008 * @param GCPhys GC physical address to search for.
1009 * @param ppVirt Where to store the pointer to the virtual handler structure.
1010 * @param piPage Where to store the pointer to the index of the cached physical page.
1011 */
1012int pgmHandlerVirtualFindByPhysAddr(PVM pVM, RTGCPHYS GCPhys, PPGMVIRTHANDLER *ppVirt, unsigned *piPage)
1013{
1014 STAM_PROFILE_START(CTXSUFF(&pVM->pgm.s.StatVirtHandleSearchByPhys), a);
1015 Assert(ppVirt);
1016
1017 PPGMPHYS2VIRTHANDLER pCur;
1018 pCur = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysRangeGet(&CTXSUFF(pVM->pgm.s.pTrees)->PhysToVirtHandlers, GCPhys);
1019 if (pCur)
1020 {
1021 /* found a match! */
1022#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
1023 AssertRelease(pCur->offNextAlias & PGMPHYS2VIRTHANDLER_IS_HEAD);
1024#endif
1025 *ppVirt = (PPGMVIRTHANDLER)((uintptr_t)pCur + pCur->offVirtHandler);
1026 *piPage = pCur - &(*ppVirt)->aPhysToVirt[0];
1027
1028 LogFlow(("PHYS2VIRT: found match for %VGp -> %VGv *piPage=%#x\n", GCPhys, (*ppVirt)->GCPtr, *piPage));
1029 STAM_PROFILE_STOP(CTXSUFF(&pVM->pgm.s.StatVirtHandleSearchByPhys), a);
1030 return VINF_SUCCESS;
1031 }
1032
1033 *ppVirt = NULL;
1034 STAM_PROFILE_STOP(CTXSUFF(&pVM->pgm.s.StatVirtHandleSearchByPhys), a);
1035 return VERR_PGM_HANDLER_NOT_FOUND;
1036}
1037
1038
1039/**
1040 * Deal with aliases in phys2virt.
1041 *
1042 * As pointed out by the various todos, this currently only deals with
1043 * aliases where the two ranges match 100%.
1044 *
1045 * @param pVM The VM handle.
1046 * @param pPhys2Virt The node we failed insert.
1047 */
1048static void pgmHandlerVirtualInsertAliased(PVM pVM, PPGMPHYS2VIRTHANDLER pPhys2Virt)
1049{
1050 /*
1051 * First find the node which is conflicting with us.
1052 */
1053 /** @todo Deal with partial overlapping. (Unlikly situation, so I'm too lazy to do anything about it now.) */
1054 /** @todo check if the current head node covers the ground we do. This is highly unlikely
1055 * and I'm too lazy to implement this now as it will require sorting the list and stuff like that. */
1056 PPGMPHYS2VIRTHANDLER pHead = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTXSUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key);
1057#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
1058 AssertReleaseMsg(pHead != pPhys2Virt, ("%VGp-%VGp offVirtHandler=%#RX32\n",
1059 pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler));
1060#endif
1061 if (RT_UNLIKELY(!pHead || pHead->Core.KeyLast != pPhys2Virt->Core.KeyLast))
1062 {
1063 /** @todo do something clever here... */
1064 LogRel(("pgmHandlerVirtualInsertAliased: %VGp-%VGp\n", pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast));
1065 pPhys2Virt->offNextAlias = 0;
1066 return;
1067 }
1068
1069 /*
1070 * Insert ourselves as the next node.
1071 */
1072 if (!(pHead->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK))
1073 pPhys2Virt->offNextAlias = PGMPHYS2VIRTHANDLER_IN_TREE;
1074 else
1075 {
1076 PPGMPHYS2VIRTHANDLER pNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pHead + (pHead->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
1077 pPhys2Virt->offNextAlias = ((intptr_t)pNext - (intptr_t)pPhys2Virt)
1078 | PGMPHYS2VIRTHANDLER_IN_TREE;
1079 }
1080 pHead->offNextAlias = ((intptr_t)pPhys2Virt - (intptr_t)pHead)
1081 | (pHead->offNextAlias & ~PGMPHYS2VIRTHANDLER_OFF_MASK);
1082 Log(("pgmHandlerVirtualInsertAliased: %VGp-%VGp offNextAlias=%#RX32\n", pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias));
1083}
1084
1085
1086/**
1087 * Resets one virtual handler range.
1088 *
1089 * This is called by HandlerVirtualUpdate when it has detected some kind of
1090 * problem and have started clearing the virtual handler page states (or
1091 * when there have been registration/deregistrations). For this reason this
1092 * function will only update the page status if it's lower than desired.
1093 *
1094 * @returns 0
1095 * @param pNode Pointer to a PGMVIRTHANDLER.
1096 * @param pvUser The VM handle.
1097 */
1098DECLCALLBACK(int) pgmHandlerVirtualResetOne(PAVLROGCPTRNODECORE pNode, void *pvUser)
1099{
1100 PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)pNode;
1101 PVM pVM = (PVM)pvUser;
1102
1103 /*
1104 * Iterate the pages and apply the new state.
1105 */
1106 unsigned uState = pgmHandlerVirtualCalcState(pCur);
1107 PPGMRAMRANGE pRamHint = NULL;
1108 RTGCUINTPTR offPage = ((RTGCUINTPTR)pCur->GCPtr & PAGE_OFFSET_MASK);
1109 RTGCUINTPTR cbLeft = pCur->cb;
1110 for (unsigned iPage = 0; iPage < pCur->cPages; iPage++)
1111 {
1112 PPGMPHYS2VIRTHANDLER pPhys2Virt = &pCur->aPhysToVirt[iPage];
1113 if (pPhys2Virt->Core.Key != NIL_RTGCPHYS)
1114 {
1115 /*
1116 * Update the page state wrt virtual handlers.
1117 */
1118 PPGMPAGE pPage;
1119 int rc = pgmPhysGetPageWithHintEx(&pVM->pgm.s, pPhys2Virt->Core.Key, &pPage, &pRamHint);
1120 if ( RT_SUCCESS(rc)
1121 && PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) < uState)
1122 PGM_PAGE_SET_HNDL_VIRT_STATE(pPage, uState);
1123 else
1124 AssertRC(rc);
1125
1126 /*
1127 * Need to insert the page in the Phys2Virt lookup tree?
1128 */
1129 if (pPhys2Virt->Core.KeyLast == NIL_RTGCPHYS)
1130 {
1131#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
1132 AssertRelease(!pPhys2Virt->offNextAlias);
1133#endif
1134 unsigned cbPhys = cbLeft;
1135 if (cbPhys > PAGE_SIZE - offPage)
1136 cbPhys = PAGE_SIZE - offPage;
1137 else
1138 Assert(iPage == pCur->cPages - 1);
1139 pPhys2Virt->Core.KeyLast = pPhys2Virt->Core.Key + cbPhys - 1; /* inclusive */
1140 pPhys2Virt->offNextAlias = PGMPHYS2VIRTHANDLER_IS_HEAD | PGMPHYS2VIRTHANDLER_IN_TREE;
1141 if (!RTAvlroGCPhysInsert(&pVM->pgm.s.CTXSUFF(pTrees)->PhysToVirtHandlers, &pPhys2Virt->Core))
1142 pgmHandlerVirtualInsertAliased(pVM, pPhys2Virt);
1143#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
1144 else
1145 AssertReleaseMsg(RTAvlroGCPhysGet(&pVM->pgm.s.CTXSUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key) == &pPhys2Virt->Core,
1146 ("%VGp-%VGp offNextAlias=%#RX32\n",
1147 pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias));
1148#endif
1149 Log2(("PHYS2VIRT: Insert physical range %VGp-%VGp offNextAlias=%#RX32 %s\n",
1150 pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias, R3STRING(pCur->pszDesc)));
1151 }
1152 }
1153 cbLeft -= PAGE_SIZE - offPage;
1154 offPage = 0;
1155 }
1156
1157 return 0;
1158}
1159
1160
1161#if defined(VBOX_STRICT) || defined(LOG_ENABLED)
1162/**
1163 * Worker for pgmHandlerVirtualDumpPhysPages.
1164 *
1165 * @returns 0 (continue enumeration).
1166 * @param pNode The virtual handler node.
1167 * @param pvUser User argument, unused.
1168 */
1169static DECLCALLBACK(int) pgmHandlerVirtualDumpPhysPagesCallback(PAVLROGCPHYSNODECORE pNode, void *pvUser)
1170{
1171 PPGMPHYS2VIRTHANDLER pCur = (PPGMPHYS2VIRTHANDLER)pNode;
1172 PPGMVIRTHANDLER pVirt = (PPGMVIRTHANDLER)((uintptr_t)pCur + pCur->offVirtHandler);
1173 Log(("PHYS2VIRT: Range %VGp-%VGp for virtual handler: %s\n", pCur->Core.Key, pCur->Core.KeyLast, pVirt->pszDesc));
1174 return 0;
1175}
1176
1177
1178/**
1179 * Assertion / logging helper for dumping all the
1180 * virtual handlers to the log.
1181 *
1182 * @param pVM Pointer to the shared VM structure.
1183 */
1184void pgmHandlerVirtualDumpPhysPages(PVM pVM)
1185{
1186 RTAvlroGCPhysDoWithAll(CTXSUFF(&pVM->pgm.s.pTrees)->PhysToVirtHandlers, true /* from left */,
1187 pgmHandlerVirtualDumpPhysPagesCallback, 0);
1188}
1189#endif /* VBOX_STRICT || LOG_ENABLED */
1190
1191#ifdef VBOX_STRICT
1192
1193/**
1194 * State structure used by the PGMAssertHandlerAndFlagsInSync() function
1195 * and its AVL enumerators.
1196 */
1197typedef struct PGMAHAFIS
1198{
1199 /** The current physical address. */
1200 RTGCPHYS GCPhys;
1201 /** The state we've calculated. */
1202 unsigned uVirtStateFound;
1203 /** The state we're matching up to. */
1204 unsigned uVirtState;
1205 /** Number of errors. */
1206 unsigned cErrors;
1207 /** The VM handle. */
1208 PVM pVM;
1209} PGMAHAFIS, *PPGMAHAFIS;
1210
1211
#if 0 /* unused */
/**
 * Verify virtual handler by matching physical address.
 *
 * NOTE(review): dead code, compiled out via #if 0. It contains a bare
 * 'error' placeholder statement and will not build if re-enabled as-is.
 *
 * @returns 0
 * @param   pNode   Pointer to a PGMVIRTHANDLER.
 * @param   pvUser  Pointer to user parameter.
 */
static DECLCALLBACK(int) pgmHandlerVirtualVerifyOneByPhysAddr(PAVLROGCPTRNODECORE pNode, void *pvUser)
{
    PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)pNode;
    PPGMAHAFIS pState = (PPGMAHAFIS)pvUser;

    for (unsigned iPage = 0; iPage < pCur->cPages; iPage++)
    {
        if ((pCur->aPhysToVirt[iPage].Core.Key & X86_PTE_PAE_PG_MASK) == pState->GCPhys)
        {
            unsigned uState = pgmHandlerVirtualCalcState(pCur);
            if (pState->uVirtState < uState)
            {
                error   /* placeholder - error handling never implemented. */
            }

            if (pState->uVirtState == uState)
                break; //??
        }
    }
    return 0;
}
#endif /* unused */
1242
1243
1244/**
1245 * Verify a virtual handler (enumeration callback).
1246 *
1247 * Called by PGMAssertHandlerAndFlagsInSync to check the sanity of all
1248 * the virtual handlers, esp. that the physical addresses matches up.
1249 *
1250 * @returns 0
1251 * @param pNode Pointer to a PGMVIRTHANDLER.
1252 * @param pvUser Pointer to a PPGMAHAFIS structure.
1253 */
1254static DECLCALLBACK(int) pgmHandlerVirtualVerifyOne(PAVLROGCPTRNODECORE pNode, void *pvUser)
1255{
1256 PPGMVIRTHANDLER pVirt = (PPGMVIRTHANDLER)pNode;
1257 PPGMAHAFIS pState = (PPGMAHAFIS)pvUser;
1258 PVM pVM = pState->pVM;
1259
1260 /*
1261 * Validate the type and calc state.
1262 */
1263 switch (pVirt->enmType)
1264 {
1265 case PGMVIRTHANDLERTYPE_WRITE:
1266 case PGMVIRTHANDLERTYPE_ALL:
1267 break;
1268 default:
1269 AssertMsgFailed(("unknown/wrong enmType=%d\n", pVirt->enmType));
1270 pState->cErrors++;
1271 return 0;
1272 }
1273 const unsigned uState = pgmHandlerVirtualCalcState(pVirt);
1274
1275 /*
1276 * Check key alignment.
1277 */
1278 if ( (pVirt->aPhysToVirt[0].Core.Key & PAGE_OFFSET_MASK) != ((RTGCUINTPTR)pVirt->GCPtr & PAGE_OFFSET_MASK)
1279 && pVirt->aPhysToVirt[0].Core.Key != NIL_RTGCPHYS)
1280 {
1281 AssertMsgFailed(("virt handler phys has incorrect key! %VGp %VGv %s\n",
1282 pVirt->aPhysToVirt[0].Core.Key, pVirt->GCPtr, HCSTRING(pVirt->pszDesc)));
1283 pState->cErrors++;
1284 }
1285
1286 if ( (pVirt->aPhysToVirt[pVirt->cPages - 1].Core.KeyLast & PAGE_OFFSET_MASK) != ((RTGCUINTPTR)pVirt->GCPtrLast & PAGE_OFFSET_MASK)
1287 && pVirt->aPhysToVirt[pVirt->cPages - 1].Core.Key != NIL_RTGCPHYS)
1288 {
1289 AssertMsgFailed(("virt handler phys has incorrect key! %VGp %VGv %s\n",
1290 pVirt->aPhysToVirt[pVirt->cPages - 1].Core.KeyLast, pVirt->GCPtrLast, HCSTRING(pVirt->pszDesc)));
1291 pState->cErrors++;
1292 }
1293
1294 /*
1295 * Check pages for sanity and state.
1296 */
1297 RTGCUINTPTR GCPtr = (RTGCUINTPTR)pVirt->GCPtr;
1298 for (unsigned iPage = 0; iPage < pVirt->cPages; iPage++, GCPtr += PAGE_SIZE)
1299 {
1300 RTGCPHYS GCPhysGst;
1301 uint64_t fGst;
1302 int rc = PGMGstGetPage(pVM, (RTGCPTR)GCPtr, &fGst, &GCPhysGst);
1303 if ( rc == VERR_PAGE_NOT_PRESENT
1304 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
1305 {
1306 if (pVirt->aPhysToVirt[iPage].Core.Key != NIL_RTGCPHYS)
1307 {
1308 AssertMsgFailed(("virt handler phys out of sync. %VGp GCPhysNew=~0 iPage=%#x %VGv %s\n",
1309 pVirt->aPhysToVirt[iPage].Core.Key, iPage, GCPtr, HCSTRING(pVirt->pszDesc)));
1310 pState->cErrors++;
1311 }
1312 continue;
1313 }
1314
1315 AssertRCReturn(rc, 0);
1316 if ((pVirt->aPhysToVirt[iPage].Core.Key & X86_PTE_PAE_PG_MASK) != GCPhysGst)
1317 {
1318 AssertMsgFailed(("virt handler phys out of sync. %VGp GCPhysGst=%VGp iPage=%#x %VGv %s\n",
1319 pVirt->aPhysToVirt[iPage].Core.Key, GCPhysGst, iPage, GCPtr, HCSTRING(pVirt->pszDesc)));
1320 pState->cErrors++;
1321 continue;
1322 }
1323
1324 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhysGst);
1325 if (!pPage)
1326 {
1327 AssertMsgFailed(("virt handler getting ram flags. GCPhysGst=%VGp iPage=%#x %VGv %s\n",
1328 GCPhysGst, iPage, GCPtr, HCSTRING(pVirt->pszDesc)));
1329 pState->cErrors++;
1330 continue;
1331 }
1332
1333 if (PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) < uState)
1334 {
1335 AssertMsgFailed(("virt handler state mismatch. HCPhys=%VHp GCPhysGst=%VGp iPage=%#x %VGv state=%d expected>=%d %s\n",
1336 pPage->HCPhys, GCPhysGst, iPage, GCPtr, PGM_PAGE_GET_HNDL_VIRT_STATE(pPage), uState, HCSTRING(pVirt->pszDesc)));
1337 pState->cErrors++;
1338 continue;
1339 }
1340 } /* for pages in virtual mapping. */
1341
1342 return 0;
1343}
1344
1345
1346/**
1347 * Asserts that the handlers+guest-page-tables == ramrange-flags and
1348 * that the physical addresses associated with virtual handlers are correct.
1349 *
1350 * @returns Number of mismatches.
1351 * @param pVM The VM handle.
1352 */
1353PGMDECL(unsigned) PGMAssertHandlerAndFlagsInSync(PVM pVM)
1354{
1355 PPGM pPGM = &pVM->pgm.s;
1356 PGMAHAFIS State;
1357 State.GCPhys = 0;
1358 State.uVirtState = 0;
1359 State.uVirtStateFound = 0;
1360 State.cErrors = 0;
1361 State.pVM = pVM;
1362
1363 /*
1364 * Check the RAM flags against the handlers.
1365 */
1366 for (PPGMRAMRANGE pRam = CTXALLSUFF(pPGM->pRamRanges); pRam; pRam = CTXALLSUFF(pRam->pNext))
1367 {
1368 const unsigned cPages = pRam->cb >> PAGE_SHIFT;
1369 for (unsigned iPage = 0; iPage < cPages; iPage++)
1370 {
1371 PGMPAGE const *pPage = &pRam->aPages[iPage];
1372 if (PGM_PAGE_HAVE_ANY_HANDLERS(pPage))
1373 {
1374 State.GCPhys = pRam->GCPhys + (iPage << PAGE_SHIFT);
1375
1376 /*
1377 * Physical first - calculate the state based on the handlers
1378 * active on the page, then compare.
1379 */
1380 if (PGM_PAGE_HAVE_ANY_PHYSICAL_HANDLERS(pPage))
1381 {
1382 /* the first */
1383 PPGMPHYSHANDLER pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pPGM->CTXSUFF(pTrees)->PhysHandlers, State.GCPhys);
1384 if (!pPhys)
1385 {
1386 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pPGM->CTXSUFF(pTrees)->PhysHandlers, State.GCPhys, true);
1387 if ( pPhys
1388 && pPhys->Core.Key > (State.GCPhys + PAGE_SIZE - 1))
1389 pPhys = NULL;
1390 Assert(!pPhys || pPhys->Core.Key >= State.GCPhys);
1391 }
1392 if (pPhys)
1393 {
1394 unsigned uState = pgmHandlerPhysicalCalcState(pPhys);
1395
1396 /* more? */
1397 while (pPhys->Core.KeyLast < (State.GCPhys | PAGE_OFFSET_MASK))
1398 {
1399 PPGMPHYSHANDLER pPhys2 = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pPGM->CTXSUFF(pTrees)->PhysHandlers,
1400 pPhys->Core.KeyLast + 1, true);
1401 if ( !pPhys2
1402 || pPhys2->Core.Key > (State.GCPhys | PAGE_OFFSET_MASK))
1403 break;
1404 unsigned uState2 = pgmHandlerPhysicalCalcState(pPhys2);
1405 uState = RT_MAX(uState, uState2);
1406 pPhys = pPhys2;
1407 }
1408
1409 /* compare.*/
1410 if ( PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != uState
1411 && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_DISABLED)
1412 {
1413 AssertMsgFailed(("ram range vs phys handler flags mismatch. GCPhys=%RGp state=%d expected=%d %s\n",
1414 State.GCPhys, PGM_PAGE_GET_HNDL_PHYS_STATE(pPage), uState, pPhys->pszDesc));
1415 State.cErrors++;
1416 }
1417
1418#ifdef IN_RING3
1419 /* validate that REM is handling it. */
1420 if ( !REMR3IsPageAccessHandled(pVM, State.GCPhys)
1421 /* ignore shadowed ROM for the time being. */ /// @todo PAGE FLAGS
1422 && (pPage->HCPhys & (MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO2)) != (MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO2))
1423 {
1424 AssertMsgFailed(("ram range vs phys handler REM mismatch. GCPhys=%RGp state=%d %s\n",
1425 State.GCPhys, PGM_PAGE_GET_HNDL_PHYS_STATE(pPage), pPhys->pszDesc));
1426 State.cErrors++;
1427 }
1428#endif
1429 }
1430 else
1431 {
1432 AssertMsgFailed(("ram range vs phys handler mismatch. no handler for GCPhys=%RGp\n", State.GCPhys));
1433 State.cErrors++;
1434 }
1435 }
1436
1437 /*
1438 * Virtual handlers.
1439 */
1440 if (PGM_PAGE_HAVE_ACTIVE_VIRTUAL_HANDLERS(pPage))
1441 {
1442 State.uVirtState = PGM_PAGE_GET_HNDL_VIRT_STATE(pPage);
1443#if 1
1444 /* locate all the matching physical ranges. */
1445 State.uVirtStateFound = PGM_PAGE_HNDL_VIRT_STATE_NONE;
1446 RTGCPHYS GCPhysKey = State.GCPhys;
1447 for (;;)
1448 {
1449 PPGMPHYS2VIRTHANDLER pPhys2Virt = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGetBestFit(&CTXSUFF(pVM->pgm.s.pTrees)->PhysToVirtHandlers,
1450 GCPhysKey, true /* above-or-equal */);
1451 if ( !pPhys2Virt
1452 || (pPhys2Virt->Core.Key & X86_PTE_PAE_PG_MASK) != State.GCPhys)
1453 break;
1454
1455 /* the head */
1456 GCPhysKey = pPhys2Virt->Core.KeyLast;
1457 PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)((uintptr_t)pPhys2Virt + pPhys2Virt->offVirtHandler);
1458 unsigned uState = pgmHandlerVirtualCalcState(pCur);
1459 State.uVirtStateFound = RT_MAX(State.uVirtStateFound, uState);
1460
1461 /* any aliases */
1462 while (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK)
1463 {
1464 pPhys2Virt = (PPGMPHYS2VIRTHANDLER)((uintptr_t)pPhys2Virt + (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
1465 pCur = (PPGMVIRTHANDLER)((uintptr_t)pPhys2Virt + pPhys2Virt->offVirtHandler);
1466 uState = pgmHandlerVirtualCalcState(pCur);
1467 State.uVirtStateFound = RT_MAX(State.uVirtStateFound, uState);
1468 }
1469
1470 /* done? */
1471 if ((GCPhysKey & X86_PTE_PAE_PG_MASK) != State.GCPhys)
1472 break;
1473 }
1474#else
1475 /* very slow */
1476 RTAvlroGCPtrDoWithAll(CTXSUFF(&pVM->pgm.s.pTrees)->VirtHandlers, true, pgmHandlerVirtualVerifyOneByPhysAddr, &State);
1477#endif
1478 if (State.uVirtState != State.uVirtStateFound)
1479 {
1480 AssertMsgFailed(("ram range vs virt handler flags mismatch. GCPhys=%RGp uVirtState=%#x uVirtStateFound=%#x\n",
1481 State.GCPhys, State.uVirtState, State.uVirtStateFound));
1482 State.cErrors++;
1483 }
1484 }
1485 }
1486 } /* foreach page in ram range. */
1487 } /* foreach ram range. */
1488
1489 /*
1490 * Check that the physical addresses of the virtual handlers matches up
1491 * and that they are otherwise sane.
1492 */
1493 RTAvlroGCPtrDoWithAll(CTXSUFF(&pVM->pgm.s.pTrees)->VirtHandlers, true, pgmHandlerVirtualVerifyOne, &State);
1494
1495 /*
1496 * Do the reverse check for physical handlers.
1497 */
1498 /** @todo */
1499
1500 return State.cErrors;
1501}
1502
1503#endif /* VBOX_STRICT */
1504
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette