VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp@ 38025

Last change on this file since 38025 was 37354, checked in by vboxsync, 14 years ago

PGM: Fixed locking issues in PGMR3PhysMMIORegister and PGMR3PhysMMIODeregister. Also addressed a harmless one in PGMR3PhysRomRegister (only used at init time, so no races). Fortified the code with more lock assertions, replacing the incorrect PGMIsLocked() checks (we only care whether the current thread is the lock owner). Cleaned up some ReturnStmt macros and added more of them.
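
For reference, the pattern this change introduces looks roughly like the sketch below (a hypothetical helper, not part of the file; pgmLock/pgmUnlock, pgmPhysGetPageEx, PGM_LOCK_ASSERT_OWNER and AssertReturnStmt are the primitives used in the source that follows):

    /* In internal helpers, instead of Assert(PGMIsLocked(pVM)), assert that
       the *calling thread* owns the lock: */
    PGM_LOCK_ASSERT_OWNER(pVM);

    /* Hypothetical worker showing the ReturnStmt style: execute the unlock
       statement and return in a single macro on the failure path. */
    static int pgmExampleWorker(PVM pVM, RTGCPHYS GCPhys)
    {
        pgmLock(pVM);
        PPGMPAGE pPage;
        int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
        AssertReturnStmt(RT_SUCCESS(rc), pgmUnlock(pVM), rc);
        pgmUnlock(pVM);
        return VINF_SUCCESS;
    }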

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 70.2 KB
1/* $Id: PGMAllHandler.cpp 37354 2011-06-07 15:05:32Z vboxsync $ */
2/** @file
3 * PGM - Page Manager / Monitor, Access Handlers.
4 */
5
6/*
7 * Copyright (C) 2006-2010 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_PGM
23#include <VBox/vmm/dbgf.h>
24#include <VBox/vmm/pgm.h>
25#include <VBox/vmm/iom.h>
26#include <VBox/vmm/mm.h>
27#include <VBox/vmm/em.h>
28#include <VBox/vmm/stam.h>
29#include <VBox/vmm/rem.h>
32#include "PGMInternal.h"
33#include <VBox/vmm/vm.h>
34#include "PGMInline.h"
35
36#include <VBox/log.h>
37#include <iprt/assert.h>
38#include <iprt/asm-amd64-x86.h>
39#include <iprt/string.h>
40#include <VBox/param.h>
41#include <VBox/err.h>
42#include <VBox/vmm/selm.h>
43
44
45/*******************************************************************************
46* Internal Functions *
47*******************************************************************************/
48static int pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVM pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam);
49static void pgmHandlerPhysicalDeregisterNotifyREM(PVM pVM, PPGMPHYSHANDLER pCur);
50static void pgmHandlerPhysicalResetRamFlags(PVM pVM, PPGMPHYSHANDLER pCur);
51
52
53
54/**
55 * Register an access handler for a physical range.
56 *
57 * @returns VBox status code.
58 * @retval VINF_SUCCESS when successfully installed.
59 * @retval VINF_PGM_GCPHYS_ALIASED when the shadow PTs could not be updated because
60 * the guest page is aliased and/or mapped by multiple PTs. A CR3 sync has been
61 * flagged together with a pool clearing.
62 * @retval VERR_PGM_HANDLER_PHYSICAL_CONFLICT if the range conflicts with an existing
63 * one. A debug assertion is raised.
64 *
65 * @param pVM VM Handle.
66 * @param enmType Handler type. Any of the PGMPHYSHANDLERTYPE_PHYSICAL* enums.
67 * @param GCPhys Start physical address.
68 * @param GCPhysLast Last physical address. (inclusive)
69 * @param pfnHandlerR3 The R3 handler.
70 * @param pvUserR3 User argument to the R3 handler.
71 * @param pfnHandlerR0 The R0 handler.
72 * @param pvUserR0 User argument to the R0 handler.
73 * @param pfnHandlerRC The RC handler.
74 * @param pvUserRC User argument to the RC handler. This can be a value
75 * less than 0x10000 or a (non-null) pointer that is
76 * automatically relocated.
77 * @param pszDesc Pointer to description string. This must not be freed.
78 */
79VMMDECL(int) PGMHandlerPhysicalRegisterEx(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast,
80 R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnHandlerR3, RTR3PTR pvUserR3,
81 R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnHandlerR0, RTR0PTR pvUserR0,
82 RCPTRTYPE(PFNPGMRCPHYSHANDLER) pfnHandlerRC, RTRCPTR pvUserRC,
83 R3PTRTYPE(const char *) pszDesc)
84{
85 Log(("PGMHandlerPhysicalRegisterEx: enmType=%d GCPhys=%RGp GCPhysLast=%RGp pfnHandlerR3=%RHv pvUserR3=%RHv pfnHandlerR0=%RHv pvUserR0=%RHv pfnHandlerGC=%RRv pvUserGC=%RRv pszDesc=%s\n",
86 enmType, GCPhys, GCPhysLast, pfnHandlerR3, pvUserR3, pfnHandlerR0, pvUserR0, pfnHandlerRC, pvUserRC, R3STRING(pszDesc)));
87
88 /*
89 * Validate input.
90 */
91 AssertMsgReturn(GCPhys < GCPhysLast, ("GCPhys >= GCPhysLast (%#x >= %#x)\n", GCPhys, GCPhysLast), VERR_INVALID_PARAMETER);
92 switch (enmType)
93 {
94 case PGMPHYSHANDLERTYPE_PHYSICAL_WRITE:
95 break;
96 case PGMPHYSHANDLERTYPE_MMIO:
97 case PGMPHYSHANDLERTYPE_PHYSICAL_ALL:
98 /* Simplification for PGMPhysRead, PGMR0Trap0eHandlerNPMisconfig and others. */
99 AssertMsgReturn(!(GCPhys & PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_INVALID_PARAMETER);
100 AssertMsgReturn((GCPhysLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK, ("%RGp\n", GCPhysLast), VERR_INVALID_PARAMETER);
101 break;
102 default:
103 AssertMsgFailed(("Invalid input enmType=%d!\n", enmType));
104 return VERR_INVALID_PARAMETER;
105 }
106 AssertMsgReturn( (RTRCUINTPTR)pvUserRC < 0x10000
107 || MMHyperR3ToRC(pVM, MMHyperRCToR3(pVM, pvUserRC)) == pvUserRC,
108 ("Not RC pointer! pvUserRC=%RRv\n", pvUserRC),
109 VERR_INVALID_PARAMETER);
110 AssertMsgReturn( (RTR0UINTPTR)pvUserR0 < 0x10000
111 || MMHyperR3ToR0(pVM, MMHyperR0ToR3(pVM, pvUserR0)) == pvUserR0,
112 ("Not R0 pointer! pvUserR0=%RHv\n", pvUserR0),
113 VERR_INVALID_PARAMETER);
114 AssertPtrReturn(pfnHandlerR3, VERR_INVALID_POINTER);
115 AssertReturn(pfnHandlerR0, VERR_INVALID_PARAMETER);
116 AssertReturn(pfnHandlerRC, VERR_INVALID_PARAMETER);
117
118 /*
119 * We require the range to be within registered ram.
120 * There is no apparent need to support ranges which cover more than one ram range.
121 */
122 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
123 if ( !pRam
124 || GCPhysLast < pRam->GCPhys
125 || GCPhys > pRam->GCPhysLast)
126 {
127#ifdef IN_RING3
128 DBGFR3Info(pVM, "phys", NULL, NULL);
129#endif
130 AssertMsgFailed(("No RAM range for %RGp-%RGp\n", GCPhys, GCPhysLast));
131 return VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
132 }
133
134 /*
135 * Allocate and initialize the new entry.
136 */
137 PPGMPHYSHANDLER pNew;
138 int rc = MMHyperAlloc(pVM, sizeof(*pNew), 0, MM_TAG_PGM_HANDLERS, (void **)&pNew);
139 if (RT_FAILURE(rc))
140 return rc;
141
142 pNew->Core.Key = GCPhys;
143 pNew->Core.KeyLast = GCPhysLast;
144 pNew->enmType = enmType;
145 pNew->cPages = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
146 pNew->cAliasedPages = 0;
147 pNew->cTmpOffPages = 0;
148 pNew->pfnHandlerR3 = pfnHandlerR3;
149 pNew->pvUserR3 = pvUserR3;
150 pNew->pfnHandlerR0 = pfnHandlerR0;
151 pNew->pvUserR0 = pvUserR0;
152 pNew->pfnHandlerRC = pfnHandlerRC;
153 pNew->pvUserRC = pvUserRC;
154 pNew->pszDesc = pszDesc;
155
156 pgmLock(pVM);
157
158 /*
159 * Try insert into list.
160 */
161 if (RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pNew->Core))
162 {
163 rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pNew, pRam);
164 if (rc == VINF_PGM_SYNC_CR3)
165 rc = VINF_PGM_GCPHYS_ALIASED;
166 pgmUnlock(pVM);
167#ifndef IN_RING3
168 REMNotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, GCPhysLast - GCPhys + 1, !!pfnHandlerR3);
169#else
170 REMR3NotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, GCPhysLast - GCPhys + 1, !!pfnHandlerR3);
171#endif
172 if (rc != VINF_SUCCESS)
173 Log(("PGMHandlerPhysicalRegisterEx: returns %Rrc (%RGp-%RGp)\n", rc, GCPhys, GCPhysLast));
174 return rc;
175 }
176
177 pgmUnlock(pVM);
178
179#if defined(IN_RING3) && defined(VBOX_STRICT)
180 DBGFR3Info(pVM, "handlers", "phys nostats", NULL);
181#endif
182 AssertMsgFailed(("Conflict! GCPhys=%RGp GCPhysLast=%RGp pszDesc=%s\n", GCPhys, GCPhysLast, pszDesc));
183 MMHyperFree(pVM, pNew);
184 return VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
185}
186
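/*
 * Usage sketch (editorial; device, callbacks and addresses are hypothetical,
 * not part of this file): registering a write handler over one page of
 * device memory and tearing it down again. pvUserR0/pvUserRC obey the rules
 * documented above: either a value below 0x10000 or a pointer known to the
 * relocation code.
 *
 *     rc = PGMHandlerPhysicalRegisterEx(pVM, PGMPHYSHANDLERTYPE_PHYSICAL_WRITE,
 *                                       GCPhysDev, GCPhysDev + PAGE_SIZE - 1,
 *                                       devWriteHandlerR3, pvDevR3,
 *                                       devWriteHandlerR0, pvDevR0,
 *                                       devWriteHandlerRC, pvDevRC,
 *                                       "Hypothetical device shadow");
 *     AssertRCReturn(rc, rc);
 *     ...
 *     rc = PGMHandlerPhysicalDeregister(pVM, GCPhysDev);
 */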
187
188/**
189 * Sets ram range flags and attempts updating shadow PTs.
190 *
191 * @returns VBox status code.
192 * @retval VINF_SUCCESS when the shadow PTs were successfully updated.
193 * @retval VINF_PGM_SYNC_CR3 when the shadow PTs could not be updated because
194 * the guest page is aliased and/or mapped by multiple PTs. FFs are set.
195 * @param pVM The VM handle.
196 * @param pCur The physical handler.
197 * @param pRam The RAM range.
198 */
199static int pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVM pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam)
200{
201 /*
202 * Iterate the guest ram pages updating the flags and flushing PT entries
203 * mapping the page.
204 */
205 bool fFlushTLBs = false;
206 int rc = VINF_SUCCESS;
207 const unsigned uState = pgmHandlerPhysicalCalcState(pCur);
208 uint32_t cPages = pCur->cPages;
209 uint32_t i = (pCur->Core.Key - pRam->GCPhys) >> PAGE_SHIFT;
210 for (;;)
211 {
212 PPGMPAGE pPage = &pRam->aPages[i];
213 AssertMsg(pCur->enmType != PGMPHYSHANDLERTYPE_MMIO || PGM_PAGE_IS_MMIO(pPage),
214 ("%RGp %R[pgmpage]\n", pRam->GCPhys + (i << PAGE_SHIFT), pPage));
215
216 /* Only do upgrades. */
217 if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) < uState)
218 {
219 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, uState);
220
221 int rc2 = pgmPoolTrackUpdateGCPhys(pVM, pRam->GCPhys + (i << PAGE_SHIFT), pPage,
222 false /* allow updates of PTEs (instead of flushing) */, &fFlushTLBs);
223 if (rc2 != VINF_SUCCESS && rc == VINF_SUCCESS)
224 rc = rc2;
225 }
226
227 /* next */
228 if (--cPages == 0)
229 break;
230 i++;
231 }
232
233 if (fFlushTLBs)
234 {
235 PGM_INVL_ALL_VCPU_TLBS(pVM);
236 Log(("pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs: flushing guest TLBs; rc=%d\n", rc));
237 }
238 else
239 Log(("pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs: doesn't flush guest TLBs. rc=%Rrc; sync flags=%x VMCPU_FF_PGM_SYNC_CR3=%d\n", rc, VMMGetCpu(pVM)->pgm.s.fSyncFlags, VMCPU_FF_ISSET(VMMGetCpu(pVM), VMCPU_FF_PGM_SYNC_CR3)));
240
241 return rc;
242}
243
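/*
 * Editorial note on the "only do upgrades" test in
 * pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs above:
 * pgmHandlerPhysicalCalcState() maps the handler type onto a per-page state
 * whose numeric order encodes strictness, roughly (the actual values live in
 * PGMInternal.h; this ordering is assumed):
 *
 *     PGM_PAGE_HNDL_PHYS_STATE_NONE < ..._DISABLED < ..._WRITE < ..._ALL
 *
 * so a page that is already trapping all accesses is never downgraded when a
 * weaker (write-only) handler also covers it.
 */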
244
245/**
246 * Deregister a physical page access handler.
247 *
248 * @returns VBox status code.
249 * @param pVM VM Handle.
250 * @param GCPhys Start physical address.
251 */
252VMMDECL(int) PGMHandlerPhysicalDeregister(PVM pVM, RTGCPHYS GCPhys)
253{
254 /*
255 * Find the handler.
256 */
257 pgmLock(pVM);
258 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
259 if (pCur)
260 {
261 LogFlow(("PGMHandlerPhysicalDeregister: Removing Range %RGp-%RGp %s\n",
262 pCur->Core.Key, pCur->Core.KeyLast, R3STRING(pCur->pszDesc)));
263
264 /*
265 * Clear the page bits, notify the REM about this change and clear
266 * the cache.
267 */
268 pgmHandlerPhysicalResetRamFlags(pVM, pCur);
269 pgmHandlerPhysicalDeregisterNotifyREM(pVM, pCur);
270 pVM->pgm.s.pLastPhysHandlerR0 = 0;
271 pVM->pgm.s.pLastPhysHandlerR3 = 0;
272 pVM->pgm.s.pLastPhysHandlerRC = 0;
273 MMHyperFree(pVM, pCur);
274 pgmUnlock(pVM);
275 return VINF_SUCCESS;
276 }
277 pgmUnlock(pVM);
278
279 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys));
280 return VERR_PGM_HANDLER_NOT_FOUND;
281}
282
283
284/**
285 * Shared code with PGMHandlerPhysicalModify: page aligns the range and notifies REM that the handler is going away.
286 */
287static void pgmHandlerPhysicalDeregisterNotifyREM(PVM pVM, PPGMPHYSHANDLER pCur)
288{
289 RTGCPHYS GCPhysStart = pCur->Core.Key;
290 RTGCPHYS GCPhysLast = pCur->Core.KeyLast;
291
292 /*
293 * Page align the range.
294 *
295 * Since we've reset (recalculated) the physical handler state of all pages
296 * we can make use of the page states to figure out whether a page should be
297 * included in the REM notification or not.
298 */
299 if ( (pCur->Core.Key & PAGE_OFFSET_MASK)
300 || ((pCur->Core.KeyLast + 1) & PAGE_OFFSET_MASK))
301 {
302 Assert(pCur->enmType != PGMPHYSHANDLERTYPE_MMIO);
303
304 if (GCPhysStart & PAGE_OFFSET_MASK)
305 {
306 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhysStart);
307 if ( pPage
308 && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
309 {
310 RTGCPHYS GCPhys = (GCPhysStart + (PAGE_SIZE - 1)) & X86_PTE_PAE_PG_MASK;
311 if ( GCPhys > GCPhysLast
312 || GCPhys < GCPhysStart)
313 return;
314 GCPhysStart = GCPhys;
315 }
316 else
317 GCPhysStart &= X86_PTE_PAE_PG_MASK;
318 Assert(!pPage || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO); /* these are page aligned atm! */
319 }
320
321 if (GCPhysLast & PAGE_OFFSET_MASK)
322 {
323 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhysLast);
324 if ( pPage
325 && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
326 {
327 RTGCPHYS GCPhys = (GCPhysLast & X86_PTE_PAE_PG_MASK) - 1;
328 if ( GCPhys < GCPhysStart
329 || GCPhys > GCPhysLast)
330 return;
331 GCPhysLast = GCPhys;
332 }
333 else
334 GCPhysLast |= PAGE_OFFSET_MASK;
335 Assert(!pPage || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO); /* these are page aligned atm! */
336 }
337 }
338
339 /*
340 * Tell REM.
341 */
342 const bool fRestoreAsRAM = pCur->pfnHandlerR3
343 && pCur->enmType != PGMPHYSHANDLERTYPE_MMIO; /** @todo this isn't entirely correct. */
344#ifndef IN_RING3
345 REMNotifyHandlerPhysicalDeregister(pVM, pCur->enmType, GCPhysStart, GCPhysLast - GCPhysStart + 1, !!pCur->pfnHandlerR3, fRestoreAsRAM);
346#else
347 REMR3NotifyHandlerPhysicalDeregister(pVM, pCur->enmType, GCPhysStart, GCPhysLast - GCPhysStart + 1, !!pCur->pfnHandlerR3, fRestoreAsRAM);
348#endif
349}
350
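/*
 * Worked example for the page aligning above (made-up addresses, 4K pages):
 * deregistering a handler that covered 0x10100-0x12eff while a neighbour
 * remains registered at 0x10000-0x100ff. The shared first page keeps a
 * non-NONE state, so GCPhysStart is rounded up to 0x11000; the last page is
 * not shared, so GCPhysLast is extended out to 0x12fff. REM is then told
 * about 0x11000-0x12fff only, leaving the neighbour's page untouched.
 */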
351
352/**
353 * pgmHandlerPhysicalResetRamFlags helper that checks for other handlers on
354 * edge pages.
355 */
356DECLINLINE(void) pgmHandlerPhysicalRecalcPageState(PVM pVM, RTGCPHYS GCPhys, bool fAbove, PPGMRAMRANGE *ppRamHint)
357{
358 /*
359 * Look for other handlers.
360 */
361 unsigned uState = PGM_PAGE_HNDL_PHYS_STATE_NONE;
362 for (;;)
363 {
364 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys, fAbove);
365 if ( !pCur
366 || ((fAbove ? pCur->Core.Key : pCur->Core.KeyLast) >> PAGE_SHIFT) != (GCPhys >> PAGE_SHIFT))
367 break;
368 unsigned uThisState = pgmHandlerPhysicalCalcState(pCur);
369 uState = RT_MAX(uState, uThisState);
370
371 /* next? */
372 RTGCPHYS GCPhysNext = fAbove
373 ? pCur->Core.KeyLast + 1
374 : pCur->Core.Key - 1;
375 if ((GCPhysNext >> PAGE_SHIFT) != (GCPhys >> PAGE_SHIFT))
376 break;
377 GCPhys = GCPhysNext;
378 }
379
380 /*
381 * Update if we found something that is a higher priority
382 * state than the current.
383 */
384 if (uState != PGM_PAGE_HNDL_PHYS_STATE_NONE)
385 {
386 PPGMPAGE pPage;
387 int rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, ppRamHint);
388 if ( RT_SUCCESS(rc)
389 && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) < uState)
390 {
391 /* This should normally not be necessary. */
392 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, uState);
393 bool fFlushTLBs;
394 rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhys, pPage, false /*fFlushPTEs*/, &fFlushTLBs);
395 if (RT_SUCCESS(rc) && fFlushTLBs)
396 PGM_INVL_ALL_VCPU_TLBS(pVM);
397 else
398 AssertRC(rc);
399 }
400 else
401 AssertRC(rc);
402 }
403}
404
405
406/**
407 * Resets an aliased page.
408 *
409 * @param pVM The VM.
410 * @param pPage The page.
411 * @param GCPhysPage The page address in case it comes in handy.
412 * @param fDoAccounting Whether to perform accounting. (Only set during
413 * reset where pgmR3PhysRamReset doesn't have the
414 * handler structure handy.)
415 */
416void pgmHandlerPhysicalResetAliasedPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhysPage, bool fDoAccounting)
417{
418 Assert(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO);
419 Assert(PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
420
421 /*
422 * Flush any shadow page table references *first*.
423 */
424 bool fFlushTLBs = false;
425 int rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhysPage, pPage, true /*fFlushPTEs*/, &fFlushTLBs);
426 AssertLogRelRCReturnVoid(rc);
427# ifdef IN_RC
428 if (fFlushTLBs && rc != VINF_PGM_SYNC_CR3)
429 PGM_INVL_VCPU_TLBS(VMMGetCpu0(pVM));
430# else
431 HWACCMFlushTLBOnAllVCpus(pVM);
432# endif
433
434 /*
435 * Make it an MMIO/Zero page.
436 */
437 PGM_PAGE_SET_HCPHYS(pVM, pPage, pVM->pgm.s.HCPhysZeroPg);
438 PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_MMIO);
439 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
440 PGM_PAGE_SET_PAGEID(pVM, pPage, NIL_GMM_PAGEID);
441 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_ALL);
442
443 /* Flush its TLB entry. */
444 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);
445
446 /*
447 * Do accounting for pgmR3PhysRamReset.
448 */
449 if (fDoAccounting)
450 {
451 PPGMPHYSHANDLER pHandler = pgmHandlerPhysicalLookup(pVM, GCPhysPage);
452 if (RT_LIKELY(pHandler))
453 {
454 Assert(pHandler->cAliasedPages > 0);
455 pHandler->cAliasedPages--;
456 }
457 else
458 AssertFailed();
459 }
460}
461
462
463/**
464 * Resets ram range flags.
465 *
468 * @param pVM The VM handle.
469 * @param pCur The physical handler.
470 *
471 * @remark We don't start messing with the shadow page tables, as we've
472 * already got code in Trap0e which deals with out of sync handler
473 * flags (originally conceived for global pages).
474 */
475static void pgmHandlerPhysicalResetRamFlags(PVM pVM, PPGMPHYSHANDLER pCur)
476{
477 /*
478 * Iterate the guest ram pages updating the state.
479 */
480 RTUINT cPages = pCur->cPages;
481 RTGCPHYS GCPhys = pCur->Core.Key;
482 PPGMRAMRANGE pRamHint = NULL;
483 for (;;)
484 {
485 PPGMPAGE pPage;
486 int rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, &pRamHint);
487 if (RT_SUCCESS(rc))
488 {
489 /* Reset MMIO2 for MMIO pages to MMIO, since this aliasing is our business.
490 (We don't flip MMIO to RAM though, that's PGMPhys.cpp's job.) */
491 if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
492 {
493 Assert(pCur->cAliasedPages > 0);
494 pgmHandlerPhysicalResetAliasedPage(pVM, pPage, GCPhys, false /*fDoAccounting*/);
495 pCur->cAliasedPages--;
496 }
497 AssertMsg(pCur->enmType != PGMPHYSHANDLERTYPE_MMIO || PGM_PAGE_IS_MMIO(pPage), ("%RGp %R[pgmpage]\n", GCPhys, pPage));
498 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_NONE);
499 }
500 else
501 AssertRC(rc);
502
503 /* next */
504 if (--cPages == 0)
505 break;
506 GCPhys += PAGE_SIZE;
507 }
508
509 pCur->cAliasedPages = 0;
510 pCur->cTmpOffPages = 0;
511
512 /*
513 * Check for partial start and end pages.
514 */
515 if (pCur->Core.Key & PAGE_OFFSET_MASK)
516 pgmHandlerPhysicalRecalcPageState(pVM, pCur->Core.Key - 1, false /* fAbove */, &pRamHint);
517 if ((pCur->Core.KeyLast & PAGE_OFFSET_MASK) != PAGE_OFFSET_MASK)
518 pgmHandlerPhysicalRecalcPageState(pVM, pCur->Core.KeyLast + 1, true /* fAbove */, &pRamHint);
519}
520
521
522/**
523 * Modify a physical page access handler.
524 *
525 * Modification can only be done to the range itself, not to the type or anything else.
526 *
527 * @returns VBox status code.
528 * For all return codes other than VERR_PGM_HANDLER_NOT_FOUND and VINF_SUCCESS the range is deregistered
529 * and a new registration must be performed!
530 * @param pVM VM handle.
531 * @param GCPhysCurrent Current location.
532 * @param GCPhys New location.
533 * @param GCPhysLast New last location.
534 */
535VMMDECL(int) PGMHandlerPhysicalModify(PVM pVM, RTGCPHYS GCPhysCurrent, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast)
536{
537 /*
538 * Remove it.
539 */
540 int rc;
541 pgmLock(pVM);
542 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhysCurrent);
543 if (pCur)
544 {
545 /*
546 * Clear the ram flags. (We're gonna move or free it!)
547 */
548 pgmHandlerPhysicalResetRamFlags(pVM, pCur);
549 const bool fRestoreAsRAM = pCur->pfnHandlerR3
550 && pCur->enmType != PGMPHYSHANDLERTYPE_MMIO; /** @todo this isn't entirely correct. */
551
552 /*
553 * Validate the new range, modify and reinsert.
554 */
555 if (GCPhysLast >= GCPhys)
556 {
557 /*
558 * We require the range to be within registered ram.
559 * There is no apparent need to support ranges which cover more than one ram range.
560 */
561 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
562 if ( pRam
563 && GCPhys <= pRam->GCPhysLast
564 && GCPhysLast >= pRam->GCPhys)
565 {
566 pCur->Core.Key = GCPhys;
567 pCur->Core.KeyLast = GCPhysLast;
568 pCur->cPages = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT; /* same rounding as the other cPages calculations; +1 under-counted when GCPhysLast isn't at a page end */
569
570 if (RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pCur->Core))
571 {
572 PGMPHYSHANDLERTYPE enmType = pCur->enmType;
573 RTGCPHYS cb = GCPhysLast - GCPhys + 1;
574 bool fHasHCHandler = !!pCur->pfnHandlerR3;
575
576 /*
577 * Set ram flags, flush shadow PT entries and finally tell REM about this.
578 */
579 rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam);
580 pgmUnlock(pVM);
581
582#ifndef IN_RING3
583 REMNotifyHandlerPhysicalModify(pVM, enmType, GCPhysCurrent, GCPhys, cb,
584 fHasHCHandler, fRestoreAsRAM);
585#else
586 REMR3NotifyHandlerPhysicalModify(pVM, enmType, GCPhysCurrent, GCPhys, cb,
587 fHasHCHandler, fRestoreAsRAM);
588#endif
589 PGM_INVL_ALL_VCPU_TLBS(pVM);
590 Log(("PGMHandlerPhysicalModify: GCPhysCurrent=%RGp -> GCPhys=%RGp GCPhysLast=%RGp\n",
591 GCPhysCurrent, GCPhys, GCPhysLast));
592 return VINF_SUCCESS;
593 }
594
595 AssertMsgFailed(("Conflict! GCPhys=%RGp GCPhysLast=%RGp\n", GCPhys, GCPhysLast));
596 rc = VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
597 }
598 else
599 {
600 AssertMsgFailed(("No RAM range for %RGp-%RGp\n", GCPhys, GCPhysLast));
601 rc = VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
602 }
603 }
604 else
605 {
606 AssertMsgFailed(("Invalid range %RGp-%RGp\n", GCPhys, GCPhysLast));
607 rc = VERR_INVALID_PARAMETER;
608 }
609
610 /*
611 * Invalid new location, flush the cache and free it.
612 * We've only gotta notify REM and free the memory.
613 */
614 pgmHandlerPhysicalDeregisterNotifyREM(pVM, pCur);
615 pVM->pgm.s.pLastPhysHandlerR0 = 0;
616 pVM->pgm.s.pLastPhysHandlerR3 = 0;
617 pVM->pgm.s.pLastPhysHandlerRC = 0;
618 MMHyperFree(pVM, pCur);
619 }
620 else
621 {
622 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhysCurrent));
623 rc = VERR_PGM_HANDLER_NOT_FOUND;
624 }
625
626 pgmUnlock(pVM);
627 return rc;
628}
629
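/*
 * Usage sketch (editorial, hypothetical addresses): sliding an existing
 * handler to a new, equally sized location. As noted above, any failure
 * other than VERR_PGM_HANDLER_NOT_FOUND leaves the old registration gone,
 * so on those paths the caller must register anew via
 * PGMHandlerPhysicalRegisterEx.
 *
 *     rc = PGMHandlerPhysicalModify(pVM, GCPhysOld, GCPhysNew, GCPhysNew + cb - 1);
 */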
630
631/**
632 * Changes the callbacks associated with a physical access handler.
633 *
634 * @returns VBox status code.
635 * @param pVM VM Handle.
636 * @param GCPhys Start physical address.
637 * @param pfnHandlerR3 The R3 handler.
638 * @param pvUserR3 User argument to the R3 handler.
639 * @param pfnHandlerR0 The R0 handler.
640 * @param pvUserR0 User argument to the R0 handler.
641 * @param pfnHandlerRC The RC handler.
642 * @param pvUserRC User argument to the RC handler. Values larger than or
643 * equal to 0x10000 will be relocated automatically.
644 * @param pszDesc Pointer to description string. This must not be freed.
645 */
646VMMDECL(int) PGMHandlerPhysicalChangeCallbacks(PVM pVM, RTGCPHYS GCPhys,
647 R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnHandlerR3, RTR3PTR pvUserR3,
648 R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnHandlerR0, RTR0PTR pvUserR0,
649 RCPTRTYPE(PFNPGMRCPHYSHANDLER) pfnHandlerRC, RTRCPTR pvUserRC,
650 R3PTRTYPE(const char *) pszDesc)
651{
652 /*
653 * Get the handler.
654 */
655 int rc = VINF_SUCCESS;
656 pgmLock(pVM);
657 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
658 if (pCur)
659 {
660 /*
661 * Change callbacks.
662 */
663 pCur->pfnHandlerR3 = pfnHandlerR3;
664 pCur->pvUserR3 = pvUserR3;
665 pCur->pfnHandlerR0 = pfnHandlerR0;
666 pCur->pvUserR0 = pvUserR0;
667 pCur->pfnHandlerRC = pfnHandlerRC;
668 pCur->pvUserRC = pvUserRC;
669 pCur->pszDesc = pszDesc;
670 }
671 else
672 {
673 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys));
674 rc = VERR_PGM_HANDLER_NOT_FOUND;
675 }
676
677 pgmUnlock(pVM);
678 return rc;
679}
680
681
682/**
683 * Splits a physical access handler in two.
684 *
685 * @returns VBox status code.
686 * @param pVM VM Handle.
687 * @param GCPhys Start physical address of the handler.
688 * @param GCPhysSplit The split address.
689 */
690VMMDECL(int) PGMHandlerPhysicalSplit(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysSplit)
691{
692 AssertReturn(GCPhys < GCPhysSplit, VERR_INVALID_PARAMETER);
693
694 /*
695 * Do the allocation without owning the lock.
696 */
697 PPGMPHYSHANDLER pNew;
698 int rc = MMHyperAlloc(pVM, sizeof(*pNew), 0, MM_TAG_PGM_HANDLERS, (void **)&pNew);
699 if (RT_FAILURE(rc))
700 return rc;
701
702 /*
703 * Get the handler.
704 */
705 pgmLock(pVM);
706 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
707 if (RT_LIKELY(pCur))
708 {
709 if (RT_LIKELY(GCPhysSplit <= pCur->Core.KeyLast))
710 {
711 /*
712 * Create new handler node for the 2nd half.
713 */
714 *pNew = *pCur;
715 pNew->Core.Key = GCPhysSplit;
716 pNew->cPages = (pNew->Core.KeyLast - (pNew->Core.Key & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
717
718 pCur->Core.KeyLast = GCPhysSplit - 1;
719 pCur->cPages = (pCur->Core.KeyLast - (pCur->Core.Key & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
720
721 if (RT_LIKELY(RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pNew->Core)))
722 {
723 LogFlow(("PGMHandlerPhysicalSplit: %RGp-%RGp and %RGp-%RGp\n",
724 pCur->Core.Key, pCur->Core.KeyLast, pNew->Core.Key, pNew->Core.KeyLast));
725 pgmUnlock(pVM);
726 return VINF_SUCCESS;
727 }
728 AssertMsgFailed(("whu?\n"));
729 rc = VERR_INTERNAL_ERROR;
730 }
731 else
732 {
733 AssertMsgFailed(("outside range: %RGp-%RGp split %RGp\n", pCur->Core.Key, pCur->Core.KeyLast, GCPhysSplit));
734 rc = VERR_INVALID_PARAMETER;
735 }
736 }
737 else
738 {
739 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys));
740 rc = VERR_PGM_HANDLER_NOT_FOUND;
741 }
742 pgmUnlock(pVM);
743 MMHyperFree(pVM, pNew);
744 return rc;
745}
746
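/*
 * Sketch of the split/join pair (editorial; hypothetical, page-aligned
 * range): a two-page handler is cut at its second page and later stitched
 * back together. PGMHandlerPhysicalJoin below requires the halves to be
 * adjacent with identical callbacks, which holds trivially right after a
 * split.
 *
 *     rc = PGMHandlerPhysicalSplit(pVM, GCPhys, GCPhys + PAGE_SIZE);
 *     ...
 *     rc = PGMHandlerPhysicalJoin(pVM, GCPhys, GCPhys + PAGE_SIZE);
 */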
747
748/**
749 * Joins up two adjacent physical access handlers which have the same callbacks.
750 *
751 * @returns VBox status code.
752 * @param pVM VM Handle.
753 * @param GCPhys1 Start physical address of the first handler.
754 * @param GCPhys2 Start physical address of the second handler.
755 */
756VMMDECL(int) PGMHandlerPhysicalJoin(PVM pVM, RTGCPHYS GCPhys1, RTGCPHYS GCPhys2)
757{
758 /*
759 * Get the handlers.
760 */
761 int rc;
762 pgmLock(pVM);
763 PPGMPHYSHANDLER pCur1 = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys1);
764 if (RT_LIKELY(pCur1))
765 {
766 PPGMPHYSHANDLER pCur2 = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys2);
767 if (RT_LIKELY(pCur2))
768 {
769 /*
770 * Make sure that they are adjacent, and that they've got the same callbacks.
771 */
772 if (RT_LIKELY(pCur1->Core.KeyLast + 1 == pCur2->Core.Key))
773 {
774 if (RT_LIKELY( pCur1->pfnHandlerRC == pCur2->pfnHandlerRC
775 && pCur1->pfnHandlerR0 == pCur2->pfnHandlerR0
776 && pCur1->pfnHandlerR3 == pCur2->pfnHandlerR3))
777 {
778 PPGMPHYSHANDLER pCur3 = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys2);
779 if (RT_LIKELY(pCur3 == pCur2))
780 {
781 pCur1->Core.KeyLast = pCur2->Core.KeyLast;
782 pCur1->cPages = (pCur1->Core.KeyLast - (pCur1->Core.Key & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
783 LogFlow(("PGMHandlerPhysicalJoin: %RGp-%RGp %RGp-%RGp\n",
784 pCur1->Core.Key, pCur1->Core.KeyLast, pCur2->Core.Key, pCur2->Core.KeyLast));
785 pVM->pgm.s.pLastPhysHandlerR0 = 0;
786 pVM->pgm.s.pLastPhysHandlerR3 = 0;
787 pVM->pgm.s.pLastPhysHandlerRC = 0;
788 MMHyperFree(pVM, pCur2);
789 pgmUnlock(pVM);
790 return VINF_SUCCESS;
791 }
792
793 Assert(pCur3 == pCur2);
794 rc = VERR_INTERNAL_ERROR;
795 }
796 else
797 {
798 AssertMsgFailed(("mismatching handlers\n"));
799 rc = VERR_ACCESS_DENIED;
800 }
801 }
802 else
803 {
804 AssertMsgFailed(("not adjacent: %RGp-%RGp %RGp-%RGp\n",
805 pCur1->Core.Key, pCur1->Core.KeyLast, pCur2->Core.Key, pCur2->Core.KeyLast));
806 rc = VERR_INVALID_PARAMETER;
807 }
808 }
809 else
810 {
811 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys2));
812 rc = VERR_PGM_HANDLER_NOT_FOUND;
813 }
814 }
815 else
816 {
817 AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys1));
818 rc = VERR_PGM_HANDLER_NOT_FOUND;
819 }
820 pgmUnlock(pVM);
821 return rc;
823}
824
825
826/**
827 * Resets any modifications to individual pages in a physical page access
828 * handler region.
829 *
830 * This is used in pair with PGMHandlerPhysicalPageTempOff(),
831 * PGMHandlerPhysicalPageAlias() or PGMHandlerPhysicalPageAliasHC().
832 *
833 * @returns VBox status code.
834 * @param pVM VM Handle
835 * @param GCPhys The start address of the handler regions, i.e. what you
836 * passed to PGMR3HandlerPhysicalRegister(),
837 * PGMHandlerPhysicalRegisterEx() or
838 * PGMHandlerPhysicalModify().
839 */
840VMMDECL(int) PGMHandlerPhysicalReset(PVM pVM, RTGCPHYS GCPhys)
841{
842 LogFlow(("PGMHandlerPhysicalReset GCPhys=%RGp\n", GCPhys));
843 pgmLock(pVM);
844
845 /*
846 * Find the handler.
847 */
848 int rc;
849 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
850 if (RT_LIKELY(pCur))
851 {
852 /*
853 * Validate type.
854 */
855 switch (pCur->enmType)
856 {
857 case PGMPHYSHANDLERTYPE_PHYSICAL_WRITE:
858 case PGMPHYSHANDLERTYPE_PHYSICAL_ALL:
859 case PGMPHYSHANDLERTYPE_MMIO: /* NOTE: Only use when clearing MMIO ranges with aliased MMIO2 pages! */
860 {
861 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysHandlerReset)); /** @todo move out of switch */
862 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
863 Assert(pRam);
864 Assert(pRam->GCPhys <= pCur->Core.Key);
865 Assert(pRam->GCPhysLast >= pCur->Core.KeyLast);
866
867 if (pCur->enmType == PGMPHYSHANDLERTYPE_MMIO)
868 {
869 /*
870 * Reset all the PGMPAGETYPE_MMIO2_ALIAS_MMIO pages first and that's it.
871 * This could probably be optimized a bit wrt flushing, but I'm too lazy
872 * to do that now...
873 */
874 if (pCur->cAliasedPages)
875 {
876 PPGMPAGE pPage = &pRam->aPages[(pCur->Core.Key - pRam->GCPhys) >> PAGE_SHIFT];
877 uint32_t cLeft = pCur->cPages;
878 while (cLeft-- > 0)
879 {
880 if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
881 {
882 Assert(pCur->cAliasedPages > 0);
883 pgmHandlerPhysicalResetAliasedPage(pVM, pPage, /* address derived from pPage's index; the old cLeft-based value counted down and didn't correspond to pPage */
884 pRam->GCPhys + ((RTGCPHYS)(pPage - &pRam->aPages[0]) << PAGE_SHIFT), false /*fDoAccounting*/);
885 --pCur->cAliasedPages;
886#ifndef VBOX_STRICT
887 if (pCur->cAliasedPages == 0)
888 break;
889#endif
890 }
891 Assert(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO);
892 pPage++;
893 }
894 Assert(pCur->cAliasedPages == 0);
895 }
896 }
897 else if (pCur->cTmpOffPages > 0)
898 {
899 /*
900 * Set the flags and flush shadow PT entries.
901 */
902 rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam);
903 }
904
905 pCur->cAliasedPages = 0;
906 pCur->cTmpOffPages = 0;
907
908 rc = VINF_SUCCESS;
909 break;
910 }
911
912 /*
913 * Invalid.
914 */
915 default:
916 AssertMsgFailed(("Invalid type %d! Corruption!\n", pCur->enmType));
917 rc = VERR_INTERNAL_ERROR;
918 break;
919 }
920 }
921 else
922 {
923 AssertMsgFailed(("Didn't find MMIO Range starting at %#x\n", GCPhys));
924 rc = VERR_PGM_HANDLER_NOT_FOUND;
925 }
926
927 pgmUnlock(pVM);
928 return rc;
929}
930
931
932/**
933 * Temporarily turns off the access monitoring of a page within a monitored
934 * physical write/all page access handler region.
935 *
936 * Use this when no further \#PFs are required for that page. Be aware that
937 * a page directory sync might reset the flags, and turn on access monitoring
938 * for the page.
939 *
940 * The caller must do required page table modifications.
941 *
942 * @returns VBox status code.
943 * @param pVM VM Handle
944 * @param GCPhys The start address of the access handler. This
945 * must be a fully page aligned range or we risk
946 * messing up other handlers installed for the
947 * start and end pages.
948 * @param GCPhysPage The physical address of the page to turn off
949 * access monitoring for.
950 */
951VMMDECL(int) PGMHandlerPhysicalPageTempOff(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage)
952{
953 LogFlow(("PGMHandlerPhysicalPageTempOff GCPhysPage=%RGp\n", GCPhysPage));
954
955 pgmLock(pVM);
956 /*
957 * Validate the range.
958 */
959 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
960 if (RT_LIKELY(pCur))
961 {
962 if (RT_LIKELY( GCPhysPage >= pCur->Core.Key
963 && GCPhysPage <= pCur->Core.KeyLast))
964 {
965 Assert(!(pCur->Core.Key & PAGE_OFFSET_MASK));
966 Assert((pCur->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
967
968 AssertReturnStmt( pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_WRITE
969 || pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_ALL,
970 pgmUnlock(pVM), VERR_ACCESS_DENIED);
971
972 /*
973 * Change the page status.
974 */
975 PPGMPAGE pPage;
976 int rc = pgmPhysGetPageEx(pVM, GCPhysPage, &pPage);
977 AssertReturnStmt(RT_SUCCESS_NP(rc), pgmUnlock(pVM), rc);
978 if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_DISABLED)
979 {
980 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
981 pCur->cTmpOffPages++;
982 }
983 pgmUnlock(pVM);
984 return VINF_SUCCESS;
985 }
986 pgmUnlock(pVM);
987 AssertMsgFailed(("The page %#x is outside the range %#x-%#x\n",
988 GCPhysPage, pCur->Core.Key, pCur->Core.KeyLast));
989 return VERR_INVALID_PARAMETER;
990 }
991 pgmUnlock(pVM);
992 AssertMsgFailed(("Specified physical handler start address %#x is invalid.\n", GCPhys));
993 return VERR_PGM_HANDLER_NOT_FOUND;
994}
995
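/*
 * Usage sketch (editorial, hypothetical addresses): once a monitored page
 * has delivered the one write the handler cared about, further \#PFs on it
 * are wasted work, so monitoring is switched off for just that page and
 * later re-armed for the whole range with PGMHandlerPhysicalReset:
 *
 *     rc = PGMHandlerPhysicalPageTempOff(pVM, GCPhysHandler, GCPhysPage);
 *     ...
 *     rc = PGMHandlerPhysicalReset(pVM, GCPhysHandler);
 */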
996
997/**
998 * Replaces an MMIO page with an MMIO2 page.
999 *
1000 * This is a worker for IOMMMIOMapMMIO2Page that works in a similar way to
1001 * PGMHandlerPhysicalPageTempOff but for an MMIO page. Since an MMIO page has no
1002 * backing, the caller must provide a replacement page. For various reasons the
1003 * replacement page must be an MMIO2 page.
1004 *
1005 * The caller must do the required page table modifications. You can get away
1006 * without making any modifications since it's an MMIO page; the cost is an extra
1007 * \#PF which will then resync the page.
1008 *
1009 * Call PGMHandlerPhysicalReset() to restore the MMIO page.
1010 *
1011 * The caller may still get handler callbacks even after this call and must be
1012 * able to deal correctly with such calls. The reason for these callbacks is
1013 * either that we're executing in the recompiler (which doesn't know about this
1014 * arrangement) or that we've been restored from saved state (where we won't
1015 * save the change).
1016 *
1017 * @returns VBox status code.
1018 * @param pVM The VM handle
1019 * @param GCPhys The start address of the access handler. This
1020 * must be a fully page aligned range or we risk
1021 * messing up other handlers installed for the
1022 * start and end pages.
1023 * @param GCPhysPage The physical address of the page to turn off
1024 * access monitoring for.
1025 * @param GCPhysPageRemap The physical address of the MMIO2 page that
1026 * serves as backing memory.
1027 *
1028 * @remark May cause a page pool flush if used on a page that is already
1029 * aliased.
1030 *
1031 * @note This trick only works reliably if the two pages are never ever
1032 * mapped in the same page table. If they are the page pool code will
1033 * be confused should either of them be flushed. See the special case
1034 * of zero page aliasing mentioned in #3170.
1035 *
1036 */
1037VMMDECL(int) PGMHandlerPhysicalPageAlias(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage, RTGCPHYS GCPhysPageRemap)
1038{
1039/// Assert(!IOMIsLockOwner(pVM)); /* We mustn't own any other locks when calling this */
1040
1041 pgmLock(pVM);
1042 /*
1043 * Lookup and validate the range.
1044 */
1045 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1046 if (RT_LIKELY(pCur))
1047 {
1048 if (RT_LIKELY( GCPhysPage >= pCur->Core.Key
1049 && GCPhysPage <= pCur->Core.KeyLast))
1050 {
1051 AssertReturnStmt(pCur->enmType == PGMPHYSHANDLERTYPE_MMIO, pgmUnlock(pVM), VERR_ACCESS_DENIED);
1052 AssertReturnStmt(!(pCur->Core.Key & PAGE_OFFSET_MASK), pgmUnlock(pVM), VERR_INVALID_PARAMETER);
1053 AssertReturnStmt((pCur->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK, pgmUnlock(pVM), VERR_INVALID_PARAMETER);
1054
1055 /*
1056 * Get and validate the two pages.
1057 */
1058 PPGMPAGE pPageRemap;
1059 int rc = pgmPhysGetPageEx(pVM, GCPhysPageRemap, &pPageRemap);
1060 AssertReturnStmt(RT_SUCCESS_NP(rc), pgmUnlock(pVM), rc);
1061 AssertMsgReturnStmt(PGM_PAGE_GET_TYPE(pPageRemap) == PGMPAGETYPE_MMIO2,
1062 ("GCPhysPageRemap=%RGp %R[pgmpage]\n", GCPhysPageRemap, pPageRemap),
1063 pgmUnlock(pVM), VERR_PGM_PHYS_NOT_MMIO2);
1064
1065 PPGMPAGE pPage;
1066 rc = pgmPhysGetPageEx(pVM, GCPhysPage, &pPage);
1067 AssertReturnStmt(RT_SUCCESS_NP(rc), pgmUnlock(pVM), rc);
1068 if (PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO)
1069 {
1070 AssertMsgReturnStmt(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO,
1071 ("GCPhysPage=%RGp %R[pgmpage]\n", GCPhysPage, pPage),
1072 pgmUnlock(pVM), VERR_PGM_PHYS_NOT_MMIO2); /* don't leak the PGM lock on the failure path */
1073 if (PGM_PAGE_GET_HCPHYS(pPage) == PGM_PAGE_GET_HCPHYS(pPageRemap))
1074 {
1075 pgmUnlock(pVM);
1076 return VINF_PGM_HANDLER_ALREADY_ALIASED;
1077 }
1078
1079 /*
1080 * The page is already mapped as some other page, reset it
1081 * to an MMIO/ZERO page before doing the new mapping.
1082 */
1083 Log(("PGMHandlerPhysicalPageAlias: GCPhysPage=%RGp (%R[pgmpage]; %RHp -> %RHp\n",
1084 GCPhysPage, pPage, PGM_PAGE_GET_HCPHYS(pPage), PGM_PAGE_GET_HCPHYS(pPageRemap)));
1085 pgmHandlerPhysicalResetAliasedPage(pVM, pPage, GCPhysPage, false /*fDoAccounting*/);
1086 pCur->cAliasedPages--;
1087 }
1088 Assert(PGM_PAGE_IS_ZERO(pPage));
1089
1090 /*
1091 * Do the actual remapping here.
1092 * This page now serves as an alias for the backing memory specified.
1093 */
1094 LogFlow(("PGMHandlerPhysicalPageAlias: %RGp (%R[pgmpage]) alias for %RGp (%R[pgmpage])\n",
1095 GCPhysPage, pPage, GCPhysPageRemap, pPageRemap ));
1096 PGM_PAGE_SET_HCPHYS(pVM, pPage, PGM_PAGE_GET_HCPHYS(pPageRemap));
1097 PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_MMIO2_ALIAS_MMIO);
1098 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
1099 PGM_PAGE_SET_PAGEID(pVM, pPage, PGM_PAGE_GET_PAGEID(pPageRemap));
1100 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
1101 pCur->cAliasedPages++;
1102 Assert(pCur->cAliasedPages <= pCur->cPages);
1103
1104 /* Flush its TLB entry. */
1105 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);
1106
1107 LogFlow(("PGMHandlerPhysicalPageAlias: => %R[pgmpage]\n", pPage));
1108 pgmUnlock(pVM);
1109 return VINF_SUCCESS;
1110 }
1111
1112 pgmUnlock(pVM);
1113 AssertMsgFailed(("The page %#x is outside the range %#x-%#x\n",
1114 GCPhysPage, pCur->Core.Key, pCur->Core.KeyLast));
1115 return VERR_INVALID_PARAMETER;
1116 }
1117
1118 pgmUnlock(pVM);
1119 AssertMsgFailed(("Specified physical handler start address %#x is invalid.\n", GCPhys));
1120 return VERR_PGM_HANDLER_NOT_FOUND;
1121}
1122
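/*
 * Usage sketch in the spirit of IOMMMIOMapMMIO2Page (editorial, hypothetical
 * addresses): pointing one page of an MMIO range straight at its MMIO2
 * backing so guest accesses stop faulting, then undoing the alias for the
 * whole range:
 *
 *     rc = PGMHandlerPhysicalPageAlias(pVM, GCPhysMmio, GCPhysMmioPage,
 *                                      GCPhysMmio2Page);
 *     ...
 *     rc = PGMHandlerPhysicalReset(pVM, GCPhysMmio);
 */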
1123/**
1124 * Replaces an MMIO page with an arbitrary HC page.
1125 *
1126 * This is a worker for IOMMMIOMapMMIO2Page that works in a similar way to
1127 * PGMHandlerPhysicalPageTempOff but for an MMIO page. Since an MMIO page has no
1128 * backing, the caller must provide a replacement page. Unlike
1129 * PGMHandlerPhysicalPageAlias, the replacement here is an arbitrary host page
1129 * identified by its host physical address (HCPhysPageRemap).
1130 *
1131 * The caller must do the required page table modifications. You can get away
1132 * without making any modifications since it's an MMIO page; the cost is an extra
1133 * \#PF which will then resync the page.
1134 *
1135 * Call PGMHandlerPhysicalReset() to restore the MMIO page.
1136 *
1137 * The caller may still get handler callbacks even after this call and must be
1138 * able to deal correctly with such calls. The reason for these callbacks is
1139 * either that we're executing in the recompiler (which doesn't know about this
1140 * arrangement) or that we've been restored from saved state (where we won't
1141 * save the change).
1142 *
1143 * @returns VBox status code.
1144 * @param pVM The VM handle
1145 * @param GCPhys The start address of the access handler. This
1146 * must be a fully page aligned range or we risk
1147 * messing up other handlers installed for the
1148 * start and end pages.
1149 * @param GCPhysPage The physical address of the page to turn off
1150 * access monitoring for.
1151 * @param HCPhysPageRemap The physical address of the HC page that
1152 * serves as backing memory.
1153 *
1154 * @remark May cause a page pool flush if used on a page that is already
1155 * aliased.
1156 */
1157VMMDECL(int) PGMHandlerPhysicalPageAliasHC(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage, RTHCPHYS HCPhysPageRemap)
1158{
1159/// Assert(!IOMIsLockOwner(pVM)); /* We mustn't own any other locks when calling this */
1160
1161 /*
1162 * Lookup and validate the range.
1163 */
1164 pgmLock(pVM);
1165 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1166 if (RT_LIKELY(pCur))
1167 {
1168 if (RT_LIKELY( GCPhysPage >= pCur->Core.Key
1169 && GCPhysPage <= pCur->Core.KeyLast))
1170 {
1171 AssertReturnStmt(pCur->enmType == PGMPHYSHANDLERTYPE_MMIO, pgmUnlock(pVM), VERR_ACCESS_DENIED);
1172 AssertReturnStmt(!(pCur->Core.Key & PAGE_OFFSET_MASK), pgmUnlock(pVM), VERR_INVALID_PARAMETER);
1173 AssertReturnStmt((pCur->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK, pgmUnlock(pVM), VERR_INVALID_PARAMETER);
1174
1175 /*
1176 * Get and validate the pages.
1177 */
1178 PPGMPAGE pPage;
1179 int rc = pgmPhysGetPageEx(pVM, GCPhysPage, &pPage);
1180 AssertReturnStmt(RT_SUCCESS_NP(rc), pgmUnlock(pVM), rc);
1181 if (PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO)
1182 {
1183 pgmUnlock(pVM);
1184 AssertMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO,
1185 ("GCPhysPage=%RGp %R[pgmpage]\n", GCPhysPage, pPage),
1186 VERR_PGM_PHYS_NOT_MMIO2);
1187 return VINF_PGM_HANDLER_ALREADY_ALIASED;
1188 }
1189 Assert(PGM_PAGE_IS_ZERO(pPage));
1190
1191 /*
1192 * Do the actual remapping here.
1193 * This page now serves as an alias for the backing memory specified.
1194 */
1195 LogFlow(("PGMHandlerPhysicalPageAlias: %RGp (%R[pgmpage]) alias for %RHp\n",
1196 GCPhysPage, pPage, HCPhysPageRemap));
1197 PGM_PAGE_SET_HCPHYS(pVM, pPage, HCPhysPageRemap);
1198 PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_MMIO2_ALIAS_MMIO);
1199 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
1200 /** @todo hack alert
1201 * This needs to be done properly. Currently we get away with it as the recompiler directly calls
1202 * IOM read and write functions. Access through PGMPhysRead/Write will crash the process.
1203 */
1204 PGM_PAGE_SET_PAGEID(pVM, pPage, NIL_GMM_PAGEID);
1205 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
1206 pCur->cAliasedPages++;
1207 Assert(pCur->cAliasedPages <= pCur->cPages);
1208
1209 /* Flush its TLB entry. */
1210 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);
1211
1212 LogFlow(("PGMHandlerPhysicalPageAliasHC: => %R[pgmpage]\n", pPage));
1213 pgmUnlock(pVM);
1214 return VINF_SUCCESS;
1215 }
1216 pgmUnlock(pVM);
1217 AssertMsgFailed(("The page %#x is outside the range %#x-%#x\n",
1218 GCPhysPage, pCur->Core.Key, pCur->Core.KeyLast));
1219 return VERR_INVALID_PARAMETER;
1220 }
1221 pgmUnlock(pVM);
1222
1223 AssertMsgFailed(("Specified physical handler start address %#x is invalid.\n", GCPhys));
1224 return VERR_PGM_HANDLER_NOT_FOUND;
1225}
1226
1227
1228/**
1229 * Checks if a physical range is handled.
1230 *
1231 * @returns boolean
1232 * @param pVM VM Handle.
1233 * @param GCPhys Start physical address earlier passed to PGMR3HandlerPhysicalRegister().
1234 * @remarks Caller must take the PGM lock...
1235 * @thread EMT.
1236 */
1237VMMDECL(bool) PGMHandlerPhysicalIsRegistered(PVM pVM, RTGCPHYS GCPhys)
1238{
1239 /*
1240 * Find the handler.
1241 */
1242 pgmLock(pVM);
1243 PPGMPHYSHANDLER pCur = pgmHandlerPhysicalLookup(pVM, GCPhys);
1244 if (pCur)
1245 {
1246 Assert(GCPhys >= pCur->Core.Key && GCPhys <= pCur->Core.KeyLast);
1247 Assert( pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_WRITE
1248 || pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_ALL
1249 || pCur->enmType == PGMPHYSHANDLERTYPE_MMIO);
1250 pgmUnlock(pVM);
1251 return true;
1252 }
1253 pgmUnlock(pVM);
1254 return false;
1255}
1256
1257
1258/**
1259 * Checks whether the disabled access handler at the given address is an
1260 * all-access handler or a write-access handler.
1261 *
1262 * @returns true if it's an all access handler, false if it's a write access
1263 * handler.
1264 * @param pVM Pointer to the shared VM structure.
1265 * @param GCPhys The address of the page with a disabled handler.
1266 *
1267 * @remarks The caller, PGMR3PhysTlbGCPhys2Ptr, must hold the PGM lock.
1268 */
1269bool pgmHandlerPhysicalIsAll(PVM pVM, RTGCPHYS GCPhys)
1270{
1271 pgmLock(pVM);
1272 PPGMPHYSHANDLER pCur = pgmHandlerPhysicalLookup(pVM, GCPhys);
1273 if (!pCur)
1274 {
1275 pgmUnlock(pVM);
1276 AssertFailed();
1277 return true;
1278 }
1279 Assert( pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_WRITE
1280 || pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_ALL
1281 || pCur->enmType == PGMPHYSHANDLERTYPE_MMIO); /* sanity */
1282 /* Only whole pages can be disabled. */
1283 Assert( pCur->Core.Key <= (GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK)
1284 && pCur->Core.KeyLast >= (GCPhys | PAGE_OFFSET_MASK));
1285
1286 bool bRet = pCur->enmType != PGMPHYSHANDLERTYPE_PHYSICAL_WRITE;
1287 pgmUnlock(pVM);
1288 return bRet;
1289}
1290
1291
1292/**
1293 * Checks if a particular guest virtual address is being monitored.
1294 *
1295 * @returns true or false
1296 * @param pVM VM handle.
1297 * @param GCPtr Virtual address.
1298 * @remarks Will acquire the PGM lock.
1299 * @thread Any.
1300 */
1301VMMDECL(bool) PGMHandlerVirtualIsRegistered(PVM pVM, RTGCPTR GCPtr)
1302{
1303 pgmLock(pVM);
1304 PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)RTAvlroGCPtrGet(&pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers, GCPtr);
1305 pgmUnlock(pVM);
1306
1307 return pCur != NULL;
1308}
1309
1310
1311/**
1312 * Search for virtual handler with matching physical address
1313 *
1314 * @returns VBox status code
1315 * @param pVM The VM handle.
1316 * @param GCPhys GC physical address to search for.
1317 * @param ppVirt Where to store the pointer to the virtual handler structure.
1318 * @param piPage Where to store the index of the cached physical page.
1319 */
1320int pgmHandlerVirtualFindByPhysAddr(PVM pVM, RTGCPHYS GCPhys, PPGMVIRTHANDLER *ppVirt, unsigned *piPage)
1321{
1322 STAM_PROFILE_START(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,VirtHandlerSearchByPhys), a); /* must reference the same sample as the STOPs below */
1323 Assert(ppVirt);
1324
1325 pgmLock(pVM);
1326 PPGMPHYS2VIRTHANDLER pCur;
1327 pCur = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers, GCPhys);
1328 if (pCur)
1329 {
1330 /* found a match! */
1331 *ppVirt = (PPGMVIRTHANDLER)((uintptr_t)pCur + pCur->offVirtHandler);
1332 *piPage = pCur - &(*ppVirt)->aPhysToVirt[0];
1333 pgmUnlock(pVM);
1334
1335#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
1336 AssertRelease(pCur->offNextAlias & PGMPHYS2VIRTHANDLER_IS_HEAD);
1337#endif
1338 LogFlow(("PHYS2VIRT: found match for %RGp -> %RGv *piPage=%#x\n", GCPhys, (*ppVirt)->Core.Key, *piPage));
1339 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,VirtHandlerSearchByPhys), a);
1340 return VINF_SUCCESS;
1341 }
1342
1343 pgmUnlock(pVM);
1344 *ppVirt = NULL;
1345 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,VirtHandlerSearchByPhys), a);
1346 return VERR_PGM_HANDLER_NOT_FOUND;
1347}
1348
1349
1350/**
1351 * Deal with aliases in phys2virt.
1352 *
1353 * As pointed out by the various todos, this currently only deals with
1354 * aliases where the two ranges match 100%.
1355 *
1356 * @param pVM The VM handle.
1357 * @param pPhys2Virt The node we failed to insert.
1358 */
1359static void pgmHandlerVirtualInsertAliased(PVM pVM, PPGMPHYS2VIRTHANDLER pPhys2Virt)
1360{
1361 /*
1362 * First find the node which is conflicting with us.
1363 */
1364 /** @todo Deal with partial overlapping. (Unlikely situation, so I'm too lazy to do anything about it now.) */
1365 /** @todo check if the current head node covers the ground we do. This is highly unlikely
1366 * and I'm too lazy to implement this now as it will require sorting the list and stuff like that. */
1367 PPGMPHYS2VIRTHANDLER pHead = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key);
1368#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
1369 AssertReleaseMsg(pHead != pPhys2Virt, ("%RGp-%RGp offVirtHandler=%#RX32\n",
1370 pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler));
1371#endif
1372 if (RT_UNLIKELY(!pHead || pHead->Core.KeyLast != pPhys2Virt->Core.KeyLast))
1373 {
1374 /** @todo do something clever here... */
1375 LogRel(("pgmHandlerVirtualInsertAliased: %RGp-%RGp\n", pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast));
1376 pPhys2Virt->offNextAlias = 0;
1377 return;
1378 }
1379
1380 /*
1381 * Insert ourselves as the next node.
1382 */
1383 if (!(pHead->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK))
1384 pPhys2Virt->offNextAlias = PGMPHYS2VIRTHANDLER_IN_TREE;
1385 else
1386 {
1387 PPGMPHYS2VIRTHANDLER pNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pHead + (pHead->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
1388 pPhys2Virt->offNextAlias = ((intptr_t)pNext - (intptr_t)pPhys2Virt)
1389 | PGMPHYS2VIRTHANDLER_IN_TREE;
1390 }
1391 pHead->offNextAlias = ((intptr_t)pPhys2Virt - (intptr_t)pHead)
1392 | (pHead->offNextAlias & ~PGMPHYS2VIRTHANDLER_OFF_MASK);
1393 Log(("pgmHandlerVirtualInsertAliased: %RGp-%RGp offNextAlias=%#RX32\n", pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias));
1394}
1395
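/*
 * Editorial note: the alias chain above is linked with self-relative offsets
 * rather than pointers, so the same nodes work in R3, R0 and RC. Walking the
 * chain looks roughly like this (sketch mirroring the expression used above):
 *
 *     PPGMPHYS2VIRTHANDLER pCur = pHead;
 *     while (pCur->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK)
 *         pCur = (PPGMPHYS2VIRTHANDLER)(  (intptr_t)pCur
 *                                       + (pCur->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
 */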
1396
1397/**
1398 * Resets one virtual handler range.
1399 *
1400 * This is called by HandlerVirtualUpdate when it has detected some kind of
1401 * problem and has started clearing the virtual handler page states (or
1402 * when there have been registrations/deregistrations). For this reason this
1403 * function will only update the page status if it's lower than desired.
1404 *
1405 * @returns 0
1406 * @param pNode Pointer to a PGMVIRTHANDLER.
1407 * @param pvUser The VM handle.
1408 */
1409DECLCALLBACK(int) pgmHandlerVirtualResetOne(PAVLROGCPTRNODECORE pNode, void *pvUser)
1410{
1411 PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)pNode;
1412 PVM pVM = (PVM)pvUser;
1413
1414 PGM_LOCK_ASSERT_OWNER(pVM);
1415
1416 /*
1417 * Iterate the pages and apply the new state.
1418 */
1419 unsigned uState = pgmHandlerVirtualCalcState(pCur);
1420 PPGMRAMRANGE pRamHint = NULL;
1421 RTGCUINTPTR offPage = ((RTGCUINTPTR)pCur->Core.Key & PAGE_OFFSET_MASK);
1422 RTGCUINTPTR cbLeft = pCur->cb;
1423 for (unsigned iPage = 0; iPage < pCur->cPages; iPage++)
1424 {
1425 PPGMPHYS2VIRTHANDLER pPhys2Virt = &pCur->aPhysToVirt[iPage];
1426 if (pPhys2Virt->Core.Key != NIL_RTGCPHYS)
1427 {
1428 /*
1429 * Update the page state wrt virtual handlers.
1430 */
1431 PPGMPAGE pPage;
1432 int rc = pgmPhysGetPageWithHintEx(pVM, pPhys2Virt->Core.Key, &pPage, &pRamHint);
1433 if ( RT_SUCCESS(rc)
1434 && PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) < uState)
1435 PGM_PAGE_SET_HNDL_VIRT_STATE(pPage, uState);
1436 else
1437 AssertRC(rc);
1438
1439 /*
1440 * Need to insert the page in the Phys2Virt lookup tree?
1441 */
1442 if (pPhys2Virt->Core.KeyLast == NIL_RTGCPHYS)
1443 {
1444#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
1445 AssertRelease(!pPhys2Virt->offNextAlias);
1446#endif
1447 unsigned cbPhys = cbLeft;
1448 if (cbPhys > PAGE_SIZE - offPage)
1449 cbPhys = PAGE_SIZE - offPage;
1450 else
1451 Assert(iPage == pCur->cPages - 1);
1452 pPhys2Virt->Core.KeyLast = pPhys2Virt->Core.Key + cbPhys - 1; /* inclusive */
1453 pPhys2Virt->offNextAlias = PGMPHYS2VIRTHANDLER_IS_HEAD | PGMPHYS2VIRTHANDLER_IN_TREE;
1454 if (!RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers, &pPhys2Virt->Core))
1455 pgmHandlerVirtualInsertAliased(pVM, pPhys2Virt);
1456#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
1457 else
1458 AssertReleaseMsg(RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key) == &pPhys2Virt->Core,
1459 ("%RGp-%RGp offNextAlias=%#RX32\n",
1460 pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias));
1461#endif
1462 Log2(("PHYS2VIRT: Insert physical range %RGp-%RGp offNextAlias=%#RX32 %s\n",
1463 pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias, R3STRING(pCur->pszDesc)));
1464 }
1465 }
1466 cbLeft -= PAGE_SIZE - offPage;
1467 offPage = 0;
1468 }
1469
1470 return 0;
1471}
1472
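/*
 * Worked example for the cbPhys/KeyLast computation above (editorial,
 * made-up numbers, 4K pages): a virtual handler at GCPtr 0x8000c00 with
 * cb=0x1900 spans three pages. The first phys2virt node covers
 * cbPhys = PAGE_SIZE - offPage = 0x400 bytes, the second a full 0x1000, and
 * the last the remaining 0x500; each node's KeyLast is its Key + cbPhys - 1
 * (inclusive).
 */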
1473#if defined(VBOX_STRICT) || defined(LOG_ENABLED)
1474
1475/**
1476 * Worker for pgmHandlerVirtualDumpPhysPages.
1477 *
1478 * @returns 0 (continue enumeration).
1479 * @param pNode The virtual handler node.
1480 * @param pvUser User argument, unused.
1481 */
1482static DECLCALLBACK(int) pgmHandlerVirtualDumpPhysPagesCallback(PAVLROGCPHYSNODECORE pNode, void *pvUser)
1483{
1484 PPGMPHYS2VIRTHANDLER pCur = (PPGMPHYS2VIRTHANDLER)pNode;
1485 PPGMVIRTHANDLER pVirt = (PPGMVIRTHANDLER)((uintptr_t)pCur + pCur->offVirtHandler);
1486 Log(("PHYS2VIRT: Range %RGp-%RGp for virtual handler: %s\n", pCur->Core.Key, pCur->Core.KeyLast, pVirt->pszDesc));
1487 return 0;
1488}
1489
1490
1491/**
1492 * Assertion / logging helper for dumping all the
1493 * virtual handlers to the log.
1494 *
1495 * @param pVM Pointer to the shared VM structure.
1496 */
1497void pgmHandlerVirtualDumpPhysPages(PVM pVM)
1498{
1499 RTAvlroGCPhysDoWithAll(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers, true /* from left */,
1500 pgmHandlerVirtualDumpPhysPagesCallback, 0);
1501}
1502
1503#endif /* VBOX_STRICT || LOG_ENABLED */
1504#ifdef VBOX_STRICT
1505
1506/**
1507 * State structure used by the PGMAssertHandlerAndFlagsInSync() function
1508 * and its AVL enumerators.
1509 */
1510typedef struct PGMAHAFIS
1511{
1512 /** The current physical address. */
1513 RTGCPHYS GCPhys;
1514 /** The state we've calculated. */
1515 unsigned uVirtStateFound;
1516 /** The state we're matching up to. */
1517 unsigned uVirtState;
1518 /** Number of errors. */
1519 unsigned cErrors;
1520 /** The VM handle. */
1521 PVM pVM;
1522} PGMAHAFIS, *PPGMAHAFIS;
1523
1524
1525#if 0 /* unused */
1526/**
1527 * Verify virtual handler by matching physical address.
1528 *
1529 * @returns 0
1530 * @param pNode Pointer to a PGMVIRTHANDLER.
1531 * @param pvUser Pointer to user parameter.
1532 */
1533static DECLCALLBACK(int) pgmHandlerVirtualVerifyOneByPhysAddr(PAVLROGCPTRNODECORE pNode, void *pvUser)
1534{
1535 PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)pNode;
1536 PPGMAHAFIS pState = (PPGMAHAFIS)pvUser;
1537
1538 for (unsigned iPage = 0; iPage < pCur->cPages; iPage++)
1539 {
1540 if ((pCur->aPhysToVirt[iPage].Core.Key & X86_PTE_PAE_PG_MASK) == pState->GCPhys)
1541 {
1542 unsigned uState = pgmHandlerVirtualCalcState(pCur);
1543 if (pState->uVirtState < uState)
1544 {
1545 error
1546 }
1547
1548 if (pState->uVirtState == uState)
1549 break; //??
1550 }
1551 }
1552 return 0;
1553}
1554#endif /* unused */
1555
1556
1557/**
1558 * Verify a virtual handler (enumeration callback).
1559 *
1560 * Called by PGMAssertHandlerAndFlagsInSync to check the sanity of all
1561 * the virtual handlers, esp. that the physical addresses match up.
1562 *
1563 * @returns 0
1564 * @param pNode Pointer to a PGMVIRTHANDLER.
1565 * @param pvUser Pointer to a PPGMAHAFIS structure.
1566 */
static DECLCALLBACK(int) pgmHandlerVirtualVerifyOne(PAVLROGCPTRNODECORE pNode, void *pvUser)
{
    PPGMVIRTHANDLER pVirt  = (PPGMVIRTHANDLER)pNode;
    PPGMAHAFIS      pState = (PPGMAHAFIS)pvUser;
    PVM             pVM    = pState->pVM;

    /*
     * Validate the type and calc state.
     */
    switch (pVirt->enmType)
    {
        case PGMVIRTHANDLERTYPE_WRITE:
        case PGMVIRTHANDLERTYPE_ALL:
            break;
        default:
            AssertMsgFailed(("unknown/wrong enmType=%d\n", pVirt->enmType));
            pState->cErrors++;
            return 0;
    }
    const unsigned uState = pgmHandlerVirtualCalcState(pVirt);

    /*
     * Check key alignment.
     */
    if (    (pVirt->aPhysToVirt[0].Core.Key & PAGE_OFFSET_MASK) != ((RTGCUINTPTR)pVirt->Core.Key & PAGE_OFFSET_MASK)
        &&  pVirt->aPhysToVirt[0].Core.Key != NIL_RTGCPHYS)
    {
        AssertMsgFailed(("virt handler phys has incorrect key! %RGp %RGv %s\n",
                         pVirt->aPhysToVirt[0].Core.Key, pVirt->Core.Key, R3STRING(pVirt->pszDesc)));
        pState->cErrors++;
    }

    if (    (pVirt->aPhysToVirt[pVirt->cPages - 1].Core.KeyLast & PAGE_OFFSET_MASK) != ((RTGCUINTPTR)pVirt->Core.KeyLast & PAGE_OFFSET_MASK)
        &&  pVirt->aPhysToVirt[pVirt->cPages - 1].Core.Key != NIL_RTGCPHYS)
    {
        AssertMsgFailed(("virt handler phys has incorrect key! %RGp %RGv %s\n",
                         pVirt->aPhysToVirt[pVirt->cPages - 1].Core.KeyLast, pVirt->Core.KeyLast, R3STRING(pVirt->pszDesc)));
        pState->cErrors++;
    }

    /*
     * Check pages for sanity and state.
     */
    RTGCUINTPTR GCPtr = (RTGCUINTPTR)pVirt->Core.Key;
    for (unsigned iPage = 0; iPage < pVirt->cPages; iPage++, GCPtr += PAGE_SIZE)
    {
        for (VMCPUID i = 0; i < pVM->cCpus; i++)
        {
            PVMCPU pVCpu = &pVM->aCpus[i];

            RTGCPHYS GCPhysGst;
            uint64_t fGst;
            int rc = PGMGstGetPage(pVCpu, (RTGCPTR)GCPtr, &fGst, &GCPhysGst);
            if (    rc == VERR_PAGE_NOT_PRESENT
                ||  rc == VERR_PAGE_TABLE_NOT_PRESENT)
            {
                if (pVirt->aPhysToVirt[iPage].Core.Key != NIL_RTGCPHYS)
                {
                    AssertMsgFailed(("virt handler phys out of sync. %RGp GCPhysNew=~0 iPage=%#x %RGv %s\n",
                                     pVirt->aPhysToVirt[iPage].Core.Key, iPage, GCPtr, R3STRING(pVirt->pszDesc)));
                    pState->cErrors++;
                }
                continue;
            }

            AssertRCReturn(rc, 0);
            if ((pVirt->aPhysToVirt[iPage].Core.Key & X86_PTE_PAE_PG_MASK) != GCPhysGst)
            {
                AssertMsgFailed(("virt handler phys out of sync. %RGp GCPhysGst=%RGp iPage=%#x %RGv %s\n",
                                 pVirt->aPhysToVirt[iPage].Core.Key, GCPhysGst, iPage, GCPtr, R3STRING(pVirt->pszDesc)));
                pState->cErrors++;
                continue;
            }

            PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhysGst);
            if (!pPage)
            {
                AssertMsgFailed(("virt handler getting ram flags. GCPhysGst=%RGp iPage=%#x %RGv %s\n",
                                 GCPhysGst, iPage, GCPtr, R3STRING(pVirt->pszDesc)));
                pState->cErrors++;
                continue;
            }

            if (PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) < uState)
            {
                AssertMsgFailed(("virt handler state mismatch. pPage=%R[pgmpage] GCPhysGst=%RGp iPage=%#x %RGv state=%d expected>=%d %s\n",
                                 pPage, GCPhysGst, iPage, GCPtr, PGM_PAGE_GET_HNDL_VIRT_STATE(pPage), uState, R3STRING(pVirt->pszDesc)));
                pState->cErrors++;
                continue;
            }
        } /* for each VCPU */
    } /* for pages in virtual mapping. */

    return 0;
}
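
/*
 * Illustrative sketch only (guarded out, never compiled): the core per-page
 * check above reduces to this pattern -- translate the guest-virtual address
 * and compare it against the cached physical key.  A not-present status from
 * PGMGstGetPage is a normal outcome, in which case the key must be NIL.
 */
#if 0 /* example only */
    RTGCPHYS GCPhysGst;
    uint64_t fGst;
    int rc = PGMGstGetPage(pVCpu, (RTGCPTR)GCPtr, &fGst, &GCPhysGst);
    if (RT_SUCCESS(rc))
        /* Mapped: the cached key must point at the same guest-physical page. */
        Assert((pVirt->aPhysToVirt[iPage].Core.Key & X86_PTE_PAE_PG_MASK) == GCPhysGst);
    else
        /* Not mapped: the cache entry must have been invalidated. */
        Assert(pVirt->aPhysToVirt[iPage].Core.Key == NIL_RTGCPHYS);
#endif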


/**
 * Asserts that the handlers+guest-page-tables == ramrange-flags and
 * that the physical addresses associated with virtual handlers are correct.
 *
 * @returns Number of mismatches.
 * @param   pVM     The VM handle.
 */
VMMDECL(unsigned) PGMAssertHandlerAndFlagsInSync(PVM pVM)
{
    PPGM        pPGM = &pVM->pgm.s;
    PGMAHAFIS   State;
    State.GCPhys = 0;
    State.uVirtState = 0;
    State.uVirtStateFound = 0;
    State.cErrors = 0;
    State.pVM = pVM;

    PGM_LOCK_ASSERT_OWNER(pVM);

    /*
     * Check the RAM flags against the handlers.
     */
    for (PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRangesX); pRam; pRam = pRam->CTX_SUFF(pNext))
    {
        const uint32_t cPages = pRam->cb >> PAGE_SHIFT;
        for (uint32_t iPage = 0; iPage < cPages; iPage++)
        {
            PGMPAGE const *pPage = &pRam->aPages[iPage];
            if (PGM_PAGE_HAS_ANY_HANDLERS(pPage))
            {
                State.GCPhys = pRam->GCPhys + (iPage << PAGE_SHIFT);

                /*
                 * Physical first - calculate the state based on the handlers
                 * active on the page, then compare.
                 */
                if (PGM_PAGE_HAS_ANY_PHYSICAL_HANDLERS(pPage))
                {
                    /* The first handler covering the page. */
                    PPGMPHYSHANDLER pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pPGM->CTX_SUFF(pTrees)->PhysHandlers, State.GCPhys);
                    if (!pPhys)
                    {
                        /* No range contains the page start; look for one starting within the page. */
                        pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pPGM->CTX_SUFF(pTrees)->PhysHandlers, State.GCPhys, true);
                        if (    pPhys
                            &&  pPhys->Core.Key > (State.GCPhys + PAGE_SIZE - 1))
                            pPhys = NULL;
                        Assert(!pPhys || pPhys->Core.Key >= State.GCPhys);
                    }
                    if (pPhys)
                    {
                        unsigned uState = pgmHandlerPhysicalCalcState(pPhys);

                        /* More handlers on the same page? Accumulate the maximum state. */
                        while (pPhys->Core.KeyLast < (State.GCPhys | PAGE_OFFSET_MASK))
                        {
                            PPGMPHYSHANDLER pPhys2 = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pPGM->CTX_SUFF(pTrees)->PhysHandlers,
                                                                                              pPhys->Core.KeyLast + 1, true);
                            if (    !pPhys2
                                ||  pPhys2->Core.Key > (State.GCPhys | PAGE_OFFSET_MASK))
                                break;
                            unsigned uState2 = pgmHandlerPhysicalCalcState(pPhys2);
                            uState = RT_MAX(uState, uState2);
                            pPhys = pPhys2;
                        }

                        /* Compare the calculated state with the one recorded in the page. */
                        if (    PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != uState
                            &&  PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_DISABLED)
                        {
                            AssertMsgFailed(("ram range vs phys handler flags mismatch. GCPhys=%RGp state=%d expected=%d %s\n",
                                             State.GCPhys, PGM_PAGE_GET_HNDL_PHYS_STATE(pPage), uState, pPhys->pszDesc));
                            State.cErrors++;
                        }

#ifdef IN_RING3
                        /* Validate that REM is handling it. */
                        if (    !REMR3IsPageAccessHandled(pVM, State.GCPhys)
                            /* ignore shadowed ROM for the time being. */
                            &&  PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_ROM_SHADOW)
                        {
                            AssertMsgFailed(("ram range vs phys handler REM mismatch. GCPhys=%RGp state=%d %s\n",
                                             State.GCPhys, PGM_PAGE_GET_HNDL_PHYS_STATE(pPage), pPhys->pszDesc));
                            State.cErrors++;
                        }
#endif
                    }
                    else
                    {
                        AssertMsgFailed(("ram range vs phys handler mismatch. no handler for GCPhys=%RGp\n", State.GCPhys));
                        State.cErrors++;
                    }
                }

                /*
                 * Virtual handlers.
                 */
                if (PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage))
                {
                    State.uVirtState = PGM_PAGE_GET_HNDL_VIRT_STATE(pPage);
#if 1
                    /* Locate all the matching physical ranges. */
                    State.uVirtStateFound = PGM_PAGE_HNDL_VIRT_STATE_NONE;
                    RTGCPHYS GCPhysKey = State.GCPhys;
                    for (;;)
                    {
                        PPGMPHYS2VIRTHANDLER pPhys2Virt = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers,
                                                                                                        GCPhysKey, true /* above-or-equal */);
                        if (    !pPhys2Virt
                            ||  (pPhys2Virt->Core.Key & X86_PTE_PAE_PG_MASK) != State.GCPhys)
                            break;

                        /* The head of the alias chain. */
                        GCPhysKey = pPhys2Virt->Core.KeyLast;
                        PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)((uintptr_t)pPhys2Virt + pPhys2Virt->offVirtHandler);
                        unsigned uState = pgmHandlerVirtualCalcState(pCur);
                        State.uVirtStateFound = RT_MAX(State.uVirtStateFound, uState);

                        /* Walk any aliases, accumulating the maximum state. */
                        while (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK)
                        {
                            pPhys2Virt = (PPGMPHYS2VIRTHANDLER)((uintptr_t)pPhys2Virt + (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
                            pCur = (PPGMVIRTHANDLER)((uintptr_t)pPhys2Virt + pPhys2Virt->offVirtHandler);
                            uState = pgmHandlerVirtualCalcState(pCur);
                            State.uVirtStateFound = RT_MAX(State.uVirtStateFound, uState);
                        }

                        /* Done with this page? */
                        if ((GCPhysKey & X86_PTE_PAE_PG_MASK) != State.GCPhys)
                            break;
                    }
#else
                    /* very slow */
                    RTAvlroGCPtrDoWithAll(&pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers, true, pgmHandlerVirtualVerifyOneByPhysAddr, &State);
#endif
                    if (State.uVirtState != State.uVirtStateFound)
                    {
                        AssertMsgFailed(("ram range vs virt handler flags mismatch. GCPhys=%RGp uVirtState=%#x uVirtStateFound=%#x\n",
                                         State.GCPhys, State.uVirtState, State.uVirtStateFound));
                        State.cErrors++;
                    }
                }
            }
        } /* foreach page in ram range. */
    } /* foreach ram range. */

    /*
     * Check that the physical addresses of the virtual handlers match up
     * and that they are otherwise sane.
     */
    RTAvlroGCPtrDoWithAll(&pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers, true, pgmHandlerVirtualVerifyOne, &State);

    /*
     * Do the reverse check for physical handlers.
     */
    /** @todo */

    return State.cErrors;
}
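
/*
 * Illustrative sketch only (guarded out, never compiled): how a strict build
 * might consume the assertion above.  The caller must own the PGM lock (see
 * PGM_LOCK_ASSERT_OWNER); the pgmLock/pgmUnlock pairing shown here is an
 * assumption about the call site, not something this file mandates.
 */
#if 0 /* example only */
    pgmLock(pVM);
    unsigned cErrors = PGMAssertHandlerAndFlagsInSync(pVM);
    pgmUnlock(pVM);
    AssertMsg(!cErrors, ("%u handler/flag mismatches detected\n", cErrors));
#endif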

#endif /* VBOX_STRICT */