VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp@ 6902

Last change on this file since 6902 was 6902, checked in by vboxsync, 17 years ago

Converted MM_RAM_FLAGS_PHYSICAL_HANDLER, _WRITE, _ALL and _TEMP_OFF into
a 2-bit state field (u2HandlerPhysStateX). I've triple-checked this change,
but if I overlooked something real odd stuff might happen...

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 37.1 KB
Line 
1/* $Id: PGMAllHandler.cpp 6902 2008-02-11 16:51:52Z vboxsync $ */
2/** @file
3 * PGM - Page Manager / Monitor, Access Handlers.
4 */
5
6/*
7 * Copyright (C) 2006-2007 innotek GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_PGM
23#include <VBox/dbgf.h>
24#include <VBox/pgm.h>
25#include <VBox/iom.h>
26#include <VBox/mm.h>
27#include <VBox/em.h>
28#include <VBox/stam.h>
29#include <VBox/rem.h>
30#include <VBox/dbgf.h>
31#include <VBox/rem.h>
32#include "PGMInternal.h"
33#include <VBox/vm.h>
34
35#include <VBox/log.h>
36#include <iprt/assert.h>
37#include <iprt/asm.h>
38#include <iprt/string.h>
39#include <VBox/param.h>
40#include <VBox/err.h>
41#include <VBox/selm.h>
42
43
44/*******************************************************************************
45* Internal Functions *
46*******************************************************************************/
47static int pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVM pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam);
48static void pgmHandlerPhysicalDeregisterNotifyREM(PVM pVM, PPGMPHYSHANDLER pCur);
49static void pgmHandlerPhysicalResetRamFlags(PVM pVM, PPGMPHYSHANDLER pCur);
50
51
52
/**
 * Register an access handler for a physical range.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS when successfully installed.
 * @retval  VINF_PGM_GCPHYS_ALIASED when the shadow PTs couldn't be updated because
 *          the guest page aliased or/and mapped by multiple PTs. A CR3 sync has been
 *          flagged together with a pool clearing.
 * @retval  VERR_PGM_HANDLER_PHYSICAL_CONFLICT if the range conflicts with an existing
 *          one. A debug assertion is raised.
 *
 * @param   pVM             VM Handle.
 * @param   enmType         Handler type. Any of the PGMPHYSHANDLERTYPE_PHYSICAL* enums.
 * @param   GCPhys          Start physical address.
 * @param   GCPhysLast      Last physical address. (inclusive)
 * @param   pfnHandlerR3    The R3 handler.
 * @param   pvUserR3        User argument to the R3 handler.
 * @param   pfnHandlerR0    The R0 handler.
 * @param   pvUserR0        User argument to the R0 handler.
 * @param   pfnHandlerGC    The GC handler.
 * @param   pvUserGC        User argument to the GC handler.
 *                          This must be a GC pointer because it will be relocated!
 * @param   pszDesc         Pointer to description string. This must not be freed.
 */
PGMDECL(int) PGMHandlerPhysicalRegisterEx(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast,
                                          R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnHandlerR3, RTR3PTR pvUserR3,
                                          R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnHandlerR0, RTR0PTR pvUserR0,
                                          GCPTRTYPE(PFNPGMGCPHYSHANDLER) pfnHandlerGC, RTGCPTR pvUserGC,
                                          R3PTRTYPE(const char *) pszDesc)
{
    Log(("PGMHandlerPhysicalRegisterEx: enmType=%d GCPhys=%VGp GCPhysLast=%VGp pfnHandlerR3=%VHv pvUserR3=%VHv pfnHandlerR0=%VHv pvUserR0=%VHv pfnHandlerGC=%VGv pvUserGC=%VGv pszDesc=%s\n",
         enmType, GCPhys, GCPhysLast, pfnHandlerR3, pvUserR3, pfnHandlerR0, pvUserR0, pfnHandlerGC, pvUserGC, HCSTRING(pszDesc)));

    /*
     * Validate input.
     */
    /* Note: GCPhysLast is inclusive, so a valid range always has GCPhys < GCPhysLast. */
    if (GCPhys >= GCPhysLast)
    {
        AssertMsgFailed(("GCPhys >= GCPhysLast (%#x >= %#x)\n", GCPhys, GCPhysLast));
        return VERR_INVALID_PARAMETER;
    }
    switch (enmType)
    {
        case PGMPHYSHANDLERTYPE_MMIO:
        case PGMPHYSHANDLERTYPE_PHYSICAL_WRITE:
        case PGMPHYSHANDLERTYPE_PHYSICAL_ALL:
            break;
        default:
            AssertMsgFailed(("Invalid input enmType=%d!\n", enmType));
            return VERR_INVALID_PARAMETER;
    }
    /* pvUserGC must round-trip through the hypervisor HC<->GC mapping, otherwise it
       cannot be relocated together with the hypervisor area. Values below 0x10000 are
       treated as plain cookies/indexes and skip the check. */
    if (    (RTGCUINTPTR)pvUserGC >= 0x10000
        &&  MMHyperHC2GC(pVM, MMHyperGC2HC(pVM, pvUserGC)) != pvUserGC)
    {
        AssertMsgFailed(("Not GC pointer! pvUserGC=%VGv\n", pvUserGC));
        return VERR_INVALID_PARAMETER;
    }
    AssertReturn(pfnHandlerR3 || pfnHandlerR0 || pfnHandlerGC, VERR_INVALID_PARAMETER);

    /*
     * We require the range to be within registered ram.
     * There is no apparent need to support ranges which cover more than one ram range.
     */
    PPGMRAMRANGE pRam = CTXALLSUFF(pVM->pgm.s.pRamRanges);
    while (pRam && GCPhys > pRam->GCPhysLast)
        pRam = CTXALLSUFF(pRam->pNext);
    if (    !pRam
        ||  GCPhysLast < pRam->GCPhys
        ||  GCPhys > pRam->GCPhysLast)
    {
#ifdef IN_RING3
        /*
         * If this is an MMIO registration, we'll just add a range for it.
         */
        if (    enmType == PGMPHYSHANDLERTYPE_MMIO
            &&  (   !pRam
                 || GCPhysLast < pRam->GCPhys)
           )
        {
            size_t cb = GCPhysLast - GCPhys + 1;
            Assert(cb == RT_ALIGN_Z(cb, PAGE_SIZE));
            int rc = PGMR3PhysRegister(pVM, NULL, GCPhys, cb, MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO, NULL, pszDesc);
            if (VBOX_FAILURE(rc))
                return rc;

            /* search again. */
            pRam = CTXALLSUFF(pVM->pgm.s.pRamRanges);
            while (pRam && GCPhys > pRam->GCPhysLast)
                pRam = CTXALLSUFF(pRam->pNext);
        }

        if (    !pRam
            ||  GCPhysLast < pRam->GCPhys
            ||  GCPhys > pRam->GCPhysLast)
#endif /* IN_RING3 */
        {
#ifdef IN_RING3
            DBGFR3Info(pVM, "phys", NULL, NULL);
#endif
            AssertMsgFailed(("No RAM range for %VGp-%VGp\n", GCPhys, GCPhysLast));
            return VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
        }
    }

    /*
     * Allocate and initialize the new entry.
     */
    PPGMPHYSHANDLER pNew;
    int rc = MMHyperAlloc(pVM, sizeof(*pNew), 0, MM_TAG_PGM_HANDLERS, (void **)&pNew);
    if (VBOX_FAILURE(rc))
        return rc;

    pNew->Core.Key      = GCPhys;
    pNew->Core.KeyLast  = GCPhysLast;
    pNew->enmType       = enmType;
    /* Number of pages covered; KeyLast is inclusive so the +PAGE_SIZE rounds up. */
    pNew->cPages        = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
    pNew->pfnHandlerR3  = pfnHandlerR3;
    pNew->pvUserR3      = pvUserR3;
    pNew->pfnHandlerR0  = pfnHandlerR0;
    pNew->pvUserR0      = pvUserR0;
    pNew->pfnHandlerGC  = pfnHandlerGC;
    pNew->pvUserGC      = pvUserGC;
    pNew->pszDesc       = pszDesc;

    pgmLock(pVM);

    /*
     * Try insert into list.
     */
    if (RTAvlroGCPhysInsert(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, &pNew->Core))
    {
        rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pNew, pRam);
        if (rc == VINF_PGM_GCPHYS_ALIASED)
        {
            /* Couldn't flush selectively; clear the whole pool and resync CR3. */
            pVM->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
            VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
        }
        pVM->pgm.s.fPhysCacheFlushPending = true;
#ifndef IN_RING3
        REMNotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, GCPhysLast - GCPhys + 1, !!pfnHandlerR3);
#else
        REMR3NotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, GCPhysLast - GCPhys + 1, !!pfnHandlerR3);
#endif
        pgmUnlock(pVM);
        if (rc != VINF_SUCCESS)
            Log(("PGMHandlerPhysicalRegisterEx: returns %Vrc (%VGp-%VGp)\n", rc, GCPhys, GCPhysLast));
        return rc;
    }

    pgmUnlock(pVM);

#if defined(IN_RING3) && defined(VBOX_STRICT)
    DBGFR3Info(pVM, "handlers", "phys nostats", NULL);
#endif
    AssertMsgFailed(("Conflict! GCPhys=%VGp GCPhysLast=%VGp pszDesc=%s\n", GCPhys, GCPhysLast, pszDesc));
    MMHyperFree(pVM, pNew);
    return VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
}
211
212
/**
 * Sets ram range flags and attempts updating shadow PTs.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS when shadow PTs were successfully updated.
 * @retval  VINF_PGM_GCPHYS_ALIASED when the shadow PTs couldn't be updated because
 *          the guest page aliased or/and mapped by multiple PTs.
 * @param   pVM     The VM handle.
 * @param   pCur    The physical handler.
 * @param   pRam    The RAM range.  Caller must ensure it covers pCur's range.
 */
static int pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVM pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam)
{
    /*
     * Iterate the guest ram pages updating the flags and flushing PT entries
     * mapping the page.
     */
    bool fFlushTLBs = false;
#if defined(PGMPOOL_WITH_GCPHYS_TRACKING) || defined(PGMPOOL_WITH_CACHE)
    int rc = VINF_SUCCESS;
#else
    /* Without GCPhys tracking/caching we cannot flush individual shadow PT
       entries, so force the caller onto the full CR3 resync path. */
    const int rc = VINF_PGM_GCPHYS_ALIASED;
#endif
    const unsigned uState = pgmHandlerPhysicalCalcState(pCur);
    RTUINT cPages = pCur->cPages;
    RTUINT i = (pCur->Core.Key - pRam->GCPhys) >> PAGE_SHIFT;
    for (;;)
    {
        /* Physical chunk in dynamically allocated range not present? */
        if (RT_UNLIKELY(!PGM_PAGE_GET_HCPHYS(&pRam->aPages[i])))
        {
            RTGCPHYS GCPhys = pRam->GCPhys + (i << PAGE_SHIFT);
#ifdef IN_RING3
            int rc2 = pgmr3PhysGrowRange(pVM, GCPhys);
#else
            /* Not in ring-3: ask the host (ring-3) to grow the range for us. */
            int rc2 = CTXALLMID(VMM, CallHost)(pVM, VMMCALLHOST_PGM_RAM_GROW_RANGE, GCPhys);
#endif
            if (rc2 != VINF_SUCCESS)
                return rc2;
        }

        /* Only do upgrades. */
        PPGMPAGE pPage = &pRam->aPages[i];
        if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) < uState)
        {
            PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, uState);
            Assert(PGM_PAGE_GET_HCPHYS(pPage));

#ifdef PGMPOOL_WITH_GCPHYS_TRACKING
            /* This code also makes ASSUMPTIONS about the cRefs and stuff. */
            Assert(MM_RAM_FLAGS_IDX_SHIFT < MM_RAM_FLAGS_CREFS_SHIFT);
            const uint16_t u16 = pRam->aPages[i].HCPhys >> MM_RAM_FLAGS_IDX_SHIFT; /** @todo PAGE FLAGS */
            if (u16)
            {
                /* u16 packs a cRefs field and a page-pool index; dispatch on how many
                   shadow PTs reference this guest page: one, several, or "overflowed"
                   (too many to track — flush the slow way). */
                if ((u16 >> (MM_RAM_FLAGS_CREFS_SHIFT - MM_RAM_FLAGS_IDX_SHIFT)) != MM_RAM_FLAGS_CREFS_PHYSEXT)
                    pgmPoolTrackFlushGCPhysPT(pVM,
                                              pPage,
                                              u16 & MM_RAM_FLAGS_IDX_MASK,
                                              u16 >> (MM_RAM_FLAGS_CREFS_SHIFT - MM_RAM_FLAGS_IDX_SHIFT));
                else if (u16 != ((MM_RAM_FLAGS_CREFS_PHYSEXT << (MM_RAM_FLAGS_CREFS_SHIFT - MM_RAM_FLAGS_IDX_SHIFT)) | MM_RAM_FLAGS_IDX_OVERFLOWED))
                    pgmPoolTrackFlushGCPhysPTs(pVM, pPage, u16 & MM_RAM_FLAGS_IDX_MASK);
                else
                    rc = pgmPoolTrackFlushGCPhysPTsSlow(pVM, pPage);
                fFlushTLBs = true;
            }
#elif defined(PGMPOOL_WITH_CACHE)
            rc = pgmPoolTrackFlushGCPhysPTsSlow(pVM, pPage);
            fFlushTLBs = true;
#endif
        }

        /* next */
        if (--cPages == 0)
            break;
        i++;
    }

    /* Only flush the guest TLBs when all PT entries were flushed successfully;
       on VINF_PGM_GCPHYS_ALIASED the caller does a full sync anyway. */
    if (fFlushTLBs && rc == VINF_SUCCESS)
    {
        PGM_INVL_GUEST_TLBS();
        Log(("pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs: flushing guest TLBs\n"));
    }
    else
        Log(("pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs: doesn't flush guest TLBs. rc=%Vrc\n", rc));
    return rc;
}
299
300
301/**
302 * Register a physical page access handler.
303 *
304 * @returns VBox status code.
305 * @param pVM VM Handle.
306 * @param GCPhys Start physical address.
307 */
308PGMDECL(int) PGMHandlerPhysicalDeregister(PVM pVM, RTGCPHYS GCPhys)
309{
310 /*
311 * Find the handler.
312 */
313 pgmLock(pVM);
314 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys);
315 if (pCur)
316 {
317 LogFlow(("PGMHandlerPhysicalDeregister: Removing Range %#VGp-%#VGp %s\n",
318 pCur->Core.Key, pCur->Core.KeyLast, HCSTRING(pCur->pszDesc)));
319
320 /*
321 * Clear the page bits and notify the REM about this change.
322 */
323 pgmHandlerPhysicalResetRamFlags(pVM, pCur);
324 pgmHandlerPhysicalDeregisterNotifyREM(pVM, pCur);
325 pgmUnlock(pVM);
326 MMHyperFree(pVM, pCur);
327 return VINF_SUCCESS;
328 }
329 pgmUnlock(pVM);
330
331 AssertMsgFailed(("Didn't find range starting at %VGp\n", GCPhys));
332 return VERR_PGM_HANDLER_NOT_FOUND;
333}
334
335
/**
 * Shared code with modify.
 *
 * Notifies REM that a physical handler range is gone, shrinking the reported
 * range so that edge pages still covered by other handlers are excluded.
 *
 * @param   pVM     The VM handle.
 * @param   pCur    The handler being deregistered (already out of the tree,
 *                  and pgmHandlerPhysicalResetRamFlags() has already run).
 */
static void pgmHandlerPhysicalDeregisterNotifyREM(PVM pVM, PPGMPHYSHANDLER pCur)
{
    RTGCPHYS GCPhysStart = pCur->Core.Key;
    RTGCPHYS GCPhysLast = pCur->Core.KeyLast;

    /*
     * Page align the range.
     *
     * Since we've reset (recalculated) the physical handler state of all pages
     * we can make use of the page states to figure out whether a page should be
     * included in the REM notification or not.
     */
    if (    (pCur->Core.Key & PAGE_OFFSET_MASK)
        ||  ((pCur->Core.KeyLast + 1) & PAGE_OFFSET_MASK))
    {
        Assert(pCur->enmType != PGMPHYSHANDLERTYPE_MMIO);

        if (GCPhysStart & PAGE_OFFSET_MASK)
        {
            PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhysStart);
            if (    pPage
                &&  PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
            {
                /* First page still has another handler: round the start up to the
                   next page boundary so REM keeps monitoring it. */
                RTGCPHYS GCPhys = (GCPhysStart + (PAGE_SIZE - 1)) & X86_PTE_PAE_PG_MASK;
                if (    GCPhys > GCPhysLast
                    ||  GCPhys < GCPhysStart)
                    return;     /* the whole range was within that one shared page — nothing to tell REM. */
                GCPhysStart = GCPhys;
            }
            else
                GCPhysStart &= X86_PTE_PAE_PG_MASK;
            Assert(!pPage || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO); /* these are page aligned atm! */
        }

        if (GCPhysLast & PAGE_OFFSET_MASK)
        {
            PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhysLast);
            if (    pPage
                &&  PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
            {
                /* Last page still has another handler: pull the end back to the
                   last byte of the previous page. */
                RTGCPHYS GCPhys = (GCPhysLast & X86_PTE_PAE_PG_MASK) - 1;
                if (    GCPhys < GCPhysStart
                    ||  GCPhys > GCPhysLast)
                    return;     /* nothing left to report. */
                GCPhysLast = GCPhys;
            }
            else
                GCPhysLast |= PAGE_OFFSET_MASK;
            Assert(!pPage || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO); /* these are page aligned atm! */
        }
    }

    /*
     * Tell REM.
     */
    const bool fRestoreAsRAM = pCur->pfnHandlerR3
                            && pCur->enmType != PGMPHYSHANDLERTYPE_MMIO; /** @todo this isn't entirely correct. */
#ifndef IN_RING3
    REMNotifyHandlerPhysicalDeregister(pVM, pCur->enmType, GCPhysStart, GCPhysLast - GCPhysStart + 1, !!pCur->pfnHandlerR3, fRestoreAsRAM);
#else
    REMR3NotifyHandlerPhysicalDeregister(pVM, pCur->enmType, GCPhysStart, GCPhysLast - GCPhysStart + 1, !!pCur->pfnHandlerR3, fRestoreAsRAM);
#endif
}
402
403
/**
 * pgmHandlerPhysicalResetRamFlags helper that checks for
 * other handlers on edge pages.
 *
 * After a handler is removed its edge pages may still be partially covered by
 * neighbouring handlers; this walks those neighbours and restores the highest
 * handler state found for the page containing GCPhys.
 *
 * @param   pPGM        Pointer to the PGM instance data.
 * @param   GCPhys      Address on the edge page to recalculate.
 * @param   fAbove      true to scan handlers above GCPhys, false to scan below.
 * @param   ppRamHint   In/out RAM range lookup hint.
 */
DECLINLINE(void) pgmHandlerPhysicalRecalcPageState(PPGM pPGM, RTGCPHYS GCPhys, bool fAbove, PPGMRAMRANGE *ppRamHint)
{
    /*
     * Look for other handlers.
     */
    unsigned uState = PGM_PAGE_HNDL_PHYS_STATE_NONE;
    for (;;)
    {
        /* Best-fit lookup in the chosen direction; stop as soon as the nearest
           handler no longer touches the same page as GCPhys. */
        PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pPGM->CTXSUFF(pTrees)->PhysHandlers, GCPhys, fAbove);
        if (    !pCur
            ||  ((fAbove ? pCur->Core.Key : pCur->Core.KeyLast) >> PAGE_SHIFT) != (GCPhys >> PAGE_SHIFT))
            break;
        unsigned uThisState = pgmHandlerPhysicalCalcState(pCur);
        uState = RT_MAX(uState, uThisState);

        /* next? */
        RTGCPHYS GCPhysNext = fAbove
                            ? pCur->Core.KeyLast + 1
                            : pCur->Core.Key - 1;
        if ((GCPhysNext >> PAGE_SHIFT) != (GCPhys >> PAGE_SHIFT))
            break;
        GCPhys = GCPhysNext;
    }

    /*
     * Update if we found something that is a higher priority
     * state than the current.
     */
    if (uState != PGM_PAGE_HNDL_PHYS_STATE_NONE)
    {
        PPGMPAGE pPage;
        int rc = pgmPhysGetPageWithHintEx(pPGM, GCPhys, &pPage, ppRamHint);
        if (    RT_SUCCESS(rc)
            &&  PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) < uState)
            PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, uState);
        else
            AssertRC(rc);
    }
}
447
448
/**
 * Resets ram range flags.
 *
 * Clears the physical-handler state of every page covered by pCur, then
 * recalculates the state of partially covered edge pages from any
 * neighbouring handlers.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS when shadow PTs was successfully updated.
 * @param   pVM     The VM handle.
 * @param   pCur    The physical handler.
 *
 * @remark  We don't start messing with the shadow page tables, as we've already got code
 *          in Trap0e which deals with out of sync handler flags (originally conceived for
 *          global pages).
 */
static void pgmHandlerPhysicalResetRamFlags(PVM pVM, PPGMPHYSHANDLER pCur)
{
    /*
     * Iterate the guest ram pages updating the state.
     */
    RTUINT cPages = pCur->cPages;
    RTGCPHYS GCPhys = pCur->Core.Key;
    PPGMRAMRANGE pRamHint = NULL;
    PPGM pPGM = &pVM->pgm.s;
    for (;;)
    {
        PPGMPAGE pPage;
        int rc = pgmPhysGetPageWithHintEx(pPGM, GCPhys, &pPage, &pRamHint);
        if (RT_SUCCESS(rc))
            PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_NONE);
        else
            AssertRC(rc);

        /* next */
        if (--cPages == 0)
            break;
        GCPhys += PAGE_SIZE;
    }

    /*
     * Check for partial start and end pages.
     */
    /* If the range starts or ends mid-page, another handler may share the edge
       page; restore its state from the neighbours. */
    if (pCur->Core.Key & PAGE_OFFSET_MASK)
        pgmHandlerPhysicalRecalcPageState(pPGM, pCur->Core.Key - 1, false /* fAbove */, &pRamHint);
    if ((pCur->Core.KeyLast & PAGE_OFFSET_MASK) != PAGE_SIZE - 1)
        pgmHandlerPhysicalRecalcPageState(pPGM, pCur->Core.KeyLast + 1, true /* fAbove */, &pRamHint);
}
493
494
495/**
496 * Modify a physical page access handler.
497 *
498 * Modification can only be done to the range it self, not the type or anything else.
499 *
500 * @returns VBox status code.
501 * For all return codes other than VERR_PGM_HANDLER_NOT_FOUND and VINF_SUCCESS the range is deregistered
502 * and a new registration must be performed!
503 * @param pVM VM handle.
504 * @param GCPhysCurrent Current location.
505 * @param GCPhys New location.
506 * @param GCPhysLast New last location.
507 */
508PGMDECL(int) PGMHandlerPhysicalModify(PVM pVM, RTGCPHYS GCPhysCurrent, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast)
509{
510 /*
511 * Remove it.
512 */
513 int rc;
514 pgmLock(pVM);
515 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhysCurrent);
516 if (pCur)
517 {
518 /*
519 * Clear the ram flags. (We're gonna move or free it!)
520 */
521 pgmHandlerPhysicalResetRamFlags(pVM, pCur);
522 const bool fRestoreAsRAM = pCur->pfnHandlerR3
523 && pCur->enmType != PGMPHYSHANDLERTYPE_MMIO; /** @todo this isn't entirely correct. */
524
525 /*
526 * Validate the new range, modify and reinsert.
527 */
528 if (GCPhysLast >= GCPhys)
529 {
530 /*
531 * We require the range to be within registered ram.
532 * There is no apparent need to support ranges which cover more than one ram range.
533 */
534 PPGMRAMRANGE pRam = CTXALLSUFF(pVM->pgm.s.pRamRanges);
535 while (pRam && GCPhys > pRam->GCPhysLast)
536 pRam = CTXALLSUFF(pRam->pNext);
537 if ( pRam
538 && GCPhys <= pRam->GCPhysLast
539 && GCPhysLast >= pRam->GCPhys)
540 {
541 pCur->Core.Key = GCPhys;
542 pCur->Core.KeyLast = GCPhysLast;
543 pCur->cPages = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + 1) >> PAGE_SHIFT;
544
545 if (RTAvlroGCPhysInsert(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, &pCur->Core))
546 {
547 /*
548 * Set ram flags, flush shadow PT entries and finally tell REM about this.
549 */
550 rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam);
551 if (rc == VINF_PGM_GCPHYS_ALIASED)
552 {
553 pVM->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
554 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
555 }
556 pVM->pgm.s.fPhysCacheFlushPending = true;
557
558#ifndef IN_RING3
559 REMNotifyHandlerPhysicalModify(pVM, pCur->enmType, GCPhysCurrent, GCPhys,
560 pCur->Core.KeyLast - GCPhys + 1, !!pCur->pfnHandlerR3, fRestoreAsRAM);
561#else
562 REMR3NotifyHandlerPhysicalModify(pVM, pCur->enmType, GCPhysCurrent, GCPhys,
563 pCur->Core.KeyLast - GCPhys + 1, !!pCur->pfnHandlerR3, fRestoreAsRAM);
564#endif
565 pgmUnlock(pVM);
566 Log(("PGMHandlerPhysicalModify: GCPhysCurrent=%VGp -> GCPhys=%VGp GCPhysLast=%VGp\n",
567 GCPhysCurrent, GCPhys, GCPhysLast));
568 return VINF_SUCCESS;
569 }
570
571 AssertMsgFailed(("Conflict! GCPhys=%VGp GCPhysLast=%VGp\n", GCPhys, GCPhysLast));
572 rc = VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
573 }
574 else
575 {
576 AssertMsgFailed(("No RAM range for %VGp-%VGp\n", GCPhys, GCPhysLast));
577 rc = VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
578 }
579 }
580 else
581 {
582 AssertMsgFailed(("Invalid range %VGp-%VGp\n", GCPhys, GCPhysLast));
583 rc = VERR_INVALID_PARAMETER;
584 }
585
586 /*
587 * Invalid new location, free it.
588 * We've only gotta notify REM and free the memory.
589 */
590 pgmHandlerPhysicalDeregisterNotifyREM(pVM, pCur);
591 MMHyperFree(pVM, pCur);
592 }
593 else
594 {
595 AssertMsgFailed(("Didn't find range starting at %VGp\n", GCPhysCurrent));
596 rc = VERR_PGM_HANDLER_NOT_FOUND;
597 }
598
599 pgmUnlock(pVM);
600 return rc;
601}
602
603
604/**
605 * Changes the callbacks associated with a physical access handler.
606 *
607 * @returns VBox status code.
608 * @param pVM VM Handle.
609 * @param GCPhys Start physical address.
610 * @param pfnHandlerR3 The R3 handler.
611 * @param pvUserR3 User argument to the R3 handler.
612 * @param pfnHandlerR0 The R0 handler.
613 * @param pvUserR0 User argument to the R0 handler.
614 * @param pfnHandlerGC The GC handler.
615 * @param pvUserGC User argument to the GC handler.
616 * This must be a GC pointer because it will be relocated!
617 * @param pszDesc Pointer to description string. This must not be freed.
618 */
619PGMDECL(int) PGMHandlerPhysicalChangeCallbacks(PVM pVM, RTGCPHYS GCPhys,
620 R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnHandlerR3, RTR3PTR pvUserR3,
621 R0PTRTYPE(PFNPGMR0PHYSHANDLER) pfnHandlerR0, RTR0PTR pvUserR0,
622 GCPTRTYPE(PFNPGMGCPHYSHANDLER) pfnHandlerGC, RTGCPTR pvUserGC,
623 R3PTRTYPE(const char *) pszDesc)
624{
625 /*
626 * Get the handler.
627 */
628 int rc = VINF_SUCCESS;
629 pgmLock(pVM);
630 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys);
631 if (pCur)
632 {
633 /*
634 * Change callbacks.
635 */
636 pCur->pfnHandlerR3 = pfnHandlerR3;
637 pCur->pvUserR3 = pvUserR3;
638 pCur->pfnHandlerR0 = pfnHandlerR0;
639 pCur->pvUserR0 = pvUserR0;
640 pCur->pfnHandlerGC = pfnHandlerGC;
641 pCur->pvUserGC = pvUserGC;
642 pCur->pszDesc = pszDesc;
643 }
644 else
645 {
646 AssertMsgFailed(("Didn't find range starting at %VGp\n", GCPhys));
647 rc = VERR_PGM_HANDLER_NOT_FOUND;
648 }
649
650 pgmUnlock(pVM);
651 return rc;
652}
653
654
/**
 * Splits a physical access handler in two.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success; the original handler now ends at
 *          GCPhysSplit - 1 and a new handler covers the rest.
 * @retval  VERR_PGM_HANDLER_NOT_FOUND if no handler starts at GCPhys.
 * @retval  VERR_INVALID_PARAMETER if GCPhysSplit is outside the handler range.
 *
 * @param   pVM             VM Handle.
 * @param   GCPhys          Start physical address of the handler.
 * @param   GCPhysSplit     The split address (becomes the start of the 2nd handler).
 */
PGMDECL(int) PGMHandlerPhysicalSplit(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysSplit)
{
    AssertReturn(GCPhys < GCPhysSplit, VERR_INVALID_PARAMETER);

    /*
     * Do the allocation without owning the lock.
     */
    PPGMPHYSHANDLER pNew;
    int rc = MMHyperAlloc(pVM, sizeof(*pNew), 0, MM_TAG_PGM_HANDLERS, (void **)&pNew);
    if (VBOX_FAILURE(rc))
        return rc;

    /*
     * Get the handler.
     */
    pgmLock(pVM);
    PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys);
    if (pCur)
    {
        if (GCPhysSplit <= pCur->Core.KeyLast)
        {
            /*
             * Create new handler node for the 2nd half.
             */
            /* Copy everything (callbacks, type, desc), then adjust the two ranges.
               The +PAGE_SIZE in the cPages formulas rounds the inclusive KeyLast up. */
            *pNew = *pCur;
            pNew->Core.Key = GCPhysSplit;
            pNew->cPages = (pNew->Core.KeyLast - (pNew->Core.Key & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;

            pCur->Core.KeyLast = GCPhysSplit - 1;
            pCur->cPages = (pCur->Core.KeyLast - (pCur->Core.Key & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;

            if (RTAvlroGCPhysInsert(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, &pNew->Core))
            {
                LogFlow(("PGMHandlerPhysicalSplit: %VGp-%VGp and %VGp-%VGp\n",
                         pCur->Core.Key, pCur->Core.KeyLast, pNew->Core.Key, pNew->Core.KeyLast));
                pgmUnlock(pVM);
                return VINF_SUCCESS;
            }
            /* NOTE(review): insertion "cannot" fail here since pCur was just truncated to
               make room; if it ever does, pCur is left truncated without the 2nd half. */
            AssertMsgFailed(("whu?\n"));
            rc = VERR_INTERNAL_ERROR;
        }
        else
        {
            AssertMsgFailed(("outside range: %VGp-%VGp split %VGp\n", pCur->Core.Key, pCur->Core.KeyLast, GCPhysSplit));
            rc = VERR_INVALID_PARAMETER;
        }
    }
    else
    {
        AssertMsgFailed(("Didn't find range starting at %VGp\n", GCPhys));
        rc = VERR_PGM_HANDLER_NOT_FOUND;
    }
    pgmUnlock(pVM);
    /* All failure paths: free the node allocated up front. */
    MMHyperFree(pVM, pNew);
    return rc;
}
719
720
/**
 * Joins up two adjacent physical access handlers which have the same callbacks.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success; the first handler absorbs the second,
 *          which is removed from the tree and freed.
 * @retval  VERR_PGM_HANDLER_NOT_FOUND if either handler wasn't found.
 * @retval  VERR_INVALID_PARAMETER if the two ranges are not adjacent.
 * @retval  VERR_ACCESS_DENIED if the callbacks differ.
 *
 * @param   pVM         VM Handle.
 * @param   GCPhys1     Start physical address of the first handler.
 * @param   GCPhys2     Start physical address of the second handler.
 */
PGMDECL(int) PGMHandlerPhysicalJoin(PVM pVM, RTGCPHYS GCPhys1, RTGCPHYS GCPhys2)
{
    /*
     * Get the handlers.
     */
    int rc;
    pgmLock(pVM);
    PPGMPHYSHANDLER pCur1 = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys1);
    if (pCur1)
    {
        PPGMPHYSHANDLER pCur2 = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys2);
        if (pCur2)
        {
            /*
             * Make sure that they are adjacent, and that they've got the same callbacks.
             */
            if (pCur1->Core.KeyLast + 1 == pCur2->Core.Key)
            {
                if (    pCur1->pfnHandlerGC == pCur2->pfnHandlerGC
                    &&  pCur1->pfnHandlerR0 == pCur2->pfnHandlerR0
                    &&  pCur1->pfnHandlerR3 == pCur2->pfnHandlerR3)
                {
                    /* Remove the 2nd node and extend the 1st to cover its range. */
                    PPGMPHYSHANDLER pCur3 = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys2);
                    if (pCur3 == pCur2)
                    {
                        pCur1->Core.KeyLast  = pCur2->Core.KeyLast;
                        /* Recompute page count; KeyLast is inclusive, +PAGE_SIZE rounds up. */
                        pCur1->cPages        = (pCur1->Core.KeyLast - (pCur1->Core.Key & X86_PTE_PAE_PG_MASK) + PAGE_SIZE) >> PAGE_SHIFT;
                        LogFlow(("PGMHandlerPhysicalJoin: %VGp-%VGp %VGp-%VGp\n",
                                 pCur1->Core.Key, pCur1->Core.KeyLast, pCur2->Core.Key, pCur2->Core.KeyLast));
                        pgmUnlock(pVM);
                        MMHyperFree(pVM, pCur2);
                        return VINF_SUCCESS;
                    }

                    /* pCur3 != pCur2 "cannot happen"; this assert always fires here
                       in strict builds to flag the inconsistency. */
                    Assert(pCur3 == pCur2);
                    rc = VERR_INTERNAL_ERROR;
                }
                else
                {
                    AssertMsgFailed(("mismatching handlers\n"));
                    rc = VERR_ACCESS_DENIED;
                }
            }
            else
            {
                AssertMsgFailed(("not adjacent: %VGp-%VGp %VGp-%VGp\n",
                                 pCur1->Core.Key, pCur1->Core.KeyLast, pCur2->Core.Key, pCur2->Core.KeyLast));
                rc = VERR_INVALID_PARAMETER;
            }
        }
        else
        {
            AssertMsgFailed(("Didn't find range starting at %VGp\n", GCPhys2));
            rc = VERR_PGM_HANDLER_NOT_FOUND;
        }
    }
    else
    {
        AssertMsgFailed(("Didn't find range starting at %VGp\n", GCPhys1));
        rc = VERR_PGM_HANDLER_NOT_FOUND;
    }
    pgmUnlock(pVM);
    return rc;

}
794
795
796/**
797 * Resets any modifications to individual pages in a physical
798 * page access handler region.
799 *
800 * This is used in pair with PGMHandlerPhysicalPageTempOff().
801 *
802 * @returns VBox status code.
803 * @param pVM VM Handle
804 * @param GCPhys Start physical address earlier passed to PGMR3HandlerPhysicalRegister().
805 */
806PGMDECL(int) PGMHandlerPhysicalReset(PVM pVM, RTGCPHYS GCPhys)
807{
808 pgmLock(pVM);
809
810 /*
811 * Find the handler.
812 */
813 int rc;
814 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers, GCPhys);
815 if (pCur)
816 {
817 /*
818 * Validate type.
819 */
820 switch (pCur->enmType)
821 {
822 case PGMPHYSHANDLERTYPE_PHYSICAL_WRITE:
823 case PGMPHYSHANDLERTYPE_PHYSICAL_ALL:
824 {
825 /*
826 * Set the flags and flush shadow PT entries.
827 */
828 STAM_COUNTER_INC(&pVM->pgm.s.StatHandlePhysicalReset);
829 PPGMRAMRANGE pRam = pgmPhysGetRange(&pVM->pgm.s, GCPhys);
830 Assert(pRam);
831 int rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam);
832 if (rc == VINF_PGM_GCPHYS_ALIASED)
833 {
834 pVM->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
835 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
836 }
837 pVM->pgm.s.fPhysCacheFlushPending = true;
838
839 rc = VINF_SUCCESS;
840 break;
841 }
842
843 /*
844 * Invalid.
845 */
846 case PGMPHYSHANDLERTYPE_MMIO:
847 AssertMsgFailed(("Can't reset type %d!\n", pCur->enmType));
848 rc = VERR_INTERNAL_ERROR;
849 break;
850
851 default:
852 AssertMsgFailed(("Invalid type %d! Corruption!\n", pCur->enmType));
853 rc = VERR_INTERNAL_ERROR;
854 break;
855 }
856 }
857 else
858 {
859 AssertMsgFailed(("Didn't find MMIO Range starting at %#x\n", GCPhys));
860 rc = VERR_PGM_HANDLER_NOT_FOUND;
861 }
862
863 pgmUnlock(pVM);
864 return rc;
865}
866
867
868/**
869 * Search for virtual handler with matching physical address
870 *
871 * @returns VBox status code
872 * @param pVM The VM handle.
873 * @param GCPhys GC physical address to search for.
874 * @param ppVirt Where to store the pointer to the virtual handler structure.
875 * @param piPage Where to store the pointer to the index of the cached physical page.
876 */
877int pgmHandlerVirtualFindByPhysAddr(PVM pVM, RTGCPHYS GCPhys, PPGMVIRTHANDLER *ppVirt, unsigned *piPage)
878{
879 STAM_PROFILE_START(CTXSUFF(&pVM->pgm.s.StatVirtHandleSearchByPhys), a);
880 Assert(ppVirt);
881
882 PPGMPHYS2VIRTHANDLER pCur;
883 pCur = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysRangeGet(&CTXSUFF(pVM->pgm.s.pTrees)->PhysToVirtHandlers, GCPhys);
884 if (pCur)
885 {
886 /* found a match! */
887#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
888 AssertRelease(pCur->offNextAlias & PGMPHYS2VIRTHANDLER_IS_HEAD);
889#endif
890 *ppVirt = (PPGMVIRTHANDLER)((uintptr_t)pCur + pCur->offVirtHandler);
891 *piPage = pCur - &(*ppVirt)->aPhysToVirt[0];
892
893 LogFlow(("PHYS2VIRT: found match for %VGp -> %VGv *piPage=%#x\n",
894 GCPhys, (*ppVirt)->GCPtr, *piPage));
895 STAM_PROFILE_STOP(CTXSUFF(&pVM->pgm.s.StatVirtHandleSearchByPhys), a);
896 return VINF_SUCCESS;
897 }
898
899 *ppVirt = NULL;
900 STAM_PROFILE_STOP(CTXSUFF(&pVM->pgm.s.StatVirtHandleSearchByPhys), a);
901 return VERR_PGM_HANDLER_NOT_FOUND;
902}
903
904
/**
 * Deal with aliases in phys2virt.
 *
 * Called when inserting a PGMPHYS2VIRTHANDLER node into the tree failed because
 * another node already owns the key; links the new node into the existing head
 * node's singly linked alias list (offsets are self-relative, see offNextAlias).
 *
 * @param   pVM         The VM handle.
 * @param   pPhys2Virt  The node we failed insert.
 */
static void pgmHandlerVirtualInsertAliased(PVM pVM, PPGMPHYS2VIRTHANDLER pPhys2Virt)
{
    /*
     * First find the node which is conflicting with us.
     */
    /** @todo Deal with partial overlapping. (Unlikly situation, so I'm too lazy to do anything about it now.) */
    PPGMPHYS2VIRTHANDLER pHead = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTXSUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key);
    if (!pHead)
    {
        /* Exact-key lookup failed even though range insert conflicted: the conflict
           is a partial overlap we don't handle yet. Leave the node unlinked. */
        /** @todo do something clever here... */
#ifdef IN_RING3
        LogRel(("pgmHandlerVirtualInsertAliased: %VGp-%VGp\n", pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast));
#endif
        pPhys2Virt->offNextAlias = 0;
        return;
    }
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
    AssertReleaseMsg(pHead != pPhys2Virt, ("%VGp-%VGp offVirtHandler=%#RX32\n",
                                           pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler));
#endif

    /** @todo check if the current head node covers the ground we do. This is highly unlikely
     * and I'm too lazy to implement this now as it will require sorting the list and stuff like that. */

    /*
     * Insert ourselves as the next node.
     */
    /* offNextAlias packs a self-relative byte offset (OFF_MASK bits) plus the
       IN_TREE/IS_HEAD status flags; an offset of 0 means end of the alias list. */
    if (!(pHead->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK))
        pPhys2Virt->offNextAlias = PGMPHYS2VIRTHANDLER_IN_TREE;
    else
    {
        PPGMPHYS2VIRTHANDLER pNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pHead + (pHead->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
        pPhys2Virt->offNextAlias = ((intptr_t)pNext - (intptr_t)pPhys2Virt)
                                 | PGMPHYS2VIRTHANDLER_IN_TREE;
    }
    pHead->offNextAlias = ((intptr_t)pPhys2Virt - (intptr_t)pHead)
                        | (pHead->offNextAlias & ~PGMPHYS2VIRTHANDLER_OFF_MASK);
    Log(("pgmHandlerVirtualInsertAliased: %VGp-%VGp offNextAlias=%#RX32\n", pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias));
}
950
951
/**
 * Resets one virtual handler range.
 *
 * AVL tree enumeration callback: reapplies the RAM flags for every page of a
 * virtual handler and (re)inserts its pages into the phys-to-virt lookup tree.
 *
 * @returns 0 (continue enumeration)
 * @param   pNode   Pointer to a PGMVIRTHANDLER.
 * @param   pvUser  The VM handle.
 */
DECLCALLBACK(int) pgmHandlerVirtualResetOne(PAVLROGCPTRNODECORE pNode, void *pvUser)
{
    PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)pNode;
    PVM pVM = (PVM)pvUser;

    /*
     * Calc flags.
     */
    unsigned fFlags;
    switch (pCur->enmType)
    {
        case PGMVIRTHANDLERTYPE_EIP:
        case PGMVIRTHANDLERTYPE_NORMAL:     fFlags = MM_RAM_FLAGS_VIRTUAL_HANDLER; break;
        case PGMVIRTHANDLERTYPE_WRITE:      fFlags = MM_RAM_FLAGS_VIRTUAL_HANDLER | MM_RAM_FLAGS_VIRTUAL_WRITE; break;
        case PGMVIRTHANDLERTYPE_ALL:        fFlags = MM_RAM_FLAGS_VIRTUAL_HANDLER | MM_RAM_FLAGS_VIRTUAL_ALL; break;
        /* hypervisor handlers need no flags and wouldn't have nowhere to put them in any case. */
        case PGMVIRTHANDLERTYPE_HYPERVISOR:
            return 0;
        default:
            AssertMsgFailed(("Invalid type %d\n", pCur->enmType));
            return 0;
    }

    /*
     * Iterate the pages and apply the flags.
     */
    PPGMRAMRANGE pRamHint = NULL;
    /* offPage is the offset into the first (possibly partial) page; it is reset
       to 0 after the first iteration. cbLeft tracks the remaining handler bytes. */
    RTGCUINTPTR offPage = ((RTGCUINTPTR)pCur->GCPtr & PAGE_OFFSET_MASK);
    RTGCUINTPTR cbLeft = pCur->cb;
    for (unsigned iPage = 0; iPage < pCur->cPages; iPage++)
    {
        PPGMPHYS2VIRTHANDLER pPhys2Virt = &pCur->aPhysToVirt[iPage];
        /* NIL_RTGCPHYS means the guest mapping for this page isn't known yet. */
        if (pPhys2Virt->Core.Key != NIL_RTGCPHYS)
        {
            /* Update the flags. */
            int rc = pgmRamFlagsSetByGCPhysWithHint(&pVM->pgm.s, pPhys2Virt->Core.Key, fFlags, &pRamHint);
            AssertRC(rc);

            /* Need to insert the page in the Phys2Virt lookup tree? */
            if (pPhys2Virt->Core.KeyLast == NIL_RTGCPHYS)
            {
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
                AssertRelease(!pPhys2Virt->offNextAlias);
#endif
                /* Clamp the node to this page; only the final page may be short. */
                unsigned cbPhys = cbLeft;
                if (cbPhys > PAGE_SIZE - offPage)
                    cbPhys = PAGE_SIZE - offPage;
                else
                    Assert(iPage == pCur->cPages - 1);
                pPhys2Virt->Core.KeyLast = pPhys2Virt->Core.Key + cbPhys - 1; /* inclusive */
                pPhys2Virt->offNextAlias = PGMPHYS2VIRTHANDLER_IS_HEAD | PGMPHYS2VIRTHANDLER_IN_TREE;
                if (!RTAvlroGCPhysInsert(&pVM->pgm.s.CTXSUFF(pTrees)->PhysToVirtHandlers, &pPhys2Virt->Core))
                    pgmHandlerVirtualInsertAliased(pVM, pPhys2Virt);
#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
                else
                    AssertReleaseMsg(RTAvlroGCPhysGet(&pVM->pgm.s.CTXSUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key) == &pPhys2Virt->Core,
                                     ("%VGp-%VGp offNextAlias=%#RX32\n",
                                      pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias));
#endif
                Log2(("PHYS2VIRT: Insert physical range %VGp-%VGp offNextAlias=%#RX32 %s\n",
                      pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias, R3STRING(pCur->pszDesc)));
            }
        }
        /* NOTE(review): on the final partial page this may wrap (unsigned), but the
           loop terminates on cPages so cbLeft is not read again — confirm intent. */
        cbLeft -= PAGE_SIZE - offPage;
        offPage = 0;
    }

    return 0;
}
1028
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette