VirtualBox

source: vbox/trunk/src/VBox/VMM/MMHyper.cpp@ 12792

Last change on this file since 12792 was 12792, checked in by vboxsync, 17 years ago

#1865: some MM stuff.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 39.5 KB
Line 
1/* $Id: MMHyper.cpp 12792 2008-09-29 12:14:42Z vboxsync $ */
2/** @file
3 * MM - Memory Monitor(/Manager) - Hypervisor Memory Area.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23
24
25/*******************************************************************************
26* Header Files *
27*******************************************************************************/
28#define LOG_GROUP LOG_GROUP_MM_HYPER
29#include <VBox/pgm.h>
30#include <VBox/mm.h>
31#include <VBox/dbgf.h>
32#include "MMInternal.h"
33#include <VBox/vm.h>
34#include <VBox/err.h>
35#include <VBox/param.h>
36#include <VBox/log.h>
37#include <iprt/alloc.h>
38#include <iprt/assert.h>
39#include <iprt/string.h>
40
41
42/*******************************************************************************
43* Internal Functions *
44*******************************************************************************/
45static DECLCALLBACK(bool) mmR3HyperRelocateCallback(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew, PGMRELOCATECALL enmMode, void *pvUser);
46static int mmR3HyperMap(PVM pVM, const size_t cb, const char *pszDesc, PRTGCPTR pGCPtr, PMMLOOKUPHYPER *ppLookup);
47static int mmR3HyperHeapCreate(PVM pVM, const size_t cb, PMMHYPERHEAP *ppHeap);
48static int mmR3HyperHeapMap(PVM pVM, PMMHYPERHEAP pHeap, PRTGCPTR ppHeapGC);
49static DECLCALLBACK(void) mmR3HyperInfoHma(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
50
51
52/**
53 * Initializes the hypervisor related MM stuff without
54 * calling down to PGM.
55 *
56 * PGM is not initialized at this point, PGM relies on
57 * the heap to initialize.
58 *
59 * @returns VBox status.
60 */
61int mmR3HyperInit(PVM pVM)
62{
63 LogFlow(("mmR3HyperInit:\n"));
64
65 /*
66 * Decide Hypervisor mapping in the guest context
67 * And setup various hypervisor area and heap parameters.
68 */
69 pVM->mm.s.pvHyperAreaGC = (RTGCPTR)MM_HYPER_AREA_ADDRESS;
70 pVM->mm.s.cbHyperArea = MM_HYPER_AREA_MAX_SIZE;
 /* The area base must be page-directory (4MB) aligned and stay below the
    top of the 32-bit address space. */
71 AssertRelease(RT_ALIGN_T(pVM->mm.s.pvHyperAreaGC, 1 << X86_PD_SHIFT, RTGCPTR) == pVM->mm.s.pvHyperAreaGC);
72 Assert(pVM->mm.s.pvHyperAreaGC < 0xff000000);
73
 /* Heap size comes from the MM/cbHyperHeap CFGM key; an absent key (or
    absent MM node) falls back to the 1280KB default, any other query
    failure is fatal. */
74 uint32_t cbHyperHeap;
75 int rc = CFGMR3QueryU32(CFGMR3GetChild(CFGMR3GetRoot(pVM), "MM"), "cbHyperHeap", &cbHyperHeap);
76 if (rc == VERR_CFGM_NO_PARENT || rc == VERR_CFGM_VALUE_NOT_FOUND)
77 cbHyperHeap = 1280*_1K;
78 else if (VBOX_FAILURE(rc))
79 {
80 LogRel(("MM/cbHyperHeap query -> %Vrc\n", rc));
81 AssertRCReturn(rc, rc);
82 }
83 cbHyperHeap = RT_ALIGN_32(cbHyperHeap, PAGE_SIZE);
84
85 /*
86 * Allocate the hypervisor heap.
87 *
88 * (This must be done before we start adding memory to the
89 * hypervisor static area because lookup records are allocated from it.)
90 */
91 rc = mmR3HyperHeapCreate(pVM, cbHyperHeap, &pVM->mm.s.pHyperHeapHC);
92 if (VBOX_SUCCESS(rc))
93 {
94 /*
95 * Make a small head fence to fend of accidental sequential access.
96 */
97 MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
98
99 /*
100 * Map the VM structure into the hypervisor space.
101 */
102 RTGCPTR GCPtr;
103 rc = MMR3HyperMapPages(pVM, pVM, pVM->pVMR0, RT_ALIGN_Z(sizeof(VM), PAGE_SIZE) >> PAGE_SHIFT, pVM->paVMPagesR3, "VM", &GCPtr);
104 if (VBOX_SUCCESS(rc))
105 {
 /* Publish the GC address of the VM structure to every VCPU. */
106 pVM->pVMGC = (RTGCPTR32)GCPtr;
107 for (uint32_t i = 0; i < pVM->cCPUs; i++)
108 pVM->aCpus[i].pVMRC = pVM->pVMGC;
109
110 /* Reserve a page for fencing. */
111 MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
112
113 /*
114 * Map the heap into the hypervisor space.
115 */
116 rc = mmR3HyperHeapMap(pVM, pVM->mm.s.pHyperHeapHC, &GCPtr);
117 if (VBOX_SUCCESS(rc))
118 {
119 pVM->mm.s.pHyperHeapGC = (RTGCPTR32)GCPtr;
120
121 /*
122 * Register info handlers.
123 */
124 DBGFR3InfoRegisterInternal(pVM, "hma", "Show the layout of the Hypervisor Memory Area.", mmR3HyperInfoHma);
125
126 LogFlow(("mmR3HyperInit: returns VINF_SUCCESS\n"));
127 return VINF_SUCCESS;
128 }
129 /* Caller will do proper cleanup. */
130 }
131 }
132
133 LogFlow(("mmR3HyperInit: returns %Vrc\n", rc))
134 return rc;
135}
136
137
138/**
139 * Finalizes the HMA mapping.
140 *
141 * This is called later during init, most (all) HMA allocations should be done
142 * by the time this function is called.
143 *
144 * @returns VBox status.
145 */
146MMR3DECL(int) MMR3HyperInitFinalize(PVM pVM)
147{
148 LogFlow(("MMR3HyperInitFinalize:\n"));
149
150 /*
151 * Adjust and create the HMA mapping.
152 */
153 while ((RTINT)pVM->mm.s.offHyperNextStatic + 64*_1K < (RTINT)pVM->mm.s.cbHyperArea - _4M)
154 pVM->mm.s.cbHyperArea -= _4M;
155 int rc = PGMR3MapPT(pVM, pVM->mm.s.pvHyperAreaGC, pVM->mm.s.cbHyperArea,
156 mmR3HyperRelocateCallback, NULL, "Hypervisor Memory Area");
157 if (VBOX_FAILURE(rc))
158 return rc;
159 pVM->mm.s.fPGMInitialized = true;
160
161 /*
162 * Do all the delayed mappings.
163 */
164 PMMLOOKUPHYPER pLookup = (PMMLOOKUPHYPER)((uintptr_t)pVM->mm.s.pHyperHeapHC + pVM->mm.s.offLookupHyper);
165 for (;;)
166 {
167 RTGCPTR GCPtr = pVM->mm.s.pvHyperAreaGC + pLookup->off;
168 unsigned cPages = pLookup->cb >> PAGE_SHIFT;
169 switch (pLookup->enmType)
170 {
171 case MMLOOKUPHYPERTYPE_LOCKED:
172 rc = mmR3MapLocked(pVM, pLookup->u.Locked.pLockedMem, GCPtr, 0, cPages, 0);
173 break;
174
175 case MMLOOKUPHYPERTYPE_HCPHYS:
176 rc = PGMMap(pVM, GCPtr, pLookup->u.HCPhys.HCPhys, pLookup->cb, 0);
177 break;
178
179 case MMLOOKUPHYPERTYPE_GCPHYS:
180 {
181 const RTGCPHYS GCPhys = pLookup->u.GCPhys.GCPhys;
182 const size_t cb = pLookup->cb;
183 for (unsigned off = 0; off < cb; off += PAGE_SIZE)
184 {
185 RTHCPHYS HCPhys;
186 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys + off, &HCPhys);
187 if (VBOX_FAILURE(rc))
188 break;
189 rc = PGMMap(pVM, GCPtr + off, HCPhys, PAGE_SIZE, 0);
190 if (VBOX_FAILURE(rc))
191 break;
192 }
193 break;
194 }
195
196 case MMLOOKUPHYPERTYPE_MMIO2:
197 {
198 const RTGCPHYS offEnd = pLookup->u.MMIO2.off + pLookup->cb;
199 for (RTGCPHYS offCur = pLookup->u.MMIO2.off; offCur < offEnd; offCur += PAGE_SIZE)
200 {
201 RTHCPHYS HCPhys;
202 rc = PGMR3PhysMMIO2GetHCPhys(pVM, pLookup->u.MMIO2.pDevIns, pLookup->u.MMIO2.iRegion, offCur, &HCPhys);
203 if (RT_FAILURE(rc))
204 break;
205 rc = PGMMap(pVM, GCPtr + (offCur - pLookup->u.MMIO2.off), HCPhys, PAGE_SIZE, 0);
206 if (RT_FAILURE(rc))
207 break;
208 }
209 break;
210 }
211
212 case MMLOOKUPHYPERTYPE_DYNAMIC:
213 /* do nothing here since these are either fences or managed by someone else using PGM. */
214 break;
215
216 default:
217 AssertMsgFailed(("enmType=%d\n", pLookup->enmType));
218 break;
219 }
220
221 if (VBOX_FAILURE(rc))
222 {
223 AssertMsgFailed(("rc=%Vrc cb=%d GCPtr=%VGv enmType=%d pszDesc=%s\n",
224 rc, pLookup->cb, pLookup->enmType, pLookup->pszDesc));
225 return rc;
226 }
227
228 /* next */
229 if (pLookup->offNext == (int32_t)NIL_OFFSET)
230 break;
231 pLookup = (PMMLOOKUPHYPER)((uintptr_t)pLookup + pLookup->offNext);
232 }
233
234 LogFlow(("MMR3HyperInitFinalize: returns VINF_SUCCESS\n"));
235 return VINF_SUCCESS;
236}
237
238
239/**
240 * Callback function which will be called when PGM is trying to find
241 * a new location for the mapping.
242 *
243 * The callback is called in two modes, 1) the check mode and 2) the relocate mode.
244 * In 1) the callback should say if it objects to a suggested new location. If it
245 * accepts the new location, it is called again for doing its relocation.
246 *
247 *
248 * @returns true if the location is ok.
249 * @returns false if another location should be found.
250 * @param pVM The VM handle.
251 * @param GCPtrOld The old virtual address.
252 * @param GCPtrNew The new virtual address.
253 * @param enmMode Used to indicate the callback mode.
254 * @param pvUser User argument. Ignored.
255 * @remark The return value is not a failure indicator, it's an acceptance
256 * indicator. Relocation can not fail!
257 */
258static DECLCALLBACK(bool) mmR3HyperRelocateCallback(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew, PGMRELOCATECALL enmMode, void *pvUser)
259{
260 switch (enmMode)
261 {
262 /*
263 * Verify location - all locations are good for us.
264 */
265 case PGMRELOCATECALL_SUGGEST:
266 return true;
267
268 /*
269 * Execute the relocation.
270 */
271 case PGMRELOCATECALL_RELOCATE:
272 {
273 /*
274 * Accepted!
275 */
276 AssertMsg(GCPtrOld == pVM->mm.s.pvHyperAreaGC, ("GCPtrOld=%VGv pVM->mm.s.pvHyperAreaGC=%VGv\n", GCPtrOld, pVM->mm.s.pvHyperAreaGC));
277 Log(("Relocating the hypervisor from %VGv to %VGv\n", GCPtrOld, GCPtrNew));
278
279 /*
280 * Relocate the VM structure and ourselves.
281 */
 /* Everything GC-addressed is shifted by the same signed delta. */
282 RTGCINTPTR offDelta = GCPtrNew - GCPtrOld;
283 pVM->pVMGC += offDelta;
284 for (uint32_t i = 0; i < pVM->cCPUs; i++)
285 pVM->aCpus[i].pVMRC = pVM->pVMGC;
286
287 pVM->mm.s.pvHyperAreaGC += offDelta;
288 pVM->mm.s.pHyperHeapGC += offDelta;
289 pVM->mm.s.pHyperHeapHC->pbHeapRC += offDelta;
290 pVM->mm.s.pHyperHeapHC->pVMRC = pVM->pVMGC;
291
292 /*
293 * Relocate the rest.
294 */
295 VMR3Relocate(pVM, offDelta);
296 return true;
297 }
298
299 default:
300 AssertMsgFailed(("Invalid relocation mode %d\n", enmMode));
301 }
302
 /* Unknown mode: reject the location (harmless for SUGGEST-like modes). */
303 return false;
304}
305
306
307/**
308 * Maps contiguous HC physical memory into the hypervisor region in the GC.
309 *
310 * @return VBox status code.
311 *
312 * @param pVM VM handle.
313 * @param pvHC Host context address of the memory. Must be page aligned!
314 * @param HCPhys Host context physical address of the memory to be mapped. Must be page aligned!
315 * @param cb Size of the memory. Will be rounded up to nearest page.
316 * @param pszDesc Description.
317 * @param pGCPtr Where to store the GC address.
318 */
319MMR3DECL(int) MMR3HyperMapHCPhys(PVM pVM, void *pvHC, RTHCPHYS HCPhys, size_t cb, const char *pszDesc, PRTGCPTR pGCPtr)
320{
321 LogFlow(("MMR3HyperMapHCPhys: pvHc=%p HCPhys=%VHp cb=%d pszDesc=%p:{%s} pGCPtr=%p\n", pvHC, HCPhys, (int)cb, pszDesc, pszDesc, pGCPtr));
322
323 /*
324 * Validate input.
325 */
326 AssertReturn(RT_ALIGN_P(pvHC, PAGE_SIZE) == pvHC, VERR_INVALID_PARAMETER);
327 AssertReturn(RT_ALIGN_T(HCPhys, PAGE_SIZE, RTHCPHYS) == HCPhys, VERR_INVALID_PARAMETER);
328 AssertReturn(pszDesc && *pszDesc, VERR_INVALID_PARAMETER);
329
330 /*
331 * Add the memory to the hypervisor area.
332 */
333 uint32_t cbAligned = RT_ALIGN_32(cb, PAGE_SIZE);
334 AssertReturn(cbAligned >= cb, VERR_INVALID_PARAMETER);
335 RTGCPTR GCPtr;
336 PMMLOOKUPHYPER pLookup;
337 int rc = mmR3HyperMap(pVM, cbAligned, pszDesc, &GCPtr, &pLookup);
338 if (VBOX_SUCCESS(rc))
339 {
340 pLookup->enmType = MMLOOKUPHYPERTYPE_HCPHYS;
341 pLookup->u.HCPhys.pvHC = pvHC;
342 pLookup->u.HCPhys.HCPhys = HCPhys;
343
344 /*
345 * Update the page table.
346 */
347 if (pVM->mm.s.fPGMInitialized)
348 rc = PGMMap(pVM, GCPtr, HCPhys, cbAligned, 0);
349 if (VBOX_SUCCESS(rc))
350 *pGCPtr = GCPtr;
351 }
352 return rc;
353}
354
355
356/**
357 * Maps contiguous GC physical memory into the hypervisor region in the GC.
358 *
359 * @return VBox status code.
360 *
361 * @param pVM VM handle.
362 * @param GCPhys Guest context physical address of the memory to be mapped. Must be page aligned!
363 * @param cb Size of the memory. Will be rounded up to nearest page.
364 * @param pszDesc Mapping description.
365 * @param pGCPtr Where to store the GC address.
366 */
367MMR3DECL(int) MMR3HyperMapGCPhys(PVM pVM, RTGCPHYS GCPhys, size_t cb, const char *pszDesc, PRTGCPTR pGCPtr)
368{
369 LogFlow(("MMR3HyperMapGCPhys: GCPhys=%VGp cb=%d pszDesc=%p:{%s} pGCPtr=%p\n", GCPhys, (int)cb, pszDesc, pszDesc, pGCPtr));
370
371 /*
372 * Validate input.
373 */
374 AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
375 AssertReturn(pszDesc && *pszDesc, VERR_INVALID_PARAMETER);
376
377 /*
378 * Add the memory to the hypervisor area.
379 */
380 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
381 RTGCPTR GCPtr;
382 PMMLOOKUPHYPER pLookup;
383 int rc = mmR3HyperMap(pVM, cb, pszDesc, &GCPtr, &pLookup);
384 if (VBOX_SUCCESS(rc))
385 {
386 pLookup->enmType = MMLOOKUPHYPERTYPE_GCPHYS;
387 pLookup->u.GCPhys.GCPhys = GCPhys;
388
389 /*
390 * Update the page table.
391 */
392 for (unsigned off = 0; off < cb; off += PAGE_SIZE)
393 {
394 RTHCPHYS HCPhys;
395 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys + off, &HCPhys);
396 AssertRC(rc);
397 if (VBOX_FAILURE(rc))
398 {
399 AssertMsgFailed(("rc=%Vrc GCPhys=%VGv off=%#x %s\n", rc, GCPhys, off, pszDesc));
400 break;
401 }
402 if (pVM->mm.s.fPGMInitialized)
403 {
404 rc = PGMMap(pVM, GCPtr + off, HCPhys, PAGE_SIZE, 0);
405 AssertRC(rc);
406 if (VBOX_FAILURE(rc))
407 {
408 AssertMsgFailed(("rc=%Vrc GCPhys=%VGv off=%#x %s\n", rc, GCPhys, off, pszDesc));
409 break;
410 }
411 }
412 }
413
414 if (VBOX_SUCCESS(rc) && pGCPtr)
415 *pGCPtr = GCPtr;
416 }
417 return rc;
418}
419
420
421/**
422 * Maps a portion of an MMIO2 region into the hypervisor region.
423 *
424 * Callers of this API must never deregister the MMIO2 region before the
425 * VM is powered off. If this becomes a requirement MMR3HyperUnmapMMIO2
426 * API will be needed to perform cleanups.
427 *
428 * @return VBox status code.
429 *
430 * @param pVM Pointer to the shared VM structure.
431 * @param pDevIns The device owning the MMIO2 memory.
432 * @param iRegion The region.
433 * @param off The offset into the region. Will be rounded down to closest page boundrary.
434 * @param cb The number of bytes to map. Will be rounded up to the closest page boundrary.
435 * @param pszDesc Mapping description.
436 * @param pRCPtr Where to store the RC address.
437 */
438MMR3DECL(int) MMR3HyperMapMMIO2(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, RTGCPHYS cb,
439 const char *pszDesc, PRTRCPTR pRCPtr)
440{
441 LogFlow(("MMR3HyperMapMMIO2: pDevIns=%p iRegion=%#x off=%VGp cb=%VGp pszDesc=%p:{%s} pRCPtr=%p\n",
442 pDevIns, iRegion, off, cb, pszDesc, pszDesc, pRCPtr));
443 int rc;
444
445 /*
446 * Validate input.
447 */
448 AssertReturn(pszDesc && *pszDesc, VERR_INVALID_PARAMETER);
449 AssertReturn(off + cb > off, VERR_INVALID_PARAMETER);
 /* Remember the sub-page offset; off/cb are then normalized to whole pages. */
450 uint32_t const offPage = off & PAGE_OFFSET_MASK;
451 off &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
452 cb += offPage;
453 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
454 const RTGCPHYS offEnd = off + cb;
455 AssertReturn(offEnd > off, VERR_INVALID_PARAMETER);
 /* Pre-flight: verify every page in the range is backed by the MMIO2
    region before committing any HMA space. */
456 for (RTGCPHYS offCur = off; offCur < offEnd; offCur += PAGE_SIZE)
457 {
458 RTHCPHYS HCPhys;
459 rc = PGMR3PhysMMIO2GetHCPhys(pVM, pDevIns, iRegion, offCur, &HCPhys);
460 AssertMsgRCReturn(rc, ("rc=%Rrc - iRegion=%d off=%RGp\n", rc, iRegion, off), rc);
461 }
462
463 /*
464 * Add the memory to the hypervisor area.
465 */
466 RTGCPTR GCPtr;
467 PMMLOOKUPHYPER pLookup;
468 rc = mmR3HyperMap(pVM, cb, pszDesc, &GCPtr, &pLookup);
469 if (VBOX_SUCCESS(rc))
470 {
471 pLookup->enmType = MMLOOKUPHYPERTYPE_MMIO2;
472 pLookup->u.MMIO2.pDevIns = pDevIns;
473 pLookup->u.MMIO2.iRegion = iRegion;
474 pLookup->u.MMIO2.off = off;
475
476 /*
477 * Update the page table.
478 */
 /* Only when PGM is up; otherwise MMR3HyperInitFinalize maps it later. */
479 if (pVM->mm.s.fPGMInitialized)
480 {
481 for (RTGCPHYS offCur = off; offCur < offEnd; offCur += PAGE_SIZE)
482 {
483 RTHCPHYS HCPhys;
484 rc = PGMR3PhysMMIO2GetHCPhys(pVM, pDevIns, iRegion, offCur, &HCPhys);
485 AssertRCReturn(rc, VERR_INTERNAL_ERROR);
486 rc = PGMMap(pVM, GCPtr + (offCur - off), HCPhys, PAGE_SIZE, 0);
487 if (VBOX_FAILURE(rc))
488 {
489 AssertMsgFailed(("rc=%Vrc offCur=%RGp %s\n", rc, offCur, pszDesc));
490 break;
491 }
492 }
493 }
494
495 if (VBOX_SUCCESS(rc))
496 {
 /* Re-apply the sub-page offset and make sure the address survives the
    narrowing store into the RTRCPTR output. */
497 GCPtr |= offPage;
498 *pRCPtr = GCPtr;
499 AssertLogRelReturn(*pRCPtr == GCPtr, VERR_INTERNAL_ERROR);
500 }
501 }
502 return rc;
503}
504
505
506
507
508/**
509 * Locks and Maps HC virtual memory into the hypervisor region in the GC.
510 *
511 * @return VBox status code.
512 *
513 * @param pVM VM handle.
514 * @param pvHC Host context address of the memory (may be not page aligned).
515 * @param cb Size of the memory. Will be rounded up to nearest page.
516 * @param fFree Set this if MM is responsible for freeing the memory using SUPPageFree.
517 * @param pszDesc Mapping description.
518 * @param pGCPtr Where to store the GC address corresponding to pvHC.
519 */
520MMR3DECL(int) MMR3HyperMapHCRam(PVM pVM, void *pvHC, size_t cb, bool fFree, const char *pszDesc, PRTGCPTR pGCPtr)
521{
522 LogFlow(("MMR3HyperMapHCRam: pvHc=%p cb=%d fFree=%d pszDesc=%p:{%s} pGCPtr=%p\n", pvHC, (int)cb, fFree, pszDesc, pszDesc, pGCPtr));
523
524 /*
525 * Validate input.
526 */
527 if ( !pvHC
528 || cb <= 0
529 || !pszDesc
530 || !*pszDesc)
531 {
532 AssertMsgFailed(("Invalid parameter\n"));
533 return VERR_INVALID_PARAMETER;
534 }
535
536 /*
537 * Page align address and size.
538 */
 /* The locked/mapped range covers whole pages; the caller's sub-page
    offset is added back to the returned GC address at the end. */
539 void *pvHCPage = (void *)((uintptr_t)pvHC & PAGE_BASE_HC_MASK);
540 cb += (uintptr_t)pvHC & PAGE_OFFSET_MASK;
541 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
542
543 /*
544 * Add the memory to the hypervisor area.
545 */
546 RTGCPTR GCPtr;
547 PMMLOOKUPHYPER pLookup;
548 int rc = mmR3HyperMap(pVM, cb, pszDesc, &GCPtr, &pLookup);
549 if (VBOX_SUCCESS(rc))
550 {
551 /*
552 * Lock the heap memory and tell PGM about the locked pages.
553 */
554 PMMLOCKEDMEM pLockedMem;
555 rc = mmR3LockMem(pVM, pvHCPage, cb, fFree ? MM_LOCKED_TYPE_HYPER : MM_LOCKED_TYPE_HYPER_NOFREE, &pLockedMem, false /* fSilentFailure */);
556 if (VBOX_SUCCESS(rc))
557 {
558 /* map the stuff into guest address space. */
559 if (pVM->mm.s.fPGMInitialized)
560 rc = mmR3MapLocked(pVM, pLockedMem, GCPtr, 0, ~(size_t)0, 0);
561 if (VBOX_SUCCESS(rc))
562 {
563 pLookup->enmType = MMLOOKUPHYPERTYPE_LOCKED;
564 pLookup->u.Locked.pvHC = pvHC;
565 pLookup->u.Locked.pvR0 = NIL_RTR0PTR;
566 pLookup->u.Locked.pLockedMem = pLockedMem;
567
568 /* done. */
569 GCPtr |= (uintptr_t)pvHC & PAGE_OFFSET_MASK;
570 *pGCPtr = GCPtr;
571 return rc;
572 }
573 /* Don't care about failure clean, we're screwed if this fails anyway. */
574 }
575 }
576
577 return rc;
578}
579
580
581/**
582 * Maps locked R3 virtual memory into the hypervisor region in the GC.
583 *
584 * @return VBox status code.
585 *
586 * @param pVM VM handle.
587 * @param pvR3 The ring-3 address of the memory, must be page aligned.
588 * @param pvR0 The ring-0 address of the memory, must be page aligned. (optional)
589 * @param cPages The number of pages.
590 * @param paPages The page descriptors.
591 * @param pszDesc Mapping description.
592 * @param pGCPtr Where to store the GC address corresponding to pvHC.
593 */
594MMR3DECL(int) MMR3HyperMapPages(PVM pVM, void *pvR3, RTR0PTR pvR0, size_t cPages, PCSUPPAGE paPages, const char *pszDesc, PRTGCPTR pGCPtr)
595{
596 LogFlow(("MMR3HyperMapPages: pvR3=%p pvR0=%p cPages=%zu paPages=%p pszDesc=%p:{%s} pGCPtr=%p\n",
597 pvR3, pvR0, cPages, paPages, pszDesc, pszDesc, pGCPtr));
598
599 /*
600 * Validate input.
601 */
602 AssertPtrReturn(pvR3, VERR_INVALID_POINTER);
603 AssertPtrReturn(paPages, VERR_INVALID_POINTER);
604 AssertReturn(cPages > 0, VERR_INVALID_PARAMETER);
605 AssertReturn(cPages < 1024, VERR_INVALID_PARAMETER);
606 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
607 AssertReturn(*pszDesc, VERR_INVALID_PARAMETER);
608 AssertPtrReturn(pGCPtr, VERR_INVALID_PARAMETER);
609
610 /*
611 * Add the memory to the hypervisor area.
612 */
613 RTGCPTR GCPtr;
614 PMMLOOKUPHYPER pLookup;
615 int rc = mmR3HyperMap(pVM, cPages << PAGE_SHIFT, pszDesc, &GCPtr, &pLookup);
616 if (VBOX_SUCCESS(rc))
617 {
618 /*
619 * Create a locked memory record and tell PGM about this.
620 */
621 PMMLOCKEDMEM pLockedMem = (PMMLOCKEDMEM)MMR3HeapAlloc(pVM, MM_TAG_MM, RT_OFFSETOF(MMLOCKEDMEM, aPhysPages[cPages]));
622 if (pLockedMem)
623 {
624 pLockedMem->pv = pvR3;
625 pLockedMem->cb = cPages << PAGE_SHIFT;
626 pLockedMem->eType = MM_LOCKED_TYPE_HYPER_PAGES;
627 memset(&pLockedMem->u, 0, sizeof(pLockedMem->u));
628 for (size_t i = 0; i < cPages; i++)
629 {
630 AssertReleaseReturn(paPages[i].Phys != 0 && paPages[i].Phys != NIL_RTHCPHYS && !(paPages[i].Phys & PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR);
631 pLockedMem->aPhysPages[i].Phys = paPages[i].Phys;
632 pLockedMem->aPhysPages[i].uReserved = (RTHCUINTPTR)pLockedMem;
633 }
634
635 /* map the stuff into guest address space. */
636 if (pVM->mm.s.fPGMInitialized)
637 rc = mmR3MapLocked(pVM, pLockedMem, GCPtr, 0, ~(size_t)0, 0);
638 if (VBOX_SUCCESS(rc))
639 {
640 pLookup->enmType = MMLOOKUPHYPERTYPE_LOCKED;
641 pLookup->u.Locked.pvHC = pvR3;
642 pLookup->u.Locked.pvR0 = pvR0;
643 pLookup->u.Locked.pLockedMem = pLockedMem;
644
645 /* done. */
646 *pGCPtr = GCPtr;
647 return rc;
648 }
649 /* Don't care about failure clean, we're screwed if this fails anyway. */
650 }
651 }
652
653 return rc;
654}
655
656
657/**
658 * Reserves a hypervisor memory area.
659 * Most frequent usage is fence pages and dynamically mappings like the guest PD and PDPT.
660 *
661 * @return VBox status code.
662 *
663 * @param pVM VM handle.
664 * @param cb Size of the memory. Will be rounded up to nearest page.
665 * @param pszDesc Mapping description.
666 * @param pGCPtr Where to store the assigned GC address. Optional.
667 */
668MMR3DECL(int) MMR3HyperReserve(PVM pVM, unsigned cb, const char *pszDesc, PRTGCPTR pGCPtr)
669{
670 LogFlow(("MMR3HyperMapHCRam: cb=%d pszDesc=%p:{%s} pGCPtr=%p\n", (int)cb, pszDesc, pszDesc, pGCPtr));
671
672 /*
673 * Validate input.
674 */
675 if ( cb <= 0
676 || !pszDesc
677 || !*pszDesc)
678 {
679 AssertMsgFailed(("Invalid parameter\n"));
680 return VERR_INVALID_PARAMETER;
681 }
682
683 /*
684 * Add the memory to the hypervisor area.
685 */
686 RTGCPTR GCPtr;
687 PMMLOOKUPHYPER pLookup;
688 int rc = mmR3HyperMap(pVM, cb, pszDesc, &GCPtr, &pLookup);
689 if (VBOX_SUCCESS(rc))
690 {
691 pLookup->enmType = MMLOOKUPHYPERTYPE_DYNAMIC;
692 if (pGCPtr)
693 *pGCPtr = GCPtr;
694 return VINF_SUCCESS;
695 }
696 return rc;
697}
698
699
700/**
701 * Adds memory to the hypervisor memory arena.
702 *
703 * @return VBox status code.
704 * @param pVM The VM handle.
705 * @param cb Size of the memory. Will be rounded up to neares page.
706 * @param pszDesc The description of the memory.
707 * @param pGCPtr Where to store the GC address.
708 * @param ppLookup Where to store the pointer to the lookup record.
709 * @remark We assume the threading structure of VBox imposes natural
710 * serialization of most functions, this one included.
711 */
712static int mmR3HyperMap(PVM pVM, const size_t cb, const char *pszDesc, PRTGCPTR pGCPtr, PMMLOOKUPHYPER *ppLookup)
713{
714 /*
715 * Validate input.
716 */
 /* The >= catches both overflow of the 32-bit rounding and exhaustion;
    the last page of the area is deliberately kept as a fence. */
717 const uint32_t cbAligned = RT_ALIGN(cb, PAGE_SIZE);
718 AssertReturn(cbAligned >= cb, VERR_INVALID_PARAMETER);
719 if (pVM->mm.s.offHyperNextStatic + cbAligned >= pVM->mm.s.cbHyperArea) /* don't use the last page, it's a fence. */
720 {
721 AssertMsgFailed(("Out of static mapping space in the HMA! offHyperAreaGC=%x cbAligned=%x\n",
722 pVM->mm.s.offHyperNextStatic, cbAligned));
723 return VERR_NO_MEMORY;
724 }
725
726 /*
727 * Allocate lookup record.
728 */
729 PMMLOOKUPHYPER pLookup;
730 int rc = MMHyperAlloc(pVM, sizeof(*pLookup), 1, MM_TAG_MM, (void **)&pLookup);
731 if (VBOX_SUCCESS(rc))
732 {
733 /*
734 * Initialize it and insert it.
735 */
 /* The list links are self-relative offsets so they survive relocation;
    the new record is inserted at the head of the list. */
736 pLookup->offNext = pVM->mm.s.offLookupHyper;
737 pLookup->cb = cbAligned;
738 pLookup->off = pVM->mm.s.offHyperNextStatic;
739 pVM->mm.s.offLookupHyper = (char *)pLookup - (char *)pVM->mm.s.pHyperHeapHC;
740 if (pLookup->offNext != (int32_t)NIL_OFFSET)
741 pLookup->offNext -= pVM->mm.s.offLookupHyper;
742 pLookup->enmType = MMLOOKUPHYPERTYPE_INVALID;
 /* Poison the union so a record the caller forgets to fill in is obvious. */
743 memset(&pLookup->u, 0xff, sizeof(pLookup->u));
744 pLookup->pszDesc = pszDesc;
745
746 /* Mapping. */
747 *pGCPtr = pVM->mm.s.pvHyperAreaGC + pVM->mm.s.offHyperNextStatic;
748 pVM->mm.s.offHyperNextStatic += cbAligned;
749
750 /* Return pointer. */
751 *ppLookup = pLookup;
752 }
753
754 AssertRC(rc);
755 LogFlow(("mmR3HyperMap: returns %Vrc *pGCPtr=%VGv\n", rc, *pGCPtr));
756 return rc;
757}
758
759
760/**
761 * Allocates a new heap.
762 *
763 * @returns VBox status code.
764 * @param pVM The VM handle.
765 * @param cb The size of the new heap.
766 * @param ppHeap Where to store the heap pointer on successful return.
767 */
768static int mmR3HyperHeapCreate(PVM pVM, const size_t cb, PMMHYPERHEAP *ppHeap)
769{
770 /*
771 * Allocate the hypervisor heap.
772 */
773 const uint32_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
774 AssertReturn(cbAligned >= cb, VERR_INVALID_PARAMETER);
775 void *pv;
776 int rc = SUPPageAlloc(cbAligned >> PAGE_SHIFT, &pv);
777 if (VBOX_SUCCESS(rc))
778 {
779 /*
780 * Initialize the heap and first free chunk.
781 */
 /* The heap header lives at the start of the allocation; the usable heap
    body starts MMYPERHEAP_HDR_SIZE bytes in. */
782 PMMHYPERHEAP pHeap = (PMMHYPERHEAP)pv;
783 pHeap->u32Magic = MMHYPERHEAP_MAGIC;
784 pHeap->pbHeapR3 = (uint8_t *)pHeap + MMYPERHEAP_HDR_SIZE;
785 pHeap->pbHeapR0 = (uintptr_t)pHeap->pbHeapR3; /** @todo #1865: Map heap into ring-0 on darwin. */
786 //pHeap->pbHeapGC = 0; // set by mmR3HyperHeapMap()
787 pHeap->pVMR3 = pVM;
788 pHeap->pVMR0 = pVM->pVMR0;
789 pHeap->pVMRC = pVM->pVMGC;
790 pHeap->cbHeap = cbAligned - MMYPERHEAP_HDR_SIZE;
791 pHeap->cbFree = pHeap->cbHeap - sizeof(MMHYPERCHUNK);
792 //pHeap->offFreeHead = 0;
793 //pHeap->offFreeTail = 0;
794 pHeap->offPageAligned = pHeap->cbHeap;
795 //pHeap->HyperHeapStatTree = 0;
796
 /* The whole body starts out as one big free chunk. */
797 PMMHYPERCHUNKFREE pFree = (PMMHYPERCHUNKFREE)pHeap->pbHeapR3;
798 pFree->cb = pHeap->cbFree;
799 //pFree->core.offNext = 0;
800 MMHYPERCHUNK_SET_TYPE(&pFree->core, MMHYPERCHUNK_FLAGS_FREE);
801 pFree->core.offHeap = -(int32_t)MMYPERHEAP_HDR_SIZE;
802 //pFree->offNext = 0;
803 //pFree->offPrev = 0;
804
805 STAMR3Register(pVM, &pHeap->cbHeap, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, "/MM/HyperHeap/cbHeap", STAMUNIT_BYTES, "The heap size.");
806 STAMR3Register(pVM, &pHeap->cbFree, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, "/MM/HyperHeap/cbFree", STAMUNIT_BYTES, "The free space.");
807
808 *ppHeap = pHeap;
809 return VINF_SUCCESS;
810 }
811 AssertMsgFailed(("SUPPageAlloc(%d,) -> %Vrc\n", cbAligned >> PAGE_SHIFT, rc));
812
813 *ppHeap = NULL;
814 return rc;
815}
816
817
818/**
819 * Maps the hypervisor heap into the hypervisor memory area (GC).
820 *
 * @returns VBox status code.
 * @param pVM The VM handle.
 * @param pHeap The heap created by mmR3HyperHeapCreate().
 * @param ppHeapGC Where to store the GC address of the heap.
820 */
821static int mmR3HyperHeapMap(PVM pVM, PMMHYPERHEAP pHeap, PRTGCPTR ppHeapGC)
822{
823 int rc = MMR3HyperMapHCRam(pVM, pHeap, pHeap->cbHeap + MMYPERHEAP_HDR_SIZE, true, "Heap", ppHeapGC);
824 if (VBOX_SUCCESS(rc))
825 {
 /* Fix up the GC self-pointers now that the GC address is known. */
826 pHeap->pVMRC = pVM->pVMGC;
827 pHeap->pbHeapRC = *ppHeapGC + MMYPERHEAP_HDR_SIZE;
828 /* Reserve a page for fencing. */
829 MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
830 }
831 return rc;
832}
833
834
835#if 0
836/**
837 * Destroys a heap.
 *
 * NOTE(review): dead code (compiled out); if ever enabled, it needs a
 * return statement since it is declared to return int.
838 */
839static int mmR3HyperHeapDestroy(PVM pVM, PMMHYPERHEAP pHeap)
840{
841 /* all this is dealt with when unlocking and freeing locked memory. */
842}
843#endif
844
845
846/**
847 * Allocates memory in the Hypervisor (GC VMM) area which never will
848 * be freed and doesn't have any offset based relation to other heap blocks.
849 *
850 * The latter means that two blocks allocated by this API will not have the
851 * same relative position to each other in GC and HC. In short, never use
852 * this API for allocating nodes for an offset based AVL tree!
853 *
854 * The returned memory is of course zeroed.
855 *
856 * @returns VBox status code.
857 * @param pVM The VM to operate on.
858 * @param cb Number of bytes to allocate.
859 * @param uAlignment Required memory alignment in bytes.
860 * Values are 0,8,16,32 and PAGE_SIZE.
861 * 0 -> default alignment, i.e. 8 bytes.
862 * @param enmTag The statistics tag.
863 * @param ppv Where to store the address to the allocated
864 * memory.
865 * @remark This is assumed not to be used at times when serialization is required.
866 */
867MMDECL(int) MMR3HyperAllocOnceNoRel(PVM pVM, size_t cb, unsigned uAlignment, MMTAG enmTag, void **ppv)
868{
869 AssertMsg(cb >= 8, ("Hey! Do you really mean to allocate less than 8 bytes?! cb=%d\n", cb));
870
871 /*
872 * Choose between allocating a new chunk of HMA memory
873 * and the heap. We will only do BIG allocations from HMA.
874 */
875 if ( cb < _64K
876 && ( uAlignment != PAGE_SIZE
877 || cb < 48*_1K))
878 {
879 int rc = MMHyperAlloc(pVM, cb, uAlignment, enmTag, ppv);
880 if ( rc != VERR_MM_HYPER_NO_MEMORY
881 || cb <= 8*_1K)
882 {
883 Log2(("MMR3HyperAllocOnceNoRel: cb=%#zx uAlignment=%#x returns %Rrc and *ppv=%p\n",
884 cb, uAlignment, rc, *ppv));
885 return rc;
886 }
887 }
888
889 /*
890 * Validate alignment.
891 */
892 switch (uAlignment)
893 {
894 case 0:
895 case 8:
896 case 16:
897 case 32:
898 case PAGE_SIZE:
899 break;
900 default:
901 AssertMsgFailed(("Invalid alignment %u\n", uAlignment));
902 return VERR_INVALID_PARAMETER;
903 }
904
905 /*
906 * Allocate the pages and the HMA space.
907 */
908 cb = RT_ALIGN(cb, PAGE_SIZE);
909 void *pvPages;
910 int rc = SUPPageAlloc(cb >> PAGE_SHIFT, &pvPages);
911 if (VBOX_SUCCESS(rc))
912 {
913 RTGCPTR GCPtr;
914 rc = MMR3HyperMapHCRam(pVM, pvPages, cb, true, mmR3GetTagName(enmTag), &GCPtr);
915 if (VBOX_SUCCESS(rc))
916 {
917 *ppv = pvPages;
918 Log2(("MMR3HyperAllocOnceNoRel: cb=%#x uAlignment=%#x returns VINF_SUCCESS and *ppv=%p\n",
919 cb, uAlignment, *ppv));
920 return rc;
921 }
922 AssertMsgFailed(("Failed to allocate %zd bytes! %Rrc\n", cb, rc));
923 SUPPageFree(pvPages, cb >> PAGE_SHIFT);
924
925 /*
926 * HACK ALERT! Try allocate it off the heap so that we don't freak
927 * out during vga/vmmdev mmio2 allocation with certain ram sizes.
928 */
929 /** @todo make a proper fix for this so we will never end up in this kind of situation! */
930 Log(("MMR3HyperAllocOnceNoRel: MMR3HyperMapHCRam failed with rc=%Rrc, try MMHyperAlloc(,%#d,,) instead\n", rc, cb));
931 int rc2 = MMHyperAlloc(pVM, cb, uAlignment, enmTag, ppv);
932 if (RT_SUCCESS(rc2))
933 {
934 Log2(("MMR3HyperAllocOnceNoRel: cb=%#x uAlignment=%#x returns %Rrc and *ppv=%p\n",
935 cb, uAlignment, rc, *ppv));
936 return rc;
937 }
938 }
939 else
940 AssertMsgFailed(("Failed to allocate %zd bytes! %Rrc\n", cb, rc));
941
942 if (rc == VERR_NO_MEMORY)
943 rc = VERR_MM_HYPER_NO_MEMORY;
944 LogRel(("MMR3HyperAllocOnceNoRel: cb=%#zx uAlignment=%#x returns %Rrc\n", cb, uAlignment, rc));
945 return rc;
946}
947
948
949/**
950 * Convert hypervisor HC virtual address to HC physical address.
951 *
952 * @returns HC physical address.
953 * @param pVM VM Handle
954 * @param pvHC Host context virtual address.
955 */
956MMR3DECL(RTHCPHYS) MMR3HyperHCVirt2HCPhys(PVM pVM, void *pvHC)
957{
 /* Linear search of the offset-linked lookup records. */
958 PMMLOOKUPHYPER pLookup = (PMMLOOKUPHYPER)((char*)pVM->mm.s.pHyperHeapHC + pVM->mm.s.offLookupHyper);
959 for (;;)
960 {
961 switch (pLookup->enmType)
962 {
963 case MMLOOKUPHYPERTYPE_LOCKED:
964 {
 /* Unsigned wrap makes addresses below the range compare >= cb. */
965 unsigned off = (char *)pvHC - (char *)pLookup->u.Locked.pvHC;
966 if (off < pLookup->cb)
967 return (pLookup->u.Locked.pLockedMem->aPhysPages[off >> PAGE_SHIFT].Phys & X86_PTE_PAE_PG_MASK) | (off & PAGE_OFFSET_MASK);
968 break;
969 }
970
971 case MMLOOKUPHYPERTYPE_HCPHYS:
972 {
973 unsigned off = (char *)pvHC - (char *)pLookup->u.HCPhys.pvHC;
974 if (off < pLookup->cb)
975 return pLookup->u.HCPhys.HCPhys + off;
976 break;
977 }
978
979 case MMLOOKUPHYPERTYPE_GCPHYS:
980 case MMLOOKUPHYPERTYPE_MMIO2:
981 case MMLOOKUPHYPERTYPE_DYNAMIC:
982 /* can (or don't want to) convert these kind of records. */
983 break;
984
985 default:
986 AssertMsgFailed(("enmType=%d\n", pLookup->enmType));
987 break;
988 }
989
990 /* next */
991 if ((unsigned)pLookup->offNext == NIL_OFFSET)
992 break;
993 pLookup = (PMMLOOKUPHYPER)((char *)pLookup + pLookup->offNext);
994 }
995
996 AssertMsgFailed(("pvHC=%p is not inside the hypervisor memory area!\n", pvHC));
997 return NIL_RTHCPHYS;
998}
999
1000
1001#if 0 /* unused, not implemented */
1002/**
1003 * Convert hypervisor HC physical address to HC virtual address.
1004 *
1005 * @returns HC virtual address.
1006 * @param pVM VM Handle
1007 * @param HCPhys Host context physical address.
1008 */
1009MMR3DECL(void *) MMR3HyperHCPhys2HCVirt(PVM pVM, RTHCPHYS HCPhys)
1010{
1011 void *pv;
1012 int rc = MMR3HyperHCPhys2HCVirtEx(pVM, HCPhys, &pv);
1013 if (VBOX_SUCCESS(rc))
1014 return pv;
1015 AssertMsgFailed(("Invalid address HCPhys=%x rc=%d\n", HCPhys, rc));
1016 return NULL;
1017}
1018
1019
1020/**
1021 * Convert hypervisor HC physical address to HC virtual address.
1022 *
1023 * @returns VBox status.
1024 * @param pVM VM Handle
1025 * @param HCPhys Host context physical address.
1026 * @param ppv Where to store the HC virtual address.
1027 */
1028MMR3DECL(int) MMR3HyperHCPhys2HCVirtEx(PVM pVM, RTHCPHYS HCPhys, void **ppv)
1029{
1030 /*
1031 * Linear search.
1032 */
1033 /** @todo implement when actually used. */
1034 return VERR_INVALID_POINTER;
1035}
1036#endif /* unused, not implemented */
1037
1038
1039/**
1040 * Read hypervisor memory from GC virtual address.
1041 *
1042 * @returns VBox status.
1043 * @param pVM VM handle.
1044 * @param pvDst Destination address (HC of course).
1045 * @param GCPtr GC virtual address.
1046 * @param cb Number of bytes to read.
1047 */
1048MMR3DECL(int) MMR3HyperReadGCVirt(PVM pVM, void *pvDst, RTGCPTR GCPtr, size_t cb)
1049{
1050 if (GCPtr - pVM->mm.s.pvHyperAreaGC >= pVM->mm.s.cbHyperArea)
1051 return VERR_INVALID_PARAMETER;
1052 return PGMR3MapRead(pVM, pvDst, GCPtr, cb);
1053}
1054
1055
1056/**
1057 * Info handler for 'hma', it dumps the list of lookup records for the hypervisor memory area.
1058 *
1059 * @param pVM The VM handle.
1060 * @param pHlp Callback functions for doing output.
1061 * @param pszArgs Argument string. Optional and specific to the handler.
1062 */
1063static DECLCALLBACK(void) mmR3HyperInfoHma(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1064{
1065 pHlp->pfnPrintf(pHlp, "Hypervisor Memory Area (HMA) Layout: Base %VGv, 0x%08x bytes\n",
1066 pVM->mm.s.pvHyperAreaGC, pVM->mm.s.cbHyperArea);
1067
1068 PMMLOOKUPHYPER pLookup = (PMMLOOKUPHYPER)((char*)pVM->mm.s.pHyperHeapHC + pVM->mm.s.offLookupHyper);
1069 for (;;)
1070 {
1071 switch (pLookup->enmType)
1072 {
1073 case MMLOOKUPHYPERTYPE_LOCKED:
1074 pHlp->pfnPrintf(pHlp, "%VGv-%VGv %VHv LOCKED %-*s %s\n",
1075 pLookup->off + pVM->mm.s.pvHyperAreaGC,
1076 pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
1077 pLookup->u.Locked.pvHC,
1078 sizeof(RTHCPTR) * 2,
1079 pLookup->u.Locked.pLockedMem->eType == MM_LOCKED_TYPE_HYPER_NOFREE ? "nofree"
1080 : pLookup->u.Locked.pLockedMem->eType == MM_LOCKED_TYPE_HYPER ? "autofree"
1081 : pLookup->u.Locked.pLockedMem->eType == MM_LOCKED_TYPE_HYPER_PAGES ? "pages"
1082 : pLookup->u.Locked.pLockedMem->eType == MM_LOCKED_TYPE_PHYS ? "gstphys"
1083 : "??",
1084 pLookup->pszDesc);
1085 break;
1086
1087 case MMLOOKUPHYPERTYPE_HCPHYS:
1088 pHlp->pfnPrintf(pHlp, "%VGv-%VGv %VHv HCPHYS %VHp %s\n",
1089 pLookup->off + pVM->mm.s.pvHyperAreaGC,
1090 pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
1091 pLookup->u.HCPhys.pvHC, pLookup->u.HCPhys.HCPhys,
1092 pLookup->pszDesc);
1093 break;
1094
1095 case MMLOOKUPHYPERTYPE_GCPHYS:
1096 pHlp->pfnPrintf(pHlp, "%VGv-%VGv %*s GCPHYS %VGp%*s %s\n",
1097 pLookup->off + pVM->mm.s.pvHyperAreaGC,
1098 pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
1099 sizeof(RTHCPTR) * 2, "",
1100 pLookup->u.GCPhys.GCPhys, RT_ABS(sizeof(RTHCPHYS) - sizeof(RTGCPHYS)) * 2, "",
1101 pLookup->pszDesc);
1102 break;
1103
1104 case MMLOOKUPHYPERTYPE_MMIO2:
1105 pHlp->pfnPrintf(pHlp, "%VGv-%VGv %*s MMIO2 %VGp%*s %s\n",
1106 pLookup->off + pVM->mm.s.pvHyperAreaGC,
1107 pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
1108 sizeof(RTHCPTR) * 2, "",
1109 pLookup->u.MMIO2.off, RT_ABS(sizeof(RTHCPHYS) - sizeof(RTGCPHYS)) * 2, "",
1110 pLookup->pszDesc);
1111 break;
1112
1113 case MMLOOKUPHYPERTYPE_DYNAMIC:
1114 pHlp->pfnPrintf(pHlp, "%VGv-%VGv %*s DYNAMIC %*s %s\n",
1115 pLookup->off + pVM->mm.s.pvHyperAreaGC,
1116 pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
1117 sizeof(RTHCPTR) * 2, "",
1118 sizeof(RTHCPTR) * 2, "",
1119 pLookup->pszDesc);
1120 break;
1121
1122 default:
1123 AssertMsgFailed(("enmType=%d\n", pLookup->enmType));
1124 break;
1125 }
1126
1127 /* next */
1128 if ((unsigned)pLookup->offNext == NIL_OFFSET)
1129 break;
1130 pLookup = (PMMLOOKUPHYPER)((char *)pLookup + pLookup->offNext);
1131 }
1132}
1133
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette