VirtualBox

source: vbox/trunk/src/VBox/VMM/MMHyper.cpp@ 12794

Last change on this file since 12794 was 12794, checked in by vboxsync, 16 years ago

#1865: Added a pVMRC member to VM so I can gradually convert away from pVMGC.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 39.6 KB
Line 
1/* $Id: MMHyper.cpp 12794 2008-09-29 12:51:37Z vboxsync $ */
2/** @file
3 * MM - Memory Monitor(/Manager) - Hypervisor Memory Area.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23
24
25/*******************************************************************************
26* Header Files *
27*******************************************************************************/
28#define LOG_GROUP LOG_GROUP_MM_HYPER
29#include <VBox/pgm.h>
30#include <VBox/mm.h>
31#include <VBox/dbgf.h>
32#include "MMInternal.h"
33#include <VBox/vm.h>
34#include <VBox/err.h>
35#include <VBox/param.h>
36#include <VBox/log.h>
37#include <iprt/alloc.h>
38#include <iprt/assert.h>
39#include <iprt/string.h>
40
41
42/*******************************************************************************
43* Internal Functions *
44*******************************************************************************/
45static DECLCALLBACK(bool) mmR3HyperRelocateCallback(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew, PGMRELOCATECALL enmMode, void *pvUser);
46static int mmR3HyperMap(PVM pVM, const size_t cb, const char *pszDesc, PRTGCPTR pGCPtr, PMMLOOKUPHYPER *ppLookup);
47static int mmR3HyperHeapCreate(PVM pVM, const size_t cb, PMMHYPERHEAP *ppHeap);
48static int mmR3HyperHeapMap(PVM pVM, PMMHYPERHEAP pHeap, PRTGCPTR ppHeapGC);
49static DECLCALLBACK(void) mmR3HyperInfoHma(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
50
51
52/**
53 * Initializes the hypvervisor related MM stuff without
54 * calling down to PGM.
55 *
56 * PGM is not initialized at this point, PGM relies on
57 * the heap to initialize.
58 *
59 * @returns VBox status.
60 */
61int mmR3HyperInit(PVM pVM)
62{
63 LogFlow(("mmR3HyperInit:\n"));
64
65 /*
66 * Decide Hypervisor mapping in the guest context
67 * And setup various hypervisor area and heap parameters.
68 */
69 pVM->mm.s.pvHyperAreaGC = (RTGCPTR)MM_HYPER_AREA_ADDRESS;
70 pVM->mm.s.cbHyperArea = MM_HYPER_AREA_MAX_SIZE;
71 AssertRelease(RT_ALIGN_T(pVM->mm.s.pvHyperAreaGC, 1 << X86_PD_SHIFT, RTGCPTR) == pVM->mm.s.pvHyperAreaGC);
72 Assert(pVM->mm.s.pvHyperAreaGC < 0xff000000);
73
74 uint32_t cbHyperHeap;
75 int rc = CFGMR3QueryU32(CFGMR3GetChild(CFGMR3GetRoot(pVM), "MM"), "cbHyperHeap", &cbHyperHeap);
76 if (rc == VERR_CFGM_NO_PARENT || rc == VERR_CFGM_VALUE_NOT_FOUND)
77 cbHyperHeap = 1280*_1K;
78 else if (VBOX_FAILURE(rc))
79 {
80 LogRel(("MM/cbHyperHeap query -> %Vrc\n", rc));
81 AssertRCReturn(rc, rc);
82 }
83 cbHyperHeap = RT_ALIGN_32(cbHyperHeap, PAGE_SIZE);
84
85 /*
86 * Allocate the hypervisor heap.
87 *
88 * (This must be done before we start adding memory to the
89 * hypervisor static area because lookup records are allocated from it.)
90 */
91 rc = mmR3HyperHeapCreate(pVM, cbHyperHeap, &pVM->mm.s.pHyperHeapHC);
92 if (VBOX_SUCCESS(rc))
93 {
94 /*
95 * Make a small head fence to fend of accidental sequential access.
96 */
97 MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
98
99 /*
100 * Map the VM structure into the hypervisor space.
101 */
102 RTGCPTR GCPtr;
103 rc = MMR3HyperMapPages(pVM, pVM, pVM->pVMR0, RT_ALIGN_Z(sizeof(VM), PAGE_SIZE) >> PAGE_SHIFT, pVM->paVMPagesR3, "VM", &GCPtr);
104 if (VBOX_SUCCESS(rc))
105 {
106 pVM->pVMRC = (RTGCPTR32)GCPtr;
107 pVM->pVMGC = pVM->pVMRC;
108 for (uint32_t i = 0; i < pVM->cCPUs; i++)
109 pVM->aCpus[i].pVMRC = pVM->pVMRC;
110
111 /* Reserve a page for fencing. */
112 MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
113
114 /*
115 * Map the heap into the hypervisor space.
116 */
117 rc = mmR3HyperHeapMap(pVM, pVM->mm.s.pHyperHeapHC, &GCPtr);
118 if (VBOX_SUCCESS(rc))
119 {
120 pVM->mm.s.pHyperHeapGC = (RTGCPTR32)GCPtr;
121
122 /*
123 * Register info handlers.
124 */
125 DBGFR3InfoRegisterInternal(pVM, "hma", "Show the layout of the Hypervisor Memory Area.", mmR3HyperInfoHma);
126
127 LogFlow(("mmR3HyperInit: returns VINF_SUCCESS\n"));
128 return VINF_SUCCESS;
129 }
130 /* Caller will do proper cleanup. */
131 }
132 }
133
134 LogFlow(("mmR3HyperInit: returns %Vrc\n", rc));
135 return rc;
136}
137
138
139/**
140 * Finalizes the HMA mapping.
141 *
142 * This is called later during init, most (all) HMA allocations should be done
143 * by the time this function is called.
144 *
145 * @returns VBox status.
146 */
147MMR3DECL(int) MMR3HyperInitFinalize(PVM pVM)
148{
149 LogFlow(("MMR3HyperInitFinalize:\n"));
150
151 /*
152 * Adjust and create the HMA mapping.
153 */
154 while ((RTINT)pVM->mm.s.offHyperNextStatic + 64*_1K < (RTINT)pVM->mm.s.cbHyperArea - _4M)
155 pVM->mm.s.cbHyperArea -= _4M;
156 int rc = PGMR3MapPT(pVM, pVM->mm.s.pvHyperAreaGC, pVM->mm.s.cbHyperArea,
157 mmR3HyperRelocateCallback, NULL, "Hypervisor Memory Area");
158 if (VBOX_FAILURE(rc))
159 return rc;
160 pVM->mm.s.fPGMInitialized = true;
161
162 /*
163 * Do all the delayed mappings.
164 */
165 PMMLOOKUPHYPER pLookup = (PMMLOOKUPHYPER)((uintptr_t)pVM->mm.s.pHyperHeapHC + pVM->mm.s.offLookupHyper);
166 for (;;)
167 {
168 RTGCPTR GCPtr = pVM->mm.s.pvHyperAreaGC + pLookup->off;
169 unsigned cPages = pLookup->cb >> PAGE_SHIFT;
170 switch (pLookup->enmType)
171 {
172 case MMLOOKUPHYPERTYPE_LOCKED:
173 rc = mmR3MapLocked(pVM, pLookup->u.Locked.pLockedMem, GCPtr, 0, cPages, 0);
174 break;
175
176 case MMLOOKUPHYPERTYPE_HCPHYS:
177 rc = PGMMap(pVM, GCPtr, pLookup->u.HCPhys.HCPhys, pLookup->cb, 0);
178 break;
179
180 case MMLOOKUPHYPERTYPE_GCPHYS:
181 {
182 const RTGCPHYS GCPhys = pLookup->u.GCPhys.GCPhys;
183 const size_t cb = pLookup->cb;
184 for (unsigned off = 0; off < cb; off += PAGE_SIZE)
185 {
186 RTHCPHYS HCPhys;
187 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys + off, &HCPhys);
188 if (VBOX_FAILURE(rc))
189 break;
190 rc = PGMMap(pVM, GCPtr + off, HCPhys, PAGE_SIZE, 0);
191 if (VBOX_FAILURE(rc))
192 break;
193 }
194 break;
195 }
196
197 case MMLOOKUPHYPERTYPE_MMIO2:
198 {
199 const RTGCPHYS offEnd = pLookup->u.MMIO2.off + pLookup->cb;
200 for (RTGCPHYS offCur = pLookup->u.MMIO2.off; offCur < offEnd; offCur += PAGE_SIZE)
201 {
202 RTHCPHYS HCPhys;
203 rc = PGMR3PhysMMIO2GetHCPhys(pVM, pLookup->u.MMIO2.pDevIns, pLookup->u.MMIO2.iRegion, offCur, &HCPhys);
204 if (RT_FAILURE(rc))
205 break;
206 rc = PGMMap(pVM, GCPtr + (offCur - pLookup->u.MMIO2.off), HCPhys, PAGE_SIZE, 0);
207 if (RT_FAILURE(rc))
208 break;
209 }
210 break;
211 }
212
213 case MMLOOKUPHYPERTYPE_DYNAMIC:
214 /* do nothing here since these are either fences or managed by someone else using PGM. */
215 break;
216
217 default:
218 AssertMsgFailed(("enmType=%d\n", pLookup->enmType));
219 break;
220 }
221
222 if (VBOX_FAILURE(rc))
223 {
224 AssertMsgFailed(("rc=%Vrc cb=%d GCPtr=%VGv enmType=%d pszDesc=%s\n",
225 rc, pLookup->cb, pLookup->enmType, pLookup->pszDesc));
226 return rc;
227 }
228
229 /* next */
230 if (pLookup->offNext == (int32_t)NIL_OFFSET)
231 break;
232 pLookup = (PMMLOOKUPHYPER)((uintptr_t)pLookup + pLookup->offNext);
233 }
234
235 LogFlow(("MMR3HyperInitFinalize: returns VINF_SUCCESS\n"));
236 return VINF_SUCCESS;
237}
238
239
240/**
241 * Callback function which will be called when PGM is trying to find
242 * a new location for the mapping.
243 *
244 * The callback is called in two modes, 1) the check mode and 2) the relocate mode.
245 * In 1) the callback should say if it objects to a suggested new location. If it
246 * accepts the new location, it is called again for doing it's relocation.
247 *
248 *
249 * @returns true if the location is ok.
250 * @returns false if another location should be found.
251 * @param pVM The VM handle.
252 * @param GCPtrOld The old virtual address.
253 * @param GCPtrNew The new virtual address.
254 * @param enmMode Used to indicate the callback mode.
255 * @param pvUser User argument. Ignored.
256 * @remark The return value is no a failure indicator, it's an acceptance
257 * indicator. Relocation can not fail!
258 */
259static DECLCALLBACK(bool) mmR3HyperRelocateCallback(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew, PGMRELOCATECALL enmMode, void *pvUser)
260{
261 switch (enmMode)
262 {
263 /*
264 * Verify location - all locations are good for us.
265 */
266 case PGMRELOCATECALL_SUGGEST:
267 return true;
268
269 /*
270 * Execute the relocation.
271 */
272 case PGMRELOCATECALL_RELOCATE:
273 {
274 /*
275 * Accepted!
276 */
277 AssertMsg(GCPtrOld == pVM->mm.s.pvHyperAreaGC, ("GCPtrOld=%VGv pVM->mm.s.pvHyperAreaGC=%VGv\n", GCPtrOld, pVM->mm.s.pvHyperAreaGC));
278 Log(("Relocating the hypervisor from %VGv to %VGv\n", GCPtrOld, GCPtrNew));
279
280 /*
281 * Relocate the VM structure and ourselves.
282 */
283 RTGCINTPTR offDelta = GCPtrNew - GCPtrOld;
284 pVM->pVMRC += offDelta;
285 pVM->pVMGC = pVM->pVMRC;
286 for (uint32_t i = 0; i < pVM->cCPUs; i++)
287 pVM->aCpus[i].pVMRC = pVM->pVMRC;
288
289 pVM->mm.s.pvHyperAreaGC += offDelta;
290 pVM->mm.s.pHyperHeapGC += offDelta;
291 pVM->mm.s.pHyperHeapHC->pbHeapRC += offDelta;
292 pVM->mm.s.pHyperHeapHC->pVMRC = pVM->pVMRC;
293
294 /*
295 * Relocate the rest.
296 */
297 VMR3Relocate(pVM, offDelta);
298 return true;
299 }
300
301 default:
302 AssertMsgFailed(("Invalid relocation mode %d\n", enmMode));
303 }
304
305 return false;
306}
307
308
309/**
310 * Maps contiguous HC physical memory into the hypervisor region in the GC.
311 *
312 * @return VBox status code.
313 *
314 * @param pVM VM handle.
315 * @param pvHC Host context address of the memory. Must be page aligned!
316 * @param HCPhys Host context physical address of the memory to be mapped. Must be page aligned!
317 * @param cb Size of the memory. Will be rounded up to nearest page.
318 * @param pszDesc Description.
319 * @param pGCPtr Where to store the GC address.
320 */
321MMR3DECL(int) MMR3HyperMapHCPhys(PVM pVM, void *pvHC, RTHCPHYS HCPhys, size_t cb, const char *pszDesc, PRTGCPTR pGCPtr)
322{
323 LogFlow(("MMR3HyperMapHCPhys: pvHc=%p HCPhys=%VHp cb=%d pszDesc=%p:{%s} pGCPtr=%p\n", pvHC, HCPhys, (int)cb, pszDesc, pszDesc, pGCPtr));
324
325 /*
326 * Validate input.
327 */
328 AssertReturn(RT_ALIGN_P(pvHC, PAGE_SIZE) == pvHC, VERR_INVALID_PARAMETER);
329 AssertReturn(RT_ALIGN_T(HCPhys, PAGE_SIZE, RTHCPHYS) == HCPhys, VERR_INVALID_PARAMETER);
330 AssertReturn(pszDesc && *pszDesc, VERR_INVALID_PARAMETER);
331
332 /*
333 * Add the memory to the hypervisor area.
334 */
335 uint32_t cbAligned = RT_ALIGN_32(cb, PAGE_SIZE);
336 AssertReturn(cbAligned >= cb, VERR_INVALID_PARAMETER);
337 RTGCPTR GCPtr;
338 PMMLOOKUPHYPER pLookup;
339 int rc = mmR3HyperMap(pVM, cbAligned, pszDesc, &GCPtr, &pLookup);
340 if (VBOX_SUCCESS(rc))
341 {
342 pLookup->enmType = MMLOOKUPHYPERTYPE_HCPHYS;
343 pLookup->u.HCPhys.pvHC = pvHC;
344 pLookup->u.HCPhys.HCPhys = HCPhys;
345
346 /*
347 * Update the page table.
348 */
349 if (pVM->mm.s.fPGMInitialized)
350 rc = PGMMap(pVM, GCPtr, HCPhys, cbAligned, 0);
351 if (VBOX_SUCCESS(rc))
352 *pGCPtr = GCPtr;
353 }
354 return rc;
355}
356
357
358/**
359 * Maps contiguous GC physical memory into the hypervisor region in the GC.
360 *
361 * @return VBox status code.
362 *
363 * @param pVM VM handle.
364 * @param GCPhys Guest context physical address of the memory to be mapped. Must be page aligned!
365 * @param cb Size of the memory. Will be rounded up to nearest page.
366 * @param pszDesc Mapping description.
367 * @param pGCPtr Where to store the GC address.
368 */
369MMR3DECL(int) MMR3HyperMapGCPhys(PVM pVM, RTGCPHYS GCPhys, size_t cb, const char *pszDesc, PRTGCPTR pGCPtr)
370{
371 LogFlow(("MMR3HyperMapGCPhys: GCPhys=%VGp cb=%d pszDesc=%p:{%s} pGCPtr=%p\n", GCPhys, (int)cb, pszDesc, pszDesc, pGCPtr));
372
373 /*
374 * Validate input.
375 */
376 AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
377 AssertReturn(pszDesc && *pszDesc, VERR_INVALID_PARAMETER);
378
379 /*
380 * Add the memory to the hypervisor area.
381 */
382 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
383 RTGCPTR GCPtr;
384 PMMLOOKUPHYPER pLookup;
385 int rc = mmR3HyperMap(pVM, cb, pszDesc, &GCPtr, &pLookup);
386 if (VBOX_SUCCESS(rc))
387 {
388 pLookup->enmType = MMLOOKUPHYPERTYPE_GCPHYS;
389 pLookup->u.GCPhys.GCPhys = GCPhys;
390
391 /*
392 * Update the page table.
393 */
394 for (unsigned off = 0; off < cb; off += PAGE_SIZE)
395 {
396 RTHCPHYS HCPhys;
397 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys + off, &HCPhys);
398 AssertRC(rc);
399 if (VBOX_FAILURE(rc))
400 {
401 AssertMsgFailed(("rc=%Vrc GCPhys=%VGv off=%#x %s\n", rc, GCPhys, off, pszDesc));
402 break;
403 }
404 if (pVM->mm.s.fPGMInitialized)
405 {
406 rc = PGMMap(pVM, GCPtr + off, HCPhys, PAGE_SIZE, 0);
407 AssertRC(rc);
408 if (VBOX_FAILURE(rc))
409 {
410 AssertMsgFailed(("rc=%Vrc GCPhys=%VGv off=%#x %s\n", rc, GCPhys, off, pszDesc));
411 break;
412 }
413 }
414 }
415
416 if (VBOX_SUCCESS(rc) && pGCPtr)
417 *pGCPtr = GCPtr;
418 }
419 return rc;
420}
421
422
423/**
424 * Maps a portion of an MMIO2 region into the hypervisor region.
425 *
426 * Callers of this API must never deregister the MMIO2 region before the
427 * VM is powered off. If this becomes a requirement MMR3HyperUnmapMMIO2
428 * API will be needed to perform cleanups.
429 *
430 * @return VBox status code.
431 *
432 * @param pVM Pointer to the shared VM structure.
433 * @param pDevIns The device owning the MMIO2 memory.
434 * @param iRegion The region.
435 * @param off The offset into the region. Will be rounded down to closest page boundrary.
436 * @param cb The number of bytes to map. Will be rounded up to the closest page boundrary.
437 * @param pszDesc Mapping description.
438 * @param pRCPtr Where to store the RC address.
439 */
440MMR3DECL(int) MMR3HyperMapMMIO2(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, RTGCPHYS cb,
441 const char *pszDesc, PRTRCPTR pRCPtr)
442{
443 LogFlow(("MMR3HyperMapMMIO2: pDevIns=%p iRegion=%#x off=%VGp cb=%VGp pszDesc=%p:{%s} pRCPtr=%p\n",
444 pDevIns, iRegion, off, cb, pszDesc, pszDesc, pRCPtr));
445 int rc;
446
447 /*
448 * Validate input.
449 */
450 AssertReturn(pszDesc && *pszDesc, VERR_INVALID_PARAMETER);
451 AssertReturn(off + cb > off, VERR_INVALID_PARAMETER);
452 uint32_t const offPage = off & PAGE_OFFSET_MASK;
453 off &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
454 cb += offPage;
455 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
456 const RTGCPHYS offEnd = off + cb;
457 AssertReturn(offEnd > off, VERR_INVALID_PARAMETER);
458 for (RTGCPHYS offCur = off; offCur < offEnd; offCur += PAGE_SIZE)
459 {
460 RTHCPHYS HCPhys;
461 rc = PGMR3PhysMMIO2GetHCPhys(pVM, pDevIns, iRegion, offCur, &HCPhys);
462 AssertMsgRCReturn(rc, ("rc=%Rrc - iRegion=%d off=%RGp\n", rc, iRegion, off), rc);
463 }
464
465 /*
466 * Add the memory to the hypervisor area.
467 */
468 RTGCPTR GCPtr;
469 PMMLOOKUPHYPER pLookup;
470 rc = mmR3HyperMap(pVM, cb, pszDesc, &GCPtr, &pLookup);
471 if (VBOX_SUCCESS(rc))
472 {
473 pLookup->enmType = MMLOOKUPHYPERTYPE_MMIO2;
474 pLookup->u.MMIO2.pDevIns = pDevIns;
475 pLookup->u.MMIO2.iRegion = iRegion;
476 pLookup->u.MMIO2.off = off;
477
478 /*
479 * Update the page table.
480 */
481 if (pVM->mm.s.fPGMInitialized)
482 {
483 for (RTGCPHYS offCur = off; offCur < offEnd; offCur += PAGE_SIZE)
484 {
485 RTHCPHYS HCPhys;
486 rc = PGMR3PhysMMIO2GetHCPhys(pVM, pDevIns, iRegion, offCur, &HCPhys);
487 AssertRCReturn(rc, VERR_INTERNAL_ERROR);
488 rc = PGMMap(pVM, GCPtr + (offCur - off), HCPhys, PAGE_SIZE, 0);
489 if (VBOX_FAILURE(rc))
490 {
491 AssertMsgFailed(("rc=%Vrc offCur=%RGp %s\n", rc, offCur, pszDesc));
492 break;
493 }
494 }
495 }
496
497 if (VBOX_SUCCESS(rc))
498 {
499 GCPtr |= offPage;
500 *pRCPtr = GCPtr;
501 AssertLogRelReturn(*pRCPtr == GCPtr, VERR_INTERNAL_ERROR);
502 }
503 }
504 return rc;
505}
506
507
508
509
510/**
511 * Locks and Maps HC virtual memory into the hypervisor region in the GC.
512 *
513 * @return VBox status code.
514 *
515 * @param pVM VM handle.
516 * @param pvHC Host context address of the memory (may be not page aligned).
517 * @param cb Size of the memory. Will be rounded up to nearest page.
518 * @param fFree Set this if MM is responsible for freeing the memory using SUPPageFree.
519 * @param pszDesc Mapping description.
520 * @param pGCPtr Where to store the GC address corresponding to pvHC.
521 */
522MMR3DECL(int) MMR3HyperMapHCRam(PVM pVM, void *pvHC, size_t cb, bool fFree, const char *pszDesc, PRTGCPTR pGCPtr)
523{
524 LogFlow(("MMR3HyperMapHCRam: pvHc=%p cb=%d fFree=%d pszDesc=%p:{%s} pGCPtr=%p\n", pvHC, (int)cb, fFree, pszDesc, pszDesc, pGCPtr));
525
526 /*
527 * Validate input.
528 */
529 if ( !pvHC
530 || cb <= 0
531 || !pszDesc
532 || !*pszDesc)
533 {
534 AssertMsgFailed(("Invalid parameter\n"));
535 return VERR_INVALID_PARAMETER;
536 }
537
538 /*
539 * Page align address and size.
540 */
541 void *pvHCPage = (void *)((uintptr_t)pvHC & PAGE_BASE_HC_MASK);
542 cb += (uintptr_t)pvHC & PAGE_OFFSET_MASK;
543 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
544
545 /*
546 * Add the memory to the hypervisor area.
547 */
548 RTGCPTR GCPtr;
549 PMMLOOKUPHYPER pLookup;
550 int rc = mmR3HyperMap(pVM, cb, pszDesc, &GCPtr, &pLookup);
551 if (VBOX_SUCCESS(rc))
552 {
553 /*
554 * Lock the heap memory and tell PGM about the locked pages.
555 */
556 PMMLOCKEDMEM pLockedMem;
557 rc = mmR3LockMem(pVM, pvHCPage, cb, fFree ? MM_LOCKED_TYPE_HYPER : MM_LOCKED_TYPE_HYPER_NOFREE, &pLockedMem, false /* fSilentFailure */);
558 if (VBOX_SUCCESS(rc))
559 {
560 /* map the stuff into guest address space. */
561 if (pVM->mm.s.fPGMInitialized)
562 rc = mmR3MapLocked(pVM, pLockedMem, GCPtr, 0, ~(size_t)0, 0);
563 if (VBOX_SUCCESS(rc))
564 {
565 pLookup->enmType = MMLOOKUPHYPERTYPE_LOCKED;
566 pLookup->u.Locked.pvHC = pvHC;
567 pLookup->u.Locked.pvR0 = NIL_RTR0PTR;
568 pLookup->u.Locked.pLockedMem = pLockedMem;
569
570 /* done. */
571 GCPtr |= (uintptr_t)pvHC & PAGE_OFFSET_MASK;
572 *pGCPtr = GCPtr;
573 return rc;
574 }
575 /* Don't care about failure clean, we're screwed if this fails anyway. */
576 }
577 }
578
579 return rc;
580}
581
582
583/**
584 * Maps locked R3 virtual memory into the hypervisor region in the GC.
585 *
586 * @return VBox status code.
587 *
588 * @param pVM VM handle.
589 * @param pvR3 The ring-3 address of the memory, must be page aligned.
590 * @param pvR0 The ring-0 address of the memory, must be page aligned. (optional)
591 * @param cPages The number of pages.
592 * @param paPages The page descriptors.
593 * @param pszDesc Mapping description.
594 * @param pGCPtr Where to store the GC address corresponding to pvHC.
595 */
596MMR3DECL(int) MMR3HyperMapPages(PVM pVM, void *pvR3, RTR0PTR pvR0, size_t cPages, PCSUPPAGE paPages, const char *pszDesc, PRTGCPTR pGCPtr)
597{
598 LogFlow(("MMR3HyperMapPages: pvR3=%p pvR0=%p cPages=%zu paPages=%p pszDesc=%p:{%s} pGCPtr=%p\n",
599 pvR3, pvR0, cPages, paPages, pszDesc, pszDesc, pGCPtr));
600
601 /*
602 * Validate input.
603 */
604 AssertPtrReturn(pvR3, VERR_INVALID_POINTER);
605 AssertPtrReturn(paPages, VERR_INVALID_POINTER);
606 AssertReturn(cPages > 0, VERR_INVALID_PARAMETER);
607 AssertReturn(cPages < 1024, VERR_INVALID_PARAMETER);
608 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
609 AssertReturn(*pszDesc, VERR_INVALID_PARAMETER);
610 AssertPtrReturn(pGCPtr, VERR_INVALID_PARAMETER);
611
612 /*
613 * Add the memory to the hypervisor area.
614 */
615 RTGCPTR GCPtr;
616 PMMLOOKUPHYPER pLookup;
617 int rc = mmR3HyperMap(pVM, cPages << PAGE_SHIFT, pszDesc, &GCPtr, &pLookup);
618 if (VBOX_SUCCESS(rc))
619 {
620 /*
621 * Create a locked memory record and tell PGM about this.
622 */
623 PMMLOCKEDMEM pLockedMem = (PMMLOCKEDMEM)MMR3HeapAlloc(pVM, MM_TAG_MM, RT_OFFSETOF(MMLOCKEDMEM, aPhysPages[cPages]));
624 if (pLockedMem)
625 {
626 pLockedMem->pv = pvR3;
627 pLockedMem->cb = cPages << PAGE_SHIFT;
628 pLockedMem->eType = MM_LOCKED_TYPE_HYPER_PAGES;
629 memset(&pLockedMem->u, 0, sizeof(pLockedMem->u));
630 for (size_t i = 0; i < cPages; i++)
631 {
632 AssertReleaseReturn(paPages[i].Phys != 0 && paPages[i].Phys != NIL_RTHCPHYS && !(paPages[i].Phys & PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR);
633 pLockedMem->aPhysPages[i].Phys = paPages[i].Phys;
634 pLockedMem->aPhysPages[i].uReserved = (RTHCUINTPTR)pLockedMem;
635 }
636
637 /* map the stuff into guest address space. */
638 if (pVM->mm.s.fPGMInitialized)
639 rc = mmR3MapLocked(pVM, pLockedMem, GCPtr, 0, ~(size_t)0, 0);
640 if (VBOX_SUCCESS(rc))
641 {
642 pLookup->enmType = MMLOOKUPHYPERTYPE_LOCKED;
643 pLookup->u.Locked.pvHC = pvR3;
644 pLookup->u.Locked.pvR0 = pvR0;
645 pLookup->u.Locked.pLockedMem = pLockedMem;
646
647 /* done. */
648 *pGCPtr = GCPtr;
649 return rc;
650 }
651 /* Don't care about failure clean, we're screwed if this fails anyway. */
652 }
653 }
654
655 return rc;
656}
657
658
659/**
660 * Reserves a hypervisor memory area.
661 * Most frequent usage is fence pages and dynamically mappings like the guest PD and PDPT.
662 *
663 * @return VBox status code.
664 *
665 * @param pVM VM handle.
666 * @param cb Size of the memory. Will be rounded up to nearest page.
667 * @param pszDesc Mapping description.
668 * @param pGCPtr Where to store the assigned GC address. Optional.
669 */
670MMR3DECL(int) MMR3HyperReserve(PVM pVM, unsigned cb, const char *pszDesc, PRTGCPTR pGCPtr)
671{
672 LogFlow(("MMR3HyperMapHCRam: cb=%d pszDesc=%p:{%s} pGCPtr=%p\n", (int)cb, pszDesc, pszDesc, pGCPtr));
673
674 /*
675 * Validate input.
676 */
677 if ( cb <= 0
678 || !pszDesc
679 || !*pszDesc)
680 {
681 AssertMsgFailed(("Invalid parameter\n"));
682 return VERR_INVALID_PARAMETER;
683 }
684
685 /*
686 * Add the memory to the hypervisor area.
687 */
688 RTGCPTR GCPtr;
689 PMMLOOKUPHYPER pLookup;
690 int rc = mmR3HyperMap(pVM, cb, pszDesc, &GCPtr, &pLookup);
691 if (VBOX_SUCCESS(rc))
692 {
693 pLookup->enmType = MMLOOKUPHYPERTYPE_DYNAMIC;
694 if (pGCPtr)
695 *pGCPtr = GCPtr;
696 return VINF_SUCCESS;
697 }
698 return rc;
699}
700
701
702/**
703 * Adds memory to the hypervisor memory arena.
704 *
705 * @return VBox status code.
706 * @param pVM The VM handle.
707 * @param cb Size of the memory. Will be rounded up to neares page.
708 * @param pszDesc The description of the memory.
709 * @param pGCPtr Where to store the GC address.
710 * @param ppLookup Where to store the pointer to the lookup record.
711 * @remark We assume the threading structure of VBox imposes natural
712 * serialization of most functions, this one included.
713 */
714static int mmR3HyperMap(PVM pVM, const size_t cb, const char *pszDesc, PRTGCPTR pGCPtr, PMMLOOKUPHYPER *ppLookup)
715{
716 /*
717 * Validate input.
718 */
719 const uint32_t cbAligned = RT_ALIGN(cb, PAGE_SIZE);
720 AssertReturn(cbAligned >= cb, VERR_INVALID_PARAMETER);
721 if (pVM->mm.s.offHyperNextStatic + cbAligned >= pVM->mm.s.cbHyperArea) /* don't use the last page, it's a fence. */
722 {
723 AssertMsgFailed(("Out of static mapping space in the HMA! offHyperAreaGC=%x cbAligned=%x\n",
724 pVM->mm.s.offHyperNextStatic, cbAligned));
725 return VERR_NO_MEMORY;
726 }
727
728 /*
729 * Allocate lookup record.
730 */
731 PMMLOOKUPHYPER pLookup;
732 int rc = MMHyperAlloc(pVM, sizeof(*pLookup), 1, MM_TAG_MM, (void **)&pLookup);
733 if (VBOX_SUCCESS(rc))
734 {
735 /*
736 * Initialize it and insert it.
737 */
738 pLookup->offNext = pVM->mm.s.offLookupHyper;
739 pLookup->cb = cbAligned;
740 pLookup->off = pVM->mm.s.offHyperNextStatic;
741 pVM->mm.s.offLookupHyper = (char *)pLookup - (char *)pVM->mm.s.pHyperHeapHC;
742 if (pLookup->offNext != (int32_t)NIL_OFFSET)
743 pLookup->offNext -= pVM->mm.s.offLookupHyper;
744 pLookup->enmType = MMLOOKUPHYPERTYPE_INVALID;
745 memset(&pLookup->u, 0xff, sizeof(pLookup->u));
746 pLookup->pszDesc = pszDesc;
747
748 /* Mapping. */
749 *pGCPtr = pVM->mm.s.pvHyperAreaGC + pVM->mm.s.offHyperNextStatic;
750 pVM->mm.s.offHyperNextStatic += cbAligned;
751
752 /* Return pointer. */
753 *ppLookup = pLookup;
754 }
755
756 AssertRC(rc);
757 LogFlow(("mmR3HyperMap: returns %Vrc *pGCPtr=%VGv\n", rc, *pGCPtr));
758 return rc;
759}
760
761
762/**
763 * Allocates a new heap.
764 *
765 * @returns VBox status code.
766 * @param pVM The VM handle.
767 * @param cb The size of the new heap.
768 * @param ppHeap Where to store the heap pointer on successful return.
769 */
770static int mmR3HyperHeapCreate(PVM pVM, const size_t cb, PMMHYPERHEAP *ppHeap)
771{
772 /*
773 * Allocate the hypervisor heap.
774 */
775 const uint32_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
776 AssertReturn(cbAligned >= cb, VERR_INVALID_PARAMETER);
777 void *pv;
778 int rc = SUPPageAlloc(cbAligned >> PAGE_SHIFT, &pv);
779 if (VBOX_SUCCESS(rc))
780 {
781 /*
782 * Initialize the heap and first free chunk.
783 */
784 PMMHYPERHEAP pHeap = (PMMHYPERHEAP)pv;
785 pHeap->u32Magic = MMHYPERHEAP_MAGIC;
786 pHeap->pbHeapR3 = (uint8_t *)pHeap + MMYPERHEAP_HDR_SIZE;
787 pHeap->pbHeapR0 = (uintptr_t)pHeap->pbHeapR3; /** @todo #1865: Map heap into ring-0 on darwin. */
788 //pHeap->pbHeapGC = 0; // set by mmR3HyperHeapMap()
789 pHeap->pVMR3 = pVM;
790 pHeap->pVMR0 = pVM->pVMR0;
791 pHeap->pVMRC = pVM->pVMRC;
792 pHeap->cbHeap = cbAligned - MMYPERHEAP_HDR_SIZE;
793 pHeap->cbFree = pHeap->cbHeap - sizeof(MMHYPERCHUNK);
794 //pHeap->offFreeHead = 0;
795 //pHeap->offFreeTail = 0;
796 pHeap->offPageAligned = pHeap->cbHeap;
797 //pHeap->HyperHeapStatTree = 0;
798
799 PMMHYPERCHUNKFREE pFree = (PMMHYPERCHUNKFREE)pHeap->pbHeapR3;
800 pFree->cb = pHeap->cbFree;
801 //pFree->core.offNext = 0;
802 MMHYPERCHUNK_SET_TYPE(&pFree->core, MMHYPERCHUNK_FLAGS_FREE);
803 pFree->core.offHeap = -(int32_t)MMYPERHEAP_HDR_SIZE;
804 //pFree->offNext = 0;
805 //pFree->offPrev = 0;
806
807 STAMR3Register(pVM, &pHeap->cbHeap, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, "/MM/HyperHeap/cbHeap", STAMUNIT_BYTES, "The heap size.");
808 STAMR3Register(pVM, &pHeap->cbFree, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, "/MM/HyperHeap/cbFree", STAMUNIT_BYTES, "The free space.");
809
810 *ppHeap = pHeap;
811 return VINF_SUCCESS;
812 }
813 AssertMsgFailed(("SUPPageAlloc(%d,) -> %Vrc\n", cbAligned >> PAGE_SHIFT, rc));
814
815 *ppHeap = NULL;
816 return rc;
817}
818
819
820/**
821 * Allocates a new heap.
822 */
823static int mmR3HyperHeapMap(PVM pVM, PMMHYPERHEAP pHeap, PRTGCPTR ppHeapGC)
824{
825 int rc = MMR3HyperMapHCRam(pVM, pHeap, pHeap->cbHeap + MMYPERHEAP_HDR_SIZE, true, "Heap", ppHeapGC);
826 if (VBOX_SUCCESS(rc))
827 {
828 pHeap->pVMRC = pVM->pVMRC;
829 pHeap->pbHeapRC = *ppHeapGC + MMYPERHEAP_HDR_SIZE;
830 /* Reserve a page for fencing. */
831 MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
832 }
833 return rc;
834}
835
836
#if 0
/**
 * Destroys a heap.
 *
 * Currently compiled out: the heap is torn down implicitly when the locked
 * memory backing it is unlocked and freed.
 */
static int mmR3HyperHeapDestroy(PVM pVM, PMMHYPERHEAP pHeap)
{
    /* all this is dealt with when unlocking and freeing locked memory. */
}
#endif
846
847
848/**
849 * Allocates memory in the Hypervisor (GC VMM) area which never will
850 * be freed and doesn't have any offset based relation to other heap blocks.
851 *
852 * The latter means that two blocks allocated by this API will not have the
853 * same relative position to each other in GC and HC. In short, never use
854 * this API for allocating nodes for an offset based AVL tree!
855 *
856 * The returned memory is of course zeroed.
857 *
858 * @returns VBox status code.
859 * @param pVM The VM to operate on.
860 * @param cb Number of bytes to allocate.
861 * @param uAlignment Required memory alignment in bytes.
862 * Values are 0,8,16,32 and PAGE_SIZE.
863 * 0 -> default alignment, i.e. 8 bytes.
864 * @param enmTag The statistics tag.
865 * @param ppv Where to store the address to the allocated
866 * memory.
867 * @remark This is assumed not to be used at times when serialization is required.
868 */
869MMDECL(int) MMR3HyperAllocOnceNoRel(PVM pVM, size_t cb, unsigned uAlignment, MMTAG enmTag, void **ppv)
870{
871 AssertMsg(cb >= 8, ("Hey! Do you really mean to allocate less than 8 bytes?! cb=%d\n", cb));
872
873 /*
874 * Choose between allocating a new chunk of HMA memory
875 * and the heap. We will only do BIG allocations from HMA.
876 */
877 if ( cb < _64K
878 && ( uAlignment != PAGE_SIZE
879 || cb < 48*_1K))
880 {
881 int rc = MMHyperAlloc(pVM, cb, uAlignment, enmTag, ppv);
882 if ( rc != VERR_MM_HYPER_NO_MEMORY
883 || cb <= 8*_1K)
884 {
885 Log2(("MMR3HyperAllocOnceNoRel: cb=%#zx uAlignment=%#x returns %Rrc and *ppv=%p\n",
886 cb, uAlignment, rc, *ppv));
887 return rc;
888 }
889 }
890
891 /*
892 * Validate alignment.
893 */
894 switch (uAlignment)
895 {
896 case 0:
897 case 8:
898 case 16:
899 case 32:
900 case PAGE_SIZE:
901 break;
902 default:
903 AssertMsgFailed(("Invalid alignment %u\n", uAlignment));
904 return VERR_INVALID_PARAMETER;
905 }
906
907 /*
908 * Allocate the pages and the HMA space.
909 */
910 cb = RT_ALIGN(cb, PAGE_SIZE);
911 void *pvPages;
912 int rc = SUPPageAlloc(cb >> PAGE_SHIFT, &pvPages);
913 if (VBOX_SUCCESS(rc))
914 {
915 RTGCPTR GCPtr;
916 rc = MMR3HyperMapHCRam(pVM, pvPages, cb, true, mmR3GetTagName(enmTag), &GCPtr);
917 if (VBOX_SUCCESS(rc))
918 {
919 *ppv = pvPages;
920 Log2(("MMR3HyperAllocOnceNoRel: cb=%#x uAlignment=%#x returns VINF_SUCCESS and *ppv=%p\n",
921 cb, uAlignment, *ppv));
922 return rc;
923 }
924 AssertMsgFailed(("Failed to allocate %zd bytes! %Rrc\n", cb, rc));
925 SUPPageFree(pvPages, cb >> PAGE_SHIFT);
926
927 /*
928 * HACK ALERT! Try allocate it off the heap so that we don't freak
929 * out during vga/vmmdev mmio2 allocation with certain ram sizes.
930 */
931 /** @todo make a proper fix for this so we will never end up in this kind of situation! */
932 Log(("MMR3HyperAllocOnceNoRel: MMR3HyperMapHCRam failed with rc=%Rrc, try MMHyperAlloc(,%#d,,) instead\n", rc, cb));
933 int rc2 = MMHyperAlloc(pVM, cb, uAlignment, enmTag, ppv);
934 if (RT_SUCCESS(rc2))
935 {
936 Log2(("MMR3HyperAllocOnceNoRel: cb=%#x uAlignment=%#x returns %Rrc and *ppv=%p\n",
937 cb, uAlignment, rc, *ppv));
938 return rc;
939 }
940 }
941 else
942 AssertMsgFailed(("Failed to allocate %zd bytes! %Rrc\n", cb, rc));
943
944 if (rc == VERR_NO_MEMORY)
945 rc = VERR_MM_HYPER_NO_MEMORY;
946 LogRel(("MMR3HyperAllocOnceNoRel: cb=%#zx uAlignment=%#x returns %Rrc\n", cb, uAlignment, rc));
947 return rc;
948}
949
950
951/**
952 * Convert hypervisor HC virtual address to HC physical address.
953 *
954 * @returns HC physical address.
955 * @param pVM VM Handle
956 * @param pvHC Host context physical address.
957 */
958MMR3DECL(RTHCPHYS) MMR3HyperHCVirt2HCPhys(PVM pVM, void *pvHC)
959{
960 PMMLOOKUPHYPER pLookup = (PMMLOOKUPHYPER)((char*)pVM->mm.s.pHyperHeapHC + pVM->mm.s.offLookupHyper);
961 for (;;)
962 {
963 switch (pLookup->enmType)
964 {
965 case MMLOOKUPHYPERTYPE_LOCKED:
966 {
967 unsigned off = (char *)pvHC - (char *)pLookup->u.Locked.pvHC;
968 if (off < pLookup->cb)
969 return (pLookup->u.Locked.pLockedMem->aPhysPages[off >> PAGE_SHIFT].Phys & X86_PTE_PAE_PG_MASK) | (off & PAGE_OFFSET_MASK);
970 break;
971 }
972
973 case MMLOOKUPHYPERTYPE_HCPHYS:
974 {
975 unsigned off = (char *)pvHC - (char *)pLookup->u.HCPhys.pvHC;
976 if (off < pLookup->cb)
977 return pLookup->u.HCPhys.HCPhys + off;
978 break;
979 }
980
981 case MMLOOKUPHYPERTYPE_GCPHYS:
982 case MMLOOKUPHYPERTYPE_MMIO2:
983 case MMLOOKUPHYPERTYPE_DYNAMIC:
984 /* can (or don't want to) convert these kind of records. */
985 break;
986
987 default:
988 AssertMsgFailed(("enmType=%d\n", pLookup->enmType));
989 break;
990 }
991
992 /* next */
993 if ((unsigned)pLookup->offNext == NIL_OFFSET)
994 break;
995 pLookup = (PMMLOOKUPHYPER)((char *)pLookup + pLookup->offNext);
996 }
997
998 AssertMsgFailed(("pvHC=%p is not inside the hypervisor memory area!\n", pvHC));
999 return NIL_RTHCPHYS;
1000}
1001
1002
1003#if 0 /* unused, not implemented */
1004/**
1005 * Convert hypervisor HC physical address to HC virtual address.
1006 *
1007 * @returns HC virtual address.
1008 * @param pVM VM Handle
1009 * @param HCPhys Host context physical address.
1010 */
1011MMR3DECL(void *) MMR3HyperHCPhys2HCVirt(PVM pVM, RTHCPHYS HCPhys)
1012{
1013 void *pv;
1014 int rc = MMR3HyperHCPhys2HCVirtEx(pVM, HCPhys, &pv);
1015 if (VBOX_SUCCESS(rc))
1016 return pv;
1017 AssertMsgFailed(("Invalid address HCPhys=%x rc=%d\n", HCPhys, rc));
1018 return NULL;
1019}
1020
1021
1022/**
1023 * Convert hypervisor HC physical address to HC virtual address.
1024 *
1025 * @returns VBox status.
1026 * @param pVM VM Handle
1027 * @param HCPhys Host context physical address.
1028 * @param ppv Where to store the HC virtual address.
1029 */
1030MMR3DECL(int) MMR3HyperHCPhys2HCVirtEx(PVM pVM, RTHCPHYS HCPhys, void **ppv)
1031{
1032 /*
1033 * Linear search.
1034 */
1035 /** @todo implement when actually used. */
1036 return VERR_INVALID_POINTER;
1037}
1038#endif /* unused, not implemented */
1039
1040
1041/**
1042 * Read hypervisor memory from GC virtual address.
1043 *
1044 * @returns VBox status.
1045 * @param pVM VM handle.
1046 * @param pvDst Destination address (HC of course).
1047 * @param GCPtr GC virtual address.
1048 * @param cb Number of bytes to read.
1049 */
1050MMR3DECL(int) MMR3HyperReadGCVirt(PVM pVM, void *pvDst, RTGCPTR GCPtr, size_t cb)
1051{
1052 if (GCPtr - pVM->mm.s.pvHyperAreaGC >= pVM->mm.s.cbHyperArea)
1053 return VERR_INVALID_PARAMETER;
1054 return PGMR3MapRead(pVM, pvDst, GCPtr, cb);
1055}
1056
1057
1058/**
1059 * Info handler for 'hma', it dumps the list of lookup records for the hypervisor memory area.
1060 *
1061 * @param pVM The VM handle.
1062 * @param pHlp Callback functions for doing output.
1063 * @param pszArgs Argument string. Optional and specific to the handler.
1064 */
1065static DECLCALLBACK(void) mmR3HyperInfoHma(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1066{
1067 pHlp->pfnPrintf(pHlp, "Hypervisor Memory Area (HMA) Layout: Base %VGv, 0x%08x bytes\n",
1068 pVM->mm.s.pvHyperAreaGC, pVM->mm.s.cbHyperArea);
1069
1070 PMMLOOKUPHYPER pLookup = (PMMLOOKUPHYPER)((char*)pVM->mm.s.pHyperHeapHC + pVM->mm.s.offLookupHyper);
1071 for (;;)
1072 {
1073 switch (pLookup->enmType)
1074 {
1075 case MMLOOKUPHYPERTYPE_LOCKED:
1076 pHlp->pfnPrintf(pHlp, "%VGv-%VGv %VHv LOCKED %-*s %s\n",
1077 pLookup->off + pVM->mm.s.pvHyperAreaGC,
1078 pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
1079 pLookup->u.Locked.pvHC,
1080 sizeof(RTHCPTR) * 2,
1081 pLookup->u.Locked.pLockedMem->eType == MM_LOCKED_TYPE_HYPER_NOFREE ? "nofree"
1082 : pLookup->u.Locked.pLockedMem->eType == MM_LOCKED_TYPE_HYPER ? "autofree"
1083 : pLookup->u.Locked.pLockedMem->eType == MM_LOCKED_TYPE_HYPER_PAGES ? "pages"
1084 : pLookup->u.Locked.pLockedMem->eType == MM_LOCKED_TYPE_PHYS ? "gstphys"
1085 : "??",
1086 pLookup->pszDesc);
1087 break;
1088
1089 case MMLOOKUPHYPERTYPE_HCPHYS:
1090 pHlp->pfnPrintf(pHlp, "%VGv-%VGv %VHv HCPHYS %VHp %s\n",
1091 pLookup->off + pVM->mm.s.pvHyperAreaGC,
1092 pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
1093 pLookup->u.HCPhys.pvHC, pLookup->u.HCPhys.HCPhys,
1094 pLookup->pszDesc);
1095 break;
1096
1097 case MMLOOKUPHYPERTYPE_GCPHYS:
1098 pHlp->pfnPrintf(pHlp, "%VGv-%VGv %*s GCPHYS %VGp%*s %s\n",
1099 pLookup->off + pVM->mm.s.pvHyperAreaGC,
1100 pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
1101 sizeof(RTHCPTR) * 2, "",
1102 pLookup->u.GCPhys.GCPhys, RT_ABS(sizeof(RTHCPHYS) - sizeof(RTGCPHYS)) * 2, "",
1103 pLookup->pszDesc);
1104 break;
1105
1106 case MMLOOKUPHYPERTYPE_MMIO2:
1107 pHlp->pfnPrintf(pHlp, "%VGv-%VGv %*s MMIO2 %VGp%*s %s\n",
1108 pLookup->off + pVM->mm.s.pvHyperAreaGC,
1109 pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
1110 sizeof(RTHCPTR) * 2, "",
1111 pLookup->u.MMIO2.off, RT_ABS(sizeof(RTHCPHYS) - sizeof(RTGCPHYS)) * 2, "",
1112 pLookup->pszDesc);
1113 break;
1114
1115 case MMLOOKUPHYPERTYPE_DYNAMIC:
1116 pHlp->pfnPrintf(pHlp, "%VGv-%VGv %*s DYNAMIC %*s %s\n",
1117 pLookup->off + pVM->mm.s.pvHyperAreaGC,
1118 pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
1119 sizeof(RTHCPTR) * 2, "",
1120 sizeof(RTHCPTR) * 2, "",
1121 pLookup->pszDesc);
1122 break;
1123
1124 default:
1125 AssertMsgFailed(("enmType=%d\n", pLookup->enmType));
1126 break;
1127 }
1128
1129 /* next */
1130 if ((unsigned)pLookup->offNext == NIL_OFFSET)
1131 break;
1132 pLookup = (PMMLOOKUPHYPER)((char *)pLookup + pLookup->offNext);
1133 }
1134}
1135
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette