VirtualBox

source: vbox/trunk/src/VBox/VMM/MMHyper.cpp@ 18784

Last change on this file since 18784 was 18719, checked in by vboxsync, 16 years ago

MMR3HyperMapPages: break, not return.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 40.7 KB
Line 
1/* $Id: MMHyper.cpp 18719 2009-04-05 13:48:16Z vboxsync $ */
2/** @file
3 * MM - Memory Manager - Hypervisor Memory Area.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_MM_HYPER
27#include <VBox/pgm.h>
28#include <VBox/mm.h>
29#include <VBox/dbgf.h>
30#include "MMInternal.h"
31#include <VBox/vm.h>
32#include <VBox/err.h>
33#include <VBox/param.h>
34#include <VBox/log.h>
35#include <iprt/alloc.h>
36#include <iprt/assert.h>
37#include <iprt/string.h>
38
39
40/*******************************************************************************
41* Internal Functions *
42*******************************************************************************/
43static DECLCALLBACK(bool) mmR3HyperRelocateCallback(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew, PGMRELOCATECALL enmMode, void *pvUser);
44static int mmR3HyperMap(PVM pVM, const size_t cb, const char *pszDesc, PRTGCPTR pGCPtr, PMMLOOKUPHYPER *ppLookup);
45static int mmR3HyperHeapCreate(PVM pVM, const size_t cb, PMMHYPERHEAP *ppHeap, PRTR0PTR pR0PtrHeap);
46static int mmR3HyperHeapMap(PVM pVM, PMMHYPERHEAP pHeap, PRTGCPTR ppHeapGC);
47static DECLCALLBACK(void) mmR3HyperInfoHma(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
48
49
50
51
/**
 * Initializes the hypervisor related MM stuff without
 * calling down to PGM.
 *
 * PGM is not initialized at this point, PGM relies on
 * the heap to initialize.
 *
 * @returns VBox status.
 */
int mmR3HyperInit(PVM pVM)
{
    LogFlow(("mmR3HyperInit:\n"));

    /*
     * Decide Hypervisor mapping in the guest context
     * And setup various hypervisor area and heap parameters.
     */
    pVM->mm.s.pvHyperAreaGC = (RTGCPTR)MM_HYPER_AREA_ADDRESS;
    pVM->mm.s.cbHyperArea   = MM_HYPER_AREA_MAX_SIZE;
    /* The HMA base must be 4MB (page directory entry) aligned and below 4GB. */
    AssertRelease(RT_ALIGN_T(pVM->mm.s.pvHyperAreaGC, 1 << X86_PD_SHIFT, RTGCPTR) == pVM->mm.s.pvHyperAreaGC);
    Assert(pVM->mm.s.pvHyperAreaGC < 0xff000000);

    /** @todo @bugref{1865}, @bugref{3202}: Change the cbHyperHeap default
     *        depending on whether VT-x/AMD-V is enabled or not! Don't waste
     *        precious kernel space on heap for the PATM. */
    uint32_t cbHyperHeap;
    int rc = CFGMR3QueryU32(CFGMR3GetChild(CFGMR3GetRoot(pVM), "MM"), "cbHyperHeap", &cbHyperHeap);
    if (rc == VERR_CFGM_NO_PARENT || rc == VERR_CFGM_VALUE_NOT_FOUND)
    {
        /* No explicit configuration - pick a default based on whether we're
           forced to use hardware virtualization (smaller heap needed then). */
        cbHyperHeap = VMMIsHwVirtExtForced(pVM)
                    ? 640*_1K
                    : 1280*_1K;

        /* Adjust for dynamic stuff like RAM mapping chunks. Try playing kind
           of safe with existing configs here (HMA size must not change)... */
        uint64_t cbRam;
        CFGMR3QueryU64Def(CFGMR3GetRoot(pVM), "RamSize", &cbRam, 0);
        if (cbRam > _2G)
        {
            cbRam = RT_MIN(cbRam, _1T);
            cbHyperHeap += (cbRam - _1G) / _1M * 128; /* 128 is a quick guess */
            cbHyperHeap = RT_ALIGN_32(cbHyperHeap, _64K);
        }
    }
    else
        AssertLogRelRCReturn(rc, rc);
    cbHyperHeap = RT_ALIGN_32(cbHyperHeap, PAGE_SIZE);
    LogRel(("MM: cbHyperHeap=%#x (%u)\n", cbHyperHeap, cbHyperHeap));

    /*
     * Allocate the hypervisor heap.
     *
     * (This must be done before we start adding memory to the
     * hypervisor static area because lookup records are allocated from it.)
     */
    rc = mmR3HyperHeapCreate(pVM, cbHyperHeap, &pVM->mm.s.pHyperHeapR3, &pVM->mm.s.pHyperHeapR0);
    if (RT_SUCCESS(rc))
    {
        /*
         * Make a small head fence to fend of accidental sequential access.
         */
        MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);

        /*
         * Map the VM structure into the hypervisor space.
         */
        AssertRelease(pVM->cbSelf == RT_UOFFSETOF(VM, aCpus[pVM->cCPUs]));
        RTGCPTR GCPtr;
        rc = MMR3HyperMapPages(pVM, pVM, pVM->pVMR0, RT_ALIGN_Z(pVM->cbSelf, PAGE_SIZE) >> PAGE_SHIFT, pVM->paVMPagesR3, "VM", &GCPtr);
        if (RT_SUCCESS(rc))
        {
            /* Publish the RC address of the VM structure to all VCPUs. */
            pVM->pVMRC = (RTRCPTR)GCPtr;
            for (uint32_t i = 0; i < pVM->cCPUs; i++)
                pVM->aCpus[i].pVMRC = pVM->pVMRC;

            /* Reserve a page for fencing. */
            MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);

            /*
             * Map the heap into the hypervisor space.
             */
            rc = mmR3HyperHeapMap(pVM, pVM->mm.s.pHyperHeapR3, &GCPtr);
            if (RT_SUCCESS(rc))
            {
                pVM->mm.s.pHyperHeapRC = (RTRCPTR)GCPtr;
                Assert(pVM->mm.s.pHyperHeapRC == GCPtr);

                /*
                 * Register info handlers.
                 */
                DBGFR3InfoRegisterInternal(pVM, "hma", "Show the layout of the Hypervisor Memory Area.", mmR3HyperInfoHma);

                LogFlow(("mmR3HyperInit: returns VINF_SUCCESS\n"));
                return VINF_SUCCESS;
            }
            /* Caller will do proper cleanup. */
        }
    }

    LogFlow(("mmR3HyperInit: returns %Rrc\n", rc));
    return rc;
}
154
155
/**
 * Finalizes the HMA mapping.
 *
 * This is called later during init, most (all) HMA allocations should be done
 * by the time this function is called.
 *
 * @returns VBox status.
 */
VMMR3DECL(int) MMR3HyperInitFinalize(PVM pVM)
{
    LogFlow(("MMR3HyperInitFinalize:\n"));

    /*
     * Adjust and create the HMA mapping.
     */
    /* Shrink the area in whole 4MB steps so it only just covers what was
       statically reserved plus a 64KB safety margin. */
    while ((RTINT)pVM->mm.s.offHyperNextStatic + 64*_1K < (RTINT)pVM->mm.s.cbHyperArea - _4M)
        pVM->mm.s.cbHyperArea -= _4M;
    int rc = PGMR3MapPT(pVM, pVM->mm.s.pvHyperAreaGC, pVM->mm.s.cbHyperArea, 0 /*fFlags*/,
                        mmR3HyperRelocateCallback, NULL, "Hypervisor Memory Area");
    if (RT_FAILURE(rc))
        return rc;
    /* From now on MMR3HyperMap* calls update the page tables directly. */
    pVM->mm.s.fPGMInitialized = true;

    /*
     * Do all the delayed mappings.
     *
     * Walks the lookup record list (offset linked, terminated by NIL_OFFSET)
     * and establishes the page table entries that could not be created
     * before PGM was initialized.
     */
    PMMLOOKUPHYPER pLookup = (PMMLOOKUPHYPER)((uintptr_t)pVM->mm.s.pHyperHeapR3 + pVM->mm.s.offLookupHyper);
    for (;;)
    {
        RTGCPTR     GCPtr  = pVM->mm.s.pvHyperAreaGC + pLookup->off;
        uint32_t    cPages = pLookup->cb >> PAGE_SHIFT;
        switch (pLookup->enmType)
        {
            case MMLOOKUPHYPERTYPE_LOCKED:
            {
                /* Page array mapping - one PGMMap call per page. */
                PCRTHCPHYS paHCPhysPages = pLookup->u.Locked.paHCPhysPages;
                for (uint32_t i = 0; i < cPages; i++)
                {
                    rc = PGMMap(pVM, GCPtr + (i << PAGE_SHIFT), paHCPhysPages[i], PAGE_SIZE, 0);
                    AssertRCReturn(rc, rc);
                }
                break;
            }

            case MMLOOKUPHYPERTYPE_HCPHYS:
                /* Contiguous physical memory - single call covers it all. */
                rc = PGMMap(pVM, GCPtr, pLookup->u.HCPhys.HCPhys, pLookup->cb, 0);
                break;

            case MMLOOKUPHYPERTYPE_GCPHYS:
            {
                /* Guest physical range - resolve each page to HC physical first. */
                const RTGCPHYS GCPhys = pLookup->u.GCPhys.GCPhys;
                const uint32_t cb = pLookup->cb;
                for (uint32_t off = 0; off < cb; off += PAGE_SIZE)
                {
                    RTHCPHYS HCPhys;
                    rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys + off, &HCPhys);
                    if (RT_FAILURE(rc))
                        break;
                    rc = PGMMap(pVM, GCPtr + off, HCPhys, PAGE_SIZE, 0);
                    if (RT_FAILURE(rc))
                        break;
                }
                break;
            }

            case MMLOOKUPHYPERTYPE_MMIO2:
            {
                /* MMIO2 region portion - resolve each page via the owning device. */
                const RTGCPHYS offEnd = pLookup->u.MMIO2.off + pLookup->cb;
                for (RTGCPHYS offCur = pLookup->u.MMIO2.off; offCur < offEnd; offCur += PAGE_SIZE)
                {
                    RTHCPHYS HCPhys;
                    rc = PGMR3PhysMMIO2GetHCPhys(pVM, pLookup->u.MMIO2.pDevIns, pLookup->u.MMIO2.iRegion, offCur, &HCPhys);
                    if (RT_FAILURE(rc))
                        break;
                    rc = PGMMap(pVM, GCPtr + (offCur - pLookup->u.MMIO2.off), HCPhys, PAGE_SIZE, 0);
                    if (RT_FAILURE(rc))
                        break;
                }
                break;
            }

            case MMLOOKUPHYPERTYPE_DYNAMIC:
                /* do nothing here since these are either fences or managed by someone else using PGM. */
                break;

            default:
                AssertMsgFailed(("enmType=%d\n", pLookup->enmType));
                break;
        }

        if (RT_FAILURE(rc))
        {
            AssertMsgFailed(("rc=%Rrc cb=%d off=%#RX32 enmType=%d pszDesc=%s\n",
                             rc, pLookup->cb, pLookup->off, pLookup->enmType, pLookup->pszDesc));
            return rc;
        }

        /* next */
        if (pLookup->offNext == (int32_t)NIL_OFFSET)
            break;
        pLookup = (PMMLOOKUPHYPER)((uintptr_t)pLookup + pLookup->offNext);
    }

    LogFlow(("MMR3HyperInitFinalize: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
262
263
/**
 * Callback function which will be called when PGM is trying to find
 * a new location for the mapping.
 *
 * The callback is called in two modes, 1) the check mode and 2) the relocate mode.
 * In 1) the callback should say if it objects to a suggested new location. If it
 * accepts the new location, it is called again for doing its relocation.
 *
 *
 * @returns true if the location is ok.
 * @returns false if another location should be found.
 * @param   pVM         The VM handle.
 * @param   GCPtrOld    The old virtual address.
 * @param   GCPtrNew    The new virtual address.
 * @param   enmMode     Used to indicate the callback mode.
 * @param   pvUser      User argument. Ignored.
 * @remark  The return value is not a failure indicator, it's an acceptance
 *          indicator. Relocation can not fail!
 */
static DECLCALLBACK(bool) mmR3HyperRelocateCallback(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew, PGMRELOCATECALL enmMode, void *pvUser)
{
    switch (enmMode)
    {
        /*
         * Verify location - all locations are good for us.
         */
        case PGMRELOCATECALL_SUGGEST:
            return true;

        /*
         * Execute the relocation.
         */
        case PGMRELOCATECALL_RELOCATE:
        {
            /*
             * Accepted!
             */
            AssertMsg(GCPtrOld == pVM->mm.s.pvHyperAreaGC, ("GCPtrOld=%RGv pVM->mm.s.pvHyperAreaGC=%RGv\n", GCPtrOld, pVM->mm.s.pvHyperAreaGC));
            Log(("Relocating the hypervisor from %RGv to %RGv\n", GCPtrOld, GCPtrNew));

            /*
             * Relocate the VM structure and ourselves.
             *
             * All RC pointers into the HMA are adjusted by the same delta;
             * the heap's own RC pointers are patched here as well since the
             * heap lives inside the HMA.
             */
            RTGCINTPTR offDelta = GCPtrNew - GCPtrOld;
            pVM->pVMRC += offDelta;
            for (uint32_t i = 0; i < pVM->cCPUs; i++)
                pVM->aCpus[i].pVMRC = pVM->pVMRC;

            pVM->mm.s.pvHyperAreaGC += offDelta;
            Assert(pVM->mm.s.pvHyperAreaGC < _4G);
            pVM->mm.s.pHyperHeapRC += offDelta;
            pVM->mm.s.pHyperHeapR3->pbHeapRC += offDelta;
            pVM->mm.s.pHyperHeapR3->pVMRC = pVM->pVMRC;

            /*
             * Relocate the rest.
             */
            VMR3Relocate(pVM, offDelta);
            return true;
        }

        default:
            AssertMsgFailed(("Invalid relocation mode %d\n", enmMode));
    }

    return false;
}
331
332
333/**
334 * Maps contiguous HC physical memory into the hypervisor region in the GC.
335 *
336 * @return VBox status code.
337 *
338 * @param pVM VM handle.
339 * @param pvR3 Ring-3 address of the memory. Must be page aligned!
340 * @param pvR0 Optional ring-0 address of the memory.
341 * @param HCPhys Host context physical address of the memory to be
342 * mapped. Must be page aligned!
343 * @param cb Size of the memory. Will be rounded up to nearest page.
344 * @param pszDesc Description.
345 * @param pGCPtr Where to store the GC address.
346 */
347VMMR3DECL(int) MMR3HyperMapHCPhys(PVM pVM, void *pvR3, RTR0PTR pvR0, RTHCPHYS HCPhys, size_t cb, const char *pszDesc, PRTGCPTR pGCPtr)
348{
349 LogFlow(("MMR3HyperMapHCPhys: pvR3=%p pvR0=%p HCPhys=%RHp cb=%d pszDesc=%p:{%s} pGCPtr=%p\n", pvR3, pvR0, HCPhys, (int)cb, pszDesc, pszDesc, pGCPtr));
350
351 /*
352 * Validate input.
353 */
354 AssertReturn(RT_ALIGN_P(pvR3, PAGE_SIZE) == pvR3, VERR_INVALID_PARAMETER);
355 AssertReturn(RT_ALIGN_T(pvR0, PAGE_SIZE, RTR0PTR) == pvR0, VERR_INVALID_PARAMETER);
356 AssertReturn(RT_ALIGN_T(HCPhys, PAGE_SIZE, RTHCPHYS) == HCPhys, VERR_INVALID_PARAMETER);
357 AssertReturn(pszDesc && *pszDesc, VERR_INVALID_PARAMETER);
358
359 /*
360 * Add the memory to the hypervisor area.
361 */
362 uint32_t cbAligned = RT_ALIGN_32(cb, PAGE_SIZE);
363 AssertReturn(cbAligned >= cb, VERR_INVALID_PARAMETER);
364 RTGCPTR GCPtr;
365 PMMLOOKUPHYPER pLookup;
366 int rc = mmR3HyperMap(pVM, cbAligned, pszDesc, &GCPtr, &pLookup);
367 if (RT_SUCCESS(rc))
368 {
369 pLookup->enmType = MMLOOKUPHYPERTYPE_HCPHYS;
370 pLookup->u.HCPhys.pvR3 = pvR3;
371 pLookup->u.HCPhys.pvR0 = pvR0;
372 pLookup->u.HCPhys.HCPhys = HCPhys;
373
374 /*
375 * Update the page table.
376 */
377 if (pVM->mm.s.fPGMInitialized)
378 rc = PGMMap(pVM, GCPtr, HCPhys, cbAligned, 0);
379 if (RT_SUCCESS(rc))
380 *pGCPtr = GCPtr;
381 }
382 return rc;
383}
384
385
/**
 * Maps contiguous GC physical memory into the hypervisor region in the GC.
 *
 * @return VBox status code.
 *
 * @param   pVM         VM handle.
 * @param   GCPhys      Guest context physical address of the memory to be mapped. Must be page aligned!
 * @param   cb          Size of the memory. Will be rounded up to nearest page.
 * @param   pszDesc     Mapping description.
 * @param   pGCPtr      Where to store the GC address.
 */
VMMR3DECL(int) MMR3HyperMapGCPhys(PVM pVM, RTGCPHYS GCPhys, size_t cb, const char *pszDesc, PRTGCPTR pGCPtr)
{
    LogFlow(("MMR3HyperMapGCPhys: GCPhys=%RGp cb=%d pszDesc=%p:{%s} pGCPtr=%p\n", GCPhys, (int)cb, pszDesc, pszDesc, pGCPtr));

    /*
     * Validate input.
     */
    AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
    AssertReturn(pszDesc && *pszDesc, VERR_INVALID_PARAMETER);

    /*
     * Add the memory to the hypervisor area.
     */
    cb = RT_ALIGN_Z(cb, PAGE_SIZE);
    RTGCPTR GCPtr;
    PMMLOOKUPHYPER pLookup;
    int rc = mmR3HyperMap(pVM, cb, pszDesc, &GCPtr, &pLookup);
    if (RT_SUCCESS(rc))
    {
        pLookup->enmType = MMLOOKUPHYPERTYPE_GCPHYS;
        pLookup->u.GCPhys.GCPhys = GCPhys;

        /*
         * Update the page table.
         *
         * Each guest page is resolved to its HC physical address; the actual
         * PGMMap call is skipped (deferred to MMR3HyperInitFinalize) when PGM
         * is not yet initialized.
         */
        for (unsigned off = 0; off < cb; off += PAGE_SIZE)
        {
            RTHCPHYS HCPhys;
            rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys + off, &HCPhys);
            AssertRC(rc);
            if (RT_FAILURE(rc))
            {
                AssertMsgFailed(("rc=%Rrc GCPhys=%RGp off=%#x %s\n", rc, GCPhys, off, pszDesc));
                break;
            }
            if (pVM->mm.s.fPGMInitialized)
            {
                rc = PGMMap(pVM, GCPtr + off, HCPhys, PAGE_SIZE, 0);
                AssertRC(rc);
                if (RT_FAILURE(rc))
                {
                    AssertMsgFailed(("rc=%Rrc GCPhys=%RGp off=%#x %s\n", rc, GCPhys, off, pszDesc));
                    break;
                }
            }
        }

        /* Note: pGCPtr is optional here, unlike in MMR3HyperMapHCPhys. */
        if (RT_SUCCESS(rc) && pGCPtr)
            *pGCPtr = GCPtr;
    }
    return rc;
}
449
450
/**
 * Maps a portion of an MMIO2 region into the hypervisor region.
 *
 * Callers of this API must never deregister the MMIO2 region before the
 * VM is powered off.  If this becomes a requirement MMR3HyperUnmapMMIO2
 * API will be needed to perform cleanups.
 *
 * @return VBox status code.
 *
 * @param   pVM         Pointer to the shared VM structure.
 * @param   pDevIns     The device owning the MMIO2 memory.
 * @param   iRegion     The region.
 * @param   off         The offset into the region. Will be rounded down to closest page boundrary.
 * @param   cb          The number of bytes to map. Will be rounded up to the closest page boundrary.
 * @param   pszDesc     Mapping description.
 * @param   pRCPtr      Where to store the RC address.
 */
VMMR3DECL(int) MMR3HyperMapMMIO2(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, RTGCPHYS cb,
                                 const char *pszDesc, PRTRCPTR pRCPtr)
{
    LogFlow(("MMR3HyperMapMMIO2: pDevIns=%p iRegion=%#x off=%RGp cb=%RGp pszDesc=%p:{%s} pRCPtr=%p\n",
             pDevIns, iRegion, off, cb, pszDesc, pszDesc, pRCPtr));
    int rc;

    /*
     * Validate input.
     */
    AssertReturn(pszDesc && *pszDesc, VERR_INVALID_PARAMETER);
    AssertReturn(off + cb > off, VERR_INVALID_PARAMETER);
    /* Page-align the request, remembering the sub-page offset so the caller
       gets an RC pointer to the exact byte requested. */
    uint32_t const offPage = off & PAGE_OFFSET_MASK;
    off &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
    cb += offPage;
    cb = RT_ALIGN_Z(cb, PAGE_SIZE);
    const RTGCPHYS offEnd = off + cb;
    AssertReturn(offEnd > off, VERR_INVALID_PARAMETER);
    /* Probe the whole range up front so we fail before reserving HMA space. */
    for (RTGCPHYS offCur = off; offCur < offEnd; offCur += PAGE_SIZE)
    {
        RTHCPHYS HCPhys;
        rc = PGMR3PhysMMIO2GetHCPhys(pVM, pDevIns, iRegion, offCur, &HCPhys);
        AssertMsgRCReturn(rc, ("rc=%Rrc - iRegion=%d off=%RGp\n", rc, iRegion, off), rc);
    }

    /*
     * Add the memory to the hypervisor area.
     */
    RTGCPTR         GCPtr;
    PMMLOOKUPHYPER  pLookup;
    rc = mmR3HyperMap(pVM, cb, pszDesc, &GCPtr, &pLookup);
    if (RT_SUCCESS(rc))
    {
        pLookup->enmType = MMLOOKUPHYPERTYPE_MMIO2;
        pLookup->u.MMIO2.pDevIns = pDevIns;
        pLookup->u.MMIO2.iRegion = iRegion;
        pLookup->u.MMIO2.off = off;

        /*
         * Update the page table.  (Deferred to MMR3HyperInitFinalize when PGM
         * is not up yet.)
         */
        if (pVM->mm.s.fPGMInitialized)
        {
            for (RTGCPHYS offCur = off; offCur < offEnd; offCur += PAGE_SIZE)
            {
                RTHCPHYS HCPhys;
                rc = PGMR3PhysMMIO2GetHCPhys(pVM, pDevIns, iRegion, offCur, &HCPhys);
                AssertRCReturn(rc, VERR_INTERNAL_ERROR);
                rc = PGMMap(pVM, GCPtr + (offCur - off), HCPhys, PAGE_SIZE, 0);
                if (RT_FAILURE(rc))
                {
                    AssertMsgFailed(("rc=%Rrc offCur=%RGp %s\n", rc, offCur, pszDesc));
                    break;
                }
            }
        }

        if (RT_SUCCESS(rc))
        {
            /* Re-apply the sub-page offset and check for RC pointer truncation. */
            GCPtr |= offPage;
            *pRCPtr = GCPtr;
            AssertLogRelReturn(*pRCPtr == GCPtr, VERR_INTERNAL_ERROR);
        }
    }
    return rc;
}
534
535
536/**
537 * Maps locked R3 virtual memory into the hypervisor region in the GC.
538 *
539 * @return VBox status code.
540 *
541 * @param pVM VM handle.
542 * @param pvR3 The ring-3 address of the memory, must be page aligned.
543 * @param pvR0 The ring-0 address of the memory, must be page aligned. (optional)
544 * @param cPages The number of pages.
545 * @param paPages The page descriptors.
546 * @param pszDesc Mapping description.
547 * @param pGCPtr Where to store the GC address corresponding to pvR3.
548 */
549VMMR3DECL(int) MMR3HyperMapPages(PVM pVM, void *pvR3, RTR0PTR pvR0, size_t cPages, PCSUPPAGE paPages, const char *pszDesc, PRTGCPTR pGCPtr)
550{
551 LogFlow(("MMR3HyperMapPages: pvR3=%p pvR0=%p cPages=%zu paPages=%p pszDesc=%p:{%s} pGCPtr=%p\n",
552 pvR3, pvR0, cPages, paPages, pszDesc, pszDesc, pGCPtr));
553
554 /*
555 * Validate input.
556 */
557 AssertPtrReturn(pvR3, VERR_INVALID_POINTER);
558 AssertPtrReturn(paPages, VERR_INVALID_POINTER);
559 AssertReturn(cPages > 0, VERR_PAGE_COUNT_OUT_OF_RANGE);
560 AssertReturn(cPages <= VBOX_MAX_ALLOC_PAGE_COUNT, VERR_PAGE_COUNT_OUT_OF_RANGE);
561 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
562 AssertReturn(*pszDesc, VERR_INVALID_PARAMETER);
563 AssertPtrReturn(pGCPtr, VERR_INVALID_PARAMETER);
564
565 /*
566 * Add the memory to the hypervisor area.
567 */
568 RTGCPTR GCPtr;
569 PMMLOOKUPHYPER pLookup;
570 int rc = mmR3HyperMap(pVM, cPages << PAGE_SHIFT, pszDesc, &GCPtr, &pLookup);
571 if (RT_SUCCESS(rc))
572 {
573 /*
574 * Copy the physical page addresses and tell PGM about them.
575 */
576 PRTHCPHYS paHCPhysPages = (PRTHCPHYS)MMR3HeapAlloc(pVM, MM_TAG_MM, sizeof(RTHCPHYS) * cPages);
577 if (paHCPhysPages)
578 {
579 for (size_t i = 0; i < cPages; i++)
580 {
581 AssertReleaseReturn(paPages[i].Phys != 0 && paPages[i].Phys != NIL_RTHCPHYS && !(paPages[i].Phys & PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR);
582 paHCPhysPages[i] = paPages[i].Phys;
583 }
584
585 if (pVM->mm.s.fPGMInitialized)
586 {
587 for (size_t i = 0; i < cPages; i++)
588 {
589 rc = PGMMap(pVM, GCPtr + (i << PAGE_SHIFT), paHCPhysPages[i], PAGE_SIZE, 0);
590 AssertRCBreak(rc);
591 }
592 }
593 if (RT_SUCCESS(rc))
594 {
595 pLookup->enmType = MMLOOKUPHYPERTYPE_LOCKED;
596 pLookup->u.Locked.pvR3 = pvR3;
597 pLookup->u.Locked.pvR0 = pvR0;
598 pLookup->u.Locked.paHCPhysPages = paHCPhysPages;
599
600 /* done. */
601 *pGCPtr = GCPtr;
602 return rc;
603 }
604 /* Don't care about failure clean, we're screwed if this fails anyway. */
605 }
606 }
607
608 return rc;
609}
610
611
612/**
613 * Reserves a hypervisor memory area.
614 * Most frequent usage is fence pages and dynamically mappings like the guest PD and PDPT.
615 *
616 * @return VBox status code.
617 *
618 * @param pVM VM handle.
619 * @param cb Size of the memory. Will be rounded up to nearest page.
620 * @param pszDesc Mapping description.
621 * @param pGCPtr Where to store the assigned GC address. Optional.
622 */
623VMMR3DECL(int) MMR3HyperReserve(PVM pVM, unsigned cb, const char *pszDesc, PRTGCPTR pGCPtr)
624{
625 LogFlow(("MMR3HyperMapHCRam: cb=%d pszDesc=%p:{%s} pGCPtr=%p\n", (int)cb, pszDesc, pszDesc, pGCPtr));
626
627 /*
628 * Validate input.
629 */
630 if ( cb <= 0
631 || !pszDesc
632 || !*pszDesc)
633 {
634 AssertMsgFailed(("Invalid parameter\n"));
635 return VERR_INVALID_PARAMETER;
636 }
637
638 /*
639 * Add the memory to the hypervisor area.
640 */
641 RTGCPTR GCPtr;
642 PMMLOOKUPHYPER pLookup;
643 int rc = mmR3HyperMap(pVM, cb, pszDesc, &GCPtr, &pLookup);
644 if (RT_SUCCESS(rc))
645 {
646 pLookup->enmType = MMLOOKUPHYPERTYPE_DYNAMIC;
647 if (pGCPtr)
648 *pGCPtr = GCPtr;
649 return VINF_SUCCESS;
650 }
651 return rc;
652}
653
654
/**
 * Adds memory to the hypervisor memory arena.
 *
 * @return VBox status code.
 * @param   pVM         The VM handle.
 * @param   cb          Size of the memory. Will be rounded up to nearest page.
 * @param   pszDesc     The description of the memory.
 * @param   pGCPtr      Where to store the GC address.
 * @param   ppLookup    Where to store the pointer to the lookup record.
 * @remark  We assume the threading structure of VBox imposes natural
 *          serialization of most functions, this one included.
 */
static int mmR3HyperMap(PVM pVM, const size_t cb, const char *pszDesc, PRTGCPTR pGCPtr, PMMLOOKUPHYPER *ppLookup)
{
    /*
     * Validate input.
     */
    const uint32_t cbAligned = RT_ALIGN_32(cb, PAGE_SIZE);
    AssertReturn(cbAligned >= cb, VERR_INVALID_PARAMETER);
    if (pVM->mm.s.offHyperNextStatic + cbAligned >= pVM->mm.s.cbHyperArea) /* don't use the last page, it's a fence. */
    {
        AssertMsgFailed(("Out of static mapping space in the HMA! offHyperAreaGC=%x cbAligned=%x cbHyperArea=%x\n",
                         pVM->mm.s.offHyperNextStatic, cbAligned, pVM->mm.s.cbHyperArea));
        return VERR_NO_MEMORY;
    }

    /*
     * Allocate lookup record.
     */
    PMMLOOKUPHYPER pLookup;
    int rc = MMHyperAlloc(pVM, sizeof(*pLookup), 1, MM_TAG_MM, (void **)&pLookup);
    if (RT_SUCCESS(rc))
    {
        /*
         * Initialize it and insert it.
         *
         * The lookup list is linked by *relative* offsets (the records live
         * in the hyper heap which is mapped at different addresses in R3/R0/RC),
         * so the previous head offset must be rebased against this record's
         * own heap offset.  The list head is mm.s.offLookupHyper.
         */
        pLookup->offNext        = pVM->mm.s.offLookupHyper;
        pLookup->cb             = cbAligned;
        pLookup->off            = pVM->mm.s.offHyperNextStatic;
        pVM->mm.s.offLookupHyper = (uint8_t *)pLookup - (uint8_t *)pVM->mm.s.pHyperHeapR3;
        if (pLookup->offNext != (int32_t)NIL_OFFSET)
            pLookup->offNext   -= pVM->mm.s.offLookupHyper;
        pLookup->enmType        = MMLOOKUPHYPERTYPE_INVALID; /* caller sets the real type */
        memset(&pLookup->u, 0xff, sizeof(pLookup->u));
        pLookup->pszDesc        = pszDesc;

        /* Mapping. */
        *pGCPtr = pVM->mm.s.pvHyperAreaGC + pVM->mm.s.offHyperNextStatic;
        pVM->mm.s.offHyperNextStatic += cbAligned;

        /* Return pointer. */
        *ppLookup = pLookup;
    }

    AssertRC(rc);
    LogFlow(("mmR3HyperMap: returns %Rrc *pGCPtr=%RGv\n", rc, *pGCPtr));
    return rc;
}
713
714
715/**
716 * Allocates a new heap.
717 *
718 * @returns VBox status code.
719 * @param pVM The VM handle.
720 * @param cb The size of the new heap.
721 * @param ppHeap Where to store the heap pointer on successful return.
722 * @param pR0PtrHeap Where to store the ring-0 address of the heap on
723 * success.
724 */
725static int mmR3HyperHeapCreate(PVM pVM, const size_t cb, PMMHYPERHEAP *ppHeap, PRTR0PTR pR0PtrHeap)
726{
727 /*
728 * Allocate the hypervisor heap.
729 */
730 const uint32_t cbAligned = RT_ALIGN_32(cb, PAGE_SIZE);
731 AssertReturn(cbAligned >= cb, VERR_INVALID_PARAMETER);
732 uint32_t const cPages = cbAligned >> PAGE_SHIFT;
733 PSUPPAGE paPages = (PSUPPAGE)MMR3HeapAlloc(pVM, MM_TAG_MM, cPages * sizeof(paPages[0]));
734 if (!paPages)
735 return VERR_NO_MEMORY;
736 void *pv;
737 RTR0PTR pvR0 = NIL_RTR0PTR;
738 int rc = SUPR3PageAllocEx(cPages,
739 0 /*fFlags*/,
740 &pv,
741#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
742 VMMIsHwVirtExtForced(pVM) ? &pvR0 : NULL,
743#else
744 NULL,
745#endif
746 paPages);
747 if (RT_SUCCESS(rc))
748 {
749#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
750 if (!VMMIsHwVirtExtForced(pVM))
751 pvR0 = NIL_RTR0PTR;
752#else
753 pvR0 = (uintptr_t)pv;
754#endif
755 memset(pv, 0, cbAligned);
756
757 /*
758 * Initialize the heap and first free chunk.
759 */
760 PMMHYPERHEAP pHeap = (PMMHYPERHEAP)pv;
761 pHeap->u32Magic = MMHYPERHEAP_MAGIC;
762 pHeap->pbHeapR3 = (uint8_t *)pHeap + MMYPERHEAP_HDR_SIZE;
763 pHeap->pbHeapR0 = pvR0 != NIL_RTR0PTR ? pvR0 + MMYPERHEAP_HDR_SIZE : NIL_RTR0PTR;
764 //pHeap->pbHeapRC = 0; // set by mmR3HyperHeapMap()
765 pHeap->pVMR3 = pVM;
766 pHeap->pVMR0 = pVM->pVMR0;
767 pHeap->pVMRC = pVM->pVMRC;
768 pHeap->cbHeap = cbAligned - MMYPERHEAP_HDR_SIZE;
769 pHeap->cbFree = pHeap->cbHeap - sizeof(MMHYPERCHUNK);
770 //pHeap->offFreeHead = 0;
771 //pHeap->offFreeTail = 0;
772 pHeap->offPageAligned = pHeap->cbHeap;
773 //pHeap->HyperHeapStatTree = 0;
774 pHeap->paPages = paPages;
775
776 PMMHYPERCHUNKFREE pFree = (PMMHYPERCHUNKFREE)pHeap->pbHeapR3;
777 pFree->cb = pHeap->cbFree;
778 //pFree->core.offNext = 0;
779 MMHYPERCHUNK_SET_TYPE(&pFree->core, MMHYPERCHUNK_FLAGS_FREE);
780 pFree->core.offHeap = -(int32_t)MMYPERHEAP_HDR_SIZE;
781 //pFree->offNext = 0;
782 //pFree->offPrev = 0;
783
784 STAMR3Register(pVM, &pHeap->cbHeap, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, "/MM/HyperHeap/cbHeap", STAMUNIT_BYTES, "The heap size.");
785 STAMR3Register(pVM, &pHeap->cbFree, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, "/MM/HyperHeap/cbFree", STAMUNIT_BYTES, "The free space.");
786
787 *ppHeap = pHeap;
788 *pR0PtrHeap = pvR0;
789 return VINF_SUCCESS;
790 }
791 AssertMsgFailed(("SUPR3PageAllocEx(%d,,,,) -> %Rrc\n", cbAligned >> PAGE_SHIFT, rc));
792
793 *ppHeap = NULL;
794 return rc;
795}
796
797
/**
 * Maps the hypervisor heap into the hypervisor area (GC).
 *
 * (The doc previously claimed this "allocates a new heap"; it does not -
 * mmR3HyperHeapCreate does the allocation, this only maps it.)
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   pHeap       The heap created by mmR3HyperHeapCreate.
 * @param   ppHeapGC    Where to store the GC address of the heap.
 */
static int mmR3HyperHeapMap(PVM pVM, PMMHYPERHEAP pHeap, PRTGCPTR ppHeapGC)
{
    Assert(RT_ALIGN_Z(pHeap->cbHeap + MMYPERHEAP_HDR_SIZE, PAGE_SIZE) == pHeap->cbHeap + MMYPERHEAP_HDR_SIZE);
    Assert(pHeap->paPages);
    int rc = MMR3HyperMapPages(pVM,
                               pHeap,
                               pHeap->pbHeapR0 != NIL_RTR0PTR ? pHeap->pbHeapR0 - MMYPERHEAP_HDR_SIZE : NIL_RTR0PTR,
                               (pHeap->cbHeap + MMYPERHEAP_HDR_SIZE) >> PAGE_SHIFT,
                               pHeap->paPages,
                               "Heap", ppHeapGC);
    if (RT_SUCCESS(rc))
    {
        pHeap->pVMRC    = pVM->pVMRC;
        pHeap->pbHeapRC = *ppHeapGC + MMYPERHEAP_HDR_SIZE;
        /* Reserve a page for fencing. */
        MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);

        /* We won't need these any more. */
        MMR3HeapFree(pHeap->paPages);
        pHeap->paPages = NULL;
    }
    return rc;
}
824
825
#if 0
/**
 * Destroys a heap.  (Disabled / never compiled in.)
 */
static int mmR3HyperHeapDestroy(PVM pVM, PMMHYPERHEAP pHeap)
{
    /* all this is dealt with when unlocking and freeing locked memory. */
}
#endif
835
836
837/**
838 * Allocates memory in the Hypervisor (GC VMM) area which never will
839 * be freed and doesn't have any offset based relation to other heap blocks.
840 *
841 * The latter means that two blocks allocated by this API will not have the
842 * same relative position to each other in GC and HC. In short, never use
843 * this API for allocating nodes for an offset based AVL tree!
844 *
845 * The returned memory is of course zeroed.
846 *
847 * @returns VBox status code.
848 * @param pVM The VM to operate on.
849 * @param cb Number of bytes to allocate.
850 * @param uAlignment Required memory alignment in bytes.
851 * Values are 0,8,16,32 and PAGE_SIZE.
852 * 0 -> default alignment, i.e. 8 bytes.
853 * @param enmTag The statistics tag.
854 * @param ppv Where to store the address to the allocated
855 * memory.
856 * @remark This is assumed not to be used at times when serialization is required.
857 */
858VMMDECL(int) MMR3HyperAllocOnceNoRel(PVM pVM, size_t cb, unsigned uAlignment, MMTAG enmTag, void **ppv)
859{
860 AssertMsg(cb >= 8, ("Hey! Do you really mean to allocate less than 8 bytes?! cb=%d\n", cb));
861
862 /*
863 * Choose between allocating a new chunk of HMA memory
864 * and the heap. We will only do BIG allocations from HMA and
865 * only at creation time.
866 */
867 if ( ( cb < _64K
868 && ( uAlignment != PAGE_SIZE
869 || cb < 48*_1K))
870 || VMR3GetState(pVM) != VMSTATE_CREATING)
871 {
872 int rc = MMHyperAlloc(pVM, cb, uAlignment, enmTag, ppv);
873 if ( rc != VERR_MM_HYPER_NO_MEMORY
874 || cb <= 8*_1K)
875 {
876 Log2(("MMR3HyperAllocOnceNoRel: cb=%#zx uAlignment=%#x returns %Rrc and *ppv=%p\n",
877 cb, uAlignment, rc, *ppv));
878 return rc;
879 }
880 }
881
882 /*
883 * Validate alignment.
884 */
885 switch (uAlignment)
886 {
887 case 0:
888 case 8:
889 case 16:
890 case 32:
891 case PAGE_SIZE:
892 break;
893 default:
894 AssertMsgFailed(("Invalid alignment %u\n", uAlignment));
895 return VERR_INVALID_PARAMETER;
896 }
897
898 /*
899 * Allocate the pages and map them into HMA space.
900 */
901 uint32_t const cbAligned = RT_ALIGN_32(cb, PAGE_SIZE);
902 AssertReturn(cbAligned >= cb, VERR_INVALID_PARAMETER);
903 uint32_t const cPages = cbAligned >> PAGE_SHIFT;
904 PSUPPAGE paPages = (PSUPPAGE)RTMemTmpAlloc(cPages * sizeof(paPages[0]));
905 if (!paPages)
906 return VERR_NO_TMP_MEMORY;
907 void *pvPages;
908 RTR0PTR pvR0 = NIL_RTR0PTR;
909 int rc = SUPR3PageAllocEx(cPages,
910 0 /*fFlags*/,
911 &pvPages,
912#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
913 VMMIsHwVirtExtForced(pVM) ? &pvR0 : NULL,
914#else
915 NULL,
916#endif
917 paPages);
918 if (RT_SUCCESS(rc))
919 {
920#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
921 if (!VMMIsHwVirtExtForced(pVM))
922 pvR0 = NIL_RTR0PTR;
923#else
924 pvR0 = (uintptr_t)pvPages;
925#endif
926 memset(pvPages, 0, cbAligned);
927
928 RTGCPTR GCPtr;
929 rc = MMR3HyperMapPages(pVM,
930 pvPages,
931 pvR0,
932 cPages,
933 paPages,
934 MMR3HeapAPrintf(pVM, MM_TAG_MM, "alloc once (%s)", mmR3GetTagName(enmTag)),
935 &GCPtr);
936 if (RT_SUCCESS(rc))
937 {
938 *ppv = pvPages;
939 Log2(("MMR3HyperAllocOnceNoRel: cbAligned=%#x uAlignment=%#x returns VINF_SUCCESS and *ppv=%p\n",
940 cbAligned, uAlignment, *ppv));
941 MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
942 return rc;
943 }
944 AssertMsgFailed(("Failed to allocate %zd bytes! %Rrc\n", cbAligned, rc));
945 SUPR3PageFreeEx(pvPages, cPages);
946
947
948 /*
949 * HACK ALERT! Try allocate it off the heap so that we don't freak
950 * out during vga/vmmdev mmio2 allocation with certain ram sizes.
951 */
952 /** @todo make a proper fix for this so we will never end up in this kind of situation! */
953 Log(("MMR3HyperAllocOnceNoRel: MMR3HyperMapHCRam failed with rc=%Rrc, try MMHyperAlloc(,%#x,,) instead\n", rc, cb));
954 int rc2 = MMHyperAlloc(pVM, cb, uAlignment, enmTag, ppv);
955 if (RT_SUCCESS(rc2))
956 {
957 Log2(("MMR3HyperAllocOnceNoRel: cb=%#x uAlignment=%#x returns %Rrc and *ppv=%p\n",
958 cb, uAlignment, rc, *ppv));
959 return rc;
960 }
961 }
962 else
963 AssertMsgFailed(("Failed to allocate %zd bytes! %Rrc\n", cbAligned, rc));
964
965 if (rc == VERR_NO_MEMORY)
966 rc = VERR_MM_HYPER_NO_MEMORY;
967 LogRel(("MMR3HyperAllocOnceNoRel: cb=%#zx uAlignment=%#x returns %Rrc\n", cb, uAlignment, rc));
968 return rc;
969}
970
971
/**
 * Convert hypervisor HC virtual address to HC physical address.
 *
 * @returns HC physical address, NIL_RTHCPHYS if pvR3 is not inside any
 *          convertible HMA mapping.
 * @param   pVM         VM Handle
 * @param   pvR3        Host context virtual address.
 */
VMMR3DECL(RTHCPHYS) MMR3HyperHCVirt2HCPhys(PVM pVM, void *pvR3)
{
    /* Linear walk of the offset-linked lookup record list. */
    PMMLOOKUPHYPER pLookup = (PMMLOOKUPHYPER)((uint8_t *)pVM->mm.s.pHyperHeapR3 + pVM->mm.s.offLookupHyper);
    for (;;)
    {
        switch (pLookup->enmType)
        {
            case MMLOOKUPHYPERTYPE_LOCKED:
            {
                /* Unsigned subtraction: addresses below pvR3 wrap to huge
                   offsets and fail the range check. */
                unsigned off = (uint8_t *)pvR3 - (uint8_t *)pLookup->u.Locked.pvR3;
                if (off < pLookup->cb)
                    return pLookup->u.Locked.paHCPhysPages[off >> PAGE_SHIFT] | (off & PAGE_OFFSET_MASK);
                break;
            }

            case MMLOOKUPHYPERTYPE_HCPHYS:
            {
                unsigned off = (uint8_t *)pvR3 - (uint8_t *)pLookup->u.HCPhys.pvR3;
                if (off < pLookup->cb)
                    return pLookup->u.HCPhys.HCPhys + off;
                break;
            }

            case MMLOOKUPHYPERTYPE_GCPHYS:
            case MMLOOKUPHYPERTYPE_MMIO2:
            case MMLOOKUPHYPERTYPE_DYNAMIC:
                /* can (or don't want to) convert these kind of records. */
                break;

            default:
                AssertMsgFailed(("enmType=%d\n", pLookup->enmType));
                break;
        }

        /* next */
        if ((unsigned)pLookup->offNext == NIL_OFFSET)
            break;
        pLookup = (PMMLOOKUPHYPER)((uint8_t *)pLookup + pLookup->offNext);
    }

    AssertMsgFailed(("pvR3=%p is not inside the hypervisor memory area!\n", pvR3));
    return NIL_RTHCPHYS;
}
1022
1023
1024#if 0 /* unused, not implemented */
1025/**
1026 * Convert hypervisor HC physical address to HC virtual address.
1027 *
1028 * @returns HC virtual address.
1029 * @param pVM VM Handle
1030 * @param HCPhys Host context physical address.
1031 */
1032VMMR3DECL(void *) MMR3HyperHCPhys2HCVirt(PVM pVM, RTHCPHYS HCPhys)
1033{
1034 void *pv;
1035 int rc = MMR3HyperHCPhys2HCVirtEx(pVM, HCPhys, &pv);
1036 if (RT_SUCCESS(rc))
1037 return pv;
1038 AssertMsgFailed(("Invalid address HCPhys=%x rc=%d\n", HCPhys, rc));
1039 return NULL;
1040}
1041
1042
/**
 * Convert hypervisor HC physical address to HC virtual address.
 *
 * @returns VBox status. Currently always VERR_INVALID_POINTER since the
 *          lookup is not implemented.
 * @param   pVM     VM Handle
 * @param   HCPhys  Host context physical address.
 * @param   ppv     Where to store the HC virtual address.
 */
VMMR3DECL(int) MMR3HyperHCPhys2HCVirtEx(PVM pVM, RTHCPHYS HCPhys, void **ppv)
{
    /*
     * Linear search.
     */
    /* NOTE(review): unimplemented stub; the whole function is compiled out
       by the enclosing #if 0 and unconditionally fails. */
    /** @todo implement when actually used. */
    return VERR_INVALID_POINTER;
}
1059#endif /* unused, not implemented */
1060
1061
1062/**
1063 * Read hypervisor memory from GC virtual address.
1064 *
1065 * @returns VBox status.
1066 * @param pVM VM handle.
1067 * @param pvDst Destination address (HC of course).
1068 * @param GCPtr GC virtual address.
1069 * @param cb Number of bytes to read.
1070 *
1071 * @remarks For DBGF only.
1072 */
1073VMMR3DECL(int) MMR3HyperReadGCVirt(PVM pVM, void *pvDst, RTGCPTR GCPtr, size_t cb)
1074{
1075 if (GCPtr - pVM->mm.s.pvHyperAreaGC >= pVM->mm.s.cbHyperArea)
1076 return VERR_INVALID_PARAMETER;
1077 return PGMR3MapRead(pVM, pvDst, GCPtr, cb);
1078}
1079
1080
1081/**
1082 * Info handler for 'hma', it dumps the list of lookup records for the hypervisor memory area.
1083 *
1084 * @param pVM The VM handle.
1085 * @param pHlp Callback functions for doing output.
1086 * @param pszArgs Argument string. Optional and specific to the handler.
1087 */
1088static DECLCALLBACK(void) mmR3HyperInfoHma(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1089{
1090 pHlp->pfnPrintf(pHlp, "Hypervisor Memory Area (HMA) Layout: Base %RGv, 0x%08x bytes\n",
1091 pVM->mm.s.pvHyperAreaGC, pVM->mm.s.cbHyperArea);
1092
1093 PMMLOOKUPHYPER pLookup = (PMMLOOKUPHYPER)((uint8_t *)pVM->mm.s.pHyperHeapR3 + pVM->mm.s.offLookupHyper);
1094 for (;;)
1095 {
1096 switch (pLookup->enmType)
1097 {
1098 case MMLOOKUPHYPERTYPE_LOCKED:
1099 pHlp->pfnPrintf(pHlp, "%RGv-%RGv %RHv %RHv LOCKED %-*s %s\n",
1100 pLookup->off + pVM->mm.s.pvHyperAreaGC,
1101 pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
1102 pLookup->u.Locked.pvR3,
1103 pLookup->u.Locked.pvR0,
1104 sizeof(RTHCPTR) * 2, "",
1105 pLookup->pszDesc);
1106 break;
1107
1108 case MMLOOKUPHYPERTYPE_HCPHYS:
1109 pHlp->pfnPrintf(pHlp, "%RGv-%RGv %RHv %RHv HCPHYS %RHp %s\n",
1110 pLookup->off + pVM->mm.s.pvHyperAreaGC,
1111 pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
1112 pLookup->u.HCPhys.pvR3,
1113 pLookup->u.HCPhys.pvR0,
1114 pLookup->u.HCPhys.HCPhys,
1115 pLookup->pszDesc);
1116 break;
1117
1118 case MMLOOKUPHYPERTYPE_GCPHYS:
1119 pHlp->pfnPrintf(pHlp, "%RGv-%RGv %*s GCPHYS %RGp%*s %s\n",
1120 pLookup->off + pVM->mm.s.pvHyperAreaGC,
1121 pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
1122 sizeof(RTHCPTR) * 2 * 2 + 1, "",
1123 pLookup->u.GCPhys.GCPhys, RT_ABS((int)(sizeof(RTHCPHYS) - sizeof(RTGCPHYS))) * 2, "",
1124 pLookup->pszDesc);
1125 break;
1126
1127 case MMLOOKUPHYPERTYPE_MMIO2:
1128 pHlp->pfnPrintf(pHlp, "%RGv-%RGv %*s MMIO2 %RGp%*s %s\n",
1129 pLookup->off + pVM->mm.s.pvHyperAreaGC,
1130 pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
1131 sizeof(RTHCPTR) * 2 * 2 + 1, "",
1132 pLookup->u.MMIO2.off, RT_ABS((int)(sizeof(RTHCPHYS) - sizeof(RTGCPHYS))) * 2, "",
1133 pLookup->pszDesc);
1134 break;
1135
1136 case MMLOOKUPHYPERTYPE_DYNAMIC:
1137 pHlp->pfnPrintf(pHlp, "%RGv-%RGv %*s DYNAMIC %*s %s\n",
1138 pLookup->off + pVM->mm.s.pvHyperAreaGC,
1139 pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
1140 sizeof(RTHCPTR) * 2 * 2 + 1, "",
1141 sizeof(RTHCPTR) * 2, "",
1142 pLookup->pszDesc);
1143 break;
1144
1145 default:
1146 AssertMsgFailed(("enmType=%d\n", pLookup->enmType));
1147 break;
1148 }
1149
1150 /* next */
1151 if ((unsigned)pLookup->offNext == NIL_OFFSET)
1152 break;
1153 pLookup = (PMMLOOKUPHYPER)((uint8_t *)pLookup + pLookup->offNext);
1154 }
1155}
1156
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette