VirtualBox

source: vbox/trunk/src/VBox/VMM/MMHyper.cpp@ 18370

Last change on this file since 18370 was 18354, checked in by vboxsync, 16 years ago

MMHyper: With lots of memory, we have to scale up the hyper heap size according to the memory size. We have extra expenses tracking memory we've mapped, this has to be accessible from R0. However, we don't really need this in RC, so it's one of the allocations we could put in a R3/R0 only heap later.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 41.0 KB
Line 
1/* $Id: MMHyper.cpp 18354 2009-03-26 22:17:20Z vboxsync $ */
2/** @file
3 * MM - Memory Manager - Hypervisor Memory Area.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_MM_HYPER
27#include <VBox/pgm.h>
28#include <VBox/mm.h>
29#include <VBox/dbgf.h>
30#include "MMInternal.h"
31#include <VBox/vm.h>
32#include <VBox/err.h>
33#include <VBox/param.h>
34#include <VBox/log.h>
35#include <iprt/alloc.h>
36#include <iprt/assert.h>
37#include <iprt/string.h>
38
39
40/*******************************************************************************
41* Internal Functions *
42*******************************************************************************/
43static DECLCALLBACK(bool) mmR3HyperRelocateCallback(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew, PGMRELOCATECALL enmMode, void *pvUser);
44static int mmR3HyperMap(PVM pVM, const size_t cb, const char *pszDesc, PRTGCPTR pGCPtr, PMMLOOKUPHYPER *ppLookup);
45static int mmR3HyperHeapCreate(PVM pVM, const size_t cb, PMMHYPERHEAP *ppHeap, PRTR0PTR pR0PtrHeap);
46static int mmR3HyperHeapMap(PVM pVM, PMMHYPERHEAP pHeap, PRTGCPTR ppHeapGC);
47static DECLCALLBACK(void) mmR3HyperInfoHma(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
48
49
50
51
52/**
53 * Initializes the hypvervisor related MM stuff without
54 * calling down to PGM.
55 *
56 * PGM is not initialized at this point, PGM relies on
57 * the heap to initialize.
58 *
59 * @returns VBox status.
60 */
61int mmR3HyperInit(PVM pVM)
62{
63 LogFlow(("mmR3HyperInit:\n"));
64
65 /*
66 * Decide Hypervisor mapping in the guest context
67 * And setup various hypervisor area and heap parameters.
68 */
69 pVM->mm.s.pvHyperAreaGC = (RTGCPTR)MM_HYPER_AREA_ADDRESS;
70 pVM->mm.s.cbHyperArea = MM_HYPER_AREA_MAX_SIZE;
71 AssertRelease(RT_ALIGN_T(pVM->mm.s.pvHyperAreaGC, 1 << X86_PD_SHIFT, RTGCPTR) == pVM->mm.s.pvHyperAreaGC);
72 Assert(pVM->mm.s.pvHyperAreaGC < 0xff000000);
73
74 /** @todo @bugref{1865}, @bugref{3202}: Change the cbHyperHeap default
75 * depending on whether VT-x/AMD-V is enabled or not! Don't waste
76 * precious kernel space on heap for the PATM. */
77 uint32_t cbHyperHeap;
78 int rc = CFGMR3QueryU32(CFGMR3GetChild(CFGMR3GetRoot(pVM), "MM"), "cbHyperHeap", &cbHyperHeap);
79 if (rc == VERR_CFGM_NO_PARENT || rc == VERR_CFGM_VALUE_NOT_FOUND)
80 {
81 cbHyperHeap = VMMIsHwVirtExtForced(pVM)
82 ? 640*_1K
83 : 1280*_1K;
84
85 /* Adjust for dynamic stuff like RAM mapping chunks. Try playing kind
86 of safe with existing configs here (HMA size must not change)... */
87 uint64_t cbRam;
88 CFGMR3QueryU64Def(CFGMR3GetRoot(pVM), "RamSize", &cbRam, 0);
89 if (cbRam > _2G)
90 {
91 cbRam = RT_MIN(cbRam, _1T);
92 cbHyperHeap += (cbRam - _1G) / _1M * 128; /* 128 is a quick guess */
93 cbHyperHeap = RT_ALIGN_32(cbHyperHeap, _64K);
94 }
95 }
96 else
97 AssertLogRelRCReturn(rc, rc);
98 cbHyperHeap = RT_ALIGN_32(cbHyperHeap, PAGE_SIZE);
99 LogRel(("MM: cbHyperHeap=%#x (%u)\n", cbHyperHeap, cbHyperHeap));
100
101 /*
102 * Allocate the hypervisor heap.
103 *
104 * (This must be done before we start adding memory to the
105 * hypervisor static area because lookup records are allocated from it.)
106 */
107 rc = mmR3HyperHeapCreate(pVM, cbHyperHeap, &pVM->mm.s.pHyperHeapR3, &pVM->mm.s.pHyperHeapR0);
108 if (RT_SUCCESS(rc))
109 {
110 /*
111 * Make a small head fence to fend of accidental sequential access.
112 */
113 MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
114
115 /*
116 * Map the VM structure into the hypervisor space.
117 */
118 AssertRelease(pVM->cbSelf == RT_UOFFSETOF(VM, aCpus[pVM->cCPUs]));
119 RTGCPTR GCPtr;
120 rc = MMR3HyperMapPages(pVM, pVM, pVM->pVMR0, RT_ALIGN_Z(pVM->cbSelf, PAGE_SIZE) >> PAGE_SHIFT, pVM->paVMPagesR3, "VM", &GCPtr);
121 if (RT_SUCCESS(rc))
122 {
123 pVM->pVMRC = (RTRCPTR)GCPtr;
124 for (uint32_t i = 0; i < pVM->cCPUs; i++)
125 pVM->aCpus[i].pVMRC = pVM->pVMRC;
126
127 /* Reserve a page for fencing. */
128 MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
129
130 /*
131 * Map the heap into the hypervisor space.
132 */
133 rc = mmR3HyperHeapMap(pVM, pVM->mm.s.pHyperHeapR3, &GCPtr);
134 if (RT_SUCCESS(rc))
135 {
136 pVM->mm.s.pHyperHeapRC = (RTRCPTR)GCPtr;
137 Assert(pVM->mm.s.pHyperHeapRC == GCPtr);
138
139 /*
140 * Register info handlers.
141 */
142 DBGFR3InfoRegisterInternal(pVM, "hma", "Show the layout of the Hypervisor Memory Area.", mmR3HyperInfoHma);
143
144 LogFlow(("mmR3HyperInit: returns VINF_SUCCESS\n"));
145 return VINF_SUCCESS;
146 }
147 /* Caller will do proper cleanup. */
148 }
149 }
150
151 LogFlow(("mmR3HyperInit: returns %Rrc\n", rc));
152 return rc;
153}
154
155
156/**
157 * Finalizes the HMA mapping.
158 *
159 * This is called later during init, most (all) HMA allocations should be done
160 * by the time this function is called.
161 *
162 * @returns VBox status.
163 */
164VMMR3DECL(int) MMR3HyperInitFinalize(PVM pVM)
165{
166 LogFlow(("MMR3HyperInitFinalize:\n"));
167
168 /*
169 * Adjust and create the HMA mapping.
170 */
171 while ((RTINT)pVM->mm.s.offHyperNextStatic + 64*_1K < (RTINT)pVM->mm.s.cbHyperArea - _4M)
172 pVM->mm.s.cbHyperArea -= _4M;
173 int rc = PGMR3MapPT(pVM, pVM->mm.s.pvHyperAreaGC, pVM->mm.s.cbHyperArea, 0 /*fFlags*/,
174 mmR3HyperRelocateCallback, NULL, "Hypervisor Memory Area");
175 if (RT_FAILURE(rc))
176 return rc;
177 pVM->mm.s.fPGMInitialized = true;
178
179 /*
180 * Do all the delayed mappings.
181 */
182 PMMLOOKUPHYPER pLookup = (PMMLOOKUPHYPER)((uintptr_t)pVM->mm.s.pHyperHeapR3 + pVM->mm.s.offLookupHyper);
183 for (;;)
184 {
185 RTGCPTR GCPtr = pVM->mm.s.pvHyperAreaGC + pLookup->off;
186 unsigned cPages = pLookup->cb >> PAGE_SHIFT;
187 switch (pLookup->enmType)
188 {
189 case MMLOOKUPHYPERTYPE_LOCKED:
190 rc = mmR3MapLocked(pVM, pLookup->u.Locked.pLockedMem, GCPtr, 0, cPages, 0);
191 break;
192
193 case MMLOOKUPHYPERTYPE_HCPHYS:
194 rc = PGMMap(pVM, GCPtr, pLookup->u.HCPhys.HCPhys, pLookup->cb, 0);
195 break;
196
197 case MMLOOKUPHYPERTYPE_GCPHYS:
198 {
199 const RTGCPHYS GCPhys = pLookup->u.GCPhys.GCPhys;
200 const size_t cb = pLookup->cb;
201 for (unsigned off = 0; off < cb; off += PAGE_SIZE)
202 {
203 RTHCPHYS HCPhys;
204 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys + off, &HCPhys);
205 if (RT_FAILURE(rc))
206 break;
207 rc = PGMMap(pVM, GCPtr + off, HCPhys, PAGE_SIZE, 0);
208 if (RT_FAILURE(rc))
209 break;
210 }
211 break;
212 }
213
214 case MMLOOKUPHYPERTYPE_MMIO2:
215 {
216 const RTGCPHYS offEnd = pLookup->u.MMIO2.off + pLookup->cb;
217 for (RTGCPHYS offCur = pLookup->u.MMIO2.off; offCur < offEnd; offCur += PAGE_SIZE)
218 {
219 RTHCPHYS HCPhys;
220 rc = PGMR3PhysMMIO2GetHCPhys(pVM, pLookup->u.MMIO2.pDevIns, pLookup->u.MMIO2.iRegion, offCur, &HCPhys);
221 if (RT_FAILURE(rc))
222 break;
223 rc = PGMMap(pVM, GCPtr + (offCur - pLookup->u.MMIO2.off), HCPhys, PAGE_SIZE, 0);
224 if (RT_FAILURE(rc))
225 break;
226 }
227 break;
228 }
229
230 case MMLOOKUPHYPERTYPE_DYNAMIC:
231 /* do nothing here since these are either fences or managed by someone else using PGM. */
232 break;
233
234 default:
235 AssertMsgFailed(("enmType=%d\n", pLookup->enmType));
236 break;
237 }
238
239 if (RT_FAILURE(rc))
240 {
241 AssertMsgFailed(("rc=%Rrc cb=%d off=%#RX32 enmType=%d pszDesc=%s\n",
242 rc, pLookup->cb, pLookup->off, pLookup->enmType, pLookup->pszDesc));
243 return rc;
244 }
245
246 /* next */
247 if (pLookup->offNext == (int32_t)NIL_OFFSET)
248 break;
249 pLookup = (PMMLOOKUPHYPER)((uintptr_t)pLookup + pLookup->offNext);
250 }
251
252 LogFlow(("MMR3HyperInitFinalize: returns VINF_SUCCESS\n"));
253 return VINF_SUCCESS;
254}
255
256
257/**
258 * Callback function which will be called when PGM is trying to find
259 * a new location for the mapping.
260 *
261 * The callback is called in two modes, 1) the check mode and 2) the relocate mode.
262 * In 1) the callback should say if it objects to a suggested new location. If it
263 * accepts the new location, it is called again for doing it's relocation.
264 *
265 *
266 * @returns true if the location is ok.
267 * @returns false if another location should be found.
268 * @param pVM The VM handle.
269 * @param GCPtrOld The old virtual address.
270 * @param GCPtrNew The new virtual address.
271 * @param enmMode Used to indicate the callback mode.
272 * @param pvUser User argument. Ignored.
273 * @remark The return value is no a failure indicator, it's an acceptance
274 * indicator. Relocation can not fail!
275 */
276static DECLCALLBACK(bool) mmR3HyperRelocateCallback(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew, PGMRELOCATECALL enmMode, void *pvUser)
277{
278 switch (enmMode)
279 {
280 /*
281 * Verify location - all locations are good for us.
282 */
283 case PGMRELOCATECALL_SUGGEST:
284 return true;
285
286 /*
287 * Execute the relocation.
288 */
289 case PGMRELOCATECALL_RELOCATE:
290 {
291 /*
292 * Accepted!
293 */
294 AssertMsg(GCPtrOld == pVM->mm.s.pvHyperAreaGC, ("GCPtrOld=%RGv pVM->mm.s.pvHyperAreaGC=%RGv\n", GCPtrOld, pVM->mm.s.pvHyperAreaGC));
295 Log(("Relocating the hypervisor from %RGv to %RGv\n", GCPtrOld, GCPtrNew));
296
297 /*
298 * Relocate the VM structure and ourselves.
299 */
300 RTGCINTPTR offDelta = GCPtrNew - GCPtrOld;
301 pVM->pVMRC += offDelta;
302 for (uint32_t i = 0; i < pVM->cCPUs; i++)
303 pVM->aCpus[i].pVMRC = pVM->pVMRC;
304
305 pVM->mm.s.pvHyperAreaGC += offDelta;
306 Assert(pVM->mm.s.pvHyperAreaGC < _4G);
307 pVM->mm.s.pHyperHeapRC += offDelta;
308 pVM->mm.s.pHyperHeapR3->pbHeapRC += offDelta;
309 pVM->mm.s.pHyperHeapR3->pVMRC = pVM->pVMRC;
310
311 /*
312 * Relocate the rest.
313 */
314 VMR3Relocate(pVM, offDelta);
315 return true;
316 }
317
318 default:
319 AssertMsgFailed(("Invalid relocation mode %d\n", enmMode));
320 }
321
322 return false;
323}
324
325
326/**
327 * Maps contiguous HC physical memory into the hypervisor region in the GC.
328 *
329 * @return VBox status code.
330 *
331 * @param pVM VM handle.
332 * @param pvR3 Ring-3 address of the memory. Must be page aligned!
333 * @param pvR0 Optional ring-0 address of the memory.
334 * @param HCPhys Host context physical address of the memory to be
335 * mapped. Must be page aligned!
336 * @param cb Size of the memory. Will be rounded up to nearest page.
337 * @param pszDesc Description.
338 * @param pGCPtr Where to store the GC address.
339 */
340VMMR3DECL(int) MMR3HyperMapHCPhys(PVM pVM, void *pvR3, RTR0PTR pvR0, RTHCPHYS HCPhys, size_t cb, const char *pszDesc, PRTGCPTR pGCPtr)
341{
342 LogFlow(("MMR3HyperMapHCPhys: pvR3=%p pvR0=%p HCPhys=%RHp cb=%d pszDesc=%p:{%s} pGCPtr=%p\n", pvR3, pvR0, HCPhys, (int)cb, pszDesc, pszDesc, pGCPtr));
343
344 /*
345 * Validate input.
346 */
347 AssertReturn(RT_ALIGN_P(pvR3, PAGE_SIZE) == pvR3, VERR_INVALID_PARAMETER);
348 AssertReturn(RT_ALIGN_T(pvR0, PAGE_SIZE, RTR0PTR) == pvR0, VERR_INVALID_PARAMETER);
349 AssertReturn(RT_ALIGN_T(HCPhys, PAGE_SIZE, RTHCPHYS) == HCPhys, VERR_INVALID_PARAMETER);
350 AssertReturn(pszDesc && *pszDesc, VERR_INVALID_PARAMETER);
351
352 /*
353 * Add the memory to the hypervisor area.
354 */
355 uint32_t cbAligned = RT_ALIGN_32(cb, PAGE_SIZE);
356 AssertReturn(cbAligned >= cb, VERR_INVALID_PARAMETER);
357 RTGCPTR GCPtr;
358 PMMLOOKUPHYPER pLookup;
359 int rc = mmR3HyperMap(pVM, cbAligned, pszDesc, &GCPtr, &pLookup);
360 if (RT_SUCCESS(rc))
361 {
362 pLookup->enmType = MMLOOKUPHYPERTYPE_HCPHYS;
363 pLookup->u.HCPhys.pvR3 = pvR3;
364 pLookup->u.HCPhys.pvR0 = pvR0;
365 pLookup->u.HCPhys.HCPhys = HCPhys;
366
367 /*
368 * Update the page table.
369 */
370 if (pVM->mm.s.fPGMInitialized)
371 rc = PGMMap(pVM, GCPtr, HCPhys, cbAligned, 0);
372 if (RT_SUCCESS(rc))
373 *pGCPtr = GCPtr;
374 }
375 return rc;
376}
377
378
379/**
380 * Maps contiguous GC physical memory into the hypervisor region in the GC.
381 *
382 * @return VBox status code.
383 *
384 * @param pVM VM handle.
385 * @param GCPhys Guest context physical address of the memory to be mapped. Must be page aligned!
386 * @param cb Size of the memory. Will be rounded up to nearest page.
387 * @param pszDesc Mapping description.
388 * @param pGCPtr Where to store the GC address.
389 */
390VMMR3DECL(int) MMR3HyperMapGCPhys(PVM pVM, RTGCPHYS GCPhys, size_t cb, const char *pszDesc, PRTGCPTR pGCPtr)
391{
392 LogFlow(("MMR3HyperMapGCPhys: GCPhys=%RGp cb=%d pszDesc=%p:{%s} pGCPtr=%p\n", GCPhys, (int)cb, pszDesc, pszDesc, pGCPtr));
393
394 /*
395 * Validate input.
396 */
397 AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
398 AssertReturn(pszDesc && *pszDesc, VERR_INVALID_PARAMETER);
399
400 /*
401 * Add the memory to the hypervisor area.
402 */
403 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
404 RTGCPTR GCPtr;
405 PMMLOOKUPHYPER pLookup;
406 int rc = mmR3HyperMap(pVM, cb, pszDesc, &GCPtr, &pLookup);
407 if (RT_SUCCESS(rc))
408 {
409 pLookup->enmType = MMLOOKUPHYPERTYPE_GCPHYS;
410 pLookup->u.GCPhys.GCPhys = GCPhys;
411
412 /*
413 * Update the page table.
414 */
415 for (unsigned off = 0; off < cb; off += PAGE_SIZE)
416 {
417 RTHCPHYS HCPhys;
418 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys + off, &HCPhys);
419 AssertRC(rc);
420 if (RT_FAILURE(rc))
421 {
422 AssertMsgFailed(("rc=%Rrc GCPhys=%RGp off=%#x %s\n", rc, GCPhys, off, pszDesc));
423 break;
424 }
425 if (pVM->mm.s.fPGMInitialized)
426 {
427 rc = PGMMap(pVM, GCPtr + off, HCPhys, PAGE_SIZE, 0);
428 AssertRC(rc);
429 if (RT_FAILURE(rc))
430 {
431 AssertMsgFailed(("rc=%Rrc GCPhys=%RGp off=%#x %s\n", rc, GCPhys, off, pszDesc));
432 break;
433 }
434 }
435 }
436
437 if (RT_SUCCESS(rc) && pGCPtr)
438 *pGCPtr = GCPtr;
439 }
440 return rc;
441}
442
443
444/**
445 * Maps a portion of an MMIO2 region into the hypervisor region.
446 *
447 * Callers of this API must never deregister the MMIO2 region before the
448 * VM is powered off. If this becomes a requirement MMR3HyperUnmapMMIO2
449 * API will be needed to perform cleanups.
450 *
451 * @return VBox status code.
452 *
453 * @param pVM Pointer to the shared VM structure.
454 * @param pDevIns The device owning the MMIO2 memory.
455 * @param iRegion The region.
456 * @param off The offset into the region. Will be rounded down to closest page boundrary.
457 * @param cb The number of bytes to map. Will be rounded up to the closest page boundrary.
458 * @param pszDesc Mapping description.
459 * @param pRCPtr Where to store the RC address.
460 */
461VMMR3DECL(int) MMR3HyperMapMMIO2(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, RTGCPHYS cb,
462 const char *pszDesc, PRTRCPTR pRCPtr)
463{
464 LogFlow(("MMR3HyperMapMMIO2: pDevIns=%p iRegion=%#x off=%RGp cb=%RGp pszDesc=%p:{%s} pRCPtr=%p\n",
465 pDevIns, iRegion, off, cb, pszDesc, pszDesc, pRCPtr));
466 int rc;
467
468 /*
469 * Validate input.
470 */
471 AssertReturn(pszDesc && *pszDesc, VERR_INVALID_PARAMETER);
472 AssertReturn(off + cb > off, VERR_INVALID_PARAMETER);
473 uint32_t const offPage = off & PAGE_OFFSET_MASK;
474 off &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
475 cb += offPage;
476 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
477 const RTGCPHYS offEnd = off + cb;
478 AssertReturn(offEnd > off, VERR_INVALID_PARAMETER);
479 for (RTGCPHYS offCur = off; offCur < offEnd; offCur += PAGE_SIZE)
480 {
481 RTHCPHYS HCPhys;
482 rc = PGMR3PhysMMIO2GetHCPhys(pVM, pDevIns, iRegion, offCur, &HCPhys);
483 AssertMsgRCReturn(rc, ("rc=%Rrc - iRegion=%d off=%RGp\n", rc, iRegion, off), rc);
484 }
485
486 /*
487 * Add the memory to the hypervisor area.
488 */
489 RTGCPTR GCPtr;
490 PMMLOOKUPHYPER pLookup;
491 rc = mmR3HyperMap(pVM, cb, pszDesc, &GCPtr, &pLookup);
492 if (RT_SUCCESS(rc))
493 {
494 pLookup->enmType = MMLOOKUPHYPERTYPE_MMIO2;
495 pLookup->u.MMIO2.pDevIns = pDevIns;
496 pLookup->u.MMIO2.iRegion = iRegion;
497 pLookup->u.MMIO2.off = off;
498
499 /*
500 * Update the page table.
501 */
502 if (pVM->mm.s.fPGMInitialized)
503 {
504 for (RTGCPHYS offCur = off; offCur < offEnd; offCur += PAGE_SIZE)
505 {
506 RTHCPHYS HCPhys;
507 rc = PGMR3PhysMMIO2GetHCPhys(pVM, pDevIns, iRegion, offCur, &HCPhys);
508 AssertRCReturn(rc, VERR_INTERNAL_ERROR);
509 rc = PGMMap(pVM, GCPtr + (offCur - off), HCPhys, PAGE_SIZE, 0);
510 if (RT_FAILURE(rc))
511 {
512 AssertMsgFailed(("rc=%Rrc offCur=%RGp %s\n", rc, offCur, pszDesc));
513 break;
514 }
515 }
516 }
517
518 if (RT_SUCCESS(rc))
519 {
520 GCPtr |= offPage;
521 *pRCPtr = GCPtr;
522 AssertLogRelReturn(*pRCPtr == GCPtr, VERR_INTERNAL_ERROR);
523 }
524 }
525 return rc;
526}
527
528
529/**
530 * Maps locked R3 virtual memory into the hypervisor region in the GC.
531 *
532 * @return VBox status code.
533 *
534 * @param pVM VM handle.
535 * @param pvR3 The ring-3 address of the memory, must be page aligned.
536 * @param pvR0 The ring-0 address of the memory, must be page aligned. (optional)
537 * @param cPages The number of pages.
538 * @param paPages The page descriptors.
539 * @param pszDesc Mapping description.
540 * @param pGCPtr Where to store the GC address corresponding to pvR3.
541 */
542VMMR3DECL(int) MMR3HyperMapPages(PVM pVM, void *pvR3, RTR0PTR pvR0, size_t cPages, PCSUPPAGE paPages, const char *pszDesc, PRTGCPTR pGCPtr)
543{
544 LogFlow(("MMR3HyperMapPages: pvR3=%p pvR0=%p cPages=%zu paPages=%p pszDesc=%p:{%s} pGCPtr=%p\n",
545 pvR3, pvR0, cPages, paPages, pszDesc, pszDesc, pGCPtr));
546
547 /*
548 * Validate input.
549 */
550 AssertPtrReturn(pvR3, VERR_INVALID_POINTER);
551 AssertPtrReturn(paPages, VERR_INVALID_POINTER);
552 AssertReturn(cPages > 0, VERR_PAGE_COUNT_OUT_OF_RANGE);
553 AssertReturn(cPages <= VBOX_MAX_ALLOC_PAGE_COUNT, VERR_PAGE_COUNT_OUT_OF_RANGE);
554 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
555 AssertReturn(*pszDesc, VERR_INVALID_PARAMETER);
556 AssertPtrReturn(pGCPtr, VERR_INVALID_PARAMETER);
557
558 /*
559 * Add the memory to the hypervisor area.
560 */
561 RTGCPTR GCPtr;
562 PMMLOOKUPHYPER pLookup;
563 int rc = mmR3HyperMap(pVM, cPages << PAGE_SHIFT, pszDesc, &GCPtr, &pLookup);
564 if (RT_SUCCESS(rc))
565 {
566 /*
567 * Create a locked memory record and tell PGM about this.
568 */
569 PMMLOCKEDMEM pLockedMem = (PMMLOCKEDMEM)MMR3HeapAlloc(pVM, MM_TAG_MM, RT_OFFSETOF(MMLOCKEDMEM, aPhysPages[cPages]));
570 if (pLockedMem)
571 {
572 pLockedMem->pv = pvR3;
573 pLockedMem->cb = cPages << PAGE_SHIFT;
574 pLockedMem->eType = MM_LOCKED_TYPE_HYPER_PAGES;
575 memset(&pLockedMem->u, 0, sizeof(pLockedMem->u));
576 for (size_t i = 0; i < cPages; i++)
577 {
578 AssertReleaseReturn(paPages[i].Phys != 0 && paPages[i].Phys != NIL_RTHCPHYS && !(paPages[i].Phys & PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR);
579 pLockedMem->aPhysPages[i].Phys = paPages[i].Phys;
580 pLockedMem->aPhysPages[i].uReserved = (RTHCUINTPTR)pLockedMem;
581 }
582
583 /* map the stuff into guest address space. */
584 if (pVM->mm.s.fPGMInitialized)
585 rc = mmR3MapLocked(pVM, pLockedMem, GCPtr, 0, ~(size_t)0, 0);
586 if (RT_SUCCESS(rc))
587 {
588 pLookup->enmType = MMLOOKUPHYPERTYPE_LOCKED;
589 pLookup->u.Locked.pvR3 = pvR3;
590 pLookup->u.Locked.pvR0 = pvR0;
591 pLookup->u.Locked.pLockedMem = pLockedMem;
592
593 /* done. */
594 *pGCPtr = GCPtr;
595 return rc;
596 }
597 /* Don't care about failure clean, we're screwed if this fails anyway. */
598 }
599 }
600
601 return rc;
602}
603
604
605/**
606 * Reserves a hypervisor memory area.
607 * Most frequent usage is fence pages and dynamically mappings like the guest PD and PDPT.
608 *
609 * @return VBox status code.
610 *
611 * @param pVM VM handle.
612 * @param cb Size of the memory. Will be rounded up to nearest page.
613 * @param pszDesc Mapping description.
614 * @param pGCPtr Where to store the assigned GC address. Optional.
615 */
616VMMR3DECL(int) MMR3HyperReserve(PVM pVM, unsigned cb, const char *pszDesc, PRTGCPTR pGCPtr)
617{
618 LogFlow(("MMR3HyperMapHCRam: cb=%d pszDesc=%p:{%s} pGCPtr=%p\n", (int)cb, pszDesc, pszDesc, pGCPtr));
619
620 /*
621 * Validate input.
622 */
623 if ( cb <= 0
624 || !pszDesc
625 || !*pszDesc)
626 {
627 AssertMsgFailed(("Invalid parameter\n"));
628 return VERR_INVALID_PARAMETER;
629 }
630
631 /*
632 * Add the memory to the hypervisor area.
633 */
634 RTGCPTR GCPtr;
635 PMMLOOKUPHYPER pLookup;
636 int rc = mmR3HyperMap(pVM, cb, pszDesc, &GCPtr, &pLookup);
637 if (RT_SUCCESS(rc))
638 {
639 pLookup->enmType = MMLOOKUPHYPERTYPE_DYNAMIC;
640 if (pGCPtr)
641 *pGCPtr = GCPtr;
642 return VINF_SUCCESS;
643 }
644 return rc;
645}
646
647
648/**
649 * Adds memory to the hypervisor memory arena.
650 *
651 * @return VBox status code.
652 * @param pVM The VM handle.
653 * @param cb Size of the memory. Will be rounded up to neares page.
654 * @param pszDesc The description of the memory.
655 * @param pGCPtr Where to store the GC address.
656 * @param ppLookup Where to store the pointer to the lookup record.
657 * @remark We assume the threading structure of VBox imposes natural
658 * serialization of most functions, this one included.
659 */
660static int mmR3HyperMap(PVM pVM, const size_t cb, const char *pszDesc, PRTGCPTR pGCPtr, PMMLOOKUPHYPER *ppLookup)
661{
662 /*
663 * Validate input.
664 */
665 const uint32_t cbAligned = RT_ALIGN_32(cb, PAGE_SIZE);
666 AssertReturn(cbAligned >= cb, VERR_INVALID_PARAMETER);
667 if (pVM->mm.s.offHyperNextStatic + cbAligned >= pVM->mm.s.cbHyperArea) /* don't use the last page, it's a fence. */
668 {
669 AssertMsgFailed(("Out of static mapping space in the HMA! offHyperAreaGC=%x cbAligned=%x cbHyperArea=%x\n",
670 pVM->mm.s.offHyperNextStatic, cbAligned, pVM->mm.s.cbHyperArea));
671 return VERR_NO_MEMORY;
672 }
673
674 /*
675 * Allocate lookup record.
676 */
677 PMMLOOKUPHYPER pLookup;
678 int rc = MMHyperAlloc(pVM, sizeof(*pLookup), 1, MM_TAG_MM, (void **)&pLookup);
679 if (RT_SUCCESS(rc))
680 {
681 /*
682 * Initialize it and insert it.
683 */
684 pLookup->offNext = pVM->mm.s.offLookupHyper;
685 pLookup->cb = cbAligned;
686 pLookup->off = pVM->mm.s.offHyperNextStatic;
687 pVM->mm.s.offLookupHyper = (uint8_t *)pLookup - (uint8_t *)pVM->mm.s.pHyperHeapR3;
688 if (pLookup->offNext != (int32_t)NIL_OFFSET)
689 pLookup->offNext -= pVM->mm.s.offLookupHyper;
690 pLookup->enmType = MMLOOKUPHYPERTYPE_INVALID;
691 memset(&pLookup->u, 0xff, sizeof(pLookup->u));
692 pLookup->pszDesc = pszDesc;
693
694 /* Mapping. */
695 *pGCPtr = pVM->mm.s.pvHyperAreaGC + pVM->mm.s.offHyperNextStatic;
696 pVM->mm.s.offHyperNextStatic += cbAligned;
697
698 /* Return pointer. */
699 *ppLookup = pLookup;
700 }
701
702 AssertRC(rc);
703 LogFlow(("mmR3HyperMap: returns %Rrc *pGCPtr=%RGv\n", rc, *pGCPtr));
704 return rc;
705}
706
707
708/**
709 * Allocates a new heap.
710 *
711 * @returns VBox status code.
712 * @param pVM The VM handle.
713 * @param cb The size of the new heap.
714 * @param ppHeap Where to store the heap pointer on successful return.
715 * @param pR0PtrHeap Where to store the ring-0 address of the heap on
716 * success.
717 */
718static int mmR3HyperHeapCreate(PVM pVM, const size_t cb, PMMHYPERHEAP *ppHeap, PRTR0PTR pR0PtrHeap)
719{
720 /*
721 * Allocate the hypervisor heap.
722 */
723 const uint32_t cbAligned = RT_ALIGN_32(cb, PAGE_SIZE);
724 AssertReturn(cbAligned >= cb, VERR_INVALID_PARAMETER);
725 uint32_t const cPages = cb >> PAGE_SHIFT;
726 PSUPPAGE paPages = (PSUPPAGE)MMR3HeapAlloc(pVM, MM_TAG_MM, cPages * sizeof(paPages[0]));
727 if (!paPages)
728 return VERR_NO_MEMORY;
729 void *pv;
730 RTR0PTR pvR0 = NIL_RTR0PTR;
731 int rc = SUPR3PageAllocEx(cPages,
732 0 /*fFlags*/,
733 &pv,
734#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
735 VMMIsHwVirtExtForced(pVM) ? &pvR0 : NULL,
736#else
737 NULL,
738#endif
739 paPages);
740 if (RT_SUCCESS(rc))
741 {
742#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
743 if (!VMMIsHwVirtExtForced(pVM))
744 pvR0 = NIL_RTR0PTR;
745#else
746 pvR0 = (uintptr_t)pv;
747#endif
748 memset(pv, 0, cbAligned);
749
750 /*
751 * Initialize the heap and first free chunk.
752 */
753 PMMHYPERHEAP pHeap = (PMMHYPERHEAP)pv;
754 pHeap->u32Magic = MMHYPERHEAP_MAGIC;
755 pHeap->pbHeapR3 = (uint8_t *)pHeap + MMYPERHEAP_HDR_SIZE;
756 pHeap->pbHeapR0 = pvR0 != NIL_RTR0PTR ? pvR0 + MMYPERHEAP_HDR_SIZE : NIL_RTR0PTR;
757 //pHeap->pbHeapRC = 0; // set by mmR3HyperHeapMap()
758 pHeap->pVMR3 = pVM;
759 pHeap->pVMR0 = pVM->pVMR0;
760 pHeap->pVMRC = pVM->pVMRC;
761 pHeap->cbHeap = cbAligned - MMYPERHEAP_HDR_SIZE;
762 pHeap->cbFree = pHeap->cbHeap - sizeof(MMHYPERCHUNK);
763 //pHeap->offFreeHead = 0;
764 //pHeap->offFreeTail = 0;
765 pHeap->offPageAligned = pHeap->cbHeap;
766 //pHeap->HyperHeapStatTree = 0;
767 pHeap->paPages = paPages;
768
769 PMMHYPERCHUNKFREE pFree = (PMMHYPERCHUNKFREE)pHeap->pbHeapR3;
770 pFree->cb = pHeap->cbFree;
771 //pFree->core.offNext = 0;
772 MMHYPERCHUNK_SET_TYPE(&pFree->core, MMHYPERCHUNK_FLAGS_FREE);
773 pFree->core.offHeap = -(int32_t)MMYPERHEAP_HDR_SIZE;
774 //pFree->offNext = 0;
775 //pFree->offPrev = 0;
776
777 STAMR3Register(pVM, &pHeap->cbHeap, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, "/MM/HyperHeap/cbHeap", STAMUNIT_BYTES, "The heap size.");
778 STAMR3Register(pVM, &pHeap->cbFree, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, "/MM/HyperHeap/cbFree", STAMUNIT_BYTES, "The free space.");
779
780 *ppHeap = pHeap;
781 *pR0PtrHeap = pvR0;
782 return VINF_SUCCESS;
783 }
784 AssertMsgFailed(("SUPR3PageAllocEx(%d,,,,) -> %Rrc\n", cbAligned >> PAGE_SHIFT, rc));
785
786 *ppHeap = NULL;
787 return rc;
788}
789
790
791/**
792 * Allocates a new heap.
793 */
794static int mmR3HyperHeapMap(PVM pVM, PMMHYPERHEAP pHeap, PRTGCPTR ppHeapGC)
795{
796 Assert(RT_ALIGN_Z(pHeap->cbHeap + MMYPERHEAP_HDR_SIZE, PAGE_SIZE) == pHeap->cbHeap + MMYPERHEAP_HDR_SIZE);
797 Assert(pHeap->paPages);
798 int rc = MMR3HyperMapPages(pVM,
799 pHeap,
800 pHeap->pbHeapR0 != NIL_RTR0PTR ? pHeap->pbHeapR0 - MMYPERHEAP_HDR_SIZE : NIL_RTR0PTR,
801 (pHeap->cbHeap + MMYPERHEAP_HDR_SIZE) >> PAGE_SHIFT,
802 pHeap->paPages,
803 "Heap", ppHeapGC);
804 if (RT_SUCCESS(rc))
805 {
806 pHeap->pVMRC = pVM->pVMRC;
807 pHeap->pbHeapRC = *ppHeapGC + MMYPERHEAP_HDR_SIZE;
808 /* Reserve a page for fencing. */
809 MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
810
811 /* We won't need these any more. */
812 MMR3HeapFree(pHeap->paPages);
813 pHeap->paPages = NULL;
814 }
815 return rc;
816}
817
818
#if 0
/**
 * Destroys a heap.
 *
 * Disabled (never compiled): heap teardown happens as part of unlocking and
 * freeing the locked memory that backs it.
 */
static int mmR3HyperHeapDestroy(PVM pVM, PMMHYPERHEAP pHeap)
{
    /* all this is dealt with when unlocking and freeing locked memory. */
}
#endif
828
829
830/**
831 * Allocates memory in the Hypervisor (GC VMM) area which never will
832 * be freed and doesn't have any offset based relation to other heap blocks.
833 *
834 * The latter means that two blocks allocated by this API will not have the
835 * same relative position to each other in GC and HC. In short, never use
836 * this API for allocating nodes for an offset based AVL tree!
837 *
838 * The returned memory is of course zeroed.
839 *
840 * @returns VBox status code.
841 * @param pVM The VM to operate on.
842 * @param cb Number of bytes to allocate.
843 * @param uAlignment Required memory alignment in bytes.
844 * Values are 0,8,16,32 and PAGE_SIZE.
845 * 0 -> default alignment, i.e. 8 bytes.
846 * @param enmTag The statistics tag.
847 * @param ppv Where to store the address to the allocated
848 * memory.
849 * @remark This is assumed not to be used at times when serialization is required.
850 */
851VMMDECL(int) MMR3HyperAllocOnceNoRel(PVM pVM, size_t cb, unsigned uAlignment, MMTAG enmTag, void **ppv)
852{
853 AssertMsg(cb >= 8, ("Hey! Do you really mean to allocate less than 8 bytes?! cb=%d\n", cb));
854
855 /*
856 * Choose between allocating a new chunk of HMA memory
857 * and the heap. We will only do BIG allocations from HMA and
858 * only at creation time.
859 */
860 if ( ( cb < _64K
861 && ( uAlignment != PAGE_SIZE
862 || cb < 48*_1K))
863 || VMR3GetState(pVM) != VMSTATE_CREATING)
864 {
865 int rc = MMHyperAlloc(pVM, cb, uAlignment, enmTag, ppv);
866 if ( rc != VERR_MM_HYPER_NO_MEMORY
867 || cb <= 8*_1K)
868 {
869 Log2(("MMR3HyperAllocOnceNoRel: cb=%#zx uAlignment=%#x returns %Rrc and *ppv=%p\n",
870 cb, uAlignment, rc, *ppv));
871 return rc;
872 }
873 }
874
875 /*
876 * Validate alignment.
877 */
878 switch (uAlignment)
879 {
880 case 0:
881 case 8:
882 case 16:
883 case 32:
884 case PAGE_SIZE:
885 break;
886 default:
887 AssertMsgFailed(("Invalid alignment %u\n", uAlignment));
888 return VERR_INVALID_PARAMETER;
889 }
890
891 /*
892 * Allocate the pages and map them into HMA space.
893 */
894 cb = RT_ALIGN(cb, PAGE_SIZE);
895 uint32_t const cPages = cb >> PAGE_SHIFT;
896 PSUPPAGE paPages = (PSUPPAGE)RTMemTmpAlloc(cPages * sizeof(paPages[0]));
897 if (!paPages)
898 return VERR_NO_TMP_MEMORY;
899 void *pvPages;
900 RTR0PTR pvR0 = NIL_RTR0PTR;
901 int rc = SUPR3PageAllocEx(cPages,
902 0 /*fFlags*/,
903 &pvPages,
904#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
905 VMMIsHwVirtExtForced(pVM) ? &pvR0 : NULL,
906#else
907 NULL,
908#endif
909 paPages);
910 if (RT_SUCCESS(rc))
911 {
912#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
913 if (!VMMIsHwVirtExtForced(pVM))
914 pvR0 = NIL_RTR0PTR;
915#else
916 pvR0 = (uintptr_t)pvPages;
917#endif
918 memset(pvPages, 0, cb);
919
920 RTGCPTR GCPtr;
921 rc = MMR3HyperMapPages(pVM,
922 pvPages,
923 pvR0,
924 cPages,
925 paPages,
926 MMR3HeapAPrintf(pVM, MM_TAG_MM, "alloc once (%s)", mmR3GetTagName(enmTag)),
927 &GCPtr);
928 if (RT_SUCCESS(rc))
929 {
930 *ppv = pvPages;
931 Log2(("MMR3HyperAllocOnceNoRel: cb=%#x uAlignment=%#x returns VINF_SUCCESS and *ppv=%p\n",
932 cb, uAlignment, *ppv));
933 MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
934 return rc;
935 }
936 AssertMsgFailed(("Failed to allocate %zd bytes! %Rrc\n", cb, rc));
937 SUPR3PageFreeEx(pvPages, cPages);
938
939
940 /*
941 * HACK ALERT! Try allocate it off the heap so that we don't freak
942 * out during vga/vmmdev mmio2 allocation with certain ram sizes.
943 */
944 /** @todo make a proper fix for this so we will never end up in this kind of situation! */
945 Log(("MMR3HyperAllocOnceNoRel: MMR3HyperMapHCRam failed with rc=%Rrc, try MMHyperAlloc(,%#d,,) instead\n", rc, cb));
946 int rc2 = MMHyperAlloc(pVM, cb, uAlignment, enmTag, ppv);
947 if (RT_SUCCESS(rc2))
948 {
949 Log2(("MMR3HyperAllocOnceNoRel: cb=%#x uAlignment=%#x returns %Rrc and *ppv=%p\n",
950 cb, uAlignment, rc, *ppv));
951 return rc;
952 }
953 }
954 else
955 AssertMsgFailed(("Failed to allocate %zd bytes! %Rrc\n", cb, rc));
956
957 if (rc == VERR_NO_MEMORY)
958 rc = VERR_MM_HYPER_NO_MEMORY;
959 LogRel(("MMR3HyperAllocOnceNoRel: cb=%#zx uAlignment=%#x returns %Rrc\n", cb, uAlignment, rc));
960 return rc;
961}
962
963
964/**
965 * Convert hypervisor HC virtual address to HC physical address.
966 *
967 * @returns HC physical address.
968 * @param pVM VM Handle
969 * @param pvR3 Host context virtual address.
970 */
971VMMR3DECL(RTHCPHYS) MMR3HyperHCVirt2HCPhys(PVM pVM, void *pvR3)
972{
973 PMMLOOKUPHYPER pLookup = (PMMLOOKUPHYPER)((uint8_t *)pVM->mm.s.pHyperHeapR3 + pVM->mm.s.offLookupHyper);
974 for (;;)
975 {
976 switch (pLookup->enmType)
977 {
978 case MMLOOKUPHYPERTYPE_LOCKED:
979 {
980 unsigned off = (uint8_t *)pvR3 - (uint8_t *)pLookup->u.Locked.pvR3;
981 if (off < pLookup->cb)
982 return (pLookup->u.Locked.pLockedMem->aPhysPages[off >> PAGE_SHIFT].Phys & X86_PTE_PAE_PG_MASK) | (off & PAGE_OFFSET_MASK);
983 break;
984 }
985
986 case MMLOOKUPHYPERTYPE_HCPHYS:
987 {
988 unsigned off = (uint8_t *)pvR3 - (uint8_t *)pLookup->u.HCPhys.pvR3;
989 if (off < pLookup->cb)
990 return pLookup->u.HCPhys.HCPhys + off;
991 break;
992 }
993
994 case MMLOOKUPHYPERTYPE_GCPHYS:
995 case MMLOOKUPHYPERTYPE_MMIO2:
996 case MMLOOKUPHYPERTYPE_DYNAMIC:
997 /* can (or don't want to) convert these kind of records. */
998 break;
999
1000 default:
1001 AssertMsgFailed(("enmType=%d\n", pLookup->enmType));
1002 break;
1003 }
1004
1005 /* next */
1006 if ((unsigned)pLookup->offNext == NIL_OFFSET)
1007 break;
1008 pLookup = (PMMLOOKUPHYPER)((uint8_t *)pLookup + pLookup->offNext);
1009 }
1010
1011 AssertMsgFailed(("pvR3=%p is not inside the hypervisor memory area!\n", pvR3));
1012 return NIL_RTHCPHYS;
1013}
1014
1015
#if 0 /* unused, not implemented */
/**
 * Convert hypervisor HC physical address to HC virtual address.
 *
 * Convenience wrapper around MMR3HyperHCPhys2HCVirtEx that asserts on
 * failure instead of returning a status code.
 *
 * @returns HC virtual address, or NULL if the lookup fails.
 * @param   pVM     VM Handle
 * @param   HCPhys  Host context physical address.
 */
VMMR3DECL(void *) MMR3HyperHCPhys2HCVirt(PVM pVM, RTHCPHYS HCPhys)
{
    void *pv;
    int rc = MMR3HyperHCPhys2HCVirtEx(pVM, HCPhys, &pv);
    if (RT_SUCCESS(rc))
        return pv;
    AssertMsgFailed(("Invalid address HCPhys=%x rc=%d\n", HCPhys, rc));
    return NULL;
}


/**
 * Convert hypervisor HC physical address to HC virtual address.
 *
 * @returns VBox status.  Currently always VERR_INVALID_POINTER since the
 *          reverse lookup has never been needed (the whole #if 0 region
 *          is disabled).
 * @param   pVM     VM Handle
 * @param   HCPhys  Host context physical address.
 * @param   ppv     Where to store the HC virtual address.
 */
VMMR3DECL(int) MMR3HyperHCPhys2HCVirtEx(PVM pVM, RTHCPHYS HCPhys, void **ppv)
{
    /*
     * Linear search.
     */
    /** @todo implement when actually used. */
    return VERR_INVALID_POINTER;
}
#endif /* unused, not implemented */
1052
1053
1054/**
1055 * Read hypervisor memory from GC virtual address.
1056 *
1057 * @returns VBox status.
1058 * @param pVM VM handle.
1059 * @param pvDst Destination address (HC of course).
1060 * @param GCPtr GC virtual address.
1061 * @param cb Number of bytes to read.
1062 *
1063 * @remarks For DBGF only.
1064 */
1065VMMR3DECL(int) MMR3HyperReadGCVirt(PVM pVM, void *pvDst, RTGCPTR GCPtr, size_t cb)
1066{
1067 if (GCPtr - pVM->mm.s.pvHyperAreaGC >= pVM->mm.s.cbHyperArea)
1068 return VERR_INVALID_PARAMETER;
1069 return PGMR3MapRead(pVM, pvDst, GCPtr, cb);
1070}
1071
1072
1073/**
1074 * Info handler for 'hma', it dumps the list of lookup records for the hypervisor memory area.
1075 *
1076 * @param pVM The VM handle.
1077 * @param pHlp Callback functions for doing output.
1078 * @param pszArgs Argument string. Optional and specific to the handler.
1079 */
1080static DECLCALLBACK(void) mmR3HyperInfoHma(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1081{
1082 pHlp->pfnPrintf(pHlp, "Hypervisor Memory Area (HMA) Layout: Base %RGv, 0x%08x bytes\n",
1083 pVM->mm.s.pvHyperAreaGC, pVM->mm.s.cbHyperArea);
1084
1085 PMMLOOKUPHYPER pLookup = (PMMLOOKUPHYPER)((uint8_t *)pVM->mm.s.pHyperHeapR3 + pVM->mm.s.offLookupHyper);
1086 for (;;)
1087 {
1088 switch (pLookup->enmType)
1089 {
1090 case MMLOOKUPHYPERTYPE_LOCKED:
1091 pHlp->pfnPrintf(pHlp, "%RGv-%RGv %RHv %RHv LOCKED %-*s %s\n",
1092 pLookup->off + pVM->mm.s.pvHyperAreaGC,
1093 pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
1094 pLookup->u.Locked.pvR3,
1095 pLookup->u.Locked.pvR0,
1096 sizeof(RTHCPTR) * 2,
1097 pLookup->u.Locked.pLockedMem->eType == MM_LOCKED_TYPE_HYPER_NOFREE ? "nofree"
1098 : pLookup->u.Locked.pLockedMem->eType == MM_LOCKED_TYPE_HYPER ? "autofree"
1099 : pLookup->u.Locked.pLockedMem->eType == MM_LOCKED_TYPE_HYPER_PAGES ? "pages"
1100 : pLookup->u.Locked.pLockedMem->eType == MM_LOCKED_TYPE_PHYS ? "gstphys"
1101 : "??",
1102 pLookup->pszDesc);
1103 break;
1104
1105 case MMLOOKUPHYPERTYPE_HCPHYS:
1106 pHlp->pfnPrintf(pHlp, "%RGv-%RGv %RHv %RHv HCPHYS %RHp %s\n",
1107 pLookup->off + pVM->mm.s.pvHyperAreaGC,
1108 pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
1109 pLookup->u.HCPhys.pvR3,
1110 pLookup->u.HCPhys.pvR0,
1111 pLookup->u.HCPhys.HCPhys,
1112 pLookup->pszDesc);
1113 break;
1114
1115 case MMLOOKUPHYPERTYPE_GCPHYS:
1116 pHlp->pfnPrintf(pHlp, "%RGv-%RGv %*s GCPHYS %RGp%*s %s\n",
1117 pLookup->off + pVM->mm.s.pvHyperAreaGC,
1118 pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
1119 sizeof(RTHCPTR) * 2 * 2 + 1, "",
1120 pLookup->u.GCPhys.GCPhys, RT_ABS((int)(sizeof(RTHCPHYS) - sizeof(RTGCPHYS))) * 2, "",
1121 pLookup->pszDesc);
1122 break;
1123
1124 case MMLOOKUPHYPERTYPE_MMIO2:
1125 pHlp->pfnPrintf(pHlp, "%RGv-%RGv %*s MMIO2 %RGp%*s %s\n",
1126 pLookup->off + pVM->mm.s.pvHyperAreaGC,
1127 pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
1128 sizeof(RTHCPTR) * 2 * 2 + 1, "",
1129 pLookup->u.MMIO2.off, RT_ABS((int)(sizeof(RTHCPHYS) - sizeof(RTGCPHYS))) * 2, "",
1130 pLookup->pszDesc);
1131 break;
1132
1133 case MMLOOKUPHYPERTYPE_DYNAMIC:
1134 pHlp->pfnPrintf(pHlp, "%RGv-%RGv %*s DYNAMIC %*s %s\n",
1135 pLookup->off + pVM->mm.s.pvHyperAreaGC,
1136 pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
1137 sizeof(RTHCPTR) * 2 * 2 + 1, "",
1138 sizeof(RTHCPTR) * 2, "",
1139 pLookup->pszDesc);
1140 break;
1141
1142 default:
1143 AssertMsgFailed(("enmType=%d\n", pLookup->enmType));
1144 break;
1145 }
1146
1147 /* next */
1148 if ((unsigned)pLookup->offNext == NIL_OFFSET)
1149 break;
1150 pLookup = (PMMLOOKUPHYPER)((uint8_t *)pLookup + pLookup->offNext);
1151 }
1152}
1153
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette