VirtualBox

source: vbox/trunk/src/VBox/VMM/MMHyper.cpp@ 7635

Last change on this file since 7635 was 7635, checked in by vboxsync, 17 years ago

The new MMIO2 code.
WARNING! This changes the pci mapping protocol for MMIO2 so it's working the same way as I/O ports and normal MMIO memory. External users of the interface will have to update their mapping routines.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 38.7 KB
Line 
1/* $Id: MMHyper.cpp 7635 2008-03-28 17:15:38Z vboxsync $ */
2/** @file
3 * MM - Memory Monitor(/Manager) - Hypervisor Memory Area.
4 */
5
6/*
7 * Copyright (C) 2006-2007 innotek GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19
20
21/*******************************************************************************
22* Header Files *
23*******************************************************************************/
24#define LOG_GROUP LOG_GROUP_MM_HYPER
25#include <VBox/pgm.h>
26#include <VBox/mm.h>
27#include <VBox/dbgf.h>
28#include "MMInternal.h"
29#include <VBox/vm.h>
30#include <VBox/err.h>
31#include <VBox/param.h>
32#include <VBox/log.h>
33#include <iprt/alloc.h>
34#include <iprt/assert.h>
35#include <iprt/string.h>
36
37
38/*******************************************************************************
39* Internal Functions *
40*******************************************************************************/
41static DECLCALLBACK(bool) mmR3HyperRelocateCallback(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew, PGMRELOCATECALL enmMode, void *pvUser);
42static int mmR3HyperMap(PVM pVM, const size_t cb, const char *pszDesc, PRTGCPTR pGCPtr, PMMLOOKUPHYPER *ppLookup);
43static int mmR3HyperHeapCreate(PVM pVM, const size_t cb, PMMHYPERHEAP *ppHeap);
44static int mmR3HyperHeapMap(PVM pVM, PMMHYPERHEAP pHeap, PRTGCPTR ppHeapGC);
45static DECLCALLBACK(void) mmR3HyperInfoHma(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
46
47
/**
 * Initializes the hypervisor related MM stuff without
 * calling down to PGM.
 *
 * PGM is not initialized at this point, PGM relies on
 * the heap to initialize.
 *
 * @returns VBox status.
 */
int mmR3HyperInit(PVM pVM)
{
    LogFlow(("mmR3HyperInit:\n"));

    /*
     * Decide Hypervisor mapping in the guest context
     * And setup various hypervisor area and heap parameters.
     */
    /* The HMA base and maximum size are fixed compile-time constants; the base
       must be 4MB (PD entry) aligned and below the ROM/MMIO hole at 0xff000000. */
    pVM->mm.s.pvHyperAreaGC = (RTGCPTR)MM_HYPER_AREA_ADDRESS;
    pVM->mm.s.cbHyperArea   = MM_HYPER_AREA_MAX_SIZE;
    AssertRelease(RT_ALIGN_T(pVM->mm.s.pvHyperAreaGC, 1 << X86_PD_SHIFT, RTGCPTR) == pVM->mm.s.pvHyperAreaGC);
    Assert(pVM->mm.s.pvHyperAreaGC < 0xff000000);

    /* The heap size comes from the optional CFGM value MM/cbHyperHeap;
       a missing key (or missing MM node) falls back to a 1280KB default,
       any other query failure is fatal. The size is rounded up to a page. */
    uint32_t cbHyperHeap;
    int rc = CFGMR3QueryU32(CFGMR3GetChild(CFGMR3GetRoot(pVM), "MM"), "cbHyperHeap", &cbHyperHeap);
    if (rc == VERR_CFGM_NO_PARENT || rc == VERR_CFGM_VALUE_NOT_FOUND)
        cbHyperHeap = 1280*_1K;
    else if (VBOX_FAILURE(rc))
    {
        LogRel(("MM/cbHyperHeap query -> %Vrc\n", rc));
        AssertRCReturn(rc, rc);
    }
    cbHyperHeap = RT_ALIGN_32(cbHyperHeap, PAGE_SIZE);

    /*
     * Allocate the hypervisor heap.
     *
     * (This must be done before we start adding memory to the
     * hypervisor static area because lookup records are allocated from it.)
     */
    rc = mmR3HyperHeapCreate(pVM, cbHyperHeap, &pVM->mm.s.pHyperHeapHC);
    if (VBOX_SUCCESS(rc))
    {
        /*
         * Make a small head fence to fend of accidental sequential access.
         */
        MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);

        /*
         * Map the VM structure into the hypervisor space.
         */
        rc = MMR3HyperMapPages(pVM, pVM, pVM->pVMR0, RT_ALIGN_Z(sizeof(VM), PAGE_SIZE) >> PAGE_SHIFT, pVM->paVMPagesR3, "VM", &pVM->pVMGC);
        if (VBOX_SUCCESS(rc))
        {
            /* Reserve a page for fencing. */
            MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);

            /*
             * Map the heap into the hypervisor space.
             */
            rc = mmR3HyperHeapMap(pVM, pVM->mm.s.pHyperHeapHC, &pVM->mm.s.pHyperHeapGC);
            if (VBOX_SUCCESS(rc))
            {
                /*
                 * Register info handlers.
                 */
                DBGFR3InfoRegisterInternal(pVM, "hma", "Show the layout of the Hypervisor Memory Area.", mmR3HyperInfoHma);

                LogFlow(("mmR3HyperInit: returns VINF_SUCCESS\n"));
                return VINF_SUCCESS;
            }
            /* Caller will do proper cleanup. */
        }
    }

    LogFlow(("mmR3HyperInit: returns %Vrc\n", rc));
    return rc;
}
125
126
127/**
128 * Finalizes the HMA mapping.
129 *
130 * This is called later during init, most (all) HMA allocations should be done
131 * by the time this function is called.
132 *
133 * @returns VBox status.
134 */
135MMR3DECL(int) MMR3HyperInitFinalize(PVM pVM)
136{
137 LogFlow(("MMR3HyperInitFinalize:\n"));
138
139 /*
140 * Adjust and create the HMA mapping.
141 */
142 while ((RTINT)pVM->mm.s.offHyperNextStatic + 64*_1K < (RTINT)pVM->mm.s.cbHyperArea - _4M)
143 pVM->mm.s.cbHyperArea -= _4M;
144 int rc = PGMR3MapPT(pVM, pVM->mm.s.pvHyperAreaGC, pVM->mm.s.cbHyperArea,
145 mmR3HyperRelocateCallback, NULL, "Hypervisor Memory Area");
146 if (VBOX_FAILURE(rc))
147 return rc;
148 pVM->mm.s.fPGMInitialized = true;
149
150 /*
151 * Do all the delayed mappings.
152 */
153 PMMLOOKUPHYPER pLookup = (PMMLOOKUPHYPER)((uintptr_t)pVM->mm.s.pHyperHeapHC + pVM->mm.s.offLookupHyper);
154 for (;;)
155 {
156 RTGCPTR GCPtr = pVM->mm.s.pvHyperAreaGC + pLookup->off;
157 unsigned cPages = pLookup->cb >> PAGE_SHIFT;
158 switch (pLookup->enmType)
159 {
160 case MMLOOKUPHYPERTYPE_LOCKED:
161 rc = mmR3MapLocked(pVM, pLookup->u.Locked.pLockedMem, GCPtr, 0, cPages, 0);
162 break;
163
164 case MMLOOKUPHYPERTYPE_HCPHYS:
165 rc = PGMMap(pVM, GCPtr, pLookup->u.HCPhys.HCPhys, pLookup->cb, 0);
166 break;
167
168 case MMLOOKUPHYPERTYPE_GCPHYS:
169 {
170 const RTGCPHYS GCPhys = pLookup->u.GCPhys.GCPhys;
171 const size_t cb = pLookup->cb;
172 for (unsigned off = 0; off < cb; off += PAGE_SIZE)
173 {
174 RTHCPHYS HCPhys;
175 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys + off, &HCPhys);
176 if (VBOX_FAILURE(rc))
177 break;
178 rc = PGMMap(pVM, GCPtr + off, HCPhys, PAGE_SIZE, 0);
179 if (VBOX_FAILURE(rc))
180 break;
181 }
182 break;
183 }
184
185 case MMLOOKUPHYPERTYPE_MMIO2:
186 {
187 const RTGCPHYS offEnd = pLookup->u.MMIO2.off + pLookup->cb;
188 for (RTGCPHYS offCur = pLookup->u.MMIO2.off; offCur < offEnd; offCur += PAGE_SIZE)
189 {
190 RTHCPHYS HCPhys;
191 rc = PGMR3PhysMMIO2GetHCPhys(pVM, pLookup->u.MMIO2.pDevIns, pLookup->u.MMIO2.iRegion, offCur, &HCPhys);
192 if (RT_FAILURE(rc))
193 break;
194 rc = PGMMap(pVM, GCPtr + (offCur - pLookup->u.MMIO2.off), HCPhys, PAGE_SIZE, 0);
195 if (RT_FAILURE(rc))
196 break;
197 }
198 break;
199 }
200
201 case MMLOOKUPHYPERTYPE_DYNAMIC:
202 /* do nothing here since these are either fences or managed by someone else using PGM. */
203 break;
204
205 default:
206 AssertMsgFailed(("enmType=%d\n", pLookup->enmType));
207 break;
208 }
209
210 if (VBOX_FAILURE(rc))
211 {
212 AssertMsgFailed(("rc=%Vrc cb=%d GCPtr=%VGv enmType=%d pszDesc=%s\n",
213 rc, pLookup->cb, pLookup->enmType, pLookup->pszDesc));
214 return rc;
215 }
216
217 /* next */
218 if (pLookup->offNext == (int32_t)NIL_OFFSET)
219 break;
220 pLookup = (PMMLOOKUPHYPER)((uintptr_t)pLookup + pLookup->offNext);
221 }
222
223 LogFlow(("MMR3HyperInitFinalize: returns VINF_SUCCESS\n"));
224 return VINF_SUCCESS;
225}
226
227
228/**
229 * Callback function which will be called when PGM is trying to find
230 * a new location for the mapping.
231 *
232 * The callback is called in two modes, 1) the check mode and 2) the relocate mode.
233 * In 1) the callback should say if it objects to a suggested new location. If it
234 * accepts the new location, it is called again for doing it's relocation.
235 *
236 *
237 * @returns true if the location is ok.
238 * @returns false if another location should be found.
239 * @param pVM The VM handle.
240 * @param GCPtrOld The old virtual address.
241 * @param GCPtrNew The new virtual address.
242 * @param enmMode Used to indicate the callback mode.
243 * @param pvUser User argument. Ignored.
244 * @remark The return value is no a failure indicator, it's an acceptance
245 * indicator. Relocation can not fail!
246 */
247static DECLCALLBACK(bool) mmR3HyperRelocateCallback(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew, PGMRELOCATECALL enmMode, void *pvUser)
248{
249 switch (enmMode)
250 {
251 /*
252 * Verify location - all locations are good for us.
253 */
254 case PGMRELOCATECALL_SUGGEST:
255 return true;
256
257 /*
258 * Execute the relocation.
259 */
260 case PGMRELOCATECALL_RELOCATE:
261 {
262 /*
263 * Accepted!
264 */
265 AssertMsg(GCPtrOld == pVM->mm.s.pvHyperAreaGC, ("GCPtrOld=%#x pVM->mm.s.pvHyperAreaGC=%#x\n", GCPtrOld, pVM->mm.s.pvHyperAreaGC));
266 Log(("Relocating the hypervisor from %#x to %#x\n", GCPtrOld, GCPtrNew));
267
268 /* relocate our selves and the VM structure. */
269 RTGCINTPTR offDelta = GCPtrNew - GCPtrOld;
270 pVM->pVMGC += offDelta;
271 pVM->mm.s.pvHyperAreaGC += offDelta;
272 pVM->mm.s.pHyperHeapGC += offDelta;
273 pVM->mm.s.pHyperHeapHC->pbHeapGC += offDelta;
274 pVM->mm.s.pHyperHeapHC->pVMGC += pVM->pVMGC;
275
276 /* relocate the rest. */
277 VMR3Relocate(pVM, offDelta);
278 return true;
279 }
280
281 default:
282 AssertMsgFailed(("Invalid relocation mode %d\n", enmMode));
283 }
284
285 return false;
286}
287
288
289/**
290 * Maps contiguous HC physical memory into the hypervisor region in the GC.
291 *
292 * @return VBox status code.
293 *
294 * @param pVM VM handle.
295 * @param pvHC Host context address of the memory. Must be page aligned!
296 * @param HCPhys Host context physical address of the memory to be mapped. Must be page aligned!
297 * @param cb Size of the memory. Will be rounded up to nearest page.
298 * @param pszDesc Description.
299 * @param pGCPtr Where to store the GC address.
300 */
301MMR3DECL(int) MMR3HyperMapHCPhys(PVM pVM, void *pvHC, RTHCPHYS HCPhys, size_t cb, const char *pszDesc, PRTGCPTR pGCPtr)
302{
303 LogFlow(("MMR3HyperMapHCPhys: pvHc=%p HCPhys=%VHp cb=%d pszDesc=%p:{%s} pGCPtr=%p\n", pvHC, HCPhys, (int)cb, pszDesc, pszDesc, pGCPtr));
304
305 /*
306 * Validate input.
307 */
308 AssertReturn(RT_ALIGN_P(pvHC, PAGE_SIZE) == pvHC, VERR_INVALID_PARAMETER);
309 AssertReturn(RT_ALIGN_T(HCPhys, PAGE_SIZE, RTHCPHYS) == HCPhys, VERR_INVALID_PARAMETER);
310 AssertReturn(pszDesc && *pszDesc, VERR_INVALID_PARAMETER);
311
312 /*
313 * Add the memory to the hypervisor area.
314 */
315 uint32_t cbAligned = RT_ALIGN_32(cb, PAGE_SIZE);
316 AssertReturn(cbAligned >= cb, VERR_INVALID_PARAMETER);
317 RTGCPTR GCPtr;
318 PMMLOOKUPHYPER pLookup;
319 int rc = mmR3HyperMap(pVM, cbAligned, pszDesc, &GCPtr, &pLookup);
320 if (VBOX_SUCCESS(rc))
321 {
322 pLookup->enmType = MMLOOKUPHYPERTYPE_HCPHYS;
323 pLookup->u.HCPhys.pvHC = pvHC;
324 pLookup->u.HCPhys.HCPhys = HCPhys;
325
326 /*
327 * Update the page table.
328 */
329 if (pVM->mm.s.fPGMInitialized)
330 rc = PGMMap(pVM, GCPtr, HCPhys, cbAligned, 0);
331 if (VBOX_SUCCESS(rc))
332 *pGCPtr = GCPtr;
333 }
334 return rc;
335}
336
337
338/**
339 * Maps contiguous GC physical memory into the hypervisor region in the GC.
340 *
341 * @return VBox status code.
342 *
343 * @param pVM VM handle.
344 * @param GCPhys Guest context physical address of the memory to be mapped. Must be page aligned!
345 * @param cb Size of the memory. Will be rounded up to nearest page.
346 * @param pszDesc Mapping description.
347 * @param pGCPtr Where to store the GC address.
348 */
349MMR3DECL(int) MMR3HyperMapGCPhys(PVM pVM, RTGCPHYS GCPhys, size_t cb, const char *pszDesc, PRTGCPTR pGCPtr)
350{
351 LogFlow(("MMR3HyperMapGCPhys: GCPhys=%VGp cb=%d pszDesc=%p:{%s} pGCPtr=%p\n", GCPhys, (int)cb, pszDesc, pszDesc, pGCPtr));
352
353 /*
354 * Validate input.
355 */
356 AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
357 AssertReturn(pszDesc && *pszDesc, VERR_INVALID_PARAMETER);
358
359 /*
360 * Add the memory to the hypervisor area.
361 */
362 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
363 RTGCPTR GCPtr;
364 PMMLOOKUPHYPER pLookup;
365 int rc = mmR3HyperMap(pVM, cb, pszDesc, &GCPtr, &pLookup);
366 if (VBOX_SUCCESS(rc))
367 {
368 pLookup->enmType = MMLOOKUPHYPERTYPE_GCPHYS;
369 pLookup->u.GCPhys.GCPhys = GCPhys;
370
371 /*
372 * Update the page table.
373 */
374 for (unsigned off = 0; off < cb; off += PAGE_SIZE)
375 {
376 RTHCPHYS HCPhys;
377 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys + off, &HCPhys);
378 AssertRC(rc);
379 if (VBOX_FAILURE(rc))
380 {
381 AssertMsgFailed(("rc=%Vrc GCPhys=%VGv off=%#x %s\n", rc, GCPhys, off, pszDesc));
382 break;
383 }
384 if (pVM->mm.s.fPGMInitialized)
385 {
386 rc = PGMMap(pVM, GCPtr + off, HCPhys, PAGE_SIZE, 0);
387 AssertRC(rc);
388 if (VBOX_FAILURE(rc))
389 {
390 AssertMsgFailed(("rc=%Vrc GCPhys=%VGv off=%#x %s\n", rc, GCPhys, off, pszDesc));
391 break;
392 }
393 }
394 }
395
396 if (VBOX_SUCCESS(rc) && pGCPtr)
397 *pGCPtr = GCPtr;
398 }
399 return rc;
400}
401
402
403/**
404 * Maps a portion of an MMIO2 region into the hypervisor region.
405 *
406 * Callers of this API must never deregister the MMIO2 region before the
407 * VM is powered off. If this becomes a requirement MMR3HyperUnmapMMIO2
408 * API will be needed to perform cleanups.
409 *
410 * @return VBox status code.
411 *
412 * @param pVM Pointer to the shared VM structure.
413 * @param pDevIns The device owning the MMIO2 memory.
414 * @param iRegion The region.
415 * @param off The offset into the region. Will be rounded down to closest page boundrary.
416 * @param cb The number of bytes to map. Will be rounded up to the closest page boundrary.
417 * @param pszDesc Mapping description.
418 * @param pGCPtr Where to store the GC address.
419 */
420MMR3DECL(int) MMR3HyperMapMMIO2(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, RTGCPHYS cb,
421 const char *pszDesc, PRTGCPTR pGCPtr)
422{
423 LogFlow(("MMR3HyperMapMMIO2: pDevIns=%p iRegion=%#x off=%VGp cb=%VGp pszDesc=%p:{%s} pGCPtr=%p\n",
424 pDevIns, iRegion, off, cb, pszDesc, pszDesc, pGCPtr));
425 int rc;
426
427 /*
428 * Validate input.
429 */
430 AssertReturn(pszDesc && *pszDesc, VERR_INVALID_PARAMETER);
431 AssertReturn(off + cb > off, VERR_INVALID_PARAMETER);
432 uint32_t const offPage = off & PAGE_OFFSET_MASK;
433 off &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
434 cb += offPage;
435 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
436 const RTGCPHYS offEnd = off + cb;
437 AssertReturn(offEnd > off, VERR_INVALID_PARAMETER);
438 for (RTGCPHYS offCur = off; offCur < offEnd; offCur += PAGE_SIZE)
439 {
440 RTHCPHYS HCPhys;
441 rc = PGMR3PhysMMIO2GetHCPhys(pVM, pDevIns, iRegion, offCur, &HCPhys);
442 AssertMsgRCReturn(rc, ("rc=%Rrc - iRegion=%d off=%RGp\n", rc, iRegion, off), rc);
443 }
444
445 /*
446 * Add the memory to the hypervisor area.
447 */
448 RTGCPTR GCPtr;
449 PMMLOOKUPHYPER pLookup;
450 rc = mmR3HyperMap(pVM, cb, pszDesc, &GCPtr, &pLookup);
451 if (VBOX_SUCCESS(rc))
452 {
453 pLookup->enmType = MMLOOKUPHYPERTYPE_MMIO2;
454 pLookup->u.MMIO2.pDevIns = pDevIns;
455 pLookup->u.MMIO2.iRegion = iRegion;
456 pLookup->u.MMIO2.off = off;
457
458 /*
459 * Update the page table.
460 */
461 if (pVM->mm.s.fPGMInitialized)
462 {
463 for (RTGCPHYS offCur = off; offCur < offEnd; offCur += PAGE_SIZE)
464 {
465 RTHCPHYS HCPhys;
466 rc = PGMR3PhysMMIO2GetHCPhys(pVM, pDevIns, iRegion, offCur, &HCPhys);
467 AssertRCReturn(rc, VERR_INTERNAL_ERROR);
468 rc = PGMMap(pVM, GCPtr + (offCur - off), HCPhys, PAGE_SIZE, 0);
469 if (VBOX_FAILURE(rc))
470 {
471 AssertMsgFailed(("rc=%Vrc offCur=%RGp %s\n", rc, offCur, pszDesc));
472 break;
473 }
474 }
475 }
476
477 if (VBOX_SUCCESS(rc) && pGCPtr)
478 *pGCPtr = GCPtr | offPage;
479 }
480 return rc;
481}
482
483
484
485
486/**
487 * Locks and Maps HC virtual memory into the hypervisor region in the GC.
488 *
489 * @return VBox status code.
490 *
491 * @param pVM VM handle.
492 * @param pvHC Host context address of the memory (may be not page aligned).
493 * @param cb Size of the memory. Will be rounded up to nearest page.
494 * @param fFree Set this if MM is responsible for freeing the memory using SUPPageFree.
495 * @param pszDesc Mapping description.
496 * @param pGCPtr Where to store the GC address corresponding to pvHC.
497 */
498MMR3DECL(int) MMR3HyperMapHCRam(PVM pVM, void *pvHC, size_t cb, bool fFree, const char *pszDesc, PRTGCPTR pGCPtr)
499{
500 LogFlow(("MMR3HyperMapHCRam: pvHc=%p cb=%d fFree=%d pszDesc=%p:{%s} pGCPtr=%p\n", pvHC, (int)cb, fFree, pszDesc, pszDesc, pGCPtr));
501
502 /*
503 * Validate input.
504 */
505 if ( !pvHC
506 || cb <= 0
507 || !pszDesc
508 || !*pszDesc)
509 {
510 AssertMsgFailed(("Invalid parameter\n"));
511 return VERR_INVALID_PARAMETER;
512 }
513
514 /*
515 * Page align address and size.
516 */
517 void *pvHCPage = (void *)((uintptr_t)pvHC & PAGE_BASE_HC_MASK);
518 cb += (uintptr_t)pvHC & PAGE_OFFSET_MASK;
519 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
520
521 /*
522 * Add the memory to the hypervisor area.
523 */
524 RTGCPTR GCPtr;
525 PMMLOOKUPHYPER pLookup;
526 int rc = mmR3HyperMap(pVM, cb, pszDesc, &GCPtr, &pLookup);
527 if (VBOX_SUCCESS(rc))
528 {
529 /*
530 * Lock the heap memory and tell PGM about the locked pages.
531 */
532 PMMLOCKEDMEM pLockedMem;
533 rc = mmR3LockMem(pVM, pvHCPage, cb, fFree ? MM_LOCKED_TYPE_HYPER : MM_LOCKED_TYPE_HYPER_NOFREE, &pLockedMem, false /* fSilentFailure */);
534 if (VBOX_SUCCESS(rc))
535 {
536 /* map the stuff into guest address space. */
537 if (pVM->mm.s.fPGMInitialized)
538 rc = mmR3MapLocked(pVM, pLockedMem, GCPtr, 0, ~(size_t)0, 0);
539 if (VBOX_SUCCESS(rc))
540 {
541 pLookup->enmType = MMLOOKUPHYPERTYPE_LOCKED;
542 pLookup->u.Locked.pvHC = pvHC;
543 pLookup->u.Locked.pvR0 = NIL_RTR0PTR;
544 pLookup->u.Locked.pLockedMem = pLockedMem;
545
546 /* done. */
547 GCPtr |= (uintptr_t)pvHC & PAGE_OFFSET_MASK;
548 *pGCPtr = GCPtr;
549 return rc;
550 }
551 /* Don't care about failure clean, we're screwed if this fails anyway. */
552 }
553 }
554
555 return rc;
556}
557
558
559/**
560 * Maps locked R3 virtual memory into the hypervisor region in the GC.
561 *
562 * @return VBox status code.
563 *
564 * @param pVM VM handle.
565 * @param pvR3 The ring-3 address of the memory, must be page aligned.
566 * @param pvR0 The ring-0 address of the memory, must be page aligned. (optional)
567 * @param cPages The number of pages.
568 * @param paPages The page descriptors.
569 * @param pszDesc Mapping description.
570 * @param pGCPtr Where to store the GC address corresponding to pvHC.
571 */
572MMR3DECL(int) MMR3HyperMapPages(PVM pVM, void *pvR3, RTR0PTR pvR0, size_t cPages, PCSUPPAGE paPages, const char *pszDesc, PRTGCPTR pGCPtr)
573{
574 LogFlow(("MMR3HyperMapPages: pvR3=%p pvR0=%p cPages=%zu paPages=%p pszDesc=%p:{%s} pGCPtr=%p\n",
575 pvR3, pvR0, cPages, paPages, pszDesc, pszDesc, pGCPtr));
576
577 /*
578 * Validate input.
579 */
580 AssertPtrReturn(pvR3, VERR_INVALID_POINTER);
581 AssertPtrReturn(paPages, VERR_INVALID_POINTER);
582 AssertReturn(cPages > 0, VERR_INVALID_PARAMETER);
583 AssertReturn(cPages < 1024, VERR_INVALID_PARAMETER);
584 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
585 AssertReturn(*pszDesc, VERR_INVALID_PARAMETER);
586 AssertPtrReturn(pGCPtr, VERR_INVALID_PARAMETER);
587
588 /*
589 * Add the memory to the hypervisor area.
590 */
591 RTGCPTR GCPtr;
592 PMMLOOKUPHYPER pLookup;
593 int rc = mmR3HyperMap(pVM, cPages << PAGE_SHIFT, pszDesc, &GCPtr, &pLookup);
594 if (VBOX_SUCCESS(rc))
595 {
596 /*
597 * Create a locked memory record and tell PGM about this.
598 */
599 PMMLOCKEDMEM pLockedMem = (PMMLOCKEDMEM)MMR3HeapAlloc(pVM, MM_TAG_MM, RT_OFFSETOF(MMLOCKEDMEM, aPhysPages[cPages]));
600 if (pLockedMem)
601 {
602 pLockedMem->pv = pvR3;
603 pLockedMem->cb = cPages << PAGE_SHIFT;
604 pLockedMem->eType = MM_LOCKED_TYPE_HYPER_PAGES;
605 memset(&pLockedMem->u, 0, sizeof(pLockedMem->u));
606 for (size_t i = 0; i < cPages; i++)
607 {
608 AssertReleaseReturn(paPages[i].Phys != 0 && paPages[i].Phys != NIL_RTHCPHYS && !(paPages[i].Phys & PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR);
609 pLockedMem->aPhysPages[i].Phys = paPages[i].Phys;
610 pLockedMem->aPhysPages[i].uReserved = (RTHCUINTPTR)pLockedMem;
611 }
612
613 /* map the stuff into guest address space. */
614 if (pVM->mm.s.fPGMInitialized)
615 rc = mmR3MapLocked(pVM, pLockedMem, GCPtr, 0, ~(size_t)0, 0);
616 if (VBOX_SUCCESS(rc))
617 {
618 pLookup->enmType = MMLOOKUPHYPERTYPE_LOCKED;
619 pLookup->u.Locked.pvHC = pvR3;
620 pLookup->u.Locked.pvR0 = pvR0;
621 pLookup->u.Locked.pLockedMem = pLockedMem;
622
623 /* done. */
624 *pGCPtr = GCPtr;
625 return rc;
626 }
627 /* Don't care about failure clean, we're screwed if this fails anyway. */
628 }
629 }
630
631 return rc;
632}
633
634
635/**
636 * Reserves a hypervisor memory area.
637 * Most frequent usage is fence pages and dynamically mappings like the guest PD and PDPTR.
638 *
639 * @return VBox status code.
640 *
641 * @param pVM VM handle.
642 * @param cb Size of the memory. Will be rounded up to nearest page.
643 * @param pszDesc Mapping description.
644 * @param pGCPtr Where to store the assigned GC address. Optional.
645 */
646MMR3DECL(int) MMR3HyperReserve(PVM pVM, unsigned cb, const char *pszDesc, PRTGCPTR pGCPtr)
647{
648 LogFlow(("MMR3HyperMapHCRam: cb=%d pszDesc=%p:{%s} pGCPtr=%p\n", (int)cb, pszDesc, pszDesc, pGCPtr));
649
650 /*
651 * Validate input.
652 */
653 if ( cb <= 0
654 || !pszDesc
655 || !*pszDesc)
656 {
657 AssertMsgFailed(("Invalid parameter\n"));
658 return VERR_INVALID_PARAMETER;
659 }
660
661 /*
662 * Add the memory to the hypervisor area.
663 */
664 RTGCPTR GCPtr;
665 PMMLOOKUPHYPER pLookup;
666 int rc = mmR3HyperMap(pVM, cb, pszDesc, &GCPtr, &pLookup);
667 if (VBOX_SUCCESS(rc))
668 {
669 pLookup->enmType = MMLOOKUPHYPERTYPE_DYNAMIC;
670 if (pGCPtr)
671 *pGCPtr = GCPtr;
672 return VINF_SUCCESS;
673 }
674 return rc;
675}
676
677
/**
 * Adds memory to the hypervisor memory arena.
 *
 * @return VBox status code.
 * @param   pVM         The VM handle.
 * @param   cb          Size of the memory. Will be rounded up to nearest page.
 * @param   pszDesc     The description of the memory.
 * @param   pGCPtr      Where to store the GC address.
 * @param   ppLookup    Where to store the pointer to the lookup record.
 * @remark  We assume the threading structure of VBox imposes natural
 *          serialization of most functions, this one included.
 */
static int mmR3HyperMap(PVM pVM, const size_t cb, const char *pszDesc, PRTGCPTR pGCPtr, PMMLOOKUPHYPER *ppLookup)
{
    /*
     * Validate input.
     */
    const uint32_t cbAligned = RT_ALIGN(cb, PAGE_SIZE);
    AssertReturn(cbAligned >= cb, VERR_INVALID_PARAMETER); /* guards against 32-bit overflow in the rounding */
    if (pVM->mm.s.offHyperNextStatic + cbAligned >= pVM->mm.s.cbHyperArea) /* don't use the last page, it's a fence. */
    {
        AssertMsgFailed(("Out of static mapping space in the HMA! offHyperAreaGC=%x cbAligned=%x\n",
                         pVM->mm.s.offHyperNextStatic, cbAligned));
        return VERR_NO_MEMORY;
    }

    /*
     * Allocate lookup record.
     */
    PMMLOOKUPHYPER pLookup;
    int rc = MMHyperAlloc(pVM, sizeof(*pLookup), 1, MM_TAG_MM, (void **)&pLookup);
    if (VBOX_SUCCESS(rc))
    {
        /*
         * Initialize it and insert it at the head of the lookup list.
         *
         * The list uses byte offsets instead of pointers so it survives
         * relocation: mm.s.offLookupHyper is the head record's offset from
         * the heap start, and each record's offNext is relative to the
         * record itself. NIL_OFFSET terminates the chain. The new record is
         * linked in by first storing the old head offset (heap relative)
         * and then rebasing it to be record relative.
         */
        pLookup->offNext = pVM->mm.s.offLookupHyper;
        pLookup->cb      = cbAligned;
        pLookup->off     = pVM->mm.s.offHyperNextStatic;
        pVM->mm.s.offLookupHyper = (char *)pLookup - (char *)pVM->mm.s.pHyperHeapHC;
        if (pLookup->offNext != (int32_t)NIL_OFFSET)
            pLookup->offNext -= pVM->mm.s.offLookupHyper;
        pLookup->enmType = MMLOOKUPHYPERTYPE_INVALID; /* caller fills in the real type + union */
        memset(&pLookup->u, 0xff, sizeof(pLookup->u));
        pLookup->pszDesc = pszDesc;

        /* Mapping: hand out the next free chunk of the static area. */
        *pGCPtr = pVM->mm.s.pvHyperAreaGC + pVM->mm.s.offHyperNextStatic;
        pVM->mm.s.offHyperNextStatic += cbAligned;

        /* Return pointer. */
        *ppLookup = pLookup;
    }

    AssertRC(rc);
    LogFlow(("mmR3HyperMap: returns %Vrc *pGCPtr=%VGv\n", rc, *pGCPtr));
    return rc;
}
736
737
/**
 * Allocates a new heap.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 * @param   cb      The size of the new heap.
 * @param   ppHeap  Where to store the heap pointer on successful return.
 */
static int mmR3HyperHeapCreate(PVM pVM, const size_t cb, PMMHYPERHEAP *ppHeap)
{
    /*
     * Allocate the hypervisor heap.
     */
    const uint32_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertReturn(cbAligned >= cb, VERR_INVALID_PARAMETER); /* guards against overflow in the rounding */
    void *pv;
    int rc = SUPPageAlloc(cbAligned >> PAGE_SHIFT, &pv);
    if (VBOX_SUCCESS(rc))
    {
        /*
         * Initialize the heap and first free chunk.
         *
         * Layout: the MMHYPERHEAP header sits at the start of the allocation
         * and the heap proper (pbHeapHC) begins MMYPERHEAP_HDR_SIZE bytes in;
         * the whole heap area initially forms one free chunk.
         */
        PMMHYPERHEAP pHeap = (PMMHYPERHEAP)pv;
        pHeap->u32Magic = MMHYPERHEAP_MAGIC;
        pHeap->pVMHC = pVM;
        pHeap->pVMGC = pVM->pVMGC;
        pHeap->pbHeapHC = (uint8_t *)pHeap + MMYPERHEAP_HDR_SIZE;
        //pHeap->pbHeapGC = 0; // set by mmR3HyperHeapMap()
        pHeap->cbHeap = cbAligned - MMYPERHEAP_HDR_SIZE;
        pHeap->cbFree = pHeap->cbHeap - sizeof(MMHYPERCHUNK);
        //pHeap->offFreeHead = 0;
        //pHeap->offFreeTail = 0;
        pHeap->offPageAligned = pHeap->cbHeap;
        //pHeap->HyperHeapStatTree = 0;

        /* The single initial free chunk covering the whole heap; offHeap is a
           negative offset back to the heap header. */
        PMMHYPERCHUNKFREE pFree = (PMMHYPERCHUNKFREE)pHeap->pbHeapHC;
        pFree->cb = pHeap->cbFree;
        //pFree->core.offNext = 0;
        MMHYPERCHUNK_SET_TYPE(&pFree->core, MMHYPERCHUNK_FLAGS_FREE);
        pFree->core.offHeap = -(int32_t)MMYPERHEAP_HDR_SIZE;
        //pFree->offNext = 0;
        //pFree->offPrev = 0;

        STAMR3Register(pVM, &pHeap->cbHeap, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, "/MM/HyperHeap/cbHeap", STAMUNIT_BYTES, "The heap size.");
        STAMR3Register(pVM, &pHeap->cbFree, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, "/MM/HyperHeap/cbFree", STAMUNIT_BYTES, "The free space.");

        *ppHeap = pHeap;
        return VINF_SUCCESS;
    }
    AssertMsgFailed(("SUPPageAlloc(%d,) -> %Vrc\n", cbAligned >> PAGE_SHIFT, rc));

    *ppHeap = NULL;
    return rc;
}
792
793
794/**
795 * Allocates a new heap.
796 */
797static int mmR3HyperHeapMap(PVM pVM, PMMHYPERHEAP pHeap, PRTGCPTR ppHeapGC)
798{
799 int rc = MMR3HyperMapHCRam(pVM, pHeap, pHeap->cbHeap + MMYPERHEAP_HDR_SIZE, true, "Heap", ppHeapGC);
800 if (VBOX_SUCCESS(rc))
801 {
802 pHeap->pVMGC = pVM->pVMGC;
803 pHeap->pbHeapGC = *ppHeapGC + MMYPERHEAP_HDR_SIZE;
804 /* Reserve a page for fencing. */
805 MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
806 }
807 return rc;
808}
809
810
#if 0
/**
 * Destroys a heap.
 *
 * Intentionally a stub (compiled out): the heap memory is released as part
 * of the generic locked-memory unlock/free path, so there is nothing to do
 * here.
 */
static int mmR3HyperHeapDestroy(PVM pVM, PMMHYPERHEAP pHeap)
{
    /* all this is dealt with when unlocking and freeing locked memory. */
}
#endif
820
821
822/**
823 * Allocates memory in the Hypervisor (GC VMM) area which never will
824 * be freed and doesn't have any offset based relation to other heap blocks.
825 *
826 * The latter means that two blocks allocated by this API will not have the
827 * same relative position to each other in GC and HC. In short, never use
828 * this API for allocating nodes for an offset based AVL tree!
829 *
830 * The returned memory is of course zeroed.
831 *
832 * @returns VBox status code.
833 * @param pVM The VM to operate on.
834 * @param cb Number of bytes to allocate.
835 * @param uAlignment Required memory alignment in bytes.
836 * Values are 0,8,16,32 and PAGE_SIZE.
837 * 0 -> default alignment, i.e. 8 bytes.
838 * @param enmTag The statistics tag.
839 * @param ppv Where to store the address to the allocated
840 * memory.
841 * @remark This is assumed not to be used at times when serialization is required.
842 */
843MMDECL(int) MMR3HyperAllocOnceNoRel(PVM pVM, size_t cb, unsigned uAlignment, MMTAG enmTag, void **ppv)
844{
845 AssertMsg(cb >= 8, ("Hey! Do you really mean to allocate less than 8 bytes?! cb=%d\n", cb));
846
847 /*
848 * Choose between allocating a new chunk of HMA memory
849 * and the heap. We will only do BIG allocations from HMA.
850 */
851 if ( cb < _64K
852 && ( uAlignment != PAGE_SIZE
853 || cb < 48*_1K))
854 {
855 int rc = MMHyperAlloc(pVM, cb, uAlignment, enmTag, ppv);
856 if ( rc != VERR_MM_HYPER_NO_MEMORY
857 || cb <= 8*_1K)
858 {
859 Log2(("MMR3HyperAllocOnceNoRel: cb=%#zx uAlignment=%#x returns %Rrc and *ppv=%p\n",
860 cb, uAlignment, rc, *ppv));
861 return rc;
862 }
863 }
864
865 /*
866 * Validate alignment.
867 */
868 switch (uAlignment)
869 {
870 case 0:
871 case 8:
872 case 16:
873 case 32:
874 case PAGE_SIZE:
875 break;
876 default:
877 AssertMsgFailed(("Invalid alignment %u\n", uAlignment));
878 return VERR_INVALID_PARAMETER;
879 }
880
881 /*
882 * Allocate the pages and the HMA space.
883 */
884 cb = RT_ALIGN(cb, PAGE_SIZE);
885 void *pvPages;
886 int rc = SUPPageAlloc(cb >> PAGE_SHIFT, &pvPages);
887 if (VBOX_SUCCESS(rc))
888 {
889 RTGCPTR GCPtr;
890 rc = MMR3HyperMapHCRam(pVM, pvPages, cb, true, mmR3GetTagName(enmTag), &GCPtr);
891 if (VBOX_SUCCESS(rc))
892 {
893 *ppv = pvPages;
894 Log2(("MMR3HyperAllocOnceNoRel: cb=%#x uAlignment=%#x returns VINF_SUCCESS and *ppv=%p\n",
895 cb, uAlignment, *ppv));
896 return rc;
897 }
898 AssertMsgFailed(("Failed to allocate %zd bytes! %Rrc\n", cb, rc));
899 SUPPageFree(pvPages, cb >> PAGE_SHIFT);
900
901 /*
902 * HACK ALERT! Try allocate it off the heap so that we don't freak
903 * out during vga/vmmdev mmio2 allocation with certain ram sizes.
904 */
905 /** @todo make a proper fix for this so we will never end up in this kind of situation! */
906 Log(("MMR3HyperAllocOnceNoRel: MMR3HyperMapHCRam failed with rc=%Rrc, try MMHyperAlloc(,%#d,,) instead\n", rc, cb));
907 int rc2 = MMHyperAlloc(pVM, cb, uAlignment, enmTag, ppv);
908 if (RT_SUCCESS(rc2))
909 {
910 Log2(("MMR3HyperAllocOnceNoRel: cb=%#x uAlignment=%#x returns %Rrc and *ppv=%p\n",
911 cb, uAlignment, rc, *ppv));
912 return rc;
913 }
914 }
915 else
916 AssertMsgFailed(("Failed to allocate %zd bytes! %Rrc\n", cb, rc));
917
918 if (rc == VERR_NO_MEMORY)
919 rc = VERR_MM_HYPER_NO_MEMORY;
920 LogRel(("MMR3HyperAllocOnceNoRel: cb=%#zx uAlignment=%#x returns %Rrc\n", cb, uAlignment, rc));
921 return rc;
922}
923
924
/**
 * Convert hypervisor HC virtual address to HC physical address.
 *
 * @returns HC physical address, NIL_RTHCPHYS if the address is not inside
 *          any convertible HMA record.
 * @param   pVM     VM Handle
 * @param   pvHC    Host context virtual address to convert.
 */
MMR3DECL(RTHCPHYS) MMR3HyperHCVirt2HCPhys(PVM pVM, void *pvHC)
{
    /* Linear scan of the offset-chained lookup list (see mmR3HyperMap). */
    PMMLOOKUPHYPER pLookup = (PMMLOOKUPHYPER)((char*)pVM->mm.s.pHyperHeapHC + pVM->mm.s.offLookupHyper);
    for (;;)
    {
        switch (pLookup->enmType)
        {
            case MMLOOKUPHYPERTYPE_LOCKED:
            {
                /* Note: 'off' wraps to a huge unsigned value when pvHC is below
                   the record base, so the single '< cb' check covers both ends. */
                unsigned off = (char *)pvHC - (char *)pLookup->u.Locked.pvHC;
                if (off < pLookup->cb)
                    return (pLookup->u.Locked.pLockedMem->aPhysPages[off >> PAGE_SHIFT].Phys & X86_PTE_PAE_PG_MASK) | (off & PAGE_OFFSET_MASK);
                break;
            }

            case MMLOOKUPHYPERTYPE_HCPHYS:
            {
                /* Contiguous physical backing: simple base + offset. */
                unsigned off = (char *)pvHC - (char *)pLookup->u.HCPhys.pvHC;
                if (off < pLookup->cb)
                    return pLookup->u.HCPhys.HCPhys + off;
                break;
            }

            case MMLOOKUPHYPERTYPE_GCPHYS:
            case MMLOOKUPHYPERTYPE_MMIO2:
            case MMLOOKUPHYPERTYPE_DYNAMIC:
                /* can (or don't want to) convert these kind of records. */
                break;

            default:
                AssertMsgFailed(("enmType=%d\n", pLookup->enmType));
                break;
        }

        /* next */
        if ((unsigned)pLookup->offNext == NIL_OFFSET)
            break;
        pLookup = (PMMLOOKUPHYPER)((char *)pLookup + pLookup->offNext);
    }

    AssertMsgFailed(("pvHC=%p is not inside the hypervisor memory area!\n", pvHC));
    return NIL_RTHCPHYS;
}
975
976
977#if 0 /* unused, not implemented */
/**
 * Convert hypervisor HC physical address to HC virtual address.
 *
 * Compiled out (enclosing #if 0): unused and the Ex worker below is not
 * implemented.
 *
 * @returns HC virtual address, NULL on failure.
 * @param   pVM     VM Handle
 * @param   HCPhys  Host context physical address.
 */
MMR3DECL(void *) MMR3HyperHCPhys2HCVirt(PVM pVM, RTHCPHYS HCPhys)
{
    void *pv;
    int rc = MMR3HyperHCPhys2HCVirtEx(pVM, HCPhys, &pv);
    if (VBOX_SUCCESS(rc))
        return pv;
    AssertMsgFailed(("Invalid address HCPhys=%x rc=%d\n", HCPhys, rc));
    return NULL;
}
994
995
996/**
997 * Convert hypervisor HC physical address to HC virtual address.
998 *
999 * @returns VBox status.
1000 * @param pVM VM Handle
1001 * @param HCPhys Host context physical address.
1002 * @param ppv Where to store the HC virtual address.
1003 */
1004MMR3DECL(int) MMR3HyperHCPhys2HCVirtEx(PVM pVM, RTHCPHYS HCPhys, void **ppv)
1005{
1006 /*
1007 * Linear search.
1008 */
1009 /** @todo implement when actually used. */
1010 return VERR_INVALID_POINTER;
1011}
1012#endif /* unused, not implemented */
1013
1014
1015/**
1016 * Read hypervisor memory from GC virtual address.
1017 *
1018 * @returns VBox status.
1019 * @param pVM VM handle.
1020 * @param pvDst Destination address (HC of course).
1021 * @param GCPtr GC virtual address.
1022 * @param cb Number of bytes to read.
1023 */
1024MMR3DECL(int) MMR3HyperReadGCVirt(PVM pVM, void *pvDst, RTGCPTR GCPtr, size_t cb)
1025{
1026 if (GCPtr - pVM->mm.s.pvHyperAreaGC >= pVM->mm.s.cbHyperArea)
1027 return VERR_INVALID_PARAMETER;
1028 return PGMR3MapRead(pVM, pvDst, GCPtr, cb);
1029}
1030
1031
/**
 * Info handler for 'hma', it dumps the list of lookup records for the hypervisor memory area.
 *
 * Each record is printed as one line: GC range, HC address (when known),
 * type tag, type-specific detail, and the description string.
 *
 * @param   pVM     The VM handle.
 * @param   pHlp    Callback functions for doing output.
 * @param   pszArgs Argument string. Optional and specific to the handler.
 */
static DECLCALLBACK(void) mmR3HyperInfoHma(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
{
    pHlp->pfnPrintf(pHlp, "Hypervisor Memory Area (HMA) Layout: Base %VGv, 0x%08x bytes\n",
                    pVM->mm.s.pvHyperAreaGC, pVM->mm.s.cbHyperArea);

    /* The lookup records form an offset-linked list off the hyper heap. */
    PMMLOOKUPHYPER pLookup = (PMMLOOKUPHYPER)((char*)pVM->mm.s.pHyperHeapHC + pVM->mm.s.offLookupHyper);
    for (;;)
    {
        switch (pLookup->enmType)
        {
            case MMLOOKUPHYPERTYPE_LOCKED:
                pHlp->pfnPrintf(pHlp, "%VGv-%VGv %VHv LOCKED  %-*s %s\n",
                                pLookup->off + pVM->mm.s.pvHyperAreaGC,
                                pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
                                pLookup->u.Locked.pvHC,
                                /* pad the detail column to HC-pointer width so all rows align */
                                sizeof(RTHCPTR) * 2,
                                pLookup->u.Locked.pLockedMem->eType == MM_LOCKED_TYPE_HYPER_NOFREE ? "nofree"
                                : pLookup->u.Locked.pLockedMem->eType == MM_LOCKED_TYPE_HYPER       ? "autofree"
                                : pLookup->u.Locked.pLockedMem->eType == MM_LOCKED_TYPE_HYPER_PAGES ? "pages"
                                : pLookup->u.Locked.pLockedMem->eType == MM_LOCKED_TYPE_PHYS        ? "gstphys"
                                : "??",
                                pLookup->pszDesc);
                break;

            case MMLOOKUPHYPERTYPE_HCPHYS:
                pHlp->pfnPrintf(pHlp, "%VGv-%VGv %VHv HCPHYS  %VHp %s\n",
                                pLookup->off + pVM->mm.s.pvHyperAreaGC,
                                pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
                                pLookup->u.HCPhys.pvHC, pLookup->u.HCPhys.HCPhys,
                                pLookup->pszDesc);
                break;

            case MMLOOKUPHYPERTYPE_GCPHYS:
                /* No HC address for this type: print an empty field of HC-pointer
                   width, and pad the GC physical address out to HC-phys width. */
                pHlp->pfnPrintf(pHlp, "%VGv-%VGv %*s GCPHYS  %VGp%*s %s\n",
                                pLookup->off + pVM->mm.s.pvHyperAreaGC,
                                pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
                                sizeof(RTHCPTR) * 2, "",
                                pLookup->u.GCPhys.GCPhys, RT_ABS(sizeof(RTHCPHYS) - sizeof(RTGCPHYS)) * 2, "",
                                pLookup->pszDesc);
                break;

            case MMLOOKUPHYPERTYPE_MMIO2:
                /* Same column layout as GCPHYS, but the detail is the MMIO2 offset. */
                pHlp->pfnPrintf(pHlp, "%VGv-%VGv %*s MMIO2   %VGp%*s %s\n",
                                pLookup->off + pVM->mm.s.pvHyperAreaGC,
                                pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
                                sizeof(RTHCPTR) * 2, "",
                                pLookup->u.MMIO2.off, RT_ABS(sizeof(RTHCPHYS) - sizeof(RTGCPHYS)) * 2, "",
                                pLookup->pszDesc);
                break;

            case MMLOOKUPHYPERTYPE_DYNAMIC:
                /* Neither HC address nor detail: both columns print empty padding. */
                pHlp->pfnPrintf(pHlp, "%VGv-%VGv %*s DYNAMIC %*s %s\n",
                                pLookup->off + pVM->mm.s.pvHyperAreaGC,
                                pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
                                sizeof(RTHCPTR) * 2, "",
                                sizeof(RTHCPTR) * 2, "",
                                pLookup->pszDesc);
                break;

            default:
                AssertMsgFailed(("enmType=%d\n", pLookup->enmType));
                break;
        }

        /* next - NIL_OFFSET terminates the offset-linked list. */
        if ((unsigned)pLookup->offNext == NIL_OFFSET)
            break;
        pLookup = (PMMLOOKUPHYPER)((char *)pLookup + pLookup->offNext);
    }
}
1109
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette