VirtualBox

source: vbox/trunk/src/VBox/VMM/MMHyper.cpp@19722

Last change on this file since 19722 was 19682, checked in by vboxsync, 16 years ago

Try to cleanup after one VCPU goes into guru meditation mode. Release all owned locks so the other VCPUs will be unblocked.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 41.1 KB
/* $Id: MMHyper.cpp 19682 2009-05-14 10:15:44Z vboxsync $ */
/** @file
 * MM - Memory Manager - Hypervisor Memory Area.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_MM_HYPER
#include <VBox/pgm.h>
#include <VBox/mm.h>
#include <VBox/dbgf.h>
#include "MMInternal.h"
#include <VBox/vm.h>
#include <VBox/err.h>
#include <VBox/param.h>
#include <VBox/log.h>
#include <iprt/alloc.h>
#include <iprt/assert.h>
#include <iprt/string.h>


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static DECLCALLBACK(bool) mmR3HyperRelocateCallback(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew, PGMRELOCATECALL enmMode, void *pvUser);
static int mmR3HyperMap(PVM pVM, const size_t cb, const char *pszDesc, PRTGCPTR pGCPtr, PMMLOOKUPHYPER *ppLookup);
static int mmR3HyperHeapCreate(PVM pVM, const size_t cb, PMMHYPERHEAP *ppHeap, PRTR0PTR pR0PtrHeap);
static int mmR3HyperHeapMap(PVM pVM, PMMHYPERHEAP pHeap, PRTGCPTR ppHeapGC);
static DECLCALLBACK(void) mmR3HyperInfoHma(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);




/**
 * Initializes the hypervisor related MM stuff without
 * calling down to PGM.
 *
 * PGM is not initialized at this point; PGM relies on
 * the heap to initialize.
 *
 * @returns VBox status.
 */
int mmR3HyperInit(PVM pVM)
{
    LogFlow(("mmR3HyperInit:\n"));

    /*
     * Decide the hypervisor mapping in the guest context
     * and set up various hypervisor area and heap parameters.
     */
    pVM->mm.s.pvHyperAreaGC = (RTGCPTR)MM_HYPER_AREA_ADDRESS;
    pVM->mm.s.cbHyperArea   = MM_HYPER_AREA_MAX_SIZE;
    AssertRelease(RT_ALIGN_T(pVM->mm.s.pvHyperAreaGC, 1 << X86_PD_SHIFT, RTGCPTR) == pVM->mm.s.pvHyperAreaGC);
    Assert(pVM->mm.s.pvHyperAreaGC < 0xff000000);

    /** @todo @bugref{1865}, @bugref{3202}: Change the cbHyperHeap default
     *        depending on whether VT-x/AMD-V is enabled or not! Don't waste
     *        precious kernel space on heap for the PATM.
     */
    uint32_t cbHyperHeap;
    int rc = CFGMR3QueryU32(CFGMR3GetChild(CFGMR3GetRoot(pVM), "MM"), "cbHyperHeap", &cbHyperHeap);
    if (rc == VERR_CFGM_NO_PARENT || rc == VERR_CFGM_VALUE_NOT_FOUND)
        cbHyperHeap = VMMIsHwVirtExtForced(pVM)
                    ? 640*_1K
                    : 1280*_1K;
    else
        AssertLogRelRCReturn(rc, rc);
    cbHyperHeap = RT_ALIGN_32(cbHyperHeap, PAGE_SIZE);
    LogRel(("MM: cbHyperHeap=%#x (%u)\n", cbHyperHeap, cbHyperHeap));

    /*
     * Allocate the hypervisor heap.
     *
     * (This must be done before we start adding memory to the
     * hypervisor static area because lookup records are allocated from it.)
     */
    rc = mmR3HyperHeapCreate(pVM, cbHyperHeap, &pVM->mm.s.pHyperHeapR3, &pVM->mm.s.pHyperHeapR0);
    if (RT_SUCCESS(rc))
    {
        /*
         * Make a small head fence to fend off accidental sequential access.
         */
        MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);

        /*
         * Map the VM structure into the hypervisor space.
         */
        AssertRelease(pVM->cbSelf == RT_UOFFSETOF(VM, aCpus[pVM->cCPUs]));
        RTGCPTR GCPtr;
        rc = MMR3HyperMapPages(pVM, pVM, pVM->pVMR0, RT_ALIGN_Z(pVM->cbSelf, PAGE_SIZE) >> PAGE_SHIFT, pVM->paVMPagesR3, "VM", &GCPtr);
        if (RT_SUCCESS(rc))
        {
            pVM->pVMRC = (RTRCPTR)GCPtr;
            for (uint32_t i = 0; i < pVM->cCPUs; i++)
                pVM->aCpus[i].pVMRC = pVM->pVMRC;

            /* Reserve a page for fencing. */
            MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);

            /*
             * Map the heap into the hypervisor space.
             */
            rc = mmR3HyperHeapMap(pVM, pVM->mm.s.pHyperHeapR3, &GCPtr);
            if (RT_SUCCESS(rc))
            {
                pVM->mm.s.pHyperHeapRC = (RTRCPTR)GCPtr;
                Assert(pVM->mm.s.pHyperHeapRC == GCPtr);

                /*
                 * Register info handlers.
                 */
                DBGFR3InfoRegisterInternal(pVM, "hma", "Show the layout of the Hypervisor Memory Area.", mmR3HyperInfoHma);

                LogFlow(("mmR3HyperInit: returns VINF_SUCCESS\n"));
                return VINF_SUCCESS;
            }
            /* Caller will do proper cleanup. */
        }
    }

    LogFlow(("mmR3HyperInit: returns %Rrc\n", rc));
    return rc;
}

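/*
 * Editor's note: an illustrative sketch, not part of the original file. It
 * shows how the "MM/cbHyperHeap" value queried by mmR3HyperInit() above could
 * be supplied through CFGM before MM init runs; the function name and the
 * 2 MB size are made up for the example.
 */
#if 0 /* example only */
static int exampleConfigureHyperHeapSize(PVM pVM)
{
    PCFGMNODE pMM;
    int rc = CFGMR3InsertNode(CFGMR3GetRoot(pVM), "MM", &pMM);
    if (RT_SUCCESS(rc))
        rc = CFGMR3InsertInteger(pMM, "cbHyperHeap", 2*_1M); /* overrides the 640K/1280K defaults picked above */
    return rc;
}
#endif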

/**
 * Cleans up the hypervisor heap.
 *
 * @returns VBox status.
 */
int mmR3HyperTerm(PVM pVM)
{
    if (pVM->mm.s.pHyperHeapR3)
        PDMR3CritSectDelete(&pVM->mm.s.pHyperHeapR3->Lock);

    return VINF_SUCCESS;
}


/**
 * Finalizes the HMA mapping.
 *
 * This is called later during init; most (if not all) HMA allocations should
 * be done by the time this function is called.
 *
 * @returns VBox status.
 */
VMMR3DECL(int) MMR3HyperInitFinalize(PVM pVM)
{
    LogFlow(("MMR3HyperInitFinalize:\n"));

    /*
     * Initialize the hyper heap critical section.
     */
    int rc = PDMR3CritSectInit(pVM, &pVM->mm.s.pHyperHeapR3->Lock, "MM-HYPER");
    AssertRC(rc);

    /*
     * Adjust and create the HMA mapping.
     */
    while ((RTINT)pVM->mm.s.offHyperNextStatic + 64*_1K < (RTINT)pVM->mm.s.cbHyperArea - _4M)
        pVM->mm.s.cbHyperArea -= _4M;
    rc = PGMR3MapPT(pVM, pVM->mm.s.pvHyperAreaGC, pVM->mm.s.cbHyperArea, 0 /*fFlags*/,
                    mmR3HyperRelocateCallback, NULL, "Hypervisor Memory Area");
    if (RT_FAILURE(rc))
        return rc;
    pVM->mm.s.fPGMInitialized = true;

    /*
     * Do all the delayed mappings.
     */
    PMMLOOKUPHYPER pLookup = (PMMLOOKUPHYPER)((uintptr_t)pVM->mm.s.pHyperHeapR3 + pVM->mm.s.offLookupHyper);
    for (;;)
    {
        RTGCPTR     GCPtr  = pVM->mm.s.pvHyperAreaGC + pLookup->off;
        uint32_t    cPages = pLookup->cb >> PAGE_SHIFT;
        switch (pLookup->enmType)
        {
            case MMLOOKUPHYPERTYPE_LOCKED:
            {
                PCRTHCPHYS paHCPhysPages = pLookup->u.Locked.paHCPhysPages;
                for (uint32_t i = 0; i < cPages; i++)
                {
                    rc = PGMMap(pVM, GCPtr + (i << PAGE_SHIFT), paHCPhysPages[i], PAGE_SIZE, 0);
                    AssertRCReturn(rc, rc);
                }
                break;
            }

            case MMLOOKUPHYPERTYPE_HCPHYS:
                rc = PGMMap(pVM, GCPtr, pLookup->u.HCPhys.HCPhys, pLookup->cb, 0);
                break;

            case MMLOOKUPHYPERTYPE_GCPHYS:
            {
                const RTGCPHYS GCPhys = pLookup->u.GCPhys.GCPhys;
                const uint32_t cb     = pLookup->cb;
                for (uint32_t off = 0; off < cb; off += PAGE_SIZE)
                {
                    RTHCPHYS HCPhys;
                    rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys + off, &HCPhys);
                    if (RT_FAILURE(rc))
                        break;
                    rc = PGMMap(pVM, GCPtr + off, HCPhys, PAGE_SIZE, 0);
                    if (RT_FAILURE(rc))
                        break;
                }
                break;
            }

            case MMLOOKUPHYPERTYPE_MMIO2:
            {
                const RTGCPHYS offEnd = pLookup->u.MMIO2.off + pLookup->cb;
                for (RTGCPHYS offCur = pLookup->u.MMIO2.off; offCur < offEnd; offCur += PAGE_SIZE)
                {
                    RTHCPHYS HCPhys;
                    rc = PGMR3PhysMMIO2GetHCPhys(pVM, pLookup->u.MMIO2.pDevIns, pLookup->u.MMIO2.iRegion, offCur, &HCPhys);
                    if (RT_FAILURE(rc))
                        break;
                    rc = PGMMap(pVM, GCPtr + (offCur - pLookup->u.MMIO2.off), HCPhys, PAGE_SIZE, 0);
                    if (RT_FAILURE(rc))
                        break;
                }
                break;
            }

            case MMLOOKUPHYPERTYPE_DYNAMIC:
                /* do nothing here since these are either fences or managed by someone else using PGM. */
                break;

            default:
                AssertMsgFailed(("enmType=%d\n", pLookup->enmType));
                break;
        }

        if (RT_FAILURE(rc))
        {
            AssertMsgFailed(("rc=%Rrc cb=%d off=%#RX32 enmType=%d pszDesc=%s\n",
                             rc, pLookup->cb, pLookup->off, pLookup->enmType, pLookup->pszDesc));
            return rc;
        }

        /* next */
        if (pLookup->offNext == (int32_t)NIL_OFFSET)
            break;
        pLookup = (PMMLOOKUPHYPER)((uintptr_t)pLookup + pLookup->offNext);
    }

    LogFlow(("MMR3HyperInitFinalize: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}


/**
 * Callback function which will be called when PGM is trying to find
 * a new location for the mapping.
 *
 * The callback is called in two modes, 1) the check mode and 2) the relocate mode.
 * In 1) the callback should say if it objects to a suggested new location. If it
 * accepts the new location, it is called again for doing its relocation.
 *
 *
 * @returns true if the location is ok.
 * @returns false if another location should be found.
 * @param   pVM         The VM handle.
 * @param   GCPtrOld    The old virtual address.
 * @param   GCPtrNew    The new virtual address.
 * @param   enmMode     Used to indicate the callback mode.
 * @param   pvUser      User argument. Ignored.
 * @remark  The return value is not a failure indicator, it's an acceptance
 *          indicator. Relocation cannot fail!
 */
static DECLCALLBACK(bool) mmR3HyperRelocateCallback(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew, PGMRELOCATECALL enmMode, void *pvUser)
{
    switch (enmMode)
    {
        /*
         * Verify location - all locations are good for us.
         */
        case PGMRELOCATECALL_SUGGEST:
            return true;

        /*
         * Execute the relocation.
         */
        case PGMRELOCATECALL_RELOCATE:
        {
            /*
             * Accepted!
             */
            AssertMsg(GCPtrOld == pVM->mm.s.pvHyperAreaGC, ("GCPtrOld=%RGv pVM->mm.s.pvHyperAreaGC=%RGv\n", GCPtrOld, pVM->mm.s.pvHyperAreaGC));
            Log(("Relocating the hypervisor from %RGv to %RGv\n", GCPtrOld, GCPtrNew));

            /*
             * Relocate the VM structure and ourselves.
             */
            RTGCINTPTR offDelta = GCPtrNew - GCPtrOld;
            pVM->pVMRC += offDelta;
            for (uint32_t i = 0; i < pVM->cCPUs; i++)
                pVM->aCpus[i].pVMRC = pVM->pVMRC;

            pVM->mm.s.pvHyperAreaGC += offDelta;
            Assert(pVM->mm.s.pvHyperAreaGC < _4G);
            pVM->mm.s.pHyperHeapRC += offDelta;
            pVM->mm.s.pHyperHeapR3->pbHeapRC += offDelta;
            pVM->mm.s.pHyperHeapR3->pVMRC = pVM->pVMRC;

            /*
             * Relocate the rest.
             */
            VMR3Relocate(pVM, offDelta);
            return true;
        }

        default:
            AssertMsgFailed(("Invalid relocation mode %d\n", enmMode));
    }

    return false;
}

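/*
 * Editor's note: an illustrative sketch, not part of the original file. It
 * spells out the two-phase contract of the relocation callback above: PGM
 * first asks with PGMRELOCATECALL_SUGGEST whether a new location is
 * acceptable, then calls again with PGMRELOCATECALL_RELOCATE to have the
 * owner update its pointers. A minimal callback for a mapping that must not
 * move might look like this:
 */
#if 0 /* example only */
static DECLCALLBACK(bool) examplePinnedRelocateCallback(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew,
                                                        PGMRELOCATECALL enmMode, void *pvUser)
{
    switch (enmMode)
    {
        case PGMRELOCATECALL_SUGGEST:
            return false;   /* Reject every suggestion; the mapping stays where it is. */
        case PGMRELOCATECALL_RELOCATE:
            /* Must never get here since every suggestion was rejected. */
            AssertMsgFailed(("Unexpected relocate call\n"));
            return false;
        default:
            AssertMsgFailed(("enmMode=%d\n", enmMode));
            return false;
    }
}
#endif
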
/**
 * Service a VMMCALLHOST_MMHYPER_LOCK call.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 */
VMMR3DECL(int) MMR3LockCall(PVM pVM)
{
    PMMHYPERHEAP pHeap = pVM->mm.s.CTX_SUFF(pHyperHeap);

    int rc = PDMR3CritSectEnterEx(&pHeap->Lock, true /* fHostCall */);
    AssertRC(rc);
    return rc;
}

/**
 * Maps contiguous HC physical memory into the hypervisor region in the GC.
 *
 * @return VBox status code.
 *
 * @param   pVM         VM handle.
 * @param   pvR3        Ring-3 address of the memory. Must be page aligned!
 * @param   pvR0        Optional ring-0 address of the memory.
 * @param   HCPhys      Host context physical address of the memory to be
 *                      mapped. Must be page aligned!
 * @param   cb          Size of the memory. Will be rounded up to nearest page.
 * @param   pszDesc     Description.
 * @param   pGCPtr      Where to store the GC address.
 */
VMMR3DECL(int) MMR3HyperMapHCPhys(PVM pVM, void *pvR3, RTR0PTR pvR0, RTHCPHYS HCPhys, size_t cb, const char *pszDesc, PRTGCPTR pGCPtr)
{
    LogFlow(("MMR3HyperMapHCPhys: pvR3=%p pvR0=%p HCPhys=%RHp cb=%d pszDesc=%p:{%s} pGCPtr=%p\n", pvR3, pvR0, HCPhys, (int)cb, pszDesc, pszDesc, pGCPtr));

    /*
     * Validate input.
     */
    AssertReturn(RT_ALIGN_P(pvR3, PAGE_SIZE) == pvR3, VERR_INVALID_PARAMETER);
    AssertReturn(RT_ALIGN_T(pvR0, PAGE_SIZE, RTR0PTR) == pvR0, VERR_INVALID_PARAMETER);
    AssertReturn(RT_ALIGN_T(HCPhys, PAGE_SIZE, RTHCPHYS) == HCPhys, VERR_INVALID_PARAMETER);
    AssertReturn(pszDesc && *pszDesc, VERR_INVALID_PARAMETER);

    /*
     * Add the memory to the hypervisor area.
     */
    uint32_t cbAligned = RT_ALIGN_32(cb, PAGE_SIZE);
    AssertReturn(cbAligned >= cb, VERR_INVALID_PARAMETER);
    RTGCPTR         GCPtr;
    PMMLOOKUPHYPER  pLookup;
    int rc = mmR3HyperMap(pVM, cbAligned, pszDesc, &GCPtr, &pLookup);
    if (RT_SUCCESS(rc))
    {
        pLookup->enmType         = MMLOOKUPHYPERTYPE_HCPHYS;
        pLookup->u.HCPhys.pvR3   = pvR3;
        pLookup->u.HCPhys.pvR0   = pvR0;
        pLookup->u.HCPhys.HCPhys = HCPhys;

        /*
         * Update the page table.
         */
        if (pVM->mm.s.fPGMInitialized)
            rc = PGMMap(pVM, GCPtr, HCPhys, cbAligned, 0);
        if (RT_SUCCESS(rc))
            *pGCPtr = GCPtr;
    }
    return rc;
}

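/*
 * Editor's note: an illustrative sketch, not part of the original file. A
 * typical MMR3HyperMapHCPhys() caller already owns a page of locked,
 * physically contiguous memory; the helper below merely shows the call and
 * its alignment requirements. The function name and description string are
 * made up.
 */
#if 0 /* example only */
static int exampleMapContiguousPage(PVM pVM, void *pvR3, RTR0PTR pvR0, RTHCPHYS HCPhys)
{
    /* pvR3, pvR0 and HCPhys must all be page aligned or the call will fail. */
    RTGCPTR GCPtr;
    int rc = MMR3HyperMapHCPhys(pVM, pvR3, pvR0, HCPhys, PAGE_SIZE, "Example page", &GCPtr);
    if (RT_SUCCESS(rc))
        LogRel(("Example page mapped at %RGv\n", GCPtr));
    return rc;
}
#endif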

/**
 * Maps contiguous GC physical memory into the hypervisor region in the GC.
 *
 * @return VBox status code.
 *
 * @param   pVM         VM handle.
 * @param   GCPhys      Guest context physical address of the memory to be mapped. Must be page aligned!
 * @param   cb          Size of the memory. Will be rounded up to nearest page.
 * @param   pszDesc     Mapping description.
 * @param   pGCPtr      Where to store the GC address.
 */
VMMR3DECL(int) MMR3HyperMapGCPhys(PVM pVM, RTGCPHYS GCPhys, size_t cb, const char *pszDesc, PRTGCPTR pGCPtr)
{
    LogFlow(("MMR3HyperMapGCPhys: GCPhys=%RGp cb=%d pszDesc=%p:{%s} pGCPtr=%p\n", GCPhys, (int)cb, pszDesc, pszDesc, pGCPtr));

    /*
     * Validate input.
     */
    AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
    AssertReturn(pszDesc && *pszDesc, VERR_INVALID_PARAMETER);

    /*
     * Add the memory to the hypervisor area.
     */
    cb = RT_ALIGN_Z(cb, PAGE_SIZE);
    RTGCPTR         GCPtr;
    PMMLOOKUPHYPER  pLookup;
    int rc = mmR3HyperMap(pVM, cb, pszDesc, &GCPtr, &pLookup);
    if (RT_SUCCESS(rc))
    {
        pLookup->enmType         = MMLOOKUPHYPERTYPE_GCPHYS;
        pLookup->u.GCPhys.GCPhys = GCPhys;

        /*
         * Update the page table.
         */
        for (unsigned off = 0; off < cb; off += PAGE_SIZE)
        {
            RTHCPHYS HCPhys;
            rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys + off, &HCPhys);
            AssertRC(rc);
            if (RT_FAILURE(rc))
            {
                AssertMsgFailed(("rc=%Rrc GCPhys=%RGp off=%#x %s\n", rc, GCPhys, off, pszDesc));
                break;
            }
            if (pVM->mm.s.fPGMInitialized)
            {
                rc = PGMMap(pVM, GCPtr + off, HCPhys, PAGE_SIZE, 0);
                AssertRC(rc);
                if (RT_FAILURE(rc))
                {
                    AssertMsgFailed(("rc=%Rrc GCPhys=%RGp off=%#x %s\n", rc, GCPhys, off, pszDesc));
                    break;
                }
            }
        }

        if (RT_SUCCESS(rc) && pGCPtr)
            *pGCPtr = GCPtr;
    }
    return rc;
}

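/*
 * Editor's note: an illustrative sketch, not part of the original file. It
 * maps a single page of guest physical memory into the HMA so ring-0/RC code
 * can access it through a stable GC address; the name and description string
 * are made up.
 */
#if 0 /* example only */
static int exampleMapGuestPage(PVM pVM, RTGCPHYS GCPhysPage, PRTGCPTR pGCPtr)
{
    /* GCPhysPage must be page aligned and backed by RAM. */
    return MMR3HyperMapGCPhys(pVM, GCPhysPage, PAGE_SIZE, "Example guest page", pGCPtr);
}
#endif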

/**
 * Maps a portion of an MMIO2 region into the hypervisor region.
 *
 * Callers of this API must never deregister the MMIO2 region before the
 * VM is powered off. If this becomes a requirement, an MMR3HyperUnmapMMIO2
 * API will be needed to perform cleanups.
 *
 * @return VBox status code.
 *
 * @param   pVM         Pointer to the shared VM structure.
 * @param   pDevIns     The device owning the MMIO2 memory.
 * @param   iRegion     The region.
 * @param   off         The offset into the region. Will be rounded down to closest page boundary.
 * @param   cb          The number of bytes to map. Will be rounded up to the closest page boundary.
 * @param   pszDesc     Mapping description.
 * @param   pRCPtr      Where to store the RC address.
 */
VMMR3DECL(int) MMR3HyperMapMMIO2(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, RTGCPHYS cb,
                                 const char *pszDesc, PRTRCPTR pRCPtr)
{
    LogFlow(("MMR3HyperMapMMIO2: pDevIns=%p iRegion=%#x off=%RGp cb=%RGp pszDesc=%p:{%s} pRCPtr=%p\n",
             pDevIns, iRegion, off, cb, pszDesc, pszDesc, pRCPtr));
    int rc;

    /*
     * Validate input.
     */
    AssertReturn(pszDesc && *pszDesc, VERR_INVALID_PARAMETER);
    AssertReturn(off + cb > off, VERR_INVALID_PARAMETER);
    uint32_t const offPage = off & PAGE_OFFSET_MASK;
    off &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
    cb += offPage;
    cb = RT_ALIGN_Z(cb, PAGE_SIZE);
    const RTGCPHYS offEnd = off + cb;
    AssertReturn(offEnd > off, VERR_INVALID_PARAMETER);
    for (RTGCPHYS offCur = off; offCur < offEnd; offCur += PAGE_SIZE)
    {
        RTHCPHYS HCPhys;
        rc = PGMR3PhysMMIO2GetHCPhys(pVM, pDevIns, iRegion, offCur, &HCPhys);
        AssertMsgRCReturn(rc, ("rc=%Rrc - iRegion=%d off=%RGp\n", rc, iRegion, off), rc);
    }

    /*
     * Add the memory to the hypervisor area.
     */
    RTGCPTR         GCPtr;
    PMMLOOKUPHYPER  pLookup;
    rc = mmR3HyperMap(pVM, cb, pszDesc, &GCPtr, &pLookup);
    if (RT_SUCCESS(rc))
    {
        pLookup->enmType         = MMLOOKUPHYPERTYPE_MMIO2;
        pLookup->u.MMIO2.pDevIns = pDevIns;
        pLookup->u.MMIO2.iRegion = iRegion;
        pLookup->u.MMIO2.off     = off;

        /*
         * Update the page table.
         */
        if (pVM->mm.s.fPGMInitialized)
        {
            for (RTGCPHYS offCur = off; offCur < offEnd; offCur += PAGE_SIZE)
            {
                RTHCPHYS HCPhys;
                rc = PGMR3PhysMMIO2GetHCPhys(pVM, pDevIns, iRegion, offCur, &HCPhys);
                AssertRCReturn(rc, VERR_INTERNAL_ERROR);
                rc = PGMMap(pVM, GCPtr + (offCur - off), HCPhys, PAGE_SIZE, 0);
                if (RT_FAILURE(rc))
                {
                    AssertMsgFailed(("rc=%Rrc offCur=%RGp %s\n", rc, offCur, pszDesc));
                    break;
                }
            }
        }

        if (RT_SUCCESS(rc))
        {
            GCPtr |= offPage;
            *pRCPtr = GCPtr;
            AssertLogRelReturn(*pRCPtr == GCPtr, VERR_INTERNAL_ERROR);
        }
    }
    return rc;
}

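/*
 * Editor's note: an illustrative sketch, not part of the original file. A
 * device would call MMR3HyperMapMMIO2() like this to expose a window of its
 * MMIO2 region to RC code; the region number, offset and size are made-up
 * example values. Note that off and cb need not be page aligned - the
 * returned RC address carries the sub-page offset.
 */
#if 0 /* example only */
static int exampleMapMmio2Window(PVM pVM, PPDMDEVINS pDevIns, PRTRCPTR pRCPtr)
{
    return MMR3HyperMapMMIO2(pVM, pDevIns, 0 /*iRegion*/, 0 /*off*/, 16*_1K, "Example MMIO2 window", pRCPtr);
}
#endif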

/**
 * Maps locked R3 virtual memory into the hypervisor region in the GC.
 *
 * @return VBox status code.
 *
 * @param   pVM         VM handle.
 * @param   pvR3        The ring-3 address of the memory, must be page aligned.
 * @param   pvR0        The ring-0 address of the memory, must be page aligned. (optional)
 * @param   cPages      The number of pages.
 * @param   paPages     The page descriptors.
 * @param   pszDesc     Mapping description.
 * @param   pGCPtr      Where to store the GC address corresponding to pvR3.
 */
VMMR3DECL(int) MMR3HyperMapPages(PVM pVM, void *pvR3, RTR0PTR pvR0, size_t cPages, PCSUPPAGE paPages, const char *pszDesc, PRTGCPTR pGCPtr)
{
    LogFlow(("MMR3HyperMapPages: pvR3=%p pvR0=%p cPages=%zu paPages=%p pszDesc=%p:{%s} pGCPtr=%p\n",
             pvR3, pvR0, cPages, paPages, pszDesc, pszDesc, pGCPtr));

    /*
     * Validate input.
     */
    AssertPtrReturn(pvR3, VERR_INVALID_POINTER);
    AssertPtrReturn(paPages, VERR_INVALID_POINTER);
    AssertReturn(cPages > 0, VERR_PAGE_COUNT_OUT_OF_RANGE);
    AssertReturn(cPages <= VBOX_MAX_ALLOC_PAGE_COUNT, VERR_PAGE_COUNT_OUT_OF_RANGE);
    AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
    AssertReturn(*pszDesc, VERR_INVALID_PARAMETER);
    AssertPtrReturn(pGCPtr, VERR_INVALID_PARAMETER);

    /*
     * Add the memory to the hypervisor area.
     */
    RTGCPTR         GCPtr;
    PMMLOOKUPHYPER  pLookup;
    int rc = mmR3HyperMap(pVM, cPages << PAGE_SHIFT, pszDesc, &GCPtr, &pLookup);
    if (RT_SUCCESS(rc))
    {
        /*
         * Copy the physical page addresses and tell PGM about them.
         */
        PRTHCPHYS paHCPhysPages = (PRTHCPHYS)MMR3HeapAlloc(pVM, MM_TAG_MM, sizeof(RTHCPHYS) * cPages);
        if (paHCPhysPages)
        {
            for (size_t i = 0; i < cPages; i++)
            {
                AssertReleaseMsgReturn(paPages[i].Phys != 0 && paPages[i].Phys != NIL_RTHCPHYS && !(paPages[i].Phys & PAGE_OFFSET_MASK),
                                       ("i=%#zx Phys=%RHp %s\n", i, paPages[i].Phys, pszDesc),
                                       VERR_INTERNAL_ERROR);
                paHCPhysPages[i] = paPages[i].Phys;
            }

            if (pVM->mm.s.fPGMInitialized)
            {
                for (size_t i = 0; i < cPages; i++)
                {
                    rc = PGMMap(pVM, GCPtr + (i << PAGE_SHIFT), paHCPhysPages[i], PAGE_SIZE, 0);
                    AssertRCBreak(rc);
                }
            }
            if (RT_SUCCESS(rc))
            {
                pLookup->enmType                = MMLOOKUPHYPERTYPE_LOCKED;
                pLookup->u.Locked.pvR3          = pvR3;
                pLookup->u.Locked.pvR0          = pvR0;
                pLookup->u.Locked.paHCPhysPages = paHCPhysPages;

                /* done. */
                *pGCPtr = GCPtr;
                return rc;
            }
            /* Don't care about failure cleanup here; we're screwed if this fails anyway. */
        }
        else
            rc = VERR_NO_MEMORY; /* editor's fix: the original fell through and returned success on allocation failure. */
    }

    return rc;
}

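/*
 * Editor's note: an illustrative sketch, not part of the original file. It
 * pairs MMR3HyperMapPages() with SUPR3PageAllocEx(), the same allocator used
 * by mmR3HyperHeapCreate() and MMR3HyperAllocOnceNoRel() below, to allocate
 * and map locked ring-3 pages; the function name and description string are
 * made up.
 */
#if 0 /* example only */
static int exampleAllocAndMapPages(PVM pVM, size_t cPages, PRTGCPTR pGCPtr)
{
    PSUPPAGE paPages = (PSUPPAGE)RTMemTmpAlloc(cPages * sizeof(paPages[0]));
    if (!paPages)
        return VERR_NO_TMP_MEMORY;
    void *pvPages;
    int rc = SUPR3PageAllocEx(cPages, 0 /*fFlags*/, &pvPages, NULL /*pR0Ptr*/, paPages);
    if (RT_SUCCESS(rc))
        rc = MMR3HyperMapPages(pVM, pvPages, NIL_RTR0PTR, cPages, paPages, "Example pages", pGCPtr);
    RTMemTmpFree(paPages); /* MMR3HyperMapPages() copied what it needs. */
    return rc;
}
#endif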

/**
 * Reserves a hypervisor memory area.
 * Most frequent usage is fence pages and dynamic mappings like the guest PD and PDPT.
 *
 * @return VBox status code.
 *
 * @param   pVM         VM handle.
 * @param   cb          Size of the memory. Will be rounded up to nearest page.
 * @param   pszDesc     Mapping description.
 * @param   pGCPtr      Where to store the assigned GC address. Optional.
 */
VMMR3DECL(int) MMR3HyperReserve(PVM pVM, unsigned cb, const char *pszDesc, PRTGCPTR pGCPtr)
{
    LogFlow(("MMR3HyperReserve: cb=%d pszDesc=%p:{%s} pGCPtr=%p\n", (int)cb, pszDesc, pszDesc, pGCPtr));

    /*
     * Validate input.
     */
    if (    cb <= 0
        ||  !pszDesc
        ||  !*pszDesc)
    {
        AssertMsgFailed(("Invalid parameter\n"));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Add the memory to the hypervisor area.
     */
    RTGCPTR         GCPtr;
    PMMLOOKUPHYPER  pLookup;
    int rc = mmR3HyperMap(pVM, cb, pszDesc, &GCPtr, &pLookup);
    if (RT_SUCCESS(rc))
    {
        pLookup->enmType = MMLOOKUPHYPERTYPE_DYNAMIC;
        if (pGCPtr)
            *pGCPtr = GCPtr;
        return VINF_SUCCESS;
    }
    return rc;
}

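/*
 * Editor's note: an illustrative sketch, not part of the original file. It
 * reserves a page-sized dynamic window (to be populated later via PGMMap(),
 * as MMR3HyperInitFinalize() notes for MMLOOKUPHYPERTYPE_DYNAMIC records)
 * followed by an unmapped fence page, mirroring the "fence" pattern used
 * throughout this file. The function name and description are made up.
 */
#if 0 /* example only */
static int exampleReserveDynamicWindow(PVM pVM, PRTGCPTR pGCPtr)
{
    int rc = MMR3HyperReserve(pVM, PAGE_SIZE, "Example window", pGCPtr);
    if (RT_SUCCESS(rc))
        rc = MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
    return rc;
}
#endif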

/**
 * Adds memory to the hypervisor memory arena.
 *
 * @return VBox status code.
 * @param   pVM         The VM handle.
 * @param   cb          Size of the memory. Will be rounded up to nearest page.
 * @param   pszDesc     The description of the memory.
 * @param   pGCPtr      Where to store the GC address.
 * @param   ppLookup    Where to store the pointer to the lookup record.
 * @remark  We assume the threading structure of VBox imposes natural
 *          serialization of most functions, this one included.
 */
static int mmR3HyperMap(PVM pVM, const size_t cb, const char *pszDesc, PRTGCPTR pGCPtr, PMMLOOKUPHYPER *ppLookup)
{
    /*
     * Validate input.
     */
    const uint32_t cbAligned = RT_ALIGN_32(cb, PAGE_SIZE);
    AssertReturn(cbAligned >= cb, VERR_INVALID_PARAMETER);
    if (pVM->mm.s.offHyperNextStatic + cbAligned >= pVM->mm.s.cbHyperArea) /* don't use the last page, it's a fence. */
    {
        AssertMsgFailed(("Out of static mapping space in the HMA! offHyperAreaGC=%x cbAligned=%x cbHyperArea=%x\n",
                         pVM->mm.s.offHyperNextStatic, cbAligned, pVM->mm.s.cbHyperArea));
        return VERR_NO_MEMORY;
    }

    /*
     * Allocate lookup record.
     */
    PMMLOOKUPHYPER pLookup;
    int rc = MMHyperAlloc(pVM, sizeof(*pLookup), 1, MM_TAG_MM, (void **)&pLookup);
    if (RT_SUCCESS(rc))
    {
        /*
         * Initialize it and insert it.
         */
        pLookup->offNext = pVM->mm.s.offLookupHyper;
        pLookup->cb      = cbAligned;
        pLookup->off     = pVM->mm.s.offHyperNextStatic;
        pVM->mm.s.offLookupHyper = (uint8_t *)pLookup - (uint8_t *)pVM->mm.s.pHyperHeapR3;
        if (pLookup->offNext != (int32_t)NIL_OFFSET)
            pLookup->offNext -= pVM->mm.s.offLookupHyper;
        pLookup->enmType = MMLOOKUPHYPERTYPE_INVALID;
        memset(&pLookup->u, 0xff, sizeof(pLookup->u));
        pLookup->pszDesc = pszDesc;

        /* Mapping. */
        *pGCPtr = pVM->mm.s.pvHyperAreaGC + pVM->mm.s.offHyperNextStatic;
        pVM->mm.s.offHyperNextStatic += cbAligned;

        /* Return pointer. */
        *ppLookup = pLookup;
    }

    AssertRC(rc);
    LogFlow(("mmR3HyperMap: returns %Rrc *pGCPtr=%RGv\n", rc, *pGCPtr));
    return rc;
}

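/*
 * Editor's note: an illustrative sketch, not part of the original file. The
 * lookup records created by mmR3HyperMap() above form a singly linked list
 * chained by self-relative byte offsets (offNext) and terminated by
 * NIL_OFFSET; this walker mirrors the traversal done by
 * MMR3HyperInitFinalize() and mmR3HyperInfoHma().
 */
#if 0 /* example only */
static void exampleWalkLookupChain(PVM pVM)
{
    PMMLOOKUPHYPER pLookup = (PMMLOOKUPHYPER)((uint8_t *)pVM->mm.s.pHyperHeapR3 + pVM->mm.s.offLookupHyper);
    for (;;)
    {
        Log(("HMA record: off=%#RX32 cb=%#x enmType=%d %s\n",
             pLookup->off, pLookup->cb, pLookup->enmType, pLookup->pszDesc));
        if (pLookup->offNext == (int32_t)NIL_OFFSET)
            break;
        pLookup = (PMMLOOKUPHYPER)((uint8_t *)pLookup + pLookup->offNext);
    }
}
#endif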

/**
 * Allocates a new heap.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   cb          The size of the new heap.
 * @param   ppHeap      Where to store the heap pointer on successful return.
 * @param   pR0PtrHeap  Where to store the ring-0 address of the heap on
 *                      success.
 */
static int mmR3HyperHeapCreate(PVM pVM, const size_t cb, PMMHYPERHEAP *ppHeap, PRTR0PTR pR0PtrHeap)
{
    /*
     * Allocate the hypervisor heap.
     */
    const uint32_t  cbAligned = RT_ALIGN_32(cb, PAGE_SIZE);
    AssertReturn(cbAligned >= cb, VERR_INVALID_PARAMETER);
    uint32_t const  cPages  = cbAligned >> PAGE_SHIFT;
    PSUPPAGE        paPages = (PSUPPAGE)MMR3HeapAlloc(pVM, MM_TAG_MM, cPages * sizeof(paPages[0]));
    if (!paPages)
        return VERR_NO_MEMORY;
    void           *pv;
    RTR0PTR         pvR0 = NIL_RTR0PTR;
    int rc = SUPR3PageAllocEx(cPages,
                              0 /*fFlags*/,
                              &pv,
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
                              VMMIsHwVirtExtForced(pVM) ? &pvR0 : NULL,
#else
                              NULL,
#endif
                              paPages);
    if (RT_SUCCESS(rc))
    {
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
        if (!VMMIsHwVirtExtForced(pVM))
            pvR0 = NIL_RTR0PTR;
#else
        pvR0 = (uintptr_t)pv;
#endif
        memset(pv, 0, cbAligned);

        /*
         * Initialize the heap and first free chunk.
         */
        PMMHYPERHEAP pHeap = (PMMHYPERHEAP)pv;
        pHeap->u32Magic             = MMHYPERHEAP_MAGIC;
        pHeap->pbHeapR3             = (uint8_t *)pHeap + MMYPERHEAP_HDR_SIZE;
        pHeap->pbHeapR0             = pvR0 != NIL_RTR0PTR ? pvR0 + MMYPERHEAP_HDR_SIZE : NIL_RTR0PTR;
        //pHeap->pbHeapRC           = 0; // set by mmR3HyperHeapMap()
        pHeap->pVMR3                = pVM;
        pHeap->pVMR0                = pVM->pVMR0;
        pHeap->pVMRC                = pVM->pVMRC;
        pHeap->cbHeap               = cbAligned - MMYPERHEAP_HDR_SIZE;
        pHeap->cbFree               = pHeap->cbHeap - sizeof(MMHYPERCHUNK);
        //pHeap->offFreeHead        = 0;
        //pHeap->offFreeTail        = 0;
        pHeap->offPageAligned       = pHeap->cbHeap;
        //pHeap->HyperHeapStatTree  = 0;
        pHeap->paPages              = paPages;

        PMMHYPERCHUNKFREE pFree = (PMMHYPERCHUNKFREE)pHeap->pbHeapR3;
        pFree->cb                   = pHeap->cbFree;
        //pFree->core.offNext       = 0;
        MMHYPERCHUNK_SET_TYPE(&pFree->core, MMHYPERCHUNK_FLAGS_FREE);
        pFree->core.offHeap         = -(int32_t)MMYPERHEAP_HDR_SIZE;
        //pFree->offNext            = 0;
        //pFree->offPrev            = 0;

        STAMR3Register(pVM, &pHeap->cbHeap, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, "/MM/HyperHeap/cbHeap", STAMUNIT_BYTES, "The heap size.");
        STAMR3Register(pVM, &pHeap->cbFree, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, "/MM/HyperHeap/cbFree", STAMUNIT_BYTES, "The free space.");

        *ppHeap = pHeap;
        *pR0PtrHeap = pvR0;
        return VINF_SUCCESS;
    }
    AssertMsgFailed(("SUPR3PageAllocEx(%d,,,,) -> %Rrc\n", cbAligned >> PAGE_SHIFT, rc));

    *ppHeap = NULL;
    return rc;
}

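/*
 * Editor's note: an illustrative sketch, not part of the original file. It
 * restates the heap layout established above: the MMHYPERHEAP header
 * (MMYPERHEAP_HDR_SIZE bytes; the odd macro name is the real one from
 * MMInternal.h) is followed directly by the usable heap space, whose first
 * bytes hold the initial free chunk covering the entire heap.
 */
#if 0 /* example only */
static void exampleDumpHeapLayout(PMMHYPERHEAP pHeap)
{
    uint8_t *pbHeap = (uint8_t *)pHeap + MMYPERHEAP_HDR_SIZE;
    Assert(pbHeap == pHeap->pbHeapR3);
    PMMHYPERCHUNKFREE pFirst = (PMMHYPERCHUNKFREE)pbHeap;
    Log(("heap %p: cbHeap=%#x cbFree=%#x first free chunk %p cb=%#x\n",
         pHeap, pHeap->cbHeap, pHeap->cbFree, pFirst, pFirst->cb));
}
#endif
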
/**
 * Maps the hypervisor heap into the hypervisor area (the HMA).
 */
static int mmR3HyperHeapMap(PVM pVM, PMMHYPERHEAP pHeap, PRTGCPTR ppHeapGC)
{
    Assert(RT_ALIGN_Z(pHeap->cbHeap + MMYPERHEAP_HDR_SIZE, PAGE_SIZE) == pHeap->cbHeap + MMYPERHEAP_HDR_SIZE);
    Assert(pHeap->paPages);
    int rc = MMR3HyperMapPages(pVM,
                               pHeap,
                               pHeap->pbHeapR0 != NIL_RTR0PTR ? pHeap->pbHeapR0 - MMYPERHEAP_HDR_SIZE : NIL_RTR0PTR,
                               (pHeap->cbHeap + MMYPERHEAP_HDR_SIZE) >> PAGE_SHIFT,
                               pHeap->paPages,
                               "Heap", ppHeapGC);
    if (RT_SUCCESS(rc))
    {
        pHeap->pVMRC    = pVM->pVMRC;
        pHeap->pbHeapRC = *ppHeapGC + MMYPERHEAP_HDR_SIZE;
        /* Reserve a page for fencing. */
        MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);

        /* We won't need these any more. */
        MMR3HeapFree(pHeap->paPages);
        pHeap->paPages = NULL;
    }
    return rc;
}


/**
 * Allocates memory in the Hypervisor (GC VMM) area which never will
 * be freed and doesn't have any offset based relation to other heap blocks.
 *
 * The latter means that two blocks allocated by this API will not have the
 * same relative position to each other in GC and HC. In short, never use
 * this API for allocating nodes for an offset based AVL tree!
 *
 * The returned memory is of course zeroed.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   cb          Number of bytes to allocate.
 * @param   uAlignment  Required memory alignment in bytes.
 *                      Values are 0,8,16,32 and PAGE_SIZE.
 *                      0 -> default alignment, i.e. 8 bytes.
 * @param   enmTag      The statistics tag.
 * @param   ppv         Where to store the address to the allocated
 *                      memory.
 * @remark  This is assumed not to be used at times when serialization is required.
 */
VMMDECL(int) MMR3HyperAllocOnceNoRel(PVM pVM, size_t cb, unsigned uAlignment, MMTAG enmTag, void **ppv)
{
    AssertMsg(cb >= 8, ("Hey! Do you really mean to allocate less than 8 bytes?! cb=%d\n", cb));

    /*
     * Choose between allocating a new chunk of HMA memory
     * and the heap. We will only do BIG allocations from HMA and
     * only at creation time.
     */
    if (   (   cb < _64K
            && (   uAlignment != PAGE_SIZE
                || cb < 48*_1K))
        || VMR3GetState(pVM) != VMSTATE_CREATING)
    {
        int rc = MMHyperAlloc(pVM, cb, uAlignment, enmTag, ppv);
        if (    rc != VERR_MM_HYPER_NO_MEMORY
            ||  cb <= 8*_1K)
        {
            Log2(("MMR3HyperAllocOnceNoRel: cb=%#zx uAlignment=%#x returns %Rrc and *ppv=%p\n",
                  cb, uAlignment, rc, *ppv));
            return rc;
        }
    }

    /*
     * Validate alignment.
     */
    switch (uAlignment)
    {
        case 0:
        case 8:
        case 16:
        case 32:
        case PAGE_SIZE:
            break;
        default:
            AssertMsgFailed(("Invalid alignment %u\n", uAlignment));
            return VERR_INVALID_PARAMETER;
    }

    /*
     * Allocate the pages and map them into HMA space.
     */
    uint32_t const  cbAligned = RT_ALIGN_32(cb, PAGE_SIZE);
    AssertReturn(cbAligned >= cb, VERR_INVALID_PARAMETER);
    uint32_t const  cPages    = cbAligned >> PAGE_SHIFT;
    PSUPPAGE        paPages   = (PSUPPAGE)RTMemTmpAlloc(cPages * sizeof(paPages[0]));
    if (!paPages)
        return VERR_NO_TMP_MEMORY;
    void           *pvPages;
    RTR0PTR         pvR0 = NIL_RTR0PTR;
    int rc = SUPR3PageAllocEx(cPages,
                              0 /*fFlags*/,
                              &pvPages,
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
                              VMMIsHwVirtExtForced(pVM) ? &pvR0 : NULL,
#else
                              NULL,
#endif
                              paPages);
    if (RT_SUCCESS(rc))
    {
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
        if (!VMMIsHwVirtExtForced(pVM))
            pvR0 = NIL_RTR0PTR;
#else
        pvR0 = (uintptr_t)pvPages;
#endif
        memset(pvPages, 0, cbAligned);

        RTGCPTR GCPtr;
        rc = MMR3HyperMapPages(pVM,
                               pvPages,
                               pvR0,
                               cPages,
                               paPages,
                               MMR3HeapAPrintf(pVM, MM_TAG_MM, "alloc once (%s)", mmR3GetTagName(enmTag)),
                               &GCPtr);
        if (RT_SUCCESS(rc))
        {
            *ppv = pvPages;
            Log2(("MMR3HyperAllocOnceNoRel: cbAligned=%#x uAlignment=%#x returns VINF_SUCCESS and *ppv=%p\n",
                  cbAligned, uAlignment, *ppv));
            MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
            return rc;
        }
        AssertMsgFailed(("Failed to allocate %zd bytes! %Rrc\n", cbAligned, rc));
        SUPR3PageFreeEx(pvPages, cPages);

        /*
         * HACK ALERT! Try to allocate it off the heap so that we don't freak
         * out during vga/vmmdev mmio2 allocation with certain ram sizes.
         */
        /** @todo make a proper fix for this so we will never end up in this kind of situation! */
        Log(("MMR3HyperAllocOnceNoRel: MMR3HyperMapPages failed with rc=%Rrc, try MMHyperAlloc(,%#x,,) instead\n", rc, cb));
        int rc2 = MMHyperAlloc(pVM, cb, uAlignment, enmTag, ppv);
        if (RT_SUCCESS(rc2))
        {
            Log2(("MMR3HyperAllocOnceNoRel: cb=%#x uAlignment=%#x returns %Rrc and *ppv=%p\n",
                  cb, uAlignment, rc2, *ppv));
            return rc2; /* editor's fix: the original returned the failed rc here despite the successful fallback. */
        }
    }
    else
        AssertMsgFailed(("Failed to allocate %zd bytes! %Rrc\n", cbAligned, rc));

    if (rc == VERR_NO_MEMORY)
        rc = VERR_MM_HYPER_NO_MEMORY;
    LogRel(("MMR3HyperAllocOnceNoRel: cb=%#zx uAlignment=%#x returns %Rrc\n", cb, uAlignment, rc));
    return rc;
}

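/*
 * Editor's note: an illustrative sketch, not part of the original file. A
 * caller wanting a page aligned, never-freed block during VM creation would
 * use MMR3HyperAllocOnceNoRel() like this; the size and the MM_TAG_MM tag
 * are example values.
 */
#if 0 /* example only */
static int exampleAllocOnce(PVM pVM, void **ppv)
{
    /* The only valid alignments are 0 (= 8), 8, 16, 32 and PAGE_SIZE. */
    return MMR3HyperAllocOnceNoRel(pVM, 16*_1K, PAGE_SIZE, MM_TAG_MM, ppv);
}
#endif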

/**
 * Convert hypervisor HC virtual address to HC physical address.
 *
 * @returns HC physical address.
 * @param   pVM     VM Handle
 * @param   pvR3    Host context virtual address.
 */
VMMR3DECL(RTHCPHYS) MMR3HyperHCVirt2HCPhys(PVM pVM, void *pvR3)
{
    PMMLOOKUPHYPER pLookup = (PMMLOOKUPHYPER)((uint8_t *)pVM->mm.s.pHyperHeapR3 + pVM->mm.s.offLookupHyper);
    for (;;)
    {
        switch (pLookup->enmType)
        {
            case MMLOOKUPHYPERTYPE_LOCKED:
            {
                unsigned off = (uint8_t *)pvR3 - (uint8_t *)pLookup->u.Locked.pvR3;
                if (off < pLookup->cb)
                    return pLookup->u.Locked.paHCPhysPages[off >> PAGE_SHIFT] | (off & PAGE_OFFSET_MASK);
                break;
            }

            case MMLOOKUPHYPERTYPE_HCPHYS:
            {
                unsigned off = (uint8_t *)pvR3 - (uint8_t *)pLookup->u.HCPhys.pvR3;
                if (off < pLookup->cb)
                    return pLookup->u.HCPhys.HCPhys + off;
                break;
            }

            case MMLOOKUPHYPERTYPE_GCPHYS:
            case MMLOOKUPHYPERTYPE_MMIO2:
            case MMLOOKUPHYPERTYPE_DYNAMIC:
                /* can't (or don't want to) convert these kinds of records. */
                break;

            default:
                AssertMsgFailed(("enmType=%d\n", pLookup->enmType));
                break;
        }

        /* next */
        if ((unsigned)pLookup->offNext == NIL_OFFSET)
            break;
        pLookup = (PMMLOOKUPHYPER)((uint8_t *)pLookup + pLookup->offNext);
    }

    AssertMsgFailed(("pvR3=%p is not inside the hypervisor memory area!\n", pvR3));
    return NIL_RTHCPHYS;
}

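/*
 * Editor's note: an illustrative sketch, not part of the original file. It
 * resolves the host physical address backing an HMA ring-3 pointer, which
 * only works for LOCKED and HCPHYS records as the function above shows; the
 * helper name is made up.
 */
#if 0 /* example only */
static RTHCPHYS exampleQueryHCPhys(PVM pVM, void *pv)
{
    RTHCPHYS HCPhys = MMR3HyperHCVirt2HCPhys(pVM, pv);
    Assert(HCPhys != NIL_RTHCPHYS); /* pv must lie inside a convertible HMA mapping. */
    return HCPhys;
}
#endif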

#if 0 /* unused, not implemented */
/**
 * Convert hypervisor HC physical address to HC virtual address.
 *
 * @returns HC virtual address.
 * @param   pVM     VM Handle
 * @param   HCPhys  Host context physical address.
 */
VMMR3DECL(void *) MMR3HyperHCPhys2HCVirt(PVM pVM, RTHCPHYS HCPhys)
{
    void *pv;
    int rc = MMR3HyperHCPhys2HCVirtEx(pVM, HCPhys, &pv);
    if (RT_SUCCESS(rc))
        return pv;
    AssertMsgFailed(("Invalid address HCPhys=%x rc=%d\n", HCPhys, rc));
    return NULL;
}


/**
 * Convert hypervisor HC physical address to HC virtual address.
 *
 * @returns VBox status.
 * @param   pVM     VM Handle
 * @param   HCPhys  Host context physical address.
 * @param   ppv     Where to store the HC virtual address.
 */
VMMR3DECL(int) MMR3HyperHCPhys2HCVirtEx(PVM pVM, RTHCPHYS HCPhys, void **ppv)
{
    /*
     * Linear search.
     */
    /** @todo implement when actually used. */
    return VERR_INVALID_POINTER;
}
#endif /* unused, not implemented */


/**
 * Read hypervisor memory from GC virtual address.
 *
 * @returns VBox status.
 * @param   pVM     VM handle.
 * @param   pvDst   Destination address (HC of course).
 * @param   GCPtr   GC virtual address.
 * @param   cb      Number of bytes to read.
 *
 * @remarks For DBGF only.
 */
VMMR3DECL(int) MMR3HyperReadGCVirt(PVM pVM, void *pvDst, RTGCPTR GCPtr, size_t cb)
{
    if (GCPtr - pVM->mm.s.pvHyperAreaGC >= pVM->mm.s.cbHyperArea)
        return VERR_INVALID_PARAMETER;
    return PGMR3MapRead(pVM, pvDst, GCPtr, cb);
}

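/*
 * Editor's note: an illustrative sketch, not part of the original file. A
 * debugger-style peek at hypervisor memory using MMR3HyperReadGCVirt(),
 * which bounds-checks the GC address against the HMA before reading.
 */
#if 0 /* example only */
static int examplePeekU32(PVM pVM, RTGCPTR GCPtr, uint32_t *pu32)
{
    return MMR3HyperReadGCVirt(pVM, pu32, GCPtr, sizeof(*pu32));
}
#endif
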
/**
 * Release the MM hypervisor heap lock if owned by the current VCPU.
 *
 * @param   pVM     The VM to operate on.
 */
VMMR3DECL(void) MMR3ReleaseOwnedLocks(PVM pVM)
{
    PMMHYPERHEAP pHeap = pVM->mm.s.CTX_SUFF(pHyperHeap);

    if (PDMCritSectIsOwner(&pHeap->Lock))
        PDMCritSectLeave(&pHeap->Lock);
}


/**
 * Info handler for 'hma', it dumps the list of lookup records for the hypervisor memory area.
 *
 * @param   pVM         The VM handle.
 * @param   pHlp        Callback functions for doing output.
 * @param   pszArgs     Argument string. Optional and specific to the handler.
 */
static DECLCALLBACK(void) mmR3HyperInfoHma(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
{
    pHlp->pfnPrintf(pHlp, "Hypervisor Memory Area (HMA) Layout: Base %RGv, 0x%08x bytes\n",
                    pVM->mm.s.pvHyperAreaGC, pVM->mm.s.cbHyperArea);

    PMMLOOKUPHYPER pLookup = (PMMLOOKUPHYPER)((uint8_t *)pVM->mm.s.pHyperHeapR3 + pVM->mm.s.offLookupHyper);
    for (;;)
    {
        switch (pLookup->enmType)
        {
            case MMLOOKUPHYPERTYPE_LOCKED:
                pHlp->pfnPrintf(pHlp, "%RGv-%RGv %RHv %RHv LOCKED  %-*s %s\n",
                                pLookup->off + pVM->mm.s.pvHyperAreaGC,
                                pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
                                pLookup->u.Locked.pvR3,
                                pLookup->u.Locked.pvR0,
                                sizeof(RTHCPTR) * 2, "",
                                pLookup->pszDesc);
                break;

            case MMLOOKUPHYPERTYPE_HCPHYS:
                pHlp->pfnPrintf(pHlp, "%RGv-%RGv %RHv %RHv HCPHYS  %RHp %s\n",
                                pLookup->off + pVM->mm.s.pvHyperAreaGC,
                                pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
                                pLookup->u.HCPhys.pvR3,
                                pLookup->u.HCPhys.pvR0,
                                pLookup->u.HCPhys.HCPhys,
                                pLookup->pszDesc);
                break;

            case MMLOOKUPHYPERTYPE_GCPHYS:
                pHlp->pfnPrintf(pHlp, "%RGv-%RGv %*s GCPHYS  %RGp%*s %s\n",
                                pLookup->off + pVM->mm.s.pvHyperAreaGC,
                                pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
                                sizeof(RTHCPTR) * 2 * 2 + 1, "",
                                pLookup->u.GCPhys.GCPhys, RT_ABS((int)(sizeof(RTHCPHYS) - sizeof(RTGCPHYS))) * 2, "",
                                pLookup->pszDesc);
                break;

            case MMLOOKUPHYPERTYPE_MMIO2:
                pHlp->pfnPrintf(pHlp, "%RGv-%RGv %*s MMIO2   %RGp%*s %s\n",
                                pLookup->off + pVM->mm.s.pvHyperAreaGC,
                                pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
                                sizeof(RTHCPTR) * 2 * 2 + 1, "",
                                pLookup->u.MMIO2.off, RT_ABS((int)(sizeof(RTHCPHYS) - sizeof(RTGCPHYS))) * 2, "",
                                pLookup->pszDesc);
                break;

            case MMLOOKUPHYPERTYPE_DYNAMIC:
                pHlp->pfnPrintf(pHlp, "%RGv-%RGv %*s DYNAMIC %*s %s\n",
                                pLookup->off + pVM->mm.s.pvHyperAreaGC,
                                pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
                                sizeof(RTHCPTR) * 2 * 2 + 1, "",
                                sizeof(RTHCPTR) * 2, "",
                                pLookup->pszDesc);
                break;

            default:
                AssertMsgFailed(("enmType=%d\n", pLookup->enmType));
                break;
        }

        /* next */
        if ((unsigned)pLookup->offNext == NIL_OFFSET)
            break;
        pLookup = (PMMLOOKUPHYPER)((uint8_t *)pLookup + pLookup->offNext);
    }
}