VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/MMHyper.cpp@ 93597

Last change on this file since 93597 was 93597, checked in by vboxsync, 3 years ago

VMM/MM: Removed now unused MMR3HyperHCVirt2HCPhys(). bugref:10093

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 29.9 KB
Line 
1/* $Id: MMHyper.cpp 93597 2022-02-03 21:45:05Z vboxsync $ */
2/** @file
3 * MM - Memory Manager - Hypervisor Memory Area.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_MM_HYPER
23#include <VBox/vmm/pgm.h>
24#include <VBox/vmm/mm.h>
25#include <VBox/vmm/hm.h>
26#include <VBox/vmm/dbgf.h>
27#include "MMInternal.h"
28#include <VBox/vmm/vm.h>
29#include <VBox/vmm/gvm.h>
30#include <VBox/err.h>
31#include <VBox/param.h>
32#include <VBox/log.h>
33#include <iprt/alloc.h>
34#include <iprt/assert.h>
35#include <iprt/string.h>
36
37
38/*********************************************************************************************************************************
39* Internal Functions *
40*********************************************************************************************************************************/
41static int mmR3HyperMap(PVM pVM, const size_t cb, const char *pszDesc, PRTGCPTR pGCPtr, PMMLOOKUPHYPER *ppLookup);
42static int mmR3HyperHeapCreate(PVM pVM, const size_t cb, PMMHYPERHEAP *ppHeap, PRTR0PTR pR0PtrHeap);
43static int mmR3HyperHeapMap(PVM pVM, PMMHYPERHEAP pHeap, PRTGCPTR ppHeapGC);
44static DECLCALLBACK(void) mmR3HyperInfoHma(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
45static int MMR3HyperReserveFence(PVM pVM);
46static int MMR3HyperMapPages(PVM pVM, void *pvR3, RTR0PTR pvR0, size_t cHostPages, PCSUPPAGE paPages,
47 const char *pszDesc, PRTGCPTR pGCPtr);
48
49
50/**
51 * Determin the default heap size.
52 *
53 * @returns The heap size in bytes.
54 * @param pVM The cross context VM structure.
55 */
56static uint32_t mmR3HyperComputeHeapSize(PVM pVM)
57{
58 /** @todo Redo after moving allocations off the hyper heap. */
59
60 /*
61 * Gather parameters.
62 */
63 bool fCanUseLargerHeap = true;
64 //bool fCanUseLargerHeap;
65 //int rc = CFGMR3QueryBoolDef(CFGMR3GetChild(CFGMR3GetRoot(pVM), "MM"), "CanUseLargerHeap", &fCanUseLargerHeap, false);
66 //AssertStmt(RT_SUCCESS(rc), fCanUseLargerHeap = false);
67
68 uint64_t cbRam;
69 int rc = CFGMR3QueryU64(CFGMR3GetRoot(pVM), "RamSize", &cbRam);
70 AssertStmt(RT_SUCCESS(rc), cbRam = _1G);
71
72 /*
73 * We need to keep saved state compatibility if raw-mode is an option,
74 * so lets filter out that case first.
75 */
76 if ( !fCanUseLargerHeap
77 && VM_IS_RAW_MODE_ENABLED(pVM)
78 && cbRam < 16*_1G64)
79 return 1280 * _1K;
80
81 /*
82 * Calculate the heap size.
83 */
84 uint32_t cbHeap = _1M;
85
86 /* The newer chipset may have more devices attached, putting additional
87 pressure on the heap. */
88 if (fCanUseLargerHeap)
89 cbHeap += _1M;
90
91 /* More CPUs means some extra memory usage. */
92 if (pVM->cCpus > 1)
93 cbHeap += pVM->cCpus * _64K;
94
95 /* Lots of memory means extra memory consumption as well (pool). */
96 if (cbRam > 16*_1G64)
97 cbHeap += _2M; /** @todo figure out extactly how much */
98
99 return RT_ALIGN(cbHeap, _256K);
100}
101
102
103/**
104 * Initializes the hypervisor related MM stuff without
105 * calling down to PGM.
106 *
107 * PGM is not initialized at this point, PGM relies on
108 * the heap to initialize.
109 *
110 * @returns VBox status code.
111 */
112int mmR3HyperInit(PVM pVM)
113{
114 LogFlow(("mmR3HyperInit:\n"));
115
116 /*
117 * Decide Hypervisor mapping in the guest context
118 * And setup various hypervisor area and heap parameters.
119 */
120 pVM->mm.s.pvHyperAreaGC = (RTGCPTR)MM_HYPER_AREA_ADDRESS;
121 pVM->mm.s.cbHyperArea = MM_HYPER_AREA_MAX_SIZE;
122 AssertRelease(RT_ALIGN_T(pVM->mm.s.pvHyperAreaGC, 1 << X86_PD_SHIFT, RTGCPTR) == pVM->mm.s.pvHyperAreaGC);
123 Assert(pVM->mm.s.pvHyperAreaGC < 0xff000000);
124
125 /** @todo @bugref{1865}, @bugref{3202}: Change the cbHyperHeap default
126 * depending on whether VT-x/AMD-V is enabled or not! Don't waste
127 * precious kernel space on heap for the PATM.
128 */
129 PCFGMNODE pMM = CFGMR3GetChild(CFGMR3GetRoot(pVM), "MM");
130 uint32_t cbHyperHeap;
131 int rc = CFGMR3QueryU32Def(pMM, "cbHyperHeap", &cbHyperHeap, mmR3HyperComputeHeapSize(pVM));
132 AssertLogRelRCReturn(rc, rc);
133
134 cbHyperHeap = RT_ALIGN_32(cbHyperHeap, GUEST_PAGE_SIZE);
135 LogRel(("MM: cbHyperHeap=%#x (%u)\n", cbHyperHeap, cbHyperHeap));
136
137 /*
138 * Allocate the hypervisor heap.
139 *
140 * (This must be done before we start adding memory to the
141 * hypervisor static area because lookup records are allocated from it.)
142 */
143 rc = mmR3HyperHeapCreate(pVM, cbHyperHeap, &pVM->mm.s.pHyperHeapR3, &pVM->mm.s.pHyperHeapR0);
144 if (RT_SUCCESS(rc))
145 {
146 /*
147 * Make a small head fence to fend of accidental sequential access.
148 */
149 MMR3HyperReserveFence(pVM);
150
151 /*
152 * Map the VM structure into the hypervisor space.
153 * Note! Keeping the mappings here for now in case someone is using
154 * MMHyperR3ToR0 or similar.
155 */
156 AssertCompileSizeAlignment(VM, HOST_PAGE_SIZE);
157 AssertCompileSizeAlignment(VMCPU, HOST_PAGE_SIZE);
158 AssertCompileSizeAlignment(GVM, HOST_PAGE_SIZE);
159 AssertCompileSizeAlignment(GVMCPU, HOST_PAGE_SIZE);
160 AssertRelease(pVM->cbSelf == sizeof(VM));
161 AssertRelease(pVM->cbVCpu == sizeof(VMCPU));
162/** @todo get rid of this (don't dare right now because of
163 * possible MMHyperYYToXX use on the VM structure.) */
164 RTGCPTR GCPtr;
165 if (SUPR3IsDriverless())
166 GCPtr = _1G;
167 else
168 {
169 Assert(GUEST_PAGE_SHIFT == HOST_PAGE_SHIFT);
170 rc = MMR3HyperMapPages(pVM, pVM, pVM->pVMR0ForCall, sizeof(VM) >> HOST_PAGE_SHIFT, pVM->paVMPagesR3, "VM", &GCPtr);
171 uint32_t offPages = RT_UOFFSETOF_DYN(GVM, aCpus) >> HOST_PAGE_SHIFT; /* (Using the _DYN variant avoids -Winvalid-offset) */
172 for (uint32_t idCpu = 0; idCpu < pVM->cCpus && RT_SUCCESS(rc); idCpu++, offPages += sizeof(GVMCPU) >> HOST_PAGE_SHIFT)
173 {
174 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
175 RTGCPTR GCPtrIgn;
176 rc = MMR3HyperMapPages(pVM, pVCpu, pVM->pVMR0ForCall + offPages * HOST_PAGE_SIZE,
177 sizeof(VMCPU) >> HOST_PAGE_SHIFT, &pVM->paVMPagesR3[offPages], "VMCPU", &GCPtrIgn);
178 }
179 }
180 if (RT_SUCCESS(rc))
181 {
182 pVM->pVMRC = (RTRCPTR)GCPtr;
183 for (VMCPUID i = 0; i < pVM->cCpus; i++)
184 pVM->apCpusR3[i]->pVMRC = pVM->pVMRC;
185
186 /* Reserve a page for fencing. */
187 MMR3HyperReserveFence(pVM);
188
189 /*
190 * Map the heap into the hypervisor space.
191 */
192 rc = mmR3HyperHeapMap(pVM, pVM->mm.s.pHyperHeapR3, &GCPtr);
193 if (RT_SUCCESS(rc))
194 {
195 pVM->mm.s.pHyperHeapRC = (RTRCPTR)GCPtr;
196 Assert(pVM->mm.s.pHyperHeapRC == GCPtr);
197
198 /*
199 * Register info handlers.
200 */
201 DBGFR3InfoRegisterInternal(pVM, "hma", "Show the layout of the Hypervisor Memory Area.", mmR3HyperInfoHma);
202
203 LogFlow(("mmR3HyperInit: returns VINF_SUCCESS\n"));
204 return VINF_SUCCESS;
205 }
206 /* Caller will do proper cleanup. */
207 }
208 }
209
210 LogFlow(("mmR3HyperInit: returns %Rrc\n", rc));
211 return rc;
212}
213
214
215/**
216 * Cleans up the hypervisor heap.
217 *
218 * @returns VBox status code.
219 */
220int mmR3HyperTerm(PVM pVM)
221{
222 if (pVM->mm.s.pHyperHeapR3)
223 PDMR3CritSectDelete(pVM, &pVM->mm.s.pHyperHeapR3->Lock);
224
225 return VINF_SUCCESS;
226}
227
228
229/**
230 * Finalizes the HMA mapping (obsolete).
231 *
232 * This is called later during init, most (all) HMA allocations should be done
233 * by the time this function is called.
234 *
235 * @returns VBox status code.
236 */
237VMMR3DECL(int) MMR3HyperInitFinalize(PVM pVM)
238{
239 LogFlow(("MMR3HyperInitFinalize:\n"));
240
241 /*
242 * Initialize the hyper heap critical section.
243 */
244 int rc = PDMR3CritSectInit(pVM, &pVM->mm.s.pHyperHeapR3->Lock, RT_SRC_POS, "MM-HYPER");
245 AssertRC(rc);
246
247 pVM->mm.s.fPGMInitialized = true;
248
249 LogFlow(("MMR3HyperInitFinalize: returns VINF_SUCCESS\n"));
250 return VINF_SUCCESS;
251}
252
253
254/**
255 * Maps locked R3 virtual memory into the hypervisor region in the GC.
256 *
257 * @return VBox status code.
258 *
259 * @param pVM The cross context VM structure.
260 * @param pvR3 The ring-3 address of the memory, must be page aligned.
261 * @param pvR0 The ring-0 address of the memory, must be page aligned. (optional)
262 * @param cHostPages The number of host pages.
263 * @param paPages The page descriptors.
264 * @param pszDesc Mapping description.
265 * @param pGCPtr Where to store the GC address corresponding to pvR3.
266 */
267static int MMR3HyperMapPages(PVM pVM, void *pvR3, RTR0PTR pvR0, size_t cHostPages, PCSUPPAGE paPages,
268 const char *pszDesc, PRTGCPTR pGCPtr)
269{
270 LogFlow(("MMR3HyperMapPages: pvR3=%p pvR0=%p cHostPages=%zu paPages=%p pszDesc=%p:{%s} pGCPtr=%p\n",
271 pvR3, pvR0, cHostPages, paPages, pszDesc, pszDesc, pGCPtr));
272
273 /*
274 * Validate input.
275 */
276 AssertPtrReturn(pvR3, VERR_INVALID_POINTER);
277 AssertPtrReturn(paPages, VERR_INVALID_POINTER);
278 AssertReturn(cHostPages > 0, VERR_PAGE_COUNT_OUT_OF_RANGE);
279 AssertReturn(cHostPages <= VBOX_MAX_ALLOC_PAGE_COUNT, VERR_PAGE_COUNT_OUT_OF_RANGE);
280 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
281 AssertReturn(*pszDesc, VERR_INVALID_PARAMETER);
282 AssertPtrReturn(pGCPtr, VERR_INVALID_PARAMETER);
283 AssertReturn(GUEST_PAGE_SIZE == HOST_PAGE_SIZE, VERR_NOT_SUPPORTED);
284
285 /*
286 * Add the memory to the hypervisor area.
287 */
288 RTGCPTR GCPtr;
289 PMMLOOKUPHYPER pLookup;
290 int rc = mmR3HyperMap(pVM, cHostPages << HOST_PAGE_SHIFT, pszDesc, &GCPtr, &pLookup);
291 if (RT_SUCCESS(rc))
292 {
293 /*
294 * Copy the physical page addresses and tell PGM about them.
295 */
296 PRTHCPHYS paHCPhysPages = (PRTHCPHYS)MMR3HeapAlloc(pVM, MM_TAG_MM, sizeof(RTHCPHYS) * cHostPages);
297 if (paHCPhysPages)
298 {
299 bool const fDriverless = SUPR3IsDriverless();
300 for (size_t i = 0; i < cHostPages; i++)
301 {
302 AssertReleaseMsgReturn( ( paPages[i].Phys != 0
303 && paPages[i].Phys != NIL_RTHCPHYS
304 && !(paPages[i].Phys & HOST_PAGE_OFFSET_MASK))
305 || fDriverless,
306 ("i=%#zx Phys=%RHp %s\n", i, paPages[i].Phys, pszDesc),
307 VERR_INTERNAL_ERROR);
308 paHCPhysPages[i] = paPages[i].Phys;
309 }
310
311 pLookup->enmType = MMLOOKUPHYPERTYPE_LOCKED;
312 pLookup->u.Locked.pvR3 = pvR3;
313 pLookup->u.Locked.pvR0 = pvR0;
314 pLookup->u.Locked.paHCPhysPages = paHCPhysPages;
315
316 /* done. */
317 *pGCPtr = GCPtr;
318 return rc;
319 }
320 /* Don't care about failure clean, we're screwed if this fails anyway. */
321 }
322
323 return rc;
324}
325
326
327/**
328 * Reserves an electric fence page.
329 *
330 * @returns VBox status code.
331 * @param pVM The cross context VM structure.
332 */
333static int MMR3HyperReserveFence(PVM pVM)
334{
335 RT_NOREF(pVM);
336 return VINF_SUCCESS;
337}
338
339
340/**
341 * Adds memory to the hypervisor memory arena.
342 *
343 * @return VBox status code.
344 * @param pVM The cross context VM structure.
345 * @param cb Size of the memory. Will be rounded up to nearest page.
346 * @param pszDesc The description of the memory.
347 * @param pGCPtr Where to store the GC address.
348 * @param ppLookup Where to store the pointer to the lookup record.
349 * @remark We assume the threading structure of VBox imposes natural
350 * serialization of most functions, this one included.
351 */
352static int mmR3HyperMap(PVM pVM, const size_t cb, const char *pszDesc, PRTGCPTR pGCPtr, PMMLOOKUPHYPER *ppLookup)
353{
354 /*
355 * Validate input.
356 */
357 const uint32_t cbAligned = RT_ALIGN_32(cb, GUEST_PAGE_SIZE);
358 AssertReturn(cbAligned >= cb, VERR_INVALID_PARAMETER);
359 if (pVM->mm.s.offHyperNextStatic + cbAligned >= pVM->mm.s.cbHyperArea) /* don't use the last page, it's a fence. */
360 {
361 AssertMsgFailed(("Out of static mapping space in the HMA! offHyperAreaGC=%x cbAligned=%x cbHyperArea=%x\n",
362 pVM->mm.s.offHyperNextStatic, cbAligned, pVM->mm.s.cbHyperArea));
363 return VERR_NO_MEMORY;
364 }
365
366 /*
367 * Allocate lookup record.
368 */
369 PMMLOOKUPHYPER pLookup;
370 int rc = MMHyperAlloc(pVM, sizeof(*pLookup), 1, MM_TAG_MM, (void **)&pLookup);
371 if (RT_SUCCESS(rc))
372 {
373 /*
374 * Initialize it and insert it.
375 */
376 pLookup->offNext = pVM->mm.s.offLookupHyper;
377 pLookup->cb = cbAligned;
378 pLookup->off = pVM->mm.s.offHyperNextStatic;
379 pVM->mm.s.offLookupHyper = (uint8_t *)pLookup - (uint8_t *)pVM->mm.s.pHyperHeapR3;
380 if (pLookup->offNext != (int32_t)NIL_OFFSET)
381 pLookup->offNext -= pVM->mm.s.offLookupHyper;
382 pLookup->enmType = MMLOOKUPHYPERTYPE_INVALID;
383 memset(&pLookup->u, 0xff, sizeof(pLookup->u));
384 pLookup->pszDesc = pszDesc;
385
386 /* Mapping. */
387 *pGCPtr = pVM->mm.s.pvHyperAreaGC + pVM->mm.s.offHyperNextStatic;
388 pVM->mm.s.offHyperNextStatic += cbAligned;
389
390 /* Return pointer. */
391 *ppLookup = pLookup;
392 }
393
394 AssertRC(rc);
395 LogFlow(("mmR3HyperMap: returns %Rrc *pGCPtr=%RGv\n", rc, *pGCPtr));
396 return rc;
397}
398
399
400/**
401 * Allocates a new heap.
402 *
403 * @returns VBox status code.
404 * @param pVM The cross context VM structure.
405 * @param cb The size of the new heap.
406 * @param ppHeap Where to store the heap pointer on successful return.
407 * @param pR0PtrHeap Where to store the ring-0 address of the heap on
408 * success.
409 */
410static int mmR3HyperHeapCreate(PVM pVM, const size_t cb, PMMHYPERHEAP *ppHeap, PRTR0PTR pR0PtrHeap)
411{
412 /*
413 * Allocate the hypervisor heap.
414 */
415 const uint32_t cbAligned = RT_ALIGN_32(cb, RT_MAX(GUEST_PAGE_SIZE, HOST_PAGE_SIZE));
416 AssertReturn(cbAligned >= cb, VERR_INVALID_PARAMETER);
417 uint32_t const cHostPages = cbAligned >> HOST_PAGE_SHIFT;
418 PSUPPAGE paPages = (PSUPPAGE)MMR3HeapAlloc(pVM, MM_TAG_MM, cHostPages * sizeof(paPages[0]));
419 if (!paPages)
420 return VERR_NO_MEMORY;
421 void *pv;
422 RTR0PTR pvR0 = NIL_RTR0PTR;
423 int rc = SUPR3PageAllocEx(cHostPages,
424 0 /*fFlags*/,
425 &pv,
426 &pvR0,
427 paPages);
428 if (RT_SUCCESS(rc))
429 {
430 Assert((pvR0 != NIL_RTR0PTR && !(HOST_PAGE_OFFSET_MASK & pvR0)) || SUPR3IsDriverless());
431 memset(pv, 0, cbAligned);
432
433 /*
434 * Initialize the heap and first free chunk.
435 */
436 PMMHYPERHEAP pHeap = (PMMHYPERHEAP)pv;
437 pHeap->u32Magic = MMHYPERHEAP_MAGIC;
438 pHeap->pbHeapR3 = (uint8_t *)pHeap + MMYPERHEAP_HDR_SIZE;
439 pHeap->pbHeapR0 = pvR0 + MMYPERHEAP_HDR_SIZE;
440 //pHeap->pbHeapRC = 0; // set by mmR3HyperHeapMap()
441 pHeap->pVMR3 = pVM;
442 pHeap->pVMR0 = pVM->pVMR0ForCall;
443 pHeap->pVMRC = pVM->pVMRC;
444 pHeap->cbHeap = cbAligned - MMYPERHEAP_HDR_SIZE;
445 pHeap->cbFree = pHeap->cbHeap - sizeof(MMHYPERCHUNK);
446 //pHeap->offFreeHead = 0;
447 //pHeap->offFreeTail = 0;
448 pHeap->offPageAligned = pHeap->cbHeap;
449 //pHeap->HyperHeapStatTree = 0;
450 pHeap->paPages = paPages;
451
452 PMMHYPERCHUNKFREE pFree = (PMMHYPERCHUNKFREE)pHeap->pbHeapR3;
453 pFree->cb = pHeap->cbFree;
454 //pFree->core.offNext = 0;
455 MMHYPERCHUNK_SET_TYPE(&pFree->core, MMHYPERCHUNK_FLAGS_FREE);
456 pFree->core.offHeap = -(int32_t)MMYPERHEAP_HDR_SIZE;
457 //pFree->offNext = 0;
458 //pFree->offPrev = 0;
459
460 STAMR3Register(pVM, &pHeap->cbHeap, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, "/MM/HyperHeap/cbHeap", STAMUNIT_BYTES, "The heap size.");
461 STAMR3Register(pVM, &pHeap->cbFree, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, "/MM/HyperHeap/cbFree", STAMUNIT_BYTES, "The free space.");
462
463 *ppHeap = pHeap;
464 *pR0PtrHeap = pvR0;
465 return VINF_SUCCESS;
466 }
467 AssertMsgFailed(("SUPR3PageAllocEx(%d,,,,) -> %Rrc\n", cbAligned >> HOST_PAGE_SHIFT, rc));
468
469 *ppHeap = NULL;
470 return rc;
471}
472
473
474/**
475 * Allocates a new heap.
476 */
477static int mmR3HyperHeapMap(PVM pVM, PMMHYPERHEAP pHeap, PRTGCPTR ppHeapGC)
478{
479 Assert(RT_ALIGN_Z(pHeap->cbHeap + MMYPERHEAP_HDR_SIZE, GUEST_PAGE_SIZE) == pHeap->cbHeap + MMYPERHEAP_HDR_SIZE);
480 Assert(RT_ALIGN_Z(pHeap->cbHeap + MMYPERHEAP_HDR_SIZE, HOST_PAGE_SIZE) == pHeap->cbHeap + MMYPERHEAP_HDR_SIZE);
481 Assert(pHeap->pbHeapR0);
482 Assert(pHeap->paPages);
483 int rc = MMR3HyperMapPages(pVM,
484 pHeap,
485 pHeap->pbHeapR0 - MMYPERHEAP_HDR_SIZE,
486 (pHeap->cbHeap + MMYPERHEAP_HDR_SIZE) >> HOST_PAGE_SHIFT,
487 pHeap->paPages,
488 "Heap", ppHeapGC);
489 if (RT_SUCCESS(rc))
490 {
491 pHeap->pVMRC = pVM->pVMRC;
492 pHeap->pbHeapRC = *ppHeapGC + MMYPERHEAP_HDR_SIZE;
493 /* Reserve a page for fencing. */
494 MMR3HyperReserveFence(pVM);
495
496 /* We won't need these any more. */
497 MMR3HeapFree(pHeap->paPages);
498 pHeap->paPages = NULL;
499 }
500 return rc;
501}
502
503
504/**
505 * Allocates memory in the Hypervisor (GC VMM) area which never will
506 * be freed and doesn't have any offset based relation to other heap blocks.
507 *
508 * The latter means that two blocks allocated by this API will not have the
509 * same relative position to each other in GC and HC. In short, never use
510 * this API for allocating nodes for an offset based AVL tree!
511 *
512 * The returned memory is of course zeroed.
513 *
514 * @returns VBox status code.
515 * @param pVM The cross context VM structure.
516 * @param cb Number of bytes to allocate.
517 * @param uAlignment Required memory alignment in bytes.
518 * Values are 0,8,16,32 and GUEST_PAGE_SIZE.
519 * 0 -> default alignment, i.e. 8 bytes.
520 * @param enmTag The statistics tag.
521 * @param ppv Where to store the address to the allocated
522 * memory.
523 * @remark This is assumed not to be used at times when serialization is required.
524 */
525VMMR3DECL(int) MMR3HyperAllocOnceNoRel(PVM pVM, size_t cb, unsigned uAlignment, MMTAG enmTag, void **ppv)
526{
527 return MMR3HyperAllocOnceNoRelEx(pVM, cb, uAlignment, enmTag, 0/*fFlags*/, ppv);
528}
529
530
531/**
532 * Allocates memory in the Hypervisor (GC VMM) area which never will
533 * be freed and doesn't have any offset based relation to other heap blocks.
534 *
535 * The latter means that two blocks allocated by this API will not have the
536 * same relative position to each other in GC and HC. In short, never use
537 * this API for allocating nodes for an offset based AVL tree!
538 *
539 * The returned memory is of course zeroed.
540 *
541 * @returns VBox status code.
542 * @param pVM The cross context VM structure.
543 * @param cb Number of bytes to allocate.
544 * @param uAlignment Required memory alignment in bytes.
545 * Values are 0,8,16,32 and GUEST_PAGE_SIZE.
546 * 0 -> default alignment, i.e. 8 bytes.
547 * @param enmTag The statistics tag.
548 * @param fFlags Flags, see MMHYPER_AONR_FLAGS_KERNEL_MAPPING.
549 * @param ppv Where to store the address to the allocated memory.
550 * @remark This is assumed not to be used at times when serialization is required.
551 */
552VMMR3DECL(int) MMR3HyperAllocOnceNoRelEx(PVM pVM, size_t cb, unsigned uAlignment, MMTAG enmTag, uint32_t fFlags, void **ppv)
553{
554 AssertMsg(cb >= 8, ("Hey! Do you really mean to allocate less than 8 bytes?! cb=%d\n", cb));
555 Assert(!(fFlags & ~(MMHYPER_AONR_FLAGS_KERNEL_MAPPING)));
556
557 /*
558 * Choose between allocating a new chunk of HMA memory
559 * and the heap. We will only do BIG allocations from HMA and
560 * only at creation time.
561 */
562 if ( ( cb < _64K
563 && ( uAlignment != GUEST_PAGE_SIZE
564 || cb < 48*_1K)
565 && !(fFlags & MMHYPER_AONR_FLAGS_KERNEL_MAPPING)
566 )
567 || VMR3GetState(pVM) != VMSTATE_CREATING
568 )
569 {
570 Assert(!(fFlags & MMHYPER_AONR_FLAGS_KERNEL_MAPPING));
571 int rc = MMHyperAlloc(pVM, cb, uAlignment, enmTag, ppv);
572 if ( rc != VERR_MM_HYPER_NO_MEMORY
573 || cb <= 8*_1K)
574 {
575 Log2(("MMR3HyperAllocOnceNoRel: cb=%#zx uAlignment=%#x returns %Rrc and *ppv=%p\n",
576 cb, uAlignment, rc, *ppv));
577 return rc;
578 }
579 }
580
581 /*
582 * Validate alignment.
583 */
584 switch (uAlignment)
585 {
586 case 0:
587 case 8:
588 case 16:
589 case 32:
590 case GUEST_PAGE_SIZE:
591 break;
592 default:
593 AssertMsgFailed(("Invalid alignment %u\n", uAlignment));
594 return VERR_INVALID_PARAMETER;
595 }
596
597 /*
598 * Allocate the pages and map them into HMA space.
599 */
600 uint32_t const cbAligned = RT_ALIGN_32(cb, RT_MAX(GUEST_PAGE_SIZE, HOST_PAGE_SIZE));
601 AssertReturn(cbAligned >= cb, VERR_INVALID_PARAMETER);
602 uint32_t const cHostPages = cbAligned >> HOST_PAGE_SHIFT;
603 PSUPPAGE paPages = (PSUPPAGE)RTMemTmpAlloc(cHostPages * sizeof(paPages[0]));
604 if (!paPages)
605 return VERR_NO_TMP_MEMORY;
606 void *pvPages;
607 RTR0PTR pvR0 = NIL_RTR0PTR;
608 int rc = SUPR3PageAllocEx(cHostPages,
609 0 /*fFlags*/,
610 &pvPages,
611 &pvR0,
612 paPages);
613 if (RT_SUCCESS(rc))
614 {
615 Assert(pvR0 != NIL_RTR0PTR || SUPR3IsDriverless());
616 memset(pvPages, 0, cbAligned);
617
618 RTGCPTR GCPtr;
619 rc = MMR3HyperMapPages(pVM,
620 pvPages,
621 pvR0,
622 cHostPages,
623 paPages,
624 MMR3HeapAPrintf(pVM, MM_TAG_MM, "alloc once (%s)", mmGetTagName(enmTag)),
625 &GCPtr);
626 /* not needed anymore */
627 RTMemTmpFree(paPages);
628 if (RT_SUCCESS(rc))
629 {
630 *ppv = pvPages;
631 Log2(("MMR3HyperAllocOnceNoRel: cbAligned=%#x uAlignment=%#x returns VINF_SUCCESS and *ppv=%p\n",
632 cbAligned, uAlignment, *ppv));
633 MMR3HyperReserveFence(pVM);
634 return rc;
635 }
636 AssertMsgFailed(("Failed to allocate %zd bytes! %Rrc\n", cbAligned, rc));
637 SUPR3PageFreeEx(pvPages, cHostPages);
638
639
640 /*
641 * HACK ALERT! Try allocate it off the heap so that we don't freak
642 * out during vga/vmmdev mmio2 allocation with certain ram sizes.
643 */
644 /** @todo make a proper fix for this so we will never end up in this kind of situation! */
645 Log(("MMR3HyperAllocOnceNoRel: MMR3HyperMapHCRam failed with rc=%Rrc, try MMHyperAlloc(,%#x,,) instead\n", rc, cb));
646 int rc2 = MMHyperAlloc(pVM, cb, uAlignment, enmTag, ppv);
647 if (RT_SUCCESS(rc2))
648 {
649 Log2(("MMR3HyperAllocOnceNoRel: cb=%#x uAlignment=%#x returns %Rrc and *ppv=%p\n",
650 cb, uAlignment, rc, *ppv));
651 return rc;
652 }
653 }
654 else
655 AssertMsgFailed(("Failed to allocate %zd bytes! %Rrc\n", cbAligned, rc));
656
657 if (rc == VERR_NO_MEMORY)
658 rc = VERR_MM_HYPER_NO_MEMORY;
659 LogRel(("MMR3HyperAllocOnceNoRel: cb=%#zx uAlignment=%#x returns %Rrc\n", cb, uAlignment, rc));
660 return rc;
661}
662
663
664/**
665 * Info handler for 'hma', it dumps the list of lookup records for the hypervisor memory area.
666 *
667 * @param pVM The cross context VM structure.
668 * @param pHlp Callback functions for doing output.
669 * @param pszArgs Argument string. Optional and specific to the handler.
670 */
671static DECLCALLBACK(void) mmR3HyperInfoHma(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
672{
673 NOREF(pszArgs);
674
675 pHlp->pfnPrintf(pHlp, "Hypervisor Memory Area (HMA) Layout: Base %RGv, 0x%08x bytes\n",
676 pVM->mm.s.pvHyperAreaGC, pVM->mm.s.cbHyperArea);
677
678 PMMLOOKUPHYPER pLookup = (PMMLOOKUPHYPER)((uint8_t *)pVM->mm.s.pHyperHeapR3 + pVM->mm.s.offLookupHyper);
679 for (;;)
680 {
681 switch (pLookup->enmType)
682 {
683 case MMLOOKUPHYPERTYPE_LOCKED:
684 pHlp->pfnPrintf(pHlp, "%RGv-%RGv %RHv %RHv LOCKED %-*s %s\n",
685 pLookup->off + pVM->mm.s.pvHyperAreaGC,
686 pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
687 pLookup->u.Locked.pvR3,
688 pLookup->u.Locked.pvR0,
689 sizeof(RTHCPTR) * 2, "",
690 pLookup->pszDesc);
691 break;
692
693 case MMLOOKUPHYPERTYPE_HCPHYS:
694 pHlp->pfnPrintf(pHlp, "%RGv-%RGv %RHv %RHv HCPHYS %RHp %s\n",
695 pLookup->off + pVM->mm.s.pvHyperAreaGC,
696 pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
697 pLookup->u.HCPhys.pvR3,
698 pLookup->u.HCPhys.pvR0,
699 pLookup->u.HCPhys.HCPhys,
700 pLookup->pszDesc);
701 break;
702
703 case MMLOOKUPHYPERTYPE_GCPHYS:
704 pHlp->pfnPrintf(pHlp, "%RGv-%RGv %*s GCPHYS %RGp%*s %s\n",
705 pLookup->off + pVM->mm.s.pvHyperAreaGC,
706 pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
707 sizeof(RTHCPTR) * 2 * 2 + 1, "",
708 pLookup->u.GCPhys.GCPhys, RT_ABS((int)(sizeof(RTHCPHYS) - sizeof(RTGCPHYS))) * 2, "",
709 pLookup->pszDesc);
710 break;
711
712 case MMLOOKUPHYPERTYPE_MMIO2:
713 pHlp->pfnPrintf(pHlp, "%RGv-%RGv %*s MMIO2 %RGp%*s %s\n",
714 pLookup->off + pVM->mm.s.pvHyperAreaGC,
715 pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
716 sizeof(RTHCPTR) * 2 * 2 + 1, "",
717 pLookup->u.MMIO2.off, RT_ABS((int)(sizeof(RTHCPHYS) - sizeof(RTGCPHYS))) * 2, "",
718 pLookup->pszDesc);
719 break;
720
721 case MMLOOKUPHYPERTYPE_DYNAMIC:
722 pHlp->pfnPrintf(pHlp, "%RGv-%RGv %*s DYNAMIC %*s %s\n",
723 pLookup->off + pVM->mm.s.pvHyperAreaGC,
724 pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
725 sizeof(RTHCPTR) * 2 * 2 + 1, "",
726 sizeof(RTHCPTR) * 2, "",
727 pLookup->pszDesc);
728 break;
729
730 default:
731 AssertMsgFailed(("enmType=%d\n", pLookup->enmType));
732 break;
733 }
734
735 /* next */
736 if ((unsigned)pLookup->offNext == NIL_OFFSET)
737 break;
738 pLookup = (PMMLOOKUPHYPER)((uint8_t *)pLookup + pLookup->offNext);
739 }
740}
741
742
#if 0
/**
 * Re-allocates memory from the hyper heap.
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pvOld           The existing block of memory in the hyper heap to
 *                          re-allocate (can be NULL).
 * @param   cbOld           Size of the existing block.
 * @param   uAlignmentNew   Required memory alignment in bytes. Values are
 *                          0,8,16,32 and GUEST_PAGE_SIZE. 0 -> default
 *                          alignment, i.e. 8 bytes.
 * @param   enmTagNew       The statistics tag.
 * @param   cbNew           The required size of the new block.
 * @param   ppv             Where to store the address to the re-allocated
 *                          block.
 *
 * @remarks This does not work like normal realloc() on failure, the memory
 *          pointed to by @a pvOld is lost if there isn't sufficient space on
 *          the hyper heap for the re-allocation to succeed.
 */
VMMR3DECL(int) MMR3HyperRealloc(PVM pVM, void *pvOld, size_t cbOld, unsigned uAlignmentNew, MMTAG enmTagNew, size_t cbNew,
                                void **ppv)
{
    /* Degenerate cases: plain allocation, plain free, or unchanged size. */
    if (!pvOld)
        return MMHyperAlloc(pVM, cbNew, uAlignmentNew, enmTagNew, ppv);
    if (!cbNew)
        return MMHyperFree(pVM, pvOld);
    if (cbOld == cbNew)
        return VINF_SUCCESS;

    /* Stash away the data that must survive the free+alloc cycle. */
    size_t const cbData = RT_MIN(cbNew, cbOld);
    void *pvSaved = RTMemTmpAlloc(cbData);
    if (RT_UNLIKELY(!pvSaved))
    {
        /* NB: pvOld is deliberately lost here, per the @remarks above. */
        MMHyperFree(pVM, pvOld);
        return VERR_NO_TMP_MEMORY;
    }
    memcpy(pvSaved, pvOld, cbData);

    int rc = MMHyperFree(pVM, pvOld);
    if (RT_SUCCESS(rc))
    {
        rc = MMHyperAlloc(pVM, cbNew, uAlignmentNew, enmTagNew, ppv);
        if (RT_SUCCESS(rc))
        {
            Assert(cbData <= cbNew);
            memcpy(*ppv, pvSaved, cbData);
        }
    }
    else
        AssertMsgFailed(("Failed to free hyper heap block pvOld=%p cbOld=%u\n", pvOld, cbOld));

    RTMemTmpFree(pvSaved);
    return rc;
}
#endif
802
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette