VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/MMHyper.cpp@92780

Last change on this file since 92780 was 92703, checked in by vboxsync, 3 years ago

VMM: Trying to cope without the support driver... bugref:10138

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 31.1 KB
/* $Id: MMHyper.cpp 92703 2021-12-02 12:45:58Z vboxsync $ */
/** @file
 * MM - Memory Manager - Hypervisor Memory Area.
 */

/*
 * Copyright (C) 2006-2020 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_MM_HYPER
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/dbgf.h>
#include "MMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/gvm.h>
#include <VBox/err.h>
#include <VBox/param.h>
#include <VBox/log.h>
#include <iprt/alloc.h>
#include <iprt/assert.h>
#include <iprt/string.h>


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
static int mmR3HyperMap(PVM pVM, const size_t cb, const char *pszDesc, PRTGCPTR pGCPtr, PMMLOOKUPHYPER *ppLookup);
static int mmR3HyperHeapCreate(PVM pVM, const size_t cb, PMMHYPERHEAP *ppHeap, PRTR0PTR pR0PtrHeap);
static int mmR3HyperHeapMap(PVM pVM, PMMHYPERHEAP pHeap, PRTGCPTR ppHeapGC);
static DECLCALLBACK(void) mmR3HyperInfoHma(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
static int MMR3HyperReserveFence(PVM pVM);
static int MMR3HyperMapPages(PVM pVM, void *pvR3, RTR0PTR pvR0, size_t cPages, PCSUPPAGE paPages,
                             const char *pszDesc, PRTGCPTR pGCPtr);

/**
 * Determine the default heap size.
 *
 * @returns The heap size in bytes.
 * @param   pVM     The cross context VM structure.
 */
static uint32_t mmR3HyperComputeHeapSize(PVM pVM)
{
    /** @todo Redo after moving allocations off the hyper heap. */

    /*
     * Gather parameters.
     */
    bool fCanUseLargerHeap = true;
    //bool fCanUseLargerHeap;
    //int rc = CFGMR3QueryBoolDef(CFGMR3GetChild(CFGMR3GetRoot(pVM), "MM"), "CanUseLargerHeap", &fCanUseLargerHeap, false);
    //AssertStmt(RT_SUCCESS(rc), fCanUseLargerHeap = false);

    uint64_t cbRam;
    int rc = CFGMR3QueryU64(CFGMR3GetRoot(pVM), "RamSize", &cbRam);
    AssertStmt(RT_SUCCESS(rc), cbRam = _1G);

    /*
     * We need to keep saved state compatibility if raw-mode is an option,
     * so let's filter out that case first.
     */
    if (   !fCanUseLargerHeap
        && VM_IS_RAW_MODE_ENABLED(pVM)
        && cbRam < 16*_1G64)
        return 1280 * _1K;

    /*
     * Calculate the heap size.
     */
    uint32_t cbHeap = _1M;

    /* The newer chipset may have more devices attached, putting additional
       pressure on the heap. */
    if (fCanUseLargerHeap)
        cbHeap += _1M;

    /* More CPUs means some extra memory usage. */
    if (pVM->cCpus > 1)
        cbHeap += pVM->cCpus * _64K;

    /* Lots of memory means extra memory consumption as well (pool). */
    if (cbRam > 16*_1G64)
        cbHeap += _2M; /** @todo figure out exactly how much */

    return RT_ALIGN(cbHeap, _256K);
}
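
/* Example (illustrative only, not part of the source): a worked instance of
   the sizing arithmetic above.  With fCanUseLargerHeap, 4 vCPUs and 32 GiB
   of RAM:
       cbHeap = _1M + _1M + 4 * _64K + _2M = 0x440000 (4.25 MiB)
   which is already a multiple of _256K, so RT_ALIGN() returns it unchanged. */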


/**
 * Initializes the hypervisor related MM stuff without
 * calling down to PGM.
 *
 * PGM is not initialized at this point; PGM relies on
 * the heap for its own initialization.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
int mmR3HyperInit(PVM pVM)
{
    LogFlow(("mmR3HyperInit:\n"));

    /*
     * Decide where in the guest context to map the hypervisor,
     * and set up the various hypervisor area and heap parameters.
     */
    pVM->mm.s.pvHyperAreaGC = (RTGCPTR)MM_HYPER_AREA_ADDRESS;
    pVM->mm.s.cbHyperArea   = MM_HYPER_AREA_MAX_SIZE;
    AssertRelease(RT_ALIGN_T(pVM->mm.s.pvHyperAreaGC, 1 << X86_PD_SHIFT, RTGCPTR) == pVM->mm.s.pvHyperAreaGC);
    Assert(pVM->mm.s.pvHyperAreaGC < 0xff000000);

    /** @todo @bugref{1865}, @bugref{3202}: Change the cbHyperHeap default
     *        depending on whether VT-x/AMD-V is enabled or not! Don't waste
     *        precious kernel space on heap for the PATM.
     */
    PCFGMNODE pMM = CFGMR3GetChild(CFGMR3GetRoot(pVM), "MM");
    uint32_t cbHyperHeap;
    int rc = CFGMR3QueryU32Def(pMM, "cbHyperHeap", &cbHyperHeap, mmR3HyperComputeHeapSize(pVM));
    AssertLogRelRCReturn(rc, rc);

    cbHyperHeap = RT_ALIGN_32(cbHyperHeap, PAGE_SIZE);
    LogRel(("MM: cbHyperHeap=%#x (%u)\n", cbHyperHeap, cbHyperHeap));

    /*
     * Allocate the hypervisor heap.
     *
     * (This must be done before we start adding memory to the
     * hypervisor static area because lookup records are allocated from it.)
     */
    rc = mmR3HyperHeapCreate(pVM, cbHyperHeap, &pVM->mm.s.pHyperHeapR3, &pVM->mm.s.pHyperHeapR0);
    if (RT_SUCCESS(rc))
    {
        /*
         * Make a small head fence to fend off accidental sequential access.
         */
        MMR3HyperReserveFence(pVM);

        /*
         * Map the VM structure into the hypervisor space.
         * Note! Keeping the mappings here for now in case someone is using
         *       MMHyperR3ToR0 or similar.
         */
        AssertCompileSizeAlignment(VM, PAGE_SIZE);
        AssertCompileSizeAlignment(VMCPU, PAGE_SIZE);
        AssertCompileSizeAlignment(GVM, PAGE_SIZE);
        AssertCompileSizeAlignment(GVMCPU, PAGE_SIZE);
        AssertRelease(pVM->cbSelf == sizeof(VM));
        AssertRelease(pVM->cbVCpu == sizeof(VMCPU));
/** @todo get rid of this (don't dare right now because of
 *        possible MMHyperYYToXX use on the VM structure.) */
        RTGCPTR GCPtr;
        if (SUPR3IsDriverless())
            GCPtr = _1G;
        else
        {
            rc = MMR3HyperMapPages(pVM, pVM, pVM->pVMR0ForCall, sizeof(VM) >> PAGE_SHIFT, pVM->paVMPagesR3, "VM", &GCPtr);
            uint32_t offPages = RT_UOFFSETOF_DYN(GVM, aCpus) >> PAGE_SHIFT; /* (Using the _DYN variant avoids -Winvalid-offset) */
            for (uint32_t idCpu = 0; idCpu < pVM->cCpus && RT_SUCCESS(rc); idCpu++, offPages += sizeof(GVMCPU) >> PAGE_SHIFT)
            {
                PVMCPU pVCpu = pVM->apCpusR3[idCpu];
                RTGCPTR GCPtrIgn;
                rc = MMR3HyperMapPages(pVM, pVCpu, pVM->pVMR0ForCall + offPages * PAGE_SIZE,
                                       sizeof(VMCPU) >> PAGE_SHIFT, &pVM->paVMPagesR3[offPages], "VMCPU", &GCPtrIgn);
            }
        }
        if (RT_SUCCESS(rc))
        {
            pVM->pVMRC = (RTRCPTR)GCPtr;
            for (VMCPUID i = 0; i < pVM->cCpus; i++)
                pVM->apCpusR3[i]->pVMRC = pVM->pVMRC;

            /* Reserve a page for fencing. */
            MMR3HyperReserveFence(pVM);

            /*
             * Map the heap into the hypervisor space.
             */
            rc = mmR3HyperHeapMap(pVM, pVM->mm.s.pHyperHeapR3, &GCPtr);
            if (RT_SUCCESS(rc))
            {
                pVM->mm.s.pHyperHeapRC = (RTRCPTR)GCPtr;
                Assert(pVM->mm.s.pHyperHeapRC == GCPtr);

                /*
                 * Register info handlers.
                 */
                DBGFR3InfoRegisterInternal(pVM, "hma", "Show the layout of the Hypervisor Memory Area.", mmR3HyperInfoHma);

                LogFlow(("mmR3HyperInit: returns VINF_SUCCESS\n"));
                return VINF_SUCCESS;
            }
            /* Caller will do proper cleanup. */
        }
    }

    LogFlow(("mmR3HyperInit: returns %Rrc\n", rc));
    return rc;
}
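
/* Example (illustrative only, not part of the source): the offPages stepping
   in the loop above relies on the ring-0 GVM layout -- the shared VM data
   first, then one GVMCPU per vCPU:

       offset 0:                             VM / GVM pages  ("VM" mapping)
       offset RT_UOFFSETOF_DYN(GVM, aCpus):  aCpus[0]        ("VMCPU" mapping)
       + sizeof(GVMCPU):                     aCpus[1]
       ...

   so each iteration advances offPages by sizeof(GVMCPU) >> PAGE_SHIFT. */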


/**
 * Cleans up the hypervisor heap.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
int mmR3HyperTerm(PVM pVM)
{
    if (pVM->mm.s.pHyperHeapR3)
        PDMR3CritSectDelete(pVM, &pVM->mm.s.pHyperHeapR3->Lock);

    return VINF_SUCCESS;
}


/**
 * Finalizes the HMA mapping (obsolete).
 *
 * This is called later during init; most (all) HMA allocations should be done
 * by the time this function is called.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
VMMR3DECL(int) MMR3HyperInitFinalize(PVM pVM)
{
    LogFlow(("MMR3HyperInitFinalize:\n"));

    /*
     * Initialize the hyper heap critical section.
     */
    int rc = PDMR3CritSectInit(pVM, &pVM->mm.s.pHyperHeapR3->Lock, RT_SRC_POS, "MM-HYPER");
    AssertRC(rc);

    pVM->mm.s.fPGMInitialized = true;

    LogFlow(("MMR3HyperInitFinalize: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}


/**
 * Maps locked R3 virtual memory into the hypervisor region in the GC.
 *
 * @return VBox status code.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pvR3        The ring-3 address of the memory, must be page aligned.
 * @param   pvR0        The ring-0 address of the memory, must be page aligned. (optional)
 * @param   cPages      The number of pages.
 * @param   paPages     The page descriptors.
 * @param   pszDesc     Mapping description.
 * @param   pGCPtr      Where to store the GC address corresponding to pvR3.
 */
static int MMR3HyperMapPages(PVM pVM, void *pvR3, RTR0PTR pvR0, size_t cPages, PCSUPPAGE paPages,
                             const char *pszDesc, PRTGCPTR pGCPtr)
{
    LogFlow(("MMR3HyperMapPages: pvR3=%p pvR0=%p cPages=%zu paPages=%p pszDesc=%p:{%s} pGCPtr=%p\n",
             pvR3, pvR0, cPages, paPages, pszDesc, pszDesc, pGCPtr));

    /*
     * Validate input.
     */
    AssertPtrReturn(pvR3, VERR_INVALID_POINTER);
    AssertPtrReturn(paPages, VERR_INVALID_POINTER);
    AssertReturn(cPages > 0, VERR_PAGE_COUNT_OUT_OF_RANGE);
    AssertReturn(cPages <= VBOX_MAX_ALLOC_PAGE_COUNT, VERR_PAGE_COUNT_OUT_OF_RANGE);
    AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
    AssertReturn(*pszDesc, VERR_INVALID_PARAMETER);
    AssertPtrReturn(pGCPtr, VERR_INVALID_PARAMETER);

    /*
     * Add the memory to the hypervisor area.
     */
    RTGCPTR GCPtr;
    PMMLOOKUPHYPER pLookup;
    int rc = mmR3HyperMap(pVM, cPages << PAGE_SHIFT, pszDesc, &GCPtr, &pLookup);
    if (RT_SUCCESS(rc))
    {
        /*
         * Copy the physical page addresses and tell PGM about them.
         */
        PRTHCPHYS paHCPhysPages = (PRTHCPHYS)MMR3HeapAlloc(pVM, MM_TAG_MM, sizeof(RTHCPHYS) * cPages);
        if (paHCPhysPages)
        {
            bool const fDriverless = SUPR3IsDriverless();
            for (size_t i = 0; i < cPages; i++)
            {
                AssertReleaseMsgReturn(   (   paPages[i].Phys != 0
                                           && paPages[i].Phys != NIL_RTHCPHYS
                                           && !(paPages[i].Phys & PAGE_OFFSET_MASK))
                                       || fDriverless,
                                       ("i=%#zx Phys=%RHp %s\n", i, paPages[i].Phys, pszDesc),
                                       VERR_INTERNAL_ERROR);
                paHCPhysPages[i] = paPages[i].Phys;
            }

            pLookup->enmType                = MMLOOKUPHYPERTYPE_LOCKED;
            pLookup->u.Locked.pvR3          = pvR3;
            pLookup->u.Locked.pvR0          = pvR0;
            pLookup->u.Locked.paHCPhysPages = paHCPhysPages;

            /* done. */
            *pGCPtr = GCPtr;
            return rc;
        }
        /* Don't care about failure cleanup here; we're screwed if this fails anyway. */
        rc = VERR_NO_MEMORY;
    }

    return rc;
}


/**
 * Reserves an electric fence page.
 *
 * Currently a no-op; the call sites are kept in place so fencing can be
 * reinstated later.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
static int MMR3HyperReserveFence(PVM pVM)
{
    RT_NOREF(pVM);
    return VINF_SUCCESS;
}


/**
 * Adds memory to the hypervisor memory arena.
 *
 * @return VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   cb          Size of the memory. Will be rounded up to nearest page.
 * @param   pszDesc     The description of the memory.
 * @param   pGCPtr      Where to store the GC address.
 * @param   ppLookup    Where to store the pointer to the lookup record.
 * @remark  We assume the threading structure of VBox imposes natural
 *          serialization of most functions, this one included.
 */
static int mmR3HyperMap(PVM pVM, const size_t cb, const char *pszDesc, PRTGCPTR pGCPtr, PMMLOOKUPHYPER *ppLookup)
{
    /*
     * Validate input.
     */
    const uint32_t cbAligned = RT_ALIGN_32(cb, PAGE_SIZE);
    AssertReturn(cbAligned >= cb, VERR_INVALID_PARAMETER);
    if (pVM->mm.s.offHyperNextStatic + cbAligned >= pVM->mm.s.cbHyperArea) /* don't use the last page, it's a fence. */
    {
        AssertMsgFailed(("Out of static mapping space in the HMA! offHyperAreaGC=%x cbAligned=%x cbHyperArea=%x\n",
                         pVM->mm.s.offHyperNextStatic, cbAligned, pVM->mm.s.cbHyperArea));
        return VERR_NO_MEMORY;
    }

    /*
     * Allocate lookup record.
     */
    PMMLOOKUPHYPER pLookup;
    int rc = MMHyperAlloc(pVM, sizeof(*pLookup), 1, MM_TAG_MM, (void **)&pLookup);
    if (RT_SUCCESS(rc))
    {
        /*
         * Initialize it and insert it.
         */
        pLookup->offNext = pVM->mm.s.offLookupHyper;
        pLookup->cb      = cbAligned;
        pLookup->off     = pVM->mm.s.offHyperNextStatic;
        pVM->mm.s.offLookupHyper = (uint8_t *)pLookup - (uint8_t *)pVM->mm.s.pHyperHeapR3;
        if (pLookup->offNext != (int32_t)NIL_OFFSET)
            pLookup->offNext -= pVM->mm.s.offLookupHyper;
        pLookup->enmType = MMLOOKUPHYPERTYPE_INVALID;
        memset(&pLookup->u, 0xff, sizeof(pLookup->u));
        pLookup->pszDesc = pszDesc;

        /* Mapping. */
        *pGCPtr = pVM->mm.s.pvHyperAreaGC + pVM->mm.s.offHyperNextStatic;
        pVM->mm.s.offHyperNextStatic += cbAligned;

        /* Return pointer. */
        *ppLookup = pLookup;
    }

    AssertRC(rc);
    LogFlow(("mmR3HyperMap: returns %Rrc *pGCPtr=%RGv\n", rc, *pGCPtr));
    return rc;
}
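
/* Example (illustrative only, not part of the source): the lookup records
   form a singly linked list encoded with relative offsets rather than
   pointers, so the chain remains valid in every context (R3, R0, RC).
   Traversal, as done by MMR3HyperHCVirt2HCPhys() and mmR3HyperInfoHma()
   below:

       PMMLOOKUPHYPER pCur = (PMMLOOKUPHYPER)(  (uint8_t *)pVM->mm.s.pHyperHeapR3
                                              + pVM->mm.s.offLookupHyper);
       while ((unsigned)pCur->offNext != NIL_OFFSET)
           pCur = (PMMLOOKUPHYPER)((uint8_t *)pCur + pCur->offNext);
*/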


/**
 * Allocates a new heap.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   cb          The size of the new heap.
 * @param   ppHeap      Where to store the heap pointer on successful return.
 * @param   pR0PtrHeap  Where to store the ring-0 address of the heap on
 *                      success.
 */
static int mmR3HyperHeapCreate(PVM pVM, const size_t cb, PMMHYPERHEAP *ppHeap, PRTR0PTR pR0PtrHeap)
{
    /*
     * Allocate the hypervisor heap.
     */
    const uint32_t cbAligned = RT_ALIGN_32(cb, PAGE_SIZE);
    AssertReturn(cbAligned >= cb, VERR_INVALID_PARAMETER);
    uint32_t const cPages = cbAligned >> PAGE_SHIFT;
    PSUPPAGE paPages = (PSUPPAGE)MMR3HeapAlloc(pVM, MM_TAG_MM, cPages * sizeof(paPages[0]));
    if (!paPages)
        return VERR_NO_MEMORY;
    void *pv;
    RTR0PTR pvR0 = NIL_RTR0PTR;
    int rc = SUPR3PageAllocEx(cPages,
                              0 /*fFlags*/,
                              &pv,
                              &pvR0,
                              paPages);
    if (RT_SUCCESS(rc))
    {
        Assert((pvR0 != NIL_RTR0PTR && !(PAGE_OFFSET_MASK & pvR0)) || SUPR3IsDriverless());
        memset(pv, 0, cbAligned);

        /*
         * Initialize the heap and first free chunk.
         */
        PMMHYPERHEAP pHeap = (PMMHYPERHEAP)pv;
        pHeap->u32Magic             = MMHYPERHEAP_MAGIC;
        pHeap->pbHeapR3             = (uint8_t *)pHeap + MMYPERHEAP_HDR_SIZE;
        pHeap->pbHeapR0             = pvR0 + MMYPERHEAP_HDR_SIZE;
        //pHeap->pbHeapRC           = 0; // set by mmR3HyperHeapMap()
        pHeap->pVMR3                = pVM;
        pHeap->pVMR0                = pVM->pVMR0ForCall;
        pHeap->pVMRC                = pVM->pVMRC;
        pHeap->cbHeap               = cbAligned - MMYPERHEAP_HDR_SIZE;
        pHeap->cbFree               = pHeap->cbHeap - sizeof(MMHYPERCHUNK);
        //pHeap->offFreeHead        = 0;
        //pHeap->offFreeTail        = 0;
        pHeap->offPageAligned       = pHeap->cbHeap;
        //pHeap->HyperHeapStatTree  = 0;
        pHeap->paPages              = paPages;

        PMMHYPERCHUNKFREE pFree = (PMMHYPERCHUNKFREE)pHeap->pbHeapR3;
        pFree->cb                   = pHeap->cbFree;
        //pFree->core.offNext       = 0;
        MMHYPERCHUNK_SET_TYPE(&pFree->core, MMHYPERCHUNK_FLAGS_FREE);
        pFree->core.offHeap         = -(int32_t)MMYPERHEAP_HDR_SIZE;
        //pFree->offNext            = 0;
        //pFree->offPrev            = 0;

        STAMR3Register(pVM, &pHeap->cbHeap, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, "/MM/HyperHeap/cbHeap", STAMUNIT_BYTES, "The heap size.");
        STAMR3Register(pVM, &pHeap->cbFree, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, "/MM/HyperHeap/cbFree", STAMUNIT_BYTES, "The free space.");

        *ppHeap = pHeap;
        *pR0PtrHeap = pvR0;
        return VINF_SUCCESS;
    }
    AssertMsgFailed(("SUPR3PageAllocEx(%d,,,,) -> %Rrc\n", cbAligned >> PAGE_SHIFT, rc));

    *ppHeap = NULL;
    return rc;
}
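
/* Example (illustrative only, not part of the source): layout of the freshly
   created heap block, offsets relative to the start of the allocation:

       0                    MMHYPERHEAP header (MMYPERHEAP_HDR_SIZE bytes)
       MMYPERHEAP_HDR_SIZE  first (free) chunk; pFree->core.offHeap points
                            back at the header via a negative offset
       cbAligned            end of the heap

   cbFree is cbHeap minus one MMHYPERCHUNK, accounting for the chunk header
   that fronts the free space. */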


/**
 * Maps the hypervisor heap into the hypervisor memory area (HMA).
 */
static int mmR3HyperHeapMap(PVM pVM, PMMHYPERHEAP pHeap, PRTGCPTR ppHeapGC)
{
    Assert(RT_ALIGN_Z(pHeap->cbHeap + MMYPERHEAP_HDR_SIZE, PAGE_SIZE) == pHeap->cbHeap + MMYPERHEAP_HDR_SIZE);
    Assert(pHeap->pbHeapR0);
    Assert(pHeap->paPages);
    int rc = MMR3HyperMapPages(pVM,
                               pHeap,
                               pHeap->pbHeapR0 - MMYPERHEAP_HDR_SIZE,
                               (pHeap->cbHeap + MMYPERHEAP_HDR_SIZE) >> PAGE_SHIFT,
                               pHeap->paPages,
                               "Heap", ppHeapGC);
    if (RT_SUCCESS(rc))
    {
        pHeap->pVMRC    = pVM->pVMRC;
        pHeap->pbHeapRC = *ppHeapGC + MMYPERHEAP_HDR_SIZE;
        /* Reserve a page for fencing. */
        MMR3HyperReserveFence(pVM);

        /* We won't need these any more. */
        MMR3HeapFree(pHeap->paPages);
        pHeap->paPages = NULL;
    }
    return rc;
}


/**
 * Allocates memory in the Hypervisor (GC VMM) area which never will
 * be freed and doesn't have any offset based relation to other heap blocks.
 *
 * The latter means that two blocks allocated by this API will not have the
 * same relative position to each other in GC and HC. In short, never use
 * this API for allocating nodes for an offset based AVL tree!
 *
 * The returned memory is of course zeroed.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   cb          Number of bytes to allocate.
 * @param   uAlignment  Required memory alignment in bytes.
 *                      Values are 0,8,16,32 and PAGE_SIZE.
 *                      0 -> default alignment, i.e. 8 bytes.
 * @param   enmTag      The statistics tag.
 * @param   ppv         Where to store the address to the allocated
 *                      memory.
 * @remark  This is assumed not to be used at times when serialization is required.
 */
VMMR3DECL(int) MMR3HyperAllocOnceNoRel(PVM pVM, size_t cb, unsigned uAlignment, MMTAG enmTag, void **ppv)
{
    return MMR3HyperAllocOnceNoRelEx(pVM, cb, uAlignment, enmTag, 0/*fFlags*/, ppv);
}


/**
 * Allocates memory in the Hypervisor (GC VMM) area which never will
 * be freed and doesn't have any offset based relation to other heap blocks.
 *
 * The latter means that two blocks allocated by this API will not have the
 * same relative position to each other in GC and HC. In short, never use
 * this API for allocating nodes for an offset based AVL tree!
 *
 * The returned memory is of course zeroed.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   cb          Number of bytes to allocate.
 * @param   uAlignment  Required memory alignment in bytes.
 *                      Values are 0,8,16,32 and PAGE_SIZE.
 *                      0 -> default alignment, i.e. 8 bytes.
 * @param   enmTag      The statistics tag.
 * @param   fFlags      Flags, see MMHYPER_AONR_FLAGS_KERNEL_MAPPING.
 * @param   ppv         Where to store the address to the allocated memory.
 * @remark  This is assumed not to be used at times when serialization is required.
 */
VMMR3DECL(int) MMR3HyperAllocOnceNoRelEx(PVM pVM, size_t cb, unsigned uAlignment, MMTAG enmTag, uint32_t fFlags, void **ppv)
{
    AssertMsg(cb >= 8, ("Hey! Do you really mean to allocate less than 8 bytes?! cb=%d\n", cb));
    Assert(!(fFlags & ~(MMHYPER_AONR_FLAGS_KERNEL_MAPPING)));

    /*
     * Choose between allocating a new chunk of HMA memory
     * and the heap. We will only do BIG allocations from HMA and
     * only at creation time.
     */
    if (   (   cb < _64K
            && (   uAlignment != PAGE_SIZE
                || cb < 48*_1K)
            && !(fFlags & MMHYPER_AONR_FLAGS_KERNEL_MAPPING)
           )
        || VMR3GetState(pVM) != VMSTATE_CREATING
       )
    {
        Assert(!(fFlags & MMHYPER_AONR_FLAGS_KERNEL_MAPPING));
        int rc = MMHyperAlloc(pVM, cb, uAlignment, enmTag, ppv);
        if (   rc != VERR_MM_HYPER_NO_MEMORY
            || cb <= 8*_1K)
        {
            Log2(("MMR3HyperAllocOnceNoRel: cb=%#zx uAlignment=%#x returns %Rrc and *ppv=%p\n",
                  cb, uAlignment, rc, *ppv));
            return rc;
        }
    }

    /*
     * Validate alignment.
     */
    switch (uAlignment)
    {
        case 0:
        case 8:
        case 16:
        case 32:
        case PAGE_SIZE:
            break;
        default:
            AssertMsgFailed(("Invalid alignment %u\n", uAlignment));
            return VERR_INVALID_PARAMETER;
    }

    /*
     * Allocate the pages and map them into HMA space.
     */
    uint32_t const cbAligned = RT_ALIGN_32(cb, PAGE_SIZE);
    AssertReturn(cbAligned >= cb, VERR_INVALID_PARAMETER);
    uint32_t const cPages = cbAligned >> PAGE_SHIFT;
    PSUPPAGE paPages = (PSUPPAGE)RTMemTmpAlloc(cPages * sizeof(paPages[0]));
    if (!paPages)
        return VERR_NO_TMP_MEMORY;
    void *pvPages;
    RTR0PTR pvR0 = NIL_RTR0PTR;
    int rc = SUPR3PageAllocEx(cPages,
                              0 /*fFlags*/,
                              &pvPages,
                              &pvR0,
                              paPages);
    if (RT_SUCCESS(rc))
    {
        Assert(pvR0 != NIL_RTR0PTR || SUPR3IsDriverless());
        memset(pvPages, 0, cbAligned);

        RTGCPTR GCPtr;
        rc = MMR3HyperMapPages(pVM,
                               pvPages,
                               pvR0,
                               cPages,
                               paPages,
                               MMR3HeapAPrintf(pVM, MM_TAG_MM, "alloc once (%s)", mmGetTagName(enmTag)),
                               &GCPtr);
        /* not needed anymore */
        RTMemTmpFree(paPages);
        if (RT_SUCCESS(rc))
        {
            *ppv = pvPages;
            Log2(("MMR3HyperAllocOnceNoRel: cbAligned=%#x uAlignment=%#x returns VINF_SUCCESS and *ppv=%p\n",
                  cbAligned, uAlignment, *ppv));
            MMR3HyperReserveFence(pVM);
            return rc;
        }
        AssertMsgFailed(("Failed to allocate %zd bytes! %Rrc\n", cbAligned, rc));
        SUPR3PageFreeEx(pvPages, cPages);


        /*
         * HACK ALERT! Try allocate it off the heap so that we don't freak
         * out during vga/vmmdev mmio2 allocation with certain ram sizes.
         */
        /** @todo make a proper fix for this so we will never end up in this kind of situation! */
        Log(("MMR3HyperAllocOnceNoRel: MMR3HyperMapHCRam failed with rc=%Rrc, try MMHyperAlloc(,%#x,,) instead\n", rc, cb));
        int rc2 = MMHyperAlloc(pVM, cb, uAlignment, enmTag, ppv);
        if (RT_SUCCESS(rc2))
        {
            Log2(("MMR3HyperAllocOnceNoRel: cb=%#x uAlignment=%#x returns %Rrc and *ppv=%p\n",
                  cb, uAlignment, rc2, *ppv));
            return rc2; /* return the status of the successful fallback, not the failed mapping rc */
        }
    }
    else
        AssertMsgFailed(("Failed to allocate %zd bytes! %Rrc\n", cbAligned, rc));

    if (rc == VERR_NO_MEMORY)
        rc = VERR_MM_HYPER_NO_MEMORY;
    LogRel(("MMR3HyperAllocOnceNoRel: cb=%#zx uAlignment=%#x returns %Rrc\n", cb, uAlignment, rc));
    return rc;
}
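
/* Example (illustrative only, not part of the source) of how the size and
   state checks above route an allocation:

       cb = 0x2000, uAlignment = 8, during VMSTATE_CREATING
           -> small: served by MMHyperAlloc() off the hyper heap.
       cb = _1M, uAlignment = PAGE_SIZE, during VMSTATE_CREATING
           -> big: served by SUPR3PageAllocEx() + MMR3HyperMapPages().
       any cb once VMR3GetState(pVM) != VMSTATE_CREATING
           -> heap only; new HMA chunks are never added at runtime. */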


/**
 * Convert hypervisor HC virtual address to HC physical address.
 *
 * @returns HC physical address.
 * @param   pVM     The cross context VM structure.
 * @param   pvR3    Host context virtual address.
 */
VMMR3DECL(RTHCPHYS) MMR3HyperHCVirt2HCPhys(PVM pVM, void *pvR3)
{
    PMMLOOKUPHYPER pLookup = (PMMLOOKUPHYPER)((uint8_t *)pVM->mm.s.pHyperHeapR3 + pVM->mm.s.offLookupHyper);
    for (;;)
    {
        switch (pLookup->enmType)
        {
            case MMLOOKUPHYPERTYPE_LOCKED:
            {
                unsigned off = (uint8_t *)pvR3 - (uint8_t *)pLookup->u.Locked.pvR3;
                if (off < pLookup->cb)
                    return pLookup->u.Locked.paHCPhysPages[off >> PAGE_SHIFT] | (off & PAGE_OFFSET_MASK);
                break;
            }

            case MMLOOKUPHYPERTYPE_HCPHYS:
            {
                unsigned off = (uint8_t *)pvR3 - (uint8_t *)pLookup->u.HCPhys.pvR3;
                if (off < pLookup->cb)
                    return pLookup->u.HCPhys.HCPhys + off;
                break;
            }

            case MMLOOKUPHYPERTYPE_GCPHYS:
            case MMLOOKUPHYPERTYPE_MMIO2:
            case MMLOOKUPHYPERTYPE_DYNAMIC:
                /* can't (or don't want to) convert these kinds of records. */
                break;

            default:
                AssertMsgFailed(("enmType=%d\n", pLookup->enmType));
                break;
        }

        /* next */
        if ((unsigned)pLookup->offNext == NIL_OFFSET)
            break;
        pLookup = (PMMLOOKUPHYPER)((uint8_t *)pLookup + pLookup->offNext);
    }

    AssertMsgFailed(("pvR3=%p is not inside the hypervisor memory area!\n", pvR3));
    return NIL_RTHCPHYS;
}
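
/* Example (illustrative only, not part of the source): typical use is
   translating a hyper-area pointer for hardware or page-table purposes.

       void *pv;
       int rc = MMR3HyperAllocOnceNoRel(pVM, _4K, PAGE_SIZE, MM_TAG_MM, &pv);
       if (RT_SUCCESS(rc))
       {
           RTHCPHYS HCPhys = MMR3HyperHCVirt2HCPhys(pVM, pv);
           Assert(HCPhys != NIL_RTHCPHYS);
       }
*/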


/**
 * Info handler for 'hma', it dumps the list of lookup records for the hypervisor memory area.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pHlp        Callback functions for doing output.
 * @param   pszArgs     Argument string. Optional and specific to the handler.
 */
static DECLCALLBACK(void) mmR3HyperInfoHma(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
{
    NOREF(pszArgs);

    pHlp->pfnPrintf(pHlp, "Hypervisor Memory Area (HMA) Layout: Base %RGv, 0x%08x bytes\n",
                    pVM->mm.s.pvHyperAreaGC, pVM->mm.s.cbHyperArea);

    PMMLOOKUPHYPER pLookup = (PMMLOOKUPHYPER)((uint8_t *)pVM->mm.s.pHyperHeapR3 + pVM->mm.s.offLookupHyper);
    for (;;)
    {
        switch (pLookup->enmType)
        {
            case MMLOOKUPHYPERTYPE_LOCKED:
                pHlp->pfnPrintf(pHlp, "%RGv-%RGv %RHv %RHv LOCKED  %-*s %s\n",
                                pLookup->off + pVM->mm.s.pvHyperAreaGC,
                                pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
                                pLookup->u.Locked.pvR3,
                                pLookup->u.Locked.pvR0,
                                sizeof(RTHCPTR) * 2, "",
                                pLookup->pszDesc);
                break;

            case MMLOOKUPHYPERTYPE_HCPHYS:
                pHlp->pfnPrintf(pHlp, "%RGv-%RGv %RHv %RHv HCPHYS  %RHp %s\n",
                                pLookup->off + pVM->mm.s.pvHyperAreaGC,
                                pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
                                pLookup->u.HCPhys.pvR3,
                                pLookup->u.HCPhys.pvR0,
                                pLookup->u.HCPhys.HCPhys,
                                pLookup->pszDesc);
                break;

            case MMLOOKUPHYPERTYPE_GCPHYS:
                pHlp->pfnPrintf(pHlp, "%RGv-%RGv %*s GCPHYS  %RGp%*s %s\n",
                                pLookup->off + pVM->mm.s.pvHyperAreaGC,
                                pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
                                sizeof(RTHCPTR) * 2 * 2 + 1, "",
                                pLookup->u.GCPhys.GCPhys, RT_ABS((int)(sizeof(RTHCPHYS) - sizeof(RTGCPHYS))) * 2, "",
                                pLookup->pszDesc);
                break;

            case MMLOOKUPHYPERTYPE_MMIO2:
                pHlp->pfnPrintf(pHlp, "%RGv-%RGv %*s MMIO2   %RGp%*s %s\n",
                                pLookup->off + pVM->mm.s.pvHyperAreaGC,
                                pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
                                sizeof(RTHCPTR) * 2 * 2 + 1, "",
                                pLookup->u.MMIO2.off, RT_ABS((int)(sizeof(RTHCPHYS) - sizeof(RTGCPHYS))) * 2, "",
                                pLookup->pszDesc);
                break;

            case MMLOOKUPHYPERTYPE_DYNAMIC:
                pHlp->pfnPrintf(pHlp, "%RGv-%RGv %*s DYNAMIC %*s %s\n",
                                pLookup->off + pVM->mm.s.pvHyperAreaGC,
                                pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
                                sizeof(RTHCPTR) * 2 * 2 + 1, "",
                                sizeof(RTHCPTR) * 2, "",
                                pLookup->pszDesc);
                break;

            default:
                AssertMsgFailed(("enmType=%d\n", pLookup->enmType));
                break;
        }

        /* next */
        if ((unsigned)pLookup->offNext == NIL_OFFSET)
            break;
        pLookup = (PMMLOOKUPHYPER)((uint8_t *)pLookup + pLookup->offNext);
    }
}
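
/* Example (illustrative only, not part of the source): the handler
   registered above can be triggered from the debugger console with
   "info hma", or programmatically with:

       DBGFR3Info(pVM->pUVM, "hma", NULL, NULL);
*/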


#if 0
/**
 * Re-allocates memory from the hyper heap.
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pvOld           The existing block of memory in the hyper heap to
 *                          re-allocate (can be NULL).
 * @param   cbOld           Size of the existing block.
 * @param   uAlignmentNew   Required memory alignment in bytes. Values are
 *                          0,8,16,32 and PAGE_SIZE. 0 -> default alignment,
 *                          i.e. 8 bytes.
 * @param   enmTagNew       The statistics tag.
 * @param   cbNew           The required size of the new block.
 * @param   ppv             Where to store the address to the re-allocated
 *                          block.
 *
 * @remarks This does not work like normal realloc() on failure; the memory
 *          pointed to by @a pvOld is lost if there isn't sufficient space on
 *          the hyper heap for the re-allocation to succeed.
 */
VMMR3DECL(int) MMR3HyperRealloc(PVM pVM, void *pvOld, size_t cbOld, unsigned uAlignmentNew, MMTAG enmTagNew, size_t cbNew,
                                void **ppv)
{
    if (!pvOld)
        return MMHyperAlloc(pVM, cbNew, uAlignmentNew, enmTagNew, ppv);

    if (!cbNew && pvOld)
        return MMHyperFree(pVM, pvOld);

    if (cbOld == cbNew)
        return VINF_SUCCESS;

    size_t cbData = RT_MIN(cbNew, cbOld);
    void *pvTmp = RTMemTmpAlloc(cbData);
    if (RT_UNLIKELY(!pvTmp))
    {
        MMHyperFree(pVM, pvOld);
        return VERR_NO_TMP_MEMORY;
    }
    memcpy(pvTmp, pvOld, cbData);

    int rc = MMHyperFree(pVM, pvOld);
    if (RT_SUCCESS(rc))
    {
        rc = MMHyperAlloc(pVM, cbNew, uAlignmentNew, enmTagNew, ppv);
        if (RT_SUCCESS(rc))
        {
            Assert(cbData <= cbNew);
            memcpy(*ppv, pvTmp, cbData);
        }
    }
    else
        AssertMsgFailed(("Failed to free hyper heap block pvOld=%p cbOld=%u\n", pvOld, cbOld));

    RTMemTmpFree(pvTmp);
    return rc;
}
#endif