VirtualBox

source: vbox/trunk/src/VBox/VMM/MMHyper.cpp@ 847

Last change on this file since 847 was 323, checked in by vboxsync, 18 years ago

drop the default, there's just one other call.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 30.7 KB
Line 
1/* $Id: MMHyper.cpp 323 2007-01-25 17:25:01Z vboxsync $ */
2/** @file
3 * MM - Memory Monitor(/Manager) - Hypervisor Memory Area.
4 */
5
6/*
7 * Copyright (C) 2006 InnoTek Systemberatung GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License as published by the Free Software Foundation,
13 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
14 * distribution. VirtualBox OSE is distributed in the hope that it will
15 * be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * If you received this file as part of a commercial VirtualBox
18 * distribution, then only the terms of your commercial VirtualBox
19 * license agreement apply instead of the previous paragraph.
20 */
21
22
23
24
25/*******************************************************************************
26* Header Files *
27*******************************************************************************/
28#define LOG_GROUP LOG_GROUP_MM_HYPER
29#include <VBox/pgm.h>
30#include <VBox/mm.h>
31#include <VBox/dbgf.h>
32#include "MMInternal.h"
33#include <VBox/vm.h>
34#include <VBox/err.h>
35#include <VBox/param.h>
36#include <VBox/log.h>
37#include <iprt/alloc.h>
38#include <iprt/assert.h>
39#include <iprt/string.h>
40
41
42/*******************************************************************************
43* Internal Functions *
44*******************************************************************************/
45static DECLCALLBACK(bool) mmR3HyperRelocateCallback(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew, PGMRELOCATECALL enmMode, void *pvUser);
46static int mmR3HyperMap(PVM pVM, const size_t cb, const char *pszDesc, PRTGCPTR pGCPtr, PMMLOOKUPHYPER *ppLookup);
47static int mmR3HyperHeapCreate(PVM pVM, const size_t cb, PMMHYPERHEAP *ppHeap);
48static int mmR3HyperHeapMap(PVM pVM, PMMHYPERHEAP pHeap, PRTGCPTR ppHeapGC);
49static DECLCALLBACK(void) mmR3HyperInfoHma(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
50
51
/**
 * Initializes the hypervisor related MM stuff without
 * calling down to PGM.
 *
 * PGM is not initialized at this point, PGM relies on
 * the heap to initialize.
 *
 * @returns VBox status.
 * @param   pVM     The VM handle.
 */
int mmr3HyperInit(PVM pVM)
{
    LogFlow(("mmr3HyperInit:\n"));

    /*
     * Decide Hypervisor mapping in the guest context
     * And setup various hypervisor area and heap parameters.
     */
    pVM->mm.s.pvHyperAreaGC = (RTGCPTR)MM_HYPER_AREA_ADDRESS;
    pVM->mm.s.cbHyperArea = MM_HYPER_AREA_MAX_SIZE;
    /* The area must start on a page directory boundary (4MB). */
    AssertRelease(RT_ALIGN_T(pVM->mm.s.pvHyperAreaGC, 1 << X86_PD_SHIFT, RTGCPTR) == pVM->mm.s.pvHyperAreaGC);
    Assert(pVM->mm.s.pvHyperAreaGC < 0xff000000);

    /* Query the heap size from the CFGM tree; fall back to 1280KB when the
       key (or the whole MM node) is absent. Any other failure is fatal. */
    uint32_t cbHyperHeap;
    int rc = CFGMR3QueryU32(CFGMR3GetChild(CFGMR3GetRoot(pVM), "MM"), "cbHyperHeap", &cbHyperHeap);
    if (rc == VERR_CFGM_NO_PARENT || rc == VERR_CFGM_VALUE_NOT_FOUND)
        cbHyperHeap = 1280*_1K;
    else if (VBOX_FAILURE(rc))
    {
        LogRel(("MM/cbHyperHeap query -> %Vrc\n", rc));
        AssertRCReturn(rc, rc);
    }
    cbHyperHeap = RT_ALIGN_32(cbHyperHeap, PAGE_SIZE);

    /*
     * Allocate the hypervisor heap.
     *
     * (This must be done before we start adding memory to the
     * hypervisor static area because lookup records are allocated from it.)
     */
    rc = mmR3HyperHeapCreate(pVM, cbHyperHeap, &pVM->mm.s.pHyperHeapHC);
    if (VBOX_SUCCESS(rc))
    {
        /*
         * Make a small head fence to fend of accidental sequential access.
         */
        MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);

        /*
         * Map the VM structure into the hypervisor space.
         */
        rc = MMR3HyperMapHCPhys(pVM, pVM, pVM->HCPhysVM, sizeof(VM), "VM", &pVM->pVMGC);
        if (VBOX_SUCCESS(rc))
        {
            /* Reserve a page for fencing. */
            MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);

            /*
             * Map the heap into the hypervisor space.
             */
            rc = mmR3HyperHeapMap(pVM, pVM->mm.s.pHyperHeapHC, &pVM->mm.s.pHyperHeapGC);
            if (VBOX_SUCCESS(rc))
            {
                /*
                 * Register info handlers.
                 */
                DBGFR3InfoRegisterInternal(pVM, "hma", "Show the layout of the Hypervisor Memory Area.", mmR3HyperInfoHma);

                LogFlow(("mmr3HyperInit: returns VINF_SUCCESS\n"));
                return VINF_SUCCESS;
            }
            /* Caller will do proper cleanup. */
        }
    }

    LogFlow(("mmr3HyperInit: returns %Vrc\n", rc));
    return rc;
}
129
130
131/**
132 * Finalizes the HMA mapping.
133 *
134 * This is called later during init, most (all) HMA allocations should be done
135 * by the time this function is called.
136 *
137 * @returns VBox status.
138 */
139MMR3DECL(int) MMR3HyperInitFinalize(PVM pVM)
140{
141 LogFlow(("MMR3HyperInitFinalize:\n"));
142
143 /*
144 * Adjust and create the HMA mapping.
145 */
146 while ((RTINT)pVM->mm.s.offHyperNextStatic + 64*_1K < (RTINT)pVM->mm.s.cbHyperArea - _4M)
147 pVM->mm.s.cbHyperArea -= _4M;
148 int rc = PGMR3MapPT(pVM, pVM->mm.s.pvHyperAreaGC, pVM->mm.s.cbHyperArea,
149 mmR3HyperRelocateCallback, NULL, "Hypervisor Memory Area");
150 if (VBOX_FAILURE(rc))
151 return rc;
152 pVM->mm.s.fPGMInitialized = true;
153
154 /*
155 * Do all the delayed mappings.
156 */
157 PMMLOOKUPHYPER pLookup = (PMMLOOKUPHYPER)((uintptr_t)pVM->mm.s.pHyperHeapHC + pVM->mm.s.offLookupHyper);
158 for (;;)
159 {
160 RTGCPTR GCPtr = pVM->mm.s.pvHyperAreaGC + pLookup->off;
161 unsigned cPages = pLookup->cb >> PAGE_SHIFT;
162 switch (pLookup->enmType)
163 {
164 case MMLOOKUPHYPERTYPE_LOCKED:
165 rc = mmr3MapLocked(pVM, pLookup->u.Locked.pLockedMem, GCPtr, 0, cPages, 0);
166 break;
167
168 case MMLOOKUPHYPERTYPE_HCPHYS:
169 rc = PGMMap(pVM, GCPtr, pLookup->u.HCPhys.HCPhys, pLookup->cb, 0);
170 break;
171
172 case MMLOOKUPHYPERTYPE_GCPHYS:
173 {
174 const RTGCPHYS GCPhys = pLookup->u.GCPhys.GCPhys;
175 const size_t cb = pLookup->cb;
176 for (unsigned off = 0; off < cb; off += PAGE_SIZE)
177 {
178 RTHCPHYS HCPhys;
179 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys + off, &HCPhys);
180 if (VBOX_FAILURE(rc))
181 break;
182 rc = PGMMap(pVM, GCPtr + off, HCPhys, PAGE_SIZE, 0);
183 if (VBOX_FAILURE(rc))
184 break;
185 }
186 break;
187 }
188
189 case MMLOOKUPHYPERTYPE_DYNAMIC:
190 /* do nothing here since these are either fences or managed by someone else using PGM. */
191 break;
192
193 default:
194 AssertMsgFailed(("enmType=%d\n", pLookup->enmType));
195 break;
196 }
197
198 if (VBOX_FAILURE(rc))
199 {
200 AssertMsgFailed(("rc=%Vrc cb=%d GCPtr=%VGv enmType=%d pszDesc=%s\n",
201 rc, pLookup->cb, pLookup->enmType, pLookup->pszDesc));
202 return rc;
203 }
204
205 /* next */
206 if (pLookup->offNext == (int32_t)NIL_OFFSET)
207 break;
208 pLookup = (PMMLOOKUPHYPER)((uintptr_t)pLookup + pLookup->offNext);
209 }
210
211 LogFlow(("MMR3HyperInitFinalize: returns VINF_SUCCESS\n"));
212 return VINF_SUCCESS;
213}
214
215
216/**
217 * Callback function which will be called when PGM is trying to find
218 * a new location for the mapping.
219 *
220 * The callback is called in two modes, 1) the check mode and 2) the relocate mode.
221 * In 1) the callback should say if it objects to a suggested new location. If it
222 * accepts the new location, it is called again for doing it's relocation.
223 *
224 *
225 * @returns true if the location is ok.
226 * @returns false if another location should be found.
227 * @param pVM The VM handle.
228 * @param GCPtrOld The old virtual address.
229 * @param GCPtrNew The new virtual address.
230 * @param enmMode Used to indicate the callback mode.
231 * @param pvUser User argument. Ignored.
232 * @remark The return value is no a failure indicator, it's an acceptance
233 * indicator. Relocation can not fail!
234 */
235static DECLCALLBACK(bool) mmR3HyperRelocateCallback(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew, PGMRELOCATECALL enmMode, void *pvUser)
236{
237 switch (enmMode)
238 {
239 /*
240 * Verify location - all locations are good for us.
241 */
242 case PGMRELOCATECALL_SUGGEST:
243 return true;
244
245 /*
246 * Execute the relocation.
247 */
248 case PGMRELOCATECALL_RELOCATE:
249 {
250 /*
251 * Accepted!
252 */
253 AssertMsg(GCPtrOld == pVM->mm.s.pvHyperAreaGC, ("GCPtrOld=%#x pVM->mm.s.pvHyperAreaGC=%#x\n", GCPtrOld, pVM->mm.s.pvHyperAreaGC));
254 Log(("Relocating the hypervisor from %#x to %#x\n", GCPtrOld, GCPtrNew));
255
256 /* relocate our selves and the VM structure. */
257 RTGCINTPTR offDelta = GCPtrNew - GCPtrOld;
258 pVM->pVMGC += offDelta;
259 pVM->mm.s.pvHyperAreaGC += offDelta;
260 pVM->mm.s.pHyperHeapGC += offDelta;
261 pVM->mm.s.pHyperHeapHC->pbHeapGC += offDelta;
262 pVM->mm.s.pHyperHeapHC->pVMGC += pVM->pVMGC;
263
264 /* relocate the rest. */
265 VMR3Relocate(pVM, offDelta);
266 return true;
267 }
268
269 default:
270 AssertMsgFailed(("Invalid relocation mode %d\n", enmMode));
271 }
272
273 return false;
274}
275
276
277/**
278 * Maps contiguous HC physical memory into the hypervisor region in the GC.
279 *
280 * @return VBox status code.
281 *
282 * @param pVM VM handle.
283 * @param pvHC Host context address of the memory. Must be page aligned!
284 * @param HCPhys Host context physical address of the memory to be mapped. Must be page aligned!
285 * @param cb Size of the memory. Will be rounded up to nearest page.
286 * @param pszDesc Description.
287 * @param pGCPtr Where to store the GC address.
288 */
289MMR3DECL(int) MMR3HyperMapHCPhys(PVM pVM, void *pvHC, RTHCPHYS HCPhys, size_t cb, const char *pszDesc, PRTGCPTR pGCPtr)
290{
291 LogFlow(("MMR3HyperMapHCPhys: pvHc=%p HCPhys=%VHp cb=%d pszDesc=%p:{%s} pGCPtr=%p\n", pvHC, HCPhys, (int)cb, pszDesc, pszDesc, pGCPtr));
292
293 /*
294 * Validate input.
295 */
296 AssertReturn(RT_ALIGN_P(pvHC, PAGE_SIZE) == pvHC, VERR_INVALID_PARAMETER);
297 AssertReturn(RT_ALIGN_T(HCPhys, PAGE_SIZE, RTHCPHYS) == HCPhys, VERR_INVALID_PARAMETER);
298 AssertReturn(pszDesc && *pszDesc, VERR_INVALID_PARAMETER);
299
300 /*
301 * Add the memory to the hypervisor area.
302 */
303 uint32_t cbAligned = RT_ALIGN_32(cb, PAGE_SIZE);
304 AssertReturn(cbAligned >= cb, VERR_INVALID_PARAMETER);
305 RTGCPTR GCPtr;
306 PMMLOOKUPHYPER pLookup;
307 int rc = mmR3HyperMap(pVM, cbAligned, pszDesc, &GCPtr, &pLookup);
308 if (VBOX_SUCCESS(rc))
309 {
310 pLookup->enmType = MMLOOKUPHYPERTYPE_HCPHYS;
311 pLookup->u.HCPhys.pvHC = pvHC;
312 pLookup->u.HCPhys.HCPhys = HCPhys;
313
314 /*
315 * Update the page table.
316 */
317 if (pVM->mm.s.fPGMInitialized)
318 rc = PGMMap(pVM, GCPtr, HCPhys, cbAligned, 0);
319 if (VBOX_SUCCESS(rc))
320 *pGCPtr = GCPtr;
321 }
322 return rc;
323}
324
325
326/**
327 * Maps contiguous GC physical memory into the hypervisor region in the GC.
328 *
329 * @return VBox status code.
330 *
331 * @param pVM VM handle.
332 * @param GCPhys Guest context physical address of the memory to be mapped. Must be page aligned!
333 * @param cb Size of the memory. Will be rounded up to nearest page.
334 * @param pszDesc Mapping description.
335 * @param pGCPtr Where to store the GC address.
336 */
337MMR3DECL(int) MMR3HyperMapGCPhys(PVM pVM, RTGCPHYS GCPhys, size_t cb, const char *pszDesc, PRTGCPTR pGCPtr)
338{
339 LogFlow(("MMR3HyperMapGCPhys: GCPhys=%VGp cb=%d pszDesc=%p:{%s} pGCPtr=%p\n", GCPhys, (int)cb, pszDesc, pszDesc, pGCPtr));
340
341 /*
342 * Validate input.
343 */
344 AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
345 AssertReturn(pszDesc && *pszDesc, VERR_INVALID_PARAMETER);
346
347 /*
348 * Add the memory to the hypervisor area.
349 */
350 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
351 RTGCPTR GCPtr;
352 PMMLOOKUPHYPER pLookup;
353 int rc = mmR3HyperMap(pVM, cb, pszDesc, &GCPtr, &pLookup);
354 if (VBOX_SUCCESS(rc))
355 {
356 pLookup->enmType = MMLOOKUPHYPERTYPE_GCPHYS;
357 pLookup->u.GCPhys.GCPhys = GCPhys;
358
359 /*
360 * Update the page table.
361 */
362 for (unsigned off = 0; off < cb; off += PAGE_SIZE)
363 {
364 RTHCPHYS HCPhys;
365 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys + off, &HCPhys);
366 AssertRC(rc);
367 if (VBOX_FAILURE(rc))
368 {
369 AssertMsgFailed(("rc=%Vrc GCPhys=%VGv off=%#x %s\n", rc, GCPhys, off, pszDesc));
370 break;
371 }
372 if (pVM->mm.s.fPGMInitialized)
373 {
374 rc = PGMMap(pVM, GCPtr + off, HCPhys, PAGE_SIZE, 0);
375 AssertRC(rc);
376 if (VBOX_FAILURE(rc))
377 {
378 AssertMsgFailed(("rc=%Vrc GCPhys=%VGv off=%#x %s\n", rc, GCPhys, off, pszDesc));
379 break;
380 }
381 }
382 }
383
384 if (VBOX_SUCCESS(rc) && pGCPtr)
385 *pGCPtr = GCPtr;
386 }
387 return rc;
388}
389
390
/**
 * Locks and Maps HC virtual memory into the hypervisor region in the GC.
 *
 * @return VBox status code.
 *
 * @param pVM       VM handle.
 * @param pvHC      Host context address of the memory (may be not page aligned).
 * @param cb        Size of the memory. Will be rounded up to nearest page.
 * @param fFree     Set this if MM is responsible for freeing the memory using SUPPageFree.
 * @param pszDesc   Mapping description.
 * @param pGCPtr    Where to store the GC address corresponding to pvHC.
 */
MMR3DECL(int) MMR3HyperMapHCRam(PVM pVM, void *pvHC, size_t cb, bool fFree, const char *pszDesc, PRTGCPTR pGCPtr)
{
    LogFlow(("MMR3HyperMapHCRam: pvHc=%p cb=%d fFree=%d pszDesc=%p:{%s} pGCPtr=%p\n", pvHC, (int)cb, fFree, pszDesc, pszDesc, pGCPtr));

    /*
     * Validate input.
     */
    if (    !pvHC
        ||  cb <= 0
        ||  !pszDesc
        ||  !*pszDesc)
    {
        AssertMsgFailed(("Invalid parameter\n"));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Page align address and size.
     *
     * Round the address down to the page boundary and grow the size to cover
     * both the leading misalignment and the trailing partial page.
     */
    void *pvHCPage = (void *)((uintptr_t)pvHC & PAGE_BASE_HC_MASK);
    cb += (uintptr_t)pvHC & PAGE_OFFSET_MASK;
    cb = RT_ALIGN_Z(cb, PAGE_SIZE);

    /*
     * Add the memory to the hypervisor area.
     */
    RTGCPTR GCPtr;
    PMMLOOKUPHYPER pLookup;
    int rc = mmR3HyperMap(pVM, cb, pszDesc, &GCPtr, &pLookup);
    if (VBOX_SUCCESS(rc))
    {
        /*
         * Lock the heap memory and tell PGM about the locked pages.
         */
        PMMLOCKEDMEM pLockedMem;
        rc = mmr3LockMem(pVM, pvHCPage, cb, fFree ? MM_LOCKED_TYPE_HYPER : MM_LOCKED_TYPE_HYPER_NOFREE, &pLockedMem, false /* fSilentFailure */);
        if (VBOX_SUCCESS(rc))
        {
            /* map the stuff into guest address space. */
            /* When PGM is not yet initialized the mapping is deferred to
               MMR3HyperInitFinalize() via the lookup record. */
            if (pVM->mm.s.fPGMInitialized)
                rc = mmr3MapLocked(pVM, pLockedMem, GCPtr, 0, ~(size_t)0, 0);
            if (VBOX_SUCCESS(rc))
            {
                pLookup->enmType = MMLOOKUPHYPERTYPE_LOCKED;
                pLookup->u.Locked.pvHC = pvHC;
                pLookup->u.Locked.pLockedMem = pLockedMem;

                /* done. */
                /* Re-apply the original in-page offset so the returned GC
                   address corresponds exactly to pvHC, not the page start. */
                GCPtr |= (uintptr_t)pvHC & PAGE_OFFSET_MASK;
                *pGCPtr = GCPtr;
                return rc;
            }
            /* Don't care about failure clean, we're screwed if this fails anyway. */
        }
    }

    return rc;
}
461
462
463/**
464 * Reserves a hypervisor memory area.
465 * Most frequent usage is fence pages and dynamically mappings like the guest PD and PDPTR.
466 *
467 * @return VBox status code.
468 *
469 * @param pVM VM handle.
470 * @param cb Size of the memory. Will be rounded up to nearest page.
471 * @param pszDesc Mapping description.
472 * @param pGCPtr Where to store the assigned GC address. Optional.
473 */
474MMR3DECL(int) MMR3HyperReserve(PVM pVM, unsigned cb, const char *pszDesc, PRTGCPTR pGCPtr)
475{
476 LogFlow(("MMR3HyperMapHCRam: cb=%d pszDesc=%p:{%s} pGCPtr=%p\n", (int)cb, pszDesc, pszDesc, pGCPtr));
477
478 /*
479 * Validate input.
480 */
481 if ( cb <= 0
482 || !pszDesc
483 || !*pszDesc)
484 {
485 AssertMsgFailed(("Invalid parameter\n"));
486 return VERR_INVALID_PARAMETER;
487 }
488
489 /*
490 * Add the memory to the hypervisor area.
491 */
492 RTGCPTR GCPtr;
493 PMMLOOKUPHYPER pLookup;
494 int rc = mmR3HyperMap(pVM, cb, pszDesc, &GCPtr, &pLookup);
495 if (VBOX_SUCCESS(rc))
496 {
497 pLookup->enmType = MMLOOKUPHYPERTYPE_DYNAMIC;
498 if (pGCPtr)
499 *pGCPtr = GCPtr;
500 return VINF_SUCCESS;
501 }
502 return rc;
503}
504
505
/**
 * Adds memory to the hypervisor memory arena.
 *
 * @return VBox status code.
 * @param pVM       The VM handle.
 * @param cb        Size of the memory. Will be rounded up to neares page.
 * @param pszDesc   The description of the memory.
 * @param pGCPtr    Where to store the GC address.
 * @param ppLookup  Where to store the pointer to the lookup record.
 * @remark  We assume the threading structure of VBox imposes natural
 *          serialization of most functions, this one included.
 */
static int mmR3HyperMap(PVM pVM, const size_t cb, const char *pszDesc, PRTGCPTR pGCPtr, PMMLOOKUPHYPER *ppLookup)
{
    /*
     * Validate input.
     */
    const uint32_t cbAligned = RT_ALIGN(cb, PAGE_SIZE);
    AssertReturn(cbAligned >= cb, VERR_INVALID_PARAMETER);
    if (pVM->mm.s.offHyperNextStatic + cbAligned >= pVM->mm.s.cbHyperArea) /* don't use the last page, it's a fence. */
    {
        AssertMsgFailed(("Out of static mapping space in the HMA! offHyperAreaGC=%x cbAligned=%x\n",
                         pVM->mm.s.offHyperNextStatic, cbAligned));
        return VERR_NO_MEMORY;
    }

    /*
     * Allocate lookup record.
     */
    PMMLOOKUPHYPER pLookup;
    int rc = MMHyperAlloc(pVM, sizeof(*pLookup), 1, MM_TAG_MM, (void **)&pLookup);
    if (VBOX_SUCCESS(rc))
    {
        /*
         * Initialize it and insert it.
         *
         * The lookup records form a singly linked list of self-relative
         * offsets; the new record becomes the list head (offLookupHyper is
         * relative to the heap base, offNext relative to the record itself).
         */
        pLookup->offNext = pVM->mm.s.offLookupHyper;
        pLookup->cb = cbAligned;
        pLookup->off = pVM->mm.s.offHyperNextStatic;
        pVM->mm.s.offLookupHyper = (char *)pLookup - (char *)pVM->mm.s.pHyperHeapHC;
        if (pLookup->offNext != (int32_t)NIL_OFFSET)
            pLookup->offNext -= pVM->mm.s.offLookupHyper; /* rebase the old head offset to be relative to this record */
        pLookup->enmType = MMLOOKUPHYPERTYPE_INVALID;
        memset(&pLookup->u, 0xff, sizeof(pLookup->u)); /* poison the union until the caller fills it in */
        pLookup->pszDesc = pszDesc;

        /* Mapping. */
        /* Hand out the next chunk of static HMA space. */
        *pGCPtr = pVM->mm.s.pvHyperAreaGC + pVM->mm.s.offHyperNextStatic;
        pVM->mm.s.offHyperNextStatic += cbAligned;

        /* Return pointer. */
        *ppLookup = pLookup;
    }

    AssertRC(rc);
    LogFlow(("mmR3HyperMap: returns %Vrc *pGCPtr=%VGv\n", rc, *pGCPtr));
    return rc;
}
564
565
/**
 * Allocates a new heap.
 *
 * @returns VBox status code.
 * @param pVM       The VM handle.
 * @param cb        The size of the new heap.
 * @param ppHeap    Where to store the heap pointer on successful return.
 */
static int mmR3HyperHeapCreate(PVM pVM, const size_t cb, PMMHYPERHEAP *ppHeap)
{
    /*
     * Allocate the hypervisor heap.
     */
    const uint32_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
    AssertReturn(cbAligned >= cb, VERR_INVALID_PARAMETER);
    void *pv;
    int rc = SUPPageAlloc(cbAligned >> PAGE_SHIFT, &pv);
    if (VBOX_SUCCESS(rc))
    {
        /*
         * Initialize the heap and first free chunk.
         *
         * The header lives at the start of the allocation; the usable heap
         * area begins MMYPERHEAP_HDR_SIZE bytes in and initially holds one
         * big free chunk covering everything.
         * NOTE(review): the commented-out assignments assume SUPPageAlloc
         * returns zeroed pages -- confirm against the SUP API.
         */
        PMMHYPERHEAP pHeap = (PMMHYPERHEAP)pv;
        pHeap->u32Magic = MMHYPERHEAP_MAGIC;
        pHeap->pVMHC = pVM;
        pHeap->pVMGC = pVM->pVMGC;
        pHeap->pbHeapHC = (uint8_t *)pHeap + MMYPERHEAP_HDR_SIZE;
        //pHeap->pbHeapGC = 0; // set by mmR3HyperHeapMap()
        pHeap->cbHeap = cbAligned - MMYPERHEAP_HDR_SIZE;
        pHeap->cbFree = pHeap->cbHeap - sizeof(MMHYPERCHUNK);
        //pHeap->offFreeHead = 0;
        //pHeap->offFreeTail = 0;
        pHeap->offPageAligned = pHeap->cbHeap;
        //pHeap->HyperHeapStatTree = 0;

        PMMHYPERCHUNKFREE pFree = (PMMHYPERCHUNKFREE)pHeap->pbHeapHC;
        pFree->cb = pHeap->cbFree;
        //pFree->core.offNext = 0;
        MMHYPERCHUNK_SET_TYPE(&pFree->core, MMHYPERCHUNK_FLAGS_FREE);
        pFree->core.offHeap = -(int32_t)MMYPERHEAP_HDR_SIZE; /* self-relative offset back to the heap header */
        //pFree->offNext = 0;
        //pFree->offPrev = 0;

        STAMR3Register(pVM, &pHeap->cbHeap, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, "/MM/HyperHeap/cbHeap", STAMUNIT_BYTES, "The heap size.");
        STAMR3Register(pVM, &pHeap->cbFree, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, "/MM/HyperHeap/cbFree", STAMUNIT_BYTES, "The free space.");

        *ppHeap = pHeap;
        return VINF_SUCCESS;
    }
    AssertMsgFailed(("SUPPageAlloc(%d,) -> %Vrc\n", cbAligned >> PAGE_SHIFT, rc));

    *ppHeap = NULL;
    return rc;
}
620
621
622/**
623 * Allocates a new heap.
624 */
625static int mmR3HyperHeapMap(PVM pVM, PMMHYPERHEAP pHeap, PRTGCPTR ppHeapGC)
626{
627 int rc = MMR3HyperMapHCRam(pVM, pHeap, pHeap->cbHeap + MMYPERHEAP_HDR_SIZE, true, "Heap", ppHeapGC);
628 if (VBOX_SUCCESS(rc))
629 {
630 pHeap->pVMGC = pVM->pVMGC;
631 pHeap->pbHeapGC = *ppHeapGC + MMYPERHEAP_HDR_SIZE;
632 /* Reserve a page for fencing. */
633 MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
634 }
635 return rc;
636}
637
638
#if 0
/**
 * Destroys a heap.
 *
 * Disabled: per the comment below, heap teardown happens implicitly when
 * the locked memory backing it is unlocked and freed.
 */
static int mmR3HyperHeapDestroy(PVM pVM, PMMHYPERHEAP pHeap)
{
    /* all this is dealt with when unlocking and freeing locked memory. */
}
#endif
648
649
650/**
651 * Allocates memory in the Hypervisor (GC VMM) area which never will
652 * be freed and doesn't have any offset based relation to other heap blocks.
653 *
654 * The latter means that two blocks allocated by this API will not have the
655 * same relative position to each other in GC and HC. In short, never use
656 * this API for allocating nodes for an offset based AVL tree!
657 *
658 * The returned memory is of course zeroed.
659 *
660 * @returns VBox status code.
661 * @param pVM The VM to operate on.
662 * @param cb Number of bytes to allocate.
663 * @param uAlignment Required memory alignment in bytes.
664 * Values are 0,8,16,32 and PAGE_SIZE.
665 * 0 -> default alignment, i.e. 8 bytes.
666 * @param enmTag The statistics tag.
667 * @param ppv Where to store the address to the allocated
668 * memory.
669 * @remark This is assumed not to be used at times when serialization is required.
670 */
671MMDECL(int) MMR3HyperAllocOnceNoRel(PVM pVM, size_t cb, unsigned uAlignment, MMTAG enmTag, void **ppv)
672{
673 AssertMsg(cb >= 8, ("Hey! Do you really mean to allocate less than 8 bytes?! cb=%d\n", cb));
674 AssertMsg(cb <= _4M, ("Allocating more than 4MB!? (cb=%#x) HMA limit might need adjusting if you allocate more.\n", cb));
675
676 /*
677 * Choose between allocating a new chunk of HMA memory
678 * and the heap. We will only do BIG allocations from HMA.
679 */
680 if ( cb < _64K
681 && ( uAlignment != PAGE_SIZE
682 || cb < 48*_1K))
683 {
684 int rc = MMHyperAlloc(pVM, cb, uAlignment, enmTag, ppv);
685 if ( rc != VERR_MM_HYPER_NO_MEMORY
686 || cb <= 8*_1K)
687 {
688 Log2(("MMR3HyperAllocOnceNoRel: cb=%#x uAlignment=%#x returns %Rrc and *ppv=%p\n",
689 cb, uAlignment, rc, *ppv));
690 return rc;
691 }
692 }
693
694 /*
695 * Validate alignment.
696 */
697 switch (uAlignment)
698 {
699 case 0:
700 case 8:
701 case 16:
702 case 32:
703 case PAGE_SIZE:
704 break;
705 default:
706 AssertMsgFailed(("Invalid alignment %u\n", uAlignment));
707 return VERR_INVALID_PARAMETER;
708 }
709
710 /*
711 * Allocate the pages and the HMA space.
712 */
713 cb = RT_ALIGN(cb, PAGE_SIZE);
714 void *pvPages;
715 int rc = SUPPageAlloc(cb >> PAGE_SHIFT, &pvPages);
716 if (VBOX_SUCCESS(rc))
717 {
718 RTGCPTR GCPtr;
719 rc = MMR3HyperMapHCRam(pVM, pvPages, cb, true, mmR3GetTagName(enmTag), &GCPtr);
720 if (VBOX_SUCCESS(rc))
721 {
722 *ppv = pvPages;
723 Log2(("MMR3HyperAllocOnceNoRel: cb=%#x uAlignment=%#x returns VINF_SUCCESS and *ppv=%p\n",
724 cb, uAlignment, *ppv));
725 return rc;
726 }
727 SUPPageFree(pvPages);
728 }
729 if (rc == VERR_NO_MEMORY)
730 rc = VERR_MM_HYPER_NO_MEMORY;
731 Log2(("MMR3HyperAllocOnceNoRel: cb=%#x uAlignment=%#x returns %Rrc\n", cb, uAlignment, rc));
732 AssertMsgFailed(("Failed to allocate %d bytes!\n", cb));
733 return rc;
734}
735
736
/**
 * Convert hypervisor HC virtual address to HC physical address.
 *
 * Walks the HMA lookup record list and resolves pvHC against the LOCKED
 * and HCPHYS record types.
 *
 * @returns HC physical address, or NIL_RTHCPHYS if pvHC isn't inside the HMA.
 * @param pVM       VM Handle
 * @param pvHC      Host context virtual address.
 */
MMR3DECL(RTHCPHYS) MMR3HyperHCVirt2HCPhys(PVM pVM, void *pvHC)
{
    PMMLOOKUPHYPER pLookup = (PMMLOOKUPHYPER)((char*)pVM->mm.s.pHyperHeapHC + pVM->mm.s.offLookupHyper);
    for (;;)
    {
        switch (pLookup->enmType)
        {
            case MMLOOKUPHYPERTYPE_LOCKED:
            {
                unsigned off = (char *)pvHC - (char *)pLookup->u.Locked.pvHC;
                if (off < pLookup->cb)
                    /* Combine the physical page frame with the in-page offset. */
                    return (pLookup->u.Locked.pLockedMem->aPhysPages[off >> PAGE_SHIFT].Phys & X86_PTE_PAE_PG_MASK) | (off & PAGE_OFFSET_MASK);
                break;
            }

            case MMLOOKUPHYPERTYPE_HCPHYS:
            {
                unsigned off = (char *)pvHC - (char *)pLookup->u.HCPhys.pvHC;
                if (off < pLookup->cb)
                    return pLookup->u.HCPhys.HCPhys + off;
                break;
            }

            case MMLOOKUPHYPERTYPE_GCPHYS:
            case MMLOOKUPHYPERTYPE_DYNAMIC:
                /* cannot convert these kinds of records (no HC virtual backing). */
                break;

            default:
                AssertMsgFailed(("enmType=%d\n", pLookup->enmType));
                break;
        }

        /* next */
        if ((unsigned)pLookup->offNext == NIL_OFFSET)
            break;
        pLookup = (PMMLOOKUPHYPER)((char *)pLookup + pLookup->offNext);
    }

    AssertMsgFailed(("pvHC=%p is not inside the hypervisor memory area!\n", pvHC));
    return NIL_RTHCPHYS;
}
786
787
#if 0 /* unused, not implemented */
/**
 * Convert hypervisor HC physical address to HC virtual address.
 *
 * Thin wrapper over MMR3HyperHCPhys2HCVirtEx that asserts on failure.
 *
 * @returns HC virtual address, or NULL on failure.
 * @param pVM       VM Handle
 * @param HCPhys    Host context physical address.
 */
MMR3DECL(void *) MMR3HyperHCPhys2HCVirt(PVM pVM, RTHCPHYS HCPhys)
{
    void *pv;
    int rc = MMR3HyperHCPhys2HCVirtEx(pVM, HCPhys, &pv);
    if (VBOX_SUCCESS(rc))
        return pv;
    AssertMsgFailed(("Invalid address HCPhys=%x rc=%d\n", HCPhys, rc));
    return NULL;
}


/**
 * Convert hypervisor HC physical address to HC virtual address.
 *
 * @returns VBox status. Currently always VERR_INVALID_POINTER (not implemented).
 * @param pVM       VM Handle
 * @param HCPhys    Host context physical address.
 * @param ppv       Where to store the HC virtual address.
 */
MMR3DECL(int) MMR3HyperHCPhys2HCVirtEx(PVM pVM, RTHCPHYS HCPhys, void **ppv)
{
    /*
     * Linear search.
     */
    /** @todo implement when actually used. */
    return VERR_INVALID_POINTER;
}
#endif /* unused, not implemented */
824
825
826/**
827 * Read hypervisor memory from GC virtual address.
828 *
829 * @returns VBox status.
830 * @param pVM VM handle.
831 * @param pvDst Destination address (HC of course).
832 * @param GCPtr GC virtual address.
833 * @param cb Number of bytes to read.
834 */
835MMR3DECL(int) MMR3HyperReadGCVirt(PVM pVM, void *pvDst, RTGCPTR GCPtr, size_t cb)
836{
837 if (GCPtr - pVM->mm.s.pvHyperAreaGC >= pVM->mm.s.cbHyperArea)
838 return VERR_INVALID_PARAMETER;
839 return PGMR3MapRead(pVM, pvDst, GCPtr, cb);
840}
841
842
/**
 * Info handler for 'hma', it dumps the list of lookup records for the hypervisor memory area.
 *
 * @param pVM       The VM handle.
 * @param pHlp      Callback functions for doing output.
 * @param pszArgs   Argument string. Optional and specific to the handler.
 */
static DECLCALLBACK(void) mmR3HyperInfoHma(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
{
    pHlp->pfnPrintf(pHlp, "Hypervisor Memory Area (HMA) Layout: Base %VGv, 0x%08x bytes\n",
                    pVM->mm.s.pvHyperAreaGC, pVM->mm.s.cbHyperArea);

    /* Walk the lookup record list and print one line per record.
       The '%*s' tricks pad the pointer columns so all record types line up. */
    PMMLOOKUPHYPER pLookup = (PMMLOOKUPHYPER)((char*)pVM->mm.s.pHyperHeapHC + pVM->mm.s.offLookupHyper);
    for (;;)
    {
        switch (pLookup->enmType)
        {
            case MMLOOKUPHYPERTYPE_LOCKED:
                pHlp->pfnPrintf(pHlp, "%VGv-%VGv %VHv LOCKED  %-*s %s\n",
                                pLookup->off + pVM->mm.s.pvHyperAreaGC,
                                pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
                                pLookup->u.Locked.pvHC,
                                sizeof(RTHCPTR) * 2,
                                pLookup->u.Locked.pLockedMem->eType == MM_LOCKED_TYPE_HYPER_NOFREE
                                ? "nofree" : "autofree",
                                pLookup->pszDesc);
                break;

            case MMLOOKUPHYPERTYPE_HCPHYS:
                pHlp->pfnPrintf(pHlp, "%VGv-%VGv %VHv HCPHYS  %VHp %s\n",
                                pLookup->off + pVM->mm.s.pvHyperAreaGC,
                                pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
                                pLookup->u.HCPhys.pvHC, pLookup->u.HCPhys.HCPhys,
                                pLookup->pszDesc);
                break;

            case MMLOOKUPHYPERTYPE_GCPHYS:
                pHlp->pfnPrintf(pHlp, "%VGv-%VGv %*s GCPHYS  %VGp%*s %s\n",
                                pLookup->off + pVM->mm.s.pvHyperAreaGC,
                                pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
                                sizeof(RTHCPTR) * 2, "",
                                pLookup->u.GCPhys.GCPhys, RT_ABS(sizeof(RTHCPHYS) - sizeof(RTGCPHYS)) * 2, "",
                                pLookup->pszDesc);
                break;

            case MMLOOKUPHYPERTYPE_DYNAMIC:
                pHlp->pfnPrintf(pHlp, "%VGv-%VGv %*s DYNAMIC %*s %s\n",
                                pLookup->off + pVM->mm.s.pvHyperAreaGC,
                                pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
                                sizeof(RTHCPTR) * 2, "",
                                sizeof(RTHCPTR) * 2, "",
                                pLookup->pszDesc);
                break;

            default:
                AssertMsgFailed(("enmType=%d\n", pLookup->enmType));
                break;
        }

        /* next */
        if ((unsigned)pLookup->offNext == NIL_OFFSET)
            break;
        pLookup = (PMMLOOKUPHYPER)((char *)pLookup + pLookup->offNext);
    }
}
908
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette