VirtualBox

source: vbox/trunk/src/VBox/VMM/MMHyper.cpp @ 5458

Last change on this file since 5458 was 4071, checked in by vboxsync, 17 years ago

Biggest check-in ever. New source code headers for all (C) innotek files.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 33.9 KB
Line 
1/* $Id: MMHyper.cpp 4071 2007-08-07 17:07:59Z vboxsync $ */
2/** @file
3 * MM - Memory Monitor(/Manager) - Hypervisor Memory Area.
4 */
5
6/*
7 * Copyright (C) 2006-2007 innotek GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License as published by the Free Software Foundation,
13 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
14 * distribution. VirtualBox OSE is distributed in the hope that it will
15 * be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19
20
21/*******************************************************************************
22* Header Files *
23*******************************************************************************/
24#define LOG_GROUP LOG_GROUP_MM_HYPER
25#include <VBox/pgm.h>
26#include <VBox/mm.h>
27#include <VBox/dbgf.h>
28#include "MMInternal.h"
29#include <VBox/vm.h>
30#include <VBox/err.h>
31#include <VBox/param.h>
32#include <VBox/log.h>
33#include <iprt/alloc.h>
34#include <iprt/assert.h>
35#include <iprt/string.h>
36
37
38/*******************************************************************************
39* Internal Functions *
40*******************************************************************************/
41static DECLCALLBACK(bool) mmR3HyperRelocateCallback(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew, PGMRELOCATECALL enmMode, void *pvUser);
42static int mmR3HyperMap(PVM pVM, const size_t cb, const char *pszDesc, PRTGCPTR pGCPtr, PMMLOOKUPHYPER *ppLookup);
43static int mmR3HyperHeapCreate(PVM pVM, const size_t cb, PMMHYPERHEAP *ppHeap);
44static int mmR3HyperHeapMap(PVM pVM, PMMHYPERHEAP pHeap, PRTGCPTR ppHeapGC);
45static DECLCALLBACK(void) mmR3HyperInfoHma(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
46
47
48/**
49 * Initializes the hypervisor related MM stuff without
50 * calling down to PGM.
51 *
52 * PGM is not initialized at this point; PGM relies on
53 * the heap to initialize.
54 *
55 * @returns VBox status.
56 */
57int mmr3HyperInit(PVM pVM)
58{
59 LogFlow(("mmr3HyperInit:\n"));
60
61 /*
62 * Decide the hypervisor mapping in the guest context
63 * and set up the various hypervisor area and heap parameters.
64 */
65 pVM->mm.s.pvHyperAreaGC = (RTGCPTR)MM_HYPER_AREA_ADDRESS;
66 pVM->mm.s.cbHyperArea = MM_HYPER_AREA_MAX_SIZE;
67 AssertRelease(RT_ALIGN_T(pVM->mm.s.pvHyperAreaGC, 1 << X86_PD_SHIFT, RTGCPTR) == pVM->mm.s.pvHyperAreaGC);
68 Assert(pVM->mm.s.pvHyperAreaGC < 0xff000000);
69
70 uint32_t cbHyperHeap;
71 int rc = CFGMR3QueryU32(CFGMR3GetChild(CFGMR3GetRoot(pVM), "MM"), "cbHyperHeap", &cbHyperHeap);
72 if (rc == VERR_CFGM_NO_PARENT || rc == VERR_CFGM_VALUE_NOT_FOUND)
73 cbHyperHeap = 1280*_1K;
74 else if (VBOX_FAILURE(rc))
75 {
76 LogRel(("MM/cbHyperHeap query -> %Vrc\n", rc));
77 AssertRCReturn(rc, rc);
78 }
79 cbHyperHeap = RT_ALIGN_32(cbHyperHeap, PAGE_SIZE);
80
81 /*
82 * Allocate the hypervisor heap.
83 *
84 * (This must be done before we start adding memory to the
85 * hypervisor static area because lookup records are allocated from it.)
86 */
87 rc = mmR3HyperHeapCreate(pVM, cbHyperHeap, &pVM->mm.s.pHyperHeapHC);
88 if (VBOX_SUCCESS(rc))
89 {
90 /*
91 * Make a small head fence to fend off accidental sequential access.
92 */
93 MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
94
95 /*
96 * Map the VM structure into the hypervisor space.
97 */
98 rc = MMR3HyperMapPages(pVM, pVM, pVM->pVMR0, RT_ALIGN_Z(sizeof(VM), PAGE_SIZE) >> PAGE_SHIFT, pVM->paVMPagesR3, "VM", &pVM->pVMGC);
99 if (VBOX_SUCCESS(rc))
100 {
101 /* Reserve a page for fencing. */
102 MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
103
104 /*
105 * Map the heap into the hypervisor space.
106 */
107 rc = mmR3HyperHeapMap(pVM, pVM->mm.s.pHyperHeapHC, &pVM->mm.s.pHyperHeapGC);
108 if (VBOX_SUCCESS(rc))
109 {
110 /*
111 * Register info handlers.
112 */
113 DBGFR3InfoRegisterInternal(pVM, "hma", "Show the layout of the Hypervisor Memory Area.", mmR3HyperInfoHma);
114
115 LogFlow(("mmr3HyperInit: returns VINF_SUCCESS\n"));
116 return VINF_SUCCESS;
117 }
118 /* Caller will do proper cleanup. */
119 }
120 }
121
122 LogFlow(("mmr3HyperInit: returns %Vrc\n", rc));
123 return rc;
124}
125
126
127/**
128 * Finalizes the HMA mapping.
129 *
130 * This is called later during init; most (all) HMA allocations should be done
131 * by the time this function is called.
132 *
133 * @returns VBox status.
134 */
135MMR3DECL(int) MMR3HyperInitFinalize(PVM pVM)
136{
137 LogFlow(("MMR3HyperInitFinalize:\n"));
138
139 /*
140 * Adjust and create the HMA mapping.
141 */
142 while ((RTINT)pVM->mm.s.offHyperNextStatic + 64*_1K < (RTINT)pVM->mm.s.cbHyperArea - _4M)
143 pVM->mm.s.cbHyperArea -= _4M;
144 int rc = PGMR3MapPT(pVM, pVM->mm.s.pvHyperAreaGC, pVM->mm.s.cbHyperArea,
145 mmR3HyperRelocateCallback, NULL, "Hypervisor Memory Area");
146 if (VBOX_FAILURE(rc))
147 return rc;
148 pVM->mm.s.fPGMInitialized = true;
149
150 /*
151 * Do all the delayed mappings.
152 */
153 PMMLOOKUPHYPER pLookup = (PMMLOOKUPHYPER)((uintptr_t)pVM->mm.s.pHyperHeapHC + pVM->mm.s.offLookupHyper);
154 for (;;)
155 {
156 RTGCPTR GCPtr = pVM->mm.s.pvHyperAreaGC + pLookup->off;
157 unsigned cPages = pLookup->cb >> PAGE_SHIFT;
158 switch (pLookup->enmType)
159 {
160 case MMLOOKUPHYPERTYPE_LOCKED:
161 rc = mmr3MapLocked(pVM, pLookup->u.Locked.pLockedMem, GCPtr, 0, cPages, 0);
162 break;
163
164 case MMLOOKUPHYPERTYPE_HCPHYS:
165 rc = PGMMap(pVM, GCPtr, pLookup->u.HCPhys.HCPhys, pLookup->cb, 0);
166 break;
167
168 case MMLOOKUPHYPERTYPE_GCPHYS:
169 {
170 const RTGCPHYS GCPhys = pLookup->u.GCPhys.GCPhys;
171 const size_t cb = pLookup->cb;
172 for (unsigned off = 0; off < cb; off += PAGE_SIZE)
173 {
174 RTHCPHYS HCPhys;
175 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys + off, &HCPhys);
176 if (VBOX_FAILURE(rc))
177 break;
178 rc = PGMMap(pVM, GCPtr + off, HCPhys, PAGE_SIZE, 0);
179 if (VBOX_FAILURE(rc))
180 break;
181 }
182 break;
183 }
184
185 case MMLOOKUPHYPERTYPE_DYNAMIC:
186 /* do nothing here since these are either fences or managed by someone else using PGM. */
187 break;
188
189 default:
190 AssertMsgFailed(("enmType=%d\n", pLookup->enmType));
191 break;
192 }
193
194 if (VBOX_FAILURE(rc))
195 {
196 AssertMsgFailed(("rc=%Vrc cb=%d GCPtr=%VGv enmType=%d pszDesc=%s\n",
197 rc, pLookup->cb, GCPtr, pLookup->enmType, pLookup->pszDesc));
198 return rc;
199 }
200
201 /* next */
202 if (pLookup->offNext == (int32_t)NIL_OFFSET)
203 break;
204 pLookup = (PMMLOOKUPHYPER)((uintptr_t)pLookup + pLookup->offNext);
205 }
206
207 LogFlow(("MMR3HyperInitFinalize: returns VINF_SUCCESS\n"));
208 return VINF_SUCCESS;
209}
210
211
212/**
213 * Callback function which will be called when PGM is trying to find
214 * a new location for the mapping.
215 *
216 * The callback is called in two modes, 1) the check mode and 2) the relocate mode.
217 * In 1) the callback should say if it objects to a suggested new location. If it
218 * accepts the new location, it is called again for doing its relocation.
219 *
220 *
221 * @returns true if the location is ok.
222 * @returns false if another location should be found.
223 * @param pVM The VM handle.
224 * @param GCPtrOld The old virtual address.
225 * @param GCPtrNew The new virtual address.
226 * @param enmMode Used to indicate the callback mode.
227 * @param pvUser User argument. Ignored.
228 * @remark The return value is not a failure indicator, it's an acceptance
229 * indicator. Relocation cannot fail!
230 */
231static DECLCALLBACK(bool) mmR3HyperRelocateCallback(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew, PGMRELOCATECALL enmMode, void *pvUser)
232{
233 switch (enmMode)
234 {
235 /*
236 * Verify location - all locations are good for us.
237 */
238 case PGMRELOCATECALL_SUGGEST:
239 return true;
240
241 /*
242 * Execute the relocation.
243 */
244 case PGMRELOCATECALL_RELOCATE:
245 {
246 /*
247 * Accepted!
248 */
249 AssertMsg(GCPtrOld == pVM->mm.s.pvHyperAreaGC, ("GCPtrOld=%#x pVM->mm.s.pvHyperAreaGC=%#x\n", GCPtrOld, pVM->mm.s.pvHyperAreaGC));
250 Log(("Relocating the hypervisor from %#x to %#x\n", GCPtrOld, GCPtrNew));
251
252 /* relocate ourselves and the VM structure. */
253 RTGCINTPTR offDelta = GCPtrNew - GCPtrOld;
254 pVM->pVMGC += offDelta;
255 pVM->mm.s.pvHyperAreaGC += offDelta;
256 pVM->mm.s.pHyperHeapGC += offDelta;
257 pVM->mm.s.pHyperHeapHC->pbHeapGC += offDelta;
258 pVM->mm.s.pHyperHeapHC->pVMGC = pVM->pVMGC;
259
260 /* relocate the rest. */
261 VMR3Relocate(pVM, offDelta);
262 return true;
263 }
264
265 default:
266 AssertMsgFailed(("Invalid relocation mode %d\n", enmMode));
267 }
268
269 return false;
270}
271
272
273/**
274 * Maps contiguous HC physical memory into the hypervisor region in the GC.
275 *
276 * @return VBox status code.
277 *
278 * @param pVM VM handle.
279 * @param pvHC Host context address of the memory. Must be page aligned!
280 * @param HCPhys Host context physical address of the memory to be mapped. Must be page aligned!
281 * @param cb Size of the memory. Will be rounded up to nearest page.
282 * @param pszDesc Description.
283 * @param pGCPtr Where to store the GC address.
284 */
285MMR3DECL(int) MMR3HyperMapHCPhys(PVM pVM, void *pvHC, RTHCPHYS HCPhys, size_t cb, const char *pszDesc, PRTGCPTR pGCPtr)
286{
287 LogFlow(("MMR3HyperMapHCPhys: pvHc=%p HCPhys=%VHp cb=%d pszDesc=%p:{%s} pGCPtr=%p\n", pvHC, HCPhys, (int)cb, pszDesc, pszDesc, pGCPtr));
288
289 /*
290 * Validate input.
291 */
292 AssertReturn(RT_ALIGN_P(pvHC, PAGE_SIZE) == pvHC, VERR_INVALID_PARAMETER);
293 AssertReturn(RT_ALIGN_T(HCPhys, PAGE_SIZE, RTHCPHYS) == HCPhys, VERR_INVALID_PARAMETER);
294 AssertReturn(pszDesc && *pszDesc, VERR_INVALID_PARAMETER);
295
296 /*
297 * Add the memory to the hypervisor area.
298 */
299 uint32_t cbAligned = RT_ALIGN_32(cb, PAGE_SIZE);
300 AssertReturn(cbAligned >= cb, VERR_INVALID_PARAMETER);
301 RTGCPTR GCPtr;
302 PMMLOOKUPHYPER pLookup;
303 int rc = mmR3HyperMap(pVM, cbAligned, pszDesc, &GCPtr, &pLookup);
304 if (VBOX_SUCCESS(rc))
305 {
306 pLookup->enmType = MMLOOKUPHYPERTYPE_HCPHYS;
307 pLookup->u.HCPhys.pvHC = pvHC;
308 pLookup->u.HCPhys.HCPhys = HCPhys;
309
310 /*
311 * Update the page table.
312 */
313 if (pVM->mm.s.fPGMInitialized)
314 rc = PGMMap(pVM, GCPtr, HCPhys, cbAligned, 0);
315 if (VBOX_SUCCESS(rc))
316 *pGCPtr = GCPtr;
317 }
318 return rc;
319}
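
/*
 * Usage sketch: a minimal example of how a caller holding a page aligned,
 * physically contiguous buffer might expose it to the GC via
 * MMR3HyperMapHCPhys. The exampleMapContiguousBuffer name and the
 * pvBuf/HCPhysBuf/cbBuf parameters are hypothetical; the buffer is assumed
 * to come from a contiguous allocation made through the support library.
 */
#if 0 /* illustrative sketch, not compiled */
static int exampleMapContiguousBuffer(PVM pVM, void *pvBuf, RTHCPHYS HCPhysBuf, size_t cbBuf)
{
    RTGCPTR GCPtrBuf;
    int rc = MMR3HyperMapHCPhys(pVM, pvBuf, HCPhysBuf, cbBuf, "Example buffer", &GCPtrBuf);
    if (VBOX_SUCCESS(rc))
        Log(("Example buffer mapped at %VGv\n", GCPtrBuf));
    return rc;
}
#endif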
320
321
322/**
323 * Maps contiguous GC physical memory into the hypervisor region in the GC.
324 *
325 * @return VBox status code.
326 *
327 * @param pVM VM handle.
328 * @param GCPhys Guest context physical address of the memory to be mapped. Must be page aligned!
329 * @param cb Size of the memory. Will be rounded up to nearest page.
330 * @param pszDesc Mapping description.
331 * @param pGCPtr Where to store the GC address.
332 */
333MMR3DECL(int) MMR3HyperMapGCPhys(PVM pVM, RTGCPHYS GCPhys, size_t cb, const char *pszDesc, PRTGCPTR pGCPtr)
334{
335 LogFlow(("MMR3HyperMapGCPhys: GCPhys=%VGp cb=%d pszDesc=%p:{%s} pGCPtr=%p\n", GCPhys, (int)cb, pszDesc, pszDesc, pGCPtr));
336
337 /*
338 * Validate input.
339 */
340 AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
341 AssertReturn(pszDesc && *pszDesc, VERR_INVALID_PARAMETER);
342
343 /*
344 * Add the memory to the hypervisor area.
345 */
346 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
347 RTGCPTR GCPtr;
348 PMMLOOKUPHYPER pLookup;
349 int rc = mmR3HyperMap(pVM, cb, pszDesc, &GCPtr, &pLookup);
350 if (VBOX_SUCCESS(rc))
351 {
352 pLookup->enmType = MMLOOKUPHYPERTYPE_GCPHYS;
353 pLookup->u.GCPhys.GCPhys = GCPhys;
354
355 /*
356 * Update the page table.
357 */
358 for (unsigned off = 0; off < cb; off += PAGE_SIZE)
359 {
360 RTHCPHYS HCPhys;
361 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys + off, &HCPhys);
362 AssertRC(rc);
363 if (VBOX_FAILURE(rc))
364 {
365 AssertMsgFailed(("rc=%Vrc GCPhys=%VGv off=%#x %s\n", rc, GCPhys, off, pszDesc));
366 break;
367 }
368 if (pVM->mm.s.fPGMInitialized)
369 {
370 rc = PGMMap(pVM, GCPtr + off, HCPhys, PAGE_SIZE, 0);
371 AssertRC(rc);
372 if (VBOX_FAILURE(rc))
373 {
374 AssertMsgFailed(("rc=%Vrc GCPhys=%VGv off=%#x %s\n", rc, GCPhys, off, pszDesc));
375 break;
376 }
377 }
378 }
379
380 if (VBOX_SUCCESS(rc) && pGCPtr)
381 *pGCPtr = GCPtr;
382 }
383 return rc;
384}
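
/*
 * Usage sketch: mapping a page aligned piece of guest physical memory, e.g.
 * a guest page directory at a hypothetical GCPhysPD, into the HMA so it can
 * be reached through a GC pointer. The exampleMapGuestPD name is made up for
 * illustration.
 */
#if 0 /* illustrative sketch, not compiled */
static int exampleMapGuestPD(PVM pVM, RTGCPHYS GCPhysPD)
{
    RTGCPTR GCPtrPD;
    int rc = MMR3HyperMapGCPhys(pVM, GCPhysPD, PAGE_SIZE, "Example guest PD", &GCPtrPD);
    if (VBOX_SUCCESS(rc))
        Log(("Guest PD %VGp mapped at %VGv\n", GCPhysPD, GCPtrPD));
    return rc;
}
#endif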
385
386
387/**
388 * Locks and maps HC virtual memory into the hypervisor region in the GC.
389 *
390 * @return VBox status code.
391 *
392 * @param pVM VM handle.
393 * @param pvHC Host context address of the memory (may be not page aligned).
394 * @param cb Size of the memory. Will be rounded up to nearest page.
395 * @param fFree Set this if MM is responsible for freeing the memory using SUPPageFree.
396 * @param pszDesc Mapping description.
397 * @param pGCPtr Where to store the GC address corresponding to pvHC.
398 */
399MMR3DECL(int) MMR3HyperMapHCRam(PVM pVM, void *pvHC, size_t cb, bool fFree, const char *pszDesc, PRTGCPTR pGCPtr)
400{
401 LogFlow(("MMR3HyperMapHCRam: pvHc=%p cb=%d fFree=%d pszDesc=%p:{%s} pGCPtr=%p\n", pvHC, (int)cb, fFree, pszDesc, pszDesc, pGCPtr));
402
403 /*
404 * Validate input.
405 */
406 if ( !pvHC
407 || cb <= 0
408 || !pszDesc
409 || !*pszDesc)
410 {
411 AssertMsgFailed(("Invalid parameter\n"));
412 return VERR_INVALID_PARAMETER;
413 }
414
415 /*
416 * Page align address and size.
417 */
418 void *pvHCPage = (void *)((uintptr_t)pvHC & PAGE_BASE_HC_MASK);
419 cb += (uintptr_t)pvHC & PAGE_OFFSET_MASK;
420 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
421
422 /*
423 * Add the memory to the hypervisor area.
424 */
425 RTGCPTR GCPtr;
426 PMMLOOKUPHYPER pLookup;
427 int rc = mmR3HyperMap(pVM, cb, pszDesc, &GCPtr, &pLookup);
428 if (VBOX_SUCCESS(rc))
429 {
430 /*
431 * Lock the heap memory and tell PGM about the locked pages.
432 */
433 PMMLOCKEDMEM pLockedMem;
434 rc = mmr3LockMem(pVM, pvHCPage, cb, fFree ? MM_LOCKED_TYPE_HYPER : MM_LOCKED_TYPE_HYPER_NOFREE, &pLockedMem, false /* fSilentFailure */);
435 if (VBOX_SUCCESS(rc))
436 {
437 /* map the stuff into guest address space. */
438 if (pVM->mm.s.fPGMInitialized)
439 rc = mmr3MapLocked(pVM, pLockedMem, GCPtr, 0, ~(size_t)0, 0);
440 if (VBOX_SUCCESS(rc))
441 {
442 pLookup->enmType = MMLOOKUPHYPERTYPE_LOCKED;
443 pLookup->u.Locked.pvHC = pvHC;
444 pLookup->u.Locked.pvR0 = NIL_RTR0PTR;
445 pLookup->u.Locked.pLockedMem = pLockedMem;
446
447 /* done. */
448 GCPtr |= (uintptr_t)pvHC & PAGE_OFFSET_MASK;
449 *pGCPtr = GCPtr;
450 return rc;
451 }
452 /* Don't care about failure cleanup, we're screwed if this fails anyway. */
453 }
454 }
455
456 return rc;
457}
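
/*
 * Usage sketch: locking and mapping a ring-3 allocation into the HMA. The
 * pages are allocated with SUPPageAlloc so MM may free them again via
 * SUPPageFree (fFree = true). The exampleMapR3Table name and parameters are
 * hypothetical.
 */
#if 0 /* illustrative sketch, not compiled */
static int exampleMapR3Table(PVM pVM, size_t cbTable, PRTGCPTR pGCPtrTable)
{
    void *pvTable;
    int rc = SUPPageAlloc(RT_ALIGN_Z(cbTable, PAGE_SIZE) >> PAGE_SHIFT, &pvTable);
    if (VBOX_SUCCESS(rc))
    {
        rc = MMR3HyperMapHCRam(pVM, pvTable, cbTable, true /* fFree */, "Example table", pGCPtrTable);
        if (VBOX_FAILURE(rc))
            SUPPageFree(pvTable, RT_ALIGN_Z(cbTable, PAGE_SIZE) >> PAGE_SHIFT);
    }
    return rc;
}
#endif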
458
459
460/**
461 * Maps locked R3 virtual memory into the hypervisor region in the GC.
462 *
463 * @return VBox status code.
464 *
465 * @param pVM VM handle.
466 * @param pvR3 The ring-3 address of the memory, must be page aligned.
467 * @param pvR0 The ring-0 address of the memory, must be page aligned. (optional)
468 * @param cPages The number of pages.
469 * @param paPages The page descriptors.
470 * @param pszDesc Mapping description.
471 * @param pGCPtr Where to store the GC address corresponding to pvR3.
472 */
473MMR3DECL(int) MMR3HyperMapPages(PVM pVM, void *pvR3, RTR0PTR pvR0, size_t cPages, PCSUPPAGE paPages, const char *pszDesc, PRTGCPTR pGCPtr)
474{
475 LogFlow(("MMR3HyperMapPages: pvR3=%p pvR0=%p cPages=%zu paPages=%p pszDesc=%p:{%s} pGCPtr=%p\n",
476 pvR3, pvR0, cPages, paPages, pszDesc, pszDesc, pGCPtr));
477
478 /*
479 * Validate input.
480 */
481 AssertPtrReturn(pvR3, VERR_INVALID_POINTER);
482 AssertPtrReturn(paPages, VERR_INVALID_POINTER);
483 AssertReturn(cPages > 0, VERR_INVALID_PARAMETER);
484 AssertReturn(cPages < 1024, VERR_INVALID_PARAMETER);
485 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
486 AssertReturn(*pszDesc, VERR_INVALID_PARAMETER);
487 AssertPtrReturn(pGCPtr, VERR_INVALID_PARAMETER);
488
489 /*
490 * Add the memory to the hypervisor area.
491 */
492 RTGCPTR GCPtr;
493 PMMLOOKUPHYPER pLookup;
494 int rc = mmR3HyperMap(pVM, cPages << PAGE_SHIFT, pszDesc, &GCPtr, &pLookup);
495 if (VBOX_SUCCESS(rc))
496 {
497 /*
498 * Create a locked memory record and tell PGM about this.
499 */
500 PMMLOCKEDMEM pLockedMem = (PMMLOCKEDMEM)MMR3HeapAlloc(pVM, MM_TAG_MM, RT_OFFSETOF(MMLOCKEDMEM, aPhysPages[cPages]));
501 if (pLockedMem)
502 {
503 pLockedMem->pv = pvR3;
504 pLockedMem->cb = cPages << PAGE_SHIFT;
505 pLockedMem->eType = MM_LOCKED_TYPE_HYPER_PAGES;
506 memset(&pLockedMem->u, 0, sizeof(pLockedMem->u));
507 for (size_t i = 0; i < cPages; i++)
508 {
509 AssertReleaseReturn(paPages[i].Phys != 0 && paPages[i].Phys != NIL_RTHCPHYS && !(paPages[i].Phys & PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR);
510 pLockedMem->aPhysPages[i].Phys = paPages[i].Phys;
511 pLockedMem->aPhysPages[i].uReserved = (RTHCUINTPTR)pLockedMem;
512 }
513
514 /* map the stuff into guest address space. */
515 if (pVM->mm.s.fPGMInitialized)
516 rc = mmr3MapLocked(pVM, pLockedMem, GCPtr, 0, ~(size_t)0, 0);
517 if (VBOX_SUCCESS(rc))
518 {
519 pLookup->enmType = MMLOOKUPHYPERTYPE_LOCKED;
520 pLookup->u.Locked.pvHC = pvR3;
521 pLookup->u.Locked.pvR0 = pvR0;
522 pLookup->u.Locked.pLockedMem = pLockedMem;
523
524 /* done. */
525 *pGCPtr = GCPtr;
526 return rc;
527 }
528 /* Don't care about failure cleanup, we're screwed if this fails anyway. */
529 }
530 }
531
532 return rc;
533}
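
/*
 * Usage sketch: mapping an already locked page array described by SUPPAGE
 * entries, the same way the VM structure itself is mapped in mmr3HyperInit.
 * The exampleMapLockedPages name is made up; paPages is assumed to have been
 * filled in by the support library when the memory was allocated.
 */
#if 0 /* illustrative sketch, not compiled */
static int exampleMapLockedPages(PVM pVM, void *pvR3, RTR0PTR pvR0, size_t cPages, PCSUPPAGE paPages)
{
    RTGCPTR GCPtr;
    int rc = MMR3HyperMapPages(pVM, pvR3, pvR0, cPages, paPages, "Example pages", &GCPtr);
    AssertRC(rc);
    return rc;
}
#endif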
534
535
536/**
537 * Reserves a hypervisor memory area.
538 * Most frequent usage is fence pages and dynamic mappings like the guest PD and PDPTR.
539 *
540 * @return VBox status code.
541 *
542 * @param pVM VM handle.
543 * @param cb Size of the memory. Will be rounded up to nearest page.
544 * @param pszDesc Mapping description.
545 * @param pGCPtr Where to store the assigned GC address. Optional.
546 */
547MMR3DECL(int) MMR3HyperReserve(PVM pVM, unsigned cb, const char *pszDesc, PRTGCPTR pGCPtr)
548{
549 LogFlow(("MMR3HyperMapHCRam: cb=%d pszDesc=%p:{%s} pGCPtr=%p\n", (int)cb, pszDesc, pszDesc, pGCPtr));
550
551 /*
552 * Validate input.
553 */
554 if ( cb <= 0
555 || !pszDesc
556 || !*pszDesc)
557 {
558 AssertMsgFailed(("Invalid parameter\n"));
559 return VERR_INVALID_PARAMETER;
560 }
561
562 /*
563 * Add the memory to the hypervisor area.
564 */
565 RTGCPTR GCPtr;
566 PMMLOOKUPHYPER pLookup;
567 int rc = mmR3HyperMap(pVM, cb, pszDesc, &GCPtr, &pLookup);
568 if (VBOX_SUCCESS(rc))
569 {
570 pLookup->enmType = MMLOOKUPHYPERTYPE_DYNAMIC;
571 if (pGCPtr)
572 *pGCPtr = GCPtr;
573 return VINF_SUCCESS;
574 }
575 return rc;
576}
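
/*
 * Usage sketch: reserving a dynamic slot followed by a fence page, the same
 * pattern mmr3HyperInit uses for its "fence" reservations. The
 * exampleReserveDynamicSlot name is made up for illustration.
 */
#if 0 /* illustrative sketch, not compiled */
static int exampleReserveDynamicSlot(PVM pVM, PRTGCPTR pGCPtrSlot)
{
    int rc = MMR3HyperReserve(pVM, PAGE_SIZE, "Example dynamic slot", pGCPtrSlot);
    if (VBOX_SUCCESS(rc))
        rc = MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
    return rc;
}
#endif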
577
578
579/**
580 * Adds memory to the hypervisor memory arena.
581 *
582 * @return VBox status code.
583 * @param pVM The VM handle.
584 * @param cb Size of the memory. Will be rounded up to nearest page.
585 * @param pszDesc The description of the memory.
586 * @param pGCPtr Where to store the GC address.
587 * @param ppLookup Where to store the pointer to the lookup record.
588 * @remark We assume the threading structure of VBox imposes natural
589 * serialization of most functions, this one included.
590 */
591static int mmR3HyperMap(PVM pVM, const size_t cb, const char *pszDesc, PRTGCPTR pGCPtr, PMMLOOKUPHYPER *ppLookup)
592{
593 /*
594 * Validate input.
595 */
596 const uint32_t cbAligned = RT_ALIGN(cb, PAGE_SIZE);
597 AssertReturn(cbAligned >= cb, VERR_INVALID_PARAMETER);
598 if (pVM->mm.s.offHyperNextStatic + cbAligned >= pVM->mm.s.cbHyperArea) /* don't use the last page, it's a fence. */
599 {
600 AssertMsgFailed(("Out of static mapping space in the HMA! offHyperAreaGC=%x cbAligned=%x\n",
601 pVM->mm.s.offHyperNextStatic, cbAligned));
602 return VERR_NO_MEMORY;
603 }
604
605 /*
606 * Allocate lookup record.
607 */
608 PMMLOOKUPHYPER pLookup;
609 int rc = MMHyperAlloc(pVM, sizeof(*pLookup), 1, MM_TAG_MM, (void **)&pLookup);
610 if (VBOX_SUCCESS(rc))
611 {
612 /*
613 * Initialize it and insert it.
614 */
615 pLookup->offNext = pVM->mm.s.offLookupHyper;
616 pLookup->cb = cbAligned;
617 pLookup->off = pVM->mm.s.offHyperNextStatic;
618 pVM->mm.s.offLookupHyper = (char *)pLookup - (char *)pVM->mm.s.pHyperHeapHC;
619 if (pLookup->offNext != (int32_t)NIL_OFFSET)
620 pLookup->offNext -= pVM->mm.s.offLookupHyper;
621 pLookup->enmType = MMLOOKUPHYPERTYPE_INVALID;
622 memset(&pLookup->u, 0xff, sizeof(pLookup->u));
623 pLookup->pszDesc = pszDesc;
624
625 /* Mapping. */
626 *pGCPtr = pVM->mm.s.pvHyperAreaGC + pVM->mm.s.offHyperNextStatic;
627 pVM->mm.s.offHyperNextStatic += cbAligned;
628
629 /* Return pointer. */
630 *ppLookup = pLookup;
631 }
632
633 AssertRC(rc);
634 LogFlow(("mmR3HyperMap: returns %Vrc *pGCPtr=%VGv\n", rc, *pGCPtr));
635 return rc;
636}
637
638
639/**
640 * Allocates a new heap.
641 *
642 * @returns VBox status code.
643 * @param pVM The VM handle.
644 * @param cb The size of the new heap.
645 * @param ppHeap Where to store the heap pointer on successful return.
646 */
647static int mmR3HyperHeapCreate(PVM pVM, const size_t cb, PMMHYPERHEAP *ppHeap)
648{
649 /*
650 * Allocate the hypervisor heap.
651 */
652 const uint32_t cbAligned = RT_ALIGN_Z(cb, PAGE_SIZE);
653 AssertReturn(cbAligned >= cb, VERR_INVALID_PARAMETER);
654 void *pv;
655 int rc = SUPPageAlloc(cbAligned >> PAGE_SHIFT, &pv);
656 if (VBOX_SUCCESS(rc))
657 {
658 /*
659 * Initialize the heap and first free chunk.
660 */
661 PMMHYPERHEAP pHeap = (PMMHYPERHEAP)pv;
662 pHeap->u32Magic = MMHYPERHEAP_MAGIC;
663 pHeap->pVMHC = pVM;
664 pHeap->pVMGC = pVM->pVMGC;
665 pHeap->pbHeapHC = (uint8_t *)pHeap + MMYPERHEAP_HDR_SIZE;
666 //pHeap->pbHeapGC = 0; // set by mmR3HyperHeapMap()
667 pHeap->cbHeap = cbAligned - MMYPERHEAP_HDR_SIZE;
668 pHeap->cbFree = pHeap->cbHeap - sizeof(MMHYPERCHUNK);
669 //pHeap->offFreeHead = 0;
670 //pHeap->offFreeTail = 0;
671 pHeap->offPageAligned = pHeap->cbHeap;
672 //pHeap->HyperHeapStatTree = 0;
673
674 PMMHYPERCHUNKFREE pFree = (PMMHYPERCHUNKFREE)pHeap->pbHeapHC;
675 pFree->cb = pHeap->cbFree;
676 //pFree->core.offNext = 0;
677 MMHYPERCHUNK_SET_TYPE(&pFree->core, MMHYPERCHUNK_FLAGS_FREE);
678 pFree->core.offHeap = -(int32_t)MMYPERHEAP_HDR_SIZE;
679 //pFree->offNext = 0;
680 //pFree->offPrev = 0;
681
682 STAMR3Register(pVM, &pHeap->cbHeap, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, "/MM/HyperHeap/cbHeap", STAMUNIT_BYTES, "The heap size.");
683 STAMR3Register(pVM, &pHeap->cbFree, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, "/MM/HyperHeap/cbFree", STAMUNIT_BYTES, "The free space.");
684
685 *ppHeap = pHeap;
686 return VINF_SUCCESS;
687 }
688 AssertMsgFailed(("SUPPageAlloc(%d,) -> %Vrc\n", cbAligned >> PAGE_SHIFT, rc));
689
690 *ppHeap = NULL;
691 return rc;
692}
693
694
695/**
696 * Maps the hypervisor heap into the hypervisor memory area (HMA).
697 */
698static int mmR3HyperHeapMap(PVM pVM, PMMHYPERHEAP pHeap, PRTGCPTR ppHeapGC)
699{
700 int rc = MMR3HyperMapHCRam(pVM, pHeap, pHeap->cbHeap + MMYPERHEAP_HDR_SIZE, true, "Heap", ppHeapGC);
701 if (VBOX_SUCCESS(rc))
702 {
703 pHeap->pVMGC = pVM->pVMGC;
704 pHeap->pbHeapGC = *ppHeapGC + MMYPERHEAP_HDR_SIZE;
705 /* Reserve a page for fencing. */
706 MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
707 }
708 return rc;
709}
710
711
712#if 0
713/**
714 * Destroys a heap.
715 */
716static int mmR3HyperHeapDestroy(PVM pVM, PMMHYPERHEAP pHeap)
717{
718 /* all this is dealt with when unlocking and freeing locked memory. */
719}
720#endif
721
722
723/**
724 * Allocates memory in the Hypervisor (GC VMM) area which will never
725 * be freed and doesn't have any offset based relation to other heap blocks.
726 *
727 * The latter means that two blocks allocated by this API will not have the
728 * same relative position to each other in GC and HC. In short, never use
729 * this API for allocating nodes for an offset based AVL tree!
730 *
731 * The returned memory is of course zeroed.
732 *
733 * @returns VBox status code.
734 * @param pVM The VM to operate on.
735 * @param cb Number of bytes to allocate.
736 * @param uAlignment Required memory alignment in bytes.
737 * Values are 0,8,16,32 and PAGE_SIZE.
738 * 0 -> default alignment, i.e. 8 bytes.
739 * @param enmTag The statistics tag.
740 * @param ppv Where to store the address to the allocated
741 * memory.
742 * @remark This is assumed not to be used at times when serialization is required.
743 */
744MMDECL(int) MMR3HyperAllocOnceNoRel(PVM pVM, size_t cb, unsigned uAlignment, MMTAG enmTag, void **ppv)
745{
746 AssertMsg(cb >= 8, ("Hey! Do you really mean to allocate less than 8 bytes?! cb=%d\n", cb));
747 AssertMsg(cb <= _4M, ("Allocating more than 4MB!? (cb=%#x) HMA limit might need adjusting if you allocate more.\n", cb));
748
749 /*
750 * Choose between allocating a new chunk of HMA memory
751 * and the heap. We will only do BIG allocations from HMA.
752 */
753 if ( cb < _64K
754 && ( uAlignment != PAGE_SIZE
755 || cb < 48*_1K))
756 {
757 int rc = MMHyperAlloc(pVM, cb, uAlignment, enmTag, ppv);
758 if ( rc != VERR_MM_HYPER_NO_MEMORY
759 || cb <= 8*_1K)
760 {
761 Log2(("MMR3HyperAllocOnceNoRel: cb=%#x uAlignment=%#x returns %Rrc and *ppv=%p\n",
762 cb, uAlignment, rc, *ppv));
763 return rc;
764 }
765 }
766
767 /*
768 * Validate alignment.
769 */
770 switch (uAlignment)
771 {
772 case 0:
773 case 8:
774 case 16:
775 case 32:
776 case PAGE_SIZE:
777 break;
778 default:
779 AssertMsgFailed(("Invalid alignment %u\n", uAlignment));
780 return VERR_INVALID_PARAMETER;
781 }
782
783 /*
784 * Allocate the pages and the HMA space.
785 */
786 cb = RT_ALIGN(cb, PAGE_SIZE);
787 void *pvPages;
788 int rc = SUPPageAlloc(cb >> PAGE_SHIFT, &pvPages);
789 if (VBOX_SUCCESS(rc))
790 {
791 RTGCPTR GCPtr;
792 rc = MMR3HyperMapHCRam(pVM, pvPages, cb, true, mmR3GetTagName(enmTag), &GCPtr);
793 if (VBOX_SUCCESS(rc))
794 {
795 *ppv = pvPages;
796 Log2(("MMR3HyperAllocOnceNoRel: cb=%#x uAlignment=%#x returns VINF_SUCCESS and *ppv=%p\n",
797 cb, uAlignment, *ppv));
798 return rc;
799 }
800 SUPPageFree(pvPages, cb >> PAGE_SHIFT);
801 }
802 if (rc == VERR_NO_MEMORY)
803 rc = VERR_MM_HYPER_NO_MEMORY;
804 Log2(("MMR3HyperAllocOnceNoRel: cb=%#x uAlignment=%#x returns %Rrc\n", cb, uAlignment, rc));
805 AssertMsgFailed(("Failed to allocate %d bytes!\n", cb));
806 return rc;
807}
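
/*
 * Usage sketch: a one time, page aligned allocation for a table that needs no
 * offset based relationship to other heap blocks. The exampleAllocStaticTable
 * name is made up, and MM_TAG_MM is reused purely for illustration; a real
 * caller would pass its own statistics tag.
 */
#if 0 /* illustrative sketch, not compiled */
static int exampleAllocStaticTable(PVM pVM, void **ppvTable)
{
    int rc = MMR3HyperAllocOnceNoRel(pVM, 16*_1K, PAGE_SIZE, MM_TAG_MM, ppvTable);
    AssertRC(rc);
    return rc;
}
#endif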
808
809
810/**
811 * Convert hypervisor HC virtual address to HC physical address.
812 *
813 * @returns HC physical address.
814 * @param pVM VM Handle
815 * @param pvHC Host context virtual address.
816 */
817MMR3DECL(RTHCPHYS) MMR3HyperHCVirt2HCPhys(PVM pVM, void *pvHC)
818{
819 PMMLOOKUPHYPER pLookup = (PMMLOOKUPHYPER)((char*)pVM->mm.s.pHyperHeapHC + pVM->mm.s.offLookupHyper);
820 for (;;)
821 {
822 switch (pLookup->enmType)
823 {
824 case MMLOOKUPHYPERTYPE_LOCKED:
825 {
826 unsigned off = (char *)pvHC - (char *)pLookup->u.Locked.pvHC;
827 if (off < pLookup->cb)
828 return (pLookup->u.Locked.pLockedMem->aPhysPages[off >> PAGE_SHIFT].Phys & X86_PTE_PAE_PG_MASK) | (off & PAGE_OFFSET_MASK);
829 break;
830 }
831
832 case MMLOOKUPHYPERTYPE_HCPHYS:
833 {
834 unsigned off = (char *)pvHC - (char *)pLookup->u.HCPhys.pvHC;
835 if (off < pLookup->cb)
836 return pLookup->u.HCPhys.HCPhys + off;
837 break;
838 }
839
840 case MMLOOKUPHYPERTYPE_GCPHYS:
841 case MMLOOKUPHYPERTYPE_DYNAMIC:
842 /* can't convert these kinds of records. */
843 break;
844
845 default:
846 AssertMsgFailed(("enmType=%d\n", pLookup->enmType));
847 break;
848 }
849
850 /* next */
851 if ((unsigned)pLookup->offNext == NIL_OFFSET)
852 break;
853 pLookup = (PMMLOOKUPHYPER)((char *)pLookup + pLookup->offNext);
854 }
855
856 AssertMsgFailed(("pvHC=%p is not inside the hypervisor memory area!\n", pvHC));
857 return NIL_RTHCPHYS;
858}
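
/*
 * Usage sketch: resolving the host physical address of something living in
 * the HMA, e.g. before handing a structure address to ring-0 code. pvStruct
 * is assumed to point inside an HMA mapping; the exampleQueryStructPhys name
 * is made up.
 */
#if 0 /* illustrative sketch, not compiled */
static RTHCPHYS exampleQueryStructPhys(PVM pVM, void *pvStruct)
{
    RTHCPHYS HCPhys = MMR3HyperHCVirt2HCPhys(pVM, pvStruct);
    AssertReturn(HCPhys != NIL_RTHCPHYS, NIL_RTHCPHYS);
    return HCPhys;
}
#endif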
859
860
861#if 0 /* unused, not implemented */
862/**
863 * Convert hypervisor HC physical address to HC virtual address.
864 *
865 * @returns HC virtual address.
866 * @param pVM VM Handle
867 * @param HCPhys Host context physical address.
868 */
869MMR3DECL(void *) MMR3HyperHCPhys2HCVirt(PVM pVM, RTHCPHYS HCPhys)
870{
871 void *pv;
872 int rc = MMR3HyperHCPhys2HCVirtEx(pVM, HCPhys, &pv);
873 if (VBOX_SUCCESS(rc))
874 return pv;
875 AssertMsgFailed(("Invalid address HCPhys=%x rc=%d\n", HCPhys, rc));
876 return NULL;
877}
878
879
880/**
881 * Convert hypervisor HC physical address to HC virtual address.
882 *
883 * @returns VBox status.
884 * @param pVM VM Handle
885 * @param HCPhys Host context physical address.
886 * @param ppv Where to store the HC virtual address.
887 */
888MMR3DECL(int) MMR3HyperHCPhys2HCVirtEx(PVM pVM, RTHCPHYS HCPhys, void **ppv)
889{
890 /*
891 * Linear search.
892 */
893 /** @todo implement when actually used. */
894 return VERR_INVALID_POINTER;
895}
896#endif /* unused, not implemented */
897
898
899/**
900 * Read hypervisor memory from GC virtual address.
901 *
902 * @returns VBox status.
903 * @param pVM VM handle.
904 * @param pvDst Destination address (HC of course).
905 * @param GCPtr GC virtual address.
906 * @param cb Number of bytes to read.
907 */
908MMR3DECL(int) MMR3HyperReadGCVirt(PVM pVM, void *pvDst, RTGCPTR GCPtr, size_t cb)
909{
910 if (GCPtr - pVM->mm.s.pvHyperAreaGC >= pVM->mm.s.cbHyperArea)
911 return VERR_INVALID_PARAMETER;
912 return PGMR3MapRead(pVM, pvDst, GCPtr, cb);
913}
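
/*
 * Usage sketch: a debugger style read of hypervisor memory through its GC
 * address, e.g. to inspect a structure only known by its GC pointer. The
 * examplePeekHyperMemory name and parameters are hypothetical.
 */
#if 0 /* illustrative sketch, not compiled */
static int examplePeekHyperMemory(PVM pVM, RTGCPTR GCPtrStruct, void *pvDst, size_t cb)
{
    int rc = MMR3HyperReadGCVirt(pVM, pvDst, GCPtrStruct, cb);
    AssertMsg(VBOX_SUCCESS(rc), ("rc=%Vrc GCPtr=%VGv cb=%d\n", rc, GCPtrStruct, (int)cb));
    return rc;
}
#endif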
914
915
916/**
917 * Info handler for 'hma'; it dumps the list of lookup records for the hypervisor memory area.
918 *
919 * @param pVM The VM handle.
920 * @param pHlp Callback functions for doing output.
921 * @param pszArgs Argument string. Optional and specific to the handler.
922 */
923static DECLCALLBACK(void) mmR3HyperInfoHma(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
924{
925 pHlp->pfnPrintf(pHlp, "Hypervisor Memory Area (HMA) Layout: Base %VGv, 0x%08x bytes\n",
926 pVM->mm.s.pvHyperAreaGC, pVM->mm.s.cbHyperArea);
927
928 PMMLOOKUPHYPER pLookup = (PMMLOOKUPHYPER)((char*)pVM->mm.s.pHyperHeapHC + pVM->mm.s.offLookupHyper);
929 for (;;)
930 {
931 switch (pLookup->enmType)
932 {
933 case MMLOOKUPHYPERTYPE_LOCKED:
934 pHlp->pfnPrintf(pHlp, "%VGv-%VGv %VHv LOCKED %-*s %s\n",
935 pLookup->off + pVM->mm.s.pvHyperAreaGC,
936 pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
937 pLookup->u.Locked.pvHC,
938 sizeof(RTHCPTR) * 2,
939 pLookup->u.Locked.pLockedMem->eType == MM_LOCKED_TYPE_HYPER_NOFREE ? "nofree"
940 : pLookup->u.Locked.pLockedMem->eType == MM_LOCKED_TYPE_HYPER ? "autofree"
941 : pLookup->u.Locked.pLockedMem->eType == MM_LOCKED_TYPE_HYPER_PAGES ? "pages"
942 : pLookup->u.Locked.pLockedMem->eType == MM_LOCKED_TYPE_PHYS ? "gstphys"
943 : "??",
944 pLookup->pszDesc);
945 break;
946
947 case MMLOOKUPHYPERTYPE_HCPHYS:
948 pHlp->pfnPrintf(pHlp, "%VGv-%VGv %VHv HCPHYS %VHp %s\n",
949 pLookup->off + pVM->mm.s.pvHyperAreaGC,
950 pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
951 pLookup->u.HCPhys.pvHC, pLookup->u.HCPhys.HCPhys,
952 pLookup->pszDesc);
953 break;
954
955 case MMLOOKUPHYPERTYPE_GCPHYS:
956 pHlp->pfnPrintf(pHlp, "%VGv-%VGv %*s GCPHYS %VGp%*s %s\n",
957 pLookup->off + pVM->mm.s.pvHyperAreaGC,
958 pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
959 sizeof(RTHCPTR) * 2, "",
960 pLookup->u.GCPhys.GCPhys, RT_ABS(sizeof(RTHCPHYS) - sizeof(RTGCPHYS)) * 2, "",
961 pLookup->pszDesc);
962 break;
963
964 case MMLOOKUPHYPERTYPE_DYNAMIC:
965 pHlp->pfnPrintf(pHlp, "%VGv-%VGv %*s DYNAMIC %*s %s\n",
966 pLookup->off + pVM->mm.s.pvHyperAreaGC,
967 pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
968 sizeof(RTHCPTR) * 2, "",
969 sizeof(RTHCPTR) * 2, "",
970 pLookup->pszDesc);
971 break;
972
973 default:
974 AssertMsgFailed(("enmType=%d\n", pLookup->enmType));
975 break;
976 }
977
978 /* next */
979 if ((unsigned)pLookup->offNext == NIL_OFFSET)
980 break;
981 pLookup = (PMMLOOKUPHYPER)((char *)pLookup + pLookup->offNext);
982 }
983}
984