VirtualBox

source: vbox/trunk/src/VBox/VMM/MMHyper.cpp@ 32963

Last change on this file since 32963 was 32910, checked in by vboxsync, 14 years ago

VMM: hyper heap size selection

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 49.3 KB
Line 
1/* $Id: MMHyper.cpp 32910 2010-10-05 12:33:56Z vboxsync $ */
2/** @file
3 * MM - Memory Manager - Hypervisor Memory Area.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_MM_HYPER
23#include <VBox/pgm.h>
24#include <VBox/mm.h>
25#include <VBox/dbgf.h>
26#include "MMInternal.h"
27#include <VBox/vm.h>
28#include <VBox/err.h>
29#include <VBox/param.h>
30#include <VBox/log.h>
31#include <include/internal/pgm.h>
32#include <iprt/alloc.h>
33#include <iprt/assert.h>
34#include <iprt/string.h>
35
36
37/*******************************************************************************
38* Internal Functions *
39*******************************************************************************/
40static DECLCALLBACK(bool) mmR3HyperRelocateCallback(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew, PGMRELOCATECALL enmMode, void *pvUser);
41static int mmR3HyperMap(PVM pVM, const size_t cb, const char *pszDesc, PRTGCPTR pGCPtr, PMMLOOKUPHYPER *ppLookup);
42static int mmR3HyperHeapCreate(PVM pVM, const size_t cb, PMMHYPERHEAP *ppHeap, PRTR0PTR pR0PtrHeap);
43static int mmR3HyperHeapMap(PVM pVM, PMMHYPERHEAP pHeap, PRTGCPTR ppHeapGC);
44static DECLCALLBACK(void) mmR3HyperInfoHma(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
45
46
47DECLINLINE(uint32_t) computeHyperHeapSize(bool fCanUseLargerHeap, uint32_t cCpus, bool fHwVirtExtForced)
48{
49 if (cCpus > 1)
50 return _2M + cCpus * _64K;
51
52 if (fCanUseLargerHeap)
53 return 1280*_1K;
54 else
55 /* Size must be kept like this for saved state compatibility */
56 return fHwVirtExtForced ? 640*_1K : 1280*_1K;
57}
58
59
60/**
61 * Initializes the hypvervisor related MM stuff without
62 * calling down to PGM.
63 *
64 * PGM is not initialized at this point, PGM relies on
65 * the heap to initialize.
66 *
67 * @returns VBox status.
68 */
69int mmR3HyperInit(PVM pVM)
70{
71 LogFlow(("mmR3HyperInit:\n"));
72
73 /*
74 * Decide Hypervisor mapping in the guest context
75 * And setup various hypervisor area and heap parameters.
76 */
77 pVM->mm.s.pvHyperAreaGC = (RTGCPTR)MM_HYPER_AREA_ADDRESS;
78 pVM->mm.s.cbHyperArea = MM_HYPER_AREA_MAX_SIZE;
79 AssertRelease(RT_ALIGN_T(pVM->mm.s.pvHyperAreaGC, 1 << X86_PD_SHIFT, RTGCPTR) == pVM->mm.s.pvHyperAreaGC);
80 Assert(pVM->mm.s.pvHyperAreaGC < 0xff000000);
81
82 /** @todo @bugref{1865}, @bugref{3202}: Change the cbHyperHeap default
83 * depending on whether VT-x/AMD-V is enabled or not! Don't waste
84 * precious kernel space on heap for the PATM.
85 */
86 PCFGMNODE pMM = CFGMR3GetChild(CFGMR3GetRoot(pVM), "MM");
87 bool fCanUseLargerHeap = false;
88 int rc = CFGMR3QueryBoolDef(pMM, "CanUseLargerHeap", &fCanUseLargerHeap, false);
89 uint32_t cbHyperHeap = computeHyperHeapSize(fCanUseLargerHeap, pVM->cCpus, VMMIsHwVirtExtForced(pVM));
90 rc = CFGMR3QueryU32Def(pMM, "cbHyperHeap", &cbHyperHeap, cbHyperHeap);
91 AssertLogRelRCReturn(rc, rc);
92
93 cbHyperHeap = RT_ALIGN_32(cbHyperHeap, PAGE_SIZE);
94 LogRel(("MM: cbHyperHeap=%#x (%u)\n", cbHyperHeap, cbHyperHeap));
95
96 /*
97 * Allocate the hypervisor heap.
98 *
99 * (This must be done before we start adding memory to the
100 * hypervisor static area because lookup records are allocated from it.)
101 */
102 rc = mmR3HyperHeapCreate(pVM, cbHyperHeap, &pVM->mm.s.pHyperHeapR3, &pVM->mm.s.pHyperHeapR0);
103 if (RT_SUCCESS(rc))
104 {
105 /*
106 * Make a small head fence to fend of accidental sequential access.
107 */
108 MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
109
110 /*
111 * Map the VM structure into the hypervisor space.
112 */
113 AssertRelease(pVM->cbSelf == RT_UOFFSETOF(VM, aCpus[pVM->cCpus]));
114 RTGCPTR GCPtr;
115 rc = MMR3HyperMapPages(pVM, pVM, pVM->pVMR0, RT_ALIGN_Z(pVM->cbSelf, PAGE_SIZE) >> PAGE_SHIFT, pVM->paVMPagesR3, "VM", &GCPtr);
116 if (RT_SUCCESS(rc))
117 {
118 pVM->pVMRC = (RTRCPTR)GCPtr;
119 for (VMCPUID i = 0; i < pVM->cCpus; i++)
120 pVM->aCpus[i].pVMRC = pVM->pVMRC;
121
122 /* Reserve a page for fencing. */
123 MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
124
125 /*
126 * Map the heap into the hypervisor space.
127 */
128 rc = mmR3HyperHeapMap(pVM, pVM->mm.s.pHyperHeapR3, &GCPtr);
129 if (RT_SUCCESS(rc))
130 {
131 pVM->mm.s.pHyperHeapRC = (RTRCPTR)GCPtr;
132 Assert(pVM->mm.s.pHyperHeapRC == GCPtr);
133
134 /*
135 * Register info handlers.
136 */
137 DBGFR3InfoRegisterInternal(pVM, "hma", "Show the layout of the Hypervisor Memory Area.", mmR3HyperInfoHma);
138
139 LogFlow(("mmR3HyperInit: returns VINF_SUCCESS\n"));
140 return VINF_SUCCESS;
141 }
142 /* Caller will do proper cleanup. */
143 }
144 }
145
146 LogFlow(("mmR3HyperInit: returns %Rrc\n", rc));
147 return rc;
148}
149
150
151/**
152 * Cleans up the hypervisor heap.
153 *
154 * @returns VBox status.
155 */
156int mmR3HyperTerm(PVM pVM)
157{
158 if (pVM->mm.s.pHyperHeapR3)
159 PDMR3CritSectDelete(&pVM->mm.s.pHyperHeapR3->Lock);
160
161 return VINF_SUCCESS;
162}
163
164
/**
 * Finalizes the HMA mapping.
 *
 * This is called later during init, most (all) HMA allocations should be done
 * by the time this function is called.
 *
 * @returns VBox status.
 * @param   pVM     The VM handle.
 */
VMMR3DECL(int) MMR3HyperInitFinalize(PVM pVM)
{
    LogFlow(("MMR3HyperInitFinalize:\n"));

    /*
     * Initialize the hyper heap critical section.
     */
    int rc = PDMR3CritSectInit(pVM, &pVM->mm.s.pHyperHeapR3->Lock, RT_SRC_POS, "MM-HYPER");
    AssertRC(rc);

    /*
     * Adjust and create the HMA mapping.
     *
     * Shrink the area in 4MB steps as long as at least 64KB of slack remains
     * beyond the last static allocation, then hand the area to PGM.
     */
    while ((RTINT)pVM->mm.s.offHyperNextStatic + 64*_1K < (RTINT)pVM->mm.s.cbHyperArea - _4M)
        pVM->mm.s.cbHyperArea -= _4M;
    rc = PGMR3MapPT(pVM, pVM->mm.s.pvHyperAreaGC, pVM->mm.s.cbHyperArea, 0 /*fFlags*/,
                    mmR3HyperRelocateCallback, NULL, "Hypervisor Memory Area");
    if (RT_FAILURE(rc))
        return rc;
    /* From here on, map requests go straight to PGM instead of being queued. */
    pVM->mm.s.fPGMInitialized = true;

    /*
     * Do all the delayed mappings.
     *
     * Walk the offset-linked lookup record list and create the page table
     * entries for everything that was registered before PGM was available.
     */
    PMMLOOKUPHYPER pLookup = (PMMLOOKUPHYPER)((uintptr_t)pVM->mm.s.pHyperHeapR3 + pVM->mm.s.offLookupHyper);
    for (;;)
    {
        RTGCPTR     GCPtr  = pVM->mm.s.pvHyperAreaGC + pLookup->off;
        uint32_t    cPages = pLookup->cb >> PAGE_SHIFT;
        switch (pLookup->enmType)
        {
            case MMLOOKUPHYPERTYPE_LOCKED:
            {
                /* Locked ring-3 memory: map page by page using the recorded
                   host physical addresses. */
                PCRTHCPHYS paHCPhysPages = pLookup->u.Locked.paHCPhysPages;
                for (uint32_t i = 0; i < cPages; i++)
                {
                    rc = PGMMap(pVM, GCPtr + (i << PAGE_SHIFT), paHCPhysPages[i], PAGE_SIZE, 0);
                    AssertRCReturn(rc, rc);
                }
                break;
            }

            case MMLOOKUPHYPERTYPE_HCPHYS:
                /* Contiguous host physical range: a single PGMMap covers it. */
                rc = PGMMap(pVM, GCPtr, pLookup->u.HCPhys.HCPhys, pLookup->cb, 0);
                break;

            case MMLOOKUPHYPERTYPE_GCPHYS:
            {
                /* Guest physical range: translate each page to its host
                   physical address before mapping. */
                const RTGCPHYS    GCPhys = pLookup->u.GCPhys.GCPhys;
                const uint32_t    cb = pLookup->cb;
                for (uint32_t off = 0; off < cb; off += PAGE_SIZE)
                {
                    RTHCPHYS HCPhys;
                    rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys + off, &HCPhys);
                    if (RT_FAILURE(rc))
                        break;
                    rc = PGMMap(pVM, GCPtr + off, HCPhys, PAGE_SIZE, 0);
                    if (RT_FAILURE(rc))
                        break;
                }
                break;
            }

            case MMLOOKUPHYPERTYPE_MMIO2:
            {
                /* Portion of an MMIO2 region: resolve each page through the
                   owning device instance. */
                const RTGCPHYS offEnd = pLookup->u.MMIO2.off + pLookup->cb;
                for (RTGCPHYS offCur = pLookup->u.MMIO2.off; offCur < offEnd; offCur += PAGE_SIZE)
                {
                    RTHCPHYS HCPhys;
                    rc = PGMR3PhysMMIO2GetHCPhys(pVM, pLookup->u.MMIO2.pDevIns, pLookup->u.MMIO2.iRegion, offCur, &HCPhys);
                    if (RT_FAILURE(rc))
                        break;
                    rc = PGMMap(pVM, GCPtr + (offCur - pLookup->u.MMIO2.off), HCPhys, PAGE_SIZE, 0);
                    if (RT_FAILURE(rc))
                        break;
                }
                break;
            }

            case MMLOOKUPHYPERTYPE_DYNAMIC:
                /* do nothing here since these are either fences or managed by someone else using PGM. */
                break;

            default:
                AssertMsgFailed(("enmType=%d\n", pLookup->enmType));
                break;
        }

        if (RT_FAILURE(rc))
        {
            AssertMsgFailed(("rc=%Rrc cb=%d off=%#RX32 enmType=%d pszDesc=%s\n",
                             rc, pLookup->cb, pLookup->off, pLookup->enmType, pLookup->pszDesc));
            return rc;
        }

        /* next (offNext is relative to the current record; NIL_OFFSET ends the list) */
        if (pLookup->offNext == (int32_t)NIL_OFFSET)
            break;
        pLookup = (PMMLOOKUPHYPER)((uintptr_t)pLookup + pLookup->offNext);
    }

    LogFlow(("MMR3HyperInitFinalize: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
277
278
/**
 * Callback function which will be called when PGM is trying to find
 * a new location for the mapping.
 *
 * The callback is called in two modes, 1) the check mode and 2) the relocate mode.
 * In 1) the callback should say if it objects to a suggested new location. If it
 * accepts the new location, it is called again for doing its relocation.
 *
 *
 * @returns true if the location is ok.
 * @returns false if another location should be found.
 * @param   pVM         The VM handle.
 * @param   GCPtrOld    The old virtual address.
 * @param   GCPtrNew    The new virtual address.
 * @param   enmMode     Used to indicate the callback mode.
 * @param   pvUser      User argument. Ignored.
 * @remark  The return value is not a failure indicator, it's an acceptance
 *          indicator. Relocation can not fail!
 */
static DECLCALLBACK(bool) mmR3HyperRelocateCallback(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew, PGMRELOCATECALL enmMode, void *pvUser)
{
    switch (enmMode)
    {
        /*
         * Verify location - all locations are good for us.
         */
        case PGMRELOCATECALL_SUGGEST:
            return true;

        /*
         * Execute the relocation.
         */
        case PGMRELOCATECALL_RELOCATE:
        {
            /*
             * Accepted!
             */
            AssertMsg(GCPtrOld == pVM->mm.s.pvHyperAreaGC, ("GCPtrOld=%RGv pVM->mm.s.pvHyperAreaGC=%RGv\n", GCPtrOld, pVM->mm.s.pvHyperAreaGC));
            Log(("Relocating the hypervisor from %RGv to %RGv\n", GCPtrOld, GCPtrNew));

            /*
             * Relocate the VM structure and ourselves.
             *
             * All raw-mode context pointers below are adjusted by the same
             * delta; the order matters since pVMRC is propagated to the heap.
             */
            RTGCINTPTR offDelta = GCPtrNew - GCPtrOld;
            pVM->pVMRC += offDelta;
            for (VMCPUID i = 0; i < pVM->cCpus; i++)
                pVM->aCpus[i].pVMRC = pVM->pVMRC;

            pVM->mm.s.pvHyperAreaGC += offDelta;
            Assert(pVM->mm.s.pvHyperAreaGC < _4G);
            pVM->mm.s.pHyperHeapRC += offDelta;
            pVM->mm.s.pHyperHeapR3->pbHeapRC += offDelta;
            pVM->mm.s.pHyperHeapR3->pVMRC = pVM->pVMRC;

            /*
             * Relocate the rest (notify every component about the delta).
             */
            VMR3Relocate(pVM, offDelta);
            return true;
        }

        default:
            AssertMsgFailed(("Invalid relocation mode %d\n", enmMode));
    }

    /* Unknown mode: reject the location. */
    return false;
}
346
347/**
348 * Service a VMMCALLRING3_MMHYPER_LOCK call.
349 *
350 * @returns VBox status code.
351 * @param pVM The VM handle.
352 */
353VMMR3DECL(int) MMR3LockCall(PVM pVM)
354{
355 PMMHYPERHEAP pHeap = pVM->mm.s.CTX_SUFF(pHyperHeap);
356
357 int rc = PDMR3CritSectEnterEx(&pHeap->Lock, true /* fHostCall */);
358 AssertRC(rc);
359 return rc;
360}
361
362/**
363 * Maps contiguous HC physical memory into the hypervisor region in the GC.
364 *
365 * @return VBox status code.
366 *
367 * @param pVM VM handle.
368 * @param pvR3 Ring-3 address of the memory. Must be page aligned!
369 * @param pvR0 Optional ring-0 address of the memory.
370 * @param HCPhys Host context physical address of the memory to be
371 * mapped. Must be page aligned!
372 * @param cb Size of the memory. Will be rounded up to nearest page.
373 * @param pszDesc Description.
374 * @param pGCPtr Where to store the GC address.
375 */
376VMMR3DECL(int) MMR3HyperMapHCPhys(PVM pVM, void *pvR3, RTR0PTR pvR0, RTHCPHYS HCPhys, size_t cb, const char *pszDesc, PRTGCPTR pGCPtr)
377{
378 LogFlow(("MMR3HyperMapHCPhys: pvR3=%p pvR0=%p HCPhys=%RHp cb=%d pszDesc=%p:{%s} pGCPtr=%p\n", pvR3, pvR0, HCPhys, (int)cb, pszDesc, pszDesc, pGCPtr));
379
380 /*
381 * Validate input.
382 */
383 AssertReturn(RT_ALIGN_P(pvR3, PAGE_SIZE) == pvR3, VERR_INVALID_PARAMETER);
384 AssertReturn(RT_ALIGN_T(pvR0, PAGE_SIZE, RTR0PTR) == pvR0, VERR_INVALID_PARAMETER);
385 AssertReturn(RT_ALIGN_T(HCPhys, PAGE_SIZE, RTHCPHYS) == HCPhys, VERR_INVALID_PARAMETER);
386 AssertReturn(pszDesc && *pszDesc, VERR_INVALID_PARAMETER);
387
388 /*
389 * Add the memory to the hypervisor area.
390 */
391 uint32_t cbAligned = RT_ALIGN_32(cb, PAGE_SIZE);
392 AssertReturn(cbAligned >= cb, VERR_INVALID_PARAMETER);
393 RTGCPTR GCPtr;
394 PMMLOOKUPHYPER pLookup;
395 int rc = mmR3HyperMap(pVM, cbAligned, pszDesc, &GCPtr, &pLookup);
396 if (RT_SUCCESS(rc))
397 {
398 pLookup->enmType = MMLOOKUPHYPERTYPE_HCPHYS;
399 pLookup->u.HCPhys.pvR3 = pvR3;
400 pLookup->u.HCPhys.pvR0 = pvR0;
401 pLookup->u.HCPhys.HCPhys = HCPhys;
402
403 /*
404 * Update the page table.
405 */
406 if (pVM->mm.s.fPGMInitialized)
407 rc = PGMMap(pVM, GCPtr, HCPhys, cbAligned, 0);
408 if (RT_SUCCESS(rc))
409 *pGCPtr = GCPtr;
410 }
411 return rc;
412}
413
414
/**
 * Maps contiguous GC physical memory into the hypervisor region in the GC.
 *
 * @return VBox status code.
 *
 * @param   pVM         VM handle.
 * @param   GCPhys      Guest context physical address of the memory to be mapped. Must be page aligned!
 * @param   cb          Size of the memory. Will be rounded up to nearest page.
 * @param   pszDesc     Mapping description.
 * @param   pGCPtr      Where to store the GC address.
 */
VMMR3DECL(int) MMR3HyperMapGCPhys(PVM pVM, RTGCPHYS GCPhys, size_t cb, const char *pszDesc, PRTGCPTR pGCPtr)
{
    LogFlow(("MMR3HyperMapGCPhys: GCPhys=%RGp cb=%d pszDesc=%p:{%s} pGCPtr=%p\n", GCPhys, (int)cb, pszDesc, pszDesc, pGCPtr));

    /*
     * Validate input.
     */
    AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
    AssertReturn(pszDesc && *pszDesc, VERR_INVALID_PARAMETER);

    /*
     * Add the memory to the hypervisor area.
     */
    cb = RT_ALIGN_Z(cb, PAGE_SIZE);
    RTGCPTR         GCPtr;
    PMMLOOKUPHYPER  pLookup;
    int rc = mmR3HyperMap(pVM, cb, pszDesc, &GCPtr, &pLookup);
    if (RT_SUCCESS(rc))
    {
        pLookup->enmType = MMLOOKUPHYPERTYPE_GCPHYS;
        pLookup->u.GCPhys.GCPhys = GCPhys;

        /*
         * Update the page table.
         *
         * Each guest physical page is translated to its host physical
         * address; the PGMMap calls are skipped when PGM isn't initialized
         * yet (MMR3HyperInitFinalize will do the delayed mapping).
         */
        for (unsigned off = 0; off < cb; off += PAGE_SIZE)
        {
            RTHCPHYS HCPhys;
            rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys + off, &HCPhys);
            AssertRC(rc);
            if (RT_FAILURE(rc))
            {
                AssertMsgFailed(("rc=%Rrc GCPhys=%RGp off=%#x %s\n", rc, GCPhys, off, pszDesc));
                break;
            }
            if (pVM->mm.s.fPGMInitialized)
            {
                rc = PGMMap(pVM, GCPtr + off, HCPhys, PAGE_SIZE, 0);
                AssertRC(rc);
                if (RT_FAILURE(rc))
                {
                    AssertMsgFailed(("rc=%Rrc GCPhys=%RGp off=%#x %s\n", rc, GCPhys, off, pszDesc));
                    break;
                }
            }
        }

        /* Note: pGCPtr is optional for this mapping type. */
        if (RT_SUCCESS(rc) && pGCPtr)
            *pGCPtr = GCPtr;
    }
    return rc;
}
478
479
/**
 * Maps a portion of an MMIO2 region into the hypervisor region.
 *
 * Callers of this API must never deregister the MMIO2 region before the
 * VM is powered off.  If this becomes a requirement MMR3HyperUnmapMMIO2
 * API will be needed to perform cleanups.
 *
 * @return VBox status code.
 *
 * @param   pVM         Pointer to the shared VM structure.
 * @param   pDevIns     The device owning the MMIO2 memory.
 * @param   iRegion     The region.
 * @param   off         The offset into the region. Will be rounded down to closest page boundrary.
 * @param   cb          The number of bytes to map. Will be rounded up to the closest page boundrary.
 * @param   pszDesc     Mapping description.
 * @param   pRCPtr      Where to store the RC address.
 */
VMMR3DECL(int) MMR3HyperMapMMIO2(PVM pVM, PPDMDEVINS pDevIns, uint32_t iRegion, RTGCPHYS off, RTGCPHYS cb,
                                 const char *pszDesc, PRTRCPTR pRCPtr)
{
    LogFlow(("MMR3HyperMapMMIO2: pDevIns=%p iRegion=%#x off=%RGp cb=%RGp pszDesc=%p:{%s} pRCPtr=%p\n",
             pDevIns, iRegion, off, cb, pszDesc, pszDesc, pRCPtr));
    int rc;

    /*
     * Validate input.
     *
     * The requested range is widened to whole pages (remembering the
     * sub-page offset so it can be re-applied to the returned pointer), and
     * every page is probed up front so we fail before touching the HMA.
     */
    AssertReturn(pszDesc && *pszDesc, VERR_INVALID_PARAMETER);
    AssertReturn(off + cb > off, VERR_INVALID_PARAMETER);
    uint32_t const offPage = off & PAGE_OFFSET_MASK;
    off &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
    cb += offPage;
    cb = RT_ALIGN_Z(cb, PAGE_SIZE);
    const RTGCPHYS offEnd = off + cb;
    AssertReturn(offEnd > off, VERR_INVALID_PARAMETER);
    for (RTGCPHYS offCur = off; offCur < offEnd; offCur += PAGE_SIZE)
    {
        RTHCPHYS HCPhys;
        rc = PGMR3PhysMMIO2GetHCPhys(pVM, pDevIns, iRegion, offCur, &HCPhys);
        AssertMsgRCReturn(rc, ("rc=%Rrc - iRegion=%d off=%RGp\n", rc, iRegion, off), rc);
    }

    /*
     * Add the memory to the hypervisor area.
     */
    RTGCPTR         GCPtr;
    PMMLOOKUPHYPER  pLookup;
    rc = mmR3HyperMap(pVM, cb, pszDesc, &GCPtr, &pLookup);
    if (RT_SUCCESS(rc))
    {
        pLookup->enmType = MMLOOKUPHYPERTYPE_MMIO2;
        pLookup->u.MMIO2.pDevIns = pDevIns;
        pLookup->u.MMIO2.iRegion = iRegion;
        pLookup->u.MMIO2.off = off;

        /*
         * Update the page table (page by page; the region need not be
         * physically contiguous).  Skipped before PGM init - see
         * MMR3HyperInitFinalize for the delayed variant.
         */
        if (pVM->mm.s.fPGMInitialized)
        {
            for (RTGCPHYS offCur = off; offCur < offEnd; offCur += PAGE_SIZE)
            {
                RTHCPHYS HCPhys;
                rc = PGMR3PhysMMIO2GetHCPhys(pVM, pDevIns, iRegion, offCur, &HCPhys);
                AssertRCReturn(rc, VERR_INTERNAL_ERROR);
                rc = PGMMap(pVM, GCPtr + (offCur - off), HCPhys, PAGE_SIZE, 0);
                if (RT_FAILURE(rc))
                {
                    AssertMsgFailed(("rc=%Rrc offCur=%RGp %s\n", rc, offCur, pszDesc));
                    break;
                }
            }
        }

        if (RT_SUCCESS(rc))
        {
            /* Re-apply the sub-page offset and make sure the address
               survives the RC pointer truncation. */
            GCPtr |= offPage;
            *pRCPtr = GCPtr;
            AssertLogRelReturn(*pRCPtr == GCPtr, VERR_INTERNAL_ERROR);
        }
    }
    return rc;
}
563
564
565/**
566 * Maps locked R3 virtual memory into the hypervisor region in the GC.
567 *
568 * @return VBox status code.
569 *
570 * @param pVM VM handle.
571 * @param pvR3 The ring-3 address of the memory, must be page aligned.
572 * @param pvR0 The ring-0 address of the memory, must be page aligned. (optional)
573 * @param cPages The number of pages.
574 * @param paPages The page descriptors.
575 * @param pszDesc Mapping description.
576 * @param pGCPtr Where to store the GC address corresponding to pvR3.
577 */
578VMMR3DECL(int) MMR3HyperMapPages(PVM pVM, void *pvR3, RTR0PTR pvR0, size_t cPages, PCSUPPAGE paPages, const char *pszDesc, PRTGCPTR pGCPtr)
579{
580 LogFlow(("MMR3HyperMapPages: pvR3=%p pvR0=%p cPages=%zu paPages=%p pszDesc=%p:{%s} pGCPtr=%p\n",
581 pvR3, pvR0, cPages, paPages, pszDesc, pszDesc, pGCPtr));
582
583 /*
584 * Validate input.
585 */
586 AssertPtrReturn(pvR3, VERR_INVALID_POINTER);
587 AssertPtrReturn(paPages, VERR_INVALID_POINTER);
588 AssertReturn(cPages > 0, VERR_PAGE_COUNT_OUT_OF_RANGE);
589 AssertReturn(cPages <= VBOX_MAX_ALLOC_PAGE_COUNT, VERR_PAGE_COUNT_OUT_OF_RANGE);
590 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
591 AssertReturn(*pszDesc, VERR_INVALID_PARAMETER);
592 AssertPtrReturn(pGCPtr, VERR_INVALID_PARAMETER);
593
594 /*
595 * Add the memory to the hypervisor area.
596 */
597 RTGCPTR GCPtr;
598 PMMLOOKUPHYPER pLookup;
599 int rc = mmR3HyperMap(pVM, cPages << PAGE_SHIFT, pszDesc, &GCPtr, &pLookup);
600 if (RT_SUCCESS(rc))
601 {
602 /*
603 * Copy the physical page addresses and tell PGM about them.
604 */
605 PRTHCPHYS paHCPhysPages = (PRTHCPHYS)MMR3HeapAlloc(pVM, MM_TAG_MM, sizeof(RTHCPHYS) * cPages);
606 if (paHCPhysPages)
607 {
608 for (size_t i = 0; i < cPages; i++)
609 {
610 AssertReleaseMsgReturn(paPages[i].Phys != 0 && paPages[i].Phys != NIL_RTHCPHYS && !(paPages[i].Phys & PAGE_OFFSET_MASK),
611 ("i=%#zx Phys=%RHp %s\n", i, paPages[i].Phys, pszDesc),
612 VERR_INTERNAL_ERROR);
613 paHCPhysPages[i] = paPages[i].Phys;
614 }
615
616 if (pVM->mm.s.fPGMInitialized)
617 {
618 for (size_t i = 0; i < cPages; i++)
619 {
620 rc = PGMMap(pVM, GCPtr + (i << PAGE_SHIFT), paHCPhysPages[i], PAGE_SIZE, 0);
621 AssertRCBreak(rc);
622 }
623 }
624 if (RT_SUCCESS(rc))
625 {
626 pLookup->enmType = MMLOOKUPHYPERTYPE_LOCKED;
627 pLookup->u.Locked.pvR3 = pvR3;
628 pLookup->u.Locked.pvR0 = pvR0;
629 pLookup->u.Locked.paHCPhysPages = paHCPhysPages;
630
631 /* done. */
632 *pGCPtr = GCPtr;
633 return rc;
634 }
635 /* Don't care about failure clean, we're screwed if this fails anyway. */
636 }
637 }
638
639 return rc;
640}
641
642
643/**
644 * Reserves a hypervisor memory area.
645 * Most frequent usage is fence pages and dynamically mappings like the guest PD and PDPT.
646 *
647 * @return VBox status code.
648 *
649 * @param pVM VM handle.
650 * @param cb Size of the memory. Will be rounded up to nearest page.
651 * @param pszDesc Mapping description.
652 * @param pGCPtr Where to store the assigned GC address. Optional.
653 */
654VMMR3DECL(int) MMR3HyperReserve(PVM pVM, unsigned cb, const char *pszDesc, PRTGCPTR pGCPtr)
655{
656 LogFlow(("MMR3HyperMapHCRam: cb=%d pszDesc=%p:{%s} pGCPtr=%p\n", (int)cb, pszDesc, pszDesc, pGCPtr));
657
658 /*
659 * Validate input.
660 */
661 if ( cb <= 0
662 || !pszDesc
663 || !*pszDesc)
664 {
665 AssertMsgFailed(("Invalid parameter\n"));
666 return VERR_INVALID_PARAMETER;
667 }
668
669 /*
670 * Add the memory to the hypervisor area.
671 */
672 RTGCPTR GCPtr;
673 PMMLOOKUPHYPER pLookup;
674 int rc = mmR3HyperMap(pVM, cb, pszDesc, &GCPtr, &pLookup);
675 if (RT_SUCCESS(rc))
676 {
677 pLookup->enmType = MMLOOKUPHYPERTYPE_DYNAMIC;
678 if (pGCPtr)
679 *pGCPtr = GCPtr;
680 return VINF_SUCCESS;
681 }
682 return rc;
683}
684
685
/**
 * Adds memory to the hypervisor memory arena.
 *
 * @return VBox status code.
 * @param   pVM         The VM handle.
 * @param   cb          Size of the memory. Will be rounded up to neares page.
 * @param   pszDesc     The description of the memory.
 * @param   pGCPtr      Where to store the GC address.
 * @param   ppLookup    Where to store the pointer to the lookup record.
 * @remark  We assume the threading structure of VBox imposes natural
 *          serialization of most functions, this one included.
 */
static int mmR3HyperMap(PVM pVM, const size_t cb, const char *pszDesc, PRTGCPTR pGCPtr, PMMLOOKUPHYPER *ppLookup)
{
    /*
     * Validate input.
     */
    const uint32_t cbAligned = RT_ALIGN_32(cb, PAGE_SIZE);
    AssertReturn(cbAligned >= cb, VERR_INVALID_PARAMETER);
    if (pVM->mm.s.offHyperNextStatic + cbAligned >= pVM->mm.s.cbHyperArea) /* don't use the last page, it's a fence. */
    {
        AssertMsgFailed(("Out of static mapping space in the HMA! offHyperAreaGC=%x cbAligned=%x cbHyperArea=%x\n",
                         pVM->mm.s.offHyperNextStatic, cbAligned, pVM->mm.s.cbHyperArea));
        return VERR_NO_MEMORY;
    }

    /*
     * Allocate lookup record.
     */
    PMMLOOKUPHYPER  pLookup;
    int rc = MMHyperAlloc(pVM, sizeof(*pLookup), 1, MM_TAG_MM, (void **)&pLookup);
    if (RT_SUCCESS(rc))
    {
        /*
         * Initialize it and insert it at the head of the list.
         *
         * The list is linked by offsets (not pointers) so it stays valid in
         * all contexts; offNext is made relative to the new record after the
         * head offset (offLookupHyper) has been updated.
         */
        pLookup->offNext        = pVM->mm.s.offLookupHyper;
        pLookup->cb             = cbAligned;
        pLookup->off            = pVM->mm.s.offHyperNextStatic;
        pVM->mm.s.offLookupHyper = (uint8_t *)pLookup - (uint8_t *)pVM->mm.s.pHyperHeapR3;
        if (pLookup->offNext != (int32_t)NIL_OFFSET)
            pLookup->offNext   -= pVM->mm.s.offLookupHyper;
        pLookup->enmType        = MMLOOKUPHYPERTYPE_INVALID;
        /* Poison the union so stale data is easy to spot. */
        memset(&pLookup->u, 0xff, sizeof(pLookup->u));
        pLookup->pszDesc        = pszDesc;

        /* Mapping: hand out the next free static range. */
        *pGCPtr = pVM->mm.s.pvHyperAreaGC + pVM->mm.s.offHyperNextStatic;
        pVM->mm.s.offHyperNextStatic += cbAligned;

        /* Return pointer. */
        *ppLookup = pLookup;
    }

    AssertRC(rc);
    LogFlow(("mmR3HyperMap: returns %Rrc *pGCPtr=%RGv\n", rc, *pGCPtr));
    return rc;
}
744
745
746/**
747 * Allocates a new heap.
748 *
749 * @returns VBox status code.
750 * @param pVM The VM handle.
751 * @param cb The size of the new heap.
752 * @param ppHeap Where to store the heap pointer on successful return.
753 * @param pR0PtrHeap Where to store the ring-0 address of the heap on
754 * success.
755 */
756static int mmR3HyperHeapCreate(PVM pVM, const size_t cb, PMMHYPERHEAP *ppHeap, PRTR0PTR pR0PtrHeap)
757{
758 /*
759 * Allocate the hypervisor heap.
760 */
761 const uint32_t cbAligned = RT_ALIGN_32(cb, PAGE_SIZE);
762 AssertReturn(cbAligned >= cb, VERR_INVALID_PARAMETER);
763 uint32_t const cPages = cbAligned >> PAGE_SHIFT;
764 PSUPPAGE paPages = (PSUPPAGE)MMR3HeapAlloc(pVM, MM_TAG_MM, cPages * sizeof(paPages[0]));
765 if (!paPages)
766 return VERR_NO_MEMORY;
767 void *pv;
768 RTR0PTR pvR0 = NIL_RTR0PTR;
769 int rc = SUPR3PageAllocEx(cPages,
770 0 /*fFlags*/,
771 &pv,
772#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
773 VMMIsHwVirtExtForced(pVM) ? &pvR0 : NULL,
774#else
775 NULL,
776#endif
777 paPages);
778 if (RT_SUCCESS(rc))
779 {
780#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
781 if (!VMMIsHwVirtExtForced(pVM))
782 pvR0 = NIL_RTR0PTR;
783#else
784 pvR0 = (uintptr_t)pv;
785#endif
786 memset(pv, 0, cbAligned);
787
788 /*
789 * Initialize the heap and first free chunk.
790 */
791 PMMHYPERHEAP pHeap = (PMMHYPERHEAP)pv;
792 pHeap->u32Magic = MMHYPERHEAP_MAGIC;
793 pHeap->pbHeapR3 = (uint8_t *)pHeap + MMYPERHEAP_HDR_SIZE;
794 pHeap->pbHeapR0 = pvR0 != NIL_RTR0PTR ? pvR0 + MMYPERHEAP_HDR_SIZE : NIL_RTR0PTR;
795 //pHeap->pbHeapRC = 0; // set by mmR3HyperHeapMap()
796 pHeap->pVMR3 = pVM;
797 pHeap->pVMR0 = pVM->pVMR0;
798 pHeap->pVMRC = pVM->pVMRC;
799 pHeap->cbHeap = cbAligned - MMYPERHEAP_HDR_SIZE;
800 pHeap->cbFree = pHeap->cbHeap - sizeof(MMHYPERCHUNK);
801 //pHeap->offFreeHead = 0;
802 //pHeap->offFreeTail = 0;
803 pHeap->offPageAligned = pHeap->cbHeap;
804 //pHeap->HyperHeapStatTree = 0;
805 pHeap->paPages = paPages;
806
807 PMMHYPERCHUNKFREE pFree = (PMMHYPERCHUNKFREE)pHeap->pbHeapR3;
808 pFree->cb = pHeap->cbFree;
809 //pFree->core.offNext = 0;
810 MMHYPERCHUNK_SET_TYPE(&pFree->core, MMHYPERCHUNK_FLAGS_FREE);
811 pFree->core.offHeap = -(int32_t)MMYPERHEAP_HDR_SIZE;
812 //pFree->offNext = 0;
813 //pFree->offPrev = 0;
814
815 STAMR3Register(pVM, &pHeap->cbHeap, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, "/MM/HyperHeap/cbHeap", STAMUNIT_BYTES, "The heap size.");
816 STAMR3Register(pVM, &pHeap->cbFree, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, "/MM/HyperHeap/cbFree", STAMUNIT_BYTES, "The free space.");
817
818 *ppHeap = pHeap;
819 *pR0PtrHeap = pvR0;
820 return VINF_SUCCESS;
821 }
822 AssertMsgFailed(("SUPR3PageAllocEx(%d,,,,) -> %Rrc\n", cbAligned >> PAGE_SHIFT, rc));
823
824 *ppHeap = NULL;
825 return rc;
826}
827
/**
 * Maps the hypervisor heap (created by mmR3HyperHeapCreate) into the
 * hypervisor memory area and fills in the raw-mode context pointers.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   pHeap       The heap to map; its paPages array is consumed (freed)
 *                      on success.
 * @param   ppHeapGC    Where to store the GC address of the heap header.
 */
static int mmR3HyperHeapMap(PVM pVM, PMMHYPERHEAP pHeap, PRTGCPTR ppHeapGC)
{
    Assert(RT_ALIGN_Z(pHeap->cbHeap + MMYPERHEAP_HDR_SIZE, PAGE_SIZE) == pHeap->cbHeap + MMYPERHEAP_HDR_SIZE);
    Assert(pHeap->paPages);
    int rc = MMR3HyperMapPages(pVM,
                               pHeap,
                               pHeap->pbHeapR0 != NIL_RTR0PTR ? pHeap->pbHeapR0 - MMYPERHEAP_HDR_SIZE : NIL_RTR0PTR,
                               (pHeap->cbHeap + MMYPERHEAP_HDR_SIZE) >> PAGE_SHIFT,
                               pHeap->paPages,
                               "Heap", ppHeapGC);
    if (RT_SUCCESS(rc))
    {
        /* The usable heap starts right after the header in GC space too. */
        pHeap->pVMRC    = pVM->pVMRC;
        pHeap->pbHeapRC = *ppHeapGC + MMYPERHEAP_HDR_SIZE;
        /* Reserve a page for fencing. */
        MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);

        /* We won't need these any more. */
        MMR3HeapFree(pHeap->paPages);
        pHeap->paPages = NULL;
    }
    return rc;
}
854
855
/**
 * Allocates memory in the Hypervisor (GC VMM) area which never will
 * be freed and doesn't have any offset based relation to other heap blocks.
 *
 * The latter means that two blocks allocated by this API will not have the
 * same relative position to each other in GC and HC. In short, never use
 * this API for allocating nodes for an offset based AVL tree!
 *
 * The returned memory is of course zeroed.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   cb          Number of bytes to allocate.
 * @param   uAlignment  Required memory alignment in bytes.
 *                      Values are 0,8,16,32 and PAGE_SIZE.
 *                      0 -> default alignment, i.e. 8 bytes.
 * @param   enmTag      The statistics tag.
 * @param   ppv         Where to store the address to the allocated
 *                      memory.
 * @remark  This is assumed not to be used at times when serialization is required.
 */
VMMR3DECL(int) MMR3HyperAllocOnceNoRel(PVM pVM, size_t cb, unsigned uAlignment, MMTAG enmTag, void **ppv)
{
    /* Plain convenience wrapper: same as the Ex variant with no flags. */
    return MMR3HyperAllocOnceNoRelEx(pVM, cb, uAlignment, enmTag, 0/*fFlags*/, ppv);
}
881
882
883/**
884 * Allocates memory in the Hypervisor (GC VMM) area which never will
885 * be freed and doesn't have any offset based relation to other heap blocks.
886 *
887 * The latter means that two blocks allocated by this API will not have the
888 * same relative position to each other in GC and HC. In short, never use
889 * this API for allocating nodes for an offset based AVL tree!
890 *
891 * The returned memory is of course zeroed.
892 *
893 * @returns VBox status code.
894 * @param pVM The VM to operate on.
895 * @param cb Number of bytes to allocate.
896 * @param uAlignment Required memory alignment in bytes.
897 * Values are 0,8,16,32 and PAGE_SIZE.
898 * 0 -> default alignment, i.e. 8 bytes.
899 * @param enmTag The statistics tag.
900 * @param fFlags Flags, see MMHYPER_AONR_FLAGS_KERNEL_MAPPING.
901 * @param ppv Where to store the address to the allocated memory.
902 * @remark This is assumed not to be used at times when serialization is required.
903 */
904VMMR3DECL(int) MMR3HyperAllocOnceNoRelEx(PVM pVM, size_t cb, unsigned uAlignment, MMTAG enmTag, uint32_t fFlags, void **ppv)
905{
906 AssertMsg(cb >= 8, ("Hey! Do you really mean to allocate less than 8 bytes?! cb=%d\n", cb));
907 Assert(!(fFlags & ~(MMHYPER_AONR_FLAGS_KERNEL_MAPPING)));
908
909 /*
910 * Choose between allocating a new chunk of HMA memory
911 * and the heap. We will only do BIG allocations from HMA and
912 * only at creation time.
913 */
914 if ( ( cb < _64K
915 && ( uAlignment != PAGE_SIZE
916 || cb < 48*_1K)
917 && !(fFlags & MMHYPER_AONR_FLAGS_KERNEL_MAPPING)
918 )
919 || VMR3GetState(pVM) != VMSTATE_CREATING
920 )
921 {
922 Assert(!(fFlags & MMHYPER_AONR_FLAGS_KERNEL_MAPPING));
923 int rc = MMHyperAlloc(pVM, cb, uAlignment, enmTag, ppv);
924 if ( rc != VERR_MM_HYPER_NO_MEMORY
925 || cb <= 8*_1K)
926 {
927 Log2(("MMR3HyperAllocOnceNoRel: cb=%#zx uAlignment=%#x returns %Rrc and *ppv=%p\n",
928 cb, uAlignment, rc, *ppv));
929 return rc;
930 }
931 }
932
933#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
934 /*
935 * Set MMHYPER_AONR_FLAGS_KERNEL_MAPPING if we're in going to execute in ring-0.
936 */
937 if (VMMIsHwVirtExtForced(pVM))
938 fFlags |= MMHYPER_AONR_FLAGS_KERNEL_MAPPING;
939#endif
940
941 /*
942 * Validate alignment.
943 */
944 switch (uAlignment)
945 {
946 case 0:
947 case 8:
948 case 16:
949 case 32:
950 case PAGE_SIZE:
951 break;
952 default:
953 AssertMsgFailed(("Invalid alignment %u\n", uAlignment));
954 return VERR_INVALID_PARAMETER;
955 }
956
957 /*
958 * Allocate the pages and map them into HMA space.
959 */
960 uint32_t const cbAligned = RT_ALIGN_32(cb, PAGE_SIZE);
961 AssertReturn(cbAligned >= cb, VERR_INVALID_PARAMETER);
962 uint32_t const cPages = cbAligned >> PAGE_SHIFT;
963 PSUPPAGE paPages = (PSUPPAGE)RTMemTmpAlloc(cPages * sizeof(paPages[0]));
964 if (!paPages)
965 return VERR_NO_TMP_MEMORY;
966 void *pvPages;
967 RTR0PTR pvR0 = NIL_RTR0PTR;
968 int rc = SUPR3PageAllocEx(cPages,
969 0 /*fFlags*/,
970 &pvPages,
971 fFlags & MMHYPER_AONR_FLAGS_KERNEL_MAPPING ? &pvR0 : NULL,
972 paPages);
973 if (RT_SUCCESS(rc))
974 {
975 if (!(fFlags & MMHYPER_AONR_FLAGS_KERNEL_MAPPING))
976#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
977 pvR0 = NIL_RTR0PTR;
978#else
979 pvR0 = (RTR0PTR)pvPages;
980#endif
981
982 memset(pvPages, 0, cbAligned);
983
984 RTGCPTR GCPtr;
985 rc = MMR3HyperMapPages(pVM,
986 pvPages,
987 pvR0,
988 cPages,
989 paPages,
990 MMR3HeapAPrintf(pVM, MM_TAG_MM, "alloc once (%s)", mmGetTagName(enmTag)),
991 &GCPtr);
992 if (RT_SUCCESS(rc))
993 {
994 *ppv = pvPages;
995 Log2(("MMR3HyperAllocOnceNoRel: cbAligned=%#x uAlignment=%#x returns VINF_SUCCESS and *ppv=%p\n",
996 cbAligned, uAlignment, *ppv));
997 MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
998 return rc;
999 }
1000 AssertMsgFailed(("Failed to allocate %zd bytes! %Rrc\n", cbAligned, rc));
1001 SUPR3PageFreeEx(pvPages, cPages);
1002
1003
1004 /*
1005 * HACK ALERT! Try allocate it off the heap so that we don't freak
1006 * out during vga/vmmdev mmio2 allocation with certain ram sizes.
1007 */
1008 /** @todo make a proper fix for this so we will never end up in this kind of situation! */
1009 Log(("MMR3HyperAllocOnceNoRel: MMR3HyperMapHCRam failed with rc=%Rrc, try MMHyperAlloc(,%#x,,) instead\n", rc, cb));
1010 int rc2 = MMHyperAlloc(pVM, cb, uAlignment, enmTag, ppv);
1011 if (RT_SUCCESS(rc2))
1012 {
1013 Log2(("MMR3HyperAllocOnceNoRel: cb=%#x uAlignment=%#x returns %Rrc and *ppv=%p\n",
1014 cb, uAlignment, rc, *ppv));
1015 return rc;
1016 }
1017 }
1018 else
1019 AssertMsgFailed(("Failed to allocate %zd bytes! %Rrc\n", cbAligned, rc));
1020
1021 if (rc == VERR_NO_MEMORY)
1022 rc = VERR_MM_HYPER_NO_MEMORY;
1023 LogRel(("MMR3HyperAllocOnceNoRel: cb=%#zx uAlignment=%#x returns %Rrc\n", cb, uAlignment, rc));
1024 return rc;
1025}
1026
1027
1028/**
1029 * Lookus up a ring-3 pointer to HMA.
1030 *
1031 * @returns The lookup record on success, NULL on failure.
1032 * @param pVM The VM handle.
1033 * @param pvR3 The ring-3 address to look up.
1034 */
1035DECLINLINE(PMMLOOKUPHYPER) mmR3HyperLookupR3(PVM pVM, void *pvR3)
1036{
1037 PMMLOOKUPHYPER pLookup = (PMMLOOKUPHYPER)((uint8_t *)pVM->mm.s.pHyperHeapR3 + pVM->mm.s.offLookupHyper);
1038 for (;;)
1039 {
1040 switch (pLookup->enmType)
1041 {
1042 case MMLOOKUPHYPERTYPE_LOCKED:
1043 {
1044 unsigned off = (uint8_t *)pvR3 - (uint8_t *)pLookup->u.Locked.pvR3;
1045 if (off < pLookup->cb)
1046 return pLookup;
1047 break;
1048 }
1049
1050 case MMLOOKUPHYPERTYPE_HCPHYS:
1051 {
1052 unsigned off = (uint8_t *)pvR3 - (uint8_t *)pLookup->u.HCPhys.pvR3;
1053 if (off < pLookup->cb)
1054 return pLookup;
1055 break;
1056 }
1057
1058 case MMLOOKUPHYPERTYPE_GCPHYS:
1059 case MMLOOKUPHYPERTYPE_MMIO2:
1060 case MMLOOKUPHYPERTYPE_DYNAMIC:
1061 /** @todo ? */
1062 break;
1063
1064 default:
1065 AssertMsgFailed(("enmType=%d\n", pLookup->enmType));
1066 return NULL;
1067 }
1068
1069 /* next */
1070 if ((unsigned)pLookup->offNext == NIL_OFFSET)
1071 return NULL;
1072 pLookup = (PMMLOOKUPHYPER)((uint8_t *)pLookup + pLookup->offNext);
1073 }
1074}
1075
1076
1077/**
1078 * Set / unset guard status on one or more hyper heap pages.
1079 *
1080 * @returns VBox status code (first failure).
1081 * @param pVM The VM handle.
1082 * @param pvStart The hyper heap page address. Must be page
1083 * aligned.
1084 * @param cb The number of bytes. Must be page aligned.
1085 * @param fSet Wheter to set or unset guard page status.
1086 */
1087VMMR3DECL(int) MMR3HyperSetGuard(PVM pVM, void *pvStart, size_t cb, bool fSet)
1088{
1089 /*
1090 * Validate input.
1091 */
1092 AssertReturn(!((uintptr_t)pvStart & PAGE_OFFSET_MASK), VERR_INVALID_POINTER);
1093 AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
1094 AssertReturn(cb <= UINT32_MAX, VERR_INVALID_PARAMETER);
1095 PMMLOOKUPHYPER pLookup = mmR3HyperLookupR3(pVM, pvStart);
1096 AssertReturn(pLookup, VERR_INVALID_PARAMETER);
1097 AssertReturn(pLookup->enmType == MMLOOKUPHYPERTYPE_LOCKED, VERR_INVALID_PARAMETER);
1098
1099 /*
1100 * Get down to business.
1101 * Note! We quietly ignore errors from the support library since the
1102 * protection stuff isn't possible to implement on all platforms.
1103 */
1104 uint8_t *pbR3 = (uint8_t *)pLookup->u.Locked.pvR3;
1105 RTR0PTR R0Ptr = pLookup->u.Locked.pvR0 != (uintptr_t)pLookup->u.Locked.pvR3
1106 ? pLookup->u.Locked.pvR0
1107 : NIL_RTR0PTR;
1108 uint32_t off = (uint32_t)((uint8_t *)pvStart - pbR3);
1109 int rc;
1110 if (fSet)
1111 {
1112 rc = PGMMapSetPage(pVM, MMHyperR3ToRC(pVM, pvStart), cb, 0);
1113 SUPR3PageProtect(pbR3, R0Ptr, off, (uint32_t)cb, RTMEM_PROT_NONE);
1114 }
1115 else
1116 {
1117 rc = PGMMapSetPage(pVM, MMHyperR3ToRC(pVM, pvStart), cb, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
1118 SUPR3PageProtect(pbR3, R0Ptr, off, (uint32_t)cb, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
1119 }
1120 return rc;
1121}
1122
1123
1124/**
1125 * Convert hypervisor HC virtual address to HC physical address.
1126 *
1127 * @returns HC physical address.
1128 * @param pVM VM Handle
1129 * @param pvR3 Host context virtual address.
1130 */
1131VMMR3DECL(RTHCPHYS) MMR3HyperHCVirt2HCPhys(PVM pVM, void *pvR3)
1132{
1133 PMMLOOKUPHYPER pLookup = (PMMLOOKUPHYPER)((uint8_t *)pVM->mm.s.pHyperHeapR3 + pVM->mm.s.offLookupHyper);
1134 for (;;)
1135 {
1136 switch (pLookup->enmType)
1137 {
1138 case MMLOOKUPHYPERTYPE_LOCKED:
1139 {
1140 unsigned off = (uint8_t *)pvR3 - (uint8_t *)pLookup->u.Locked.pvR3;
1141 if (off < pLookup->cb)
1142 return pLookup->u.Locked.paHCPhysPages[off >> PAGE_SHIFT] | (off & PAGE_OFFSET_MASK);
1143 break;
1144 }
1145
1146 case MMLOOKUPHYPERTYPE_HCPHYS:
1147 {
1148 unsigned off = (uint8_t *)pvR3 - (uint8_t *)pLookup->u.HCPhys.pvR3;
1149 if (off < pLookup->cb)
1150 return pLookup->u.HCPhys.HCPhys + off;
1151 break;
1152 }
1153
1154 case MMLOOKUPHYPERTYPE_GCPHYS:
1155 case MMLOOKUPHYPERTYPE_MMIO2:
1156 case MMLOOKUPHYPERTYPE_DYNAMIC:
1157 /* can (or don't want to) convert these kind of records. */
1158 break;
1159
1160 default:
1161 AssertMsgFailed(("enmType=%d\n", pLookup->enmType));
1162 break;
1163 }
1164
1165 /* next */
1166 if ((unsigned)pLookup->offNext == NIL_OFFSET)
1167 break;
1168 pLookup = (PMMLOOKUPHYPER)((uint8_t *)pLookup + pLookup->offNext);
1169 }
1170
1171 AssertMsgFailed(("pvR3=%p is not inside the hypervisor memory area!\n", pvR3));
1172 return NIL_RTHCPHYS;
1173}
1174
1175
1176/**
1177 * Implements the return case of MMR3HyperQueryInfoFromHCPhys.
1178 *
1179 * @returns VINF_SUCCESS, VINF_BUFFER_OVERFLOW.
1180 * @param pVM The VM handle.
1181 * @param HCPhys The host physical address to look for.
1182 * @param pLookup The HMA lookup entry corresponding to HCPhys.
1183 * @param pszWhat Where to return the description.
1184 * @param cbWhat Size of the return buffer.
1185 * @param pcbAlloc Where to return the size of whatever it is.
1186 */
1187static int mmR3HyperQueryInfoFromHCPhysFound(PVM pVM, RTHCPHYS HCPhys, PMMLOOKUPHYPER pLookup,
1188 char *pszWhat, size_t cbWhat, uint32_t *pcbAlloc)
1189{
1190 *pcbAlloc = pLookup->cb;
1191 int rc = RTStrCopy(pszWhat, cbWhat, pLookup->pszDesc);
1192 return rc == VERR_BUFFER_OVERFLOW ? VINF_BUFFER_OVERFLOW : rc;
1193}
1194
1195
1196/**
1197 * Scans the HMA for the physical page and reports back a description if found.
1198 *
1199 * @returns VINF_SUCCESS, VINF_BUFFER_OVERFLOW, VERR_NOT_FOUND.
1200 * @param pVM The VM handle.
1201 * @param HCPhys The host physical address to look for.
1202 * @param pszWhat Where to return the description.
1203 * @param cbWhat Size of the return buffer.
1204 * @param pcbAlloc Where to return the size of whatever it is.
1205 */
1206VMMR3_INT_DECL(int) MMR3HyperQueryInfoFromHCPhys(PVM pVM, RTHCPHYS HCPhys, char *pszWhat, size_t cbWhat, uint32_t *pcbAlloc)
1207{
1208 RTHCPHYS HCPhysPage = HCPhys & ~(RTHCPHYS)PAGE_OFFSET_MASK;
1209 PMMLOOKUPHYPER pLookup = (PMMLOOKUPHYPER)((uint8_t *)pVM->mm.s.pHyperHeapR3 + pVM->mm.s.offLookupHyper);
1210 for (;;)
1211 {
1212 switch (pLookup->enmType)
1213 {
1214 case MMLOOKUPHYPERTYPE_LOCKED:
1215 {
1216 uint32_t i = pLookup->cb >> PAGE_SHIFT;
1217 while (i-- > 0)
1218 if (pLookup->u.Locked.paHCPhysPages[i] == HCPhysPage)
1219 return mmR3HyperQueryInfoFromHCPhysFound(pVM, HCPhys, pLookup, pszWhat, cbWhat, pcbAlloc);
1220 break;
1221 }
1222
1223 case MMLOOKUPHYPERTYPE_HCPHYS:
1224 {
1225 if (pLookup->u.HCPhys.HCPhys - HCPhysPage < pLookup->cb)
1226 return mmR3HyperQueryInfoFromHCPhysFound(pVM, HCPhys, pLookup, pszWhat, cbWhat, pcbAlloc);
1227 break;
1228 }
1229
1230 case MMLOOKUPHYPERTYPE_MMIO2:
1231 case MMLOOKUPHYPERTYPE_GCPHYS:
1232 case MMLOOKUPHYPERTYPE_DYNAMIC:
1233 {
1234 /* brute force. */
1235 uint32_t i = pLookup->cb >> PAGE_SHIFT;
1236 while (i-- > 0)
1237 {
1238 RTGCPTR GCPtr = pLookup->off + pVM->mm.s.pvHyperAreaGC;
1239 RTHCPHYS HCPhysCur;
1240 int rc = PGMMapGetPage(pVM, GCPtr, NULL, &HCPhysCur);
1241 if (RT_SUCCESS(rc) && HCPhysCur == HCPhysPage)
1242 return mmR3HyperQueryInfoFromHCPhysFound(pVM, HCPhys, pLookup, pszWhat, cbWhat, pcbAlloc);
1243 }
1244 break;
1245 }
1246 default:
1247 AssertMsgFailed(("enmType=%d\n", pLookup->enmType));
1248 break;
1249 }
1250
1251 /* next */
1252 if ((unsigned)pLookup->offNext == NIL_OFFSET)
1253 break;
1254 pLookup = (PMMLOOKUPHYPER)((uint8_t *)pLookup + pLookup->offNext);
1255 }
1256 return VERR_NOT_FOUND;
1257}
1258
1259
#if 0 /* unused, not implemented */
/**
 * Convert hypervisor HC physical address to HC virtual address.
 *
 * @returns HC virtual address.
 * @param   pVM         VM Handle
 * @param   HCPhys      Host context physical address.
 *
 * @remarks Compiled out (#if 0); kept for reference only.
 */
VMMR3DECL(void *) MMR3HyperHCPhys2HCVirt(PVM pVM, RTHCPHYS HCPhys)
{
    void *pv;
    int rc = MMR3HyperHCPhys2HCVirtEx(pVM, HCPhys, &pv);
    if (RT_SUCCESS(rc))
        return pv;
    AssertMsgFailed(("Invalid address HCPhys=%x rc=%d\n", HCPhys, rc));
    return NULL;
}


/**
 * Convert hypervisor HC physical address to HC virtual address.
 *
 * @returns VBox status.
 * @param   pVM         VM Handle
 * @param   HCPhys      Host context physical address.
 * @param   ppv         Where to store the HC virtual address.
 *
 * @remarks Stub: always fails with VERR_INVALID_POINTER; the linear
 *          search over the lookup records was never implemented.
 */
VMMR3DECL(int) MMR3HyperHCPhys2HCVirtEx(PVM pVM, RTHCPHYS HCPhys, void **ppv)
{
    /*
     * Linear search.
     */
    /** @todo implement when actually used. */
    return VERR_INVALID_POINTER;
}
#endif /* unused, not implemented */
1296
1297
1298/**
1299 * Read hypervisor memory from GC virtual address.
1300 *
1301 * @returns VBox status.
1302 * @param pVM VM handle.
1303 * @param pvDst Destination address (HC of course).
1304 * @param GCPtr GC virtual address.
1305 * @param cb Number of bytes to read.
1306 *
1307 * @remarks For DBGF only.
1308 */
1309VMMR3DECL(int) MMR3HyperReadGCVirt(PVM pVM, void *pvDst, RTGCPTR GCPtr, size_t cb)
1310{
1311 if (GCPtr - pVM->mm.s.pvHyperAreaGC >= pVM->mm.s.cbHyperArea)
1312 return VERR_INVALID_POINTER;
1313 return PGMR3MapRead(pVM, pvDst, GCPtr, cb);
1314}
1315
1316
1317/**
1318 * Info handler for 'hma', it dumps the list of lookup records for the hypervisor memory area.
1319 *
1320 * @param pVM The VM handle.
1321 * @param pHlp Callback functions for doing output.
1322 * @param pszArgs Argument string. Optional and specific to the handler.
1323 */
1324static DECLCALLBACK(void) mmR3HyperInfoHma(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1325{
1326 pHlp->pfnPrintf(pHlp, "Hypervisor Memory Area (HMA) Layout: Base %RGv, 0x%08x bytes\n",
1327 pVM->mm.s.pvHyperAreaGC, pVM->mm.s.cbHyperArea);
1328
1329 PMMLOOKUPHYPER pLookup = (PMMLOOKUPHYPER)((uint8_t *)pVM->mm.s.pHyperHeapR3 + pVM->mm.s.offLookupHyper);
1330 for (;;)
1331 {
1332 switch (pLookup->enmType)
1333 {
1334 case MMLOOKUPHYPERTYPE_LOCKED:
1335 pHlp->pfnPrintf(pHlp, "%RGv-%RGv %RHv %RHv LOCKED %-*s %s\n",
1336 pLookup->off + pVM->mm.s.pvHyperAreaGC,
1337 pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
1338 pLookup->u.Locked.pvR3,
1339 pLookup->u.Locked.pvR0,
1340 sizeof(RTHCPTR) * 2, "",
1341 pLookup->pszDesc);
1342 break;
1343
1344 case MMLOOKUPHYPERTYPE_HCPHYS:
1345 pHlp->pfnPrintf(pHlp, "%RGv-%RGv %RHv %RHv HCPHYS %RHp %s\n",
1346 pLookup->off + pVM->mm.s.pvHyperAreaGC,
1347 pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
1348 pLookup->u.HCPhys.pvR3,
1349 pLookup->u.HCPhys.pvR0,
1350 pLookup->u.HCPhys.HCPhys,
1351 pLookup->pszDesc);
1352 break;
1353
1354 case MMLOOKUPHYPERTYPE_GCPHYS:
1355 pHlp->pfnPrintf(pHlp, "%RGv-%RGv %*s GCPHYS %RGp%*s %s\n",
1356 pLookup->off + pVM->mm.s.pvHyperAreaGC,
1357 pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
1358 sizeof(RTHCPTR) * 2 * 2 + 1, "",
1359 pLookup->u.GCPhys.GCPhys, RT_ABS((int)(sizeof(RTHCPHYS) - sizeof(RTGCPHYS))) * 2, "",
1360 pLookup->pszDesc);
1361 break;
1362
1363 case MMLOOKUPHYPERTYPE_MMIO2:
1364 pHlp->pfnPrintf(pHlp, "%RGv-%RGv %*s MMIO2 %RGp%*s %s\n",
1365 pLookup->off + pVM->mm.s.pvHyperAreaGC,
1366 pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
1367 sizeof(RTHCPTR) * 2 * 2 + 1, "",
1368 pLookup->u.MMIO2.off, RT_ABS((int)(sizeof(RTHCPHYS) - sizeof(RTGCPHYS))) * 2, "",
1369 pLookup->pszDesc);
1370 break;
1371
1372 case MMLOOKUPHYPERTYPE_DYNAMIC:
1373 pHlp->pfnPrintf(pHlp, "%RGv-%RGv %*s DYNAMIC %*s %s\n",
1374 pLookup->off + pVM->mm.s.pvHyperAreaGC,
1375 pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
1376 sizeof(RTHCPTR) * 2 * 2 + 1, "",
1377 sizeof(RTHCPTR) * 2, "",
1378 pLookup->pszDesc);
1379 break;
1380
1381 default:
1382 AssertMsgFailed(("enmType=%d\n", pLookup->enmType));
1383 break;
1384 }
1385
1386 /* next */
1387 if ((unsigned)pLookup->offNext == NIL_OFFSET)
1388 break;
1389 pLookup = (PMMLOOKUPHYPER)((uint8_t *)pLookup + pLookup->offNext);
1390 }
1391}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette