VirtualBox

source: vbox/trunk/src/VBox/VMM/MM.cpp@7796

Last change on this file since 7796 was 7635, checked in by vboxsync, 17 years ago

The new MMIO2 code.
WARNING! This changes the pci mapping protocol for MMIO2 so it's working the same way as I/O ports and normal MMIO memory. External users of the interface will have to update their mapping routines.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 25.3 KB
1/* $Id: MM.cpp 7635 2008-03-28 17:15:38Z vboxsync $ */
2/** @file
3 * MM - Memory Monitor(/Manager).
4 */
5
6/*
7 * Copyright (C) 2006-2007 innotek GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_mm MM - The Memory Monitor/Manager
20 *
21 * WARNING: THIS IS SOMEWHAT OUTDATED!
22 *
23 * It seems like this is going to be the entity taking care of memory allocations
24 * and the locking of physical memory for a VM. MM will track these allocations and
25 * pinnings so that pointer conversions, memory reads and writes, and correct cleanup
26 * can be done.
27 *
28 * Memory types:
29 * - Hypervisor Memory Area (HMA).
30 * - Page tables.
31 * - Physical pages.
32 *
33 * The first two types are not accessible through the generic conversion functions
34 * for GC memory; there are special functions for those.
35 *
36 *
37 * A decent structure for this component needs to be developed as we see usage. One
38 * or two rewrites will probably be needed to get it right...
39 *
40 *
41 *
42 * @section Hypervisor Memory Area
43 *
44 * The hypervisor is given 4MB of space inside the guest; we assume that we can
45 * steal a page directory entry from the guest OS without causing trouble. In
46 * addition to these 4MB we'll be mapping memory for the graphics emulation,
47 * but that will be an independent mapping.
48 *
49 * The 4MB area is divided into two main parts:
50 * -# The static code and data.
51 * -# The short-lived page mappings.
52 *
53 * The first part is used for the VM structure, the core code (VMMSwitch),
54 * GC modules, and the alloc-only heap. The size will be determined at a
55 * later point, but initially we'll say 2MB of locked memory, most of which
56 * is physically non-contiguous.
57 *
58 * The second part is used for mapping pages into the hypervisor. We'll be using
59 * a simple round-robin scheme when doing these mappings. This means that no one can
60 * assume that a mapping hangs around for very long, while the management of the
61 * pages stays very simple.
62 *
63 *
64 *
65 * @section Page Pool
66 *
67 * The MM manages a per-VM page pool from which other components can allocate
68 * locked, page-aligned and page-granular memory objects. The pool provides
69 * facilities to convert back and forth between physical and virtual addresses
70 * (within the pool, of course). Several specialized interfaces are provided
71 * for the most common allocations and conversions to save the caller from
72 * bothersome casting and extra parameter passing; a usage sketch follows this comment block.
73 *
74 *
75 */
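
/*
 * A minimal, hedged usage sketch of the page pool interface described above.
 * This is an illustration only (not part of this file's code) and assumes the
 * MMR3PageAlloc / MMR3Page2Phys / MMR3PageFree entry points declared in
 * VBox/mm.h keep their usual shapes:
 *
 * @code
 *      void *pvPage = MMR3PageAlloc(pVM);                  // one locked, page aligned page
 *      if (pvPage)
 *      {
 *          RTHCPHYS HCPhys = MMR3Page2Phys(pVM, pvPage);   // virtual -> physical within the pool
 *          // ... use the page ...
 *          MMR3PageFree(pVM, pvPage);
 *      }
 * @endcode
 */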
76
77
78
79/*******************************************************************************
80* Header Files *
81*******************************************************************************/
82#define LOG_GROUP LOG_GROUP_MM
83#include <VBox/mm.h>
84#include <VBox/pgm.h>
85#include <VBox/cfgm.h>
86#include <VBox/ssm.h>
87#include <VBox/gmm.h>
88#include "MMInternal.h"
89#include <VBox/vm.h>
90#include <VBox/uvm.h>
91#include <VBox/err.h>
92#include <VBox/param.h>
93
94#include <VBox/log.h>
95#include <iprt/alloc.h>
96#include <iprt/assert.h>
97#include <iprt/string.h>
98
99
100/*******************************************************************************
101* Defined Constants And Macros *
102*******************************************************************************/
103/** The current saved state version of MM. */
104#define MM_SAVED_STATE_VERSION 2
105
106
107/*******************************************************************************
108* Internal Functions *
109*******************************************************************************/
110static DECLCALLBACK(int) mmR3Save(PVM pVM, PSSMHANDLE pSSM);
111static DECLCALLBACK(int) mmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
112
113
114/**
115 * Initializes the MM members of the UVM.
116 *
117 * This is currently only the ring-3 heap.
118 *
119 * @returns VBox status code.
120 * @param pUVM Pointer to the user mode VM structure.
121 */
122MMR3DECL(int) MMR3InitUVM(PUVM pUVM)
123{
124 /*
125 * Assert sizes and order.
126 */
127 AssertCompile(sizeof(pUVM->mm.s) <= sizeof(pUVM->mm.padding));
128 AssertRelease(sizeof(pUVM->mm.s) <= sizeof(pUVM->mm.padding));
129 Assert(!pUVM->mm.s.pHeap);
130
131 /*
132 * Init the heap.
133 */
134 return mmR3HeapCreateU(pUVM, &pUVM->mm.s.pHeap);
135}
136
137
138/**
139 * Initializes the MM.
140 *
141 * MM manages the virtual address space (among other things) and
142 * sets up the hypervisor memory area mapping in the VM structure and
143 * the hypervisor alloc-only heap. Assuming the current init order
144 * and components, the hypervisor memory area looks like this:
145 * -# VM Structure.
146 * -# Hypervisor alloc-only heap (also called the Hypervisor memory region).
147 * -# Core code.
148 *
149 * MM determines the virtual address of the hypervisor memory area by
150 * checking for the location used in the previous run. If that property isn't
151 * available it will choose a default starting location, currently 0xe0000000.
152 *
153 * @returns VBox status code.
154 * @param pVM The VM to operate on.
155 */
156MMR3DECL(int) MMR3Init(PVM pVM)
157{
158 LogFlow(("MMR3Init\n"));
159
160 /*
161 * Assert alignment, sizes and order.
162 */
163 AssertRelease(!(RT_OFFSETOF(VM, mm.s) & 31));
164 AssertRelease(sizeof(pVM->mm.s) <= sizeof(pVM->mm.padding));
165 AssertMsg(pVM->mm.s.offVM == 0, ("Already initialized!\n"));
166
167 /*
168 * Init the structure.
169 */
170 pVM->mm.s.offVM = RT_OFFSETOF(VM, mm);
171 pVM->mm.s.offLookupHyper = NIL_OFFSET;
172
173 /*
174 * Init the page pool.
175 */
176 int rc = mmR3PagePoolInit(pVM);
177 if (VBOX_SUCCESS(rc))
178 {
179 /*
180 * Init the hypervisor related stuff.
181 */
182 rc = mmR3HyperInit(pVM);
183 if (VBOX_SUCCESS(rc))
184 {
185 /*
186 * Register the saved state data unit.
187 */
188 rc = SSMR3RegisterInternal(pVM, "mm", 1, MM_SAVED_STATE_VERSION, sizeof(uint32_t) * 2,
189 NULL, mmR3Save, NULL,
190 NULL, mmR3Load, NULL);
191 if (VBOX_SUCCESS(rc))
192 return rc;
193
194 /* .... failure .... */
195 }
196 }
197 MMR3Term(pVM);
198 return rc;
199}
200
201
202/**
203 * Initializes the MM parts which depend on PGM being initialized.
204 *
205 * @returns VBox status code.
206 * @param pVM The VM to operate on.
207 * @remark No cleanup necessary since MMR3Term() will be called on failure.
208 */
209MMR3DECL(int) MMR3InitPaging(PVM pVM)
210{
211 LogFlow(("MMR3InitPaging:\n"));
212
213 /*
214 * Query the CFGM values.
215 */
216 int rc;
217 PCFGMNODE pMMCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "MM");
218 if (!pMMCfg)
219 {
220 rc = CFGMR3InsertNode(CFGMR3GetRoot(pVM), "MM", &pMMCfg);
221 AssertRCReturn(rc, rc);
222 }
223
224 /** @cfgm{RamPreAlloc, boolean, false}
225 * Indicates whether the base RAM should all be allocated before starting
226 * the VM, or if it should be allocated when first written to (the default).
227 */
228 bool fPreAlloc;
229 rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "RamPreAlloc", &fPreAlloc);
230 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
231 fPreAlloc = false;
232 else
233 AssertMsgRCReturn(rc, ("Configuration error: Failed to query integer \"RamPreAlloc\", rc=%Vrc.\n", rc), rc);
234
235 /** @cfgm{RamSize, uint64_t, 0, 0, UINT64_MAX}
236 * Specifies the size of the base RAM that is to be set up during
237 * VM initialization.
238 */
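    /* Hedged illustration only (not executed here): the VM construction code
     * would typically provide these keys on the CFGM root, roughly like this,
     * assuming the usual CFGMR3InsertInteger signature:
     *
     * @code
     *      CFGMR3InsertInteger(CFGMR3GetRoot(pVM), "RamSize",     128 * _1M);
     *      CFGMR3InsertInteger(CFGMR3GetRoot(pVM), "RamPreAlloc", 0);
     * @endcode
     */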
239 uint64_t cbRam;
240 rc = CFGMR3QueryU64(CFGMR3GetRoot(pVM), "RamSize", &cbRam);
241 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
242 cbRam = 0;
243 else
244 AssertMsgRCReturn(rc, ("Configuration error: Failed to query integer \"RamSize\", rc=%Vrc.\n", rc), rc);
245
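    /* Align the RAM size down to a page boundary; the PAE page mask also clips
       it to the supported physical address width. */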
246 cbRam &= X86_PTE_PAE_PG_MASK;
247 pVM->mm.s.cbRamBase = cbRam; /* Warning: don't move this code to MMR3Init without fixing REMR3Init. */
248 Log(("MM: %RU64 bytes of RAM%s\n", cbRam, fPreAlloc ? " (PreAlloc)" : ""));
249
250 /** @cfgm{MM/Policy, string, no overcommitment}
251 * Specifies the policy to use when reserving memory for this VM. The recognized
252 * values are 'no overcommitment' (default) and its alias 'no_oc'. See GMMOCPOLICY.
253 */
254 GMMOCPOLICY enmPolicy;
255 char sz[64];
256 rc = CFGMR3QueryString(CFGMR3GetRoot(pVM), "Policy", sz, sizeof(sz));
257 if (RT_SUCCESS(rc))
258 {
259 if ( !RTStrICmp(sz, "no_oc")
260 || !RTStrICmp(sz, "no overcommitment"))
261 enmPolicy = GMMOCPOLICY_NO_OC;
262 else
263 return VMSetError(pVM, VERR_INVALID_PARAMETER, RT_SRC_POS, "Unknown \"MM/Policy\" value \"%s\"", sz);
264 }
265 else if (rc == VERR_CFGM_VALUE_NOT_FOUND)
266 enmPolicy = GMMOCPOLICY_NO_OC;
267 else
268 AssertMsgRCReturn(rc, ("Configuration error: Failed to query string \"MM/Policy\", rc=%Vrc.\n", rc), rc);
269
270 /** @cfgm{MM/Priority, string, normal}
271 * Specifies the memory priority of this VM. The priority comes into play when the
272 * system is overcommitted and the VMs need to be milked for memory. The recognized
273 * values are 'low', 'normal' (default) and 'high'. See GMMPRIORITY.
274 */
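    /* Hedged illustration only: the queries below read "Policy" and "Priority"
     * from the CFGM root (the @cfgm tags name them MM/Policy and MM/Priority),
     * so a configuration would set them roughly like this, assuming the usual
     * CFGMR3InsertString signature:
     *
     * @code
     *      CFGMR3InsertString(CFGMR3GetRoot(pVM), "Policy",   "no overcommitment");
     *      CFGMR3InsertString(CFGMR3GetRoot(pVM), "Priority", "normal");
     * @endcode
     */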
275 GMMPRIORITY enmPriority;
276 rc = CFGMR3QueryString(CFGMR3GetRoot(pVM), "Priority", sz, sizeof(sz));
277 if (RT_SUCCESS(rc))
278 {
279 if (!RTStrICmp(sz, "low"))
280 enmPriority = GMMPRIORITY_LOW;
281 else if (!RTStrICmp(sz, "normal"))
282 enmPriority = GMMPRIORITY_NORMAL;
283 else if (!RTStrICmp(sz, "high"))
284 enmPriority = GMMPRIORITY_HIGH;
285 else
286 return VMSetError(pVM, VERR_INVALID_PARAMETER, RT_SRC_POS, "Unknown \"MM/Priority\" value \"%s\"", sz);
287 }
288 else if (rc == VERR_CFGM_VALUE_NOT_FOUND)
289 enmPriority = GMMPRIORITY_NORMAL;
290 else
291 AssertMsgRCReturn(rc, ("Configuration error: Failed to query string \"MM/Priority\", rc=%Vrc.\n", rc), rc);
292
293 /*
294 * Make the initial memory reservation with GMM.
295 */
296 rc = GMMR3InitialReservation(pVM, cbRam >> PAGE_SHIFT, 1, 1, enmPolicy, enmPriority);
297 if (RT_FAILURE(rc))
298 {
299 if (rc == VERR_GMM_MEMORY_RESERVATION_DECLINED)
300 return VMSetError(pVM, rc, RT_SRC_POS,
301 N_("Insufficient free memory to start the VM (cbRam=%#RX64 enmPolicy=%d enmPriority=%d)"),
302 cbRam, enmPolicy, enmPriority);
303 return VMSetError(pVM, rc, RT_SRC_POS, "GMMR3InitialReservation(,%#RX64,0,0,%d,%d)",
304 cbRam >> PAGE_SHIFT, enmPolicy, enmPriority);
305 }
306
307 /*
308 * If RamSize is 0 we're done now.
309 */
310 if (cbRam < PAGE_SIZE)
311 {
312 Log(("MM: No RAM configured\n"));
313 return VINF_SUCCESS;
314 }
315
316 /*
317 * Setup the base ram (PGM).
318 */
319 rc = PGMR3PhysRegisterRam(pVM, 0, cbRam, "Base RAM");
320#ifdef VBOX_WITH_NEW_PHYS_CODE
321 if (RT_SUCCESS(rc) && fPreAlloc)
322 {
323 /** @todo RamPreAlloc should be handled at the very end of the VM creation. (lazy bird) */
324 return VM_SET_ERROR(pVM, VERR_NOT_IMPLEMENTED, "TODO: RamPreAlloc");
325 }
326#else
327 if (RT_SUCCESS(rc))
328 {
329 /*
330 * Allocate the first chunk, as we'll map ROM ranges there.
331 * If requested, allocate the rest too.
332 */
333 RTGCPHYS GCPhys = (RTGCPHYS)0;
334 rc = PGM3PhysGrowRange(pVM, &GCPhys);
335 if (RT_SUCCESS(rc) && fPreAlloc)
336 for (GCPhys = PGM_DYNAMIC_CHUNK_SIZE;
337 GCPhys < cbRam && RT_SUCCESS(rc);
338 GCPhys += PGM_DYNAMIC_CHUNK_SIZE)
339 rc = PGM3PhysGrowRange(pVM, &GCPhys);
340 }
341#endif
342
343 LogFlow(("MMR3InitPaging: returns %Vrc\n", rc));
344 return rc;
345}
346
347
348/**
349 * Terminates the MM.
350 *
351 * Termination means cleaning up and freeing all resources; the
352 * VM itself is at this point powered off or suspended.
353 *
354 * @returns VBox status code.
355 * @param pVM The VM to operate on.
356 */
357MMR3DECL(int) MMR3Term(PVM pVM)
358{
359 /*
360 * Destroy the page pool (first, as it uses the hyper heap).
361 */
362 mmR3PagePoolTerm(pVM);
363
364 /*
365 * Release locked memory.
366 * (The associated records are released by the heap.)
367 */
368 PMMLOCKEDMEM pLockedMem = pVM->mm.s.pLockedMem;
369 while (pLockedMem)
370 {
371 int rc = SUPPageUnlock(pLockedMem->pv);
372 AssertMsgRC(rc, ("SUPPageUnlock(%p) -> rc=%d\n", pLockedMem->pv, rc));
373 switch (pLockedMem->eType)
374 {
375 case MM_LOCKED_TYPE_HYPER:
376 rc = SUPPageFree(pLockedMem->pv, pLockedMem->cb >> PAGE_SHIFT);
377 AssertMsgRC(rc, ("SUPPageFree(%p) -> rc=%d\n", pLockedMem->pv, rc));
378 break;
379 case MM_LOCKED_TYPE_HYPER_NOFREE:
380 case MM_LOCKED_TYPE_HYPER_PAGES:
381 case MM_LOCKED_TYPE_PHYS:
382 /* nothing to do. */
383 break;
384 }
385 /* next */
386 pLockedMem = pLockedMem->pNext;
387 }
388
389 /*
390 * Zero stuff to detect use of the MM interface after termination.
391 */
392 pVM->mm.s.offLookupHyper = NIL_OFFSET;
393 pVM->mm.s.pLockedMem = NULL;
394 pVM->mm.s.pHyperHeapHC = NULL; /* freed above. */
395 pVM->mm.s.pHyperHeapGC = 0; /* freed above. */
396 pVM->mm.s.offVM = 0; /* init assertion on this */
397
398 return 0;
399}
400
401
402/**
403 * Terminates the UVM part of MM.
404 *
405 * Termination means cleaning up and freeing all resources; the
406 * VM itself is at this point powered off or suspended.
407 *
409 * @param pUVM Pointer to the user mode VM structure.
410 */
411MMR3DECL(void) MMR3TermUVM(PUVM pUVM)
412{
413 /*
414 * Destroy the heap.
415 */
416 mmR3HeapDestroy(pUVM->mm.s.pHeap);
417 pUVM->mm.s.pHeap = NULL;
418}
419
420
421/**
422 * Reset notification.
423 *
424 * MM will reload shadow ROMs into RAM at this point and make
425 * the ROM writable.
426 *
427 * @param pVM The VM handle.
428 */
429MMR3DECL(void) MMR3Reset(PVM pVM)
430{
431 mmR3PhysRomReset(pVM);
432}
433
434
435/**
436 * Execute state save operation.
437 *
438 * @returns VBox status code.
439 * @param pVM VM Handle.
440 * @param pSSM SSM operation handle.
441 */
442static DECLCALLBACK(int) mmR3Save(PVM pVM, PSSMHANDLE pSSM)
443{
444 LogFlow(("mmR3Save:\n"));
445
446 /* (PGM saves the physical memory.) */
447 SSMR3PutU64(pSSM, pVM->mm.s.cBasePages);
448 return SSMR3PutU64(pSSM, pVM->mm.s.cbRamBase);
449}
450
451
452/**
453 * Execute state load operation.
454 *
455 * @returns VBox status code.
456 * @param pVM VM Handle.
457 * @param pSSM SSM operation handle.
458 * @param u32Version Data layout version.
459 */
460static DECLCALLBACK(int) mmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
461{
462 LogFlow(("mmR3Load:\n"));
463
464 /*
465 * Validate version.
466 */
467 if ( SSM_VERSION_MAJOR_CHANGED(u32Version, MM_SAVED_STATE_VERSION)
468 || !u32Version)
469 {
470 Log(("mmR3Load: Invalid version u32Version=%d!\n", u32Version));
471 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
472 }
473
474 /*
475 * Check the cBasePages and cbRamBase values.
476 */
477 int rc;
478 RTUINT cb1;
479
480 /* cBasePages */
481 uint64_t cPages;
482 if (u32Version != 1)
483 rc = SSMR3GetU64(pSSM, &cPages);
484 else
485 {
486 rc = SSMR3GetUInt(pSSM, &cb1);
487 cPages = cb1 >> PAGE_SHIFT;
488 }
489 if (VBOX_FAILURE(rc))
490 return rc;
491 if (cPages != pVM->mm.s.cBasePages)
492 {
493 Log(("mmR3Load: Memory configuration has changed. cPages=%#RX64 saved=%#RX64\n", pVM->mm.s.cBasePages, cPages));
494 return VERR_SSM_LOAD_MEMORY_SIZE_MISMATCH;
495 }
496
497 /* cbRamBase */
498 uint64_t cb;
499 if (u32Version != 1)
500 rc = SSMR3GetU64(pSSM, &cb);
501 else
502 {
503 rc = SSMR3GetUInt(pSSM, &cb1);
504 cb = cb1;
505 }
506 if (VBOX_FAILURE(rc))
507 return rc;
508 if (cb != pVM->mm.s.cbRamBase)
509 {
510 Log(("mmR3Load: Memory configuration has changed. cbRamBase=%#RX64 save=%#RX64\n", pVM->mm.s.cbRamBase, cb));
511 return VERR_SSM_LOAD_MEMORY_SIZE_MISMATCH;
512 }
513
514 /* (PGM restores the physical memory.) */
515 return rc;
516}
517
518
519/**
520 * Updates GMM with memory reservation changes.
521 *
522 * Called when MM::cBasePages, MM::cShadowPages or MM::cFixedPages changes.
523 *
524 * @returns VBox status code - see GMMR0UpdateReservation.
525 * @param pVM The shared VM structure.
526 */
527int mmR3UpdateReservation(PVM pVM)
528{
529 VM_ASSERT_EMT(pVM);
530 if (pVM->mm.s.fDoneMMR3InitPaging)
531 return GMMR3UpdateReservation(pVM,
532 RT_MAX(pVM->mm.s.cBasePages, 1),
533 RT_MAX(pVM->mm.s.cShadowPages, 1),
534 RT_MAX(pVM->mm.s.cFixedPages, 1));
535 return VINF_SUCCESS;
536}
537
538
539/**
540 * Interface for PGM to increase the reservation of RAM and ROM pages.
541 *
542 * This can be called before MMR3InitPaging.
543 *
544 * @returns VBox status code. Will set VM error on failure.
545 * @param pVM The shared VM structure.
546 * @param cAddBasePages The number of pages to add.
547 */
548MMR3DECL(int) MMR3IncreaseBaseReservation(PVM pVM, uint64_t cAddBasePages)
549{
550 uint64_t cOld = pVM->mm.s.cBasePages;
551 pVM->mm.s.cBasePages += cAddBasePages;
552 LogFlow(("MMR3IncreaseBaseReservation: +%RU64 (%RU64 -> %RU64\n", cAddBasePages, cOld, pVM->mm.s.cBasePages));
553 int rc = mmR3UpdateReservation(pVM);
554 if (RT_FAILURE(rc))
555 {
556 VMSetError(pVM, rc, RT_SRC_POS, N_("Failed to reserve physical memory for the RAM (%#RX64 -> %#RX64)"), cOld, pVM->mm.s.cBasePages);
557 pVM->mm.s.cBasePages = cOld;
558 }
559 return rc;
560}
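
/* Hedged usage sketch (illustration only, not from this file): PGM would bump
 * the base reservation when registering a RAM range, along the lines of:
 *
 * @code
 *      rc = MMR3IncreaseBaseReservation(pVM, cb >> PAGE_SHIFT);
 * @endcode
 */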
561
562
563/**
564 * Interface for PGM to adjust the reservation of fixed pages.
565 *
566 * This can be called before MMR3InitPaging.
567 *
568 * @returns VBox status code. Will set VM error on failure.
569 * @param pVM The shared VM structure.
570 * @param cDeltaFixedPages The number of pages to add (positive) or subtract (negative).
571 * @param pszDesc Some description associated with the reservation.
572 */
573MMR3DECL(int) MMR3AdjustFixedReservation(PVM pVM, int32_t cDeltaFixedPages, const char *pszDesc)
574{
575 const uint32_t cOld = pVM->mm.s.cFixedPages;
576 pVM->mm.s.cFixedPages += cDeltaFixedPages;
577 LogFlow(("MMR3AdjustFixedReservation: %d (%u -> %u)\n", cDeltaFixedPages, cOld, pVM->mm.s.cFixedPages));
578 int rc = mmR3UpdateReservation(pVM);
579 if (RT_FAILURE(rc))
580 {
581 VMSetError(pVM, rc, RT_SRC_POS, N_("Failed to reserve physical memory (%#x -> %#x; %s)"),
582 cOld, pVM->mm.s.cFixedPages, pszDesc);
583 pVM->mm.s.cFixedPages = cOld;
584 }
585 return rc;
586}
587
588
589/**
590 * Interface for PGM to update the reservation of shadow pages.
591 *
592 * This can be called before MMR3InitPaging.
593 *
594 * @returns VBox status code. Will set VM error on failure.
595 * @param pVM The shared VM structure.
596 * @param cShadowPages The new page count.
597 */
598MMR3DECL(int) MMR3UpdateShadowReservation(PVM pVM, uint32_t cShadowPages)
599{
600 const uint32_t cOld = pVM->mm.s.cShadowPages;
601 pVM->mm.s.cShadowPages = cShadowPages;
602 LogFlow(("MMR3UpdateShadowReservation: %u -> %u\n", cOld, pVM->mm.s.cShadowPages));
603 int rc = mmR3UpdateReservation(pVM);
604 if (RT_FAILURE(rc))
605 {
606 VMSetError(pVM, rc, RT_SRC_POS, N_("Failed to reserve physical memory for shadow page tables (%#x -> %#x)"), cOld, pVM->mm.s.cShadowPages);
607 pVM->mm.s.cShadowPages = cOld;
608 }
609 return rc;
610}
611
612
613/**
614 * Locks the physical memory which backs a virtual memory range (HC), adding
615 * the required records to the pLockedMem list.
616 *
617 * @returns VBox status code.
618 * @param pVM The VM handle.
619 * @param pv Pointer to memory range which shall be locked down.
620 * This pointer is page aligned.
621 * @param cb Size of memory range (in bytes). This size is page aligned.
622 * @param eType Memory type.
623 * @param ppLockedMem Where to store the pointer to the created locked memory record.
624 * This is optional, pass NULL if not used.
625 * @param fSilentFailure Don't raise an error when unsuccessful. The upper layer will deal with it.
626 */
627int mmR3LockMem(PVM pVM, void *pv, size_t cb, MMLOCKEDTYPE eType, PMMLOCKEDMEM *ppLockedMem, bool fSilentFailure)
628{
629 Assert(RT_ALIGN_P(pv, PAGE_SIZE) == pv);
630 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
631
632 if (ppLockedMem)
633 *ppLockedMem = NULL;
634
635 /*
636 * Allocate locked mem structure.
637 */
638 unsigned cPages = cb >> PAGE_SHIFT;
639 AssertReturn(cPages == (cb >> PAGE_SHIFT), VERR_OUT_OF_RANGE);
640 PMMLOCKEDMEM pLockedMem = (PMMLOCKEDMEM)MMR3HeapAlloc(pVM, MM_TAG_MM, RT_OFFSETOF(MMLOCKEDMEM, aPhysPages[cPages]));
641 if (!pLockedMem)
642 return VERR_NO_MEMORY;
643 pLockedMem->pv = pv;
644 pLockedMem->cb = cb;
645 pLockedMem->eType = eType;
646 memset(&pLockedMem->u, 0, sizeof(pLockedMem->u));
647
648 /*
649 * Lock the memory.
650 */
651 int rc = SUPPageLock(pv, cPages, &pLockedMem->aPhysPages[0]);
652 if (VBOX_SUCCESS(rc))
653 {
654 /*
655 * Setup the reserved field.
656 */
657 PSUPPAGE pPhysPage = &pLockedMem->aPhysPages[0];
658 for (unsigned c = cPages; c > 0; c--, pPhysPage++)
659 pPhysPage->uReserved = (RTHCUINTPTR)pLockedMem;
660
661 /*
662 * Insert into the list.
663 *
664 * ASSUME no protection is needed here as only one thread in the system can possibly
665 * be doing this. We assume no other threads will walk this list either.
666 */
667 pLockedMem->pNext = pVM->mm.s.pLockedMem;
668 pVM->mm.s.pLockedMem = pLockedMem;
669 /* Set return value. */
670 if (ppLockedMem)
671 *ppLockedMem = pLockedMem;
672 }
673 else
674 {
675 AssertMsgFailed(("SUPPageLock failed with rc=%d\n", rc));
676 MMR3HeapFree(pLockedMem);
677 if (!fSilentFailure)
678 rc = VMSetError(pVM, rc, RT_SRC_POS, N_("Failed to lock %d bytes of host memory (out of memory)"), cb);
679 }
680
681 return rc;
682}
683
684
685/**
686 * Maps a part of or an entire locked memory region into the guest context.
687 *
688 * @returns VBox status.
689 * God knows what happens if we fail...
690 * @param pVM VM handle.
691 * @param pLockedMem Locked memory structure.
692 * @param Addr GC Address where to start the mapping.
693 * @param iPage Page number in the locked memory region.
694 * @param cPages Number of pages to map.
695 * @param fFlags See the fFlags argument of PGMMap().
696 */
697int mmR3MapLocked(PVM pVM, PMMLOCKEDMEM pLockedMem, RTGCPTR Addr, unsigned iPage, size_t cPages, unsigned fFlags)
698{
699 /*
700 * Adjust ~0 argument
701 */
702 if (cPages == ~(size_t)0)
703 cPages = (pLockedMem->cb >> PAGE_SHIFT) - iPage;
704 Assert(cPages != ~0U);
705 /* no incorrect arguments are accepted */
706 Assert(RT_ALIGN_GCPT(Addr, PAGE_SIZE, RTGCPTR) == Addr);
707 AssertMsg(iPage < (pLockedMem->cb >> PAGE_SHIFT), ("never even think about giving me a bad iPage(=%d)\n", iPage));
708 AssertMsg(iPage + cPages <= (pLockedMem->cb >> PAGE_SHIFT), ("never even think about giving me a bad cPages(=%d)\n", cPages));
709
710 /*
711 * Map the pages.
712 */
713 PSUPPAGE pPhysPage = &pLockedMem->aPhysPages[iPage];
714 while (cPages)
715 {
716 RTHCPHYS HCPhys = pPhysPage->Phys;
717 int rc = PGMMap(pVM, Addr, HCPhys, PAGE_SIZE, fFlags);
718 if (VBOX_FAILURE(rc))
719 {
720 /** @todo how the hell can we do a proper bailout here. */
721 return rc;
722 }
723
724 /* next */
725 cPages--;
726 iPage++;
727 pPhysPage++;
728 Addr += PAGE_SIZE;
729 }
730
731 return VINF_SUCCESS;
732}
733
734
735/**
736 * Convert HC Physical address to HC Virtual address.
737 *
738 * @returns VBox status.
739 * @param pVM VM handle.
740 * @param HCPhys The host context physical address.
741 * @param ppv Where to store the resulting address.
742 * @thread The Emulation Thread.
743 */
744MMR3DECL(int) MMR3HCPhys2HCVirt(PVM pVM, RTHCPHYS HCPhys, void **ppv)
745{
746 /*
747 * Try page tables.
748 */
749 int rc = MMPagePhys2PageTry(pVM, HCPhys, ppv);
750 if (VBOX_SUCCESS(rc))
751 return rc;
752
753 /*
754 * Iterate the locked memory - very slow.
755 */
756 uint32_t off = HCPhys & PAGE_OFFSET_MASK;
757 HCPhys &= X86_PTE_PAE_PG_MASK;
758 for (PMMLOCKEDMEM pCur = pVM->mm.s.pLockedMem; pCur; pCur = pCur->pNext)
759 {
760 size_t iPage = pCur->cb >> PAGE_SHIFT;
761 while (iPage-- > 0)
762 if ((pCur->aPhysPages[iPage].Phys & X86_PTE_PAE_PG_MASK) == HCPhys)
763 {
764 *ppv = (char *)pCur->pv + (iPage << PAGE_SHIFT) + off;
765 return VINF_SUCCESS;
766 }
767 }
768 /* give up */
769 return VERR_INVALID_POINTER;
770}
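
/* Hedged usage sketch (illustration only): converting a host physical address
 * that MM knows about back into a ring-3 pointer:
 *
 * @code
 *      void *pv;
 *      int rc = MMR3HCPhys2HCVirt(pVM, HCPhys, &pv);
 *      if (RT_SUCCESS(rc))
 *          // pv now points at the byte backing HCPhys
 * @endcode
 */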
771
772
773/**
774 * Read memory from GC virtual address using the current guest CR3.
775 *
776 * @returns VBox status.
777 * @param pVM VM handle.
778 * @param pvDst Destination address (HC of course).
779 * @param GCPtr GC virtual address.
780 * @param cb Number of bytes to read.
781 */
782MMR3DECL(int) MMR3ReadGCVirt(PVM pVM, void *pvDst, RTGCPTR GCPtr, size_t cb)
783{
784 if (GCPtr - pVM->mm.s.pvHyperAreaGC < pVM->mm.s.cbHyperArea)
785 return MMR3HyperReadGCVirt(pVM, pvDst, GCPtr, cb);
786 return PGMPhysReadGCPtr(pVM, pvDst, GCPtr, cb);
787}
788
789
790/**
791 * Write to memory at GC virtual address translated using the current guest CR3.
792 *
793 * @returns VBox status.
794 * @param pVM VM handle.
795 * @param GCPtrDst GC virtual address.
796 * @param pvSrc The source address (HC of course).
797 * @param cb Number of bytes to write.
798 */
799MMR3DECL(int) MMR3WriteGCVirt(PVM pVM, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
800{
801 if (GCPtrDst - pVM->mm.s.pvHyperAreaGC < pVM->mm.s.cbHyperArea)
802 return VERR_ACCESS_DENIED;
803 return PGMPhysWriteGCPtr(pVM, GCPtrDst, pvSrc, cb);
804}
805