VirtualBox

source: vbox/trunk/src/VBox/VMM/MM.cpp@10700

Last change on this file since 10700 was 8155, checked in by vboxsync, 17 years ago

The Big Sun Rebranding Header Change

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 25.5 KB
/* $Id: MM.cpp 8155 2008-04-18 15:16:47Z vboxsync $ */
/** @file
 * MM - Memory Monitor(/Manager).
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

/** @page pg_mm     MM - The Memory Monitor/Manager
 *
 * WARNING: THIS IS SOMEWHAT OUTDATED!
 *
 * It seems like this is going to be the entity taking care of memory allocations
 * and the locking of physical memory for a VM. MM will track these allocations and
 * pinnings so pointer conversions, memory reads and writes, and correct cleanup can
 * be done.
 *
 * Memory types:
 *      - Hypervisor Memory Area (HMA).
 *      - Page tables.
 *      - Physical pages.
 *
 * The first two types are not accessible using the generic conversion functions
 * for GC memory; there are special functions for these.
 *
 *
 * A decent structure for this component needs to be developed as we see usage. One
 * or two rewrites are probably needed to get it right...
 *
 *
 *
 * @section Hypervisor Memory Area
 *
 * The hypervisor is given 4MB of space inside the guest; we assume that we can
 * steal a page directory entry from the guest OS without causing trouble. In
 * addition to these 4MB we'll be mapping memory for the graphics emulation,
 * but that will be an independent mapping.
 *
 * The 4MBs are divided into two main parts:
 *      -# The static code and data.
 *      -# The short-lived page mappings.
 *
 * The first part is used for the VM structure, the core code (VMMSwitch),
 * GC modules, and the alloc-only heap. The size will be determined at a
 * later point, but initially we'll say 2MB of locked memory, most of which
 * is physically non-contiguous.
 *
 * The second part is used for mapping pages to the hypervisor. We'll be using
 * a simple round-robin scheme when doing these mappings. This means that no one
 * can assume that a mapping hangs around for very long, while the management of
 * the pages is very simple.
 *
 *
 *
 * @section Page Pool
 *
 * The MM manages a per-VM page pool from which other components can allocate
 * locked, page aligned and page granular memory objects. The pool provides
 * facilities to convert back and forth between physical and virtual addresses
 * (within the pool of course). Several specialized interfaces are provided
 * for the most common allocations and conversions to save the caller from
 * bothersome casting and extra parameter passing.
 *
 *
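 * A minimal usage sketch (illustrative only, assuming the page pool interfaces
 * MMR3PageAlloc(), MMR3Page2Phys() and MMR3PageFree() declared in VBox/mm.h):
 *
 * @code
 *      void *pvPage = MMR3PageAlloc(pVM);                  // locked, page aligned page from the per-VM pool
 *      if (pvPage)
 *      {
 *          RTHCPHYS HCPhys = MMR3Page2Phys(pVM, pvPage);   // ring-3 address -> host physical address
 *          // ... hand HCPhys to the device/hardware emulation, use pvPage in ring-3 ...
 *          MMR3PageFree(pVM, pvPage);                      // return the page to the pool
 *      }
 * @endcode
 *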
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_MM
#include <VBox/mm.h>
#include <VBox/pgm.h>
#include <VBox/cfgm.h>
#include <VBox/ssm.h>
#include <VBox/gmm.h>
#include "MMInternal.h"
#include <VBox/vm.h>
#include <VBox/uvm.h>
#include <VBox/err.h>
#include <VBox/param.h>

#include <VBox/log.h>
#include <iprt/alloc.h>
#include <iprt/assert.h>
#include <iprt/string.h>


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** The current saved state version of MM. */
#define MM_SAVED_STATE_VERSION      2


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static DECLCALLBACK(int) mmR3Save(PVM pVM, PSSMHANDLE pSSM);
static DECLCALLBACK(int) mmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);

/**
 * Initializes the MM members of the UVM.
 *
 * This is currently only the ring-3 heap.
 *
 * @returns VBox status code.
 * @param   pUVM    Pointer to the user mode VM structure.
 */
MMR3DECL(int) MMR3InitUVM(PUVM pUVM)
{
    /*
     * Assert sizes and order.
     */
    AssertCompile(sizeof(pUVM->mm.s) <= sizeof(pUVM->mm.padding));
    AssertRelease(sizeof(pUVM->mm.s) <= sizeof(pUVM->mm.padding));
    Assert(!pUVM->mm.s.pHeap);

    /*
     * Init the heap.
     */
    return mmR3HeapCreateU(pUVM, &pUVM->mm.s.pHeap);
}

/**
 * Initializes the MM.
 *
 * MM manages the virtual address space (among other things) and sets up the
 * hypervisor memory area mapping in the VM structure and the hypervisor
 * alloc-only heap. Assuming the current init order and components, the
 * hypervisor memory area looks like this:
 *      -# VM Structure.
 *      -# Hypervisor alloc-only heap (also called the hypervisor memory region).
 *      -# Core code.
 *
 * MM determines the virtual address of the hypervisor memory area by checking
 * for the location used in a previous run. If that property isn't available
 * it will choose a default starting location, currently 0xe0000000.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
MMR3DECL(int) MMR3Init(PVM pVM)
{
    LogFlow(("MMR3Init\n"));

    /*
     * Assert alignment, sizes and order.
     */
    AssertRelease(!(RT_OFFSETOF(VM, mm.s) & 31));
    AssertRelease(sizeof(pVM->mm.s) <= sizeof(pVM->mm.padding));
    AssertMsg(pVM->mm.s.offVM == 0, ("Already initialized!\n"));

    /*
     * Init the structure.
     */
    pVM->mm.s.offVM = RT_OFFSETOF(VM, mm);
    pVM->mm.s.offLookupHyper = NIL_OFFSET;

    /*
     * Init the page pool.
     */
    int rc = mmR3PagePoolInit(pVM);
    if (VBOX_SUCCESS(rc))
    {
        /*
         * Init the hypervisor related stuff.
         */
        rc = mmR3HyperInit(pVM);
        if (VBOX_SUCCESS(rc))
        {
            /*
             * Register the saved state data unit.
             */
            rc = SSMR3RegisterInternal(pVM, "mm", 1, MM_SAVED_STATE_VERSION, sizeof(uint32_t) * 2,
                                       NULL, mmR3Save, NULL,
                                       NULL, mmR3Load, NULL);
            if (VBOX_SUCCESS(rc))
                return rc;

            /* .... failure .... */
        }
    }
    MMR3Term(pVM);
    return rc;
}

/**
 * Initializes the MM parts which depend on PGM being initialized.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 * @remark  No cleanup necessary since MMR3Term() will be called on failure.
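 *
 * An illustrative sketch of how the configuration values consumed here could
 * be set up by the VM construction code (the key names match the @cfgm entries
 * documented in the function body; the exact construction call site and values
 * are assumptions, and error checking is omitted for brevity):
 *
 * @code
 *      PCFGMNODE pRoot = CFGMR3GetRoot(pVM);
 *      CFGMR3InsertInteger(pRoot, "RamSize",     128 * _1M);  // 128MB of base RAM
 *      CFGMR3InsertInteger(pRoot, "RamPreAlloc", 0);          // allocate on first write (the default)
 * @endcode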
 */
MMR3DECL(int) MMR3InitPaging(PVM pVM)
{
    LogFlow(("MMR3InitPaging:\n"));

    /*
     * Query the CFGM values.
     */
    int rc;
    PCFGMNODE pMMCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "MM");
    if (!pMMCfg)
    {
        rc = CFGMR3InsertNode(CFGMR3GetRoot(pVM), "MM", &pMMCfg);
        AssertRCReturn(rc, rc);
    }

    /** @cfgm{RamPreAlloc, boolean, false}
     * Indicates whether the base RAM should all be allocated before starting
     * the VM (default), or if it should be allocated when first written to.
     */
    bool fPreAlloc;
    rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "RamPreAlloc", &fPreAlloc);
    if (rc == VERR_CFGM_VALUE_NOT_FOUND)
        fPreAlloc = false;
    else
        AssertMsgRCReturn(rc, ("Configuration error: Failed to query integer \"RamPreAlloc\", rc=%Vrc.\n", rc), rc);

    /** @cfgm{RamSize, uint64_t, 0, 0, UINT64_MAX}
     * Specifies the size of the base RAM that is to be set up during
     * VM initialization.
     */
    uint64_t cbRam;
    rc = CFGMR3QueryU64(CFGMR3GetRoot(pVM), "RamSize", &cbRam);
    if (rc == VERR_CFGM_VALUE_NOT_FOUND)
        cbRam = 0;
    else
        AssertMsgRCReturn(rc, ("Configuration error: Failed to query integer \"RamSize\", rc=%Vrc.\n", rc), rc);

    cbRam &= X86_PTE_PAE_PG_MASK;
    pVM->mm.s.cbRamBase = cbRam;    /* Warning: don't move this code to MMR3Init without fixing REMR3Init. */
    Log(("MM: %RU64 bytes of RAM%s\n", cbRam, fPreAlloc ? " (PreAlloc)" : ""));

    /** @cfgm{MM/Policy, string, no overcommitment}
     * Specifies the policy to use when reserving memory for this VM. The recognized
     * value is 'no overcommitment' (default). See GMMPOLICY.
     */
    GMMOCPOLICY enmPolicy;
    char sz[64];
    rc = CFGMR3QueryString(CFGMR3GetRoot(pVM), "Policy", sz, sizeof(sz));
    if (RT_SUCCESS(rc))
    {
        if (    !RTStrICmp(sz, "no_oc")
            ||  !RTStrICmp(sz, "no overcommitment"))
            enmPolicy = GMMOCPOLICY_NO_OC;
        else
            return VMSetError(pVM, VERR_INVALID_PARAMETER, RT_SRC_POS, "Unknown \"MM/Policy\" value \"%s\"", sz);
    }
    else if (rc == VERR_CFGM_VALUE_NOT_FOUND)
        enmPolicy = GMMOCPOLICY_NO_OC;
    else
        AssertMsgRCReturn(rc, ("Configuration error: Failed to query string \"MM/Policy\", rc=%Vrc.\n", rc), rc);

    /** @cfgm{MM/Priority, string, normal}
     * Specifies the memory priority of this VM. The priority comes into play when the
     * system is overcommitted and the VMs need to be milked for memory. The recognized
     * values are 'low', 'normal' (default) and 'high'. See GMMPRIORITY.
     */
    GMMPRIORITY enmPriority;
    rc = CFGMR3QueryString(CFGMR3GetRoot(pVM), "Priority", sz, sizeof(sz));
    if (RT_SUCCESS(rc))
    {
        if (!RTStrICmp(sz, "low"))
            enmPriority = GMMPRIORITY_LOW;
        else if (!RTStrICmp(sz, "normal"))
            enmPriority = GMMPRIORITY_NORMAL;
        else if (!RTStrICmp(sz, "high"))
            enmPriority = GMMPRIORITY_HIGH;
        else
            return VMSetError(pVM, VERR_INVALID_PARAMETER, RT_SRC_POS, "Unknown \"MM/Priority\" value \"%s\"", sz);
    }
    else if (rc == VERR_CFGM_VALUE_NOT_FOUND)
        enmPriority = GMMPRIORITY_NORMAL;
    else
        AssertMsgRCReturn(rc, ("Configuration error: Failed to query string \"MM/Priority\", rc=%Vrc.\n", rc), rc);

    /*
     * Make the initial memory reservation with GMM.
     */
    rc = GMMR3InitialReservation(pVM, cbRam >> PAGE_SHIFT, 1, 1, enmPolicy, enmPriority);
    if (RT_FAILURE(rc))
    {
        if (rc == VERR_GMM_MEMORY_RESERVATION_DECLINED)
            return VMSetError(pVM, rc, RT_SRC_POS,
                              N_("Insufficient free memory to start the VM (cbRam=%#RX64 enmPolicy=%d enmPriority=%d)"),
                              cbRam, enmPolicy, enmPriority);
        return VMSetError(pVM, rc, RT_SRC_POS, "GMMR3InitialReservation(,%#RX64,0,0,%d,%d)",
                          cbRam >> PAGE_SHIFT, enmPolicy, enmPriority);
    }

    /*
     * If RamSize is 0 we're done now.
     */
    if (cbRam < PAGE_SIZE)
    {
        Log(("MM: No RAM configured\n"));
        return VINF_SUCCESS;
    }

    /*
     * Setup the base ram (PGM).
     */
    rc = PGMR3PhysRegisterRam(pVM, 0, cbRam, "Base RAM");
#ifdef VBOX_WITH_NEW_PHYS_CODE
    if (RT_SUCCESS(rc) && fPreAlloc)
    {
        /** @todo RamPreAlloc should be handled at the very end of the VM creation. (lazy bird) */
        return VM_SET_ERROR(pVM, VERR_NOT_IMPLEMENTED, "TODO: RamPreAlloc");
    }
#else
    if (RT_SUCCESS(rc))
    {
        /*
         * Allocate the first chunk, as we'll map ROM ranges there.
         * If requested, allocate the rest too.
         */
        RTGCPHYS GCPhys = (RTGCPHYS)0;
        rc = PGM3PhysGrowRange(pVM, &GCPhys);
        if (RT_SUCCESS(rc) && fPreAlloc)
            for (GCPhys = PGM_DYNAMIC_CHUNK_SIZE;
                 GCPhys < cbRam && RT_SUCCESS(rc);
                 GCPhys += PGM_DYNAMIC_CHUNK_SIZE)
                rc = PGM3PhysGrowRange(pVM, &GCPhys);
    }
#endif

    LogFlow(("MMR3InitPaging: returns %Vrc\n", rc));
    return rc;
}

/**
 * Terminates the MM.
 *
 * Termination means cleaning up and freeing all resources;
 * the VM itself is at this point powered off or suspended.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
MMR3DECL(int) MMR3Term(PVM pVM)
{
    /*
     * Destroy the page pool. (first, as it used the hyper heap)
     */
    mmR3PagePoolTerm(pVM);

    /*
     * Release locked memory.
     * (Associated records are released by the heap.)
     */
    PMMLOCKEDMEM pLockedMem = pVM->mm.s.pLockedMem;
    while (pLockedMem)
    {
        int rc = SUPPageUnlock(pLockedMem->pv);
        AssertMsgRC(rc, ("SUPPageUnlock(%p) -> rc=%d\n", pLockedMem->pv, rc));
        switch (pLockedMem->eType)
        {
            case MM_LOCKED_TYPE_HYPER:
                rc = SUPPageFree(pLockedMem->pv, pLockedMem->cb >> PAGE_SHIFT);
                AssertMsgRC(rc, ("SUPPageFree(%p) -> rc=%d\n", pLockedMem->pv, rc));
                break;
            case MM_LOCKED_TYPE_HYPER_NOFREE:
            case MM_LOCKED_TYPE_HYPER_PAGES:
            case MM_LOCKED_TYPE_PHYS:
                /* nothing to do. */
                break;
        }
        /* next */
        pLockedMem = pLockedMem->pNext;
    }

    /*
     * Zero stuff to detect use of the MM interface after termination.
     */
    pVM->mm.s.offLookupHyper = NIL_OFFSET;
    pVM->mm.s.pLockedMem     = NULL;
    pVM->mm.s.pHyperHeapHC   = NULL;        /* freed above. */
    pVM->mm.s.pHyperHeapGC   = 0;           /* freed above. */
    pVM->mm.s.offVM          = 0;           /* init assertion on this */

    return 0;
}

/**
 * Terminates the UVM part of MM.
 *
 * Termination means cleaning up and freeing all resources;
 * the VM itself is at this point powered off or suspended.
 *
 * @param   pUVM    Pointer to the user mode VM structure.
 */
MMR3DECL(void) MMR3TermUVM(PUVM pUVM)
{
    /*
     * Destroy the heap.
     */
    mmR3HeapDestroy(pUVM->mm.s.pHeap);
    pUVM->mm.s.pHeap = NULL;
}

/**
 * Reset notification.
 *
 * MM will reload shadow ROMs into RAM at this point and make
 * the ROM writable.
 *
 * @param   pVM     The VM handle.
 */
MMR3DECL(void) MMR3Reset(PVM pVM)
{
    mmR3PhysRomReset(pVM);
}

/**
 * Execute state save operation.
 *
 * @returns VBox status code.
 * @param   pVM     VM Handle.
 * @param   pSSM    SSM operation handle.
 */
static DECLCALLBACK(int) mmR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    LogFlow(("mmR3Save:\n"));

    /* (PGM saves the physical memory.) */
    SSMR3PutU64(pSSM, pVM->mm.s.cBasePages);
    return SSMR3PutU64(pSSM, pVM->mm.s.cbRamBase);
}

/**
 * Execute state load operation.
 *
 * @returns VBox status code.
 * @param   pVM         VM Handle.
 * @param   pSSM        SSM operation handle.
 * @param   u32Version  Data layout version.
 */
static DECLCALLBACK(int) mmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
{
    LogFlow(("mmR3Load:\n"));

    /*
     * Validate version.
     */
    if (    SSM_VERSION_MAJOR_CHANGED(u32Version, MM_SAVED_STATE_VERSION)
        ||  !u32Version)
    {
        Log(("mmR3Load: Invalid version u32Version=%d!\n", u32Version));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Check the cBasePages and cbRamBase values.
     */
    int rc;
    RTUINT cb1;

    /* cBasePages */
    uint64_t cPages;
    if (u32Version != 1)
        rc = SSMR3GetU64(pSSM, &cPages);
    else
    {
        rc = SSMR3GetUInt(pSSM, &cb1);
        cPages = cb1 >> PAGE_SHIFT;
    }
    if (VBOX_FAILURE(rc))
        return rc;
    if (cPages != pVM->mm.s.cBasePages)
    {
        Log(("mmR3Load: Memory configuration has changed. cPages=%#RX64 saved=%#RX64\n", pVM->mm.s.cBasePages, cPages));
        return VERR_SSM_LOAD_MEMORY_SIZE_MISMATCH;
    }

    /* cbRamBase */
    uint64_t cb;
    if (u32Version != 1)
        rc = SSMR3GetU64(pSSM, &cb);
    else
    {
        rc = SSMR3GetUInt(pSSM, &cb1);
        cb = cb1;
    }
    if (VBOX_FAILURE(rc))
        return rc;
    if (cb != pVM->mm.s.cbRamBase)
    {
        Log(("mmR3Load: Memory configuration has changed. cbRamBase=%#RX64 saved=%#RX64\n", pVM->mm.s.cbRamBase, cb));
        return VERR_SSM_LOAD_MEMORY_SIZE_MISMATCH;
    }

    /* (PGM restores the physical memory.) */
    return rc;
}

/**
 * Updates GMM with memory reservation changes.
 *
 * Called when MM::cbRamRegistered, MM::cShadowPages or MM::cFixedPages changes.
 *
 * @returns VBox status code - see GMMR0UpdateReservation.
 * @param   pVM     The shared VM structure.
 */
int mmR3UpdateReservation(PVM pVM)
{
    VM_ASSERT_EMT(pVM);
    if (pVM->mm.s.fDoneMMR3InitPaging)
        return GMMR3UpdateReservation(pVM,
                                      RT_MAX(pVM->mm.s.cBasePages, 1),
                                      RT_MAX(pVM->mm.s.cShadowPages, 1),
                                      RT_MAX(pVM->mm.s.cFixedPages, 1));
    return VINF_SUCCESS;
}

/**
 * Interface for PGM to increase the reservation of RAM and ROM pages.
 *
 * This can be called before MMR3InitPaging.
 *
 * @returns VBox status code. Will set VM error on failure.
 * @param   pVM             The shared VM structure.
 * @param   cAddBasePages   The number of pages to add.
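 *
 * Illustrative call sketch (e.g. from PGM when registering a RAM range of
 * cb bytes; the surrounding context is an assumption):
 *
 * @code
 *      int rc = MMR3IncreaseBaseReservation(pVM, cb >> PAGE_SHIFT);
 *      if (RT_FAILURE(rc))
 *          return rc;      // the VM error has already been set by MM
 * @endcode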
 */
MMR3DECL(int) MMR3IncreaseBaseReservation(PVM pVM, uint64_t cAddBasePages)
{
    uint64_t cOld = pVM->mm.s.cBasePages;
    pVM->mm.s.cBasePages += cAddBasePages;
    LogFlow(("MMR3IncreaseBaseReservation: +%RU64 (%RU64 -> %RU64)\n", cAddBasePages, cOld, pVM->mm.s.cBasePages));
    int rc = mmR3UpdateReservation(pVM);
    if (RT_FAILURE(rc))
    {
        VMSetError(pVM, rc, RT_SRC_POS, N_("Failed to reserve physical memory for the RAM (%#RX64 -> %#RX64)"), cOld, pVM->mm.s.cBasePages);
        pVM->mm.s.cBasePages = cOld;
    }
    return rc;
}

/**
 * Interface for PGM to adjust the reservation of fixed pages.
 *
 * This can be called before MMR3InitPaging.
 *
 * @returns VBox status code. Will set VM error on failure.
 * @param   pVM                 The shared VM structure.
 * @param   cDeltaFixedPages    The number of pages to add (positive) or subtract (negative).
 * @param   pszDesc             Some description associated with the reservation.
 */
MMR3DECL(int) MMR3AdjustFixedReservation(PVM pVM, int32_t cDeltaFixedPages, const char *pszDesc)
{
    const uint32_t cOld = pVM->mm.s.cFixedPages;
    pVM->mm.s.cFixedPages += cDeltaFixedPages;
    LogFlow(("MMR3AdjustFixedReservation: %d (%u -> %u)\n", cDeltaFixedPages, cOld, pVM->mm.s.cFixedPages));
    int rc = mmR3UpdateReservation(pVM);
    if (RT_FAILURE(rc))
    {
        VMSetError(pVM, rc, RT_SRC_POS, N_("Failed to reserve physical memory (%#x -> %#x; %s)"),
                   cOld, pVM->mm.s.cFixedPages, pszDesc);
        pVM->mm.s.cFixedPages = cOld;
    }
    return rc;
}


/**
 * Interface for PGM to update the reservation of shadow pages.
 *
 * This can be called before MMR3InitPaging.
 *
 * @returns VBox status code. Will set VM error on failure.
 * @param   pVM             The shared VM structure.
 * @param   cShadowPages    The new page count.
 */
MMR3DECL(int) MMR3UpdateShadowReservation(PVM pVM, uint32_t cShadowPages)
{
    const uint32_t cOld = pVM->mm.s.cShadowPages;
    pVM->mm.s.cShadowPages = cShadowPages;
    LogFlow(("MMR3UpdateShadowReservation: %u -> %u\n", cOld, pVM->mm.s.cShadowPages));
    int rc = mmR3UpdateReservation(pVM);
    if (RT_FAILURE(rc))
    {
        VMSetError(pVM, rc, RT_SRC_POS, N_("Failed to reserve physical memory for shadow page tables (%#x -> %#x)"), cOld, pVM->mm.s.cShadowPages);
        pVM->mm.s.cShadowPages = cOld;
    }
    return rc;
}

/**
 * Locks physical memory which backs a virtual memory range (HC), adding
 * the required records to the pLockedMem list.
 *
 * @returns VBox status code.
 * @param   pVM             The VM handle.
 * @param   pv              Pointer to the memory range which shall be locked down.
 *                          This pointer is page aligned.
 * @param   cb              Size of the memory range (in bytes). This size is page aligned.
 * @param   eType           Memory type.
 * @param   ppLockedMem     Where to store the pointer to the created locked memory record.
 *                          This is optional, pass NULL if not used.
 * @param   fSilentFailure  Don't raise an error when unsuccessful. The upper layers will deal with it.
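 *
 * Illustrative internal call sketch (assumed context; pv and cb are hypothetical
 * and must be page aligned as asserted below):
 *
 * @code
 *      PMMLOCKEDMEM pLockedMem;
 *      int rc = mmR3LockMem(pVM, pv, cb, MM_LOCKED_TYPE_HYPER, &pLockedMem, false /* fSilentFailure */);
 *      if (VBOX_SUCCESS(rc))
 *      {
 *          // pLockedMem->aPhysPages[] now holds the host physical addresses of the locked pages.
 *      }
 * @endcode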
 */
int mmR3LockMem(PVM pVM, void *pv, size_t cb, MMLOCKEDTYPE eType, PMMLOCKEDMEM *ppLockedMem, bool fSilentFailure)
{
    Assert(RT_ALIGN_P(pv, PAGE_SIZE) == pv);
    Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);

    if (ppLockedMem)
        *ppLockedMem = NULL;

    /*
     * Allocate locked mem structure.
     */
    unsigned cPages = cb >> PAGE_SHIFT;
    AssertReturn(cPages == (cb >> PAGE_SHIFT), VERR_OUT_OF_RANGE);
    PMMLOCKEDMEM pLockedMem = (PMMLOCKEDMEM)MMR3HeapAlloc(pVM, MM_TAG_MM, RT_OFFSETOF(MMLOCKEDMEM, aPhysPages[cPages]));
    if (!pLockedMem)
        return VERR_NO_MEMORY;
    pLockedMem->pv    = pv;
    pLockedMem->cb    = cb;
    pLockedMem->eType = eType;
    memset(&pLockedMem->u, 0, sizeof(pLockedMem->u));

    /*
     * Lock the memory.
     */
    int rc = SUPPageLock(pv, cPages, &pLockedMem->aPhysPages[0]);
    if (VBOX_SUCCESS(rc))
    {
        /*
         * Setup the reserved field.
         */
        PSUPPAGE pPhysPage = &pLockedMem->aPhysPages[0];
        for (unsigned c = cPages; c > 0; c--, pPhysPage++)
            pPhysPage->uReserved = (RTHCUINTPTR)pLockedMem;

        /*
         * Insert into the list.
         *
         * ASSUME no protection is needed here as only one thread in the system can possibly
         * be doing this. No other threads will walk this list either, we assume.
         */
        pLockedMem->pNext = pVM->mm.s.pLockedMem;
        pVM->mm.s.pLockedMem = pLockedMem;
        /* Set return value. */
        if (ppLockedMem)
            *ppLockedMem = pLockedMem;
    }
    else
    {
        AssertMsgFailed(("SUPPageLock failed with rc=%d\n", rc));
        MMR3HeapFree(pLockedMem);
        if (!fSilentFailure)
            rc = VMSetError(pVM, rc, RT_SRC_POS, N_("Failed to lock %d bytes of host memory (out of memory)"), cb);
    }

    return rc;
}

/**
 * Maps a part of or an entire locked memory region into the guest context.
 *
 * @returns VBox status.
 *          God knows what happens if we fail...
 * @param   pVM         VM handle.
 * @param   pLockedMem  Locked memory structure.
 * @param   Addr        GC Address where to start the mapping.
 * @param   iPage       Page number in the locked memory region.
 * @param   cPages      Number of pages to map.
 * @param   fFlags      See the fFlags argument of PGMMap().
 */
int mmR3MapLocked(PVM pVM, PMMLOCKEDMEM pLockedMem, RTGCPTR Addr, unsigned iPage, size_t cPages, unsigned fFlags)
{
    /*
     * Adjust the ~0 argument.
     */
    if (cPages == ~(size_t)0)
        cPages = (pLockedMem->cb >> PAGE_SHIFT) - iPage;
    Assert(cPages != ~0U);
    /* no incorrect arguments are accepted */
    Assert(RT_ALIGN_GCPT(Addr, PAGE_SIZE, RTGCPTR) == Addr);
    AssertMsg(iPage < (pLockedMem->cb >> PAGE_SHIFT), ("never even think about giving me a bad iPage(=%d)\n", iPage));
    AssertMsg(iPage + cPages <= (pLockedMem->cb >> PAGE_SHIFT), ("never even think about giving me a bad cPages(=%d)\n", cPages));

    /*
     * Map the pages.
     */
    PSUPPAGE pPhysPage = &pLockedMem->aPhysPages[iPage];
    while (cPages)
    {
        RTHCPHYS HCPhys = pPhysPage->Phys;
        int rc = PGMMap(pVM, Addr, HCPhys, PAGE_SIZE, fFlags);
        if (VBOX_FAILURE(rc))
        {
            /** @todo how the hell can we do a proper bailout here. */
            return rc;
        }

        /* next */
        cPages--;
        iPage++;
        pPhysPage++;
        Addr += PAGE_SIZE;
    }

    return VINF_SUCCESS;
}

/**
 * Convert an HC physical address to an HC virtual address.
 *
 * @returns VBox status.
 * @param   pVM     VM handle.
 * @param   HCPhys  The host context physical address.
 * @param   ppv     Where to store the resulting address.
 * @thread  The Emulation Thread.
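 *
 * Illustrative call sketch (assumed context; HCPhys is hypothetical):
 *
 * @code
 *      void *pv;
 *      int rc = MMR3HCPhys2HCVirt(pVM, HCPhys, &pv);
 *      if (VBOX_SUCCESS(rc))
 *      {
 *          // pv now addresses the ring-3 mapping of the page containing HCPhys.
 *      }
 * @endcode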
 */
MMR3DECL(int) MMR3HCPhys2HCVirt(PVM pVM, RTHCPHYS HCPhys, void **ppv)
{
    /*
     * Try page tables.
     */
    int rc = MMPagePhys2PageTry(pVM, HCPhys, ppv);
    if (VBOX_SUCCESS(rc))
        return rc;

    /*
     * Iterate the locked memory - very slow.
     */
    uint32_t off = HCPhys & PAGE_OFFSET_MASK;
    HCPhys &= X86_PTE_PAE_PG_MASK;
    for (PMMLOCKEDMEM pCur = pVM->mm.s.pLockedMem; pCur; pCur = pCur->pNext)
    {
        size_t iPage = pCur->cb >> PAGE_SHIFT;
        while (iPage-- > 0)
            if ((pCur->aPhysPages[iPage].Phys & X86_PTE_PAE_PG_MASK) == HCPhys)
            {
                *ppv = (char *)pCur->pv + (iPage << PAGE_SHIFT) + off;
                return VINF_SUCCESS;
            }
    }
    /* give up */
    return VERR_INVALID_POINTER;
}

/**
 * Read memory from GC virtual address using the current guest CR3.
 *
 * @returns VBox status.
 * @param   pVM     VM handle.
 * @param   pvDst   Destination address (HC of course).
 * @param   GCPtr   GC virtual address.
 * @param   cb      Number of bytes to read.
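 *
 * Illustrative call sketch (e.g. from a debugger facility; GCPtr is hypothetical):
 *
 * @code
 *      uint8_t abBuf[16];
 *      int rc = MMR3ReadGCVirt(pVM, abBuf, GCPtr, sizeof(abBuf));
 *      if (VBOX_SUCCESS(rc))
 *      {
 *          // abBuf now holds 16 bytes read from the guest (or hypervisor area) address GCPtr.
 *      }
 * @endcode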
 */
MMR3DECL(int) MMR3ReadGCVirt(PVM pVM, void *pvDst, RTGCPTR GCPtr, size_t cb)
{
    if (GCPtr - pVM->mm.s.pvHyperAreaGC < pVM->mm.s.cbHyperArea)
        return MMR3HyperReadGCVirt(pVM, pvDst, GCPtr, cb);
    return PGMPhysReadGCPtr(pVM, pvDst, GCPtr, cb);
}


/**
 * Write to memory at GC virtual address translated using the current guest CR3.
 *
 * @returns VBox status.
 * @param   pVM         VM handle.
 * @param   GCPtrDst    GC virtual address.
 * @param   pvSrc       The source address (HC of course).
 * @param   cb          Number of bytes to write.
 */
MMR3DECL(int) MMR3WriteGCVirt(PVM pVM, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
{
    if (GCPtrDst - pVM->mm.s.pvHyperAreaGC < pVM->mm.s.cbHyperArea)
        return VERR_ACCESS_DENIED;
    return PGMPhysWriteGCPtr(pVM, GCPtrDst, pvSrc, cb);
}