VirtualBox

source: vbox/trunk/include/VBox/mm.h@ 6837

Last change on this file since 6837 was 6818, checked in by vboxsync, 17 years ago

New tag: MM_TAG_PGM_PHYS.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 14.7 KB
Line 
1/** @file
2 * MM - The Memory Manager.
3 */
4
5/*
6 * Copyright (C) 2006-2007 innotek GmbH
7 *
8 * This file is part of VirtualBox Open Source Edition (OSE), as
9 * available from http://www.virtualbox.org. This file is free software;
10 * you can redistribute it and/or modify it under the terms of the GNU
11 * General Public License (GPL) as published by the Free Software
12 * Foundation, in version 2 as it comes in the "COPYING" file of the
13 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
14 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
15 *
16 * The contents of this file may alternatively be used under the terms
17 * of the Common Development and Distribution License Version 1.0
18 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
19 * VirtualBox OSE distribution, in which case the provisions of the
20 * CDDL are applicable instead of those of the GPL.
21 *
22 * You may elect to license modified versions of this file under the
23 * terms and conditions of either the GPL or the CDDL or both.
24 */
25
26#ifndef ___VBox_mm_h
27#define ___VBox_mm_h
28
29#include <VBox/cdefs.h>
30#include <VBox/types.h>
31#include <VBox/x86.h>
32#include <VBox/sup.h>
33
34
35__BEGIN_DECLS
36
37/** @defgroup grp_mm The Memory Manager API
38 * @{
39 */
40
/** @name RAM Page Flags
 * Since internal ranges have a byte granularity it's possible for a
 * page to be flagged for several uses. The access virtualization in PGM
 * will choose the most restricted one and use EM to emulate access to
 * the less restricted areas of the page.
 *
 * Bits 0-11 only since they are fitted into the offset part of a physical memory address.
 * @{
 */
/** Reserved - Not RAM, ROM nor MMIO2.
 * If this bit is cleared the memory is assumed to be some kind of RAM.
 * Normal MMIO may set it but that depends on whether the RAM range was
 * created specially for the MMIO or not.
 *
 * @remarks The current implementation will always reserve backing
 * memory for reserved ranges to simplify things.
 */
#define MM_RAM_FLAGS_RESERVED RT_BIT(0)
/** ROM - Read Only Memory.
 * The page has an HC physical address which contains the BIOS code. All write
 * access is trapped and ignored.
 *
 * HACK: Writable shadow ROM is indicated by both ROM and MMIO2 being
 * set. (We're out of bits.)
 */
#define MM_RAM_FLAGS_ROM RT_BIT(1)
/** MMIO - Memory Mapped I/O.
 * All access is trapped and emulated. No physical backing is required, but
 * might for various reasons be present.
 */
#define MM_RAM_FLAGS_MMIO RT_BIT(2)
/** MMIO2 - Memory Mapped I/O, variation 2.
 * The virtualization is performed using real memory and only catching
 * a few accesses, such as keeping track of dirty pages.
 * @remark Involved in the shadow ROM hack (see MM_RAM_FLAGS_ROM).
 */
#define MM_RAM_FLAGS_MMIO2 RT_BIT(3)

/** PGM has virtual page access handler(s) defined for pages with this flag. */
#define MM_RAM_FLAGS_VIRTUAL_HANDLER RT_BIT(4)
/** PGM has virtual page access handler(s) for write access. */
#define MM_RAM_FLAGS_VIRTUAL_WRITE RT_BIT(5)
/** PGM has virtual page access handler(s) for all access. */
#define MM_RAM_FLAGS_VIRTUAL_ALL RT_BIT(6)
/** PGM has physical page access handler(s) defined for pages with this flag. */
#define MM_RAM_FLAGS_PHYSICAL_HANDLER RT_BIT(7)
/** PGM has physical page access handler(s) for write access. */
#define MM_RAM_FLAGS_PHYSICAL_WRITE RT_BIT(8)
/** PGM has physical page access handler(s) for all access. */
#define MM_RAM_FLAGS_PHYSICAL_ALL RT_BIT(9)
/** PGM has physical page access handler(s) for this page and has temporarily disabled it. */
#define MM_RAM_FLAGS_PHYSICAL_TEMP_OFF RT_BIT(10)
#ifndef VBOX_WITH_NEW_PHYS_CODE
/** Physical backing memory is allocated dynamically. Not set implies a one time static allocation. */
#define MM_RAM_FLAGS_DYNAMIC_ALLOC RT_BIT(11)
#endif /* !VBOX_WITH_NEW_PHYS_CODE */

/* The following fields live in the upper bits (48-63) of the same 64-bit
   word, i.e. above the 12-bit page offset flags defined above. */

/** The shift used to get the reference count (cRefs). */
#define MM_RAM_FLAGS_CREFS_SHIFT 62
/** The mask applied to the cRefs value after using MM_RAM_FLAGS_CREFS_SHIFT to shift it down. */
#define MM_RAM_FLAGS_CREFS_MASK 0x3
/** The (shifted) cRefs value used to indicate that the idx is the head of a
 * physical cross reference extent list. */
#define MM_RAM_FLAGS_CREFS_PHYSEXT MM_RAM_FLAGS_CREFS_MASK
/** The shift used to get the page pool idx. (Apply MM_RAM_FLAGS_IDX_MASK to the result when shifting down). */
#define MM_RAM_FLAGS_IDX_SHIFT 48
/** The mask applied to the page pool idx after using MM_RAM_FLAGS_IDX_SHIFT to shift it down. */
#define MM_RAM_FLAGS_IDX_MASK 0x3fff
/** The idx value when we're out of extents or there are simply too many mappings of this page. */
#define MM_RAM_FLAGS_IDX_OVERFLOWED MM_RAM_FLAGS_IDX_MASK

/** Mask for masking off any references to the page (i.e. clearing bits 48-63). */
#define MM_RAM_FLAGS_NO_REFS_MASK UINT64_C(0x0000ffffffffffff)
/** @} */
115
#ifndef VBOX_WITH_NEW_PHYS_CODE
/** @name MMR3PhysRegisterEx registration type
 * Selects how a physical range is registered; passed as the enmType
 * argument of MMR3PhysRegisterEx().
 * @{
 */
typedef enum
{
    /** Normal physical region (flags specify exact page type) */
    MM_PHYS_TYPE_NORMAL = 0,
    /** Allocate part of a dynamically allocated physical region */
    MM_PHYS_TYPE_DYNALLOC_CHUNK,

    /** Forces the compiler to use a 32-bit type for the enum. */
    MM_PHYS_TYPE_32BIT_HACK = 0x7fffffff
} MMPHYSREG;
/** @} */
#endif
131
/**
 * Memory Allocation Tags.
 * For use with MMHyperAlloc(), MMR3HeapAlloc(), MMR3HeapAllocEx(),
 * MMR3HeapAllocZ() and MMR3HeapAllocZEx().
 *
 * Tags identify the owning subsystem of an allocation so per-tag
 * statistics can be kept. Do NOT reorder the values: that would change
 * the numeric tag values.
 *
 * @remark Don't forget to update the dump command in MMHeap.cpp!
 */
typedef enum MMTAG
{
    MM_TAG_INVALID = 0,

    /* CFGM - Configuration Manager. */
    MM_TAG_CFGM,
    MM_TAG_CFGM_BYTES,
    MM_TAG_CFGM_STRING,
    MM_TAG_CFGM_USER,

    /* CSAM - Code Scanning and Analysis Manager. */
    MM_TAG_CSAM,
    MM_TAG_CSAM_PATCH,

    /* DBGF - Debugger Facility. */
    MM_TAG_DBGF,
    MM_TAG_DBGF_INFO,
    MM_TAG_DBGF_LINE,
    MM_TAG_DBGF_LINE_DUP,
    MM_TAG_DBGF_STACK,
    MM_TAG_DBGF_SYMBOL,
    MM_TAG_DBGF_SYMBOL_DUP,
    MM_TAG_DBGF_MODULE,

    MM_TAG_EM,

    /* IOM - Input/Output Monitor. */
    MM_TAG_IOM,
    MM_TAG_IOM_STATS,

    /* MM - the Memory Manager itself. */
    MM_TAG_MM,
    MM_TAG_MM_LOOKUP_GUEST,
    MM_TAG_MM_LOOKUP_PHYS,
    MM_TAG_MM_LOOKUP_VIRT,
    MM_TAG_MM_PAGE,

    /* PATM - Patch Manager. */
    MM_TAG_PATM,
    MM_TAG_PATM_PATCH,

    /* PDM - Pluggable Device Manager. */
    MM_TAG_PDM,
    MM_TAG_PDM_ASYNC_COMPLETION,
    MM_TAG_PDM_DEVICE,
    MM_TAG_PDM_DEVICE_USER,
    MM_TAG_PDM_DRIVER,
    MM_TAG_PDM_DRIVER_USER,
    MM_TAG_PDM_USB,
    MM_TAG_PDM_USB_USER,
    MM_TAG_PDM_LUN,
    MM_TAG_PDM_QUEUE,
    MM_TAG_PDM_THREAD,

    /* PGM - Page Manager and Monitor. */
    MM_TAG_PGM,
    MM_TAG_PGM_CHUNK_MAPPING,
    MM_TAG_PGM_HANDLERS,
    MM_TAG_PGM_PHYS,
    MM_TAG_PGM_POOL,

    MM_TAG_REM,

    MM_TAG_SELM,

    MM_TAG_SSM,

    MM_TAG_STAM,

    MM_TAG_TM,

    MM_TAG_TRPM,

    MM_TAG_VM,
    MM_TAG_VM_REQ,

    MM_TAG_VMM,

    MM_TAG_HWACCM,

    /** Forces the compiler to use a 32-bit type for the enum. */
    MM_TAG_32BIT_HACK = 0x7fffffff
} MMTAG;
213
214
215
216
/** @defgroup grp_mm_hyper Hypervisor Memory Management
 * @ingroup grp_mm
 * @{ */

/* Address translation between the ring-0 (R0), ring-3 (R3) and guest
   context (GC) mappings of hypervisor memory. */
MMDECL(RTR3PTR) MMHyperR0ToR3(PVM pVM, RTR0PTR R0Ptr);
MMDECL(RTGCPTR) MMHyperR0ToGC(PVM pVM, RTR0PTR R0Ptr);
#ifndef IN_RING0 /* in ring-0 the current context (CC) is R0 itself, so presumably no helper is needed */
MMDECL(void *) MMHyperR0ToCC(PVM pVM, RTR0PTR R0Ptr);
#endif
MMDECL(RTR0PTR) MMHyperR3ToR0(PVM pVM, RTR3PTR R3Ptr);
MMDECL(RTGCPTR) MMHyperR3ToGC(PVM pVM, RTR3PTR R3Ptr);
MMDECL(RTR3PTR) MMHyperGCToR3(PVM pVM, RTGCPTR GCPtr);
MMDECL(RTR0PTR) MMHyperGCToR0(PVM pVM, RTGCPTR GCPtr);
230
#ifndef IN_RING3
MMDECL(void *) MMHyperR3ToCC(PVM pVM, RTR3PTR R3Ptr);
#else
/**
 * Converts a ring-3 hypervisor address to a current context address.
 * When compiling for ring-3 the current context IS ring-3, so this
 * reduces to the identity conversion.
 *
 * @returns The R3 pointer unchanged.
 * @param   pVM     The VM handle. Unused in this context.
 * @param   R3Ptr   The ring-3 address to convert.
 */
DECLINLINE(void *) MMHyperR3ToCC(PVM pVM, RTR3PTR R3Ptr)
{
    NOREF(pVM);
    return R3Ptr;
}
#endif
240
241
#ifndef IN_GC
MMDECL(void *) MMHyperGCToCC(PVM pVM, RTGCPTR GCPtr);
#else
/**
 * Converts a guest context hypervisor address to a current context address.
 * When compiling for the guest context the current context IS GC, so this
 * reduces to the identity conversion.
 *
 * @returns The GC pointer unchanged.
 * @param   pVM     The VM handle. Unused in this context.
 * @param   GCPtr   The guest context address to convert.
 */
DECLINLINE(void *) MMHyperGCToCC(PVM pVM, RTGCPTR GCPtr)
{
    NOREF(pVM);
    return GCPtr;
}
#endif
251
#ifndef IN_RING3
MMDECL(RTR3PTR) MMHyperCCToR3(PVM pVM, void *pv);
#else
/**
 * Converts a current context hypervisor address to a ring-3 address.
 * When compiling for ring-3 the current context IS ring-3, so this
 * reduces to the identity conversion.
 *
 * @returns pv unchanged, as an RTR3PTR.
 * @param   pVM     The VM handle. Unused in this context.
 * @param   pv      The current context address to convert.
 */
DECLINLINE(RTR3PTR) MMHyperCCToR3(PVM pVM, void *pv)
{
    NOREF(pVM);
    return pv;
}
#endif
261
#ifndef IN_RING0
MMDECL(RTR0PTR) MMHyperCCToR0(PVM pVM, void *pv);
#else
/**
 * Converts a current context hypervisor address to a ring-0 address.
 * When compiling for ring-0 the current context IS ring-0, so this
 * reduces to the identity conversion.
 *
 * @returns pv unchanged, as an RTR0PTR.
 * @param   pVM     The VM handle. Unused in this context.
 * @param   pv      The current context address to convert.
 */
DECLINLINE(RTR0PTR) MMHyperCCToR0(PVM pVM, void *pv)
{
    NOREF(pVM);
    return pv;
}
#endif
271
#ifndef IN_GC
MMDECL(RTGCPTR) MMHyperCCToGC(PVM pVM, void *pv);
#else
/**
 * Converts a current context hypervisor address to a guest context address.
 * When compiling for the guest context the current context IS GC, so this
 * reduces to the identity conversion.
 *
 * @returns pv unchanged, as an RTGCPTR.
 * @param   pVM     The VM handle. Unused in this context.
 * @param   pv      The current context address to convert.
 */
DECLINLINE(RTGCPTR) MMHyperCCToGC(PVM pVM, void *pv)
{
    NOREF(pVM);
    return pv;
}
#endif
281
282
#ifdef IN_GC
MMDECL(RTHCPTR) MMHyper2HC(PVM pVM, uintptr_t Ptr);
#else
/**
 * Converts a raw hypervisor address to a host context (HC) pointer.
 * Outside the guest context the address is already a host context
 * address, so only a cast is required.
 *
 * @returns Ptr cast to RTHCPTR.
 * @param   pVM     The VM handle. Unused in this context.
 * @param   Ptr     The raw hypervisor address.
 */
DECLINLINE(RTHCPTR) MMHyper2HC(PVM pVM, uintptr_t Ptr)
{
    NOREF(pVM);
    return (RTHCPTR)Ptr;
}
#endif
292
#ifndef IN_GC
MMDECL(RTGCPTR) MMHyper2GC(PVM pVM, uintptr_t Ptr);
#else
/**
 * Converts a raw hypervisor address to a guest context (GC) pointer.
 * Inside the guest context the address is already a guest context
 * address, so only a cast is required.
 *
 * @returns Ptr cast to RTGCPTR.
 * @param   pVM     The VM handle. Unused in this context.
 * @param   Ptr     The raw hypervisor address.
 */
DECLINLINE(RTGCPTR) MMHyper2GC(PVM pVM, uintptr_t Ptr)
{
    NOREF(pVM);
    return (RTGCPTR)Ptr;
}
#endif
302
/* HC <-> GC conversions for hypervisor memory. */
MMDECL(RTGCPTR) MMHyperHC2GC(PVM pVM, RTHCPTR HCPtr);
MMDECL(RTHCPTR) MMHyperGC2HC(PVM pVM, RTGCPTR GCPtr);
/* Tagged allocation and free on the hypervisor heap (see MMTAG for tags). */
MMDECL(int) MMHyperAlloc(PVM pVM, size_t cb, uint32_t uAlignment, MMTAG enmTag, void **ppv);
MMDECL(int) MMHyperFree(PVM pVM, void *pv);
#ifdef DEBUG
MMDECL(void) MMHyperHeapDump(PVM pVM);
#endif
/* Hypervisor heap size queries and hypervisor area queries. */
MMDECL(size_t) MMHyperHeapGetFreeSize(PVM pVM);
MMDECL(size_t) MMHyperHeapGetSize(PVM pVM);
MMDECL(RTGCPTR) MMHyperGetArea(PVM pVM, size_t *pcb);
MMDECL(bool) MMHyperIsInsideArea(PVM pVM, RTGCPTR GCPtr);
314
315
/* Conversions between MM page addresses and host physical addresses.
   The Ex/Try variants return a status code instead of asserting/failing
   hard — TODO confirm exact failure semantics in the implementation. */
MMDECL(RTHCPHYS) MMPage2Phys(PVM pVM, void *pvPage);
MMDECL(void *) MMPagePhys2Page(PVM pVM, RTHCPHYS HCPhysPage);
MMDECL(int) MMPagePhys2PageEx(PVM pVM, RTHCPHYS HCPhysPage, void **ppvPage);
MMDECL(int) MMPagePhys2PageTry(PVM pVM, RTHCPHYS HCPhysPage, void **ppvPage);
/* Guest physical -> host virtual lookup covering cbRange bytes. */
MMDECL(void *) MMPhysGCPhys2HCVirt(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange);
321
322
/** @def MMHYPER_GC_ASSERT_GCPTR
 * Asserts that an address is either NULL or inside the hypervisor memory area.
 * This assertion only works while IN_GC, it's a NOP everywhere else.
 * @thread The Emulation Thread.
 */
#ifdef IN_GC
# define MMHYPER_GC_ASSERT_GCPTR(pVM, GCPtr) Assert(MMHyperIsInsideArea((pVM), (GCPtr)) || !(GCPtr))
#else
/* Expands to an empty statement; the arguments are not evaluated at all. */
# define MMHYPER_GC_ASSERT_GCPTR(pVM, GCPtr) do { } while (0)
#endif
333
334/** @} */
335
336
#ifdef IN_RING3
/** @defgroup grp_mm_r3 The MM Host Context Ring-3 API
 * @ingroup grp_mm
 * @{
 */

/* MM lifecycle for the VM and user-mode VM (UVM) structures. */
MMR3DECL(int) MMR3InitUVM(PUVM pUVM);
MMR3DECL(int) MMR3Init(PVM pVM);
MMR3DECL(int) MMR3InitPaging(PVM pVM);
MMR3DECL(int) MMR3HyperInitFinalize(PVM pVM);
MMR3DECL(int) MMR3Term(PVM pVM);
MMR3DECL(void) MMR3TermUVM(PUVM pUVM);
MMR3DECL(void) MMR3Reset(PVM pVM);
/* Memory reservation bookkeeping, expressed in page counts. */
MMR3DECL(int) MMR3IncreaseBaseReservation(PVM pVM, uint64_t cAddBasePages);
MMR3DECL(int) MMR3IncreaseFixedReservation(PVM pVM, uint32_t cAddFixedPages);
MMR3DECL(int) MMR3UpdateShadowReservation(PVM pVM, uint32_t cShadowPages);

MMR3DECL(int) MMR3HCPhys2HCVirt(PVM pVM, RTHCPHYS HCPhys, void **ppv);
/* Copy cb bytes from/to guest virtual memory from ring-3. */
MMR3DECL(int) MMR3ReadGCVirt(PVM pVM, void *pvDst, RTGCPTR GCPtr, size_t cb);
MMR3DECL(int) MMR3WriteGCVirt(PVM pVM, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb);
357
358
/** @defgroup grp_mm_r3_hyper Hypervisor Memory Manager (HC R3 Portion)
 * @ingroup grp_mm_r3
 * @{ */
/* NOTE(review): uses MMDECL while the siblings use MMR3DECL — looks
   inconsistent; confirm whether that is intentional. */
MMDECL(int) MMR3HyperAllocOnceNoRel(PVM pVM, size_t cb, uint32_t uAlignment, MMTAG enmTag, void **ppv);
/* Map host/guest memory into the hypervisor area; *pGCPtr receives the
   resulting hypervisor (GC) address, pszDesc is a description for stats/logs. */
MMR3DECL(int) MMR3HyperMapHCPhys(PVM pVM, void *pvHC, RTHCPHYS HCPhys, size_t cb, const char *pszDesc, PRTGCPTR pGCPtr);
MMR3DECL(int) MMR3HyperMapGCPhys(PVM pVM, RTGCPHYS GCPhys, size_t cb, const char *pszDesc, PRTGCPTR pGCPtr);
MMR3DECL(int) MMR3HyperMapHCRam(PVM pVM, void *pvHC, size_t cb, bool fFree, const char *pszDesc, PRTGCPTR pGCPtr);
MMR3DECL(int) MMR3HyperMapPages(PVM pVM, void *pvR3, RTR0PTR pvR0, size_t cPages, PCSUPPAGE paPages, const char *pszDesc, PRTGCPTR pGCPtr);
MMR3DECL(int) MMR3HyperReserve(PVM pVM, unsigned cb, const char *pszDesc, PRTGCPTR pGCPtr);
/* HC virtual <-> HC physical conversions for hypervisor memory; the Ex
   variants return a status code and write the result via a pointer. */
MMR3DECL(RTHCPHYS) MMR3HyperHCVirt2HCPhys(PVM pVM, void *pvHC);
MMR3DECL(int) MMR3HyperHCVirt2HCPhysEx(PVM pVM, void *pvHC, PRTHCPHYS pHCPhys);
MMR3DECL(void *) MMR3HyperHCPhys2HCVirt(PVM pVM, RTHCPHYS HCPhys);
MMR3DECL(int) MMR3HyperHCPhys2HCVirtEx(PVM pVM, RTHCPHYS HCPhys, void **ppv);
MMR3DECL(int) MMR3HyperReadGCVirt(PVM pVM, void *pvDst, RTGCPTR GCPtr, size_t cb);
/** @} */
374
375
/** @defgroup grp_mm_phys Guest Physical Memory Manager
 * @ingroup grp_mm_r3
 * @{ */
MMR3DECL(int) MMR3PhysRegister(PVM pVM, void *pvRam, RTGCPHYS GCPhys, unsigned cb, unsigned fFlags, const char *pszDesc);
#ifndef VBOX_WITH_NEW_PHYS_CODE
/* Extended registration taking an MMPHYSREG type (see the enum above). */
MMR3DECL(int) MMR3PhysRegisterEx(PVM pVM, void *pvRam, RTGCPHYS GCPhys, unsigned cb, unsigned fFlags, MMPHYSREG enmType, const char *pszDesc);
#endif
MMR3DECL(int) MMR3PhysRelocate(PVM pVM, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, unsigned cb);
/* ROM registration; fShadow presumably enables the writable shadow ROM
   hack described at MM_RAM_FLAGS_ROM — confirm in the implementation. */
MMR3DECL(int) MMR3PhysRomRegister(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, RTUINT cbRange, const void *pvBinary, bool fShadow, const char *pszDesc);
MMR3DECL(int) MMR3PhysRomProtect(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange);
MMR3DECL(int) MMR3PhysReserve(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange, const char *pszDesc);
MMR3DECL(uint64_t) MMR3PhysGetRamSize(PVM pVM);
/** @} */
389
390
/** @defgroup grp_mm_page Physical Page Pool
 * @ingroup grp_mm_r3
 * @{ */
MMR3DECL(void *) MMR3PageAlloc(PVM pVM);
MMR3DECL(RTHCPHYS) MMR3PageAllocPhys(PVM pVM);
MMR3DECL(void) MMR3PageFree(PVM pVM, void *pvPage);
/* "Low" variants presumably allocate from a low-address pool — TODO
   confirm against the page pool implementation. */
MMR3DECL(void *) MMR3PageAllocLow(PVM pVM);
MMR3DECL(void) MMR3PageFreeLow(PVM pVM, void *pvPage);
MMR3DECL(void) MMR3PageFreeByPhys(PVM pVM, RTHCPHYS HCPhysPage);
/* Accessors for the dummy page (HC pointer and HC physical address). */
MMR3DECL(void *) MMR3PageDummyHCPtr(PVM pVM);
MMR3DECL(RTHCPHYS) MMR3PageDummyHCPhys(PVM pVM);
/** @} */
403
404
/** @defgroup grp_mm_heap Heap Manager
 * @ingroup grp_mm_r3
 * @{ */
/* Tagged ring-3 heap allocation. Naming conventions as suggested by the
   signatures: U variants take the user-mode VM handle (PUVM), Ex variants
   return a status code and the pointer via **ppv, Z variants presumably
   zero the allocation (confirm in MMHeap.cpp). */
MMR3DECL(void *) MMR3HeapAlloc(PVM pVM, MMTAG enmTag, size_t cbSize);
MMR3DECL(void *) MMR3HeapAllocU(PUVM pUVM, MMTAG enmTag, size_t cbSize);
MMR3DECL(int) MMR3HeapAllocEx(PVM pVM, MMTAG enmTag, size_t cbSize, void **ppv);
MMR3DECL(int) MMR3HeapAllocExU(PUVM pUVM, MMTAG enmTag, size_t cbSize, void **ppv);
MMR3DECL(void *) MMR3HeapAllocZ(PVM pVM, MMTAG enmTag, size_t cbSize);
MMR3DECL(void *) MMR3HeapAllocZU(PUVM pUVM, MMTAG enmTag, size_t cbSize);
MMR3DECL(int) MMR3HeapAllocZEx(PVM pVM, MMTAG enmTag, size_t cbSize, void **ppv);
MMR3DECL(int) MMR3HeapAllocZExU(PUVM pUVM, MMTAG enmTag, size_t cbSize, void **ppv);
/* Note: realloc/strdup/free take no VM handle; the owning heap must be
   recoverable from the pointer itself. */
MMR3DECL(void *) MMR3HeapRealloc(void *pv, size_t cbNewSize);
MMR3DECL(char *) MMR3HeapStrDup(PVM pVM, MMTAG enmTag, const char *psz);
MMR3DECL(char *) MMR3HeapStrDupU(PUVM pUVM, MMTAG enmTag, const char *psz);
MMR3DECL(void) MMR3HeapFree(void *pv);
/** @} */

/** @} */
#endif /* IN_RING3 */
424
425
426
#ifdef IN_GC
/** @defgroup grp_mm_gc The MM Guest Context API
 * @ingroup grp_mm
 * @{
 */

/* Register/deregister the trap handler used by the GC RAM access routines. */
MMGCDECL(void) MMGCRamRegisterTrapHandler(PVM pVM);
MMGCDECL(void) MMGCRamDeregisterTrapHandler(PVM pVM);
/* NoTrapHandler variants take no VM handle; presumably for use when the
   trap handler state is managed by the caller — confirm in MMGCRam.cpp. */
MMGCDECL(int) MMGCRamReadNoTrapHandler(void *pDst, void *pSrc, size_t cb);
MMGCDECL(int) MMGCRamWriteNoTrapHandler(void *pDst, void *pSrc, size_t cb);
MMGCDECL(int) MMGCRamRead(PVM pVM, void *pDst, void *pSrc, size_t cb);
MMGCDECL(int) MMGCRamWrite(PVM pVM, void *pDst, void *pSrc, size_t cb);

/** @} */
#endif /* IN_GC */
442
443/** @} */
444__END_DECLS
445
446
447#endif
448
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette