source: vbox/trunk/include/VBox/gmm.h@ 6168

Last change on this file since 6168 was 5999, checked in by vboxsync, 17 years ago

The Giant CDDL Dual-License Header Change.

/** @file
 * GMM - The Global Memory Manager.
 */

/*
 * Copyright (C) 2007 innotek GmbH
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */

#ifndef ___VBox_gmm_h
#define ___VBox_gmm_h

#include <VBox/types.h>
#include <VBox/gvmm.h>
#include <VBox/sup.h>

__BEGIN_DECLS

/** @defgroup grp_gmm  GMM - The Global Memory Manager
 * @{
 */

/** @def IN_GMM_R0
 * Used to indicate whether we're inside the same link module as the ring 0
 * part of the Global Memory Manager or not.
 */
/** @def GMMR0DECL
 * Ring 0 GMM export or import declaration.
 * @param   type    The return type of the function declaration.
 */
#ifdef IN_GMM_R0
# define GMMR0DECL(type)    DECLEXPORT(type) VBOXCALL
#else
# define GMMR0DECL(type)    DECLIMPORT(type) VBOXCALL
#endif


/** The chunk shift. (2^20 = 1 MB) */
#define GMM_CHUNK_SHIFT         20
/** The allocation chunk size. */
#define GMM_CHUNK_SIZE          (1U << GMM_CHUNK_SHIFT)
/** The allocation chunk size in pages. */
#define GMM_CHUNK_NUM_PAGES     (1U << (GMM_CHUNK_SHIFT - PAGE_SHIFT))
/** The shift factor for converting a page id into a chunk id. */
#define GMM_CHUNKID_SHIFT       (GMM_CHUNK_SHIFT - PAGE_SHIFT)
/** The last valid Chunk ID value. */
#define GMM_CHUNKID_LAST        (GMM_PAGEID_LAST >> GMM_CHUNKID_SHIFT)
/** The last valid Page ID value.
 * The current limit is 2^28 - 1, or almost 1TB if you like.
 * The constraints are currently dictated by PGMPAGE. */
#define GMM_PAGEID_LAST         (RT_BIT_32(28) - 1)
/** Mask out the page index from the Page ID. */
#define GMM_PAGEID_IDX_MASK     ((1U << GMM_CHUNKID_SHIFT) - 1)
/** The NIL Chunk ID value. */
#define NIL_GMM_CHUNKID         0
/** The NIL Page ID value. */
#define NIL_GMM_PAGEID          0

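/* Illustrative sketch (not part of the GMM API): how a Page ID relates to the
 * chunk that backs it, using only the macros above.  With 4 KB pages a chunk
 * holds 256 pages, so the low GMM_CHUNKID_SHIFT bits are the page index and
 * the rest is the Chunk ID.  The variable names below are hypothetical.
 *
 * @code
 *      uint32_t idPage  = ...;                              // any valid Page ID
 *      uint32_t idChunk = idPage >> GMM_CHUNKID_SHIFT;      // ID of the owning 1 MB chunk
 *      uint32_t iPage   = idPage &  GMM_PAGEID_IDX_MASK;    // page index within that chunk
 *      uint32_t idSame  = (idChunk << GMM_CHUNKID_SHIFT) | iPage;  // reassembled Page ID
 * @endcode
 */
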
#if 0 /* wrong - these are guest page pfns and not page ids! */
/** Special Page ID used by unassigned pages. */
#define GMM_PAGEID_UNASSIGNED   0x0fffffffU
/** Special Page ID used by unsharable pages.
 * Like MMIO2, shadow and heap. This is for later, obviously. */
#define GMM_PAGEID_UNSHARABLE   0x0ffffffeU
/** The end of the valid Page IDs. This is the first special one. */
#define GMM_PAGEID_END          0x0ffffff0U
#endif


/**
 * Over-commitment policy.
 */
typedef enum GMMOCPOLICY
{
    /** The usual invalid 0 value. */
    GMMOCPOLICY_INVALID = 0,
    /** No over-commitment, fully backed.
     * The GMM guarantees that it will be able to allocate all of the
     * guest RAM for a VM with this OC policy. */
    GMMOCPOLICY_NO_OC,
    /** to-be-determined. */
    GMMOCPOLICY_TBD,
    /** The end of the valid policy range. */
    GMMOCPOLICY_END,
    /** The usual 32-bit hack. */
    GMMOCPOLICY_32BIT_HACK = 0x7fffffff
} GMMOCPOLICY;

/**
 * VM / Memory priority.
 */
typedef enum GMMPRIORITY
{
    /** The usual invalid 0 value. */
    GMMPRIORITY_INVALID = 0,
    /** High.
     * When ballooning, ask these VMs last.
     * When running out of memory, try not to interrupt these VMs. */
    GMMPRIORITY_HIGH,
    /** Normal.
     * When ballooning, don't wait to ask these.
     * When running out of memory, pause, save and/or kill these VMs. */
    GMMPRIORITY_NORMAL,
    /** Low.
     * When ballooning, maximize these first.
     * When running out of memory, save or kill these VMs. */
    GMMPRIORITY_LOW,
    /** The end of the valid priority range. */
    GMMPRIORITY_END,
    /** The custom 32-bit type blowup. */
    GMMPRIORITY_32BIT_HACK = 0x7fffffff
} GMMPRIORITY;


/**
 * GMM Memory Accounts.
 */
typedef enum GMMACCOUNT
{
    /** The customary invalid zero entry. */
    GMMACCOUNT_INVALID = 0,
    /** Account with the base allocations. */
    GMMACCOUNT_BASE,
    /** Account with the shadow allocations. */
    GMMACCOUNT_SHADOW,
    /** Account with the fixed allocations. */
    GMMACCOUNT_FIXED,
    /** The end of the valid values. */
    GMMACCOUNT_END,
    /** The usual 32-bit value to finish it off. */
    GMMACCOUNT_32BIT_HACK = 0x7fffffff
} GMMACCOUNT;


/**
 * A page descriptor for use when freeing pages.
 * See GMMR0FreePages, GMMR0BalloonedPages.
 */
typedef struct GMMFREEPAGEDESC
{
    /** The Page ID of the page to be freed. */
    uint32_t idPage;
} GMMFREEPAGEDESC;
/** Pointer to a page descriptor for freeing pages. */
typedef GMMFREEPAGEDESC *PGMMFREEPAGEDESC;


/**
 * A page descriptor for use when updating and allocating pages.
 *
 * This is a bit complicated because we want to do as much as possible
 * with the same structure.
 */
typedef struct GMMPAGEDESC
{
    /** The physical address of the page.
     *
     * @input   GMMR0AllocateHandyPages expects the guest physical address
     *          to update the GMMPAGE structure with. Pass GMM_GCPHYS_UNSHAREABLE
     *          when appropriate and NIL_RTHCPHYS when the page wasn't used
     *          for any specific guest address.
     *
     *          GMMR0AllocatePages expects the guest physical address to put in
     *          the GMMPAGE structure for the page it allocates for this entry.
     *          Pass NIL_RTHCPHYS and GMM_GCPHYS_UNSHAREABLE as above.
     *
     * @output  The host physical address of the allocated page.
     *          NIL_RTHCPHYS on allocation failure.
     *
     * ASSUMES: sizeof(RTHCPHYS) >= sizeof(RTGCPHYS).
     */
    RTHCPHYS HCPhysGCPhys;

    /** The Page ID.
     *
     * @input   GMMR0AllocateHandyPages expects the Page ID of the page to
     *          update here. NIL_GMM_PAGEID means no page should be updated.
     *
     *          GMMR0AllocatePages requires this to be initialized to
     *          NIL_GMM_PAGEID currently.
     *
     * @output  The ID of the page, NIL_GMM_PAGEID if the allocation failed.
     */
    uint32_t idPage;

    /** The Page ID of the shared page that was replaced by this page.
     *
     * @input   GMMR0AllocateHandyPages expects this to indicate a shared
     *          page that has been replaced by this page and should have its
     *          reference counter decremented and perhaps be freed up. Use
     *          NIL_GMM_PAGEID if no shared page was involved.
     *
     *          All other APIs expect NIL_GMM_PAGEID here.
     *
     * @output  All APIs set this to NIL_GMM_PAGEID.
     */
    uint32_t idSharedPage;
} GMMPAGEDESC;
AssertCompileSize(GMMPAGEDESC, 16);
/** Pointer to a page descriptor. */
typedef GMMPAGEDESC *PGMMPAGEDESC;

/** GMMPAGEDESC::HCPhysGCPhys value that indicates that the page is unshareable. */
#define GMM_GCPHYS_UNSHAREABLE  (RTHCPHYS)(0xfffffff0)

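/* Illustrative sketch (hypothetical caller, not part of the GMM API): filling
 * GMMPAGEDESC entries for GMMR0AllocateHandyPages.  paDescs, GCPhys, idOldPage
 * and idOldShared are made-up names; how many entries are updates and how many
 * are allocations is governed by the cPagesToUpdate / cPagesToAlloc parameters.
 *
 * @code
 *      // An entry requesting a fresh page:
 *      paDescs[i].HCPhysGCPhys = GCPhys;            // or NIL_RTHCPHYS / GMM_GCPHYS_UNSHAREABLE
 *      paDescs[i].idPage       = NIL_GMM_PAGEID;    // nothing to update
 *      paDescs[i].idSharedPage = NIL_GMM_PAGEID;    // no shared page being replaced
 *
 *      // An entry updating an existing page that replaces a shared page:
 *      paDescs[j].HCPhysGCPhys = GCPhys;            // the new guest physical address
 *      paDescs[j].idPage       = idOldPage;         // the page to update
 *      paDescs[j].idSharedPage = idOldShared;       // shared page to dereference
 *
 *      // On return, allocated entries carry the host physical address in
 *      // HCPhysGCPhys and the new Page ID in idPage (NIL values on failure).
 * @endcode
 */
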
GMMR0DECL(int) GMMR0Init(void);
GMMR0DECL(void) GMMR0Term(void);
GMMR0DECL(void) GMMR0InitPerVMData(PGVM pGVM);
GMMR0DECL(void) GMMR0CleanupVM(PGVM pGVM);
GMMR0DECL(int) GMMR0InitialReservation(PVM pVM, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages,
                                       GMMOCPOLICY enmPolicy, GMMPRIORITY enmPriority);
GMMR0DECL(int) GMMR0UpdateReservation(PVM pVM, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages);
GMMR0DECL(int) GMMR0AllocateHandyPages(PVM pVM, uint32_t cPagesToUpdate, uint32_t cPagesToAlloc, PGMMPAGEDESC paPages);
GMMR0DECL(int) GMMR0AllocatePages(PVM pVM, uint32_t cPages, PGMMPAGEDESC paPages, GMMACCOUNT enmAccount);
GMMR0DECL(int) GMMR0FreePages(PVM pVM, uint32_t cPages, PGMMFREEPAGEDESC paPages, GMMACCOUNT enmAccount);
GMMR0DECL(int) GMMR0BalloonedPages(PVM pVM, uint32_t cBalloonedPages, uint32_t cPagesToFree, PGMMFREEPAGEDESC paPages, bool fCompleted);
GMMR0DECL(int) GMMR0DeflatedBalloon(PVM pVM, uint32_t cPages);
GMMR0DECL(int) GMMR0MapUnmapChunk(PVM pVM, uint32_t idChunkMap, uint32_t idChunkUnmap, PRTR3PTR ppvR3);
GMMR0DECL(int) GMMR0SeedChunk(PVM pVM, RTR3PTR pvR3);


/**
 * Request buffer for GMMR0InitialReservationReq / VMMR0_DO_GMM_INITIAL_RESERVATION.
 * @see GMMR0InitialReservation
 */
typedef struct GMMINITIALRESERVATIONREQ
{
    /** The header. */
    SUPVMMR0REQHDR Hdr;
    uint64_t cBasePages;            /**< @see GMMR0InitialReservation */
    uint32_t cShadowPages;          /**< @see GMMR0InitialReservation */
    uint32_t cFixedPages;           /**< @see GMMR0InitialReservation */
    GMMOCPOLICY enmPolicy;          /**< @see GMMR0InitialReservation */
    GMMPRIORITY enmPriority;        /**< @see GMMR0InitialReservation */
} GMMINITIALRESERVATIONREQ;
/** Pointer to a GMMR0InitialReservationReq / VMMR0_DO_GMM_INITIAL_RESERVATION request buffer. */
typedef GMMINITIALRESERVATIONREQ *PGMMINITIALRESERVATIONREQ;

GMMR0DECL(int) GMMR0InitialReservationReq(PVM pVM, PGMMINITIALRESERVATIONREQ pReq);

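/* Illustrative sketch (hypothetical ring-3 caller, not part of the GMM API):
 * filling a GMMINITIALRESERVATIONREQ before handing it to ring-0 as the
 * VMMR0_DO_GMM_INITIAL_RESERVATION operation.  Assumes the usual
 * SUPVMMR0REQHDR u32Magic/cbReq convention; the page counts are made-up
 * numbers and the actual dispatch call is omitted.
 *
 * @code
 *      GMMINITIALRESERVATIONREQ Req;
 *      Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
 *      Req.Hdr.cbReq    = sizeof(Req);
 *      Req.cBasePages   = 262144;                   // e.g. 1 GB of guest RAM in 4 KB pages
 *      Req.cShadowPages = 1024;
 *      Req.cFixedPages  = 256;
 *      Req.enmPolicy    = GMMOCPOLICY_NO_OC;
 *      Req.enmPriority  = GMMPRIORITY_NORMAL;
 *      // ... pass &Req.Hdr to ring-0 as VMMR0_DO_GMM_INITIAL_RESERVATION ...
 * @endcode
 */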

/**
 * Request buffer for GMMR0UpdateReservationReq / VMMR0_DO_GMM_UPDATE_RESERVATION.
 * @see GMMR0UpdateReservation
 */
typedef struct GMMUPDATERESERVATIONREQ
{
    /** The header. */
    SUPVMMR0REQHDR Hdr;
    uint64_t cBasePages;            /**< @see GMMR0UpdateReservation */
    uint32_t cShadowPages;          /**< @see GMMR0UpdateReservation */
    uint32_t cFixedPages;           /**< @see GMMR0UpdateReservation */
} GMMUPDATERESERVATIONREQ;
/** Pointer to a GMMR0UpdateReservationReq / VMMR0_DO_GMM_UPDATE_RESERVATION request buffer. */
typedef GMMUPDATERESERVATIONREQ *PGMMUPDATERESERVATIONREQ;

GMMR0DECL(int) GMMR0UpdateReservationReq(PVM pVM, PGMMUPDATERESERVATIONREQ pReq);


/**
 * Request buffer for GMMR0AllocatePagesReq / VMMR0_DO_GMM_ALLOCATE_PAGES.
 * @see GMMR0AllocatePages.
 */
typedef struct GMMALLOCATEPAGESREQ
{
    /** The header. */
    SUPVMMR0REQHDR Hdr;
    /** The account to charge the allocation to. */
    GMMACCOUNT enmAccount;
    /** The number of pages to allocate. */
    uint32_t cPages;
    /** Array of page descriptors. */
    GMMPAGEDESC aPages[1];
} GMMALLOCATEPAGESREQ;
/** Pointer to a GMMR0AllocatePagesReq / VMMR0_DO_GMM_ALLOCATE_PAGES request buffer. */
typedef GMMALLOCATEPAGESREQ *PGMMALLOCATEPAGESREQ;

GMMR0DECL(int) GMMR0AllocatePagesReq(PVM pVM, PGMMALLOCATEPAGESREQ pReq);

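/* Illustrative sketch (hypothetical ring-3 caller, not part of the GMM API):
 * building a variable-sized GMMALLOCATEPAGESREQ for cPages pages.  Assumes the
 * IPRT RT_OFFSETOF, RTMemAllocZ and RTMemFree helpers; error handling and the
 * actual VMMR0_DO_GMM_ALLOCATE_PAGES dispatch are omitted.
 *
 * @code
 *      uint32_t             cPages = 32;
 *      uint32_t             cbReq  = RT_OFFSETOF(GMMALLOCATEPAGESREQ, aPages[cPages]);
 *      PGMMALLOCATEPAGESREQ pReq   = (PGMMALLOCATEPAGESREQ)RTMemAllocZ(cbReq);
 *      pReq->Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
 *      pReq->Hdr.cbReq    = cbReq;
 *      pReq->enmAccount   = GMMACCOUNT_BASE;
 *      pReq->cPages       = cPages;
 *      for (uint32_t i = 0; i < cPages; i++)
 *      {
 *          pReq->aPages[i].HCPhysGCPhys = NIL_RTHCPHYS;     // no specific guest address yet
 *          pReq->aPages[i].idPage       = NIL_GMM_PAGEID;
 *          pReq->aPages[i].idSharedPage = NIL_GMM_PAGEID;
 *      }
 *      // ... pass &pReq->Hdr to ring-0 as VMMR0_DO_GMM_ALLOCATE_PAGES, then
 *      //     read back aPages[i].idPage / HCPhysGCPhys and RTMemFree(pReq) ...
 * @endcode
 */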

/**
 * Request buffer for GMMR0FreePagesReq / VMMR0_DO_GMM_FREE_PAGES.
 * @see GMMR0FreePages.
 */
typedef struct GMMFREEPAGESREQ
{
    /** The header. */
    SUPVMMR0REQHDR Hdr;
    /** The account this relates to. */
    GMMACCOUNT enmAccount;
    /** The number of pages to free. */
    uint32_t cPages;
    /** Array of free page descriptors. */
    GMMFREEPAGEDESC aPages[1];
} GMMFREEPAGESREQ;
/** Pointer to a GMMR0FreePagesReq / VMMR0_DO_GMM_FREE_PAGES request buffer. */
typedef GMMFREEPAGESREQ *PGMMFREEPAGESREQ;

GMMR0DECL(int) GMMR0FreePagesReq(PVM pVM, PGMMFREEPAGESREQ pReq);

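/* Illustrative sketch (hypothetical ring-3 caller, not part of the GMM API):
 * freeing previously allocated pages by their Page IDs.  Sized and dispatched
 * like the allocation request above, but as VMMR0_DO_GMM_FREE_PAGES; cPages
 * and the paidPages array of Page IDs are made-up names.
 *
 * @code
 *      uint32_t         cbReq = RT_OFFSETOF(GMMFREEPAGESREQ, aPages[cPages]);
 *      PGMMFREEPAGESREQ pReq  = (PGMMFREEPAGESREQ)RTMemAllocZ(cbReq);
 *      pReq->Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
 *      pReq->Hdr.cbReq    = cbReq;
 *      pReq->enmAccount   = GMMACCOUNT_BASE;        // the account this relates to
 *      pReq->cPages       = cPages;
 *      for (uint32_t i = 0; i < cPages; i++)
 *          pReq->aPages[i].idPage = paidPages[i];   // Page IDs returned by the allocation
 * @endcode
 */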

/**
 * Request buffer for GMMR0BalloonedPagesReq / VMMR0_DO_GMM_BALLOONED_PAGES.
 * @see GMMR0BalloonedPages.
 */
typedef struct GMMBALLOONEDPAGESREQ
{
    /** The header. */
    SUPVMMR0REQHDR Hdr;
    /** The number of ballooned pages. */
    uint32_t cBalloonedPages;
    /** The number of pages to free. */
    uint32_t cPagesToFree;
    /** Whether the ballooning request is completed or more pages are still to come. */
    bool fCompleted;
    /** Array of free page descriptors. */
    GMMFREEPAGEDESC aPages[1];
} GMMBALLOONEDPAGESREQ;
/** Pointer to a GMMR0BalloonedPagesReq / VMMR0_DO_GMM_BALLOONED_PAGES request buffer. */
typedef GMMBALLOONEDPAGESREQ *PGMMBALLOONEDPAGESREQ;

GMMR0DECL(int) GMMR0BalloonedPagesReq(PVM pVM, PGMMBALLOONEDPAGESREQ pReq);


/**
 * Request buffer for GMMR0MapUnmapChunkReq / VMMR0_DO_GMM_MAP_UNMAP_CHUNK.
 * @see GMMR0MapUnmapChunk
 */
typedef struct GMMMAPUNMAPCHUNKREQ
{
    /** The header. */
    SUPVMMR0REQHDR Hdr;
    /** The chunk to map, UINT32_MAX if unmap only. (IN) */
    uint32_t idChunkMap;
    /** The chunk to unmap, UINT32_MAX if map only. (IN) */
    uint32_t idChunkUnmap;
    /** Where the mapping address is returned. (OUT) */
    RTR3PTR pvR3;
} GMMMAPUNMAPCHUNKREQ;
/** Pointer to a GMMR0MapUnmapChunkReq / VMMR0_DO_GMM_MAP_UNMAP_CHUNK request buffer. */
typedef GMMMAPUNMAPCHUNKREQ *PGMMMAPUNMAPCHUNKREQ;

GMMR0DECL(int) GMMR0MapUnmapChunkReq(PVM pVM, PGMMMAPUNMAPCHUNKREQ pReq);

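/* Illustrative sketch (hypothetical ring-3 caller, not part of the GMM API):
 * mapping one chunk into ring-3 while unmapping another in the same
 * VMMR0_DO_GMM_MAP_UNMAP_CHUNK round trip.  Pass UINT32_MAX for the operation
 * you don't need; idNewChunk and idOldChunk are made-up names.
 *
 * @code
 *      GMMMAPUNMAPCHUNKREQ Req;
 *      Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
 *      Req.Hdr.cbReq    = sizeof(Req);
 *      Req.idChunkMap   = idNewChunk;               // chunk to map, or UINT32_MAX
 *      Req.idChunkUnmap = idOldChunk;               // chunk to unmap, or UINT32_MAX
 *      Req.pvR3         = NIL_RTR3PTR;
 *      // ... dispatch; on success Req.pvR3 holds the ring-3 mapping of idNewChunk ...
 * @endcode
 */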

/** @} */

__END_DECLS

#endif
