VirtualBox

source: vbox/trunk/include/VBox/gmm.h@ 29457

/** @file
 * GMM - The Global Memory Manager. (VMM)
 */

/*
 * Copyright (C) 2007 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */

#ifndef ___VBox_gmm_h
#define ___VBox_gmm_h

#include <VBox/types.h>
#include <VBox/gvmm.h>
#include <VBox/sup.h>
#include <VBox/VMMDev.h> /* for VMMDEVSHAREDREGIONDESC */
#include <iprt/avl.h>
RT_C_DECLS_BEGIN

/** @defgroup grp_gmm GMM - The Global Memory Manager
 * @{
 */

/** @def IN_GMM_R0
 * Used to indicate whether we're inside the same link module as the ring 0
 * part of the Global Memory Manager or not.
 */
#ifdef DOXYGEN_RUNNING
# define IN_GMM_R0
#endif
/** @def GMMR0DECL
 * Ring 0 GMM export or import declaration.
 * @param type The return type of the function declaration.
 */
#ifdef IN_GMM_R0
# define GMMR0DECL(type) DECLEXPORT(type) VBOXCALL
#else
# define GMMR0DECL(type) DECLIMPORT(type) VBOXCALL
#endif

/** @def IN_GMM_R3
 * Used to indicate whether we're inside the same link module as the ring 3
 * part of the Global Memory Manager or not.
 */
#ifdef DOXYGEN_RUNNING
# define IN_GMM_R3
#endif
/** @def GMMR3DECL
 * Ring 3 GMM export or import declaration.
 * @param type The return type of the function declaration.
 */
#ifdef IN_GMM_R3
# define GMMR3DECL(type) DECLEXPORT(type) VBOXCALL
#else
# define GMMR3DECL(type) DECLIMPORT(type) VBOXCALL
#endif


/** The chunk shift. (2^21 = 2 MB) */
#define GMM_CHUNK_SHIFT         21
/** The allocation chunk size. */
#define GMM_CHUNK_SIZE          (1U << GMM_CHUNK_SHIFT)
/** The allocation chunk size in pages. */
#define GMM_CHUNK_NUM_PAGES     (1U << (GMM_CHUNK_SHIFT - PAGE_SHIFT))
/** The shift factor for converting a page id into a chunk id. */
#define GMM_CHUNKID_SHIFT       (GMM_CHUNK_SHIFT - PAGE_SHIFT)
/** The last valid Chunk ID value. */
#define GMM_CHUNKID_LAST        (GMM_PAGEID_LAST >> GMM_CHUNKID_SHIFT)
/** The last valid Page ID value.
 * The current limit is 2^28 - 1, or almost 1TB if you like.
 * The constraints are currently dictated by PGMPAGE. */
#define GMM_PAGEID_LAST         (RT_BIT_32(28) - 1)
/** Mask out the page index from the Page ID. */
#define GMM_PAGEID_IDX_MASK     ((1U << GMM_CHUNKID_SHIFT) - 1)
/** The NIL Chunk ID value. */
#define NIL_GMM_CHUNKID         0
/** The NIL Page ID value. */
#define NIL_GMM_PAGEID          0

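/*
 * Example (illustrative sketch, not part of the GMM API): a Page ID carries the
 * owning Chunk ID in its upper bits and the page index within that chunk in its
 * lower GMM_CHUNKID_SHIFT bits, so the constants above are combined like this
 * (assuming the usual PAGE_SHIFT of 12, i.e. 512 pages per 2 MB chunk; the
 * variable names are placeholders):
 *
 *      uint32_t idChunk    = idPage >> GMM_CHUNKID_SHIFT;   // owning allocation chunk
 *      uint32_t iChunkPage = idPage &  GMM_PAGEID_IDX_MASK; // page index within that chunk
 *      Assert(idPage <= GMM_PAGEID_LAST && idChunk <= GMM_CHUNKID_LAST);
 */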
#if 0 /* wrong - these are guest page pfns and not page ids! */
/** Special Page ID used by unassigned pages. */
#define GMM_PAGEID_UNASSIGNED   0x0fffffffU
/** Special Page ID used by unsharable pages.
 * Like MMIO2, shadow and heap. This is for later, obviously. */
#define GMM_PAGEID_UNSHARABLE   0x0ffffffeU
/** The end of the valid Page IDs. This is the first special one. */
#define GMM_PAGEID_END          0x0ffffff0U
#endif


/** @def GMM_GCPHYS_LAST
 * The last valid guest physical address as it applies to GMM pages.
 *
 * This must reflect the constraints imposed by the RTGCPHYS type and
 * the guest page frame number used internally in GMMPAGE.
 *
 * @note This corresponds to GMM_PAGE_PFN_LAST. */
#if HC_ARCH_BITS == 64
# define GMM_GCPHYS_LAST        UINT64_C(0x00000fffffff0000)    /* 2^44 (16TB) - 0x10000 */
#else
# define GMM_GCPHYS_LAST        UINT64_C(0x0000000fffff0000)    /* 2^36 (64GB) - 0x10000 */
#endif

/**
 * Over-commitment policy.
 */
typedef enum GMMOCPOLICY
{
    /** The usual invalid 0 value. */
    GMMOCPOLICY_INVALID = 0,
    /** No over-commitment, fully backed.
     * The GMM guarantees that it will be able to allocate all of the
     * guest RAM for a VM with this policy. */
    GMMOCPOLICY_NO_OC,
    /** to-be-determined. */
    GMMOCPOLICY_TBD,
    /** The end of the valid policy range. */
    GMMOCPOLICY_END,
    /** The usual 32-bit hack. */
    GMMOCPOLICY_32BIT_HACK = 0x7fffffff
} GMMOCPOLICY;

/**
 * VM / Memory priority.
 */
typedef enum GMMPRIORITY
{
    /** The usual invalid 0 value. */
    GMMPRIORITY_INVALID = 0,
    /** High.
     * When ballooning, ask these VMs last.
     * When running out of memory, try not to interrupt these VMs. */
    GMMPRIORITY_HIGH,
    /** Normal.
     * When ballooning, don't wait to ask these.
     * When running out of memory, pause, save and/or kill these VMs. */
    GMMPRIORITY_NORMAL,
    /** Low.
     * When ballooning, maximize these first.
     * When running out of memory, save or kill these VMs. */
    GMMPRIORITY_LOW,
    /** The end of the valid priority range. */
    GMMPRIORITY_END,
    /** The custom 32-bit type blowup. */
    GMMPRIORITY_32BIT_HACK = 0x7fffffff
} GMMPRIORITY;


/**
 * GMM Memory Accounts.
 */
typedef enum GMMACCOUNT
{
    /** The customary invalid zero entry. */
    GMMACCOUNT_INVALID = 0,
    /** Account with the base allocations. */
    GMMACCOUNT_BASE,
    /** Account with the shadow allocations. */
    GMMACCOUNT_SHADOW,
    /** Account with the fixed allocations. */
    GMMACCOUNT_FIXED,
    /** The end of the valid values. */
    GMMACCOUNT_END,
    /** The usual 32-bit value to finish it off. */
    GMMACCOUNT_32BIT_HACK = 0x7fffffff
} GMMACCOUNT;


/**
 * Balloon actions.
 */
typedef enum
{
    /** Invalid zero entry. */
    GMMBALLOONACTION_INVALID = 0,
    /** Inflate the balloon. */
    GMMBALLOONACTION_INFLATE,
    /** Deflate the balloon. */
    GMMBALLOONACTION_DEFLATE,
    /** Puncture the balloon because of VM reset. */
    GMMBALLOONACTION_RESET,
    /** End of the valid actions. */
    GMMBALLOONACTION_END,
    /** hack forcing the size of the enum to 32-bits. */
    GMMBALLOONACTION_MAKE_32BIT_HACK = 0x7fffffff
} GMMBALLOONACTION;


/**
 * A page descriptor for use when freeing pages.
 * See GMMR0FreePages, GMMR0BalloonedPages.
 */
typedef struct GMMFREEPAGEDESC
{
    /** The Page ID of the page to be freed. */
    uint32_t    idPage;
} GMMFREEPAGEDESC;
/** Pointer to a page descriptor for freeing pages. */
typedef GMMFREEPAGEDESC *PGMMFREEPAGEDESC;


/**
 * A page descriptor for use when updating and allocating pages.
 *
 * This is a bit complicated because we want to do as much as possible
 * with the same structure.
 */
typedef struct GMMPAGEDESC
{
    /** The physical address of the page.
     *
     * @input   GMMR0AllocateHandyPages expects the guest physical address
     *          to update the GMMPAGE structure with. Pass GMM_GCPHYS_UNSHAREABLE
     *          when appropriate and NIL_RTHCPHYS when the page wasn't used
     *          for any specific guest address.
     *
     *          GMMR0AllocatePages expects the guest physical address to put in
     *          the GMMPAGE structure for the page it allocates for this entry.
     *          Pass NIL_RTHCPHYS and GMM_GCPHYS_UNSHAREABLE as above.
     *
     * @output  The host physical address of the allocated page.
     *          NIL_RTHCPHYS on allocation failure.
     *
     * ASSUMES: sizeof(RTHCPHYS) >= sizeof(RTGCPHYS).
     */
    RTHCPHYS    HCPhysGCPhys;

    /** The Page ID.
     *
     * @input   GMMR0AllocateHandyPages expects the Page ID of the page to
     *          update here. NIL_GMM_PAGEID means no page should be updated.
     *
     *          GMMR0AllocatePages requires this to be initialized to
     *          NIL_GMM_PAGEID currently.
     *
     * @output  The ID of the page, NIL_GMM_PAGEID if the allocation failed.
     */
    uint32_t    idPage;

    /** The Page ID of the shared page that was replaced by this page.
     *
     * @input   GMMR0AllocateHandyPages expects this to indicate a shared
     *          page that has been replaced by this page and should have its
     *          reference counter decremented and perhaps be freed up. Use
     *          NIL_GMM_PAGEID if no shared page was involved.
     *
     *          All other APIs expect NIL_GMM_PAGEID here.
     *
     * @output  All APIs set this to NIL_GMM_PAGEID.
     */
    uint32_t    idSharedPage;
} GMMPAGEDESC;
AssertCompileSize(GMMPAGEDESC, 16);
/** Pointer to a page allocation. */
typedef GMMPAGEDESC *PGMMPAGEDESC;

/** GMMPAGEDESC::HCPhysGCPhys value that indicates that the page is unsharable.
 * @note This corresponds to GMM_PAGE_PFN_UNSHAREABLE. */
#if HC_ARCH_BITS == 64
# define GMM_GCPHYS_UNSHAREABLE UINT64_C(0x00000fffffff1000)
#else
# define GMM_GCPHYS_UNSHAREABLE UINT64_C(0x0000000fffff1000)
#endif


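/*
 * Example (illustrative sketch): initializing a single GMMPAGEDESC entry for a
 * GMMR0AllocatePages call and picking up the result. GCPhysGuest and Desc are
 * placeholder names; error handling is reduced to a bare RT_SUCCESS check.
 *
 *      GMMPAGEDESC Desc;
 *      Desc.HCPhysGCPhys = GCPhysGuest;        // guest address the page will back, or
 *                                              // GMM_GCPHYS_UNSHAREABLE / NIL_RTHCPHYS
 *      Desc.idPage       = NIL_GMM_PAGEID;     // required input value for GMMR0AllocatePages
 *      Desc.idSharedPage = NIL_GMM_PAGEID;     // only GMMR0AllocateHandyPages uses this field
 *
 *      int rc = GMMR0AllocatePages(pVM, idCpu, 1, &Desc, GMMACCOUNT_BASE);
 *      if (RT_SUCCESS(rc))
 *      {
 *          // Desc.idPage now holds the new Page ID and Desc.HCPhysGCPhys the
 *          // host physical address of the allocated page.
 *      }
 */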
GMMR0DECL(int)  GMMR0Init(void);
GMMR0DECL(void) GMMR0Term(void);
GMMR0DECL(void) GMMR0InitPerVMData(PGVM pGVM);
GMMR0DECL(void) GMMR0CleanupVM(PGVM pGVM);
GMMR0DECL(int)  GMMR0InitialReservation(PVM pVM, VMCPUID idCpu, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages,
                                        GMMOCPOLICY enmPolicy, GMMPRIORITY enmPriority);
GMMR0DECL(int)  GMMR0UpdateReservation(PVM pVM, VMCPUID idCpu, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages);
GMMR0DECL(int)  GMMR0AllocateHandyPages(PVM pVM, VMCPUID idCpu, uint32_t cPagesToUpdate, uint32_t cPagesToAlloc, PGMMPAGEDESC paPages);
GMMR0DECL(int)  GMMR0AllocatePages(PVM pVM, VMCPUID idCpu, uint32_t cPages, PGMMPAGEDESC paPages, GMMACCOUNT enmAccount);
GMMR0DECL(int)  GMMR0AllocateLargePage(PVM pVM, VMCPUID idCpu, uint32_t cbPage, uint32_t *pIdPage, RTHCPHYS *pHCPhys);
GMMR0DECL(int)  GMMR0FreePages(PVM pVM, VMCPUID idCpu, uint32_t cPages, PGMMFREEPAGEDESC paPages, GMMACCOUNT enmAccount);
GMMR0DECL(int)  GMMR0FreeLargePage(PVM pVM, VMCPUID idCpu, uint32_t idPage);
GMMR0DECL(int)  GMMR0BalloonedPages(PVM pVM, VMCPUID idCpu, GMMBALLOONACTION enmAction, uint32_t cBalloonedPages);
GMMR0DECL(int)  GMMR0MapUnmapChunk(PVM pVM, VMCPUID idCpu, uint32_t idChunkMap, uint32_t idChunkUnmap, PRTR3PTR ppvR3);
GMMR0DECL(int)  GMMR0SeedChunk(PVM pVM, VMCPUID idCpu, RTR3PTR pvR3);
GMMR0DECL(int)  GMMR0RegisterSharedModule(PVM pVM, VMCPUID idCpu, VBOXOSFAMILY enmGuestOS, char *pszModuleName, char *pszVersion, RTGCPTR GCBaseAddr, uint32_t cbModule, unsigned cRegions, VMMDEVSHAREDREGIONDESC *pRegions);
GMMR0DECL(int)  GMMR0UnregisterSharedModule(PVM pVM, VMCPUID idCpu, char *pszModuleName, char *pszVersion, RTGCPTR GCBaseAddr, uint32_t cbModule);
GMMR0DECL(int)  GMMR0UnregisterAllSharedModules(PVM pVM, VMCPUID idCpu);
GMMR0DECL(int)  GMMR0CheckSharedModules(PVM pVM, PVMCPU pVCpu);
GMMR0DECL(int)  GMMR0ResetSharedModules(PVM pVM, VMCPUID idCpu);



/**
 * Request buffer for GMMR0InitialReservationReq / VMMR0_DO_GMM_INITIAL_RESERVATION.
 * @see GMMR0InitialReservation
 */
typedef struct GMMINITIALRESERVATIONREQ
{
    /** The header. */
    SUPVMMR0REQHDR  Hdr;
    uint64_t        cBasePages;     /**< @see GMMR0InitialReservation */
    uint32_t        cShadowPages;   /**< @see GMMR0InitialReservation */
    uint32_t        cFixedPages;    /**< @see GMMR0InitialReservation */
    GMMOCPOLICY     enmPolicy;      /**< @see GMMR0InitialReservation */
    GMMPRIORITY     enmPriority;    /**< @see GMMR0InitialReservation */
} GMMINITIALRESERVATIONREQ;
/** Pointer to a GMMR0InitialReservationReq / VMMR0_DO_GMM_INITIAL_RESERVATION request buffer. */
typedef GMMINITIALRESERVATIONREQ *PGMMINITIALRESERVATIONREQ;

GMMR0DECL(int) GMMR0InitialReservationReq(PVM pVM, VMCPUID idCpu, PGMMINITIALRESERVATIONREQ pReq);


/**
 * Request buffer for GMMR0UpdateReservationReq / VMMR0_DO_GMM_UPDATE_RESERVATION.
 * @see GMMR0UpdateReservation
 */
typedef struct GMMUPDATERESERVATIONREQ
{
    /** The header. */
    SUPVMMR0REQHDR  Hdr;
    uint64_t        cBasePages;     /**< @see GMMR0UpdateReservation */
    uint32_t        cShadowPages;   /**< @see GMMR0UpdateReservation */
    uint32_t        cFixedPages;    /**< @see GMMR0UpdateReservation */
} GMMUPDATERESERVATIONREQ;
/** Pointer to a GMMR0UpdateReservationReq / VMMR0_DO_GMM_UPDATE_RESERVATION request buffer. */
typedef GMMUPDATERESERVATIONREQ *PGMMUPDATERESERVATIONREQ;

GMMR0DECL(int) GMMR0UpdateReservationReq(PVM pVM, VMCPUID idCpu, PGMMUPDATERESERVATIONREQ pReq);


/**
 * Request buffer for GMMR0AllocatePagesReq / VMMR0_DO_GMM_ALLOCATE_PAGES.
 * @see GMMR0AllocatePages.
 */
typedef struct GMMALLOCATEPAGESREQ
{
    /** The header. */
    SUPVMMR0REQHDR  Hdr;
    /** The account to charge the allocation to. */
    GMMACCOUNT      enmAccount;
    /** The number of pages to allocate. */
    uint32_t        cPages;
    /** Array of page descriptors. */
    GMMPAGEDESC     aPages[1];
} GMMALLOCATEPAGESREQ;
/** Pointer to a GMMR0AllocatePagesReq / VMMR0_DO_GMM_ALLOCATE_PAGES request buffer. */
typedef GMMALLOCATEPAGESREQ *PGMMALLOCATEPAGESREQ;

GMMR0DECL(int) GMMR0AllocatePagesReq(PVM pVM, VMCPUID idCpu, PGMMALLOCATEPAGESREQ pReq);


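/*
 * Example (illustrative sketch of the request buffer convention): aPages[1] is a
 * variable sized array, so the buffer must be allocated large enough for cPages
 * descriptors and the SUPVMMR0REQHDR must describe the full size. This is roughly
 * what the ring-3 wrapper GMMR3AllocatePagesPrepare does; cPages and pReq are
 * placeholder names and RTMemAllocZ comes from iprt/mem.h.
 *
 *      uint32_t             cPages = 32;
 *      uint32_t             cbReq  = RT_OFFSETOF(GMMALLOCATEPAGESREQ, aPages[cPages]);
 *      PGMMALLOCATEPAGESREQ pReq   = (PGMMALLOCATEPAGESREQ)RTMemAllocZ(cbReq);
 *      if (pReq)
 *      {
 *          pReq->Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
 *          pReq->Hdr.cbReq    = cbReq;
 *          pReq->enmAccount   = GMMACCOUNT_BASE;
 *          pReq->cPages       = cPages;
 *          // ... fill pReq->aPages[0..cPages-1] as described for GMMPAGEDESC ...
 *      }
 */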
/**
 * Request buffer for GMMR0FreePagesReq / VMMR0_DO_GMM_FREE_PAGES.
 * @see GMMR0FreePages.
 */
typedef struct GMMFREEPAGESREQ
{
    /** The header. */
    SUPVMMR0REQHDR  Hdr;
    /** The account this relates to. */
    GMMACCOUNT      enmAccount;
    /** The number of pages to free. */
    uint32_t        cPages;
    /** Array of free page descriptors. */
    GMMFREEPAGEDESC aPages[1];
} GMMFREEPAGESREQ;
/** Pointer to a GMMR0FreePagesReq / VMMR0_DO_GMM_FREE_PAGES request buffer. */
typedef GMMFREEPAGESREQ *PGMMFREEPAGESREQ;

GMMR0DECL(int) GMMR0FreePagesReq(PVM pVM, VMCPUID idCpu, PGMMFREEPAGESREQ pReq);

/**
 * Request buffer for GMMR0BalloonedPagesReq / VMMR0_DO_GMM_BALLOONED_PAGES.
 * @see GMMR0BalloonedPages.
 */
typedef struct GMMBALLOONEDPAGESREQ
{
    /** The header. */
    SUPVMMR0REQHDR      Hdr;
    /** The number of ballooned pages. */
    uint32_t            cBalloonedPages;
    /** Inflate or deflate the balloon. */
    GMMBALLOONACTION    enmAction;
} GMMBALLOONEDPAGESREQ;
/** Pointer to a GMMR0BalloonedPagesReq / VMMR0_DO_GMM_BALLOONED_PAGES request buffer. */
typedef GMMBALLOONEDPAGESREQ *PGMMBALLOONEDPAGESREQ;

GMMR0DECL(int) GMMR0BalloonedPagesReq(PVM pVM, VMCPUID idCpu, PGMMBALLOONEDPAGESREQ pReq);


/**
 * Request buffer for GMMR0QueryHypervisorMemoryStatsReq / VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS.
 * @see GMMR0QueryHypervisorMemoryStatsReq.
 */
typedef struct GMMMEMSTATSREQ
{
    /** The header. */
    SUPVMMR0REQHDR  Hdr;
    /** The number of allocated pages (out). */
    uint64_t        cAllocPages;
    /** The number of free pages (out). */
    uint64_t        cFreePages;
    /** The number of ballooned pages (out). */
    uint64_t        cBalloonedPages;
    /** Maximum number of pages (out). */
    uint64_t        cMaxPages;
} GMMMEMSTATSREQ;
/** Pointer to a GMMR0QueryHypervisorMemoryStatsReq / VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS request buffer. */
typedef GMMMEMSTATSREQ *PGMMMEMSTATSREQ;

GMMR0DECL(int) GMMR0QueryHypervisorMemoryStatsReq(PVM pVM, PGMMMEMSTATSREQ pReq);
GMMR0DECL(int) GMMR0QueryMemoryStatsReq(PVM pVM, VMCPUID idCpu, PGMMMEMSTATSREQ pReq);

/**
 * Request buffer for GMMR0MapUnmapChunkReq / VMMR0_DO_GMM_MAP_UNMAP_CHUNK.
 * @see GMMR0MapUnmapChunk
 */
typedef struct GMMMAPUNMAPCHUNKREQ
{
    /** The header. */
    SUPVMMR0REQHDR  Hdr;
    /** The chunk to map, NIL_GMM_CHUNKID if unmap only. (IN) */
    uint32_t        idChunkMap;
    /** The chunk to unmap, NIL_GMM_CHUNKID if map only. (IN) */
    uint32_t        idChunkUnmap;
    /** Where the mapping address is returned. (OUT) */
    RTR3PTR         pvR3;
} GMMMAPUNMAPCHUNKREQ;
/** Pointer to a GMMR0MapUnmapChunkReq / VMMR0_DO_GMM_MAP_UNMAP_CHUNK request buffer. */
typedef GMMMAPUNMAPCHUNKREQ *PGMMMAPUNMAPCHUNKREQ;

GMMR0DECL(int) GMMR0MapUnmapChunkReq(PVM pVM, VMCPUID idCpu, PGMMMAPUNMAPCHUNKREQ pReq);


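/*
 * Example (illustrative sketch): mapping one chunk into ring-3 and unmapping
 * nothing in the same call; either half can be skipped by passing NIL_GMM_CHUNKID.
 * idChunkToMap is a placeholder; the ring-3 wrapper GMMR3MapUnmapChunk normally
 * hides this buffer.
 *
 *      GMMMAPUNMAPCHUNKREQ Req;
 *      Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
 *      Req.Hdr.cbReq    = sizeof(Req);
 *      Req.idChunkMap   = idChunkToMap;        // or NIL_GMM_CHUNKID to only unmap
 *      Req.idChunkUnmap = NIL_GMM_CHUNKID;     // nothing to unmap in this example
 *      Req.pvR3         = NIL_RTR3PTR;
 *      int rc = GMMR0MapUnmapChunkReq(pVM, idCpu, &Req);
 *      // On success Req.pvR3 holds the ring-3 address of the mapped chunk.
 */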
/**
 * Request buffer for GMMR0FreeLargePageReq / VMMR0_DO_GMM_FREE_LARGE_PAGE.
 * @see GMMR0FreeLargePage.
 */
typedef struct GMMFREELARGEPAGEREQ
{
    /** The header. */
    SUPVMMR0REQHDR  Hdr;
    /** The Page ID. */
    uint32_t        idPage;
} GMMFREELARGEPAGEREQ;
/** Pointer to a GMMR0FreeLargePageReq / VMMR0_DO_GMM_FREE_LARGE_PAGE request buffer. */
typedef GMMFREELARGEPAGEREQ *PGMMFREELARGEPAGEREQ;

GMMR0DECL(int) GMMR0FreeLargePageReq(PVM pVM, VMCPUID idCpu, PGMMFREELARGEPAGEREQ pReq);

/** Maximum length of the shared module name string. */
#define GMM_SHARED_MODULE_MAX_NAME_STRING       128
/** Maximum length of the shared module version string. */
#define GMM_SHARED_MODULE_MAX_VERSION_STRING    16

/**
 * Request buffer for GMMR0RegisterSharedModuleReq / VMMR0_DO_GMM_REGISTER_SHARED_MODULE.
 * @see GMMR0RegisterSharedModule.
 */
typedef struct GMMREGISTERSHAREDMODULEREQ
{
    /** The header. */
    SUPVMMR0REQHDR  Hdr;
    /** Shared module size. */
    uint32_t        cbModule;
    /** Number of included region descriptors */
    uint32_t        cRegions;
    /** Base address of the shared module. */
    RTGCPTR64       GCBaseAddr;
    /** Guest OS type. */
    VBOXOSFAMILY    enmGuestOS;
    /** Alignment. */
    uint32_t        u32Alignment;
    /** Module name */
    char            szName[GMM_SHARED_MODULE_MAX_NAME_STRING];
    /** Module version */
    char            szVersion[GMM_SHARED_MODULE_MAX_VERSION_STRING];
    /** Shared region descriptor(s). */
    VMMDEVSHAREDREGIONDESC  aRegions[1];
} GMMREGISTERSHAREDMODULEREQ;
/** Pointer to a GMMR0RegisterSharedModuleReq / VMMR0_DO_GMM_REGISTER_SHARED_MODULE request buffer. */
typedef GMMREGISTERSHAREDMODULEREQ *PGMMREGISTERSHAREDMODULEREQ;

GMMR0DECL(int) GMMR0RegisterSharedModuleReq(PVM pVM, VMCPUID idCpu, PGMMREGISTERSHAREDMODULEREQ pReq);

/**
 * Shared region descriptor
 */
typedef struct GMMSHAREDREGIONDESC
{
    /** Region base address. */
    RTGCPTR64   GCRegionAddr;
    /** Region size. */
    uint32_t    cbRegion;
    /** Alignment. */
    uint32_t    u32Alignment;
    /** Pointer to physical page id array. */
    uint32_t   *paHCPhysPageID;
} GMMSHAREDREGIONDESC;
/** Pointer to a GMMSHAREDREGIONDESC. */
typedef GMMSHAREDREGIONDESC *PGMMSHAREDREGIONDESC;


/**
 * Shared module registration info (global)
 */
typedef struct GMMSHAREDMODULE
{
    /** Tree node. */
    AVLGCPTRNODECORE    Core;
    /** Shared module size. */
    uint32_t            cbModule;
    /** Number of included region descriptors */
    uint32_t            cRegions;
    /** Number of users (VMs). */
    uint32_t            cUsers;
    /** Guest OS family type. */
    VBOXOSFAMILY        enmGuestOS;
    /** Module name */
    char                szName[GMM_SHARED_MODULE_MAX_NAME_STRING];
    /** Module version */
    char                szVersion[GMM_SHARED_MODULE_MAX_VERSION_STRING];
    /** Shared region descriptor(s). */
    GMMSHAREDREGIONDESC aRegions[1];
} GMMSHAREDMODULE;
/** Pointer to a GMMSHAREDMODULE. */
typedef GMMSHAREDMODULE *PGMMSHAREDMODULE;

/**
 * Page descriptor for GMMR0SharedModuleCheckRange
 */
typedef struct GMMSHAREDPAGEDESC
{
    /** HC Physical address (in/out) */
    RTHCPHYS    HCPhys;
    /** GC Physical address (in) */
    RTGCPHYS    GCPhys;
    /** GMM page id. (in/out) */
    uint32_t    uHCPhysPageId;
    /** Align at 8 byte boundary. */
    uint32_t    uAlignment;
} GMMSHAREDPAGEDESC;
/** Pointer to a GMMSHAREDPAGEDESC. */
typedef GMMSHAREDPAGEDESC *PGMMSHAREDPAGEDESC;

GMMR0DECL(int) GMMR0SharedModuleCheckRange(PGVM pGVM, PGMMSHAREDMODULE pModule, unsigned idxRegion, unsigned cPages, PGMMSHAREDPAGEDESC paPageDesc);

/**
 * Request buffer for GMMR0UnregisterSharedModuleReq / VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE.
 * @see GMMR0UnregisterSharedModule.
 */
typedef struct GMMUNREGISTERSHAREDMODULEREQ
{
    /** The header. */
    SUPVMMR0REQHDR  Hdr;
    /** Shared module size. */
    uint32_t        cbModule;
    /** Align at 8 byte boundary. */
    uint32_t        u32Alignment;
    /** Base address of the shared module. */
    RTGCPTR64       GCBaseAddr;
    /** Module name */
    char            szName[GMM_SHARED_MODULE_MAX_NAME_STRING];
    /** Module version */
    char            szVersion[GMM_SHARED_MODULE_MAX_VERSION_STRING];
} GMMUNREGISTERSHAREDMODULEREQ;
/** Pointer to a GMMR0UnregisterSharedModuleReq / VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE request buffer. */
typedef GMMUNREGISTERSHAREDMODULEREQ *PGMMUNREGISTERSHAREDMODULEREQ;

GMMR0DECL(int) GMMR0UnregisterSharedModuleReq(PVM pVM, VMCPUID idCpu, PGMMUNREGISTERSHAREDMODULEREQ pReq);


#ifdef IN_RING3
/** @defgroup grp_gmm_r3 The Global Memory Manager Ring-3 API Wrappers
 * @ingroup grp_gmm
 * @{
 */
GMMR3DECL(int)  GMMR3InitialReservation(PVM pVM, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages,
                                        GMMOCPOLICY enmPolicy, GMMPRIORITY enmPriority);
GMMR3DECL(int)  GMMR3UpdateReservation(PVM pVM, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages);
GMMR3DECL(int)  GMMR3AllocatePagesPrepare(PVM pVM, PGMMALLOCATEPAGESREQ *ppReq, uint32_t cPages, GMMACCOUNT enmAccount);
GMMR3DECL(int)  GMMR3AllocatePagesPerform(PVM pVM, PGMMALLOCATEPAGESREQ pReq);
GMMR3DECL(void) GMMR3AllocatePagesCleanup(PGMMALLOCATEPAGESREQ pReq);
GMMR3DECL(int)  GMMR3FreePagesPrepare(PVM pVM, PGMMFREEPAGESREQ *ppReq, uint32_t cPages, GMMACCOUNT enmAccount);
GMMR3DECL(void) GMMR3FreePagesRePrep(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t cPages, GMMACCOUNT enmAccount);
GMMR3DECL(int)  GMMR3FreePagesPerform(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t cActualPages);
GMMR3DECL(void) GMMR3FreePagesCleanup(PGMMFREEPAGESREQ pReq);
GMMR3DECL(void) GMMR3FreeAllocatedPages(PVM pVM, GMMALLOCATEPAGESREQ const *pAllocReq);
GMMR3DECL(int)  GMMR3AllocateLargePage(PVM pVM, uint32_t cbPage);
GMMR3DECL(int)  GMMR3FreeLargePage(PVM pVM, uint32_t idPage);
GMMR3DECL(int)  GMMR3MapUnmapChunk(PVM pVM, uint32_t idChunkMap, uint32_t idChunkUnmap, PRTR3PTR ppvR3);
GMMR3DECL(int)  GMMR3SeedChunk(PVM pVM, RTR3PTR pvR3);
GMMR3DECL(int)  GMMR3QueryHypervisorMemoryStats(PVM pVM, uint64_t *pcTotalAllocPages, uint64_t *pcTotalFreePages, uint64_t *pcTotalBalloonPages);
GMMR3DECL(int)  GMMR3QueryMemoryStats(PVM pVM, uint64_t *pcAllocPages, uint64_t *pcMaxPages, uint64_t *pcBalloonPages);
GMMR3DECL(int)  GMMR3BalloonedPages(PVM pVM, GMMBALLOONACTION enmAction, uint32_t cBalloonedPages);
GMMR3DECL(int)  GMMR3RegisterSharedModule(PVM pVM, PGMMREGISTERSHAREDMODULEREQ pReq);
GMMR3DECL(int)  GMMR3UnregisterSharedModule(PVM pVM, PGMMUNREGISTERSHAREDMODULEREQ pReq);
GMMR3DECL(int)  GMMR3CheckSharedModules(PVM pVM);
GMMR3DECL(int)  GMMR3ResetSharedModules(PVM pVM);
/** @} */
#endif /* IN_RING3 */

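/*
 * Example (illustrative sketch of the ring-3 allocation flow): prepare builds the
 * request buffer, the caller fills in the page descriptors, perform issues the
 * VMMR0 request and cleanup releases the buffer. cPages is a placeholder and
 * error handling is trimmed down.
 *
 *      PGMMALLOCATEPAGESREQ pReq;
 *      int rc = GMMR3AllocatePagesPrepare(pVM, &pReq, cPages, GMMACCOUNT_BASE);
 *      if (RT_SUCCESS(rc))
 *      {
 *          // fill pReq->aPages[0..cPages-1] (HCPhysGCPhys, idPage, idSharedPage)
 *          rc = GMMR3AllocatePagesPerform(pVM, pReq);
 *          GMMR3AllocatePagesCleanup(pReq);
 *      }
 */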
/** @} */

RT_C_DECLS_END

#endif
