VirtualBox

source: vbox/trunk/include/VBox/vmm/gmm.h@ 55461

Last change on this file since 55461 was 53615, checked in by vboxsync, 10 years ago

doxygen fixes.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 29.9 KB
Line 
1/** @file
2 * GMM - The Global Memory Manager.
3 */
4
5/*
6 * Copyright (C) 2007-2015 Oracle Corporation
7 *
8 * This file is part of VirtualBox Open Source Edition (OSE), as
9 * available from http://www.virtualbox.org. This file is free software;
10 * you can redistribute it and/or modify it under the terms of the GNU
11 * General Public License (GPL) as published by the Free Software
12 * Foundation, in version 2 as it comes in the "COPYING" file of the
13 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
14 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
15 *
16 * The contents of this file may alternatively be used under the terms
17 * of the Common Development and Distribution License Version 1.0
18 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
19 * VirtualBox OSE distribution, in which case the provisions of the
20 * CDDL are applicable instead of those of the GPL.
21 *
22 * You may elect to license modified versions of this file under the
23 * terms and conditions of either the GPL or the CDDL or both.
24 */
25
26#ifndef ___VBox_vmm_gmm_h
27#define ___VBox_vmm_gmm_h
28
29#include <VBox/vmm/gvmm.h>
30#include <VBox/sup.h>
31#include <VBox/param.h>
32#include <VBox/ostypes.h>
33#include <VBox/VMMDev.h>
34#include <iprt/avl.h>
35
36
RT_C_DECLS_BEGIN

/** @defgroup grp_gmm GMM - The Global Memory Manager
 * @{
 */

/** @def IN_GMM_R0
 * Used to indicate whether we're inside the same link module as the ring 0
 * part of the Global Memory Manager or not.
 */
#ifdef DOXYGEN_RUNNING
# define IN_GMM_R0
#endif
/** @def GMMR0DECL
 * Ring 0 GMM export or import declaration.
 * @param type The return type of the function declaration.
 */
#ifdef IN_GMM_R0
# define GMMR0DECL(type)    DECLEXPORT(type) VBOXCALL
#else
# define GMMR0DECL(type)    DECLIMPORT(type) VBOXCALL
#endif

/** @def IN_GMM_R3
 * Used to indicate whether we're inside the same link module as the ring 3
 * part of the Global Memory Manager or not.
 */
#ifdef DOXYGEN_RUNNING
# define IN_GMM_R3
#endif
/** @def GMMR3DECL
 * Ring 3 GMM export or import declaration.
 * @param type The return type of the function declaration.
 */
#ifdef IN_GMM_R3
# define GMMR3DECL(type)    DECLEXPORT(type) VBOXCALL
#else
# define GMMR3DECL(type)    DECLIMPORT(type) VBOXCALL
#endif
76
77
/** The chunk shift. (2^21 = 2 MB) */
#define GMM_CHUNK_SHIFT         21
/** The allocation chunk size. */
#define GMM_CHUNK_SIZE          (1U << GMM_CHUNK_SHIFT)
/** The allocation chunk size in pages. */
#define GMM_CHUNK_NUM_PAGES     (1U << (GMM_CHUNK_SHIFT - PAGE_SHIFT))
/** The shift factor for converting a page id into a chunk id. */
#define GMM_CHUNKID_SHIFT       (GMM_CHUNK_SHIFT - PAGE_SHIFT)
/** The last valid Chunk ID value.
 * @note References GMM_PAGEID_LAST which is #defined further down; this is
 *       fine since object-like macros are expanded at the point of use. */
#define GMM_CHUNKID_LAST        (GMM_PAGEID_LAST >> GMM_CHUNKID_SHIFT)
/** The last valid Page ID value.
 * The current limit is 2^28 - 1, or almost 1TB if you like.
 * The constraints are currently dictated by PGMPAGE. */
#define GMM_PAGEID_LAST         (RT_BIT_32(28) - 1)
/** Mask out the page index from the Page ID. */
#define GMM_PAGEID_IDX_MASK     ((1U << GMM_CHUNKID_SHIFT) - 1)
/** The NIL Chunk ID value. */
#define NIL_GMM_CHUNKID         0
/** The NIL Page ID value. */
#define NIL_GMM_PAGEID          0

#if 0 /* wrong - these are guest page pfns and not page ids! */
/** Special Page ID used by unassigned pages. */
#define GMM_PAGEID_UNASSIGNED   0x0fffffffU
/** Special Page ID used by unsharable pages.
 * Like MMIO2, shadow and heap. This is for later, obviously. */
#define GMM_PAGEID_UNSHARABLE   0x0ffffffeU
/** The end of the valid Page IDs. This is the first special one. */
#define GMM_PAGEID_END          0x0ffffff0U
#endif


/** @def GMM_GCPHYS_LAST
 * The last of the valid guest physical address as it applies to GMM pages.
 *
 * This must reflect the constraints imposed by the RTGCPHYS type and
 * the guest page frame number used internally in GMMPAGE.
 *
 * @note Note this corresponds to GMM_PAGE_PFN_LAST. */
#if HC_ARCH_BITS == 64
# define GMM_GCPHYS_LAST        UINT64_C(0x00000fffffff0000)    /* 2^44 (16TB) - 0x10000 */
#else
# define GMM_GCPHYS_LAST        UINT64_C(0x0000000fffff0000)    /* 2^36 (64GB) - 0x10000 */
#endif
122
/**
 * Over-commitment policy.
 */
typedef enum GMMOCPOLICY
{
    /** The usual invalid 0 value. */
    GMMOCPOLICY_INVALID = 0,
    /** No over-commitment, fully backed.
     * The GMM guarantees that it will be able to allocate all of the
     * guest RAM for a VM with this OC policy. */
    GMMOCPOLICY_NO_OC,
    /** to-be-determined. */
    GMMOCPOLICY_TBD,
    /** The end of the valid policy range. */
    GMMOCPOLICY_END,
    /** The usual 32-bit hack. */
    GMMOCPOLICY_32BIT_HACK = 0x7fffffff
} GMMOCPOLICY;
141
/**
 * VM / Memory priority.
 */
typedef enum GMMPRIORITY
{
    /** The usual invalid 0 value. */
    GMMPRIORITY_INVALID = 0,
    /** High.
     * When ballooning, ask these VMs last.
     * When running out of memory, try not to interrupt these VMs. */
    GMMPRIORITY_HIGH,
    /** Normal.
     * When ballooning, don't wait to ask these.
     * When running out of memory, pause, save and/or kill these VMs. */
    GMMPRIORITY_NORMAL,
    /** Low.
     * When ballooning, maximize these first.
     * When running out of memory, save or kill these VMs. */
    GMMPRIORITY_LOW,
    /** The end of the valid priority range. */
    GMMPRIORITY_END,
    /** The custom 32-bit type blowup. */
    GMMPRIORITY_32BIT_HACK = 0x7fffffff
} GMMPRIORITY;
166
167
/**
 * GMM Memory Accounts.
 */
typedef enum GMMACCOUNT
{
    /** The customary invalid zero entry. */
    GMMACCOUNT_INVALID = 0,
    /** Account with the base allocations. */
    GMMACCOUNT_BASE,
    /** Account with the shadow allocations. */
    GMMACCOUNT_SHADOW,
    /** Account with the fixed allocations. */
    GMMACCOUNT_FIXED,
    /** The end of the valid values. */
    GMMACCOUNT_END,
    /** The usual 32-bit value to finish it off. */
    GMMACCOUNT_32BIT_HACK = 0x7fffffff
} GMMACCOUNT;
186
187
/**
 * Balloon actions.
 */
typedef enum
{
    /** Invalid zero entry. */
    GMMBALLOONACTION_INVALID = 0,
    /** Inflate the balloon. */
    GMMBALLOONACTION_INFLATE,
    /** Deflate the balloon. */
    GMMBALLOONACTION_DEFLATE,
    /** Puncture the balloon because of VM reset. */
    GMMBALLOONACTION_RESET,
    /** End of the valid actions. */
    GMMBALLOONACTION_END,
    /** Hack forcing the size of the enum to 32-bits. */
    GMMBALLOONACTION_MAKE_32BIT_HACK = 0x7fffffff
} GMMBALLOONACTION;
206
207
/**
 * A page descriptor for use when freeing pages.
 * See GMMR0FreePages, GMMR0BalloonedPages.
 */
typedef struct GMMFREEPAGEDESC
{
    /** The Page ID of the page to be freed. */
    uint32_t idPage;
} GMMFREEPAGEDESC;
/** Pointer to a page descriptor for freeing pages. */
typedef GMMFREEPAGEDESC *PGMMFREEPAGEDESC;
219
220
/**
 * A page descriptor for use when updating and allocating pages.
 *
 * This is a bit complicated because we want to do as much as possible
 * with the same structure.
 */
typedef struct GMMPAGEDESC
{
    /** The physical address of the page.
     *
     * @input   GMMR0AllocateHandyPages expects the guest physical address
     *          to update the GMMPAGE structure with. Pass GMM_GCPHYS_UNSHAREABLE
     *          when appropriate and NIL_RTHCPHYS when the page wasn't used
     *          for any specific guest address.
     *
     *          GMMR0AllocatePage expects the guest physical address to put in
     *          the GMMPAGE structure for the page it allocates for this entry.
     *          Pass NIL_RTHCPHYS and GMM_GCPHYS_UNSHAREABLE as above.
     *
     * @output  The host physical address of the allocated page.
     *          NIL_RTHCPHYS on allocation failure.
     *
     * ASSUMES: sizeof(RTHCPHYS) >= sizeof(RTGCPHYS).
     */
    RTHCPHYS                    HCPhysGCPhys;

    /** The Page ID.
     *
     * @input   GMMR0AllocateHandyPages expects the Page ID of the page to
     *          update here. NIL_GMM_PAGEID means no page should be updated.
     *
     *          GMMR0AllocatePages requires this to be initialized to
     *          NIL_GMM_PAGEID currently.
     *
     * @output  The ID of the page, NIL_GMM_PAGEID if the allocation failed.
     */
    uint32_t                    idPage;

    /** The Page ID of the shared page that was replaced by this page.
     *
     * @input   GMMR0AllocateHandyPages expects this to indicate a shared
     *          page that has been replaced by this page and should have its
     *          reference counter decremented and perhaps be freed up. Use
     *          NIL_GMM_PAGEID if no shared page was involved.
     *
     *          All other APIs expect NIL_GMM_PAGEID here.
     *
     * @output  All APIs set this to NIL_GMM_PAGEID.
     */
    uint32_t                    idSharedPage;
} GMMPAGEDESC;
AssertCompileSize(GMMPAGEDESC, 16);
/** Pointer to a page allocation. */
typedef GMMPAGEDESC *PGMMPAGEDESC;

/** GMMPAGEDESC::HCPhysGCPhys value that indicates that the page is unsharable.
 * @note This corresponds to GMM_PAGE_PFN_UNSHAREABLE. */
#if HC_ARCH_BITS == 64
# define GMM_GCPHYS_UNSHAREABLE UINT64_C(0x00000fffffff1000)
#else
# define GMM_GCPHYS_UNSHAREABLE UINT64_C(0x0000000fffff1000)
#endif
283
284
/**
 * The allocation sizes.
 */
typedef struct GMMVMSIZES
{
    /** The number of pages of base memory.
     * This is the sum of RAM, ROMs and handy pages. */
    uint64_t        cBasePages;
    /** The number of pages for the shadow pool. (Can be squeezed for memory.) */
    uint32_t        cShadowPages;
    /** The number of pages for fixed allocations like MMIO2 and the hyper heap. */
    uint32_t        cFixedPages;
} GMMVMSIZES;
/** Pointer to a GMMVMSIZES. */
typedef GMMVMSIZES *PGMMVMSIZES;
300
301
/**
 * GMM VM statistics.
 */
typedef struct GMMVMSTATS
{
    /** The reservations. */
    GMMVMSIZES      Reserved;
    /** The actual allocations.
     * This includes both private and shared page allocations. */
    GMMVMSIZES      Allocated;

    /** The current number of private pages. */
    uint64_t        cPrivatePages;
    /** The current number of shared pages. */
    uint64_t        cSharedPages;
    /** The current number of ballooned pages. */
    uint64_t        cBalloonedPages;
    /** The max number of pages that can be ballooned. */
    uint64_t        cMaxBalloonedPages;
    /** The number of pages we've currently requested the guest to give us.
     * This is 0 if no pages currently requested. */
    uint64_t        cReqBalloonedPages;
    /** The number of pages the guest has given us in response to the request.
     * This is not reset on request completed and may be used in later decisions. */
    uint64_t        cReqActuallyBalloonedPages;
    /** The number of pages we've currently requested the guest to take back. */
    uint64_t        cReqDeflatePages;
    /** The number of shareable modules tracked by this VM. */
    uint32_t        cShareableModules;

    /** The current over-commitment policy. */
    GMMOCPOLICY     enmPolicy;
    /** The VM priority for arbitrating VMs in low and out of memory situation.
     * Like which VMs to start squeezing first. */
    GMMPRIORITY     enmPriority;
    /** Whether ballooning is enabled or not. */
    bool            fBallooningEnabled;
    /** Whether shared paging is enabled or not. */
    bool            fSharedPagingEnabled;
    /** Whether the VM is allowed to allocate memory or not.
     * This is used when the reservation update request fails or when the VM has
     * been told to suspend/save/die in an out-of-memory case. */
    bool            fMayAllocate;
    /** Explicit alignment. */
    bool            afReserved[1];
} GMMVMSTATS;
350
351
/**
 * The GMM statistics.
 */
typedef struct GMMSTATS
{
    /** The maximum number of pages we're allowed to allocate
     * (GMM::cMaxPages). */
    uint64_t        cMaxPages;
    /** The number of pages that has been reserved (GMM::cReservedPages). */
    uint64_t        cReservedPages;
    /** The number of pages that we have over-committed in reservations
     * (GMM::cOverCommittedPages). */
    uint64_t        cOverCommittedPages;
    /** The number of actually allocated (committed if you like) pages
     * (GMM::cAllocatedPages). */
    uint64_t        cAllocatedPages;
    /** The number of pages that are shared. A subset of cAllocatedPages.
     * (GMM::cSharedPages) */
    uint64_t        cSharedPages;
    /** The number of pages that are actually shared between VMs.
     * (GMM::cDuplicatePages) */
    uint64_t        cDuplicatePages;
    /** The number of pages that are shared that has been left behind by
     * VMs not doing proper cleanups (GMM::cLeftBehindSharedPages). */
    uint64_t        cLeftBehindSharedPages;
    /** The number of current ballooned pages (GMM::cBalloonedPages). */
    uint64_t        cBalloonedPages;
    /** The number of allocation chunks (GMM::cChunks). */
    uint32_t        cChunks;
    /** The number of freed chunks ever (GMM::cFreedChunks). */
    uint32_t        cFreedChunks;
    /** The number of shareable modules (GMM::cShareableModules). */
    uint64_t        cShareableModules;
    /** Space reserved for later. */
    uint64_t        au64Reserved[2];

    /** Statistics for the specified VM. (Zero filled if not requested.) */
    GMMVMSTATS      VMStats;
} GMMSTATS;
/** Pointer to the GMM statistics. */
typedef GMMSTATS *PGMMSTATS;
/** Const pointer to the GMM statistics. */
typedef const GMMSTATS *PCGMMSTATS;
395
396
/* The ring-0 GMM API. */
GMMR0DECL(int)  GMMR0Init(void);
GMMR0DECL(void) GMMR0Term(void);
GMMR0DECL(void) GMMR0InitPerVMData(PGVM pGVM);
GMMR0DECL(void) GMMR0CleanupVM(PGVM pGVM);
GMMR0DECL(int)  GMMR0InitialReservation(PVM pVM, VMCPUID idCpu, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages,
                                        GMMOCPOLICY enmPolicy, GMMPRIORITY enmPriority);
GMMR0DECL(int)  GMMR0UpdateReservation(PVM pVM, VMCPUID idCpu, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages);
GMMR0DECL(int)  GMMR0AllocateHandyPages(PVM pVM, VMCPUID idCpu, uint32_t cPagesToUpdate, uint32_t cPagesToAlloc, PGMMPAGEDESC paPages);
GMMR0DECL(int)  GMMR0AllocatePages(PVM pVM, VMCPUID idCpu, uint32_t cPages, PGMMPAGEDESC paPages, GMMACCOUNT enmAccount);
GMMR0DECL(int)  GMMR0AllocateLargePage(PVM pVM, VMCPUID idCpu, uint32_t cbPage, uint32_t *pIdPage, RTHCPHYS *pHCPhys);
GMMR0DECL(int)  GMMR0FreePages(PVM pVM, VMCPUID idCpu, uint32_t cPages, PGMMFREEPAGEDESC paPages, GMMACCOUNT enmAccount);
GMMR0DECL(int)  GMMR0FreeLargePage(PVM pVM, VMCPUID idCpu, uint32_t idPage);
GMMR0DECL(int)  GMMR0BalloonedPages(PVM pVM, VMCPUID idCpu, GMMBALLOONACTION enmAction, uint32_t cBalloonedPages);
GMMR0DECL(int)  GMMR0MapUnmapChunk(PVM pVM, uint32_t idChunkMap, uint32_t idChunkUnmap, PRTR3PTR ppvR3);
GMMR0DECL(int)  GMMR0SeedChunk(PVM pVM, VMCPUID idCpu, RTR3PTR pvR3);
GMMR0DECL(int)  GMMR0RegisterSharedModule(PVM pVM, VMCPUID idCpu, VBOXOSFAMILY enmGuestOS, char *pszModuleName, char *pszVersion,
                                          RTGCPTR GCBaseAddr, uint32_t cbModule, uint32_t cRegions,
                                          struct VMMDEVSHAREDREGIONDESC const *paRegions);
GMMR0DECL(int)  GMMR0UnregisterSharedModule(PVM pVM, VMCPUID idCpu, char *pszModuleName, char *pszVersion, RTGCPTR GCBaseAddr, uint32_t cbModule);
GMMR0DECL(int)  GMMR0UnregisterAllSharedModules(PVM pVM, VMCPUID idCpu);
GMMR0DECL(int)  GMMR0CheckSharedModules(PVM pVM, PVMCPU pVCpu);
GMMR0DECL(int)  GMMR0ResetSharedModules(PVM pVM, VMCPUID idCpu);
GMMR0DECL(int)  GMMR0CheckSharedModulesStart(PVM pVM);
GMMR0DECL(int)  GMMR0CheckSharedModulesEnd(PVM pVM);
GMMR0DECL(int)  GMMR0QueryStatistics(PGMMSTATS pStats, PSUPDRVSESSION pSession);
GMMR0DECL(int)  GMMR0ResetStatistics(PCGMMSTATS pStats, PSUPDRVSESSION pSession);
423
/**
 * Request buffer for GMMR0InitialReservationReq / VMMR0_DO_GMM_INITIAL_RESERVATION.
 * @see GMMR0InitialReservation
 */
typedef struct GMMINITIALRESERVATIONREQ
{
    /** The header. */
    SUPVMMR0REQHDR  Hdr;
    uint64_t        cBasePages;         /**< @see GMMR0InitialReservation */
    uint32_t        cShadowPages;       /**< @see GMMR0InitialReservation */
    uint32_t        cFixedPages;        /**< @see GMMR0InitialReservation */
    GMMOCPOLICY     enmPolicy;          /**< @see GMMR0InitialReservation */
    GMMPRIORITY     enmPriority;        /**< @see GMMR0InitialReservation */
} GMMINITIALRESERVATIONREQ;
/** Pointer to a GMMR0InitialReservationReq / VMMR0_DO_GMM_INITIAL_RESERVATION request buffer. */
typedef GMMINITIALRESERVATIONREQ *PGMMINITIALRESERVATIONREQ;

GMMR0DECL(int)  GMMR0InitialReservationReq(PVM pVM, VMCPUID idCpu, PGMMINITIALRESERVATIONREQ pReq);
442
443
/**
 * Request buffer for GMMR0UpdateReservationReq / VMMR0_DO_GMM_UPDATE_RESERVATION.
 * @see GMMR0UpdateReservation
 */
typedef struct GMMUPDATERESERVATIONREQ
{
    /** The header. */
    SUPVMMR0REQHDR  Hdr;
    uint64_t        cBasePages;         /**< @see GMMR0UpdateReservation */
    uint32_t        cShadowPages;       /**< @see GMMR0UpdateReservation */
    uint32_t        cFixedPages;        /**< @see GMMR0UpdateReservation */
} GMMUPDATERESERVATIONREQ;
/** Pointer to a GMMR0UpdateReservationReq / VMMR0_DO_GMM_UPDATE_RESERVATION request buffer.
 * (The original comment said "InitialReservation" here - a copy &amp; paste error.) */
typedef GMMUPDATERESERVATIONREQ *PGMMUPDATERESERVATIONREQ;

GMMR0DECL(int)  GMMR0UpdateReservationReq(PVM pVM, VMCPUID idCpu, PGMMUPDATERESERVATIONREQ pReq);
460
461
/**
 * Request buffer for GMMR0AllocatePagesReq / VMMR0_DO_GMM_ALLOCATE_PAGES.
 * @see GMMR0AllocatePages.
 */
typedef struct GMMALLOCATEPAGESREQ
{
    /** The header. */
    SUPVMMR0REQHDR  Hdr;
    /** The account to charge the allocation to. */
    GMMACCOUNT      enmAccount;
    /** The number of pages to allocate. */
    uint32_t        cPages;
    /** Array of page descriptors.
     * Variable sized: the request is allocated with room for cPages entries. */
    GMMPAGEDESC     aPages[1];
} GMMALLOCATEPAGESREQ;
/** Pointer to a GMMR0AllocatePagesReq / VMMR0_DO_GMM_ALLOCATE_PAGES request buffer. */
typedef GMMALLOCATEPAGESREQ *PGMMALLOCATEPAGESREQ;

GMMR0DECL(int)  GMMR0AllocatePagesReq(PVM pVM, VMCPUID idCpu, PGMMALLOCATEPAGESREQ pReq);
481
482
/**
 * Request buffer for GMMR0FreePagesReq / VMMR0_DO_GMM_FREE_PAGES.
 * @see GMMR0FreePages.
 */
typedef struct GMMFREEPAGESREQ
{
    /** The header. */
    SUPVMMR0REQHDR  Hdr;
    /** The account this relates to. */
    GMMACCOUNT      enmAccount;
    /** The number of pages to free. */
    uint32_t        cPages;
    /** Array of free page descriptors.
     * Variable sized: the request is allocated with room for cPages entries. */
    GMMFREEPAGEDESC aPages[1];
} GMMFREEPAGESREQ;
/** Pointer to a GMMR0FreePagesReq / VMMR0_DO_GMM_FREE_PAGES request buffer. */
typedef GMMFREEPAGESREQ *PGMMFREEPAGESREQ;

GMMR0DECL(int)  GMMR0FreePagesReq(PVM pVM, VMCPUID idCpu, PGMMFREEPAGESREQ pReq);
502
/**
 * Request buffer for GMMR0BalloonedPagesReq / VMMR0_DO_GMM_BALLOONED_PAGES.
 * @see GMMR0BalloonedPages.
 */
typedef struct GMMBALLOONEDPAGESREQ
{
    /** The header. */
    SUPVMMR0REQHDR      Hdr;
    /** The number of ballooned pages. */
    uint32_t            cBalloonedPages;
    /** Inflate or deflate the balloon. */
    GMMBALLOONACTION    enmAction;
} GMMBALLOONEDPAGESREQ;
/** Pointer to a GMMR0BalloonedPagesReq / VMMR0_DO_GMM_BALLOONED_PAGES request buffer. */
typedef GMMBALLOONEDPAGESREQ *PGMMBALLOONEDPAGESREQ;

GMMR0DECL(int)  GMMR0BalloonedPagesReq(PVM pVM, VMCPUID idCpu, PGMMBALLOONEDPAGESREQ pReq);
520
521
/**
 * Request buffer for GMMR0QueryHypervisorMemoryStatsReq / VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS.
 * (Also used by GMMR0QueryMemoryStatsReq - see the prototypes below.)
 * @see GMMR0QueryHypervisorMemoryStatsReq.
 */
typedef struct GMMMEMSTATSREQ
{
    /** The header. */
    SUPVMMR0REQHDR  Hdr;
    /** The number of allocated pages (out). */
    uint64_t        cAllocPages;
    /** The number of free pages (out). */
    uint64_t        cFreePages;
    /** The number of ballooned pages (out). */
    uint64_t        cBalloonedPages;
    /** The number of shared pages (out). */
    uint64_t        cSharedPages;
    /** Maximum number of pages (out). */
    uint64_t        cMaxPages;
} GMMMEMSTATSREQ;
/** Pointer to a GMMR0QueryHypervisorMemoryStatsReq / VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS request buffer. */
typedef GMMMEMSTATSREQ *PGMMMEMSTATSREQ;

GMMR0DECL(int)  GMMR0QueryHypervisorMemoryStatsReq(PVM pVM, PGMMMEMSTATSREQ pReq);
GMMR0DECL(int)  GMMR0QueryMemoryStatsReq(PVM pVM, VMCPUID idCpu, PGMMMEMSTATSREQ pReq);
546
/**
 * Request buffer for GMMR0MapUnmapChunkReq / VMMR0_DO_GMM_MAP_UNMAP_CHUNK.
 * @see GMMR0MapUnmapChunk
 */
typedef struct GMMMAPUNMAPCHUNKREQ
{
    /** The header. */
    SUPVMMR0REQHDR  Hdr;
    /** The chunk to map, NIL_GMM_CHUNKID if unmap only. (IN) */
    uint32_t        idChunkMap;
    /** The chunk to unmap, NIL_GMM_CHUNKID if map only. (IN) */
    uint32_t        idChunkUnmap;
    /** Where the mapping address is returned. (OUT) */
    RTR3PTR         pvR3;
} GMMMAPUNMAPCHUNKREQ;
/** Pointer to a GMMR0MapUnmapChunkReq / VMMR0_DO_GMM_MAP_UNMAP_CHUNK request buffer. */
typedef GMMMAPUNMAPCHUNKREQ *PGMMMAPUNMAPCHUNKREQ;

GMMR0DECL(int)  GMMR0MapUnmapChunkReq(PVM pVM, PGMMMAPUNMAPCHUNKREQ pReq);
566
567
/**
 * Request buffer for GMMR0FreeLargePageReq / VMMR0_DO_GMM_FREE_LARGE_PAGE.
 * @see GMMR0FreeLargePage.
 */
typedef struct GMMFREELARGEPAGEREQ
{
    /** The header. */
    SUPVMMR0REQHDR  Hdr;
    /** The Page ID. */
    uint32_t        idPage;
} GMMFREELARGEPAGEREQ;
/** Pointer to a GMMR0FreeLargePageReq / VMMR0_DO_GMM_FREE_LARGE_PAGE request buffer.
 * (The original comment said "FreePagesReq / FREE_PAGES" here - a copy &amp; paste error.) */
typedef GMMFREELARGEPAGEREQ *PGMMFREELARGEPAGEREQ;

GMMR0DECL(int)  GMMR0FreeLargePageReq(PVM pVM, VMCPUID idCpu, PGMMFREELARGEPAGEREQ pReq);

/** Maximum length of the shared module name string, terminator included. */
#define GMM_SHARED_MODULE_MAX_NAME_STRING       128
/** Maximum length of the shared module version string, terminator included. */
#define GMM_SHARED_MODULE_MAX_VERSION_STRING    16
588
/**
 * Request buffer for GMMR0RegisterSharedModuleReq / VMMR0_DO_GMM_REGISTER_SHARED_MODULE.
 * @see GMMR0RegisterSharedModule.
 */
typedef struct GMMREGISTERSHAREDMODULEREQ
{
    /** The header. */
    SUPVMMR0REQHDR  Hdr;
    /** Shared module size. */
    uint32_t        cbModule;
    /** Number of included region descriptors */
    uint32_t        cRegions;
    /** Base address of the shared module. */
    RTGCPTR64       GCBaseAddr;
    /** Guest OS type. */
    VBOXOSFAMILY    enmGuestOS;
    /** Return code. */
    uint32_t        rc;
    /** Module name */
    char            szName[GMM_SHARED_MODULE_MAX_NAME_STRING];
    /** Module version */
    char            szVersion[GMM_SHARED_MODULE_MAX_VERSION_STRING];
    /** Shared region descriptor(s).
     * Variable sized: the request is allocated with room for cRegions entries. */
    VMMDEVSHAREDREGIONDESC aRegions[1];
} GMMREGISTERSHAREDMODULEREQ;
/** Pointer to a GMMR0RegisterSharedModuleReq / VMMR0_DO_GMM_REGISTER_SHARED_MODULE request buffer. */
typedef GMMREGISTERSHAREDMODULEREQ *PGMMREGISTERSHAREDMODULEREQ;

GMMR0DECL(int)  GMMR0RegisterSharedModuleReq(PVM pVM, VMCPUID idCpu, PGMMREGISTERSHAREDMODULEREQ pReq);
618
/**
 * Shared region descriptor
 */
typedef struct GMMSHAREDREGIONDESC
{
    /** The page offset where the region starts. */
    uint32_t        off;
    /** Region size - adjusted by the region offset and rounded up to a
     * page. */
    uint32_t        cb;
    /** Pointer to physical GMM page ID array. */
    uint32_t       *paidPages;
} GMMSHAREDREGIONDESC;
/** Pointer to a GMMSHAREDREGIONDESC. */
typedef GMMSHAREDREGIONDESC *PGMMSHAREDREGIONDESC;
634
635
/**
 * Shared module registration info (global)
 */
typedef struct GMMSHAREDMODULE
{
    /** Tree node (keyed by a hash of name &amp; version). */
    AVLLU32NODECORE     Core;
    /** Shared module size. */
    uint32_t            cbModule;
    /** Number of included region descriptors */
    uint32_t            cRegions;
    /** Number of users (VMs). */
    uint32_t            cUsers;
    /** Guest OS family type. */
    VBOXOSFAMILY        enmGuestOS;
    /** Module name */
    char                szName[GMM_SHARED_MODULE_MAX_NAME_STRING];
    /** Module version */
    char                szVersion[GMM_SHARED_MODULE_MAX_VERSION_STRING];
    /** Shared region descriptor(s).
     * Variable sized: allocated with room for cRegions entries. */
    GMMSHAREDREGIONDESC aRegions[1];
} GMMSHAREDMODULE;
/** Pointer to a GMMSHAREDMODULE. */
typedef GMMSHAREDMODULE *PGMMSHAREDMODULE;
660
/**
 * Page descriptor for GMMR0SharedModuleCheckRange
 */
typedef struct GMMSHAREDPAGEDESC
{
    /** HC Physical address (in/out) */
    RTHCPHYS        HCPhys;
    /** GC Physical address (in) */
    RTGCPHYS        GCPhys;
    /** GMM page id. (in/out) */
    uint32_t        idPage;
    /** CRC32 of the page in strict builds (0 if page not available).
     * In non-strict build this serves as structure alignment. */
    uint32_t        u32StrictChecksum;
} GMMSHAREDPAGEDESC;
/** Pointer to a GMMSHAREDPAGEDESC. */
typedef GMMSHAREDPAGEDESC *PGMMSHAREDPAGEDESC;

GMMR0DECL(int)  GMMR0SharedModuleCheckPage(PGVM pGVM, PGMMSHAREDMODULE pModule, uint32_t idxRegion, uint32_t idxPage,
                                           PGMMSHAREDPAGEDESC pPageDesc);
681
/**
 * Request buffer for GMMR0UnregisterSharedModuleReq / VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE.
 * @see GMMR0UnregisterSharedModule.
 */
typedef struct GMMUNREGISTERSHAREDMODULEREQ
{
    /** The header. */
    SUPVMMR0REQHDR  Hdr;
    /** Shared module size. */
    uint32_t        cbModule;
    /** Align at 8 byte boundary. */
    uint32_t        u32Alignment;
    /** Base address of the shared module. */
    RTGCPTR64       GCBaseAddr;
    /** Module name */
    char            szName[GMM_SHARED_MODULE_MAX_NAME_STRING];
    /** Module version */
    char            szVersion[GMM_SHARED_MODULE_MAX_VERSION_STRING];
} GMMUNREGISTERSHAREDMODULEREQ;
/** Pointer to a GMMR0UnregisterSharedModuleReq / VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE request buffer. */
typedef GMMUNREGISTERSHAREDMODULEREQ *PGMMUNREGISTERSHAREDMODULEREQ;

GMMR0DECL(int)  GMMR0UnregisterSharedModuleReq(PVM pVM, VMCPUID idCpu, PGMMUNREGISTERSHAREDMODULEREQ pReq);
705
#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
/**
 * Request buffer for GMMR0FindDuplicatePageReq / VMMR0_DO_GMM_FIND_DUPLICATE_PAGE.
 * (Strict 64-bit host builds only.)
 * @see GMMR0FindDuplicatePage.
 */
typedef struct GMMFINDDUPLICATEPAGEREQ
{
    /** The header. */
    SUPVMMR0REQHDR  Hdr;
    /** Page id. */
    uint32_t        idPage;
    /** Duplicate flag (out) */
    bool            fDuplicate;
} GMMFINDDUPLICATEPAGEREQ;
/** Pointer to a GMMR0FindDuplicatePageReq / VMMR0_DO_GMM_FIND_DUPLICATE_PAGE request buffer. */
typedef GMMFINDDUPLICATEPAGEREQ *PGMMFINDDUPLICATEPAGEREQ;

GMMR0DECL(int)  GMMR0FindDuplicatePageReq(PVM pVM, PGMMFINDDUPLICATEPAGEREQ pReq);
#endif /* VBOX_STRICT && HC_ARCH_BITS == 64 */
725
726
/**
 * Request buffer for GMMR0QueryStatisticsReq / VMMR0_DO_GMM_QUERY_STATISTICS.
 * @see GMMR0QueryStatistics.
 * @note NOTE(review): the doubled 'S' in the type name (STATISTICSS) looks
 *       like a typo, but it is the established public identifier and is
 *       therefore kept as-is.
 */
typedef struct GMMQUERYSTATISTICSSREQ
{
    /** The header. */
    SUPVMMR0REQHDR  Hdr;
    /** The support driver session. */
    PSUPDRVSESSION  pSession;
    /** The statistics. */
    GMMSTATS        Stats;
} GMMQUERYSTATISTICSSREQ;
/** Pointer to a GMMR0QueryStatisticsReq / VMMR0_DO_GMM_QUERY_STATISTICS
 * request buffer. */
typedef GMMQUERYSTATISTICSSREQ *PGMMQUERYSTATISTICSSREQ;

GMMR0DECL(int)  GMMR0QueryStatisticsReq(PVM pVM, PGMMQUERYSTATISTICSSREQ pReq);


/**
 * Request buffer for GMMR0ResetStatisticsReq / VMMR0_DO_GMM_RESET_STATISTICS.
 * @see GMMR0ResetStatistics.
 */
typedef struct GMMRESETSTATISTICSSREQ
{
    /** The header. */
    SUPVMMR0REQHDR  Hdr;
    /** The support driver session. */
    PSUPDRVSESSION  pSession;
    /** The statistics to reset.
     * Any non-zero entry will be reset (if permitted). */
    GMMSTATS        Stats;
} GMMRESETSTATISTICSSREQ;
/** Pointer to a GMMR0ResetStatisticsReq / VMMR0_DO_GMM_RESET_STATISTICS
 * request buffer. */
typedef GMMRESETSTATISTICSSREQ *PGMMRESETSTATISTICSSREQ;

GMMR0DECL(int)  GMMR0ResetStatisticsReq(PVM pVM, PGMMRESETSTATISTICSSREQ pReq);
766
767
768
#ifdef IN_RING3
/** @defgroup grp_gmm_r3 The Global Memory Manager Ring-3 API Wrappers
 * @{
 */
GMMR3DECL(int)  GMMR3InitialReservation(PVM pVM, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages,
                                        GMMOCPOLICY enmPolicy, GMMPRIORITY enmPriority);
GMMR3DECL(int)  GMMR3UpdateReservation(PVM pVM, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages);
GMMR3DECL(int)  GMMR3AllocatePagesPrepare(PVM pVM, PGMMALLOCATEPAGESREQ *ppReq, uint32_t cPages, GMMACCOUNT enmAccount);
GMMR3DECL(int)  GMMR3AllocatePagesPerform(PVM pVM, PGMMALLOCATEPAGESREQ pReq);
GMMR3DECL(void) GMMR3AllocatePagesCleanup(PGMMALLOCATEPAGESREQ pReq);
GMMR3DECL(int)  GMMR3FreePagesPrepare(PVM pVM, PGMMFREEPAGESREQ *ppReq, uint32_t cPages, GMMACCOUNT enmAccount);
GMMR3DECL(void) GMMR3FreePagesRePrep(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t cPages, GMMACCOUNT enmAccount);
GMMR3DECL(int)  GMMR3FreePagesPerform(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t cActualPages);
GMMR3DECL(void) GMMR3FreePagesCleanup(PGMMFREEPAGESREQ pReq);
GMMR3DECL(void) GMMR3FreeAllocatedPages(PVM pVM, GMMALLOCATEPAGESREQ const *pAllocReq);
GMMR3DECL(int)  GMMR3AllocateLargePage(PVM pVM, uint32_t cbPage);
GMMR3DECL(int)  GMMR3FreeLargePage(PVM pVM, uint32_t idPage);
GMMR3DECL(int)  GMMR3MapUnmapChunk(PVM pVM, uint32_t idChunkMap, uint32_t idChunkUnmap, PRTR3PTR ppvR3);
GMMR3DECL(int)  GMMR3SeedChunk(PVM pVM, RTR3PTR pvR3);
GMMR3DECL(int)  GMMR3QueryHypervisorMemoryStats(PVM pVM, uint64_t *pcTotalAllocPages, uint64_t *pcTotalFreePages, uint64_t *pcTotalBalloonPages, uint64_t *puTotalBalloonSize);
GMMR3DECL(int)  GMMR3QueryMemoryStats(PVM pVM, uint64_t *pcAllocPages, uint64_t *pcMaxPages, uint64_t *pcBalloonPages);
GMMR3DECL(int)  GMMR3BalloonedPages(PVM pVM, GMMBALLOONACTION enmAction, uint32_t cBalloonedPages);
GMMR3DECL(int)  GMMR3RegisterSharedModule(PVM pVM, PGMMREGISTERSHAREDMODULEREQ pReq);
GMMR3DECL(int)  GMMR3UnregisterSharedModule(PVM pVM, PGMMUNREGISTERSHAREDMODULEREQ pReq);
GMMR3DECL(int)  GMMR3CheckSharedModules(PVM pVM);
GMMR3DECL(int)  GMMR3ResetSharedModules(PVM pVM);

# if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
GMMR3DECL(bool) GMMR3IsDuplicatePage(PVM pVM, uint32_t idPage);
# endif

/** @} */
#endif /* IN_RING3 */
802
803/** @} */
804
805RT_C_DECLS_END
806
807#endif
808
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette