source: vbox/trunk/include/VBox/vmm/gmm.h@82968
/** @file
 * GMM - The Global Memory Manager.
 */

/*
 * Copyright (C) 2007-2020 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */

#ifndef VBOX_INCLUDED_vmm_gmm_h
#define VBOX_INCLUDED_vmm_gmm_h
#ifndef RT_WITHOUT_PRAGMA_ONCE
# pragma once
#endif

#include <VBox/vmm/gvmm.h>
#include <VBox/sup.h>
#include <VBox/param.h>
#include <VBox/ostypes.h>
#include <iprt/avl.h>


RT_C_DECLS_BEGIN

/** @defgroup grp_gmm  GMM - The Global Memory Manager
 * @ingroup grp_vmm
 * @{
 */

/** @def IN_GMM_R0
 * Used to indicate whether we're inside the same link module as the ring 0
 * part of the Global Memory Manager or not.
 */
#ifdef DOXYGEN_RUNNING
# define IN_GMM_R0
#endif
/** @def GMMR0DECL
 * Ring 0 GMM export or import declaration.
 * @param type  The return type of the function declaration.
 */
#ifdef IN_GMM_R0
# define GMMR0DECL(type)    DECLEXPORT(type) VBOXCALL
#else
# define GMMR0DECL(type)    DECLIMPORT(type) VBOXCALL
#endif

/** @def IN_GMM_R3
 * Used to indicate whether we're inside the same link module as the ring 3
 * part of the Global Memory Manager or not.
 */
#ifdef DOXYGEN_RUNNING
# define IN_GMM_R3
#endif
/** @def GMMR3DECL
 * Ring 3 GMM export or import declaration.
 * @param type  The return type of the function declaration.
 */
#ifdef IN_GMM_R3
# define GMMR3DECL(type)    DECLEXPORT(type) VBOXCALL
#else
# define GMMR3DECL(type)    DECLIMPORT(type) VBOXCALL
#endif


/** The chunk shift. (2^21 = 2 MB) */
#define GMM_CHUNK_SHIFT         21
/** The allocation chunk size. */
#define GMM_CHUNK_SIZE          (1U << GMM_CHUNK_SHIFT)
/** The allocation chunk size in pages. */
#define GMM_CHUNK_NUM_PAGES     (1U << (GMM_CHUNK_SHIFT - PAGE_SHIFT))
/** The shift factor for converting a page id into a chunk id. */
#define GMM_CHUNKID_SHIFT       (GMM_CHUNK_SHIFT - PAGE_SHIFT)
/** The last valid Chunk ID value. */
#define GMM_CHUNKID_LAST        (GMM_PAGEID_LAST >> GMM_CHUNKID_SHIFT)
/** The last valid Page ID value.
 * The current limit is 2^28 - 1, or almost 1TB if you like.
 * The constraints are currently dictated by PGMPAGE. */
#define GMM_PAGEID_LAST         (RT_BIT_32(28) - 1)
/** Mask out the page index from the Page ID. */
#define GMM_PAGEID_IDX_MASK     ((1U << GMM_CHUNKID_SHIFT) - 1)
/** The NIL Chunk ID value. */
#define NIL_GMM_CHUNKID         0
/** The NIL Page ID value. */
#define NIL_GMM_PAGEID          0

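/*
 * Illustrative sketch (helper names are hypothetical, not part of the GMM API):
 * a Page ID carries the Chunk ID in its upper bits and the index of the page
 * within the 2 MB chunk in its lower GMM_CHUNKID_SHIFT bits, so splitting and
 * recombining an ID is just a shift and a mask using the macros above.
 */
#if 0 /* example only */
static inline uint32_t gmmExampleChunkIdFromPageId(uint32_t idPage)
{
    return idPage >> GMM_CHUNKID_SHIFT;             /* which 2 MB chunk the page lives in */
}

static inline uint32_t gmmExamplePageIdxFromPageId(uint32_t idPage)
{
    return idPage & GMM_PAGEID_IDX_MASK;            /* index of the page inside that chunk */
}

static inline uint32_t gmmExamplePageIdCompose(uint32_t idChunk, uint32_t iPage)
{
    return (idChunk << GMM_CHUNKID_SHIFT) | (iPage & GMM_PAGEID_IDX_MASK);
}
#endif
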
#if 0 /* wrong - these are guest page pfns and not page ids! */
/** Special Page ID used by unassigned pages. */
#define GMM_PAGEID_UNASSIGNED   0x0fffffffU
/** Special Page ID used by unsharable pages.
 * Like MMIO2, shadow and heap. This is for later, obviously. */
#define GMM_PAGEID_UNSHARABLE   0x0ffffffeU
/** The end of the valid Page IDs. This is the first special one. */
#define GMM_PAGEID_END          0x0ffffff0U
#endif


/** @def GMM_GCPHYS_LAST
 * The last valid guest physical address as it applies to GMM pages.
 *
 * This must reflect the constraints imposed by the RTGCPHYS type and
 * the guest page frame number used internally in GMMPAGE.
 *
 * @note This corresponds to GMM_PAGE_PFN_LAST. */
#if HC_ARCH_BITS == 64
# define GMM_GCPHYS_LAST        UINT64_C(0x00000fffffff0000)    /* 2^44 (16TB) - 0x10000 */
#else
# define GMM_GCPHYS_LAST        UINT64_C(0x0000000fffff0000)    /* 2^36 (64GB) - 0x10000 */
#endif

/**
 * Over-commitment policy.
 */
typedef enum GMMOCPOLICY
{
    /** The usual invalid 0 value. */
    GMMOCPOLICY_INVALID = 0,
    /** No over-commitment, fully backed.
     * The GMM guarantees that it will be able to allocate all of the
     * guest RAM for a VM with OC policy. */
    GMMOCPOLICY_NO_OC,
    /** to-be-determined. */
    GMMOCPOLICY_TBD,
    /** The end of the valid policy range. */
    GMMOCPOLICY_END,
    /** The usual 32-bit hack. */
    GMMOCPOLICY_32BIT_HACK = 0x7fffffff
} GMMOCPOLICY;

/**
 * VM / Memory priority.
 */
typedef enum GMMPRIORITY
{
    /** The usual invalid 0 value. */
    GMMPRIORITY_INVALID = 0,
    /** High.
     * When ballooning, ask these VMs last.
     * When running out of memory, try not to interrupt these VMs. */
    GMMPRIORITY_HIGH,
    /** Normal.
     * When ballooning, don't wait to ask these.
     * When running out of memory, pause, save and/or kill these VMs. */
    GMMPRIORITY_NORMAL,
    /** Low.
     * When ballooning, maximize these first.
     * When running out of memory, save or kill these VMs. */
    GMMPRIORITY_LOW,
    /** The end of the valid priority range. */
    GMMPRIORITY_END,
    /** The custom 32-bit type blowup. */
    GMMPRIORITY_32BIT_HACK = 0x7fffffff
} GMMPRIORITY;


/**
 * GMM Memory Accounts.
 */
typedef enum GMMACCOUNT
{
    /** The customary invalid zero entry. */
    GMMACCOUNT_INVALID = 0,
    /** Account with the base allocations. */
    GMMACCOUNT_BASE,
    /** Account with the shadow allocations. */
    GMMACCOUNT_SHADOW,
    /** Account with the fixed allocations. */
    GMMACCOUNT_FIXED,
    /** The end of the valid values. */
    GMMACCOUNT_END,
    /** The usual 32-bit value to finish it off. */
    GMMACCOUNT_32BIT_HACK = 0x7fffffff
} GMMACCOUNT;


/**
 * Balloon actions.
 */
typedef enum
{
    /** Invalid zero entry. */
    GMMBALLOONACTION_INVALID = 0,
    /** Inflate the balloon. */
    GMMBALLOONACTION_INFLATE,
    /** Deflate the balloon. */
    GMMBALLOONACTION_DEFLATE,
    /** Puncture the balloon because of VM reset. */
    GMMBALLOONACTION_RESET,
    /** End of the valid actions. */
    GMMBALLOONACTION_END,
    /** Hack forcing the size of the enum to 32-bits. */
    GMMBALLOONACTION_MAKE_32BIT_HACK = 0x7fffffff
} GMMBALLOONACTION;


/**
 * A page descriptor for use when freeing pages.
 * See GMMR0FreePages, GMMR0BalloonedPages.
 */
typedef struct GMMFREEPAGEDESC
{
    /** The Page ID of the page to be freed. */
    uint32_t        idPage;
} GMMFREEPAGEDESC;
/** Pointer to a page descriptor for freeing pages. */
typedef GMMFREEPAGEDESC *PGMMFREEPAGEDESC;


/**
 * A page descriptor for use when updating and allocating pages.
 *
 * This is a bit complicated because we want to do as much as possible
 * with the same structure.
 */
typedef struct GMMPAGEDESC
{
    /** The physical address of the page.
     *
     * @input   GMMR0AllocateHandyPages expects the guest physical address
     *          to update the GMMPAGE structure with. Pass GMM_GCPHYS_UNSHAREABLE
     *          when appropriate and NIL_RTHCPHYS when the page wasn't used
     *          for any specific guest address.
     *
     *          GMMR0AllocatePage expects the guest physical address to put in
     *          the GMMPAGE structure for the page it allocates for this entry.
     *          Pass NIL_RTHCPHYS and GMM_GCPHYS_UNSHAREABLE as above.
     *
     * @output  The host physical address of the allocated page.
     *          NIL_RTHCPHYS on allocation failure.
     *
     * ASSUMES: sizeof(RTHCPHYS) >= sizeof(RTGCPHYS).
     */
    RTHCPHYS        HCPhysGCPhys;

    /** The Page ID.
     *
     * @input   GMMR0AllocateHandyPages expects the Page ID of the page to
     *          update here. NIL_GMM_PAGEID means no page should be updated.
     *
     *          GMMR0AllocatePages requires this to be initialized to
     *          NIL_GMM_PAGEID currently.
     *
     * @output  The ID of the page, NIL_GMM_PAGEID if the allocation failed.
     */
    uint32_t        idPage;

    /** The Page ID of the shared page that was replaced by this page.
     *
     * @input   GMMR0AllocateHandyPages expects this to indicate a shared
     *          page that has been replaced by this page and should have its
     *          reference counter decremented and perhaps be freed up. Use
     *          NIL_GMM_PAGEID if no shared page was involved.
     *
     *          All other APIs expect NIL_GMM_PAGEID here.
     *
     * @output  All APIs set this to NIL_GMM_PAGEID.
     */
    uint32_t        idSharedPage;
} GMMPAGEDESC;
AssertCompileSize(GMMPAGEDESC, 16);
/** Pointer to a page allocation. */
typedef GMMPAGEDESC *PGMMPAGEDESC;

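/*
 * Illustrative sketch (hypothetical helper, not part of the GMM API): priming a
 * GMMPAGEDESC for a plain allocation per the @input rules above - the guest
 * physical address (or NIL_RTHCPHYS / GMM_GCPHYS_UNSHAREABLE) in HCPhysGCPhys,
 * and NIL ids everywhere else for a fresh allocation.
 */
#if 0 /* example only */
static void gmmExampleInitAllocDesc(PGMMPAGEDESC pDesc, RTGCPHYS GCPhys)
{
    /* Guest address the new page will back; NIL_RTHCPHYS if none in particular. */
    pDesc->HCPhysGCPhys = GCPhys != NIL_RTGCPHYS ? GCPhys : NIL_RTHCPHYS;
    /* Fresh allocation: no existing page to update, no shared page replaced. */
    pDesc->idPage       = NIL_GMM_PAGEID;
    pDesc->idSharedPage = NIL_GMM_PAGEID;
}
#endif
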
/** GMMPAGEDESC::HCPhysGCPhys value that indicates that the page is unsharable.
 * @note This corresponds to GMM_PAGE_PFN_UNSHAREABLE. */
#if HC_ARCH_BITS == 64
# define GMM_GCPHYS_UNSHAREABLE UINT64_C(0x00000fffffff1000)
#else
# define GMM_GCPHYS_UNSHAREABLE UINT64_C(0x0000000fffff1000)
#endif


/**
 * The allocation sizes.
 */
typedef struct GMMVMSIZES
{
    /** The number of pages of base memory.
     * This is the sum of RAM, ROMs and handy pages. */
    uint64_t        cBasePages;
    /** The number of pages for the shadow pool. (Can be squeezed for memory.) */
    uint32_t        cShadowPages;
    /** The number of pages for fixed allocations like MMIO2 and the hyper heap. */
    uint32_t        cFixedPages;
} GMMVMSIZES;
/** Pointer to a GMMVMSIZES. */
typedef GMMVMSIZES *PGMMVMSIZES;


/**
 * GMM VM statistics.
 */
typedef struct GMMVMSTATS
{
    /** The reservations. */
    GMMVMSIZES      Reserved;
    /** The actual allocations.
     * This includes both private and shared page allocations. */
    GMMVMSIZES      Allocated;

    /** The current number of private pages. */
    uint64_t        cPrivatePages;
    /** The current number of shared pages. */
    uint64_t        cSharedPages;
    /** The current number of ballooned pages. */
    uint64_t        cBalloonedPages;
    /** The max number of pages that can be ballooned. */
    uint64_t        cMaxBalloonedPages;
    /** The number of pages we've currently requested the guest to give us.
     * This is 0 if no pages are currently requested. */
    uint64_t        cReqBalloonedPages;
    /** The number of pages the guest has given us in response to the request.
     * This is not reset when the request completes and may be used in later decisions. */
    uint64_t        cReqActuallyBalloonedPages;
    /** The number of pages we've currently requested the guest to take back. */
    uint64_t        cReqDeflatePages;
    /** The number of shareable modules tracked by this VM. */
    uint32_t        cShareableModules;

    /** The current over-commitment policy. */
    GMMOCPOLICY     enmPolicy;
    /** The VM priority for arbitrating VMs in low and out of memory situations.
     * Like which VMs to start squeezing first. */
    GMMPRIORITY     enmPriority;
    /** Whether ballooning is enabled or not. */
    bool            fBallooningEnabled;
    /** Whether shared paging is enabled or not. */
    bool            fSharedPagingEnabled;
    /** Whether the VM is allowed to allocate memory or not.
     * This is used when the reservation update request fails or when the VM has
     * been told to suspend/save/die in an out-of-memory case. */
    bool            fMayAllocate;
    /** Explicit alignment. */
    bool            afReserved[1];
} GMMVMSTATS;


/**
 * The GMM statistics.
 */
typedef struct GMMSTATS
{
    /** The maximum number of pages we're allowed to allocate
     * (GMM::cMaxPages). */
    uint64_t        cMaxPages;
    /** The number of pages that have been reserved (GMM::cReservedPages). */
    uint64_t        cReservedPages;
    /** The number of pages that we have over-committed in reservations
     * (GMM::cOverCommittedPages). */
    uint64_t        cOverCommittedPages;
    /** The number of actually allocated (committed if you like) pages
     * (GMM::cAllocatedPages). */
    uint64_t        cAllocatedPages;
    /** The number of pages that are shared. A subset of cAllocatedPages.
     * (GMM::cSharedPages) */
    uint64_t        cSharedPages;
    /** The number of pages that are actually shared between VMs.
     * (GMM::cDuplicatePages) */
    uint64_t        cDuplicatePages;
    /** The number of shared pages that have been left behind by
     * VMs not doing proper cleanups (GMM::cLeftBehindSharedPages). */
    uint64_t        cLeftBehindSharedPages;
    /** The number of current ballooned pages (GMM::cBalloonedPages). */
    uint64_t        cBalloonedPages;
    /** The number of allocation chunks (GMM::cChunks). */
    uint32_t        cChunks;
    /** The number of freed chunks ever (GMM::cFreedChunks). */
    uint32_t        cFreedChunks;
    /** The number of shareable modules (GMM::cShareableModules). */
    uint64_t        cShareableModules;
    /** Space reserved for later. */
    uint64_t        au64Reserved[2];

    /** Statistics for the specified VM. (Zero filled if not requested.) */
    GMMVMSTATS      VMStats;
} GMMSTATS;
/** Pointer to the GMM statistics. */
typedef GMMSTATS *PGMMSTATS;
/** Const pointer to the GMM statistics. */
typedef const GMMSTATS *PCGMMSTATS;


GMMR0DECL(int)  GMMR0Init(void);
GMMR0DECL(void) GMMR0Term(void);
GMMR0DECL(void) GMMR0InitPerVMData(PGVM pGVM);
GMMR0DECL(void) GMMR0CleanupVM(PGVM pGVM);
GMMR0DECL(int)  GMMR0InitialReservation(PGVM pGVM, VMCPUID idCpu, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages,
                                        GMMOCPOLICY enmPolicy, GMMPRIORITY enmPriority);
GMMR0DECL(int)  GMMR0UpdateReservation(PGVM pGVM, VMCPUID idCpu, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages);
GMMR0DECL(int)  GMMR0AllocateHandyPages(PGVM pGVM, VMCPUID idCpu, uint32_t cPagesToUpdate,
                                        uint32_t cPagesToAlloc, PGMMPAGEDESC paPages);
GMMR0DECL(int)  GMMR0AllocatePages(PGVM pGVM, VMCPUID idCpu, uint32_t cPages, PGMMPAGEDESC paPages, GMMACCOUNT enmAccount);
GMMR0DECL(int)  GMMR0AllocateLargePage(PGVM pGVM, VMCPUID idCpu, uint32_t cbPage, uint32_t *pIdPage, RTHCPHYS *pHCPhys);
GMMR0DECL(int)  GMMR0FreePages(PGVM pGVM, VMCPUID idCpu, uint32_t cPages, PGMMFREEPAGEDESC paPages, GMMACCOUNT enmAccount);
GMMR0DECL(int)  GMMR0FreeLargePage(PGVM pGVM, VMCPUID idCpu, uint32_t idPage);
GMMR0DECL(int)  GMMR0BalloonedPages(PGVM pGVM, VMCPUID idCpu, GMMBALLOONACTION enmAction, uint32_t cBalloonedPages);
GMMR0DECL(int)  GMMR0MapUnmapChunk(PGVM pGVM, uint32_t idChunkMap, uint32_t idChunkUnmap, PRTR3PTR ppvR3);
GMMR0DECL(int)  GMMR0SeedChunk(PGVM pGVM, VMCPUID idCpu, RTR3PTR pvR3);
GMMR0DECL(int)  GMMR0PageIdToVirt(PGVM pGVM, uint32_t idPage, void **ppv);
GMMR0DECL(int)  GMMR0RegisterSharedModule(PGVM pGVM, VMCPUID idCpu, VBOXOSFAMILY enmGuestOS, char *pszModuleName,
                                          char *pszVersion, RTGCPTR GCBaseAddr, uint32_t cbModule, uint32_t cRegions,
                                          struct VMMDEVSHAREDREGIONDESC const *paRegions);
GMMR0DECL(int)  GMMR0UnregisterSharedModule(PGVM pGVM, VMCPUID idCpu, char *pszModuleName, char *pszVersion,
                                            RTGCPTR GCBaseAddr, uint32_t cbModule);
GMMR0DECL(int)  GMMR0UnregisterAllSharedModules(PGVM pGVM, VMCPUID idCpu);
GMMR0DECL(int)  GMMR0CheckSharedModules(PGVM pGVM, VMCPUID idCpu);
GMMR0DECL(int)  GMMR0ResetSharedModules(PGVM pGVM, VMCPUID idCpu);
GMMR0DECL(int)  GMMR0QueryStatistics(PGMMSTATS pStats, PSUPDRVSESSION pSession);
GMMR0DECL(int)  GMMR0ResetStatistics(PCGMMSTATS pStats, PSUPDRVSESSION pSession);

/**
 * Request buffer for GMMR0InitialReservationReq / VMMR0_DO_GMM_INITIAL_RESERVATION.
 * @see GMMR0InitialReservation
 */
typedef struct GMMINITIALRESERVATIONREQ
{
    /** The header. */
    SUPVMMR0REQHDR  Hdr;
    uint64_t        cBasePages;     /**< @see GMMR0InitialReservation */
    uint32_t        cShadowPages;   /**< @see GMMR0InitialReservation */
    uint32_t        cFixedPages;    /**< @see GMMR0InitialReservation */
    GMMOCPOLICY     enmPolicy;      /**< @see GMMR0InitialReservation */
    GMMPRIORITY     enmPriority;    /**< @see GMMR0InitialReservation */
} GMMINITIALRESERVATIONREQ;
/** Pointer to a GMMR0InitialReservationReq / VMMR0_DO_GMM_INITIAL_RESERVATION request buffer. */
typedef GMMINITIALRESERVATIONREQ *PGMMINITIALRESERVATIONREQ;

GMMR0DECL(int)  GMMR0InitialReservationReq(PGVM pGVM, VMCPUID idCpu, PGMMINITIALRESERVATIONREQ pReq);

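/*
 * Illustrative sketch of filling an initial-reservation request buffer
 * (hypothetical helper; assumes the usual SUPVMMR0REQHDR magic/size header from
 * VBox/sup.h). The filled request would then be handed to
 * GMMR0InitialReservationReq via the VMMR0 request path.
 */
#if 0 /* example only */
static void gmmExampleInitReservationReq(PGMMINITIALRESERVATIONREQ pReq,
                                         uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages)
{
    pReq->Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;  /* request header magic */
    pReq->Hdr.cbReq    = sizeof(*pReq);         /* total request size */
    pReq->cBasePages   = cBasePages;            /* guest RAM + ROMs + handy pages */
    pReq->cShadowPages = cShadowPages;          /* shadow paging pool */
    pReq->cFixedPages  = cFixedPages;           /* MMIO2, hyper heap, etc. */
    pReq->enmPolicy    = GMMOCPOLICY_NO_OC;     /* fully backed, no over-commit */
    pReq->enmPriority  = GMMPRIORITY_NORMAL;
}
#endif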

/**
 * Request buffer for GMMR0UpdateReservationReq / VMMR0_DO_GMM_UPDATE_RESERVATION.
 * @see GMMR0UpdateReservation
 */
typedef struct GMMUPDATERESERVATIONREQ
{
    /** The header. */
    SUPVMMR0REQHDR  Hdr;
    uint64_t        cBasePages;     /**< @see GMMR0UpdateReservation */
    uint32_t        cShadowPages;   /**< @see GMMR0UpdateReservation */
    uint32_t        cFixedPages;    /**< @see GMMR0UpdateReservation */
} GMMUPDATERESERVATIONREQ;
/** Pointer to a GMMR0UpdateReservationReq / VMMR0_DO_GMM_UPDATE_RESERVATION request buffer. */
typedef GMMUPDATERESERVATIONREQ *PGMMUPDATERESERVATIONREQ;

GMMR0DECL(int)  GMMR0UpdateReservationReq(PGVM pGVM, VMCPUID idCpu, PGMMUPDATERESERVATIONREQ pReq);


/**
 * Request buffer for GMMR0AllocatePagesReq / VMMR0_DO_GMM_ALLOCATE_PAGES.
 * @see GMMR0AllocatePages.
 */
typedef struct GMMALLOCATEPAGESREQ
{
    /** The header. */
    SUPVMMR0REQHDR  Hdr;
    /** The account to charge the allocation to. */
    GMMACCOUNT      enmAccount;
    /** The number of pages to allocate. */
    uint32_t        cPages;
    /** Array of page descriptors. */
    GMMPAGEDESC     aPages[1];
} GMMALLOCATEPAGESREQ;
/** Pointer to a GMMR0AllocatePagesReq / VMMR0_DO_GMM_ALLOCATE_PAGES request buffer. */
typedef GMMALLOCATEPAGESREQ *PGMMALLOCATEPAGESREQ;

GMMR0DECL(int)  GMMR0AllocatePagesReq(PGVM pGVM, VMCPUID idCpu, PGMMALLOCATEPAGESREQ pReq);

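/*
 * Illustrative sketch of sizing a variable-length request (hypothetical helper,
 * assuming RT_UOFFSETOF and RTMemAllocZ from IPRT): because the descriptor array
 * is declared as aPages[1], a request carrying cPages descriptors is allocated as
 * the offset of aPages plus cPages descriptors. GMMFREEPAGESREQ below follows the
 * same pattern, and in ring-3 the GMMR3AllocatePagesPrepare/Perform/Cleanup
 * wrappers normally do this sizing for you.
 */
#if 0 /* example only */
# include <iprt/mem.h>
static PGMMALLOCATEPAGESREQ gmmExampleAllocReqAlloc(uint32_t cPages, GMMACCOUNT enmAccount)
{
    size_t const cbReq = RT_UOFFSETOF(GMMALLOCATEPAGESREQ, aPages) + cPages * sizeof(GMMPAGEDESC);
    PGMMALLOCATEPAGESREQ pReq = (PGMMALLOCATEPAGESREQ)RTMemAllocZ(cbReq);
    if (pReq)
    {
        pReq->Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
        pReq->Hdr.cbReq    = (uint32_t)cbReq;
        pReq->enmAccount   = enmAccount;
        pReq->cPages       = cPages;
        for (uint32_t i = 0; i < cPages; i++)       /* prime descriptors per the GMMPAGEDESC @input rules */
        {
            pReq->aPages[i].HCPhysGCPhys = NIL_RTHCPHYS;
            pReq->aPages[i].idPage       = NIL_GMM_PAGEID;
            pReq->aPages[i].idSharedPage = NIL_GMM_PAGEID;
        }
    }
    return pReq;
}
#endif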

/**
 * Request buffer for GMMR0FreePagesReq / VMMR0_DO_GMM_FREE_PAGES.
 * @see GMMR0FreePages.
 */
typedef struct GMMFREEPAGESREQ
{
    /** The header. */
    SUPVMMR0REQHDR  Hdr;
    /** The account this relates to. */
    GMMACCOUNT      enmAccount;
    /** The number of pages to free. */
    uint32_t        cPages;
    /** Array of free page descriptors. */
    GMMFREEPAGEDESC aPages[1];
} GMMFREEPAGESREQ;
/** Pointer to a GMMR0FreePagesReq / VMMR0_DO_GMM_FREE_PAGES request buffer. */
typedef GMMFREEPAGESREQ *PGMMFREEPAGESREQ;

GMMR0DECL(int)  GMMR0FreePagesReq(PGVM pGVM, VMCPUID idCpu, PGMMFREEPAGESREQ pReq);

/**
 * Request buffer for GMMR0BalloonedPagesReq / VMMR0_DO_GMM_BALLOONED_PAGES.
 * @see GMMR0BalloonedPages.
 */
typedef struct GMMBALLOONEDPAGESREQ
{
    /** The header. */
    SUPVMMR0REQHDR      Hdr;
    /** The number of ballooned pages. */
    uint32_t            cBalloonedPages;
    /** Inflate or deflate the balloon. */
    GMMBALLOONACTION    enmAction;
} GMMBALLOONEDPAGESREQ;
/** Pointer to a GMMR0BalloonedPagesReq / VMMR0_DO_GMM_BALLOONED_PAGES request buffer. */
typedef GMMBALLOONEDPAGESREQ *PGMMBALLOONEDPAGESREQ;

GMMR0DECL(int)  GMMR0BalloonedPagesReq(PGVM pGVM, VMCPUID idCpu, PGMMBALLOONEDPAGESREQ pReq);

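/*
 * Illustrative sketch (hypothetical helper): reporting that the balloon has been
 * inflated by cPages pages. From ring-3 the GMMR3BalloonedPages wrapper declared
 * further down builds and submits the equivalent request.
 */
#if 0 /* example only */
static void gmmExampleFillBalloonReq(PGMMBALLOONEDPAGESREQ pReq, uint32_t cPages)
{
    pReq->Hdr.u32Magic    = SUPVMMR0REQHDR_MAGIC;
    pReq->Hdr.cbReq       = sizeof(*pReq);
    pReq->enmAction       = GMMBALLOONACTION_INFLATE;   /* or _DEFLATE / _RESET */
    pReq->cBalloonedPages = cPages;
}
#endif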

/**
 * Request buffer for GMMR0QueryHypervisorMemoryStatsReq / VMMR0_DO_GMM_QUERY_VMM_MEM_STATS.
 * @see GMMR0QueryHypervisorMemoryStatsReq.
 */
typedef struct GMMMEMSTATSREQ
{
    /** The header. */
    SUPVMMR0REQHDR  Hdr;
    /** The number of allocated pages (out). */
    uint64_t        cAllocPages;
    /** The number of free pages (out). */
    uint64_t        cFreePages;
    /** The number of ballooned pages (out). */
    uint64_t        cBalloonedPages;
    /** The number of shared pages (out). */
    uint64_t        cSharedPages;
    /** Maximum nr of pages (out). */
    uint64_t        cMaxPages;
} GMMMEMSTATSREQ;
/** Pointer to a GMMR0QueryHypervisorMemoryStatsReq / VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS request buffer. */
typedef GMMMEMSTATSREQ *PGMMMEMSTATSREQ;

GMMR0DECL(int)  GMMR0QueryHypervisorMemoryStatsReq(PGMMMEMSTATSREQ pReq);
GMMR0DECL(int)  GMMR0QueryMemoryStatsReq(PGVM pGVM, VMCPUID idCpu, PGMMMEMSTATSREQ pReq);

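/*
 * Illustrative sketch (hypothetical helper): querying the global GMM memory
 * statistics in ring-0. All counter fields are outputs; only the header needs
 * initializing. Ring-3 code would normally use GMMR3QueryHypervisorMemoryStats
 * instead (declared further down).
 */
#if 0 /* example only */
static int gmmExampleQueryHypervisorStats(uint64_t *pcAllocPages, uint64_t *pcFreePages)
{
    GMMMEMSTATSREQ Req;
    Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
    Req.Hdr.cbReq    = sizeof(Req);
    int rc = GMMR0QueryHypervisorMemoryStatsReq(&Req);
    if (RT_SUCCESS(rc))
    {
        *pcAllocPages = Req.cAllocPages;    /* pages currently allocated by the GMM */
        *pcFreePages  = Req.cFreePages;     /* pages sitting free in allocation chunks */
    }
    return rc;
}
#endif
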
/**
 * Request buffer for GMMR0MapUnmapChunkReq / VMMR0_DO_GMM_MAP_UNMAP_CHUNK.
 * @see GMMR0MapUnmapChunk
 */
typedef struct GMMMAPUNMAPCHUNKREQ
{
    /** The header. */
    SUPVMMR0REQHDR  Hdr;
    /** The chunk to map, NIL_GMM_CHUNKID if unmap only. (IN) */
    uint32_t        idChunkMap;
    /** The chunk to unmap, NIL_GMM_CHUNKID if map only. (IN) */
    uint32_t        idChunkUnmap;
    /** Where the mapping address is returned. (OUT) */
    RTR3PTR         pvR3;
} GMMMAPUNMAPCHUNKREQ;
/** Pointer to a GMMR0MapUnmapChunkReq / VMMR0_DO_GMM_MAP_UNMAP_CHUNK request buffer. */
typedef GMMMAPUNMAPCHUNKREQ *PGMMMAPUNMAPCHUNKREQ;

GMMR0DECL(int)  GMMR0MapUnmapChunkReq(PGVM pGVM, PGMMMAPUNMAPCHUNKREQ pReq);

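/*
 * Illustrative sketch (hypothetical helper): a single request can map one 2 MB
 * chunk into ring-3 and unmap another in the same call; either side can be
 * skipped by passing NIL_GMM_CHUNKID. Ring-3 callers would normally go through
 * GMMR3MapUnmapChunk (declared further down).
 */
#if 0 /* example only */
static int gmmExampleRemapChunk(PGVM pGVM, uint32_t idChunkNew, uint32_t idChunkOld, RTR3PTR *ppvR3)
{
    GMMMAPUNMAPCHUNKREQ Req;
    Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
    Req.Hdr.cbReq    = sizeof(Req);
    Req.idChunkMap   = idChunkNew;          /* chunk to map; NIL_GMM_CHUNKID to unmap only */
    Req.idChunkUnmap = idChunkOld;          /* chunk to unmap; NIL_GMM_CHUNKID to map only */
    Req.pvR3         = NIL_RTR3PTR;
    int rc = GMMR0MapUnmapChunkReq(pGVM, &Req);
    if (RT_SUCCESS(rc))
        *ppvR3 = Req.pvR3;                  /* ring-3 address of the newly mapped chunk */
    return rc;
}
#endif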

/**
 * Request buffer for GMMR0FreeLargePageReq / VMMR0_DO_GMM_FREE_LARGE_PAGE.
 * @see GMMR0FreeLargePage.
 */
typedef struct GMMFREELARGEPAGEREQ
{
    /** The header. */
    SUPVMMR0REQHDR  Hdr;
    /** The Page ID. */
    uint32_t        idPage;
} GMMFREELARGEPAGEREQ;
/** Pointer to a GMMR0FreeLargePageReq / VMMR0_DO_GMM_FREE_LARGE_PAGE request buffer. */
typedef GMMFREELARGEPAGEREQ *PGMMFREELARGEPAGEREQ;

GMMR0DECL(int)  GMMR0FreeLargePageReq(PGVM pGVM, VMCPUID idCpu, PGMMFREELARGEPAGEREQ pReq);

/** Maximum length of the shared module name string, terminator included. */
#define GMM_SHARED_MODULE_MAX_NAME_STRING       128
/** Maximum length of the shared module version string, terminator included. */
#define GMM_SHARED_MODULE_MAX_VERSION_STRING    16

/**
 * Request buffer for GMMR0RegisterSharedModuleReq / VMMR0_DO_GMM_REGISTER_SHARED_MODULE.
 * @see GMMR0RegisterSharedModule.
 */
typedef struct GMMREGISTERSHAREDMODULEREQ
{
    /** The header. */
    SUPVMMR0REQHDR          Hdr;
    /** Shared module size. */
    uint32_t                cbModule;
    /** Number of included region descriptors */
    uint32_t                cRegions;
    /** Base address of the shared module. */
    RTGCPTR64               GCBaseAddr;
    /** Guest OS type. */
    VBOXOSFAMILY            enmGuestOS;
    /** return code. */
    uint32_t                rc;
    /** Module name */
    char                    szName[GMM_SHARED_MODULE_MAX_NAME_STRING];
    /** Module version */
    char                    szVersion[GMM_SHARED_MODULE_MAX_VERSION_STRING];
    /** Shared region descriptor(s). */
    VMMDEVSHAREDREGIONDESC  aRegions[1];
} GMMREGISTERSHAREDMODULEREQ;
/** Pointer to a GMMR0RegisterSharedModuleReq / VMMR0_DO_GMM_REGISTER_SHARED_MODULE request buffer. */
typedef GMMREGISTERSHAREDMODULEREQ *PGMMREGISTERSHAREDMODULEREQ;

GMMR0DECL(int)  GMMR0RegisterSharedModuleReq(PGVM pGVM, VMCPUID idCpu, PGMMREGISTERSHAREDMODULEREQ pReq);

/**
 * Shared region descriptor
 */
typedef struct GMMSHAREDREGIONDESC
{
    /** The page offset where the region starts. */
    uint32_t            off;
    /** Region size - adjusted by the region offset and rounded up to a
     *  page. */
    uint32_t            cb;
    /** Pointer to physical GMM page ID array. */
    uint32_t           *paidPages;
} GMMSHAREDREGIONDESC;
/** Pointer to a GMMSHAREDREGIONDESC. */
typedef GMMSHAREDREGIONDESC *PGMMSHAREDREGIONDESC;


/**
 * Shared module registration info (global)
 */
typedef struct GMMSHAREDMODULE
{
    /** Tree node (keyed by a hash of name & version). */
    AVLLU32NODECORE         Core;
    /** Shared module size. */
    uint32_t                cbModule;
    /** Number of included region descriptors */
    uint32_t                cRegions;
    /** Number of users (VMs). */
    uint32_t                cUsers;
    /** Guest OS family type. */
    VBOXOSFAMILY            enmGuestOS;
    /** Module name */
    char                    szName[GMM_SHARED_MODULE_MAX_NAME_STRING];
    /** Module version */
    char                    szVersion[GMM_SHARED_MODULE_MAX_VERSION_STRING];
    /** Shared region descriptor(s). */
    GMMSHAREDREGIONDESC     aRegions[1];
} GMMSHAREDMODULE;
/** Pointer to a GMMSHAREDMODULE. */
typedef GMMSHAREDMODULE *PGMMSHAREDMODULE;

/**
 * Page descriptor for GMMR0SharedModuleCheckRange
 */
typedef struct GMMSHAREDPAGEDESC
{
    /** HC Physical address (in/out) */
    RTHCPHYS            HCPhys;
    /** GC Physical address (in) */
    RTGCPHYS            GCPhys;
    /** GMM page id. (in/out) */
    uint32_t            idPage;
    /** CRC32 of the page in strict builds (0 if page not available).
     * In non-strict builds this serves as structure alignment. */
    uint32_t            u32StrictChecksum;
} GMMSHAREDPAGEDESC;
/** Pointer to a GMMSHAREDPAGEDESC. */
typedef GMMSHAREDPAGEDESC *PGMMSHAREDPAGEDESC;

GMMR0DECL(int)  GMMR0SharedModuleCheckPage(PGVM pGVM, PGMMSHAREDMODULE pModule, uint32_t idxRegion, uint32_t idxPage,
                                           PGMMSHAREDPAGEDESC pPageDesc);

/**
 * Request buffer for GMMR0UnregisterSharedModuleReq / VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE.
 * @see GMMR0UnregisterSharedModule.
 */
typedef struct GMMUNREGISTERSHAREDMODULEREQ
{
    /** The header. */
    SUPVMMR0REQHDR  Hdr;
    /** Shared module size. */
    uint32_t        cbModule;
    /** Align at 8 byte boundary. */
    uint32_t        u32Alignment;
    /** Base address of the shared module. */
    RTGCPTR64       GCBaseAddr;
    /** Module name */
    char            szName[GMM_SHARED_MODULE_MAX_NAME_STRING];
    /** Module version */
    char            szVersion[GMM_SHARED_MODULE_MAX_VERSION_STRING];
} GMMUNREGISTERSHAREDMODULEREQ;
/** Pointer to a GMMR0UnregisterSharedModuleReq / VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE request buffer. */
typedef GMMUNREGISTERSHAREDMODULEREQ *PGMMUNREGISTERSHAREDMODULEREQ;

GMMR0DECL(int)  GMMR0UnregisterSharedModuleReq(PGVM pGVM, VMCPUID idCpu, PGMMUNREGISTERSHAREDMODULEREQ pReq);

#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
/**
 * Request buffer for GMMR0FindDuplicatePageReq / VMMR0_DO_GMM_FIND_DUPLICATE_PAGE.
 * @see GMMR0FindDuplicatePage.
 */
typedef struct GMMFINDDUPLICATEPAGEREQ
{
    /** The header. */
    SUPVMMR0REQHDR  Hdr;
    /** Page id. */
    uint32_t        idPage;
    /** Duplicate flag (out) */
    bool            fDuplicate;
} GMMFINDDUPLICATEPAGEREQ;
/** Pointer to a GMMR0FindDuplicatePageReq / VMMR0_DO_GMM_FIND_DUPLICATE_PAGE request buffer. */
typedef GMMFINDDUPLICATEPAGEREQ *PGMMFINDDUPLICATEPAGEREQ;

GMMR0DECL(int)  GMMR0FindDuplicatePageReq(PGVM pGVM, PGMMFINDDUPLICATEPAGEREQ pReq);
#endif /* VBOX_STRICT && HC_ARCH_BITS == 64 */


/**
 * Request buffer for GMMR0QueryStatisticsReq / VMMR0_DO_GMM_QUERY_STATISTICS.
 * @see GMMR0QueryStatistics.
 */
typedef struct GMMQUERYSTATISTICSSREQ
{
    /** The header. */
    SUPVMMR0REQHDR  Hdr;
    /** The support driver session. */
    PSUPDRVSESSION  pSession;
    /** The statistics. */
    GMMSTATS        Stats;
} GMMQUERYSTATISTICSSREQ;
/** Pointer to a GMMR0QueryStatisticsReq / VMMR0_DO_GMM_QUERY_STATISTICS
 *  request buffer. */
typedef GMMQUERYSTATISTICSSREQ *PGMMQUERYSTATISTICSSREQ;

GMMR0DECL(int)  GMMR0QueryStatisticsReq(PGVM pGVM, PGMMQUERYSTATISTICSSREQ pReq);


/**
 * Request buffer for GMMR0ResetStatisticsReq / VMMR0_DO_GMM_RESET_STATISTICS.
 * @see GMMR0ResetStatistics.
 */
typedef struct GMMRESETSTATISTICSSREQ
{
    /** The header. */
    SUPVMMR0REQHDR  Hdr;
    /** The support driver session. */
    PSUPDRVSESSION  pSession;
    /** The statistics to reset.
     * Any non-zero entry will be reset (if permitted). */
    GMMSTATS        Stats;
} GMMRESETSTATISTICSSREQ;
/** Pointer to a GMMR0ResetStatisticsReq / VMMR0_DO_GMM_RESET_STATISTICS
 *  request buffer. */
typedef GMMRESETSTATISTICSSREQ *PGMMRESETSTATISTICSSREQ;

GMMR0DECL(int)  GMMR0ResetStatisticsReq(PGVM pGVM, PGMMRESETSTATISTICSSREQ pReq);



#ifdef IN_RING3
/** @defgroup grp_gmm_r3  The Global Memory Manager Ring-3 API Wrappers
 * @{
 */
GMMR3DECL(int)  GMMR3InitialReservation(PVM pVM, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages,
                                        GMMOCPOLICY enmPolicy, GMMPRIORITY enmPriority);
GMMR3DECL(int)  GMMR3UpdateReservation(PVM pVM, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages);
GMMR3DECL(int)  GMMR3AllocatePagesPrepare(PVM pVM, PGMMALLOCATEPAGESREQ *ppReq, uint32_t cPages, GMMACCOUNT enmAccount);
GMMR3DECL(int)  GMMR3AllocatePagesPerform(PVM pVM, PGMMALLOCATEPAGESREQ pReq);
GMMR3DECL(void) GMMR3AllocatePagesCleanup(PGMMALLOCATEPAGESREQ pReq);
GMMR3DECL(int)  GMMR3FreePagesPrepare(PVM pVM, PGMMFREEPAGESREQ *ppReq, uint32_t cPages, GMMACCOUNT enmAccount);
GMMR3DECL(void) GMMR3FreePagesRePrep(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t cPages, GMMACCOUNT enmAccount);
GMMR3DECL(int)  GMMR3FreePagesPerform(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t cActualPages);
GMMR3DECL(void) GMMR3FreePagesCleanup(PGMMFREEPAGESREQ pReq);
GMMR3DECL(void) GMMR3FreeAllocatedPages(PVM pVM, GMMALLOCATEPAGESREQ const *pAllocReq);
GMMR3DECL(int)  GMMR3AllocateLargePage(PVM pVM, uint32_t cbPage);
GMMR3DECL(int)  GMMR3FreeLargePage(PVM pVM, uint32_t idPage);
GMMR3DECL(int)  GMMR3MapUnmapChunk(PVM pVM, uint32_t idChunkMap, uint32_t idChunkUnmap, PRTR3PTR ppvR3);
GMMR3DECL(int)  GMMR3SeedChunk(PVM pVM, RTR3PTR pvR3);
GMMR3DECL(int)  GMMR3QueryHypervisorMemoryStats(PVM pVM, uint64_t *pcTotalAllocPages, uint64_t *pcTotalFreePages,
                                                uint64_t *pcTotalBalloonPages, uint64_t *puTotalBalloonSize);
GMMR3DECL(int)  GMMR3QueryMemoryStats(PVM pVM, uint64_t *pcAllocPages, uint64_t *pcMaxPages, uint64_t *pcBalloonPages);
GMMR3DECL(int)  GMMR3BalloonedPages(PVM pVM, GMMBALLOONACTION enmAction, uint32_t cBalloonedPages);
GMMR3DECL(int)  GMMR3RegisterSharedModule(PVM pVM, PGMMREGISTERSHAREDMODULEREQ pReq);
GMMR3DECL(int)  GMMR3UnregisterSharedModule(PVM pVM, PGMMUNREGISTERSHAREDMODULEREQ pReq);
GMMR3DECL(int)  GMMR3CheckSharedModules(PVM pVM);
GMMR3DECL(int)  GMMR3ResetSharedModules(PVM pVM);

# if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
GMMR3DECL(bool) GMMR3IsDuplicatePage(PVM pVM, uint32_t idPage);
# endif

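/*
 * Illustrative sketch (hypothetical helper) of the ring-3 allocation flow:
 * Prepare builds a GMMALLOCATEPAGESREQ sized for cPages, the caller fills in
 * the page descriptors, Perform submits the request to ring-0, and Cleanup
 * releases the request buffer again.
 */
#if 0 /* example only */
static int gmmExampleAllocateBasePages(PVM pVM, uint32_t cPages)
{
    PGMMALLOCATEPAGESREQ pReq = NULL;
    int rc = GMMR3AllocatePagesPrepare(pVM, &pReq, cPages, GMMACCOUNT_BASE);
    if (RT_SUCCESS(rc))
    {
        for (uint32_t i = 0; i < cPages; i++)
        {
            /* No particular guest address for these pages; see GMMPAGEDESC. */
            pReq->aPages[i].HCPhysGCPhys = NIL_RTHCPHYS;
            pReq->aPages[i].idPage       = NIL_GMM_PAGEID;
            pReq->aPages[i].idSharedPage = NIL_GMM_PAGEID;
        }
        rc = GMMR3AllocatePagesPerform(pVM, pReq);
        /* On success each aPages[i].idPage / HCPhysGCPhys now describes a new page. */
        GMMR3AllocatePagesCleanup(pReq);
    }
    return rc;
}
#endif
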
/** @} */
#endif /* IN_RING3 */

/** @} */

RT_C_DECLS_END

#endif /* !VBOX_INCLUDED_vmm_gmm_h */