/** @file
 * GMM - The Global Memory Manager.
 */

/*
 * Copyright (C) 2007-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included
 * in the VirtualBox distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 *
 * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
 */

#ifndef VBOX_INCLUDED_vmm_gmm_h
#define VBOX_INCLUDED_vmm_gmm_h
#ifndef RT_WITHOUT_PRAGMA_ONCE
# pragma once
#endif

#include <VBox/vmm/gvmm.h>
#include <VBox/sup.h>
#include <VBox/param.h>
#include <VBox/ostypes.h>
#include <iprt/avl.h>


RT_C_DECLS_BEGIN

/** @defgroup grp_gmm GMM - The Global Memory Manager
 * @ingroup grp_vmm
 * @{
 */

/** The chunk shift. (2^21 = 2 MB) */
#define GMM_CHUNK_SHIFT                 21
/** The allocation chunk size. */
#define GMM_CHUNK_SIZE                  (1U << GMM_CHUNK_SHIFT)
/** The allocation chunk size in (guest) pages. */
#define GMM_CHUNK_NUM_PAGES             (1U << (GMM_CHUNK_SHIFT - GUEST_PAGE_SHIFT))
/** The shift factor for converting a page id into a chunk id. */
#define GMM_CHUNKID_SHIFT               (GMM_CHUNK_SHIFT - GUEST_PAGE_SHIFT)
/** The last valid Chunk ID value. */
#define GMM_CHUNKID_LAST                (GMM_PAGEID_LAST >> GMM_CHUNKID_SHIFT)
/** The last valid Page ID value. */
#define GMM_PAGEID_LAST                 UINT32_C(0xfffffff0)
/** Mask out the page index from the Page ID. */
#define GMM_PAGEID_IDX_MASK             ((1U << GMM_CHUNKID_SHIFT) - 1)
/** The NIL Chunk ID value. */
#define NIL_GMM_CHUNKID                 0
/** The NIL Page ID value. */
#define NIL_GMM_PAGEID                  0

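/* Illustrative sketch, not part of the API: how the macros above relate a Page ID
 * to its chunk. Assuming 4 KiB guest pages (GUEST_PAGE_SHIFT = 12), a 2 MB chunk
 * holds 512 pages, GMM_CHUNKID_SHIFT is 9 and GMM_PAGEID_IDX_MASK is 0x1ff:
 * @code
 *      uint32_t const idPage  = UINT32_C(0x00012345);
 *      uint32_t const idChunk = idPage >> GMM_CHUNKID_SHIFT;   // 0x91  - the owning chunk
 *      uint32_t const iPage   = idPage &  GMM_PAGEID_IDX_MASK; // 0x145 - the page within the chunk
 * @endcode
 */
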
#if 0 /* wrong - these are guest page pfns and not page ids! */
/** Special Page ID used by unassigned pages. */
#define GMM_PAGEID_UNASSIGNED           0x0fffffffU
/** Special Page ID used by unsharable pages.
 * Like MMIO2, shadow and heap. This is for later, obviously. */
#define GMM_PAGEID_UNSHARABLE           0x0ffffffeU
/** The end of the valid Page IDs. This is the first special one. */
#define GMM_PAGEID_END                  0x0ffffff0U
#endif


/** @def GMM_GCPHYS_LAST
 * The last valid guest physical address as it applies to GMM pages.
 *
 * This must reflect the constraints imposed by the RTGCPHYS type and
 * the guest page frame number used internally in GMMPAGE.
 *
 * @note This corresponds to GMM_PAGE_PFN_LAST. */
#if HC_ARCH_BITS == 64
# define GMM_GCPHYS_LAST            UINT64_C(0x00000fffffff0000)    /* 2^44 (16TB) - 0x10000 */
#else
# define GMM_GCPHYS_LAST            UINT64_C(0x0000000fffff0000)    /* 2^36 (64GB) - 0x10000 */
#endif

/**
 * Over-commitment policy.
 */
typedef enum GMMOCPOLICY
{
    /** The usual invalid 0 value. */
    GMMOCPOLICY_INVALID = 0,
    /** No over-commitment, fully backed.
     * The GMM guarantees that it will be able to allocate all of the
     * guest RAM for a VM with this OC policy. */
    GMMOCPOLICY_NO_OC,
    /** to-be-determined. */
    GMMOCPOLICY_TBD,
    /** The end of the valid policy range. */
    GMMOCPOLICY_END,
    /** The usual 32-bit hack. */
    GMMOCPOLICY_32BIT_HACK = 0x7fffffff
} GMMOCPOLICY;

/**
 * VM / Memory priority.
 */
typedef enum GMMPRIORITY
{
    /** The usual invalid 0 value. */
    GMMPRIORITY_INVALID = 0,
    /** High.
     * When ballooning, ask these VMs last.
     * When running out of memory, try not to interrupt these VMs. */
    GMMPRIORITY_HIGH,
    /** Normal.
     * When ballooning, don't wait to ask these.
     * When running out of memory, pause, save and/or kill these VMs. */
    GMMPRIORITY_NORMAL,
    /** Low.
     * When ballooning, maximize these first.
     * When running out of memory, save or kill these VMs. */
    GMMPRIORITY_LOW,
    /** The end of the valid priority range. */
    GMMPRIORITY_END,
    /** The custom 32-bit type blowup. */
    GMMPRIORITY_32BIT_HACK = 0x7fffffff
} GMMPRIORITY;


/**
 * GMM Memory Accounts.
 */
typedef enum GMMACCOUNT
{
    /** The customary invalid zero entry. */
    GMMACCOUNT_INVALID = 0,
    /** Account with the base allocations. */
    GMMACCOUNT_BASE,
    /** Account with the shadow allocations. */
    GMMACCOUNT_SHADOW,
    /** Account with the fixed allocations. */
    GMMACCOUNT_FIXED,
    /** The end of the valid values. */
    GMMACCOUNT_END,
    /** The usual 32-bit value to finish it off. */
    GMMACCOUNT_32BIT_HACK = 0x7fffffff
} GMMACCOUNT;


/**
 * Balloon actions.
 */
typedef enum
{
    /** Invalid zero entry. */
    GMMBALLOONACTION_INVALID = 0,
    /** Inflate the balloon. */
    GMMBALLOONACTION_INFLATE,
    /** Deflate the balloon. */
    GMMBALLOONACTION_DEFLATE,
    /** Puncture the balloon because of VM reset. */
    GMMBALLOONACTION_RESET,
    /** End of the valid actions. */
    GMMBALLOONACTION_END,
    /** hack forcing the size of the enum to 32-bits. */
    GMMBALLOONACTION_MAKE_32BIT_HACK = 0x7fffffff
} GMMBALLOONACTION;


/**
 * A page descriptor for use when freeing pages.
 * See GMMR0FreePages, GMMR0BalloonedPages.
 */
typedef struct GMMFREEPAGEDESC
{
    /** The Page ID of the page to be freed. */
    uint32_t        idPage;
} GMMFREEPAGEDESC;
/** Pointer to a page descriptor for freeing pages. */
typedef GMMFREEPAGEDESC *PGMMFREEPAGEDESC;


/**
 * A page descriptor for use when updating and allocating pages.
 *
 * This is a bit complicated because we want to do as much as possible
 * with the same structure.
 */
typedef struct GMMPAGEDESC
{
    /** The physical address of the page.
     *
     * @input GMMR0AllocateHandyPages expects the guest physical address
     * to update the GMMPAGE structure with. Pass GMM_GCPHYS_UNSHAREABLE
     * when appropriate and NIL_GMMPAGEDESC_PHYS when the page wasn't used
     * for any specific guest address.
     *
     * GMMR0AllocatePages expects the guest physical address to put in
     * the GMMPAGE structure for the page it allocates for this entry.
     * Pass NIL_GMMPAGEDESC_PHYS and GMM_GCPHYS_UNSHAREABLE as above.
     *
     * @output The host physical address of the allocated page.
     * NIL_GMMPAGEDESC_PHYS on allocation failure.
     *
     * ASSUMES: sizeof(RTHCPHYS) >= sizeof(RTGCPHYS) and that physical addresses are
     * limited to 63 or fewer bits (52 by AMD64 arch spec).
     */
    RT_GCC_EXTENSION
    RTHCPHYS        HCPhysGCPhys : 63;
    /** Set if the memory was zeroed. */
    RT_GCC_EXTENSION
    RTHCPHYS        fZeroed : 1;

    /** The Page ID.
     *
     * @input GMMR0AllocateHandyPages expects the Page ID of the page to
     * update here. NIL_GMM_PAGEID means no page should be updated.
     *
     * GMMR0AllocatePages requires this to be initialized to
     * NIL_GMM_PAGEID currently.
     *
     * @output The ID of the page, NIL_GMM_PAGEID if the allocation failed.
     */
    uint32_t        idPage;

    /** The Page ID of the shared page that was replaced by this page.
     *
     * @input GMMR0AllocateHandyPages expects this to indicate a shared
     * page that has been replaced by this page and should have its
     * reference counter decremented and perhaps be freed up. Use
     * NIL_GMM_PAGEID if no shared page was involved.
     *
     * All other APIs expect NIL_GMM_PAGEID here.
     *
     * @output All APIs set this to NIL_GMM_PAGEID.
     */
    uint32_t        idSharedPage;
} GMMPAGEDESC;
AssertCompileSize(GMMPAGEDESC, 16);
/** Pointer to a page allocation. */
typedef GMMPAGEDESC *PGMMPAGEDESC;

/** Special NIL value for GMMPAGEDESC::HCPhysGCPhys. */
#define NIL_GMMPAGEDESC_PHYS        UINT64_C(0x7fffffffffffffff)

/** GMMPAGEDESC::HCPhysGCPhys value that indicates that the page is unsharable.
 * @note This corresponds to GMM_PAGE_PFN_UNSHAREABLE. */
#if HC_ARCH_BITS == 64
# define GMM_GCPHYS_UNSHAREABLE     UINT64_C(0x00000fffffff1000)
#else
# define GMM_GCPHYS_UNSHAREABLE     UINT64_C(0x0000000fffff1000)
#endif
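
/* Illustrative sketch of filling in a descriptor for GMMR0AllocatePages, following
 * the @input notes above; this is an assumed usage pattern rather than a normative
 * example, and GCPhysForThisPage is a hypothetical caller variable:
 * @code
 *      GMMPAGEDESC PageDesc;
 *      PageDesc.HCPhysGCPhys = GCPhysForThisPage;  // or GMM_GCPHYS_UNSHAREABLE / NIL_GMMPAGEDESC_PHYS
 *      PageDesc.fZeroed      = 0;
 *      PageDesc.idPage       = NIL_GMM_PAGEID;     // required by GMMR0AllocatePages
 *      PageDesc.idSharedPage = NIL_GMM_PAGEID;     // only GMMR0AllocateHandyPages uses this
 *      // On success the GMM returns the host physical address in HCPhysGCPhys,
 *      // sets fZeroed accordingly and stores the new Page ID in idPage.
 * @endcode
 */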


/**
 * The allocation sizes.
 */
typedef struct GMMVMSIZES
{
    /** The number of pages of base memory.
     * This is the sum of RAM, ROMs and handy pages. */
    uint64_t        cBasePages;
    /** The number of pages for the shadow pool. (Can be squeezed for memory.) */
    uint32_t        cShadowPages;
    /** The number of pages for fixed allocations like MMIO2 and the hyper heap. */
    uint32_t        cFixedPages;
} GMMVMSIZES;
/** Pointer to a GMMVMSIZES. */
typedef GMMVMSIZES *PGMMVMSIZES;


/**
 * GMM VM statistics.
 */
typedef struct GMMVMSTATS
{
    /** The reservations. */
    GMMVMSIZES      Reserved;
    /** The actual allocations.
     * This includes both private and shared page allocations. */
    GMMVMSIZES      Allocated;

    /** The current number of private pages. */
    uint64_t        cPrivatePages;
    /** The current number of shared pages. */
    uint64_t        cSharedPages;
    /** The current number of ballooned pages. */
    uint64_t        cBalloonedPages;
    /** The max number of pages that can be ballooned. */
    uint64_t        cMaxBalloonedPages;
    /** The number of pages we've currently requested the guest to give us.
     * This is 0 if no pages are currently requested. */
    uint64_t        cReqBalloonedPages;
    /** The number of pages the guest has given us in response to the request.
     * This is not reset when the request completes and may be used in later decisions. */
    uint64_t        cReqActuallyBalloonedPages;
    /** The number of pages we've currently requested the guest to take back. */
    uint64_t        cReqDeflatePages;
    /** The number of shareable modules tracked by this VM. */
    uint32_t        cShareableModules;

    /** The current over-commitment policy. */
    GMMOCPOLICY     enmPolicy;
    /** The VM priority for arbitrating VMs in low and out-of-memory situations.
     * Like which VMs to start squeezing first. */
    GMMPRIORITY     enmPriority;
    /** Whether ballooning is enabled or not. */
    bool            fBallooningEnabled;
    /** Whether shared paging is enabled or not. */
    bool            fSharedPagingEnabled;
    /** Whether the VM is allowed to allocate memory or not.
     * This is used when the reservation update request fails or when the VM has
     * been told to suspend/save/die in an out-of-memory case. */
    bool            fMayAllocate;
    /** Explicit alignment. */
    bool            afReserved[1];
} GMMVMSTATS;


/**
 * The GMM statistics.
 */
typedef struct GMMSTATS
{
    /** The maximum number of pages we're allowed to allocate
     * (GMM::cMaxPages). */
    uint64_t        cMaxPages;
    /** The number of pages that have been reserved (GMM::cReservedPages). */
    uint64_t        cReservedPages;
    /** The number of pages that we have over-committed in reservations
     * (GMM::cOverCommittedPages). */
    uint64_t        cOverCommittedPages;
    /** The number of actually allocated (committed if you like) pages
     * (GMM::cAllocatedPages). */
    uint64_t        cAllocatedPages;
    /** The number of pages that are shared. A subset of cAllocatedPages.
     * (GMM::cSharedPages) */
    uint64_t        cSharedPages;
    /** The number of pages that are actually shared between VMs.
     * (GMM::cDuplicatePages) */
    uint64_t        cDuplicatePages;
    /** The number of shared pages that have been left behind by
     * VMs not doing proper cleanups (GMM::cLeftBehindSharedPages). */
    uint64_t        cLeftBehindSharedPages;
    /** The number of current ballooned pages (GMM::cBalloonedPages). */
    uint64_t        cBalloonedPages;
    /** The number of allocation chunks (GMM::cChunks). */
    uint32_t        cChunks;
    /** The number of freed chunks ever (GMM::cFreedChunks). */
    uint32_t        cFreedChunks;
    /** The number of shareable modules (GMM::cShareableModules). */
    uint64_t        cShareableModules;
    /** The current chunk freeing generation used by the per-VM TLB validation (GMM::idFreeGeneration). */
    uint64_t        idFreeGeneration;
    /** Space reserved for later. */
    uint64_t        au64Reserved[1];

    /** Statistics for the specified VM. (Zero filled if not requested.) */
    GMMVMSTATS      VMStats;
} GMMSTATS;
/** Pointer to the GMM statistics. */
typedef GMMSTATS *PGMMSTATS;
/** Const pointer to the GMM statistics. */
typedef const GMMSTATS *PCGMMSTATS;

VMMR0_INT_DECL(int) GMMR0Init(void);
VMMR0_INT_DECL(void) GMMR0Term(void);
VMMR0_INT_DECL(int) GMMR0InitPerVMData(PGVM pGVM);
VMMR0_INT_DECL(void) GMMR0CleanupVM(PGVM pGVM);
VMMR0_INT_DECL(int) GMMR0InitialReservation(PGVM pGVM, VMCPUID idCpu, uint64_t cBasePages, uint32_t cShadowPages,
                                            uint32_t cFixedPages, GMMOCPOLICY enmPolicy, GMMPRIORITY enmPriority);
VMMR0_INT_DECL(int) GMMR0UpdateReservation(PGVM pGVM, VMCPUID idCpu, uint64_t cBasePages,
                                           uint32_t cShadowPages, uint32_t cFixedPages);
VMMR0_INT_DECL(int) GMMR0AllocateHandyPages(PGVM pGVM, VMCPUID idCpu, uint32_t cPagesToUpdate,
                                            uint32_t cPagesToAlloc, PGMMPAGEDESC paPages);
VMMR0_INT_DECL(int) GMMR0AllocatePages(PGVM pGVM, VMCPUID idCpu, uint32_t cPages, PGMMPAGEDESC paPages, GMMACCOUNT enmAccount);
VMMR0_INT_DECL(int) GMMR0AllocateLargePage(PGVM pGVM, VMCPUID idCpu, uint32_t cbPage, uint32_t *pIdPage, RTHCPHYS *pHCPhys);
VMMR0_INT_DECL(int) GMMR0FreePages(PGVM pGVM, VMCPUID idCpu, uint32_t cPages, PGMMFREEPAGEDESC paPages, GMMACCOUNT enmAccount);
VMMR0_INT_DECL(int) GMMR0FreeLargePage(PGVM pGVM, VMCPUID idCpu, uint32_t idPage);
VMMR0_INT_DECL(int) GMMR0BalloonedPages(PGVM pGVM, VMCPUID idCpu, GMMBALLOONACTION enmAction, uint32_t cBalloonedPages);
VMMR0_INT_DECL(int) GMMR0MapUnmapChunk(PGVM pGVM, uint32_t idChunkMap, uint32_t idChunkUnmap, PRTR3PTR ppvR3);
VMMR0_INT_DECL(int) GMMR0PageIdToVirt(PGVM pGVM, uint32_t idPage, void **ppv);
VMMR0_INT_DECL(int) GMMR0RegisterSharedModule(PGVM pGVM, VMCPUID idCpu, VBOXOSFAMILY enmGuestOS, char *pszModuleName,
                                              char *pszVersion, RTGCPTR GCBaseAddr, uint32_t cbModule, uint32_t cRegions,
                                              struct VMMDEVSHAREDREGIONDESC const *paRegions);
VMMR0_INT_DECL(int) GMMR0UnregisterSharedModule(PGVM pGVM, VMCPUID idCpu, char *pszModuleName, char *pszVersion,
                                                RTGCPTR GCBaseAddr, uint32_t cbModule);
VMMR0_INT_DECL(int) GMMR0UnregisterAllSharedModules(PGVM pGVM, VMCPUID idCpu);
VMMR0_INT_DECL(int) GMMR0CheckSharedModules(PGVM pGVM, VMCPUID idCpu);
VMMR0_INT_DECL(int) GMMR0ResetSharedModules(PGVM pGVM, VMCPUID idCpu);
VMMR0_INT_DECL(int) GMMR0QueryStatistics(PGMMSTATS pStats, PSUPDRVSESSION pSession);
VMMR0_INT_DECL(int) GMMR0ResetStatistics(PCGMMSTATS pStats, PSUPDRVSESSION pSession);

/**
 * Request buffer for GMMR0InitialReservationReq / VMMR0_DO_GMM_INITIAL_RESERVATION.
 * @see GMMR0InitialReservation
 */
typedef struct GMMINITIALRESERVATIONREQ
{
    /** The header. */
    SUPVMMR0REQHDR  Hdr;
    uint64_t        cBasePages;     /**< @see GMMR0InitialReservation */
    uint32_t        cShadowPages;   /**< @see GMMR0InitialReservation */
    uint32_t        cFixedPages;    /**< @see GMMR0InitialReservation */
    GMMOCPOLICY     enmPolicy;      /**< @see GMMR0InitialReservation */
    GMMPRIORITY     enmPriority;    /**< @see GMMR0InitialReservation */
} GMMINITIALRESERVATIONREQ;
/** Pointer to a GMMR0InitialReservationReq / VMMR0_DO_GMM_INITIAL_RESERVATION request buffer. */
typedef GMMINITIALRESERVATIONREQ *PGMMINITIALRESERVATIONREQ;

VMMR0_INT_DECL(int) GMMR0InitialReservationReq(PGVM pGVM, VMCPUID idCpu, PGMMINITIALRESERVATIONREQ pReq);


/**
 * Request buffer for GMMR0UpdateReservationReq / VMMR0_DO_GMM_UPDATE_RESERVATION.
 * @see GMMR0UpdateReservation
 */
typedef struct GMMUPDATERESERVATIONREQ
{
    /** The header. */
    SUPVMMR0REQHDR  Hdr;
    uint64_t        cBasePages;     /**< @see GMMR0UpdateReservation */
    uint32_t        cShadowPages;   /**< @see GMMR0UpdateReservation */
    uint32_t        cFixedPages;    /**< @see GMMR0UpdateReservation */
} GMMUPDATERESERVATIONREQ;
/** Pointer to a GMMR0UpdateReservationReq / VMMR0_DO_GMM_UPDATE_RESERVATION request buffer. */
typedef GMMUPDATERESERVATIONREQ *PGMMUPDATERESERVATIONREQ;

VMMR0_INT_DECL(int) GMMR0UpdateReservationReq(PGVM pGVM, VMCPUID idCpu, PGMMUPDATERESERVATIONREQ pReq);


/**
 * Request buffer for GMMR0AllocatePagesReq / VMMR0_DO_GMM_ALLOCATE_PAGES.
 * @see GMMR0AllocatePages.
 */
typedef struct GMMALLOCATEPAGESREQ
{
    /** The header. */
    SUPVMMR0REQHDR  Hdr;
    /** The account to charge the allocation to. */
    GMMACCOUNT      enmAccount;
    /** The number of pages to allocate. */
    uint32_t        cPages;
    /** Array of page descriptors. */
    GMMPAGEDESC     aPages[1];
} GMMALLOCATEPAGESREQ;
/** Pointer to a GMMR0AllocatePagesReq / VMMR0_DO_GMM_ALLOCATE_PAGES request buffer. */
typedef GMMALLOCATEPAGESREQ *PGMMALLOCATEPAGESREQ;

VMMR0_INT_DECL(int) GMMR0AllocatePagesReq(PGVM pGVM, VMCPUID idCpu, PGMMALLOCATEPAGESREQ pReq);
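
/* Illustrative sketch, an assumption rather than a normative example: the request
 * ends with a variable number of GMMPAGEDESC entries, so the buffer is sized from
 * the offset of aPages[cPages] (cPages being the caller's count) and the
 * SUPVMMR0REQHDR must describe that size:
 * @code
 *      uint32_t const       cbReq = RT_UOFFSETOF_DYN(GMMALLOCATEPAGESREQ, aPages[cPages]);
 *      PGMMALLOCATEPAGESREQ pReq  = (PGMMALLOCATEPAGESREQ)RTMemAllocZ(cbReq);
 *      pReq->Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
 *      pReq->Hdr.cbReq    = cbReq;
 *      pReq->enmAccount   = GMMACCOUNT_BASE;
 *      pReq->cPages       = cPages;
 *      // fill in pReq->aPages[0 .. cPages - 1] as described for GMMPAGEDESC above
 * @endcode
 */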


/**
 * Request buffer for GMMR0FreePagesReq / VMMR0_DO_GMM_FREE_PAGES.
 * @see GMMR0FreePages.
 */
typedef struct GMMFREEPAGESREQ
{
    /** The header. */
    SUPVMMR0REQHDR  Hdr;
    /** The account this relates to. */
    GMMACCOUNT      enmAccount;
    /** The number of pages to free. */
    uint32_t        cPages;
    /** Array of free page descriptors. */
    GMMFREEPAGEDESC aPages[1];
} GMMFREEPAGESREQ;
/** Pointer to a GMMR0FreePagesReq / VMMR0_DO_GMM_FREE_PAGES request buffer. */
typedef GMMFREEPAGESREQ *PGMMFREEPAGESREQ;

VMMR0_INT_DECL(int) GMMR0FreePagesReq(PGVM pGVM, VMCPUID idCpu, PGMMFREEPAGESREQ pReq);

/**
 * Request buffer for GMMR0BalloonedPagesReq / VMMR0_DO_GMM_BALLOONED_PAGES.
 * @see GMMR0BalloonedPages.
 */
typedef struct GMMBALLOONEDPAGESREQ
{
    /** The header. */
    SUPVMMR0REQHDR  Hdr;
    /** The number of ballooned pages. */
    uint32_t        cBalloonedPages;
    /** Inflate or deflate the balloon. */
    GMMBALLOONACTION enmAction;
} GMMBALLOONEDPAGESREQ;
/** Pointer to a GMMR0BalloonedPagesReq / VMMR0_DO_GMM_BALLOONED_PAGES request buffer. */
typedef GMMBALLOONEDPAGESREQ *PGMMBALLOONEDPAGESREQ;

VMMR0_INT_DECL(int) GMMR0BalloonedPagesReq(PGVM pGVM, VMCPUID idCpu, PGMMBALLOONEDPAGESREQ pReq);
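
/* Illustrative sketch (assumed usage, not normative): inflating the balloon by
 * cGuestPages pages, cGuestPages being a hypothetical caller variable; deflation
 * uses GMMBALLOONACTION_DEFLATE and a VM reset uses GMMBALLOONACTION_RESET. The
 * filled request is what ring-3 submits as VMMR0_DO_GMM_BALLOONED_PAGES:
 * @code
 *      GMMBALLOONEDPAGESREQ Req;
 *      Req.Hdr.u32Magic    = SUPVMMR0REQHDR_MAGIC;
 *      Req.Hdr.cbReq       = sizeof(Req);
 *      Req.cBalloonedPages = cGuestPages;
 *      Req.enmAction       = GMMBALLOONACTION_INFLATE;
 * @endcode
 */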


/**
 * Request buffer for GMMR0QueryHypervisorMemoryStatsReq / VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS.
 * @see GMMR0QueryHypervisorMemoryStatsReq.
 */
typedef struct GMMMEMSTATSREQ
{
    /** The header. */
    SUPVMMR0REQHDR  Hdr;
    /** The number of allocated pages (out). */
    uint64_t        cAllocPages;
    /** The number of free pages (out). */
    uint64_t        cFreePages;
    /** The number of ballooned pages (out). */
    uint64_t        cBalloonedPages;
    /** The number of shared pages (out). */
    uint64_t        cSharedPages;
    /** Maximum number of pages (out). */
    uint64_t        cMaxPages;
} GMMMEMSTATSREQ;
/** Pointer to a GMMR0QueryHypervisorMemoryStatsReq / VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS request buffer. */
typedef GMMMEMSTATSREQ *PGMMMEMSTATSREQ;

VMMR0_INT_DECL(int) GMMR0QueryHypervisorMemoryStatsReq(PGMMMEMSTATSREQ pReq);
VMMR0_INT_DECL(int) GMMR0QueryMemoryStatsReq(PGVM pGVM, VMCPUID idCpu, PGMMMEMSTATSREQ pReq);

/**
 * Request buffer for GMMR0MapUnmapChunkReq / VMMR0_DO_GMM_MAP_UNMAP_CHUNK.
 * @see GMMR0MapUnmapChunk
 */
typedef struct GMMMAPUNMAPCHUNKREQ
{
    /** The header. */
    SUPVMMR0REQHDR  Hdr;
    /** The chunk to map, NIL_GMM_CHUNKID if unmap only. (IN) */
    uint32_t        idChunkMap;
    /** The chunk to unmap, NIL_GMM_CHUNKID if map only. (IN) */
    uint32_t        idChunkUnmap;
    /** Where the mapping address is returned. (OUT) */
    RTR3PTR         pvR3;
} GMMMAPUNMAPCHUNKREQ;
/** Pointer to a GMMR0MapUnmapChunkReq / VMMR0_DO_GMM_MAP_UNMAP_CHUNK request buffer. */
typedef GMMMAPUNMAPCHUNKREQ *PGMMMAPUNMAPCHUNKREQ;

VMMR0_INT_DECL(int) GMMR0MapUnmapChunkReq(PGVM pGVM, PGMMMAPUNMAPCHUNKREQ pReq);
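
/* Illustrative sketch (assumption): mapping one chunk into ring-3 without unmapping
 * another; idChunk is a hypothetical caller variable and the ring-3 address comes
 * back in pvR3:
 * @code
 *      GMMMAPUNMAPCHUNKREQ Req;
 *      Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
 *      Req.Hdr.cbReq    = sizeof(Req);
 *      Req.idChunkMap   = idChunk;
 *      Req.idChunkUnmap = NIL_GMM_CHUNKID;   // nothing to unmap
 *      Req.pvR3         = NIL_RTR3PTR;
 * @endcode
 */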


/**
 * Request buffer for GMMR0FreeLargePageReq / VMMR0_DO_GMM_FREE_LARGE_PAGE.
 * @see GMMR0FreeLargePage.
 */
typedef struct GMMFREELARGEPAGEREQ
{
    /** The header. */
    SUPVMMR0REQHDR  Hdr;
    /** The Page ID. */
    uint32_t        idPage;
} GMMFREELARGEPAGEREQ;
/** Pointer to a GMMR0FreeLargePageReq / VMMR0_DO_GMM_FREE_LARGE_PAGE request buffer. */
typedef GMMFREELARGEPAGEREQ *PGMMFREELARGEPAGEREQ;

VMMR0_INT_DECL(int) GMMR0FreeLargePageReq(PGVM pGVM, VMCPUID idCpu, PGMMFREELARGEPAGEREQ pReq);

/** Maximum length of the shared module name string, terminator included. */
#define GMM_SHARED_MODULE_MAX_NAME_STRING       128
/** Maximum length of the shared module version string, terminator included. */
#define GMM_SHARED_MODULE_MAX_VERSION_STRING    16

/**
 * Request buffer for GMMR0RegisterSharedModuleReq / VMMR0_DO_GMM_REGISTER_SHARED_MODULE.
 * @see GMMR0RegisterSharedModule.
 */
typedef struct GMMREGISTERSHAREDMODULEREQ
{
    /** The header. */
    SUPVMMR0REQHDR  Hdr;
    /** Shared module size. */
    uint32_t        cbModule;
    /** Number of included region descriptors */
    uint32_t        cRegions;
    /** Base address of the shared module. */
    RTGCPTR64       GCBaseAddr;
    /** Guest OS type. */
    VBOXOSFAMILY    enmGuestOS;
    /** return code. */
    uint32_t        rc;
    /** Module name */
    char            szName[GMM_SHARED_MODULE_MAX_NAME_STRING];
    /** Module version */
    char            szVersion[GMM_SHARED_MODULE_MAX_VERSION_STRING];
    /** Shared region descriptor(s). */
    VMMDEVSHAREDREGIONDESC aRegions[1];
} GMMREGISTERSHAREDMODULEREQ;
/** Pointer to a GMMR0RegisterSharedModuleReq / VMMR0_DO_GMM_REGISTER_SHARED_MODULE request buffer. */
typedef GMMREGISTERSHAREDMODULEREQ *PGMMREGISTERSHAREDMODULEREQ;

VMMR0_INT_DECL(int) GMMR0RegisterSharedModuleReq(PGVM pGVM, VMCPUID idCpu, PGMMREGISTERSHAREDMODULEREQ pReq);
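
/* Illustrative sketch (assumption; cRegions and pszModuleName are hypothetical
 * caller variables): like the other variable length requests in this header, the
 * buffer is sized from the offset of aRegions[cRegions], and the name and version
 * strings must fit their fixed buffers, terminator included:
 * @code
 *      uint32_t const cbReq = RT_UOFFSETOF_DYN(GMMREGISTERSHAREDMODULEREQ, aRegions[cRegions]);
 *      // RTStrCopy(pReq->szName, sizeof(pReq->szName), pszModuleName) terminates the copy
 *      // and reports overflow if the name exceeds GMM_SHARED_MODULE_MAX_NAME_STRING - 1 chars.
 * @endcode
 */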

/**
 * Shared region descriptor
 */
typedef struct GMMSHAREDREGIONDESC
{
    /** The page offset where the region starts. */
    uint32_t        off;
    /** Region size - adjusted by the region offset and rounded up to a
     * page. */
    uint32_t        cb;
    /** Pointer to physical GMM page ID array. */
    uint32_t       *paidPages;
} GMMSHAREDREGIONDESC;
/** Pointer to a GMMSHAREDREGIONDESC. */
typedef GMMSHAREDREGIONDESC *PGMMSHAREDREGIONDESC;


/**
 * Shared module registration info (global)
 */
typedef struct GMMSHAREDMODULE
{
    /** Tree node (keyed by a hash of name & version). */
    AVLLU32NODECORE Core;
    /** Shared module size. */
    uint32_t        cbModule;
    /** Number of included region descriptors */
    uint32_t        cRegions;
    /** Number of users (VMs). */
    uint32_t        cUsers;
    /** Guest OS family type. */
    VBOXOSFAMILY    enmGuestOS;
    /** Module name */
    char            szName[GMM_SHARED_MODULE_MAX_NAME_STRING];
    /** Module version */
    char            szVersion[GMM_SHARED_MODULE_MAX_VERSION_STRING];
    /** Shared region descriptor(s). */
    GMMSHAREDREGIONDESC aRegions[1];
} GMMSHAREDMODULE;
/** Pointer to a GMMSHAREDMODULE. */
typedef GMMSHAREDMODULE *PGMMSHAREDMODULE;

/**
 * Page descriptor for GMMR0SharedModuleCheckRange
 */
typedef struct GMMSHAREDPAGEDESC
{
    /** HC Physical address (in/out) */
    RTHCPHYS        HCPhys;
    /** GC Physical address (in) */
    RTGCPHYS        GCPhys;
    /** GMM page id. (in/out) */
    uint32_t        idPage;
    /** CRC32 of the page in strict builds (0 if page not available).
     * In non-strict builds this serves as structure alignment. */
    uint32_t        u32StrictChecksum;
} GMMSHAREDPAGEDESC;
/** Pointer to a GMMSHAREDPAGEDESC. */
typedef GMMSHAREDPAGEDESC *PGMMSHAREDPAGEDESC;

VMMR0_INT_DECL(int) GMMR0SharedModuleCheckPage(PGVM pGVM, PGMMSHAREDMODULE pModule, uint32_t idxRegion, uint32_t idxPage,
                                               PGMMSHAREDPAGEDESC pPageDesc);

/**
 * Request buffer for GMMR0UnregisterSharedModuleReq / VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE.
 * @see GMMR0UnregisterSharedModule.
 */
typedef struct GMMUNREGISTERSHAREDMODULEREQ
{
    /** The header. */
    SUPVMMR0REQHDR  Hdr;
    /** Shared module size. */
    uint32_t        cbModule;
    /** Align at 8 byte boundary. */
    uint32_t        u32Alignment;
    /** Base address of the shared module. */
    RTGCPTR64       GCBaseAddr;
    /** Module name */
    char            szName[GMM_SHARED_MODULE_MAX_NAME_STRING];
    /** Module version */
    char            szVersion[GMM_SHARED_MODULE_MAX_VERSION_STRING];
} GMMUNREGISTERSHAREDMODULEREQ;
/** Pointer to a GMMR0UnregisterSharedModuleReq / VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE request buffer. */
typedef GMMUNREGISTERSHAREDMODULEREQ *PGMMUNREGISTERSHAREDMODULEREQ;

VMMR0_INT_DECL(int) GMMR0UnregisterSharedModuleReq(PGVM pGVM, VMCPUID idCpu, PGMMUNREGISTERSHAREDMODULEREQ pReq);

#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
/**
 * Request buffer for GMMR0FindDuplicatePageReq / VMMR0_DO_GMM_FIND_DUPLICATE_PAGE.
 * @see GMMR0FindDuplicatePage.
 */
typedef struct GMMFINDDUPLICATEPAGEREQ
{
    /** The header. */
    SUPVMMR0REQHDR  Hdr;
    /** Page id. */
    uint32_t        idPage;
    /** Duplicate flag (out) */
    bool            fDuplicate;
} GMMFINDDUPLICATEPAGEREQ;
/** Pointer to a GMMR0FindDuplicatePageReq / VMMR0_DO_GMM_FIND_DUPLICATE_PAGE request buffer. */
typedef GMMFINDDUPLICATEPAGEREQ *PGMMFINDDUPLICATEPAGEREQ;

VMMR0_INT_DECL(int) GMMR0FindDuplicatePageReq(PGVM pGVM, PGMMFINDDUPLICATEPAGEREQ pReq);
#endif /* VBOX_STRICT && HC_ARCH_BITS == 64 */


/**
 * Request buffer for GMMR0QueryStatisticsReq / VMMR0_DO_GMM_QUERY_STATISTICS.
 * @see GMMR0QueryStatistics.
 */
typedef struct GMMQUERYSTATISTICSSREQ
{
    /** The header. */
    SUPVMMR0REQHDR  Hdr;
    /** The support driver session. */
    PSUPDRVSESSION  pSession;
    /** The statistics. */
    GMMSTATS        Stats;
} GMMQUERYSTATISTICSSREQ;
/** Pointer to a GMMR0QueryStatisticsReq / VMMR0_DO_GMM_QUERY_STATISTICS
 * request buffer. */
typedef GMMQUERYSTATISTICSSREQ *PGMMQUERYSTATISTICSSREQ;

VMMR0_INT_DECL(int) GMMR0QueryStatisticsReq(PGVM pGVM, PGMMQUERYSTATISTICSSREQ pReq);


/**
 * Request buffer for GMMR0ResetStatisticsReq / VMMR0_DO_GMM_RESET_STATISTICS.
 * @see GMMR0ResetStatistics.
 */
typedef struct GMMRESETSTATISTICSSREQ
{
    /** The header. */
    SUPVMMR0REQHDR  Hdr;
    /** The support driver session. */
    PSUPDRVSESSION  pSession;
    /** The statistics to reset.
     * Any non-zero entry will be reset (if permitted). */
    GMMSTATS        Stats;
} GMMRESETSTATISTICSSREQ;
/** Pointer to a GMMR0ResetStatisticsReq / VMMR0_DO_GMM_RESET_STATISTICS
 * request buffer. */
typedef GMMRESETSTATISTICSSREQ *PGMMRESETSTATISTICSSREQ;

VMMR0_INT_DECL(int) GMMR0ResetStatisticsReq(PGVM pGVM, PGMMRESETSTATISTICSSREQ pReq);



#ifdef IN_RING3
/** @defgroup grp_gmm_r3 The Global Memory Manager Ring-3 API Wrappers
 * @{
 */
VMMR3_INT_DECL(int) GMMR3InitialReservation(PVM pVM, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages,
                                            GMMOCPOLICY enmPolicy, GMMPRIORITY enmPriority);
VMMR3_INT_DECL(int) GMMR3UpdateReservation(PVM pVM, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages);
VMMR3_INT_DECL(int) GMMR3AllocatePagesPrepare(PVM pVM, PGMMALLOCATEPAGESREQ *ppReq, uint32_t cPages, GMMACCOUNT enmAccount);
VMMR3_INT_DECL(int) GMMR3AllocatePagesPerform(PVM pVM, PGMMALLOCATEPAGESREQ pReq);
VMMR3_INT_DECL(void) GMMR3AllocatePagesCleanup(PGMMALLOCATEPAGESREQ pReq);
VMMR3_INT_DECL(int) GMMR3FreePagesPrepare(PVM pVM, PGMMFREEPAGESREQ *ppReq, uint32_t cPages, GMMACCOUNT enmAccount);
VMMR3_INT_DECL(void) GMMR3FreePagesRePrep(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t cPages, GMMACCOUNT enmAccount);
VMMR3_INT_DECL(int) GMMR3FreePagesPerform(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t cActualPages);
VMMR3_INT_DECL(void) GMMR3FreePagesCleanup(PGMMFREEPAGESREQ pReq);
VMMR3_INT_DECL(void) GMMR3FreeAllocatedPages(PVM pVM, GMMALLOCATEPAGESREQ const *pAllocReq);
VMMR3_INT_DECL(int) GMMR3AllocateLargePage(PVM pVM, uint32_t cbPage);
VMMR3_INT_DECL(int) GMMR3FreeLargePage(PVM pVM, uint32_t idPage);
VMMR3_INT_DECL(int) GMMR3MapUnmapChunk(PVM pVM, uint32_t idChunkMap, uint32_t idChunkUnmap, PRTR3PTR ppvR3);
VMMR3_INT_DECL(int) GMMR3QueryHypervisorMemoryStats(PVM pVM, uint64_t *pcTotalAllocPages, uint64_t *pcTotalFreePages,
                                                    uint64_t *pcTotalBalloonPages, uint64_t *puTotalBalloonSize);
VMMR3_INT_DECL(int) GMMR3QueryMemoryStats(PVM pVM, uint64_t *pcAllocPages, uint64_t *pcMaxPages, uint64_t *pcBalloonPages);
VMMR3_INT_DECL(int) GMMR3BalloonedPages(PVM pVM, GMMBALLOONACTION enmAction, uint32_t cBalloonedPages);
VMMR3_INT_DECL(int) GMMR3RegisterSharedModule(PVM pVM, PGMMREGISTERSHAREDMODULEREQ pReq);
VMMR3_INT_DECL(int) GMMR3UnregisterSharedModule(PVM pVM, PGMMUNREGISTERSHAREDMODULEREQ pReq);
VMMR3_INT_DECL(int) GMMR3CheckSharedModules(PVM pVM);
VMMR3_INT_DECL(int) GMMR3ResetSharedModules(PVM pVM);

# if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
VMMR3_INT_DECL(bool) GMMR3IsDuplicatePage(PVM pVM, uint32_t idPage);
# endif
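
/* Illustrative sketch (an assumption about the intended calling sequence, inferred
 * from the names above): the ring-3 allocation wrappers follow a prepare / fill /
 * perform / cleanup pattern, pVM and cPages being the caller's:
 * @code
 *      PGMMALLOCATEPAGESREQ pReq;
 *      int rc = GMMR3AllocatePagesPrepare(pVM, &pReq, cPages, GMMACCOUNT_BASE);
 *      if (RT_SUCCESS(rc))
 *      {
 *          // fill in pReq->aPages[0 .. cPages - 1] (see GMMPAGEDESC)
 *          rc = GMMR3AllocatePagesPerform(pVM, pReq);
 *          GMMR3AllocatePagesCleanup(pReq);
 *      }
 * @endcode
 */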

/** @} */
#endif /* IN_RING3 */

/** @} */

RT_C_DECLS_END

#endif /* !VBOX_INCLUDED_vmm_gmm_h */
