VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/GMMR0.cpp@ 30757

Last change on this file was 30757, checked in by vboxsync, 14 years ago

1/* $Id: GMMR0.cpp 30757 2010-07-09 12:09:46Z vboxsync $ */
2/** @file
3 * GMM - Global Memory Manager.
4 */
5
6/*
7 * Copyright (C) 2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_gmm GMM - The Global Memory Manager
20 *
21 * As the name indicates, this component is responsible for global memory
22 * management. Currently only guest RAM is allocated from the GMM, but this
23 * may change to include shadow page tables and other bits later.
24 *
25 * Guest RAM is managed as individual pages, but allocated from the host OS
26 * in chunks for reasons of portability / efficiency. To minimize the memory
27 * footprint, all tracking structures must be as small as possible without
28 * unnecessary performance penalties.
29 *
30 * The allocation chunks have a fixed size, defined at compile time
31 * by the #GMM_CHUNK_SIZE \#define.
32 *
33 * Each chunk is given a unique ID. Each page also has a unique ID. The
34 * relationship between the two IDs is:
35 * @code
36 * GMM_CHUNK_SHIFT = log2(GMM_CHUNK_SIZE / PAGE_SIZE);
37 * idPage = (idChunk << GMM_CHUNK_SHIFT) | iPage;
38 * @endcode
39 * Where iPage is the index of the page within the chunk. This ID scheme
40 * permits efficient chunk and page lookup, but it relies on the chunk size
41 * being set at compile time. The chunks are organized in an AVL tree with their
42 * IDs being the keys.
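 *
 * For illustration, the page lookup later in this file (gmmR0GetPage)
 * follows directly from this scheme:
 * @code
 * PGMMCHUNK pChunk = gmmR0GetChunk(pGMM, idPage >> GMM_CHUNKID_SHIFT);
 * PGMMPAGE  pPage  = &pChunk->aPages[idPage & GMM_PAGEID_IDX_MASK];
 * @endcode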
43 *
44 * The physical address of each page in an allocation chunk is maintained by
45 * the #RTR0MEMOBJ and obtained using #RTR0MemObjGetPagePhysAddr. There is no
46 * need to duplicate this information (it'd cost 8 bytes per page if we did).
47 *
48 * So what do we need to track per page? Most importantly we need to know
49 * which state the page is in:
50 * - Private - Allocated for (eventually) backing one particular VM page.
51 * - Shared - Read-only page that is used by one or more VMs and treated
52 * as COW by PGM.
53 * - Free - Not used by anyone.
54 *
55 * For the page replacement operations (sharing, defragmenting and freeing)
56 * to be somewhat efficient, private pages need to be associated with a
57 * particular page in a particular VM.
58 *
59 * Tracking the usage of shared pages is impractical and expensive, so we'll
60 * settle for a reference counting system instead.
61 *
62 * Free pages will be chained on LIFOs.
63 *
64 * On 64-bit systems we will use a 64-bit bitfield per page, while on 32-bit
65 * systems a 32-bit bitfield will have to suffice because of address space
66 * limitations. The #GMMPAGE structure shows the details.
67 *
68 *
69 * @section sec_gmm_alloc_strat Page Allocation Strategy
70 *
71 * The strategy for allocating pages has to take fragmentation and shared
72 * pages into account, or we may end up with 2000 chunks with only
73 * a few pages in each. Shared pages cannot easily be reallocated because
74 * of the inaccurate usage accounting (see above). Private pages can be
75 * reallocated by a defragmentation thread in the same manner that sharing
76 * is done.
77 *
78 * The first approach is to manage the free pages in two sets depending on
79 * whether they are mainly for the allocation of shared or private pages.
80 * In the initial implementation there will be almost no possibility for
81 * mixing shared and private pages in the same chunk (only if we're really
82 * stressed on memory), but when we implement forking of VMs and have to
83 * deal with lots of COW pages it'll start getting kind of interesting.
84 *
85 * The sets are lists of chunks with approximately the same number of
86 * free pages. Say the chunk size is 1MB, meaning 256 pages, and a set
87 * consists of 16 lists. So, the first list will contain the chunks with
88 * 1-16 free pages, the second covers 17-32, and so on. The chunks will be
89 * moved between the lists as pages are freed up or allocated.
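 *
 * As a sketch, the list a chunk goes onto is computed like this (see
 * gmmR0LinkChunk further down; GMM_CHUNK_FREE_SET_SHIFT is 4, i.e. 16 pages
 * per list):
 * @code
 * unsigned iList = (pChunk->cFree - 1) >> GMM_CHUNK_FREE_SET_SHIFT;
 * pChunk->pFreeNext = pSet->apLists[iList];
 * pSet->apLists[iList] = pChunk;
 * @endcode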
90 *
91 *
92 * @section sec_gmm_costs Costs
93 *
94 * The per page cost in kernel space is one GMMPAGE (32-bit or 64-bit, see
95 * above) plus whatever RTR0MEMOBJ entails. In addition there is the chunk
96 * cost of approximately (sizeof(RTR0MEMOBJ) + sizeof(CHUNK)) / 2^CHUNK_SHIFT bytes per page.
97 *
98 * On Windows the per page #RTR0MEMOBJ cost is 32-bit on 32-bit Windows
99 * and 64-bit on 64-bit Windows (a PFN_NUMBER in the MDL). So, 64-bit per page.
100 * The cost on Linux is identical, but here it's because of sizeof(struct page *).
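 *
 * As a rough worked example (a 64-bit host with the current 2 MB chunks,
 * i.e. 512 pages per chunk): sizeof(GMMPAGE) is 8 bytes and the per page
 * RTR0MEMOBJ cost is roughly another 8 bytes (see above), so expect on the
 * order of 16 bytes of kernel-space tracking per guest page plus the small
 * per-chunk header overhead.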
101 *
102 *
103 * @section sec_gmm_legacy Legacy Mode for Non-Tier-1 Platforms
104 *
105 * In legacy mode the page source is locked user pages and not
106 * #RTR0MemObjAllocPhysNC; this means that a page can only be allocated
107 * by the VM that locked it. We will make no attempt at implementing
108 * page sharing on these systems, just do enough to make it all work.
109 *
110 *
111 * @subsection sub_gmm_locking Serializing
112 *
113 * One simple fast mutex will be employed in the initial implementation, not
114 * two as mentioned in @ref subsec_pgmPhys_Serializing.
115 *
116 * @see @ref subsec_pgmPhys_Serializing
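 *
 * A minimal sketch of the pattern the entry points in this file follow
 * (see e.g. GMMR0InitialReservation below):
 * @code
 * rc = RTSemFastMutexRequest(pGMM->Mtx); AssertRC(rc);
 * if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
 * {
 *     // ... do the actual work ...
 *     GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
 * }
 * else
 *     rc = VERR_INTERNAL_ERROR_5;
 * RTSemFastMutexRelease(pGMM->Mtx);
 * @endcode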
117 *
118 *
119 * @section sec_gmm_overcommit Memory Over-Commitment Management
120 *
121 * The GVM will have to do the system wide memory over-commitment
122 * management. My current ideas are:
123 * - Per-VM over-commit policy that indicates how much to initially commit
124 * to it and what to do in an out-of-memory situation.
125 * - Prevent overtaxing the host.
126 *
127 * There are some challenges here; the main ones are configurability and
128 * security. Should we for instance permit anyone to request 100% memory
129 * commitment? Who should be allowed to do runtime adjustments of the
130 * config? And how to prevent these settings from being lost when the last
131 * VM process exits? The solution is probably to have an optional root
132 * daemon that will keep VMMR0.r0 in memory and enable the security measures.
133 *
134 *
135 *
136 * @section sec_gmm_numa NUMA
137 *
138 * NUMA considerations will be designed and implemented a bit later.
139 *
140 * The preliminary guess is that we will have to try to allocate memory as
141 * close as possible to the CPUs the VM is executed on (EMT and additional CPU
142 * threads). Which means it's mostly about allocation and sharing policies.
143 * Both the scheduler and allocator interface will have to supply some NUMA info
144 * and we'll need a way to calculate access costs.
145 *
146 */
147
148
149/*******************************************************************************
150* Header Files *
151*******************************************************************************/
152#define LOG_GROUP LOG_GROUP_GMM
153#include <VBox/vm.h>
154#include <VBox/gmm.h>
155#include "GMMR0Internal.h"
156#include <VBox/gvm.h>
157#include <VBox/pgm.h>
158#include <VBox/log.h>
159#include <VBox/param.h>
160#include <VBox/err.h>
161#include <iprt/asm.h>
162#include <iprt/avl.h>
163#include <iprt/mem.h>
164#include <iprt/memobj.h>
165#include <iprt/semaphore.h>
166#include <iprt/string.h>
167
168
169/*******************************************************************************
170* Structures and Typedefs *
171*******************************************************************************/
172/** Pointer to set of free chunks. */
173typedef struct GMMCHUNKFREESET *PGMMCHUNKFREESET;
174
175/** Pointer to a GMM allocation chunk. */
176typedef struct GMMCHUNK *PGMMCHUNK;
177
178/**
179 * The per-page tracking structure employed by the GMM.
180 *
181 * On 32-bit hosts some trickery is necessary to compress all
182 * the information into 32-bits. When the fSharedFree member is set,
183 * the 30th bit decides whether it's a free page or not.
184 *
185 * Because of the different layout on 32-bit and 64-bit hosts, macros
186 * are used to get and set some of the data.
187 */
188typedef union GMMPAGE
189{
190#if HC_ARCH_BITS == 64
191 /** Unsigned integer view. */
192 uint64_t u;
193
194 /** The common view. */
195 struct GMMPAGECOMMON
196 {
197 uint32_t uStuff1 : 32;
198 uint32_t uStuff2 : 30;
199 /** The page state. */
200 uint32_t u2State : 2;
201 } Common;
202
203 /** The view of a private page. */
204 struct GMMPAGEPRIVATE
205 {
206 /** The guest page frame number. (Max addressable: 2 ^ 44 - 16) */
207 uint32_t pfn;
208 /** The GVM handle. (64K VMs) */
209 uint32_t hGVM : 16;
210 /** Reserved. */
211 uint32_t u16Reserved : 14;
212 /** The page state. */
213 uint32_t u2State : 2;
214 } Private;
215
216 /** The view of a shared page. */
217 struct GMMPAGESHARED
218 {
219 /** The host page frame number. (Max addressable: 2 ^ 44 - 16) */
220 uint32_t pfn;
221 /** The reference count (64K VMs). */
222 uint32_t cRefs : 16;
223 /** Reserved. Checksum or something? Two hGVMs for forking? */
224 uint32_t u14Reserved : 14;
225 /** The page state. */
226 uint32_t u2State : 2;
227 } Shared;
228
229 /** The view of a free page. */
230 struct GMMPAGEFREE
231 {
232 /** The index of the next page in the free list. UINT16_MAX is NIL. */
233 uint16_t iNext;
234 /** Reserved. Checksum or something? */
235 uint16_t u16Reserved0;
236 /** Reserved. Checksum or something? */
237 uint32_t u30Reserved1 : 30;
238 /** The page state. */
239 uint32_t u2State : 2;
240 } Free;
241
242#else /* 32-bit */
243 /** Unsigned integer view. */
244 uint32_t u;
245
246 /** The common view. */
247 struct GMMPAGECOMMON
248 {
249 uint32_t uStuff : 30;
250 /** The page state. */
251 uint32_t u2State : 2;
252 } Common;
253
254 /** The view of a private page. */
255 struct GMMPAGEPRIVATE
256 {
257 /** The guest page frame number. (Max addressable: 2 ^ 36) */
258 uint32_t pfn : 24;
259 /** The GVM handle. (127 VMs) */
260 uint32_t hGVM : 7;
261 /** The top page state bit, MBZ. */
262 uint32_t fZero : 1;
263 } Private;
264
265 /** The view of a shared page. */
266 struct GMMPAGESHARED
267 {
268 /** The reference count. */
269 uint32_t cRefs : 30;
270 /** The page state. */
271 uint32_t u2State : 2;
272 } Shared;
273
274 /** The view of a free page. */
275 struct GMMPAGEFREE
276 {
277 /** The index of the next page in the free list. UINT16_MAX is NIL. */
278 uint32_t iNext : 16;
279 /** Reserved. Checksum or something? */
280 uint32_t u14Reserved : 14;
281 /** The page state. */
282 uint32_t u2State : 2;
283 } Free;
284#endif
285} GMMPAGE;
286AssertCompileSize(GMMPAGE, sizeof(RTHCUINTPTR));
287/** Pointer to a GMMPAGE. */
288typedef GMMPAGE *PGMMPAGE;
289
290
291/** @name The Page States.
292 * @{ */
293/** A private page. */
294#define GMM_PAGE_STATE_PRIVATE 0
295/** A private page - alternative value used on the 32-bit implementation.
296 * This will never be used on 64-bit hosts. */
297#define GMM_PAGE_STATE_PRIVATE_32 1
298/** A shared page. */
299#define GMM_PAGE_STATE_SHARED 2
300/** A free page. */
301#define GMM_PAGE_STATE_FREE 3
302/** @} */
303
304
305/** @def GMM_PAGE_IS_PRIVATE
306 *
307 * @returns true if private, false if not.
308 * @param pPage The GMM page.
309 */
310#if HC_ARCH_BITS == 64
311# define GMM_PAGE_IS_PRIVATE(pPage) ( (pPage)->Common.u2State == GMM_PAGE_STATE_PRIVATE )
312#else
313# define GMM_PAGE_IS_PRIVATE(pPage) ( (pPage)->Private.fZero == 0 )
314#endif
315
316/** @def GMM_PAGE_IS_SHARED
317 *
318 * @returns true if shared, false if not.
319 * @param pPage The GMM page.
320 */
321#define GMM_PAGE_IS_SHARED(pPage) ( (pPage)->Common.u2State == GMM_PAGE_STATE_SHARED )
322
323/** @def GMM_PAGE_IS_FREE
324 *
325 * @returns true if free, false if not.
326 * @param pPage The GMM page.
327 */
328#define GMM_PAGE_IS_FREE(pPage) ( (pPage)->Common.u2State == GMM_PAGE_STATE_FREE )
329
330/** @def GMM_PAGE_PFN_LAST
331 * The last valid guest pfn range.
332 * @remark Some of the values outside the range have special meaning,
333 * see GMM_PAGE_PFN_UNSHAREABLE.
334 */
335#if HC_ARCH_BITS == 64
336# define GMM_PAGE_PFN_LAST UINT32_C(0xfffffff0)
337#else
338# define GMM_PAGE_PFN_LAST UINT32_C(0x00fffff0)
339#endif
340AssertCompile(GMM_PAGE_PFN_LAST == (GMM_GCPHYS_LAST >> PAGE_SHIFT));
341
342/** @def GMM_PAGE_PFN_UNSHAREABLE
343 * Indicates that this page isn't used for normal guest memory and thus isn't shareable.
344 */
345#if HC_ARCH_BITS == 64
346# define GMM_PAGE_PFN_UNSHAREABLE UINT32_C(0xfffffff1)
347#else
348# define GMM_PAGE_PFN_UNSHAREABLE UINT32_C(0x00fffff1)
349#endif
350AssertCompile(GMM_PAGE_PFN_UNSHAREABLE == (GMM_GCPHYS_UNSHAREABLE >> PAGE_SHIFT));
351
352
353/**
354 * A GMM allocation chunk ring-3 mapping record.
355 *
356 * This should really be associated with a session and not a VM, but
357 * it's simpler to associate it with a VM and clean up when the VM object
358 * is destroyed.
359 */
360typedef struct GMMCHUNKMAP
361{
362 /** The mapping object. */
363 RTR0MEMOBJ MapObj;
364 /** The VM owning the mapping. */
365 PGVM pGVM;
366} GMMCHUNKMAP;
367/** Pointer to a GMM allocation chunk mapping. */
368typedef struct GMMCHUNKMAP *PGMMCHUNKMAP;
369
370typedef enum GMMCHUNKTYPE
371{
372 GMMCHUNKTYPE_INVALID = 0,
373 GMMCHUNKTYPE_NON_CONTINUOUS = 1, /* 4 KB pages */
374 GMMCHUNKTYPE_CONTINUOUS = 2, /* one 2 MB continuous physical range. */
375 GMMCHUNKTYPE_32BIT_HACK = 0x7fffffff
376} GMMCHUNKTYPE;
377
378
379/**
380 * A GMM allocation chunk.
381 */
382typedef struct GMMCHUNK
383{
384 /** The AVL node core.
385 * The Key is the chunk ID. */
386 AVLU32NODECORE Core;
387 /** The memory object.
388 * Either from RTR0MemObjAllocPhysNC or RTR0MemObjLockUser depending on
389 * what the host can dish up. */
390 RTR0MEMOBJ MemObj;
391 /** Pointer to the next chunk in the free list. */
392 PGMMCHUNK pFreeNext;
393 /** Pointer to the previous chunk in the free list. */
394 PGMMCHUNK pFreePrev;
395 /** Pointer to the free set this chunk belongs to. NULL for
396 * chunks with no free pages. */
397 PGMMCHUNKFREESET pSet;
398 /** Pointer to an array of mappings. */
399 PGMMCHUNKMAP paMappings;
400 /** The number of mappings. */
401 uint16_t cMappings;
402 /** The head of the list of free pages. UINT16_MAX is the NIL value. */
403 uint16_t iFreeHead;
404 /** The number of free pages. */
405 uint16_t cFree;
406 /** The GVM handle of the VM that first allocated pages from this chunk, this
407 * is used as a preference when there are several chunks to choose from.
408 * When in bound memory mode this isn't a preference any longer. */
409 uint16_t hGVM;
410 /** The number of private pages. */
411 uint16_t cPrivate;
412 /** The number of shared pages. */
413 uint16_t cShared;
414 /** Chunk type */
415 GMMCHUNKTYPE enmType;
416 /** The pages. */
417 GMMPAGE aPages[GMM_CHUNK_SIZE >> PAGE_SHIFT];
418} GMMCHUNK;
419
420
421/**
422 * An allocation chunk TLB entry.
423 */
424typedef struct GMMCHUNKTLBE
425{
426 /** The chunk id. */
427 uint32_t idChunk;
428 /** Pointer to the chunk. */
429 PGMMCHUNK pChunk;
430} GMMCHUNKTLBE;
431/** Pointer to an allocation chunk TLB entry. */
432typedef GMMCHUNKTLBE *PGMMCHUNKTLBE;
433
434
435/** The number of entries in the allocation chunk TLB. */
436#define GMM_CHUNKTLB_ENTRIES 32
437/** Gets the TLB entry index for the given Chunk ID. */
438#define GMM_CHUNKTLB_IDX(idChunk) ( (idChunk) & (GMM_CHUNKTLB_ENTRIES - 1) )
439
440/**
441 * An allocation chunk TLB.
442 */
443typedef struct GMMCHUNKTLB
444{
445 /** The TLB entries. */
446 GMMCHUNKTLBE aEntries[GMM_CHUNKTLB_ENTRIES];
447} GMMCHUNKTLB;
448/** Pointer to an allocation chunk TLB. */
449typedef GMMCHUNKTLB *PGMMCHUNKTLB;
450
451
452/** The GMMCHUNK::cFree shift count. */
453#define GMM_CHUNK_FREE_SET_SHIFT 4
454/** The GMMCHUNK::cFree mask for use when considering relinking a chunk. */
455#define GMM_CHUNK_FREE_SET_MASK 15
456/** The number of lists in a set. */
457#define GMM_CHUNK_FREE_SET_LISTS (GMM_CHUNK_NUM_PAGES >> GMM_CHUNK_FREE_SET_SHIFT)
458
459/**
460 * A set of free chunks.
461 */
462typedef struct GMMCHUNKFREESET
463{
464 /** The number of free pages in the set. */
465 uint64_t cFreePages;
466 /** Chunks ordered by increasing number of free pages. */
467 PGMMCHUNK apLists[GMM_CHUNK_FREE_SET_LISTS];
468} GMMCHUNKFREESET;
469
470
471/**
472 * The GMM instance data.
473 */
474typedef struct GMM
475{
476 /** Magic / eye catcher. GMM_MAGIC */
477 uint32_t u32Magic;
478 /** The fast mutex protecting the GMM.
479 * More fine grained locking can be implemented later if necessary. */
480 RTSEMFASTMUTEX Mtx;
481 /** The chunk tree. */
482 PAVLU32NODECORE pChunks;
483 /** The chunk TLB. */
484 GMMCHUNKTLB ChunkTLB;
485 /** The private free set. */
486 GMMCHUNKFREESET Private;
487 /** The shared free set. */
488 GMMCHUNKFREESET Shared;
489
490 /** Shared module tree (global). */
491/** @todo separate trees for distinctly different guest OSes. */
492 PAVLGCPTRNODECORE pGlobalSharedModuleTree;
493
494 /** The maximum number of pages we're allowed to allocate.
495 * @gcfgm 64-bit GMM/MaxPages Direct.
496 * @gcfgm 32-bit GMM/PctPages Relative to the number of host pages. */
497 uint64_t cMaxPages;
498 /** The number of pages that have been reserved.
499 * The deal is that cReservedPages - cOverCommittedPages <= cMaxPages. */
500 uint64_t cReservedPages;
501 /** The number of pages that we have over-committed in reservations. */
502 uint64_t cOverCommittedPages;
503 /** The number of actually allocated (committed if you like) pages. */
504 uint64_t cAllocatedPages;
505 /** The number of pages that are shared. A subset of cAllocatedPages. */
506 uint64_t cSharedPages;
507 /** The number of pages that are actually shared between VMs. */
508 uint64_t cDuplicatePages;
509 /** The number of pages that are shared that have been left behind by
510 * VMs not doing proper cleanups. */
511 uint64_t cLeftBehindSharedPages;
512 /** The number of allocation chunks.
513 * (The number of pages we've allocated from the host can be derived from this.) */
514 uint32_t cChunks;
515 /** The number of current ballooned pages. */
516 uint64_t cBalloonedPages;
517
518 /** The legacy allocation mode indicator.
519 * This is determined at initialization time. */
520 bool fLegacyAllocationMode;
521 /** The bound memory mode indicator.
522 * When set, the memory will be bound to a specific VM and never
523 * shared. This is always set if fLegacyAllocationMode is set.
524 * (Also determined at initialization time.) */
525 bool fBoundMemoryMode;
526 /** The number of registered VMs. */
527 uint16_t cRegisteredVMs;
528
529 /** The previously allocated Chunk ID.
530 * Used as a hint to avoid scanning the whole bitmap. */
531 uint32_t idChunkPrev;
532 /** Chunk ID allocation bitmap.
533 * Bits of allocated IDs are set, free ones are clear.
534 * The NIL id (0) is marked allocated. */
535 uint32_t bmChunkId[(GMM_CHUNKID_LAST + 1 + 31) / 32];
536} GMM;
537/** Pointer to the GMM instance. */
538typedef GMM *PGMM;
539
540/** The value of GMM::u32Magic (Katsuhiro Otomo). */
541#define GMM_MAGIC 0x19540414
542
543
544/*******************************************************************************
545* Global Variables *
546*******************************************************************************/
547/** Pointer to the GMM instance data. */
548static PGMM g_pGMM = NULL;
549
550/** Macro for obtaining and validating the g_pGMM pointer.
551 * On failure it will return from the invoking function with the specified return value.
552 *
553 * @param pGMM The name of the pGMM variable.
554 * @param rc The return value on failure. Use VERR_INTERNAL_ERROR for
555 * VBox status codes.
556 */
557#define GMM_GET_VALID_INSTANCE(pGMM, rc) \
558 do { \
559 (pGMM) = g_pGMM; \
560 AssertPtrReturn((pGMM), (rc)); \
561 AssertMsgReturn((pGMM)->u32Magic == GMM_MAGIC, ("%p - %#x\n", (pGMM), (pGMM)->u32Magic), (rc)); \
562 } while (0)
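/* Usage sketch (as done in e.g. GMMR0InitialReservation below):
 *     PGMM pGMM;
 *     GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
 */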
563
564/** Macro for obtaining and validating the g_pGMM pointer, void function variant.
565 * On failure it will return from the invoking function.
566 *
567 * @param pGMM The name of the pGMM variable.
568 */
569#define GMM_GET_VALID_INSTANCE_VOID(pGMM) \
570 do { \
571 (pGMM) = g_pGMM; \
572 AssertPtrReturnVoid((pGMM)); \
573 AssertMsgReturnVoid((pGMM)->u32Magic == GMM_MAGIC, ("%p - %#x\n", (pGMM), (pGMM)->u32Magic)); \
574 } while (0)
575
576
577/** @def GMM_CHECK_SANITY_UPON_ENTERING
578 * Checks the sanity of the GMM instance data before making changes.
579 *
580 * This macro is a stub by default and must be enabled manually in the code.
581 *
582 * @returns true if sane, false if not.
583 * @param pGMM The name of the pGMM variable.
584 */
585#if defined(VBOX_STRICT) && 0
586# define GMM_CHECK_SANITY_UPON_ENTERING(pGMM) (gmmR0SanityCheck((pGMM), __PRETTY_FUNCTION__, __LINE__) == 0)
587#else
588# define GMM_CHECK_SANITY_UPON_ENTERING(pGMM) (true)
589#endif
590
591/** @def GMM_CHECK_SANITY_UPON_LEAVING
592 * Checks the sanity of the GMM instance data after making changes.
593 *
594 * This macro is a stub by default and must be enabled manually in the code.
595 *
596 * @returns true if sane, false if not.
597 * @param pGMM The name of the pGMM variable.
598 */
599#if defined(VBOX_STRICT) && 0
600# define GMM_CHECK_SANITY_UPON_LEAVING(pGMM) (gmmR0SanityCheck((pGMM), __PRETTY_FUNCTION__, __LINE__) == 0)
601#else
602# define GMM_CHECK_SANITY_UPON_LEAVING(pGMM) (true)
603#endif
604
605/** @def GMM_CHECK_SANITY_IN_LOOPS
606 * Checks the sanity of the GMM instance in the allocation loops.
607 *
608 * This macro is a stub by default and must be enabled manually in the code.
609 *
610 * @returns true if sane, false if not.
611 * @param pGMM The name of the pGMM variable.
612 */
613#if defined(VBOX_STRICT) && 0
614# define GMM_CHECK_SANITY_IN_LOOPS(pGMM) (gmmR0SanityCheck((pGMM), __PRETTY_FUNCTION__, __LINE__) == 0)
615#else
616# define GMM_CHECK_SANITY_IN_LOOPS(pGMM) (true)
617#endif
618
619
620/*******************************************************************************
621* Internal Functions *
622*******************************************************************************/
623static DECLCALLBACK(int) gmmR0TermDestroyChunk(PAVLU32NODECORE pNode, void *pvGMM);
624static DECLCALLBACK(int) gmmR0CleanupVMScanChunk(PAVLU32NODECORE pNode, void *pvGMM);
625static DECLCALLBACK(int) gmmR0CleanupSharedModule(PAVLGCPTRNODECORE pNode, void *pvGVM);
626/*static*/ DECLCALLBACK(int) gmmR0CleanupVMDestroyChunk(PAVLU32NODECORE pNode, void *pvGVM);
627DECLINLINE(void) gmmR0LinkChunk(PGMMCHUNK pChunk, PGMMCHUNKFREESET pSet);
628DECLINLINE(void) gmmR0UnlinkChunk(PGMMCHUNK pChunk);
629static uint32_t gmmR0SanityCheck(PGMM pGMM, const char *pszFunction, unsigned uLineNo);
630static void gmmR0FreeChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk);
631static void gmmR0FreeSharedPage(PGMM pGMM, uint32_t idPage, PGMMPAGE pPage);
632static int gmmR0UnmapChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk);
633
634
635
636/**
637 * Initializes the GMM component.
638 *
639 * This is called when the VMMR0.r0 module is loaded and protected by the
640 * loader semaphore.
641 *
642 * @returns VBox status code.
643 */
644GMMR0DECL(int) GMMR0Init(void)
645{
646 LogFlow(("GMMInit:\n"));
647
648 /*
649 * Allocate the instance data and the lock(s).
650 */
651 PGMM pGMM = (PGMM)RTMemAllocZ(sizeof(*pGMM));
652 if (!pGMM)
653 return VERR_NO_MEMORY;
654 pGMM->u32Magic = GMM_MAGIC;
655 for (unsigned i = 0; i < RT_ELEMENTS(pGMM->ChunkTLB.aEntries); i++)
656 pGMM->ChunkTLB.aEntries[i].idChunk = NIL_GMM_CHUNKID;
657 ASMBitSet(&pGMM->bmChunkId[0], NIL_GMM_CHUNKID);
658
659 int rc = RTSemFastMutexCreate(&pGMM->Mtx);
660 if (RT_SUCCESS(rc))
661 {
662 /*
663 * Check and see if RTR0MemObjAllocPhysNC works.
664 */
665#if 0 /* later, see #3170. */
666 RTR0MEMOBJ MemObj;
667 rc = RTR0MemObjAllocPhysNC(&MemObj, _64K, NIL_RTHCPHYS);
668 if (RT_SUCCESS(rc))
669 {
670 rc = RTR0MemObjFree(MemObj, true);
671 AssertRC(rc);
672 }
673 else if (rc == VERR_NOT_SUPPORTED)
674 pGMM->fLegacyAllocationMode = pGMM->fBoundMemoryMode = true;
675 else
676 SUPR0Printf("GMMR0Init: RTR0MemObjAllocPhysNC(,64K,Any) -> %d!\n", rc);
677#else
678# if defined(RT_OS_WINDOWS) || (defined(RT_OS_SOLARIS) && ARCH_BITS == 64) || defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD)
679 pGMM->fLegacyAllocationMode = false;
680# if ARCH_BITS == 32
681 /* Don't reuse possibly partial chunks because of the virtual address space limitation. */
682 pGMM->fBoundMemoryMode = true;
683# else
684 pGMM->fBoundMemoryMode = false;
685# endif
686# else
687 pGMM->fLegacyAllocationMode = true;
688 pGMM->fBoundMemoryMode = true;
689# endif
690#endif
691
692 /*
693 * Query system page count and guess a reasonable cMaxPages value.
694 */
695 pGMM->cMaxPages = UINT32_MAX; /** @todo IPRT function for querying RAM size and such. */
696
697 g_pGMM = pGMM;
698 LogFlow(("GMMInit: pGMM=%p fLegacyAllocationMode=%RTbool fBoundMemoryMode=%RTbool\n", pGMM, pGMM->fLegacyAllocationMode, pGMM->fBoundMemoryMode));
699 return VINF_SUCCESS;
700 }
701
702 RTMemFree(pGMM);
703 SUPR0Printf("GMMR0Init: failed! rc=%d\n", rc);
704 return rc;
705}
706
707
708/**
709 * Terminates the GMM component.
710 */
711GMMR0DECL(void) GMMR0Term(void)
712{
713 LogFlow(("GMMTerm:\n"));
714
715 /*
716 * Take care / be paranoid...
717 */
718 PGMM pGMM = g_pGMM;
719 if (!VALID_PTR(pGMM))
720 return;
721 if (pGMM->u32Magic != GMM_MAGIC)
722 {
723 SUPR0Printf("GMMR0Term: u32Magic=%#x\n", pGMM->u32Magic);
724 return;
725 }
726
727 /*
728 * Undo what init did and free all the resources we've acquired.
729 */
730 /* Destroy the fundamentals. */
731 g_pGMM = NULL;
732 pGMM->u32Magic++;
733 RTSemFastMutexDestroy(pGMM->Mtx);
734 pGMM->Mtx = NIL_RTSEMFASTMUTEX;
735
736 /* free any chunks still hanging around. */
737 RTAvlU32Destroy(&pGMM->pChunks, gmmR0TermDestroyChunk, pGMM);
738
739 /* finally the instance data itself. */
740 RTMemFree(pGMM);
741 LogFlow(("GMMTerm: done\n"));
742}
743
744
745/**
746 * RTAvlU32Destroy callback.
747 *
748 * @returns 0
749 * @param pNode The node to destroy.
750 * @param pvGMM The GMM handle.
751 */
752static DECLCALLBACK(int) gmmR0TermDestroyChunk(PAVLU32NODECORE pNode, void *pvGMM)
753{
754 PGMMCHUNK pChunk = (PGMMCHUNK)pNode;
755
756 if (pChunk->cFree != (GMM_CHUNK_SIZE >> PAGE_SHIFT))
757 SUPR0Printf("GMMR0Term: %p/%#x: cFree=%d cPrivate=%d cShared=%d cMappings=%d\n", pChunk,
758 pChunk->Core.Key, pChunk->cFree, pChunk->cPrivate, pChunk->cShared, pChunk->cMappings);
759
760 int rc = RTR0MemObjFree(pChunk->MemObj, true /* fFreeMappings */);
761 if (RT_FAILURE(rc))
762 {
763 SUPR0Printf("GMMR0Term: %p/%#x: RTRMemObjFree(%p,true) -> %d (cMappings=%d)\n", pChunk,
764 pChunk->Core.Key, pChunk->MemObj, rc, pChunk->cMappings);
765 AssertRC(rc);
766 }
767 pChunk->MemObj = NIL_RTR0MEMOBJ;
768
769 RTMemFree(pChunk->paMappings);
770 pChunk->paMappings = NULL;
771
772 RTMemFree(pChunk);
773 NOREF(pvGMM);
774 return 0;
775}
776
777
778/**
779 * Initializes the per-VM data for the GMM.
780 *
781 * This is called from within the GVMM lock (from GVMMR0CreateVM)
782 * and should only initialize the data members so GMMR0CleanupVM
783 * can deal with them. We reserve no memory or anything here,
784 * that's done later in GMMR0InitVM.
785 *
786 * @param pGVM Pointer to the Global VM structure.
787 */
788GMMR0DECL(void) GMMR0InitPerVMData(PGVM pGVM)
789{
790 AssertCompile(RT_SIZEOFMEMB(GVM,gmm.s) <= RT_SIZEOFMEMB(GVM,gmm.padding));
791
792 pGVM->gmm.s.enmPolicy = GMMOCPOLICY_INVALID;
793 pGVM->gmm.s.enmPriority = GMMPRIORITY_INVALID;
794 pGVM->gmm.s.fMayAllocate = false;
795}
796
797
798/**
799 * Cleans up when a VM is terminating.
800 *
801 * @param pGVM Pointer to the Global VM structure.
802 */
803GMMR0DECL(void) GMMR0CleanupVM(PGVM pGVM)
804{
805 LogFlow(("GMMR0CleanupVM: pGVM=%p:{.pVM=%p, .hSelf=%#x}\n", pGVM, pGVM->pVM, pGVM->hSelf));
806
807 PGMM pGMM;
808 GMM_GET_VALID_INSTANCE_VOID(pGMM);
809
810 int rc = RTSemFastMutexRequest(pGMM->Mtx);
811 AssertRC(rc);
812 GMM_CHECK_SANITY_UPON_ENTERING(pGMM);
813
814#ifdef VBOX_WITH_PAGE_SHARING
815 /* Clean up all registered shared modules. */
816 RTAvlGCPtrDestroy(&pGVM->gmm.s.pSharedModuleTree, gmmR0CleanupSharedModule, pGVM);
817#endif
818
819 /*
820 * The policy is 'INVALID' until the initial reservation
821 * request has been serviced.
822 */
823 if ( pGVM->gmm.s.enmPolicy > GMMOCPOLICY_INVALID
824 && pGVM->gmm.s.enmPolicy < GMMOCPOLICY_END)
825 {
826 /*
827 * If it's the last VM around, we can skip walking all the chunks looking
828 * for the pages owned by this VM and instead flush the whole shebang.
829 *
830 * This takes care of the eventuality that a VM has left shared page
831 * references behind (shouldn't happen of course, but you never know).
832 */
833 Assert(pGMM->cRegisteredVMs);
834 pGMM->cRegisteredVMs--;
835#if 0 /* disabled so it won't hide bugs. */
836 if (!pGMM->cRegisteredVMs)
837 {
838 RTAvlU32Destroy(&pGMM->pChunks, gmmR0CleanupVMDestroyChunk, pGMM);
839
840 for (unsigned i = 0; i < RT_ELEMENTS(pGMM->ChunkTLB.aEntries); i++)
841 {
842 pGMM->ChunkTLB.aEntries[i].idChunk = NIL_GMM_CHUNKID;
843 pGMM->ChunkTLB.aEntries[i].pChunk = NULL;
844 }
845
846 memset(&pGMM->Private, 0, sizeof(pGMM->Private));
847 memset(&pGMM->Shared, 0, sizeof(pGMM->Shared));
848
849 memset(&pGMM->bmChunkId[0], 0, sizeof(pGMM->bmChunkId));
850 ASMBitSet(&pGMM->bmChunkId[0], NIL_GMM_CHUNKID);
851
852 pGMM->cReservedPages = 0;
853 pGMM->cOverCommittedPages = 0;
854 pGMM->cAllocatedPages = 0;
855 pGMM->cSharedPages = 0;
856 pGMM->cDuplicatePages = 0;
857 pGMM->cLeftBehindSharedPages = 0;
858 pGMM->cChunks = 0;
859 pGMM->cBalloonedPages = 0;
860 }
861 else
862#endif
863 {
864 /*
865 * Walk the entire pool looking for pages that belong to this VM
866 * and left-over mappings. (This'll only catch private pages; shared
867 * pages will be 'left behind'.)
868 */
869 /** @todo this might be kind of expensive with a lot of VMs and memory hanging around... */
870 uint64_t cPrivatePages = pGVM->gmm.s.cPrivatePages; /* save */
871 RTAvlU32DoWithAll(&pGMM->pChunks, true /* fFromLeft */, gmmR0CleanupVMScanChunk, pGVM);
872 if (pGVM->gmm.s.cPrivatePages)
873 SUPR0Printf("GMMR0CleanupVM: hGVM=%#x has %#x private pages that cannot be found!\n", pGVM->hSelf, pGVM->gmm.s.cPrivatePages);
874 pGMM->cAllocatedPages -= cPrivatePages;
875
876 /* free empty chunks. */
877 if (cPrivatePages)
878 {
879 PGMMCHUNK pCur = pGMM->Private.apLists[RT_ELEMENTS(pGMM->Private.apLists) - 1];
880 while (pCur)
881 {
882 PGMMCHUNK pNext = pCur->pFreeNext;
883 if ( pCur->cFree == GMM_CHUNK_NUM_PAGES
884 && ( !pGMM->fBoundMemoryMode
885 || pCur->hGVM == pGVM->hSelf))
886 gmmR0FreeChunk(pGMM, pGVM, pCur);
887 pCur = pNext;
888 }
889 }
890
891 /* account for shared pages that weren't freed. */
892 if (pGVM->gmm.s.cSharedPages)
893 {
894 Assert(pGMM->cSharedPages >= pGVM->gmm.s.cSharedPages);
895 SUPR0Printf("GMMR0CleanupVM: hGVM=%#x left %#x shared pages behind!\n", pGVM->hSelf, pGVM->gmm.s.cSharedPages);
896 pGMM->cLeftBehindSharedPages += pGVM->gmm.s.cSharedPages;
897 }
898
899 /* Clean up balloon statistics in case the VM process crashed. */
900 Assert(pGMM->cBalloonedPages >= pGVM->gmm.s.cBalloonedPages);
901 pGMM->cBalloonedPages -= pGVM->gmm.s.cBalloonedPages;
902
903 /*
904 * Update the over-commitment management statistics.
905 */
906 pGMM->cReservedPages -= pGVM->gmm.s.Reserved.cBasePages
907 + pGVM->gmm.s.Reserved.cFixedPages
908 + pGVM->gmm.s.Reserved.cShadowPages;
909 switch (pGVM->gmm.s.enmPolicy)
910 {
911 case GMMOCPOLICY_NO_OC:
912 break;
913 default:
914 /** @todo Update GMM->cOverCommittedPages */
915 break;
916 }
917 }
918 }
919
920 /* zap the GVM data. */
921 pGVM->gmm.s.enmPolicy = GMMOCPOLICY_INVALID;
922 pGVM->gmm.s.enmPriority = GMMPRIORITY_INVALID;
923 pGVM->gmm.s.fMayAllocate = false;
924
925 GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
926 RTSemFastMutexRelease(pGMM->Mtx);
927
928 LogFlow(("GMMR0CleanupVM: returns\n"));
929}
930
931
932/**
933 * RTAvlU32DoWithAll callback.
934 *
935 * @returns 0
936 * @param pNode The node to search.
937 * @param pvGVM Pointer to the shared VM structure.
938 */
939static DECLCALLBACK(int) gmmR0CleanupVMScanChunk(PAVLU32NODECORE pNode, void *pvGVM)
940{
941 PGMMCHUNK pChunk = (PGMMCHUNK)pNode;
942 PGVM pGVM = (PGVM)pvGVM;
943
944 /*
945 * Look for pages belonging to the VM.
946 * (Perform some internal checks while we're scanning.)
947 */
948#ifndef VBOX_STRICT
949 if (pChunk->cFree != (GMM_CHUNK_SIZE >> PAGE_SHIFT))
950#endif
951 {
952 unsigned cPrivate = 0;
953 unsigned cShared = 0;
954 unsigned cFree = 0;
955
956 gmmR0UnlinkChunk(pChunk); /* avoiding cFreePages updates. */
957
958 uint16_t hGVM = pGVM->hSelf;
959 unsigned iPage = (GMM_CHUNK_SIZE >> PAGE_SHIFT);
960 while (iPage-- > 0)
961 if (GMM_PAGE_IS_PRIVATE(&pChunk->aPages[iPage]))
962 {
963 if (pChunk->aPages[iPage].Private.hGVM == hGVM)
964 {
965 /*
966 * Free the page.
967 *
968 * The reason for not using gmmR0FreePrivatePage here is that we
969 * must *not* cause the chunk to be freed from under us - we're in
970 * an AVL tree walk here.
971 */
972 pChunk->aPages[iPage].u = 0;
973 pChunk->aPages[iPage].Free.iNext = pChunk->iFreeHead;
974 pChunk->aPages[iPage].Free.u2State = GMM_PAGE_STATE_FREE;
975 pChunk->iFreeHead = iPage;
976 pChunk->cPrivate--;
977 pChunk->cFree++;
978 pGVM->gmm.s.cPrivatePages--;
979 cFree++;
980 }
981 else
982 cPrivate++;
983 }
984 else if (GMM_PAGE_IS_FREE(&pChunk->aPages[iPage]))
985 cFree++;
986 else
987 cShared++;
988
989 gmmR0LinkChunk(pChunk, pChunk->cShared ? &g_pGMM->Shared : &g_pGMM->Private);
990
991 /*
992 * Did it add up?
993 */
994 if (RT_UNLIKELY( pChunk->cFree != cFree
995 || pChunk->cPrivate != cPrivate
996 || pChunk->cShared != cShared))
997 {
998 SUPR0Printf("gmmR0CleanupVMScanChunk: Chunk %p/%#x has bogus stats - free=%d/%d private=%d/%d shared=%d/%d\n",
999 pChunk, pChunk->Core.Key, pChunk->cFree, cFree, pChunk->cPrivate, cPrivate, pChunk->cShared, cShared);
1000 pChunk->cFree = cFree;
1001 pChunk->cPrivate = cPrivate;
1002 pChunk->cShared = cShared;
1003 }
1004 }
1005
1006 /*
1007 * Look for the mapping belonging to the terminating VM.
1008 */
1009 for (unsigned i = 0; i < pChunk->cMappings; i++)
1010 if (pChunk->paMappings[i].pGVM == pGVM)
1011 {
1012 RTR0MEMOBJ MemObj = pChunk->paMappings[i].MapObj;
1013
1014 pChunk->cMappings--;
1015 if (i < pChunk->cMappings)
1016 pChunk->paMappings[i] = pChunk->paMappings[pChunk->cMappings];
1017 pChunk->paMappings[pChunk->cMappings].pGVM = NULL;
1018 pChunk->paMappings[pChunk->cMappings].MapObj = NIL_RTR0MEMOBJ;
1019
1020 int rc = RTR0MemObjFree(MemObj, false /* fFreeMappings (NA) */);
1021 if (RT_FAILURE(rc))
1022 {
1023 SUPR0Printf("gmmR0CleanupVMScanChunk: %p/%#x: mapping #%x: RTRMemObjFree(%p,false) -> %d \n",
1024 pChunk, pChunk->Core.Key, i, MemObj, rc);
1025 AssertRC(rc);
1026 }
1027 break;
1028 }
1029
1030 /*
1031 * If not in bound memory mode, we should reset the hGVM field
1032 * if it has our handle in it.
1033 */
1034 if (pChunk->hGVM == pGVM->hSelf)
1035 {
1036 if (!g_pGMM->fBoundMemoryMode)
1037 pChunk->hGVM = NIL_GVM_HANDLE;
1038 else if (pChunk->cFree != GMM_CHUNK_NUM_PAGES)
1039 {
1040 SUPR0Printf("gmmR0CleanupVMScanChunk: %p/%#x: cFree=%#x - it should be 0 in bound mode!\n",
1041 pChunk, pChunk->Core.Key, pChunk->cFree);
1042 AssertMsgFailed(("%p/%#x: cFree=%#x - it should be 0 in bound mode!\n", pChunk, pChunk->Core.Key, pChunk->cFree));
1043
1044 gmmR0UnlinkChunk(pChunk);
1045 pChunk->cFree = GMM_CHUNK_NUM_PAGES;
1046 gmmR0LinkChunk(pChunk, pChunk->cShared ? &g_pGMM->Shared : &g_pGMM->Private);
1047 }
1048 }
1049
1050 return 0;
1051}
1052
1053
1054/**
1055 * RTAvlU32Destroy callback for GMMR0CleanupVM.
1056 *
1057 * @returns 0
1058 * @param pNode The node (allocation chunk) to destroy.
1059 * @param pvGVM Pointer to the shared VM structure.
1060 */
1061/*static*/ DECLCALLBACK(int) gmmR0CleanupVMDestroyChunk(PAVLU32NODECORE pNode, void *pvGVM)
1062{
1063 PGMMCHUNK pChunk = (PGMMCHUNK)pNode;
1064 PGVM pGVM = (PGVM)pvGVM;
1065
1066 for (unsigned i = 0; i < pChunk->cMappings; i++)
1067 {
1068 if (pChunk->paMappings[i].pGVM != pGVM)
1069 SUPR0Printf("gmmR0CleanupVMDestroyChunk: %p/%#x: mapping #%x: pGVM=%p exepcted %p\n", pChunk,
1070 pChunk->Core.Key, i, pChunk->paMappings[i].pGVM, pGVM);
1071 int rc = RTR0MemObjFree(pChunk->paMappings[i].MapObj, false /* fFreeMappings (NA) */);
1072 if (RT_FAILURE(rc))
1073 {
1074 SUPR0Printf("gmmR0CleanupVMDestroyChunk: %p/%#x: mapping #%x: RTRMemObjFree(%p,false) -> %d \n", pChunk,
1075 pChunk->Core.Key, i, pChunk->paMappings[i].MapObj, rc);
1076 AssertRC(rc);
1077 }
1078 }
1079
1080 int rc = RTR0MemObjFree(pChunk->MemObj, true /* fFreeMappings */);
1081 if (RT_FAILURE(rc))
1082 {
1083 SUPR0Printf("gmmR0CleanupVMDestroyChunk: %p/%#x: RTRMemObjFree(%p,true) -> %d (cMappings=%d)\n", pChunk,
1084 pChunk->Core.Key, pChunk->MemObj, rc, pChunk->cMappings);
1085 AssertRC(rc);
1086 }
1087 pChunk->MemObj = NIL_RTR0MEMOBJ;
1088
1089 RTMemFree(pChunk->paMappings);
1090 pChunk->paMappings = NULL;
1091
1092 RTMemFree(pChunk);
1093 return 0;
1094}
1095
1096
1097/**
1098 * The initial resource reservations.
1099 *
1100 * This will make memory reservations according to policy and priority. If there aren't
1101 * sufficient resources available to sustain the VM, this function will fail and all
1102 * future allocation requests will fail as well.
1103 *
1104 * These are just the initial reservations made very very early during the VM creation
1105 * process and will be adjusted later in the GMMR0UpdateReservation call after the
1106 * ring-3 init has completed.
1107 *
1108 * @returns VBox status code.
1109 * @retval VERR_GMM_MEMORY_RESERVATION_DECLINED
1110 * @retval VERR_GMM_
1111 *
1112 * @param pVM Pointer to the shared VM structure.
1113 * @param idCpu VCPU id
1114 * @param cBasePages The number of pages that may be allocated for the base RAM and ROMs.
1115 * This does not include MMIO2 and similar.
1116 * @param cShadowPages The number of pages that may be allocated for shadow paging structures.
1117 * @param cFixedPages The number of pages that may be allocated for fixed objects like the
1118 * hyper heap, MMIO2 and similar.
1119 * @param enmPolicy The OC policy to use on this VM.
1120 * @param enmPriority The priority in an out-of-memory situation.
1121 *
1122 * @thread The creator thread / EMT.
1123 */
1124GMMR0DECL(int) GMMR0InitialReservation(PVM pVM, VMCPUID idCpu, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages,
1125 GMMOCPOLICY enmPolicy, GMMPRIORITY enmPriority)
1126{
1127 LogFlow(("GMMR0InitialReservation: pVM=%p cBasePages=%#llx cShadowPages=%#x cFixedPages=%#x enmPolicy=%d enmPriority=%d\n",
1128 pVM, cBasePages, cShadowPages, cFixedPages, enmPolicy, enmPriority));
1129
1130 /*
1131 * Validate, get basics and take the semaphore.
1132 */
1133 PGMM pGMM;
1134 GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
1135 PGVM pGVM;
1136 int rc = GVMMR0ByVMAndEMT(pVM, idCpu, &pGVM);
1137 if (RT_FAILURE(rc))
1138 return rc;
1139
1140 AssertReturn(cBasePages, VERR_INVALID_PARAMETER);
1141 AssertReturn(cShadowPages, VERR_INVALID_PARAMETER);
1142 AssertReturn(cFixedPages, VERR_INVALID_PARAMETER);
1143 AssertReturn(enmPolicy > GMMOCPOLICY_INVALID && enmPolicy < GMMOCPOLICY_END, VERR_INVALID_PARAMETER);
1144 AssertReturn(enmPriority > GMMPRIORITY_INVALID && enmPriority < GMMPRIORITY_END, VERR_INVALID_PARAMETER);
1145
1146 rc = RTSemFastMutexRequest(pGMM->Mtx);
1147 AssertRC(rc);
1148 if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
1149 {
1150 if ( !pGVM->gmm.s.Reserved.cBasePages
1151 && !pGVM->gmm.s.Reserved.cFixedPages
1152 && !pGVM->gmm.s.Reserved.cShadowPages)
1153 {
1154 /*
1155 * Check if we can accommodate this.
1156 */
1157 /* ... later ... */
1158 if (RT_SUCCESS(rc))
1159 {
1160 /*
1161 * Update the records.
1162 */
1163 pGVM->gmm.s.Reserved.cBasePages = cBasePages;
1164 pGVM->gmm.s.Reserved.cFixedPages = cFixedPages;
1165 pGVM->gmm.s.Reserved.cShadowPages = cShadowPages;
1166 pGVM->gmm.s.enmPolicy = enmPolicy;
1167 pGVM->gmm.s.enmPriority = enmPriority;
1168 pGVM->gmm.s.fMayAllocate = true;
1169
1170 pGMM->cReservedPages += cBasePages + cFixedPages + cShadowPages;
1171 pGMM->cRegisteredVMs++;
1172 }
1173 }
1174 else
1175 rc = VERR_WRONG_ORDER;
1176 GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
1177 }
1178 else
1179 rc = VERR_INTERNAL_ERROR_5;
1180 RTSemFastMutexRelease(pGMM->Mtx);
1181 LogFlow(("GMMR0InitialReservation: returns %Rrc\n", rc));
1182 return rc;
1183}
1184
1185
1186/**
1187 * VMMR0 request wrapper for GMMR0InitialReservation.
1188 *
1189 * @returns see GMMR0InitialReservation.
1190 * @param pVM Pointer to the shared VM structure.
1191 * @param idCpu VCPU id
1192 * @param pReq The request packet.
1193 */
1194GMMR0DECL(int) GMMR0InitialReservationReq(PVM pVM, VMCPUID idCpu, PGMMINITIALRESERVATIONREQ pReq)
1195{
1196 /*
1197 * Validate input and pass it on.
1198 */
1199 AssertPtrReturn(pVM, VERR_INVALID_POINTER);
1200 AssertPtrReturn(pReq, VERR_INVALID_POINTER);
1201 AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
1202
1203 return GMMR0InitialReservation(pVM, idCpu, pReq->cBasePages, pReq->cShadowPages, pReq->cFixedPages, pReq->enmPolicy, pReq->enmPriority);
1204}
1205
1206
1207/**
1208 * This updates the memory reservation with the additional MMIO2 and ROM pages.
1209 *
1210 * @returns VBox status code.
1211 * @retval VERR_GMM_MEMORY_RESERVATION_DECLINED
1212 *
1213 * @param pVM Pointer to the shared VM structure.
1214 * @param idCpu VCPU id
1215 * @param cBasePages The number of pages that may be allocated for the base RAM and ROMs.
1216 * This does not include MMIO2 and similar.
1217 * @param cShadowPages The number of pages that may be allocated for shadow paging structures.
1218 * @param cFixedPages The number of pages that may be allocated for fixed objects like the
1219 * hyper heap, MMIO2 and similar.
1220 *
1221 * @thread EMT.
1222 */
1223GMMR0DECL(int) GMMR0UpdateReservation(PVM pVM, VMCPUID idCpu, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages)
1224{
1225 LogFlow(("GMMR0UpdateReservation: pVM=%p cBasePages=%#llx cShadowPages=%#x cFixedPages=%#x\n",
1226 pVM, cBasePages, cShadowPages, cFixedPages));
1227
1228 /*
1229 * Validate, get basics and take the semaphore.
1230 */
1231 PGMM pGMM;
1232 GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
1233 PGVM pGVM;
1234 int rc = GVMMR0ByVMAndEMT(pVM, idCpu, &pGVM);
1235 if (RT_FAILURE(rc))
1236 return rc;
1237
1238 AssertReturn(cBasePages, VERR_INVALID_PARAMETER);
1239 AssertReturn(cShadowPages, VERR_INVALID_PARAMETER);
1240 AssertReturn(cFixedPages, VERR_INVALID_PARAMETER);
1241
1242 rc = RTSemFastMutexRequest(pGMM->Mtx);
1243 AssertRC(rc);
1244 if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
1245 {
1246 if ( pGVM->gmm.s.Reserved.cBasePages
1247 && pGVM->gmm.s.Reserved.cFixedPages
1248 && pGVM->gmm.s.Reserved.cShadowPages)
1249 {
1250 /*
1251 * Check if we can accommodate this.
1252 */
1253 /* ... later ... */
1254 if (RT_SUCCESS(rc))
1255 {
1256 /*
1257 * Update the records.
1258 */
1259 pGMM->cReservedPages -= pGVM->gmm.s.Reserved.cBasePages
1260 + pGVM->gmm.s.Reserved.cFixedPages
1261 + pGVM->gmm.s.Reserved.cShadowPages;
1262 pGMM->cReservedPages += cBasePages + cFixedPages + cShadowPages;
1263
1264 pGVM->gmm.s.Reserved.cBasePages = cBasePages;
1265 pGVM->gmm.s.Reserved.cFixedPages = cFixedPages;
1266 pGVM->gmm.s.Reserved.cShadowPages = cShadowPages;
1267 }
1268 }
1269 else
1270 rc = VERR_WRONG_ORDER;
1271 GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
1272 }
1273 else
1274 rc = VERR_INTERNAL_ERROR_5;
1275 RTSemFastMutexRelease(pGMM->Mtx);
1276 LogFlow(("GMMR0UpdateReservation: returns %Rrc\n", rc));
1277 return rc;
1278}
1279
1280
1281/**
1282 * VMMR0 request wrapper for GMMR0UpdateReservation.
1283 *
1284 * @returns see GMMR0UpdateReservation.
1285 * @param pVM Pointer to the shared VM structure.
1286 * @param idCpu VCPU id
1287 * @param pReq The request packet.
1288 */
1289GMMR0DECL(int) GMMR0UpdateReservationReq(PVM pVM, VMCPUID idCpu, PGMMUPDATERESERVATIONREQ pReq)
1290{
1291 /*
1292 * Validate input and pass it on.
1293 */
1294 AssertPtrReturn(pVM, VERR_INVALID_POINTER);
1295 AssertPtrReturn(pReq, VERR_INVALID_POINTER);
1296 AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
1297
1298 return GMMR0UpdateReservation(pVM, idCpu, pReq->cBasePages, pReq->cShadowPages, pReq->cFixedPages);
1299}
1300
1301
1302/**
1303 * Performs sanity checks on a free set.
1304 *
1305 * @returns Error count.
1306 *
1307 * @param pGMM Pointer to the GMM instance.
1308 * @param pSet Pointer to the set.
1309 * @param pszSetName The set name.
1310 * @param pszFunction The function from which it was called.
1311 * @param uLineNo The line number.
1312 */
1313static uint32_t gmmR0SanityCheckSet(PGMM pGMM, PGMMCHUNKFREESET pSet, const char *pszSetName,
1314 const char *pszFunction, unsigned uLineNo)
1315{
1316 uint32_t cErrors = 0;
1317
1318 /*
1319 * Count the free pages in all the chunks and match it against pSet->cFreePages.
1320 */
1321 uint32_t cPages = 0;
1322 for (unsigned i = 0; i < RT_ELEMENTS(pSet->apLists); i++)
1323 {
1324 for (PGMMCHUNK pCur = pSet->apLists[i]; pCur; pCur = pCur->pFreeNext)
1325 {
1326 /** @todo check that the chunk is hashed into the right set. */
1327 cPages += pCur->cFree;
1328 }
1329 }
1330 if (RT_UNLIKELY(cPages != pSet->cFreePages))
1331 {
1332 SUPR0Printf("GMM insanity: found %#x pages in the %s set, expected %#x. (%s, line %u)\n",
1333 cPages, pszSetName, pSet->cFreePages, pszFunction, uLineNo);
1334 cErrors++;
1335 }
1336
1337 return cErrors;
1338}
1339
1340
1341/**
1342 * Performs some sanity checks on the GMM while owning the lock.
1343 *
1344 * @returns Error count.
1345 *
1346 * @param pGMM Pointer to the GMM instance.
1347 * @param pszFunction The function from which it is called.
1348 * @param uLineNo The line number.
1349 */
1350static uint32_t gmmR0SanityCheck(PGMM pGMM, const char *pszFunction, unsigned uLineNo)
1351{
1352 uint32_t cErrors = 0;
1353
1354 cErrors += gmmR0SanityCheckSet(pGMM, &pGMM->Private, "private", pszFunction, uLineNo);
1355 cErrors += gmmR0SanityCheckSet(pGMM, &pGMM->Shared, "shared", pszFunction, uLineNo);
1356 /** @todo add more sanity checks. */
1357
1358 return cErrors;
1359}
1360
1361
1362/**
1363 * Looks up a chunk in the tree and fills in the TLB entry for it.
1364 *
1365 * This is not expected to fail and will bitch if it does.
1366 *
1367 * @returns Pointer to the allocation chunk, NULL if not found.
1368 * @param pGMM Pointer to the GMM instance.
1369 * @param idChunk The ID of the chunk to find.
1370 * @param pTlbe Pointer to the TLB entry.
1371 */
1372static PGMMCHUNK gmmR0GetChunkSlow(PGMM pGMM, uint32_t idChunk, PGMMCHUNKTLBE pTlbe)
1373{
1374 PGMMCHUNK pChunk = (PGMMCHUNK)RTAvlU32Get(&pGMM->pChunks, idChunk);
1375 AssertMsgReturn(pChunk, ("Chunk %#x not found!\n", idChunk), NULL);
1376 pTlbe->idChunk = idChunk;
1377 pTlbe->pChunk = pChunk;
1378 return pChunk;
1379}
1380
1381
1382/**
1383 * Finds an allocation chunk.
1384 *
1385 * This is not expected to fail and will bitch if it does.
1386 *
1387 * @returns Pointer to the allocation chunk, NULL if not found.
1388 * @param pGMM Pointer to the GMM instance.
1389 * @param idChunk The ID of the chunk to find.
1390 */
1391DECLINLINE(PGMMCHUNK) gmmR0GetChunk(PGMM pGMM, uint32_t idChunk)
1392{
1393 /*
1394 * Do a TLB lookup, branch if not in the TLB.
1395 */
1396 PGMMCHUNKTLBE pTlbe = &pGMM->ChunkTLB.aEntries[GMM_CHUNKTLB_IDX(idChunk)];
1397 if ( pTlbe->idChunk != idChunk
1398 || !pTlbe->pChunk)
1399 return gmmR0GetChunkSlow(pGMM, idChunk, pTlbe);
1400 return pTlbe->pChunk;
1401}
1402
1403
1404/**
1405 * Finds a page.
1406 *
1407 * This is not expected to fail and will bitch if it does.
1408 *
1409 * @returns Pointer to the page, NULL if not found.
1410 * @param pGMM Pointer to the GMM instance.
1411 * @param idPage The ID of the page to find.
1412 */
1413DECLINLINE(PGMMPAGE) gmmR0GetPage(PGMM pGMM, uint32_t idPage)
1414{
1415 PGMMCHUNK pChunk = gmmR0GetChunk(pGMM, idPage >> GMM_CHUNKID_SHIFT);
1416 if (RT_LIKELY(pChunk))
1417 return &pChunk->aPages[idPage & GMM_PAGEID_IDX_MASK];
1418 return NULL;
1419}
1420
1421
1422/**
1423 * Unlinks the chunk from the free list it's currently on (if any).
1424 *
1425 * @param pChunk The allocation chunk.
1426 */
1427DECLINLINE(void) gmmR0UnlinkChunk(PGMMCHUNK pChunk)
1428{
1429 PGMMCHUNKFREESET pSet = pChunk->pSet;
1430 if (RT_LIKELY(pSet))
1431 {
1432 pSet->cFreePages -= pChunk->cFree;
1433
1434 PGMMCHUNK pPrev = pChunk->pFreePrev;
1435 PGMMCHUNK pNext = pChunk->pFreeNext;
1436 if (pPrev)
1437 pPrev->pFreeNext = pNext;
1438 else
1439 pSet->apLists[(pChunk->cFree - 1) >> GMM_CHUNK_FREE_SET_SHIFT] = pNext;
1440 if (pNext)
1441 pNext->pFreePrev = pPrev;
1442
1443 pChunk->pSet = NULL;
1444 pChunk->pFreeNext = NULL;
1445 pChunk->pFreePrev = NULL;
1446 }
1447 else
1448 {
1449 Assert(!pChunk->pFreeNext);
1450 Assert(!pChunk->pFreePrev);
1451 Assert(!pChunk->cFree);
1452 }
1453}
1454
1455
1456/**
1457 * Links the chunk onto the appropriate free list in the specified free set.
1458 *
1459 * If the chunk has no free entries, it's not linked into any list.
1460 *
1461 * @param pChunk The allocation chunk.
1462 * @param pSet The free set.
1463 */
1464DECLINLINE(void) gmmR0LinkChunk(PGMMCHUNK pChunk, PGMMCHUNKFREESET pSet)
1465{
1466 Assert(!pChunk->pSet);
1467 Assert(!pChunk->pFreeNext);
1468 Assert(!pChunk->pFreePrev);
1469
1470 if (pChunk->cFree > 0)
1471 {
1472 pChunk->pSet = pSet;
1473 pChunk->pFreePrev = NULL;
1474 unsigned iList = (pChunk->cFree - 1) >> GMM_CHUNK_FREE_SET_SHIFT;
1475 pChunk->pFreeNext = pSet->apLists[iList];
1476 if (pChunk->pFreeNext)
1477 pChunk->pFreeNext->pFreePrev = pChunk;
1478 pSet->apLists[iList] = pChunk;
1479
1480 pSet->cFreePages += pChunk->cFree;
1481 }
1482}
1483
1484
1485/**
1486 * Frees a Chunk ID.
1487 *
1488 * @param pGMM Pointer to the GMM instance.
1489 * @param idChunk The Chunk ID to free.
1490 */
1491static void gmmR0FreeChunkId(PGMM pGMM, uint32_t idChunk)
1492{
1493 AssertReturnVoid(idChunk != NIL_GMM_CHUNKID);
1494 AssertMsg(ASMBitTest(&pGMM->bmChunkId[0], idChunk), ("%#x\n", idChunk));
1495 ASMAtomicBitClear(&pGMM->bmChunkId[0], idChunk);
1496}
1497
1498
1499/**
1500 * Allocates a new Chunk ID.
1501 *
1502 * @returns The Chunk ID.
1503 * @param pGMM Pointer to the GMM instance.
1504 */
1505static uint32_t gmmR0AllocateChunkId(PGMM pGMM)
1506{
1507 AssertCompile(!((GMM_CHUNKID_LAST + 1) & 31)); /* must be a multiple of 32 */
1508 AssertCompile(NIL_GMM_CHUNKID == 0);
1509
1510 /*
1511 * Try the next sequential one.
1512 */
1513 int32_t idChunk = ++pGMM->idChunkPrev;
1514#if 0 /* test the fallback first */
1515 if ( idChunk <= GMM_CHUNKID_LAST
1516 && idChunk > NIL_GMM_CHUNKID
1517 && !ASMAtomicBitTestAndSet(&pGMM->bmChunkId[0], idChunk))
1518 return idChunk;
1519#endif
1520
1521 /*
1522 * Scan sequentially from the last one.
1523 */
1524 if ( (uint32_t)idChunk < GMM_CHUNKID_LAST
1525 && idChunk > NIL_GMM_CHUNKID)
1526 {
1527 idChunk = ASMBitNextClear(&pGMM->bmChunkId[0], GMM_CHUNKID_LAST + 1, idChunk);
1528 if (idChunk > NIL_GMM_CHUNKID)
1529 {
1530 AssertMsgReturn(!ASMAtomicBitTestAndSet(&pGMM->bmChunkId[0], idChunk), ("%#x\n", idChunk), NIL_GMM_CHUNKID);
1531 return pGMM->idChunkPrev = idChunk;
1532 }
1533 }
1534
1535 /*
1536 * Ok, scan from the start.
1537 * We're not racing anyone, so there is no need to expect failures or have restart loops.
1538 */
1539 idChunk = ASMBitFirstClear(&pGMM->bmChunkId[0], GMM_CHUNKID_LAST + 1);
1540 AssertMsgReturn(idChunk > NIL_GMM_CHUNKID, ("%#x\n", idChunk), NIL_GMM_CHUNKID);
1541 AssertMsgReturn(!ASMAtomicBitTestAndSet(&pGMM->bmChunkId[0], idChunk), ("%#x\n", idChunk), NIL_GMM_CHUNKID);
1542
1543 return pGMM->idChunkPrev = idChunk;
1544}
1545
1546
1547/**
1548 * Registers a new chunk of memory.
1549 *
1550 * This is called by both gmmR0AllocateOneChunk and GMMR0SeedChunk. The caller
1551 * must own the global lock.
1552 *
1553 * @returns VBox status code.
1554 * @param pGMM Pointer to the GMM instance.
1555 * @param pSet Pointer to the set.
1556 * @param MemObj The memory object for the chunk.
1557 * @param hGVM The affinity of the chunk. NIL_GVM_HANDLE for no
1558 * affinity.
1559 * @param enmChunkType Chunk type (continuous or non-continuous)
1560 * @param ppChunk Chunk address (out)
1561 */
1562static int gmmR0RegisterChunk(PGMM pGMM, PGMMCHUNKFREESET pSet, RTR0MEMOBJ MemObj, uint16_t hGVM, GMMCHUNKTYPE enmChunkType, PGMMCHUNK *ppChunk = NULL)
1563{
1564 Assert(hGVM != NIL_GVM_HANDLE || pGMM->fBoundMemoryMode);
1565
1566 int rc;
1567 PGMMCHUNK pChunk = (PGMMCHUNK)RTMemAllocZ(sizeof(*pChunk));
1568 if (pChunk)
1569 {
1570 /*
1571 * Initialize it.
1572 */
1573 pChunk->MemObj = MemObj;
1574 pChunk->cFree = GMM_CHUNK_NUM_PAGES;
1575 pChunk->hGVM = hGVM;
1576 pChunk->iFreeHead = 0;
1577 pChunk->enmType = enmChunkType;
1578 for (unsigned iPage = 0; iPage < RT_ELEMENTS(pChunk->aPages) - 1; iPage++)
1579 {
1580 pChunk->aPages[iPage].Free.u2State = GMM_PAGE_STATE_FREE;
1581 pChunk->aPages[iPage].Free.iNext = iPage + 1;
1582 }
1583 pChunk->aPages[RT_ELEMENTS(pChunk->aPages) - 1].Free.u2State = GMM_PAGE_STATE_FREE;
1584 pChunk->aPages[RT_ELEMENTS(pChunk->aPages) - 1].Free.iNext = UINT16_MAX;
1585
1586 /*
1587 * Allocate a Chunk ID and insert it into the tree.
1588 * This has to be done behind the mutex of course.
1589 */
1590 if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
1591 {
1592 pChunk->Core.Key = gmmR0AllocateChunkId(pGMM);
1593 if ( pChunk->Core.Key != NIL_GMM_CHUNKID
1594 && pChunk->Core.Key <= GMM_CHUNKID_LAST
1595 && RTAvlU32Insert(&pGMM->pChunks, &pChunk->Core))
1596 {
1597 pGMM->cChunks++;
1598 gmmR0LinkChunk(pChunk, pSet);
1599 LogFlow(("gmmR0RegisterChunk: pChunk=%p id=%#x cChunks=%d\n", pChunk, pChunk->Core.Key, pGMM->cChunks));
1600
1601 if (ppChunk)
1602 *ppChunk = pChunk;
1603
1604 GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
1605 return VINF_SUCCESS;
1606 }
1607
1608 /* bail out */
1609 rc = VERR_INTERNAL_ERROR;
1610 }
1611 else
1612 rc = VERR_INTERNAL_ERROR_5;
1613
1614 RTMemFree(pChunk);
1615 }
1616 else
1617 rc = VERR_NO_MEMORY;
1618 return rc;
1619}
1620
1621
1622/**
1623 * Allocate one new chunk and add it to the specified free set.
1624 *
1625 * @returns VBox status code.
1626 * @param pGMM Pointer to the GMM instance.
1627 * @param pSet Pointer to the set.
1628 * @param hGVM The affinity of the new chunk.
1629 * @param enmChunkType Chunk type (continuous or non-continuous)
1630 * @param ppChunk Chunk address (out)
1631 *
1632 * @remarks Called without owning the mutex.
1633 */
1634static int gmmR0AllocateOneChunk(PGMM pGMM, PGMMCHUNKFREESET pSet, uint16_t hGVM, GMMCHUNKTYPE enmChunkType, PGMMCHUNK *ppChunk = NULL)
1635{
1636 /*
1637 * Allocate the memory.
1638 */
1639 RTR0MEMOBJ MemObj;
1640 int rc;
1641
1642 AssertCompile(GMM_CHUNK_SIZE == _2M);
1643 AssertReturn(enmChunkType == GMMCHUNKTYPE_NON_CONTINUOUS || enmChunkType == GMMCHUNKTYPE_CONTINUOUS, VERR_INVALID_PARAMETER);
1644
1645 /* Leave the lock temporarily as the allocation might take long. */
1646 RTSemFastMutexRelease(pGMM->Mtx);
1647 if (enmChunkType == GMMCHUNKTYPE_NON_CONTINUOUS)
1648 rc = RTR0MemObjAllocPhysNC(&MemObj, GMM_CHUNK_SIZE, NIL_RTHCPHYS);
1649 else
1650 rc = RTR0MemObjAllocPhysEx(&MemObj, GMM_CHUNK_SIZE, NIL_RTHCPHYS, GMM_CHUNK_SIZE);
1651
1652 /* Grab the lock again. */
1653 int rc2 = RTSemFastMutexRequest(pGMM->Mtx);
1654 AssertRCReturn(rc2, rc2);
1655
1656 if (RT_SUCCESS(rc))
1657 {
1658 rc = gmmR0RegisterChunk(pGMM, pSet, MemObj, hGVM, enmChunkType, ppChunk);
1659 if (RT_FAILURE(rc))
1660 RTR0MemObjFree(MemObj, false /* fFreeMappings */);
1661 }
1662 /** @todo Check that RTR0MemObjAllocPhysNC always returns VERR_NO_MEMORY on
1663 * allocation failure. */
1664 return rc;
1665}
1666
1667
1668/**
1669 * Attempts to allocate more pages until the requested amount is met.
1670 *
1671 * @returns VBox status code.
1672 * @param pGMM Pointer to the GMM instance data.
1673 * @param pGVM The calling VM.
1674 * @param pSet Pointer to the free set to grow.
1675 * @param cPages The number of pages needed.
1676 *
1677 * @remarks Called owning the mutex, but will leave it temporarily while
1678 * allocating the memory!
1679 */
1680static int gmmR0AllocateMoreChunks(PGMM pGMM, PGVM pGVM, PGMMCHUNKFREESET pSet, uint32_t cPages)
1681{
1682 Assert(!pGMM->fLegacyAllocationMode);
1683
1684 if (!GMM_CHECK_SANITY_IN_LOOPS(pGMM))
1685 return VERR_INTERNAL_ERROR_4;
1686
1687 if (!pGMM->fBoundMemoryMode)
1688 {
1689 /*
1690          * Try to steal free chunks from the other set first. (Only take 100% free chunks.)
1691 */
1692 PGMMCHUNKFREESET pOtherSet = pSet == &pGMM->Private ? &pGMM->Shared : &pGMM->Private;
1693 while ( pSet->cFreePages < cPages
1694 && pOtherSet->cFreePages >= GMM_CHUNK_NUM_PAGES)
1695 {
1696 PGMMCHUNK pChunk = pOtherSet->apLists[GMM_CHUNK_FREE_SET_LISTS - 1];
1697 while (pChunk && pChunk->cFree != GMM_CHUNK_NUM_PAGES)
1698 pChunk = pChunk->pFreeNext;
1699 if (!pChunk)
1700 break;
1701
1702 gmmR0UnlinkChunk(pChunk);
1703 gmmR0LinkChunk(pChunk, pSet);
1704 }
1705
1706 /*
1707          * If we still need more pages, allocate new chunks.
1708          * Note! We will leave the mutex while doing the allocation.
1709 */
1710 while (pSet->cFreePages < cPages)
1711 {
1712 int rc = gmmR0AllocateOneChunk(pGMM, pSet, pGVM->hSelf, GMMCHUNKTYPE_NON_CONTINUOUS);
1713 if (RT_FAILURE(rc))
1714 return rc;
1715 if (!GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
1716 return VERR_INTERNAL_ERROR_5;
1717 }
1718 }
1719 else
1720 {
1721 /*
1722          * The memory is bound to the VM allocating it, so we have to count
1723          * the free pages carefully, as well as make sure we brand them with
1724          * our VM handle.
1725          *
1726          * Note! We will leave the mutex while doing the allocation.
1727 */
1728 uint16_t const hGVM = pGVM->hSelf;
1729 for (;;)
1730 {
1731 /* Count and see if we've reached the goal. */
1732 uint32_t cPagesFound = 0;
1733 for (unsigned i = 0; i < RT_ELEMENTS(pSet->apLists); i++)
1734 for (PGMMCHUNK pCur = pSet->apLists[i]; pCur; pCur = pCur->pFreeNext)
1735 if (pCur->hGVM == hGVM)
1736 {
1737 cPagesFound += pCur->cFree;
1738 if (cPagesFound >= cPages)
1739 break;
1740 }
1741 if (cPagesFound >= cPages)
1742 break;
1743
1744 /* Allocate more. */
1745 int rc = gmmR0AllocateOneChunk(pGMM, pSet, hGVM, GMMCHUNKTYPE_NON_CONTINUOUS);
1746 if (RT_FAILURE(rc))
1747 return rc;
1748 if (!GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
1749 return VERR_INTERNAL_ERROR_5;
1750 }
1751 }
1752
1753 return VINF_SUCCESS;
1754}
1755
1756
1757/**
1758 * Allocates one private page.
1759 *
1760 * Worker for gmmR0AllocatePages.
1761 *
1762 * @param pGMM Pointer to the GMM instance data.
1763 * @param hGVM The GVM handle of the VM requesting memory.
1764 * @param pChunk The chunk to allocate it from.
1765 * @param pPageDesc The page descriptor.
1766 */
1767static void gmmR0AllocatePage(PGMM pGMM, uint32_t hGVM, PGMMCHUNK pChunk, PGMMPAGEDESC pPageDesc)
1768{
1769 /* update the chunk stats. */
1770 if (pChunk->hGVM == NIL_GVM_HANDLE)
1771 pChunk->hGVM = hGVM;
1772 Assert(pChunk->cFree);
1773 pChunk->cFree--;
1774 pChunk->cPrivate++;
1775
1776 /* unlink the first free page. */
1777 const uint32_t iPage = pChunk->iFreeHead;
1778 AssertReleaseMsg(iPage < RT_ELEMENTS(pChunk->aPages), ("%d\n", iPage));
1779 PGMMPAGE pPage = &pChunk->aPages[iPage];
1780 Assert(GMM_PAGE_IS_FREE(pPage));
1781 pChunk->iFreeHead = pPage->Free.iNext;
1782 Log3(("A pPage=%p iPage=%#x/%#x u2State=%d iFreeHead=%#x iNext=%#x\n",
1783 pPage, iPage, (pChunk->Core.Key << GMM_CHUNKID_SHIFT) | iPage,
1784 pPage->Common.u2State, pChunk->iFreeHead, pPage->Free.iNext));
1785
1786 /* make the page private. */
1787 pPage->u = 0;
1788 AssertCompile(GMM_PAGE_STATE_PRIVATE == 0);
1789 pPage->Private.hGVM = hGVM;
1790 AssertCompile(NIL_RTHCPHYS >= GMM_GCPHYS_LAST);
1791 AssertCompile(GMM_GCPHYS_UNSHAREABLE >= GMM_GCPHYS_LAST);
1792 if (pPageDesc->HCPhysGCPhys <= GMM_GCPHYS_LAST)
1793 pPage->Private.pfn = pPageDesc->HCPhysGCPhys >> PAGE_SHIFT;
1794 else
1795 pPage->Private.pfn = GMM_PAGE_PFN_UNSHAREABLE; /* unshareable / unassigned - same thing. */
1796
1797 /* update the page descriptor. */
1798 pPageDesc->HCPhysGCPhys = RTR0MemObjGetPagePhysAddr(pChunk->MemObj, iPage);
1799 Assert(pPageDesc->HCPhysGCPhys != NIL_RTHCPHYS);
1800 pPageDesc->idPage = (pChunk->Core.Key << GMM_CHUNKID_SHIFT) | iPage;
1801 pPageDesc->idSharedPage = NIL_GMM_PAGEID;
1802}
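/*
 * Worked example (sketch, assuming 4 KiB pages and the 2 MiB chunk size asserted above, i.e.
 * GMM_CHUNKID_SHIFT == 9 and 512 pages per chunk): allocating page index 0x1f from the chunk
 * with id 0x123 yields idPage = (0x123 << 9) | 0x1f = 0x2461f; the id can later be decomposed
 * again as idChunk = idPage >> GMM_CHUNKID_SHIFT and iPage = idPage & 0x1ff.
 */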
1803
1804
1805/**
1806 * Common worker for GMMR0AllocateHandyPages and GMMR0AllocatePages.
1807 *
1808 * @returns VBox status code:
1809 * @retval VINF_SUCCESS on success.
1810 * @retval VERR_GMM_SEED_ME if seeding via GMMR0SeedChunk or
1811 * gmmR0AllocateMoreChunks is necessary.
1812 * @retval VERR_GMM_HIT_GLOBAL_LIMIT if we've exhausted the available pages.
1813 * @retval VERR_GMM_HIT_VM_ACCOUNT_LIMIT if we've hit the VM account limit,
1814 * that is we're trying to allocate more than we've reserved.
1815 *
1816 * @param pGMM Pointer to the GMM instance data.
1817 * @param pGVM Pointer to the shared VM structure.
1818 * @param cPages The number of pages to allocate.
1819 * @param paPages Pointer to the page descriptors.
1820 * See GMMPAGEDESC for details on what is expected on input.
1821 * @param enmAccount The account to charge.
1822 */
1823static int gmmR0AllocatePages(PGMM pGMM, PGVM pGVM, uint32_t cPages, PGMMPAGEDESC paPages, GMMACCOUNT enmAccount)
1824{
1825 /*
1826 * Check allocation limits.
1827 */
1828 if (RT_UNLIKELY(pGMM->cAllocatedPages + cPages > pGMM->cMaxPages))
1829 return VERR_GMM_HIT_GLOBAL_LIMIT;
1830
1831 switch (enmAccount)
1832 {
1833 case GMMACCOUNT_BASE:
1834 if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cBasePages + pGVM->gmm.s.cBalloonedPages + cPages > pGVM->gmm.s.Reserved.cBasePages))
1835 {
1836 Log(("gmmR0AllocatePages:Base: Reserved=%#llx Allocated+Ballooned+Requested=%#llx+%#llx+%#x!\n",
1837 pGVM->gmm.s.Reserved.cBasePages, pGVM->gmm.s.Allocated.cBasePages, pGVM->gmm.s.cBalloonedPages, cPages));
1838 return VERR_GMM_HIT_VM_ACCOUNT_LIMIT;
1839 }
1840 break;
1841 case GMMACCOUNT_SHADOW:
1842 if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cShadowPages + cPages > pGVM->gmm.s.Reserved.cShadowPages))
1843 {
1844 Log(("gmmR0AllocatePages:Shadow: Reserved=%#llx Allocated+Requested=%#llx+%#x!\n",
1845 pGVM->gmm.s.Reserved.cShadowPages, pGVM->gmm.s.Allocated.cShadowPages, cPages));
1846 return VERR_GMM_HIT_VM_ACCOUNT_LIMIT;
1847 }
1848 break;
1849 case GMMACCOUNT_FIXED:
1850 if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cFixedPages + cPages > pGVM->gmm.s.Reserved.cFixedPages))
1851 {
1852 Log(("gmmR0AllocatePages:Fixed: Reserved=%#llx Allocated+Requested=%#llx+%#x!\n",
1853 pGVM->gmm.s.Reserved.cFixedPages, pGVM->gmm.s.Allocated.cFixedPages, cPages));
1854 return VERR_GMM_HIT_VM_ACCOUNT_LIMIT;
1855 }
1856 break;
1857 default:
1858 AssertMsgFailedReturn(("enmAccount=%d\n", enmAccount), VERR_INTERNAL_ERROR);
1859 }
1860
1861 /*
1862      * Check if we need to allocate more memory or not. In bound memory mode this
1863      * is a bit of extra work, but it's easier to do it upfront than to bail out later.
1864 */
1865 PGMMCHUNKFREESET pSet = &pGMM->Private;
1866 if (pSet->cFreePages < cPages)
1867 return VERR_GMM_SEED_ME;
1868 if (pGMM->fBoundMemoryMode)
1869 {
1870 uint16_t hGVM = pGVM->hSelf;
1871 uint32_t cPagesFound = 0;
1872 for (unsigned i = 0; i < RT_ELEMENTS(pSet->apLists); i++)
1873 for (PGMMCHUNK pCur = pSet->apLists[i]; pCur; pCur = pCur->pFreeNext)
1874 if (pCur->hGVM == hGVM)
1875 {
1876 cPagesFound += pCur->cFree;
1877 if (cPagesFound >= cPages)
1878 break;
1879 }
1880 if (cPagesFound < cPages)
1881 return VERR_GMM_SEED_ME;
1882 }
1883
1884 /*
1885 * Pick the pages.
1886      * Make some effort to keep each VM allocating from its own private chunks (chunk affinity).
1887 */
1888 uint16_t hGVM = pGVM->hSelf;
1889 uint32_t iPage = 0;
1890
1891 /* first round, pick from chunks with an affinity to the VM. */
1892 for (unsigned i = 0; i < RT_ELEMENTS(pSet->apLists) && iPage < cPages; i++)
1893 {
1894 PGMMCHUNK pCurFree = NULL;
1895 PGMMCHUNK pCur = pSet->apLists[i];
1896 while (pCur && iPage < cPages)
1897 {
1898 PGMMCHUNK pNext = pCur->pFreeNext;
1899
1900 if ( pCur->hGVM == hGVM
1901 && pCur->cFree < GMM_CHUNK_NUM_PAGES)
1902 {
1903 gmmR0UnlinkChunk(pCur);
1904 for (; pCur->cFree && iPage < cPages; iPage++)
1905 gmmR0AllocatePage(pGMM, hGVM, pCur, &paPages[iPage]);
1906 gmmR0LinkChunk(pCur, pSet);
1907 }
1908
1909 pCur = pNext;
1910 }
1911 }
1912
1913 if (iPage < cPages)
1914 {
1915 /* second round, pick pages from the 100% empty chunks we just skipped above. */
1916 PGMMCHUNK pCurFree = NULL;
1917 PGMMCHUNK pCur = pSet->apLists[RT_ELEMENTS(pSet->apLists) - 1];
1918 while (pCur && iPage < cPages)
1919 {
1920 PGMMCHUNK pNext = pCur->pFreeNext;
1921
1922 if ( pCur->cFree == GMM_CHUNK_NUM_PAGES
1923 && ( pCur->hGVM == hGVM
1924 || !pGMM->fBoundMemoryMode))
1925 {
1926 gmmR0UnlinkChunk(pCur);
1927 for (; pCur->cFree && iPage < cPages; iPage++)
1928 gmmR0AllocatePage(pGMM, hGVM, pCur, &paPages[iPage]);
1929 gmmR0LinkChunk(pCur, pSet);
1930 }
1931
1932 pCur = pNext;
1933 }
1934 }
1935
1936 if ( iPage < cPages
1937 && !pGMM->fBoundMemoryMode)
1938 {
1939 /* third round, disregard affinity. */
1940 unsigned i = RT_ELEMENTS(pSet->apLists);
1941 while (i-- > 0 && iPage < cPages)
1942 {
1943 PGMMCHUNK pCurFree = NULL;
1944 PGMMCHUNK pCur = pSet->apLists[i];
1945 while (pCur && iPage < cPages)
1946 {
1947 PGMMCHUNK pNext = pCur->pFreeNext;
1948
1949 if ( pCur->cFree > GMM_CHUNK_NUM_PAGES / 2
1950 && cPages >= GMM_CHUNK_NUM_PAGES / 2)
1951 pCur->hGVM = hGVM; /* change chunk affinity */
1952
1953 gmmR0UnlinkChunk(pCur);
1954 for (; pCur->cFree && iPage < cPages; iPage++)
1955 gmmR0AllocatePage(pGMM, hGVM, pCur, &paPages[iPage]);
1956 gmmR0LinkChunk(pCur, pSet);
1957
1958 pCur = pNext;
1959 }
1960 }
1961 }
1962
1963 /*
1964 * Update the account.
1965 */
1966 switch (enmAccount)
1967 {
1968 case GMMACCOUNT_BASE: pGVM->gmm.s.Allocated.cBasePages += iPage; break;
1969 case GMMACCOUNT_SHADOW: pGVM->gmm.s.Allocated.cShadowPages += iPage; break;
1970 case GMMACCOUNT_FIXED: pGVM->gmm.s.Allocated.cFixedPages += iPage; break;
1971 default:
1972 AssertMsgFailedReturn(("enmAccount=%d\n", enmAccount), VERR_INTERNAL_ERROR);
1973 }
1974 pGVM->gmm.s.cPrivatePages += iPage;
1975 pGMM->cAllocatedPages += iPage;
1976
1977 AssertMsgReturn(iPage == cPages, ("%u != %u\n", iPage, cPages), VERR_INTERNAL_ERROR);
1978
1979 /*
1980 * Check if we've reached some threshold and should kick one or two VMs and tell
1981 * them to inflate their balloons a bit more... later.
1982 */
1983
1984 return VINF_SUCCESS;
1985}
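/*
 * Note: the allocation above is a three pass strategy over the private free set: first pick
 * from partially used chunks already bound to this VM (chunk affinity), then from completely
 * free chunks (taking ownership of them), and finally, unless bound memory mode is active,
 * from any remaining chunk regardless of affinity.
 */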
1986
1987
1988/**
1989 * Updates the previous allocations and allocates more pages.
1990 *
1991 * The handy pages are always taken from the 'base' memory account.
1992  * The allocated pages are not cleared and will contain random garbage.
1993 *
1994 * @returns VBox status code:
1995 * @retval VINF_SUCCESS on success.
1996 * @retval VERR_NOT_OWNER if the caller is not an EMT.
1997 * @retval VERR_GMM_PAGE_NOT_FOUND if one of the pages to update wasn't found.
1998 * @retval VERR_GMM_PAGE_NOT_PRIVATE if one of the pages to update wasn't a
1999 * private page.
2000 * @retval VERR_GMM_PAGE_NOT_SHARED if one of the pages to update wasn't a
2001 * shared page.
2002 * @retval VERR_GMM_NOT_PAGE_OWNER if one of the pages to be updated wasn't
2003 * owned by the VM.
2004 * @retval VERR_GMM_SEED_ME if seeding via GMMR0SeedChunk is necessary.
2005 * @retval VERR_GMM_HIT_GLOBAL_LIMIT if we've exhausted the available pages.
2006 * @retval VERR_GMM_HIT_VM_ACCOUNT_LIMIT if we've hit the VM account limit,
2007 * that is we're trying to allocate more than we've reserved.
2008 *
2009 * @param pVM Pointer to the shared VM structure.
2010 * @param idCpu VCPU id
2011 * @param cPagesToUpdate The number of pages to update (starting from the head).
2012 * @param cPagesToAlloc The number of pages to allocate (starting from the head).
2013 * @param paPages The array of page descriptors.
2014 * See GMMPAGEDESC for details on what is expected on input.
2015 * @thread EMT.
2016 */
2017GMMR0DECL(int) GMMR0AllocateHandyPages(PVM pVM, VMCPUID idCpu, uint32_t cPagesToUpdate, uint32_t cPagesToAlloc, PGMMPAGEDESC paPages)
2018{
2019 LogFlow(("GMMR0AllocateHandyPages: pVM=%p cPagesToUpdate=%#x cPagesToAlloc=%#x paPages=%p\n",
2020 pVM, cPagesToUpdate, cPagesToAlloc, paPages));
2021
2022 /*
2023 * Validate, get basics and take the semaphore.
2024 * (This is a relatively busy path, so make predictions where possible.)
2025 */
2026 PGMM pGMM;
2027 GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
2028 PGVM pGVM;
2029 int rc = GVMMR0ByVMAndEMT(pVM, idCpu, &pGVM);
2030 if (RT_FAILURE(rc))
2031 return rc;
2032
2033 AssertPtrReturn(paPages, VERR_INVALID_PARAMETER);
2034 AssertMsgReturn( (cPagesToUpdate && cPagesToUpdate < 1024)
2035 || (cPagesToAlloc && cPagesToAlloc < 1024),
2036 ("cPagesToUpdate=%#x cPagesToAlloc=%#x\n", cPagesToUpdate, cPagesToAlloc),
2037 VERR_INVALID_PARAMETER);
2038
2039 unsigned iPage = 0;
2040 for (; iPage < cPagesToUpdate; iPage++)
2041 {
2042 AssertMsgReturn( ( paPages[iPage].HCPhysGCPhys <= GMM_GCPHYS_LAST
2043 && !(paPages[iPage].HCPhysGCPhys & PAGE_OFFSET_MASK))
2044 || paPages[iPage].HCPhysGCPhys == NIL_RTHCPHYS
2045 || paPages[iPage].HCPhysGCPhys == GMM_GCPHYS_UNSHAREABLE,
2046 ("#%#x: %RHp\n", iPage, paPages[iPage].HCPhysGCPhys),
2047 VERR_INVALID_PARAMETER);
2048 AssertMsgReturn( paPages[iPage].idPage <= GMM_PAGEID_LAST
2049 /*|| paPages[iPage].idPage == NIL_GMM_PAGEID*/,
2050 ("#%#x: %#x\n", iPage, paPages[iPage].idPage), VERR_INVALID_PARAMETER);
2051         AssertMsgReturn(    paPages[iPage].idSharedPage <= GMM_PAGEID_LAST
2052 /*|| paPages[iPage].idSharedPage == NIL_GMM_PAGEID*/,
2053 ("#%#x: %#x\n", iPage, paPages[iPage].idSharedPage), VERR_INVALID_PARAMETER);
2054 }
2055
2056 for (; iPage < cPagesToAlloc; iPage++)
2057 {
2058 AssertMsgReturn(paPages[iPage].HCPhysGCPhys == NIL_RTHCPHYS, ("#%#x: %RHp\n", iPage, paPages[iPage].HCPhysGCPhys), VERR_INVALID_PARAMETER);
2059 AssertMsgReturn(paPages[iPage].idPage == NIL_GMM_PAGEID, ("#%#x: %#x\n", iPage, paPages[iPage].idPage), VERR_INVALID_PARAMETER);
2060 AssertMsgReturn(paPages[iPage].idSharedPage == NIL_GMM_PAGEID, ("#%#x: %#x\n", iPage, paPages[iPage].idSharedPage), VERR_INVALID_PARAMETER);
2061 }
2062
2063 rc = RTSemFastMutexRequest(pGMM->Mtx);
2064 AssertRC(rc);
2065 if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
2066 {
2067
2068 /* No allocations before the initial reservation has been made! */
2069 if (RT_LIKELY( pGVM->gmm.s.Reserved.cBasePages
2070 && pGVM->gmm.s.Reserved.cFixedPages
2071 && pGVM->gmm.s.Reserved.cShadowPages))
2072 {
2073 /*
2074 * Perform the updates.
2075 * Stop on the first error.
2076 */
2077 for (iPage = 0; iPage < cPagesToUpdate; iPage++)
2078 {
2079 if (paPages[iPage].idPage != NIL_GMM_PAGEID)
2080 {
2081 PGMMPAGE pPage = gmmR0GetPage(pGMM, paPages[iPage].idPage);
2082 if (RT_LIKELY(pPage))
2083 {
2084 if (RT_LIKELY(GMM_PAGE_IS_PRIVATE(pPage)))
2085 {
2086 if (RT_LIKELY(pPage->Private.hGVM == pGVM->hSelf))
2087 {
2088 AssertCompile(NIL_RTHCPHYS > GMM_GCPHYS_LAST && GMM_GCPHYS_UNSHAREABLE > GMM_GCPHYS_LAST);
2089 if (RT_LIKELY(paPages[iPage].HCPhysGCPhys <= GMM_GCPHYS_LAST))
2090 pPage->Private.pfn = paPages[iPage].HCPhysGCPhys >> PAGE_SHIFT;
2091 else if (paPages[iPage].HCPhysGCPhys == GMM_GCPHYS_UNSHAREABLE)
2092 pPage->Private.pfn = GMM_PAGE_PFN_UNSHAREABLE;
2093 /* else: NIL_RTHCPHYS nothing */
2094
2095 paPages[iPage].idPage = NIL_GMM_PAGEID;
2096 paPages[iPage].HCPhysGCPhys = NIL_RTHCPHYS;
2097 }
2098 else
2099 {
2100 Log(("GMMR0AllocateHandyPages: #%#x/%#x: Not owner! hGVM=%#x hSelf=%#x\n",
2101 iPage, paPages[iPage].idPage, pPage->Private.hGVM, pGVM->hSelf));
2102 rc = VERR_GMM_NOT_PAGE_OWNER;
2103 break;
2104 }
2105 }
2106 else
2107 {
2108 Log(("GMMR0AllocateHandyPages: #%#x/%#x: Not private! %.*Rhxs (type %d)\n", iPage, paPages[iPage].idPage, sizeof(*pPage), pPage, pPage->Common.u2State));
2109 rc = VERR_GMM_PAGE_NOT_PRIVATE;
2110 break;
2111 }
2112 }
2113 else
2114 {
2115 Log(("GMMR0AllocateHandyPages: #%#x/%#x: Not found! (private)\n", iPage, paPages[iPage].idPage));
2116 rc = VERR_GMM_PAGE_NOT_FOUND;
2117 break;
2118 }
2119 }
2120
2121 if (paPages[iPage].idSharedPage != NIL_GMM_PAGEID)
2122 {
2123 PGMMPAGE pPage = gmmR0GetPage(pGMM, paPages[iPage].idSharedPage);
2124 if (RT_LIKELY(pPage))
2125 {
2126 if (RT_LIKELY(GMM_PAGE_IS_SHARED(pPage)))
2127 {
2128 AssertCompile(NIL_RTHCPHYS > GMM_GCPHYS_LAST && GMM_GCPHYS_UNSHAREABLE > GMM_GCPHYS_LAST);
2129 Assert(pPage->Shared.cRefs);
2130 Assert(pGVM->gmm.s.cSharedPages);
2131 Assert(pGVM->gmm.s.Allocated.cBasePages);
2132
2133 Log(("GMMR0AllocateHandyPages: free shared page %x cRefs=%d\n", paPages[iPage].idSharedPage, pPage->Shared.cRefs));
2134 pGVM->gmm.s.cSharedPages--;
2135 pGVM->gmm.s.Allocated.cBasePages--;
2136 if (!--pPage->Shared.cRefs)
2137 {
2138 gmmR0FreeSharedPage(pGMM, paPages[iPage].idSharedPage, pPage);
2139 }
2140 else
2141 {
2142 Assert(pGMM->cDuplicatePages);
2143 pGMM->cDuplicatePages--;
2144 }
2145
2146 paPages[iPage].idSharedPage = NIL_GMM_PAGEID;
2147 }
2148 else
2149 {
2150 Log(("GMMR0AllocateHandyPages: #%#x/%#x: Not shared!\n", iPage, paPages[iPage].idSharedPage));
2151 rc = VERR_GMM_PAGE_NOT_SHARED;
2152 break;
2153 }
2154 }
2155 else
2156 {
2157 Log(("GMMR0AllocateHandyPages: #%#x/%#x: Not found! (shared)\n", iPage, paPages[iPage].idSharedPage));
2158 rc = VERR_GMM_PAGE_NOT_FOUND;
2159 break;
2160 }
2161 }
2162 }
2163
2164 /*
2165 * Join paths with GMMR0AllocatePages for the allocation.
2166 * Note! gmmR0AllocateMoreChunks may leave the protection of the mutex!
2167 */
2168 while (RT_SUCCESS(rc))
2169 {
2170 rc = gmmR0AllocatePages(pGMM, pGVM, cPagesToAlloc, paPages, GMMACCOUNT_BASE);
2171 if ( rc != VERR_GMM_SEED_ME
2172 || pGMM->fLegacyAllocationMode)
2173 break;
2174 rc = gmmR0AllocateMoreChunks(pGMM, pGVM, &pGMM->Private, cPagesToAlloc);
2175 }
2176 }
2177 else
2178 rc = VERR_WRONG_ORDER;
2179 GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
2180 }
2181 else
2182 rc = VERR_INTERNAL_ERROR_5;
2183 RTSemFastMutexRelease(pGMM->Mtx);
2184 LogFlow(("GMMR0AllocateHandyPages: returns %Rrc\n", rc));
2185 return rc;
2186}
2187
2188
2189/**
2190 * Allocate one or more pages.
2191 *
2192 * This is typically used for ROMs and MMIO2 (VRAM) during VM creation.
2193  * The allocated pages are not cleared and will contain random garbage.
2194 *
2195 * @returns VBox status code:
2196 * @retval VINF_SUCCESS on success.
2197 * @retval VERR_NOT_OWNER if the caller is not an EMT.
2198 * @retval VERR_GMM_SEED_ME if seeding via GMMR0SeedChunk is necessary.
2199 * @retval VERR_GMM_HIT_GLOBAL_LIMIT if we've exhausted the available pages.
2200 * @retval VERR_GMM_HIT_VM_ACCOUNT_LIMIT if we've hit the VM account limit,
2201 * that is we're trying to allocate more than we've reserved.
2202 *
2203 * @param pVM Pointer to the shared VM structure.
2204 * @param idCpu VCPU id
2205 * @param cPages The number of pages to allocate.
2206 * @param paPages Pointer to the page descriptors.
2207 * See GMMPAGEDESC for details on what is expected on input.
2208 * @param enmAccount The account to charge.
2209 *
2210 * @thread EMT.
2211 */
2212GMMR0DECL(int) GMMR0AllocatePages(PVM pVM, VMCPUID idCpu, uint32_t cPages, PGMMPAGEDESC paPages, GMMACCOUNT enmAccount)
2213{
2214 LogFlow(("GMMR0AllocatePages: pVM=%p cPages=%#x paPages=%p enmAccount=%d\n", pVM, cPages, paPages, enmAccount));
2215
2216 /*
2217 * Validate, get basics and take the semaphore.
2218 */
2219 PGMM pGMM;
2220 GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
2221 PGVM pGVM;
2222 int rc = GVMMR0ByVMAndEMT(pVM, idCpu, &pGVM);
2223 if (RT_FAILURE(rc))
2224 return rc;
2225
2226 AssertPtrReturn(paPages, VERR_INVALID_PARAMETER);
2227 AssertMsgReturn(enmAccount > GMMACCOUNT_INVALID && enmAccount < GMMACCOUNT_END, ("%d\n", enmAccount), VERR_INVALID_PARAMETER);
2228 AssertMsgReturn(cPages > 0 && cPages < RT_BIT(32 - PAGE_SHIFT), ("%#x\n", cPages), VERR_INVALID_PARAMETER);
2229
2230 for (unsigned iPage = 0; iPage < cPages; iPage++)
2231 {
2232 AssertMsgReturn( paPages[iPage].HCPhysGCPhys == NIL_RTHCPHYS
2233 || paPages[iPage].HCPhysGCPhys == GMM_GCPHYS_UNSHAREABLE
2234 || ( enmAccount == GMMACCOUNT_BASE
2235 && paPages[iPage].HCPhysGCPhys <= GMM_GCPHYS_LAST
2236 && !(paPages[iPage].HCPhysGCPhys & PAGE_OFFSET_MASK)),
2237 ("#%#x: %RHp enmAccount=%d\n", iPage, paPages[iPage].HCPhysGCPhys, enmAccount),
2238 VERR_INVALID_PARAMETER);
2239 AssertMsgReturn(paPages[iPage].idPage == NIL_GMM_PAGEID, ("#%#x: %#x\n", iPage, paPages[iPage].idPage), VERR_INVALID_PARAMETER);
2240 AssertMsgReturn(paPages[iPage].idSharedPage == NIL_GMM_PAGEID, ("#%#x: %#x\n", iPage, paPages[iPage].idSharedPage), VERR_INVALID_PARAMETER);
2241 }
2242
2243 rc = RTSemFastMutexRequest(pGMM->Mtx);
2244 AssertRC(rc);
2245 if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
2246 {
2247
2248 /* No allocations before the initial reservation has been made! */
2249 if (RT_LIKELY( pGVM->gmm.s.Reserved.cBasePages
2250 && pGVM->gmm.s.Reserved.cFixedPages
2251 && pGVM->gmm.s.Reserved.cShadowPages))
2252 {
2253 /*
2254 * gmmR0AllocatePages seed loop.
2255 * Note! gmmR0AllocateMoreChunks may leave the protection of the mutex!
2256 */
2257 while (RT_SUCCESS(rc))
2258 {
2259 rc = gmmR0AllocatePages(pGMM, pGVM, cPages, paPages, enmAccount);
2260 if ( rc != VERR_GMM_SEED_ME
2261 || pGMM->fLegacyAllocationMode)
2262 break;
2263 rc = gmmR0AllocateMoreChunks(pGMM, pGVM, &pGMM->Private, cPages);
2264 }
2265 }
2266 else
2267 rc = VERR_WRONG_ORDER;
2268 GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
2269 }
2270 else
2271 rc = VERR_INTERNAL_ERROR_5;
2272 RTSemFastMutexRelease(pGMM->Mtx);
2273 LogFlow(("GMMR0AllocatePages: returns %Rrc\n", rc));
2274 return rc;
2275}
2276
2277
2278/**
2279 * VMMR0 request wrapper for GMMR0AllocatePages.
2280 *
2281 * @returns see GMMR0AllocatePages.
2282 * @param pVM Pointer to the shared VM structure.
2283 * @param idCpu VCPU id
2284 * @param pReq The request packet.
2285 */
2286GMMR0DECL(int) GMMR0AllocatePagesReq(PVM pVM, VMCPUID idCpu, PGMMALLOCATEPAGESREQ pReq)
2287{
2288 /*
2289 * Validate input and pass it on.
2290 */
2291 AssertPtrReturn(pVM, VERR_INVALID_POINTER);
2292 AssertPtrReturn(pReq, VERR_INVALID_POINTER);
2293 AssertMsgReturn(pReq->Hdr.cbReq >= RT_UOFFSETOF(GMMALLOCATEPAGESREQ, aPages[0]),
2294 ("%#x < %#x\n", pReq->Hdr.cbReq, RT_UOFFSETOF(GMMALLOCATEPAGESREQ, aPages[0])),
2295 VERR_INVALID_PARAMETER);
2296 AssertMsgReturn(pReq->Hdr.cbReq == RT_UOFFSETOF(GMMALLOCATEPAGESREQ, aPages[pReq->cPages]),
2297 ("%#x != %#x\n", pReq->Hdr.cbReq, RT_UOFFSETOF(GMMALLOCATEPAGESREQ, aPages[pReq->cPages])),
2298 VERR_INVALID_PARAMETER);
2299
2300 return GMMR0AllocatePages(pVM, idCpu, pReq->cPages, &pReq->aPages[0], pReq->enmAccount);
2301}
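/*
 * Ring-3 usage sketch (illustration only, not taken from this file; common request header
 * initialization and the actual VMMR0 dispatch are omitted). It shows the input shape the
 * validation above and in GMMR0AllocatePages expects:
 *
 *      uint32_t cPages = 32;
 *      PGMMALLOCATEPAGESREQ pReq = (PGMMALLOCATEPAGESREQ)RTMemAllocZ(RT_UOFFSETOF(GMMALLOCATEPAGESREQ, aPages[cPages]));
 *      pReq->Hdr.cbReq  = RT_UOFFSETOF(GMMALLOCATEPAGESREQ, aPages[cPages]);
 *      pReq->cPages     = cPages;
 *      pReq->enmAccount = GMMACCOUNT_BASE;
 *      for (uint32_t i = 0; i < cPages; i++)
 *      {
 *          pReq->aPages[i].HCPhysGCPhys = NIL_RTHCPHYS;    // or a page aligned GC physical address (base account only)
 *          pReq->aPages[i].idPage       = NIL_GMM_PAGEID;
 *          pReq->aPages[i].idSharedPage = NIL_GMM_PAGEID;
 *      }
 *      // On success each aPages[i] comes back with idPage set and HCPhysGCPhys holding the host physical address.
 */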
2302
2303/**
2304  * Allocate a large page to represent guest RAM.
2305  *
2306  * The allocated pages are not cleared and will contain random garbage.
2307 *
2308 * @returns VBox status code:
2309 * @retval VINF_SUCCESS on success.
2310 * @retval VERR_NOT_OWNER if the caller is not an EMT.
2311 * @retval VERR_GMM_SEED_ME if seeding via GMMR0SeedChunk is necessary.
2312 * @retval VERR_GMM_HIT_GLOBAL_LIMIT if we've exhausted the available pages.
2313 * @retval VERR_GMM_HIT_VM_ACCOUNT_LIMIT if we've hit the VM account limit,
2314 * that is we're trying to allocate more than we've reserved.
2315 * @returns see GMMR0AllocatePages.
2316 * @param pVM Pointer to the shared VM structure.
2317 * @param idCpu VCPU id
2318  * @param cbPage    Large page size (must be GMM_CHUNK_SIZE).
 * @param pIdPage   Where to store the GMM page id of the large page.
 * @param pHCPhys   Where to store the host physical address of the large page.
2319  */
2320GMMR0DECL(int) GMMR0AllocateLargePage(PVM pVM, VMCPUID idCpu, uint32_t cbPage, uint32_t *pIdPage, RTHCPHYS *pHCPhys)
2321{
2322 LogFlow(("GMMR0AllocateLargePage: pVM=%p cbPage=%x\n", pVM, cbPage));
2323
2324 AssertReturn(cbPage == GMM_CHUNK_SIZE, VERR_INVALID_PARAMETER);
2325 AssertPtrReturn(pIdPage, VERR_INVALID_PARAMETER);
2326 AssertPtrReturn(pHCPhys, VERR_INVALID_PARAMETER);
2327
2328 /*
2329 * Validate, get basics and take the semaphore.
2330 */
2331 PGMM pGMM;
2332 GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
2333 PGVM pGVM;
2334 int rc = GVMMR0ByVMAndEMT(pVM, idCpu, &pGVM);
2335 if (RT_FAILURE(rc))
2336 return rc;
2337
2338 /* Not supported in legacy mode where we allocate the memory in ring 3 and lock it in ring 0. */
2339 if (pGMM->fLegacyAllocationMode)
2340 return VERR_NOT_SUPPORTED;
2341
2342 *pHCPhys = NIL_RTHCPHYS;
2343 *pIdPage = NIL_GMM_PAGEID;
2344
2345 rc = RTSemFastMutexRequest(pGMM->Mtx);
2346 AssertRCReturn(rc, rc);
2347 if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
2348 {
2349 const unsigned cPages = (GMM_CHUNK_SIZE >> PAGE_SHIFT);
2350 PGMMCHUNK pChunk;
2351 GMMPAGEDESC PageDesc;
2352
2353 if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cBasePages + pGVM->gmm.s.cBalloonedPages + cPages > pGVM->gmm.s.Reserved.cBasePages))
2354 {
2355 Log(("GMMR0AllocateLargePage: Reserved=%#llx Allocated+Requested=%#llx+%#x!\n",
2356 pGVM->gmm.s.Reserved.cBasePages, pGVM->gmm.s.Allocated.cBasePages, cPages));
2357 RTSemFastMutexRelease(pGMM->Mtx);
2358 return VERR_GMM_HIT_VM_ACCOUNT_LIMIT;
2359 }
2360
2361         /* Allocate a new continuous chunk. */
2362 rc = gmmR0AllocateOneChunk(pGMM, &pGMM->Private, pGVM->hSelf, GMMCHUNKTYPE_CONTINUOUS, &pChunk);
2363 if (RT_FAILURE(rc))
2364 {
2365 RTSemFastMutexRelease(pGMM->Mtx);
2366 return rc;
2367 }
2368
2369 /* Unlink the new chunk from the free list. */
2370 gmmR0UnlinkChunk(pChunk);
2371
2372 /* Allocate all pages. */
2373 gmmR0AllocatePage(pGMM, pGVM->hSelf, pChunk, &PageDesc);
2374 /* Return the first page as we'll use the whole chunk as one big page. */
2375 *pIdPage = PageDesc.idPage;
2376 *pHCPhys = PageDesc.HCPhysGCPhys;
2377
2378 for (unsigned i = 1; i < cPages; i++)
2379 gmmR0AllocatePage(pGMM, pGVM->hSelf, pChunk, &PageDesc);
2380
2381 /* Update accounting. */
2382 pGVM->gmm.s.Allocated.cBasePages += cPages;
2383 pGVM->gmm.s.cPrivatePages += cPages;
2384 pGMM->cAllocatedPages += cPages;
2385
2386 gmmR0LinkChunk(pChunk, &pGMM->Private);
2387 }
2388 else
2389 rc = VERR_INTERNAL_ERROR_5;
2390
2391 RTSemFastMutexRelease(pGMM->Mtx);
2392 LogFlow(("GMMR0AllocatePages: returns %Rrc\n", rc));
2393 return rc;
2394}
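/*
 * Usage note (sketch only): the only size currently accepted is GMM_CHUNK_SIZE, i.e. a full
 * 2 MiB chunk allocated as one physically contiguous block; the returned page id and host
 * physical address describe the first page of that chunk:
 *
 *      uint32_t idPage; RTHCPHYS HCPhys;
 *      int rc = GMMR0AllocateLargePage(pVM, idCpu, GMM_CHUNK_SIZE, &idPage, &HCPhys);
 */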
2395
2396
2397/**
2398 * Free a large page
2399 *
2400 * @returns VBox status code:
2401 * @param pVM Pointer to the shared VM structure.
2402 * @param idCpu VCPU id
2403 * @param idPage Large page id
2404 */
2405GMMR0DECL(int) GMMR0FreeLargePage(PVM pVM, VMCPUID idCpu, uint32_t idPage)
2406{
2407 LogFlow(("GMMR0FreeLargePage: pVM=%p idPage=%x\n", pVM, idPage));
2408
2409 /*
2410 * Validate, get basics and take the semaphore.
2411 */
2412 PGMM pGMM;
2413 GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
2414 PGVM pGVM;
2415 int rc = GVMMR0ByVMAndEMT(pVM, idCpu, &pGVM);
2416 if (RT_FAILURE(rc))
2417 return rc;
2418
2419 /* Not supported in legacy mode where we allocate the memory in ring 3 and lock it in ring 0. */
2420 if (pGMM->fLegacyAllocationMode)
2421 return VERR_NOT_SUPPORTED;
2422
2423 rc = RTSemFastMutexRequest(pGMM->Mtx);
2424 AssertRC(rc);
2425 if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
2426 {
2427 const unsigned cPages = (GMM_CHUNK_SIZE >> PAGE_SHIFT);
2428
2429 if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cBasePages < cPages))
2430 {
2431 Log(("GMMR0FreeLargePage: allocated=%#llx cPages=%#x!\n", pGVM->gmm.s.Allocated.cBasePages, cPages));
2432 RTSemFastMutexRelease(pGMM->Mtx);
2433 return VERR_GMM_ATTEMPT_TO_FREE_TOO_MUCH;
2434 }
2435
2436 PGMMPAGE pPage = gmmR0GetPage(pGMM, idPage);
2437 if ( RT_LIKELY(pPage)
2438 && RT_LIKELY(GMM_PAGE_IS_PRIVATE(pPage)))
2439 {
2440 PGMMCHUNK pChunk = gmmR0GetChunk(pGMM, idPage >> GMM_CHUNKID_SHIFT);
2441 Assert(pChunk);
2442 Assert(pChunk->cFree < GMM_CHUNK_NUM_PAGES);
2443 Assert(pChunk->cPrivate > 0);
2444
2445 /* Release the memory immediately. */
2446 gmmR0FreeChunk(pGMM, NULL, pChunk);
2447
2448 /* Update accounting. */
2449 pGVM->gmm.s.Allocated.cBasePages -= cPages;
2450 pGVM->gmm.s.cPrivatePages -= cPages;
2451 pGMM->cAllocatedPages -= cPages;
2452 }
2453 else
2454 rc = VERR_GMM_PAGE_NOT_FOUND;
2455 }
2456 else
2457 rc = VERR_INTERNAL_ERROR_5;
2458
2459 RTSemFastMutexRelease(pGMM->Mtx);
2460 LogFlow(("GMMR0FreeLargePage: returns %Rrc\n", rc));
2461 return rc;
2462}
2463
2464
2465/**
2466 * VMMR0 request wrapper for GMMR0FreeLargePage.
2467 *
2468 * @returns see GMMR0FreeLargePage.
2469 * @param pVM Pointer to the shared VM structure.
2470 * @param idCpu VCPU id
2471 * @param pReq The request packet.
2472 */
2473GMMR0DECL(int) GMMR0FreeLargePageReq(PVM pVM, VMCPUID idCpu, PGMMFREELARGEPAGEREQ pReq)
2474{
2475 /*
2476 * Validate input and pass it on.
2477 */
2478 AssertPtrReturn(pVM, VERR_INVALID_POINTER);
2479 AssertPtrReturn(pReq, VERR_INVALID_POINTER);
2480     AssertMsgReturn(pReq->Hdr.cbReq == sizeof(GMMFREELARGEPAGEREQ),
2481                     ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(GMMFREELARGEPAGEREQ)),
2482 VERR_INVALID_PARAMETER);
2483
2484 return GMMR0FreeLargePage(pVM, idCpu, pReq->idPage);
2485}
2486
2487/**
2488 * Frees a chunk, giving it back to the host OS.
2489 *
2490 * @param pGMM Pointer to the GMM instance.
2491 * @param pGVM This is set when called from GMMR0CleanupVM so we can
2492 * unmap and free the chunk in one go.
2493 * @param pChunk The chunk to free.
2494 */
2495static void gmmR0FreeChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk)
2496{
2497 Assert(pChunk->Core.Key != NIL_GMM_CHUNKID);
2498
2499 /*
2500      * Cleanup hack! Unmap the chunk from the caller's address space.
2501 */
2502 if ( pChunk->cMappings
2503 && pGVM)
2504 gmmR0UnmapChunk(pGMM, pGVM, pChunk);
2505
2506 /*
2507 * If there are current mappings of the chunk, then request the
2508 * VMs to unmap them. Reposition the chunk in the free list so
2509 * it won't be a likely candidate for allocations.
2510 */
2511 if (pChunk->cMappings)
2512 {
2513 /** @todo R0 -> VM request */
2514 /* The chunk can be owned by more than one VM if fBoundMemoryMode is false! */
2515 Log(("gmmR0FreeChunk: chunk still has %d mappings; don't free!\n", pChunk->cMappings));
2516 }
2517 else
2518 {
2519 /*
2520 * Try free the memory object.
2521 */
2522 int rc = RTR0MemObjFree(pChunk->MemObj, false /* fFreeMappings */);
2523 if (RT_SUCCESS(rc))
2524 {
2525 pChunk->MemObj = NIL_RTR0MEMOBJ;
2526
2527 /*
2528 * Unlink it from everywhere.
2529 */
2530 gmmR0UnlinkChunk(pChunk);
2531
2532 PAVLU32NODECORE pCore = RTAvlU32Remove(&pGMM->pChunks, pChunk->Core.Key);
2533 Assert(pCore == &pChunk->Core); NOREF(pCore);
2534
2535 PGMMCHUNKTLBE pTlbe = &pGMM->ChunkTLB.aEntries[GMM_CHUNKTLB_IDX(pChunk->Core.Key)];
2536 if (pTlbe->pChunk == pChunk)
2537 {
2538 pTlbe->idChunk = NIL_GMM_CHUNKID;
2539 pTlbe->pChunk = NULL;
2540 }
2541
2542 Assert(pGMM->cChunks > 0);
2543 pGMM->cChunks--;
2544
2545 /*
2546 * Free the Chunk ID and struct.
2547 */
2548 gmmR0FreeChunkId(pGMM, pChunk->Core.Key);
2549 pChunk->Core.Key = NIL_GMM_CHUNKID;
2550
2551 RTMemFree(pChunk->paMappings);
2552 pChunk->paMappings = NULL;
2553
2554 RTMemFree(pChunk);
2555 }
2556 else
2557 AssertRC(rc);
2558 }
2559}
2560
2561
2562/**
2563 * Free page worker.
2564 *
2565 * The caller does all the statistic decrementing, we do all the incrementing.
2566 *
2567 * @param pGMM Pointer to the GMM instance data.
2568 * @param pChunk Pointer to the chunk this page belongs to.
2569 * @param idPage The Page ID.
2570 * @param pPage Pointer to the page.
2571 */
2572static void gmmR0FreePageWorker(PGMM pGMM, PGMMCHUNK pChunk, uint32_t idPage, PGMMPAGE pPage)
2573{
2574 Log3(("F pPage=%p iPage=%#x/%#x u2State=%d iFreeHead=%#x\n",
2575 pPage, pPage - &pChunk->aPages[0], idPage, pPage->Common.u2State, pChunk->iFreeHead)); NOREF(idPage);
2576
2577 /*
2578 * Put the page on the free list.
2579 */
2580 pPage->u = 0;
2581 pPage->Free.u2State = GMM_PAGE_STATE_FREE;
2582 Assert(pChunk->iFreeHead < RT_ELEMENTS(pChunk->aPages) || pChunk->iFreeHead == UINT16_MAX);
2583 pPage->Free.iNext = pChunk->iFreeHead;
2584 pChunk->iFreeHead = pPage - &pChunk->aPages[0];
2585
2586 /*
2587 * Update statistics (the cShared/cPrivate stats are up to date already),
2588 * and relink the chunk if necessary.
2589 */
2590 if ((pChunk->cFree & GMM_CHUNK_FREE_SET_MASK) == 0)
2591 {
2592 gmmR0UnlinkChunk(pChunk);
2593 pChunk->cFree++;
2594 gmmR0LinkChunk(pChunk, pChunk->cShared ? &pGMM->Shared : &pGMM->Private);
2595 }
2596 else
2597 {
2598 pChunk->cFree++;
2599 pChunk->pSet->cFreePages++;
2600
2601 /*
2602 * If the chunk becomes empty, consider giving memory back to the host OS.
2603 *
2604          * The current strategy is to try to give it back if there are other chunks
2605 * in this free list, meaning if there are at least 240 free pages in this
2606 * category. Note that since there are probably mappings of the chunk,
2607 * it won't be freed up instantly, which probably screws up this logic
2608 * a bit...
2609 */
2610 if (RT_UNLIKELY( pChunk->cFree == GMM_CHUNK_NUM_PAGES
2611 && pChunk->pFreeNext
2612 && pChunk->pFreePrev
2613 && !pGMM->fLegacyAllocationMode))
2614 gmmR0FreeChunk(pGMM, NULL, pChunk);
2615 }
2616}
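/*
 * Note (assuming the free sets bucket chunks by ranges of (GMM_CHUNK_FREE_SET_MASK + 1) free
 * pages): the (cFree & GMM_CHUNK_FREE_SET_MASK) == 0 test above detects when incrementing
 * cFree would move the chunk into the next free list bucket, in which case it has to be
 * unlinked and relinked; otherwise only the counters need updating.
 */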
2617
2618
2619/**
2620 * Frees a shared page, the page is known to exist and be valid and such.
2621 *
2622 * @param pGMM Pointer to the GMM instance.
2623 * @param idPage The Page ID
2624 * @param pPage The page structure.
2625 */
2626DECLINLINE(void) gmmR0FreeSharedPage(PGMM pGMM, uint32_t idPage, PGMMPAGE pPage)
2627{
2628 PGMMCHUNK pChunk = gmmR0GetChunk(pGMM, idPage >> GMM_CHUNKID_SHIFT);
2629 Assert(pChunk);
2630 Assert(pChunk->cFree < GMM_CHUNK_NUM_PAGES);
2631 Assert(pChunk->cShared > 0);
2632 Assert(pGMM->cSharedPages > 0);
2633 Assert(pGMM->cAllocatedPages > 0);
2634 Assert(!pPage->Shared.cRefs);
2635
2636 pChunk->cShared--;
2637 pGMM->cAllocatedPages--;
2638 pGMM->cSharedPages--;
2639 gmmR0FreePageWorker(pGMM, pChunk, idPage, pPage);
2640}
2641
2642#ifdef VBOX_WITH_PAGE_SHARING
2643/**
2644 * Converts a private page to a shared page, the page is known to exist and be valid and such.
2645 *
2646 * @param pGMM Pointer to the GMM instance.
2647 * @param pGVM Pointer to the GVM instance.
2648 * @param HCPhys Host physical address
2649 * @param idPage The Page ID
2650 * @param pPage The page structure.
2651 */
2652DECLINLINE(void) gmmR0ConvertToSharedPage(PGMM pGMM, PGVM pGVM, RTHCPHYS HCPhys, uint32_t idPage, PGMMPAGE pPage)
2653{
2654 PGMMCHUNK pChunk = gmmR0GetChunk(pGMM, idPage >> GMM_CHUNKID_SHIFT);
2655 Assert(pChunk);
2656 Assert(pChunk->cFree < GMM_CHUNK_NUM_PAGES);
2657 Assert(GMM_PAGE_IS_PRIVATE(pPage));
2658
2659 pChunk->cPrivate--;
2660 pChunk->cShared++;
2661
2662 pGMM->cSharedPages++;
2663
2664 pGVM->gmm.s.cSharedPages++;
2665 pGVM->gmm.s.cPrivatePages--;
2666
2667 /* Modify the page structure. */
2668 pPage->Shared.pfn = (uint32_t)(uint64_t)(HCPhys >> PAGE_SHIFT);
2669 pPage->Shared.cRefs = 1;
2670 pPage->Common.u2State = GMM_PAGE_STATE_SHARED;
2671}
2672
2673/**
2674 * Increase the use count of a shared page, the page is known to exist and be valid and such.
2675 *
2676 * @param pGMM Pointer to the GMM instance.
2677 * @param pGVM Pointer to the GVM instance.
2678 * @param pPage The page structure.
2679 */
2680DECLINLINE(void) gmmR0UseSharedPage(PGMM pGMM, PGVM pGVM, PGMMPAGE pPage)
2681{
2682 Assert(pGMM->cSharedPages > 0);
2683 Assert(pGMM->cAllocatedPages > 0);
2684
2685 pGMM->cDuplicatePages++;
2686
2687 pPage->Shared.cRefs++;
2688 pGVM->gmm.s.cSharedPages++;
2689 pGVM->gmm.s.Allocated.cBasePages++;
2690}
2691#endif
2692
2693/**
2694 * Frees a private page, the page is known to exist and be valid and such.
2695 *
2696 * @param pGMM Pointer to the GMM instance.
2697 * @param idPage The Page ID
2698 * @param pPage The page structure.
2699 */
2700DECLINLINE(void) gmmR0FreePrivatePage(PGMM pGMM, uint32_t idPage, PGMMPAGE pPage)
2701{
2702 PGMMCHUNK pChunk = gmmR0GetChunk(pGMM, idPage >> GMM_CHUNKID_SHIFT);
2703 Assert(pChunk);
2704 Assert(pChunk->cFree < GMM_CHUNK_NUM_PAGES);
2705 Assert(pChunk->cPrivate > 0);
2706 Assert(pGMM->cAllocatedPages > 0);
2707
2708 pChunk->cPrivate--;
2709 pGMM->cAllocatedPages--;
2710 gmmR0FreePageWorker(pGMM, pChunk, idPage, pPage);
2711}
2712
2713/**
2714 * Common worker for GMMR0FreePages and GMMR0BalloonedPages.
2715 *
2716 * @returns VBox status code:
2717 * @retval xxx
2718 *
2719 * @param pGMM Pointer to the GMM instance data.
2720 * @param pGVM Pointer to the shared VM structure.
2721 * @param cPages The number of pages to free.
2722 * @param paPages Pointer to the page descriptors.
2723 * @param enmAccount The account this relates to.
2724 */
2725static int gmmR0FreePages(PGMM pGMM, PGVM pGVM, uint32_t cPages, PGMMFREEPAGEDESC paPages, GMMACCOUNT enmAccount)
2726{
2727 /*
2728 * Check that the request isn't impossible wrt to the account status.
2729 */
2730 switch (enmAccount)
2731 {
2732 case GMMACCOUNT_BASE:
2733 if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cBasePages < cPages))
2734 {
2735 Log(("gmmR0FreePages: allocated=%#llx cPages=%#x!\n", pGVM->gmm.s.Allocated.cBasePages, cPages));
2736 return VERR_GMM_ATTEMPT_TO_FREE_TOO_MUCH;
2737 }
2738 break;
2739 case GMMACCOUNT_SHADOW:
2740 if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cShadowPages < cPages))
2741 {
2742 Log(("gmmR0FreePages: allocated=%#llx cPages=%#x!\n", pGVM->gmm.s.Allocated.cShadowPages, cPages));
2743 return VERR_GMM_ATTEMPT_TO_FREE_TOO_MUCH;
2744 }
2745 break;
2746 case GMMACCOUNT_FIXED:
2747 if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cFixedPages < cPages))
2748 {
2749 Log(("gmmR0FreePages: allocated=%#llx cPages=%#x!\n", pGVM->gmm.s.Allocated.cFixedPages, cPages));
2750 return VERR_GMM_ATTEMPT_TO_FREE_TOO_MUCH;
2751 }
2752 break;
2753 default:
2754 AssertMsgFailedReturn(("enmAccount=%d\n", enmAccount), VERR_INTERNAL_ERROR);
2755 }
2756
2757 /*
2758 * Walk the descriptors and free the pages.
2759 *
2760 * Statistics (except the account) are being updated as we go along,
2761 * unlike the alloc code. Also, stop on the first error.
2762 */
2763 int rc = VINF_SUCCESS;
2764 uint32_t iPage;
2765 for (iPage = 0; iPage < cPages; iPage++)
2766 {
2767 uint32_t idPage = paPages[iPage].idPage;
2768 PGMMPAGE pPage = gmmR0GetPage(pGMM, idPage);
2769 if (RT_LIKELY(pPage))
2770 {
2771 if (RT_LIKELY(GMM_PAGE_IS_PRIVATE(pPage)))
2772 {
2773 if (RT_LIKELY(pPage->Private.hGVM == pGVM->hSelf))
2774 {
2775 Assert(pGVM->gmm.s.cPrivatePages);
2776 pGVM->gmm.s.cPrivatePages--;
2777 gmmR0FreePrivatePage(pGMM, idPage, pPage);
2778 }
2779 else
2780 {
2781 Log(("gmmR0AllocatePages: #%#x/%#x: not owner! hGVM=%#x hSelf=%#x\n", iPage, idPage,
2782 pPage->Private.hGVM, pGVM->hSelf));
2783 rc = VERR_GMM_NOT_PAGE_OWNER;
2784 break;
2785 }
2786 }
2787 else if (RT_LIKELY(GMM_PAGE_IS_SHARED(pPage)))
2788 {
2789 Assert(pGVM->gmm.s.cSharedPages);
2790 pGVM->gmm.s.cSharedPages--;
2791 Assert(pPage->Shared.cRefs);
2792 if (!--pPage->Shared.cRefs)
2793 {
2794 gmmR0FreeSharedPage(pGMM, idPage, pPage);
2795 }
2796 else
2797 {
2798 Assert(pGMM->cDuplicatePages);
2799 pGMM->cDuplicatePages--;
2800 }
2801 }
2802 else
2803 {
2804 Log(("gmmR0AllocatePages: #%#x/%#x: already free!\n", iPage, idPage));
2805 rc = VERR_GMM_PAGE_ALREADY_FREE;
2806 break;
2807 }
2808 }
2809 else
2810 {
2811 Log(("gmmR0AllocatePages: #%#x/%#x: not found!\n", iPage, idPage));
2812 rc = VERR_GMM_PAGE_NOT_FOUND;
2813 break;
2814 }
2815 paPages[iPage].idPage = NIL_GMM_PAGEID;
2816 }
2817
2818 /*
2819 * Update the account.
2820 */
2821 switch (enmAccount)
2822 {
2823 case GMMACCOUNT_BASE: pGVM->gmm.s.Allocated.cBasePages -= iPage; break;
2824 case GMMACCOUNT_SHADOW: pGVM->gmm.s.Allocated.cShadowPages -= iPage; break;
2825 case GMMACCOUNT_FIXED: pGVM->gmm.s.Allocated.cFixedPages -= iPage; break;
2826 default:
2827 AssertMsgFailedReturn(("enmAccount=%d\n", enmAccount), VERR_INTERNAL_ERROR);
2828 }
2829
2830 /*
2831 * Any threshold stuff to be done here?
2832 */
2833
2834 return rc;
2835}
2836
2837
2838/**
2839 * Free one or more pages.
2840 *
2841 * This is typically used at reset time or power off.
2842 *
2843 * @returns VBox status code:
2844 * @retval xxx
2845 *
2846 * @param pVM Pointer to the shared VM structure.
2847 * @param idCpu VCPU id
2848  * @param cPages The number of pages to free.
2849 * @param paPages Pointer to the page descriptors containing the Page IDs for each page.
2850 * @param enmAccount The account this relates to.
2851 * @thread EMT.
2852 */
2853GMMR0DECL(int) GMMR0FreePages(PVM pVM, VMCPUID idCpu, uint32_t cPages, PGMMFREEPAGEDESC paPages, GMMACCOUNT enmAccount)
2854{
2855 LogFlow(("GMMR0FreePages: pVM=%p cPages=%#x paPages=%p enmAccount=%d\n", pVM, cPages, paPages, enmAccount));
2856
2857 /*
2858 * Validate input and get the basics.
2859 */
2860 PGMM pGMM;
2861 GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
2862 PGVM pGVM;
2863 int rc = GVMMR0ByVMAndEMT(pVM, idCpu, &pGVM);
2864 if (RT_FAILURE(rc))
2865 return rc;
2866
2867 AssertPtrReturn(paPages, VERR_INVALID_PARAMETER);
2868 AssertMsgReturn(enmAccount > GMMACCOUNT_INVALID && enmAccount < GMMACCOUNT_END, ("%d\n", enmAccount), VERR_INVALID_PARAMETER);
2869 AssertMsgReturn(cPages > 0 && cPages < RT_BIT(32 - PAGE_SHIFT), ("%#x\n", cPages), VERR_INVALID_PARAMETER);
2870
2871 for (unsigned iPage = 0; iPage < cPages; iPage++)
2872 AssertMsgReturn( paPages[iPage].idPage <= GMM_PAGEID_LAST
2873 /*|| paPages[iPage].idPage == NIL_GMM_PAGEID*/,
2874 ("#%#x: %#x\n", iPage, paPages[iPage].idPage), VERR_INVALID_PARAMETER);
2875
2876 /*
2877 * Take the semaphore and call the worker function.
2878 */
2879 rc = RTSemFastMutexRequest(pGMM->Mtx);
2880 AssertRC(rc);
2881 if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
2882 {
2883 rc = gmmR0FreePages(pGMM, pGVM, cPages, paPages, enmAccount);
2884 GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
2885 }
2886 else
2887 rc = VERR_INTERNAL_ERROR_5;
2888 RTSemFastMutexRelease(pGMM->Mtx);
2889 LogFlow(("GMMR0FreePages: returns %Rrc\n", rc));
2890 return rc;
2891}
2892
2893
2894/**
2895 * VMMR0 request wrapper for GMMR0FreePages.
2896 *
2897 * @returns see GMMR0FreePages.
2898 * @param pVM Pointer to the shared VM structure.
2899 * @param idCpu VCPU id
2900 * @param pReq The request packet.
2901 */
2902GMMR0DECL(int) GMMR0FreePagesReq(PVM pVM, VMCPUID idCpu, PGMMFREEPAGESREQ pReq)
2903{
2904 /*
2905 * Validate input and pass it on.
2906 */
2907 AssertPtrReturn(pVM, VERR_INVALID_POINTER);
2908 AssertPtrReturn(pReq, VERR_INVALID_POINTER);
2909 AssertMsgReturn(pReq->Hdr.cbReq >= RT_UOFFSETOF(GMMFREEPAGESREQ, aPages[0]),
2910 ("%#x < %#x\n", pReq->Hdr.cbReq, RT_UOFFSETOF(GMMFREEPAGESREQ, aPages[0])),
2911 VERR_INVALID_PARAMETER);
2912 AssertMsgReturn(pReq->Hdr.cbReq == RT_UOFFSETOF(GMMFREEPAGESREQ, aPages[pReq->cPages]),
2913 ("%#x != %#x\n", pReq->Hdr.cbReq, RT_UOFFSETOF(GMMFREEPAGESREQ, aPages[pReq->cPages])),
2914 VERR_INVALID_PARAMETER);
2915
2916 return GMMR0FreePages(pVM, idCpu, pReq->cPages, &pReq->aPages[0], pReq->enmAccount);
2917}
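/*
 * Ring-3 usage sketch (illustration only; header initialization and dispatch omitted, and
 * paidPages is a hypothetical caller-side array of page ids previously handed out by the
 * allocation APIs):
 *
 *      PGMMFREEPAGESREQ pReq = (PGMMFREEPAGESREQ)RTMemAllocZ(RT_UOFFSETOF(GMMFREEPAGESREQ, aPages[cPages]));
 *      pReq->Hdr.cbReq  = RT_UOFFSETOF(GMMFREEPAGESREQ, aPages[cPages]);
 *      pReq->cPages     = cPages;
 *      pReq->enmAccount = GMMACCOUNT_BASE;
 *      for (uint32_t i = 0; i < cPages; i++)
 *          pReq->aPages[i].idPage = paidPages[i];
 */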
2918
2919
2920/**
2921 * Report back on a memory ballooning request.
2922 *
2923 * The request may or may not have been initiated by the GMM. If it was initiated
2924 * by the GMM it is important that this function is called even if no pages were
2925 * ballooned.
2926 *
2927 * @returns VBox status code:
2928 * @retval VERR_GMM_ATTEMPT_TO_FREE_TOO_MUCH
2929 * @retval VERR_GMM_ATTEMPT_TO_DEFLATE_TOO_MUCH
2930 * @retval VERR_GMM_OVERCOMMITED_TRY_AGAIN_IN_A_BIT - reset condition
2931 * indicating that we won't necessarily have sufficient RAM to boot
2932 * the VM again and that it should pause until this changes (we'll try
2933  *          to balloon some other VM). (For standard deflate we have little choice
2934 * but to hope the VM won't use the memory that was returned to it.)
2935 *
2936 * @param pVM Pointer to the shared VM structure.
2937 * @param idCpu VCPU id
2938 * @param enmAction Inflate/deflate/reset
2939 * @param cBalloonedPages The number of pages that was ballooned.
2940 *
2941 * @thread EMT.
2942 */
2943GMMR0DECL(int) GMMR0BalloonedPages(PVM pVM, VMCPUID idCpu, GMMBALLOONACTION enmAction, uint32_t cBalloonedPages)
2944{
2945 LogFlow(("GMMR0BalloonedPages: pVM=%p enmAction=%d cBalloonedPages=%#x\n",
2946 pVM, enmAction, cBalloonedPages));
2947
2948 AssertMsgReturn(cBalloonedPages < RT_BIT(32 - PAGE_SHIFT), ("%#x\n", cBalloonedPages), VERR_INVALID_PARAMETER);
2949
2950 /*
2951 * Validate input and get the basics.
2952 */
2953 PGMM pGMM;
2954 GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
2955 PGVM pGVM;
2956 int rc = GVMMR0ByVMAndEMT(pVM, idCpu, &pGVM);
2957 if (RT_FAILURE(rc))
2958 return rc;
2959
2960 /*
2961      * Take the semaphore and do some more validations.
2962 */
2963 rc = RTSemFastMutexRequest(pGMM->Mtx);
2964 AssertRC(rc);
2965 if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
2966 {
2967 switch (enmAction)
2968 {
2969 case GMMBALLOONACTION_INFLATE:
2970 {
2971 if (RT_LIKELY(pGVM->gmm.s.Allocated.cBasePages + pGVM->gmm.s.cBalloonedPages + cBalloonedPages <= pGVM->gmm.s.Reserved.cBasePages))
2972 {
2973 /*
2974 * Record the ballooned memory.
2975 */
2976 pGMM->cBalloonedPages += cBalloonedPages;
2977 if (pGVM->gmm.s.cReqBalloonedPages)
2978 {
2979                     /* Codepath never taken. Might be interesting in the future to request ballooned memory from guests in low-memory conditions. */
2980 AssertFailed();
2981
2982 pGVM->gmm.s.cBalloonedPages += cBalloonedPages;
2983 pGVM->gmm.s.cReqActuallyBalloonedPages += cBalloonedPages;
2984 Log(("GMMR0BalloonedPages: +%#x - Global=%#llx / VM: Total=%#llx Req=%#llx Actual=%#llx (pending)\n", cBalloonedPages,
2985 pGMM->cBalloonedPages, pGVM->gmm.s.cBalloonedPages, pGVM->gmm.s.cReqBalloonedPages, pGVM->gmm.s.cReqActuallyBalloonedPages));
2986 }
2987 else
2988 {
2989 pGVM->gmm.s.cBalloonedPages += cBalloonedPages;
2990 Log(("GMMR0BalloonedPages: +%#x - Global=%#llx / VM: Total=%#llx (user)\n",
2991 cBalloonedPages, pGMM->cBalloonedPages, pGVM->gmm.s.cBalloonedPages));
2992 }
2993 }
2994 else
2995 rc = VERR_GMM_ATTEMPT_TO_FREE_TOO_MUCH;
2996 break;
2997 }
2998
2999 case GMMBALLOONACTION_DEFLATE:
3000 {
3001 /* Deflate. */
3002 if (pGVM->gmm.s.cBalloonedPages >= cBalloonedPages)
3003 {
3004 /*
3005 * Record the ballooned memory.
3006 */
3007 Assert(pGMM->cBalloonedPages >= cBalloonedPages);
3008 pGMM->cBalloonedPages -= cBalloonedPages;
3009 pGVM->gmm.s.cBalloonedPages -= cBalloonedPages;
3010 if (pGVM->gmm.s.cReqDeflatePages)
3011 {
3012                     AssertFailed(); /* This path is for later. */
3013 Log(("GMMR0BalloonedPages: -%#x - Global=%#llx / VM: Total=%#llx Req=%#llx\n",
3014 cBalloonedPages, pGMM->cBalloonedPages, pGVM->gmm.s.cBalloonedPages, pGVM->gmm.s.cReqDeflatePages));
3015
3016 /*
3017 * Anything we need to do here now when the request has been completed?
3018 */
3019 pGVM->gmm.s.cReqDeflatePages = 0;
3020 }
3021 else
3022 Log(("GMMR0BalloonedPages: -%#x - Global=%#llx / VM: Total=%#llx (user)\n",
3023 cBalloonedPages, pGMM->cBalloonedPages, pGVM->gmm.s.cBalloonedPages));
3024 }
3025 else
3026 rc = VERR_GMM_ATTEMPT_TO_DEFLATE_TOO_MUCH;
3027 break;
3028 }
3029
3030 case GMMBALLOONACTION_RESET:
3031 {
3032 /* Reset to an empty balloon. */
3033 Assert(pGMM->cBalloonedPages >= pGVM->gmm.s.cBalloonedPages);
3034
3035 pGMM->cBalloonedPages -= pGVM->gmm.s.cBalloonedPages;
3036 pGVM->gmm.s.cBalloonedPages = 0;
3037 break;
3038 }
3039
3040 default:
3041 rc = VERR_INVALID_PARAMETER;
3042 break;
3043 }
3044 GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
3045 }
3046 else
3047 rc = VERR_INTERNAL_ERROR_5;
3048
3049 RTSemFastMutexRelease(pGMM->Mtx);
3050 LogFlow(("GMMR0BalloonedPages: returns %Rrc\n", rc));
3051 return rc;
3052}
3053
3054
3055/**
3056 * VMMR0 request wrapper for GMMR0BalloonedPages.
3057 *
3058 * @returns see GMMR0BalloonedPages.
3059 * @param pVM Pointer to the shared VM structure.
3060 * @param idCpu VCPU id
3061 * @param pReq The request packet.
3062 */
3063GMMR0DECL(int) GMMR0BalloonedPagesReq(PVM pVM, VMCPUID idCpu, PGMMBALLOONEDPAGESREQ pReq)
3064{
3065 /*
3066 * Validate input and pass it on.
3067 */
3068 AssertPtrReturn(pVM, VERR_INVALID_POINTER);
3069 AssertPtrReturn(pReq, VERR_INVALID_POINTER);
3070 AssertMsgReturn(pReq->Hdr.cbReq == sizeof(GMMBALLOONEDPAGESREQ),
3071 ("%#x < %#x\n", pReq->Hdr.cbReq, sizeof(GMMBALLOONEDPAGESREQ)),
3072 VERR_INVALID_PARAMETER);
3073
3074 return GMMR0BalloonedPages(pVM, idCpu, pReq->enmAction, pReq->cBalloonedPages);
3075}
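/*
 * Usage sketch (illustration only; common request header initialization and dispatch omitted):
 *
 *      GMMBALLOONEDPAGESREQ Req;
 *      Req.Hdr.cbReq       = sizeof(Req);
 *      Req.enmAction       = GMMBALLOONACTION_INFLATE;    // or GMMBALLOONACTION_DEFLATE / _RESET
 *      Req.cBalloonedPages = cPages;                      // pages just added to / removed from the balloon
 */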
3076
3077/**
3078 * Return memory statistics for the hypervisor
3079 *
3080 * @returns VBox status code:
3081 * @param pVM Pointer to the shared VM structure.
3082 * @param pReq The request packet.
3083 */
3084GMMR0DECL(int) GMMR0QueryHypervisorMemoryStatsReq(PVM pVM, PGMMMEMSTATSREQ pReq)
3085{
3086 /*
3087 * Validate input and pass it on.
3088 */
3089 AssertPtrReturn(pVM, VERR_INVALID_POINTER);
3090 AssertPtrReturn(pReq, VERR_INVALID_POINTER);
3091 AssertMsgReturn(pReq->Hdr.cbReq == sizeof(GMMMEMSTATSREQ),
3092 ("%#x < %#x\n", pReq->Hdr.cbReq, sizeof(GMMMEMSTATSREQ)),
3093 VERR_INVALID_PARAMETER);
3094
3095 /*
3096 * Validate input and get the basics.
3097 */
3098 PGMM pGMM;
3099 GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
3100 pReq->cAllocPages = pGMM->cAllocatedPages;
3101     pReq->cFreePages = (pGMM->cChunks << (GMM_CHUNK_SHIFT - PAGE_SHIFT)) - pGMM->cAllocatedPages;
3102 pReq->cBalloonedPages = pGMM->cBalloonedPages;
3103 pReq->cMaxPages = pGMM->cMaxPages;
3104 pReq->cSharedPages = pGMM->cDuplicatePages;
3105 GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
3106
3107 return VINF_SUCCESS;
3108}
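/*
 * Note: cFreePages above is derived rather than tracked directly; cChunks << (GMM_CHUNK_SHIFT
 * - PAGE_SHIFT) is the total number of pages backing all registered chunks (512 per chunk,
 * assuming 4 KiB pages and the 2 MiB chunk size used here), from which the allocated pages are
 * subtracted. Note also that cSharedPages is filled with the duplicate page count.
 */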
3109
3110/**
3111 * Return memory statistics for the VM
3112 *
3113 * @returns VBox status code:
3114 * @param pVM Pointer to the shared VM structure.
3115  * @param idCpu VCPU id.
3116 * @param pReq The request packet.
3117 */
3118GMMR0DECL(int) GMMR0QueryMemoryStatsReq(PVM pVM, VMCPUID idCpu, PGMMMEMSTATSREQ pReq)
3119{
3120 /*
3121 * Validate input and pass it on.
3122 */
3123 AssertPtrReturn(pVM, VERR_INVALID_POINTER);
3124 AssertPtrReturn(pReq, VERR_INVALID_POINTER);
3125 AssertMsgReturn(pReq->Hdr.cbReq == sizeof(GMMMEMSTATSREQ),
3126 ("%#x < %#x\n", pReq->Hdr.cbReq, sizeof(GMMMEMSTATSREQ)),
3127 VERR_INVALID_PARAMETER);
3128
3129 /*
3130 * Validate input and get the basics.
3131 */
3132 PGMM pGMM;
3133 GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
3134 PGVM pGVM;
3135 int rc = GVMMR0ByVMAndEMT(pVM, idCpu, &pGVM);
3136 if (RT_FAILURE(rc))
3137 return rc;
3138
3139 /*
3140      * Take the semaphore and do some more validations.
3141 */
3142 rc = RTSemFastMutexRequest(pGMM->Mtx);
3143 AssertRC(rc);
3144 if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
3145 {
3146 pReq->cAllocPages = pGVM->gmm.s.Allocated.cBasePages;
3147 pReq->cBalloonedPages = pGVM->gmm.s.cBalloonedPages;
3148 pReq->cMaxPages = pGVM->gmm.s.Reserved.cBasePages;
3149 pReq->cFreePages = pReq->cMaxPages - pReq->cAllocPages;
3150 }
3151 else
3152 rc = VERR_INTERNAL_ERROR_5;
3153
3154 RTSemFastMutexRelease(pGMM->Mtx);
3155 LogFlow(("GMMR3QueryVMMemoryStats: returns %Rrc\n", rc));
3156 return rc;
3157}
3158
3159/**
3160 * Unmaps a chunk previously mapped into the address space of the current process.
3161 *
3162 * @returns VBox status code.
3163 * @param pGMM Pointer to the GMM instance data.
3164 * @param pGVM Pointer to the Global VM structure.
3165 * @param pChunk Pointer to the chunk to be unmapped.
3166 */
3167static int gmmR0UnmapChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk)
3168{
3169 if (!pGMM->fLegacyAllocationMode)
3170 {
3171 /*
3172 * Find the mapping and try unmapping it.
3173 */
3174 for (uint32_t i = 0; i < pChunk->cMappings; i++)
3175 {
3176 Assert(pChunk->paMappings[i].pGVM && pChunk->paMappings[i].MapObj != NIL_RTR0MEMOBJ);
3177 if (pChunk->paMappings[i].pGVM == pGVM)
3178 {
3179 /* unmap */
3180 int rc = RTR0MemObjFree(pChunk->paMappings[i].MapObj, false /* fFreeMappings (NA) */);
3181 if (RT_SUCCESS(rc))
3182 {
3183 /* update the record. */
3184 pChunk->cMappings--;
3185 if (i < pChunk->cMappings)
3186 pChunk->paMappings[i] = pChunk->paMappings[pChunk->cMappings];
3187 pChunk->paMappings[pChunk->cMappings].MapObj = NIL_RTR0MEMOBJ;
3188 pChunk->paMappings[pChunk->cMappings].pGVM = NULL;
3189 }
3190 return rc;
3191 }
3192 }
3193 }
3194 else if (pChunk->hGVM == pGVM->hSelf)
3195 return VINF_SUCCESS;
3196
3197 Log(("gmmR0UnmapChunk: Chunk %#x is not mapped into pGVM=%p/%#x\n", pChunk->Core.Key, pGVM, pGVM->hSelf));
3198 return VERR_GMM_CHUNK_NOT_MAPPED;
3199}
3200
3201
3202/**
3203 * Maps a chunk into the user address space of the current process.
3204 *
3205 * @returns VBox status code.
3206 * @param pGMM Pointer to the GMM instance data.
3207 * @param pGVM Pointer to the Global VM structure.
3208 * @param pChunk Pointer to the chunk to be mapped.
3209 * @param ppvR3 Where to store the ring-3 address of the mapping.
3210  *                      In the VERR_GMM_CHUNK_ALREADY_MAPPED case, this will
3211  *                      contain the address of the existing mapping.
3212 */
3213static int gmmR0MapChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk, PRTR3PTR ppvR3)
3214{
3215 /*
3216 * If we're in legacy mode this is simple.
3217 */
3218 if (pGMM->fLegacyAllocationMode)
3219 {
3220 if (pChunk->hGVM != pGVM->hSelf)
3221 {
3222 Log(("gmmR0MapChunk: chunk %#x is already mapped at %p!\n", pChunk->Core.Key, *ppvR3));
3223 return VERR_GMM_CHUNK_NOT_FOUND;
3224 }
3225
3226 *ppvR3 = RTR0MemObjAddressR3(pChunk->MemObj);
3227 return VINF_SUCCESS;
3228 }
3229
3230 /*
3231 * Check to see if the chunk is already mapped.
3232 */
3233 for (uint32_t i = 0; i < pChunk->cMappings; i++)
3234 {
3235 Assert(pChunk->paMappings[i].pGVM && pChunk->paMappings[i].MapObj != NIL_RTR0MEMOBJ);
3236 if (pChunk->paMappings[i].pGVM == pGVM)
3237 {
3238 *ppvR3 = RTR0MemObjAddressR3(pChunk->paMappings[i].MapObj);
3239 Log(("gmmR0MapChunk: chunk %#x is already mapped at %p!\n", pChunk->Core.Key, *ppvR3));
3240#ifdef VBOX_WITH_PAGE_SHARING
3241 /* The ring-3 chunk cache can be out of sync; don't fail. */
3242 return VINF_SUCCESS;
3243#else
3244 return VERR_GMM_CHUNK_ALREADY_MAPPED;
3245#endif
3246 }
3247 }
3248
3249 /*
3250 * Do the mapping.
3251 */
3252 RTR0MEMOBJ MapObj;
3253 int rc = RTR0MemObjMapUser(&MapObj, pChunk->MemObj, (RTR3PTR)-1, 0, RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
3254 if (RT_SUCCESS(rc))
3255 {
3256 /* reallocate the array? */
3257 if ((pChunk->cMappings & 1 /*7*/) == 0)
3258 {
3259 void *pvMappings = RTMemRealloc(pChunk->paMappings, (pChunk->cMappings + 2 /*8*/) * sizeof(pChunk->paMappings[0]));
3260 if (RT_UNLIKELY(!pvMappings))
3261 {
3262 rc = RTR0MemObjFree(MapObj, false /* fFreeMappings (NA) */);
3263 AssertRC(rc);
3264 return VERR_NO_MEMORY;
3265 }
3266 pChunk->paMappings = (PGMMCHUNKMAP)pvMappings;
3267 }
3268
3269 /* insert new entry */
3270 pChunk->paMappings[pChunk->cMappings].MapObj = MapObj;
3271 pChunk->paMappings[pChunk->cMappings].pGVM = pGVM;
3272 pChunk->cMappings++;
3273
3274 *ppvR3 = RTR0MemObjAddressR3(MapObj);
3275 }
3276
3277 return rc;
3278}
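/*
 * Note: the paMappings array above is grown lazily, two entries at a time (the commented out
 * 7/8 values suggest a granularity of eight entries was used or considered at some point);
 * each entry pairs the mapping memory object with the GVM it belongs to.
 */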
3279
3280/**
3281 * Check if a chunk is mapped into the specified VM
3282 *
3283 * @returns mapped yes/no
3284 * @param pGVM Pointer to the Global VM structure.
3285 * @param pChunk Pointer to the chunk to be mapped.
3286 * @param ppvR3 Where to store the ring-3 address of the mapping.
3287 */
3288static int gmmR0IsChunkMapped(PGVM pGVM, PGMMCHUNK pChunk, PRTR3PTR ppvR3)
3289{
3290 /*
3291 * Check to see if the chunk is already mapped.
3292 */
3293 for (uint32_t i = 0; i < pChunk->cMappings; i++)
3294 {
3295 Assert(pChunk->paMappings[i].pGVM && pChunk->paMappings[i].MapObj != NIL_RTR0MEMOBJ);
3296 if (pChunk->paMappings[i].pGVM == pGVM)
3297 {
3298 *ppvR3 = RTR0MemObjAddressR3(pChunk->paMappings[i].MapObj);
3299 return true;
3300 }
3301 }
3302 *ppvR3 = NULL;
3303 return false;
3304}
3305
3306/**
3307 * Map a chunk and/or unmap another chunk.
3308 *
3309 * The mapping and unmapping applies to the current process.
3310 *
3311  * This API does two things because it saves a kernel call per mapping
3312  * when the ring-3 mapping cache is full.
3313 *
3314 * @returns VBox status code.
3315 * @param pVM The VM.
3316 * @param idChunkMap The chunk to map. NIL_GMM_CHUNKID if nothing to map.
3317 * @param idChunkUnmap The chunk to unmap. NIL_GMM_CHUNKID if nothing to unmap.
3318 * @param ppvR3 Where to store the address of the mapped chunk. NULL is ok if nothing to map.
3319 * @thread EMT
3320 */
3321GMMR0DECL(int) GMMR0MapUnmapChunk(PVM pVM, uint32_t idChunkMap, uint32_t idChunkUnmap, PRTR3PTR ppvR3)
3322{
3323 LogFlow(("GMMR0MapUnmapChunk: pVM=%p idChunkMap=%#x idChunkUnmap=%#x ppvR3=%p\n",
3324 pVM, idChunkMap, idChunkUnmap, ppvR3));
3325
3326 /*
3327 * Validate input and get the basics.
3328 */
3329 PGMM pGMM;
3330 GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
3331 PGVM pGVM;
3332 int rc = GVMMR0ByVM(pVM, &pGVM);
3333 if (RT_FAILURE(rc))
3334 return rc;
3335
3336 AssertCompile(NIL_GMM_CHUNKID == 0);
3337 AssertMsgReturn(idChunkMap <= GMM_CHUNKID_LAST, ("%#x\n", idChunkMap), VERR_INVALID_PARAMETER);
3338 AssertMsgReturn(idChunkUnmap <= GMM_CHUNKID_LAST, ("%#x\n", idChunkUnmap), VERR_INVALID_PARAMETER);
3339
3340 if ( idChunkMap == NIL_GMM_CHUNKID
3341 && idChunkUnmap == NIL_GMM_CHUNKID)
3342 return VERR_INVALID_PARAMETER;
3343
3344 if (idChunkMap != NIL_GMM_CHUNKID)
3345 {
3346 AssertPtrReturn(ppvR3, VERR_INVALID_POINTER);
3347 *ppvR3 = NIL_RTR3PTR;
3348 }
3349
3350 /*
3351 * Take the semaphore and do the work.
3352 *
3353      * The unmapping is done last since it's easier to undo a mapping than
3354      * to undo an unmapping. The ring-3 mapping cache cannot be so big
3355      * that it pushes the user virtual address space to within a chunk of
3356      * its limits, so no problem here.
3357 */
3358 rc = RTSemFastMutexRequest(pGMM->Mtx);
3359 AssertRC(rc);
3360 if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
3361 {
3362 PGMMCHUNK pMap = NULL;
3363 if (idChunkMap != NIL_GMM_CHUNKID)
3364 {
3365 pMap = gmmR0GetChunk(pGMM, idChunkMap);
3366 if (RT_LIKELY(pMap))
3367 rc = gmmR0MapChunk(pGMM, pGVM, pMap, ppvR3);
3368 else
3369 {
3370 Log(("GMMR0MapUnmapChunk: idChunkMap=%#x\n", idChunkMap));
3371 rc = VERR_GMM_CHUNK_NOT_FOUND;
3372 }
3373 }
3374
3375 if ( idChunkUnmap != NIL_GMM_CHUNKID
3376 && RT_SUCCESS(rc))
3377 {
3378 PGMMCHUNK pUnmap = gmmR0GetChunk(pGMM, idChunkUnmap);
3379 if (RT_LIKELY(pUnmap))
3380 rc = gmmR0UnmapChunk(pGMM, pGVM, pUnmap);
3381 else
3382 {
3383 Log(("GMMR0MapUnmapChunk: idChunkUnmap=%#x\n", idChunkUnmap));
3384 rc = VERR_GMM_CHUNK_NOT_FOUND;
3385 }
3386
3387 if (RT_FAILURE(rc) && pMap)
3388 gmmR0UnmapChunk(pGMM, pGVM, pMap);
3389 }
3390
3391 GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
3392 }
3393 else
3394 rc = VERR_INTERNAL_ERROR_5;
3395 RTSemFastMutexRelease(pGMM->Mtx);
3396
3397 LogFlow(("GMMR0MapUnmapChunk: returns %Rrc\n", rc));
3398 return rc;
3399}
3400
3401
3402/**
3403 * VMMR0 request wrapper for GMMR0MapUnmapChunk.
3404 *
3405 * @returns see GMMR0MapUnmapChunk.
3406 * @param pVM Pointer to the shared VM structure.
3407 * @param pReq The request packet.
3408 */
3409GMMR0DECL(int) GMMR0MapUnmapChunkReq(PVM pVM, PGMMMAPUNMAPCHUNKREQ pReq)
3410{
3411 /*
3412 * Validate input and pass it on.
3413 */
3414 AssertPtrReturn(pVM, VERR_INVALID_POINTER);
3415 AssertPtrReturn(pReq, VERR_INVALID_POINTER);
3416 AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
3417
3418 return GMMR0MapUnmapChunk(pVM, pReq->idChunkMap, pReq->idChunkUnmap, &pReq->pvR3);
3419}
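
/*
 * A minimal sketch of how a ring-3 caller might fill in GMMMAPUNMAPCHUNKREQ for
 * the wrapper above. The header initialisation and the VMMR3CallR0 /
 * VMMR0_DO_GMM_MAP_UNMAP_CHUNK dispatch follow the usual VMM request pattern
 * and are assumptions here, as are the idMyChunk and pvChunkR3 variables; only
 * the request fields themselves come from this file.
 *
 * @code
 * GMMMAPUNMAPCHUNKREQ Req;
 * Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
 * Req.Hdr.cbReq    = sizeof(Req);              // must equal sizeof(*pReq), see the check above
 * Req.idChunkMap   = idMyChunk;                // chunk to map, or NIL_GMM_CHUNKID
 * Req.idChunkUnmap = NIL_GMM_CHUNKID;          // nothing to unmap in this example
 * Req.pvR3         = NIL_RTR3PTR;
 * int rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_MAP_UNMAP_CHUNK, 0, &Req.Hdr);
 * if (RT_SUCCESS(rc))
 *     pvChunkR3 = Req.pvR3;                    // ring-3 address of the newly mapped chunk
 * @endcode
 */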
3420
3421
3422/**
3423 * Legacy mode API for supplying pages.
3424 *
3425 * The specified user address points to an allocation-chunk-sized block that
3426 * will be locked down and used by the GMM when it is asked for pages.
3427 *
3428 * @returns VBox status code.
3429 * @param pVM The VM.
3430 * @param idCpu VCPU id
3431 * @param pvR3 Pointer to the chunk size memory block to lock down.
3432 */
3433GMMR0DECL(int) GMMR0SeedChunk(PVM pVM, VMCPUID idCpu, RTR3PTR pvR3)
3434{
3435 /*
3436 * Validate input and get the basics.
3437 */
3438 PGMM pGMM;
3439 GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
3440 PGVM pGVM;
3441 int rc = GVMMR0ByVMAndEMT(pVM, idCpu, &pGVM);
3442 if (RT_FAILURE(rc))
3443 return rc;
3444
3445 AssertPtrReturn(pvR3, VERR_INVALID_POINTER);
3446 AssertReturn(!(PAGE_OFFSET_MASK & pvR3), VERR_INVALID_POINTER);
3447
3448 if (!pGMM->fLegacyAllocationMode)
3449 {
3450 Log(("GMMR0SeedChunk: not in legacy allocation mode!\n"));
3451 return VERR_NOT_SUPPORTED;
3452 }
3453
3454 /*
3455 * Lock the memory before taking the semaphore.
3456 */
3457 RTR0MEMOBJ MemObj;
3458 rc = RTR0MemObjLockUser(&MemObj, pvR3, GMM_CHUNK_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
3459 if (RT_SUCCESS(rc))
3460 {
3461 /* Grab the lock. */
3462 rc = RTSemFastMutexRequest(pGMM->Mtx);
3463 AssertRCReturn(rc, rc);
3464
3465 /*
3466 * Add a new chunk with our hGVM.
3467 */
3468 rc = gmmR0RegisterChunk(pGMM, &pGMM->Private, MemObj, pGVM->hSelf, GMMCHUNKTYPE_NON_CONTINUOUS);
3469 RTSemFastMutexRelease(pGMM->Mtx);
3470
3471 if (RT_FAILURE(rc))
3472 RTR0MemObjFree(MemObj, false /* fFreeMappings */);
3473 }
3474
3475 LogFlow(("GMMR0SeedChunk: rc=%d (pvR3=%p)\n", rc, pvR3));
3476 return rc;
3477}
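
/*
 * A sketch of what the ring-3 side must hand over in legacy allocation mode: a
 * page-aligned block of exactly GMM_CHUNK_SIZE bytes, matching the checks done
 * by GMMR0SeedChunk above. RTMemPageAlloc is used purely to illustrate the
 * alignment requirement; how the real caller obtains the block and submits it
 * through the ring-0 call path is outside this file and only assumed here.
 *
 * @code
 * void *pvSeed = RTMemPageAlloc(GMM_CHUNK_SIZE);        // page aligned, chunk sized
 * if (!pvSeed)
 *     return VERR_NO_MEMORY;
 * Assert(!((uintptr_t)pvSeed & PAGE_OFFSET_MASK));      // what GMMR0SeedChunk asserts on entry
 * // ... pass pvSeed down to GMMR0SeedChunk via the usual ring-0 call path ...
 * @endcode
 */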
3478
3479typedef struct
3480{
3481 PAVLGCPTRNODECORE pNode;
3482 char *pszModuleName;
3483 char *pszVersion;
3484 VBOXOSFAMILY enmGuestOS;
3485} GMMFINDMODULEBYNAME, *PGMMFINDMODULEBYNAME;
3486
3487/**
3488 * Tree enumeration callback for finding identical modules by name and version
3489 */
3490DECLCALLBACK(int) gmmR0CheckForIdenticalModule(PAVLGCPTRNODECORE pNode, void *pvUser)
3491{
3492 PGMMFINDMODULEBYNAME pInfo = (PGMMFINDMODULEBYNAME)pvUser;
3493 PGMMSHAREDMODULE pModule = (PGMMSHAREDMODULE)pNode;
3494
3495 if ( pInfo
3496 && pInfo->enmGuestOS == pModule->enmGuestOS
3497 /** @todo replace with RTStrNCmp */
3498 && !strcmp(pModule->szName, pInfo->pszModuleName)
3499 && !strcmp(pModule->szVersion, pInfo->pszVersion))
3500 {
3501 pInfo->pNode = pNode;
3502 return 1; /* stop search */
3503 }
3504 return 0;
3505}
3506
3507
3508/**
3509 * Registers a new shared module for the VM
3510 *
3511 * @returns VBox status code.
3512 * @param pVM VM handle
3513 * @param idCpu VCPU id
3514 * @param enmGuestOS Guest OS type
3515 * @param pszModuleName Module name
3516 * @param pszVersion Module version
3517 * @param GCBaseAddr Module base address
3518 * @param cbModule Module size
3519 * @param cRegions Number of shared region descriptors
3520 * @param pRegions Shared region(s)
3521 */
3522GMMR0DECL(int) GMMR0RegisterSharedModule(PVM pVM, VMCPUID idCpu, VBOXOSFAMILY enmGuestOS, char *pszModuleName, char *pszVersion, RTGCPTR GCBaseAddr, uint32_t cbModule,
3523 unsigned cRegions, VMMDEVSHAREDREGIONDESC *pRegions)
3524{
3525#ifdef VBOX_WITH_PAGE_SHARING
3526 /*
3527 * Validate input and get the basics.
3528 */
3529 PGMM pGMM;
3530 GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
3531 PGVM pGVM;
3532 int rc = GVMMR0ByVMAndEMT(pVM, idCpu, &pGVM);
3533 if (RT_FAILURE(rc))
3534 return rc;
3535
3536 Log(("GMMR0RegisterSharedModule %s %s base %RGv size %x\n", pszModuleName, pszVersion, GCBaseAddr, cbModule));
3537
3538 /*
3539 * Take the semaphore and do some more validations.
3540 */
3541 rc = RTSemFastMutexRequest(pGMM->Mtx);
3542 AssertRC(rc);
3543 if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
3544 {
3545 bool fNewModule = false;
3546
3547 /* Check if this module is already locally registered. */
3548 PGMMSHAREDMODULEPERVM pRecVM = (PGMMSHAREDMODULEPERVM)RTAvlGCPtrGet(&pGVM->gmm.s.pSharedModuleTree, GCBaseAddr);
3549 if (!pRecVM)
3550 {
3551 pRecVM = (PGMMSHAREDMODULEPERVM)RTMemAllocZ(RT_OFFSETOF(GMMSHAREDMODULEPERVM, aRegions[cRegions]));
3552 if (!pRecVM)
3553 {
3554 AssertFailed();
3555 rc = VERR_NO_MEMORY;
3556 goto end;
3557 }
3558 pRecVM->Core.Key = GCBaseAddr;
3559 pRecVM->cRegions = cRegions;
3560
3561 /* Save the region data as they can differ between VMs (address space scrambling or simply different loading order) */
3562 for (unsigned i = 0; i < cRegions; i++)
3563 {
3564 pRecVM->aRegions[i].GCRegionAddr = pRegions[i].GCRegionAddr;
3565 pRecVM->aRegions[i].cbRegion = RT_ALIGN_T(pRegions[i].cbRegion, PAGE_SIZE, uint32_t);
3566 pRecVM->aRegions[i].u32Alignment = 0;
3567 pRecVM->aRegions[i].paHCPhysPageID = NULL; /* unused */
3568 }
3569
3570 bool ret = RTAvlGCPtrInsert(&pGVM->gmm.s.pSharedModuleTree, &pRecVM->Core);
3571 Assert(ret);
3572
3573 Log(("GMMR0RegisterSharedModule: new local module %s\n", pszModuleName));
3574 fNewModule = true;
3575 }
3576 else
3577 rc = VINF_PGM_SHARED_MODULE_ALREADY_REGISTERED;
3578
3579 /* Check if this module is already globally registered. */
3580 PGMMSHAREDMODULE pGlobalModule = (PGMMSHAREDMODULE)RTAvlGCPtrGet(&pGMM->pGlobalSharedModuleTree, GCBaseAddr);
3581 if ( !pGlobalModule
3582 && enmGuestOS == VBOXOSFAMILY_Windows64)
3583 {
3584 /* Two identical copies of e.g. Win7 x64 will typically not have a similar virtual address space layout for dlls or kernel modules.
3585 * Try to find identical binaries based on name and version.
3586 */
3587 GMMFINDMODULEBYNAME Info;
3588
3589 Info.pNode = NULL;
3590 Info.pszVersion = pszVersion;
3591 Info.pszModuleName = pszModuleName;
3592 Info.enmGuestOS = enmGuestOS;
3593
3594 Log(("Try to find identical module %s\n", pszModuleName));
3595 int ret = RTAvlGCPtrDoWithAll(&pGMM->pGlobalSharedModuleTree, true /* fFromLeft */, gmmR0CheckForIdenticalModule, &Info);
3596 if (ret == 1)
3597 {
3598 Assert(Info.pNode);
3599 pGlobalModule = (PGMMSHAREDMODULE)Info.pNode;
3600 Log(("Found identical module at %RGv\n", pGlobalModule->Core.Key));
3601 }
3602 }
3603
3604 if (!pGlobalModule)
3605 {
3606 Assert(fNewModule);
3607 Assert(!pRecVM->fCollision);
3608
3609 pGlobalModule = (PGMMSHAREDMODULE)RTMemAllocZ(RT_OFFSETOF(GMMSHAREDMODULE, aRegions[cRegions]));
3610 if (!pGlobalModule)
3611 {
3612 AssertFailed();
3613 rc = VERR_NO_MEMORY;
3614 goto end;
3615 }
3616
3617 pGlobalModule->Core.Key = GCBaseAddr;
3618 pGlobalModule->cbModule = cbModule;
3619 /* Input limit already safe; no need to check again. */
3620 /** @todo replace with RTStrCopy */
3621 strcpy(pGlobalModule->szName, pszModuleName);
3622 strcpy(pGlobalModule->szVersion, pszVersion);
3623
3624 pGlobalModule->enmGuestOS = enmGuestOS;
3625 pGlobalModule->cRegions = cRegions;
3626
3627 for (unsigned i = 0; i < cRegions; i++)
3628 {
3629 Log(("New region %d base=%RGv size %x\n", i, pRegions[i].GCRegionAddr, pRegions[i].cbRegion));
3630 pGlobalModule->aRegions[i].GCRegionAddr = pRegions[i].GCRegionAddr;
3631 pGlobalModule->aRegions[i].cbRegion = RT_ALIGN_T(pRegions[i].cbRegion, PAGE_SIZE, uint32_t);
3632 pGlobalModule->aRegions[i].u32Alignment = 0;
3633 pGlobalModule->aRegions[i].paHCPhysPageID = NULL; /* uninitialized. */
3634 }
3635
3636 /* Save reference. */
3637 pRecVM->pGlobalModule = pGlobalModule;
3638 pRecVM->fCollision = false;
3639 pGlobalModule->cUsers++;
3640 rc = VINF_SUCCESS;
3641
3642 bool ret = RTAvlGCPtrInsert(&pGMM->pGlobalSharedModuleTree, &pGlobalModule->Core);
3643 Assert(ret);
3644
3645 Log(("GMMR0RegisterSharedModule: new global module %s\n", pszModuleName));
3646 }
3647 else
3648 {
3649 Assert(pGlobalModule->cUsers > 0);
3650
3651 /* Make sure the name and version are identical. */
3652 /** @todo replace with RTStrNCmp */
3653 if ( !strcmp(pGlobalModule->szName, pszModuleName)
3654 && !strcmp(pGlobalModule->szVersion, pszVersion))
3655 {
3656 /* Save reference. */
3657 pRecVM->pGlobalModule = pGlobalModule;
3658 if ( fNewModule
3659 || pRecVM->fCollision == true) /* colliding module unregistered and new one registered since the last check */
3660 {
3661 pGlobalModule->cUsers++;
3662 Log(("GMMR0RegisterSharedModule: using existing module %s cUser=%d!\n", pszModuleName, pGlobalModule->cUsers));
3663 }
3664 pRecVM->fCollision = false;
3665 rc = VINF_SUCCESS;
3666 }
3667 else
3668 {
3669 Log(("GMMR0RegisterSharedModule: module %s collision!\n", pszModuleName));
3670 pRecVM->fCollision = true;
3671 rc = VINF_PGM_SHARED_MODULE_COLLISION;
3672 goto end;
3673 }
3674 }
3675
3676 GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
3677 }
3678 else
3679 rc = VERR_INTERNAL_ERROR_5;
3680
3681end:
3682 RTSemFastMutexRelease(pGMM->Mtx);
3683 return rc;
3684#else
3685 return VERR_NOT_IMPLEMENTED;
3686#endif
3687}
3688
3689
3690/**
3691 * VMMR0 request wrapper for GMMR0RegisterSharedModule.
3692 *
3693 * @returns see GMMR0RegisterSharedModule.
3694 * @param pVM Pointer to the shared VM structure.
3695 * @param idCpu VCPU id
3696 * @param pReq The request packet.
3697 */
3698GMMR0DECL(int) GMMR0RegisterSharedModuleReq(PVM pVM, VMCPUID idCpu, PGMMREGISTERSHAREDMODULEREQ pReq)
3699{
3700 /*
3701 * Validate input and pass it on.
3702 */
3703 AssertPtrReturn(pVM, VERR_INVALID_POINTER);
3704 AssertPtrReturn(pReq, VERR_INVALID_POINTER);
3705 AssertMsgReturn(pReq->Hdr.cbReq >= sizeof(*pReq) && pReq->Hdr.cbReq == RT_UOFFSETOF(GMMREGISTERSHAREDMODULEREQ, aRegions[pReq->cRegions]), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
3706
3707 /* Pass back return code in the request packet to preserve informational codes. (VMMR3CallR0 chokes on them) */
3708 pReq->rc = GMMR0RegisterSharedModule(pVM, idCpu, pReq->enmGuestOS, pReq->szName, pReq->szVersion, pReq->GCBaseAddr, pReq->cbModule, pReq->cRegions, pReq->aRegions);
3709 return VINF_SUCCESS;
3710}
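
/*
 * Because GMMREGISTERSHAREDMODULEREQ ends with a variable sized aRegions array,
 * a ring-3 caller has to size the request with RT_UOFFSETOF exactly as the
 * validation above expects. A minimal sketch; the header initialisation, the
 * RTStrCopy use (see the @todo notes above), the assumption that szName and
 * szVersion are embedded arrays, and the surrounding caller variables
 * (GCBaseAddr, cbModule, cRegions, paRegionDescs, ...) are assumptions here.
 *
 * @code
 * uint32_t cbReq = RT_UOFFSETOF(GMMREGISTERSHAREDMODULEREQ, aRegions[cRegions]);
 * PGMMREGISTERSHAREDMODULEREQ pReq = (PGMMREGISTERSHAREDMODULEREQ)RTMemAllocZ(cbReq);
 * if (!pReq)
 *     return VERR_NO_MEMORY;
 * pReq->Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
 * pReq->Hdr.cbReq    = cbReq;                          // checked against RT_UOFFSETOF above
 * pReq->enmGuestOS   = VBOXOSFAMILY_Windows64;         // example value
 * pReq->GCBaseAddr   = GCBaseAddr;
 * pReq->cbModule     = cbModule;
 * pReq->cRegions     = cRegions;
 * RTStrCopy(pReq->szName,    sizeof(pReq->szName),    pszModuleName);
 * RTStrCopy(pReq->szVersion, sizeof(pReq->szVersion), pszVersion);
 * for (uint32_t i = 0; i < cRegions; i++)
 *     pReq->aRegions[i] = paRegionDescs[i];            // VMMDEVSHAREDREGIONDESC entries
 * // ... dispatch to ring-0 and inspect pReq->rc afterwards, as noted above ...
 * RTMemFree(pReq);
 * @endcode
 */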
3711
3712/**
3713 * Unregisters a shared module for the VM
3714 *
3715 * @returns VBox status code.
3716 * @param pVM VM handle
3717 * @param idCpu VCPU id
3718 * @param pszModuleName Module name
3719 * @param pszVersion Module version
3720 * @param GCBaseAddr Module base address
3721 * @param cbModule Module size
3722 */
3723GMMR0DECL(int) GMMR0UnregisterSharedModule(PVM pVM, VMCPUID idCpu, char *pszModuleName, char *pszVersion, RTGCPTR GCBaseAddr, uint32_t cbModule)
3724{
3725#ifdef VBOX_WITH_PAGE_SHARING
3726 /*
3727 * Validate input and get the basics.
3728 */
3729 PGMM pGMM;
3730 GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
3731 PGVM pGVM;
3732 int rc = GVMMR0ByVMAndEMT(pVM, idCpu, &pGVM);
3733 if (RT_FAILURE(rc))
3734 return rc;
3735
3736 Log(("GMMR0UnregisterSharedModule %s %s base=%RGv size %x\n", pszModuleName, pszVersion, GCBaseAddr, cbModule));
3737
3738 /*
3739 * Take the semaphore and do some more validations.
3740 */
3741 rc = RTSemFastMutexRequest(pGMM->Mtx);
3742 AssertRC(rc);
3743 if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
3744 {
3745 PGMMSHAREDMODULEPERVM pRecVM = (PGMMSHAREDMODULEPERVM)RTAvlGCPtrGet(&pGVM->gmm.s.pSharedModuleTree, GCBaseAddr);
3746 if (!pRecVM)
3747 {
3748 rc = VERR_PGM_SHARED_MODULE_NOT_FOUND;
3749 goto end;
3750 }
3751 /* Remove reference to global shared module. */
3752 if (!pRecVM->fCollision)
3753 {
3754 PGMMSHAREDMODULE pRec = pRecVM->pGlobalModule;
3755 Assert(pRec);
3756
3757 if (pRec) /* paranoia */
3758 {
3759 Assert(pRec->cUsers);
3760 pRec->cUsers--;
3761 if (pRec->cUsers == 0)
3762 {
3763 /* Free the ranges, but leave the pages intact as there might still be references; they will be cleared by the COW mechanism. */
3764 for (unsigned i = 0; i < pRec->cRegions; i++)
3765 if (pRec->aRegions[i].paHCPhysPageID)
3766 RTMemFree(pRec->aRegions[i].paHCPhysPageID);
3767
3768 /* Remove from the tree and free memory. */
3769 RTAvlGCPtrRemove(&pGMM->pGlobalSharedModuleTree, GCBaseAddr);
3770 RTMemFree(pRec);
3771 }
3772 }
3773 else
3774 rc = VERR_PGM_SHARED_MODULE_REGISTRATION_INCONSISTENCY;
3775 }
3776 else
3777 Assert(!pRecVM->pGlobalModule);
3778
3779 /* Remove from the tree and free memory. */
3780 RTAvlGCPtrRemove(&pGVM->gmm.s.pSharedModuleTree, GCBaseAddr);
3781 RTMemFree(pRecVM);
3782
3783 GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
3784 }
3785 else
3786 rc = VERR_INTERNAL_ERROR_5;
3787
3788end:
3789 RTSemFastMutexRelease(pGMM->Mtx);
3790 return rc;
3791#else
3792 return VERR_NOT_IMPLEMENTED;
3793#endif
3794}
3795
3796/**
3797 * VMMR0 request wrapper for GMMR0UnregisterSharedModule.
3798 *
3799 * @returns see GMMR0UnregisterSharedModule.
3800 * @param pVM Pointer to the shared VM structure.
3801 * @param idCpu VCPU id
3802 * @param pReq The request packet.
3803 */
3804GMMR0DECL(int) GMMR0UnregisterSharedModuleReq(PVM pVM, VMCPUID idCpu, PGMMUNREGISTERSHAREDMODULEREQ pReq)
3805{
3806 /*
3807 * Validate input and pass it on.
3808 */
3809 AssertPtrReturn(pVM, VERR_INVALID_POINTER);
3810 AssertPtrReturn(pReq, VERR_INVALID_POINTER);
3811 AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
3812
3813 return GMMR0UnregisterSharedModule(pVM, idCpu, pReq->szName, pReq->szVersion, pReq->GCBaseAddr, pReq->cbModule);
3814}
3815
3816
3817#ifdef VBOX_WITH_PAGE_SHARING
3818/**
3819 * Checks specified shared module range for changes
3820 *
3821 * Performs the following tasks:
3822 * - if a shared page is new, then it changes the GMM page type to shared and returns it in the paPageDesc array
3823 * - if a shared page already exists, then it checks if the VM page is identical and if so frees the VM page and returns the shared page in the paPageDesc array
3824 *
3825 * Note: assumes the caller has acquired the GMM semaphore!!
3826 *
3827 * @returns VBox status code.
3829 * @param pGVM Pointer to the GVM instance data.
3830 * @param pModule Module description
3831 * @param idxRegion Region index
3832 * @param cPages Number of entries in the paPageDesc array
3833 * @param paPageDesc Page descriptor array (in/out)
3834 */
3835GMMR0DECL(int) GMMR0SharedModuleCheckRange(PGVM pGVM, PGMMSHAREDMODULE pModule, unsigned idxRegion, unsigned cPages, PGMMSHAREDPAGEDESC paPageDesc)
3836{
3837 int rc = VINF_SUCCESS;
3838 PGMM pGMM;
3839 GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
3840
3841 AssertReturn(idxRegion < pModule->cRegions, VERR_INVALID_PARAMETER);
3842 AssertReturn(cPages == (pModule->aRegions[idxRegion].cbRegion >> PAGE_SHIFT), VERR_INVALID_PARAMETER);
3843
3844 Log(("GMMR0SharedModuleCheckRange %s base %RGv region %d cPages %d\n", pModule->szName, pModule->Core.Key, idxRegion, cPages));
3845
3846 PGMMSHAREDREGIONDESC pGlobalRegion = &pModule->aRegions[idxRegion];
3847
3848 if (!pGlobalRegion->paHCPhysPageID)
3849 {
3850 /* First time; create a page descriptor array. */
3851 Log(("Allocate page descriptor array for %d pages\n", cPages));
3852 pGlobalRegion->paHCPhysPageID = (uint32_t *)RTMemAlloc(cPages * sizeof(*pGlobalRegion->paHCPhysPageID));
3853 if (!pGlobalRegion->paHCPhysPageID)
3854 {
3855 AssertFailed();
3856 rc = VERR_NO_MEMORY;
3857 goto end;
3858 }
3859 /* Invalidate all descriptors. */
3860 for (unsigned i = 0; i < cPages; i++)
3861 pGlobalRegion->paHCPhysPageID[i] = NIL_GMM_PAGEID;
3862 }
3863
3864 /* Check all pages in the region. */
3865 for (unsigned i = 0; i < cPages; i++)
3866 {
3867 /* Valid page present? */
3868 if (paPageDesc[i].uHCPhysPageId != NIL_GMM_PAGEID)
3869 {
3870 /* Is this the first time we've seen this shared page? */
3871 if (pGlobalRegion->paHCPhysPageID[i] == NIL_GMM_PAGEID)
3872 {
3873new_shared_page:
3874 Log(("New shared page guest %RGp host %RHp\n", paPageDesc[i].GCPhys, paPageDesc[i].HCPhys));
3875
3876 /* Easy case: just change the internal page type. */
3877 PGMMPAGE pPage = gmmR0GetPage(pGMM, paPageDesc[i].uHCPhysPageId);
3878 if (!pPage)
3879 {
3880 AssertFailed();
3881 rc = VERR_PGM_PHYS_INVALID_PAGE_ID;
3882 goto end;
3883 }
3884
3885 AssertMsg(paPageDesc[i].GCPhys == (pPage->Private.pfn << 12), ("desc %RGp gmm %RGp\n", paPageDesc[i].GCPhys, (pPage->Private.pfn << 12)));
3886
3887 gmmR0ConvertToSharedPage(pGMM, pGVM, paPageDesc[i].HCPhys, paPageDesc[i].uHCPhysPageId, pPage);
3888
3889 /* Keep track of these references. */
3890 pGlobalRegion->paHCPhysPageID[i] = paPageDesc[i].uHCPhysPageId;
3891 }
3892 else
3893 {
3894 uint8_t *pbLocalPage, *pbSharedPage;
3895 uint8_t *pbChunk;
3896 PGMMCHUNK pChunk;
3897
3898 Assert(paPageDesc[i].uHCPhysPageId != pGlobalRegion->paHCPhysPageID[i]);
3899
3900 Log(("Replace existing page guest %RGp host %RHp id %x -> id %x\n", paPageDesc[i].GCPhys, paPageDesc[i].HCPhys, paPageDesc[i].uHCPhysPageId, pGlobalRegion->paHCPhysPageID[i]));
3901
3902 /* Get the shared page source. */
3903 PGMMPAGE pPage = gmmR0GetPage(pGMM, pGlobalRegion->paHCPhysPageID[i]);
3904 if (!pPage)
3905 {
3906 AssertFailed();
3907 rc = VERR_PGM_PHYS_INVALID_PAGE_ID;
3908 goto end;
3909 }
3910 if (pPage->Common.u2State != GMM_PAGE_STATE_SHARED)
3911 {
3912 /* Page was freed at some point; invalidate this entry. */
3913 /** @todo this isn't really bullet proof. */
3914 Log(("Old shared page was freed -> create a new one\n"));
3915 pGlobalRegion->paHCPhysPageID[i] = NIL_GMM_PAGEID;
3916 goto new_shared_page; /* ugly goto */
3917 }
3918
3919 Log(("Replace existing page guest host %RHp -> %RHp\n", paPageDesc[i].HCPhys, ((uint64_t)pPage->Shared.pfn) << PAGE_SHIFT));
3920
3921 /* Calculate the virtual address of the local page. */
3922 pChunk = gmmR0GetChunk(pGMM, paPageDesc[i].uHCPhysPageId >> GMM_CHUNKID_SHIFT);
3923 if (pChunk)
3924 {
3925 if (!gmmR0IsChunkMapped(pGVM, pChunk, (PRTR3PTR)&pbChunk))
3926 {
3927 AssertFailed();
3928 rc = VERR_PGM_PHYS_INVALID_PAGE_ID;
3929 goto end;
3930 }
3931 pbLocalPage = pbChunk + ((paPageDesc[i].uHCPhysPageId & GMM_PAGEID_IDX_MASK) << PAGE_SHIFT);
3932 }
3933 else
3934 {
3935 AssertFailed();
3936 rc = VERR_PGM_PHYS_INVALID_PAGE_ID;
3937 goto end;
3938 }
3939
3940 /* Calculate the virtual address of the shared page. */
3941 pChunk = gmmR0GetChunk(pGMM, pGlobalRegion->paHCPhysPageID[i] >> GMM_CHUNKID_SHIFT);
3942 Assert(pChunk); /* can't fail as gmmR0GetPage succeeded. */
3943
3944 /* Get the virtual address of the physical page; map the chunk into the VM process if not already done. */
3945 if (!gmmR0IsChunkMapped(pGVM, pChunk, (PRTR3PTR)&pbChunk))
3946 {
3947 Log(("Map chunk into process!\n"));
3948 rc = gmmR0MapChunk(pGMM, pGVM, pChunk, (PRTR3PTR)&pbChunk);
3949 if (rc != VINF_SUCCESS)
3950 {
3951 AssertRC(rc);
3952 goto end;
3953 }
3954 }
3955 pbSharedPage = pbChunk + ((pGlobalRegion->paHCPhysPageID[i] & GMM_PAGEID_IDX_MASK) << PAGE_SHIFT);
3956
3957 /** @todo write ASMMemComparePage. */
3958 if (memcmp(pbSharedPage, pbLocalPage, PAGE_SIZE))
3959 {
3960 Log(("Unexpected differences found between local and shared page; skip\n"));
3961 /* Signal to the caller that this one hasn't changed. */
3962 paPageDesc[i].uHCPhysPageId = NIL_GMM_PAGEID;
3963 continue;
3964 }
3965
3966 /* Free the old local page. */
3967 GMMFREEPAGEDESC PageDesc;
3968
3969 PageDesc.idPage = paPageDesc[i].uHCPhysPageId;
3970 rc = gmmR0FreePages(pGMM, pGVM, 1, &PageDesc, GMMACCOUNT_BASE);
3971 AssertRC(rc);
3972
3973 gmmR0UseSharedPage(pGMM, pGVM, pPage);
3974
3975 /* Pass along the new physical address & page id. */
3976 paPageDesc[i].HCPhys = ((uint64_t)pPage->Shared.pfn) << PAGE_SHIFT;
3977 paPageDesc[i].uHCPhysPageId = pGlobalRegion->paHCPhysPageID[i];
3978 }
3979 }
3980 }
3981end:
3982 return rc;
3983}
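
/*
 * A sketch of how a caller might prepare the in/out page descriptor array for
 * one region before calling GMMR0SharedModuleCheckRange above. The descriptor
 * field names come from this file; gmmExampleQueryGuestPage() is a hypothetical
 * stand-in for however the caller resolves its guest pages to host addresses
 * and GMM page IDs.
 *
 * @code
 * uint32_t           cPages     = pModule->aRegions[idxRegion].cbRegion >> PAGE_SHIFT;
 * PGMMSHAREDPAGEDESC paPageDesc = (PGMMSHAREDPAGEDESC)RTMemAllocZ(cPages * sizeof(paPageDesc[0]));
 * if (!paPageDesc)
 *     return VERR_NO_MEMORY;
 * for (uint32_t i = 0; i < cPages; i++)
 *     gmmExampleQueryGuestPage(pGVM, pModule->aRegions[idxRegion].GCRegionAddr + (i << PAGE_SHIFT),
 *                              &paPageDesc[i].GCPhys, &paPageDesc[i].HCPhys,
 *                              &paPageDesc[i].uHCPhysPageId);   // NIL_GMM_PAGEID if not present
 * int rc = GMMR0SharedModuleCheckRange(pGVM, pModule, idxRegion, cPages, paPageDesc);
 * // On success, entries with uHCPhysPageId != NIL_GMM_PAGEID now refer to the shared copy.
 * RTMemFree(paPageDesc);
 * @endcode
 */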
3984
3985/**
3986 * RTAvlGCPtrDestroy callback.
3987 *
3988 * @returns 0
3989 * @param pNode The node to destroy.
3990 * @param pvGVM The GVM handle.
3991 */
3992static DECLCALLBACK(int) gmmR0CleanupSharedModule(PAVLGCPTRNODECORE pNode, void *pvGVM)
3993{
3994 PGVM pGVM = (PGVM)pvGVM;
3995 PGMMSHAREDMODULEPERVM pRecVM = (PGMMSHAREDMODULEPERVM)pNode;
3996 PGMM pGMM;
3997 GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
3998
3999 Assert(pRecVM->pGlobalModule || pRecVM->fCollision);
4000 if (pRecVM->pGlobalModule)
4001 {
4002 PGMMSHAREDMODULE pRec = pRecVM->pGlobalModule;
4003 Assert(pRec);
4004 Assert(pRec->cUsers);
4005
4006 Log(("gmmR0CleanupSharedModule: %s %s cUsers=%d\n", pRec->szName, pRec->szVersion, pRec->cUsers));
4007 pRec->cUsers--;
4008 if (pRec->cUsers == 0)
4009 {
4010 for (unsigned i = 0; i < pRec->cRegions; i++)
4011 if (pRec->aRegions[i].paHCPhysPageID)
4012 RTMemFree(pRec->aRegions[i].paHCPhysPageID);
4013
4014 /* Remove from the tree and free memory. */
4015 RTAvlGCPtrRemove(&pGMM->pGlobalSharedModuleTree, pRec->Core.Key);
4016 RTMemFree(pRec);
4017 }
4018 }
4019 RTMemFree(pRecVM);
4020 return 0;
4021}
4022#endif
4023
4024/**
4025 * Removes all shared modules for the specified VM
4026 *
4027 * @returns VBox status code.
4028 * @param pVM VM handle
4029 * @param idCpu VCPU id
4030 */
4031GMMR0DECL(int) GMMR0ResetSharedModules(PVM pVM, VMCPUID idCpu)
4032{
4033#ifdef VBOX_WITH_PAGE_SHARING
4034 /*
4035 * Validate input and get the basics.
4036 */
4037 PGMM pGMM;
4038 GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
4039 PGVM pGVM;
4040 int rc = GVMMR0ByVMAndEMT(pVM, idCpu, &pGVM);
4041 if (RT_FAILURE(rc))
4042 return rc;
4043
4044 /*
4045 * Take the semaphore and do some more validations.
4046 */
4047 rc = RTSemFastMutexRequest(pGMM->Mtx);
4048 AssertRC(rc);
4049 if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
4050 {
4051 Log(("GMMR0ResetSharedModules\n"));
4052 RTAvlGCPtrDestroy(&pGVM->gmm.s.pSharedModuleTree, gmmR0CleanupSharedModule, pGVM);
4053
4054 rc = VINF_SUCCESS;
4055 GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
4056 }
4057 else
4058 rc = VERR_INTERNAL_ERROR_5;
4059
4060 RTSemFastMutexRelease(pGMM->Mtx);
4061 return rc;
4062#else
4063 return VERR_NOT_IMPLEMENTED;
4064#endif
4065}
4066
4067#ifdef VBOX_WITH_PAGE_SHARING
4068typedef struct
4069{
4070 PGVM pGVM;
4071 VMCPUID idCpu;
4072} GMMCHECKSHAREDMODULEINFO, *PGMMCHECKSHAREDMODULEINFO;
4073
4074/**
4075 * Tree enumeration callback for checking a shared module.
4076 */
4077DECLCALLBACK(int) gmmR0CheckSharedModule(PAVLGCPTRNODECORE pNode, void *pvUser)
4078{
4079 PGMMCHECKSHAREDMODULEINFO pInfo = (PGMMCHECKSHAREDMODULEINFO)pvUser;
4080 PGMMSHAREDMODULEPERVM pLocalModule = (PGMMSHAREDMODULEPERVM)pNode;
4081 PGMMSHAREDMODULE pGlobalModule = pLocalModule->pGlobalModule;
4082
4083 if ( !pLocalModule->fCollision
4084 && pGlobalModule)
4085 {
4086 Log(("gmmR0CheckSharedModule: check %s %s base=%RGv size=%x collision=%d\n", pGlobalModule->szName, pGlobalModule->szVersion, pGlobalModule->Core.Key, pGlobalModule->cbModule, pLocalModule->fCollision));
4087 PGMR0SharedModuleCheck(pInfo->pGVM->pVM, pInfo->pGVM, pInfo->idCpu, pGlobalModule, pLocalModule->cRegions, pLocalModule->aRegions);
4088 }
4089 return 0;
4090}
4091#endif
4092
4093#ifdef DEBUG_sandervl
4094/**
4095 * Setup for a GMMR0CheckSharedModules call (to allow log flush jumps back to ring 3)
4096 *
4097 * @returns VBox status code.
4098 * @param pVM VM handle
4099 */
4100GMMR0DECL(int) GMMR0CheckSharedModulesStart(PVM pVM)
4101{
4102 /*
4103 * Validate input and get the basics.
4104 */
4105 PGMM pGMM;
4106 GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
4107
4108 /*
4109 * Take the semaphore and do some more validations.
4110 */
4111 int rc = RTSemFastMutexRequest(pGMM->Mtx);
4112 AssertRC(rc);
4113 if (!GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
4114 rc = VERR_INTERNAL_ERROR_5;
4115 else
4116 rc = VINF_SUCCESS;
4117
4118 return rc;
4119}
4120
4121/**
4122 * Clean up after a GMMR0CheckSharedModules call (to allow log flush jumps back to ring 3)
4123 *
4124 * @returns VBox status code.
4125 * @param pVM VM handle
4126 */
4127GMMR0DECL(int) GMMR0CheckSharedModulesEnd(PVM pVM)
4128{
4129 /*
4130 * Validate input and get the basics.
4131 */
4132 PGMM pGMM;
4133 GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
4134
4135 RTSemFastMutexRelease(pGMM->Mtx);
4136 return VINF_SUCCESS;
4137}
4138#endif
4139
4140/**
4141 * Check all shared modules for the specified VM
4142 *
4143 * @returns VBox status code.
4144 * @param pVM VM handle
4145 * @param pVCpu VMCPU handle
4146 */
4147GMMR0DECL(int) GMMR0CheckSharedModules(PVM pVM, PVMCPU pVCpu)
4148{
4149#ifdef VBOX_WITH_PAGE_SHARING
4150 /*
4151 * Validate input and get the basics.
4152 */
4153 PGMM pGMM;
4154 GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
4155 PGVM pGVM;
4156 int rc = GVMMR0ByVMAndEMT(pVM, pVCpu->idCpu, &pGVM);
4157 if (RT_FAILURE(rc))
4158 return rc;
4159
4160# ifndef DEBUG_sandervl
4161 /*
4162 * Take the semaphore and do some more validations.
4163 */
4164 rc = RTSemFastMutexRequest(pGMM->Mtx);
4165 AssertRC(rc);
4166# endif
4167 if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
4168 {
4169 GMMCHECKSHAREDMODULEINFO Info;
4170
4171 Log(("GMMR0CheckSharedModules\n"));
4172 Info.pGVM = pGVM;
4173 Info.idCpu = pVCpu->idCpu;
4174
4175 RTAvlGCPtrDoWithAll(&pGVM->gmm.s.pSharedModuleTree, true /* fFromLeft */, gmmR0CheckSharedModule, &Info);
4176 rc = VINF_SUCCESS;
4177
4178 Log(("GMMR0CheckSharedModules done!\n"));
4179
4180 GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
4181 }
4182 else
4183 rc = VERR_INTERNAL_ERROR_5;
4184
4185# ifndef DEBUG_sandervl
4186 RTSemFastMutexRelease(pGMM->Mtx);
4187# endif
4188 return rc;
4189#else
4190 return VERR_NOT_IMPLEMENTED;
4191#endif
4192}
4193
4194#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
4195typedef struct
4196{
4197 PGVM pGVM;
4198 PGMM pGMM;
4199 uint8_t *pSourcePage;
4200 bool fFoundDuplicate;
4201} GMMFINDDUPPAGEINFO, *PGMMFINDDUPPAGEINFO;
4202
4203/**
4204 * RTAvlU32DoWithAll callback.
4205 *
4206 * @returns 0 to continue the enumeration, 1 to stop it (duplicate found).
4207 * @param pNode The node to search.
4208 * @param pvInfo Pointer to the input parameters
4209 */
4210static DECLCALLBACK(int) gmmR0FindDupPageInChunk(PAVLU32NODECORE pNode, void *pvInfo)
4211{
4212 PGMMCHUNK pChunk = (PGMMCHUNK)pNode;
4213 PGMMFINDDUPPAGEINFO pInfo = (PGMMFINDDUPPAGEINFO)pvInfo;
4214 PGVM pGVM = pInfo->pGVM;
4215 PGMM pGMM = pInfo->pGMM;
4216 uint8_t *pbChunk;
4217
4218 /* Only take chunks not mapped into this VM process; not entirely correct. */
4219 if (!gmmR0IsChunkMapped(pGVM, pChunk, (PRTR3PTR)&pbChunk))
4220 {
4221 int rc = gmmR0MapChunk(pGMM, pGVM, pChunk, (PRTR3PTR)&pbChunk);
4222 if (rc != VINF_SUCCESS)
4223 goto end;
4224
4225 /*
4226 * Look for duplicate pages
4227 */
4228 unsigned iPage = (GMM_CHUNK_SIZE >> PAGE_SHIFT);
4229 while (iPage-- > 0)
4230 {
4231 if (GMM_PAGE_IS_PRIVATE(&pChunk->aPages[iPage]))
4232 {
4233 uint8_t *pbDestPage = pbChunk + (iPage << PAGE_SHIFT);
4234
4235 if (!memcmp(pInfo->pSourcePage, pbDestPage, PAGE_SIZE))
4236 {
4237 pInfo->fFoundDuplicate = true;
4238 break;
4239 }
4240 }
4241 }
4242 gmmR0UnmapChunk(pGMM, pGVM, pChunk);
4243 }
4244end:
4245 if (pInfo->fFoundDuplicate)
4246 return 1; /* stop search */
4247 else
4248 return 0;
4249}
4250
4251/**
4252 * Find a duplicate of the specified page in other active VMs
4253 *
4254 * @returns VBox status code.
4255 * @param pVM VM handle
4256 * @param pReq Request packet
4257 */
4258GMMR0DECL(int) GMMR0FindDuplicatePageReq(PVM pVM, PGMMFINDDUPLICATEPAGEREQ pReq)
4259{
4260 /*
4261 * Validate input and pass it on.
4262 */
4263 AssertPtrReturn(pVM, VERR_INVALID_POINTER);
4264 AssertPtrReturn(pReq, VERR_INVALID_POINTER);
4265 AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x != %#x\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
4266
4267 PGMM pGMM;
4268 GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
4269
4270 /*
4271 * Take the semaphore and do some more validations.
4272 */
4273 int rc = RTSemFastMutexRequest(pGMM->Mtx);
4274 AssertRC(rc);
4275 if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
4276 {
4277 PGVM pGVM;
4278 rc = GVMMR0ByVM(pVM, &pGVM);
4279 if (RT_FAILURE(rc))
4280 goto end;
4281
4282 uint8_t *pbChunk;
4283 PGMMCHUNK pChunk = gmmR0GetChunk(pGMM, pReq->idPage >> GMM_CHUNKID_SHIFT);
4284 if (!pChunk)
4285 {
4286 AssertFailed();
4287 goto end;
4288 }
4289
4290 if (!gmmR0IsChunkMapped(pGVM, pChunk, (PRTR3PTR)&pbChunk))
4291 {
4292 AssertFailed();
4293 goto end;
4294 }
4295
4296 uint8_t *pbSourcePage = pbChunk + ((pReq->idPage & GMM_PAGEID_IDX_MASK) << PAGE_SHIFT);
4297
4298 PGMMPAGE pPage = gmmR0GetPage(pGMM, pReq->idPage);
4299 if (!pPage)
4300 {
4301 AssertFailed();
4302 rc = VERR_PGM_PHYS_INVALID_PAGE_ID;
4303 goto end;
4304 }
4305 GMMFINDDUPPAGEINFO Info;
4306
4307 Info.pGVM = pGVM;
4308 Info.pGMM = pGMM;
4309 Info.pSourcePage = pbSourcePage;
4310 Info.fFoundDuplicate = false;
4311 RTAvlU32DoWithAll(&pGMM->pChunks, true /* fFromLeft */, gmmR0FindDupPageInChunk, &Info);
4312
4313 pReq->fDuplicate = Info.fFoundDuplicate;
4314 }
4315 else
4316 rc = VERR_INTERNAL_ERROR_5;
4317
4318end:
4319 RTSemFastMutexRelease(pGMM->Mtx);
4320 return rc;
4321}
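
/*
 * A sketch of the request for the strict-build duplicate page lookup above.
 * Only idPage goes in and fDuplicate comes back; the header initialisation and
 * the idPage value are assumptions of the sketch, and in practice the function
 * is reached via the VMMR0 request dispatcher rather than called directly.
 *
 * @code
 * GMMFINDDUPLICATEPAGEREQ Req;
 * Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
 * Req.Hdr.cbReq    = sizeof(Req);
 * Req.idPage       = idPage;                   // page to look for in the other chunks
 * Req.fDuplicate   = false;
 * int rc = GMMR0FindDuplicatePageReq(pVM, &Req);   // normally via the VMMR0 dispatcher
 * if (RT_SUCCESS(rc) && Req.fDuplicate)
 *     Log(("Page %#x has a duplicate elsewhere\n", idPage));
 * @endcode
 */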
4322
4323#endif /* VBOX_STRICT && HC_ARCH_BITS == 64 */
4324